Diffstat (limited to 'include/asm-sparc')
-rw-r--r--  include/asm-sparc/Kbuild | 15
-rw-r--r--  include/asm-sparc/agp.h | 20
-rw-r--r--  include/asm-sparc/apb.h | 36
-rw-r--r--  include/asm-sparc/asi.h | 153
-rw-r--r--  include/asm-sparc/atomic.h | 169
-rw-r--r--  include/asm-sparc/atomic_32.h | 165
-rw-r--r--  include/asm-sparc/atomic_64.h | 128
-rw-r--r--  include/asm-sparc/auxio.h | 97
-rw-r--r--  include/asm-sparc/auxio_32.h | 89
-rw-r--r--  include/asm-sparc/auxio_64.h | 100
-rw-r--r--  include/asm-sparc/backoff.h | 31
-rw-r--r--  include/asm-sparc/bbc.h | 225
-rw-r--r--  include/asm-sparc/bitops.h | 117
-rw-r--r--  include/asm-sparc/bitops_32.h | 111
-rw-r--r--  include/asm-sparc/bitops_64.h | 107
-rw-r--r--  include/asm-sparc/cacheflush.h | 93
-rw-r--r--  include/asm-sparc/cacheflush_32.h | 85
-rw-r--r--  include/asm-sparc/cacheflush_64.h | 76
-rw-r--r--  include/asm-sparc/chafsr.h | 241
-rw-r--r--  include/asm-sparc/checksum.h | 249
-rw-r--r--  include/asm-sparc/checksum_32.h | 241
-rw-r--r--  include/asm-sparc/checksum_64.h | 167
-rw-r--r--  include/asm-sparc/chmctrl.h | 183
-rw-r--r--  include/asm-sparc/cmt.h | 59
-rw-r--r--  include/asm-sparc/compat.h | 243
-rw-r--r--  include/asm-sparc/compat_signal.h | 29
-rw-r--r--  include/asm-sparc/cpudata.h | 35
-rw-r--r--  include/asm-sparc/cpudata_32.h | 27
-rw-r--r--  include/asm-sparc/cpudata_64.h | 240
-rw-r--r--  include/asm-sparc/dcr.h | 14
-rw-r--r--  include/asm-sparc/dcu.h | 27
-rw-r--r--  include/asm-sparc/delay.h | 42
-rw-r--r--  include/asm-sparc/delay_32.h | 34
-rw-r--r--  include/asm-sparc/delay_64.h | 17
-rw-r--r--  include/asm-sparc/display7seg.h | 79
-rw-r--r--  include/asm-sparc/dma-mapping.h | 17
-rw-r--r--  include/asm-sparc/dma-mapping_32.h | 11
-rw-r--r--  include/asm-sparc/dma-mapping_64.h | 154
-rw-r--r--  include/asm-sparc/dma.h | 290
-rw-r--r--  include/asm-sparc/dma_32.h | 288
-rw-r--r--  include/asm-sparc/dma_64.h | 205
-rw-r--r--  include/asm-sparc/ebus.h | 105
-rw-r--r--  include/asm-sparc/ebus_32.h | 99
-rw-r--r--  include/asm-sparc/ebus_64.h | 94
-rw-r--r--  include/asm-sparc/elf.h | 149
-rw-r--r--  include/asm-sparc/elf_32.h | 145
-rw-r--r--  include/asm-sparc/elf_64.h | 217
-rw-r--r--  include/asm-sparc/envctrl.h | 103
-rw-r--r--  include/asm-sparc/estate.h | 49
-rw-r--r--  include/asm-sparc/fbio.h | 37
-rw-r--r--  include/asm-sparc/fcntl.h | 4
-rw-r--r--  include/asm-sparc/fhc.h | 121
-rw-r--r--  include/asm-sparc/floppy.h | 394
-rw-r--r--  include/asm-sparc/floppy_32.h | 388
-rw-r--r--  include/asm-sparc/floppy_64.h | 782
-rw-r--r--  include/asm-sparc/fpumacro.h | 33
-rw-r--r--  include/asm-sparc/futex.h | 12
-rw-r--r--  include/asm-sparc/futex_32.h | 6
-rw-r--r--  include/asm-sparc/futex_64.h | 110
-rw-r--r--  include/asm-sparc/hardirq.h | 31
-rw-r--r--  include/asm-sparc/hardirq_32.h | 23
-rw-r--r--  include/asm-sparc/hardirq_64.h | 19
-rw-r--r--  include/asm-sparc/head.h | 106
-rw-r--r--  include/asm-sparc/head_32.h | 102
-rw-r--r--  include/asm-sparc/head_64.h | 76
-rw-r--r--  include/asm-sparc/hugetlb.h | 85
-rw-r--r--  include/asm-sparc/hvtramp.h | 37
-rw-r--r--  include/asm-sparc/hypervisor.h | 2949
-rw-r--r--  include/asm-sparc/ide.h | 58
-rw-r--r--  include/asm-sparc/idprom.h | 2
-rw-r--r--  include/asm-sparc/intr_queue.h | 15
-rw-r--r--  include/asm-sparc/io.h | 331
-rw-r--r--  include/asm-sparc/io_32.h | 326
-rw-r--r--  include/asm-sparc/io_64.h | 511
-rw-r--r--  include/asm-sparc/ioctls.h | 4
-rw-r--r--  include/asm-sparc/iommu.h | 129
-rw-r--r--  include/asm-sparc/iommu_32.h | 121
-rw-r--r--  include/asm-sparc/iommu_64.h | 62
-rw-r--r--  include/asm-sparc/ipcbuf.h | 39
-rw-r--r--  include/asm-sparc/ipcbuf_32.h | 31
-rw-r--r--  include/asm-sparc/ipcbuf_64.h | 28
-rw-r--r--  include/asm-sparc/irq.h | 21
-rw-r--r--  include/asm-sparc/irq_32.h | 15
-rw-r--r--  include/asm-sparc/irq_64.h | 93
-rw-r--r--  include/asm-sparc/irqflags.h | 47
-rw-r--r--  include/asm-sparc/irqflags_32.h | 39
-rw-r--r--  include/asm-sparc/irqflags_64.h | 89
-rw-r--r--  include/asm-sparc/kdebug.h | 81
-rw-r--r--  include/asm-sparc/kdebug_32.h | 73
-rw-r--r--  include/asm-sparc/kdebug_64.h | 19
-rw-r--r--  include/asm-sparc/kmap_types.h | 4
-rw-r--r--  include/asm-sparc/kprobes.h | 49
-rw-r--r--  include/asm-sparc/kvm.h | 6
-rw-r--r--  include/asm-sparc/ldc.h | 138
-rw-r--r--  include/asm-sparc/lmb.h | 10
-rw-r--r--  include/asm-sparc/lsu.h | 19
-rw-r--r--  include/asm-sparc/machines.h | 2
-rw-r--r--  include/asm-sparc/mbus.h | 2
-rw-r--r--  include/asm-sparc/mc146818rtc.h | 35
-rw-r--r--  include/asm-sparc/mc146818rtc_32.h | 29
-rw-r--r--  include/asm-sparc/mc146818rtc_64.h | 34
-rw-r--r--  include/asm-sparc/mdesc.h | 78
-rw-r--r--  include/asm-sparc/mmu.h | 13
-rw-r--r--  include/asm-sparc/mmu_32.h | 7
-rw-r--r--  include/asm-sparc/mmu_64.h | 123
-rw-r--r--  include/asm-sparc/mmu_context.h | 50
-rw-r--r--  include/asm-sparc/mmu_context_32.h | 42
-rw-r--r--  include/asm-sparc/mmu_context_64.h | 155
-rw-r--r--  include/asm-sparc/mmzone.h | 17
-rw-r--r--  include/asm-sparc/module.h | 15
-rw-r--r--  include/asm-sparc/module_32.h | 7
-rw-r--r--  include/asm-sparc/module_64.h | 7
-rw-r--r--  include/asm-sparc/mostek.h | 175
-rw-r--r--  include/asm-sparc/mostek_32.h | 171
-rw-r--r--  include/asm-sparc/mostek_64.h | 143
-rw-r--r--  include/asm-sparc/msgbuf.h | 23
-rw-r--r--  include/asm-sparc/namei.h | 13
-rw-r--r--  include/asm-sparc/ns87303.h | 118
-rw-r--r--  include/asm-sparc/of_platform.h | 32
-rw-r--r--  include/asm-sparc/of_platform_32.h | 24
-rw-r--r--  include/asm-sparc/of_platform_64.h | 25
-rw-r--r--  include/asm-sparc/openprom.h | 265
-rw-r--r--  include/asm-sparc/openprom_32.h | 255
-rw-r--r--  include/asm-sparc/openprom_64.h | 280
-rw-r--r--  include/asm-sparc/oplib.h | 279
-rw-r--r--  include/asm-sparc/oplib_32.h | 272
-rw-r--r--  include/asm-sparc/oplib_64.h | 322
-rw-r--r--  include/asm-sparc/page.h | 167
-rw-r--r--  include/asm-sparc/page_32.h | 160
-rw-r--r--  include/asm-sparc/page_64.h | 135
-rw-r--r--  include/asm-sparc/parport.h | 246
-rw-r--r--  include/asm-sparc/pci.h | 176
-rw-r--r--  include/asm-sparc/pci_32.h | 171
-rw-r--r--  include/asm-sparc/pci_64.h | 210
-rw-r--r--  include/asm-sparc/percpu.h | 14
-rw-r--r--  include/asm-sparc/percpu_32.h | 6
-rw-r--r--  include/asm-sparc/percpu_64.h | 28
-rw-r--r--  include/asm-sparc/pgalloc.h | 76
-rw-r--r--  include/asm-sparc/pgalloc_32.h | 68
-rw-r--r--  include/asm-sparc/pgalloc_64.h | 81
-rw-r--r--  include/asm-sparc/pgtable.h | 477
-rw-r--r--  include/asm-sparc/pgtable_32.h | 480
-rw-r--r--  include/asm-sparc/pgtable_64.h | 775
-rw-r--r--  include/asm-sparc/pil.h | 22
-rw-r--r--  include/asm-sparc/posix_types.h | 124
-rw-r--r--  include/asm-sparc/posix_types_32.h | 118
-rw-r--r--  include/asm-sparc/posix_types_64.h | 122
-rw-r--r--  include/asm-sparc/processor.h | 132
-rw-r--r--  include/asm-sparc/processor_32.h | 128
-rw-r--r--  include/asm-sparc/processor_64.h | 237
-rw-r--r--  include/asm-sparc/psrcompat.h | 45
-rw-r--r--  include/asm-sparc/pstate.h | 91
-rw-r--r--  include/asm-sparc/ptrace.h | 179
-rw-r--r--  include/asm-sparc/ptrace_32.h | 175
-rw-r--r--  include/asm-sparc/ptrace_64.h | 346
-rw-r--r--  include/asm-sparc/reboot.h | 6
-rw-r--r--  include/asm-sparc/reg.h | 87
-rw-r--r--  include/asm-sparc/reg_32.h | 79
-rw-r--r--  include/asm-sparc/reg_64.h | 56
-rw-r--r--  include/asm-sparc/resource.h | 6
-rw-r--r--  include/asm-sparc/rwsem-const.h | 12
-rw-r--r--  include/asm-sparc/rwsem.h | 84
-rw-r--r--  include/asm-sparc/sbus.h | 161
-rw-r--r--  include/asm-sparc/sbus_32.h | 153
-rw-r--r--  include/asm-sparc/sbus_64.h | 190
-rw-r--r--  include/asm-sparc/scatterlist.h | 32
-rw-r--r--  include/asm-sparc/scatterlist_32.h | 26
-rw-r--r--  include/asm-sparc/scatterlist_64.h | 27
-rw-r--r--  include/asm-sparc/scratchpad.h | 14
-rw-r--r--  include/asm-sparc/seccomp.h | 21
-rw-r--r--  include/asm-sparc/sections.h | 12
-rw-r--r--  include/asm-sparc/sections_32.h | 6
-rw-r--r--  include/asm-sparc/sections_64.h | 9
-rw-r--r--  include/asm-sparc/semaphore.h | 1
-rw-r--r--  include/asm-sparc/sembuf.h | 12
-rw-r--r--  include/asm-sparc/setup.h | 6
-rw-r--r--  include/asm-sparc/sfafsr.h | 82
-rw-r--r--  include/asm-sparc/sfp-machine.h | 214
-rw-r--r--  include/asm-sparc/sfp-machine_32.h | 212
-rw-r--r--  include/asm-sparc/sfp-machine_64.h | 93
-rw-r--r--  include/asm-sparc/shmbuf.h | 14
-rw-r--r--  include/asm-sparc/shmparam.h | 19
-rw-r--r--  include/asm-sparc/shmparam_32.h | 11
-rw-r--r--  include/asm-sparc/shmparam_64.h | 10
-rw-r--r--  include/asm-sparc/sigcontext.h | 70
-rw-r--r--  include/asm-sparc/sigcontext_32.h | 62
-rw-r--r--  include/asm-sparc/sigcontext_64.h | 87
-rw-r--r--  include/asm-sparc/siginfo.h | 25
-rw-r--r--  include/asm-sparc/siginfo_32.h | 17
-rw-r--r--  include/asm-sparc/siginfo_64.h | 32
-rw-r--r--  include/asm-sparc/signal.h | 209
-rw-r--r--  include/asm-sparc/signal_32.h | 207
-rw-r--r--  include/asm-sparc/signal_64.h | 194
-rw-r--r--  include/asm-sparc/smp.h | 177
-rw-r--r--  include/asm-sparc/smp_32.h | 173
-rw-r--r--  include/asm-sparc/smp_64.h | 67
-rw-r--r--  include/asm-sparc/sparsemem.h | 12
-rw-r--r--  include/asm-sparc/spinlock.h | 200
-rw-r--r--  include/asm-sparc/spinlock_32.h | 192
-rw-r--r--  include/asm-sparc/spinlock_64.h | 250
-rw-r--r--  include/asm-sparc/spinlock_types.h | 2
-rw-r--r--  include/asm-sparc/spitfire.h | 342
-rw-r--r--  include/asm-sparc/sstate.h | 13
-rw-r--r--  include/asm-sparc/stacktrace.h | 6
-rw-r--r--  include/asm-sparc/starfire.h | 21
-rw-r--r--  include/asm-sparc/stat.h | 82
-rw-r--r--  include/asm-sparc/stat_32.h | 76
-rw-r--r--  include/asm-sparc/stat_64.h | 47
-rw-r--r--  include/asm-sparc/statfs.h | 12
-rw-r--r--  include/asm-sparc/statfs_32.h | 6
-rw-r--r--  include/asm-sparc/statfs_64.h | 54
-rw-r--r--  include/asm-sparc/string.h | 213
-rw-r--r--  include/asm-sparc/string_32.h | 205
-rw-r--r--  include/asm-sparc/string_64.h | 83
-rw-r--r--  include/asm-sparc/syscalls.h | 13
-rw-r--r--  include/asm-sparc/system.h | 290
-rw-r--r--  include/asm-sparc/system_32.h | 288
-rw-r--r--  include/asm-sparc/system_64.h | 355
-rw-r--r--  include/asm-sparc/termbits.h | 5
-rw-r--r--  include/asm-sparc/termios.h | 79
-rw-r--r--  include/asm-sparc/thread_info.h | 157
-rw-r--r--  include/asm-sparc/thread_info_32.h | 153
-rw-r--r--  include/asm-sparc/thread_info_64.h | 279
-rw-r--r--  include/asm-sparc/timer.h | 113
-rw-r--r--  include/asm-sparc/timer_32.h | 107
-rw-r--r--  include/asm-sparc/timer_64.h | 30
-rw-r--r--  include/asm-sparc/timex.h | 21
-rw-r--r--  include/asm-sparc/timex_32.h | 15
-rw-r--r--  include/asm-sparc/timex_64.h | 19
-rw-r--r--  include/asm-sparc/tlb.h | 32
-rw-r--r--  include/asm-sparc/tlb_32.h | 24
-rw-r--r--  include/asm-sparc/tlb_64.h | 111
-rw-r--r--  include/asm-sparc/tlbflush.h | 68
-rw-r--r--  include/asm-sparc/tlbflush_32.h | 60
-rw-r--r--  include/asm-sparc/tlbflush_64.h | 44
-rw-r--r--  include/asm-sparc/topology.h | 14
-rw-r--r--  include/asm-sparc/topology_32.h | 6
-rw-r--r--  include/asm-sparc/topology_64.h | 86
-rw-r--r--  include/asm-sparc/tsb.h | 283
-rw-r--r--  include/asm-sparc/ttable.h | 658
-rw-r--r--  include/asm-sparc/types.h | 32
-rw-r--r--  include/asm-sparc/uaccess.h | 342
-rw-r--r--  include/asm-sparc/uaccess_32.h | 336
-rw-r--r--  include/asm-sparc/uaccess_64.h | 273
-rw-r--r--  include/asm-sparc/uctx.h | 71
-rw-r--r--  include/asm-sparc/unistd.h | 386
-rw-r--r--  include/asm-sparc/unistd_32.h | 384
-rw-r--r--  include/asm-sparc/unistd_64.h | 379
-rw-r--r--  include/asm-sparc/upa.h | 109
-rw-r--r--  include/asm-sparc/utrap.h | 51
-rw-r--r--  include/asm-sparc/vaddrs.h | 5
-rw-r--r--  include/asm-sparc/vio.h | 406
-rw-r--r--  include/asm-sparc/visasm.h | 62
-rw-r--r--  include/asm-sparc/watchdog.h | 31
-rw-r--r--  include/asm-sparc/xor.h | 277
-rw-r--r--  include/asm-sparc/xor_32.h | 269
-rw-r--r--  include/asm-sparc/xor_64.h | 70
257 files changed, 25256 insertions, 7675 deletions
diff --git a/include/asm-sparc/Kbuild b/include/asm-sparc/Kbuild
index 671223718f0a..6cdaf9d33b38 100644
--- a/include/asm-sparc/Kbuild
+++ b/include/asm-sparc/Kbuild
@@ -1,14 +1 @@
1include include/asm-generic/Kbuild.asm # dummy file to avoid breaking make headers_install
2
3header-y += apc.h
4header-y += asi.h
5header-y += bpp.h
6header-y += jsflash.h
7header-y += openpromio.h
8header-y += reg.h
9header-y += traps.h
10header-y += vfc_ioctls.h
11
12unifdef-y += fbio.h
13unifdef-y += perfctr.h
14unifdef-y += psr.h
diff --git a/include/asm-sparc/agp.h b/include/asm-sparc/agp.h
new file mode 100644
index 000000000000..c2456870b05c
--- /dev/null
+++ b/include/asm-sparc/agp.h
@@ -0,0 +1,20 @@
1#ifndef AGP_H
2#define AGP_H 1
3
4/* dummy for now */
5
6#define map_page_into_agp(page)
7#define unmap_page_from_agp(page)
8#define flush_agp_cache() mb()
9
10/* Convert a physical address to an address suitable for the GART. */
11#define phys_to_gart(x) (x)
12#define gart_to_phys(x) (x)
13
14/* GATT allocation. Returns/accepts GATT kernel virtual address. */
15#define alloc_gatt_pages(order) \
16 ((char *)__get_free_pages(GFP_KERNEL, (order)))
17#define free_gatt_pages(table, order) \
18 free_pages((unsigned long)(table), (order))
19
20#endif
diff --git a/include/asm-sparc/apb.h b/include/asm-sparc/apb.h
new file mode 100644
index 000000000000..8f3b57db810f
--- /dev/null
+++ b/include/asm-sparc/apb.h
@@ -0,0 +1,36 @@
1/*
2 * apb.h: Advanced PCI Bridge Configuration Registers and Bits
3 *
4 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
5 */
6
7#ifndef _SPARC64_APB_H
8#define _SPARC64_APB_H
9
10#define APB_TICK_REGISTER 0xb0
11#define APB_INT_ACK 0xb8
12#define APB_PRIMARY_MASTER_RETRY_LIMIT 0xc0
13#define APB_DMA_ASFR 0xc8
14#define APB_DMA_AFAR 0xd0
15#define APB_PIO_TARGET_RETRY_LIMIT 0xd8
16#define APB_PIO_TARGET_LATENCY_TIMER 0xd9
17#define APB_DMA_TARGET_RETRY_LIMIT 0xda
18#define APB_DMA_TARGET_LATENCY_TIMER 0xdb
19#define APB_SECONDARY_MASTER_RETRY_LIMIT 0xdc
20#define APB_SECONDARY_CONTROL 0xdd
21#define APB_IO_ADDRESS_MAP 0xde
22#define APB_MEM_ADDRESS_MAP 0xdf
23
24#define APB_PCI_CONTROL_LOW 0xe0
25# define APB_PCI_CTL_LOW_ARB_PARK (1 << 21)
26# define APB_PCI_CTL_LOW_ERRINT_EN (1 << 8)
27
28#define APB_PCI_CONTROL_HIGH 0xe4
29# define APB_PCI_CTL_HIGH_SERR (1 << 2)
30# define APB_PCI_CTL_HIGH_ARBITER_EN (1 << 0)
31
32#define APB_PIO_ASFR 0xe8
33#define APB_PIO_AFAR 0xf0
34#define APB_DIAG_REGISTER 0xf8
35
36#endif /* !(_SPARC64_APB_H) */
diff --git a/include/asm-sparc/asi.h b/include/asm-sparc/asi.h
index 158f9b00d43f..74703c5ef985 100644
--- a/include/asm-sparc/asi.h
+++ b/include/asm-sparc/asi.h
@@ -3,7 +3,7 @@
3 3
4/* asi.h: Address Space Identifier values for the sparc. 4/* asi.h: Address Space Identifier values for the sparc.
5 * 5 *
6 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) 6 * Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
7 * 7 *
8 * Pioneer work for sun4m: Paul Hatchman (paul@sfe.com.au) 8 * Pioneer work for sun4m: Paul Hatchman (paul@sfe.com.au)
9 * Joint edition for sun4c+sun4m: Pete A. Zaitcev <zaitcev@ipmce.su> 9 * Joint edition for sun4c+sun4m: Pete A. Zaitcev <zaitcev@ipmce.su>
@@ -108,4 +108,155 @@
108 108
109#define ASI_M_ACTION 0x4c /* Breakpoint Action Register (GNU/Viking) */ 109#define ASI_M_ACTION 0x4c /* Breakpoint Action Register (GNU/Viking) */
110 110
111/* V9 Architecture mandary ASIs. */
112#define ASI_N 0x04 /* Nucleus */
113#define ASI_NL 0x0c /* Nucleus, little endian */
114#define ASI_AIUP 0x10 /* Primary, user */
115#define ASI_AIUS 0x11 /* Secondary, user */
116#define ASI_AIUPL 0x18 /* Primary, user, little endian */
117#define ASI_AIUSL 0x19 /* Secondary, user, little endian */
118#define ASI_P 0x80 /* Primary, implicit */
119#define ASI_S 0x81 /* Secondary, implicit */
120#define ASI_PNF 0x82 /* Primary, no fault */
121#define ASI_SNF 0x83 /* Secondary, no fault */
122#define ASI_PL 0x88 /* Primary, implicit, l-endian */
123#define ASI_SL 0x89 /* Secondary, implicit, l-endian */
124#define ASI_PNFL 0x8a /* Primary, no fault, l-endian */
125#define ASI_SNFL 0x8b /* Secondary, no fault, l-endian */
126
127/* SpitFire and later extended ASIs. The "(III)" marker designates
128 * UltraSparc-III and later specific ASIs. The "(CMT)" marker designates
129 * Chip Multi Threading specific ASIs. "(NG)" designates Niagara specific
130 * ASIs, "(4V)" designates SUN4V specific ASIs.
131 */
132#define ASI_PHYS_USE_EC 0x14 /* PADDR, E-cachable */
133#define ASI_PHYS_BYPASS_EC_E 0x15 /* PADDR, E-bit */
134#define ASI_BLK_AIUP_4V 0x16 /* (4V) Prim, user, block ld/st */
135#define ASI_BLK_AIUS_4V 0x17 /* (4V) Sec, user, block ld/st */
136#define ASI_PHYS_USE_EC_L 0x1c /* PADDR, E-cachable, little endian*/
137#define ASI_PHYS_BYPASS_EC_E_L 0x1d /* PADDR, E-bit, little endian */
138#define ASI_BLK_AIUP_L_4V 0x1e /* (4V) Prim, user, block, l-endian*/
139#define ASI_BLK_AIUS_L_4V 0x1f /* (4V) Sec, user, block, l-endian */
140#define ASI_SCRATCHPAD 0x20 /* (4V) Scratch Pad Registers */
141#define ASI_MMU 0x21 /* (4V) MMU Context Registers */
142#define ASI_BLK_INIT_QUAD_LDD_AIUS 0x23 /* (NG) init-store, twin load,
143 * secondary, user
144 */
145#define ASI_NUCLEUS_QUAD_LDD 0x24 /* Cachable, qword load */
146#define ASI_QUEUE 0x25 /* (4V) Interrupt Queue Registers */
147#define ASI_QUAD_LDD_PHYS_4V 0x26 /* (4V) Physical, qword load */
148#define ASI_NUCLEUS_QUAD_LDD_L 0x2c /* Cachable, qword load, l-endian */
149#define ASI_QUAD_LDD_PHYS_L_4V 0x2e /* (4V) Phys, qword load, l-endian */
150#define ASI_PCACHE_DATA_STATUS 0x30 /* (III) PCache data stat RAM diag */
151#define ASI_PCACHE_DATA 0x31 /* (III) PCache data RAM diag */
152#define ASI_PCACHE_TAG 0x32 /* (III) PCache tag RAM diag */
153#define ASI_PCACHE_SNOOP_TAG 0x33 /* (III) PCache snoop tag RAM diag */
154#define ASI_QUAD_LDD_PHYS 0x34 /* (III+) PADDR, qword load */
155#define ASI_WCACHE_VALID_BITS 0x38 /* (III) WCache Valid Bits diag */
156#define ASI_WCACHE_DATA 0x39 /* (III) WCache data RAM diag */
157#define ASI_WCACHE_TAG 0x3a /* (III) WCache tag RAM diag */
158#define ASI_WCACHE_SNOOP_TAG 0x3b /* (III) WCache snoop tag RAM diag */
159#define ASI_QUAD_LDD_PHYS_L 0x3c /* (III+) PADDR, qw-load, l-endian */
160#define ASI_SRAM_FAST_INIT 0x40 /* (III+) Fast SRAM init */
161#define ASI_CORE_AVAILABLE 0x41 /* (CMT) LP Available */
162#define ASI_CORE_ENABLE_STAT 0x41 /* (CMT) LP Enable Status */
163#define ASI_CORE_ENABLE 0x41 /* (CMT) LP Enable RW */
164#define ASI_XIR_STEERING 0x41 /* (CMT) XIR Steering RW */
165#define ASI_CORE_RUNNING_RW 0x41 /* (CMT) LP Running RW */
166#define ASI_CORE_RUNNING_W1S 0x41 /* (CMT) LP Running Write-One Set */
167#define ASI_CORE_RUNNING_W1C 0x41 /* (CMT) LP Running Write-One Clr */
168#define ASI_CORE_RUNNING_STAT 0x41 /* (CMT) LP Running Status */
169#define ASI_CMT_ERROR_STEERING 0x41 /* (CMT) Error Steering RW */
170#define ASI_DCACHE_INVALIDATE 0x42 /* (III) DCache Invalidate diag */
171#define ASI_DCACHE_UTAG 0x43 /* (III) DCache uTag diag */
172#define ASI_DCACHE_SNOOP_TAG 0x44 /* (III) DCache snoop tag RAM diag */
173#define ASI_LSU_CONTROL 0x45 /* Load-store control unit */
174#define ASI_DCU_CONTROL_REG 0x45 /* (III) DCache Unit Control reg */
175#define ASI_DCACHE_DATA 0x46 /* DCache data-ram diag access */
176#define ASI_DCACHE_TAG 0x47 /* Dcache tag/valid ram diag access*/
177#define ASI_INTR_DISPATCH_STAT 0x48 /* IRQ vector dispatch status */
178#define ASI_INTR_RECEIVE 0x49 /* IRQ vector receive status */
179#define ASI_UPA_CONFIG 0x4a /* UPA config space */
180#define ASI_JBUS_CONFIG 0x4a /* (IIIi) JBUS Config Register */
181#define ASI_SAFARI_CONFIG 0x4a /* (III) Safari Config Register */
182#define ASI_SAFARI_ADDRESS 0x4a /* (III) Safari Address Register */
183#define ASI_ESTATE_ERROR_EN 0x4b /* E-cache error enable space */
184#define ASI_AFSR 0x4c /* Async fault status register */
185#define ASI_AFAR 0x4d /* Async fault address register */
186#define ASI_EC_TAG_DATA 0x4e /* E-cache tag/valid ram diag acc */
187#define ASI_IMMU 0x50 /* Insn-MMU main register space */
188#define ASI_IMMU_TSB_8KB_PTR 0x51 /* Insn-MMU 8KB TSB pointer reg */
189#define ASI_IMMU_TSB_64KB_PTR 0x52 /* Insn-MMU 64KB TSB pointer reg */
190#define ASI_ITLB_DATA_IN 0x54 /* Insn-MMU TLB data in reg */
191#define ASI_ITLB_DATA_ACCESS 0x55 /* Insn-MMU TLB data access reg */
192#define ASI_ITLB_TAG_READ 0x56 /* Insn-MMU TLB tag read reg */
193#define ASI_IMMU_DEMAP 0x57 /* Insn-MMU TLB demap */
194#define ASI_DMMU 0x58 /* Data-MMU main register space */
195#define ASI_DMMU_TSB_8KB_PTR 0x59 /* Data-MMU 8KB TSB pointer reg */
196#define ASI_DMMU_TSB_64KB_PTR 0x5a /* Data-MMU 16KB TSB pointer reg */
197#define ASI_DMMU_TSB_DIRECT_PTR 0x5b /* Data-MMU TSB direct pointer reg */
198#define ASI_DTLB_DATA_IN 0x5c /* Data-MMU TLB data in reg */
199#define ASI_DTLB_DATA_ACCESS 0x5d /* Data-MMU TLB data access reg */
200#define ASI_DTLB_TAG_READ 0x5e /* Data-MMU TLB tag read reg */
201#define ASI_DMMU_DEMAP 0x5f /* Data-MMU TLB demap */
202#define ASI_IIU_INST_TRAP 0x60 /* (III) Instruction Breakpoint */
203#define ASI_INTR_ID 0x63 /* (CMT) Interrupt ID register */
204#define ASI_CORE_ID 0x63 /* (CMT) LP ID register */
205#define ASI_CESR_ID 0x63 /* (CMT) CESR ID register */
206#define ASI_IC_INSTR 0x66 /* Insn cache instrucion ram diag */
207#define ASI_IC_TAG 0x67 /* Insn cache tag/valid ram diag */
208#define ASI_IC_STAG 0x68 /* (III) Insn cache snoop tag ram */
209#define ASI_IC_PRE_DECODE 0x6e /* Insn cache pre-decode ram diag */
210#define ASI_IC_NEXT_FIELD 0x6f /* Insn cache next-field ram diag */
211#define ASI_BRPRED_ARRAY 0x6f /* (III) Branch Prediction RAM diag*/
212#define ASI_BLK_AIUP 0x70 /* Primary, user, block load/store */
213#define ASI_BLK_AIUS 0x71 /* Secondary, user, block ld/st */
214#define ASI_MCU_CTRL_REG 0x72 /* (III) Memory controller regs */
215#define ASI_EC_DATA 0x74 /* (III) E-cache data staging reg */
216#define ASI_EC_CTRL 0x75 /* (III) E-cache control reg */
217#define ASI_EC_W 0x76 /* E-cache diag write access */
218#define ASI_UDB_ERROR_W 0x77 /* External UDB error regs W */
219#define ASI_UDB_CONTROL_W 0x77 /* External UDB control regs W */
220#define ASI_INTR_W 0x77 /* IRQ vector dispatch write */
221#define ASI_INTR_DATAN_W 0x77 /* (III) Out irq vector data reg N */
222#define ASI_INTR_DISPATCH_W 0x77 /* (III) Interrupt vector dispatch */
223#define ASI_BLK_AIUPL 0x78 /* Primary, user, little, blk ld/st*/
224#define ASI_BLK_AIUSL 0x79 /* Secondary, user, little, blk ld/st*/
225#define ASI_EC_R 0x7e /* E-cache diag read access */
226#define ASI_UDBH_ERROR_R 0x7f /* External UDB error regs rd hi */
227#define ASI_UDBL_ERROR_R 0x7f /* External UDB error regs rd low */
228#define ASI_UDBH_CONTROL_R 0x7f /* External UDB control regs rd hi */
229#define ASI_UDBL_CONTROL_R 0x7f /* External UDB control regs rd low*/
230#define ASI_INTR_R 0x7f /* IRQ vector dispatch read */
231#define ASI_INTR_DATAN_R 0x7f /* (III) In irq vector data reg N */
232#define ASI_PST8_P 0xc0 /* Primary, 8 8-bit, partial */
233#define ASI_PST8_S 0xc1 /* Secondary, 8 8-bit, partial */
234#define ASI_PST16_P 0xc2 /* Primary, 4 16-bit, partial */
235#define ASI_PST16_S 0xc3 /* Secondary, 4 16-bit, partial */
236#define ASI_PST32_P 0xc4 /* Primary, 2 32-bit, partial */
237#define ASI_PST32_S 0xc5 /* Secondary, 2 32-bit, partial */
238#define ASI_PST8_PL 0xc8 /* Primary, 8 8-bit, partial, L */
239#define ASI_PST8_SL 0xc9 /* Secondary, 8 8-bit, partial, L */
240#define ASI_PST16_PL 0xca /* Primary, 4 16-bit, partial, L */
241#define ASI_PST16_SL 0xcb /* Secondary, 4 16-bit, partial, L */
242#define ASI_PST32_PL 0xcc /* Primary, 2 32-bit, partial, L */
243#define ASI_PST32_SL 0xcd /* Secondary, 2 32-bit, partial, L */
244#define ASI_FL8_P 0xd0 /* Primary, 1 8-bit, fpu ld/st */
245#define ASI_FL8_S 0xd1 /* Secondary, 1 8-bit, fpu ld/st */
246#define ASI_FL16_P 0xd2 /* Primary, 1 16-bit, fpu ld/st */
247#define ASI_FL16_S 0xd3 /* Secondary, 1 16-bit, fpu ld/st */
248#define ASI_FL8_PL 0xd8 /* Primary, 1 8-bit, fpu ld/st, L */
249#define ASI_FL8_SL 0xd9 /* Secondary, 1 8-bit, fpu ld/st, L*/
250#define ASI_FL16_PL 0xda /* Primary, 1 16-bit, fpu ld/st, L */
251#define ASI_FL16_SL 0xdb /* Secondary, 1 16-bit, fpu ld/st,L*/
252#define ASI_BLK_COMMIT_P 0xe0 /* Primary, blk store commit */
253#define ASI_BLK_COMMIT_S 0xe1 /* Secondary, blk store commit */
254#define ASI_BLK_INIT_QUAD_LDD_P 0xe2 /* (NG) init-store, twin load,
255 * primary, implicit
256 */
257#define ASI_BLK_P 0xf0 /* Primary, blk ld/st */
258#define ASI_BLK_S 0xf1 /* Secondary, blk ld/st */
259#define ASI_BLK_PL 0xf8 /* Primary, blk ld/st, little */
260#define ASI_BLK_SL 0xf9 /* Secondary, blk ld/st, little */
261
111#endif /* _SPARC_ASI_H */ 262#endif /* _SPARC_ASI_H */
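
The V9 ASI values added above are intended as the immediate address-space argument of alternate-space loads and stores. As a rough illustration only (not part of this patch, modeled on how other sparc64 headers such as upa.h use these constants), a physical-address read through the externally-cacheable ASI could look like this; the helper name is invented:

/* Illustrative sketch: read a 64-bit word at a physical address via
 * ASI_PHYS_USE_EC.  The "i" constraint passes the ASI number as an
 * immediate to the ldxa instruction. */
static inline unsigned long phys_read_ec(unsigned long paddr)
{
	unsigned long val;

	__asm__ __volatile__("ldxa [%1] %2, %0"
			     : "=r" (val)
			     : "r" (paddr), "i" (ASI_PHYS_USE_EC));
	return val;
}
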
diff --git a/include/asm-sparc/atomic.h b/include/asm-sparc/atomic.h
index 5c944b5a8040..66d8166ec1d7 100644
--- a/include/asm-sparc/atomic.h
+++ b/include/asm-sparc/atomic.h
@@ -1,165 +1,8 @@
1/* atomic.h: These still suck, but the I-cache hit rate is higher. 1#ifndef ___ASM_SPARC_ATOMIC_H
2 * 2#define ___ASM_SPARC_ATOMIC_H
3 * Copyright (C) 1996 David S. Miller (davem@davemloft.net) 3#if defined(__sparc__) && defined(__arch64__)
4 * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au) 4#include <asm-sparc/atomic_64.h>
5 * Copyright (C) 2007 Kyle McMartin (kyle@parisc-linux.org)
6 *
7 * Additions by Keith M Wesolowski (wesolows@foobazco.org) based
8 * on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
9 */
10
11#ifndef __ARCH_SPARC_ATOMIC__
12#define __ARCH_SPARC_ATOMIC__
13
14#include <linux/types.h>
15
16typedef struct { volatile int counter; } atomic_t;
17
18#ifdef __KERNEL__
19
20#define ATOMIC_INIT(i) { (i) }
21
22extern int __atomic_add_return(int, atomic_t *);
23extern int atomic_cmpxchg(atomic_t *, int, int);
24#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
25extern int atomic_add_unless(atomic_t *, int, int);
26extern void atomic_set(atomic_t *, int);
27
28#define atomic_read(v) ((v)->counter)
29
30#define atomic_add(i, v) ((void)__atomic_add_return( (int)(i), (v)))
31#define atomic_sub(i, v) ((void)__atomic_add_return(-(int)(i), (v)))
32#define atomic_inc(v) ((void)__atomic_add_return( 1, (v)))
33#define atomic_dec(v) ((void)__atomic_add_return( -1, (v)))
34
35#define atomic_add_return(i, v) (__atomic_add_return( (int)(i), (v)))
36#define atomic_sub_return(i, v) (__atomic_add_return(-(int)(i), (v)))
37#define atomic_inc_return(v) (__atomic_add_return( 1, (v)))
38#define atomic_dec_return(v) (__atomic_add_return( -1, (v)))
39
40#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
41
42/*
43 * atomic_inc_and_test - increment and test
44 * @v: pointer of type atomic_t
45 *
46 * Atomically increments @v by 1
47 * and returns true if the result is zero, or false for all
48 * other cases.
49 */
50#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
51
52#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
53#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
54
55#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
56
57/* This is the old 24-bit implementation. It's still used internally
58 * by some sparc-specific code, notably the semaphore implementation.
59 */
60typedef struct { volatile int counter; } atomic24_t;
61
62#ifndef CONFIG_SMP
63
64#define ATOMIC24_INIT(i) { (i) }
65#define atomic24_read(v) ((v)->counter)
66#define atomic24_set(v, i) (((v)->counter) = i)
67
68#else 5#else
69/* We do the bulk of the actual work out of line in two common 6#include <asm-sparc/atomic_32.h>
70 * routines in assembler, see arch/sparc/lib/atomic.S for the 7#endif
71 * "fun" details.
72 *
73 * For SMP the trick is you embed the spin lock byte within
74 * the word, use the low byte so signedness is easily retained
75 * via a quick arithmetic shift. It looks like this:
76 *
77 * ----------------------------------------
78 * | signed 24-bit counter value | lock | atomic_t
79 * ----------------------------------------
80 * 31 8 7 0
81 */
82
83#define ATOMIC24_INIT(i) { ((i) << 8) }
84
85static inline int atomic24_read(const atomic24_t *v)
86{
87 int ret = v->counter;
88
89 while(ret & 0xff)
90 ret = v->counter;
91
92 return ret >> 8;
93}
94
95#define atomic24_set(v, i) (((v)->counter) = ((i) << 8))
96#endif 8#endif
97
98static inline int __atomic24_add(int i, atomic24_t *v)
99{
100 register volatile int *ptr asm("g1");
101 register int increment asm("g2");
102 register int tmp1 asm("g3");
103 register int tmp2 asm("g4");
104 register int tmp3 asm("g7");
105
106 ptr = &v->counter;
107 increment = i;
108
109 __asm__ __volatile__(
110 "mov %%o7, %%g4\n\t"
111 "call ___atomic24_add\n\t"
112 " add %%o7, 8, %%o7\n"
113 : "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
114 : "0" (increment), "r" (ptr)
115 : "memory", "cc");
116
117 return increment;
118}
119
120static inline int __atomic24_sub(int i, atomic24_t *v)
121{
122 register volatile int *ptr asm("g1");
123 register int increment asm("g2");
124 register int tmp1 asm("g3");
125 register int tmp2 asm("g4");
126 register int tmp3 asm("g7");
127
128 ptr = &v->counter;
129 increment = i;
130
131 __asm__ __volatile__(
132 "mov %%o7, %%g4\n\t"
133 "call ___atomic24_sub\n\t"
134 " add %%o7, 8, %%o7\n"
135 : "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
136 : "0" (increment), "r" (ptr)
137 : "memory", "cc");
138
139 return increment;
140}
141
142#define atomic24_add(i, v) ((void)__atomic24_add((i), (v)))
143#define atomic24_sub(i, v) ((void)__atomic24_sub((i), (v)))
144
145#define atomic24_dec_return(v) __atomic24_sub(1, (v))
146#define atomic24_inc_return(v) __atomic24_add(1, (v))
147
148#define atomic24_sub_and_test(i, v) (__atomic24_sub((i), (v)) == 0)
149#define atomic24_dec_and_test(v) (__atomic24_sub(1, (v)) == 0)
150
151#define atomic24_inc(v) ((void)__atomic24_add(1, (v)))
152#define atomic24_dec(v) ((void)__atomic24_sub(1, (v)))
153
154#define atomic24_add_negative(i, v) (__atomic24_add((i), (v)) < 0)
155
156/* Atomic operations are already serializing */
157#define smp_mb__before_atomic_dec() barrier()
158#define smp_mb__after_atomic_dec() barrier()
159#define smp_mb__before_atomic_inc() barrier()
160#define smp_mb__after_atomic_inc() barrier()
161
162#endif /* !(__KERNEL__) */
163
164#include <asm-generic/atomic.h>
165#endif /* !(__ARCH_SPARC_ATOMIC__) */
diff --git a/include/asm-sparc/atomic_32.h b/include/asm-sparc/atomic_32.h
new file mode 100644
index 000000000000..5c944b5a8040
--- /dev/null
+++ b/include/asm-sparc/atomic_32.h
@@ -0,0 +1,165 @@
1/* atomic.h: These still suck, but the I-cache hit rate is higher.
2 *
3 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
4 * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
5 * Copyright (C) 2007 Kyle McMartin (kyle@parisc-linux.org)
6 *
7 * Additions by Keith M Wesolowski (wesolows@foobazco.org) based
8 * on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
9 */
10
11#ifndef __ARCH_SPARC_ATOMIC__
12#define __ARCH_SPARC_ATOMIC__
13
14#include <linux/types.h>
15
16typedef struct { volatile int counter; } atomic_t;
17
18#ifdef __KERNEL__
19
20#define ATOMIC_INIT(i) { (i) }
21
22extern int __atomic_add_return(int, atomic_t *);
23extern int atomic_cmpxchg(atomic_t *, int, int);
24#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
25extern int atomic_add_unless(atomic_t *, int, int);
26extern void atomic_set(atomic_t *, int);
27
28#define atomic_read(v) ((v)->counter)
29
30#define atomic_add(i, v) ((void)__atomic_add_return( (int)(i), (v)))
31#define atomic_sub(i, v) ((void)__atomic_add_return(-(int)(i), (v)))
32#define atomic_inc(v) ((void)__atomic_add_return( 1, (v)))
33#define atomic_dec(v) ((void)__atomic_add_return( -1, (v)))
34
35#define atomic_add_return(i, v) (__atomic_add_return( (int)(i), (v)))
36#define atomic_sub_return(i, v) (__atomic_add_return(-(int)(i), (v)))
37#define atomic_inc_return(v) (__atomic_add_return( 1, (v)))
38#define atomic_dec_return(v) (__atomic_add_return( -1, (v)))
39
40#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
41
42/*
43 * atomic_inc_and_test - increment and test
44 * @v: pointer of type atomic_t
45 *
46 * Atomically increments @v by 1
47 * and returns true if the result is zero, or false for all
48 * other cases.
49 */
50#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
51
52#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
53#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
54
55#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
56
57/* This is the old 24-bit implementation. It's still used internally
58 * by some sparc-specific code, notably the semaphore implementation.
59 */
60typedef struct { volatile int counter; } atomic24_t;
61
62#ifndef CONFIG_SMP
63
64#define ATOMIC24_INIT(i) { (i) }
65#define atomic24_read(v) ((v)->counter)
66#define atomic24_set(v, i) (((v)->counter) = i)
67
68#else
69/* We do the bulk of the actual work out of line in two common
70 * routines in assembler, see arch/sparc/lib/atomic.S for the
71 * "fun" details.
72 *
73 * For SMP the trick is you embed the spin lock byte within
74 * the word, use the low byte so signedness is easily retained
75 * via a quick arithmetic shift. It looks like this:
76 *
77 * ----------------------------------------
78 * | signed 24-bit counter value | lock | atomic_t
79 * ----------------------------------------
80 * 31 8 7 0
81 */
82
83#define ATOMIC24_INIT(i) { ((i) << 8) }
84
85static inline int atomic24_read(const atomic24_t *v)
86{
87 int ret = v->counter;
88
89 while(ret & 0xff)
90 ret = v->counter;
91
92 return ret >> 8;
93}
94
95#define atomic24_set(v, i) (((v)->counter) = ((i) << 8))
96#endif
97
98static inline int __atomic24_add(int i, atomic24_t *v)
99{
100 register volatile int *ptr asm("g1");
101 register int increment asm("g2");
102 register int tmp1 asm("g3");
103 register int tmp2 asm("g4");
104 register int tmp3 asm("g7");
105
106 ptr = &v->counter;
107 increment = i;
108
109 __asm__ __volatile__(
110 "mov %%o7, %%g4\n\t"
111 "call ___atomic24_add\n\t"
112 " add %%o7, 8, %%o7\n"
113 : "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
114 : "0" (increment), "r" (ptr)
115 : "memory", "cc");
116
117 return increment;
118}
119
120static inline int __atomic24_sub(int i, atomic24_t *v)
121{
122 register volatile int *ptr asm("g1");
123 register int increment asm("g2");
124 register int tmp1 asm("g3");
125 register int tmp2 asm("g4");
126 register int tmp3 asm("g7");
127
128 ptr = &v->counter;
129 increment = i;
130
131 __asm__ __volatile__(
132 "mov %%o7, %%g4\n\t"
133 "call ___atomic24_sub\n\t"
134 " add %%o7, 8, %%o7\n"
135 : "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
136 : "0" (increment), "r" (ptr)
137 : "memory", "cc");
138
139 return increment;
140}
141
142#define atomic24_add(i, v) ((void)__atomic24_add((i), (v)))
143#define atomic24_sub(i, v) ((void)__atomic24_sub((i), (v)))
144
145#define atomic24_dec_return(v) __atomic24_sub(1, (v))
146#define atomic24_inc_return(v) __atomic24_add(1, (v))
147
148#define atomic24_sub_and_test(i, v) (__atomic24_sub((i), (v)) == 0)
149#define atomic24_dec_and_test(v) (__atomic24_sub(1, (v)) == 0)
150
151#define atomic24_inc(v) ((void)__atomic24_add(1, (v)))
152#define atomic24_dec(v) ((void)__atomic24_sub(1, (v)))
153
154#define atomic24_add_negative(i, v) (__atomic24_add((i), (v)) < 0)
155
156/* Atomic operations are already serializing */
157#define smp_mb__before_atomic_dec() barrier()
158#define smp_mb__after_atomic_dec() barrier()
159#define smp_mb__before_atomic_inc() barrier()
160#define smp_mb__after_atomic_inc() barrier()
161
162#endif /* !(__KERNEL__) */
163
164#include <asm-generic/atomic.h>
165#endif /* !(__ARCH_SPARC_ATOMIC__) */
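
As an aside on the atomic24_t layout described above (counter in bits 31:8, lock byte in bits 7:0): the arithmetic right shift recovers the signed value while discarding the lock byte. A hypothetical user-space demo, assuming the arithmetic-shift behaviour for signed ints that the kernel code itself relies on:

/* Demo only, not kernel code: pack and unpack a signed 24-bit value
 * around a low lock byte, mirroring atomic24_t. */
#include <stdio.h>

int main(void)
{
	int word = (-5) << 8;	/* counter value -5 in bits 31:8 */

	word |= 0xff;		/* pretend the lock byte is currently held */
	printf("counter = %d, lock = 0x%02x\n", word >> 8, word & 0xff);
	return 0;
}
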
diff --git a/include/asm-sparc/atomic_64.h b/include/asm-sparc/atomic_64.h
new file mode 100644
index 000000000000..2c71ec4a3b18
--- /dev/null
+++ b/include/asm-sparc/atomic_64.h
@@ -0,0 +1,128 @@
1/* atomic.h: Thankfully the V9 is at least reasonable for this
2 * stuff.
3 *
4 * Copyright (C) 1996, 1997, 2000 David S. Miller (davem@redhat.com)
5 */
6
7#ifndef __ARCH_SPARC64_ATOMIC__
8#define __ARCH_SPARC64_ATOMIC__
9
10#include <linux/types.h>
11#include <asm/system.h>
12
13typedef struct { volatile int counter; } atomic_t;
14typedef struct { volatile __s64 counter; } atomic64_t;
15
16#define ATOMIC_INIT(i) { (i) }
17#define ATOMIC64_INIT(i) { (i) }
18
19#define atomic_read(v) ((v)->counter)
20#define atomic64_read(v) ((v)->counter)
21
22#define atomic_set(v, i) (((v)->counter) = i)
23#define atomic64_set(v, i) (((v)->counter) = i)
24
25extern void atomic_add(int, atomic_t *);
26extern void atomic64_add(int, atomic64_t *);
27extern void atomic_sub(int, atomic_t *);
28extern void atomic64_sub(int, atomic64_t *);
29
30extern int atomic_add_ret(int, atomic_t *);
31extern int atomic64_add_ret(int, atomic64_t *);
32extern int atomic_sub_ret(int, atomic_t *);
33extern int atomic64_sub_ret(int, atomic64_t *);
34
35#define atomic_dec_return(v) atomic_sub_ret(1, v)
36#define atomic64_dec_return(v) atomic64_sub_ret(1, v)
37
38#define atomic_inc_return(v) atomic_add_ret(1, v)
39#define atomic64_inc_return(v) atomic64_add_ret(1, v)
40
41#define atomic_sub_return(i, v) atomic_sub_ret(i, v)
42#define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
43
44#define atomic_add_return(i, v) atomic_add_ret(i, v)
45#define atomic64_add_return(i, v) atomic64_add_ret(i, v)
46
47/*
48 * atomic_inc_and_test - increment and test
49 * @v: pointer of type atomic_t
50 *
51 * Atomically increments @v by 1
52 * and returns true if the result is zero, or false for all
53 * other cases.
54 */
55#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
56#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
57
58#define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
59#define atomic64_sub_and_test(i, v) (atomic64_sub_ret(i, v) == 0)
60
61#define atomic_dec_and_test(v) (atomic_sub_ret(1, v) == 0)
62#define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
63
64#define atomic_inc(v) atomic_add(1, v)
65#define atomic64_inc(v) atomic64_add(1, v)
66
67#define atomic_dec(v) atomic_sub(1, v)
68#define atomic64_dec(v) atomic64_sub(1, v)
69
70#define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
71#define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
72
73#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
74#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
75
76static inline int atomic_add_unless(atomic_t *v, int a, int u)
77{
78 int c, old;
79 c = atomic_read(v);
80 for (;;) {
81 if (unlikely(c == (u)))
82 break;
83 old = atomic_cmpxchg((v), c, c + (a));
84 if (likely(old == c))
85 break;
86 c = old;
87 }
88 return c != (u);
89}
90
91#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
92
93#define atomic64_cmpxchg(v, o, n) \
94 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
95#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
96
97static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
98{
99 long c, old;
100 c = atomic64_read(v);
101 for (;;) {
102 if (unlikely(c == (u)))
103 break;
104 old = atomic64_cmpxchg((v), c, c + (a));
105 if (likely(old == c))
106 break;
107 c = old;
108 }
109 return c != (u);
110}
111
112#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
113
114/* Atomic operations are already serializing */
115#ifdef CONFIG_SMP
116#define smp_mb__before_atomic_dec() membar_storeload_loadload();
117#define smp_mb__after_atomic_dec() membar_storeload_storestore();
118#define smp_mb__before_atomic_inc() membar_storeload_loadload();
119#define smp_mb__after_atomic_inc() membar_storeload_storestore();
120#else
121#define smp_mb__before_atomic_dec() barrier()
122#define smp_mb__after_atomic_dec() barrier()
123#define smp_mb__before_atomic_inc() barrier()
124#define smp_mb__after_atomic_inc() barrier()
125#endif
126
127#include <asm-generic/atomic.h>
128#endif /* !(__ARCH_SPARC64_ATOMIC__) */
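
atomic_add_unless() above is the usual compare-and-swap retry loop: re-read the counter until either the forbidden value u is observed or the cmpxchg succeeds. Its most common consumer is the atomic_inc_not_zero() wrapper, used for reference counts that must not be revived once they reach zero. A hedged usage sketch (the struct and helper names are illustrative, not from this patch):

struct obj {
	atomic_t refcount;
};

/* Take a reference only if the object has not already started dying. */
static struct obj *obj_tryget(struct obj *p)
{
	return atomic_inc_not_zero(&p->refcount) ? p : NULL;
}
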
diff --git a/include/asm-sparc/auxio.h b/include/asm-sparc/auxio.h
index e552b8d68450..24c6f3c0f577 100644
--- a/include/asm-sparc/auxio.h
+++ b/include/asm-sparc/auxio.h
@@ -1,89 +1,8 @@
1/* 1#ifndef ___ASM_SPARC_AUXIO_H
2 * auxio.h: Definitions and code for the Auxiliary I/O register. 2#define ___ASM_SPARC_AUXIO_H
3 * 3#if defined(__sparc__) && defined(__arch64__)
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) 4#include <asm-sparc/auxio_64.h>
5 */ 5#else
6#ifndef _SPARC_AUXIO_H 6#include <asm-sparc/auxio_32.h>
7#define _SPARC_AUXIO_H 7#endif
8 8#endif
9#include <asm/system.h>
10#include <asm/vaddrs.h>
11
12/* This register is an unsigned char in IO space. It does two things.
13 * First, it is used to control the front panel LED light on machines
14 * that have it (good for testing entry points to trap handlers and irq's)
15 * Secondly, it controls various floppy drive parameters.
16 */
17#define AUXIO_ORMEIN 0xf0 /* All writes must set these bits. */
18#define AUXIO_ORMEIN4M 0xc0 /* sun4m - All writes must set these bits. */
19#define AUXIO_FLPY_DENS 0x20 /* Floppy density, high if set. Read only. */
20#define AUXIO_FLPY_DCHG 0x10 /* A disk change occurred. Read only. */
21#define AUXIO_EDGE_ON 0x10 /* sun4m - On means Jumper block is in. */
22#define AUXIO_FLPY_DSEL 0x08 /* Drive select/start-motor. Write only. */
23#define AUXIO_LINK_TEST 0x08 /* sun4m - On means TPE Carrier detect. */
24
25/* Set the following to one, then zero, after doing a pseudo DMA transfer. */
26#define AUXIO_FLPY_TCNT 0x04 /* Floppy terminal count. Write only. */
27
28/* Set the following to zero to eject the floppy. */
29#define AUXIO_FLPY_EJCT 0x02 /* Eject floppy disk. Write only. */
30#define AUXIO_LED 0x01 /* On if set, off if unset. Read/Write */
31
32#ifndef __ASSEMBLY__
33
34/*
35 * NOTE: these routines are implementation dependent--
36 * understand the hardware you are querying!
37 */
38extern void set_auxio(unsigned char bits_on, unsigned char bits_off);
39extern unsigned char get_auxio(void); /* .../asm-sparc/floppy.h */
40
41/*
42 * The following routines are provided for driver-compatibility
43 * with sparc64 (primarily sunlance.c)
44 */
45
46#define AUXIO_LTE_ON 1
47#define AUXIO_LTE_OFF 0
48
49/* auxio_set_lte - Set Link Test Enable (TPE Link Detect)
50 *
51 * on - AUXIO_LTE_ON or AUXIO_LTE_OFF
52 */
53#define auxio_set_lte(on) \
54do { \
55 if(on) { \
56 set_auxio(AUXIO_LINK_TEST, 0); \
57 } else { \
58 set_auxio(0, AUXIO_LINK_TEST); \
59 } \
60} while (0)
61
62#define AUXIO_LED_ON 1
63#define AUXIO_LED_OFF 0
64
65/* auxio_set_led - Set system front panel LED
66 *
67 * on - AUXIO_LED_ON or AUXIO_LED_OFF
68 */
69#define auxio_set_led(on) \
70do { \
71 if(on) { \
72 set_auxio(AUXIO_LED, 0); \
73 } else { \
74 set_auxio(0, AUXIO_LED); \
75 } \
76} while (0)
77
78#endif /* !(__ASSEMBLY__) */
79
80
81/* AUXIO2 (Power Off Control) */
82extern __volatile__ unsigned char * auxio_power_register;
83
84#define AUXIO_POWER_DETECT_FAILURE 32
85#define AUXIO_POWER_CLEAR_FAILURE 2
86#define AUXIO_POWER_OFF 1
87
88
89#endif /* !(_SPARC_AUXIO_H) */
diff --git a/include/asm-sparc/auxio_32.h b/include/asm-sparc/auxio_32.h
new file mode 100644
index 000000000000..4db8f23db20f
--- /dev/null
+++ b/include/asm-sparc/auxio_32.h
@@ -0,0 +1,89 @@
1/*
2 * auxio.h: Definitions and code for the Auxiliary I/O register.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 */
6#ifndef _SPARC_AUXIO_H
7#define _SPARC_AUXIO_H
8
9#include <asm/system.h>
10#include <asm/vaddrs.h>
11
12/* This register is an unsigned char in IO space. It does two things.
13 * First, it is used to control the front panel LED light on machines
14 * that have it (good for testing entry points to trap handlers and irq's)
15 * Secondly, it controls various floppy drive parameters.
16 */
17#define AUXIO_ORMEIN 0xf0 /* All writes must set these bits. */
18#define AUXIO_ORMEIN4M 0xc0 /* sun4m - All writes must set these bits. */
19#define AUXIO_FLPY_DENS 0x20 /* Floppy density, high if set. Read only. */
20#define AUXIO_FLPY_DCHG 0x10 /* A disk change occurred. Read only. */
21#define AUXIO_EDGE_ON 0x10 /* sun4m - On means Jumper block is in. */
22#define AUXIO_FLPY_DSEL 0x08 /* Drive select/start-motor. Write only. */
23#define AUXIO_LINK_TEST 0x08 /* sun4m - On means TPE Carrier detect. */
24
25/* Set the following to one, then zero, after doing a pseudo DMA transfer. */
26#define AUXIO_FLPY_TCNT 0x04 /* Floppy terminal count. Write only. */
27
28/* Set the following to zero to eject the floppy. */
29#define AUXIO_FLPY_EJCT 0x02 /* Eject floppy disk. Write only. */
30#define AUXIO_LED 0x01 /* On if set, off if unset. Read/Write */
31
32#ifndef __ASSEMBLY__
33
34/*
35 * NOTE: these routines are implementation dependent--
36 * understand the hardware you are querying!
37 */
38extern void set_auxio(unsigned char bits_on, unsigned char bits_off);
39extern unsigned char get_auxio(void); /* .../asm-sparc/floppy.h */
40
41/*
42 * The following routines are provided for driver-compatibility
43 * with sparc64 (primarily sunlance.c)
44 */
45
46#define AUXIO_LTE_ON 1
47#define AUXIO_LTE_OFF 0
48
49/* auxio_set_lte - Set Link Test Enable (TPE Link Detect)
50 *
51 * on - AUXIO_LTE_ON or AUXIO_LTE_OFF
52 */
53#define auxio_set_lte(on) \
54do { \
55 if(on) { \
56 set_auxio(AUXIO_LINK_TEST, 0); \
57 } else { \
58 set_auxio(0, AUXIO_LINK_TEST); \
59 } \
60} while (0)
61
62#define AUXIO_LED_ON 1
63#define AUXIO_LED_OFF 0
64
65/* auxio_set_led - Set system front panel LED
66 *
67 * on - AUXIO_LED_ON or AUXIO_LED_OFF
68 */
69#define auxio_set_led(on) \
70do { \
71 if(on) { \
72 set_auxio(AUXIO_LED, 0); \
73 } else { \
74 set_auxio(0, AUXIO_LED); \
75 } \
76} while (0)
77
78#endif /* !(__ASSEMBLY__) */
79
80
81/* AUXIO2 (Power Off Control) */
82extern __volatile__ unsigned char * auxio_power_register;
83
84#define AUXIO_POWER_DETECT_FAILURE 32
85#define AUXIO_POWER_CLEAR_FAILURE 2
86#define AUXIO_POWER_OFF 1
87
88
89#endif /* !(_SPARC_AUXIO_H) */
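
The floppy terminal-count bit documented above ("set to one, then zero, after doing a pseudo DMA transfer") maps directly onto the set_auxio() helper declared in the same header. A minimal sketch of that pulse, assuming it runs from the floppy pseudo-DMA completion path:

/* Illustrative only: pulse the 82077 terminal-count line via AUXIO. */
set_auxio(AUXIO_FLPY_TCNT, 0);	/* raise TC */
set_auxio(0, AUXIO_FLPY_TCNT);	/* drop TC again */
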
diff --git a/include/asm-sparc/auxio_64.h b/include/asm-sparc/auxio_64.h
new file mode 100644
index 000000000000..f61cd1e3e395
--- /dev/null
+++ b/include/asm-sparc/auxio_64.h
@@ -0,0 +1,100 @@
1/*
2 * auxio.h: Definitions and code for the Auxiliary I/O registers.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 *
6 * Refactoring for unified NCR/PCIO support 2002 Eric Brower (ebrower@usa.net)
7 */
8#ifndef _SPARC64_AUXIO_H
9#define _SPARC64_AUXIO_H
10
11/* AUXIO implementations:
12 * sbus-based NCR89C105 "Slavio"
13 * LED/Floppy (AUX1) register
14 * Power (AUX2) register
15 *
16 * ebus-based auxio on PCIO
17 * LED Auxio Register
18 * Power Auxio Register
19 *
20 * Register definitions from NCR _NCR89C105 Chip Specification_
21 *
22 * SLAVIO AUX1 @ 0x1900000
23 * -------------------------------------------------
24 * | (R) | (R) | D | (R) | E | M | T | L |
25 * -------------------------------------------------
26 * (R) - bit 7:6,4 are reserved and should be masked in s/w
27 * D - Floppy Density Sense (1=high density) R/O
28 * E - Link Test Enable, directly reflected on AT&T 7213 LTE pin
29 * M - Monitor/Mouse Mux, directly reflected on MON_MSE_MUX pin
30 * T - Terminal Count: sends TC pulse to 82077 floppy controller
31 * L - System LED on front panel (0=off, 1=on)
32 */
33#define AUXIO_AUX1_MASK 0xc0 /* Mask bits */
34#define AUXIO_AUX1_FDENS 0x20 /* Floppy Density Sense */
35#define AUXIO_AUX1_LTE 0x08 /* Link Test Enable */
36#define AUXIO_AUX1_MMUX 0x04 /* Monitor/Mouse Mux */
37#define AUXIO_AUX1_FTCNT 0x02 /* Terminal Count, */
38#define AUXIO_AUX1_LED 0x01 /* System LED */
39
40/* SLAVIO AUX2 @ 0x1910000
41 * -------------------------------------------------
42 * | (R) | (R) | D | (R) | (R) | (R) | C | F |
43 * -------------------------------------------------
44 * (R) - bits 7:6,4:2 are reserved and should be masked in s/w
45 * D - Power Failure Detect (1=power fail)
46 * C - Clear Power Failure Detect Int (1=clear)
47 * F - Power Off (1=power off)
48 */
49#define AUXIO_AUX2_MASK 0xdc /* Mask Bits */
50#define AUXIO_AUX2_PFAILDET 0x20 /* Power Fail Detect */
51#define AUXIO_AUX2_PFAILCLR 0x02 /* Clear Pwr Fail Det Intr */
52#define AUXIO_AUX2_PWR_OFF 0x01 /* Power Off */
53
54/* Register definitions from Sun Microsystems _PCIO_ p/n 802-7837
55 *
56 * PCIO LED Auxio @ 0x726000
57 * -------------------------------------------------
58 * | 31:1 Unused | LED |
59 * -------------------------------------------------
60 * Bits 31:1 unused
61 * LED - System LED on front panel (0=off, 1=on)
62 */
63#define AUXIO_PCIO_LED 0x01 /* System LED */
64
65/* PCIO Power Auxio @ 0x724000
66 * -------------------------------------------------
67 * | 31:2 Unused | CPO | SPO |
68 * -------------------------------------------------
69 * Bits 31:2 unused
70 * CPO - Courtesy Power Off (1=off)
71 * SPO - System Power Off (1=off)
72 */
73#define AUXIO_PCIO_CPWR_OFF 0x02 /* Courtesy Power Off */
74#define AUXIO_PCIO_SPWR_OFF 0x01 /* System Power Off */
75
76#ifndef __ASSEMBLY__
77
78extern void __iomem *auxio_register;
79
80#define AUXIO_LTE_ON 1
81#define AUXIO_LTE_OFF 0
82
83/* auxio_set_lte - Set Link Test Enable (TPE Link Detect)
84 *
85 * on - AUXIO_LTE_ON or AUXIO_LTE_OFF
86 */
87extern void auxio_set_lte(int on);
88
89#define AUXIO_LED_ON 1
90#define AUXIO_LED_OFF 0
91
92/* auxio_set_led - Set system front panel LED
93 *
94 * on - AUXIO_LED_ON or AUXIO_LED_OFF
95 */
96extern void auxio_set_led(int on);
97
98#endif /* ifndef __ASSEMBLY__ */
99
100#endif /* !(_SPARC64_AUXIO_H) */
diff --git a/include/asm-sparc/backoff.h b/include/asm-sparc/backoff.h
new file mode 100644
index 000000000000..fa1fdf67e350
--- /dev/null
+++ b/include/asm-sparc/backoff.h
@@ -0,0 +1,31 @@
1#ifndef _SPARC64_BACKOFF_H
2#define _SPARC64_BACKOFF_H
3
4#define BACKOFF_LIMIT (4 * 1024)
5
6#ifdef CONFIG_SMP
7
8#define BACKOFF_SETUP(reg) \
9 mov 1, reg
10
11#define BACKOFF_SPIN(reg, tmp, label) \
12 mov reg, tmp; \
1388: brnz,pt tmp, 88b; \
14 sub tmp, 1, tmp; \
15 set BACKOFF_LIMIT, tmp; \
16 cmp reg, tmp; \
17 bg,pn %xcc, label; \
18 nop; \
19 ba,pt %xcc, label; \
20 sllx reg, 1, reg;
21
22#else
23
24#define BACKOFF_SETUP(reg)
25#define BACKOFF_SPIN(reg, tmp, label) \
26 ba,pt %xcc, label; \
27 nop;
28
29#endif
30
31#endif /* _SPARC64_BACKOFF_H */
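
In C terms, the assembler BACKOFF_* macros above implement a bounded exponential backoff for contended atomics: spin for the current count, then double it until BACKOFF_LIMIT is reached. A rough equivalent for illustration only (cpu_relax() stands in for the delay-slot spin):

static void backoff_spin_c(unsigned long *backoff)
{
	unsigned long n;

	for (n = *backoff; n; n--)	/* the brnz/sub delay loop */
		cpu_relax();

	if (*backoff <= BACKOFF_LIMIT)	/* only grow while at or below the cap */
		*backoff <<= 1;		/* sllx reg, 1, reg */
}
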
diff --git a/include/asm-sparc/bbc.h b/include/asm-sparc/bbc.h
new file mode 100644
index 000000000000..423a85800aae
--- /dev/null
+++ b/include/asm-sparc/bbc.h
@@ -0,0 +1,225 @@
1/*
2 * bbc.h: Defines for BootBus Controller found on UltraSPARC-III
3 * systems.
4 *
5 * Copyright (C) 2000 David S. Miller (davem@redhat.com)
6 */
7
8#ifndef _SPARC64_BBC_H
9#define _SPARC64_BBC_H
10
11/* Register sizes are indicated by "B" (Byte, 1-byte),
12 * "H" (Half-word, 2 bytes), "W" (Word, 4 bytes) or
13 * "Q" (Quad, 8 bytes) inside brackets.
14 */
15
16#define BBC_AID 0x00 /* [B] Agent ID */
17#define BBC_DEVP 0x01 /* [B] Device Present */
18#define BBC_ARB 0x02 /* [B] Arbitration */
19#define BBC_QUIESCE 0x03 /* [B] Quiesce */
20#define BBC_WDACTION 0x04 /* [B] Watchdog Action */
21#define BBC_SPG 0x06 /* [B] Soft POR Gen */
22#define BBC_SXG 0x07 /* [B] Soft XIR Gen */
23#define BBC_PSRC 0x08 /* [W] POR Source */
24#define BBC_XSRC 0x0c /* [B] XIR Source */
25#define BBC_CSC 0x0d /* [B] Clock Synthesizers Control*/
26#define BBC_ES_CTRL 0x0e /* [H] Energy Star Control */
27#define BBC_ES_ACT 0x10 /* [W] E* Assert Change Time */
28#define BBC_ES_DACT 0x14 /* [B] E* De-Assert Change Time */
29#define BBC_ES_DABT 0x15 /* [B] E* De-Assert Bypass Time */
30#define BBC_ES_ABT 0x16 /* [H] E* Assert Bypass Time */
31#define BBC_ES_PST 0x18 /* [W] E* PLL Settle Time */
32#define BBC_ES_FSL 0x1c /* [W] E* Frequency Switch Latency*/
33#define BBC_EBUST 0x20 /* [Q] EBUS Timing */
34#define BBC_JTAG_CMD 0x28 /* [W] JTAG+ Command */
35#define BBC_JTAG_CTRL 0x2c /* [B] JTAG+ Control */
36#define BBC_I2C_SEL 0x2d /* [B] I2C Selection */
37#define BBC_I2C_0_S1 0x2e /* [B] I2C ctrlr-0 reg S1 */
38#define BBC_I2C_0_S0 0x2f /* [B] I2C ctrlr-0 regs S0,S0',S2,S3*/
39#define BBC_I2C_1_S1 0x30 /* [B] I2C ctrlr-1 reg S1 */
40#define BBC_I2C_1_S0 0x31 /* [B] I2C ctrlr-1 regs S0,S0',S2,S3*/
41#define BBC_KBD_BEEP 0x32 /* [B] Keyboard Beep */
42#define BBC_KBD_BCNT 0x34 /* [W] Keyboard Beep Counter */
43
44#define BBC_REGS_SIZE 0x40
45
46/* There is a 2K scratch ram area at offset 0x80000 but I doubt
47 * we will use it for anything.
48 */
49
50/* Agent ID register. This register shows the Safari Agent ID
51 * for the processors. The value returned depends upon which
52 * cpu is reading the register.
53 */
54#define BBC_AID_ID 0x07 /* Safari ID */
55#define BBC_AID_RESV 0xf8 /* Reserved */
56
57/* Device Present register. One can determine which cpus are actually
58 * present in the machine by interrogating this register.
59 */
60#define BBC_DEVP_CPU0 0x01 /* Processor 0 present */
61#define BBC_DEVP_CPU1 0x02 /* Processor 1 present */
62#define BBC_DEVP_CPU2 0x04 /* Processor 2 present */
63#define BBC_DEVP_CPU3 0x08 /* Processor 3 present */
64#define BBC_DEVP_RESV 0xf0 /* Reserved */
65
66/* Arbitration register. This register is used to block access to
67 * the BBC from a particular cpu.
68 */
69#define BBC_ARB_CPU0 0x01 /* Enable cpu 0 BBC arbitratrion */
70#define BBC_ARB_CPU1 0x02 /* Enable cpu 1 BBC arbitratrion */
71#define BBC_ARB_CPU2 0x04 /* Enable cpu 2 BBC arbitratrion */
72#define BBC_ARB_CPU3 0x08 /* Enable cpu 3 BBC arbitratrion */
73#define BBC_ARB_RESV 0xf0 /* Reserved */
74
75/* Quiesce register. Bus and BBC segments for cpus can be disabled
76 * with this register, ie. for hot plugging.
77 */
78#define BBC_QUIESCE_S02 0x01 /* Quiesce Safari segment for cpu 0 and 2 */
79#define BBC_QUIESCE_S13 0x02 /* Quiesce Safari segment for cpu 1 and 3 */
80#define BBC_QUIESCE_B02 0x04 /* Quiesce BBC segment for cpu 0 and 2 */
81#define BBC_QUIESCE_B13 0x08 /* Quiesce BBC segment for cpu 1 and 3 */
82#define BBC_QUIESCE_FD0 0x10 /* Disable Fatal_Error[0] reporting */
83#define BBC_QUIESCE_FD1 0x20 /* Disable Fatal_Error[1] reporting */
84#define BBC_QUIESCE_FD2 0x40 /* Disable Fatal_Error[2] reporting */
85#define BBC_QUIESCE_FD3 0x80 /* Disable Fatal_Error[3] reporting */
86
87/* Watchdog Action register. When the watchdog device timer expires
88 * a line is enabled to the BBC. The action BBC takes when this line
89 * is asserted can be controlled by this regiser.
90 */
91#define BBC_WDACTION_RST 0x01 /* When set, watchdog causes system reset.
92 * When clear, BBC ignores watchdog signal.
93 */
94#define BBC_WDACTION_RESV 0xfe /* Reserved */
95
96/* Soft_POR_GEN register. The POR (Power On Reset) signal may be asserted
97 * for specific processors or all processors via this register.
98 */
99#define BBC_SPG_CPU0 0x01 /* Assert POR for processor 0 */
100#define BBC_SPG_CPU1 0x02 /* Assert POR for processor 1 */
101#define BBC_SPG_CPU2 0x04 /* Assert POR for processor 2 */
102#define BBC_SPG_CPU3 0x08 /* Assert POR for processor 3 */
103#define BBC_SPG_CPUALL 0x10 /* Reset all processors and reset
104 * the entire system.
105 */
106#define BBC_SPG_RESV 0xe0 /* Reserved */
107
108/* Soft_XIR_GEN register. The XIR (eXternally Initiated Reset) signal
109 * may be asserted to specific processors via this register.
110 */
111#define BBC_SXG_CPU0 0x01 /* Assert XIR for processor 0 */
112#define BBC_SXG_CPU1 0x02 /* Assert XIR for processor 1 */
113#define BBC_SXG_CPU2 0x04 /* Assert XIR for processor 2 */
114#define BBC_SXG_CPU3 0x08 /* Assert XIR for processor 3 */
115#define BBC_SXG_RESV 0xf0 /* Reserved */
116
117/* POR Source register. One may identify the cause of the most recent
118 * reset by reading this register.
119 */
120#define BBC_PSRC_SPG0 0x0001 /* CPU 0 reset via BBC_SPG register */
121#define BBC_PSRC_SPG1 0x0002 /* CPU 1 reset via BBC_SPG register */
122#define BBC_PSRC_SPG2 0x0004 /* CPU 2 reset via BBC_SPG register */
123#define BBC_PSRC_SPG3 0x0008 /* CPU 3 reset via BBC_SPG register */
124#define BBC_PSRC_SPGSYS 0x0010 /* System reset via BBC_SPG register */
125#define BBC_PSRC_JTAG 0x0020 /* System reset via JTAG+ */
126#define BBC_PSRC_BUTTON 0x0040 /* System reset via push-button dongle */
127#define BBC_PSRC_PWRUP 0x0080 /* System reset via power-up */
128#define BBC_PSRC_FE0 0x0100 /* CPU 0 reported Fatal_Error */
129#define BBC_PSRC_FE1 0x0200 /* CPU 1 reported Fatal_Error */
130#define BBC_PSRC_FE2 0x0400 /* CPU 2 reported Fatal_Error */
131#define BBC_PSRC_FE3 0x0800 /* CPU 3 reported Fatal_Error */
132#define BBC_PSRC_FE4 0x1000 /* Schizo reported Fatal_Error */
133#define BBC_PSRC_FE5 0x2000 /* Safari device 5 reported Fatal_Error */
134#define BBC_PSRC_FE6 0x4000 /* CPMS reported Fatal_Error */
135#define BBC_PSRC_SYNTH 0x8000 /* System reset when on-board clock synthesizers
136 * were updated.
137 */
138#define BBC_PSRC_WDT 0x10000 /* System reset via Super I/O watchdog */
139#define BBC_PSRC_RSC 0x20000 /* System reset via RSC remote monitoring
140 * device
141 */
142
143/* XIR Source register. The source of an XIR event sent to a processor may
144 * be determined via this register.
145 */
146#define BBC_XSRC_SXG0 0x01 /* CPU 0 received XIR via Soft_XIR_GEN reg */
147#define BBC_XSRC_SXG1 0x02 /* CPU 1 received XIR via Soft_XIR_GEN reg */
148#define BBC_XSRC_SXG2 0x04 /* CPU 2 received XIR via Soft_XIR_GEN reg */
149#define BBC_XSRC_SXG3 0x08 /* CPU 3 received XIR via Soft_XIR_GEN reg */
150#define BBC_XSRC_JTAG 0x10 /* All CPUs received XIR via JTAG+ */
151#define BBC_XSRC_W_OR_B 0x20 /* All CPUs received XIR either because:
152 * a) Super I/O watchdog fired, or
153 * b) XIR push button was activated
154 */
155#define BBC_XSRC_RESV 0xc0 /* Reserved */
156
157/* Clock Synthesizers Control register. This register provides the big-bang
158 * programming interface to the two clock synthesizers of the machine.
159 */
160#define BBC_CSC_SLOAD 0x01 /* Directly connected to S_LOAD pins */
161#define BBC_CSC_SDATA 0x02 /* Directly connected to S_DATA pins */
162#define BBC_CSC_SCLOCK 0x04 /* Directly connected to S_CLOCK pins */
163#define BBC_CSC_RESV 0x78 /* Reserved */
164#define BBC_CSC_RST 0x80 /* Generate system reset when S_LOAD==1 */
165
166/* Energy Star Control register. This register is used to generate the
167 * clock frequency change trigger to the main system devices (Schizo and
168 * the processors). The transition occurs when bits in this register
169 * go from 0 to 1; only one bit may be set at a time, else no action
170 * occurs. Basically the sequence of events is:
171 * a) Choose new frequency: full, 1/2 or 1/32
172 * b) Program this desired frequency into the cpus and Schizo.
173 * c) Set the same value in this register.
174 * d) 16 system clocks later, clear this register.
175 */
176#define BBC_ES_CTRL_1_1 0x01 /* Full frequency */
177#define BBC_ES_CTRL_1_2 0x02 /* 1/2 frequency */
178#define BBC_ES_CTRL_1_32 0x20 /* 1/32 frequency */
179#define BBC_ES_RESV 0xdc /* Reserved */
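The four-step sequence in the comment above boils down to two writes of this register once the new divisor has been programmed into the CPUs and Schizo (steps a and b). A minimal sketch of steps (c) and (d), assuming a BBC_ES_CTRL register offset defined earlier in this header and plain MMIO accessors; it is not the kernel's actual Energy Star code:

#include <linux/delay.h>
#include <linux/io.h>

/* Set exactly one BBC_ES_CTRL_* bit to trigger the frequency change,
 * wait at least 16 system clocks, then clear the register again.
 * The BBC_ES_CTRL offset is an assumption for this sketch. */
static void bbc_estar_trigger(void __iomem *bbc_regs, u8 ratio)
{
	/* ratio is one of BBC_ES_CTRL_1_1, _1_2 or _1_32; setting more
	 * than one bit at once causes no action, per the comment above. */
	writeb(ratio, bbc_regs + BBC_ES_CTRL);
	udelay(1);				/* comfortably > 16 system clocks */
	writeb(0, bbc_regs + BBC_ES_CTRL);
}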
180
181/* Energy Star Assert Change Time register. This determines the number
182 * of BBC clock cycles (which is half the system frequency) between
183 * the detection of FREEZE_ACK being asserted and the assertion of
184 * the CLK_CHANGE_L[2:0] signals.
185 */
186#define BBC_ES_ACT_VAL 0xff
187
188/* Energy Star Assert Bypass Time register. This determines the number
189 * of BBC clock cycles (which is half the system frequency) between
190 * the assertion of the CLK_CHANGE_L[2:0] signals and the assertion of
191 * the ESTAR_PLL_BYPASS signal.
192 */
193#define BBC_ES_ABT_VAL 0xffff
194
195/* Energy Star PLL Settle Time register. This determines the number of
196 * BBC clock cycles (which is half the system frequency) between the
197 * de-assertion of CLK_CHANGE_L[2:0] and the de-assertion of the FREEZE_L
198 * signal.
199 */
200#define BBC_ES_PST_VAL 0xffffffff
201
202/* Energy Star Frequency Switch Latency register. This is the number of
203 * BBC clocks between the de-assertion of CLK_CHANGE_L[2:0] and the first
204 * edge of the Safari clock at the new frequency.
205 */
206#define BBC_ES_FSL_VAL 0xffffffff
207
208/* Keyboard Beep control register. This is a simple enabler for the audio
209 * beep sound.
210 */
211#define BBC_KBD_BEEP_ENABLE 0x01 /* Enable beep */
212#define BBC_KBD_BEEP_RESV 0xfe /* Reserved */
213
214/* Keyboard Beep Counter register. There is a free-running counter inside
215 * the BBC which runs at half the system clock. The bit set in this register
216 * determines when the audio sound is generated. So for example if bit
217 * 10 is set, the audio beep will oscillate at 1/(2**12) of the system clock. The keyboard beep
218 * generator automatically selects a different bit to use if the system clock
219 * is changed via Energy Star.
220 */
221#define BBC_KBD_BCNT_BITS 0x0007fc00
222#define BBC_KBC_BCNT_RESV 0xfff803ff
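To make the 1/(2**12) example above concrete: the counter ticks at half the system clock, and bit b of a binary counter completes one full cycle every 2^(b+1) ticks, so the beep period is 2^(b+2) system clocks. A tiny illustrative helper (not kernel code):

/* Beep frequency implied by selecting counter bit 'bit' (10..18 per
 * BBC_KBD_BCNT_BITS) with a system clock of sysclk_hz.  For bit 10 this
 * is sysclk_hz / 2^12, matching the comment above. */
static unsigned long bbc_beep_hz(unsigned long sysclk_hz, unsigned int bit)
{
	return sysclk_hz >> (bit + 2);
}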
223
224#endif /* _SPARC64_BBC_H */
225
diff --git a/include/asm-sparc/bitops.h b/include/asm-sparc/bitops.h
index 68b98a7e6454..1a2949d0193f 100644
--- a/include/asm-sparc/bitops.h
+++ b/include/asm-sparc/bitops.h
@@ -1,111 +1,8 @@
(new wrapper, lines 1-8 of the rewritten bitops.h:)
1#ifndef ___ASM_SPARC_BITOPS_H
2#define ___ASM_SPARC_BITOPS_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/bitops_64.h>
5#else
6#include <asm-sparc/bitops_32.h>
7#endif
8#endif
(removed content, old lines 1-19; old lines 20-111 continue below:)
1/*
2 * bitops.h: Bit string operations on the Sparc.
3 *
4 * Copyright 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright 1996 Eddie C. Dost (ecd@skynet.be)
6 * Copyright 2001 Anton Blanchard (anton@samba.org)
7 */
8
9#ifndef _SPARC_BITOPS_H
10#define _SPARC_BITOPS_H
11
12#include <linux/compiler.h>
13#include <asm/byteorder.h>
14
15#ifdef __KERNEL__
16
17#ifndef _LINUX_BITOPS_H
18#error only <linux/bitops.h> can be included directly
19#endif
20
21extern unsigned long ___set_bit(unsigned long *addr, unsigned long mask);
22extern unsigned long ___clear_bit(unsigned long *addr, unsigned long mask);
23extern unsigned long ___change_bit(unsigned long *addr, unsigned long mask);
24
25/*
26 * Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0'
27 * is in the highest of the four bytes and bit '31' is the high bit
28 * within the first byte. Sparc is BIG-Endian. Unless noted otherwise
29 * all bit-ops return 0 if bit was previously clear and != 0 otherwise.
30 */
31static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
32{
33 unsigned long *ADDR, mask;
34
35 ADDR = ((unsigned long *) addr) + (nr >> 5);
36 mask = 1 << (nr & 31);
37
38 return ___set_bit(ADDR, mask) != 0;
39}
40
41static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
42{
43 unsigned long *ADDR, mask;
44
45 ADDR = ((unsigned long *) addr) + (nr >> 5);
46 mask = 1 << (nr & 31);
47
48 (void) ___set_bit(ADDR, mask);
49}
50
51static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
52{
53 unsigned long *ADDR, mask;
54
55 ADDR = ((unsigned long *) addr) + (nr >> 5);
56 mask = 1 << (nr & 31);
57
58 return ___clear_bit(ADDR, mask) != 0;
59}
60
61static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
62{
63 unsigned long *ADDR, mask;
64
65 ADDR = ((unsigned long *) addr) + (nr >> 5);
66 mask = 1 << (nr & 31);
67
68 (void) ___clear_bit(ADDR, mask);
69}
70
71static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
72{
73 unsigned long *ADDR, mask;
74
75 ADDR = ((unsigned long *) addr) + (nr >> 5);
76 mask = 1 << (nr & 31);
77
78 return ___change_bit(ADDR, mask) != 0;
79}
80
81static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
82{
83 unsigned long *ADDR, mask;
84
85 ADDR = ((unsigned long *) addr) + (nr >> 5);
86 mask = 1 << (nr & 31);
87
88 (void) ___change_bit(ADDR, mask);
89}
90
91#include <asm-generic/bitops/non-atomic.h>
92
93#define smp_mb__before_clear_bit() do { } while(0)
94#define smp_mb__after_clear_bit() do { } while(0)
95
96#include <asm-generic/bitops/ffz.h>
97#include <asm-generic/bitops/__ffs.h>
98#include <asm-generic/bitops/sched.h>
99#include <asm-generic/bitops/ffs.h>
100#include <asm-generic/bitops/fls.h>
101#include <asm-generic/bitops/fls64.h>
102#include <asm-generic/bitops/hweight.h>
103#include <asm-generic/bitops/lock.h>
104#include <asm-generic/bitops/find.h>
105#include <asm-generic/bitops/ext2-non-atomic.h>
106#include <asm-generic/bitops/ext2-atomic.h>
107#include <asm-generic/bitops/minix.h>
108
109#endif /* __KERNEL__ */
110
111#endif /* defined(_SPARC_BITOPS_H) */
diff --git a/include/asm-sparc/bitops_32.h b/include/asm-sparc/bitops_32.h
new file mode 100644
index 000000000000..68b98a7e6454
--- /dev/null
+++ b/include/asm-sparc/bitops_32.h
@@ -0,0 +1,111 @@
1/*
2 * bitops.h: Bit string operations on the Sparc.
3 *
4 * Copyright 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright 1996 Eddie C. Dost (ecd@skynet.be)
6 * Copyright 2001 Anton Blanchard (anton@samba.org)
7 */
8
9#ifndef _SPARC_BITOPS_H
10#define _SPARC_BITOPS_H
11
12#include <linux/compiler.h>
13#include <asm/byteorder.h>
14
15#ifdef __KERNEL__
16
17#ifndef _LINUX_BITOPS_H
18#error only <linux/bitops.h> can be included directly
19#endif
20
21extern unsigned long ___set_bit(unsigned long *addr, unsigned long mask);
22extern unsigned long ___clear_bit(unsigned long *addr, unsigned long mask);
23extern unsigned long ___change_bit(unsigned long *addr, unsigned long mask);
24
25/*
26 * Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0'
27 * is in the highest of the four bytes and bit '31' is the high bit
28 * within the first byte. Sparc is BIG-Endian. Unless noted otherwise
29 * all bit-ops return 0 if bit was previously clear and != 0 otherwise.
30 */
31static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
32{
33 unsigned long *ADDR, mask;
34
35 ADDR = ((unsigned long *) addr) + (nr >> 5);
36 mask = 1 << (nr & 31);
37
38 return ___set_bit(ADDR, mask) != 0;
39}
40
41static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
42{
43 unsigned long *ADDR, mask;
44
45 ADDR = ((unsigned long *) addr) + (nr >> 5);
46 mask = 1 << (nr & 31);
47
48 (void) ___set_bit(ADDR, mask);
49}
50
51static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
52{
53 unsigned long *ADDR, mask;
54
55 ADDR = ((unsigned long *) addr) + (nr >> 5);
56 mask = 1 << (nr & 31);
57
58 return ___clear_bit(ADDR, mask) != 0;
59}
60
61static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
62{
63 unsigned long *ADDR, mask;
64
65 ADDR = ((unsigned long *) addr) + (nr >> 5);
66 mask = 1 << (nr & 31);
67
68 (void) ___clear_bit(ADDR, mask);
69}
70
71static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
72{
73 unsigned long *ADDR, mask;
74
75 ADDR = ((unsigned long *) addr) + (nr >> 5);
76 mask = 1 << (nr & 31);
77
78 return ___change_bit(ADDR, mask) != 0;
79}
80
81static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
82{
83 unsigned long *ADDR, mask;
84
85 ADDR = ((unsigned long *) addr) + (nr >> 5);
86 mask = 1 << (nr & 31);
87
88 (void) ___change_bit(ADDR, mask);
89}
90
91#include <asm-generic/bitops/non-atomic.h>
92
93#define smp_mb__before_clear_bit() do { } while(0)
94#define smp_mb__after_clear_bit() do { } while(0)
95
96#include <asm-generic/bitops/ffz.h>
97#include <asm-generic/bitops/__ffs.h>
98#include <asm-generic/bitops/sched.h>
99#include <asm-generic/bitops/ffs.h>
100#include <asm-generic/bitops/fls.h>
101#include <asm-generic/bitops/fls64.h>
102#include <asm-generic/bitops/hweight.h>
103#include <asm-generic/bitops/lock.h>
104#include <asm-generic/bitops/find.h>
105#include <asm-generic/bitops/ext2-non-atomic.h>
106#include <asm-generic/bitops/ext2-atomic.h>
107#include <asm-generic/bitops/minix.h>
108
109#endif /* __KERNEL__ */
110
111#endif /* defined(_SPARC_BITOPS_H) */
diff --git a/include/asm-sparc/bitops_64.h b/include/asm-sparc/bitops_64.h
new file mode 100644
index 000000000000..bb87b8080220
--- /dev/null
+++ b/include/asm-sparc/bitops_64.h
@@ -0,0 +1,107 @@
1/*
2 * bitops.h: Bit string operations on the V9.
3 *
4 * Copyright 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#ifndef _SPARC64_BITOPS_H
8#define _SPARC64_BITOPS_H
9
10#ifndef _LINUX_BITOPS_H
11#error only <linux/bitops.h> can be included directly
12#endif
13
14#include <linux/compiler.h>
15#include <asm/byteorder.h>
16
17extern int test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
18extern int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
19extern int test_and_change_bit(unsigned long nr, volatile unsigned long *addr);
20extern void set_bit(unsigned long nr, volatile unsigned long *addr);
21extern void clear_bit(unsigned long nr, volatile unsigned long *addr);
22extern void change_bit(unsigned long nr, volatile unsigned long *addr);
23
24#include <asm-generic/bitops/non-atomic.h>
25
26#ifdef CONFIG_SMP
27#define smp_mb__before_clear_bit() membar_storeload_loadload()
28#define smp_mb__after_clear_bit() membar_storeload_storestore()
29#else
30#define smp_mb__before_clear_bit() barrier()
31#define smp_mb__after_clear_bit() barrier()
32#endif
33
34#include <asm-generic/bitops/ffz.h>
35#include <asm-generic/bitops/__ffs.h>
36#include <asm-generic/bitops/fls.h>
37#include <asm-generic/bitops/__fls.h>
38#include <asm-generic/bitops/fls64.h>
39
40#ifdef __KERNEL__
41
42#include <asm-generic/bitops/sched.h>
43#include <asm-generic/bitops/ffs.h>
44
45/*
46 * hweightN: returns the hamming weight (i.e. the number
47 * of bits set) of a N-bit word
48 */
49
50#ifdef ULTRA_HAS_POPULATION_COUNT
51
52static inline unsigned int hweight64(unsigned long w)
53{
54 unsigned int res;
55
56 __asm__ ("popc %1,%0" : "=r" (res) : "r" (w));
57 return res;
58}
59
60static inline unsigned int hweight32(unsigned int w)
61{
62 unsigned int res;
63
64 __asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xffffffff));
65 return res;
66}
67
68static inline unsigned int hweight16(unsigned int w)
69{
70 unsigned int res;
71
72 __asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xffff));
73 return res;
74}
75
76static inline unsigned int hweight8(unsigned int w)
77{
78 unsigned int res;
79
80 __asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xff));
81 return res;
82}
83
84#else
85
86#include <asm-generic/bitops/hweight.h>
87
88#endif
89#include <asm-generic/bitops/lock.h>
90#endif /* __KERNEL__ */
91
92#include <asm-generic/bitops/find.h>
93
94#ifdef __KERNEL__
95
96#include <asm-generic/bitops/ext2-non-atomic.h>
97
98#define ext2_set_bit_atomic(lock,nr,addr) \
99 test_and_set_bit((nr) ^ 0x38,(unsigned long *)(addr))
100#define ext2_clear_bit_atomic(lock,nr,addr) \
101 test_and_clear_bit((nr) ^ 0x38,(unsigned long *)(addr))
102
103#include <asm-generic/bitops/minix.h>
104
105#endif /* __KERNEL__ */
106
107#endif /* defined(_SPARC64_BITOPS_H) */
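A note on the nr ^ 0x38 in ext2_set_bit_atomic()/ext2_clear_bit_atomic() above: ext2 bitmaps use little-endian bit numbering, while the atomic bitops operate on big-endian 64-bit words, so the bit number's byte index within the 8-byte word must be reversed. XOR-ing with 0x38 (binary 111000) does exactly that while leaving the bit position inside the byte untouched. A stand-alone demonstration of the mapping (not kernel code):

#include <assert.h>

/* For every bit number 0..63, nr ^ 0x38 reverses the byte index within a
 * 64-bit word (byte i becomes byte 7 - i) and preserves the bit-in-byte
 * position, which is the little-endian to big-endian adjustment the
 * ext2_*_bit_atomic() macros above rely on. */
int main(void)
{
	for (unsigned int nr = 0; nr < 64; nr++) {
		unsigned int byte = nr >> 3;	/* byte index, 0..7 */
		unsigned int bit  = nr & 7;	/* bit within the byte */
		unsigned int adj  = nr ^ 0x38;

		assert((adj >> 3) == 7 - byte);
		assert((adj & 7) == bit);
	}
	return 0;
}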
diff --git a/include/asm-sparc/cacheflush.h b/include/asm-sparc/cacheflush.h
index 68ac10910271..2b6a37957c2d 100644
--- a/include/asm-sparc/cacheflush.h
+++ b/include/asm-sparc/cacheflush.h
@@ -1,85 +1,8 @@
(new wrapper, lines 1-8 of the rewritten cacheflush.h:)
1#ifndef ___ASM_SPARC_CACHEFLUSH_H
2#define ___ASM_SPARC_CACHEFLUSH_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/cacheflush_64.h>
5#else
6#include <asm-sparc/cacheflush_32.h>
7#endif
8#endif
(removed content, old lines 1-8; old lines 9-85 continue below:)
1#ifndef _SPARC_CACHEFLUSH_H
2#define _SPARC_CACHEFLUSH_H
3
4#include <linux/mm.h> /* Common for other includes */
5// #include <linux/kernel.h> from pgalloc.h
6// #include <linux/sched.h> from pgalloc.h
7
8// #include <asm/page.h>
9#include <asm/btfixup.h>
10
11/*
12 * Fine grained cache flushing.
13 */
14#ifdef CONFIG_SMP
15
16BTFIXUPDEF_CALL(void, local_flush_cache_all, void)
17BTFIXUPDEF_CALL(void, local_flush_cache_mm, struct mm_struct *)
18BTFIXUPDEF_CALL(void, local_flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
19BTFIXUPDEF_CALL(void, local_flush_cache_page, struct vm_area_struct *, unsigned long)
20
21#define local_flush_cache_all() BTFIXUP_CALL(local_flush_cache_all)()
22#define local_flush_cache_mm(mm) BTFIXUP_CALL(local_flush_cache_mm)(mm)
23#define local_flush_cache_range(vma,start,end) BTFIXUP_CALL(local_flush_cache_range)(vma,start,end)
24#define local_flush_cache_page(vma,addr) BTFIXUP_CALL(local_flush_cache_page)(vma,addr)
25
26BTFIXUPDEF_CALL(void, local_flush_page_to_ram, unsigned long)
27BTFIXUPDEF_CALL(void, local_flush_sig_insns, struct mm_struct *, unsigned long)
28
29#define local_flush_page_to_ram(addr) BTFIXUP_CALL(local_flush_page_to_ram)(addr)
30#define local_flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(local_flush_sig_insns)(mm,insn_addr)
31
32extern void smp_flush_cache_all(void);
33extern void smp_flush_cache_mm(struct mm_struct *mm);
34extern void smp_flush_cache_range(struct vm_area_struct *vma,
35 unsigned long start,
36 unsigned long end);
37extern void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
38
39extern void smp_flush_page_to_ram(unsigned long page);
40extern void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
41
42#endif /* CONFIG_SMP */
43
44BTFIXUPDEF_CALL(void, flush_cache_all, void)
45BTFIXUPDEF_CALL(void, flush_cache_mm, struct mm_struct *)
46BTFIXUPDEF_CALL(void, flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
47BTFIXUPDEF_CALL(void, flush_cache_page, struct vm_area_struct *, unsigned long)
48
49#define flush_cache_all() BTFIXUP_CALL(flush_cache_all)()
50#define flush_cache_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
51#define flush_cache_dup_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
52#define flush_cache_range(vma,start,end) BTFIXUP_CALL(flush_cache_range)(vma,start,end)
53#define flush_cache_page(vma,addr,pfn) BTFIXUP_CALL(flush_cache_page)(vma,addr)
54#define flush_icache_range(start, end) do { } while (0)
55#define flush_icache_page(vma, pg) do { } while (0)
56
57#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
58
59#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
60 do { \
61 flush_cache_page(vma, vaddr, page_to_pfn(page));\
62 memcpy(dst, src, len); \
63 } while (0)
64#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
65 do { \
66 flush_cache_page(vma, vaddr, page_to_pfn(page));\
67 memcpy(dst, src, len); \
68 } while (0)
69
70BTFIXUPDEF_CALL(void, __flush_page_to_ram, unsigned long)
71BTFIXUPDEF_CALL(void, flush_sig_insns, struct mm_struct *, unsigned long)
72
73#define __flush_page_to_ram(addr) BTFIXUP_CALL(__flush_page_to_ram)(addr)
74#define flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(flush_sig_insns)(mm,insn_addr)
75
76extern void sparc_flush_page_to_ram(struct page *page);
77
78#define flush_dcache_page(page) sparc_flush_page_to_ram(page)
79#define flush_dcache_mmap_lock(mapping) do { } while (0)
80#define flush_dcache_mmap_unlock(mapping) do { } while (0)
81
82#define flush_cache_vmap(start, end) flush_cache_all()
83#define flush_cache_vunmap(start, end) flush_cache_all()
84
85#endif /* _SPARC_CACHEFLUSH_H */
diff --git a/include/asm-sparc/cacheflush_32.h b/include/asm-sparc/cacheflush_32.h
new file mode 100644
index 000000000000..68ac10910271
--- /dev/null
+++ b/include/asm-sparc/cacheflush_32.h
@@ -0,0 +1,85 @@
1#ifndef _SPARC_CACHEFLUSH_H
2#define _SPARC_CACHEFLUSH_H
3
4#include <linux/mm.h> /* Common for other includes */
5// #include <linux/kernel.h> from pgalloc.h
6// #include <linux/sched.h> from pgalloc.h
7
8// #include <asm/page.h>
9#include <asm/btfixup.h>
10
11/*
12 * Fine grained cache flushing.
13 */
14#ifdef CONFIG_SMP
15
16BTFIXUPDEF_CALL(void, local_flush_cache_all, void)
17BTFIXUPDEF_CALL(void, local_flush_cache_mm, struct mm_struct *)
18BTFIXUPDEF_CALL(void, local_flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
19BTFIXUPDEF_CALL(void, local_flush_cache_page, struct vm_area_struct *, unsigned long)
20
21#define local_flush_cache_all() BTFIXUP_CALL(local_flush_cache_all)()
22#define local_flush_cache_mm(mm) BTFIXUP_CALL(local_flush_cache_mm)(mm)
23#define local_flush_cache_range(vma,start,end) BTFIXUP_CALL(local_flush_cache_range)(vma,start,end)
24#define local_flush_cache_page(vma,addr) BTFIXUP_CALL(local_flush_cache_page)(vma,addr)
25
26BTFIXUPDEF_CALL(void, local_flush_page_to_ram, unsigned long)
27BTFIXUPDEF_CALL(void, local_flush_sig_insns, struct mm_struct *, unsigned long)
28
29#define local_flush_page_to_ram(addr) BTFIXUP_CALL(local_flush_page_to_ram)(addr)
30#define local_flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(local_flush_sig_insns)(mm,insn_addr)
31
32extern void smp_flush_cache_all(void);
33extern void smp_flush_cache_mm(struct mm_struct *mm);
34extern void smp_flush_cache_range(struct vm_area_struct *vma,
35 unsigned long start,
36 unsigned long end);
37extern void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
38
39extern void smp_flush_page_to_ram(unsigned long page);
40extern void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
41
42#endif /* CONFIG_SMP */
43
44BTFIXUPDEF_CALL(void, flush_cache_all, void)
45BTFIXUPDEF_CALL(void, flush_cache_mm, struct mm_struct *)
46BTFIXUPDEF_CALL(void, flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
47BTFIXUPDEF_CALL(void, flush_cache_page, struct vm_area_struct *, unsigned long)
48
49#define flush_cache_all() BTFIXUP_CALL(flush_cache_all)()
50#define flush_cache_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
51#define flush_cache_dup_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
52#define flush_cache_range(vma,start,end) BTFIXUP_CALL(flush_cache_range)(vma,start,end)
53#define flush_cache_page(vma,addr,pfn) BTFIXUP_CALL(flush_cache_page)(vma,addr)
54#define flush_icache_range(start, end) do { } while (0)
55#define flush_icache_page(vma, pg) do { } while (0)
56
57#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
58
59#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
60 do { \
61 flush_cache_page(vma, vaddr, page_to_pfn(page));\
62 memcpy(dst, src, len); \
63 } while (0)
64#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
65 do { \
66 flush_cache_page(vma, vaddr, page_to_pfn(page));\
67 memcpy(dst, src, len); \
68 } while (0)
69
70BTFIXUPDEF_CALL(void, __flush_page_to_ram, unsigned long)
71BTFIXUPDEF_CALL(void, flush_sig_insns, struct mm_struct *, unsigned long)
72
73#define __flush_page_to_ram(addr) BTFIXUP_CALL(__flush_page_to_ram)(addr)
74#define flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(flush_sig_insns)(mm,insn_addr)
75
76extern void sparc_flush_page_to_ram(struct page *page);
77
78#define flush_dcache_page(page) sparc_flush_page_to_ram(page)
79#define flush_dcache_mmap_lock(mapping) do { } while (0)
80#define flush_dcache_mmap_unlock(mapping) do { } while (0)
81
82#define flush_cache_vmap(start, end) flush_cache_all()
83#define flush_cache_vunmap(start, end) flush_cache_all()
84
85#endif /* _SPARC_CACHEFLUSH_H */
diff --git a/include/asm-sparc/cacheflush_64.h b/include/asm-sparc/cacheflush_64.h
new file mode 100644
index 000000000000..c43321729b3b
--- /dev/null
+++ b/include/asm-sparc/cacheflush_64.h
@@ -0,0 +1,76 @@
1#ifndef _SPARC64_CACHEFLUSH_H
2#define _SPARC64_CACHEFLUSH_H
3
4#include <asm/page.h>
5
6#ifndef __ASSEMBLY__
7
8#include <linux/mm.h>
9
10/* Cache flush operations. */
11
12/* These are the same regardless of whether this is an SMP kernel or not. */
13#define flush_cache_mm(__mm) \
14 do { if ((__mm) == current->mm) flushw_user(); } while(0)
15#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
16#define flush_cache_range(vma, start, end) \
17 flush_cache_mm((vma)->vm_mm)
18#define flush_cache_page(vma, page, pfn) \
19 flush_cache_mm((vma)->vm_mm)
20
21/*
22 * On spitfire, the icache doesn't snoop local stores and we don't
23 * use block commit stores (which invalidate icache lines) during
24 * module load, so we need this.
25 */
26extern void flush_icache_range(unsigned long start, unsigned long end);
27extern void __flush_icache_page(unsigned long);
28
29extern void __flush_dcache_page(void *addr, int flush_icache);
30extern void flush_dcache_page_impl(struct page *page);
31#ifdef CONFIG_SMP
32extern void smp_flush_dcache_page_impl(struct page *page, int cpu);
33extern void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
34#else
35#define smp_flush_dcache_page_impl(page,cpu) flush_dcache_page_impl(page)
36#define flush_dcache_page_all(mm,page) flush_dcache_page_impl(page)
37#endif
38
39extern void __flush_dcache_range(unsigned long start, unsigned long end);
40extern void flush_dcache_page(struct page *page);
41
42#define flush_icache_page(vma, pg) do { } while(0)
43#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
44
45extern void flush_ptrace_access(struct vm_area_struct *, struct page *,
46 unsigned long uaddr, void *kaddr,
47 unsigned long len, int write);
48
49#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
50 do { \
51 flush_cache_page(vma, vaddr, page_to_pfn(page)); \
52 memcpy(dst, src, len); \
53 flush_ptrace_access(vma, page, vaddr, src, len, 0); \
54 } while (0)
55
56#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
57 do { \
58 flush_cache_page(vma, vaddr, page_to_pfn(page)); \
59 memcpy(dst, src, len); \
60 flush_ptrace_access(vma, page, vaddr, dst, len, 1); \
61 } while (0)
62
63#define flush_dcache_mmap_lock(mapping) do { } while (0)
64#define flush_dcache_mmap_unlock(mapping) do { } while (0)
65
66#define flush_cache_vmap(start, end) do { } while (0)
67#define flush_cache_vunmap(start, end) do { } while (0)
68
69#ifdef CONFIG_DEBUG_PAGEALLOC
70/* internal debugging function */
71void kernel_map_pages(struct page *page, int numpages, int enable);
72#endif
73
74#endif /* !__ASSEMBLY__ */
75
76#endif /* _SPARC64_CACHEFLUSH_H */
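The comment in cacheflush_64.h about spitfire's I-cache not snooping local stores translates into a simple rule for any code that patches instructions: store the new instruction, then flush the affected range. A minimal usage sketch of the flush_icache_range() declared above (hypothetical helper, not kernel code):

#include <linux/types.h>

/* Hypothetical instruction-patching helper: after the store, flush the
 * I-cache range so CPUs whose I-cache does not snoop local stores see
 * the new instruction. */
static void patch_one_insn(u32 *insn, u32 new_insn)
{
	*insn = new_insn;
	flush_icache_range((unsigned long)insn,
			   (unsigned long)insn + sizeof(*insn));
}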
diff --git a/include/asm-sparc/chafsr.h b/include/asm-sparc/chafsr.h
new file mode 100644
index 000000000000..85c69b38220b
--- /dev/null
+++ b/include/asm-sparc/chafsr.h
@@ -0,0 +1,241 @@
1#ifndef _SPARC64_CHAFSR_H
2#define _SPARC64_CHAFSR_H
3
4/* Cheetah Asynchronous Fault Status register, ASI=0x4C VA<63:0>=0x0 */
5
6/* Comments indicate which processor variants on which the bit definition
7 * is valid. Codes are:
8 * ch --> cheetah
9 * ch+ --> cheetah plus
10 * jp --> jalapeno
11 */
12
13/* All bits of this register except M_SYNDROME and E_SYNDROME are
14 * read, write 1 to clear. M_SYNDROME and E_SYNDROME are read-only.
15 */
16
17/* Software bit set by linux trap handlers to indicate that the trap was
18 * signalled at %tl >= 1.
19 */
20#define CHAFSR_TL1 (1UL << 63UL) /* n/a */
21
22/* Unmapped error from system bus for prefetch queue or
23 * store queue read operation
24 */
25#define CHPAFSR_DTO (1UL << 59UL) /* ch+ */
26
27/* Bus error from system bus for prefetch queue or store queue
28 * read operation
29 */
30#define CHPAFSR_DBERR (1UL << 58UL) /* ch+ */
31
32/* Hardware corrected E-cache Tag ECC error */
33#define CHPAFSR_THCE (1UL << 57UL) /* ch+ */
34/* System interface protocol error, hw timeout caused */
35#define JPAFSR_JETO (1UL << 57UL) /* jp */
36
37/* SW handled correctable E-cache Tag ECC error */
38#define CHPAFSR_TSCE (1UL << 56UL) /* ch+ */
39/* Parity error on system snoop results */
40#define JPAFSR_SCE (1UL << 56UL) /* jp */
41
42/* Uncorrectable E-cache Tag ECC error */
43#define CHPAFSR_TUE (1UL << 55UL) /* ch+ */
44/* System interface protocol error, illegal command detected */
45#define JPAFSR_JEIC (1UL << 55UL) /* jp */
46
47/* Uncorrectable system bus data ECC error due to prefetch
48 * or store fill request
49 */
50#define CHPAFSR_DUE (1UL << 54UL) /* ch+ */
51/* System interface protocol error, illegal ADTYPE detected */
52#define JPAFSR_JEIT (1UL << 54UL) /* jp */
53
54/* Multiple errors of the same type have occurred. This bit is set when
55 * an uncorrectable error or a SW correctable error occurs and the status
56 * bit to report that error is already set. Multiple errors of
57 * different types are indicated by setting multiple status bits.
58 *
59 * This bit is not set if multiple HW corrected errors with the same
60 * status bit occur, only uncorrectable and SW correctable ones have
61 * this behavior.
62 *
63 * This bit is not set when multiple ECC errors happen within a single
64 * 64-byte system bus transaction. Only the first ECC error in a 16-byte
65 * subunit will be logged. All errors in subsequent 16-byte subunits
66 * from the same 64-byte transaction are ignored.
67 */
68#define CHAFSR_ME (1UL << 53UL) /* ch,ch+,jp */
69
70/* Privileged state error has occurred. This is a capture of PSTATE.PRIV
71 * at the time the error is detected.
72 */
73#define CHAFSR_PRIV (1UL << 52UL) /* ch,ch+,jp */
74
75/* The following bits 51 (CHAFSR_PERR) to 33 (CHAFSR_CE) are sticky error
76 * bits and record the most recently detected errors. Bits accumulate
77 * errors that have been detected since the last write to clear the bit.
78 */
79
80/* System interface protocol error. The processor asserts its ERROR
81 * pin when this event occurs and it also logs a specific cause code
82 * into a JTAG scannable flop.
83 */
84#define CHAFSR_PERR (1UL << 51UL) /* ch,ch+,jp */
85
86/* Internal processor error. The processor asserts its ERROR
87 * pin when this event occurs and it also logs a specific cause code
88 * into a JTAG scannable flop.
89 */
90#define CHAFSR_IERR (1UL << 50UL) /* ch,ch+,jp */
91
92/* System request parity error on incoming address */
93#define CHAFSR_ISAP (1UL << 49UL) /* ch,ch+,jp */
94
95/* HW Corrected system bus MTAG ECC error */
96#define CHAFSR_EMC (1UL << 48UL) /* ch,ch+ */
97/* Parity error on L2 cache tag SRAM */
98#define JPAFSR_ETP (1UL << 48UL) /* jp */
99
100/* Uncorrectable system bus MTAG ECC error */
101#define CHAFSR_EMU (1UL << 47UL) /* ch,ch+ */
102/* Out of range memory error has occurred */
103#define JPAFSR_OM (1UL << 47UL) /* jp */
104
105/* HW Corrected system bus data ECC error for read of interrupt vector */
106#define CHAFSR_IVC (1UL << 46UL) /* ch,ch+ */
107/* Error due to unsupported store */
108#define JPAFSR_UMS (1UL << 46UL) /* jp */
109
110/* Uncorrectable system bus data ECC error for read of interrupt vector */
111#define CHAFSR_IVU (1UL << 45UL) /* ch,ch+,jp */
112
113/* Unmapped error from system bus */
114#define CHAFSR_TO (1UL << 44UL) /* ch,ch+,jp */
115
116/* Bus error response from system bus */
117#define CHAFSR_BERR (1UL << 43UL) /* ch,ch+,jp */
118
119/* SW Correctable E-cache ECC error for instruction fetch or data access
120 * other than block load.
121 */
122#define CHAFSR_UCC (1UL << 42UL) /* ch,ch+,jp */
123
124/* Uncorrectable E-cache ECC error for instruction fetch or data access
125 * other than block load.
126 */
127#define CHAFSR_UCU (1UL << 41UL) /* ch,ch+,jp */
128
129/* Copyout HW Corrected ECC error */
130#define CHAFSR_CPC (1UL << 40UL) /* ch,ch+,jp */
131
132/* Copyout Uncorrectable ECC error */
133#define CHAFSR_CPU (1UL << 39UL) /* ch,ch+,jp */
134
135/* HW Corrected ECC error from E-cache for writeback */
136#define CHAFSR_WDC (1UL << 38UL) /* ch,ch+,jp */
137
138/* Uncorrectable ECC error from E-cache for writeback */
139#define CHAFSR_WDU (1UL << 37UL) /* ch,ch+,jp */
140
141/* HW Corrected ECC error from E-cache for store merge or block load */
142#define CHAFSR_EDC (1UL << 36UL) /* ch,ch+,jp */
143
144/* Uncorrectable ECC error from E-cache for store merge or block load */
145#define CHAFSR_EDU (1UL << 35UL) /* ch,ch+,jp */
146
147/* Uncorrectable system bus data ECC error for read of memory or I/O */
148#define CHAFSR_UE (1UL << 34UL) /* ch,ch+,jp */
149
150/* HW Corrected system bus data ECC error for read of memory or I/O */
151#define CHAFSR_CE (1UL << 33UL) /* ch,ch+,jp */
152
153/* Uncorrectable ECC error from remote cache/memory */
154#define JPAFSR_RUE (1UL << 32UL) /* jp */
155
156/* Correctable ECC error from remote cache/memory */
157#define JPAFSR_RCE (1UL << 31UL) /* jp */
158
159/* JBUS parity error on returned read data */
160#define JPAFSR_BP (1UL << 30UL) /* jp */
161
162/* JBUS parity error on data for writeback or block store */
163#define JPAFSR_WBP (1UL << 29UL) /* jp */
164
165/* Foreign read to DRAM incurring correctable ECC error */
166#define JPAFSR_FRC (1UL << 28UL) /* jp */
167
168/* Foreign read to DRAM incurring uncorrectable ECC error */
169#define JPAFSR_FRU (1UL << 27UL) /* jp */
170
171#define CHAFSR_ERRORS (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP | CHAFSR_EMC | \
172 CHAFSR_EMU | CHAFSR_IVC | CHAFSR_IVU | CHAFSR_TO | \
173 CHAFSR_BERR | CHAFSR_UCC | CHAFSR_UCU | CHAFSR_CPC | \
174 CHAFSR_CPU | CHAFSR_WDC | CHAFSR_WDU | CHAFSR_EDC | \
175 CHAFSR_EDU | CHAFSR_UE | CHAFSR_CE)
176#define CHPAFSR_ERRORS (CHPAFSR_DTO | CHPAFSR_DBERR | CHPAFSR_THCE | \
177 CHPAFSR_TSCE | CHPAFSR_TUE | CHPAFSR_DUE | \
178 CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP | CHAFSR_EMC | \
179 CHAFSR_EMU | CHAFSR_IVC | CHAFSR_IVU | CHAFSR_TO | \
180 CHAFSR_BERR | CHAFSR_UCC | CHAFSR_UCU | CHAFSR_CPC | \
181 CHAFSR_CPU | CHAFSR_WDC | CHAFSR_WDU | CHAFSR_EDC | \
182 CHAFSR_EDU | CHAFSR_UE | CHAFSR_CE)
183#define JPAFSR_ERRORS (JPAFSR_JETO | JPAFSR_SCE | JPAFSR_JEIC | \
184 JPAFSR_JEIT | CHAFSR_PERR | CHAFSR_IERR | \
185 CHAFSR_ISAP | JPAFSR_ETP | JPAFSR_OM | \
186 JPAFSR_UMS | CHAFSR_IVU | CHAFSR_TO | \
187 CHAFSR_BERR | CHAFSR_UCC | CHAFSR_UCU | \
188 CHAFSR_CPC | CHAFSR_CPU | CHAFSR_WDC | \
189 CHAFSR_WDU | CHAFSR_EDC | CHAFSR_EDU | \
190 CHAFSR_UE | CHAFSR_CE | JPAFSR_RUE | \
191 JPAFSR_RCE | JPAFSR_BP | JPAFSR_WBP | \
192 JPAFSR_FRC | JPAFSR_FRU)
193
194/* Active JBUS request signal when error occurred */
195#define JPAFSR_JBREQ (0x7UL << 24UL) /* jp */
196#define JPAFSR_JBREQ_SHIFT 24UL
197
198/* L2 cache way information */
199#define JPAFSR_ETW (0x3UL << 22UL) /* jp */
200#define JPAFSR_ETW_SHIFT 22UL
201
202/* System bus MTAG ECC syndrome. This field captures the status of the
203 * first occurrence of the highest-priority error according to the M_SYND
204 * overwrite policy. After the AFSR sticky bit, corresponding to the error
205 * for which the M_SYND is reported, is cleared, the contents of the M_SYND
206 * field will be unchanged but will be unfrozen for further error capture.
207 */
208#define CHAFSR_M_SYNDROME (0xfUL << 16UL) /* ch,ch+,jp */
209#define CHAFSR_M_SYNDROME_SHIFT 16UL
210
211/* Agent Id of the foreign device causing the UE/CE errors */
212#define JPAFSR_AID (0x1fUL << 9UL) /* jp */
213#define JPAFSR_AID_SHIFT 9UL
214
215/* System bus or E-cache data ECC syndrome. This field captures the status
216 * of the first occurrence of the highest-priority error according to the
217 * E_SYND overwrite policy. After the AFSR sticky bit, corresponding to the
218 * error for which the E_SYND is reported, is cleared, the contents of the E_SYND
219 * field will be unchanged but will be unfrozen for further error capture.
220 */
221#define CHAFSR_E_SYNDROME (0x1ffUL << 0UL) /* ch,ch+,jp */
222#define CHAFSR_E_SYNDROME_SHIFT 0UL
223
224/* The AFSR must be explicitly cleared by software; it is not cleared automatically
225 * by a read. Writes to bits <51:33> with bits set will clear the corresponding
226 * bits in the AFSR. Bits associated with disrupting traps must be cleared before
227 * interrupts are re-enabled to prevent multiple traps for the same error. I.e.
228 * PSTATE.IE and AFSR bits control delivery of disrupting traps.
229 *
230 * Since there is only one AFAR, when multiple events have been logged by the
231 * bits in the AFSR, at most one of these events will have its status captured
232 * in the AFAR. The highest priority of those event bits will get AFAR logging.
233 * The AFAR will be unlocked and available to capture the address of another event
234 * as soon as the one bit in AFSR that corresponds to the event logged in AFAR is
235 * cleared. For example, if AFSR.CE is detected, then AFSR.UE (which overwrites
236 * the AFAR), and AFSR.UE is cleared but not AFSR.CE, then the AFAR will be unlocked
237 * and ready for another event, even though AFSR.CE is still set. The same rules
238 * also apply to the M_SYNDROME and E_SYNDROME fields of the AFSR.
239 */
240
241#endif /* _SPARC64_CHAFSR_H */
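The clearing rules described at the end of chafsr.h amount to reading the AFSR and then writing the bits to retire back, since the sticky bits are write-1-to-clear; the header gives ASI 0x4C with VA 0 for the register. A sketch of that protocol in inline assembly; the kernel's Cheetah error handlers do this with their own trap code and macros, so treat this as illustration only:

/* Read the Cheetah AFSR (ASI 0x4c, VA 0) and clear selected sticky bits
 * by writing 1s back, per the write-1-to-clear rule above.  Illustrative
 * sketch, not the in-kernel error handling path. */
static inline unsigned long cheetah_read_afsr(void)
{
	unsigned long afsr;

	__asm__ __volatile__("ldxa [%%g0] 0x4c, %0" : "=r" (afsr));
	return afsr;
}

static inline void cheetah_clear_afsr(unsigned long bits)
{
	__asm__ __volatile__("stxa %0, [%%g0] 0x4c\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (bits)
			     : "memory");
}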
diff --git a/include/asm-sparc/checksum.h b/include/asm-sparc/checksum.h
index d044ddb5a3cf..4e3553d4f6e1 100644
--- a/include/asm-sparc/checksum.h
+++ b/include/asm-sparc/checksum.h
@@ -1,241 +1,8 @@
(new wrapper, lines 1-8 of the rewritten checksum.h:)
1#ifndef ___ASM_SPARC_CHECKSUM_H
2#define ___ASM_SPARC_CHECKSUM_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/checksum_64.h>
5#else
6#include <asm-sparc/checksum_32.h>
7#endif
8#endif
(removed content, old lines 1-8; old lines 9-241 continue below:)
1#ifndef __SPARC_CHECKSUM_H
2#define __SPARC_CHECKSUM_H
3
4/* checksum.h: IP/UDP/TCP checksum routines on the Sparc.
5 *
6 * Copyright(C) 1995 Linus Torvalds
7 * Copyright(C) 1995 Miguel de Icaza
8 * Copyright(C) 1996 David S. Miller
9 * Copyright(C) 1996 Eddie C. Dost
10 * Copyright(C) 1997 Jakub Jelinek
11 *
12 * derived from:
13 * Alpha checksum c-code
14 * ix86 inline assembly
15 * RFC1071 Computing the Internet Checksum
16 */
17
18#include <linux/in6.h>
19#include <asm/uaccess.h>
20
21/* computes the checksum of a memory block at buff, length len,
22 * and adds in "sum" (32-bit)
23 *
24 * returns a 32-bit number suitable for feeding into itself
25 * or csum_tcpudp_magic
26 *
27 * this function must be called with even lengths, except
28 * for the last fragment, which may be odd
29 *
30 * it's best to have buff aligned on a 32-bit boundary
31 */
32extern __wsum csum_partial(const void *buff, int len, __wsum sum);
33
34/* the same as csum_partial, but copies from fs:src while it
35 * checksums
36 *
37 * here even more important to align src and dst on a 32-bit (or even
38 * better 64-bit) boundary
39 */
40
41extern unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *);
42
43static inline __wsum
44csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
45{
46 register unsigned int ret asm("o0") = (unsigned int)src;
47 register char *d asm("o1") = dst;
48 register int l asm("g1") = len;
49
50 __asm__ __volatile__ (
51 "call __csum_partial_copy_sparc_generic\n\t"
52 " mov %6, %%g7\n"
53 : "=&r" (ret), "=&r" (d), "=&r" (l)
54 : "0" (ret), "1" (d), "2" (l), "r" (sum)
55 : "o2", "o3", "o4", "o5", "o7",
56 "g2", "g3", "g4", "g5", "g7",
57 "memory", "cc");
58 return (__force __wsum)ret;
59}
60
61static inline __wsum
62csum_partial_copy_from_user(const void __user *src, void *dst, int len,
63 __wsum sum, int *err)
64 {
65 register unsigned long ret asm("o0") = (unsigned long)src;
66 register char *d asm("o1") = dst;
67 register int l asm("g1") = len;
68 register __wsum s asm("g7") = sum;
69
70 __asm__ __volatile__ (
71 ".section __ex_table,#alloc\n\t"
72 ".align 4\n\t"
73 ".word 1f,2\n\t"
74 ".previous\n"
75 "1:\n\t"
76 "call __csum_partial_copy_sparc_generic\n\t"
77 " st %8, [%%sp + 64]\n"
78 : "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
79 : "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
80 : "o2", "o3", "o4", "o5", "o7", "g2", "g3", "g4", "g5",
81 "cc", "memory");
82 return (__force __wsum)ret;
83}
84
85static inline __wsum
86csum_partial_copy_to_user(const void *src, void __user *dst, int len,
87 __wsum sum, int *err)
88{
89 if (!access_ok (VERIFY_WRITE, dst, len)) {
90 *err = -EFAULT;
91 return sum;
92 } else {
93 register unsigned long ret asm("o0") = (unsigned long)src;
94 register char __user *d asm("o1") = dst;
95 register int l asm("g1") = len;
96 register __wsum s asm("g7") = sum;
97
98 __asm__ __volatile__ (
99 ".section __ex_table,#alloc\n\t"
100 ".align 4\n\t"
101 ".word 1f,1\n\t"
102 ".previous\n"
103 "1:\n\t"
104 "call __csum_partial_copy_sparc_generic\n\t"
105 " st %8, [%%sp + 64]\n"
106 : "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
107 : "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
108 : "o2", "o3", "o4", "o5", "o7",
109 "g2", "g3", "g4", "g5",
110 "cc", "memory");
111 return (__force __wsum)ret;
112 }
113}
114
115#define HAVE_CSUM_COPY_USER
116#define csum_and_copy_to_user csum_partial_copy_to_user
117
118/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
119 * the majority of the time.
120 */
121static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
122{
123 __sum16 sum;
124
125 /* Note: We must read %2 before we touch %0 for the first time,
126 * because GCC can legitimately use the same register for
127 * both operands.
128 */
129 __asm__ __volatile__("sub\t%2, 4, %%g4\n\t"
130 "ld\t[%1 + 0x00], %0\n\t"
131 "ld\t[%1 + 0x04], %%g2\n\t"
132 "ld\t[%1 + 0x08], %%g3\n\t"
133 "addcc\t%%g2, %0, %0\n\t"
134 "addxcc\t%%g3, %0, %0\n\t"
135 "ld\t[%1 + 0x0c], %%g2\n\t"
136 "ld\t[%1 + 0x10], %%g3\n\t"
137 "addxcc\t%%g2, %0, %0\n\t"
138 "addx\t%0, %%g0, %0\n"
139 "1:\taddcc\t%%g3, %0, %0\n\t"
140 "add\t%1, 4, %1\n\t"
141 "addxcc\t%0, %%g0, %0\n\t"
142 "subcc\t%%g4, 1, %%g4\n\t"
143 "be,a\t2f\n\t"
144 "sll\t%0, 16, %%g2\n\t"
145 "b\t1b\n\t"
146 "ld\t[%1 + 0x10], %%g3\n"
147 "2:\taddcc\t%0, %%g2, %%g2\n\t"
148 "srl\t%%g2, 16, %0\n\t"
149 "addx\t%0, %%g0, %0\n\t"
150 "xnor\t%%g0, %0, %0"
151 : "=r" (sum), "=&r" (iph)
152 : "r" (ihl), "1" (iph)
153 : "g2", "g3", "g4", "cc", "memory");
154 return sum;
155}
156
157/* Fold a partial checksum without adding pseudo headers. */
158static inline __sum16 csum_fold(__wsum sum)
159{
160 unsigned int tmp;
161
162 __asm__ __volatile__("addcc\t%0, %1, %1\n\t"
163 "srl\t%1, 16, %1\n\t"
164 "addx\t%1, %%g0, %1\n\t"
165 "xnor\t%%g0, %1, %0"
166 : "=&r" (sum), "=r" (tmp)
167 : "0" (sum), "1" ((__force u32)sum<<16)
168 : "cc");
169 return (__force __sum16)sum;
170}
171
172static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
173 unsigned short len,
174 unsigned short proto,
175 __wsum sum)
176{
177 __asm__ __volatile__("addcc\t%1, %0, %0\n\t"
178 "addxcc\t%2, %0, %0\n\t"
179 "addxcc\t%3, %0, %0\n\t"
180 "addx\t%0, %%g0, %0\n\t"
181 : "=r" (sum), "=r" (saddr)
182 : "r" (daddr), "r" (proto + len), "0" (sum),
183 "1" (saddr)
184 : "cc");
185 return sum;
186}
187
188/*
189 * computes the checksum of the TCP/UDP pseudo-header
190 * returns a 16-bit checksum, already complemented
191 */
192static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
193 unsigned short len,
194 unsigned short proto,
195 __wsum sum)
196{
197 return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
198}
199
200#define _HAVE_ARCH_IPV6_CSUM
201
202static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
203 const struct in6_addr *daddr,
204 __u32 len, unsigned short proto,
205 __wsum sum)
206{
207 __asm__ __volatile__ (
208 "addcc %3, %4, %%g4\n\t"
209 "addxcc %5, %%g4, %%g4\n\t"
210 "ld [%2 + 0x0c], %%g2\n\t"
211 "ld [%2 + 0x08], %%g3\n\t"
212 "addxcc %%g2, %%g4, %%g4\n\t"
213 "ld [%2 + 0x04], %%g2\n\t"
214 "addxcc %%g3, %%g4, %%g4\n\t"
215 "ld [%2 + 0x00], %%g3\n\t"
216 "addxcc %%g2, %%g4, %%g4\n\t"
217 "ld [%1 + 0x0c], %%g2\n\t"
218 "addxcc %%g3, %%g4, %%g4\n\t"
219 "ld [%1 + 0x08], %%g3\n\t"
220 "addxcc %%g2, %%g4, %%g4\n\t"
221 "ld [%1 + 0x04], %%g2\n\t"
222 "addxcc %%g3, %%g4, %%g4\n\t"
223 "ld [%1 + 0x00], %%g3\n\t"
224 "addxcc %%g2, %%g4, %%g4\n\t"
225 "addxcc %%g3, %%g4, %0\n\t"
226 "addx 0, %0, %0\n"
227 : "=&r" (sum)
228 : "r" (saddr), "r" (daddr),
229 "r"(htonl(len)), "r"(htonl(proto)), "r"(sum)
230 : "g2", "g3", "g4", "cc");
231
232 return csum_fold(sum);
233}
234
235/* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */
236static inline __sum16 ip_compute_csum(const void *buff, int len)
237{
238 return csum_fold(csum_partial(buff, len, 0));
239}
240
241#endif /* !(__SPARC_CHECKSUM_H) */
diff --git a/include/asm-sparc/checksum_32.h b/include/asm-sparc/checksum_32.h
new file mode 100644
index 000000000000..bdbda1453aa9
--- /dev/null
+++ b/include/asm-sparc/checksum_32.h
@@ -0,0 +1,241 @@
1#ifndef __SPARC_CHECKSUM_H
2#define __SPARC_CHECKSUM_H
3
4/* checksum.h: IP/UDP/TCP checksum routines on the Sparc.
5 *
6 * Copyright(C) 1995 Linus Torvalds
7 * Copyright(C) 1995 Miguel de Icaza
8 * Copyright(C) 1996 David S. Miller
9 * Copyright(C) 1996 Eddie C. Dost
10 * Copyright(C) 1997 Jakub Jelinek
11 *
12 * derived from:
13 * Alpha checksum c-code
14 * ix86 inline assembly
15 * RFC1071 Computing the Internet Checksum
16 */
17
18#include <linux/in6.h>
19#include <asm/uaccess.h>
20
21/* computes the checksum of a memory block at buff, length len,
22 * and adds in "sum" (32-bit)
23 *
24 * returns a 32-bit number suitable for feeding into itself
25 * or csum_tcpudp_magic
26 *
27 * this function must be called with even lengths, except
28 * for the last fragment, which may be odd
29 *
30 * it's best to have buff aligned on a 32-bit boundary
31 */
32extern __wsum csum_partial(const void *buff, int len, __wsum sum);
33
34/* the same as csum_partial, but copies from fs:src while it
35 * checksums
36 *
37 * here even more important to align src and dst on a 32-bit (or even
38 * better 64-bit) boundary
39 */
40
41extern unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *);
42
43static inline __wsum
44csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
45{
46 register unsigned int ret asm("o0") = (unsigned int)src;
47 register char *d asm("o1") = dst;
48 register int l asm("g1") = len;
49
50 __asm__ __volatile__ (
51 "call __csum_partial_copy_sparc_generic\n\t"
52 " mov %6, %%g7\n"
53 : "=&r" (ret), "=&r" (d), "=&r" (l)
54 : "0" (ret), "1" (d), "2" (l), "r" (sum)
55 : "o2", "o3", "o4", "o5", "o7",
56 "g2", "g3", "g4", "g5", "g7",
57 "memory", "cc");
58 return (__force __wsum)ret;
59}
60
61static inline __wsum
62csum_partial_copy_from_user(const void __user *src, void *dst, int len,
63 __wsum sum, int *err)
64 {
65 register unsigned long ret asm("o0") = (unsigned long)src;
66 register char *d asm("o1") = dst;
67 register int l asm("g1") = len;
68 register __wsum s asm("g7") = sum;
69
70 __asm__ __volatile__ (
71 ".section __ex_table,#alloc\n\t"
72 ".align 4\n\t"
73 ".word 1f,2\n\t"
74 ".previous\n"
75 "1:\n\t"
76 "call __csum_partial_copy_sparc_generic\n\t"
77 " st %8, [%%sp + 64]\n"
78 : "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
79 : "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
80 : "o2", "o3", "o4", "o5", "o7", "g2", "g3", "g4", "g5",
81 "cc", "memory");
82 return (__force __wsum)ret;
83}
84
85static inline __wsum
86csum_partial_copy_to_user(const void *src, void __user *dst, int len,
87 __wsum sum, int *err)
88{
89 if (!access_ok (VERIFY_WRITE, dst, len)) {
90 *err = -EFAULT;
91 return sum;
92 } else {
93 register unsigned long ret asm("o0") = (unsigned long)src;
94 register char __user *d asm("o1") = dst;
95 register int l asm("g1") = len;
96 register __wsum s asm("g7") = sum;
97
98 __asm__ __volatile__ (
99 ".section __ex_table,#alloc\n\t"
100 ".align 4\n\t"
101 ".word 1f,1\n\t"
102 ".previous\n"
103 "1:\n\t"
104 "call __csum_partial_copy_sparc_generic\n\t"
105 " st %8, [%%sp + 64]\n"
106 : "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
107 : "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
108 : "o2", "o3", "o4", "o5", "o7",
109 "g2", "g3", "g4", "g5",
110 "cc", "memory");
111 return (__force __wsum)ret;
112 }
113}
114
115#define HAVE_CSUM_COPY_USER
116#define csum_and_copy_to_user csum_partial_copy_to_user
117
118/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
119 * the majority of the time.
120 */
121static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
122{
123 __sum16 sum;
124
125 /* Note: We must read %2 before we touch %0 for the first time,
126 * because GCC can legitimately use the same register for
127 * both operands.
128 */
129 __asm__ __volatile__("sub\t%2, 4, %%g4\n\t"
130 "ld\t[%1 + 0x00], %0\n\t"
131 "ld\t[%1 + 0x04], %%g2\n\t"
132 "ld\t[%1 + 0x08], %%g3\n\t"
133 "addcc\t%%g2, %0, %0\n\t"
134 "addxcc\t%%g3, %0, %0\n\t"
135 "ld\t[%1 + 0x0c], %%g2\n\t"
136 "ld\t[%1 + 0x10], %%g3\n\t"
137 "addxcc\t%%g2, %0, %0\n\t"
138 "addx\t%0, %%g0, %0\n"
139 "1:\taddcc\t%%g3, %0, %0\n\t"
140 "add\t%1, 4, %1\n\t"
141 "addxcc\t%0, %%g0, %0\n\t"
142 "subcc\t%%g4, 1, %%g4\n\t"
143 "be,a\t2f\n\t"
144 "sll\t%0, 16, %%g2\n\t"
145 "b\t1b\n\t"
146 "ld\t[%1 + 0x10], %%g3\n"
147 "2:\taddcc\t%0, %%g2, %%g2\n\t"
148 "srl\t%%g2, 16, %0\n\t"
149 "addx\t%0, %%g0, %0\n\t"
150 "xnor\t%%g0, %0, %0"
151 : "=r" (sum), "=&r" (iph)
152 : "r" (ihl), "1" (iph)
153 : "g2", "g3", "g4", "cc", "memory");
154 return sum;
155}
156
157/* Fold a partial checksum without adding pseudo headers. */
158static inline __sum16 csum_fold(__wsum sum)
159{
160 unsigned int tmp;
161
162 __asm__ __volatile__("addcc\t%0, %1, %1\n\t"
163 "srl\t%1, 16, %1\n\t"
164 "addx\t%1, %%g0, %1\n\t"
165 "xnor\t%%g0, %1, %0"
166 : "=&r" (sum), "=r" (tmp)
167 : "0" (sum), "1" ((__force u32)sum<<16)
168 : "cc");
169 return (__force __sum16)sum;
170}
171
172static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
173 unsigned short len,
174 unsigned short proto,
175 __wsum sum)
176{
177 __asm__ __volatile__("addcc\t%1, %0, %0\n\t"
178 "addxcc\t%2, %0, %0\n\t"
179 "addxcc\t%3, %0, %0\n\t"
180 "addx\t%0, %%g0, %0\n\t"
181 : "=r" (sum), "=r" (saddr)
182 : "r" (daddr), "r" (proto + len), "0" (sum),
183 "1" (saddr)
184 : "cc");
185 return sum;
186}
187
188/*
189 * computes the checksum of the TCP/UDP pseudo-header
190 * returns a 16-bit checksum, already complemented
191 */
192static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
193 unsigned short len,
194 unsigned short proto,
195 __wsum sum)
196{
197 return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
198}
199
200#define _HAVE_ARCH_IPV6_CSUM
201
202static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
203 const struct in6_addr *daddr,
204 __u32 len, unsigned short proto,
205 __wsum sum)
206{
207 __asm__ __volatile__ (
208 "addcc %3, %4, %%g4\n\t"
209 "addxcc %5, %%g4, %%g4\n\t"
210 "ld [%2 + 0x0c], %%g2\n\t"
211 "ld [%2 + 0x08], %%g3\n\t"
212 "addxcc %%g2, %%g4, %%g4\n\t"
213 "ld [%2 + 0x04], %%g2\n\t"
214 "addxcc %%g3, %%g4, %%g4\n\t"
215 "ld [%2 + 0x00], %%g3\n\t"
216 "addxcc %%g2, %%g4, %%g4\n\t"
217 "ld [%1 + 0x0c], %%g2\n\t"
218 "addxcc %%g3, %%g4, %%g4\n\t"
219 "ld [%1 + 0x08], %%g3\n\t"
220 "addxcc %%g2, %%g4, %%g4\n\t"
221 "ld [%1 + 0x04], %%g2\n\t"
222 "addxcc %%g3, %%g4, %%g4\n\t"
223 "ld [%1 + 0x00], %%g3\n\t"
224 "addxcc %%g2, %%g4, %%g4\n\t"
225 "addxcc %%g3, %%g4, %0\n\t"
226 "addx 0, %0, %0\n"
227 : "=&r" (sum)
228 : "r" (saddr), "r" (daddr),
229 "r"(htonl(len)), "r"(htonl(proto)), "r"(sum)
230 : "g2", "g3", "g4", "cc");
231
232 return csum_fold(sum);
233}
234
235/* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */
236static inline __sum16 ip_compute_csum(const void *buff, int len)
237{
238 return csum_fold(csum_partial(buff, len, 0));
239}
240
241#endif /* !(__SPARC_CHECKSUM_H) */
diff --git a/include/asm-sparc/checksum_64.h b/include/asm-sparc/checksum_64.h
new file mode 100644
index 000000000000..019b9615e43c
--- /dev/null
+++ b/include/asm-sparc/checksum_64.h
@@ -0,0 +1,167 @@
1#ifndef __SPARC64_CHECKSUM_H
2#define __SPARC64_CHECKSUM_H
3
4/* checksum.h: IP/UDP/TCP checksum routines on the V9.
5 *
6 * Copyright(C) 1995 Linus Torvalds
7 * Copyright(C) 1995 Miguel de Icaza
8 * Copyright(C) 1996 David S. Miller
9 * Copyright(C) 1996 Eddie C. Dost
10 * Copyright(C) 1997 Jakub Jelinek
11 *
12 * derived from:
13 * Alpha checksum c-code
14 * ix86 inline assembly
15 * RFC1071 Computing the Internet Checksum
16 */
17
18#include <linux/in6.h>
19#include <asm/uaccess.h>
20
21/* computes the checksum of a memory block at buff, length len,
22 * and adds in "sum" (32-bit)
23 *
24 * returns a 32-bit number suitable for feeding into itself
25 * or csum_tcpudp_magic
26 *
27 * this function must be called with even lengths, except
28 * for the last fragment, which may be odd
29 *
30 * it's best to have buff aligned on a 32-bit boundary
31 */
32extern __wsum csum_partial(const void * buff, int len, __wsum sum);
33
34/* the same as csum_partial, but copies from user space while it
35 * checksums
36 *
37 * here even more important to align src and dst on a 32-bit (or even
38 * better 64-bit) boundary
39 */
40extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
41 int len, __wsum sum);
42
43extern long __csum_partial_copy_from_user(const void __user *src,
44 void *dst, int len,
45 __wsum sum);
46
47static inline __wsum
48csum_partial_copy_from_user(const void __user *src,
49 void *dst, int len,
50 __wsum sum, int *err)
51{
52 long ret = __csum_partial_copy_from_user(src, dst, len, sum);
53 if (ret < 0)
54 *err = -EFAULT;
55 return (__force __wsum) ret;
56}
57
58/*
59 * Copy and checksum to user
60 */
61#define HAVE_CSUM_COPY_USER
62extern long __csum_partial_copy_to_user(const void *src,
63 void __user *dst, int len,
64 __wsum sum);
65
66static inline __wsum
67csum_and_copy_to_user(const void *src,
68 void __user *dst, int len,
69 __wsum sum, int *err)
70{
71 long ret = __csum_partial_copy_to_user(src, dst, len, sum);
72 if (ret < 0)
73 *err = -EFAULT;
74 return (__force __wsum) ret;
75}
76
77/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
78 * the majority of the time.
79 */
80extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
81
82/* Fold a partial checksum without adding pseudo headers. */
83static inline __sum16 csum_fold(__wsum sum)
84{
85 unsigned int tmp;
86
87 __asm__ __volatile__(
88" addcc %0, %1, %1\n"
89" srl %1, 16, %1\n"
90" addc %1, %%g0, %1\n"
91" xnor %%g0, %1, %0\n"
92 : "=&r" (sum), "=r" (tmp)
93 : "0" (sum), "1" ((__force u32)sum<<16)
94 : "cc");
95 return (__force __sum16)sum;
96}
97
98static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
99 unsigned int len,
100 unsigned short proto,
101 __wsum sum)
102{
103 __asm__ __volatile__(
104" addcc %1, %0, %0\n"
105" addccc %2, %0, %0\n"
106" addccc %3, %0, %0\n"
107" addc %0, %%g0, %0\n"
108 : "=r" (sum), "=r" (saddr)
109 : "r" (daddr), "r" (proto + len), "0" (sum), "1" (saddr)
110 : "cc");
111 return sum;
112}
113
114/*
115 * computes the checksum of the TCP/UDP pseudo-header
116 * returns a 16-bit checksum, already complemented
117 */
118static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
119 unsigned short len,
120 unsigned short proto,
121 __wsum sum)
122{
123 return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
124}
125
126#define _HAVE_ARCH_IPV6_CSUM
127
128static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
129 const struct in6_addr *daddr,
130 __u32 len, unsigned short proto,
131 __wsum sum)
132{
133 __asm__ __volatile__ (
134" addcc %3, %4, %%g7\n"
135" addccc %5, %%g7, %%g7\n"
136" lduw [%2 + 0x0c], %%g2\n"
137" lduw [%2 + 0x08], %%g3\n"
138" addccc %%g2, %%g7, %%g7\n"
139" lduw [%2 + 0x04], %%g2\n"
140" addccc %%g3, %%g7, %%g7\n"
141" lduw [%2 + 0x00], %%g3\n"
142" addccc %%g2, %%g7, %%g7\n"
143" lduw [%1 + 0x0c], %%g2\n"
144" addccc %%g3, %%g7, %%g7\n"
145" lduw [%1 + 0x08], %%g3\n"
146" addccc %%g2, %%g7, %%g7\n"
147" lduw [%1 + 0x04], %%g2\n"
148" addccc %%g3, %%g7, %%g7\n"
149" lduw [%1 + 0x00], %%g3\n"
150" addccc %%g2, %%g7, %%g7\n"
151" addccc %%g3, %%g7, %0\n"
152" addc 0, %0, %0\n"
153 : "=&r" (sum)
154 : "r" (saddr), "r" (daddr), "r"(htonl(len)),
155 "r"(htonl(proto)), "r"(sum)
156 : "g2", "g3", "g7", "cc");
157
158 return csum_fold(sum);
159}
160
161/* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */
162static inline __sum16 ip_compute_csum(const void *buff, int len)
163{
164 return csum_fold(csum_partial(buff, len, 0));
165}
166
167#endif /* !(__SPARC64_CHECKSUM_H) */
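For readers untangling the inline assembly in checksum_32.h and checksum_64.h above, the arithmetic being performed is the standard RFC 1071 one's-complement sum. A portable reference version, written for a big-endian view of the data as on SPARC; it is an illustration, not the kernel's implementation:

#include <stdint.h>

/* Reference (non-kernel) version of the arithmetic the assembly above
 * performs: accumulate 16-bit big-endian words into a one's-complement
 * sum (csum_partial), then fold to 16 bits and invert (csum_fold). */
static uint32_t ref_csum_partial(const uint8_t *buf, int len, uint32_t sum)
{
	uint64_t acc = sum;

	while (len > 1) {
		acc += (uint32_t)(buf[0] << 8 | buf[1]);
		buf += 2;
		len -= 2;
	}
	if (len)			/* odd trailing byte */
		acc += (uint32_t)(buf[0] << 8);
	while (acc >> 32)		/* wrap carries back in */
		acc = (acc & 0xffffffffu) + (acc >> 32);
	return (uint32_t)acc;
}

static uint16_t ref_csum_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}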
diff --git a/include/asm-sparc/chmctrl.h b/include/asm-sparc/chmctrl.h
new file mode 100644
index 000000000000..859b4a4b0d30
--- /dev/null
+++ b/include/asm-sparc/chmctrl.h
@@ -0,0 +1,183 @@
1#ifndef _SPARC64_CHMCTRL_H
2#define _SPARC64_CHMCTRL_H
3
4/* Cheetah memory controller programmable registers. */
5#define CHMCTRL_TCTRL1 0x00 /* Memory Timing Control I */
6#define CHMCTRL_TCTRL2 0x08 /* Memory Timing Control II */
7#define CHMCTRL_TCTRL3 0x38 /* Memory Timing Control III */
8#define CHMCTRL_TCTRL4 0x40 /* Memory Timing Control IV */
9#define CHMCTRL_DECODE1 0x10 /* Memory Address Decode I */
10#define CHMCTRL_DECODE2 0x18 /* Memory Address Decode II */
11#define CHMCTRL_DECODE3 0x20 /* Memory Address Decode III */
12#define CHMCTRL_DECODE4 0x28 /* Memory Address Decode IV */
13#define CHMCTRL_MACTRL 0x30 /* Memory Address Control */
14
15/* Memory Timing Control I */
16#define TCTRL1_SDRAMCTL_DLY 0xf000000000000000UL
17#define TCTRL1_SDRAMCTL_DLY_SHIFT 60
18#define TCTRL1_SDRAMCLK_DLY 0x0e00000000000000UL
19#define TCTRL1_SDRAMCLK_DLY_SHIFT 57
20#define TCTRL1_R 0x0100000000000000UL
21#define TCTRL1_R_SHIFT 56
22#define TCTRL1_AUTORFR_CYCLE 0x00fe000000000000UL
23#define TCTRL1_AUTORFR_CYCLE_SHIFT 49
24#define TCTRL1_RD_WAIT 0x0001f00000000000UL
25#define TCTRL1_RD_WAIT_SHIFT 44
26#define TCTRL1_PC_CYCLE 0x00000fc000000000UL
27#define TCTRL1_PC_CYCLE_SHIFT 38
28#define TCTRL1_WR_MORE_RAS_PW 0x0000003f00000000UL
29#define TCTRL1_WR_MORE_RAS_PW_SHIFT 32
30#define TCTRL1_RD_MORE_RAW_PW 0x00000000fc000000UL
31#define TCTRL1_RD_MORE_RAS_PW_SHIFT 26
32#define TCTRL1_ACT_WR_DLY 0x0000000003f00000UL
33#define TCTRL1_ACT_WR_DLY_SHIFT 20
34#define TCTRL1_ACT_RD_DLY 0x00000000000fc000UL
35#define TCTRL1_ACT_RD_DLY_SHIFT 14
36#define TCTRL1_BANK_PRESENT 0x0000000000003000UL
37#define TCTRL1_BANK_PRESENT_SHIFT 12
38#define TCTRL1_RFR_INT 0x0000000000000ff8UL
39#define TCTRL1_RFR_INT_SHIFT 3
40#define TCTRL1_SET_MODE_REG 0x0000000000000004UL
41#define TCTRL1_SET_MODE_REG_SHIFT 2
42#define TCTRL1_RFR_ENABLE 0x0000000000000002UL
43#define TCTRL1_RFR_ENABLE_SHIFT 1
44#define TCTRL1_PRECHG_ALL 0x0000000000000001UL
45#define TCTRL1_PRECHG_ALL_SHIFT 0
46
47/* Memory Timing Control II */
48#define TCTRL2_WR_MSEL_DLY 0xfc00000000000000UL
49#define TCTRL2_WR_MSEL_DLY_SHIFT 58
50#define TCTRL2_RD_MSEL_DLY 0x03f0000000000000UL
51#define TCTRL2_RD_MSEL_DLY_SHIFT 52
52#define TCTRL2_WRDATA_THLD 0x000c000000000000UL
53#define TCTRL2_WRDATA_THLD_SHIFT 50
54#define TCTRL2_RDWR_RD_TI_DLY 0x0003f00000000000UL
55#define TCTRL2_RDWR_RD_TI_DLY_SHIFT 44
56#define TCTRL2_AUTOPRECHG_ENBL 0x0000080000000000UL
57#define TCTRL2_AUTOPRECHG_ENBL_SHIFT 43
58#define TCTRL2_RDWR_PI_MORE_DLY 0x000007c000000000UL
59#define TCTRL2_RDWR_PI_MORE_DLY_SHIFT 38
60#define TCTRL2_RDWR_1_DLY 0x0000003f00000000UL
61#define TCTRL2_RDWR_1_DLY_SHIFT 32
62#define TCTRL2_WRWR_PI_MORE_DLY 0x00000000f8000000UL
63#define TCTRL2_WRWR_PI_MORE_DLY_SHIFT 27
64#define TCTRL2_WRWR_1_DLY 0x0000000007e00000UL
65#define TCTRL2_WRWR_1_DLY_SHIFT 21
66#define TCTRL2_RDWR_RD_PI_MORE_DLY 0x00000000001f0000UL
67#define TCTRL2_RDWR_RD_PI_MORE_DLY_SHIFT 16
68#define TCTRL2_R 0x0000000000008000UL
69#define TCTRL2_R_SHIFT 15
70#define TCTRL2_SDRAM_MODE_REG_DATA 0x0000000000007fffUL
71#define TCTRL2_SDRAM_MODE_REG_DATA_SHIFT 0
72
73/* Memory Timing Control III */
74#define TCTRL3_SDRAM_CTL_DLY 0xf000000000000000UL
75#define TCTRL3_SDRAM_CTL_DLY_SHIFT 60
76#define TCTRL3_SDRAM_CLK_DLY 0x0e00000000000000UL
77#define TCTRL3_SDRAM_CLK_DLY_SHIFT 57
78#define TCTRL3_R 0x0100000000000000UL
79#define TCTRL3_R_SHIFT 56
80#define TCTRL3_AUTO_RFR_CYCLE 0x00fe000000000000UL
81#define TCTRL3_AUTO_RFR_CYCLE_SHIFT 49
82#define TCTRL3_RD_WAIT 0x0001f00000000000UL
83#define TCTRL3_RD_WAIT_SHIFT 44
84#define TCTRL3_PC_CYCLE 0x00000fc000000000UL
85#define TCTRL3_PC_CYCLE_SHIFT 38
86#define TCTRL3_WR_MORE_RAW_PW 0x0000003f00000000UL
87#define TCTRL3_WR_MORE_RAW_PW_SHIFT 32
88#define TCTRL3_RD_MORE_RAW_PW 0x00000000fc000000UL
89#define TCTRL3_RD_MORE_RAW_PW_SHIFT 26
90#define TCTRL3_ACT_WR_DLY 0x0000000003f00000UL
91#define TCTRL3_ACT_WR_DLY_SHIFT 20
92#define TCTRL3_ACT_RD_DLY 0x00000000000fc000UL
93#define TCTRL3_ACT_RD_DLY_SHIFT 14
94#define TCTRL3_BANK_PRESENT 0x0000000000003000UL
95#define TCTRL3_BANK_PRESENT_SHIFT 12
96#define TCTRL3_RFR_INT 0x0000000000000ff8UL
97#define TCTRL3_RFR_INT_SHIFT 3
98#define TCTRL3_SET_MODE_REG 0x0000000000000004UL
99#define TCTRL3_SET_MODE_REG_SHIFT 2
100#define TCTRL3_RFR_ENABLE 0x0000000000000002UL
101#define TCTRL3_RFR_ENABLE_SHIFT 1
102#define TCTRL3_PRECHG_ALL 0x0000000000000001UL
103#define TCTRL3_PRECHG_ALL_SHIFT 0
104
105/* Memory Timing Control IV */
106#define TCTRL4_WR_MSEL_DLY 0xfc00000000000000UL
107#define TCTRL4_WR_MSEL_DLY_SHIFT 58
108#define TCTRL4_RD_MSEL_DLY 0x03f0000000000000UL
109#define TCTRL4_RD_MSEL_DLY_SHIFT 52
110#define TCTRL4_WRDATA_THLD 0x000c000000000000UL
111#define TCTRL4_WRDATA_THLD_SHIFT 50
112#define TCTRL4_RDWR_RD_RI_DLY 0x0003f00000000000UL
113#define TCTRL4_RDWR_RD_RI_DLY_SHIFT 44
114#define TCTRL4_AUTO_PRECHG_ENBL 0x0000080000000000UL
115#define TCTRL4_AUTO_PRECHG_ENBL_SHIFT 43
116#define TCTRL4_RD_WR_PI_MORE_DLY 0x000007c000000000UL
117#define TCTRL4_RD_WR_PI_MORE_DLY_SHIFT 38
118#define TCTRL4_RD_WR_TI_DLY 0x0000003f00000000UL
119#define TCTRL4_RD_WR_TI_DLY_SHIFT 32
120#define TCTRL4_WR_WR_PI_MORE_DLY 0x00000000f8000000UL
121#define TCTRL4_WR_WR_PI_MORE_DLY_SHIFT 27
122#define TCTRL4_WR_WR_TI_DLY 0x0000000007e00000UL
123#define TCTRL4_WR_WR_TI_DLY_SHIFT 21
124#define TCTRL4_RDWR_RD_PI_MORE_DLY 0x00000000001f0000UL
125#define TCTRL4_RDWR_RD_PI_MORE_DLY_SHIFT 16
126#define TCTRL4_R 0x0000000000008000UL
127#define TCTRL4_R_SHIFT 15
128#define TCTRL4_SDRAM_MODE_REG_DATA 0x0000000000007fffUL
129#define TCTRL4_SDRAM_MODE_REG_DATA_SHIFT 0
130
131/* All 4 memory address decoding registers have the
132 * same layout.
133 */
134#define MEM_DECODE_VALID 0x8000000000000000UL /* Valid */
135#define MEM_DECODE_VALID_SHIFT 63
136#define MEM_DECODE_UK 0x001ffe0000000000UL /* Upper mask */
137#define MEM_DECODE_UK_SHIFT 41
138#define MEM_DECODE_UM 0x0000001ffff00000UL /* Upper match */
139#define MEM_DECODE_UM_SHIFT 20
140#define MEM_DECODE_LK 0x000000000003c000UL /* Lower mask */
141#define MEM_DECODE_LK_SHIFT 14
142#define MEM_DECODE_LM 0x0000000000000f00UL /* Lower match */
143#define MEM_DECODE_LM_SHIFT 8
144
145#define PA_UPPER_BITS 0x000007fffc000000UL
146#define PA_UPPER_BITS_SHIFT 26
147#define PA_LOWER_BITS 0x00000000000003c0UL
148#define PA_LOWER_BITS_SHIFT 6
149
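The decode layout above is typically consumed by splitting a register value into its mask/match fields; a hypothetical helper (not part of this header) illustrating the shift/mask pairs, assuming the entry has MEM_DECODE_VALID set:

static inline void chmc_split_decode(unsigned long val,
				     unsigned long *uk, unsigned long *um,
				     unsigned long *lk, unsigned long *lm)
{
	/* Extract the upper/lower mask and match fields of one
	 * memory address decode register value.
	 */
	*uk = (val & MEM_DECODE_UK) >> MEM_DECODE_UK_SHIFT;
	*um = (val & MEM_DECODE_UM) >> MEM_DECODE_UM_SHIFT;
	*lk = (val & MEM_DECODE_LK) >> MEM_DECODE_LK_SHIFT;
	*lm = (val & MEM_DECODE_LM) >> MEM_DECODE_LM_SHIFT;
}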
150#define MACTRL_R0 0x8000000000000000UL
151#define MACTRL_R0_SHIFT 63
152#define MACTRL_ADDR_LE_PW 0x7000000000000000UL
153#define MACTRL_ADDR_LE_PW_SHIFT 60
154#define MACTRL_CMD_PW 0x0f00000000000000UL
155#define MACTRL_CMD_PW_SHIFT 56
156#define MACTRL_HALF_MODE_WR_MSEL_DLY 0x00fc000000000000UL
157#define MACTRL_HALF_MODE_WR_MSEL_DLY_SHIFT 50
158#define MACTRL_HALF_MODE_RD_MSEL_DLY 0x0003f00000000000UL
159#define MACTRL_HALF_MODE_RD_MSEL_DLY_SHIFT 44
160#define MACTRL_HALF_MODE_SDRAM_CTL_DLY 0x00000f0000000000UL
161#define MACTRL_HALF_MODE_SDRAM_CTL_DLY_SHIFT 40
162#define MACTRL_HALF_MODE_SDRAM_CLK_DLY 0x000000e000000000UL
163#define MACTRL_HALF_MODE_SDRAM_CLK_DLY_SHIFT 37
164#define MACTRL_R1 0x0000001000000000UL
165#define MACTRL_R1_SHIFT 36
166#define MACTRL_BANKSEL_N_ROWADDR_SIZE_B3 0x0000000f00000000UL
167#define MACTRL_BANKSEL_N_ROWADDR_SIZE_B3_SHIFT 32
168#define MACTRL_ENC_INTLV_B3 0x00000000f8000000UL
169#define MACTRL_ENC_INTLV_B3_SHIFT 27
170#define MACTRL_BANKSEL_N_ROWADDR_SIZE_B2 0x0000000007800000UL
171#define MACTRL_BANKSEL_N_ROWADDR_SIZE_B2_SHIFT 23
172#define MACTRL_ENC_INTLV_B2 0x00000000007c0000UL
173#define MACTRL_ENC_INTLV_B2_SHIFT 18
174#define MACTRL_BANKSEL_N_ROWADDR_SIZE_B1 0x000000000003c000UL
175#define MACTRL_BANKSEL_N_ROWADDR_SIZE_B1_SHIFT 14
176#define MACTRL_ENC_INTLV_B1 0x0000000000003e00UL
177#define MACTRL_ENC_INTLV_B1_SHIFT 9
178#define MACTRL_BANKSEL_N_ROWADDR_SIZE_B0 0x00000000000001e0UL
179#define MACTRL_BANKSEL_N_ROWADDR_SIZE_B0_SHIFT 5
180#define MACTRL_ENC_INTLV_B0 0x000000000000001fUL
181#define MACTRL_ENC_INTLV_B0_SHIFT 0
182
183#endif /* _SPARC64_CHMCTRL_H */
diff --git a/include/asm-sparc/cmt.h b/include/asm-sparc/cmt.h
new file mode 100644
index 000000000000..870db5928577
--- /dev/null
+++ b/include/asm-sparc/cmt.h
@@ -0,0 +1,59 @@
1#ifndef _SPARC64_CMT_H
2#define _SPARC64_CMT_H
3
4/* cmt.h: Chip Multi-Threading register definitions
5 *
6 * Copyright (C) 2004 David S. Miller (davem@redhat.com)
7 */
8
9/* ASI_CORE_ID - private */
10#define LP_ID 0x0000000000000010UL
11#define LP_ID_MAX 0x00000000003f0000UL
12#define LP_ID_ID 0x000000000000003fUL
13
14/* ASI_INTR_ID - private */
15#define LP_INTR_ID 0x0000000000000000UL
16#define LP_INTR_ID_ID 0x00000000000003ffUL
17
18/* ASI_CESR_ID - private */
19#define CESR_ID 0x0000000000000040UL
20#define CESR_ID_ID 0x00000000000000ffUL
21
22/* ASI_CORE_AVAILABLE - shared */
23#define LP_AVAIL 0x0000000000000000UL
24#define LP_AVAIL_1 0x0000000000000002UL
25#define LP_AVAIL_0 0x0000000000000001UL
26
27/* ASI_CORE_ENABLE_STATUS - shared */
28#define LP_ENAB_STAT 0x0000000000000010UL
29#define LP_ENAB_STAT_1 0x0000000000000002UL
30#define LP_ENAB_STAT_0 0x0000000000000001UL
31
32/* ASI_CORE_ENABLE - shared */
33#define LP_ENAB 0x0000000000000020UL
34#define LP_ENAB_1 0x0000000000000002UL
35#define LP_ENAB_0 0x0000000000000001UL
36
37/* ASI_CORE_RUNNING - shared */
38#define LP_RUNNING_RW 0x0000000000000050UL
39#define LP_RUNNING_W1S 0x0000000000000060UL
40#define LP_RUNNING_W1C 0x0000000000000068UL
41#define LP_RUNNING_1 0x0000000000000002UL
42#define LP_RUNNING_0 0x0000000000000001UL
43
44/* ASI_CORE_RUNNING_STAT - shared */
45#define LP_RUN_STAT 0x0000000000000058UL
46#define LP_RUN_STAT_1 0x0000000000000002UL
47#define LP_RUN_STAT_0 0x0000000000000001UL
48
49/* ASI_XIR_STEERING - shared */
50#define LP_XIR_STEER 0x0000000000000030UL
51#define LP_XIR_STEER_1 0x0000000000000002UL
52#define LP_XIR_STEER_0 0x0000000000000001UL
53
54/* ASI_CMT_ERROR_STEERING - shared */
55#define CMT_ER_STEER 0x0000000000000040UL
56#define CMT_ER_STEER_1 0x0000000000000002UL
57#define CMT_ER_STEER_0 0x0000000000000001UL
58
59#endif /* _SPARC64_CMT_H */
diff --git a/include/asm-sparc/compat.h b/include/asm-sparc/compat.h
new file mode 100644
index 000000000000..f260b58f5ce9
--- /dev/null
+++ b/include/asm-sparc/compat.h
@@ -0,0 +1,243 @@
1#ifndef _ASM_SPARC64_COMPAT_H
2#define _ASM_SPARC64_COMPAT_H
3/*
4 * Architecture specific compatibility types
5 */
6#include <linux/types.h>
7
8#define COMPAT_USER_HZ 100
9
10typedef u32 compat_size_t;
11typedef s32 compat_ssize_t;
12typedef s32 compat_time_t;
13typedef s32 compat_clock_t;
14typedef s32 compat_pid_t;
15typedef u16 __compat_uid_t;
16typedef u16 __compat_gid_t;
17typedef u32 __compat_uid32_t;
18typedef u32 __compat_gid32_t;
19typedef u16 compat_mode_t;
20typedef u32 compat_ino_t;
21typedef u16 compat_dev_t;
22typedef s32 compat_off_t;
23typedef s64 compat_loff_t;
24typedef s16 compat_nlink_t;
25typedef u16 compat_ipc_pid_t;
26typedef s32 compat_daddr_t;
27typedef u32 compat_caddr_t;
28typedef __kernel_fsid_t compat_fsid_t;
29typedef s32 compat_key_t;
30typedef s32 compat_timer_t;
31
32typedef s32 compat_int_t;
33typedef s32 compat_long_t;
34typedef s64 compat_s64;
35typedef u32 compat_uint_t;
36typedef u32 compat_ulong_t;
37typedef u64 compat_u64;
38
39struct compat_timespec {
40 compat_time_t tv_sec;
41 s32 tv_nsec;
42};
43
44struct compat_timeval {
45 compat_time_t tv_sec;
46 s32 tv_usec;
47};
48
49struct compat_stat {
50 compat_dev_t st_dev;
51 compat_ino_t st_ino;
52 compat_mode_t st_mode;
53 compat_nlink_t st_nlink;
54 __compat_uid_t st_uid;
55 __compat_gid_t st_gid;
56 compat_dev_t st_rdev;
57 compat_off_t st_size;
58 compat_time_t st_atime;
59 compat_ulong_t st_atime_nsec;
60 compat_time_t st_mtime;
61 compat_ulong_t st_mtime_nsec;
62 compat_time_t st_ctime;
63 compat_ulong_t st_ctime_nsec;
64 compat_off_t st_blksize;
65 compat_off_t st_blocks;
66 u32 __unused4[2];
67};
68
69struct compat_stat64 {
70 unsigned long long st_dev;
71
72 unsigned long long st_ino;
73
74 unsigned int st_mode;
75 unsigned int st_nlink;
76
77 unsigned int st_uid;
78 unsigned int st_gid;
79
80 unsigned long long st_rdev;
81
82 unsigned char __pad3[8];
83
84 long long st_size;
85 unsigned int st_blksize;
86
87 unsigned char __pad4[8];
88 unsigned int st_blocks;
89
90 unsigned int st_atime;
91 unsigned int st_atime_nsec;
92
93 unsigned int st_mtime;
94 unsigned int st_mtime_nsec;
95
96 unsigned int st_ctime;
97 unsigned int st_ctime_nsec;
98
99 unsigned int __unused4;
100 unsigned int __unused5;
101};
102
103struct compat_flock {
104 short l_type;
105 short l_whence;
106 compat_off_t l_start;
107 compat_off_t l_len;
108 compat_pid_t l_pid;
109 short __unused;
110};
111
112#define F_GETLK64 12
113#define F_SETLK64 13
114#define F_SETLKW64 14
115
116struct compat_flock64 {
117 short l_type;
118 short l_whence;
119 compat_loff_t l_start;
120 compat_loff_t l_len;
121 compat_pid_t l_pid;
122 short __unused;
123};
124
125struct compat_statfs {
126 int f_type;
127 int f_bsize;
128 int f_blocks;
129 int f_bfree;
130 int f_bavail;
131 int f_files;
132 int f_ffree;
133 compat_fsid_t f_fsid;
134 int f_namelen; /* SunOS ignores this field. */
135 int f_frsize;
136 int f_spare[5];
137};
138
139#define COMPAT_RLIM_INFINITY 0x7fffffff
140
141typedef u32 compat_old_sigset_t;
142
143#define _COMPAT_NSIG 64
144#define _COMPAT_NSIG_BPW 32
145
146typedef u32 compat_sigset_word;
147
148#define COMPAT_OFF_T_MAX 0x7fffffff
149#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
150
151/*
152 * A pointer passed in from user mode. This should not
153 * be used for syscall parameters, just declare them
154 * as pointers because the syscall entry code will have
155 * appropriately converted them already.
156 */
157typedef u32 compat_uptr_t;
158
159static inline void __user *compat_ptr(compat_uptr_t uptr)
160{
161 return (void __user *)(unsigned long)uptr;
162}
163
164static inline compat_uptr_t ptr_to_compat(void __user *uptr)
165{
166 return (u32)(unsigned long)uptr;
167}
168
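A minimal usage sketch of the helpers above (hypothetical, not from this patch; assumes <asm/uaccess.h> for get_user()), e.g. a compat path handed a 32-bit user address as a plain u32:

static inline int example_fetch_u32(compat_uptr_t uarg, u32 *out)
{
	u32 __user *uptr = compat_ptr(uarg);	/* widen the 32-bit user address */

	return get_user(*out, uptr);		/* 0 on success, -EFAULT on fault */
}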
169static inline void __user *compat_alloc_user_space(long len)
170{
171 struct pt_regs *regs = current_thread_info()->kregs;
172 unsigned long usp = regs->u_regs[UREG_I6];
173
174 if (!(test_thread_flag(TIF_32BIT)))
175 usp += STACK_BIAS;
176 else
177 usp &= 0xffffffffUL;
178
179 usp -= len;
180 usp &= ~0x7UL;
181
182 return (void __user *) usp;
183}
184
185struct compat_ipc64_perm {
186 compat_key_t key;
187 __compat_uid32_t uid;
188 __compat_gid32_t gid;
189 __compat_uid32_t cuid;
190 __compat_gid32_t cgid;
191 unsigned short __pad1;
192 compat_mode_t mode;
193 unsigned short __pad2;
194 unsigned short seq;
195 unsigned long __unused1; /* yes they really are 64bit pads */
196 unsigned long __unused2;
197};
198
199struct compat_semid64_ds {
200 struct compat_ipc64_perm sem_perm;
201 unsigned int __pad1;
202 compat_time_t sem_otime;
203 unsigned int __pad2;
204 compat_time_t sem_ctime;
205 u32 sem_nsems;
206 u32 __unused1;
207 u32 __unused2;
208};
209
210struct compat_msqid64_ds {
211 struct compat_ipc64_perm msg_perm;
212 unsigned int __pad1;
213 compat_time_t msg_stime;
214 unsigned int __pad2;
215 compat_time_t msg_rtime;
216 unsigned int __pad3;
217 compat_time_t msg_ctime;
218 unsigned int msg_cbytes;
219 unsigned int msg_qnum;
220 unsigned int msg_qbytes;
221 compat_pid_t msg_lspid;
222 compat_pid_t msg_lrpid;
223 unsigned int __unused1;
224 unsigned int __unused2;
225};
226
227struct compat_shmid64_ds {
228 struct compat_ipc64_perm shm_perm;
229 unsigned int __pad1;
230 compat_time_t shm_atime;
231 unsigned int __pad2;
232 compat_time_t shm_dtime;
233 unsigned int __pad3;
234 compat_time_t shm_ctime;
235 compat_size_t shm_segsz;
236 compat_pid_t shm_cpid;
237 compat_pid_t shm_lpid;
238 unsigned int shm_nattch;
239 unsigned int __unused1;
240 unsigned int __unused2;
241};
242
243#endif /* _ASM_SPARC64_COMPAT_H */
diff --git a/include/asm-sparc/compat_signal.h b/include/asm-sparc/compat_signal.h
new file mode 100644
index 000000000000..b759eab9b51c
--- /dev/null
+++ b/include/asm-sparc/compat_signal.h
@@ -0,0 +1,29 @@
1#ifndef _COMPAT_SIGNAL_H
2#define _COMPAT_SIGNAL_H
3
4#include <linux/compat.h>
5#include <asm/signal.h>
6
7#ifdef CONFIG_COMPAT
8struct __new_sigaction32 {
9 unsigned sa_handler;
10 unsigned int sa_flags;
11 unsigned sa_restorer; /* not used by Linux/SPARC yet */
12 compat_sigset_t sa_mask;
13};
14
15struct __old_sigaction32 {
16 unsigned sa_handler;
17 compat_old_sigset_t sa_mask;
18 unsigned int sa_flags;
19 unsigned sa_restorer; /* not used by Linux/SPARC yet */
20};
21
22typedef struct sigaltstack32 {
23 u32 ss_sp;
24 int ss_flags;
25 compat_size_t ss_size;
26} stack_t32;
27#endif
28
29#endif /* !(_COMPAT_SIGNAL_H) */
diff --git a/include/asm-sparc/cpudata.h b/include/asm-sparc/cpudata.h
index a2c4d51d36c4..b76fac0c8d8f 100644
--- a/include/asm-sparc/cpudata.h
+++ b/include/asm-sparc/cpudata.h
@@ -1,27 +1,8 @@
1/* cpudata.h: Per-cpu parameters.
2 *
3 * Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
4 *
5 * Based on include/asm-sparc64/cpudata.h and Linux 2.4 smp.h
6 * both (C) David S. Miller.
7 */
8
9#ifndef _SPARC_CPUDATA_H
10#define _SPARC_CPUDATA_H
11
12#include <linux/percpu.h>
13
14typedef struct {
15 unsigned long udelay_val;
16 unsigned long clock_tick;
17 unsigned int multiplier;
18 unsigned int counter;
19 int prom_node;
20 int mid;
21 int next;
22} cpuinfo_sparc;
23
24DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
25#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
26
27#endif /* _SPARC_CPUDATA_H */
1#ifndef ___ASM_SPARC_CPUDATA_H
2#define ___ASM_SPARC_CPUDATA_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/cpudata_64.h>
5#else
6#include <asm-sparc/cpudata_32.h>
7#endif
8#endif
diff --git a/include/asm-sparc/cpudata_32.h b/include/asm-sparc/cpudata_32.h
new file mode 100644
index 000000000000..a2c4d51d36c4
--- /dev/null
+++ b/include/asm-sparc/cpudata_32.h
@@ -0,0 +1,27 @@
1/* cpudata.h: Per-cpu parameters.
2 *
3 * Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
4 *
5 * Based on include/asm-sparc64/cpudata.h and Linux 2.4 smp.h
6 * both (C) David S. Miller.
7 */
8
9#ifndef _SPARC_CPUDATA_H
10#define _SPARC_CPUDATA_H
11
12#include <linux/percpu.h>
13
14typedef struct {
15 unsigned long udelay_val;
16 unsigned long clock_tick;
17 unsigned int multiplier;
18 unsigned int counter;
19 int prom_node;
20 int mid;
21 int next;
22} cpuinfo_sparc;
23
24DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
25#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
26
27#endif /* _SPARC_CPUDATA_H */
diff --git a/include/asm-sparc/cpudata_64.h b/include/asm-sparc/cpudata_64.h
new file mode 100644
index 000000000000..532975ecfe10
--- /dev/null
+++ b/include/asm-sparc/cpudata_64.h
@@ -0,0 +1,240 @@
1/* cpudata.h: Per-cpu parameters.
2 *
3 * Copyright (C) 2003, 2005, 2006 David S. Miller (davem@davemloft.net)
4 */
5
6#ifndef _SPARC64_CPUDATA_H
7#define _SPARC64_CPUDATA_H
8
9#include <asm/hypervisor.h>
10#include <asm/asi.h>
11
12#ifndef __ASSEMBLY__
13
14#include <linux/percpu.h>
15#include <linux/threads.h>
16
17typedef struct {
18 /* Dcache line 1 */
19 unsigned int __softirq_pending; /* must be 1st, see rtrap.S */
20 unsigned int __pad0;
21 unsigned long clock_tick; /* %tick's per second */
22 unsigned long __pad;
23 unsigned int __pad1;
24 unsigned int __pad2;
25
26 /* Dcache line 2, rarely used */
27 unsigned int dcache_size;
28 unsigned int dcache_line_size;
29 unsigned int icache_size;
30 unsigned int icache_line_size;
31 unsigned int ecache_size;
32 unsigned int ecache_line_size;
33 int core_id;
34 int proc_id;
35} cpuinfo_sparc;
36
37DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
38#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
39#define local_cpu_data() __get_cpu_var(__cpu_data)
40
41/* Trap handling code needs to get at a few critical values upon
42 * trap entry and to process TSB misses. These cannot be in the
43 * per_cpu() area as we really need to lock them into the TLB and
44 * thus make them part of the main kernel image. As a result we
45 * try to make this as small as possible.
46 *
47 * This is padded out and aligned to 64-bytes to avoid false sharing
48 * on SMP.
49 */
50
51/* If you modify the size of this structure, please update
52 * TRAP_BLOCK_SZ_SHIFT below.
53 */
54struct thread_info;
55struct trap_per_cpu {
56/* D-cache line 1: Basic thread information, cpu and device mondo queues */
57 struct thread_info *thread;
58 unsigned long pgd_paddr;
59 unsigned long cpu_mondo_pa;
60 unsigned long dev_mondo_pa;
61
62/* D-cache line 2: Error Mondo Queue and kernel buffer pointers */
63 unsigned long resum_mondo_pa;
64 unsigned long resum_kernel_buf_pa;
65 unsigned long nonresum_mondo_pa;
66 unsigned long nonresum_kernel_buf_pa;
67
68/* Dcache lines 3, 4, 5, and 6: Hypervisor Fault Status */
69 struct hv_fault_status fault_info;
70
71/* Dcache line 7: Physical addresses of CPU send mondo block and CPU list. */
72 unsigned long cpu_mondo_block_pa;
73 unsigned long cpu_list_pa;
74 unsigned long tsb_huge;
75 unsigned long tsb_huge_temp;
76
77/* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */
78 unsigned long irq_worklist_pa;
79 unsigned int cpu_mondo_qmask;
80 unsigned int dev_mondo_qmask;
81 unsigned int resum_qmask;
82 unsigned int nonresum_qmask;
83 void *hdesc;
84} __attribute__((aligned(64)));
85extern struct trap_per_cpu trap_block[NR_CPUS];
86extern void init_cur_cpu_trap(struct thread_info *);
87extern void setup_tba(void);
88extern int ncpus_probed;
89extern void __init cpu_probe(void);
90extern const struct seq_operations cpuinfo_op;
91
92extern unsigned long real_hard_smp_processor_id(void);
93
94struct cpuid_patch_entry {
95 unsigned int addr;
96 unsigned int cheetah_safari[4];
97 unsigned int cheetah_jbus[4];
98 unsigned int starfire[4];
99 unsigned int sun4v[4];
100};
101extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
102
103struct sun4v_1insn_patch_entry {
104 unsigned int addr;
105 unsigned int insn;
106};
107extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch,
108 __sun4v_1insn_patch_end;
109
110struct sun4v_2insn_patch_entry {
111 unsigned int addr;
112 unsigned int insns[2];
113};
114extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
115 __sun4v_2insn_patch_end;
116
117#endif /* !(__ASSEMBLY__) */
118
119#define TRAP_PER_CPU_THREAD 0x00
120#define TRAP_PER_CPU_PGD_PADDR 0x08
121#define TRAP_PER_CPU_CPU_MONDO_PA 0x10
122#define TRAP_PER_CPU_DEV_MONDO_PA 0x18
123#define TRAP_PER_CPU_RESUM_MONDO_PA 0x20
124#define TRAP_PER_CPU_RESUM_KBUF_PA 0x28
125#define TRAP_PER_CPU_NONRESUM_MONDO_PA 0x30
126#define TRAP_PER_CPU_NONRESUM_KBUF_PA 0x38
127#define TRAP_PER_CPU_FAULT_INFO 0x40
128#define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA 0xc0
129#define TRAP_PER_CPU_CPU_LIST_PA 0xc8
130#define TRAP_PER_CPU_TSB_HUGE 0xd0
131#define TRAP_PER_CPU_TSB_HUGE_TEMP 0xd8
132#define TRAP_PER_CPU_IRQ_WORKLIST_PA 0xe0
133#define TRAP_PER_CPU_CPU_MONDO_QMASK 0xe8
134#define TRAP_PER_CPU_DEV_MONDO_QMASK 0xec
135#define TRAP_PER_CPU_RESUM_QMASK 0xf0
136#define TRAP_PER_CPU_NONRESUM_QMASK 0xf4
137
138#define TRAP_BLOCK_SZ_SHIFT 8
139
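The offset list above encodes the assumption that sizeof(struct trap_per_cpu) equals 1 << TRAP_BLOCK_SZ_SHIFT (256 bytes, given the 64-byte alignment). A hypothetical compile-time check of that invariant (not part of this patch; assumes BUILD_BUG_ON from <linux/kernel.h>):

static inline void trap_block_layout_check(void)
{
	/* The assembly macros below step through trap_block[] by shifting
	 * the cpu id, so the C structure must keep exactly this size.
	 */
	BUILD_BUG_ON(sizeof(struct trap_per_cpu) != (1 << TRAP_BLOCK_SZ_SHIFT));
}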
140#include <asm/scratchpad.h>
141
142#define __GET_CPUID(REG) \
143 /* Spitfire implementation (default). */ \
144661: ldxa [%g0] ASI_UPA_CONFIG, REG; \
145 srlx REG, 17, REG; \
146 and REG, 0x1f, REG; \
147 nop; \
148 .section .cpuid_patch, "ax"; \
149 /* Instruction location. */ \
150 .word 661b; \
151 /* Cheetah Safari implementation. */ \
152 ldxa [%g0] ASI_SAFARI_CONFIG, REG; \
153 srlx REG, 17, REG; \
154 and REG, 0x3ff, REG; \
155 nop; \
156 /* Cheetah JBUS implementation. */ \
157 ldxa [%g0] ASI_JBUS_CONFIG, REG; \
158 srlx REG, 17, REG; \
159 and REG, 0x1f, REG; \
160 nop; \
161 /* Starfire implementation. */ \
162 sethi %hi(0x1fff40000d0 >> 9), REG; \
163 sllx REG, 9, REG; \
164 or REG, 0xd0, REG; \
165 lduwa [REG] ASI_PHYS_BYPASS_EC_E, REG;\
166 /* sun4v implementation. */ \
167 mov SCRATCHPAD_CPUID, REG; \
168 ldxa [REG] ASI_SCRATCHPAD, REG; \
169 nop; \
170 nop; \
171 .previous;
172
173#ifdef CONFIG_SMP
174
175#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
176 __GET_CPUID(TMP) \
177 sethi %hi(trap_block), DEST; \
178 sllx TMP, TRAP_BLOCK_SZ_SHIFT, TMP; \
179 or DEST, %lo(trap_block), DEST; \
180 add DEST, TMP, DEST; \
181
182/* Clobbers TMP, current address space PGD phys address into DEST. */
183#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
184 TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
185 ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
186
187/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
188#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \
189 TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
190 add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
191
192/* Clobbers TMP, loads DEST with current thread info pointer. */
193#define TRAP_LOAD_THREAD_REG(DEST, TMP) \
194 TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
195 ldx [DEST + TRAP_PER_CPU_THREAD], DEST;
196
197/* Given the current thread info pointer in THR, load the per-cpu
198 * area base of the current processor into DEST. REG1, REG2, and REG3 are
199 * clobbered.
200 *
201 * You absolutely cannot use DEST as a temporary in this code. The
202 * reason is that traps can happen during execution, and return from
203 * trap will load the fully resolved DEST per-cpu base. This can corrupt
204 * the calculations done by the macro mid-stream.
205 */
206#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) \
207 lduh [THR + TI_CPU], REG1; \
208 sethi %hi(__per_cpu_shift), REG3; \
209 sethi %hi(__per_cpu_base), REG2; \
210 ldx [REG3 + %lo(__per_cpu_shift)], REG3; \
211 ldx [REG2 + %lo(__per_cpu_base)], REG2; \
212 sllx REG1, REG3, REG3; \
213 add REG3, REG2, DEST;
214
215#else
216
217#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
218 sethi %hi(trap_block), DEST; \
219 or DEST, %lo(trap_block), DEST; \
220
221/* Uniprocessor versions, we know the cpuid is zero. */
222#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
223 TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
224 ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
225
226/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
227#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \
228 TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
229 add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
230
231#define TRAP_LOAD_THREAD_REG(DEST, TMP) \
232 TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
233 ldx [DEST + TRAP_PER_CPU_THREAD], DEST;
234
235/* No per-cpu areas on uniprocessor, so no need to load DEST. */
236#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)
237
238#endif /* !(CONFIG_SMP) */
239
240#endif /* _SPARC64_CPUDATA_H */
diff --git a/include/asm-sparc/dcr.h b/include/asm-sparc/dcr.h
new file mode 100644
index 000000000000..620c9ba642e9
--- /dev/null
+++ b/include/asm-sparc/dcr.h
@@ -0,0 +1,14 @@
1#ifndef _SPARC64_DCR_H
2#define _SPARC64_DCR_H
3
4/* UltraSparc-III/III+ Dispatch Control Register, ASR 0x12 */
5#define DCR_DPE 0x0000000000001000 /* III+: D$ Parity Error Enable */
6#define DCR_OBS 0x0000000000000fc0 /* Observability Bus Controls */
7#define DCR_BPE 0x0000000000000020 /* Branch Predict Enable */
8#define DCR_RPE 0x0000000000000010 /* Return Address Prediction Enable */
9#define DCR_SI 0x0000000000000008 /* Single Instruction Disable */
10#define DCR_IPE 0x0000000000000004 /* III+: I$ Parity Error Enable */
11#define DCR_IFPOE 0x0000000000000002 /* IRQ FP Operation Enable */
12#define DCR_MS 0x0000000000000001 /* Multi-Scalar dispatch */
13
14#endif /* _SPARC64_DCR_H */
diff --git a/include/asm-sparc/dcu.h b/include/asm-sparc/dcu.h
new file mode 100644
index 000000000000..0f704e106a1b
--- /dev/null
+++ b/include/asm-sparc/dcu.h
@@ -0,0 +1,27 @@
1#ifndef _SPARC64_DCU_H
2#define _SPARC64_DCU_H
3
4#include <linux/const.h>
5
6/* UltraSparc-III Data Cache Unit Control Register */
7#define DCU_CP _AC(0x0002000000000000,UL) /* Phys Cache Enable w/o mmu */
8#define DCU_CV _AC(0x0001000000000000,UL) /* Virt Cache Enable w/o mmu */
9#define DCU_ME _AC(0x0000800000000000,UL) /* NC-store Merging Enable */
10#define DCU_RE _AC(0x0000400000000000,UL) /* RAW bypass Enable */
11#define DCU_PE _AC(0x0000200000000000,UL) /* PCache Enable */
12#define DCU_HPE _AC(0x0000100000000000,UL) /* HW prefetch Enable */
13#define DCU_SPE _AC(0x0000080000000000,UL) /* SW prefetch Enable */
14#define DCU_SL _AC(0x0000040000000000,UL) /* Secondary ld-steering Enab*/
15#define DCU_WE _AC(0x0000020000000000,UL) /* WCache enable */
16#define DCU_PM _AC(0x000001fe00000000,UL) /* PA Watchpoint Byte Mask */
17#define DCU_VM _AC(0x00000001fe000000,UL) /* VA Watchpoint Byte Mask */
18#define DCU_PR _AC(0x0000000001000000,UL) /* PA Watchpoint Read Enable */
19#define DCU_PW _AC(0x0000000000800000,UL) /* PA Watchpoint Write Enable*/
20#define DCU_VR _AC(0x0000000000400000,UL) /* VA Watchpoint Read Enable */
21#define DCU_VW _AC(0x0000000000200000,UL) /* VA Watchpoint Write Enable*/
22#define DCU_DM _AC(0x0000000000000008,UL) /* DMMU Enable */
23#define DCU_IM _AC(0x0000000000000004,UL) /* IMMU Enable */
24#define DCU_DC _AC(0x0000000000000002,UL) /* Data Cache Enable */
25#define DCU_IC _AC(0x0000000000000001,UL) /* Instruction Cache Enable */
26
27#endif /* _SPARC64_DCU_H */
diff --git a/include/asm-sparc/delay.h b/include/asm-sparc/delay.h
index bc9aba2bead6..6210a3ce9751 100644
--- a/include/asm-sparc/delay.h
+++ b/include/asm-sparc/delay.h
@@ -1,34 +1,8 @@
1/*
2 * delay.h: Linux delay routines on the Sparc.
3 *
4 * Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu).
5 */
6
7#ifndef __SPARC_DELAY_H
8#define __SPARC_DELAY_H
9
10#include <asm/cpudata.h>
11
12static inline void __delay(unsigned long loops)
13{
14 __asm__ __volatile__("cmp %0, 0\n\t"
15 "1: bne 1b\n\t"
16 "subcc %0, 1, %0\n" :
17 "=&r" (loops) :
18 "0" (loops) :
19 "cc");
20}
21
22/* This is too messy with inline asm on the Sparc. */
23extern void __udelay(unsigned long usecs, unsigned long lpj);
24extern void __ndelay(unsigned long nsecs, unsigned long lpj);
25
26#ifdef CONFIG_SMP
27#define __udelay_val cpu_data(smp_processor_id()).udelay_val
28#else /* SMP */
29#define __udelay_val loops_per_jiffy
30#endif /* SMP */
31#define udelay(__usecs) __udelay(__usecs, __udelay_val)
32#define ndelay(__nsecs) __ndelay(__nsecs, __udelay_val)
33
34#endif /* defined(__SPARC_DELAY_H) */
1#ifndef ___ASM_SPARC_DELAY_H
2#define ___ASM_SPARC_DELAY_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/delay_64.h>
5#else
6#include <asm-sparc/delay_32.h>
7#endif
8#endif
diff --git a/include/asm-sparc/delay_32.h b/include/asm-sparc/delay_32.h
new file mode 100644
index 000000000000..bc9aba2bead6
--- /dev/null
+++ b/include/asm-sparc/delay_32.h
@@ -0,0 +1,34 @@
1/*
2 * delay.h: Linux delay routines on the Sparc.
3 *
4 * Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu).
5 */
6
7#ifndef __SPARC_DELAY_H
8#define __SPARC_DELAY_H
9
10#include <asm/cpudata.h>
11
12static inline void __delay(unsigned long loops)
13{
14 __asm__ __volatile__("cmp %0, 0\n\t"
15 "1: bne 1b\n\t"
16 "subcc %0, 1, %0\n" :
17 "=&r" (loops) :
18 "0" (loops) :
19 "cc");
20}
21
22/* This is too messy with inline asm on the Sparc. */
23extern void __udelay(unsigned long usecs, unsigned long lpj);
24extern void __ndelay(unsigned long nsecs, unsigned long lpj);
25
26#ifdef CONFIG_SMP
27#define __udelay_val cpu_data(smp_processor_id()).udelay_val
28#else /* SMP */
29#define __udelay_val loops_per_jiffy
30#endif /* SMP */
31#define udelay(__usecs) __udelay(__usecs, __udelay_val)
32#define ndelay(__nsecs) __ndelay(__nsecs, __udelay_val)
33
34#endif /* defined(__SPARC_DELAY_H) */
diff --git a/include/asm-sparc/delay_64.h b/include/asm-sparc/delay_64.h
new file mode 100644
index 000000000000..a77aa622d762
--- /dev/null
+++ b/include/asm-sparc/delay_64.h
@@ -0,0 +1,17 @@
1/* delay.h: Linux delay routines on sparc64.
2 *
3 * Copyright (C) 1996, 2004, 2007 David S. Miller (davem@davemloft.net).
4 */
5
6#ifndef _SPARC64_DELAY_H
7#define _SPARC64_DELAY_H
8
9#ifndef __ASSEMBLY__
10
11extern void __delay(unsigned long loops);
12extern void udelay(unsigned long usecs);
13#define mdelay(n) udelay((n) * 1000)
14
15#endif /* !__ASSEMBLY__ */
16
17#endif /* _SPARC64_DELAY_H */
diff --git a/include/asm-sparc/display7seg.h b/include/asm-sparc/display7seg.h
new file mode 100644
index 000000000000..86d4a901df24
--- /dev/null
+++ b/include/asm-sparc/display7seg.h
@@ -0,0 +1,79 @@
1/*
2 *
3 * display7seg - Driver interface for the 7-segment display
4 * present on Sun Microsystems CP1400 and CP1500
5 *
6 * Copyright (c) 2000 Eric Brower <ebrower@usa.net>
7 *
8 */
9
10#ifndef __display7seg_h__
11#define __display7seg_h__
12
13#define D7S_IOC 'p'
14
15#define D7SIOCRD _IOR(D7S_IOC, 0x45, int) /* Read device state */
16#define D7SIOCWR _IOW(D7S_IOC, 0x46, int) /* Write device state */
17#define D7SIOCTM _IO (D7S_IOC, 0x47) /* Translate mode (FLIP)*/
18
19/*
20 * ioctl flag definitions
21 *
22 * POINT - Toggle decimal point (0=absent 1=present)
23 * ALARM - Toggle alarm LED (0=green 1=red)
24 * FLIP - Toggle inverted mode (0=normal 1=flipped)
25 * bits 0-4 - Character displayed (see definitions below)
26 *
27 * Display segments are defined as follows,
28 * subject to D7S_FLIP register state:
29 *
30 * a
31 * ---
32 * f| |b
33 * -g-
34 * e| |c
35 * ---
36 * d
37 */
38
39#define D7S_POINT (1 << 7) /* Decimal point*/
40#define D7S_ALARM (1 << 6) /* Alarm LED */
41#define D7S_FLIP (1 << 5) /* Flip display */
42
43#define D7S_0 0x00 /* Numerals 0-9 */
44#define D7S_1 0x01
45#define D7S_2 0x02
46#define D7S_3 0x03
47#define D7S_4 0x04
48#define D7S_5 0x05
49#define D7S_6 0x06
50#define D7S_7 0x07
51#define D7S_8 0x08
52#define D7S_9 0x09
53#define D7S_A 0x0A /* Letters A-F, H, L, P */
54#define D7S_B 0x0B
55#define D7S_C 0x0C
56#define D7S_D 0x0D
57#define D7S_E 0x0E
58#define D7S_F 0x0F
59#define D7S_H 0x10
60#define D7S_E2 0x11
61#define D7S_L 0x12
62#define D7S_P 0x13
63#define D7S_SEGA 0x14 /* Individual segments */
64#define D7S_SEGB 0x15
65#define D7S_SEGC 0x16
66#define D7S_SEGD 0x17
67#define D7S_SEGE 0x18
68#define D7S_SEGF 0x19
69#define D7S_SEGG 0x1A
70#define D7S_SEGABFG 0x1B /* Segment groupings */
71#define D7S_SEGCDEG 0x1C
72#define D7S_SEGBCEF 0x1D
73#define D7S_SEGADG 0x1E
74#define D7S_BLANK 0x1F /* Clear all segments */
75
76#define D7S_MIN_VAL 0x0
77#define D7S_MAX_VAL 0x1F
78
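A hypothetical user-space sketch (not part of this header; the opened device node is an assumption) showing how a character code and a flag bit combine into the single int written via D7SIOCWR:

#include <sys/ioctl.h>

int d7s_show_five_with_point(int fd)
{
	int state = D7S_5 | D7S_POINT;	/* digit in bits 0-4, decimal point */

	return ioctl(fd, D7SIOCWR, &state);
}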
79#endif /* ifndef __display7seg_h__ */
diff --git a/include/asm-sparc/dma-mapping.h b/include/asm-sparc/dma-mapping.h
index f3a641e6b2c8..7483504259ce 100644
--- a/include/asm-sparc/dma-mapping.h
+++ b/include/asm-sparc/dma-mapping.h
@@ -1,11 +1,8 @@
1#ifndef _ASM_SPARC_DMA_MAPPING_H
2#define _ASM_SPARC_DMA_MAPPING_H
3
4
5#ifdef CONFIG_PCI
6#include <asm-generic/dma-mapping.h>
7#else
8#include <asm-generic/dma-mapping-broken.h>
9#endif /* PCI */
10
11#endif /* _ASM_SPARC_DMA_MAPPING_H */
1#ifndef ___ASM_SPARC_DMA_MAPPING_H
2#define ___ASM_SPARC_DMA_MAPPING_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/dma-mapping_64.h>
5#else
6#include <asm-sparc/dma-mapping_32.h>
7#endif
8#endif
diff --git a/include/asm-sparc/dma-mapping_32.h b/include/asm-sparc/dma-mapping_32.h
new file mode 100644
index 000000000000..f3a641e6b2c8
--- /dev/null
+++ b/include/asm-sparc/dma-mapping_32.h
@@ -0,0 +1,11 @@
1#ifndef _ASM_SPARC_DMA_MAPPING_H
2#define _ASM_SPARC_DMA_MAPPING_H
3
4
5#ifdef CONFIG_PCI
6#include <asm-generic/dma-mapping.h>
7#else
8#include <asm-generic/dma-mapping-broken.h>
9#endif /* PCI */
10
11#endif /* _ASM_SPARC_DMA_MAPPING_H */
diff --git a/include/asm-sparc/dma-mapping_64.h b/include/asm-sparc/dma-mapping_64.h
new file mode 100644
index 000000000000..bfa64f9702d5
--- /dev/null
+++ b/include/asm-sparc/dma-mapping_64.h
@@ -0,0 +1,154 @@
1#ifndef _ASM_SPARC64_DMA_MAPPING_H
2#define _ASM_SPARC64_DMA_MAPPING_H
3
4#include <linux/scatterlist.h>
5#include <linux/mm.h>
6
7#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
8
9struct dma_ops {
10 void *(*alloc_coherent)(struct device *dev, size_t size,
11 dma_addr_t *dma_handle, gfp_t flag);
12 void (*free_coherent)(struct device *dev, size_t size,
13 void *cpu_addr, dma_addr_t dma_handle);
14 dma_addr_t (*map_single)(struct device *dev, void *cpu_addr,
15 size_t size,
16 enum dma_data_direction direction);
17 void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
18 size_t size,
19 enum dma_data_direction direction);
20 int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
21 enum dma_data_direction direction);
22 void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
23 int nhwentries,
24 enum dma_data_direction direction);
25 void (*sync_single_for_cpu)(struct device *dev,
26 dma_addr_t dma_handle, size_t size,
27 enum dma_data_direction direction);
28 void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
29 int nelems,
30 enum dma_data_direction direction);
31};
32extern const struct dma_ops *dma_ops;
33
34extern int dma_supported(struct device *dev, u64 mask);
35extern int dma_set_mask(struct device *dev, u64 dma_mask);
36
37static inline void *dma_alloc_coherent(struct device *dev, size_t size,
38 dma_addr_t *dma_handle, gfp_t flag)
39{
40 return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
41}
42
43static inline void dma_free_coherent(struct device *dev, size_t size,
44 void *cpu_addr, dma_addr_t dma_handle)
45{
46 dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
47}
48
49static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
50 size_t size,
51 enum dma_data_direction direction)
52{
53 return dma_ops->map_single(dev, cpu_addr, size, direction);
54}
55
56static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
57 size_t size,
58 enum dma_data_direction direction)
59{
60 dma_ops->unmap_single(dev, dma_addr, size, direction);
61}
62
63static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
64 unsigned long offset, size_t size,
65 enum dma_data_direction direction)
66{
67 return dma_ops->map_single(dev, page_address(page) + offset,
68 size, direction);
69}
70
71static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
72 size_t size,
73 enum dma_data_direction direction)
74{
75 dma_ops->unmap_single(dev, dma_address, size, direction);
76}
77
78static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
79 int nents, enum dma_data_direction direction)
80{
81 return dma_ops->map_sg(dev, sg, nents, direction);
82}
83
84static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
85 int nents, enum dma_data_direction direction)
86{
87 dma_ops->unmap_sg(dev, sg, nents, direction);
88}
89
90static inline void dma_sync_single_for_cpu(struct device *dev,
91 dma_addr_t dma_handle, size_t size,
92 enum dma_data_direction direction)
93{
94 dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
95}
96
97static inline void dma_sync_single_for_device(struct device *dev,
98 dma_addr_t dma_handle,
99 size_t size,
100 enum dma_data_direction direction)
101{
102 /* No flushing needed to sync cpu writes to the device. */
103}
104
105static inline void dma_sync_single_range_for_cpu(struct device *dev,
106 dma_addr_t dma_handle,
107 unsigned long offset,
108 size_t size,
109 enum dma_data_direction direction)
110{
111 dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
112}
113
114static inline void dma_sync_single_range_for_device(struct device *dev,
115 dma_addr_t dma_handle,
116 unsigned long offset,
117 size_t size,
118 enum dma_data_direction direction)
119{
120 /* No flushing needed to sync cpu writes to the device. */
121}
122
123
124static inline void dma_sync_sg_for_cpu(struct device *dev,
125 struct scatterlist *sg, int nelems,
126 enum dma_data_direction direction)
127{
128 dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
129}
130
131static inline void dma_sync_sg_for_device(struct device *dev,
132 struct scatterlist *sg, int nelems,
133 enum dma_data_direction direction)
134{
135 /* No flushing needed to sync cpu writes to the device. */
136}
137
138static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
139{
140 return (dma_addr == DMA_ERROR_CODE);
141}
142
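A minimal driver-style sketch (hypothetical, not part of this patch) of how the wrappers above fit together, including the DMA_ERROR_CODE check:

static int example_dma_to_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the device with 'handle' and wait for completion ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}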
143static inline int dma_get_cache_alignment(void)
144{
145 /* no easy way to get cache size on all processors, so return
146 * the maximum possible, to be safe */
147 return (1 << INTERNODE_CACHE_SHIFT);
148}
149
150#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
151#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
152#define dma_is_consistent(d, h) (1)
153
154#endif /* _ASM_SPARC64_DMA_MAPPING_H */
diff --git a/include/asm-sparc/dma.h b/include/asm-sparc/dma.h
index 959d6c8a71ae..8cc69bfaae2a 100644
--- a/include/asm-sparc/dma.h
+++ b/include/asm-sparc/dma.h
@@ -1,288 +1,8 @@
1/* include/asm-sparc/dma.h
2 *
3 * Copyright 1995 (C) David S. Miller (davem@davemloft.net)
4 */
1#ifndef ___ASM_SPARC_DMA_H
2#define ___ASM_SPARC_DMA_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/dma_64.h>
5
6#ifndef _ASM_SPARC_DMA_H
7#define _ASM_SPARC_DMA_H
8
9#include <linux/kernel.h>
10#include <linux/types.h>
11
12#include <asm/vac-ops.h> /* for invalidate's, etc. */
13#include <asm/sbus.h>
14#include <asm/delay.h>
15#include <asm/oplib.h>
16#include <asm/system.h>
17#include <asm/io.h>
18#include <linux/spinlock.h>
19
20struct page;
21extern spinlock_t dma_spin_lock;
22
23static inline unsigned long claim_dma_lock(void)
24{
25 unsigned long flags;
26 spin_lock_irqsave(&dma_spin_lock, flags);
27 return flags;
28}
29
30static inline void release_dma_lock(unsigned long flags)
31{
32 spin_unlock_irqrestore(&dma_spin_lock, flags);
33}
34
35/* These are irrelevant for Sparc DMA, but we leave it in so that
36 * things can compile.
37 */
38#define MAX_DMA_CHANNELS 8
39#define MAX_DMA_ADDRESS (~0UL)
40#define DMA_MODE_READ 1
41#define DMA_MODE_WRITE 2
42
43/* Useful constants */
44#define SIZE_16MB (16*1024*1024)
45#define SIZE_64K (64*1024)
46
47/* SBUS DMA controller reg offsets */
48#define DMA_CSR 0x00UL /* rw DMA control/status register 0x00 */
49#define DMA_ADDR 0x04UL /* rw DMA transfer address register 0x04 */
50#define DMA_COUNT 0x08UL /* rw DMA transfer count register 0x08 */
51#define DMA_TEST 0x0cUL /* rw DMA test/debug register 0x0c */
52
53/* DVMA chip revisions */
54enum dvma_rev {
55 dvmarev0,
56 dvmaesc1,
57 dvmarev1,
58 dvmarev2,
59 dvmarev3,
60 dvmarevplus,
61 dvmahme
62};
63
64#define DMA_HASCOUNT(rev) ((rev)==dvmaesc1)
65
66/* Linux DMA information structure, filled during probe. */
67struct sbus_dma {
68 struct sbus_dma *next;
69 struct sbus_dev *sdev;
70 void __iomem *regs;
71
72 /* Status, misc info */
73 int node; /* Prom node for this DMA device */
74 int running; /* Are we doing DMA now? */
75 int allocated; /* Are we "owned" by anyone yet? */
76
77 /* Transfer information. */
78 unsigned long addr; /* Start address of current transfer */
79 int nbytes; /* Size of current transfer */
80 int realbytes; /* For splitting up large transfers, etc. */
81
82 /* DMA revision */
83 enum dvma_rev revision;
84};
85
86extern struct sbus_dma *dma_chain;
87
88/* Broken hardware... */
89#ifdef CONFIG_SUN4
90/* Have to sort this out. Does rev0 work fine on sun4[cmd] without isbroken?
91 * Or is rev0 present only on sun4 boxes? -jj */
92#define DMA_ISBROKEN(dma) ((dma)->revision == dvmarev0 || (dma)->revision == dvmarev1)
93#else
94#define DMA_ISBROKEN(dma) ((dma)->revision == dvmarev1)
95#endif
5#else
6#include <asm-sparc/dma_32.h>
7#endif
96#define DMA_ISESC1(dma) ((dma)->revision == dvmaesc1)
97
98/* Main routines in dma.c */
99extern void dvma_init(struct sbus_bus *);
100
101/* Fields in the cond_reg register */
102/* First, the version identification bits */
103#define DMA_DEVICE_ID 0xf0000000 /* Device identification bits */
104#define DMA_VERS0 0x00000000 /* Sunray DMA version */
105#define DMA_ESCV1 0x40000000 /* DMA ESC Version 1 */
106#define DMA_VERS1 0x80000000 /* DMA rev 1 */
107#define DMA_VERS2 0xa0000000 /* DMA rev 2 */
108#define DMA_VERHME 0xb0000000 /* DMA hme gate array */
109#define DMA_VERSPLUS 0x90000000 /* DMA rev 1 PLUS */
110
111#define DMA_HNDL_INTR 0x00000001 /* An IRQ needs to be handled */
112#define DMA_HNDL_ERROR 0x00000002 /* We need to take an error */
113#define DMA_FIFO_ISDRAIN 0x0000000c /* The DMA FIFO is draining */
114#define DMA_INT_ENAB 0x00000010 /* Turn on interrupts */
115#define DMA_FIFO_INV 0x00000020 /* Invalidate the FIFO */
116#define DMA_ACC_SZ_ERR 0x00000040 /* The access size was bad */
117#define DMA_FIFO_STDRAIN 0x00000040 /* DMA_VERS1 Drain the FIFO */
118#define DMA_RST_SCSI 0x00000080 /* Reset the SCSI controller */
119#define DMA_RST_ENET DMA_RST_SCSI /* Reset the ENET controller */
120#define DMA_RST_BPP DMA_RST_SCSI /* Reset the BPP controller */
121#define DMA_ST_WRITE 0x00000100 /* write from device to memory */
122#define DMA_ENABLE 0x00000200 /* Fire up DMA, handle requests */
123#define DMA_PEND_READ 0x00000400 /* DMA_VERS1/0/PLUS Pending Read */
124#define DMA_ESC_BURST 0x00000800 /* 1=16byte 0=32byte */
125#define DMA_READ_AHEAD 0x00001800 /* DMA read ahead partial longword */
126#define DMA_DSBL_RD_DRN 0x00001000 /* No EC drain on slave reads */
127#define DMA_BCNT_ENAB 0x00002000 /* If on, use the byte counter */
128#define DMA_TERM_CNTR 0x00004000 /* Terminal counter */
129#define DMA_SCSI_SBUS64 0x00008000 /* HME: Enable 64-bit SBUS mode. */
130#define DMA_CSR_DISAB 0x00010000 /* No FIFO drains during csr */
131#define DMA_SCSI_DISAB 0x00020000 /* No FIFO drains during reg */
132#define DMA_DSBL_WR_INV 0x00020000 /* No EC inval. on slave writes */
133#define DMA_ADD_ENABLE 0x00040000 /* Special ESC DVMA optimization */
134#define DMA_E_BURSTS 0x000c0000 /* ENET: SBUS r/w burst mask */
135#define DMA_E_BURST32 0x00040000 /* ENET: SBUS 32 byte r/w burst */
136#define DMA_E_BURST16 0x00000000 /* ENET: SBUS 16 byte r/w burst */
137#define DMA_BRST_SZ 0x000c0000 /* SCSI: SBUS r/w burst size */
138#define DMA_BRST64 0x00080000 /* SCSI: 64byte bursts (HME on UltraSparc only) */
139#define DMA_BRST32 0x00040000 /* SCSI/BPP: 32byte bursts */
140#define DMA_BRST16 0x00000000 /* SCSI/BPP: 16byte bursts */
141#define DMA_BRST0 0x00080000 /* SCSI: no bursts (non-HME gate arrays) */
142#define DMA_ADDR_DISAB 0x00100000 /* No FIFO drains during addr */
143#define DMA_2CLKS 0x00200000 /* Each transfer = 2 clock ticks */
144#define DMA_3CLKS 0x00400000 /* Each transfer = 3 clock ticks */
145#define DMA_EN_ENETAUI DMA_3CLKS /* Put lance into AUI-cable mode */
146#define DMA_CNTR_DISAB 0x00800000 /* No IRQ when DMA_TERM_CNTR set */
147#define DMA_AUTO_NADDR 0x01000000 /* Use "auto nxt addr" feature */
148#define DMA_SCSI_ON 0x02000000 /* Enable SCSI dma */
149#define DMA_BPP_ON DMA_SCSI_ON /* Enable BPP dma */
150#define DMA_PARITY_OFF 0x02000000 /* HME: disable parity checking */
151#define DMA_LOADED_ADDR 0x04000000 /* Address has been loaded */
152#define DMA_LOADED_NADDR 0x08000000 /* Next address has been loaded */
153#define DMA_RESET_FAS366 0x08000000 /* HME: Assert RESET to FAS366 */
154
155/* Values describing the burst-size property from the PROM */
156#define DMA_BURST1 0x01
157#define DMA_BURST2 0x02
158#define DMA_BURST4 0x04
159#define DMA_BURST8 0x08
160#define DMA_BURST16 0x10
161#define DMA_BURST32 0x20
162#define DMA_BURST64 0x40
163#define DMA_BURSTBITS 0x7f
164
165/* Determine highest possible final transfer address given a base */
166#define DMA_MAXEND(addr) (0x01000000UL-(((unsigned long)(addr))&0x00ffffffUL))
167
168/* Yes, I hack a lot of elisp in my spare time... */
169#define DMA_ERROR_P(regs) ((((regs)->cond_reg) & DMA_HNDL_ERROR))
170#define DMA_IRQ_P(regs) ((((regs)->cond_reg) & (DMA_HNDL_INTR | DMA_HNDL_ERROR)))
171#define DMA_WRITE_P(regs) ((((regs)->cond_reg) & DMA_ST_WRITE))
172#define DMA_OFF(regs) ((((regs)->cond_reg) &= (~DMA_ENABLE)))
173#define DMA_INTSOFF(regs) ((((regs)->cond_reg) &= (~DMA_INT_ENAB)))
174#define DMA_INTSON(regs) ((((regs)->cond_reg) |= (DMA_INT_ENAB)))
175#define DMA_PUNTFIFO(regs) ((((regs)->cond_reg) |= DMA_FIFO_INV))
176#define DMA_SETSTART(regs, addr) ((((regs)->st_addr) = (char *) addr))
177#define DMA_BEGINDMA_W(regs) \
178 ((((regs)->cond_reg |= (DMA_ST_WRITE|DMA_ENABLE|DMA_INT_ENAB))))
179#define DMA_BEGINDMA_R(regs) \
180 ((((regs)->cond_reg |= ((DMA_ENABLE|DMA_INT_ENAB)&(~DMA_ST_WRITE)))))
181
182/* For certain DMA chips, we need to disable ints upon irq entry
183 * and turn them back on when we are done. So in any ESP interrupt
184 * handler you *must* call DMA_IRQ_ENTRY upon entry and DMA_IRQ_EXIT
185 * when leaving the handler. You have been warned...
186 */
187#define DMA_IRQ_ENTRY(dma, dregs) do { \
188 if(DMA_ISBROKEN(dma)) DMA_INTSOFF(dregs); \
189 } while (0)
190
191#define DMA_IRQ_EXIT(dma, dregs) do { \
192 if(DMA_ISBROKEN(dma)) DMA_INTSON(dregs); \
193 } while(0)
194
195#if 0 /* P3 this stuff is inline in ledma.c:init_restart_ledma() */
196/* Pause until counter runs out or BIT isn't set in the DMA condition
197 * register.
198 */
199static inline void sparc_dma_pause(struct sparc_dma_registers *regs,
200 unsigned long bit)
201{
202 int ctr = 50000; /* Let's find some bugs ;) */
203
204 /* Busy wait until the bit is not set any more */
205 while((regs->cond_reg&bit) && (ctr>0)) {
206 ctr--;
207 __delay(5);
208 }
209
210 /* Check for bogus outcome. */
211 if(!ctr)
212 panic("DMA timeout");
213}
214
215/* Reset the friggin' thing... */
216#define DMA_RESET(dma) do { \
217 struct sparc_dma_registers *regs = dma->regs; \
218 /* Let the current FIFO drain itself */ \
219 sparc_dma_pause(regs, (DMA_FIFO_ISDRAIN)); \
220 /* Reset the logic */ \
221 regs->cond_reg |= (DMA_RST_SCSI); /* assert */ \
222 __delay(400); /* let the bits set ;) */ \
223 regs->cond_reg &= ~(DMA_RST_SCSI); /* de-assert */ \
224 sparc_dma_enable_interrupts(regs); /* Re-enable interrupts */ \
225 /* Enable FAST transfers if available */ \
226 if(dma->revision>dvmarev1) regs->cond_reg |= DMA_3CLKS; \
227 dma->running = 0; \
228} while(0)
229#endif
8#endif
230
231#define for_each_dvma(dma) \
232 for((dma) = dma_chain; (dma); (dma) = (dma)->next)
233
234extern int get_dma_list(char *);
235extern int request_dma(unsigned int, __const__ char *);
236extern void free_dma(unsigned int);
237
238/* From PCI */
239
240#ifdef CONFIG_PCI
241extern int isa_dma_bridge_buggy;
242#else
243#define isa_dma_bridge_buggy (0)
244#endif
245
246/* Routines for data transfer buffers. */
247BTFIXUPDEF_CALL(char *, mmu_lockarea, char *, unsigned long)
248BTFIXUPDEF_CALL(void, mmu_unlockarea, char *, unsigned long)
249
250#define mmu_lockarea(vaddr,len) BTFIXUP_CALL(mmu_lockarea)(vaddr,len)
251#define mmu_unlockarea(vaddr,len) BTFIXUP_CALL(mmu_unlockarea)(vaddr,len)
252
253/* These are implementations for sbus_map_sg/sbus_unmap_sg... collapse later */
254BTFIXUPDEF_CALL(__u32, mmu_get_scsi_one, char *, unsigned long, struct sbus_bus *sbus)
255BTFIXUPDEF_CALL(void, mmu_get_scsi_sgl, struct scatterlist *, int, struct sbus_bus *sbus)
256BTFIXUPDEF_CALL(void, mmu_release_scsi_one, __u32, unsigned long, struct sbus_bus *sbus)
257BTFIXUPDEF_CALL(void, mmu_release_scsi_sgl, struct scatterlist *, int, struct sbus_bus *sbus)
258
259#define mmu_get_scsi_one(vaddr,len,sbus) BTFIXUP_CALL(mmu_get_scsi_one)(vaddr,len,sbus)
260#define mmu_get_scsi_sgl(sg,sz,sbus) BTFIXUP_CALL(mmu_get_scsi_sgl)(sg,sz,sbus)
261#define mmu_release_scsi_one(vaddr,len,sbus) BTFIXUP_CALL(mmu_release_scsi_one)(vaddr,len,sbus)
262#define mmu_release_scsi_sgl(sg,sz,sbus) BTFIXUP_CALL(mmu_release_scsi_sgl)(sg,sz,sbus)
263
264/*
265 * mmu_map/unmap are provided by iommu/iounit; Invalid to call on IIep.
266 *
267 * The mmu_map_dma_area establishes two mappings in one go.
268 * These mappings point to pages normally mapped at 'va' (linear address).
269 * First mapping is for CPU visible address at 'a', uncached.
270 * This is an alias, but it works because it is an uncached mapping.
271 * Second mapping is for device visible address, or "bus" address.
272 * The bus address is returned at '*pba'.
273 *
274 * These functions seem distinct, but are hard to split. On sun4c,
275 * at least for now, 'a' is equal to bus address, and returned in *pba.
276 * On sun4m, page attributes depend on the CPU type, so we have to
277 * know if we are mapping RAM or I/O, so it has to be an additional argument
278 * to a separate mapping function for CPU visible mappings.
279 */
280BTFIXUPDEF_CALL(int, mmu_map_dma_area, dma_addr_t *, unsigned long, unsigned long, int len)
281BTFIXUPDEF_CALL(struct page *, mmu_translate_dvma, unsigned long busa)
282BTFIXUPDEF_CALL(void, mmu_unmap_dma_area, unsigned long busa, int len)
283
284#define mmu_map_dma_area(pba,va,a,len) BTFIXUP_CALL(mmu_map_dma_area)(pba,va,a,len)
285#define mmu_unmap_dma_area(ba,len) BTFIXUP_CALL(mmu_unmap_dma_area)(ba,len)
286#define mmu_translate_dvma(ba) BTFIXUP_CALL(mmu_translate_dvma)(ba)
287
288#endif /* !(_ASM_SPARC_DMA_H) */
diff --git a/include/asm-sparc/dma_32.h b/include/asm-sparc/dma_32.h
new file mode 100644
index 000000000000..959d6c8a71ae
--- /dev/null
+++ b/include/asm-sparc/dma_32.h
@@ -0,0 +1,288 @@
1/* include/asm-sparc/dma.h
2 *
3 * Copyright 1995 (C) David S. Miller (davem@davemloft.net)
4 */
5
6#ifndef _ASM_SPARC_DMA_H
7#define _ASM_SPARC_DMA_H
8
9#include <linux/kernel.h>
10#include <linux/types.h>
11
12#include <asm/vac-ops.h> /* for invalidate's, etc. */
13#include <asm/sbus.h>
14#include <asm/delay.h>
15#include <asm/oplib.h>
16#include <asm/system.h>
17#include <asm/io.h>
18#include <linux/spinlock.h>
19
20struct page;
21extern spinlock_t dma_spin_lock;
22
23static inline unsigned long claim_dma_lock(void)
24{
25 unsigned long flags;
26 spin_lock_irqsave(&dma_spin_lock, flags);
27 return flags;
28}
29
30static inline void release_dma_lock(unsigned long flags)
31{
32 spin_unlock_irqrestore(&dma_spin_lock, flags);
33}
34
35/* These are irrelevant for Sparc DMA, but we leave it in so that
36 * things can compile.
37 */
38#define MAX_DMA_CHANNELS 8
39#define MAX_DMA_ADDRESS (~0UL)
40#define DMA_MODE_READ 1
41#define DMA_MODE_WRITE 2
42
43/* Useful constants */
44#define SIZE_16MB (16*1024*1024)
45#define SIZE_64K (64*1024)
46
47/* SBUS DMA controller reg offsets */
48#define DMA_CSR 0x00UL /* rw DMA control/status register 0x00 */
49#define DMA_ADDR 0x04UL /* rw DMA transfer address register 0x04 */
50#define DMA_COUNT 0x08UL /* rw DMA transfer count register 0x08 */
51#define DMA_TEST 0x0cUL /* rw DMA test/debug register 0x0c */
52
53/* DVMA chip revisions */
54enum dvma_rev {
55 dvmarev0,
56 dvmaesc1,
57 dvmarev1,
58 dvmarev2,
59 dvmarev3,
60 dvmarevplus,
61 dvmahme
62};
63
64#define DMA_HASCOUNT(rev) ((rev)==dvmaesc1)
65
66/* Linux DMA information structure, filled during probe. */
67struct sbus_dma {
68 struct sbus_dma *next;
69 struct sbus_dev *sdev;
70 void __iomem *regs;
71
72 /* Status, misc info */
73 int node; /* Prom node for this DMA device */
74 int running; /* Are we doing DMA now? */
75 int allocated; /* Are we "owned" by anyone yet? */
76
77 /* Transfer information. */
78 unsigned long addr; /* Start address of current transfer */
79 int nbytes; /* Size of current transfer */
80 int realbytes; /* For splitting up large transfers, etc. */
81
82 /* DMA revision */
83 enum dvma_rev revision;
84};
85
86extern struct sbus_dma *dma_chain;
87
88/* Broken hardware... */
89#ifdef CONFIG_SUN4
90/* Have to sort this out. Does rev0 work fine on sun4[cmd] without isbroken?
91 * Or is rev0 present only on sun4 boxes? -jj */
92#define DMA_ISBROKEN(dma) ((dma)->revision == dvmarev0 || (dma)->revision == dvmarev1)
93#else
94#define DMA_ISBROKEN(dma) ((dma)->revision == dvmarev1)
95#endif
96#define DMA_ISESC1(dma) ((dma)->revision == dvmaesc1)
97
98/* Main routines in dma.c */
99extern void dvma_init(struct sbus_bus *);
100
101/* Fields in the cond_reg register */
102/* First, the version identification bits */
103#define DMA_DEVICE_ID 0xf0000000 /* Device identification bits */
104#define DMA_VERS0 0x00000000 /* Sunray DMA version */
105#define DMA_ESCV1 0x40000000 /* DMA ESC Version 1 */
106#define DMA_VERS1 0x80000000 /* DMA rev 1 */
107#define DMA_VERS2 0xa0000000 /* DMA rev 2 */
108#define DMA_VERHME 0xb0000000 /* DMA hme gate array */
109#define DMA_VERSPLUS 0x90000000 /* DMA rev 1 PLUS */
110
111#define DMA_HNDL_INTR 0x00000001 /* An IRQ needs to be handled */
112#define DMA_HNDL_ERROR 0x00000002 /* We need to take an error */
113#define DMA_FIFO_ISDRAIN 0x0000000c /* The DMA FIFO is draining */
114#define DMA_INT_ENAB 0x00000010 /* Turn on interrupts */
115#define DMA_FIFO_INV 0x00000020 /* Invalidate the FIFO */
116#define DMA_ACC_SZ_ERR 0x00000040 /* The access size was bad */
117#define DMA_FIFO_STDRAIN 0x00000040 /* DMA_VERS1 Drain the FIFO */
118#define DMA_RST_SCSI 0x00000080 /* Reset the SCSI controller */
119#define DMA_RST_ENET DMA_RST_SCSI /* Reset the ENET controller */
120#define DMA_RST_BPP DMA_RST_SCSI /* Reset the BPP controller */
121#define DMA_ST_WRITE 0x00000100 /* write from device to memory */
122#define DMA_ENABLE 0x00000200 /* Fire up DMA, handle requests */
123#define DMA_PEND_READ 0x00000400 /* DMA_VERS1/0/PLUS Pending Read */
124#define DMA_ESC_BURST 0x00000800 /* 1=16byte 0=32byte */
125#define DMA_READ_AHEAD 0x00001800 /* DMA read ahead partial longword */
126#define DMA_DSBL_RD_DRN 0x00001000 /* No EC drain on slave reads */
127#define DMA_BCNT_ENAB 0x00002000 /* If on, use the byte counter */
128#define DMA_TERM_CNTR 0x00004000 /* Terminal counter */
129#define DMA_SCSI_SBUS64 0x00008000 /* HME: Enable 64-bit SBUS mode. */
130#define DMA_CSR_DISAB 0x00010000 /* No FIFO drains during csr */
131#define DMA_SCSI_DISAB 0x00020000 /* No FIFO drains during reg */
132#define DMA_DSBL_WR_INV 0x00020000 /* No EC inval. on slave writes */
133#define DMA_ADD_ENABLE 0x00040000 /* Special ESC DVMA optimization */
134#define DMA_E_BURSTS 0x000c0000 /* ENET: SBUS r/w burst mask */
135#define DMA_E_BURST32 0x00040000 /* ENET: SBUS 32 byte r/w burst */
136#define DMA_E_BURST16 0x00000000 /* ENET: SBUS 16 byte r/w burst */
137#define DMA_BRST_SZ 0x000c0000 /* SCSI: SBUS r/w burst size */
138#define DMA_BRST64 0x00080000 /* SCSI: 64byte bursts (HME on UltraSparc only) */
139#define DMA_BRST32 0x00040000 /* SCSI/BPP: 32byte bursts */
140#define DMA_BRST16 0x00000000 /* SCSI/BPP: 16byte bursts */
141#define DMA_BRST0 0x00080000 /* SCSI: no bursts (non-HME gate arrays) */
142#define DMA_ADDR_DISAB 0x00100000 /* No FIFO drains during addr */
143#define DMA_2CLKS 0x00200000 /* Each transfer = 2 clock ticks */
144#define DMA_3CLKS 0x00400000 /* Each transfer = 3 clock ticks */
145#define DMA_EN_ENETAUI DMA_3CLKS /* Put lance into AUI-cable mode */
146#define DMA_CNTR_DISAB 0x00800000 /* No IRQ when DMA_TERM_CNTR set */
147#define DMA_AUTO_NADDR 0x01000000 /* Use "auto nxt addr" feature */
148#define DMA_SCSI_ON 0x02000000 /* Enable SCSI dma */
149#define DMA_BPP_ON DMA_SCSI_ON /* Enable BPP dma */
150#define DMA_PARITY_OFF 0x02000000 /* HME: disable parity checking */
151#define DMA_LOADED_ADDR 0x04000000 /* Address has been loaded */
152#define DMA_LOADED_NADDR 0x08000000 /* Next address has been loaded */
153#define DMA_RESET_FAS366 0x08000000 /* HME: Assert RESET to FAS366 */
154
155/* Values describing the burst-size property from the PROM */
156#define DMA_BURST1 0x01
157#define DMA_BURST2 0x02
158#define DMA_BURST4 0x04
159#define DMA_BURST8 0x08
160#define DMA_BURST16 0x10
161#define DMA_BURST32 0x20
162#define DMA_BURST64 0x40
163#define DMA_BURSTBITS 0x7f
164
165/* Determine highest possible final transfer address given a base */
166#define DMA_MAXEND(addr) (0x01000000UL-(((unsigned long)(addr))&0x00ffffffUL))
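For instance (illustrative arithmetic only): DMA_MAXEND(0x00ff0000) is 0x01000000 - 0x00ff0000 = 0x00010000, i.e. at most 64 KB can be transferred from that base before the 16 MB boundary is crossed.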
167
168/* Yes, I hack a lot of elisp in my spare time... */
169#define DMA_ERROR_P(regs) ((((regs)->cond_reg) & DMA_HNDL_ERROR))
170#define DMA_IRQ_P(regs) ((((regs)->cond_reg) & (DMA_HNDL_INTR | DMA_HNDL_ERROR)))
171#define DMA_WRITE_P(regs) ((((regs)->cond_reg) & DMA_ST_WRITE))
172#define DMA_OFF(regs) ((((regs)->cond_reg) &= (~DMA_ENABLE)))
173#define DMA_INTSOFF(regs) ((((regs)->cond_reg) &= (~DMA_INT_ENAB)))
174#define DMA_INTSON(regs) ((((regs)->cond_reg) |= (DMA_INT_ENAB)))
175#define DMA_PUNTFIFO(regs) ((((regs)->cond_reg) |= DMA_FIFO_INV))
176#define DMA_SETSTART(regs, addr) ((((regs)->st_addr) = (char *) addr))
177#define DMA_BEGINDMA_W(regs) \
178 ((((regs)->cond_reg |= (DMA_ST_WRITE|DMA_ENABLE|DMA_INT_ENAB))))
179#define DMA_BEGINDMA_R(regs) \
180 ((((regs)->cond_reg |= ((DMA_ENABLE|DMA_INT_ENAB)&(~DMA_ST_WRITE)))))
181
182/* For certain DMA chips, we need to disable ints upon irq entry
183 * and turn them back on when we are done. So in any ESP interrupt
184 * handler you *must* call DMA_IRQ_ENTRY upon entry and DMA_IRQ_EXIT
185 * when leaving the handler. You have been warned...
186 */
187#define DMA_IRQ_ENTRY(dma, dregs) do { \
188 if(DMA_ISBROKEN(dma)) DMA_INTSOFF(dregs); \
189 } while (0)
190
191#define DMA_IRQ_EXIT(dma, dregs) do { \
192 if(DMA_ISBROKEN(dma)) DMA_INTSON(dregs); \
193 } while(0)
194
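/* Editor's note: a minimal, hypothetical sketch (not part of this patch) of
 * the DMA_IRQ_ENTRY/DMA_IRQ_EXIT convention described above.  The
 * example_esp structure, its fields and the handler name are placeholders;
 * only the macros and struct sparc_dma_registers come from this header, and
 * irqreturn_t/IRQ_HANDLED assume <linux/interrupt.h>.
 */
#if 0
static irqreturn_t example_esp_intr(int irq, void *dev_id)
{
	struct example_esp *esp = dev_id;	/* hypothetical driver state:
						 *   esp->dma   - DMA probe entry
						 *   esp->dregs - its struct sparc_dma_registers
						 */

	DMA_IRQ_ENTRY(esp->dma, esp->dregs);	/* mask DMA ints on broken (rev1) chips */
	/* ... service the ESP chip here ... */
	DMA_IRQ_EXIT(esp->dma, esp->dregs);	/* restore DMA interrupts */

	return IRQ_HANDLED;
}
#endif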
195#if 0 /* P3 this stuff is inline in ledma.c:init_restart_ledma() */
196/* Pause until counter runs out or BIT isn't set in the DMA condition
197 * register.
198 */
199static inline void sparc_dma_pause(struct sparc_dma_registers *regs,
200 unsigned long bit)
201{
202 int ctr = 50000; /* Let's find some bugs ;) */
203
204 /* Busy wait until the bit is not set any more */
205 while((regs->cond_reg&bit) && (ctr>0)) {
206 ctr--;
207 __delay(5);
208 }
209
210 /* Check for bogus outcome. */
211 if(!ctr)
212 panic("DMA timeout");
213}
214
215/* Reset the friggin' thing... */
216#define DMA_RESET(dma) do { \
217 struct sparc_dma_registers *regs = dma->regs; \
218 /* Let the current FIFO drain itself */ \
219 sparc_dma_pause(regs, (DMA_FIFO_ISDRAIN)); \
220 /* Reset the logic */ \
221 regs->cond_reg |= (DMA_RST_SCSI); /* assert */ \
222 __delay(400); /* let the bits set ;) */ \
223 regs->cond_reg &= ~(DMA_RST_SCSI); /* de-assert */ \
224 sparc_dma_enable_interrupts(regs); /* Re-enable interrupts */ \
225 /* Enable FAST transfers if available */ \
226 if(dma->revision>dvmarev1) regs->cond_reg |= DMA_3CLKS; \
227 dma->running = 0; \
228} while(0)
229#endif
230
231#define for_each_dvma(dma) \
232 for((dma) = dma_chain; (dma); (dma) = (dma)->next)
233
234extern int get_dma_list(char *);
235extern int request_dma(unsigned int, __const__ char *);
236extern void free_dma(unsigned int);
237
238/* From PCI */
239
240#ifdef CONFIG_PCI
241extern int isa_dma_bridge_buggy;
242#else
243#define isa_dma_bridge_buggy (0)
244#endif
245
246/* Routines for data transfer buffers. */
247BTFIXUPDEF_CALL(char *, mmu_lockarea, char *, unsigned long)
248BTFIXUPDEF_CALL(void, mmu_unlockarea, char *, unsigned long)
249
250#define mmu_lockarea(vaddr,len) BTFIXUP_CALL(mmu_lockarea)(vaddr,len)
251#define mmu_unlockarea(vaddr,len) BTFIXUP_CALL(mmu_unlockarea)(vaddr,len)
252
253/* These are implementations for sbus_map_sg/sbus_unmap_sg... collapse later */
254BTFIXUPDEF_CALL(__u32, mmu_get_scsi_one, char *, unsigned long, struct sbus_bus *sbus)
255BTFIXUPDEF_CALL(void, mmu_get_scsi_sgl, struct scatterlist *, int, struct sbus_bus *sbus)
256BTFIXUPDEF_CALL(void, mmu_release_scsi_one, __u32, unsigned long, struct sbus_bus *sbus)
257BTFIXUPDEF_CALL(void, mmu_release_scsi_sgl, struct scatterlist *, int, struct sbus_bus *sbus)
258
259#define mmu_get_scsi_one(vaddr,len,sbus) BTFIXUP_CALL(mmu_get_scsi_one)(vaddr,len,sbus)
260#define mmu_get_scsi_sgl(sg,sz,sbus) BTFIXUP_CALL(mmu_get_scsi_sgl)(sg,sz,sbus)
261#define mmu_release_scsi_one(vaddr,len,sbus) BTFIXUP_CALL(mmu_release_scsi_one)(vaddr,len,sbus)
262#define mmu_release_scsi_sgl(sg,sz,sbus) BTFIXUP_CALL(mmu_release_scsi_sgl)(sg,sz,sbus)
263
264/*
265 * mmu_map/unmap are provided by iommu/iounit; Invalid to call on IIep.
266 *
267 * The mmu_map_dma_area establishes two mappings in one go.
268 * These mappings point to pages normally mapped at 'va' (linear address).
269 * First mapping is for CPU visible address at 'a', uncached.
270 * This is an alias, but it works because it is an uncached mapping.
271 * Second mapping is for device visible address, or "bus" address.
272 * The bus address is returned at '*pba'.
273 *
274 * These functions seem distinct, but are hard to split. On sun4c,
275 * at least for now, 'a' is equal to the bus address and is returned in *pba.
276 * On sun4m, page attributes depend on the CPU type, so we have to
277 * know whether we are mapping RAM or I/O; hence it has to be an additional
278 * argument to a separate mapping function for CPU visible mappings.
279 */
280BTFIXUPDEF_CALL(int, mmu_map_dma_area, dma_addr_t *, unsigned long, unsigned long, int len)
281BTFIXUPDEF_CALL(struct page *, mmu_translate_dvma, unsigned long busa)
282BTFIXUPDEF_CALL(void, mmu_unmap_dma_area, unsigned long busa, int len)
283
284#define mmu_map_dma_area(pba,va,a,len) BTFIXUP_CALL(mmu_map_dma_area)(pba,va,a,len)
285#define mmu_unmap_dma_area(ba,len) BTFIXUP_CALL(mmu_unmap_dma_area)(ba,len)
286#define mmu_translate_dvma(ba) BTFIXUP_CALL(mmu_translate_dvma)(ba)
287
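/* Editor's note: a hypothetical sketch, not part of this patch, of the double
 * mapping described above.  Both addresses are placeholders: 'va' is the
 * linear address of the backing pages, 'va_uncached' is the CPU-visible
 * uncached alias chosen by the caller, and a nonzero return from
 * mmu_map_dma_area() is assumed to mean failure.
 */
#if 0
static void *example_map_consistent(unsigned long va, unsigned long va_uncached,
				    int len, dma_addr_t *dma_handle)
{
	/* On success *dma_handle holds the bus ("DVMA") address the device
	 * should use, and va_uncached is safe for the CPU to touch.
	 */
	if (mmu_map_dma_area(dma_handle, va, va_uncached, len) != 0)
		return NULL;
	return (void *)va_uncached;
}
#endif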
288#endif /* !(_ASM_SPARC_DMA_H) */
diff --git a/include/asm-sparc/dma_64.h b/include/asm-sparc/dma_64.h
new file mode 100644
index 000000000000..9d4c024bd3b3
--- /dev/null
+++ b/include/asm-sparc/dma_64.h
@@ -0,0 +1,205 @@
1/*
2 * include/asm-sparc64/dma.h
3 *
4 * Copyright 1996 (C) David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#ifndef _ASM_SPARC64_DMA_H
8#define _ASM_SPARC64_DMA_H
9
10#include <linux/kernel.h>
11#include <linux/types.h>
12#include <linux/spinlock.h>
13
14#include <asm/sbus.h>
15#include <asm/delay.h>
16#include <asm/oplib.h>
17
18/* These are irrelevant for Sparc DMA, but we leave them in so that
19 * things can compile.
20 */
21#define MAX_DMA_CHANNELS 8
22#define DMA_MODE_READ 1
23#define DMA_MODE_WRITE 2
24#define MAX_DMA_ADDRESS (~0UL)
25
26/* Useful constants */
27#define SIZE_16MB (16*1024*1024)
28#define SIZE_64K (64*1024)
29
30/* SBUS DMA controller reg offsets */
31#define DMA_CSR 0x00UL /* rw DMA control/status register 0x00 */
32#define DMA_ADDR 0x04UL /* rw DMA transfer address register 0x04 */
33#define DMA_COUNT 0x08UL /* rw DMA transfer count register 0x08 */
34#define DMA_TEST 0x0cUL /* rw DMA test/debug register 0x0c */
35
36/* DVMA chip revisions */
37enum dvma_rev {
38 dvmarev0,
39 dvmaesc1,
40 dvmarev1,
41 dvmarev2,
42 dvmarev3,
43 dvmarevplus,
44 dvmahme
45};
46
47#define DMA_HASCOUNT(rev) ((rev)==dvmaesc1)
48
49/* Linux DMA information structure, filled during probe. */
50struct sbus_dma {
51 struct sbus_dma *next;
52 struct sbus_dev *sdev;
53 void __iomem *regs;
54
55 /* Status, misc info */
56 int node; /* Prom node for this DMA device */
57 int running; /* Are we doing DMA now? */
58 int allocated; /* Are we "owned" by anyone yet? */
59
60 /* Transfer information. */
61 u32 addr; /* Start address of current transfer */
62 int nbytes; /* Size of current transfer */
63 int realbytes; /* For splitting up large transfers, etc. */
64
65 /* DMA revision */
66 enum dvma_rev revision;
67};
68
69extern struct sbus_dma *dma_chain;
70
71/* Broken hardware... */
72#define DMA_ISBROKEN(dma) ((dma)->revision == dvmarev1)
73#define DMA_ISESC1(dma) ((dma)->revision == dvmaesc1)
74
75/* Main routines in dma.c */
76extern void dvma_init(struct sbus_bus *);
77
78/* Fields in the cond_reg register */
79/* First, the version identification bits */
80#define DMA_DEVICE_ID 0xf0000000 /* Device identification bits */
81#define DMA_VERS0 0x00000000 /* Sunray DMA version */
82#define DMA_ESCV1 0x40000000 /* DMA ESC Version 1 */
83#define DMA_VERS1 0x80000000 /* DMA rev 1 */
84#define DMA_VERS2 0xa0000000 /* DMA rev 2 */
85#define DMA_VERHME 0xb0000000 /* DMA hme gate array */
86#define DMA_VERSPLUS 0x90000000 /* DMA rev 1 PLUS */
87
88#define DMA_HNDL_INTR 0x00000001 /* An IRQ needs to be handled */
89#define DMA_HNDL_ERROR 0x00000002 /* We need to take an error */
90#define DMA_FIFO_ISDRAIN 0x0000000c /* The DMA FIFO is draining */
91#define DMA_INT_ENAB 0x00000010 /* Turn on interrupts */
92#define DMA_FIFO_INV 0x00000020 /* Invalidate the FIFO */
93#define DMA_ACC_SZ_ERR 0x00000040 /* The access size was bad */
94#define DMA_FIFO_STDRAIN 0x00000040 /* DMA_VERS1 Drain the FIFO */
95#define DMA_RST_SCSI 0x00000080 /* Reset the SCSI controller */
96#define DMA_RST_ENET DMA_RST_SCSI /* Reset the ENET controller */
97#define DMA_ST_WRITE 0x00000100 /* write from device to memory */
98#define DMA_ENABLE 0x00000200 /* Fire up DMA, handle requests */
99#define DMA_PEND_READ 0x00000400 /* DMA_VERS1/0/PLUS Pending Read */
100#define DMA_ESC_BURST 0x00000800 /* 1=16byte 0=32byte */
101#define DMA_READ_AHEAD 0x00001800 /* DMA read ahead partial longword */
102#define DMA_DSBL_RD_DRN 0x00001000 /* No EC drain on slave reads */
103#define DMA_BCNT_ENAB 0x00002000 /* If on, use the byte counter */
104#define DMA_TERM_CNTR 0x00004000 /* Terminal counter */
105#define DMA_SCSI_SBUS64 0x00008000 /* HME: Enable 64-bit SBUS mode. */
106#define DMA_CSR_DISAB 0x00010000 /* No FIFO drains during csr */
107#define DMA_SCSI_DISAB 0x00020000 /* No FIFO drains during reg */
108#define DMA_DSBL_WR_INV 0x00020000 /* No EC inval. on slave writes */
109#define DMA_ADD_ENABLE 0x00040000 /* Special ESC DVMA optimization */
110#define DMA_E_BURSTS 0x000c0000 /* ENET: SBUS r/w burst mask */
111#define DMA_E_BURST32 0x00040000 /* ENET: SBUS 32 byte r/w burst */
112#define DMA_E_BURST16 0x00000000 /* ENET: SBUS 16 byte r/w burst */
113#define DMA_BRST_SZ 0x000c0000 /* SCSI: SBUS r/w burst size */
114#define DMA_BRST64 0x000c0000 /* SCSI: 64byte bursts (HME on UltraSparc only) */
115#define DMA_BRST32 0x00040000 /* SCSI: 32byte bursts */
116#define DMA_BRST16 0x00000000 /* SCSI: 16byte bursts */
117#define DMA_BRST0 0x00080000 /* SCSI: no bursts (non-HME gate arrays) */
118#define DMA_ADDR_DISAB 0x00100000 /* No FIFO drains during addr */
119#define DMA_2CLKS 0x00200000 /* Each transfer = 2 clock ticks */
120#define DMA_3CLKS 0x00400000 /* Each transfer = 3 clock ticks */
121#define DMA_EN_ENETAUI DMA_3CLKS /* Put lance into AUI-cable mode */
122#define DMA_CNTR_DISAB 0x00800000 /* No IRQ when DMA_TERM_CNTR set */
123#define DMA_AUTO_NADDR 0x01000000 /* Use "auto nxt addr" feature */
124#define DMA_SCSI_ON 0x02000000 /* Enable SCSI dma */
125#define DMA_PARITY_OFF 0x02000000 /* HME: disable parity checking */
126#define DMA_LOADED_ADDR 0x04000000 /* Address has been loaded */
127#define DMA_LOADED_NADDR 0x08000000 /* Next address has been loaded */
128#define DMA_RESET_FAS366 0x08000000 /* HME: Assert RESET to FAS366 */
129
130/* Values describing the burst-size property from the PROM */
131#define DMA_BURST1 0x01
132#define DMA_BURST2 0x02
133#define DMA_BURST4 0x04
134#define DMA_BURST8 0x08
135#define DMA_BURST16 0x10
136#define DMA_BURST32 0x20
137#define DMA_BURST64 0x40
138#define DMA_BURSTBITS 0x7f
139
140/* Determine highest possible final transfer address given a base */
141#define DMA_MAXEND(addr) (0x01000000UL-(((unsigned long)(addr))&0x00ffffffUL))
142
143/* Yes, I hack a lot of elisp in my spare time... */
144#define DMA_ERROR_P(regs) ((sbus_readl((regs) + DMA_CSR) & DMA_HNDL_ERROR))
145#define DMA_IRQ_P(regs) ((sbus_readl((regs) + DMA_CSR)) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
146#define DMA_WRITE_P(regs) ((sbus_readl((regs) + DMA_CSR) & DMA_ST_WRITE))
147#define DMA_OFF(__regs) \
148do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
149 tmp &= ~DMA_ENABLE; \
150 sbus_writel(tmp, (__regs) + DMA_CSR); \
151} while(0)
152#define DMA_INTSOFF(__regs) \
153do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
154 tmp &= ~DMA_INT_ENAB; \
155 sbus_writel(tmp, (__regs) + DMA_CSR); \
156} while(0)
157#define DMA_INTSON(__regs) \
158do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
159 tmp |= DMA_INT_ENAB; \
160 sbus_writel(tmp, (__regs) + DMA_CSR); \
161} while(0)
162#define DMA_PUNTFIFO(__regs) \
163do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
164 tmp |= DMA_FIFO_INV; \
165 sbus_writel(tmp, (__regs) + DMA_CSR); \
166} while(0)
167#define DMA_SETSTART(__regs, __addr) \
168 sbus_writel((u32)(__addr), (__regs) + DMA_ADDR);
169#define DMA_BEGINDMA_W(__regs) \
170do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
171 tmp |= (DMA_ST_WRITE|DMA_ENABLE|DMA_INT_ENAB); \
172 sbus_writel(tmp, (__regs) + DMA_CSR); \
173} while(0)
174#define DMA_BEGINDMA_R(__regs) \
175do { u32 tmp = sbus_readl((__regs) + DMA_CSR); \
176 tmp |= (DMA_ENABLE|DMA_INT_ENAB); \
177 tmp &= ~DMA_ST_WRITE; \
178 sbus_writel(tmp, (__regs) + DMA_CSR); \
179} while(0)
180
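/* Editor's note: a hypothetical usage sketch, not part of this patch.  It
 * shows how the macros above drive the controller through sbus_readl()/
 * sbus_writel() on the __iomem cookie rather than dereferencing a register
 * struct; 'dvma_addr' is assumed to be a bus address already set up by the
 * IOMMU code.
 */
#if 0
static void example_start_dvma_write(struct sbus_dma *dma,
				     u32 dvma_addr, int len)
{
	void __iomem *regs = dma->regs;

	DMA_SETSTART(regs, dvma_addr);			/* program DMA_ADDR */
	if (DMA_HASCOUNT(dma->revision))		/* only ESC1 has a byte counter */
		sbus_writel(len, regs + DMA_COUNT);

	dma->addr = dvma_addr;
	dma->nbytes = len;
	DMA_BEGINDMA_W(regs);		/* DMA_ST_WRITE | DMA_ENABLE | DMA_INT_ENAB */
	dma->running = 1;
}
#endif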
181/* For certain DMA chips, we need to disable ints upon irq entry
182 * and turn them back on when we are done. So in any ESP interrupt
183 * handler you *must* call DMA_IRQ_ENTRY upon entry and DMA_IRQ_EXIT
184 * when leaving the handler. You have been warned...
185 */
186#define DMA_IRQ_ENTRY(dma, dregs) do { \
187 if(DMA_ISBROKEN(dma)) DMA_INTSOFF(dregs); \
188 } while (0)
189
190#define DMA_IRQ_EXIT(dma, dregs) do { \
191 if(DMA_ISBROKEN(dma)) DMA_INTSON(dregs); \
192 } while(0)
193
194#define for_each_dvma(dma) \
195 for((dma) = dma_chain; (dma); (dma) = (dma)->next)
196
197/* From PCI */
198
199#ifdef CONFIG_PCI
200extern int isa_dma_bridge_buggy;
201#else
202#define isa_dma_bridge_buggy (0)
203#endif
204
205#endif /* !(_ASM_SPARC64_DMA_H) */
diff --git a/include/asm-sparc/ebus.h b/include/asm-sparc/ebus.h
index 491f85d662df..a5da2d00cd18 100644
--- a/include/asm-sparc/ebus.h
+++ b/include/asm-sparc/ebus.h
@@ -1,99 +1,8 @@
1/* 1#ifndef ___ASM_SPARC_EBUS_H
2 * ebus.h: PCI to Ebus pseudo driver software state. 2#define ___ASM_SPARC_EBUS_H
3 * 3#if defined(__sparc__) && defined(__arch64__)
4 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) 4#include <asm-sparc/ebus_64.h>
5 * 5#else
6 * Adopted for sparc by V. Roganov and G. Raiko. 6#include <asm-sparc/ebus_32.h>
7 */ 7#endif
8
9#ifndef __SPARC_EBUS_H
10#define __SPARC_EBUS_H
11
12#ifndef _LINUX_IOPORT_H
13#include <linux/ioport.h>
14#endif 8#endif
15#include <asm/oplib.h>
16#include <asm/prom.h>
17#include <asm/of_device.h>
18
19struct linux_ebus_child {
20 struct linux_ebus_child *next;
21 struct linux_ebus_device *parent;
22 struct linux_ebus *bus;
23 struct device_node *prom_node;
24 struct resource resource[PROMREG_MAX];
25 int num_addrs;
26 unsigned int irqs[PROMINTR_MAX];
27 int num_irqs;
28};
29
30struct linux_ebus_device {
31 struct of_device ofdev;
32 struct linux_ebus_device *next;
33 struct linux_ebus_child *children;
34 struct linux_ebus *bus;
35 struct device_node *prom_node;
36 struct resource resource[PROMREG_MAX];
37 int num_addrs;
38 unsigned int irqs[PROMINTR_MAX];
39 int num_irqs;
40};
41#define to_ebus_device(d) container_of(d, struct linux_ebus_device, ofdev.dev)
42
43struct linux_ebus {
44 struct of_device ofdev;
45 struct linux_ebus *next;
46 struct linux_ebus_device *devices;
47 struct linux_pbm_info *parent;
48 struct pci_dev *self;
49 struct device_node *prom_node;
50};
51#define to_ebus(d) container_of(d, struct linux_ebus, ofdev.dev)
52
53struct linux_ebus_dma {
54 unsigned int dcsr;
55 unsigned int dacr;
56 unsigned int dbcr;
57};
58
59#define EBUS_DCSR_INT_PEND 0x00000001
60#define EBUS_DCSR_ERR_PEND 0x00000002
61#define EBUS_DCSR_DRAIN 0x00000004
62#define EBUS_DCSR_INT_EN 0x00000010
63#define EBUS_DCSR_RESET 0x00000080
64#define EBUS_DCSR_WRITE 0x00000100
65#define EBUS_DCSR_EN_DMA 0x00000200
66#define EBUS_DCSR_CYC_PEND 0x00000400
67#define EBUS_DCSR_DIAG_RD_DONE 0x00000800
68#define EBUS_DCSR_DIAG_WR_DONE 0x00001000
69#define EBUS_DCSR_EN_CNT 0x00002000
70#define EBUS_DCSR_TC 0x00004000
71#define EBUS_DCSR_DIS_CSR_DRN 0x00010000
72#define EBUS_DCSR_BURST_SZ_MASK 0x000c0000
73#define EBUS_DCSR_BURST_SZ_1 0x00080000
74#define EBUS_DCSR_BURST_SZ_4 0x00000000
75#define EBUS_DCSR_BURST_SZ_8 0x00040000
76#define EBUS_DCSR_BURST_SZ_16 0x000c0000
77#define EBUS_DCSR_DIAG_EN 0x00100000
78#define EBUS_DCSR_DIS_ERR_PEND 0x00400000
79#define EBUS_DCSR_TCI_DIS 0x00800000
80#define EBUS_DCSR_EN_NEXT 0x01000000
81#define EBUS_DCSR_DMA_ON 0x02000000
82#define EBUS_DCSR_A_LOADED 0x04000000
83#define EBUS_DCSR_NA_LOADED 0x08000000
84#define EBUS_DCSR_DEV_ID_MASK 0xf0000000
85
86extern struct linux_ebus *ebus_chain;
87
88extern void ebus_init(void);
89
90#define for_each_ebus(bus) \
91 for((bus) = ebus_chain; (bus); (bus) = (bus)->next)
92
93#define for_each_ebusdev(dev, bus) \
94 for((dev) = (bus)->devices; (dev); (dev) = (dev)->next)
95
96#define for_each_edevchild(dev, child) \
97 for((child) = (dev)->children; (child); (child) = (child)->next)
98
99#endif /* !(__SPARC_EBUS_H) */
diff --git a/include/asm-sparc/ebus_32.h b/include/asm-sparc/ebus_32.h
new file mode 100644
index 000000000000..29cb7dfc6b79
--- /dev/null
+++ b/include/asm-sparc/ebus_32.h
@@ -0,0 +1,99 @@
1/*
2 * ebus.h: PCI to Ebus pseudo driver software state.
3 *
4 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
5 *
6 * Adopted for sparc by V. Roganov and G. Raiko.
7 */
8
9#ifndef __SPARC_EBUS_H
10#define __SPARC_EBUS_H
11
12#ifndef _LINUX_IOPORT_H
13#include <linux/ioport.h>
14#endif
15#include <asm/oplib.h>
16#include <asm/prom.h>
17#include <asm/of_device.h>
18
19struct linux_ebus_child {
20 struct linux_ebus_child *next;
21 struct linux_ebus_device *parent;
22 struct linux_ebus *bus;
23 struct device_node *prom_node;
24 struct resource resource[PROMREG_MAX];
25 int num_addrs;
26 unsigned int irqs[PROMINTR_MAX];
27 int num_irqs;
28};
29
30struct linux_ebus_device {
31 struct of_device ofdev;
32 struct linux_ebus_device *next;
33 struct linux_ebus_child *children;
34 struct linux_ebus *bus;
35 struct device_node *prom_node;
36 struct resource resource[PROMREG_MAX];
37 int num_addrs;
38 unsigned int irqs[PROMINTR_MAX];
39 int num_irqs;
40};
41#define to_ebus_device(d) container_of(d, struct linux_ebus_device, ofdev.dev)
42
43struct linux_ebus {
44 struct of_device ofdev;
45 struct linux_ebus *next;
46 struct linux_ebus_device *devices;
47 struct linux_pbm_info *parent;
48 struct pci_dev *self;
49 struct device_node *prom_node;
50};
51#define to_ebus(d) container_of(d, struct linux_ebus, ofdev.dev)
52
53struct linux_ebus_dma {
54 unsigned int dcsr;
55 unsigned int dacr;
56 unsigned int dbcr;
57};
58
59#define EBUS_DCSR_INT_PEND 0x00000001
60#define EBUS_DCSR_ERR_PEND 0x00000002
61#define EBUS_DCSR_DRAIN 0x00000004
62#define EBUS_DCSR_INT_EN 0x00000010
63#define EBUS_DCSR_RESET 0x00000080
64#define EBUS_DCSR_WRITE 0x00000100
65#define EBUS_DCSR_EN_DMA 0x00000200
66#define EBUS_DCSR_CYC_PEND 0x00000400
67#define EBUS_DCSR_DIAG_RD_DONE 0x00000800
68#define EBUS_DCSR_DIAG_WR_DONE 0x00001000
69#define EBUS_DCSR_EN_CNT 0x00002000
70#define EBUS_DCSR_TC 0x00004000
71#define EBUS_DCSR_DIS_CSR_DRN 0x00010000
72#define EBUS_DCSR_BURST_SZ_MASK 0x000c0000
73#define EBUS_DCSR_BURST_SZ_1 0x00080000
74#define EBUS_DCSR_BURST_SZ_4 0x00000000
75#define EBUS_DCSR_BURST_SZ_8 0x00040000
76#define EBUS_DCSR_BURST_SZ_16 0x000c0000
77#define EBUS_DCSR_DIAG_EN 0x00100000
78#define EBUS_DCSR_DIS_ERR_PEND 0x00400000
79#define EBUS_DCSR_TCI_DIS 0x00800000
80#define EBUS_DCSR_EN_NEXT 0x01000000
81#define EBUS_DCSR_DMA_ON 0x02000000
82#define EBUS_DCSR_A_LOADED 0x04000000
83#define EBUS_DCSR_NA_LOADED 0x08000000
84#define EBUS_DCSR_DEV_ID_MASK 0xf0000000
85
86extern struct linux_ebus *ebus_chain;
87
88extern void ebus_init(void);
89
90#define for_each_ebus(bus) \
91 for((bus) = ebus_chain; (bus); (bus) = (bus)->next)
92
93#define for_each_ebusdev(dev, bus) \
94 for((dev) = (bus)->devices; (dev); (dev) = (dev)->next)
95
96#define for_each_edevchild(dev, child) \
97 for((child) = (dev)->children; (child); (child) = (child)->next)
98
99#endif /* !(__SPARC_EBUS_H) */
diff --git a/include/asm-sparc/ebus_64.h b/include/asm-sparc/ebus_64.h
new file mode 100644
index 000000000000..fcc62b97ced5
--- /dev/null
+++ b/include/asm-sparc/ebus_64.h
@@ -0,0 +1,94 @@
1/*
2 * ebus.h: PCI to Ebus pseudo driver software state.
3 *
4 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
5 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
6 */
7
8#ifndef __SPARC64_EBUS_H
9#define __SPARC64_EBUS_H
10
11#include <asm/oplib.h>
12#include <asm/prom.h>
13#include <asm/of_device.h>
14
15struct linux_ebus_child {
16 struct linux_ebus_child *next;
17 struct linux_ebus_device *parent;
18 struct linux_ebus *bus;
19 struct device_node *prom_node;
20 struct resource resource[PROMREG_MAX];
21 int num_addrs;
22 unsigned int irqs[PROMINTR_MAX];
23 int num_irqs;
24};
25
26struct linux_ebus_device {
27 struct of_device ofdev;
28 struct linux_ebus_device *next;
29 struct linux_ebus_child *children;
30 struct linux_ebus *bus;
31 struct device_node *prom_node;
32 struct resource resource[PROMREG_MAX];
33 int num_addrs;
34 unsigned int irqs[PROMINTR_MAX];
35 int num_irqs;
36};
37#define to_ebus_device(d) container_of(d, struct linux_ebus_device, ofdev.dev)
38
39struct linux_ebus {
40 struct of_device ofdev;
41 struct linux_ebus *next;
42 struct linux_ebus_device *devices;
43 struct pci_dev *self;
44 int index;
45 int is_rio;
46 struct device_node *prom_node;
47};
48#define to_ebus(d) container_of(d, struct linux_ebus, ofdev.dev)
49
50struct ebus_dma_info {
51 spinlock_t lock;
52 void __iomem *regs;
53
54 unsigned int flags;
55#define EBUS_DMA_FLAG_USE_EBDMA_HANDLER 0x00000001
56#define EBUS_DMA_FLAG_TCI_DISABLE 0x00000002
57
 58 /* These are only valid if EBUS_DMA_FLAG_USE_EBDMA_HANDLER is
59 * set.
60 */
61 void (*callback)(struct ebus_dma_info *p, int event, void *cookie);
62 void *client_cookie;
63 unsigned int irq;
64#define EBUS_DMA_EVENT_ERROR 1
65#define EBUS_DMA_EVENT_DMA 2
66#define EBUS_DMA_EVENT_DEVICE 4
67
68 unsigned char name[64];
69};
70
71extern int ebus_dma_register(struct ebus_dma_info *p);
72extern int ebus_dma_irq_enable(struct ebus_dma_info *p, int on);
73extern void ebus_dma_unregister(struct ebus_dma_info *p);
74extern int ebus_dma_request(struct ebus_dma_info *p, dma_addr_t bus_addr,
75 size_t len);
76extern void ebus_dma_prepare(struct ebus_dma_info *p, int write);
77extern unsigned int ebus_dma_residue(struct ebus_dma_info *p);
78extern unsigned int ebus_dma_addr(struct ebus_dma_info *p);
79extern void ebus_dma_enable(struct ebus_dma_info *p, int on);
80
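/* Editor's note: a hypothetical sketch (not part of this patch) of wiring up
 * the helper API declared above.  The regs/irq values, names and the callback
 * are placeholders a real driver would take from its of_device, and the
 * direction argument to ebus_dma_prepare() is assumed to follow the
 * EBUS_DCSR_WRITE (device-to-memory) sense.
 */
#if 0
static void example_dma_event(struct ebus_dma_info *p, int event, void *cookie)
{
	if (event == EBUS_DMA_EVENT_ERROR)
		printk(KERN_ERR "%s: EBUS DMA error\n", p->name);
	/* EBUS_DMA_EVENT_DMA / _DEVICE would complete the request here. */
}

static int example_setup_ebus_dma(struct ebus_dma_info *p,
				  void __iomem *regs, unsigned int irq,
				  dma_addr_t buf, size_t len)
{
	memset(p, 0, sizeof(*p));
	strlcpy((char *)p->name, "example", sizeof(p->name));
	p->regs = regs;
	p->irq = irq;
	p->flags = EBUS_DMA_FLAG_USE_EBDMA_HANDLER;	/* let the helper own the IRQ */
	p->callback = example_dma_event;

	if (ebus_dma_register(p))
		return -ENODEV;
	ebus_dma_irq_enable(p, 1);

	ebus_dma_prepare(p, 1);
	ebus_dma_enable(p, 1);
	return ebus_dma_request(p, buf, len);
}
#endif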
81extern struct linux_ebus *ebus_chain;
82
83extern void ebus_init(void);
84
85#define for_each_ebus(bus) \
86 for((bus) = ebus_chain; (bus); (bus) = (bus)->next)
87
88#define for_each_ebusdev(dev, bus) \
89 for((dev) = (bus)->devices; (dev); (dev) = (dev)->next)
90
91#define for_each_edevchild(dev, child) \
92 for((child) = (dev)->children; (child); (child) = (child)->next)
93
94#endif /* !(__SPARC64_EBUS_H) */
diff --git a/include/asm-sparc/elf.h b/include/asm-sparc/elf.h
index d043f80bc2fd..f035c45d7b5e 100644
--- a/include/asm-sparc/elf.h
+++ b/include/asm-sparc/elf.h
@@ -1,145 +1,8 @@
1#ifndef __ASMSPARC_ELF_H 1#ifndef ___ASM_SPARC_ELF_H
2#define __ASMSPARC_ELF_H 2#define ___ASM_SPARC_ELF_H
3 3#if defined(__sparc__) && defined(__arch64__)
4/* 4#include <asm-sparc/elf_64.h>
5 * ELF register definitions..
6 */
7
8#include <asm/ptrace.h>
9
10/*
11 * Sparc section types
12 */
13#define STT_REGISTER 13
14
15/*
16 * Sparc ELF relocation types
17 */
18#define R_SPARC_NONE 0
19#define R_SPARC_8 1
20#define R_SPARC_16 2
21#define R_SPARC_32 3
22#define R_SPARC_DISP8 4
23#define R_SPARC_DISP16 5
24#define R_SPARC_DISP32 6
25#define R_SPARC_WDISP30 7
26#define R_SPARC_WDISP22 8
27#define R_SPARC_HI22 9
28#define R_SPARC_22 10
29#define R_SPARC_13 11
30#define R_SPARC_LO10 12
31#define R_SPARC_GOT10 13
32#define R_SPARC_GOT13 14
33#define R_SPARC_GOT22 15
34#define R_SPARC_PC10 16
35#define R_SPARC_PC22 17
36#define R_SPARC_WPLT30 18
37#define R_SPARC_COPY 19
38#define R_SPARC_GLOB_DAT 20
39#define R_SPARC_JMP_SLOT 21
40#define R_SPARC_RELATIVE 22
41#define R_SPARC_UA32 23
42#define R_SPARC_PLT32 24
43#define R_SPARC_HIPLT22 25
44#define R_SPARC_LOPLT10 26
45#define R_SPARC_PCPLT32 27
46#define R_SPARC_PCPLT22 28
47#define R_SPARC_PCPLT10 29
48#define R_SPARC_10 30
49#define R_SPARC_11 31
50#define R_SPARC_64 32
51#define R_SPARC_OLO10 33
52#define R_SPARC_WDISP16 40
53#define R_SPARC_WDISP19 41
54#define R_SPARC_7 43
55#define R_SPARC_5 44
56#define R_SPARC_6 45
57
58/* Bits present in AT_HWCAP, primarily for Sparc32. */
59
60#define HWCAP_SPARC_FLUSH 1 /* CPU supports flush instruction. */
61#define HWCAP_SPARC_STBAR 2
62#define HWCAP_SPARC_SWAP 4
63#define HWCAP_SPARC_MULDIV 8
64#define HWCAP_SPARC_V9 16
65#define HWCAP_SPARC_ULTRA3 32
66
67#define CORE_DUMP_USE_REGSET
68
69/* Format is:
70 * G0 --> G7
71 * O0 --> O7
72 * L0 --> L7
73 * I0 --> I7
74 * PSR, PC, nPC, Y, WIM, TBR
75 */
76typedef unsigned long elf_greg_t;
77#define ELF_NGREG 38
78typedef elf_greg_t elf_gregset_t[ELF_NGREG];
79
80typedef struct {
81 union {
82 unsigned long pr_regs[32];
83 double pr_dregs[16];
84 } pr_fr;
85 unsigned long __unused;
86 unsigned long pr_fsr;
87 unsigned char pr_qcnt;
88 unsigned char pr_q_entrysize;
89 unsigned char pr_en;
90 unsigned int pr_q[64];
91} elf_fpregset_t;
92
93#include <asm/mbus.h>
94
95/*
96 * This is used to ensure we don't load something for the wrong architecture.
97 */
98#define elf_check_arch(x) ((x)->e_machine == EM_SPARC)
99
100/*
101 * These are used to set parameters in the core dumps.
102 */
103#define ELF_ARCH EM_SPARC
104#define ELF_CLASS ELFCLASS32
105#define ELF_DATA ELFDATA2MSB
106
107#define USE_ELF_CORE_DUMP
108#ifndef CONFIG_SUN4
109#define ELF_EXEC_PAGESIZE 4096
110#else 5#else
111#define ELF_EXEC_PAGESIZE 8192 6#include <asm-sparc/elf_32.h>
7#endif
112#endif 8#endif
113
114
115/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
116 use of this is to invoke "./ld.so someprog" to test out a new version of
117 the loader. We need to make sure that it is out of the way of the program
118 that it will "exec", and that there is sufficient room for the brk. */
119
120#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
121
122/* This yields a mask that user programs can use to figure out what
123 instruction set this cpu supports. This can NOT be done in userspace
124 on Sparc. */
125
126/* Sun4c has none of the capabilities, most sun4m's have them all.
127 * XXX This is gross, set some global variable at boot time. -DaveM
128 */
129#define ELF_HWCAP ((ARCH_SUN4C_SUN4) ? 0 : \
130 (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | \
131 HWCAP_SPARC_SWAP | \
132 ((srmmu_modtype != Cypress && \
133 srmmu_modtype != Cypress_vE && \
134 srmmu_modtype != Cypress_vD) ? \
135 HWCAP_SPARC_MULDIV : 0)))
136
137/* This yields a string that ld.so will use to load implementation
138 specific libraries for optimization. This is more specific in
139 intent than poking at uname or /proc/cpuinfo. */
140
141#define ELF_PLATFORM (NULL)
142
143#define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
144
145#endif /* !(__ASMSPARC_ELF_H) */
diff --git a/include/asm-sparc/elf_32.h b/include/asm-sparc/elf_32.h
new file mode 100644
index 000000000000..d043f80bc2fd
--- /dev/null
+++ b/include/asm-sparc/elf_32.h
@@ -0,0 +1,145 @@
1#ifndef __ASMSPARC_ELF_H
2#define __ASMSPARC_ELF_H
3
4/*
5 * ELF register definitions..
6 */
7
8#include <asm/ptrace.h>
9
10/*
11 * Sparc section types
12 */
13#define STT_REGISTER 13
14
15/*
16 * Sparc ELF relocation types
17 */
18#define R_SPARC_NONE 0
19#define R_SPARC_8 1
20#define R_SPARC_16 2
21#define R_SPARC_32 3
22#define R_SPARC_DISP8 4
23#define R_SPARC_DISP16 5
24#define R_SPARC_DISP32 6
25#define R_SPARC_WDISP30 7
26#define R_SPARC_WDISP22 8
27#define R_SPARC_HI22 9
28#define R_SPARC_22 10
29#define R_SPARC_13 11
30#define R_SPARC_LO10 12
31#define R_SPARC_GOT10 13
32#define R_SPARC_GOT13 14
33#define R_SPARC_GOT22 15
34#define R_SPARC_PC10 16
35#define R_SPARC_PC22 17
36#define R_SPARC_WPLT30 18
37#define R_SPARC_COPY 19
38#define R_SPARC_GLOB_DAT 20
39#define R_SPARC_JMP_SLOT 21
40#define R_SPARC_RELATIVE 22
41#define R_SPARC_UA32 23
42#define R_SPARC_PLT32 24
43#define R_SPARC_HIPLT22 25
44#define R_SPARC_LOPLT10 26
45#define R_SPARC_PCPLT32 27
46#define R_SPARC_PCPLT22 28
47#define R_SPARC_PCPLT10 29
48#define R_SPARC_10 30
49#define R_SPARC_11 31
50#define R_SPARC_64 32
51#define R_SPARC_OLO10 33
52#define R_SPARC_WDISP16 40
53#define R_SPARC_WDISP19 41
54#define R_SPARC_7 43
55#define R_SPARC_5 44
56#define R_SPARC_6 45
57
58/* Bits present in AT_HWCAP, primarily for Sparc32. */
59
60#define HWCAP_SPARC_FLUSH 1 /* CPU supports flush instruction. */
61#define HWCAP_SPARC_STBAR 2
62#define HWCAP_SPARC_SWAP 4
63#define HWCAP_SPARC_MULDIV 8
64#define HWCAP_SPARC_V9 16
65#define HWCAP_SPARC_ULTRA3 32
66
67#define CORE_DUMP_USE_REGSET
68
69/* Format is:
70 * G0 --> G7
71 * O0 --> O7
72 * L0 --> L7
73 * I0 --> I7
74 * PSR, PC, nPC, Y, WIM, TBR
75 */
76typedef unsigned long elf_greg_t;
77#define ELF_NGREG 38
78typedef elf_greg_t elf_gregset_t[ELF_NGREG];
79
80typedef struct {
81 union {
82 unsigned long pr_regs[32];
83 double pr_dregs[16];
84 } pr_fr;
85 unsigned long __unused;
86 unsigned long pr_fsr;
87 unsigned char pr_qcnt;
88 unsigned char pr_q_entrysize;
89 unsigned char pr_en;
90 unsigned int pr_q[64];
91} elf_fpregset_t;
92
93#include <asm/mbus.h>
94
95/*
96 * This is used to ensure we don't load something for the wrong architecture.
97 */
98#define elf_check_arch(x) ((x)->e_machine == EM_SPARC)
99
100/*
101 * These are used to set parameters in the core dumps.
102 */
103#define ELF_ARCH EM_SPARC
104#define ELF_CLASS ELFCLASS32
105#define ELF_DATA ELFDATA2MSB
106
107#define USE_ELF_CORE_DUMP
108#ifndef CONFIG_SUN4
109#define ELF_EXEC_PAGESIZE 4096
110#else
111#define ELF_EXEC_PAGESIZE 8192
112#endif
113
114
115/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
116 use of this is to invoke "./ld.so someprog" to test out a new version of
117 the loader. We need to make sure that it is out of the way of the program
118 that it will "exec", and that there is sufficient room for the brk. */
119
120#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
121
122/* This yields a mask that user programs can use to figure out what
123 instruction set this cpu supports. This can NOT be done in userspace
124 on Sparc. */
125
126/* Sun4c has none of the capabilities, most sun4m's have them all.
127 * XXX This is gross, set some global variable at boot time. -DaveM
128 */
129#define ELF_HWCAP ((ARCH_SUN4C_SUN4) ? 0 : \
130 (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | \
131 HWCAP_SPARC_SWAP | \
132 ((srmmu_modtype != Cypress && \
133 srmmu_modtype != Cypress_vE && \
134 srmmu_modtype != Cypress_vD) ? \
135 HWCAP_SPARC_MULDIV : 0)))
136
137/* This yields a string that ld.so will use to load implementation
138 specific libraries for optimization. This is more specific in
139 intent than poking at uname or /proc/cpuinfo. */
140
141#define ELF_PLATFORM (NULL)
142
143#define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
144
145#endif /* !(__ASMSPARC_ELF_H) */
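/* Editor's note: a hypothetical userspace sketch, not part of this patch.
 * The HWCAP bits defined above reach user programs through the ELF auxiliary
 * vector (AT_HWCAP); this assumes a libc that provides getauxval().
 */
#include <stdio.h>
#include <sys/auxv.h>

#define HWCAP_SPARC_FLUSH	1	/* mirrors the kernel definitions above */
#define HWCAP_SPARC_MULDIV	8
#define HWCAP_SPARC_V9		16

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("flush:%d muldiv:%d v9:%d\n",
	       !!(hwcap & HWCAP_SPARC_FLUSH),
	       !!(hwcap & HWCAP_SPARC_MULDIV),
	       !!(hwcap & HWCAP_SPARC_V9));
	return 0;
}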
diff --git a/include/asm-sparc/elf_64.h b/include/asm-sparc/elf_64.h
new file mode 100644
index 000000000000..0818a1308f4e
--- /dev/null
+++ b/include/asm-sparc/elf_64.h
@@ -0,0 +1,217 @@
1#ifndef __ASM_SPARC64_ELF_H
2#define __ASM_SPARC64_ELF_H
3
4/*
5 * ELF register definitions..
6 */
7
8#include <asm/ptrace.h>
9#include <asm/processor.h>
10#include <asm/uaccess.h>
11#include <asm/spitfire.h>
12
13/*
14 * Sparc section types
15 */
16#define STT_REGISTER 13
17
18/*
19 * Sparc ELF relocation types
20 */
21#define R_SPARC_NONE 0
22#define R_SPARC_8 1
23#define R_SPARC_16 2
24#define R_SPARC_32 3
25#define R_SPARC_DISP8 4
26#define R_SPARC_DISP16 5
27#define R_SPARC_DISP32 6
28#define R_SPARC_WDISP30 7
29#define R_SPARC_WDISP22 8
30#define R_SPARC_HI22 9
31#define R_SPARC_22 10
32#define R_SPARC_13 11
33#define R_SPARC_LO10 12
34#define R_SPARC_GOT10 13
35#define R_SPARC_GOT13 14
36#define R_SPARC_GOT22 15
37#define R_SPARC_PC10 16
38#define R_SPARC_PC22 17
39#define R_SPARC_WPLT30 18
40#define R_SPARC_COPY 19
41#define R_SPARC_GLOB_DAT 20
42#define R_SPARC_JMP_SLOT 21
43#define R_SPARC_RELATIVE 22
44#define R_SPARC_UA32 23
45#define R_SPARC_PLT32 24
46#define R_SPARC_HIPLT22 25
47#define R_SPARC_LOPLT10 26
48#define R_SPARC_PCPLT32 27
49#define R_SPARC_PCPLT22 28
50#define R_SPARC_PCPLT10 29
51#define R_SPARC_10 30
52#define R_SPARC_11 31
53#define R_SPARC_64 32
54#define R_SPARC_OLO10 33
55#define R_SPARC_WDISP16 40
56#define R_SPARC_WDISP19 41
57#define R_SPARC_7 43
58#define R_SPARC_5 44
59#define R_SPARC_6 45
60
61/* Bits present in AT_HWCAP, primarily for Sparc32. */
62
63#define HWCAP_SPARC_FLUSH 1 /* CPU supports flush instruction. */
64#define HWCAP_SPARC_STBAR 2
65#define HWCAP_SPARC_SWAP 4
66#define HWCAP_SPARC_MULDIV 8
67#define HWCAP_SPARC_V9 16
68#define HWCAP_SPARC_ULTRA3 32
69#define HWCAP_SPARC_BLKINIT 64
70#define HWCAP_SPARC_N2 128
71
72#define CORE_DUMP_USE_REGSET
73
74/*
75 * These are used to set parameters in the core dumps.
76 */
77#define ELF_ARCH EM_SPARCV9
78#define ELF_CLASS ELFCLASS64
79#define ELF_DATA ELFDATA2MSB
80
81/* Format of 64-bit elf_gregset_t is:
82 * G0 --> G7
83 * O0 --> O7
84 * L0 --> L7
85 * I0 --> I7
86 * TSTATE
87 * TPC
88 * TNPC
89 * Y
90 */
91typedef unsigned long elf_greg_t;
92#define ELF_NGREG 36
93typedef elf_greg_t elf_gregset_t[ELF_NGREG];
94
95typedef struct {
96 unsigned long pr_regs[32];
97 unsigned long pr_fsr;
98 unsigned long pr_gsr;
99 unsigned long pr_fprs;
100} elf_fpregset_t;
101
102/* Format of 32-bit elf_gregset_t is:
103 * G0 --> G7
104 * O0 --> O7
105 * L0 --> L7
106 * I0 --> I7
107 * PSR, PC, nPC, Y, WIM, TBR
108 */
109typedef unsigned int compat_elf_greg_t;
110#define COMPAT_ELF_NGREG 38
111typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
112
113typedef struct {
114 union {
115 unsigned int pr_regs[32];
116 unsigned long pr_dregs[16];
117 } pr_fr;
118 unsigned int __unused;
119 unsigned int pr_fsr;
120 unsigned char pr_qcnt;
121 unsigned char pr_q_entrysize;
122 unsigned char pr_en;
123 unsigned int pr_q[64];
124} compat_elf_fpregset_t;
125
126/* UltraSparc extensions. Still unused, but will be used eventually. */
127typedef struct {
128 unsigned int pr_type;
129 unsigned int pr_align;
130 union {
131 struct {
132 union {
133 unsigned int pr_regs[32];
134 unsigned long pr_dregs[16];
135 long double pr_qregs[8];
136 } pr_xfr;
137 } pr_v8p;
138 unsigned int pr_xfsr;
139 unsigned int pr_fprs;
140 unsigned int pr_xg[8];
141 unsigned int pr_xo[8];
142 unsigned long pr_tstate;
143 unsigned int pr_filler[8];
144 } pr_un;
145} elf_xregset_t;
146
147/*
148 * This is used to ensure we don't load something for the wrong architecture.
149 */
150#define elf_check_arch(x) ((x)->e_machine == ELF_ARCH)
151#define compat_elf_check_arch(x) ((x)->e_machine == EM_SPARC || \
152 (x)->e_machine == EM_SPARC32PLUS)
153#define compat_start_thread start_thread32
154
155#define USE_ELF_CORE_DUMP
156#define ELF_EXEC_PAGESIZE PAGE_SIZE
157
158/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
159 use of this is to invoke "./ld.so someprog" to test out a new version of
160 the loader. We need to make sure that it is out of the way of the program
161 that it will "exec", and that there is sufficient room for the brk. */
162
163#define ELF_ET_DYN_BASE 0x0000010000000000UL
164#define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
165
166
167/* This yields a mask that user programs can use to figure out what
168 instruction set this cpu supports. */
169
170/* On Ultra, we support all of the v8 capabilities. */
171static inline unsigned int sparc64_elf_hwcap(void)
172{
173 unsigned int cap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
174 HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV |
175 HWCAP_SPARC_V9);
176
177 if (tlb_type == cheetah || tlb_type == cheetah_plus)
178 cap |= HWCAP_SPARC_ULTRA3;
179 else if (tlb_type == hypervisor) {
180 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 ||
181 sun4v_chip_type == SUN4V_CHIP_NIAGARA2)
182 cap |= HWCAP_SPARC_BLKINIT;
183 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2)
184 cap |= HWCAP_SPARC_N2;
185 }
186
187 return cap;
188}
189
190#define ELF_HWCAP sparc64_elf_hwcap();
191
192/* This yields a string that ld.so will use to load implementation
193 specific libraries for optimization. This is more specific in
194 intent than poking at uname or /proc/cpuinfo. */
195
196#define ELF_PLATFORM (NULL)
197
198#define SET_PERSONALITY(ex, ibcs2) \
199do { unsigned long new_flags = current_thread_info()->flags; \
200 new_flags &= _TIF_32BIT; \
201 if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
202 new_flags |= _TIF_32BIT; \
203 else \
204 new_flags &= ~_TIF_32BIT; \
205 if ((current_thread_info()->flags & _TIF_32BIT) \
206 != new_flags) \
207 set_thread_flag(TIF_ABI_PENDING); \
208 else \
209 clear_thread_flag(TIF_ABI_PENDING); \
210 /* flush_thread will update pgd cache */ \
211 if (ibcs2) \
212 set_personality(PER_SVR4); \
213 else if (current->personality != PER_LINUX32) \
214 set_personality(PER_LINUX); \
215} while (0)
216
217#endif /* !(__ASM_SPARC64_ELF_H) */
diff --git a/include/asm-sparc/envctrl.h b/include/asm-sparc/envctrl.h
new file mode 100644
index 000000000000..624fa7e2da8e
--- /dev/null
+++ b/include/asm-sparc/envctrl.h
@@ -0,0 +1,103 @@
1/*
2 *
3 * envctrl.h: Definitions for access to the i2c environment
4 * monitoring on Ultrasparc systems.
5 *
6 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
7 * Copyright (C) 2000 Vinh Truong (vinh.truong@eng.sun.com)
8 * VT - Add all ioctl commands and environment status definitions
9 * VT - Add application note
10 */
11#ifndef _SPARC64_ENVCTRL_H
12#define _SPARC64_ENVCTRL_H 1
13
14#include <linux/ioctl.h>
15
16/* Application note:
17 *
18 * The driver supports 4 operations: open(), close(), ioctl(), read()
19 * The device name is /dev/envctrl.
20 * Below is sample usage:
21 *
 22 * fd = open("/dev/envctrl", O_RDONLY);
 23 * if (ioctl(fd, ENVCTRL_RD_SHUTDOWN_TEMPERATURE, 0) < 0)
24 * printf("error\n");
25 * ret = read(fd, buf, 10);
26 * close(fd);
27 *
28 * Notice in the case of cpu voltage and temperature, the default is
29 * cpu0. If we need to know the info of cpu1, cpu2, cpu3, we need to
30 * pass in cpu number in ioctl() last parameter. For example, to
31 * get the voltage of cpu2:
32 *
33 * ioctlbuf[0] = 2;
 34 * if (ioctl(fd, ENVCTRL_RD_CPU_VOLTAGE, ioctlbuf) < 0)
35 * printf("error\n");
36 * ret = read(fd, buf, 10);
37 *
38 * All the return values are in ascii. So check read return value
39 * and do appropriate conversions in your application.
40 */
41
42/* IOCTL commands */
43
44/* Note: these commands reflect possible monitor features.
 45 * Some boards support only a subset of these features.
46 */
47#define ENVCTRL_RD_CPU_TEMPERATURE _IOR('p', 0x40, int)
48#define ENVCTRL_RD_CPU_VOLTAGE _IOR('p', 0x41, int)
49#define ENVCTRL_RD_FAN_STATUS _IOR('p', 0x42, int)
50#define ENVCTRL_RD_WARNING_TEMPERATURE _IOR('p', 0x43, int)
51#define ENVCTRL_RD_SHUTDOWN_TEMPERATURE _IOR('p', 0x44, int)
52#define ENVCTRL_RD_VOLTAGE_STATUS _IOR('p', 0x45, int)
53#define ENVCTRL_RD_SCSI_TEMPERATURE _IOR('p', 0x46, int)
54#define ENVCTRL_RD_ETHERNET_TEMPERATURE _IOR('p', 0x47, int)
55#define ENVCTRL_RD_MTHRBD_TEMPERATURE _IOR('p', 0x48, int)
56
57#define ENVCTRL_RD_GLOBALADDRESS _IOR('p', 0x49, int)
58
59/* Read return values for a voltage status request. */
60#define ENVCTRL_VOLTAGE_POWERSUPPLY_GOOD 0x01
61#define ENVCTRL_VOLTAGE_BAD 0x02
62#define ENVCTRL_POWERSUPPLY_BAD 0x03
63#define ENVCTRL_VOLTAGE_POWERSUPPLY_BAD 0x04
64
65/* Read return values for a fan status request.
66 * A failure match means either the fan fails or
67 * the fan is not connected. Some boards have optional
68 * connectors to connect extra fans.
69 *
70 * There are maximum 8 monitor fans. Some are cpu fans
71 * some are system fans. The mask below only indicates
72 * fan by order number.
73 * Below is a sample application:
74 *
 75 * if (ioctl(fd, ENVCTRL_RD_FAN_STATUS, 0) < 0) {
76 * printf("ioctl fan failed\n");
77 * }
78 * if (read(fd, rslt, 1) <= 0) {
79 * printf("error or fan not monitored\n");
80 * } else {
81 * if (rslt[0] == ENVCTRL_ALL_FANS_GOOD) {
82 * printf("all fans good\n");
83 * } else if (rslt[0] == ENVCTRL_ALL_FANS_BAD) {
84 * printf("all fans bad\n");
85 * } else {
86 * if (rslt[0] & ENVCTRL_FAN0_FAILURE_MASK) {
87 * printf("fan 0 failed or not connected\n");
88 * }
89 * ......
90 */
91
92#define ENVCTRL_ALL_FANS_GOOD 0x00
93#define ENVCTRL_FAN0_FAILURE_MASK 0x01
94#define ENVCTRL_FAN1_FAILURE_MASK 0x02
95#define ENVCTRL_FAN2_FAILURE_MASK 0x04
96#define ENVCTRL_FAN3_FAILURE_MASK 0x08
97#define ENVCTRL_FAN4_FAILURE_MASK 0x10
98#define ENVCTRL_FAN5_FAILURE_MASK 0x20
99#define ENVCTRL_FAN6_FAILURE_MASK 0x40
100#define ENVCTRL_FAN7_FAILURE_MASK 0x80
101#define ENVCTRL_ALL_FANS_BAD 0xFF
102
103#endif /* !(_SPARC64_ENVCTRL_H) */
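/* Editor's note: a hypothetical, self-contained version of the application
 * note above, not part of this patch.  It assumes the header is visible to
 * userspace as <asm/envctrl.h>; results come back as ASCII strings.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/envctrl.h>

int main(void)
{
	char buf[16];
	ssize_t n;
	int fd = open("/dev/envctrl", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, ENVCTRL_RD_MTHRBD_TEMPERATURE, 0) < 0) {
		close(fd);
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("motherboard temperature: %s\n", buf);
	}
	close(fd);
	return 0;
}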
diff --git a/include/asm-sparc/estate.h b/include/asm-sparc/estate.h
new file mode 100644
index 000000000000..520c08560d1b
--- /dev/null
+++ b/include/asm-sparc/estate.h
@@ -0,0 +1,49 @@
1#ifndef _SPARC64_ESTATE_H
2#define _SPARC64_ESTATE_H
3
4/* UltraSPARC-III E-cache Error Enable */
5#define ESTATE_ERROR_FMT 0x0000000000040000 /* Force MTAG ECC */
6#define ESTATE_ERROR_FMESS 0x000000000003c000 /* Forced MTAG ECC val */
7#define ESTATE_ERROR_FMD 0x0000000000002000 /* Force DATA ECC */
8#define ESTATE_ERROR_FDECC 0x0000000000001ff0 /* Forced DATA ECC val */
9#define ESTATE_ERROR_UCEEN 0x0000000000000008 /* See below */
10#define ESTATE_ERROR_NCEEN 0x0000000000000002 /* See below */
11#define ESTATE_ERROR_CEEN 0x0000000000000001 /* See below */
12
13/* UCEEN enables the fast_ECC_error trap for: 1) software correctable E-cache
14 * errors 2) uncorrectable E-cache errors. Such events only occur on reads
15 * of the E-cache by the local processor for: 1) data loads 2) instruction
16 * fetches 3) atomic operations. Such events _cannot_ occur for: 1) merge
17 * 2) writeback 2) copyout. The AFSR bits associated with these traps are
18 * UCC and UCU.
19 */
20
21/* NCEEN enables instruction_access_error, data_access_error, and ECC_error traps
22 * for uncorrectable ECC errors and system errors.
23 *
24 * Uncorrectable system bus data error or MTAG ECC error, system bus TimeOUT,
25 * or system bus BusERR:
26 * 1) As the result of an instruction fetch, will generate instruction_access_error
27 * 2) As the result of a load etc. will generate data_access_error.
28 * 3) As the result of store merge completion, writeback, or copyout will
29 * generate a disrupting ECC_error trap.
30 * 4) As the result of such errors on instruction vector fetch can generate any
31 * of the 3 trap types.
32 *
33 * The AFSR bits associated with these traps are EMU, EDU, WDU, CPU, IVU, UE,
34 * BERR, and TO.
35 */
36
37/* CEEN enables the ECC_error trap for hardware corrected ECC errors. System bus
38 * reads resulting in a hardware corrected data or MTAG ECC error will generate an
39 * ECC_error disrupting trap with this bit enabled.
40 *
41 * This same trap will also be generated when a hardware corrected ECC error results
42 * during store merge, writeback, and copyout operations.
43 */
44
45/* In general, if the trap enable bits above are disabled the AFSR bits will still
46 * log the events even though the trap will not be generated by the processor.
47 */
48
49#endif /* _SPARC64_ESTATE_H */
diff --git a/include/asm-sparc/fbio.h b/include/asm-sparc/fbio.h
index c2b27e7a7cad..b9215a0907d3 100644
--- a/include/asm-sparc/fbio.h
+++ b/include/asm-sparc/fbio.h
@@ -1,6 +1,9 @@
1#ifndef __LINUX_FBIO_H 1#ifndef __LINUX_FBIO_H
2#define __LINUX_FBIO_H 2#define __LINUX_FBIO_H
3 3
4#include <linux/compiler.h>
5#include <linux/types.h>
6
4/* Constants used for fbio SunOS compatibility */ 7/* Constants used for fbio SunOS compatibility */
5/* (C) 1996 Miguel de Icaza */ 8/* (C) 1996 Miguel de Icaza */
6 9
@@ -38,6 +41,9 @@
38#define FBTYPE_PCI_IGA1682 23 41#define FBTYPE_PCI_IGA1682 23
39#define FBTYPE_P9100COLOR 24 42#define FBTYPE_P9100COLOR 24
40 43
44#define FBTYPE_PCI_GENERIC 1000
45#define FBTYPE_PCI_MACH64 1001
46
41/* fbio ioctls */ 47/* fbio ioctls */
42/* Returned by FBIOGTYPE */ 48/* Returned by FBIOGTYPE */
43struct fbtype { 49struct fbtype {
@@ -97,8 +103,8 @@ struct fbcursor {
97 struct fbcurpos hot; /* cursor hot spot */ 103 struct fbcurpos hot; /* cursor hot spot */
98 struct fbcmap cmap; /* color map info */ 104 struct fbcmap cmap; /* color map info */
99 struct fbcurpos size; /* cursor bit map size */ 105 struct fbcurpos size; /* cursor bit map size */
100 char *image; /* cursor image bits */ 106 char __user *image; /* cursor image bits */
101 char *mask; /* cursor mask bits */ 107 char __user *mask; /* cursor mask bits */
102}; 108};
103 109
104/* set/get cursor attributes/shape */ 110/* set/get cursor attributes/shape */
@@ -294,4 +300,31 @@ struct fb_clut32 {
294#define LEO_LD_GBL_MAP 0x01009000 300#define LEO_LD_GBL_MAP 0x01009000
295#define LEO_UNK2_MAP 0x0100a000 301#define LEO_UNK2_MAP 0x0100a000
296 302
303#ifdef __KERNEL__
304struct fbcmap32 {
305 int index; /* first element (0 origin) */
306 int count;
307 u32 red;
308 u32 green;
309 u32 blue;
310};
311
312#define FBIOPUTCMAP32 _IOW('F', 3, struct fbcmap32)
313#define FBIOGETCMAP32 _IOW('F', 4, struct fbcmap32)
314
315struct fbcursor32 {
316 short set; /* what to set, choose from the list above */
317 short enable; /* cursor on/off */
318 struct fbcurpos pos; /* cursor position */
319 struct fbcurpos hot; /* cursor hot spot */
320 struct fbcmap32 cmap; /* color map info */
321 struct fbcurpos size; /* cursor bit map size */
322 u32 image; /* cursor image bits */
323 u32 mask; /* cursor mask bits */
324};
325
326#define FBIOSCURSOR32 _IOW('F', 24, struct fbcursor32)
327#define FBIOGCURSOR32 _IOW('F', 25, struct fbcursor32)
328#endif
329
297#endif /* __LINUX_FBIO_H */ 330#endif /* __LINUX_FBIO_H */
diff --git a/include/asm-sparc/fcntl.h b/include/asm-sparc/fcntl.h
index 07bd2d80257f..d4d9c9d852c3 100644
--- a/include/asm-sparc/fcntl.h
+++ b/include/asm-sparc/fcntl.h
@@ -10,7 +10,11 @@
10#define O_EXCL 0x0800 /* not fcntl */ 10#define O_EXCL 0x0800 /* not fcntl */
11#define O_SYNC 0x2000 11#define O_SYNC 0x2000
12#define O_NONBLOCK 0x4000 12#define O_NONBLOCK 0x4000
13#if defined(__sparc__) && defined(__arch64__)
14#define O_NDELAY 0x0004
15#else
13#define O_NDELAY (0x0004 | O_NONBLOCK) 16#define O_NDELAY (0x0004 | O_NONBLOCK)
17#endif
14#define O_NOCTTY 0x8000 /* not fcntl */ 18#define O_NOCTTY 0x8000 /* not fcntl */
15#define O_LARGEFILE 0x40000 19#define O_LARGEFILE 0x40000
16#define O_DIRECT 0x100000 /* direct disk access hint */ 20#define O_DIRECT 0x100000 /* direct disk access hint */
diff --git a/include/asm-sparc/fhc.h b/include/asm-sparc/fhc.h
new file mode 100644
index 000000000000..788cbc46a116
--- /dev/null
+++ b/include/asm-sparc/fhc.h
@@ -0,0 +1,121 @@
1/*
2 * fhc.h: Structures for central/fhc pseudo driver on Sunfire/Starfire/Wildfire.
3 *
4 * Copyright (C) 1997, 1999 David S. Miller (davem@redhat.com)
5 */
6
7#ifndef _SPARC64_FHC_H
8#define _SPARC64_FHC_H
9
10#include <linux/timer.h>
11
12#include <asm/oplib.h>
13#include <asm/prom.h>
14#include <asm/upa.h>
15
16struct linux_fhc;
17
18/* Clock board register offsets. */
19#define CLOCK_CTRL 0x00UL /* Main control */
20#define CLOCK_STAT1 0x10UL /* Status one */
21#define CLOCK_STAT2 0x20UL /* Status two */
22#define CLOCK_PWRSTAT 0x30UL /* Power status */
23#define CLOCK_PWRPRES 0x40UL /* Power presence */
24#define CLOCK_TEMP 0x50UL /* Temperature */
25#define CLOCK_IRQDIAG 0x60UL /* IRQ diagnostics */
26#define CLOCK_PWRSTAT2 0x70UL /* Power status two */
27
28#define CLOCK_CTRL_LLED 0x04 /* Left LED, 0 == on */
29#define CLOCK_CTRL_MLED 0x02 /* Mid LED, 1 == on */
 30#define CLOCK_CTRL_RLED 0x01 /* Right LED, 1 == on */
31
32struct linux_central {
33 struct linux_fhc *child;
34 unsigned long cfreg;
35 unsigned long clkregs;
36 unsigned long clkver;
37 int slots;
38 struct device_node *prom_node;
39
40 struct linux_prom_ranges central_ranges[PROMREG_MAX];
41 int num_central_ranges;
42};
43
44/* Firehose controller register offsets */
45struct fhc_regs {
46 unsigned long pregs; /* FHC internal regs */
47#define FHC_PREGS_ID 0x00UL /* FHC ID */
48#define FHC_ID_VERS 0xf0000000 /* Version of this FHC */
49#define FHC_ID_PARTID 0x0ffff000 /* Part ID code (0x0f9f == FHC) */
50#define FHC_ID_MANUF 0x0000007e /* Manufacturer (0x3e == SUN's JEDEC)*/
51#define FHC_ID_RESV 0x00000001 /* Read as one */
52#define FHC_PREGS_RCS 0x10UL /* FHC Reset Control/Status Register */
53#define FHC_RCS_POR 0x80000000 /* Last reset was a power cycle */
54#define FHC_RCS_SPOR 0x40000000 /* Last reset was sw power on reset */
55#define FHC_RCS_SXIR 0x20000000 /* Last reset was sw XIR reset */
56#define FHC_RCS_BPOR 0x10000000 /* Last reset was due to POR button */
57#define FHC_RCS_BXIR 0x08000000 /* Last reset was due to XIR button */
58#define FHC_RCS_WEVENT 0x04000000 /* CPU reset was due to wakeup event */
59#define FHC_RCS_CFATAL 0x02000000 /* Centerplane Fatal Error signalled */
60#define FHC_RCS_FENAB 0x01000000 /* Fatal errors elicit system reset */
61#define FHC_PREGS_CTRL 0x20UL /* FHC Control Register */
62#define FHC_CONTROL_ICS 0x00100000 /* Ignore Centerplane Signals */
63#define FHC_CONTROL_FRST 0x00080000 /* Fatal Error Reset Enable */
64#define FHC_CONTROL_LFAT 0x00040000 /* AC/DC signalled a local error */
65#define FHC_CONTROL_SLINE 0x00010000 /* Firmware Synchronization Line */
66#define FHC_CONTROL_DCD 0x00008000 /* DC-->DC Converter Disable */
67#define FHC_CONTROL_POFF 0x00004000 /* AC/DC Controller PLL Disable */
68#define FHC_CONTROL_FOFF 0x00002000 /* FHC Controller PLL Disable */
69#define FHC_CONTROL_AOFF 0x00001000 /* CPU A SRAM/SBD Low Power Mode */
70#define FHC_CONTROL_BOFF 0x00000800 /* CPU B SRAM/SBD Low Power Mode */
71#define FHC_CONTROL_PSOFF 0x00000400 /* Turns off this FHC's power supply */
72#define FHC_CONTROL_IXIST 0x00000200 /* 0=FHC tells clock board it exists */
73#define FHC_CONTROL_XMSTR 0x00000100 /* 1=Causes this FHC to be XIR master*/
74#define FHC_CONTROL_LLED 0x00000040 /* 0=Left LED ON */
75#define FHC_CONTROL_MLED 0x00000020 /* 1=Middle LED ON */
76#define FHC_CONTROL_RLED 0x00000010 /* 1=Right LED */
77#define FHC_CONTROL_BPINS 0x00000003 /* Spare Bidirectional Pins */
78#define FHC_PREGS_BSR 0x30UL /* FHC Board Status Register */
79#define FHC_BSR_DA64 0x00040000 /* Port A: 0=128bit 1=64bit data path */
80#define FHC_BSR_DB64 0x00020000 /* Port B: 0=128bit 1=64bit data path */
81#define FHC_BSR_BID 0x0001e000 /* Board ID */
82#define FHC_BSR_SA 0x00001c00 /* Port A UPA Speed (from the pins) */
83#define FHC_BSR_SB 0x00000380 /* Port B UPA Speed (from the pins) */
84#define FHC_BSR_NDIAG 0x00000040 /* Not in Diag Mode */
85#define FHC_BSR_NTBED 0x00000020 /* Not in TestBED Mode */
86#define FHC_BSR_NIA 0x0000001c /* Jumper, bit 18 in PROM space */
87#define FHC_BSR_SI 0x00000001 /* Spare input pin value */
88#define FHC_PREGS_ECC 0x40UL /* FHC ECC Control Register (16 bits) */
89#define FHC_PREGS_JCTRL 0xf0UL /* FHC JTAG Control Register */
90#define FHC_JTAG_CTRL_MENAB 0x80000000 /* Indicates this is JTAG Master */
91#define FHC_JTAG_CTRL_MNONE 0x40000000 /* Indicates no JTAG Master present */
92#define FHC_PREGS_JCMD 0x100UL /* FHC JTAG Command Register */
93 unsigned long ireg; /* FHC IGN reg */
94#define FHC_IREG_IGN 0x00UL /* This FHC's IGN */
95 unsigned long ffregs; /* FHC fanfail regs */
96#define FHC_FFREGS_IMAP 0x00UL /* FHC Fanfail IMAP */
97#define FHC_FFREGS_ICLR 0x10UL /* FHC Fanfail ICLR */
98 unsigned long sregs; /* FHC system regs */
99#define FHC_SREGS_IMAP 0x00UL /* FHC System IMAP */
100#define FHC_SREGS_ICLR 0x10UL /* FHC System ICLR */
101 unsigned long uregs; /* FHC uart regs */
102#define FHC_UREGS_IMAP 0x00UL /* FHC Uart IMAP */
103#define FHC_UREGS_ICLR 0x10UL /* FHC Uart ICLR */
104 unsigned long tregs; /* FHC TOD regs */
105#define FHC_TREGS_IMAP 0x00UL /* FHC TOD IMAP */
106#define FHC_TREGS_ICLR 0x10UL /* FHC TOD ICLR */
107};
108
109struct linux_fhc {
110 struct linux_fhc *next;
111 struct linux_central *parent; /* NULL if not central FHC */
112 struct fhc_regs fhc_regs;
113 int board;
114 int jtag_master;
115 struct device_node *prom_node;
116
117 struct linux_prom_ranges fhc_ranges[PROMREG_MAX];
118 int num_fhc_ranges;
119};
120
121#endif /* !(_SPARC64_FHC_H) */
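/* Editor's note: a hypothetical sketch, not part of this patch, of decoding a
 * few FHC_PREGS_BSR fields with the masks above.  It assumes fhc_regs.pregs
 * has already been mapped and that upa_readl() (from asm/upa.h, included by
 * this header) is the appropriate accessor.
 */
#if 0
static void example_report_fhc(struct linux_fhc *fhc)
{
	unsigned int bsr = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_BSR);

	printk(KERN_INFO "FHC board %d: port A %s data path, board-id %u\n",
	       fhc->board,
	       (bsr & FHC_BSR_DA64) ? "64-bit" : "128-bit",
	       (bsr & FHC_BSR_BID) >> 13);
}
#endif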
diff --git a/include/asm-sparc/floppy.h b/include/asm-sparc/floppy.h
index d3978e068e2b..6c628ba15a8d 100644
--- a/include/asm-sparc/floppy.h
+++ b/include/asm-sparc/floppy.h
@@ -1,388 +1,8 @@
1/* asm-sparc/floppy.h: Sparc specific parts of the Floppy driver. 1#ifndef ___ASM_SPARC_FLOPPY_H
2 * 2#define ___ASM_SPARC_FLOPPY_H
3 * Copyright (C) 1995 David S. Miller (davem@davemloft.net) 3#if defined(__sparc__) && defined(__arch64__)
4 */ 4#include <asm-sparc/floppy_64.h>
5 5#else
6#ifndef __ASM_SPARC_FLOPPY_H 6#include <asm-sparc/floppy_32.h>
7#define __ASM_SPARC_FLOPPY_H 7#endif
8
9#include <asm/page.h>
10#include <asm/pgtable.h>
11#include <asm/system.h>
12#include <asm/idprom.h>
13#include <asm/machines.h>
14#include <asm/oplib.h>
15#include <asm/auxio.h>
16#include <asm/irq.h>
17
18/* We don't need no stinkin' I/O port allocation crap. */
19#undef release_region
20#undef request_region
21#define release_region(X, Y) do { } while(0)
22#define request_region(X, Y, Z) (1)
23
24/* References:
25 * 1) Netbsd Sun floppy driver.
26 * 2) NCR 82077 controller manual
27 * 3) Intel 82077 controller manual
28 */
29struct sun_flpy_controller {
30 volatile unsigned char status_82072; /* Main Status reg. */
31#define dcr_82072 status_82072 /* Digital Control reg. */
32#define status1_82077 status_82072 /* Auxiliary Status reg. 1 */
33
34 volatile unsigned char data_82072; /* Data fifo. */
35#define status2_82077 data_82072 /* Auxiliary Status reg. 2 */
36
37 volatile unsigned char dor_82077; /* Digital Output reg. */
38 volatile unsigned char tapectl_82077; /* What the? Tape control reg? */
39
40 volatile unsigned char status_82077; /* Main Status Register. */
41#define drs_82077 status_82077 /* Digital Rate Select reg. */
42
43 volatile unsigned char data_82077; /* Data fifo. */
44 volatile unsigned char ___unused;
45 volatile unsigned char dir_82077; /* Digital Input reg. */
46#define dcr_82077 dir_82077 /* Config Control reg. */
47};
48
49/* You'll only ever find one controller on a SparcStation anyways. */
50static struct sun_flpy_controller *sun_fdc = NULL;
51extern volatile unsigned char *fdc_status;
52
53struct sun_floppy_ops {
54 unsigned char (*fd_inb)(int port);
55 void (*fd_outb)(unsigned char value, int port);
56};
57
58static struct sun_floppy_ops sun_fdops;
59
60#define fd_inb(port) sun_fdops.fd_inb(port)
61#define fd_outb(value,port) sun_fdops.fd_outb(value,port)
62#define fd_enable_dma() sun_fd_enable_dma()
63#define fd_disable_dma() sun_fd_disable_dma()
64#define fd_request_dma() (0) /* nothing... */
65#define fd_free_dma() /* nothing... */
66#define fd_clear_dma_ff() /* nothing... */
67#define fd_set_dma_mode(mode) sun_fd_set_dma_mode(mode)
68#define fd_set_dma_addr(addr) sun_fd_set_dma_addr(addr)
69#define fd_set_dma_count(count) sun_fd_set_dma_count(count)
70#define fd_enable_irq() /* nothing... */
71#define fd_disable_irq() /* nothing... */
72#define fd_cacheflush(addr, size) /* nothing... */
73#define fd_request_irq() sun_fd_request_irq()
74#define fd_free_irq() /* nothing... */
75#if 0 /* P3: added by Alain, these cause a MMU corruption. 19960524 XXX */
76#define fd_dma_mem_alloc(size) ((unsigned long) vmalloc(size))
77#define fd_dma_mem_free(addr,size) (vfree((void *)(addr)))
78#endif 8#endif
79
80/* XXX This isn't really correct. XXX */
81#define get_dma_residue(x) (0)
82
83#define FLOPPY0_TYPE 4
84#define FLOPPY1_TYPE 0
85
86/* Super paranoid... */
87#undef HAVE_DISABLE_HLT
88
89/* Here is where we catch the floppy driver trying to initialize,
90 * therefore this is where we call the PROM device tree probing
91 * routine etc. on the Sparc.
92 */
93#define FDC1 sun_floppy_init()
94
95#define N_FDC 1
96#define N_DRIVE 8
97
98/* No 64k boundary crossing problems on the Sparc. */
99#define CROSS_64KB(a,s) (0)
100
101/* Routines unique to each controller type on a Sun. */
102static void sun_set_dor(unsigned char value, int fdc_82077)
103{
104 if (sparc_cpu_model == sun4c) {
105 unsigned int bits = 0;
106 if (value & 0x10)
107 bits |= AUXIO_FLPY_DSEL;
108 if ((value & 0x80) == 0)
109 bits |= AUXIO_FLPY_EJCT;
110 set_auxio(bits, (~bits) & (AUXIO_FLPY_DSEL|AUXIO_FLPY_EJCT));
111 }
112 if (fdc_82077) {
113 sun_fdc->dor_82077 = value;
114 }
115}
116
117static unsigned char sun_read_dir(void)
118{
119 if (sparc_cpu_model == sun4c)
120 return (get_auxio() & AUXIO_FLPY_DCHG) ? 0x80 : 0;
121 else
122 return sun_fdc->dir_82077;
123}
124
125static unsigned char sun_82072_fd_inb(int port)
126{
127 udelay(5);
128 switch(port & 7) {
129 default:
130 printk("floppy: Asked to read unknown port %d\n", port);
131 panic("floppy: Port bolixed.");
132 case 4: /* FD_STATUS */
133 return sun_fdc->status_82072 & ~STATUS_DMA;
134 case 5: /* FD_DATA */
135 return sun_fdc->data_82072;
136 case 7: /* FD_DIR */
137 return sun_read_dir();
138 };
139 panic("sun_82072_fd_inb: How did I get here?");
140}
141
142static void sun_82072_fd_outb(unsigned char value, int port)
143{
144 udelay(5);
145 switch(port & 7) {
146 default:
147 printk("floppy: Asked to write to unknown port %d\n", port);
148 panic("floppy: Port bolixed.");
149 case 2: /* FD_DOR */
150 sun_set_dor(value, 0);
151 break;
152 case 5: /* FD_DATA */
153 sun_fdc->data_82072 = value;
154 break;
155 case 7: /* FD_DCR */
156 sun_fdc->dcr_82072 = value;
157 break;
158 case 4: /* FD_STATUS */
159 sun_fdc->status_82072 = value;
160 break;
161 };
162 return;
163}
164
165static unsigned char sun_82077_fd_inb(int port)
166{
167 udelay(5);
168 switch(port & 7) {
169 default:
170 printk("floppy: Asked to read unknown port %d\n", port);
171 panic("floppy: Port bolixed.");
172 case 0: /* FD_STATUS_0 */
173 return sun_fdc->status1_82077;
174 case 1: /* FD_STATUS_1 */
175 return sun_fdc->status2_82077;
176 case 2: /* FD_DOR */
177 return sun_fdc->dor_82077;
178 case 3: /* FD_TDR */
179 return sun_fdc->tapectl_82077;
180 case 4: /* FD_STATUS */
181 return sun_fdc->status_82077 & ~STATUS_DMA;
182 case 5: /* FD_DATA */
183 return sun_fdc->data_82077;
184 case 7: /* FD_DIR */
185 return sun_read_dir();
186 };
187 panic("sun_82077_fd_inb: How did I get here?");
188}
189
190static void sun_82077_fd_outb(unsigned char value, int port)
191{
192 udelay(5);
193 switch(port & 7) {
194 default:
195 printk("floppy: Asked to write to unknown port %d\n", port);
196 panic("floppy: Port bolixed.");
197 case 2: /* FD_DOR */
198 sun_set_dor(value, 1);
199 break;
200 case 5: /* FD_DATA */
201 sun_fdc->data_82077 = value;
202 break;
203 case 7: /* FD_DCR */
204 sun_fdc->dcr_82077 = value;
205 break;
206 case 4: /* FD_STATUS */
207 sun_fdc->status_82077 = value;
208 break;
209 case 3: /* FD_TDR */
210 sun_fdc->tapectl_82077 = value;
211 break;
212 };
213 return;
214}
215
216/* For pseudo-dma (Sun floppy drives have no real DMA available to
217 * them so we must eat the data fifo bytes directly ourselves) we have
218 * three state variables. doing_pdma tells our inline low-level
219 * assembly floppy interrupt entry point whether it should sit and eat
220 * bytes from the fifo or just transfer control up to the higher level
221 * floppy interrupt c-code. I tried very hard but I could not get the
222 * pseudo-dma to work in c-code without getting many overruns and
223 * underruns. If non-zero, doing_pdma encodes the direction of
224 * the transfer for debugging. 1=read 2=write
225 */
226extern char *pdma_vaddr;
227extern unsigned long pdma_size;
228extern volatile int doing_pdma;
229
230/* This is software state */
231extern char *pdma_base;
232extern unsigned long pdma_areasize;
233
234/* Common routines to all controller types on the Sparc. */
235static inline void virtual_dma_init(void)
236{
237 /* nothing... */
238}
239
240static inline void sun_fd_disable_dma(void)
241{
242 doing_pdma = 0;
243 if (pdma_base) {
244 mmu_unlockarea(pdma_base, pdma_areasize);
245 pdma_base = NULL;
246 }
247}
248
249static inline void sun_fd_set_dma_mode(int mode)
250{
251 switch(mode) {
252 case DMA_MODE_READ:
253 doing_pdma = 1;
254 break;
255 case DMA_MODE_WRITE:
256 doing_pdma = 2;
257 break;
258 default:
259 printk("Unknown dma mode %d\n", mode);
260 panic("floppy: Giving up...");
261 }
262}
263
264static inline void sun_fd_set_dma_addr(char *buffer)
265{
266 pdma_vaddr = buffer;
267}
268
269static inline void sun_fd_set_dma_count(int length)
270{
271 pdma_size = length;
272}
273
274static inline void sun_fd_enable_dma(void)
275{
276 pdma_vaddr = mmu_lockarea(pdma_vaddr, pdma_size);
277 pdma_base = pdma_vaddr;
278 pdma_areasize = pdma_size;
279}
280
281/* Our low-level entry point in arch/sparc/kernel/entry.S */
282extern int sparc_floppy_request_irq(int irq, unsigned long flags,
283 irq_handler_t irq_handler);
284
285static int sun_fd_request_irq(void)
286{
287 static int once = 0;
288 int error;
289
290 if(!once) {
291 once = 1;
292 error = sparc_floppy_request_irq(FLOPPY_IRQ,
293 IRQF_DISABLED,
294 floppy_interrupt);
295 return ((error == 0) ? 0 : -1);
296 } else return 0;
297}
298
299static struct linux_prom_registers fd_regs[2];
300
301static int sun_floppy_init(void)
302{
303 char state[128];
304 int tnode, fd_node, num_regs;
305 struct resource r;
306
307 use_virtual_dma = 1;
308
309 FLOPPY_IRQ = 11;
310 /* Forget it if we aren't on a machine that could possibly
311 * ever have a floppy drive.
312 */
313 if((sparc_cpu_model != sun4c && sparc_cpu_model != sun4m) ||
314 ((idprom->id_machtype == (SM_SUN4C | SM_4C_SLC)) ||
315 (idprom->id_machtype == (SM_SUN4C | SM_4C_ELC)))) {
316 /* We certainly don't have a floppy controller. */
317 goto no_sun_fdc;
318 }
319 /* Well, try to find one. */
320 tnode = prom_getchild(prom_root_node);
321 fd_node = prom_searchsiblings(tnode, "obio");
322 if(fd_node != 0) {
323 tnode = prom_getchild(fd_node);
324 fd_node = prom_searchsiblings(tnode, "SUNW,fdtwo");
325 } else {
326 fd_node = prom_searchsiblings(tnode, "fd");
327 }
328 if(fd_node == 0) {
329 goto no_sun_fdc;
330 }
331
332 /* The sun4m lets us know if the controller is actually usable. */
333 if(sparc_cpu_model == sun4m &&
334 prom_getproperty(fd_node, "status", state, sizeof(state)) != -1) {
335 if(!strcmp(state, "disabled")) {
336 goto no_sun_fdc;
337 }
338 }
339 num_regs = prom_getproperty(fd_node, "reg", (char *) fd_regs, sizeof(fd_regs));
340 num_regs = (num_regs / sizeof(fd_regs[0]));
341 prom_apply_obio_ranges(fd_regs, num_regs);
342 memset(&r, 0, sizeof(r));
343 r.flags = fd_regs[0].which_io;
344 r.start = fd_regs[0].phys_addr;
345 sun_fdc = (struct sun_flpy_controller *)
346 sbus_ioremap(&r, 0, fd_regs[0].reg_size, "floppy");
347
348 /* Last minute sanity check... */
349 if(sun_fdc->status_82072 == 0xff) {
350 sun_fdc = NULL;
351 goto no_sun_fdc;
352 }
353
354 sun_fdops.fd_inb = sun_82077_fd_inb;
355 sun_fdops.fd_outb = sun_82077_fd_outb;
356 fdc_status = &sun_fdc->status_82077;
357
358 if (sun_fdc->dor_82077 == 0x80) {
359 sun_fdc->dor_82077 = 0x02;
360 if (sun_fdc->dor_82077 == 0x80) {
361 sun_fdops.fd_inb = sun_82072_fd_inb;
362 sun_fdops.fd_outb = sun_82072_fd_outb;
363 fdc_status = &sun_fdc->status_82072;
364 }
365 }
366
367 /* Success... */
368 allowed_drive_mask = 0x01;
369 return (int) sun_fdc;
370
371no_sun_fdc:
372 return -1;
373}
374
375static int sparc_eject(void)
376{
377 set_dor(0x00, 0xff, 0x90);
378 udelay(500);
379 set_dor(0x00, 0x6f, 0x00);
380 udelay(500);
381 return 0;
382}
383
384#define fd_eject(drive) sparc_eject()
385
386#define EXTRA_FLOPPY_PARAMS
387
388#endif /* !(__ASM_SPARC_FLOPPY_H) */
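
The pseudo-DMA comment in the hunk above explains how doing_pdma, pdma_vaddr and pdma_size let the low-level interrupt path drain the controller's data FIFO by hand. A minimal C sketch of that per-byte loop, modelled on the sparc64 sparc_floppy_irq() that appears later in this patch; the helper name pdma_drain_fifo and the bare MSR bit masks (0x80 RQM, 0x20 NDMA, 0x40 DIO) are illustrative, not code from the patch:

    static int pdma_drain_fifo(volatile unsigned char *msr,
                               volatile unsigned char *fifo)
    {
            while (pdma_size) {
                    unsigned char status = *msr;

                    if (!(status & 0x80))   /* RQM clear: FIFO not ready yet */
                            return 0;       /* stay in pdma mode, wait for next irq */
                    if (!(status & 0x20)) { /* NDMA dropped: command phase is over */
                            doing_pdma = 0;
                            return 1;       /* hand off to the generic floppy ISR */
                    }
                    if (status & 0x40)      /* DIO set: controller -> memory */
                            *pdma_vaddr++ = *fifo;
                    else                    /* DIO clear: memory -> controller */
                            *fifo = *pdma_vaddr++;
                    pdma_size--;
            }
            doing_pdma = 0;                 /* whole transfer consumed */
            return 1;
    }
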
diff --git a/include/asm-sparc/floppy_32.h b/include/asm-sparc/floppy_32.h
new file mode 100644
index 000000000000..acdd06eafe59
--- /dev/null
+++ b/include/asm-sparc/floppy_32.h
@@ -0,0 +1,388 @@
1/* asm-sparc/floppy.h: Sparc specific parts of the Floppy driver.
2 *
3 * Copyright (C) 1995 David S. Miller (davem@davemloft.net)
4 */
5
6#ifndef __ASM_SPARC_FLOPPY_H
7#define __ASM_SPARC_FLOPPY_H
8
9#include <asm/page.h>
10#include <asm/pgtable.h>
11#include <asm/system.h>
12#include <asm/idprom.h>
13#include <asm/machines.h>
14#include <asm/oplib.h>
15#include <asm/auxio.h>
16#include <asm/irq.h>
17
18/* We don't need no stinkin' I/O port allocation crap. */
19#undef release_region
20#undef request_region
21#define release_region(X, Y) do { } while(0)
22#define request_region(X, Y, Z) (1)
23
24/* References:
25 * 1) Netbsd Sun floppy driver.
26 * 2) NCR 82077 controller manual
27 * 3) Intel 82077 controller manual
28 */
29struct sun_flpy_controller {
30 volatile unsigned char status_82072; /* Main Status reg. */
31#define dcr_82072 status_82072 /* Digital Control reg. */
32#define status1_82077 status_82072 /* Auxiliary Status reg. 1 */
33
34 volatile unsigned char data_82072; /* Data fifo. */
35#define status2_82077 data_82072 /* Auxiliary Status reg. 2 */
36
37 volatile unsigned char dor_82077; /* Digital Output reg. */
38 volatile unsigned char tapectl_82077; /* What the? Tape control reg? */
39
40 volatile unsigned char status_82077; /* Main Status Register. */
41#define drs_82077 status_82077 /* Digital Rate Select reg. */
42
43 volatile unsigned char data_82077; /* Data fifo. */
44 volatile unsigned char ___unused;
45 volatile unsigned char dir_82077; /* Digital Input reg. */
46#define dcr_82077 dir_82077 /* Config Control reg. */
47};
48
49/* You'll only ever find one controller on a SparcStation anyways. */
50static struct sun_flpy_controller *sun_fdc = NULL;
51extern volatile unsigned char *fdc_status;
52
53struct sun_floppy_ops {
54 unsigned char (*fd_inb)(int port);
55 void (*fd_outb)(unsigned char value, int port);
56};
57
58static struct sun_floppy_ops sun_fdops;
59
60#define fd_inb(port) sun_fdops.fd_inb(port)
61#define fd_outb(value,port) sun_fdops.fd_outb(value,port)
62#define fd_enable_dma() sun_fd_enable_dma()
63#define fd_disable_dma() sun_fd_disable_dma()
64#define fd_request_dma() (0) /* nothing... */
65#define fd_free_dma() /* nothing... */
66#define fd_clear_dma_ff() /* nothing... */
67#define fd_set_dma_mode(mode) sun_fd_set_dma_mode(mode)
68#define fd_set_dma_addr(addr) sun_fd_set_dma_addr(addr)
69#define fd_set_dma_count(count) sun_fd_set_dma_count(count)
70#define fd_enable_irq() /* nothing... */
71#define fd_disable_irq() /* nothing... */
72#define fd_cacheflush(addr, size) /* nothing... */
73#define fd_request_irq() sun_fd_request_irq()
74#define fd_free_irq() /* nothing... */
75#if 0 /* P3: added by Alain, these cause a MMU corruption. 19960524 XXX */
76#define fd_dma_mem_alloc(size) ((unsigned long) vmalloc(size))
77#define fd_dma_mem_free(addr,size) (vfree((void *)(addr)))
78#endif
79
80/* XXX This isn't really correct. XXX */
81#define get_dma_residue(x) (0)
82
83#define FLOPPY0_TYPE 4
84#define FLOPPY1_TYPE 0
85
86/* Super paranoid... */
87#undef HAVE_DISABLE_HLT
88
89/* Here is where we catch the floppy driver trying to initialize,
90 * therefore this is where we call the PROM device tree probing
91 * routine etc. on the Sparc.
92 */
93#define FDC1 sun_floppy_init()
94
95#define N_FDC 1
96#define N_DRIVE 8
97
98/* No 64k boundary crossing problems on the Sparc. */
99#define CROSS_64KB(a,s) (0)
100
101/* Routines unique to each controller type on a Sun. */
102static void sun_set_dor(unsigned char value, int fdc_82077)
103{
104 if (sparc_cpu_model == sun4c) {
105 unsigned int bits = 0;
106 if (value & 0x10)
107 bits |= AUXIO_FLPY_DSEL;
108 if ((value & 0x80) == 0)
109 bits |= AUXIO_FLPY_EJCT;
110 set_auxio(bits, (~bits) & (AUXIO_FLPY_DSEL|AUXIO_FLPY_EJCT));
111 }
112 if (fdc_82077) {
113 sun_fdc->dor_82077 = value;
114 }
115}
116
117static unsigned char sun_read_dir(void)
118{
119 if (sparc_cpu_model == sun4c)
120 return (get_auxio() & AUXIO_FLPY_DCHG) ? 0x80 : 0;
121 else
122 return sun_fdc->dir_82077;
123}
124
125static unsigned char sun_82072_fd_inb(int port)
126{
127 udelay(5);
128 switch(port & 7) {
129 default:
130 printk("floppy: Asked to read unknown port %d\n", port);
131 panic("floppy: Port bolixed.");
132 case 4: /* FD_STATUS */
133 return sun_fdc->status_82072 & ~STATUS_DMA;
134 case 5: /* FD_DATA */
135 return sun_fdc->data_82072;
136 case 7: /* FD_DIR */
137 return sun_read_dir();
138 };
139 panic("sun_82072_fd_inb: How did I get here?");
140}
141
142static void sun_82072_fd_outb(unsigned char value, int port)
143{
144 udelay(5);
145 switch(port & 7) {
146 default:
147 printk("floppy: Asked to write to unknown port %d\n", port);
148 panic("floppy: Port bolixed.");
149 case 2: /* FD_DOR */
150 sun_set_dor(value, 0);
151 break;
152 case 5: /* FD_DATA */
153 sun_fdc->data_82072 = value;
154 break;
155 case 7: /* FD_DCR */
156 sun_fdc->dcr_82072 = value;
157 break;
158 case 4: /* FD_STATUS */
159 sun_fdc->status_82072 = value;
160 break;
161 };
162 return;
163}
164
165static unsigned char sun_82077_fd_inb(int port)
166{
167 udelay(5);
168 switch(port & 7) {
169 default:
170 printk("floppy: Asked to read unknown port %d\n", port);
171 panic("floppy: Port bolixed.");
172 case 0: /* FD_STATUS_0 */
173 return sun_fdc->status1_82077;
174 case 1: /* FD_STATUS_1 */
175 return sun_fdc->status2_82077;
176 case 2: /* FD_DOR */
177 return sun_fdc->dor_82077;
178 case 3: /* FD_TDR */
179 return sun_fdc->tapectl_82077;
180 case 4: /* FD_STATUS */
181 return sun_fdc->status_82077 & ~STATUS_DMA;
182 case 5: /* FD_DATA */
183 return sun_fdc->data_82077;
184 case 7: /* FD_DIR */
185 return sun_read_dir();
186 };
187 panic("sun_82077_fd_inb: How did I get here?");
188}
189
190static void sun_82077_fd_outb(unsigned char value, int port)
191{
192 udelay(5);
193 switch(port & 7) {
194 default:
195 printk("floppy: Asked to write to unknown port %d\n", port);
196 panic("floppy: Port bolixed.");
197 case 2: /* FD_DOR */
198 sun_set_dor(value, 1);
199 break;
200 case 5: /* FD_DATA */
201 sun_fdc->data_82077 = value;
202 break;
203 case 7: /* FD_DCR */
204 sun_fdc->dcr_82077 = value;
205 break;
206 case 4: /* FD_STATUS */
207 sun_fdc->status_82077 = value;
208 break;
209 case 3: /* FD_TDR */
210 sun_fdc->tapectl_82077 = value;
211 break;
212 };
213 return;
214}
215
216/* For pseudo-dma (Sun floppy drives have no real DMA available to
217 * them so we must eat the data fifo bytes directly ourselves) we have
218 * three state variables. doing_pdma tells our inline low-level
219 * assembly floppy interrupt entry point whether it should sit and eat
220 * bytes from the fifo or just transfer control up to the higher level
221 * floppy interrupt c-code. I tried very hard but I could not get the
222 * pseudo-dma to work in c-code without getting many overruns and
223 * underruns. If non-zero, doing_pdma encodes the direction of
224 * the transfer for debugging. 1=read 2=write
225 */
226extern char *pdma_vaddr;
227extern unsigned long pdma_size;
228extern volatile int doing_pdma;
229
230/* This is software state */
231extern char *pdma_base;
232extern unsigned long pdma_areasize;
233
234/* Common routines to all controller types on the Sparc. */
235static inline void virtual_dma_init(void)
236{
237 /* nothing... */
238}
239
240static inline void sun_fd_disable_dma(void)
241{
242 doing_pdma = 0;
243 if (pdma_base) {
244 mmu_unlockarea(pdma_base, pdma_areasize);
245 pdma_base = NULL;
246 }
247}
248
249static inline void sun_fd_set_dma_mode(int mode)
250{
251 switch(mode) {
252 case DMA_MODE_READ:
253 doing_pdma = 1;
254 break;
255 case DMA_MODE_WRITE:
256 doing_pdma = 2;
257 break;
258 default:
259 printk("Unknown dma mode %d\n", mode);
260 panic("floppy: Giving up...");
261 }
262}
263
264static inline void sun_fd_set_dma_addr(char *buffer)
265{
266 pdma_vaddr = buffer;
267}
268
269static inline void sun_fd_set_dma_count(int length)
270{
271 pdma_size = length;
272}
273
274static inline void sun_fd_enable_dma(void)
275{
276 pdma_vaddr = mmu_lockarea(pdma_vaddr, pdma_size);
277 pdma_base = pdma_vaddr;
278 pdma_areasize = pdma_size;
279}
280
281/* Our low-level entry point in arch/sparc/kernel/entry.S */
282extern int sparc_floppy_request_irq(int irq, unsigned long flags,
283 irq_handler_t irq_handler);
284
285static int sun_fd_request_irq(void)
286{
287 static int once = 0;
288 int error;
289
290 if(!once) {
291 once = 1;
292 error = sparc_floppy_request_irq(FLOPPY_IRQ,
293 IRQF_DISABLED,
294 floppy_interrupt);
295 return ((error == 0) ? 0 : -1);
296 } else return 0;
297}
298
299static struct linux_prom_registers fd_regs[2];
300
301static int sun_floppy_init(void)
302{
303 char state[128];
304 int tnode, fd_node, num_regs;
305 struct resource r;
306
307 use_virtual_dma = 1;
308
309 FLOPPY_IRQ = 11;
310 /* Forget it if we aren't on a machine that could possibly
311 * ever have a floppy drive.
312 */
313 if((sparc_cpu_model != sun4c && sparc_cpu_model != sun4m) ||
314 ((idprom->id_machtype == (SM_SUN4C | SM_4C_SLC)) ||
315 (idprom->id_machtype == (SM_SUN4C | SM_4C_ELC)))) {
316 /* We certainly don't have a floppy controller. */
317 goto no_sun_fdc;
318 }
319 /* Well, try to find one. */
320 tnode = prom_getchild(prom_root_node);
321 fd_node = prom_searchsiblings(tnode, "obio");
322 if(fd_node != 0) {
323 tnode = prom_getchild(fd_node);
324 fd_node = prom_searchsiblings(tnode, "SUNW,fdtwo");
325 } else {
326 fd_node = prom_searchsiblings(tnode, "fd");
327 }
328 if(fd_node == 0) {
329 goto no_sun_fdc;
330 }
331
332 /* The sun4m lets us know if the controller is actually usable. */
333 if(sparc_cpu_model == sun4m &&
334 prom_getproperty(fd_node, "status", state, sizeof(state)) != -1) {
335 if(!strcmp(state, "disabled")) {
336 goto no_sun_fdc;
337 }
338 }
339 num_regs = prom_getproperty(fd_node, "reg", (char *) fd_regs, sizeof(fd_regs));
340 num_regs = (num_regs / sizeof(fd_regs[0]));
341 prom_apply_obio_ranges(fd_regs, num_regs);
342 memset(&r, 0, sizeof(r));
343 r.flags = fd_regs[0].which_io;
344 r.start = fd_regs[0].phys_addr;
345 sun_fdc = (struct sun_flpy_controller *)
346 sbus_ioremap(&r, 0, fd_regs[0].reg_size, "floppy");
347
348 /* Last minute sanity check... */
349 if(sun_fdc->status_82072 == 0xff) {
350 sun_fdc = NULL;
351 goto no_sun_fdc;
352 }
353
354 sun_fdops.fd_inb = sun_82077_fd_inb;
355 sun_fdops.fd_outb = sun_82077_fd_outb;
356 fdc_status = &sun_fdc->status_82077;
357
358 if (sun_fdc->dor_82077 == 0x80) {
359 sun_fdc->dor_82077 = 0x02;
360 if (sun_fdc->dor_82077 == 0x80) {
361 sun_fdops.fd_inb = sun_82072_fd_inb;
362 sun_fdops.fd_outb = sun_82072_fd_outb;
363 fdc_status = &sun_fdc->status_82072;
364 }
365 }
366
367 /* Success... */
368 allowed_drive_mask = 0x01;
369 return (int) sun_fdc;
370
371no_sun_fdc:
372 return -1;
373}
374
375static int sparc_eject(void)
376{
377 set_dor(0x00, 0xff, 0x90);
378 udelay(500);
379 set_dor(0x00, 0x6f, 0x00);
380 udelay(500);
381 return 0;
382}
383
384#define fd_eject(drive) sparc_eject()
385
386#define EXTRA_FLOPPY_PARAMS
387
388#endif /* !(__ASM_SPARC_FLOPPY_H) */
diff --git a/include/asm-sparc/floppy_64.h b/include/asm-sparc/floppy_64.h
new file mode 100644
index 000000000000..c39db1060bc7
--- /dev/null
+++ b/include/asm-sparc/floppy_64.h
@@ -0,0 +1,782 @@
1/* floppy.h: Sparc specific parts of the Floppy driver.
2 *
3 * Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net)
4 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
5 *
6 * Ultra/PCI support added: Sep 1997 Eddie C. Dost (ecd@skynet.be)
7 */
8
9#ifndef __ASM_SPARC64_FLOPPY_H
10#define __ASM_SPARC64_FLOPPY_H
11
12#include <linux/init.h>
13#include <linux/pci.h>
14
15#include <asm/page.h>
16#include <asm/pgtable.h>
17#include <asm/system.h>
18#include <asm/idprom.h>
19#include <asm/oplib.h>
20#include <asm/auxio.h>
21#include <asm/sbus.h>
22#include <asm/irq.h>
23
24
25/*
26 * Define this to enable exchanging drive 0 and 1 if only drive 1 is
27 * probed on PCI machines.
28 */
29#undef PCI_FDC_SWAP_DRIVES
30
31
32/* References:
33 * 1) Netbsd Sun floppy driver.
34 * 2) NCR 82077 controller manual
35 * 3) Intel 82077 controller manual
36 */
37struct sun_flpy_controller {
38 volatile unsigned char status1_82077; /* Auxiliary Status reg. 1 */
39 volatile unsigned char status2_82077; /* Auxiliary Status reg. 2 */
40 volatile unsigned char dor_82077; /* Digital Output reg. */
41 volatile unsigned char tapectl_82077; /* Tape Control reg */
42 volatile unsigned char status_82077; /* Main Status Register. */
43#define drs_82077 status_82077 /* Digital Rate Select reg. */
44 volatile unsigned char data_82077; /* Data fifo. */
45 volatile unsigned char ___unused;
46 volatile unsigned char dir_82077; /* Digital Input reg. */
47#define dcr_82077 dir_82077 /* Config Control reg. */
48};
49
50/* You'll only ever find one controller on an Ultra anyways. */
51static struct sun_flpy_controller *sun_fdc = (struct sun_flpy_controller *)-1;
52unsigned long fdc_status;
53static struct sbus_dev *floppy_sdev = NULL;
54
55struct sun_floppy_ops {
56 unsigned char (*fd_inb) (unsigned long port);
57 void (*fd_outb) (unsigned char value, unsigned long port);
58 void (*fd_enable_dma) (void);
59 void (*fd_disable_dma) (void);
60 void (*fd_set_dma_mode) (int);
61 void (*fd_set_dma_addr) (char *);
62 void (*fd_set_dma_count) (int);
63 unsigned int (*get_dma_residue) (void);
64 int (*fd_request_irq) (void);
65 void (*fd_free_irq) (void);
66 int (*fd_eject) (int);
67};
68
69static struct sun_floppy_ops sun_fdops;
70
71#define fd_inb(port) sun_fdops.fd_inb(port)
72#define fd_outb(value,port) sun_fdops.fd_outb(value,port)
73#define fd_enable_dma() sun_fdops.fd_enable_dma()
74#define fd_disable_dma() sun_fdops.fd_disable_dma()
75#define fd_request_dma() (0) /* nothing... */
76#define fd_free_dma() /* nothing... */
77#define fd_clear_dma_ff() /* nothing... */
78#define fd_set_dma_mode(mode) sun_fdops.fd_set_dma_mode(mode)
79#define fd_set_dma_addr(addr) sun_fdops.fd_set_dma_addr(addr)
80#define fd_set_dma_count(count) sun_fdops.fd_set_dma_count(count)
81#define get_dma_residue(x) sun_fdops.get_dma_residue()
82#define fd_cacheflush(addr, size) /* nothing... */
83#define fd_request_irq() sun_fdops.fd_request_irq()
84#define fd_free_irq() sun_fdops.fd_free_irq()
85#define fd_eject(drive) sun_fdops.fd_eject(drive)
86
87/* Super paranoid... */
88#undef HAVE_DISABLE_HLT
89
90static int sun_floppy_types[2] = { 0, 0 };
91
92/* Here is where we catch the floppy driver trying to initialize,
93 * therefore this is where we call the PROM device tree probing
94 * routine etc. on the Sparc.
95 */
96#define FLOPPY0_TYPE sun_floppy_init()
97#define FLOPPY1_TYPE sun_floppy_types[1]
98
99#define FDC1 ((unsigned long)sun_fdc)
100
101#define N_FDC 1
102#define N_DRIVE 8
103
104/* No 64k boundary crossing problems on the Sparc. */
105#define CROSS_64KB(a,s) (0)
106
107static unsigned char sun_82077_fd_inb(unsigned long port)
108{
109 udelay(5);
110 switch(port & 7) {
111 default:
112 printk("floppy: Asked to read unknown port %lx\n", port);
113 panic("floppy: Port bolixed.");
114 case 4: /* FD_STATUS */
115 return sbus_readb(&sun_fdc->status_82077) & ~STATUS_DMA;
116 case 5: /* FD_DATA */
117 return sbus_readb(&sun_fdc->data_82077);
118 case 7: /* FD_DIR */
119 /* XXX: Is DCL on 0x80 in sun4m? */
120 return sbus_readb(&sun_fdc->dir_82077);
121 };
122 panic("sun_82072_fd_inb: How did I get here?");
123}
124
125static void sun_82077_fd_outb(unsigned char value, unsigned long port)
126{
127 udelay(5);
128 switch(port & 7) {
129 default:
130 printk("floppy: Asked to write to unknown port %lx\n", port);
131 panic("floppy: Port bolixed.");
132 case 2: /* FD_DOR */
133 /* Happily, the 82077 has a real DOR register. */
134 sbus_writeb(value, &sun_fdc->dor_82077);
135 break;
136 case 5: /* FD_DATA */
137 sbus_writeb(value, &sun_fdc->data_82077);
138 break;
139 case 7: /* FD_DCR */
140 sbus_writeb(value, &sun_fdc->dcr_82077);
141 break;
142 case 4: /* FD_STATUS */
143 sbus_writeb(value, &sun_fdc->status_82077);
144 break;
145 };
146 return;
147}
148
149/* For pseudo-dma (Sun floppy drives have no real DMA available to
150 * them so we must eat the data fifo bytes directly ourselves) we have
151 * three state variables. doing_pdma tells our inline low-level
152 * assembly floppy interrupt entry point whether it should sit and eat
153 * bytes from the fifo or just transfer control up to the higher level
154 * floppy interrupt c-code. I tried very hard but I could not get the
155 * pseudo-dma to work in c-code without getting many overruns and
156 * underruns. If non-zero, doing_pdma encodes the direction of
157 * the transfer for debugging. 1=read 2=write
158 */
159unsigned char *pdma_vaddr;
160unsigned long pdma_size;
161volatile int doing_pdma = 0;
162
163/* This is software state */
164char *pdma_base = NULL;
165unsigned long pdma_areasize;
166
167/* Common routines to all controller types on the Sparc. */
168static void sun_fd_disable_dma(void)
169{
170 doing_pdma = 0;
171 if (pdma_base) {
172 mmu_unlockarea(pdma_base, pdma_areasize);
173 pdma_base = NULL;
174 }
175}
176
177static void sun_fd_set_dma_mode(int mode)
178{
179 switch(mode) {
180 case DMA_MODE_READ:
181 doing_pdma = 1;
182 break;
183 case DMA_MODE_WRITE:
184 doing_pdma = 2;
185 break;
186 default:
187 printk("Unknown dma mode %d\n", mode);
188 panic("floppy: Giving up...");
189 }
190}
191
192static void sun_fd_set_dma_addr(char *buffer)
193{
194 pdma_vaddr = buffer;
195}
196
197static void sun_fd_set_dma_count(int length)
198{
199 pdma_size = length;
200}
201
202static void sun_fd_enable_dma(void)
203{
204 pdma_vaddr = mmu_lockarea(pdma_vaddr, pdma_size);
205 pdma_base = pdma_vaddr;
206 pdma_areasize = pdma_size;
207}
208
209irqreturn_t sparc_floppy_irq(int irq, void *dev_cookie)
210{
211 if (likely(doing_pdma)) {
212 void __iomem *stat = (void __iomem *) fdc_status;
213 unsigned char *vaddr = pdma_vaddr;
214 unsigned long size = pdma_size;
215 u8 val;
216
217 while (size) {
218 val = readb(stat);
219 if (unlikely(!(val & 0x80))) {
220 pdma_vaddr = vaddr;
221 pdma_size = size;
222 return IRQ_HANDLED;
223 }
224 if (unlikely(!(val & 0x20))) {
225 pdma_vaddr = vaddr;
226 pdma_size = size;
227 doing_pdma = 0;
228 goto main_interrupt;
229 }
230 if (val & 0x40) {
231 /* read */
232 *vaddr++ = readb(stat + 1);
233 } else {
234 unsigned char data = *vaddr++;
235
236 /* write */
237 writeb(data, stat + 1);
238 }
239 size--;
240 }
241
242 pdma_vaddr = vaddr;
243 pdma_size = size;
244
245 /* Send Terminal Count pulse to floppy controller. */
246 val = readb(auxio_register);
247 val |= AUXIO_AUX1_FTCNT;
248 writeb(val, auxio_register);
249 val &= ~AUXIO_AUX1_FTCNT;
250 writeb(val, auxio_register);
251
252 doing_pdma = 0;
253 }
254
255main_interrupt:
256 return floppy_interrupt(irq, dev_cookie);
257}
258
259static int sun_fd_request_irq(void)
260{
261 static int once = 0;
262 int error;
263
264 if(!once) {
265 once = 1;
266
267 error = request_irq(FLOPPY_IRQ, sparc_floppy_irq,
268 IRQF_DISABLED, "floppy", NULL);
269
270 return ((error == 0) ? 0 : -1);
271 }
272 return 0;
273}
274
275static void sun_fd_free_irq(void)
276{
277}
278
279static unsigned int sun_get_dma_residue(void)
280{
281 /* XXX This isn't really correct. XXX */
282 return 0;
283}
284
285static int sun_fd_eject(int drive)
286{
287 set_dor(0x00, 0xff, 0x90);
288 udelay(500);
289 set_dor(0x00, 0x6f, 0x00);
290 udelay(500);
291 return 0;
292}
293
294#ifdef CONFIG_PCI
295#include <asm/ebus.h>
296#include <asm/ns87303.h>
297
298static struct ebus_dma_info sun_pci_fd_ebus_dma;
299static struct pci_dev *sun_pci_ebus_dev;
300static int sun_pci_broken_drive = -1;
301
302struct sun_pci_dma_op {
303 unsigned int addr;
304 int len;
305 int direction;
306 char *buf;
307};
308static struct sun_pci_dma_op sun_pci_dma_current = { -1U, 0, 0, NULL};
309static struct sun_pci_dma_op sun_pci_dma_pending = { -1U, 0, 0, NULL};
310
311extern irqreturn_t floppy_interrupt(int irq, void *dev_id);
312
313static unsigned char sun_pci_fd_inb(unsigned long port)
314{
315 udelay(5);
316 return inb(port);
317}
318
319static void sun_pci_fd_outb(unsigned char val, unsigned long port)
320{
321 udelay(5);
322 outb(val, port);
323}
324
325static void sun_pci_fd_broken_outb(unsigned char val, unsigned long port)
326{
327 udelay(5);
328 /*
329 * XXX: Due to SUN's broken floppy connector on AX and AXi
330 * we need to turn on MOTOR_0 also, if the floppy is
331 * jumpered to DS1 (like most PC floppies are). I hope
332 * this does not hurt correct hardware like the AXmp.
333 * (Eddie, Sep 12 1998).
334 */
335 if (port == ((unsigned long)sun_fdc) + 2) {
336 if (((val & 0x03) == sun_pci_broken_drive) && (val & 0x20)) {
337 val |= 0x10;
338 }
339 }
340 outb(val, port);
341}
342
343#ifdef PCI_FDC_SWAP_DRIVES
344static void sun_pci_fd_lde_broken_outb(unsigned char val, unsigned long port)
345{
346 udelay(5);
347 /*
348 * XXX: Due to SUN's broken floppy connector on AX and AXi
349 * we need to turn on MOTOR_0 also, if the floppy is
350 * jumpered to DS1 (like most PC floppies are). I hope
351 * this does not hurt correct hardware like the AXmp.
352 * (Eddie, Sep 12 1998).
353 */
354 if (port == ((unsigned long)sun_fdc) + 2) {
355 if (((val & 0x03) == sun_pci_broken_drive) && (val & 0x10)) {
356 val &= ~(0x03);
357 val |= 0x21;
358 }
359 }
360 outb(val, port);
361}
362#endif /* PCI_FDC_SWAP_DRIVES */
363
364static void sun_pci_fd_enable_dma(void)
365{
366 BUG_ON((NULL == sun_pci_dma_pending.buf) ||
367 (0 == sun_pci_dma_pending.len) ||
368 (0 == sun_pci_dma_pending.direction));
369
370 sun_pci_dma_current.buf = sun_pci_dma_pending.buf;
371 sun_pci_dma_current.len = sun_pci_dma_pending.len;
372 sun_pci_dma_current.direction = sun_pci_dma_pending.direction;
373
374 sun_pci_dma_pending.buf = NULL;
375 sun_pci_dma_pending.len = 0;
376 sun_pci_dma_pending.direction = 0;
377 sun_pci_dma_pending.addr = -1U;
378
379 sun_pci_dma_current.addr =
380 pci_map_single(sun_pci_ebus_dev,
381 sun_pci_dma_current.buf,
382 sun_pci_dma_current.len,
383 sun_pci_dma_current.direction);
384
385 ebus_dma_enable(&sun_pci_fd_ebus_dma, 1);
386
387 if (ebus_dma_request(&sun_pci_fd_ebus_dma,
388 sun_pci_dma_current.addr,
389 sun_pci_dma_current.len))
390 BUG();
391}
392
393static void sun_pci_fd_disable_dma(void)
394{
395 ebus_dma_enable(&sun_pci_fd_ebus_dma, 0);
396 if (sun_pci_dma_current.addr != -1U)
397 pci_unmap_single(sun_pci_ebus_dev,
398 sun_pci_dma_current.addr,
399 sun_pci_dma_current.len,
400 sun_pci_dma_current.direction);
401 sun_pci_dma_current.addr = -1U;
402}
403
404static void sun_pci_fd_set_dma_mode(int mode)
405{
406 if (mode == DMA_MODE_WRITE)
407 sun_pci_dma_pending.direction = PCI_DMA_TODEVICE;
408 else
409 sun_pci_dma_pending.direction = PCI_DMA_FROMDEVICE;
410
411 ebus_dma_prepare(&sun_pci_fd_ebus_dma, mode != DMA_MODE_WRITE);
412}
413
414static void sun_pci_fd_set_dma_count(int length)
415{
416 sun_pci_dma_pending.len = length;
417}
418
419static void sun_pci_fd_set_dma_addr(char *buffer)
420{
421 sun_pci_dma_pending.buf = buffer;
422}
423
424static unsigned int sun_pci_get_dma_residue(void)
425{
426 return ebus_dma_residue(&sun_pci_fd_ebus_dma);
427}
428
429static int sun_pci_fd_request_irq(void)
430{
431 return ebus_dma_irq_enable(&sun_pci_fd_ebus_dma, 1);
432}
433
434static void sun_pci_fd_free_irq(void)
435{
436 ebus_dma_irq_enable(&sun_pci_fd_ebus_dma, 0);
437}
438
439static int sun_pci_fd_eject(int drive)
440{
441 return -EINVAL;
442}
443
444void sun_pci_fd_dma_callback(struct ebus_dma_info *p, int event, void *cookie)
445{
446 floppy_interrupt(0, NULL);
447}
448
449/*
450 * Floppy probing, we'd like to use /dev/fd0 for a single Floppy on PCI,
451 * even if this is configured using DS1, thus looks like /dev/fd1 with
452 * the cabling used in Ultras.
453 */
454#define DOR (port + 2)
455#define MSR (port + 4)
456#define FIFO (port + 5)
457
458static void sun_pci_fd_out_byte(unsigned long port, unsigned char val,
459 unsigned long reg)
460{
461 unsigned char status;
462 int timeout = 1000;
463
464 while (!((status = inb(MSR)) & 0x80) && --timeout)
465 udelay(100);
466 outb(val, reg);
467}
468
469static unsigned char sun_pci_fd_sensei(unsigned long port)
470{
471 unsigned char result[2] = { 0x70, 0x00 };
472 unsigned char status;
473 int i = 0;
474
475 sun_pci_fd_out_byte(port, 0x08, FIFO);
476 do {
477 int timeout = 1000;
478
479 while (!((status = inb(MSR)) & 0x80) && --timeout)
480 udelay(100);
481
482 if (!timeout)
483 break;
484
485 if ((status & 0xf0) == 0xd0)
486 result[i++] = inb(FIFO);
487 else
488 break;
489 } while (i < 2);
490
491 return result[0];
492}
493
494static void sun_pci_fd_reset(unsigned long port)
495{
496 unsigned char mask = 0x00;
497 unsigned char status;
498 int timeout = 10000;
499
500 outb(0x80, MSR);
501 do {
502 status = sun_pci_fd_sensei(port);
503 if ((status & 0xc0) == 0xc0)
504 mask |= 1 << (status & 0x03);
505 else
506 udelay(100);
507 } while ((mask != 0x0f) && --timeout);
508}
509
510static int sun_pci_fd_test_drive(unsigned long port, int drive)
511{
512 unsigned char status, data;
513 int timeout = 1000;
514 int ready;
515
516 sun_pci_fd_reset(port);
517
518 data = (0x10 << drive) | 0x0c | drive;
519 sun_pci_fd_out_byte(port, data, DOR);
520
521 sun_pci_fd_out_byte(port, 0x07, FIFO);
522 sun_pci_fd_out_byte(port, drive & 0x03, FIFO);
523
524 do {
525 udelay(100);
526 status = sun_pci_fd_sensei(port);
527 } while (((status & 0xc0) == 0x80) && --timeout);
528
529 if (!timeout)
530 ready = 0;
531 else
532 ready = (status & 0x10) ? 0 : 1;
533
534 sun_pci_fd_reset(port);
535 return ready;
536}
537#undef FIFO
538#undef MSR
539#undef DOR
540
541#endif /* CONFIG_PCI */
542
543#ifdef CONFIG_PCI
544static int __init ebus_fdthree_p(struct linux_ebus_device *edev)
545{
546 if (!strcmp(edev->prom_node->name, "fdthree"))
547 return 1;
548 if (!strcmp(edev->prom_node->name, "floppy")) {
549 const char *compat;
550
551 compat = of_get_property(edev->prom_node,
552 "compatible", NULL);
553 if (compat && !strcmp(compat, "fdthree"))
554 return 1;
555 }
556 return 0;
557}
558#endif
559
560static unsigned long __init sun_floppy_init(void)
561{
562 char state[128];
563 struct sbus_bus *bus;
564 struct sbus_dev *sdev = NULL;
565 static int initialized = 0;
566
567 if (initialized)
568 return sun_floppy_types[0];
569 initialized = 1;
570
571 for_all_sbusdev (sdev, bus) {
572 if (!strcmp(sdev->prom_name, "SUNW,fdtwo"))
573 break;
574 }
575 if(sdev) {
576 floppy_sdev = sdev;
577 FLOPPY_IRQ = sdev->irqs[0];
578 } else {
579#ifdef CONFIG_PCI
580 struct linux_ebus *ebus;
581 struct linux_ebus_device *edev = NULL;
582 unsigned long config = 0;
583 void __iomem *auxio_reg;
584 const char *state_prop;
585
586 for_each_ebus(ebus) {
587 for_each_ebusdev(edev, ebus) {
588 if (ebus_fdthree_p(edev))
589 goto ebus_done;
590 }
591 }
592 ebus_done:
593 if (!edev)
594 return 0;
595
596 state_prop = of_get_property(edev->prom_node, "status", NULL);
597 if (state_prop && !strncmp(state_prop, "disabled", 8))
598 return 0;
599
600 FLOPPY_IRQ = edev->irqs[0];
601
602 /* Make sure the high density bit is set, some systems
603 * (most notably Ultra5/Ultra10) come up with it clear.
604 */
605 auxio_reg = (void __iomem *) edev->resource[2].start;
606 writel(readl(auxio_reg)|0x2, auxio_reg);
607
608 sun_pci_ebus_dev = ebus->self;
609
610 spin_lock_init(&sun_pci_fd_ebus_dma.lock);
611
612 /* XXX ioremap */
613 sun_pci_fd_ebus_dma.regs = (void __iomem *)
614 edev->resource[1].start;
615 if (!sun_pci_fd_ebus_dma.regs)
616 return 0;
617
618 sun_pci_fd_ebus_dma.flags = (EBUS_DMA_FLAG_USE_EBDMA_HANDLER |
619 EBUS_DMA_FLAG_TCI_DISABLE);
620 sun_pci_fd_ebus_dma.callback = sun_pci_fd_dma_callback;
621 sun_pci_fd_ebus_dma.client_cookie = NULL;
622 sun_pci_fd_ebus_dma.irq = FLOPPY_IRQ;
623 strcpy(sun_pci_fd_ebus_dma.name, "floppy");
624 if (ebus_dma_register(&sun_pci_fd_ebus_dma))
625 return 0;
626
627 /* XXX ioremap */
628 sun_fdc = (struct sun_flpy_controller *)edev->resource[0].start;
629
630 sun_fdops.fd_inb = sun_pci_fd_inb;
631 sun_fdops.fd_outb = sun_pci_fd_outb;
632
633 can_use_virtual_dma = use_virtual_dma = 0;
634 sun_fdops.fd_enable_dma = sun_pci_fd_enable_dma;
635 sun_fdops.fd_disable_dma = sun_pci_fd_disable_dma;
636 sun_fdops.fd_set_dma_mode = sun_pci_fd_set_dma_mode;
637 sun_fdops.fd_set_dma_addr = sun_pci_fd_set_dma_addr;
638 sun_fdops.fd_set_dma_count = sun_pci_fd_set_dma_count;
639 sun_fdops.get_dma_residue = sun_pci_get_dma_residue;
640
641 sun_fdops.fd_request_irq = sun_pci_fd_request_irq;
642 sun_fdops.fd_free_irq = sun_pci_fd_free_irq;
643
644 sun_fdops.fd_eject = sun_pci_fd_eject;
645
646 fdc_status = (unsigned long) &sun_fdc->status_82077;
647
648 /*
649 * XXX: Find out on which machines this is really needed.
650 */
651 if (1) {
652 sun_pci_broken_drive = 1;
653 sun_fdops.fd_outb = sun_pci_fd_broken_outb;
654 }
655
656 allowed_drive_mask = 0;
657 if (sun_pci_fd_test_drive((unsigned long)sun_fdc, 0))
658 sun_floppy_types[0] = 4;
659 if (sun_pci_fd_test_drive((unsigned long)sun_fdc, 1))
660 sun_floppy_types[1] = 4;
661
662 /*
663 * Find NS87303 SuperIO config registers (through ecpp).
664 */
665 for_each_ebus(ebus) {
666 for_each_ebusdev(edev, ebus) {
667 if (!strcmp(edev->prom_node->name, "ecpp")) {
668 config = edev->resource[1].start;
669 goto config_done;
670 }
671 }
672 }
673 config_done:
674
675 /*
676 * Sanity check, is this really the NS87303?
677 */
678 switch (config & 0x3ff) {
679 case 0x02e:
680 case 0x15c:
681 case 0x26e:
682 case 0x398:
683 break;
684 default:
685 config = 0;
686 }
687
688 if (!config)
689 return sun_floppy_types[0];
690
691 /* Enable PC-AT mode. */
692 ns87303_modify(config, ASC, 0, 0xc0);
693
694#ifdef PCI_FDC_SWAP_DRIVES
695 /*
696 * If only Floppy 1 is present, swap drives.
697 */
698 if (!sun_floppy_types[0] && sun_floppy_types[1]) {
699 /*
700 * Set the drive exchange bit in FCR on NS87303,
701 * make sure other bits are sane before doing so.
702 */
703 ns87303_modify(config, FER, FER_EDM, 0);
704 ns87303_modify(config, ASC, ASC_DRV2_SEL, 0);
705 ns87303_modify(config, FCR, 0, FCR_LDE);
706
707 config = sun_floppy_types[0];
708 sun_floppy_types[0] = sun_floppy_types[1];
709 sun_floppy_types[1] = config;
710
711 if (sun_pci_broken_drive != -1) {
712 sun_pci_broken_drive = 1 - sun_pci_broken_drive;
713 sun_fdops.fd_outb = sun_pci_fd_lde_broken_outb;
714 }
715 }
716#endif /* PCI_FDC_SWAP_DRIVES */
717
718 return sun_floppy_types[0];
719#else
720 return 0;
721#endif
722 }
723 prom_getproperty(sdev->prom_node, "status", state, sizeof(state));
724 if(!strncmp(state, "disabled", 8))
725 return 0;
726
727 /*
728 * We cannot do sbus_ioremap here: it does request_region,
729 * which the generic floppy driver tries to do once again.
730 * But we must use the sdev resource values as they have
731 * had parent ranges applied.
732 */
733 sun_fdc = (struct sun_flpy_controller *)
734 (sdev->resource[0].start +
735 ((sdev->resource[0].flags & 0x1ffUL) << 32UL));
736
737 /* Last minute sanity check... */
738 if(sbus_readb(&sun_fdc->status1_82077) == 0xff) {
739 sun_fdc = (struct sun_flpy_controller *)-1;
740 return 0;
741 }
742
743 sun_fdops.fd_inb = sun_82077_fd_inb;
744 sun_fdops.fd_outb = sun_82077_fd_outb;
745
746 can_use_virtual_dma = use_virtual_dma = 1;
747 sun_fdops.fd_enable_dma = sun_fd_enable_dma;
748 sun_fdops.fd_disable_dma = sun_fd_disable_dma;
749 sun_fdops.fd_set_dma_mode = sun_fd_set_dma_mode;
750 sun_fdops.fd_set_dma_addr = sun_fd_set_dma_addr;
751 sun_fdops.fd_set_dma_count = sun_fd_set_dma_count;
752 sun_fdops.get_dma_residue = sun_get_dma_residue;
753
754 sun_fdops.fd_request_irq = sun_fd_request_irq;
755 sun_fdops.fd_free_irq = sun_fd_free_irq;
756
757 sun_fdops.fd_eject = sun_fd_eject;
758
759 fdc_status = (unsigned long) &sun_fdc->status_82077;
760
761 /* Success... */
762 allowed_drive_mask = 0x01;
763 sun_floppy_types[0] = 4;
764 sun_floppy_types[1] = 0;
765
766 return sun_floppy_types[0];
767}
768
769#define EXTRA_FLOPPY_PARAMS
770
771static DEFINE_SPINLOCK(dma_spin_lock);
772
773#define claim_dma_lock() \
774({ unsigned long flags; \
775 spin_lock_irqsave(&dma_spin_lock, flags); \
776 flags; \
777})
778
779#define release_dma_lock(__flags) \
780 spin_unlock_irqrestore(&dma_spin_lock, __flags);
781
782#endif /* !(__ASM_SPARC64_FLOPPY_H) */
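
The claim_dma_lock()/release_dma_lock() macros at the end of the hunk above simply wrap dma_spin_lock with spin_lock_irqsave()/spin_unlock_irqrestore(). A hedged sketch of how the generic floppy driver is expected to bracket the fd_* DMA setup macros with that lock; the function name example_setup_dma is illustrative only:

    static void example_setup_dma(char *buf, int len, int mode)
    {
            unsigned long flags;

            flags = claim_dma_lock();       /* spin_lock_irqsave(&dma_spin_lock, flags) */
            fd_disable_dma();
            fd_set_dma_mode(mode);          /* DMA_MODE_READ or DMA_MODE_WRITE */
            fd_set_dma_addr(buf);
            fd_set_dma_count(len);
            fd_enable_dma();
            release_dma_lock(flags);        /* spin_unlock_irqrestore(&dma_spin_lock, flags) */
    }
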
diff --git a/include/asm-sparc/fpumacro.h b/include/asm-sparc/fpumacro.h
new file mode 100644
index 000000000000..cc463fec806f
--- /dev/null
+++ b/include/asm-sparc/fpumacro.h
@@ -0,0 +1,33 @@
1/* fpumacro.h: FPU related macros.
2 *
3 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
4 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#ifndef _SPARC64_FPUMACRO_H
8#define _SPARC64_FPUMACRO_H
9
10#include <asm/asi.h>
11#include <asm/visasm.h>
12
13struct fpustate {
14 u32 regs[64];
15};
16
17#define FPUSTATE (struct fpustate *)(current_thread_info()->fpregs)
18
19static inline unsigned long fprs_read(void)
20{
21 unsigned long retval;
22
23 __asm__ __volatile__("rd %%fprs, %0" : "=r" (retval));
24
25 return retval;
26}
27
28static inline void fprs_write(unsigned long val)
29{
30 __asm__ __volatile__("wr %0, 0x0, %%fprs" : : "r" (val));
31}
32
33#endif /* !(_SPARC64_FPUMACRO_H) */
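
fprs_read() and fprs_write() above are thin wrappers around the %fprs register. A minimal sketch of the save/enable/restore pattern they support, assuming FPRS_FEF (the FPU-enable bit from the sparc64 pstate definitions) is in scope; the function name is illustrative:

    static void example_with_fpu(void (*vis_work)(void))
    {
            unsigned long saved_fprs = fprs_read();

            fprs_write(FPRS_FEF);           /* enable the FPU for this block */
            vis_work();                     /* ... touch FP/VIS registers here ... */
            fprs_write(saved_fprs);         /* restore the caller's %fprs */
    }
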
diff --git a/include/asm-sparc/futex.h b/include/asm-sparc/futex.h
index 6a332a9f099c..c6a9f038c531 100644
--- a/include/asm-sparc/futex.h
+++ b/include/asm-sparc/futex.h
@@ -1,6 +1,8 @@
-#ifndef _ASM_FUTEX_H
-#define _ASM_FUTEX_H
-
-#include <asm-generic/futex.h>
-
+#ifndef ___ASM_SPARC_FUTEX_H
+#define ___ASM_SPARC_FUTEX_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm-sparc/futex_64.h>
+#else
+#include <asm-sparc/futex_32.h>
+#endif
 #endif
diff --git a/include/asm-sparc/futex_32.h b/include/asm-sparc/futex_32.h
new file mode 100644
index 000000000000..6a332a9f099c
--- /dev/null
+++ b/include/asm-sparc/futex_32.h
@@ -0,0 +1,6 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#include <asm-generic/futex.h>
5
6#endif
diff --git a/include/asm-sparc/futex_64.h b/include/asm-sparc/futex_64.h
new file mode 100644
index 000000000000..d8378935ae90
--- /dev/null
+++ b/include/asm-sparc/futex_64.h
@@ -0,0 +1,110 @@
1#ifndef _SPARC64_FUTEX_H
2#define _SPARC64_FUTEX_H
3
4#include <linux/futex.h>
5#include <linux/uaccess.h>
6#include <asm/errno.h>
7#include <asm/system.h>
8
9#define __futex_cas_op(insn, ret, oldval, uaddr, oparg) \
10 __asm__ __volatile__( \
11 "\n1: lduwa [%3] %%asi, %2\n" \
12 " " insn "\n" \
13 "2: casa [%3] %%asi, %2, %1\n" \
14 " cmp %2, %1\n" \
15 " bne,pn %%icc, 1b\n" \
16 " mov 0, %0\n" \
17 "3:\n" \
18 " .section .fixup,#alloc,#execinstr\n" \
19 " .align 4\n" \
20 "4: sethi %%hi(3b), %0\n" \
21 " jmpl %0 + %%lo(3b), %%g0\n" \
22 " mov %5, %0\n" \
23 " .previous\n" \
24 " .section __ex_table,\"a\"\n" \
25 " .align 4\n" \
26 " .word 1b, 4b\n" \
27 " .word 2b, 4b\n" \
28 " .previous\n" \
29 : "=&r" (ret), "=&r" (oldval), "=&r" (tem) \
30 : "r" (uaddr), "r" (oparg), "i" (-EFAULT) \
31 : "memory")
32
33static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
34{
35 int op = (encoded_op >> 28) & 7;
36 int cmp = (encoded_op >> 24) & 15;
37 int oparg = (encoded_op << 8) >> 20;
38 int cmparg = (encoded_op << 20) >> 20;
39 int oldval = 0, ret, tem;
40
41 if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(int))))
42 return -EFAULT;
43 if (unlikely((((unsigned long) uaddr) & 0x3UL)))
44 return -EINVAL;
45
46 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
47 oparg = 1 << oparg;
48
49 pagefault_disable();
50
51 switch (op) {
52 case FUTEX_OP_SET:
53 __futex_cas_op("mov\t%4, %1", ret, oldval, uaddr, oparg);
54 break;
55 case FUTEX_OP_ADD:
56 __futex_cas_op("add\t%2, %4, %1", ret, oldval, uaddr, oparg);
57 break;
58 case FUTEX_OP_OR:
59 __futex_cas_op("or\t%2, %4, %1", ret, oldval, uaddr, oparg);
60 break;
61 case FUTEX_OP_ANDN:
62 __futex_cas_op("and\t%2, %4, %1", ret, oldval, uaddr, oparg);
63 break;
64 case FUTEX_OP_XOR:
65 __futex_cas_op("xor\t%2, %4, %1", ret, oldval, uaddr, oparg);
66 break;
67 default:
68 ret = -ENOSYS;
69 }
70
71 pagefault_enable();
72
73 if (!ret) {
74 switch (cmp) {
75 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
76 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
77 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
78 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
79 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
80 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
81 default: ret = -ENOSYS;
82 }
83 }
84 return ret;
85}
86
87static inline int
88futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
89{
90 __asm__ __volatile__(
91 "\n1: casa [%3] %%asi, %2, %0\n"
92 "2:\n"
93 " .section .fixup,#alloc,#execinstr\n"
94 " .align 4\n"
95 "3: sethi %%hi(2b), %0\n"
96 " jmpl %0 + %%lo(2b), %%g0\n"
97 " mov %4, %0\n"
98 " .previous\n"
99 " .section __ex_table,\"a\"\n"
100 " .align 4\n"
101 " .word 1b, 3b\n"
102 " .previous\n"
103 : "=r" (newval)
104 : "0" (newval), "r" (oldval), "r" (uaddr), "i" (-EFAULT)
105 : "memory");
106
107 return newval;
108}
109
110#endif /* !(_SPARC64_FUTEX_H) */
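
futex_atomic_op_inuser() above unpacks encoded_op purely with shifts: the operation sits in bits 31:28, the comparison in bits 27:24, and two sign-extended 12-bit arguments in bits 23:12 and 11:0. A standalone sketch (not part of the patch) that packs a word the same way and decodes it with the identical shift expressions, relying as the kernel code does on arithmetic right shifts of int:

    #include <stdio.h>

    static int encode_futex_op(int op, int cmp, int oparg, int cmparg)
    {
            return (op << 28) | (cmp << 24) |
                   ((oparg & 0xfff) << 12) | (cmparg & 0xfff);
    }

    int main(void)
    {
            /* op 1 is FUTEX_OP_ADD; 3 is just an arbitrary comparison selector */
            int encoded_op = encode_futex_op(1, 3, 16, 0);

            printf("op=%d cmp=%d oparg=%d cmparg=%d\n",
                   (encoded_op >> 28) & 7,
                   (encoded_op >> 24) & 15,
                   (encoded_op << 8) >> 20,
                   (encoded_op << 20) >> 20);
            return 0;   /* prints: op=1 cmp=3 oparg=16 cmparg=0 */
    }
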
diff --git a/include/asm-sparc/hardirq.h b/include/asm-sparc/hardirq.h
index 4f63ed8df551..156478773100 100644
--- a/include/asm-sparc/hardirq.h
+++ b/include/asm-sparc/hardirq.h
@@ -1,23 +1,8 @@
-/* hardirq.h: 32-bit Sparc hard IRQ support.
- *
- * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1998-2000 Anton Blanchard (anton@samba.org)
- */
-
-#ifndef __SPARC_HARDIRQ_H
-#define __SPARC_HARDIRQ_H
-
-#include <linux/threads.h>
-#include <linux/spinlock.h>
-#include <linux/cache.h>
-
-/* entry.S is sensitive to the offsets of these fields */ /* XXX P3 Is it? */
-typedef struct {
-        unsigned int __softirq_pending;
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-
-#define HARDIRQ_BITS 8
-
-#endif /* __SPARC_HARDIRQ_H */
+#ifndef ___ASM_SPARC_HARDIRQ_H
+#define ___ASM_SPARC_HARDIRQ_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm-sparc/hardirq_64.h>
+#else
+#include <asm-sparc/hardirq_32.h>
+#endif
+#endif
diff --git a/include/asm-sparc/hardirq_32.h b/include/asm-sparc/hardirq_32.h
new file mode 100644
index 000000000000..4f63ed8df551
--- /dev/null
+++ b/include/asm-sparc/hardirq_32.h
@@ -0,0 +1,23 @@
1/* hardirq.h: 32-bit Sparc hard IRQ support.
2 *
3 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
4 * Copyright (C) 1998-2000 Anton Blanchard (anton@samba.org)
5 */
6
7#ifndef __SPARC_HARDIRQ_H
8#define __SPARC_HARDIRQ_H
9
10#include <linux/threads.h>
11#include <linux/spinlock.h>
12#include <linux/cache.h>
13
14/* entry.S is sensitive to the offsets of these fields */ /* XXX P3 Is it? */
15typedef struct {
16 unsigned int __softirq_pending;
17} ____cacheline_aligned irq_cpustat_t;
18
19#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
20
21#define HARDIRQ_BITS 8
22
23#endif /* __SPARC_HARDIRQ_H */
diff --git a/include/asm-sparc/hardirq_64.h b/include/asm-sparc/hardirq_64.h
new file mode 100644
index 000000000000..7c29fd1a87aa
--- /dev/null
+++ b/include/asm-sparc/hardirq_64.h
@@ -0,0 +1,19 @@
1/* hardirq.h: 64-bit Sparc hard IRQ support.
2 *
3 * Copyright (C) 1997, 1998, 2005 David S. Miller (davem@davemloft.net)
4 */
5
6#ifndef __SPARC64_HARDIRQ_H
7#define __SPARC64_HARDIRQ_H
8
9#include <asm/cpudata.h>
10
11#define __ARCH_IRQ_STAT
12#define local_softirq_pending() \
13 (local_cpu_data().__softirq_pending)
14
15void ack_bad_irq(unsigned int irq);
16
17#define HARDIRQ_BITS 8
18
19#endif /* !(__SPARC64_HARDIRQ_H) */
diff --git a/include/asm-sparc/head.h b/include/asm-sparc/head.h
index 7c35491a8b53..14652abdea31 100644
--- a/include/asm-sparc/head.h
+++ b/include/asm-sparc/head.h
@@ -1,102 +1,8 @@
-#ifndef __SPARC_HEAD_H
-#define __SPARC_HEAD_H
-
-#define KERNBASE 0xf0000000 /* First address the kernel will eventually be */
-#define LOAD_ADDR 0x4000 /* prom jumps to us here unless this is elf /boot */
-#define SUN4C_SEGSZ (1 << 18)
-#define SRMMU_L1_KBASE_OFFSET ((KERNBASE>>24)<<2) /* Used in boot remapping. */
-#define INTS_ENAB 0x01 /* entry.S uses this. */
-
-#define SUN4_PROM_VECTOR 0xFFE81000 /* SUN4 PROM needs to be hardwired */
-
-#define WRITE_PAUSE nop; nop; nop; /* Have to do this after %wim/%psr chg */
-#define NOP_INSN 0x01000000 /* Used to patch sparc_save_state */
-
-/* Here are some trap goodies */
-
-/* Generic trap entry. */
-#define TRAP_ENTRY(type, label) \
-        rd %psr, %l0; b label; rd %wim, %l3; nop;
-
-/* Data/text faults. Defaults to sun4c version at boot time. */
-#define SPARC_TFAULT rd %psr, %l0; rd %wim, %l3; b sun4c_fault; mov 1, %l7;
-#define SPARC_DFAULT rd %psr, %l0; rd %wim, %l3; b sun4c_fault; mov 0, %l7;
-#define SRMMU_TFAULT rd %psr, %l0; rd %wim, %l3; b srmmu_fault; mov 1, %l7;
-#define SRMMU_DFAULT rd %psr, %l0; rd %wim, %l3; b srmmu_fault; mov 0, %l7;
-
-/* This is for traps we should NEVER get. */
-#define BAD_TRAP(num) \
-        rd %psr, %l0; mov num, %l7; b bad_trap_handler; rd %wim, %l3;
-
-/* This is for traps when we want just skip the instruction which caused it */
-#define SKIP_TRAP(type, name) \
-        jmpl %l2, %g0; rett %l2 + 4; nop; nop;
-
-/* Notice that for the system calls we pull a trick. We load up a
- * different pointer to the system call vector table in %l7, but call
- * the same generic system call low-level entry point. The trap table
- * entry sequences are also HyperSparc pipeline friendly ;-)
- */
-
-/* Software trap for Linux system calls. */
-#define LINUX_SYSCALL_TRAP \
-        sethi %hi(sys_call_table), %l7; \
-        or %l7, %lo(sys_call_table), %l7; \
-        b linux_sparc_syscall; \
-        rd %psr, %l0;
-
-#define BREAKPOINT_TRAP \
-        b breakpoint_trap; \
-        rd %psr,%l0; \
-        nop; \
-        nop;
-
-#ifdef CONFIG_KGDB
-#define KGDB_TRAP(num) \
-        b kgdb_trap_low; \
-        rd %psr,%l0; \
-        nop; \
-        nop;
-#else
-#define KGDB_TRAP(num) \
-        BAD_TRAP(num)
-#endif
-
-/* The Get Condition Codes software trap for userland. */
-#define GETCC_TRAP \
-        b getcc_trap_handler; mov %psr, %l0; nop; nop;
-
-/* The Set Condition Codes software trap for userland. */
-#define SETCC_TRAP \
-        b setcc_trap_handler; mov %psr, %l0; nop; nop;
-
-/* The Get PSR software trap for userland. */
-#define GETPSR_TRAP \
-        mov %psr, %i0; jmp %l2; rett %l2 + 4; nop;
-
-/* This is for hard interrupts from level 1-14, 15 is non-maskable (nmi) and
- * gets handled with another macro.
- */
-#define TRAP_ENTRY_INTERRUPT(int_level) \
-        mov int_level, %l7; rd %psr, %l0; b real_irq_entry; rd %wim, %l3;
-
-/* NMI's (Non Maskable Interrupts) are special, you can't keep them
- * from coming in, and basically if you get one, the shows over. ;(
- * On the sun4c they are usually asynchronous memory errors, on the
- * the sun4m they could be either due to mem errors or a software
- * initiated interrupt from the prom/kern on an SMP box saying "I
- * command you to do CPU tricks, read your mailbox for more info."
- */
-#define NMI_TRAP \
-        rd %wim, %l3; b linux_trap_nmi_sun4c; mov %psr, %l0; nop;
-
-/* Window overflows/underflows are special and we need to try to be as
- * efficient as possible here....
- */
-#define WINDOW_SPILL \
-        rd %psr, %l0; rd %wim, %l3; b spill_window_entry; andcc %l0, PSR_PS, %g0;
-
-#define WINDOW_FILL \
-        rd %psr, %l0; rd %wim, %l3; b fill_window_entry; andcc %l0, PSR_PS, %g0;
-
-#endif /* __SPARC_HEAD_H */
+#ifndef ___ASM_SPARC_HEAD_H
+#define ___ASM_SPARC_HEAD_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm-sparc/head_64.h>
+#else
+#include <asm-sparc/head_32.h>
+#endif
+#endif
diff --git a/include/asm-sparc/head_32.h b/include/asm-sparc/head_32.h
new file mode 100644
index 000000000000..7c35491a8b53
--- /dev/null
+++ b/include/asm-sparc/head_32.h
@@ -0,0 +1,102 @@
1#ifndef __SPARC_HEAD_H
2#define __SPARC_HEAD_H
3
4#define KERNBASE 0xf0000000 /* First address the kernel will eventually be */
5#define LOAD_ADDR 0x4000 /* prom jumps to us here unless this is elf /boot */
6#define SUN4C_SEGSZ (1 << 18)
7#define SRMMU_L1_KBASE_OFFSET ((KERNBASE>>24)<<2) /* Used in boot remapping. */
8#define INTS_ENAB 0x01 /* entry.S uses this. */
9
10#define SUN4_PROM_VECTOR 0xFFE81000 /* SUN4 PROM needs to be hardwired */
11
12#define WRITE_PAUSE nop; nop; nop; /* Have to do this after %wim/%psr chg */
13#define NOP_INSN 0x01000000 /* Used to patch sparc_save_state */
14
15/* Here are some trap goodies */
16
17/* Generic trap entry. */
18#define TRAP_ENTRY(type, label) \
19 rd %psr, %l0; b label; rd %wim, %l3; nop;
20
21/* Data/text faults. Defaults to sun4c version at boot time. */
22#define SPARC_TFAULT rd %psr, %l0; rd %wim, %l3; b sun4c_fault; mov 1, %l7;
23#define SPARC_DFAULT rd %psr, %l0; rd %wim, %l3; b sun4c_fault; mov 0, %l7;
24#define SRMMU_TFAULT rd %psr, %l0; rd %wim, %l3; b srmmu_fault; mov 1, %l7;
25#define SRMMU_DFAULT rd %psr, %l0; rd %wim, %l3; b srmmu_fault; mov 0, %l7;
26
27/* This is for traps we should NEVER get. */
28#define BAD_TRAP(num) \
29 rd %psr, %l0; mov num, %l7; b bad_trap_handler; rd %wim, %l3;
30
31/* This is for traps when we want just skip the instruction which caused it */
32#define SKIP_TRAP(type, name) \
33 jmpl %l2, %g0; rett %l2 + 4; nop; nop;
34
35/* Notice that for the system calls we pull a trick. We load up a
36 * different pointer to the system call vector table in %l7, but call
37 * the same generic system call low-level entry point. The trap table
38 * entry sequences are also HyperSparc pipeline friendly ;-)
39 */
40
41/* Software trap for Linux system calls. */
42#define LINUX_SYSCALL_TRAP \
43 sethi %hi(sys_call_table), %l7; \
44 or %l7, %lo(sys_call_table), %l7; \
45 b linux_sparc_syscall; \
46 rd %psr, %l0;
47
48#define BREAKPOINT_TRAP \
49 b breakpoint_trap; \
50 rd %psr,%l0; \
51 nop; \
52 nop;
53
54#ifdef CONFIG_KGDB
55#define KGDB_TRAP(num) \
56 b kgdb_trap_low; \
57 rd %psr,%l0; \
58 nop; \
59 nop;
60#else
61#define KGDB_TRAP(num) \
62 BAD_TRAP(num)
63#endif
64
65/* The Get Condition Codes software trap for userland. */
66#define GETCC_TRAP \
67 b getcc_trap_handler; mov %psr, %l0; nop; nop;
68
69/* The Set Condition Codes software trap for userland. */
70#define SETCC_TRAP \
71 b setcc_trap_handler; mov %psr, %l0; nop; nop;
72
73/* The Get PSR software trap for userland. */
74#define GETPSR_TRAP \
75 mov %psr, %i0; jmp %l2; rett %l2 + 4; nop;
76
77/* This is for hard interrupts from level 1-14, 15 is non-maskable (nmi) and
78 * gets handled with another macro.
79 */
80#define TRAP_ENTRY_INTERRUPT(int_level) \
81 mov int_level, %l7; rd %psr, %l0; b real_irq_entry; rd %wim, %l3;
82
83/* NMI's (Non Maskable Interrupts) are special, you can't keep them
84 * from coming in, and basically if you get one, the shows over. ;(
85 * On the sun4c they are usually asynchronous memory errors, on the
86 * the sun4m they could be either due to mem errors or a software
87 * initiated interrupt from the prom/kern on an SMP box saying "I
88 * command you to do CPU tricks, read your mailbox for more info."
89 */
90#define NMI_TRAP \
91 rd %wim, %l3; b linux_trap_nmi_sun4c; mov %psr, %l0; nop;
92
93/* Window overflows/underflows are special and we need to try to be as
94 * efficient as possible here....
95 */
96#define WINDOW_SPILL \
97 rd %psr, %l0; rd %wim, %l3; b spill_window_entry; andcc %l0, PSR_PS, %g0;
98
99#define WINDOW_FILL \
100 rd %psr, %l0; rd %wim, %l3; b fill_window_entry; andcc %l0, PSR_PS, %g0;
101
102#endif /* __SPARC_HEAD_H */
diff --git a/include/asm-sparc/head_64.h b/include/asm-sparc/head_64.h
new file mode 100644
index 000000000000..10e9dabc4c41
--- /dev/null
+++ b/include/asm-sparc/head_64.h
@@ -0,0 +1,76 @@
1#ifndef _SPARC64_HEAD_H
2#define _SPARC64_HEAD_H
3
4#include <asm/pstate.h>
5
6 /* wrpr %g0, val, %gl */
7#define SET_GL(val) \
8 .word 0xa1902000 | val
9
10 /* rdpr %gl, %gN */
11#define GET_GL_GLOBAL(N) \
12 .word 0x81540000 | (N << 25)
13
14#define KERNBASE 0x400000
15
16#define PTREGS_OFF (STACK_BIAS + STACKFRAME_SZ)
17
18#define __CHEETAH_ID 0x003e0014
19#define __JALAPENO_ID 0x003e0016
20#define __SERRANO_ID 0x003e0022
21
22#define CHEETAH_MANUF 0x003e
23#define CHEETAH_IMPL 0x0014 /* Ultra-III */
24#define CHEETAH_PLUS_IMPL 0x0015 /* Ultra-III+ */
25#define JALAPENO_IMPL 0x0016 /* Ultra-IIIi */
26#define JAGUAR_IMPL 0x0018 /* Ultra-IV */
27#define PANTHER_IMPL 0x0019 /* Ultra-IV+ */
28#define SERRANO_IMPL 0x0022 /* Ultra-IIIi+ */
29
30#define BRANCH_IF_SUN4V(tmp1,label) \
31 sethi %hi(is_sun4v), %tmp1; \
32 lduw [%tmp1 + %lo(is_sun4v)], %tmp1; \
33 brnz,pn %tmp1, label; \
34 nop
35
36#define BRANCH_IF_CHEETAH_BASE(tmp1,tmp2,label) \
37 rdpr %ver, %tmp1; \
38 sethi %hi(__CHEETAH_ID), %tmp2; \
39 srlx %tmp1, 32, %tmp1; \
40 or %tmp2, %lo(__CHEETAH_ID), %tmp2;\
41 cmp %tmp1, %tmp2; \
42 be,pn %icc, label; \
43 nop;
44
45#define BRANCH_IF_JALAPENO(tmp1,tmp2,label) \
46 rdpr %ver, %tmp1; \
47 sethi %hi(__JALAPENO_ID), %tmp2; \
48 srlx %tmp1, 32, %tmp1; \
49 or %tmp2, %lo(__JALAPENO_ID), %tmp2;\
50 cmp %tmp1, %tmp2; \
51 be,pn %icc, label; \
52 nop;
53
54#define BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(tmp1,tmp2,label) \
55 rdpr %ver, %tmp1; \
56 srlx %tmp1, (32 + 16), %tmp2; \
57 cmp %tmp2, CHEETAH_MANUF; \
58 bne,pt %xcc, 99f; \
59 sllx %tmp1, 16, %tmp1; \
60 srlx %tmp1, (32 + 16), %tmp2; \
61 cmp %tmp2, CHEETAH_PLUS_IMPL; \
62 bgeu,pt %xcc, label; \
6399: nop;
64
65#define BRANCH_IF_ANY_CHEETAH(tmp1,tmp2,label) \
66 rdpr %ver, %tmp1; \
67 srlx %tmp1, (32 + 16), %tmp2; \
68 cmp %tmp2, CHEETAH_MANUF; \
69 bne,pt %xcc, 99f; \
70 sllx %tmp1, 16, %tmp1; \
71 srlx %tmp1, (32 + 16), %tmp2; \
72 cmp %tmp2, CHEETAH_IMPL; \
73 bgeu,pt %xcc, label; \
7499: nop;
75
76#endif /* !(_SPARC64_HEAD_H) */
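
The __CHEETAH_ID, __JALAPENO_ID and __SERRANO_ID constants above are just the 16-bit manufacturer code glued to the 16-bit implementation code, the same split the BRANCH_IF_* macros extract from the upper half of %ver. A quick standalone consistency check (illustrative only, not in the patch; the EX_ names are local to the sketch):

    #include <assert.h>

    #define EX_CHEETAH_MANUF  0x003e
    #define EX_CHEETAH_IMPL   0x0014  /* Ultra-III   */
    #define EX_JALAPENO_IMPL  0x0016  /* Ultra-IIIi  */
    #define EX_SERRANO_IMPL   0x0022  /* Ultra-IIIi+ */

    int main(void)
    {
            assert(((EX_CHEETAH_MANUF << 16) | EX_CHEETAH_IMPL)  == 0x003e0014); /* __CHEETAH_ID  */
            assert(((EX_CHEETAH_MANUF << 16) | EX_JALAPENO_IMPL) == 0x003e0016); /* __JALAPENO_ID */
            assert(((EX_CHEETAH_MANUF << 16) | EX_SERRANO_IMPL)  == 0x003e0022); /* __SERRANO_ID  */
            return 0;
    }
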
diff --git a/include/asm-sparc/hugetlb.h b/include/asm-sparc/hugetlb.h
new file mode 100644
index 000000000000..177061064ee6
--- /dev/null
+++ b/include/asm-sparc/hugetlb.h
@@ -0,0 +1,85 @@
1#ifndef _ASM_SPARC64_HUGETLB_H
2#define _ASM_SPARC64_HUGETLB_H
3
4#include <asm/page.h>
5
6
7void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
8 pte_t *ptep, pte_t pte);
9
10pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
11 pte_t *ptep);
12
13void hugetlb_prefault_arch_hook(struct mm_struct *mm);
14
15static inline int is_hugepage_only_range(struct mm_struct *mm,
16 unsigned long addr,
17 unsigned long len) {
18 return 0;
19}
20
21/*
22 * If the arch doesn't supply something else, assume that hugepage
23 * size aligned regions are ok without further preparation.
24 */
25static inline int prepare_hugepage_range(struct file *file,
26 unsigned long addr, unsigned long len)
27{
28 if (len & ~HPAGE_MASK)
29 return -EINVAL;
30 if (addr & ~HPAGE_MASK)
31 return -EINVAL;
32 return 0;
33}
34
35static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
36 unsigned long addr, unsigned long end,
37 unsigned long floor,
38 unsigned long ceiling)
39{
40 free_pgd_range(tlb, addr, end, floor, ceiling);
41}
42
43static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
44 unsigned long addr, pte_t *ptep)
45{
46}
47
48static inline int huge_pte_none(pte_t pte)
49{
50 return pte_none(pte);
51}
52
53static inline pte_t huge_pte_wrprotect(pte_t pte)
54{
55 return pte_wrprotect(pte);
56}
57
58static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
59 unsigned long addr, pte_t *ptep)
60{
61 ptep_set_wrprotect(mm, addr, ptep);
62}
63
64static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
65 unsigned long addr, pte_t *ptep,
66 pte_t pte, int dirty)
67{
68 return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
69}
70
71static inline pte_t huge_ptep_get(pte_t *ptep)
72{
73 return *ptep;
74}
75
76static inline int arch_prepare_hugepage(struct page *page)
77{
78 return 0;
79}
80
81static inline void arch_release_hugepage(struct page *page)
82{
83}
84
85#endif /* _ASM_SPARC64_HUGETLB_H */
diff --git a/include/asm-sparc/hvtramp.h b/include/asm-sparc/hvtramp.h
new file mode 100644
index 000000000000..b2b9b947b3a4
--- /dev/null
+++ b/include/asm-sparc/hvtramp.h
@@ -0,0 +1,37 @@
1#ifndef _SPARC64_HVTRAP_H
2#define _SPARC64_HVTRAP_H
3
4#ifndef __ASSEMBLY__
5
6#include <linux/types.h>
7
8struct hvtramp_mapping {
9 __u64 vaddr;
10 __u64 tte;
11};
12
13struct hvtramp_descr {
14 __u32 cpu;
15 __u32 num_mappings;
16 __u64 fault_info_va;
17 __u64 fault_info_pa;
18 __u64 thread_reg;
19 struct hvtramp_mapping maps[1];
20};
21
22extern void hv_cpu_startup(unsigned long hvdescr_pa);
23
24#endif
25
26#define HVTRAMP_DESCR_CPU 0x00
27#define HVTRAMP_DESCR_NUM_MAPPINGS 0x04
28#define HVTRAMP_DESCR_FAULT_INFO_VA 0x08
29#define HVTRAMP_DESCR_FAULT_INFO_PA 0x10
30#define HVTRAMP_DESCR_THREAD_REG 0x18
31#define HVTRAMP_DESCR_MAPS 0x20
32
33#define HVTRAMP_MAPPING_VADDR 0x00
34#define HVTRAMP_MAPPING_TTE 0x08
35#define HVTRAMP_MAPPING_SIZE 0x10
36
37#endif /* _SPARC64_HVTRAP_H */
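/* Illustrative sketch (not part of the header above): the trailing maps[1]
 * array in struct hvtramp_descr means a descriptor covering N mappings is
 * the base structure plus N-1 extra mapping entries.  The helper name is
 * hypothetical and only shows that arithmetic; placing the descriptor in
 * memory and passing its real address to hv_cpu_startup() is out of scope.
 */
static inline unsigned long hvtramp_descr_size(unsigned long num_mappings)
{
        /* equivalent to HVTRAMP_DESCR_MAPS + num_mappings * HVTRAMP_MAPPING_SIZE */
        return sizeof(struct hvtramp_descr) +
                ((num_mappings - 1) * sizeof(struct hvtramp_mapping));
}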
diff --git a/include/asm-sparc/hypervisor.h b/include/asm-sparc/hypervisor.h
new file mode 100644
index 000000000000..109ae24ba242
--- /dev/null
+++ b/include/asm-sparc/hypervisor.h
@@ -0,0 +1,2949 @@
1#ifndef _SPARC64_HYPERVISOR_H
2#define _SPARC64_HYPERVISOR_H
3
4/* Sun4v hypervisor interfaces and defines.
5 *
6 * Hypervisor calls are made via traps to software traps number 0x80
7 * and above. Registers %o0 to %o5 serve as argument, status, and
8 * return value registers.
9 *
10 * There are two kinds of these traps. First there are the normal
11 * "fast traps" which use software trap 0x80 and encode the function
12 * to invoke by number in register %o5. Argument and return value
13 * handling is as follows:
14 *
15 * -----------------------------------------------
16 * | %o5 | function number | undefined |
17 * | %o0 | argument 0 | return status |
18 * | %o1 | argument 1 | return value 1 |
19 * | %o2 | argument 2 | return value 2 |
20 * | %o3 | argument 3 | return value 3 |
21 * | %o4 | argument 4 | return value 4 |
22 * -----------------------------------------------
23 *
24 * The second type are "hyper-fast traps" which encode the function
25 * number in the software trap number itself. So these use trap
26 * numbers > 0x80. The register usage for hyper-fast traps is as
27 * follows:
28 *
29 * -----------------------------------------------
30 * | %o0 | argument 0 | return status |
31 * | %o1 | argument 1 | return value 1 |
32 * | %o2 | argument 2 | return value 2 |
33 * | %o3 | argument 3 | return value 3 |
34 * | %o4 | argument 4 | return value 4 |
35 * -----------------------------------------------
36 *
37 * Registers providing explicit arguments to the hypervisor calls
38 * are volatile across the call. Upon return their values are
39 * undefined unless explicitly specified as containing a particular
40 * return value by the specific call. The return status is always
41 * returned in register %o0, zero indicates a successful execution of
42 * the hypervisor call and other values indicate an error status as
43 * defined below. So, for example, if a hyper-fast trap takes
44 * arguments 0, 1, and 2, then %o0, %o1, and %o2 are volatile across
45 * the call and %o3, %o4, and %o5 would be preserved.
46 *
47 * If the hypervisor trap is invalid, or the fast trap function number
48 * is invalid, HV_EBADTRAP will be returned in %o0. Also, all 64-bits
49 * of the argument and return values are significant.
50 */
51
52/* Trap numbers. */
53#define HV_FAST_TRAP 0x80
54#define HV_MMU_MAP_ADDR_TRAP 0x83
55#define HV_MMU_UNMAP_ADDR_TRAP 0x84
56#define HV_TTRACE_ADDENTRY_TRAP 0x85
57#define HV_CORE_TRAP 0xff
58
59/* Error codes. */
60#define HV_EOK 0 /* Successful return */
61#define HV_ENOCPU 1 /* Invalid CPU id */
62#define HV_ENORADDR 2 /* Invalid real address */
63#define HV_ENOINTR 3 /* Invalid interrupt id */
64#define HV_EBADPGSZ 4 /* Invalid pagesize encoding */
65#define HV_EBADTSB 5 /* Invalid TSB description */
66#define HV_EINVAL 6 /* Invalid argument */
67#define HV_EBADTRAP 7 /* Invalid function number */
68#define HV_EBADALIGN 8 /* Invalid address alignment */
69#define HV_EWOULDBLOCK 9 /* Cannot complete w/o blocking */
70#define HV_ENOACCESS 10 /* No access to resource */
71#define HV_EIO 11 /* I/O error */
72#define HV_ECPUERROR 12 /* CPU in error state */
73#define HV_ENOTSUPPORTED 13 /* Function not supported */
74#define HV_ENOMAP 14 /* No mapping found */
75#define HV_ETOOMANY 15 /* Too many items specified */
76#define HV_ECHANNEL 16 /* Invalid LDC channel */
77#define HV_EBUSY 17 /* Resource busy */
78
79/* mach_exit()
80 * TRAP: HV_FAST_TRAP
81 * FUNCTION: HV_FAST_MACH_EXIT
82 * ARG0: exit code
83 * ERRORS: This service does not return.
84 *
85 * Stop all CPUs in the virtual domain and place them into the stopped
86 * state. The 64-bit exit code may be passed to a service entity as
87 * the domain's exit status. On systems without a service entity, the
88 * domain will undergo a reset, and the boot firmware will be
89 * reloaded.
90 *
91 * This function will never return to the guest that invokes it.
92 *
93 * Note: By convention an exit code of zero denotes a successful exit by
94 * the guest code. A non-zero exit code denotes a guest specific
95 * error indication.
96 *
97 */
98#define HV_FAST_MACH_EXIT 0x00
99
100#ifndef __ASSEMBLY__
101extern void sun4v_mach_exit(unsigned long exit_code);
102#endif
103
104/* Domain services. */
105
106/* mach_desc()
107 * TRAP: HV_FAST_TRAP
108 * FUNCTION: HV_FAST_MACH_DESC
109 * ARG0: buffer
110 * ARG1: length
111 * RET0: status
112 * RET1: length
113 * ERRORS: HV_EBADALIGN Buffer is badly aligned
114 * HV_ENORADDR Buffer points to an illegal real address.
115 * HV_EINVAL Buffer length is too small for complete
116 * machine description.
117 *
118 * Copy the most current machine description into the buffer indicated
119 * by the real address in ARG0. The buffer provided must be 16 byte
120 * aligned. Upon success or HV_EINVAL, this service returns the
121 * actual size of the machine description in the RET1 return value.
122 *
123 * Note: A method of determining the appropriate buffer size for the
124 * machine description is to first call this service with a buffer
125 * length of 0 bytes.
126 */
127#define HV_FAST_MACH_DESC 0x01
128
129#ifndef __ASSEMBLY__
130extern unsigned long sun4v_mach_desc(unsigned long buffer_pa,
131 unsigned long buf_len,
132 unsigned long *real_buf_len);
133#endif
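/* Illustrative sketch of the sizing note above: probe the required buffer
 * size by first calling with a zero-length buffer, since the actual size
 * is reported in RET1 on both success and HV_EINVAL.  The helper name and
 * its return convention are hypothetical.
 */
static inline int probe_mach_desc_size(unsigned long *md_size)
{
        unsigned long status, real_len;

        status = sun4v_mach_desc(0UL, 0UL, &real_len);
        if (status != HV_EOK && status != HV_EINVAL)
                return -1;              /* unexpected failure */

        *md_size = real_len;            /* bytes needed for a full copy */
        return 0;
}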
134
135/* mach_sir()
136 * TRAP: HV_FAST_TRAP
137 * FUNCTION: HV_FAST_MACH_SIR
138 * ERRORS: This service does not return.
139 *
140 * Perform a software initiated reset of the virtual machine domain.
141 * All CPUs are captured as soon as possible, all hardware devices are
142 * returned to the entry default state, and the domain is restarted at
143 * the SIR (trap type 0x04) real trap table (RTBA) entry point on one
144 * of the CPUs. The single CPU restarted is selected as determined by
145 * platform specific policy. Memory is preserved across this
146 * operation.
147 */
148#define HV_FAST_MACH_SIR 0x02
149
150#ifndef __ASSEMBLY__
151extern void sun4v_mach_sir(void);
152#endif
153
154/* mach_set_watchdog()
155 * TRAP: HV_FAST_TRAP
156 * FUNCTION: HV_FAST_MACH_SET_WATCHDOG
157 * ARG0: timeout in milliseconds
158 * RET0: status
159 * RET1: time remaining in milliseconds
160 *
161 * A guest uses this API to set a watchdog timer. Once the guest has set
162 * the timer, it must call the timer service again either to disable or
163 * postpone the expiration. If the timer expires before being reset or
164 * disabled, then the hypervisor takes a platform specific action leading
165 * to guest termination within a bounded time period. The platform action
166 * may include recovery actions such as reporting the expiration to a
167 * Service Processor, and/or automatically restarting the guest.
168 *
169 * The 'timeout' parameter is specified in milliseconds; however, the
170 * implemented granularity is given by the 'watchdog-resolution'
171 * property in the 'platform' node of the guest's machine description.
172 * The largest allowed timeout value is specified by the
173 * 'watchdog-max-timeout' property of the 'platform' node.
174 *
175 * If the 'timeout' argument is not zero, the watchdog timer is set to
176 * expire after a minimum of 'timeout' milliseconds.
177 *
178 * If the 'timeout' argument is zero, the watchdog timer is disabled.
179 *
180 * If the 'timeout' value exceeds the value of the 'watchdog-max-timeout'
181 * property, the hypervisor leaves the watchdog timer state unchanged,
182 * and returns a status of EINVAL.
183 *
184 * The 'time remaining' return value is valid regardless of whether the
185 * return status is EOK or EINVAL. A non-zero return value indicates the
186 * number of milliseconds that were remaining until the timer was to expire.
187 * If less than one millisecond remains, the return value is '1'. If the
188 * watchdog timer was disabled at the time of the call, the return value is
189 * zero.
190 *
191 * If the hypervisor cannot support the exact timeout value requested, but
192 * can support a larger timeout value, the hypervisor may round the actual
193 * timeout to a value larger than the requested timeout, consequently the
194 * 'time remaining' return value may be larger than the previously requested
195 * timeout value.
196 *
197 * Any guest OS debugger should be aware that the watchdog service may be in
198 * use. Consequently, it is recommended that the watchdog service is
199 * disabled upon debugger entry (e.g. reaching a breakpoint), and then
200 * re-enabled upon returning to normal execution. The API has been designed
201 * with this in mind, and the 'time remaining' result of the disable call may
202 * be used directly as the timeout argument of the re-enable call.
203 */
204#define HV_FAST_MACH_SET_WATCHDOG 0x05
205
206#ifndef __ASSEMBLY__
207extern unsigned long sun4v_mach_set_watchdog(unsigned long timeout,
208 unsigned long *orig_timeout);
209#endif
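/* Illustrative sketch of the debugger convention described above: disable
 * the watchdog on debugger entry and re-arm it on exit using the returned
 * 'time remaining'.  The function and variable names are hypothetical.
 */
static unsigned long saved_wd_remaining;

static inline void debug_enter_pause_watchdog(void)
{
        /* a timeout of 0 disables; the remaining time comes back via the pointer */
        sun4v_mach_set_watchdog(0, &saved_wd_remaining);
}

static inline void debug_exit_resume_watchdog(void)
{
        unsigned long unused;

        if (saved_wd_remaining)
                sun4v_mach_set_watchdog(saved_wd_remaining, &unused);
}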
210
211/* CPU services.
212 *
213 * CPUs represent devices that can execute software threads. A single
214 * chip that contains multiple cores or strands is represented as
215 * multiple CPUs with unique CPU identifiers. CPUs are exported to
216 * OBP via the machine description (and to the OS via the OBP device
217 * tree). CPUs are always in one of three states: stopped, running,
218 * or error.
219 *
220 * A CPU ID is a pre-assigned 16-bit value that uniquely identifies a
221 * CPU within a logical domain. Operations that are to be performed
222 * on multiple CPUs specify them via a CPU list. A CPU list is an
223 * array in real memory, of which each 16-bit word is a CPU ID. CPU
224 * lists are passed through the API as two arguments. The first is
225 * the number of entries (16-bit words) in the CPU list, and the
226 * second is the (real address) pointer to the CPU ID list.
227 */
228
229/* cpu_start()
230 * TRAP: HV_FAST_TRAP
231 * FUNCTION: HV_FAST_CPU_START
232 * ARG0: CPU ID
233 * ARG1: PC
234 * ARG2: RTBA
235 * ARG3: target ARG0
236 * RET0: status
237 * ERRORS: ENOCPU Invalid CPU ID
238 * EINVAL Target CPU ID is not in the stopped state
239 * ENORADDR Invalid PC or RTBA real address
240 * EBADALIGN Unaligned PC or unaligned RTBA
241 * EWOULDBLOCK Starting resources are not available
242 *
243 * Start CPU with given CPU ID with PC in %pc and with a real trap
244 * base address value of RTBA. The indicated CPU must be in the
245 * stopped state. The supplied RTBA must be aligned on a 256 byte
246 * boundary. On successful completion, the specified CPU will be in
247 * the running state and will be supplied with "target ARG0" in %o0
248 * and RTBA in %tba.
249 */
250#define HV_FAST_CPU_START 0x10
251
252#ifndef __ASSEMBLY__
253extern unsigned long sun4v_cpu_start(unsigned long cpuid,
254 unsigned long pc,
255 unsigned long rtba,
256 unsigned long arg0);
257#endif
258
259/* cpu_stop()
260 * TRAP: HV_FAST_TRAP
261 * FUNCTION: HV_FAST_CPU_STOP
262 * ARG0: CPU ID
263 * RET0: status
264 * ERRORS: ENOCPU Invalid CPU ID
265 * EINVAL Target CPU ID is the current cpu
266 * EINVAL Target CPU ID is not in the running state
267 * EWOULDBLOCK Stopping resources are not available
268 * ENOTSUPPORTED Not supported on this platform
269 *
270 * The specified CPU is stopped. The indicated CPU must be in the
271 * running state. On completion, it will be in the stopped state. It
272 * is not legal to stop the current CPU.
273 *
274 * Note: As this service cannot be used to stop the current cpu, this service
275 * may not be used to stop the last running CPU in a domain. To stop
276 * and exit a running domain, a guest must use the mach_exit() service.
277 */
278#define HV_FAST_CPU_STOP 0x11
279
280#ifndef __ASSEMBLY__
281extern unsigned long sun4v_cpu_stop(unsigned long cpuid);
282#endif
283
284/* cpu_yield()
285 * TRAP: HV_FAST_TRAP
286 * FUNCTION: HV_FAST_CPU_YIELD
287 * RET0: status
288 * ERRORS: No possible error.
289 *
290 * Suspend execution on the current CPU. Execution will resume when
291 * an interrupt (device, %stick_compare, or cross-call) is targeted to
292 * the CPU. On some CPUs, this API may be used by the hypervisor to
293 * save power by disabling hardware strands.
294 */
295#define HV_FAST_CPU_YIELD 0x12
296
297#ifndef __ASSEMBLY__
298extern unsigned long sun4v_cpu_yield(void);
299#endif
300
301/* cpu_qconf()
302 * TRAP: HV_FAST_TRAP
303 * FUNCTION: HV_FAST_CPU_QCONF
304 * ARG0: queue
305 * ARG1: base real address
306 * ARG2: number of entries
307 * RET0: status
308 * ERRORS: ENORADDR Invalid base real address
309 * EINVAL Invalid queue or number of entries is less
310 * than 2 or too large.
311 * EBADALIGN Base real address is not correctly aligned
312 * for size.
313 *
314 * Configure the given queue to be placed at the given base real
315 * address, with the given number of entries. The number of entries
316 * must be a power of 2. The base real address must be aligned
317 * exactly to match the queue size. Each queue entry is 64 bytes
318 * long, so for example a 32 entry queue must be aligned on a 2048
319 * byte real address boundary.
320 *
321 * The specified queue is unconfigured if the number of entries is given
322 * as zero.
323 *
324 * For the current version of this API service, the argument queue is defined
325 * as follows:
326 *
327 * queue description
328 * ----- -------------------------
329 * 0x3c cpu mondo queue
330 * 0x3d device mondo queue
331 * 0x3e resumable error queue
332 * 0x3f non-resumable error queue
333 *
334 * Note: The maximum number of entries for each queue for a specific cpu may
335 * be determined from the machine description.
336 */
337#define HV_FAST_CPU_QCONF 0x14
338#define HV_CPU_QUEUE_CPU_MONDO 0x3c
339#define HV_CPU_QUEUE_DEVICE_MONDO 0x3d
340#define HV_CPU_QUEUE_RES_ERROR 0x3e
341#define HV_CPU_QUEUE_NONRES_ERROR 0x3f
342
343#ifndef __ASSEMBLY__
344extern unsigned long sun4v_cpu_qconf(unsigned long type,
345 unsigned long queue_paddr,
346 unsigned long num_queue_entries);
347#endif
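/* Illustrative sketch: a queue of N 64-byte entries must sit on an
 * (N * 64)-byte aligned real address, so e.g. a 128-entry cpu mondo queue
 * needs an 8K-aligned 8K block.  queue_ra is a hypothetical, suitably
 * aligned real address supplied by the caller.
 */
static inline unsigned long setup_cpu_mondo_queue(unsigned long queue_ra,
                                                  unsigned long num_entries)
{
        /* the entry count must be a power of two and at least 2 */
        if (num_entries < 2 || (num_entries & (num_entries - 1)))
                return HV_EINVAL;

        return sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO, queue_ra, num_entries);
}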
348
349/* cpu_qinfo()
350 * TRAP: HV_FAST_TRAP
351 * FUNCTION: HV_FAST_CPU_QINFO
352 * ARG0: queue
353 * RET0: status
354 * RET1: base real address
355 * RET2: number of entries
356 * ERRORS: EINVAL Invalid queue
357 *
358 * Return the configuration info for the given queue. The base real
359 * address and number of entries of the defined queue are returned.
360 * The queue argument values are the same as for cpu_qconf() above.
361 *
362 * If the specified queue is a valid queue number, but no queue has
363 * been defined, the number of entries will be set to zero and the
364 * base real address returned is undefined.
365 */
366#define HV_FAST_CPU_QINFO 0x15
367
368/* cpu_mondo_send()
369 * TRAP: HV_FAST_TRAP
370 * FUNCTION: HV_FAST_CPU_MONDO_SEND
371 * ARG0-1: CPU list
372 * ARG2: data real address
373 * RET0: status
374 * ERRORS: EBADALIGN Mondo data is not 64-byte aligned or CPU list
375 * is not 2-byte aligned.
376 * ENORADDR Invalid data mondo address, or invalid cpu list
377 * address.
378 * ENOCPU Invalid cpu in CPU list
379 * EWOULDBLOCK Some or all of the listed CPUs did not receive
380 * the mondo
381 * ECPUERROR One or more of the listed CPUs are in error
382 * state, use HV_FAST_CPU_STATE to see which ones
383 * EINVAL CPU list includes caller's CPU ID
384 *
385 * Send a mondo interrupt to the CPUs in the given CPU list with the
386 * 64-bytes at the given data real address. The data must be 64-byte
387 * aligned. The mondo data will be delivered to the cpu_mondo queues
388 * of the recipient CPUs.
389 *
390 * In all cases, error or not, the CPUs in the CPU list to which the
391 * mondo has been successfully delivered will be indicated by having
392 * their entry in CPU list updated with the value 0xffff.
393 */
394#define HV_FAST_CPU_MONDO_SEND 0x42
395
396#ifndef __ASSEMBLY__
397extern unsigned long sun4v_cpu_mondo_send(unsigned long cpu_count, unsigned long cpu_list_pa, unsigned long mondo_block_pa);
398#endif
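/* Illustrative sketch of interpreting the CPU list after a send: entries
 * that received the mondo are overwritten with 0xffff, so on EWOULDBLOCK a
 * caller can retry with only the still-pending CPUs.  The cpu_list virtual
 * pointer is assumed to alias the real address handed to the hypervisor.
 */
static inline int all_mondos_delivered(unsigned short *cpu_list,
                                       unsigned long cnt)
{
        unsigned long i;

        for (i = 0; i < cnt; i++) {
                if (cpu_list[i] != 0xffff)
                        return 0;       /* this CPU has not taken the mondo yet */
        }
        return 1;
}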
399
400/* cpu_myid()
401 * TRAP: HV_FAST_TRAP
402 * FUNCTION: HV_FAST_CPU_MYID
403 * RET0: status
404 * RET1: CPU ID
405 * ERRORS: No errors defined.
406 *
407 * Return the hypervisor ID handle for the current CPU. Used by a
408 * virtual CPU to discover its own identity.
409 */
410#define HV_FAST_CPU_MYID 0x16
411
412/* cpu_state()
413 * TRAP: HV_FAST_TRAP
414 * FUNCTION: HV_FAST_CPU_STATE
415 * ARG0: CPU ID
416 * RET0: status
417 * RET1: state
418 * ERRORS: ENOCPU Invalid CPU ID
419 *
420 * Retrieve the current state of the CPU with the given CPU ID.
421 */
422#define HV_FAST_CPU_STATE 0x17
423#define HV_CPU_STATE_STOPPED 0x01
424#define HV_CPU_STATE_RUNNING 0x02
425#define HV_CPU_STATE_ERROR 0x03
426
427#ifndef __ASSEMBLY__
428extern long sun4v_cpu_state(unsigned long cpuid);
429#endif
430
431/* cpu_set_rtba()
432 * TRAP: HV_FAST_TRAP
433 * FUNCTION: HV_FAST_CPU_SET_RTBA
434 * ARG0: RTBA
435 * RET0: status
436 * RET1: previous RTBA
437 * ERRORS: ENORADDR Invalid RTBA real address
438 * EBADALIGN RTBA is incorrectly aligned for a trap table
439 *
440 * Set the real trap base address of the local cpu to the given RTBA.
441 * The supplied RTBA must be aligned on a 256 byte boundary. Upon
442 * success the previous value of the RTBA is returned in RET1.
443 *
444 * Note: This service does not affect %tba
445 */
446#define HV_FAST_CPU_SET_RTBA 0x18
447
448/* cpu_get_rtba()
449 * TRAP: HV_FAST_TRAP
450 * FUNCTION: HV_FAST_CPU_GET_RTBA
451 * RET0: status
452 * RET1: current RTBA
453 * ERRORS: No possible error.
454 *
455 * Returns the current value of RTBA in RET1.
456 */
457#define HV_FAST_CPU_GET_RTBA 0x19
458
459/* MMU services.
460 *
461 * Layout of a TSB description for mmu_tsb_ctx{,non}0() calls.
462 */
463#ifndef __ASSEMBLY__
464struct hv_tsb_descr {
465 unsigned short pgsz_idx;
466 unsigned short assoc;
467 unsigned int num_ttes; /* in TTEs */
468 unsigned int ctx_idx;
469 unsigned int pgsz_mask;
470 unsigned long tsb_base;
471 unsigned long resv;
472};
473#endif
474#define HV_TSB_DESCR_PGSZ_IDX_OFFSET 0x00
475#define HV_TSB_DESCR_ASSOC_OFFSET 0x02
476#define HV_TSB_DESCR_NUM_TTES_OFFSET 0x04
477#define HV_TSB_DESCR_CTX_IDX_OFFSET 0x08
478#define HV_TSB_DESCR_PGSZ_MASK_OFFSET 0x0c
479#define HV_TSB_DESCR_TSB_BASE_OFFSET 0x10
480#define HV_TSB_DESCR_RESV_OFFSET 0x18
481
482/* Page size bitmask. */
483#define HV_PGSZ_MASK_8K (1 << 0)
484#define HV_PGSZ_MASK_64K (1 << 1)
485#define HV_PGSZ_MASK_512K (1 << 2)
486#define HV_PGSZ_MASK_4MB (1 << 3)
487#define HV_PGSZ_MASK_32MB (1 << 4)
488#define HV_PGSZ_MASK_256MB (1 << 5)
489#define HV_PGSZ_MASK_2GB (1 << 6)
490#define HV_PGSZ_MASK_16GB (1 << 7)
491
492/* Page size index. The value given in the TSB descriptor must correspond
493 * to the smallest page size specified in the pgsz_mask page size bitmask.
494 */
495#define HV_PGSZ_IDX_8K 0
496#define HV_PGSZ_IDX_64K 1
497#define HV_PGSZ_IDX_512K 2
498#define HV_PGSZ_IDX_4MB 3
499#define HV_PGSZ_IDX_32MB 4
500#define HV_PGSZ_IDX_256MB 5
501#define HV_PGSZ_IDX_2GB 6
502#define HV_PGSZ_IDX_16GB 7
503
504/* MMU fault status area.
505 *
506 * MMU related faults have their status and fault address information
507 * placed into a memory region made available by privileged code. Each
508 * virtual processor must make a mmu_fault_area_conf() call to tell the
509 * hypervisor where that processor's fault status should be stored.
510 *
511 * The fault status block is a multiple of 64-bytes and must be aligned
512 * on a 64-byte boundary.
513 */
514#ifndef __ASSEMBLY__
515struct hv_fault_status {
516 unsigned long i_fault_type;
517 unsigned long i_fault_addr;
518 unsigned long i_fault_ctx;
519 unsigned long i_reserved[5];
520 unsigned long d_fault_type;
521 unsigned long d_fault_addr;
522 unsigned long d_fault_ctx;
523 unsigned long d_reserved[5];
524};
525#endif
526#define HV_FAULT_I_TYPE_OFFSET 0x00
527#define HV_FAULT_I_ADDR_OFFSET 0x08
528#define HV_FAULT_I_CTX_OFFSET 0x10
529#define HV_FAULT_D_TYPE_OFFSET 0x40
530#define HV_FAULT_D_ADDR_OFFSET 0x48
531#define HV_FAULT_D_CTX_OFFSET 0x50
532
533#define HV_FAULT_TYPE_FAST_MISS 1
534#define HV_FAULT_TYPE_FAST_PROT 2
535#define HV_FAULT_TYPE_MMU_MISS 3
536#define HV_FAULT_TYPE_INV_RA 4
537#define HV_FAULT_TYPE_PRIV_VIOL 5
538#define HV_FAULT_TYPE_PROT_VIOL 6
539#define HV_FAULT_TYPE_NFO 7
540#define HV_FAULT_TYPE_NFO_SEFF 8
541#define HV_FAULT_TYPE_INV_VA 9
542#define HV_FAULT_TYPE_INV_ASI 10
543#define HV_FAULT_TYPE_NC_ATOMIC 11
544#define HV_FAULT_TYPE_PRIV_ACT 12
545#define HV_FAULT_TYPE_RESV1 13
546#define HV_FAULT_TYPE_UNALIGNED 14
547#define HV_FAULT_TYPE_INV_PGSZ 15
548/* Values 16 --> -2 are reserved. */
549#define HV_FAULT_TYPE_MULTIPLE -1
550
551/* Flags argument for mmu_{map,unmap}_addr(), mmu_demap_{page,context,all}(),
552 * and mmu_{map,unmap}_perm_addr().
553 */
554#define HV_MMU_DMMU 0x01
555#define HV_MMU_IMMU 0x02
556#define HV_MMU_ALL (HV_MMU_DMMU | HV_MMU_IMMU)
557
558/* mmu_map_addr()
559 * TRAP: HV_MMU_MAP_ADDR_TRAP
560 * ARG0: virtual address
561 * ARG1: mmu context
562 * ARG2: TTE
563 * ARG3: flags (HV_MMU_{IMMU,DMMU})
564 * ERRORS: EINVAL Invalid virtual address, mmu context, or flags
565 * EBADPGSZ Invalid page size value
566 * ENORADDR Invalid real address in TTE
567 *
568 * Create a non-permanent mapping using the given TTE, virtual
569 * address, and mmu context. The flags argument determines which
570 * (data, or instruction, or both) TLB the mapping gets loaded into.
571 *
572 * The behavior is undefined if the valid bit is clear in the TTE.
573 *
574 * Note: This API call is for privileged code to specify temporary translation
575 * mappings without the need to create and manage a TSB.
576 */
577
578/* mmu_unmap_addr()
579 * TRAP: HV_MMU_UNMAP_ADDR_TRAP
580 * ARG0: virtual address
581 * ARG1: mmu context
582 * ARG2: flags (HV_MMU_{IMMU,DMMU})
583 * ERRORS: EINVAL Invalid virtual address, mmu context, or flags
584 *
585 * Demaps the given virtual address in the given mmu context on this
586 * CPU. This function is intended to be used to demap pages mapped
587 * with mmu_map_addr. This service is equivalent to invoking
588 * mmu_demap_page() with only the current CPU in the CPU list. The
589 * flags argument determines which (data, or instruction, or both) TLB
590 * the mapping gets unmapped from.
591 *
592 * Attempting to perform an unmap operation for a previously defined
593 * permanent mapping will have undefined results.
594 */
595
596/* mmu_tsb_ctx0()
597 * TRAP: HV_FAST_TRAP
598 * FUNCTION: HV_FAST_MMU_TSB_CTX0
599 * ARG0: number of TSB descriptions
600 * ARG1: TSB descriptions pointer
601 * RET0: status
602 * ERRORS: ENORADDR Invalid TSB descriptions pointer or
603 * TSB base within a descriptor
604 * EBADALIGN TSB descriptions pointer is not aligned
605 * to an 8-byte boundary, or TSB base
606 * within a descriptor is not aligned for
607 * the given TSB size
608 * EBADPGSZ Invalid page size in a TSB descriptor
609 * EBADTSB Invalid associativity or size in a TSB
610 * descriptor
611 * EINVAL Invalid number of TSB descriptions, or
612 * invalid context index in a TSB
613 * descriptor, or index page size not
614 * equal to smallest page size in page
615 * size bitmask field.
616 *
617 * Configures the TSBs for the current CPU for virtual addresses with
618 * context zero. The TSB descriptions pointer is a pointer to an
619 * array of the given number of TSB descriptions.
620 *
621 * Note: The maximum number of TSBs available to a virtual CPU is given by the
622 * mmu-max-#tsbs property of the cpu's corresponding "cpu" node in the
623 * machine description.
624 */
625#define HV_FAST_MMU_TSB_CTX0 0x20
626
627#ifndef __ASSEMBLY__
628extern unsigned long sun4v_mmu_tsb_ctx0(unsigned long num_descriptions,
629 unsigned long tsb_desc_ra);
630#endif
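/* Illustrative sketch: fill in one TSB descriptor consistent with the rules
 * above (pgsz_idx must name the smallest page size set in pgsz_mask) and
 * hand it to the hypervisor.  tsb_ra and descr_ra are hypothetical real
 * addresses for the TSB itself and for the descriptor.
 */
static inline unsigned long register_one_ctx0_tsb(struct hv_tsb_descr *d,
                                                  unsigned long descr_ra,
                                                  unsigned long tsb_ra,
                                                  unsigned int num_ttes)
{
        d->pgsz_idx  = HV_PGSZ_IDX_8K;          /* smallest size in the mask below */
        d->assoc     = 1;                       /* direct mapped */
        d->num_ttes  = num_ttes;                /* power-of-two entry count assumed */
        d->ctx_idx   = 0;                       /* context zero */
        d->pgsz_mask = HV_PGSZ_MASK_8K | HV_PGSZ_MASK_4MB;
        d->tsb_base  = tsb_ra;
        d->resv      = 0;

        return sun4v_mmu_tsb_ctx0(1, descr_ra);
}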
631
632/* mmu_tsb_ctxnon0()
633 * TRAP: HV_FAST_TRAP
634 * FUNCTION: HV_FAST_MMU_TSB_CTXNON0
635 * ARG0: number of TSB descriptions
636 * ARG1: TSB descriptions pointer
637 * RET0: status
638 * ERRORS: Same as for mmu_tsb_ctx0() above.
639 *
640 * Configures the TSBs for the current CPU for virtual addresses with
641 * non-zero contexts. The TSB descriptions pointer is a pointer to an
642 * array of the given number of TSB descriptions.
643 *
644 * Note: A maximum of 16 TSBs may be specified in the TSB description list.
645 */
646#define HV_FAST_MMU_TSB_CTXNON0 0x21
647
648/* mmu_demap_page()
649 * TRAP: HV_FAST_TRAP
650 * FUNCTION: HV_FAST_MMU_DEMAP_PAGE
651 * ARG0: reserved, must be zero
652 * ARG1: reserved, must be zero
653 * ARG2: virtual address
654 * ARG3: mmu context
655 * ARG4: flags (HV_MMU_{IMMU,DMMU})
656 * RET0: status
657 * ERRORS: EINVAL Invalid virtual address, context, or
658 * flags value
659 * ENOTSUPPORTED ARG0 or ARG1 is non-zero
660 *
661 * Demaps any page mapping of the given virtual address in the given
662 * mmu context for the current virtual CPU. Any virtually tagged
663 * caches are guaranteed to be kept consistent. The flags argument
664 * determines which TLB (instruction, or data, or both) participate in
665 * the operation.
666 *
667 * ARG0 and ARG1 are both reserved and must be set to zero.
668 */
669#define HV_FAST_MMU_DEMAP_PAGE 0x22
670
671/* mmu_demap_ctx()
672 * TRAP: HV_FAST_TRAP
673 * FUNCTION: HV_FAST_MMU_DEMAP_CTX
674 * ARG0: reserved, must be zero
675 * ARG1: reserved, must be zero
676 * ARG2: mmu context
677 * ARG3: flags (HV_MMU_{IMMU,DMMU})
678 * RET0: status
679 * ERRORS: EINVAL Invalid context or flags value
680 * ENOTSUPPORTED ARG0 or ARG1 is non-zero
681 *
682 * Demaps all non-permanent virtual page mappings previously specified
683 * for the given context for the current virtual CPU. Any virtual
684 * tagged caches are guaranteed to be kept consistent. The flags
685 * argument determines which TLB (instruction, or data, or both)
686 * participate in the operation.
687 *
688 * ARG0 and ARG1 are both reserved and must be set to zero.
689 */
690#define HV_FAST_MMU_DEMAP_CTX 0x23
691
692/* mmu_demap_all()
693 * TRAP: HV_FAST_TRAP
694 * FUNCTION: HV_FAST_MMU_DEMAP_ALL
695 * ARG0: reserved, must be zero
696 * ARG1: reserved, must be zero
697 * ARG2: flags (HV_MMU_{IMMU,DMMU})
698 * RET0: status
699 * ERRORS: EINVAL Invalid flags value
700 * ENOTSUPPORTED ARG0 or ARG1 is non-zero
701 *
702 * Demaps all non-permanent virtual page mappings previously specified
703 * for the current virtual CPU. Any virtual tagged caches are
704 * guaranteed to be kept consistent. The flags argument determines
705 * which TLB (instruction, or data, or both) participate in the
706 * operation.
707 *
708 * ARG0 and ARG1 are both reserved and must be set to zero.
709 */
710#define HV_FAST_MMU_DEMAP_ALL 0x24
711
712#ifndef __ASSEMBLY__
713extern void sun4v_mmu_demap_all(void);
714#endif
715
716/* mmu_map_perm_addr()
717 * TRAP: HV_FAST_TRAP
718 * FUNCTION: HV_FAST_MMU_MAP_PERM_ADDR
719 * ARG0: virtual address
720 * ARG1: reserved, must be zero
721 * ARG2: TTE
722 * ARG3: flags (HV_MMU_{IMMU,DMMU})
723 * RET0: status
724 * ERRORS: EINVAL Invalid virtual address or flags value
725 * EBADPGSZ Invalid page size value
726 * ENORADDR Invalid real address in TTE
727 * ETOOMANY Too many mappings (max of 8 reached)
728 *
729 * Create a permanent mapping using the given TTE and virtual address
730 * for context 0 on the calling virtual CPU. A maximum of 8 such
731 * permanent mappings may be specified by privileged code. Mappings
732 * may be removed with mmu_unmap_perm_addr().
733 *
734 * The behavior is undefined if a TTE with the valid bit clear is given.
735 *
736 * Note: This call is used to specify address space mappings for which
737 * privileged code does not expect to receive misses. For example,
738 * this mechanism can be used to map kernel nucleus code and data.
739 */
740#define HV_FAST_MMU_MAP_PERM_ADDR 0x25
741
742#ifndef __ASSEMBLY__
743extern unsigned long sun4v_mmu_map_perm_addr(unsigned long vaddr,
744 unsigned long set_to_zero,
745 unsigned long tte,
746 unsigned long flags);
747#endif
748
749/* mmu_fault_area_conf()
750 * TRAP: HV_FAST_TRAP
751 * FUNCTION: HV_FAST_MMU_FAULT_AREA_CONF
752 * ARG0: real address
753 * RET0: status
754 * RET1: previous mmu fault area real address
755 * ERRORS: ENORADDR Invalid real address
756 * EBADALIGN Invalid alignment for fault area
757 *
758 * Configure the MMU fault status area for the calling CPU. A 64-byte
759 * aligned real address specifies where MMU fault status information
760 * is placed. The return value is the previously specified area, or 0
761 * for the first invocation. Specifying a fault area at real address
762 * 0 is not allowed.
763 */
764#define HV_FAST_MMU_FAULT_AREA_CONF 0x26
765
766/* mmu_enable()
767 * TRAP: HV_FAST_TRAP
768 * FUNCTION: HV_FAST_MMU_ENABLE
769 * ARG0: enable flag
770 * ARG1: return target address
771 * RET0: status
772 * ERRORS: ENORADDR Invalid real address when disabling
773 * translation.
774 * EBADALIGN The return target address is not
775 * aligned to an instruction.
776 * EINVAL The enable flag requests the current
777 * operating mode (e.g. disable if already
778 * disabled)
779 *
780 * Enable or disable virtual address translation for the calling CPU
781 * within the virtual machine domain. If the enable flag is zero,
782 * translation is disabled, any non-zero value will enable
783 * translation.
784 *
785 * When this function returns, the newly selected translation mode
786 * will be active. If the mmu is being enabled, then the return
787 * target address is a virtual address else it is a real address.
788 *
789 * Upon successful completion, control will be returned to the given
790 * return target address (ie. the cpu will jump to that address). On
791 * failure, the previous mmu mode remains and the trap simply returns
792 * as normal with the appropriate error code in RET0.
793 */
794#define HV_FAST_MMU_ENABLE 0x27
795
796/* mmu_unmap_perm_addr()
797 * TRAP: HV_FAST_TRAP
798 * FUNCTION: HV_FAST_MMU_UNMAP_PERM_ADDR
799 * ARG0: virtual address
800 * ARG1: reserved, must be zero
801 * ARG2: flags (HV_MMU_{IMMU,DMMU})
802 * RET0: status
803 * ERRORS: EINVAL Invalid virtual address or flags value
804 * ENOMAP Specified mapping was not found
805 *
806 * Demaps any permanent page mapping (established via
807 * mmu_map_perm_addr()) at the given virtual address for context 0 on
808 * the current virtual CPU. Any virtual tagged caches are guaranteed
809 * to be kept consistent.
810 */
811#define HV_FAST_MMU_UNMAP_PERM_ADDR 0x28
812
813/* mmu_tsb_ctx0_info()
814 * TRAP: HV_FAST_TRAP
815 * FUNCTION: HV_FAST_MMU_TSB_CTX0_INFO
816 * ARG0: max TSBs
817 * ARG1: buffer pointer
818 * RET0: status
819 * RET1: number of TSBs
820 * ERRORS: EINVAL Supplied buffer is too small
821 * EBADALIGN The buffer pointer is badly aligned
822 * ENORADDR Invalid real address for buffer pointer
823 *
824 * Return the TSB configuration as previously defined by mmu_tsb_ctx0()
825 * into the provided buffer. The size of the buffer is given in ARG1
826 * in terms of the number of TSB description entries.
827 *
828 * Upon return, RET1 always contains the number of TSB descriptions
829 * previously configured. If zero TSBs were configured, EOK is
830 * returned with RET1 containing 0.
831 */
832#define HV_FAST_MMU_TSB_CTX0_INFO 0x29
833
834/* mmu_tsb_ctxnon0_info()
835 * TRAP: HV_FAST_TRAP
836 * FUNCTION: HV_FAST_MMU_TSB_CTXNON0_INFO
837 * ARG0: max TSBs
838 * ARG1: buffer pointer
839 * RET0: status
840 * RET1: number of TSBs
841 * ERRORS: EINVAL Supplied buffer is too small
842 * EBADALIGN The buffer pointer is badly aligned
843 * ENORADDR Invalid real address for buffer pointer
844 *
845 * Return the TSB configuration as previously defined by
846 * mmu_tsb_ctxnon0() into the provided buffer. The size of the buffer
847 * is given in ARG1 in terms of the number of TSB description entries.
848 *
849 * Upon return, RET1 always contains the number of TSB descriptions
850 * previously configured. If zero TSBs were configured, EOK is
851 * returned with RET1 containing 0.
852 */
853#define HV_FAST_MMU_TSB_CTXNON0_INFO 0x2a
854
855/* mmu_fault_area_info()
856 * TRAP: HV_FAST_TRAP
857 * FUNCTION: HV_FAST_MMU_FAULT_AREA_INFO
858 * RET0: status
859 * RET1: fault area real address
860 * ERRORS: No errors defined.
861 *
862 * Return the currently defined MMU fault status area for the current
863 * CPU. The real address of the fault status area is returned in
864 * RET1, or 0 is returned in RET1 if no fault status area is defined.
865 *
866 * Note: mmu_fault_area_conf() may be called with the return value (RET1)
867 * from this service if there is a need to save and restore the fault
868 * area for a cpu.
869 */
870#define HV_FAST_MMU_FAULT_AREA_INFO 0x2b
871
872/* Cache and Memory services. */
873
874/* mem_scrub()
875 * TRAP: HV_FAST_TRAP
876 * FUNCTION: HV_FAST_MEM_SCRUB
877 * ARG0: real address
878 * ARG1: length
879 * RET0: status
880 * RET1: length scrubbed
881 * ERRORS: ENORADDR Invalid real address
882 * EBADALIGN Start address or length are not correctly
883 * aligned
884 * EINVAL Length is zero
885 *
886 * Zero the memory contents in the range real address to real address
887 * plus length minus 1. Also, valid ECC will be generated for that
888 * memory address range. Scrubbing is started at the given real
889 * address, but may not scrub the entire given length. The actual
890 * length scrubbed will be returned in RET1.
891 *
892 * The real address and length must be aligned on an 8K boundary, or
893 * contain the start address and length from a sun4v error report.
894 *
895 * Note: There are two uses for this function. The first use is to block clear
896 * and initialize memory, and the second is to scrub an uncorrectable
897 * error reported via a resumable or non-resumable trap. The second
898 * use requires the arguments to be equal to the real address and length
899 * provided in a sun4v memory error report.
900 */
901#define HV_FAST_MEM_SCRUB 0x31
902
903/* mem_sync()
904 * TRAP: HV_FAST_TRAP
905 * FUNCTION: HV_FAST_MEM_SYNC
906 * ARG0: real address
907 * ARG1: length
908 * RET0: status
909 * RET1: length synced
910 * ERRORS: ENORADDR Invalid real address
911 * EBADALIGN Start address or length are not correctly
912 * aligned
913 * EINVAL Length is zero
914 *
915 * Force the next access within the real address to real address plus
916 * length minus 1 to be fetched from main system memory. Less than
917 * the given length may be synced; the actual amount synced is
918 * returned in RET1. The real address and length must be aligned on
919 * an 8K boundary.
920 */
921#define HV_FAST_MEM_SYNC 0x32
922
923/* Time of day services.
924 *
925 * The hypervisor maintains the time of day on a per-domain basis.
926 * Changing the time of day in one domain does not affect the time of
927 * day on any other domain.
928 *
929 * Time is described by a single unsigned 64-bit word which is the
930 * number of seconds since the UNIX Epoch (00:00:00 UTC, January 1,
931 * 1970).
932 */
933
934/* tod_get()
935 * TRAP: HV_FAST_TRAP
936 * FUNCTION: HV_FAST_TOD_GET
937 * RET0: status
938 * RET1: TOD
939 * ERRORS: EWOULDBLOCK TOD resource is temporarily unavailable
940 * ENOTSUPPORTED If TOD not supported on this platform
941 *
942 * Return the current time of day. May block if TOD access is
943 * temporarily not possible.
944 */
945#define HV_FAST_TOD_GET 0x50
946
947#ifndef __ASSEMBLY__
948extern unsigned long sun4v_tod_get(unsigned long *time);
949#endif
950
951/* tod_set()
952 * TRAP: HV_FAST_TRAP
953 * FUNCTION: HV_FAST_TOD_SET
954 * ARG0: TOD
955 * RET0: status
956 * ERRORS: EWOULDBLOCK TOD resource is temporarily unavailable
957 * ENOTSUPPORTED If TOD not supported on this platform
958 *
959 * The current time of day is set to the value specified in ARG0. May
960 * block if TOD access is temporarily not possible.
961 */
962#define HV_FAST_TOD_SET 0x51
963
964#ifndef __ASSEMBLY__
965extern unsigned long sun4v_tod_set(unsigned long time);
966#endif
967
968/* Console services */
969
970/* con_getchar()
971 * TRAP: HV_FAST_TRAP
972 * FUNCTION: HV_FAST_CONS_GETCHAR
973 * RET0: status
974 * RET1: character
975 * ERRORS: EWOULDBLOCK No character available.
976 *
977 * Returns a character from the console device. If no character is
978 * available then an EWOULDBLOCK error is returned. If a character is
979 * available, then the returned status is EOK and the character value
980 * is in RET1.
981 *
982 * A virtual BREAK is represented by the 64-bit value -1.
983 *
984 * A virtual HUP signal is represented by the 64-bit value -2.
985 */
986#define HV_FAST_CONS_GETCHAR 0x60
987
988/* con_putchar()
989 * TRAP: HV_FAST_TRAP
990 * FUNCTION: HV_FAST_CONS_PUTCHAR
991 * ARG0: character
992 * RET0: status
993 * ERRORS: EINVAL Illegal character
994 * EWOULDBLOCK Output buffer currently full, would block
995 *
996 * Send a character to the console device. Only character values
997 * between 0 and 255 may be used. Values outside this range are
998 * invalid except for the 64-bit value -1 which is used to send a
999 * virtual BREAK.
1000 */
1001#define HV_FAST_CONS_PUTCHAR 0x61
1002
1003/* con_read()
1004 * TRAP: HV_FAST_TRAP
1005 * FUNCTION: HV_FAST_CONS_READ
1006 * ARG0: buffer real address
1007 * ARG1: buffer size in bytes
1008 * RET0: status
1009 * RET1: bytes read or BREAK or HUP
1010 * ERRORS: EWOULDBLOCK No character available.
1011 *
1012 * Reads characters into a buffer from the console device. If no
1013 * character is available then an EWOULDBLOCK error is returned.
1014 * If a character is available, then the returned status is EOK
1015 * and the number of bytes read into the given buffer is provided
1016 * in RET1.
1017 *
1018 * A virtual BREAK is represented by the 64-bit RET1 value -1.
1019 *
1020 * A virtual HUP signal is represented by the 64-bit RET1 value -2.
1021 *
1022 * If BREAK or HUP are indicated, no bytes were read into the buffer.
1023 */
1024#define HV_FAST_CONS_READ 0x62
1025
1026/* con_write()
1027 * TRAP: HV_FAST_TRAP
1028 * FUNCTION: HV_FAST_CONS_WRITE
1029 * ARG0: buffer real address
1030 * ARG1: buffer size in bytes
1031 * RET0: status
1032 * RET1: bytes written
1033 * ERRORS: EWOULDBLOCK Output buffer currently full, would block
1034 *
1035 * Send the characters in the buffer to the console device. Breaks must be
1036 * sent using con_putchar().
1037 */
1038#define HV_FAST_CONS_WRITE 0x63
1039
1040#ifndef __ASSEMBLY__
1041extern long sun4v_con_getchar(long *status);
1042extern long sun4v_con_putchar(long c);
1043extern long sun4v_con_read(unsigned long buffer,
1044 unsigned long size,
1045 unsigned long *bytes_read);
1046extern unsigned long sun4v_con_write(unsigned long buffer,
1047 unsigned long size,
1048 unsigned long *bytes_written);
1049#endif
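/* Illustrative single-shot read based on the con_read() description above:
 * treat HV_EWOULDBLOCK as "nothing available" and the special RET1 values
 * -1 and -2 as BREAK and HUP rather than byte counts.  buf_ra is a
 * hypothetical real address of the receive buffer, and the negative return
 * for errors is this sketch's own convention.
 */
static inline long console_read_once(unsigned long buf_ra, unsigned long size)
{
        unsigned long bytes;
        long status;

        status = sun4v_con_read(buf_ra, size, &bytes);
        if (status == HV_EWOULDBLOCK)
                return 0;                       /* nothing available right now */
        if (status != HV_EOK)
                return -1;                      /* sketch-local error convention */
        if (bytes == (unsigned long) -1 || bytes == (unsigned long) -2)
                return 0;                       /* virtual BREAK or HUP, no data */

        return (long) bytes;                    /* bytes placed in the buffer */
}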
1050
1051/* mach_set_soft_state()
1052 * TRAP: HV_FAST_TRAP
1053 * FUNCTION: HV_FAST_MACH_SET_SOFT_STATE
1054 * ARG0: software state
1055 * ARG1: software state description pointer
1056 * RET0: status
1057 * ERRORS: EINVAL software state not valid or software state
1058 * description is not NULL terminated
1059 * ENORADDR software state description pointer is not a
1060 * valid real address
1061 * EBADALIGN software state description is not correctly
1062 * aligned
1063 *
1064 * This allows the guest to report its soft state to the hypervisor. There
1065 * are two primary components to this state. The first part states whether
1066 * the guest software is running or not. The second contains optional
1067 * details specific to the software.
1068 *
1069 * The software state argument is defined below in HV_SOFT_STATE_*, and
1070 * indicates whether the guest is operating normally or in a transitional
1071 * state.
1072 *
1073 * The software state description argument is a real address of a data buffer
1074 * of size 32 bytes aligned on a 32-byte boundary. It is treated as a NULL
1075 * terminated 7-bit ASCII string of up to 31 characters not including the
1076 * NULL termination.
1077 */
1078#define HV_FAST_MACH_SET_SOFT_STATE 0x70
1079#define HV_SOFT_STATE_NORMAL 0x01
1080#define HV_SOFT_STATE_TRANSITION 0x02
1081
1082#ifndef __ASSEMBLY__
1083extern unsigned long sun4v_mach_set_soft_state(unsigned long soft_state,
1084 unsigned long msg_string_ra);
1085#endif
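/* Illustrative sketch of the buffer rules above: the description must be a
 * NUL-terminated 7-bit ASCII string (at most 31 characters) in a 32-byte,
 * 32-byte-aligned block.  The static buffer, its contents, and the caller
 * supplying its real address are all assumptions for the example.
 */
static char soft_state_msg[32] __attribute__((aligned(32))) = "booting";

static inline unsigned long report_soft_state_transition(unsigned long msg_ra)
{
        /* msg_ra: real address of soft_state_msg, obtained by the caller */
        return sun4v_mach_set_soft_state(HV_SOFT_STATE_TRANSITION, msg_ra);
}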
1086
1087/* mach_get_soft_state()
1088 * TRAP: HV_FAST_TRAP
1089 * FUNCTION: HV_FAST_MACH_GET_SOFT_STATE
1090 * ARG0: software state description pointer
1091 * RET0: status
1092 * RET1: software state
1093 * ERRORS: ENORADDR software state description pointer is not a
1094 * valid real address
1095 * EBADALIGN software state description is not correctly
1096 * aligned
1097 *
1098 * Retrieve the current value of the guest's software state. The rules
1099 * for the software state pointer are the same as for mach_set_soft_state()
1100 * above.
1101 */
1102#define HV_FAST_MACH_GET_SOFT_STATE 0x71
1103
1104/* svc_send()
1105 * TRAP: HV_FAST_TRAP
1106 * FUNCTION: HV_FAST_SVC_SEND
1107 * ARG0: service ID
1108 * ARG1: buffer real address
1109 * ARG2: buffer size
1110 * RET0: STATUS
1111 * RET1: sent_bytes
1112 *
1113 * Be careful, all output registers are clobbered by this operation,
1114 * so for example it is not possible to save away a value in %o4
1115 * across the trap.
1116 */
1117#define HV_FAST_SVC_SEND 0x80
1118
1119/* svc_recv()
1120 * TRAP: HV_FAST_TRAP
1121 * FUNCTION: HV_FAST_SVC_RECV
1122 * ARG0: service ID
1123 * ARG1: buffer real address
1124 * ARG2: buffer size
1125 * RET0: STATUS
1126 * RET1: recv_bytes
1127 *
1128 * Be careful, all output registers are clobbered by this operation,
1129 * so for example it is not possible to save away a value in %o4
1130 * across the trap.
1131 */
1132#define HV_FAST_SVC_RECV 0x81
1133
1134/* svc_getstatus()
1135 * TRAP: HV_FAST_TRAP
1136 * FUNCTION: HV_FAST_SVC_GETSTATUS
1137 * ARG0: service ID
1138 * RET0: STATUS
1139 * RET1: status bits
1140 */
1141#define HV_FAST_SVC_GETSTATUS 0x82
1142
1143/* svc_setstatus()
1144 * TRAP: HV_FAST_TRAP
1145 * FUNCTION: HV_FAST_SVC_SETSTATUS
1146 * ARG0: service ID
1147 * ARG1: bits to set
1148 * RET0: STATUS
1149 */
1150#define HV_FAST_SVC_SETSTATUS 0x83
1151
1152/* svc_clrstatus()
1153 * TRAP: HV_FAST_TRAP
1154 * FUNCTION: HV_FAST_SVC_CLRSTATUS
1155 * ARG0: service ID
1156 * ARG1: bits to clear
1157 * RET0: STATUS
1158 */
1159#define HV_FAST_SVC_CLRSTATUS 0x84
1160
1161#ifndef __ASSEMBLY__
1162extern unsigned long sun4v_svc_send(unsigned long svc_id,
1163 unsigned long buffer,
1164 unsigned long buffer_size,
1165 unsigned long *sent_bytes);
1166extern unsigned long sun4v_svc_recv(unsigned long svc_id,
1167 unsigned long buffer,
1168 unsigned long buffer_size,
1169 unsigned long *recv_bytes);
1170extern unsigned long sun4v_svc_getstatus(unsigned long svc_id,
1171 unsigned long *status_bits);
1172extern unsigned long sun4v_svc_setstatus(unsigned long svc_id,
1173 unsigned long status_bits);
1174extern unsigned long sun4v_svc_clrstatus(unsigned long svc_id,
1175 unsigned long status_bits);
1176#endif
1177
1178/* Trap trace services.
1179 *
1180 * The hypervisor provides a trap tracing capability for privileged
1181 * code running on each virtual CPU. Privileged code provides a
1182 * round-robin trap trace queue within which the hypervisor writes
1183 * 64-byte entries detailing hyperprivileged traps taken on behalf of
1184 * privileged code. This is provided as a debugging capability for
1185 * privileged code.
1186 *
1187 * The trap trace control structure is 64-bytes long and placed at the
1188 * start (offset 0) of the trap trace buffer, and is described as
1189 * follows:
1190 */
1191#ifndef __ASSEMBLY__
1192struct hv_trap_trace_control {
1193 unsigned long head_offset;
1194 unsigned long tail_offset;
1195 unsigned long __reserved[0x30 / sizeof(unsigned long)];
1196};
1197#endif
1198#define HV_TRAP_TRACE_CTRL_HEAD_OFFSET 0x00
1199#define HV_TRAP_TRACE_CTRL_TAIL_OFFSET 0x08
1200
1201/* The head offset is the offset of the most recently completed entry
1202 * in the trap-trace buffer. The tail offset is the offset of the
1203 * next entry to be written. The control structure is owned and
1204 * modified by the hypervisor. A guest may not modify the control
1205 * structure contents. Attempts to do so will result in undefined
1206 * behavior for the guest.
1207 *
1208 * Each trap trace buffer entry is laid out as follows:
1209 */
1210#ifndef __ASSEMBLY__
1211struct hv_trap_trace_entry {
1212 unsigned char type; /* Hypervisor or guest entry? */
1213 unsigned char hpstate; /* Hyper-privileged state */
1214 unsigned char tl; /* Trap level */
1215 unsigned char gl; /* Global register level */
1216 unsigned short tt; /* Trap type */
1217 unsigned short tag; /* Extended trap identifier */
1218 unsigned long tstate; /* Trap state */
1219 unsigned long tick; /* Tick */
1220 unsigned long tpc; /* Trap PC */
1221 unsigned long f1; /* Entry specific */
1222 unsigned long f2; /* Entry specific */
1223 unsigned long f3; /* Entry specific */
1224 unsigned long f4; /* Entry specific */
1225};
1226#endif
1227#define HV_TRAP_TRACE_ENTRY_TYPE 0x00
1228#define HV_TRAP_TRACE_ENTRY_HPSTATE 0x01
1229#define HV_TRAP_TRACE_ENTRY_TL 0x02
1230#define HV_TRAP_TRACE_ENTRY_GL 0x03
1231#define HV_TRAP_TRACE_ENTRY_TT 0x04
1232#define HV_TRAP_TRACE_ENTRY_TAG 0x06
1233#define HV_TRAP_TRACE_ENTRY_TSTATE 0x08
1234#define HV_TRAP_TRACE_ENTRY_TICK 0x10
1235#define HV_TRAP_TRACE_ENTRY_TPC 0x18
1236#define HV_TRAP_TRACE_ENTRY_F1 0x20
1237#define HV_TRAP_TRACE_ENTRY_F2 0x28
1238#define HV_TRAP_TRACE_ENTRY_F3 0x30
1239#define HV_TRAP_TRACE_ENTRY_F4 0x38
1240
1241/* The type field is encoded as follows. */
1242#define HV_TRAP_TYPE_UNDEF 0x00 /* Entry content undefined */
1243#define HV_TRAP_TYPE_HV 0x01 /* Hypervisor trap entry */
1244#define HV_TRAP_TYPE_GUEST 0xff /* Added via ttrace_addentry() */
1245
1246/* ttrace_buf_conf()
1247 * TRAP: HV_FAST_TRAP
1248 * FUNCTION: HV_FAST_TTRACE_BUF_CONF
1249 * ARG0: real address
1250 * ARG1: number of entries
1251 * RET0: status
1252 * RET1: number of entries
1253 * ERRORS: ENORADDR Invalid real address
1254 * EINVAL Size is too small
1255 * EBADALIGN Real address not aligned on 64-byte boundary
1256 *
1257 * Requests hypervisor trap tracing and declares a virtual CPU's trap
1258 * trace buffer to the hypervisor. The real address supplies the real
1259 * base address of the trap trace queue and must be 64-byte aligned.
1260 * Specifying a value of 0 for the number of entries disables trap
1261 * tracing for the calling virtual CPU. The buffer allocated must be
1262 * sized for a power of two number of 64-byte trap trace entries plus
1263 * an initial 64-byte control structure.
1264 *
1265 * This may be invoked any number of times so that a virtual CPU may
1266 * relocate a trap trace buffer or create "snapshots" of information.
1267 *
1268 * If the real address is illegal or badly aligned, then trap tracing
1269 * is disabled and an error is returned.
1270 *
1271 * Upon failure with EINVAL, this service call returns in RET1 the
1272 * minimum number of buffer entries required. Upon other failures
1273 * RET1 is undefined.
1274 */
1275#define HV_FAST_TTRACE_BUF_CONF 0x90
1276
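/* Illustrative sizing sketch: per the description above, the buffer holds a
 * 64-byte control structure followed by a power-of-two number of 64-byte
 * entries, and must start on a 64-byte aligned real address.  No C wrapper
 * for ttrace_buf_conf() is declared here, so the hypothetical helper only
 * computes the required size.
 */
static inline unsigned long ttrace_buf_bytes(unsigned long num_entries)
{
        /* caller is expected to pass a power-of-two entry count */
        return sizeof(struct hv_trap_trace_control) +
                num_entries * sizeof(struct hv_trap_trace_entry);
}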
1277/* ttrace_buf_info()
1278 * TRAP: HV_FAST_TRAP
1279 * FUNCTION: HV_FAST_TTRACE_BUF_INFO
1280 * RET0: status
1281 * RET1: real address
1282 * RET2: size
1283 * ERRORS: None defined.
1284 *
1285 * Returns the size and location of the previously declared trap-trace
1286 * buffer. In the event that no buffer was previously defined, or the
1287 * buffer is disabled, this call will return a size of zero bytes.
1288 */
1289#define HV_FAST_TTRACE_BUF_INFO 0x91
1290
1291/* ttrace_enable()
1292 * TRAP: HV_FAST_TRAP
1293 * FUNCTION: HV_FAST_TTRACE_ENABLE
1294 * ARG0: enable
1295 * RET0: status
1296 * RET1: previous enable state
1297 * ERRORS: EINVAL No trap trace buffer currently defined
1298 *
1299 * Enable or disable trap tracing, and return the previous enabled
1300 * state in RET1. Future systems may define various flags for the
1301 * enable argument (ARG0); for the moment a guest should pass
1302 * "(uint64_t) -1" to enable, and "(uint64_t) 0" to disable all
1303 * tracing - which will ensure future compatibility.
1304 */
1305#define HV_FAST_TTRACE_ENABLE 0x92
1306
1307/* ttrace_freeze()
1308 * TRAP: HV_FAST_TRAP
1309 * FUNCTION: HV_FAST_TTRACE_FREEZE
1310 * ARG0: freeze
1311 * RET0: status
1312 * RET1: previous freeze state
1313 * ERRORS: EINVAL No trap trace buffer currently defined
1314 *
1315 * Freeze or unfreeze trap tracing, returning the previous freeze
1316 * state in RET1. A guest should pass a non-zero value to freeze and
1317 * a zero value to unfreeze all tracing. The returned previous state
1318 * is 0 for not frozen and 1 for frozen.
1319 */
1320#define HV_FAST_TTRACE_FREEZE 0x93
1321
1322/* ttrace_addentry()
1323 * TRAP: HV_TTRACE_ADDENTRY_TRAP
1324 * ARG0: tag (16-bits)
1325 * ARG1: data word 0
1326 * ARG2: data word 1
1327 * ARG3: data word 2
1328 * ARG4: data word 3
1329 * RET0: status
1330 * ERRORS: EINVAL No trap trace buffer currently defined
1331 *
1332 * Add an entry to the trap trace buffer. Upon return only ARG0/RET0
1333 * is modified - none of the other registers holding arguments are
1334 * volatile across this hypervisor service.
1335 */
1336
1337/* Core dump services.
1338 *
1339 * Since the hypervisor virtualizes and thus obscures a lot of the
1340 * physical machine layout and state, traditional OS crash dumps can
1341 * be difficult to diagnose especially when the problem is a
1342 * configuration error of some sort.
1343 *
1344 * The dump services provide an opaque buffer into which the
1345 * hypervisor can place its internal state in order to assist in
1346 * debugging such situations. The contents are opaque and extremely
1347 * platform and hypervisor implementation specific. The guest, during
1348 * a core dump, requests that the hypervisor update any information in
1349 * the dump buffer in preparation to being dumped as part of the
1350 * domain's memory image.
1351 */
1352
1353/* dump_buf_update()
1354 * TRAP: HV_FAST_TRAP
1355 * FUNCTION: HV_FAST_DUMP_BUF_UPDATE
1356 * ARG0: real address
1357 * ARG1: size
1358 * RET0: status
1359 * RET1: required size of dump buffer
1360 * ERRORS: ENORADDR Invalid real address
1361 * EBADALIGN Real address is not aligned on a 64-byte
1362 * boundary
1363 * EINVAL Size is non-zero but less than minimum size
1364 * required
1365 * ENOTSUPPORTED Operation not supported on current logical
1366 * domain
1367 *
1368 * Declare a domain dump buffer to the hypervisor. The real address
1369 * provided for the domain dump buffer must be 64-byte aligned. The
1370 * size specifies the size of the dump buffer and may be larger than
1371 * the minimum size specified in the machine description. The
1372 * hypervisor will fill the dump buffer with opaque data.
1373 *
1374 * Note: A guest may elect to include dump buffer contents as part of a crash
1375 * dump to assist with debugging. This function may be called any number
1376 * of times so that a guest may relocate a dump buffer, or create
1377 * "snapshots" of any dump-buffer information. Each call to
1378 * dump_buf_update() atomically declares the new dump buffer to the
1379 * hypervisor.
1380 *
1381 * A specified size of 0 unconfigures the dump buffer. If the real
1382 * address is illegal or badly aligned, then any currently active dump
1383 * buffer is disabled and an error is returned.
1384 *
1385 * In the event that the call fails with EINVAL, RET1 contains the
1386 * minimum size required by the hypervisor for a valid dump buffer.
1387 */
1388#define HV_FAST_DUMP_BUF_UPDATE 0x94
1389
1390/* dump_buf_info()
1391 * TRAP: HV_FAST_TRAP
1392 * FUNCTION: HV_FAST_DUMP_BUF_INFO
1393 * RET0: status
1394 * RET1: real address of current dump buffer
1395 * RET2: size of current dump buffer
1396 * ERRORS: No errors defined.
1397 *
1398 * Return the currently configured dump buffer description. A
1399 * returned size of 0 bytes indicates an undefined dump buffer. In
1400 * this case the return address in RET1 is undefined.
1401 */
1402#define HV_FAST_DUMP_BUF_INFO 0x95
1403
1404/* Device interrupt services.
1405 *
1406 * Device interrupts are allocated to system bus bridges by the hypervisor,
1407 * and described to OBP in the machine description. OBP then describes
1408 * these interrupts to the OS via properties in the device tree.
1409 *
1410 * Terminology:
1411 *
1412 * cpuid Unique opaque value which represents a target cpu.
1413 *
1414 * devhandle Device handle. It uniquely identifies a device, and
1415 * consists of the lower 28-bits of the hi-cell of the
1416 * first entry of the device's "reg" property in the
1417 * OBP device tree.
1418 *
1419 * devino Device interrupt number. Specifies the relative
1420 * interrupt number within the device. The unique
1421 * combination of devhandle and devino are used to
1422 * identify a specific device interrupt.
1423 *
1424 * Note: The devino value is the same as the values in the
1425 * "interrupts" property or "interrupt-map" property
1426 * in the OBP device tree for that device.
1427 *
1428 * sysino System interrupt number. A 64-bit unsigned integer
1429 * representing a unique interrupt within a virtual
1430 * machine.
1431 *
1432 * intr_state A flag representing the interrupt state for a given
1433 * sysino. The state values are defined below.
1434 *
1435 * intr_enabled A flag representing the 'enabled' state for a given
1436 * sysino. The enable values are defined below.
1437 */
1438
1439#define HV_INTR_STATE_IDLE 0 /* Nothing pending */
1440#define HV_INTR_STATE_RECEIVED 1 /* Interrupt received by hardware */
1441#define HV_INTR_STATE_DELIVERED 2 /* Interrupt delivered to queue */
1442
1443#define HV_INTR_DISABLED 0 /* sysino not enabled */
1444#define HV_INTR_ENABLED 1 /* sysino enabled */
1445
1446/* intr_devino_to_sysino()
1447 * TRAP: HV_FAST_TRAP
1448 * FUNCTION: HV_FAST_INTR_DEVINO2SYSINO
1449 * ARG0: devhandle
1450 * ARG1: devino
1451 * RET0: status
1452 * RET1: sysino
1453 * ERRORS: EINVAL Invalid devhandle/devino
1454 *
1455 * Converts a device specific interrupt number of the given
1456 * devhandle/devino into a system specific ino (sysino).
1457 */
1458#define HV_FAST_INTR_DEVINO2SYSINO 0xa0
1459
1460#ifndef __ASSEMBLY__
1461extern unsigned long sun4v_devino_to_sysino(unsigned long devhandle,
1462 unsigned long devino);
1463#endif
1464
1465/* intr_getenabled()
1466 * TRAP: HV_FAST_TRAP
1467 * FUNCTION: HV_FAST_INTR_GETENABLED
1468 * ARG0: sysino
1469 * RET0: status
1470 * RET1: intr_enabled (HV_INTR_{DISABLED,ENABLED})
1471 * ERRORS: EINVAL Invalid sysino
1472 *
1473 * Returns interrupt enabled state in RET1 for the interrupt defined
1474 * by the given sysino.
1475 */
1476#define HV_FAST_INTR_GETENABLED 0xa1
1477
1478#ifndef __ASSEMBLY__
1479extern unsigned long sun4v_intr_getenabled(unsigned long sysino);
1480#endif
1481
1482/* intr_setenabled()
1483 * TRAP: HV_FAST_TRAP
1484 * FUNCTION: HV_FAST_INTR_SETENABLED
1485 * ARG0: sysino
1486 * ARG1: intr_enabled (HV_INTR_{DISABLED,ENABLED})
1487 * RET0: status
1488 * ERRORS: EINVAL Invalid sysino or intr_enabled value
1489 *
1490 * Set the 'enabled' state of the interrupt sysino.
1491 */
1492#define HV_FAST_INTR_SETENABLED 0xa2
1493
1494#ifndef __ASSEMBLY__
1495extern unsigned long sun4v_intr_setenabled(unsigned long sysino, unsigned long intr_enabled);
1496#endif
1497
1498/* intr_getstate()
1499 * TRAP: HV_FAST_TRAP
1500 * FUNCTION: HV_FAST_INTR_GETSTATE
1501 * ARG0: sysino
1502 * RET0: status
1503 * RET1: intr_state (HV_INTR_STATE_*)
1504 * ERRORS: EINVAL Invalid sysino
1505 *
1506 * Returns current state of the interrupt defined by the given sysino.
1507 */
1508#define HV_FAST_INTR_GETSTATE 0xa3
1509
1510#ifndef __ASSEMBLY__
1511extern unsigned long sun4v_intr_getstate(unsigned long sysino);
1512#endif
1513
1514/* intr_setstate()
1515 * TRAP: HV_FAST_TRAP
1516 * FUNCTION: HV_FAST_INTR_SETSTATE
1517 * ARG0: sysino
1518 * ARG1: intr_state (HV_INTR_STATE_*)
1519 * RET0: status
1520 * ERRORS: EINVAL Invalid sysino or intr_state value
1521 *
1522 * Sets the current state of the interrupt described by the given sysino
1523 * value.
1524 *
1525 * Note: Setting the state to HV_INTR_STATE_IDLE clears any pending
1526 * interrupt for sysino.
1527 */
1528#define HV_FAST_INTR_SETSTATE 0xa4
1529
1530#ifndef __ASSEMBLY__
1531extern unsigned long sun4v_intr_setstate(unsigned long sysino, unsigned long intr_state);
1532#endif
1533
1534/* intr_gettarget()
1535 * TRAP: HV_FAST_TRAP
1536 * FUNCTION: HV_FAST_INTR_GETTARGET
1537 * ARG0: sysino
1538 * RET0: status
1539 * RET1: cpuid
1540 * ERRORS: EINVAL Invalid sysino
1541 *
1542 * Returns CPU that is the current target of the interrupt defined by
1543 * the given sysino. The CPU value returned is undefined if the target
1544 * has not been set via intr_settarget().
1545 */
1546#define HV_FAST_INTR_GETTARGET 0xa5
1547
1548#ifndef __ASSEMBLY__
1549extern unsigned long sun4v_intr_gettarget(unsigned long sysino);
1550#endif
1551
1552/* intr_settarget()
1553 * TRAP: HV_FAST_TRAP
1554 * FUNCTION: HV_FAST_INTR_SETTARGET
1555 * ARG0: sysino
1556 * ARG1: cpuid
1557 * RET0: status
1558 * ERRORS: EINVAL Invalid sysino
1559 * ENOCPU Invalid cpuid
1560 *
1561 * Set the target CPU for the interrupt defined by the given sysino.
1562 */
1563#define HV_FAST_INTR_SETTARGET 0xa6
1564
1565#ifndef __ASSEMBLY__
1566extern unsigned long sun4v_intr_settarget(unsigned long sysino, unsigned long cpuid);
1567#endif
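
#ifndef __ASSEMBLY__
/* Illustrative sketch only (not part of the API): one plausible way to
 * bring up a device interrupt using the wrappers declared above.  The
 * devhandle/devino pair and the target cpuid come from the caller;
 * error handling simply propagates the first non-zero hypervisor status.
 */
static inline unsigned long example_sun4v_intr_bringup(unsigned long devhandle,
							unsigned long devino,
							unsigned long cpuid)
{
	unsigned long sysino, hv_err;

	/* The wrapper returns the sysino computed by the hypervisor. */
	sysino = sun4v_devino_to_sysino(devhandle, devino);

	hv_err = sun4v_intr_settarget(sysino, cpuid);
	if (hv_err)
		return hv_err;

	/* Clear anything left pending before the interrupt is enabled. */
	hv_err = sun4v_intr_setstate(sysino, HV_INTR_STATE_IDLE);
	if (hv_err)
		return hv_err;

	return sun4v_intr_setenabled(sysino, HV_INTR_ENABLED);
}
#endif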
1568
1569/* vintr_get_cookie()
1570 * TRAP: HV_FAST_TRAP
1571 * FUNCTION: HV_FAST_VINTR_GET_COOKIE
1572 * ARG0: device handle
1573 * ARG1: device ino
1574 * RET0: status
1575 * RET1: cookie
1576 */
1577#define HV_FAST_VINTR_GET_COOKIE 0xa7
1578
1579/* vintr_set_cookie()
1580 * TRAP: HV_FAST_TRAP
1581 * FUNCTION: HV_FAST_VINTR_SET_COOKIE
1582 * ARG0: device handle
1583 * ARG1: device ino
1584 * ARG2: cookie
1585 * RET0: status
1586 */
1587#define HV_FAST_VINTR_SET_COOKIE 0xa8
1588
1589/* vintr_get_valid()
1590 * TRAP: HV_FAST_TRAP
1591 * FUNCTION: HV_FAST_VINTR_GET_VALID
1592 * ARG0: device handle
1593 * ARG1: device ino
1594 * RET0: status
1595 * RET1: valid state
1596 */
1597#define HV_FAST_VINTR_GET_VALID 0xa9
1598
1599/* vintr_set_valid()
1600 * TRAP: HV_FAST_TRAP
1601 * FUNCTION: HV_FAST_VINTR_SET_VALID
1602 * ARG0: device handle
1603 * ARG1: device ino
1604 * ARG2: valid state
1605 * RET0: status
1606 */
1607#define HV_FAST_VINTR_SET_VALID 0xaa
1608
1609/* vintr_get_state()
1610 * TRAP: HV_FAST_TRAP
1611 * FUNCTION: HV_FAST_VINTR_GET_STATE
1612 * ARG0: device handle
1613 * ARG1: device ino
1614 * RET0: status
1615 * RET1: state
1616 */
1617#define HV_FAST_VINTR_GET_STATE 0xab
1618
1619/* vintr_set_state()
1620 * TRAP: HV_FAST_TRAP
1621 * FUNCTION: HV_FAST_VINTR_SET_STATE
1622 * ARG0: device handle
1623 * ARG1: device ino
1624 * ARG2: state
1625 * RET0: status
1626 */
1627#define HV_FAST_VINTR_SET_STATE 0xac
1628
1629/* vintr_get_target()
1630 * TRAP: HV_FAST_TRAP
1631 * FUNCTION: HV_FAST_VINTR_GET_TARGET
1632 * ARG0: device handle
1633 * ARG1: device ino
1634 * RET0: status
1635 * RET1: cpuid
1636 */
1637#define HV_FAST_VINTR_GET_TARGET 0xad
1638
1639/* vintr_set_target()
1640 * TRAP: HV_FAST_TRAP
1641 * FUNCTION: HV_FAST_VINTR_SET_TARGET
1642 * ARG0: device handle
1643 * ARG1: device ino
1644 * ARG2: cpuid
1645 * RET0: status
1646 */
1647#define HV_FAST_VINTR_SET_TARGET 0xae
1648
1649#ifndef __ASSEMBLY__
1650extern unsigned long sun4v_vintr_get_cookie(unsigned long dev_handle,
1651 unsigned long dev_ino,
1652 unsigned long *cookie);
1653extern unsigned long sun4v_vintr_set_cookie(unsigned long dev_handle,
1654 unsigned long dev_ino,
1655 unsigned long cookie);
1656extern unsigned long sun4v_vintr_get_valid(unsigned long dev_handle,
1657 unsigned long dev_ino,
1658 unsigned long *valid);
1659extern unsigned long sun4v_vintr_set_valid(unsigned long dev_handle,
1660 unsigned long dev_ino,
1661 unsigned long valid);
1662extern unsigned long sun4v_vintr_get_state(unsigned long dev_handle,
1663 unsigned long dev_ino,
1664 unsigned long *state);
1665extern unsigned long sun4v_vintr_set_state(unsigned long dev_handle,
1666 unsigned long dev_ino,
1667 unsigned long state);
1668extern unsigned long sun4v_vintr_get_target(unsigned long dev_handle,
1669 unsigned long dev_ino,
1670 unsigned long *cpuid);
1671extern unsigned long sun4v_vintr_set_target(unsigned long dev_handle,
1672 unsigned long dev_ino,
1673 unsigned long cpuid);
1674#endif
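
#ifndef __ASSEMBLY__
/* Illustrative sketch only: associating a caller-chosen 64-bit cookie
 * with a virtual interrupt source and then marking the source valid.
 * Using HV_INTR_ENABLED as the 'valid state' value is an assumption
 * carried over from the sysino services above, not something this
 * header specifies.
 */
static inline unsigned long example_sun4v_vintr_setup(unsigned long dev_handle,
						      unsigned long dev_ino,
						      unsigned long cookie)
{
	unsigned long hv_err;

	hv_err = sun4v_vintr_set_cookie(dev_handle, dev_ino, cookie);
	if (hv_err)
		return hv_err;

	return sun4v_vintr_set_valid(dev_handle, dev_ino, HV_INTR_ENABLED);
}
#endif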
1675
1676/* PCI IO services.
1677 *
1678 * See the terminology descriptions in the device interrupt services
1679 * section above as those apply here too. Here are terminology
1680 * definitions specific to these PCI IO services:
1681 *
1682 * tsbnum TSB number. Identifies which io-tsb is used.
1683 * For this version of the specification, tsbnum
1684 * must be zero.
1685 *
1686 * tsbindex TSB index. Identifies which entry in the TSB
1687 * is used. The first entry is zero.
1688 *
1689 * tsbid A 64-bit aligned data structure which contains
1690 * a tsbnum and a tsbindex. Bits 63:32 contain the
1691 * tsbnum and bits 31:00 contain the tsbindex.
1692 *
1693 * Use the HV_PCI_TSBID() macro to construct such
1694 * values.
1695 *
1696 * io_attributes IO attributes for IOMMU mappings. One or more
1697 * of the attribute bits are stored in a 64-bit
1698 * value. The values are defined below.
1699 *
1700 * r_addr 64-bit real address
1701 *
1702 * pci_device PCI device address. A PCI device address identifies
1703 * a specific device on a specific PCI bus segment.
1704 * A PCI device address is a 32-bit unsigned integer
1705 * with the following format:
1706 *
1707 * 00000000.bbbbbbbb.dddddfff.00000000
1708 *
1709 * Use the HV_PCI_DEVICE_BUILD() macro to construct
1710 * such values.
1711 *
1712 * pci_config_offset
1713 * PCI configuration space offset. For conventional
1714 * PCI a value between 0 and 255. For extended
1715 * configuration space, a value between 0 and 4095.
1716 *
1717 * Note: For PCI configuration space accesses, the offset
1718 * must be aligned to the access size.
1719 *
1720 * error_flag A return value which specifies if the action succeeded
1721 * or failed. 0 means no error, non-0 means some error
1722 * occurred while performing the service.
1723 *
1724 * io_sync_direction
1725 * Direction definition for pci_dma_sync(), defined
1726 * below in HV_PCI_SYNC_*.
1727 *
1728 * io_page_list A list of io_page_addresses, an io_page_address is
1729 * a real address.
1730 *
1731 * io_page_list_p A pointer to an io_page_list.
1732 *
1733 * "size based byte swap" - Some functions do size based byte swapping
1734 * which allows sw to access pointers and
1735 * counters in native form when the processor
1736 * operates in a different endianness than the
1737 * IO bus. Size-based byte swapping converts a
1738 * multi-byte field between big-endian and
1739 * little-endian format.
1740 */
1741
1742#define HV_PCI_MAP_ATTR_READ 0x01
1743#define HV_PCI_MAP_ATTR_WRITE 0x02
1744
1745#define HV_PCI_DEVICE_BUILD(b,d,f) \
1746 ((((b) & 0xff) << 16) | \
1747 (((d) & 0x1f) << 11) | \
1748 (((f) & 0x07) << 8))
1749
1750#define HV_PCI_TSBID(__tsb_num, __tsb_index) \
1751 ((((u64)(__tsb_num)) << 32UL) | ((u64)(__tsb_index)))
1752
1753#define HV_PCI_SYNC_FOR_DEVICE 0x01
1754#define HV_PCI_SYNC_FOR_CPU 0x02
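
#ifndef __ASSEMBLY__
/* Illustrative sketch only: how the two helper macros above are meant
 * to be used.  Bus 0x01, device 0x02, function 0 and TSB index 10 are
 * arbitrary example values; tsbnum must be zero per the text above.
 */
static inline void example_hv_pci_args(unsigned long *pci_device,
				       unsigned long *tsbid)
{
	/* Produces the 00000000.bbbbbbbb.dddddfff.00000000 layout. */
	*pci_device = HV_PCI_DEVICE_BUILD(0x01, 0x02, 0x0);

	/* tsbnum in bits 63:32, tsbindex in bits 31:00. */
	*tsbid = HV_PCI_TSBID(0, 10);
}
#endif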
1755
1756/* pci_iommu_map()
1757 * TRAP: HV_FAST_TRAP
1758 * FUNCTION: HV_FAST_PCI_IOMMU_MAP
1759 * ARG0: devhandle
1760 * ARG1: tsbid
1761 * ARG2: #ttes
1762 * ARG3: io_attributes
1763 * ARG4: io_page_list_p
1764 * RET0: status
1765 * RET1: #ttes mapped
1766 * ERRORS: EINVAL Invalid devhandle/tsbnum/tsbindex/io_attributes
1767 * EBADALIGN Improperly aligned real address
1768 * ENORADDR Invalid real address
1769 *
1770 * Create IOMMU mappings in the sun4v device defined by the given
1771 * devhandle. The mappings are created in the TSB defined by the
1772 * tsbnum component of the given tsbid. The first mapping is created
1773 * in the TSB index defined by the tsbindex component of the given tsbid.
1774 * The call creates up to #ttes mappings, the first one at tsbnum, tsbindex,
1775 * the second at tsbnum, tsbindex + 1, etc.
1776 *
1777 * All mappings are created with the attributes defined by the io_attributes
1778 * argument. The page mapping addresses are described in the io_page_list
1779 * defined by the given io_page_list_p, which is a pointer to the io_page_list.
1780 * The first entry in the io_page_list is the address for the first iotte, the
1781 * 2nd for the 2nd iotte, and so on.
1782 *
1783 * Each io_page_address in the io_page_list must be appropriately aligned.
1784 * #ttes must be greater than zero. For this version of the spec, the tsbnum
1785 * component of the given tsbid must be zero.
1786 *
1787 * Returns the actual number of mappings created, which may be less than
1788 * or equal to the argument #ttes. If the function returns a value which
1789 * is less than #ttes, the caller may continue to call the function with
1790 * updated tsbid, #ttes, and io_page_list_p arguments until all pages are
1791 * mapped.
1792 *
1793 * Note: This function does not imply an iotte cache flush. The guest must
1794 * demap an entry before re-mapping it.
1795 */
1796#define HV_FAST_PCI_IOMMU_MAP 0xb0
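
#ifndef __ASSEMBLY__
/* Illustrative sketch only: the retry pattern described above.  No C
 * wrapper for this fast trap is declared in this header, so the actual
 * hypercall is abstracted as the 'iommu_map' callback, which is assumed
 * to return the number of ttes mapped by one invocation (0 on error).
 */
typedef unsigned long (*example_iommu_map_fn)(unsigned long devhandle,
					      unsigned long tsbid,
					      unsigned long num_ttes,
					      unsigned long io_attrs,
					      unsigned long io_page_list_pa);

static inline unsigned long example_iommu_map_all(example_iommu_map_fn iommu_map,
						  unsigned long devhandle,
						  unsigned long tsbid,
						  unsigned long num_ttes,
						  unsigned long io_attrs,
						  unsigned long io_page_list_pa)
{
	unsigned long mapped = 0;

	while (mapped < num_ttes) {
		unsigned long n;

		/* Each io_page_address is a 64-bit real address, so the
		 * page list pointer advances by 8 bytes per mapped tte.
		 * Adding to tsbid advances the tsbindex (bits 31:00).
		 */
		n = iommu_map(devhandle, tsbid + mapped,
			      num_ttes - mapped, io_attrs,
			      io_page_list_pa + mapped * sizeof(u64));
		if (!n)
			break;		/* error; caller inspects status */
		mapped += n;
	}
	return mapped;
}
#endif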
1797
1798/* pci_iommu_demap()
1799 * TRAP: HV_FAST_TRAP
1800 * FUNCTION: HV_FAST_PCI_IOMMU_DEMAP
1801 * ARG0: devhandle
1802 * ARG1: tsbid
1803 * ARG2: #ttes
1804 * RET0: status
1805 * RET1: #ttes demapped
1806 * ERRORS: EINVAL Invalid devhandle/tsbnum/tsbindex
1807 *
1808 * Demap and flush IOMMU mappings in the device defined by the given
1809 * devhandle. Demaps up to #ttes entries in the TSB defined by the tsbnum
1810 * component of the given tsbid, starting at the TSB index defined by the
1811 * tsbindex component of the given tsbid.
1812 *
1813 * For this version of the spec, the tsbnum of the given tsbid must be zero.
1814 * #ttes must be greater than zero.
1815 *
1816 * Returns the actual number of ttes demapped, which may be less than or equal
1817 * to the argument #ttes. If #ttes demapped is less than #ttes, the caller
1818 * may continue to call this function with updated tsbid and #ttes arguments
1819 * until all pages are demapped.
1820 *
1821 * Note: Entries do not have to be mapped to be demapped. A demap of an
1822 * unmapped page will flush the entry from the tte cache.
1823 */
1824#define HV_FAST_PCI_IOMMU_DEMAP 0xb1
1825
1826/* pci_iommu_getmap()
1827 * TRAP: HV_FAST_TRAP
1828 * FUNCTION: HV_FAST_PCI_IOMMU_GETMAP
1829 * ARG0: devhandle
1830 * ARG1: tsbid
1831 * RET0: status
1832 * RET1: io_attributes
1833 * RET2: real address
1834 * ERRORS: EINVAL Invalid devhandle/tsbnum/tsbindex
1835 * ENOMAP Mapping is not valid, no translation exists
1836 *
1837 * Read and return the mapping in the device described by the given devhandle
1838 * and tsbid. If successful, the io_attributes shall be returned in RET1
1839 * and the page address of the mapping shall be returned in RET2.
1840 *
1841 * For this version of the spec, the tsbnum component of the given tsbid
1842 * must be zero.
1843 */
1844#define HV_FAST_PCI_IOMMU_GETMAP 0xb2
1845
1846/* pci_iommu_getbypass()
1847 * TRAP: HV_FAST_TRAP
1848 * FUNCTION: HV_FAST_PCI_IOMMU_GETBYPASS
1849 * ARG0: devhandle
1850 * ARG1: real address
1851 * ARG2: io_attributes
1852 * RET0: status
1853 * RET1: io_addr
1854 * ERRORS: EINVAL Invalid devhandle/io_attributes
1855 * ENORADDR Invalid real address
1856 * ENOTSUPPORTED Function not supported in this implementation.
1857 *
1858 * Create a "special" mapping in the device described by the given devhandle,
1859 * for the given real address and attributes. Return the IO address in RET1
1860 * if successful.
1861 */
1862#define HV_FAST_PCI_IOMMU_GETBYPASS 0xb3
1863
1864/* pci_config_get()
1865 * TRAP: HV_FAST_TRAP
1866 * FUNCTION: HV_FAST_PCI_CONFIG_GET
1867 * ARG0: devhandle
1868 * ARG1: pci_device
1869 * ARG2: pci_config_offset
1870 * ARG3: size
1871 * RET0: status
1872 * RET1: error_flag
1873 * RET2: data
1874 * ERRORS: EINVAL Invalid devhandle/pci_device/offset/size
1875 * EBADALIGN pci_config_offset not size aligned
1876 * ENOACCESS Access to this offset is not permitted
1877 *
1878 * Read PCI configuration space for the adapter described by the given
1879 * devhandle. Read size (1, 2, or 4) bytes of data from the given
1880 * pci_device, at pci_config_offset from the beginning of the device's
1881 * configuration space. If there was no error, RET1 is set to zero and
1882 * RET2 is set to the data read. Insignificant bits in RET2 are not
1883 * guaranteed to have any specific value and therefore must be ignored.
1884 *
1885 * The data returned in RET2 is size based byte swapped.
1886 *
1887 * If an error occurs during the read, set RET1 to a non-zero value. The
1888 * given pci_config_offset must be 'size' aligned.
1889 */
1890#define HV_FAST_PCI_CONFIG_GET 0xb4
1891
1892/* pci_config_put()
1893 * TRAP: HV_FAST_TRAP
1894 * FUNCTION: HV_FAST_PCI_CONFIG_PUT
1895 * ARG0: devhandle
1896 * ARG1: pci_device
1897 * ARG2: pci_config_offset
1898 * ARG3: size
1899 * ARG4: data
1900 * RET0: status
1901 * RET1: error_flag
1902 * ERRORS: EINVAL Invalid devhandle/pci_device/offset/size
1903 * EBADALIGN pci_config_offset not size aligned
1904 * ENOACCESS Access to this offset is not permitted
1905 *
1906 * Write PCI configuration space for the adapter described by the given
1907 * devhandle. Write size (1, 2, or 4) bytes of data in a single operation,
1908 * at pci_config_offset from the beginning of the device's configuration
1909 * space. The data argument contains the data to be written to configuration
1910 * space. Prior to writing, the data is size based byte swapped.
1911 *
1912 * If an error occurs during the write access, do not generate an error
1913 * report, but set RET1 to a non-zero value. Otherwise RET1 is zero.
1914 * The given pci_config_offset must be 'size' aligned.
1915 *
1916 * This function is permitted to read from offset zero in the configuration
1917 * space described by the given pci_device if necessary to ensure that the
1918 * write access to config space completes.
1919 */
1920#define HV_FAST_PCI_CONFIG_PUT 0xb5
1921
1922/* pci_peek()
1923 * TRAP: HV_FAST_TRAP
1924 * FUNCTION: HV_FAST_PCI_PEEK
1925 * ARG0: devhandle
1926 * ARG1: real address
1927 * ARG2: size
1928 * RET0: status
1929 * RET1: error_flag
1930 * RET2: data
1931 * ERRORS: EINVAL Invalid devhandle or size
1932 * EBADALIGN Improperly aligned real address
1933 * ENORADDR Bad real address
1934 * ENOACCESS Guest access prohibited
1935 *
1936 * Attempt to read the IO address given by the given devhandle, real address,
1937 * and size. Size must be 1, 2, 4, or 8. The read is performed as a single
1938 * access operation using the given size. If an error occurs when reading
1939 * from the given location, do not generate an error report, but return a
1940 * non-zero value in RET1. If the read was successful, return zero in RET1
1941 * and return the actual data read in RET2. The data returned is size based
1942 * byte swapped.
1943 *
1944 * Non-significant bits in RET2 are not guaranteed to have any specific value
1945 * and therefore must be ignored. If RET1 is returned as non-zero, the data
1946 * value is not guaranteed to have any specific value and should be ignored.
1947 *
1948 * The caller must have permission to read from the given devhandle, real
1949 * address, which must be an IO address. The argument real address must be a
1950 * size aligned address.
1951 *
1952 * The hypervisor implementation of this function must block access to any
1953 * IO address that the guest does not have explicit permission to access.
1954 */
1955#define HV_FAST_PCI_PEEK 0xb6
1956
1957/* pci_poke()
1958 * TRAP: HV_FAST_TRAP
1959 * FUNCTION: HV_FAST_PCI_POKE
1960 * ARG0: devhandle
1961 * ARG1: real address
1962 * ARG2: size
1963 * ARG3: data
1964 * ARG4: pci_device
1965 * RET0: status
1966 * RET1: error_flag
1967 * ERRORS: EINVAL Invalid devhandle, size, or pci_device
1968 * EBADALIGN Improperly aligned real address
1969 * ENORADDR Bad real address
1970 * ENOACCESS Guest access prohibited
1971 * ENOTSUPPORTED Function is not supported by implementation
1972 *
1973 * Attempt to write data to the IO address given by the given devhandle,
1974 * real address, and size. Size must be 1, 2, 4, or 8. The write is
1975 * performed as a single access operation using the given size. Prior to
1976 * writing, the data is size based byte swapped.
1977 *
1978 * If an error occurs when writing to the given location, do not generate an
1979 * error report, but return a non-zero value in RET1. If the write was
1980 * successful, return zero in RET1.
1981 *
1982 * pci_device describes the configuration address of the device being
1983 * written to. The implementation may safely read from offset 0 within
1984 * the configuration space of the device described by devhandle and
1985 * pci_device in order to guarantee that the write portion of the operation
1986 * completes.
1987 *
1988 * Any error that occurs due to the read shall be reported using the normal
1989 * error reporting mechanisms; the read error is not suppressed.
1990 *
1991 * The caller must have permission to write to the given devhandle, real
1992 * address, which must be an IO address. The argument real address must be a
1993 * size aligned address. The caller must have permission to read from
1994 * the given devhandle, pci_device configuration space offset 0.
1995 *
1996 * The hypervisor implementation of this function must block access to any
1997 * IO address that the guest does not have explicit permission to access.
1998 */
1999#define HV_FAST_PCI_POKE 0xb7
2000
2001/* pci_dma_sync()
2002 * TRAP: HV_FAST_TRAP
2003 * FUNCTION: HV_FAST_PCI_DMA_SYNC
2004 * ARG0: devhandle
2005 * ARG1: real address
2006 * ARG2: size
2007 * ARG3: io_sync_direction
2008 * RET0: status
2009 * RET1: #synced
2010 * ERRORS: EINVAL Invalid devhandle or io_sync_direction
2011 * ENORADDR Bad real address
2012 *
2013 * Synchronize a memory region described by the given real address and size,
2014 * for the device defined by the given devhandle using the direction(s)
2015 * defined by the given io_sync_direction. The argument size is the size of
2016 * the memory region in bytes.
2017 *
2018 * Return the actual number of bytes synchronized in the return value #synced,
2019 * which may be less than or equal to the argument size. If the return
2020 * value #synced is less than size, the caller must continue to call this
2021 * function with updated real address and size arguments until the entire
2022 * memory region is synchronized.
2023 */
2024#define HV_FAST_PCI_DMA_SYNC 0xb8
2025
2026/* PCI MSI services. */
2027
2028#define HV_MSITYPE_MSI32 0x00
2029#define HV_MSITYPE_MSI64 0x01
2030
2031#define HV_MSIQSTATE_IDLE 0x00
2032#define HV_MSIQSTATE_ERROR 0x01
2033
2034#define HV_MSIQ_INVALID 0x00
2035#define HV_MSIQ_VALID 0x01
2036
2037#define HV_MSISTATE_IDLE 0x00
2038#define HV_MSISTATE_DELIVERED 0x01
2039
2040#define HV_MSIVALID_INVALID 0x00
2041#define HV_MSIVALID_VALID 0x01
2042
2043#define HV_PCIE_MSGTYPE_PME_MSG 0x18
2044#define HV_PCIE_MSGTYPE_PME_ACK_MSG 0x1b
2045#define HV_PCIE_MSGTYPE_CORR_MSG 0x30
2046#define HV_PCIE_MSGTYPE_NONFATAL_MSG 0x31
2047#define HV_PCIE_MSGTYPE_FATAL_MSG 0x33
2048
2049#define HV_MSG_INVALID 0x00
2050#define HV_MSG_VALID 0x01
2051
2052/* pci_msiq_conf()
2053 * TRAP: HV_FAST_TRAP
2054 * FUNCTION: HV_FAST_PCI_MSIQ_CONF
2055 * ARG0: devhandle
2056 * ARG1: msiqid
2057 * ARG2: real address
2058 * ARG3: number of entries
2059 * RET0: status
2060 * ERRORS: EINVAL Invalid devhandle, msiqid or nentries
2061 * EBADALIGN Improperly aligned real address
2062 * ENORADDR Bad real address
2063 *
2064 * Configure the MSI queue given by the devhandle and msiqid arguments,
2065 * to be placed at the given real address and to have the given
2066 * number of entries. The real address must be aligned exactly to match
2067 * the queue size. Each queue entry is 64-bytes long, so for example, a 32 entry
2068 * queue must be aligned on a 2048 byte real address boundary. The MSI-EQ
2069 * Head and Tail are initialized so that the MSI-EQ is 'empty'.
2070 *
2071 * Implementation Note: Certain implementations have fixed sized queues. In
2072 * that case, number of entries must contain the correct
2073 * value.
2074 */
2075#define HV_FAST_PCI_MSIQ_CONF 0xc0
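
#ifndef __ASSEMBLY__
/* Illustrative sketch only: the alignment rule stated above.  Each
 * MSI-EQ entry is 64 bytes, so a queue of nentries occupies
 * nentries * 64 bytes and its real address must be aligned to that
 * size (e.g. 32 entries => 2048-byte alignment).  The mask test
 * assumes the queue size is a power of two.
 */
static inline int example_msiq_ra_aligned(unsigned long ra,
					  unsigned long nentries)
{
	unsigned long qsize = nentries * 64;

	return (ra & (qsize - 1)) == 0;
}
#endif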
2076
2077/* pci_msiq_info()
2078 * TRAP: HV_FAST_TRAP
2079 * FUNCTION: HV_FAST_PCI_MSIQ_INFO
2080 * ARG0: devhandle
2081 * ARG1: msiqid
2082 * RET0: status
2083 * RET1: real address
2084 * RET2: number of entries
2085 * ERRORS: EINVAL Invalid devhandle or msiqid
2086 *
2087 * Return the configuration information for the MSI queue described
2088 * by the given devhandle and msiqid. The base address of the queue
2089 * is returned in RET1 and the number of entries is returned in RET2.
2090 * If the queue is unconfigured, the real address is undefined and the
2091 * number of entries will be returned as zero.
2092 */
2093#define HV_FAST_PCI_MSIQ_INFO 0xc1
2094
2095/* pci_msiq_getvalid()
2096 * TRAP: HV_FAST_TRAP
2097 * FUNCTION: HV_FAST_PCI_MSIQ_GETVALID
2098 * ARG0: devhandle
2099 * ARG1: msiqid
2100 * RET0: status
2101 * RET1: msiqvalid (HV_MSIQ_VALID or HV_MSIQ_INVALID)
2102 * ERRORS: EINVAL Invalid devhandle or msiqid
2103 *
2104 * Get the valid state of the MSI-EQ described by the given devhandle and
2105 * msiqid.
2106 */
2107#define HV_FAST_PCI_MSIQ_GETVALID 0xc2
2108
2109/* pci_msiq_setvalid()
2110 * TRAP: HV_FAST_TRAP
2111 * FUNCTION: HV_FAST_PCI_MSIQ_SETVALID
2112 * ARG0: devhandle
2113 * ARG1: msiqid
2114 * ARG2: msiqvalid (HV_MSIQ_VALID or HV_MSIQ_INVALID)
2115 * RET0: status
2116 * ERRORS: EINVAL Invalid devhandle or msiqid or msiqvalid
2117 * value or MSI EQ is uninitialized
2118 *
2119 * Set the valid state of the MSI-EQ described by the given devhandle and
2120 * msiqid to the given msiqvalid.
2121 */
2122#define HV_FAST_PCI_MSIQ_SETVALID 0xc3
2123
2124/* pci_msiq_getstate()
2125 * TRAP: HV_FAST_TRAP
2126 * FUNCTION: HV_FAST_PCI_MSIQ_GETSTATE
2127 * ARG0: devhandle
2128 * ARG1: msiqid
2129 * RET0: status
2130 * RET1: msiqstate (HV_MSIQSTATE_IDLE or HV_MSIQSTATE_ERROR)
2131 * ERRORS: EINVAL Invalid devhandle or msiqid
2132 *
2133 * Get the state of the MSI-EQ described by the given devhandle and
2134 * msiqid.
2135 */
2136#define HV_FAST_PCI_MSIQ_GETSTATE 0xc4
2137
2138/* pci_msiq_setstate()
2139 * TRAP: HV_FAST_TRAP
2140 * FUNCTION: HV_FAST_PCI_MSIQ_SETSTATE
2141 * ARG0: devhandle
2142 * ARG1: msiqid
2143 * ARG2: msiqstate (HV_MSIQSTATE_IDLE or HV_MSIQSTATE_ERROR)
2144 * RET0: status
2145 * ERRORS: EINVAL Invalid devhandle or msiqid or msiqstate
2146 * value or MSI EQ is uninitialized
2147 *
2148 * Set the state of the MSI-EQ described by the given devhandle and
2149 * msiqid to the given msiqstate.
2150 */
2151#define HV_FAST_PCI_MSIQ_SETSTATE 0xc5
2152
2153/* pci_msiq_gethead()
2154 * TRAP: HV_FAST_TRAP
2155 * FUNCTION: HV_FAST_PCI_MSIQ_GETHEAD
2156 * ARG0: devhandle
2157 * ARG1: msiqid
2158 * RET0: status
2159 * RET1: msiqhead
2160 * ERRORS: EINVAL Invalid devhandle or msiqid
2161 *
2162 * Get the current MSI EQ queue head for the MSI-EQ described by the
2163 * given devhandle and msiqid.
2164 */
2165#define HV_FAST_PCI_MSIQ_GETHEAD 0xc6
2166
2167/* pci_msiq_sethead()
2168 * TRAP: HV_FAST_TRAP
2169 * FUNCTION: HV_FAST_PCI_MSIQ_SETHEAD
2170 * ARG0: devhandle
2171 * ARG1: msiqid
2172 * ARG2: msiqhead
2173 * RET0: status
2174 * ERRORS: EINVAL Invalid devhandle or msiqid or msiqhead,
2175 * or MSI EQ is uninitialized
2176 *
2177 * Set the current MSI EQ queue head for the MSI-EQ described by the
2178 * given devhandle and msiqid.
2179 */
2180#define HV_FAST_PCI_MSIQ_SETHEAD 0xc7
2181
2182/* pci_msiq_gettail()
2183 * TRAP: HV_FAST_TRAP
2184 * FUNCTION: HV_FAST_PCI_MSIQ_GETTAIL
2185 * ARG0: devhandle
2186 * ARG1: msiqid
2187 * RET0: status
2188 * RET1: msiqtail
2189 * ERRORS: EINVAL Invalid devhandle or msiqid
2190 *
2191 * Get the current MSI EQ queue tail for the MSI-EQ described by the
2192 * given devhandle and msiqid.
2193 */
2194#define HV_FAST_PCI_MSIQ_GETTAIL 0xc8
2195
2196/* pci_msi_getvalid()
2197 * TRAP: HV_FAST_TRAP
2198 * FUNCTION: HV_FAST_PCI_MSI_GETVALID
2199 * ARG0: devhandle
2200 * ARG1: msinum
2201 * RET0: status
2202 * RET1: msivalidstate
2203 * ERRORS: EINVAL Invalid devhandle or msinum
2204 *
2205 * Get the current valid/enabled state for the MSI defined by the
2206 * given devhandle and msinum.
2207 */
2208#define HV_FAST_PCI_MSI_GETVALID 0xc9
2209
2210/* pci_msi_setvalid()
2211 * TRAP: HV_FAST_TRAP
2212 * FUNCTION: HV_FAST_PCI_MSI_SETVALID
2213 * ARG0: devhandle
2214 * ARG1: msinum
2215 * ARG2: msivalidstate
2216 * RET0: status
2217 * ERRORS: EINVAL Invalid devhandle or msinum or msivalidstate
2218 *
2219 * Set the current valid/enabled state for the MSI defined by the
2220 * given devhandle and msinum.
2221 */
2222#define HV_FAST_PCI_MSI_SETVALID 0xca
2223
2224/* pci_msi_getmsiq()
2225 * TRAP: HV_FAST_TRAP
2226 * FUNCTION: HV_FAST_PCI_MSI_GETMSIQ
2227 * ARG0: devhandle
2228 * ARG1: msinum
2229 * RET0: status
2230 * RET1: msiqid
2231 * ERRORS: EINVAL Invalid devhandle or msinum or MSI is unbound
2232 *
2233 * Get the MSI EQ that the MSI defined by the given devhandle and
2234 * msinum is bound to.
2235 */
2236#define HV_FAST_PCI_MSI_GETMSIQ 0xcb
2237
2238/* pci_msi_setmsiq()
2239 * TRAP: HV_FAST_TRAP
2240 * FUNCTION: HV_FAST_PCI_MSI_SETMSIQ
2241 * ARG0: devhandle
2242 * ARG1: msinum
2243 * ARG2: msitype
2244 * ARG3: msiqid
2245 * RET0: status
2246 * ERRORS: EINVAL Invalid devhandle or msinum or msiqid
2247 *
2248 * Set the MSI EQ that the MSI defined by the given devhandle and
2249 * msinum is bound to.
2250 */
2251#define HV_FAST_PCI_MSI_SETMSIQ 0xcc
2252
2253/* pci_msi_getstate()
2254 * TRAP: HV_FAST_TRAP
2255 * FUNCTION: HV_FAST_PCI_MSI_GETSTATE
2256 * ARG0: devhandle
2257 * ARG1: msinum
2258 * RET0: status
2259 * RET1: msistate
2260 * ERRORS: EINVAL Invalid devhandle or msinum
2261 *
2262 * Get the state of the MSI defined by the given devhandle and msinum.
2263 * If not initialized, return HV_MSISTATE_IDLE.
2264 */
2265#define HV_FAST_PCI_MSI_GETSTATE 0xcd
2266
2267/* pci_msi_setstate()
2268 * TRAP: HV_FAST_TRAP
2269 * FUNCTION: HV_FAST_PCI_MSI_SETSTATE
2270 * ARG0: devhandle
2271 * ARG1: msinum
2272 * ARG2: msistate
2273 * RET0: status
2274 * ERRORS: EINVAL Invalid devhandle or msinum or msistate
2275 *
2276 * Set the state of the MSI defined by the given devhandle and msinum.
2277 */
2278#define HV_FAST_PCI_MSI_SETSTATE 0xce
2279
2280/* pci_msg_getmsiq()
2281 * TRAP: HV_FAST_TRAP
2282 * FUNCTION: HV_FAST_PCI_MSG_GETMSIQ
2283 * ARG0: devhandle
2284 * ARG1: msgtype
2285 * RET0: status
2286 * RET1: msiqid
2287 * ERRORS: EINVAL Invalid devhandle or msgtype
2288 *
2289 * Get the MSI EQ of the MSG defined by the given devhandle and msgtype.
2290 */
2291#define HV_FAST_PCI_MSG_GETMSIQ 0xd0
2292
2293/* pci_msg_setmsiq()
2294 * TRAP: HV_FAST_TRAP
2295 * FUNCTION: HV_FAST_PCI_MSG_SETMSIQ
2296 * ARG0: devhandle
2297 * ARG1: msgtype
2298 * ARG2: msiqid
2299 * RET0: status
2300 * ERRORS: EINVAL Invalid devhandle, msgtype, or msiqid
2301 *
2302 * Set the MSI EQ of the MSG defined by the given devhandle and msgtype.
2303 */
2304#define HV_FAST_PCI_MSG_SETMSIQ 0xd1
2305
2306/* pci_msg_getvalid()
2307 * TRAP: HV_FAST_TRAP
2308 * FUNCTION: HV_FAST_PCI_MSG_GETVALID
2309 * ARG0: devhandle
2310 * ARG1: msgtype
2311 * RET0: status
2312 * RET1: msgvalidstate
2313 * ERRORS: EINVAL Invalid devhandle or msgtype
2314 *
2315 * Get the valid/enabled state of the MSG defined by the given
2316 * devhandle and msgtype.
2317 */
2318#define HV_FAST_PCI_MSG_GETVALID 0xd2
2319
2320/* pci_msg_setvalid()
2321 * TRAP: HV_FAST_TRAP
2322 * FUNCTION: HV_FAST_PCI_MSG_SETVALID
2323 * ARG0: devhandle
2324 * ARG1: msgtype
2325 * ARG2: msgvalidstate
2326 * RET0: status
2327 * ERRORS: EINVAL Invalid devhandle or msgtype or msgvalidstate
2328 *
2329 * Set the valid/enabled state of the MSG defined by the given
2330 * devhandle and msgtype.
2331 */
2332#define HV_FAST_PCI_MSG_SETVALID 0xd3
2333
2334/* Logical Domain Channel services. */
2335
2336#define LDC_CHANNEL_DOWN 0
2337#define LDC_CHANNEL_UP 1
2338#define LDC_CHANNEL_RESETTING 2
2339
2340/* ldc_tx_qconf()
2341 * TRAP: HV_FAST_TRAP
2342 * FUNCTION: HV_FAST_LDC_TX_QCONF
2343 * ARG0: channel ID
2344 * ARG1: real address base of queue
2345 * ARG2: num entries in queue
2346 * RET0: status
2347 *
2348 * Configure transmit queue for the LDC endpoint specified by the
2349 * given channel ID, to be placed at the given real address, and
2350 * be of the given num entries. Num entries must be a power of two.
2351 * The real address base of the queue must be aligned on the queue
2352 * size. Each queue entry is 64-bytes, so for example, a 32 entry
2353 * queue must be aligned on a 2048 byte real address boundary.
2354 *
2355 * Upon configuration of a valid transmit queue the head and tail
2356 * pointers are set to a hypervisor specific identical value indicating
2357 * that the queue is initially empty.
2358 *
2359 * The endpoint's transmit queue is un-configured if num entries is zero.
2360 *
2361 * The maximum number of entries for each queue for a specific cpu may be
2362 * determined from the machine description. A transmit queue may be
2363 * specified even in the event that the LDC is down (peer endpoint has no
2364 * receive queue specified). Transmission will begin as soon as the peer
2365 * endpoint defines a receive queue.
2366 *
2367 * It is recommended that a guest wait for a transmit queue to empty prior
2368 * to reconfiguring it, or un-configuring it. Re- or un-configuring a
2369 * non-empty transmit queue behaves exactly as defined above, however it
2370 * is undefined as to how many of the pending entries in the original queue
2371 * will be delivered prior to the re-configuration taking effect.
2372 * Furthermore, as the queue configuration causes a reset of the head and
2373 * tail pointers, there is no way for a guest to determine how many entries
2374 * have been sent after the configuration operation.
2375 */
2376#define HV_FAST_LDC_TX_QCONF 0xe0
2377
2378/* ldc_tx_qinfo()
2379 * TRAP: HV_FAST_TRAP
2380 * FUNCTION: HV_FAST_LDC_TX_QINFO
2381 * ARG0: channel ID
2382 * RET0: status
2383 * RET1: real address base of queue
2384 * RET2: num entries in queue
2385 *
2386 * Return the configuration info for the transmit queue of LDC endpoint
2387 * defined by the given channel ID. The real address is the currently
2388 * defined real address base of the defined queue, and num entries is the
2389 * size of the queue in terms of number of entries.
2390 *
2391 * If the specified channel ID is a valid endpoint number, but no transmit
2392 * queue has been defined, this service will return success, but with num
2393 * entries set to zero and the real address will have an undefined value.
2394 */
2395#define HV_FAST_LDC_TX_QINFO 0xe1
2396
2397/* ldc_tx_get_state()
2398 * TRAP: HV_FAST_TRAP
2399 * FUNCTION: HV_FAST_LDC_TX_GET_STATE
2400 * ARG0: channel ID
2401 * RET0: status
2402 * RET1: head offset
2403 * RET2: tail offset
2404 * RET3: channel state
2405 *
2406 * Return the transmit state, and the head and tail queue pointers, for
2407 * the transmit queue of the LDC endpoint defined by the given channel ID.
2408 * The head and tail values are the byte offset of the head and tail
2409 * positions of the transmit queue for the specified endpoint.
2410 */
2411#define HV_FAST_LDC_TX_GET_STATE 0xe2
2412
2413/* ldc_tx_set_qtail()
2414 * TRAP: HV_FAST_TRAP
2415 * FUNCTION: HV_FAST_LDC_TX_SET_QTAIL
2416 * ARG0: channel ID
2417 * ARG1: tail offset
2418 * RET0: status
2419 *
2420 * Update the tail pointer for the transmit queue associated with the LDC
2421 * endpoint defined by the given channel ID. The tail offset specified
2422 * must be aligned on a 64 byte boundary, and calculated so as to increase
2423 * the number of pending entries on the transmit queue. Any attempt to
2424 * decrease the number of pending transmit queue entries is considered
2425 * an invalid tail offset and will result in an EINVAL error.
2426 *
2427 * Since the tail of the transmit queue may not be moved backwards, the
2428 * transmit queue may be flushed by configuring a new transmit queue,
2429 * whereupon the hypervisor will configure the initial transmit head and
2430 * tail pointers to be equal.
2431 */
2432#define HV_FAST_LDC_TX_SET_QTAIL 0xe3
2433
2434/* ldc_rx_qconf()
2435 * TRAP: HV_FAST_TRAP
2436 * FUNCTION: HV_FAST_LDC_RX_QCONF
2437 * ARG0: channel ID
2438 * ARG1: real address base of queue
2439 * ARG2: num entries in queue
2440 * RET0: status
2441 *
2442 * Configure receive queue for the LDC endpoint specified by the
2443 * given channel ID, to be placed at the given real address, and
2444 * be of the given num entries. Num entries must be a power of two.
2445 * The real address base of the queue must be aligned on the queue
2446 * size. Each queue entry is 64-bytes, so for example, a 32 entry
2447 * queue must be aligned on a 2048 byte real address boundary.
2448 *
2449 * The endpoint's receive queue is un-configured if num entries is zero.
2450 *
2451 * If a valid receive queue is specified for a local endpoint, the LDC is
2452 * in the up state for the purpose of transmission to this endpoint.
2453 *
2454 * The maximum number of entries for each queue for a specific cpu may be
2455 * determined from the machine description.
2456 *
2457 * As receive queue configuration causes a reset of the queue's head and
2458 * tail pointers, there is no way for a guest to determine how many entries
2459 * have been received between a preceding ldc_rx_get_state() API call
2460 * and the completion of the configuration operation. It should be noted
2461 * that datagram delivery is not guaranteed via domain channels anyway,
2462 * and therefore any higher protocol should be resilient to datagram
2463 * loss if necessary. However, to overcome this specific race potential
2464 * it is recommended, for example, that a higher level protocol be employed
2465 * to ensure either retransmission, or ensure that no datagrams are pending
2466 * on the peer endpoint's transmit queue prior to the configuration process.
2467 */
2468#define HV_FAST_LDC_RX_QCONF 0xe4
2469
2470/* ldc_rx_qinfo()
2471 * TRAP: HV_FAST_TRAP
2472 * FUNCTION: HV_FAST_LDC_RX_QINFO
2473 * ARG0: channel ID
2474 * RET0: status
2475 * RET1: real address base of queue
2476 * RET2: num entries in queue
2477 *
2478 * Return the configuration info for the receive queue of LDC endpoint
2479 * defined by the given channel ID. The real address is the currently
2480 * defined real address base of the defined queue, and num entries is the
2481 * size of the queue in terms of number of entries.
2482 *
2483 * If the specified channel ID is a valid endpoint number, but no receive
2484 * queue has been defined, this service will return success, but with num
2485 * entries set to zero and the real address will have an undefined value.
2486 */
2487#define HV_FAST_LDC_RX_QINFO 0xe5
2488
2489/* ldc_rx_get_state()
2490 * TRAP: HV_FAST_TRAP
2491 * FUNCTION: HV_FAST_LDC_RX_GET_STATE
2492 * ARG0: channel ID
2493 * RET0: status
2494 * RET1: head offset
2495 * RET2: tail offset
2496 * RET3: channel state
2497 *
2498 * Return the receive state, and the head and tail queue pointers, for
2499 * the receive queue of the LDC endpoint defined by the given channel ID.
2500 * The head and tail values are the byte offset of the head and tail
2501 * positions of the receive queue for the specified endpoint.
2502 */
2503#define HV_FAST_LDC_RX_GET_STATE 0xe6
2504
2505/* ldc_rx_set_qhead()
2506 * TRAP: HV_FAST_TRAP
2507 * FUNCTION: HV_FAST_LDC_RX_SET_QHEAD
2508 * ARG0: channel ID
2509 * ARG1: head offset
2510 * RET0: status
2511 *
2512 * Update the head pointer for the receive queue associated with the LDC
2513 * endpoint defined by the given channel ID. The head offset specified
2514 * must be aligned on a 64 byte boundary, and calculated so as to decrease
2515 * the number of pending entries on the receive queue. Any attempt to
2516 * increase the number of pending receive queue entries is considered
2517 * an invalid head offset and will result in an EINVAL error.
2518 *
2519 * The receive queue may be flushed by setting the head offset equal
2520 * to the current tail offset.
2521 */
2522#define HV_FAST_LDC_RX_SET_QHEAD 0xe7
2523
2524/* LDC Map Table Entry. Each slot is defined by a translation table
2525 * entry, as specified by the LDC_MTE_* bits below, and a 64-bit
2526 * hypervisor invalidation cookie.
2527 */
2528#define LDC_MTE_PADDR 0x0fffffffffffe000 /* pa[55:13] */
2529#define LDC_MTE_COPY_W 0x0000000000000400 /* copy write access */
2530#define LDC_MTE_COPY_R 0x0000000000000200 /* copy read access */
2531#define LDC_MTE_IOMMU_W 0x0000000000000100 /* IOMMU write access */
2532#define LDC_MTE_IOMMU_R 0x0000000000000080 /* IOMMU read access */
2533#define LDC_MTE_EXEC 0x0000000000000040 /* execute */
2534#define LDC_MTE_WRITE 0x0000000000000020 /* write */
2535#define LDC_MTE_READ 0x0000000000000010 /* read */
2536#define LDC_MTE_SZALL 0x000000000000000f /* page size bits */
2537#define LDC_MTE_SZ16GB 0x0000000000000007 /* 16GB page */
2538#define LDC_MTE_SZ2GB 0x0000000000000006 /* 2GB page */
2539#define LDC_MTE_SZ256MB 0x0000000000000005 /* 256MB page */
2540#define LDC_MTE_SZ32MB 0x0000000000000004 /* 32MB page */
2541#define LDC_MTE_SZ4MB 0x0000000000000003 /* 4MB page */
2542#define LDC_MTE_SZ512K 0x0000000000000002 /* 512K page */
2543#define LDC_MTE_SZ64K 0x0000000000000001 /* 64K page */
2544#define LDC_MTE_SZ8K 0x0000000000000000 /* 8K page */
2545
2546#ifndef __ASSEMBLY__
2547struct ldc_mtable_entry {
2548 unsigned long mte;
2549 unsigned long cookie;
2550};
2551#endif
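
#ifndef __ASSEMBLY__
/* Illustrative sketch only: composing a map table entry from the bits
 * above.  The physical address comes from the caller; this example
 * exports one 8K page for IOMMU read/write access and leaves the
 * hypervisor invalidation cookie field clear (how it is later consumed
 * by revoke/unmap is an assumption of this sketch).
 */
static inline void example_ldc_fill_mte(struct ldc_mtable_entry *ent,
					unsigned long paddr)
{
	ent->mte = (paddr & LDC_MTE_PADDR) |
		   LDC_MTE_IOMMU_R | LDC_MTE_IOMMU_W |
		   LDC_MTE_SZ8K;
	ent->cookie = 0;
}
#endif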
2552
2553/* ldc_set_map_table()
2554 * TRAP: HV_FAST_TRAP
2555 * FUNCTION: HV_FAST_LDC_SET_MAP_TABLE
2556 * ARG0: channel ID
2557 * ARG1: table real address
2558 * ARG2: num entries
2559 * RET0: status
2560 *
2561 * Register the MTE table at the given table real address, with the
2562 * specified num entries, for the LDC indicated by the given channel
2563 * ID.
2564 */
2565#define HV_FAST_LDC_SET_MAP_TABLE 0xea
2566
2567/* ldc_get_map_table()
2568 * TRAP: HV_FAST_TRAP
2569 * FUNCTION: HV_FAST_LDC_GET_MAP_TABLE
2570 * ARG0: channel ID
2571 * RET0: status
2572 * RET1: table real address
2573 * RET2: num entries
2574 *
2575 * Return the configuration of the current mapping table registered
2576 * for the given channel ID.
2577 */
2578#define HV_FAST_LDC_GET_MAP_TABLE 0xeb
2579
2580#define LDC_COPY_IN 0
2581#define LDC_COPY_OUT 1
2582
2583/* ldc_copy()
2584 * TRAP: HV_FAST_TRAP
2585 * FUNCTION: HV_FAST_LDC_COPY
2586 * ARG0: channel ID
2587 * ARG1: LDC_COPY_* direction code
2588 * ARG2: target real address
2589 * ARG3: local real address
2590 * ARG4: length in bytes
2591 * RET0: status
2592 * RET1: actual length in bytes
2593 */
2594#define HV_FAST_LDC_COPY 0xec
2595
2596#define LDC_MEM_READ 1
2597#define LDC_MEM_WRITE 2
2598#define LDC_MEM_EXEC 4
2599
2600/* ldc_mapin()
2601 * TRAP: HV_FAST_TRAP
2602 * FUNCTION: HV_FAST_LDC_MAPIN
2603 * ARG0: channel ID
2604 * ARG1: cookie
2605 * RET0: status
2606 * RET1: real address
2607 * RET2: LDC_MEM_* permissions
2608 */
2609#define HV_FAST_LDC_MAPIN 0xed
2610
2611/* ldc_unmap()
2612 * TRAP: HV_FAST_TRAP
2613 * FUNCTION: HV_FAST_LDC_UNMAP
2614 * ARG0: real address
2615 * RET0: status
2616 */
2617#define HV_FAST_LDC_UNMAP 0xee
2618
2619/* ldc_revoke()
2620 * TRAP: HV_FAST_TRAP
2621 * FUNCTION: HV_FAST_LDC_REVOKE
2622 * ARG0: channel ID
2623 * ARG1: cookie
2624 * ARG2: ldc_mtable_entry cookie
2625 * RET0: status
2626 */
2627#define HV_FAST_LDC_REVOKE 0xef
2628
2629#ifndef __ASSEMBLY__
2630extern unsigned long sun4v_ldc_tx_qconf(unsigned long channel,
2631 unsigned long ra,
2632 unsigned long num_entries);
2633extern unsigned long sun4v_ldc_tx_qinfo(unsigned long channel,
2634 unsigned long *ra,
2635 unsigned long *num_entries);
2636extern unsigned long sun4v_ldc_tx_get_state(unsigned long channel,
2637 unsigned long *head_off,
2638 unsigned long *tail_off,
2639 unsigned long *chan_state);
2640extern unsigned long sun4v_ldc_tx_set_qtail(unsigned long channel,
2641 unsigned long tail_off);
2642extern unsigned long sun4v_ldc_rx_qconf(unsigned long channel,
2643 unsigned long ra,
2644 unsigned long num_entries);
2645extern unsigned long sun4v_ldc_rx_qinfo(unsigned long channel,
2646 unsigned long *ra,
2647 unsigned long *num_entries);
2648extern unsigned long sun4v_ldc_rx_get_state(unsigned long channel,
2649 unsigned long *head_off,
2650 unsigned long *tail_off,
2651 unsigned long *chan_state);
2652extern unsigned long sun4v_ldc_rx_set_qhead(unsigned long channel,
2653 unsigned long head_off);
2654extern unsigned long sun4v_ldc_set_map_table(unsigned long channel,
2655 unsigned long ra,
2656 unsigned long num_entries);
2657extern unsigned long sun4v_ldc_get_map_table(unsigned long channel,
2658 unsigned long *ra,
2659 unsigned long *num_entries);
2660extern unsigned long sun4v_ldc_copy(unsigned long channel,
2661 unsigned long dir_code,
2662 unsigned long tgt_raddr,
2663 unsigned long lcl_raddr,
2664 unsigned long len,
2665 unsigned long *actual_len);
2666extern unsigned long sun4v_ldc_mapin(unsigned long channel,
2667 unsigned long cookie,
2668 unsigned long *ra,
2669 unsigned long *perm);
2670extern unsigned long sun4v_ldc_unmap(unsigned long ra);
2671extern unsigned long sun4v_ldc_revoke(unsigned long channel,
2672 unsigned long cookie,
2673 unsigned long mte_cookie);
2674#endif
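
#ifndef __ASSEMBLY__
/* Illustrative sketch only: advancing a transmit queue tail by one
 * 64-byte entry with the wrappers above.  The caller would copy its
 * 64-byte packet into the slot at the old tail offset before the new
 * tail is published; the non-zero placeholder status values below
 * stand in for whatever HV_E* codes real code would choose.
 */
static inline unsigned long example_ldc_tx_advance(unsigned long channel,
						   unsigned long num_entries)
{
	unsigned long head, tail, new_tail, state, hv_err;

	hv_err = sun4v_ldc_tx_get_state(channel, &head, &tail, &state);
	if (hv_err)
		return hv_err;
	if (state != LDC_CHANNEL_UP)
		return 1;		/* placeholder status */

	/* Offsets wrap at the queue size (num_entries * 64 bytes). */
	new_tail = (tail + 64) & ((num_entries * 64) - 1);
	if (new_tail == head)
		return 1;		/* queue full, placeholder status */

	/* ... caller copies its 64-byte packet to the slot at 'tail' ... */

	return sun4v_ldc_tx_set_qtail(channel, new_tail);
}
#endif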
2675
2676/* Performance counter services. */
2677
2678#define HV_PERF_JBUS_PERF_CTRL_REG 0x00
2679#define HV_PERF_JBUS_PERF_CNT_REG 0x01
2680#define HV_PERF_DRAM_PERF_CTRL_REG_0 0x02
2681#define HV_PERF_DRAM_PERF_CNT_REG_0 0x03
2682#define HV_PERF_DRAM_PERF_CTRL_REG_1 0x04
2683#define HV_PERF_DRAM_PERF_CNT_REG_1 0x05
2684#define HV_PERF_DRAM_PERF_CTRL_REG_2 0x06
2685#define HV_PERF_DRAM_PERF_CNT_REG_2 0x07
2686#define HV_PERF_DRAM_PERF_CTRL_REG_3 0x08
2687#define HV_PERF_DRAM_PERF_CNT_REG_3 0x09
2688
2689/* get_perfreg()
2690 * TRAP: HV_FAST_TRAP
2691 * FUNCTION: HV_FAST_GET_PERFREG
2692 * ARG0: performance reg number
2693 * RET0: status
2694 * RET1: performance reg value
2695 * ERRORS: EINVAL Invalid performance register number
2696 * ENOACCESS No access allowed to performance counters
2697 *
2698 * Read the value of the given DRAM/JBUS performance counter/control register.
2699 */
2700#define HV_FAST_GET_PERFREG 0x100
2701
2702/* set_perfreg()
2703 * TRAP: HV_FAST_TRAP
2704 * FUNCTION: HV_FAST_SET_PERFREG
2705 * ARG0: performance reg number
2706 * ARG1: performance reg value
2707 * RET0: status
2708 * ERRORS: EINVAL Invalid performance register number
2709 * ENOACCESS No access allowed to performance counters
2710 *
2711 * Write the given performance reg value to the given DRAM/JBUS
2712 * performance counter/control register.
2713 */
2714#define HV_FAST_SET_PERFREG 0x101
2715
2716/* MMU statistics services.
2717 *
2718 * The hypervisor maintains MMU statistics and privileged code provides
2719 * a buffer where these statistics can be collected. It is continually
2720 * updated once configured. The layout is as follows:
2721 */
2722#ifndef __ASSEMBLY__
2723struct hv_mmu_statistics {
2724 unsigned long immu_tsb_hits_ctx0_8k_tte;
2725 unsigned long immu_tsb_ticks_ctx0_8k_tte;
2726 unsigned long immu_tsb_hits_ctx0_64k_tte;
2727 unsigned long immu_tsb_ticks_ctx0_64k_tte;
2728 unsigned long __reserved1[2];
2729 unsigned long immu_tsb_hits_ctx0_4mb_tte;
2730 unsigned long immu_tsb_ticks_ctx0_4mb_tte;
2731 unsigned long __reserved2[2];
2732 unsigned long immu_tsb_hits_ctx0_256mb_tte;
2733 unsigned long immu_tsb_ticks_ctx0_256mb_tte;
2734 unsigned long __reserved3[4];
2735 unsigned long immu_tsb_hits_ctxnon0_8k_tte;
2736 unsigned long immu_tsb_ticks_ctxnon0_8k_tte;
2737 unsigned long immu_tsb_hits_ctxnon0_64k_tte;
2738 unsigned long immu_tsb_ticks_ctxnon0_64k_tte;
2739 unsigned long __reserved4[2];
2740 unsigned long immu_tsb_hits_ctxnon0_4mb_tte;
2741 unsigned long immu_tsb_ticks_ctxnon0_4mb_tte;
2742 unsigned long __reserved5[2];
2743 unsigned long immu_tsb_hits_ctxnon0_256mb_tte;
2744 unsigned long immu_tsb_ticks_ctxnon0_256mb_tte;
2745 unsigned long __reserved6[4];
2746 unsigned long dmmu_tsb_hits_ctx0_8k_tte;
2747 unsigned long dmmu_tsb_ticks_ctx0_8k_tte;
2748 unsigned long dmmu_tsb_hits_ctx0_64k_tte;
2749 unsigned long dmmu_tsb_ticks_ctx0_64k_tte;
2750 unsigned long __reserved7[2];
2751 unsigned long dmmu_tsb_hits_ctx0_4mb_tte;
2752 unsigned long dmmu_tsb_ticks_ctx0_4mb_tte;
2753 unsigned long __reserved8[2];
2754 unsigned long dmmu_tsb_hits_ctx0_256mb_tte;
2755 unsigned long dmmu_tsb_ticks_ctx0_256mb_tte;
2756 unsigned long __reserved9[4];
2757 unsigned long dmmu_tsb_hits_ctxnon0_8k_tte;
2758 unsigned long dmmu_tsb_ticks_ctxnon0_8k_tte;
2759 unsigned long dmmu_tsb_hits_ctxnon0_64k_tte;
2760 unsigned long dmmu_tsb_ticks_ctxnon0_64k_tte;
2761 unsigned long __reserved10[2];
2762 unsigned long dmmu_tsb_hits_ctxnon0_4mb_tte;
2763 unsigned long dmmu_tsb_ticks_ctxnon0_4mb_tte;
2764 unsigned long __reserved11[2];
2765 unsigned long dmmu_tsb_hits_ctxnon0_256mb_tte;
2766 unsigned long dmmu_tsb_ticks_ctxnon0_256mb_tte;
2767 unsigned long __reserved12[4];
2768};
2769#endif
2770
2771/* mmustat_conf()
2772 * TRAP: HV_FAST_TRAP
2773 * FUNCTION: HV_FAST_MMUSTAT_CONF
2774 * ARG0: real address
2775 * RET0: status
2776 * RET1: real address
2777 * ERRORS: ENORADDR Invalid real address
2778 * EBADALIGN Real address not aligned on 64-byte boundary
2779 * EBADTRAP API not supported on this processor
2780 *
2781 * Enable MMU statistic gathering using the buffer at the given real
2782 * address on the current virtual CPU. The new buffer real address
2783 * is given in ARG1, and the previously specified buffer real address
2784 * is returned in RET1, or is returned as zero for the first invocation.
2785 *
2786 * If the passed in real address argument is zero, this will disable
2787 * MMU statistic collection on the current virtual CPU. If an error is
2788 * returned then no statistics are collected.
2789 *
2790 * The buffer contents should be initialized to all zeros before being
2791 * given to the hypervisor or else the statistics will be meaningless.
2792 */
2793#define HV_FAST_MMUSTAT_CONF 0x102
2794
2795/* mmustat_info()
2796 * TRAP: HV_FAST_TRAP
2797 * FUNCTION: HV_FAST_MMUSTAT_INFO
2798 * RET0: status
2799 * RET1: real address
2800 * ERRORS: EBADTRAP API not supported on this processor
2801 *
2802 * Return the current state and real address of the currently configured
2803 * MMU statistics buffer on the current virtual CPU.
2804 */
2805#define HV_FAST_MMUSTAT_INFO 0x103
2806
2807#ifndef __ASSEMBLY__
2808extern unsigned long sun4v_mmustat_conf(unsigned long ra, unsigned long *orig_ra);
2809extern unsigned long sun4v_mmustat_info(unsigned long *ra);
2810#endif
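
#ifndef __ASSEMBLY__
/* Illustrative sketch only: enabling MMU statistics on the current
 * virtual CPU.  The buffer must be zeroed first and must sit on a
 * 64-byte boundary; memset() and __pa() are assumed to be available
 * from the usual kernel headers, with __pa() yielding the buffer's
 * real address.  The previously registered buffer's real address
 * comes back in *prev_ra.
 */
static inline unsigned long example_mmustat_enable(struct hv_mmu_statistics *p,
						   unsigned long *prev_ra)
{
	memset(p, 0, sizeof(*p));
	return sun4v_mmustat_conf(__pa(p), prev_ra);
}
#endif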
2811
2812/* NCS crypto services */
2813
2814/* ncs_request() sub-function numbers */
2815#define HV_NCS_QCONF 0x01
2816#define HV_NCS_QTAIL_UPDATE 0x02
2817
2818#ifndef __ASSEMBLY__
2819struct hv_ncs_queue_entry {
2820 /* MAU Control Register */
2821 unsigned long mau_control;
2822#define MAU_CONTROL_INV_PARITY 0x0000000000002000
2823#define MAU_CONTROL_STRAND 0x0000000000001800
2824#define MAU_CONTROL_BUSY 0x0000000000000400
2825#define MAU_CONTROL_INT 0x0000000000000200
2826#define MAU_CONTROL_OP 0x00000000000001c0
2827#define MAU_CONTROL_OP_SHIFT 6
2828#define MAU_OP_LOAD_MA_MEMORY 0x0
2829#define MAU_OP_STORE_MA_MEMORY 0x1
2830#define MAU_OP_MODULAR_MULT 0x2
2831#define MAU_OP_MODULAR_REDUCE 0x3
2832#define MAU_OP_MODULAR_EXP_LOOP 0x4
2833#define MAU_CONTROL_LEN 0x000000000000003f
2834#define MAU_CONTROL_LEN_SHIFT 0
2835
2836 /* Real address of the bytes to load into or store
2837 * out of the MAU.
2838 */
2839 unsigned long mau_mpa;
2840
2841 /* Modular Arithmetic MA Offset Register. */
2842 unsigned long mau_ma;
2843
2844 /* Modular Arithmetic N Prime Register. */
2845 unsigned long mau_np;
2846};
2847
2848struct hv_ncs_qconf_arg {
2849 unsigned long mid; /* MAU ID, 1 per core on Niagara */
2850 unsigned long base; /* Real address base of queue */
2851 unsigned long end; /* Real address end of queue */
2852 unsigned long num_ents; /* Number of entries in queue */
2853};
2854
2855struct hv_ncs_qtail_update_arg {
2856 unsigned long mid; /* MAU ID, 1 per core on Niagara */
2857 unsigned long tail; /* New tail index to use */
2858 unsigned long syncflag; /* only SYNCFLAG_SYNC is implemented */
2859#define HV_NCS_SYNCFLAG_SYNC 0x00
2860#define HV_NCS_SYNCFLAG_ASYNC 0x01
2861};
2862#endif
2863
2864/* ncs_request()
2865 * TRAP: HV_FAST_TRAP
2866 * FUNCTION: HV_FAST_NCS_REQUEST
2867 * ARG0: NCS sub-function
2868 * ARG1: sub-function argument real address
2869 * ARG2: size in bytes of sub-function argument
2870 * RET0: status
2871 *
2872 * The MAU chip of the Niagara processor is not directly accessible
2873 * to privileged code; instead it is programmed indirectly via this
2874 * hypervisor API.
2875 *
2876 * The interface defines a queue of MAU operations to perform.
2877 * Privileged code registers a queue with the hypervisor by invoking
2878 * this HVAPI with the HV_NCS_QCONF sub-function, which defines the
2879 * base, end, and number of entries of the queue. Each queue entry
2880 * contains a MAU register struct block.
2881 *
2882 * The privileged code then proceeds to add entries to the queue and
2883 * then invokes the HV_NCS_QTAIL_UPDATE sub-function. Since only
2884 * synchronous operations are supported by the current hypervisor,
2885 * HV_NCS_QTAIL_UPDATE will run all the pending queue entries to
2886 * completion and return HV_EOK, or return an error code.
2887 *
2888 * The real address of the sub-function argument must be aligned on at
2889 * least an 8-byte boundary.
2890 *
2891 * The tail argument of HV_NCS_QTAIL_UPDATE is an index, not a byte
2892 * offset, into the queue and must be less than or equal to the 'num_ents'
2893 * argument given in the HV_NCS_QCONF call.
2894 */
2895#define HV_FAST_NCS_REQUEST 0x110
2896
2897#ifndef __ASSEMBLY__
2898extern unsigned long sun4v_ncs_request(unsigned long request,
2899 unsigned long arg_ra,
2900 unsigned long arg_size);
2901#endif
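
#ifndef __ASSEMBLY__
/* Illustrative sketch only: registering an MAU queue via HV_NCS_QCONF.
 * The queue base real address and entry count come from the caller;
 * 'end' is assumed to be base plus the queue size in bytes, and __pa()
 * is assumed to give the real address of the argument block, which
 * must be aligned on at least an 8-byte boundary.
 */
static inline unsigned long example_ncs_qconf(unsigned long mau_id,
					      unsigned long q_base_ra,
					      unsigned long num_ents)
{
	struct hv_ncs_qconf_arg arg;

	arg.mid = mau_id;
	arg.base = q_base_ra;
	arg.end = q_base_ra + num_ents * sizeof(struct hv_ncs_queue_entry);
	arg.num_ents = num_ents;

	return sun4v_ncs_request(HV_NCS_QCONF, __pa(&arg), sizeof(arg));
}
#endif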
2902
2903#define HV_FAST_FIRE_GET_PERFREG 0x120
2904#define HV_FAST_FIRE_SET_PERFREG 0x121
2905
2906/* Function numbers for HV_CORE_TRAP. */
2907#define HV_CORE_SET_VER 0x00
2908#define HV_CORE_PUTCHAR 0x01
2909#define HV_CORE_EXIT 0x02
2910#define HV_CORE_GET_VER 0x03
2911
2912/* Hypervisor API groups for use with HV_CORE_SET_VER and
2913 * HV_CORE_GET_VER.
2914 */
2915#define HV_GRP_SUN4V 0x0000
2916#define HV_GRP_CORE 0x0001
2917#define HV_GRP_INTR 0x0002
2918#define HV_GRP_SOFT_STATE 0x0003
2919#define HV_GRP_PCI 0x0100
2920#define HV_GRP_LDOM 0x0101
2921#define HV_GRP_SVC_CHAN 0x0102
2922#define HV_GRP_NCS 0x0103
2923#define HV_GRP_RNG 0x0104
2924#define HV_GRP_NIAG_PERF 0x0200
2925#define HV_GRP_FIRE_PERF 0x0201
2926#define HV_GRP_N2_CPU 0x0202
2927#define HV_GRP_NIU 0x0204
2928#define HV_GRP_VF_CPU 0x0205
2929#define HV_GRP_DIAG 0x0300
2930
2931#ifndef __ASSEMBLY__
2932extern unsigned long sun4v_get_version(unsigned long group,
2933 unsigned long *major,
2934 unsigned long *minor);
2935extern unsigned long sun4v_set_version(unsigned long group,
2936 unsigned long major,
2937 unsigned long minor,
2938 unsigned long *actual_minor);
2939
2940extern int sun4v_hvapi_register(unsigned long group, unsigned long major,
2941 unsigned long *minor);
2942extern void sun4v_hvapi_unregister(unsigned long group);
2943extern int sun4v_hvapi_get(unsigned long group,
2944 unsigned long *major,
2945 unsigned long *minor);
2946extern void sun4v_hvapi_init(void);
2947#endif
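
#ifndef __ASSEMBLY__
/* Illustrative sketch only: negotiating major version 1 of the
 * interrupt API group.  It is assumed here that 'minor' carries the
 * requested minor version on input and the minor version actually
 * granted by the hypervisor on output.
 */
static inline int example_register_intr_api(void)
{
	unsigned long minor = 1;

	return sun4v_hvapi_register(HV_GRP_INTR, 1, &minor);
}
#endif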
2948
2949#endif /* !(_SPARC64_HYPERVISOR_H) */
diff --git a/include/asm-sparc/ide.h b/include/asm-sparc/ide.h
index afd1736ed480..b7af3d658239 100644
--- a/include/asm-sparc/ide.h
+++ b/include/asm-sparc/ide.h
@@ -10,12 +10,16 @@
10 10
11#ifdef __KERNEL__ 11#ifdef __KERNEL__
12 12
13#include <asm/pgtable.h>
14#include <asm/io.h> 13#include <asm/io.h>
14#ifdef CONFIG_SPARC64
15#include <asm/pgalloc.h>
16#include <asm/spitfire.h>
17#include <asm/cacheflush.h>
18#include <asm/page.h>
19#else
20#include <asm/pgtable.h>
15#include <asm/psr.h> 21#include <asm/psr.h>
16 22#endif
17#undef MAX_HWIFS
18#define MAX_HWIFS 2
19 23
20#define __ide_insl(data_reg, buffer, wcount) \ 24#define __ide_insl(data_reg, buffer, wcount) \
21 __ide_insw(data_reg, buffer, (wcount)<<1) 25 __ide_insw(data_reg, buffer, (wcount)<<1)
@@ -28,50 +32,46 @@
28#define __ide_mm_outsw __ide_outsw 32#define __ide_mm_outsw __ide_outsw
29#define __ide_mm_outsl __ide_outsl 33#define __ide_mm_outsl __ide_outsl
30 34
31static inline void __ide_insw(unsigned long port, 35static inline void __ide_insw(void __iomem *port, void *dst, u32 count)
32 void *dst,
33 unsigned long count)
34{ 36{
35 volatile unsigned short *data_port; 37#if defined(CONFIG_SPARC64) && defined(DCACHE_ALIASING_POSSIBLE)
36 /* unsigned long end = (unsigned long)dst + (count << 1); */ /* P3 */ 38 unsigned long end = (unsigned long)dst + (count << 1);
39#endif
37 u16 *ps = dst; 40 u16 *ps = dst;
38 u32 *pi; 41 u32 *pi;
39 42
40 data_port = (volatile unsigned short *)port;
41
42 if(((unsigned long)ps) & 0x2) { 43 if(((unsigned long)ps) & 0x2) {
43 *ps++ = *data_port; 44 *ps++ = __raw_readw(port);
44 count--; 45 count--;
45 } 46 }
46 pi = (u32 *)ps; 47 pi = (u32 *)ps;
47 while(count >= 2) { 48 while(count >= 2) {
48 u32 w; 49 u32 w;
49 50
50 w = (*data_port) << 16; 51 w = __raw_readw(port) << 16;
51 w |= (*data_port); 52 w |= __raw_readw(port);
52 *pi++ = w; 53 *pi++ = w;
53 count -= 2; 54 count -= 2;
54 } 55 }
55 ps = (u16 *)pi; 56 ps = (u16 *)pi;
56 if(count) 57 if(count)
57 *ps++ = *data_port; 58 *ps++ = __raw_readw(port);
58 59
59 /* __flush_dcache_range((unsigned long)dst, end); */ /* P3 see hme */ 60#if defined(CONFIG_SPARC64) && defined(DCACHE_ALIASING_POSSIBLE)
61 __flush_dcache_range((unsigned long)dst, end);
62#endif
60} 63}
61 64
62static inline void __ide_outsw(unsigned long port, 65static inline void __ide_outsw(void __iomem *port, const void *src, u32 count)
63 const void *src,
64 unsigned long count)
65{ 66{
66 volatile unsigned short *data_port; 67#if defined(CONFIG_SPARC64) && defined(DCACHE_ALIASING_POSSIBLE)
67 /* unsigned long end = (unsigned long)src + (count << 1); */ 68 unsigned long end = (unsigned long)src + (count << 1);
69#endif
68 const u16 *ps = src; 70 const u16 *ps = src;
69 const u32 *pi; 71 const u32 *pi;
70 72
71 data_port = (volatile unsigned short *)port;
72
73 if(((unsigned long)src) & 0x2) { 73 if(((unsigned long)src) & 0x2) {
74 *data_port = *ps++; 74 __raw_writew(*ps++, port);
75 count--; 75 count--;
76 } 76 }
77 pi = (const u32 *)ps; 77 pi = (const u32 *)ps;
@@ -79,15 +79,17 @@ static inline void __ide_outsw(unsigned long port,
79 u32 w; 79 u32 w;
80 80
81 w = *pi++; 81 w = *pi++;
82 *data_port = (w >> 16); 82 __raw_writew((w >> 16), port);
83 *data_port = w; 83 __raw_writew(w, port);
84 count -= 2; 84 count -= 2;
85 } 85 }
86 ps = (const u16 *)pi; 86 ps = (const u16 *)pi;
87 if(count) 87 if(count)
88 *data_port = *ps; 88 __raw_writew(*ps, port);
89 89
90 /* __flush_dcache_range((unsigned long)src, end); */ /* P3 see hme */ 90#if defined(CONFIG_SPARC64) && defined(DCACHE_ALIASING_POSSIBLE)
91 __flush_dcache_range((unsigned long)src, end);
92#endif
91} 93}
92 94
93#endif /* __KERNEL__ */ 95#endif /* __KERNEL__ */
diff --git a/include/asm-sparc/idprom.h b/include/asm-sparc/idprom.h
index 41adb417a4e5..6976aa2439c6 100644
--- a/include/asm-sparc/idprom.h
+++ b/include/asm-sparc/idprom.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * idprom.h: Macros and defines for idprom routines 2 * idprom.h: Macros and defines for idprom routines
3 * 3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) 4 * Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
5 */ 5 */
6 6
7#ifndef _SPARC_IDPROM_H 7#ifndef _SPARC_IDPROM_H
diff --git a/include/asm-sparc/intr_queue.h b/include/asm-sparc/intr_queue.h
new file mode 100644
index 000000000000..206077dedc2a
--- /dev/null
+++ b/include/asm-sparc/intr_queue.h
@@ -0,0 +1,15 @@
1#ifndef _SPARC64_INTR_QUEUE_H
2#define _SPARC64_INTR_QUEUE_H
3
4/* Sun4v interrupt queue registers, accessed via ASI_QUEUE. */
5
6#define INTRQ_CPU_MONDO_HEAD 0x3c0 /* CPU mondo head */
7#define INTRQ_CPU_MONDO_TAIL 0x3c8 /* CPU mondo tail */
8#define INTRQ_DEVICE_MONDO_HEAD 0x3d0 /* Device mondo head */
9#define INTRQ_DEVICE_MONDO_TAIL 0x3d8 /* Device mondo tail */
10#define INTRQ_RESUM_MONDO_HEAD 0x3e0 /* Resumable error mondo head */
11#define INTRQ_RESUM_MONDO_TAIL 0x3e8 /* Resumable error mondo tail */
12#define INTRQ_NONRESUM_MONDO_HEAD 0x3f0 /* Non-resumable error mondo head */
 13#define INTRQ_NONRESUM_MONDO_TAIL 0x3f8 /* Non-resumable error mondo tail */
14
15#endif /* !(_SPARC64_INTR_QUEUE_H) */
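
These offsets are read and written with ASI_QUEUE loads and stores; a hedged sketch of fetching one head pointer (illustration only -- the accessor below is not part of the patch, and ASI_QUEUE is assumed to come from asm/asi.h):

/* Hypothetical accessor: read the CPU mondo queue head register. */
static inline unsigned long example_cpu_mondo_head(void)
{
	unsigned long head;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=r" (head)
			     : "r" (INTRQ_CPU_MONDO_HEAD), "i" (ASI_QUEUE));
	return head;
}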
diff --git a/include/asm-sparc/io.h b/include/asm-sparc/io.h
index 3a3e7bdb06b3..fc9024d3dfc3 100644
--- a/include/asm-sparc/io.h
+++ b/include/asm-sparc/io.h
@@ -1,325 +1,8 @@
1#ifndef __SPARC_IO_H 1#ifndef ___ASM_SPARC_IO_H
2#define __SPARC_IO_H 2#define ___ASM_SPARC_IO_H
3 3#if defined(__sparc__) && defined(__arch64__)
4#include <linux/kernel.h> 4#include <asm-sparc/io_64.h>
5#include <linux/types.h> 5#else
6#include <linux/ioport.h> /* struct resource */ 6#include <asm-sparc/io_32.h>
7 7#endif
8#include <asm/page.h> /* IO address mapping routines need this */
9#include <asm/system.h>
10
11#define page_to_phys(page) (((page) - mem_map) << PAGE_SHIFT)
12
13static inline u32 flip_dword (u32 l)
14{
15 return ((l&0xff)<<24) | (((l>>8)&0xff)<<16) | (((l>>16)&0xff)<<8)| ((l>>24)&0xff);
16}
17
18static inline u16 flip_word (u16 w)
19{
20 return ((w&0xff) << 8) | ((w>>8)&0xff);
21}
22
23#define mmiowb()
24
25/*
26 * Memory mapped I/O to PCI
27 */
28
29static inline u8 __raw_readb(const volatile void __iomem *addr)
30{
31 return *(__force volatile u8 *)addr;
32}
33
34static inline u16 __raw_readw(const volatile void __iomem *addr)
35{
36 return *(__force volatile u16 *)addr;
37}
38
39static inline u32 __raw_readl(const volatile void __iomem *addr)
40{
41 return *(__force volatile u32 *)addr;
42}
43
44static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
45{
46 *(__force volatile u8 *)addr = b;
47}
48
49static inline void __raw_writew(u16 w, volatile void __iomem *addr)
50{
51 *(__force volatile u16 *)addr = w;
52}
53
54static inline void __raw_writel(u32 l, volatile void __iomem *addr)
55{
56 *(__force volatile u32 *)addr = l;
57}
58
59static inline u8 __readb(const volatile void __iomem *addr)
60{
61 return *(__force volatile u8 *)addr;
62}
63
64static inline u16 __readw(const volatile void __iomem *addr)
65{
66 return flip_word(*(__force volatile u16 *)addr);
67}
68
69static inline u32 __readl(const volatile void __iomem *addr)
70{
71 return flip_dword(*(__force volatile u32 *)addr);
72}
73
74static inline void __writeb(u8 b, volatile void __iomem *addr)
75{
76 *(__force volatile u8 *)addr = b;
77}
78
79static inline void __writew(u16 w, volatile void __iomem *addr)
80{
81 *(__force volatile u16 *)addr = flip_word(w);
82}
83
84static inline void __writel(u32 l, volatile void __iomem *addr)
85{
86 *(__force volatile u32 *)addr = flip_dword(l);
87}
88
89#define readb(__addr) __readb(__addr)
90#define readw(__addr) __readw(__addr)
91#define readl(__addr) __readl(__addr)
92#define readb_relaxed(__addr) readb(__addr)
93#define readw_relaxed(__addr) readw(__addr)
94#define readl_relaxed(__addr) readl(__addr)
95
96#define writeb(__b, __addr) __writeb((__b),(__addr))
97#define writew(__w, __addr) __writew((__w),(__addr))
98#define writel(__l, __addr) __writel((__l),(__addr))
99
100/*
101 * I/O space operations
102 *
103 * Arrangement on a Sun is somewhat complicated.
104 *
105 * First of all, we want to use standard Linux drivers
106 * for keyboard, PC serial, etc. These drivers think
107 * they access I/O space and use inb/outb.
108 * On the other hand, EBus bridge accepts PCI *memory*
109 * cycles and converts them into ISA *I/O* cycles.
110 * Ergo, we want inb & outb to generate PCI memory cycles.
111 *
112 * If we want to issue PCI *I/O* cycles, we do this
113 * with a low 64K fixed window in PCIC. This window gets
114 * mapped somewhere into virtual kernel space and we
115 * can use inb/outb again.
116 */
117#define inb_local(__addr) __readb((void __iomem *)(unsigned long)(__addr))
118#define inb(__addr) __readb((void __iomem *)(unsigned long)(__addr))
119#define inw(__addr) __readw((void __iomem *)(unsigned long)(__addr))
120#define inl(__addr) __readl((void __iomem *)(unsigned long)(__addr))
121
122#define outb_local(__b, __addr) __writeb(__b, (void __iomem *)(unsigned long)(__addr))
123#define outb(__b, __addr) __writeb(__b, (void __iomem *)(unsigned long)(__addr))
124#define outw(__w, __addr) __writew(__w, (void __iomem *)(unsigned long)(__addr))
125#define outl(__l, __addr) __writel(__l, (void __iomem *)(unsigned long)(__addr))
126
127#define inb_p(__addr) inb(__addr)
128#define outb_p(__b, __addr) outb(__b, __addr)
129#define inw_p(__addr) inw(__addr)
130#define outw_p(__w, __addr) outw(__w, __addr)
131#define inl_p(__addr) inl(__addr)
132#define outl_p(__l, __addr) outl(__l, __addr)
133
134void outsb(unsigned long addr, const void *src, unsigned long cnt);
135void outsw(unsigned long addr, const void *src, unsigned long cnt);
136void outsl(unsigned long addr, const void *src, unsigned long cnt);
137void insb(unsigned long addr, void *dst, unsigned long count);
138void insw(unsigned long addr, void *dst, unsigned long count);
139void insl(unsigned long addr, void *dst, unsigned long count);
140
141#define IO_SPACE_LIMIT 0xffffffff
142
143/*
144 * SBus accessors.
145 *
146 * SBus has only one, memory mapped, I/O space.
147 * We do not need to flip bytes for SBus of course.
148 */
149static inline u8 _sbus_readb(const volatile void __iomem *addr)
150{
151 return *(__force volatile u8 *)addr;
152}
153
154static inline u16 _sbus_readw(const volatile void __iomem *addr)
155{
156 return *(__force volatile u16 *)addr;
157}
158
159static inline u32 _sbus_readl(const volatile void __iomem *addr)
160{
161 return *(__force volatile u32 *)addr;
162}
163
164static inline void _sbus_writeb(u8 b, volatile void __iomem *addr)
165{
166 *(__force volatile u8 *)addr = b;
167}
168
169static inline void _sbus_writew(u16 w, volatile void __iomem *addr)
170{
171 *(__force volatile u16 *)addr = w;
172}
173
174static inline void _sbus_writel(u32 l, volatile void __iomem *addr)
175{
176 *(__force volatile u32 *)addr = l;
177}
178
179/*
180 * The only reason for #define's is to hide casts to unsigned long.
181 */
182#define sbus_readb(__addr) _sbus_readb(__addr)
183#define sbus_readw(__addr) _sbus_readw(__addr)
184#define sbus_readl(__addr) _sbus_readl(__addr)
185#define sbus_writeb(__b, __addr) _sbus_writeb(__b, __addr)
186#define sbus_writew(__w, __addr) _sbus_writew(__w, __addr)
187#define sbus_writel(__l, __addr) _sbus_writel(__l, __addr)
188
189static inline void sbus_memset_io(volatile void __iomem *__dst, int c, __kernel_size_t n)
190{
191 while(n--) {
192 sbus_writeb(c, __dst);
193 __dst++;
194 }
195}
196
197static inline void
198_memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
199{
200 volatile void __iomem *d = dst;
201
202 while (n--) {
203 writeb(c, d);
204 d++;
205 }
206}
207
208#define memset_io(d,c,sz) _memset_io(d,c,sz)
209
210static inline void
211_memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
212{
213 char *d = dst;
214
215 while (n--) {
216 char tmp = readb(src);
217 *d++ = tmp;
218 src++;
219 }
220}
221
222#define memcpy_fromio(d,s,sz) _memcpy_fromio(d,s,sz)
223
224static inline void
225_memcpy_toio(volatile void __iomem *dst, const void *src, __kernel_size_t n)
226{
227 const char *s = src;
228 volatile void __iomem *d = dst;
229
230 while (n--) {
231 char tmp = *s++;
232 writeb(tmp, d);
233 d++;
234 }
235}
236
237#define memcpy_toio(d,s,sz) _memcpy_toio(d,s,sz)
238
239#ifdef __KERNEL__
240
241/*
242 * Bus number may be embedded in the higher bits of the physical address.
243 * This is why we have no bus number argument to ioremap().
244 */
245extern void __iomem *ioremap(unsigned long offset, unsigned long size);
246#define ioremap_nocache(X,Y) ioremap((X),(Y))
247extern void iounmap(volatile void __iomem *addr);
248
249#define ioread8(X) readb(X)
250#define ioread16(X) readw(X)
251#define ioread32(X) readl(X)
252#define iowrite8(val,X) writeb(val,X)
253#define iowrite16(val,X) writew(val,X)
254#define iowrite32(val,X) writel(val,X)
255
256static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count)
257{
258 insb((unsigned long __force)port, buf, count);
259}
260static inline void ioread16_rep(void __iomem *port, void *buf, unsigned long count)
261{
262 insw((unsigned long __force)port, buf, count);
263}
264
265static inline void ioread32_rep(void __iomem *port, void *buf, unsigned long count)
266{
267 insl((unsigned long __force)port, buf, count);
268}
269
270static inline void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count)
271{
272 outsb((unsigned long __force)port, buf, count);
273}
274
275static inline void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count)
276{
277 outsw((unsigned long __force)port, buf, count);
278}
279
280static inline void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count)
281{
282 outsl((unsigned long __force)port, buf, count);
283}
284
285/* Create a virtual mapping cookie for an IO port range */
286extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
287extern void ioport_unmap(void __iomem *);
288
289/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
290struct pci_dev;
291extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
292extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
293
294/*
295 * Bus number may be in res->flags... somewhere.
296 */
297extern void __iomem *sbus_ioremap(struct resource *res, unsigned long offset,
298 unsigned long size, char *name);
299extern void sbus_iounmap(volatile void __iomem *vaddr, unsigned long size);
300
301
302/*
303 * At the moment, we do not use CMOS_READ anywhere outside of rtc.c,
304 * so rtc_port is static in it. This should not change unless a new
305 * hardware pops up.
306 */
307#define RTC_PORT(x) (rtc_port + (x))
308#define RTC_ALWAYS_BCD 0
309
310#endif 8#endif
311
312#define __ARCH_HAS_NO_PAGE_ZERO_MAPPED 1
313
314/*
315 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
316 * access
317 */
318#define xlate_dev_mem_ptr(p) __va(p)
319
320/*
321 * Convert a virtual cached pointer to an uncached pointer
322 */
323#define xlate_dev_kmem_ptr(p) p
324
325#endif /* !(__SPARC_IO_H) */
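
The flip_word()/flip_dword() helpers removed here (and moved into io_32.h below) byte-swap PCI little-endian data for the big-endian sparc32 CPU. A standalone, user-space check of that swap (illustration only, not kernel code; the helpers are copied with standard fixed-width types):

#include <assert.h>
#include <stdint.h>

/* Copies of the header's helpers, retyped for a user-space build. */
static uint32_t flip_dword(uint32_t l)
{
	return ((l & 0xff) << 24) | (((l >> 8) & 0xff) << 16) |
	       (((l >> 16) & 0xff) << 8) | ((l >> 24) & 0xff);
}

static uint16_t flip_word(uint16_t w)
{
	return ((w & 0xff) << 8) | ((w >> 8) & 0xff);
}

int main(void)
{
	assert(flip_word(0x1234) == 0x3412);		/* bytes reversed */
	assert(flip_dword(0x12345678) == 0x78563412);	/* bytes reversed */
	return 0;
}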
diff --git a/include/asm-sparc/io_32.h b/include/asm-sparc/io_32.h
new file mode 100644
index 000000000000..10d7da450070
--- /dev/null
+++ b/include/asm-sparc/io_32.h
@@ -0,0 +1,326 @@
1#ifndef __SPARC_IO_H
2#define __SPARC_IO_H
3
4#include <linux/kernel.h>
5#include <linux/types.h>
6#include <linux/ioport.h> /* struct resource */
7
8#include <asm/page.h> /* IO address mapping routines need this */
9#include <asm/system.h>
10
11#define page_to_phys(page) (((page) - mem_map) << PAGE_SHIFT)
12
13static inline u32 flip_dword (u32 l)
14{
15 return ((l&0xff)<<24) | (((l>>8)&0xff)<<16) | (((l>>16)&0xff)<<8)| ((l>>24)&0xff);
16}
17
18static inline u16 flip_word (u16 w)
19{
20 return ((w&0xff) << 8) | ((w>>8)&0xff);
21}
22
23#define mmiowb()
24
25/*
26 * Memory mapped I/O to PCI
27 */
28
29static inline u8 __raw_readb(const volatile void __iomem *addr)
30{
31 return *(__force volatile u8 *)addr;
32}
33
34static inline u16 __raw_readw(const volatile void __iomem *addr)
35{
36 return *(__force volatile u16 *)addr;
37}
38
39static inline u32 __raw_readl(const volatile void __iomem *addr)
40{
41 return *(__force volatile u32 *)addr;
42}
43
44static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
45{
46 *(__force volatile u8 *)addr = b;
47}
48
49static inline void __raw_writew(u16 w, volatile void __iomem *addr)
50{
51 *(__force volatile u16 *)addr = w;
52}
53
54static inline void __raw_writel(u32 l, volatile void __iomem *addr)
55{
56 *(__force volatile u32 *)addr = l;
57}
58
59static inline u8 __readb(const volatile void __iomem *addr)
60{
61 return *(__force volatile u8 *)addr;
62}
63
64static inline u16 __readw(const volatile void __iomem *addr)
65{
66 return flip_word(*(__force volatile u16 *)addr);
67}
68
69static inline u32 __readl(const volatile void __iomem *addr)
70{
71 return flip_dword(*(__force volatile u32 *)addr);
72}
73
74static inline void __writeb(u8 b, volatile void __iomem *addr)
75{
76 *(__force volatile u8 *)addr = b;
77}
78
79static inline void __writew(u16 w, volatile void __iomem *addr)
80{
81 *(__force volatile u16 *)addr = flip_word(w);
82}
83
84static inline void __writel(u32 l, volatile void __iomem *addr)
85{
86 *(__force volatile u32 *)addr = flip_dword(l);
87}
88
89#define readb(__addr) __readb(__addr)
90#define readw(__addr) __readw(__addr)
91#define readl(__addr) __readl(__addr)
92#define readb_relaxed(__addr) readb(__addr)
93#define readw_relaxed(__addr) readw(__addr)
94#define readl_relaxed(__addr) readl(__addr)
95
96#define writeb(__b, __addr) __writeb((__b),(__addr))
97#define writew(__w, __addr) __writew((__w),(__addr))
98#define writel(__l, __addr) __writel((__l),(__addr))
99
100/*
101 * I/O space operations
102 *
103 * Arrangement on a Sun is somewhat complicated.
104 *
105 * First of all, we want to use standard Linux drivers
106 * for keyboard, PC serial, etc. These drivers think
107 * they access I/O space and use inb/outb.
108 * On the other hand, EBus bridge accepts PCI *memory*
109 * cycles and converts them into ISA *I/O* cycles.
110 * Ergo, we want inb & outb to generate PCI memory cycles.
111 *
112 * If we want to issue PCI *I/O* cycles, we do this
113 * with a low 64K fixed window in PCIC. This window gets
114 * mapped somewhere into virtual kernel space and we
115 * can use inb/outb again.
116 */
117#define inb_local(__addr) __readb((void __iomem *)(unsigned long)(__addr))
118#define inb(__addr) __readb((void __iomem *)(unsigned long)(__addr))
119#define inw(__addr) __readw((void __iomem *)(unsigned long)(__addr))
120#define inl(__addr) __readl((void __iomem *)(unsigned long)(__addr))
121
122#define outb_local(__b, __addr) __writeb(__b, (void __iomem *)(unsigned long)(__addr))
123#define outb(__b, __addr) __writeb(__b, (void __iomem *)(unsigned long)(__addr))
124#define outw(__w, __addr) __writew(__w, (void __iomem *)(unsigned long)(__addr))
125#define outl(__l, __addr) __writel(__l, (void __iomem *)(unsigned long)(__addr))
126
127#define inb_p(__addr) inb(__addr)
128#define outb_p(__b, __addr) outb(__b, __addr)
129#define inw_p(__addr) inw(__addr)
130#define outw_p(__w, __addr) outw(__w, __addr)
131#define inl_p(__addr) inl(__addr)
132#define outl_p(__l, __addr) outl(__l, __addr)
133
134void outsb(unsigned long addr, const void *src, unsigned long cnt);
135void outsw(unsigned long addr, const void *src, unsigned long cnt);
136void outsl(unsigned long addr, const void *src, unsigned long cnt);
137void insb(unsigned long addr, void *dst, unsigned long count);
138void insw(unsigned long addr, void *dst, unsigned long count);
139void insl(unsigned long addr, void *dst, unsigned long count);
140
141#define IO_SPACE_LIMIT 0xffffffff
142
143/*
144 * SBus accessors.
145 *
146 * SBus has only one, memory mapped, I/O space.
147 * We do not need to flip bytes for SBus of course.
148 */
149static inline u8 _sbus_readb(const volatile void __iomem *addr)
150{
151 return *(__force volatile u8 *)addr;
152}
153
154static inline u16 _sbus_readw(const volatile void __iomem *addr)
155{
156 return *(__force volatile u16 *)addr;
157}
158
159static inline u32 _sbus_readl(const volatile void __iomem *addr)
160{
161 return *(__force volatile u32 *)addr;
162}
163
164static inline void _sbus_writeb(u8 b, volatile void __iomem *addr)
165{
166 *(__force volatile u8 *)addr = b;
167}
168
169static inline void _sbus_writew(u16 w, volatile void __iomem *addr)
170{
171 *(__force volatile u16 *)addr = w;
172}
173
174static inline void _sbus_writel(u32 l, volatile void __iomem *addr)
175{
176 *(__force volatile u32 *)addr = l;
177}
178
179/*
180 * The only reason for #define's is to hide casts to unsigned long.
181 */
182#define sbus_readb(__addr) _sbus_readb(__addr)
183#define sbus_readw(__addr) _sbus_readw(__addr)
184#define sbus_readl(__addr) _sbus_readl(__addr)
185#define sbus_writeb(__b, __addr) _sbus_writeb(__b, __addr)
186#define sbus_writew(__w, __addr) _sbus_writew(__w, __addr)
187#define sbus_writel(__l, __addr) _sbus_writel(__l, __addr)
188
189static inline void sbus_memset_io(volatile void __iomem *__dst, int c, __kernel_size_t n)
190{
191 while(n--) {
192 sbus_writeb(c, __dst);
193 __dst++;
194 }
195}
196
197static inline void
198_memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
199{
200 volatile void __iomem *d = dst;
201
202 while (n--) {
203 writeb(c, d);
204 d++;
205 }
206}
207
208#define memset_io(d,c,sz) _memset_io(d,c,sz)
209
210static inline void
211_memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
212{
213 char *d = dst;
214
215 while (n--) {
216 char tmp = readb(src);
217 *d++ = tmp;
218 src++;
219 }
220}
221
222#define memcpy_fromio(d,s,sz) _memcpy_fromio(d,s,sz)
223
224static inline void
225_memcpy_toio(volatile void __iomem *dst, const void *src, __kernel_size_t n)
226{
227 const char *s = src;
228 volatile void __iomem *d = dst;
229
230 while (n--) {
231 char tmp = *s++;
232 writeb(tmp, d);
233 d++;
234 }
235}
236
237#define memcpy_toio(d,s,sz) _memcpy_toio(d,s,sz)
238
239#ifdef __KERNEL__
240
241/*
242 * Bus number may be embedded in the higher bits of the physical address.
243 * This is why we have no bus number argument to ioremap().
244 */
245extern void __iomem *ioremap(unsigned long offset, unsigned long size);
246#define ioremap_nocache(X,Y) ioremap((X),(Y))
247#define ioremap_wc(X,Y) ioremap((X),(Y))
248extern void iounmap(volatile void __iomem *addr);
249
250#define ioread8(X) readb(X)
251#define ioread16(X) readw(X)
252#define ioread32(X) readl(X)
253#define iowrite8(val,X) writeb(val,X)
254#define iowrite16(val,X) writew(val,X)
255#define iowrite32(val,X) writel(val,X)
256
257static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count)
258{
259 insb((unsigned long __force)port, buf, count);
260}
261static inline void ioread16_rep(void __iomem *port, void *buf, unsigned long count)
262{
263 insw((unsigned long __force)port, buf, count);
264}
265
266static inline void ioread32_rep(void __iomem *port, void *buf, unsigned long count)
267{
268 insl((unsigned long __force)port, buf, count);
269}
270
271static inline void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count)
272{
273 outsb((unsigned long __force)port, buf, count);
274}
275
276static inline void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count)
277{
278 outsw((unsigned long __force)port, buf, count);
279}
280
281static inline void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count)
282{
283 outsl((unsigned long __force)port, buf, count);
284}
285
286/* Create a virtual mapping cookie for an IO port range */
287extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
288extern void ioport_unmap(void __iomem *);
289
290/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
291struct pci_dev;
292extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
293extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
294
295/*
296 * Bus number may be in res->flags... somewhere.
297 */
298extern void __iomem *sbus_ioremap(struct resource *res, unsigned long offset,
299 unsigned long size, char *name);
300extern void sbus_iounmap(volatile void __iomem *vaddr, unsigned long size);
301
302
303/*
304 * At the moment, we do not use CMOS_READ anywhere outside of rtc.c,
305 * so rtc_port is static in it. This should not change unless a new
306 * hardware pops up.
307 */
308#define RTC_PORT(x) (rtc_port + (x))
309#define RTC_ALWAYS_BCD 0
310
311#endif
312
313#define __ARCH_HAS_NO_PAGE_ZERO_MAPPED 1
314
315/*
316 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
317 * access
318 */
319#define xlate_dev_mem_ptr(p) __va(p)
320
321/*
322 * Convert a virtual cached pointer to an uncached pointer
323 */
324#define xlate_dev_kmem_ptr(p) p
325
326#endif /* !(__SPARC_IO_H) */
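
A hedged usage sketch for the sparc32 accessors above (not part of the patch; "bar_phys" and the register offset are hypothetical): ioremap() maps the device window and readl() applies flip_dword(), so PCI little-endian data arrives in host byte order.

/* Hypothetical driver fragment using only functions declared in io_32.h. */
static u32 example_read_status(unsigned long bar_phys)
{
	void __iomem *regs = ioremap(bar_phys, 0x100);
	u32 status = readl(regs + 0x10);	/* byte-swapped PCI read */

	iounmap(regs);
	return status;
}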
diff --git a/include/asm-sparc/io_64.h b/include/asm-sparc/io_64.h
new file mode 100644
index 000000000000..0bff078ffdd0
--- /dev/null
+++ b/include/asm-sparc/io_64.h
@@ -0,0 +1,511 @@
1#ifndef __SPARC64_IO_H
2#define __SPARC64_IO_H
3
4#include <linux/kernel.h>
5#include <linux/compiler.h>
6#include <linux/types.h>
7
8#include <asm/page.h> /* IO address mapping routines need this */
9#include <asm/system.h>
10#include <asm/asi.h>
11
12/* PC crapola... */
13#define __SLOW_DOWN_IO do { } while (0)
14#define SLOW_DOWN_IO do { } while (0)
15
16/* BIO layer definitions. */
17extern unsigned long kern_base, kern_size;
18#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
19
20static inline u8 _inb(unsigned long addr)
21{
22 u8 ret;
23
24 __asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_inb */"
25 : "=r" (ret)
26 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
27 : "memory");
28
29 return ret;
30}
31
32static inline u16 _inw(unsigned long addr)
33{
34 u16 ret;
35
36 __asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_inw */"
37 : "=r" (ret)
38 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
39 : "memory");
40
41 return ret;
42}
43
44static inline u32 _inl(unsigned long addr)
45{
46 u32 ret;
47
48 __asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_inl */"
49 : "=r" (ret)
50 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
51 : "memory");
52
53 return ret;
54}
55
56static inline void _outb(u8 b, unsigned long addr)
57{
58 __asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_outb */"
59 : /* no outputs */
60 : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
61 : "memory");
62}
63
64static inline void _outw(u16 w, unsigned long addr)
65{
66 __asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_outw */"
67 : /* no outputs */
68 : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
69 : "memory");
70}
71
72static inline void _outl(u32 l, unsigned long addr)
73{
74 __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_outl */"
75 : /* no outputs */
76 : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
77 : "memory");
78}
79
80#define inb(__addr) (_inb((unsigned long)(__addr)))
81#define inw(__addr) (_inw((unsigned long)(__addr)))
82#define inl(__addr) (_inl((unsigned long)(__addr)))
83#define outb(__b, __addr) (_outb((u8)(__b), (unsigned long)(__addr)))
84#define outw(__w, __addr) (_outw((u16)(__w), (unsigned long)(__addr)))
85#define outl(__l, __addr) (_outl((u32)(__l), (unsigned long)(__addr)))
86
87#define inb_p(__addr) inb(__addr)
88#define outb_p(__b, __addr) outb(__b, __addr)
89#define inw_p(__addr) inw(__addr)
90#define outw_p(__w, __addr) outw(__w, __addr)
91#define inl_p(__addr) inl(__addr)
92#define outl_p(__l, __addr) outl(__l, __addr)
93
94extern void outsb(unsigned long, const void *, unsigned long);
95extern void outsw(unsigned long, const void *, unsigned long);
96extern void outsl(unsigned long, const void *, unsigned long);
97extern void insb(unsigned long, void *, unsigned long);
98extern void insw(unsigned long, void *, unsigned long);
99extern void insl(unsigned long, void *, unsigned long);
100
101static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count)
102{
103 insb((unsigned long __force)port, buf, count);
104}
105static inline void ioread16_rep(void __iomem *port, void *buf, unsigned long count)
106{
107 insw((unsigned long __force)port, buf, count);
108}
109
110static inline void ioread32_rep(void __iomem *port, void *buf, unsigned long count)
111{
112 insl((unsigned long __force)port, buf, count);
113}
114
115static inline void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count)
116{
117 outsb((unsigned long __force)port, buf, count);
118}
119
120static inline void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count)
121{
122 outsw((unsigned long __force)port, buf, count);
123}
124
125static inline void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count)
126{
127 outsl((unsigned long __force)port, buf, count);
128}
129
130/* Memory functions, same as I/O accesses on Ultra. */
131static inline u8 _readb(const volatile void __iomem *addr)
132{ u8 ret;
133
134 __asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_readb */"
135 : "=r" (ret)
136 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
137 : "memory");
138 return ret;
139}
140
141static inline u16 _readw(const volatile void __iomem *addr)
142{ u16 ret;
143
144 __asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_readw */"
145 : "=r" (ret)
146 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
147 : "memory");
148
149 return ret;
150}
151
152static inline u32 _readl(const volatile void __iomem *addr)
153{ u32 ret;
154
155 __asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_readl */"
156 : "=r" (ret)
157 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
158 : "memory");
159
160 return ret;
161}
162
163static inline u64 _readq(const volatile void __iomem *addr)
164{ u64 ret;
165
166 __asm__ __volatile__("ldxa\t[%1] %2, %0\t/* pci_readq */"
167 : "=r" (ret)
168 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
169 : "memory");
170
171 return ret;
172}
173
174static inline void _writeb(u8 b, volatile void __iomem *addr)
175{
176 __asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_writeb */"
177 : /* no outputs */
178 : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
179 : "memory");
180}
181
182static inline void _writew(u16 w, volatile void __iomem *addr)
183{
184 __asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_writew */"
185 : /* no outputs */
186 : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
187 : "memory");
188}
189
190static inline void _writel(u32 l, volatile void __iomem *addr)
191{
192 __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_writel */"
193 : /* no outputs */
194 : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
195 : "memory");
196}
197
198static inline void _writeq(u64 q, volatile void __iomem *addr)
199{
200 __asm__ __volatile__("stxa\t%r0, [%1] %2\t/* pci_writeq */"
201 : /* no outputs */
202 : "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
203 : "memory");
204}
205
206#define readb(__addr) _readb(__addr)
207#define readw(__addr) _readw(__addr)
208#define readl(__addr) _readl(__addr)
209#define readq(__addr) _readq(__addr)
210#define readb_relaxed(__addr) _readb(__addr)
211#define readw_relaxed(__addr) _readw(__addr)
212#define readl_relaxed(__addr) _readl(__addr)
213#define readq_relaxed(__addr) _readq(__addr)
214#define writeb(__b, __addr) _writeb(__b, __addr)
215#define writew(__w, __addr) _writew(__w, __addr)
216#define writel(__l, __addr) _writel(__l, __addr)
217#define writeq(__q, __addr) _writeq(__q, __addr)
218
219/* Now versions without byte-swapping. */
220static inline u8 _raw_readb(unsigned long addr)
221{
222 u8 ret;
223
224 __asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_raw_readb */"
225 : "=r" (ret)
226 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
227
228 return ret;
229}
230
231static inline u16 _raw_readw(unsigned long addr)
232{
233 u16 ret;
234
235 __asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_raw_readw */"
236 : "=r" (ret)
237 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
238
239 return ret;
240}
241
242static inline u32 _raw_readl(unsigned long addr)
243{
244 u32 ret;
245
246 __asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_raw_readl */"
247 : "=r" (ret)
248 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
249
250 return ret;
251}
252
253static inline u64 _raw_readq(unsigned long addr)
254{
255 u64 ret;
256
257 __asm__ __volatile__("ldxa\t[%1] %2, %0\t/* pci_raw_readq */"
258 : "=r" (ret)
259 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
260
261 return ret;
262}
263
264static inline void _raw_writeb(u8 b, unsigned long addr)
265{
266 __asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_raw_writeb */"
267 : /* no outputs */
268 : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
269}
270
271static inline void _raw_writew(u16 w, unsigned long addr)
272{
273 __asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_raw_writew */"
274 : /* no outputs */
275 : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
276}
277
278static inline void _raw_writel(u32 l, unsigned long addr)
279{
280 __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_raw_writel */"
281 : /* no outputs */
282 : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
283}
284
285static inline void _raw_writeq(u64 q, unsigned long addr)
286{
287 __asm__ __volatile__("stxa\t%r0, [%1] %2\t/* pci_raw_writeq */"
288 : /* no outputs */
289 : "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
290}
291
292#define __raw_readb(__addr) (_raw_readb((unsigned long)(__addr)))
293#define __raw_readw(__addr) (_raw_readw((unsigned long)(__addr)))
294#define __raw_readl(__addr) (_raw_readl((unsigned long)(__addr)))
295#define __raw_readq(__addr) (_raw_readq((unsigned long)(__addr)))
296#define __raw_writeb(__b, __addr) (_raw_writeb((u8)(__b), (unsigned long)(__addr)))
297#define __raw_writew(__w, __addr) (_raw_writew((u16)(__w), (unsigned long)(__addr)))
298#define __raw_writel(__l, __addr) (_raw_writel((u32)(__l), (unsigned long)(__addr)))
299#define __raw_writeq(__q, __addr) (_raw_writeq((u64)(__q), (unsigned long)(__addr)))
300
301/* Valid I/O Space regions are anywhere, because each PCI bus supported
302 * can live in an arbitrary area of the physical address range.
303 */
304#define IO_SPACE_LIMIT 0xffffffffffffffffUL
305
306/* Now, SBUS variants, only difference from PCI is that we do
307 * not use little-endian ASIs.
308 */
309static inline u8 _sbus_readb(const volatile void __iomem *addr)
310{
311 u8 ret;
312
313 __asm__ __volatile__("lduba\t[%1] %2, %0\t/* sbus_readb */"
314 : "=r" (ret)
315 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
316 : "memory");
317
318 return ret;
319}
320
321static inline u16 _sbus_readw(const volatile void __iomem *addr)
322{
323 u16 ret;
324
325 __asm__ __volatile__("lduha\t[%1] %2, %0\t/* sbus_readw */"
326 : "=r" (ret)
327 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
328 : "memory");
329
330 return ret;
331}
332
333static inline u32 _sbus_readl(const volatile void __iomem *addr)
334{
335 u32 ret;
336
337 __asm__ __volatile__("lduwa\t[%1] %2, %0\t/* sbus_readl */"
338 : "=r" (ret)
339 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
340 : "memory");
341
342 return ret;
343}
344
345static inline u64 _sbus_readq(const volatile void __iomem *addr)
346{
347 u64 ret;
348
349 __asm__ __volatile__("ldxa\t[%1] %2, %0\t/* sbus_readq */"
350 : "=r" (ret)
351 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
352 : "memory");
353
354 return ret;
355}
356
357static inline void _sbus_writeb(u8 b, volatile void __iomem *addr)
358{
359 __asm__ __volatile__("stba\t%r0, [%1] %2\t/* sbus_writeb */"
360 : /* no outputs */
361 : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
362 : "memory");
363}
364
365static inline void _sbus_writew(u16 w, volatile void __iomem *addr)
366{
367 __asm__ __volatile__("stha\t%r0, [%1] %2\t/* sbus_writew */"
368 : /* no outputs */
369 : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
370 : "memory");
371}
372
373static inline void _sbus_writel(u32 l, volatile void __iomem *addr)
374{
375 __asm__ __volatile__("stwa\t%r0, [%1] %2\t/* sbus_writel */"
376 : /* no outputs */
377 : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
378 : "memory");
379}
380
381static inline void _sbus_writeq(u64 l, volatile void __iomem *addr)
382{
383 __asm__ __volatile__("stxa\t%r0, [%1] %2\t/* sbus_writeq */"
384 : /* no outputs */
385 : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
386 : "memory");
387}
388
389#define sbus_readb(__addr) _sbus_readb(__addr)
390#define sbus_readw(__addr) _sbus_readw(__addr)
391#define sbus_readl(__addr) _sbus_readl(__addr)
392#define sbus_readq(__addr) _sbus_readq(__addr)
393#define sbus_writeb(__b, __addr) _sbus_writeb(__b, __addr)
394#define sbus_writew(__w, __addr) _sbus_writew(__w, __addr)
395#define sbus_writel(__l, __addr) _sbus_writel(__l, __addr)
396#define sbus_writeq(__l, __addr) _sbus_writeq(__l, __addr)
397
398static inline void _sbus_memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
399{
400 while(n--) {
401 sbus_writeb(c, dst);
402 dst++;
403 }
404}
405
406#define sbus_memset_io(d,c,sz) _sbus_memset_io(d,c,sz)
407
408static inline void
409_memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
410{
411 volatile void __iomem *d = dst;
412
413 while (n--) {
414 writeb(c, d);
415 d++;
416 }
417}
418
419#define memset_io(d,c,sz) _memset_io(d,c,sz)
420
421static inline void
422_memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
423{
424 char *d = dst;
425
426 while (n--) {
427 char tmp = readb(src);
428 *d++ = tmp;
429 src++;
430 }
431}
432
433#define memcpy_fromio(d,s,sz) _memcpy_fromio(d,s,sz)
434
435static inline void
436_memcpy_toio(volatile void __iomem *dst, const void *src, __kernel_size_t n)
437{
438 const char *s = src;
439 volatile void __iomem *d = dst;
440
441 while (n--) {
442 char tmp = *s++;
443 writeb(tmp, d);
444 d++;
445 }
446}
447
448#define memcpy_toio(d,s,sz) _memcpy_toio(d,s,sz)
449
450#define mmiowb()
451
452#ifdef __KERNEL__
453
454/* On sparc64 we have the whole physical IO address space accessible
455 * using physically addressed loads and stores, so this does nothing.
456 */
457static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
458{
459 return (void __iomem *)offset;
460}
461
462#define ioremap_nocache(X,Y) ioremap((X),(Y))
463#define ioremap_wc(X,Y) ioremap((X),(Y))
464
465static inline void iounmap(volatile void __iomem *addr)
466{
467}
468
469#define ioread8(X) readb(X)
470#define ioread16(X) readw(X)
471#define ioread32(X) readl(X)
472#define iowrite8(val,X) writeb(val,X)
473#define iowrite16(val,X) writew(val,X)
474#define iowrite32(val,X) writel(val,X)
475
476/* Create a virtual mapping cookie for an IO port range */
477extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
478extern void ioport_unmap(void __iomem *);
479
480/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
481struct pci_dev;
482extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
483extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
484
485/* Similarly for SBUS. */
486#define sbus_ioremap(__res, __offset, __size, __name) \
487({ unsigned long __ret; \
488 __ret = (__res)->start + (((__res)->flags & 0x1ffUL) << 32UL); \
489 __ret += (unsigned long) (__offset); \
490 if (! request_region((__ret), (__size), (__name))) \
491 __ret = 0UL; \
492 (void __iomem *) __ret; \
493})
494
495#define sbus_iounmap(__addr, __size) \
496 release_region((unsigned long)(__addr), (__size))
497
498/*
499 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
500 * access
501 */
502#define xlate_dev_mem_ptr(p) __va(p)
503
504/*
505 * Convert a virtual cached pointer to an uncached pointer
506 */
507#define xlate_dev_kmem_ptr(p) p
508
509#endif
510
511#endif /* !(__SPARC64_IO_H) */
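
A hedged sketch for the sparc64 side (not part of the patch; "bar_phys" and the offset are hypothetical): ioremap() is an identity mapping here, readl() uses the little-endian bypass ASI and byte-swaps PCI data into host order, while __raw_readl() uses the big-endian ASI and returns the bytes as stored.

/* Hypothetical driver fragment contrasting the two 32-bit read paths. */
static u32 example_read_reg64(unsigned long bar_phys)
{
	void __iomem *regs = ioremap(bar_phys, 0x100);	/* identity on sparc64 */
	u32 swapped = readl(regs + 0x10);		/* host byte order */
	u32 raw = __raw_readl(regs + 0x10);		/* as stored on the bus */

	(void) raw;
	iounmap(regs);
	return swapped;
}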
diff --git a/include/asm-sparc/ioctls.h b/include/asm-sparc/ioctls.h
index 3f4d0087b6a3..1fe6855c5c18 100644
--- a/include/asm-sparc/ioctls.h
+++ b/include/asm-sparc/ioctls.h
@@ -22,7 +22,7 @@
22 22
23/* Note that all the ioctls that are not available in Linux have a 23/* Note that all the ioctls that are not available in Linux have a
24 * double underscore on the front to: a) avoid some programs to 24 * double underscore on the front to: a) avoid some programs to
25 * thing we support some ioctls under Linux (autoconfiguration stuff) 25 * think we support some ioctls under Linux (autoconfiguration stuff)
26 */ 26 */
27/* Little t */ 27/* Little t */
28#define TIOCGETD _IOR('t', 0, int) 28#define TIOCGETD _IOR('t', 0, int)
@@ -110,7 +110,7 @@
110#define TIOCSERGETLSR 0x5459 /* Get line status register */ 110#define TIOCSERGETLSR 0x5459 /* Get line status register */
111#define TIOCSERGETMULTI 0x545A /* Get multiport config */ 111#define TIOCSERGETMULTI 0x545A /* Get multiport config */
112#define TIOCSERSETMULTI 0x545B /* Set multiport config */ 112#define TIOCSERSETMULTI 0x545B /* Set multiport config */
113#define TIOCMIWAIT 0x545C /* Wait input */ 113#define TIOCMIWAIT 0x545C /* Wait for change on serial input line(s) */
114#define TIOCGICOUNT 0x545D /* Read serial port inline interrupt counts */ 114#define TIOCGICOUNT 0x545D /* Read serial port inline interrupt counts */
115 115
116/* Kernel definitions */ 116/* Kernel definitions */
diff --git a/include/asm-sparc/iommu.h b/include/asm-sparc/iommu.h
index 70c589c05a10..91b072b0d7a0 100644
--- a/include/asm-sparc/iommu.h
+++ b/include/asm-sparc/iommu.h
@@ -1,121 +1,8 @@
1/* iommu.h: Definitions for the sun4m IOMMU. 1#ifndef ___ASM_SPARC_IOMMU_H
2 * 2#define ___ASM_SPARC_IOMMU_H
3 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) 3#if defined(__sparc__) && defined(__arch64__)
4 */ 4#include <asm-sparc/iommu_64.h>
5#ifndef _SPARC_IOMMU_H 5#else
6#define _SPARC_IOMMU_H 6#include <asm-sparc/iommu_32.h>
7 7#endif
8#include <asm/page.h> 8#endif
9#include <asm/bitext.h>
10
11/* The iommu handles all virtual to physical address translations
12 * that occur between the SBUS and physical memory. Access by
13 * the cpu to IO registers and similar go over the mbus so are
14 * translated by the on chip SRMMU. The iommu and the srmmu do
15 * not need to have the same translations at all, in fact most
16 * of the time the translations they handle are a disjunct set.
17 * Basically the iommu handles all dvma sbus activity.
18 */
19
20/* The IOMMU registers occupy three pages in IO space. */
21struct iommu_regs {
22 /* First page */
23 volatile unsigned long control; /* IOMMU control */
24 volatile unsigned long base; /* Physical base of iopte page table */
25 volatile unsigned long _unused1[3];
26 volatile unsigned long tlbflush; /* write only */
27 volatile unsigned long pageflush; /* write only */
28 volatile unsigned long _unused2[1017];
29 /* Second page */
30 volatile unsigned long afsr; /* Async-fault status register */
31 volatile unsigned long afar; /* Async-fault physical address */
32 volatile unsigned long _unused3[2];
33 volatile unsigned long sbuscfg0; /* SBUS configuration registers, per-slot */
34 volatile unsigned long sbuscfg1;
35 volatile unsigned long sbuscfg2;
36 volatile unsigned long sbuscfg3;
37 volatile unsigned long mfsr; /* Memory-fault status register */
38 volatile unsigned long mfar; /* Memory-fault physical address */
39 volatile unsigned long _unused4[1014];
40 /* Third page */
41 volatile unsigned long mid; /* IOMMU module-id */
42};
43
44#define IOMMU_CTRL_IMPL 0xf0000000 /* Implementation */
45#define IOMMU_CTRL_VERS 0x0f000000 /* Version */
46#define IOMMU_CTRL_RNGE 0x0000001c /* Mapping RANGE */
47#define IOMMU_RNGE_16MB 0x00000000 /* 0xff000000 -> 0xffffffff */
48#define IOMMU_RNGE_32MB 0x00000004 /* 0xfe000000 -> 0xffffffff */
49#define IOMMU_RNGE_64MB 0x00000008 /* 0xfc000000 -> 0xffffffff */
50#define IOMMU_RNGE_128MB 0x0000000c /* 0xf8000000 -> 0xffffffff */
51#define IOMMU_RNGE_256MB 0x00000010 /* 0xf0000000 -> 0xffffffff */
52#define IOMMU_RNGE_512MB 0x00000014 /* 0xe0000000 -> 0xffffffff */
53#define IOMMU_RNGE_1GB 0x00000018 /* 0xc0000000 -> 0xffffffff */
54#define IOMMU_RNGE_2GB 0x0000001c /* 0x80000000 -> 0xffffffff */
55#define IOMMU_CTRL_ENAB 0x00000001 /* IOMMU Enable */
56
57#define IOMMU_AFSR_ERR 0x80000000 /* LE, TO, or BE asserted */
58#define IOMMU_AFSR_LE 0x40000000 /* SBUS reports error after transaction */
59#define IOMMU_AFSR_TO 0x20000000 /* Write access took more than 12.8 us. */
60#define IOMMU_AFSR_BE 0x10000000 /* Write access received error acknowledge */
61#define IOMMU_AFSR_SIZE 0x0e000000 /* Size of transaction causing error */
62#define IOMMU_AFSR_S 0x01000000 /* Sparc was in supervisor mode */
 63#define IOMMU_AFSR_RESV 0x00f00000 /* Reserved, forced to 0x8 by hardware */
64#define IOMMU_AFSR_ME 0x00080000 /* Multiple errors occurred */
65#define IOMMU_AFSR_RD 0x00040000 /* A read operation was in progress */
66#define IOMMU_AFSR_FAV 0x00020000 /* IOMMU afar has valid contents */
67
68#define IOMMU_SBCFG_SAB30 0x00010000 /* Phys-address bit 30 when bypass enabled */
69#define IOMMU_SBCFG_BA16 0x00000004 /* Slave supports 16 byte bursts */
70#define IOMMU_SBCFG_BA8 0x00000002 /* Slave supports 8 byte bursts */
71#define IOMMU_SBCFG_BYPASS 0x00000001 /* Bypass IOMMU, treat all addresses
72 produced by this device as pure
73 physical. */
74
75#define IOMMU_MFSR_ERR 0x80000000 /* One or more of PERR1 or PERR0 */
76#define IOMMU_MFSR_S 0x01000000 /* Sparc was in supervisor mode */
77#define IOMMU_MFSR_CPU 0x00800000 /* CPU transaction caused parity error */
78#define IOMMU_MFSR_ME 0x00080000 /* Multiple parity errors occurred */
79#define IOMMU_MFSR_PERR 0x00006000 /* high bit indicates parity error occurred
80 on the even word of the access, low bit
 81 indicates odd word caused the parity error */
82#define IOMMU_MFSR_BM 0x00001000 /* Error occurred while in boot mode */
83#define IOMMU_MFSR_C 0x00000800 /* Address causing error was marked cacheable */
84#define IOMMU_MFSR_RTYP 0x000000f0 /* Memory request transaction type */
85
86#define IOMMU_MID_SBAE 0x001f0000 /* SBus arbitration enable */
87#define IOMMU_MID_SE 0x00100000 /* Enables SCSI/ETHERNET arbitration */
88#define IOMMU_MID_SB3 0x00080000 /* Enable SBUS device 3 arbitration */
89#define IOMMU_MID_SB2 0x00040000 /* Enable SBUS device 2 arbitration */
90#define IOMMU_MID_SB1 0x00020000 /* Enable SBUS device 1 arbitration */
91#define IOMMU_MID_SB0 0x00010000 /* Enable SBUS device 0 arbitration */
92#define IOMMU_MID_MID 0x0000000f /* Module-id, hardcoded to 0x8 */
93
94/* The format of an iopte in the page tables */
95#define IOPTE_PAGE 0x07ffff00 /* Physical page number (PA[30:12]) */
96#define IOPTE_CACHE 0x00000080 /* Cached (in vme IOCACHE or Viking/MXCC) */
97#define IOPTE_WRITE 0x00000004 /* Writeable */
98#define IOPTE_VALID 0x00000002 /* IOPTE is valid */
99#define IOPTE_WAZ 0x00000001 /* Write as zeros */
100
101struct iommu_struct {
102 struct iommu_regs *regs;
103 iopte_t *page_table;
104 /* For convenience */
105 unsigned long start; /* First managed virtual address */
106 unsigned long end; /* Last managed virtual address */
107
108 struct bit_map usemap;
109};
110
111static inline void iommu_invalidate(struct iommu_regs *regs)
112{
113 regs->tlbflush = 0;
114}
115
116static inline void iommu_invalidate_page(struct iommu_regs *regs, unsigned long ba)
117{
118 regs->pageflush = (ba & PAGE_MASK);
119}
120
121#endif /* !(_SPARC_IOMMU_H) */
diff --git a/include/asm-sparc/iommu_32.h b/include/asm-sparc/iommu_32.h
new file mode 100644
index 000000000000..70c589c05a10
--- /dev/null
+++ b/include/asm-sparc/iommu_32.h
@@ -0,0 +1,121 @@
1/* iommu.h: Definitions for the sun4m IOMMU.
2 *
3 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
4 */
5#ifndef _SPARC_IOMMU_H
6#define _SPARC_IOMMU_H
7
8#include <asm/page.h>
9#include <asm/bitext.h>
10
11/* The iommu handles all virtual to physical address translations
12 * that occur between the SBUS and physical memory. Access by
13 * the cpu to IO registers and similar go over the mbus so are
14 * translated by the on chip SRMMU. The iommu and the srmmu do
15 * not need to have the same translations at all, in fact most
16 * of the time the translations they handle are a disjunct set.
17 * Basically the iommu handles all dvma sbus activity.
18 */
19
20/* The IOMMU registers occupy three pages in IO space. */
21struct iommu_regs {
22 /* First page */
23 volatile unsigned long control; /* IOMMU control */
24 volatile unsigned long base; /* Physical base of iopte page table */
25 volatile unsigned long _unused1[3];
26 volatile unsigned long tlbflush; /* write only */
27 volatile unsigned long pageflush; /* write only */
28 volatile unsigned long _unused2[1017];
29 /* Second page */
30 volatile unsigned long afsr; /* Async-fault status register */
31 volatile unsigned long afar; /* Async-fault physical address */
32 volatile unsigned long _unused3[2];
33 volatile unsigned long sbuscfg0; /* SBUS configuration registers, per-slot */
34 volatile unsigned long sbuscfg1;
35 volatile unsigned long sbuscfg2;
36 volatile unsigned long sbuscfg3;
37 volatile unsigned long mfsr; /* Memory-fault status register */
38 volatile unsigned long mfar; /* Memory-fault physical address */
39 volatile unsigned long _unused4[1014];
40 /* Third page */
41 volatile unsigned long mid; /* IOMMU module-id */
42};
43
44#define IOMMU_CTRL_IMPL 0xf0000000 /* Implementation */
45#define IOMMU_CTRL_VERS 0x0f000000 /* Version */
46#define IOMMU_CTRL_RNGE 0x0000001c /* Mapping RANGE */
47#define IOMMU_RNGE_16MB 0x00000000 /* 0xff000000 -> 0xffffffff */
48#define IOMMU_RNGE_32MB 0x00000004 /* 0xfe000000 -> 0xffffffff */
49#define IOMMU_RNGE_64MB 0x00000008 /* 0xfc000000 -> 0xffffffff */
50#define IOMMU_RNGE_128MB 0x0000000c /* 0xf8000000 -> 0xffffffff */
51#define IOMMU_RNGE_256MB 0x00000010 /* 0xf0000000 -> 0xffffffff */
52#define IOMMU_RNGE_512MB 0x00000014 /* 0xe0000000 -> 0xffffffff */
53#define IOMMU_RNGE_1GB 0x00000018 /* 0xc0000000 -> 0xffffffff */
54#define IOMMU_RNGE_2GB 0x0000001c /* 0x80000000 -> 0xffffffff */
55#define IOMMU_CTRL_ENAB 0x00000001 /* IOMMU Enable */
56
57#define IOMMU_AFSR_ERR 0x80000000 /* LE, TO, or BE asserted */
58#define IOMMU_AFSR_LE 0x40000000 /* SBUS reports error after transaction */
59#define IOMMU_AFSR_TO 0x20000000 /* Write access took more than 12.8 us. */
60#define IOMMU_AFSR_BE 0x10000000 /* Write access received error acknowledge */
61#define IOMMU_AFSR_SIZE 0x0e000000 /* Size of transaction causing error */
62#define IOMMU_AFSR_S 0x01000000 /* Sparc was in supervisor mode */
 63#define IOMMU_AFSR_RESV 0x00f00000 /* Reserved, forced to 0x8 by hardware */
64#define IOMMU_AFSR_ME 0x00080000 /* Multiple errors occurred */
65#define IOMMU_AFSR_RD 0x00040000 /* A read operation was in progress */
66#define IOMMU_AFSR_FAV 0x00020000 /* IOMMU afar has valid contents */
67
68#define IOMMU_SBCFG_SAB30 0x00010000 /* Phys-address bit 30 when bypass enabled */
69#define IOMMU_SBCFG_BA16 0x00000004 /* Slave supports 16 byte bursts */
70#define IOMMU_SBCFG_BA8 0x00000002 /* Slave supports 8 byte bursts */
71#define IOMMU_SBCFG_BYPASS 0x00000001 /* Bypass IOMMU, treat all addresses
72 produced by this device as pure
73 physical. */
74
75#define IOMMU_MFSR_ERR 0x80000000 /* One or more of PERR1 or PERR0 */
76#define IOMMU_MFSR_S 0x01000000 /* Sparc was in supervisor mode */
77#define IOMMU_MFSR_CPU 0x00800000 /* CPU transaction caused parity error */
78#define IOMMU_MFSR_ME 0x00080000 /* Multiple parity errors occurred */
79#define IOMMU_MFSR_PERR 0x00006000 /* high bit indicates parity error occurred
80 on the even word of the access, low bit
 81 indicates odd word caused the parity error */
82#define IOMMU_MFSR_BM 0x00001000 /* Error occurred while in boot mode */
83#define IOMMU_MFSR_C 0x00000800 /* Address causing error was marked cacheable */
84#define IOMMU_MFSR_RTYP 0x000000f0 /* Memory request transaction type */
85
86#define IOMMU_MID_SBAE 0x001f0000 /* SBus arbitration enable */
87#define IOMMU_MID_SE 0x00100000 /* Enables SCSI/ETHERNET arbitration */
88#define IOMMU_MID_SB3 0x00080000 /* Enable SBUS device 3 arbitration */
89#define IOMMU_MID_SB2 0x00040000 /* Enable SBUS device 2 arbitration */
90#define IOMMU_MID_SB1 0x00020000 /* Enable SBUS device 1 arbitration */
91#define IOMMU_MID_SB0 0x00010000 /* Enable SBUS device 0 arbitration */
92#define IOMMU_MID_MID 0x0000000f /* Module-id, hardcoded to 0x8 */
93
94/* The format of an iopte in the page tables */
95#define IOPTE_PAGE 0x07ffff00 /* Physical page number (PA[30:12]) */
96#define IOPTE_CACHE 0x00000080 /* Cached (in vme IOCACHE or Viking/MXCC) */
97#define IOPTE_WRITE 0x00000004 /* Writeable */
98#define IOPTE_VALID 0x00000002 /* IOPTE is valid */
99#define IOPTE_WAZ 0x00000001 /* Write as zeros */
100
101struct iommu_struct {
102 struct iommu_regs *regs;
103 iopte_t *page_table;
104 /* For convenience */
105 unsigned long start; /* First managed virtual address */
106 unsigned long end; /* Last managed virtual address */
107
108 struct bit_map usemap;
109};
110
111static inline void iommu_invalidate(struct iommu_regs *regs)
112{
113 regs->tlbflush = 0;
114}
115
116static inline void iommu_invalidate_page(struct iommu_regs *regs, unsigned long ba)
117{
118 regs->pageflush = (ba & PAGE_MASK);
119}
120
121#endif /* !(_SPARC_IOMMU_H) */
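
A hedged sketch of how a sun4m iopte could be composed from the field masks above (illustration only; the pfn << 8 placement follows from the "PA[30:12]" comment on IOPTE_PAGE, and the helper name is an assumption):

/* Hypothetical helper: build a valid, cached, writable iopte for a pfn. */
static inline unsigned long example_mk_iopte(unsigned long pfn)
{
	return ((pfn << 8) & IOPTE_PAGE) | IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
}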
diff --git a/include/asm-sparc/iommu_64.h b/include/asm-sparc/iommu_64.h
new file mode 100644
index 000000000000..d7b9afcba08b
--- /dev/null
+++ b/include/asm-sparc/iommu_64.h
@@ -0,0 +1,62 @@
1/* iommu.h: Definitions for the sun5 IOMMU.
2 *
3 * Copyright (C) 1996, 1999, 2007 David S. Miller (davem@davemloft.net)
4 */
5#ifndef _SPARC64_IOMMU_H
6#define _SPARC64_IOMMU_H
7
8/* The format of an iopte in the page tables. */
9#define IOPTE_VALID 0x8000000000000000UL
10#define IOPTE_64K 0x2000000000000000UL
11#define IOPTE_STBUF 0x1000000000000000UL
12#define IOPTE_INTRA 0x0800000000000000UL
13#define IOPTE_CONTEXT 0x07ff800000000000UL
14#define IOPTE_PAGE 0x00007fffffffe000UL
15#define IOPTE_CACHE 0x0000000000000010UL
16#define IOPTE_WRITE 0x0000000000000002UL
17
18#define IOMMU_NUM_CTXS 4096
19
20struct iommu_arena {
21 unsigned long *map;
22 unsigned int hint;
23 unsigned int limit;
24};
25
26struct iommu {
27 spinlock_t lock;
28 struct iommu_arena arena;
29 void (*flush_all)(struct iommu *);
30 iopte_t *page_table;
31 u32 page_table_map_base;
32 unsigned long iommu_control;
33 unsigned long iommu_tsbbase;
34 unsigned long iommu_flush;
35 unsigned long iommu_flushinv;
36 unsigned long iommu_tags;
37 unsigned long iommu_ctxflush;
38 unsigned long write_complete_reg;
39 unsigned long dummy_page;
40 unsigned long dummy_page_pa;
41 unsigned long ctx_lowest_free;
42 DECLARE_BITMAP(ctx_bitmap, IOMMU_NUM_CTXS);
43 u32 dma_addr_mask;
44};
45
46struct strbuf {
47 int strbuf_enabled;
48 unsigned long strbuf_control;
49 unsigned long strbuf_pflush;
50 unsigned long strbuf_fsync;
51 unsigned long strbuf_ctxflush;
52 unsigned long strbuf_ctxmatch_base;
53 unsigned long strbuf_flushflag_pa;
54 volatile unsigned long *strbuf_flushflag;
55 volatile unsigned long __flushflag_buf[(64+(64-1)) / sizeof(long)];
56};
57
58extern int iommu_table_init(struct iommu *iommu, int tsbsize,
59 u32 dma_offset, u32 dma_addr_mask,
60 int numa_node);
61
62#endif /* !(_SPARC64_IOMMU_H) */
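
A hedged sketch of composing a 64-bit iopte from the masks above (illustration only; "paddr" is assumed to be a page-aligned physical address and the helper name is hypothetical):

/* Hypothetical helper: valid, cacheable, writable mapping of paddr. */
static inline unsigned long example_mk_iopte64(unsigned long paddr)
{
	return IOPTE_VALID | IOPTE_CACHE | IOPTE_WRITE | (paddr & IOPTE_PAGE);
}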
diff --git a/include/asm-sparc/ipcbuf.h b/include/asm-sparc/ipcbuf.h
index 9bef02d04e4b..037605d986e2 100644
--- a/include/asm-sparc/ipcbuf.h
+++ b/include/asm-sparc/ipcbuf.h
@@ -1,31 +1,8 @@
1#ifndef _SPARC_IPCBUF_H 1#ifndef ___ASM_SPARC_IPCBUF_H
2#define _SPARC_IPCBUF_H 2#define ___ASM_SPARC_IPCBUF_H
3 3#if defined(__sparc__) && defined(__arch64__)
4/* 4#include <asm-sparc/ipcbuf_64.h>
5 * The ipc64_perm structure for sparc architecture. 5#else
6 * Note extra padding because this structure is passed back and forth 6#include <asm-sparc/ipcbuf_32.h>
7 * between kernel and user space. 7#endif
8 * 8#endif
9 * Pad space is left for:
10 * - 32-bit mode
11 * - 32-bit seq
12 * - 2 miscellaneous 64-bit values (so that this structure matches
13 * sparc64 ipc64_perm)
14 */
15
16struct ipc64_perm
17{
18 __kernel_key_t key;
19 __kernel_uid32_t uid;
20 __kernel_gid32_t gid;
21 __kernel_uid32_t cuid;
22 __kernel_gid32_t cgid;
23 unsigned short __pad1;
24 __kernel_mode_t mode;
25 unsigned short __pad2;
26 unsigned short seq;
27 unsigned long long __unused1;
28 unsigned long long __unused2;
29};
30
31#endif /* _SPARC_IPCBUF_H */
diff --git a/include/asm-sparc/ipcbuf_32.h b/include/asm-sparc/ipcbuf_32.h
new file mode 100644
index 000000000000..6387209518f2
--- /dev/null
+++ b/include/asm-sparc/ipcbuf_32.h
@@ -0,0 +1,31 @@
1#ifndef _SPARC_IPCBUF_H
2#define _SPARC_IPCBUF_H
3
4/*
5 * The ipc64_perm structure for sparc architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 32-bit mode
11 * - 32-bit seq
12 * - 2 miscellaneous 64-bit values (so that this structure matches
13 * sparc64 ipc64_perm)
14 */
15
16struct ipc64_perm
17{
18 __kernel_key_t key;
19 __kernel_uid32_t uid;
20 __kernel_gid32_t gid;
21 __kernel_uid32_t cuid;
22 __kernel_gid32_t cgid;
23 unsigned short __pad1;
24 __kernel_mode_t mode;
25 unsigned short __pad2;
26 unsigned short seq;
27 unsigned long long __unused1;
28 unsigned long long __unused2;
29};
30
31#endif /* _SPARC_IPCBUF_H */
diff --git a/include/asm-sparc/ipcbuf_64.h b/include/asm-sparc/ipcbuf_64.h
new file mode 100644
index 000000000000..a44b855b98db
--- /dev/null
+++ b/include/asm-sparc/ipcbuf_64.h
@@ -0,0 +1,28 @@
1#ifndef _SPARC64_IPCBUF_H
2#define _SPARC64_IPCBUF_H
3
4/*
5 * The ipc64_perm structure for sparc64 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 32-bit seq
11 * - 2 miscellaneous 64-bit values
12 */
13
14struct ipc64_perm
15{
16 __kernel_key_t key;
17 __kernel_uid_t uid;
18 __kernel_gid_t gid;
19 __kernel_uid_t cuid;
20 __kernel_gid_t cgid;
21 __kernel_mode_t mode;
22 unsigned short __pad1;
23 unsigned short seq;
24 unsigned long __unused1;
25 unsigned long __unused2;
26};
27
28#endif /* _SPARC64_IPCBUF_H */
diff --git a/include/asm-sparc/irq.h b/include/asm-sparc/irq.h
index fe205cc444b8..7af6bb4aa09c 100644
--- a/include/asm-sparc/irq.h
+++ b/include/asm-sparc/irq.h
@@ -1,15 +1,8 @@
1/* irq.h: IRQ registers on the Sparc. 1#ifndef ___ASM_SPARC_IRQ_H
2 * 2#define ___ASM_SPARC_IRQ_H
3 * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net) 3#if defined(__sparc__) && defined(__arch64__)
4 */ 4#include <asm-sparc/irq_64.h>
5 5#else
6#ifndef _SPARC_IRQ_H 6#include <asm-sparc/irq_32.h>
7#define _SPARC_IRQ_H 7#endif
8
9#include <linux/interrupt.h>
10
11#define NR_IRQS 16
12
13#define irq_canonicalize(irq) (irq)
14
15#endif 8#endif
diff --git a/include/asm-sparc/irq_32.h b/include/asm-sparc/irq_32.h
new file mode 100644
index 000000000000..fe205cc444b8
--- /dev/null
+++ b/include/asm-sparc/irq_32.h
@@ -0,0 +1,15 @@
1/* irq.h: IRQ registers on the Sparc.
2 *
3 * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
4 */
5
6#ifndef _SPARC_IRQ_H
7#define _SPARC_IRQ_H
8
9#include <linux/interrupt.h>
10
11#define NR_IRQS 16
12
13#define irq_canonicalize(irq) (irq)
14
15#endif
diff --git a/include/asm-sparc/irq_64.h b/include/asm-sparc/irq_64.h
new file mode 100644
index 000000000000..0bb9bf531745
--- /dev/null
+++ b/include/asm-sparc/irq_64.h
@@ -0,0 +1,93 @@
1/* irq.h: IRQ registers on the 64-bit Sparc.
2 *
3 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
4 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
5 */
6
7#ifndef _SPARC64_IRQ_H
8#define _SPARC64_IRQ_H
9
10#include <linux/linkage.h>
11#include <linux/kernel.h>
12#include <linux/errno.h>
13#include <linux/interrupt.h>
14#include <asm/pil.h>
15#include <asm/ptrace.h>
16
17/* IMAP/ICLR register defines */
18#define IMAP_VALID 0x80000000UL /* IRQ Enabled */
19#define IMAP_TID_UPA 0x7c000000UL /* UPA TargetID */
20#define IMAP_TID_JBUS 0x7c000000UL /* JBUS TargetID */
21#define IMAP_TID_SHIFT 26
22#define IMAP_AID_SAFARI 0x7c000000UL /* Safari AgentID */
23#define IMAP_AID_SHIFT 26
24#define IMAP_NID_SAFARI 0x03e00000UL /* Safari NodeID */
25#define IMAP_NID_SHIFT 21
26#define IMAP_IGN 0x000007c0UL /* IRQ Group Number */
27#define IMAP_INO 0x0000003fUL /* IRQ Number */
28#define IMAP_INR 0x000007ffUL /* Full interrupt number*/
29
30#define ICLR_IDLE 0x00000000UL /* Idle state */
31#define ICLR_TRANSMIT 0x00000001UL /* Transmit state */
32#define ICLR_PENDING 0x00000003UL /* Pending state */
33
34/* The largest number of unique interrupt sources we support.
35 * If this needs to ever be larger than 255, you need to change
36 * the type of ino_bucket->virt_irq as appropriate.
37 *
38 * ino_bucket->virt_irq allocation is made during {sun4v_,}build_irq().
39 */
40#define NR_IRQS 255
41
42extern void irq_install_pre_handler(int virt_irq,
43 void (*func)(unsigned int, void *, void *),
44 void *arg1, void *arg2);
45#define irq_canonicalize(irq) (irq)
46extern unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap);
47extern unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino);
48extern unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino);
49extern unsigned int sun4v_build_msi(u32 devhandle, unsigned int *virt_irq_p,
50 unsigned int msi_devino_start,
51 unsigned int msi_devino_end);
52extern void sun4v_destroy_msi(unsigned int virt_irq);
53extern unsigned int sun4u_build_msi(u32 portid, unsigned int *virt_irq_p,
54 unsigned int msi_devino_start,
55 unsigned int msi_devino_end,
56 unsigned long imap_base,
57 unsigned long iclr_base);
58extern void sun4u_destroy_msi(unsigned int virt_irq);
59extern unsigned int sbus_build_irq(void *sbus, unsigned int ino);
60
61extern unsigned char virt_irq_alloc(unsigned int dev_handle,
62 unsigned int dev_ino);
63#ifdef CONFIG_PCI_MSI
64extern void virt_irq_free(unsigned int virt_irq);
65#endif
66
67extern void __init init_IRQ(void);
68extern void fixup_irqs(void);
69
70static inline void set_softint(unsigned long bits)
71{
72 __asm__ __volatile__("wr %0, 0x0, %%set_softint"
73 : /* No outputs */
74 : "r" (bits));
75}
76
77static inline void clear_softint(unsigned long bits)
78{
79 __asm__ __volatile__("wr %0, 0x0, %%clear_softint"
80 : /* No outputs */
81 : "r" (bits));
82}
83
84static inline unsigned long get_softint(void)
85{
86 unsigned long retval;
87
88 __asm__ __volatile__("rd %%softint, %0"
89 : "=r" (retval));
90 return retval;
91}
92
93#endif
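
A minimal usage sketch of the softint helpers declared above. The choice of bit 0 (the timer tick) is an assumption made here for illustration; this header only provides the accessors themselves.

/* Hypothetical sketch: check for and acknowledge a pending softint. */
#include <asm/irq.h>

static void example_ack_timer_softint(void)
{
	unsigned long pending = get_softint();	/* rd %softint */

	if (pending & 1UL)			/* assumed: bit 0 = timer */
		clear_softint(1UL);		/* wr ..., %clear_softint */
}
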
diff --git a/include/asm-sparc/irqflags.h b/include/asm-sparc/irqflags.h
index db398fb32826..c6402b187e23 100644
--- a/include/asm-sparc/irqflags.h
+++ b/include/asm-sparc/irqflags.h
@@ -1,39 +1,8 @@
1/* | 1#ifndef ___ASM_SPARC_IRQFLAGS_H
2 * include/asm-sparc/irqflags.h | 2#define ___ASM_SPARC_IRQFLAGS_H
3 * | 3#if defined(__sparc__) && defined(__arch64__)
4 * IRQ flags handling | 4#include <asm-sparc/irqflags_64.h>
5 * | 5#else
6 * This file gets included from lowlevel asm headers too, to provide | 6#include <asm-sparc/irqflags_32.h>
7 * wrapped versions of the local_irq_*() APIs, based on the | 7#endif
8 * raw_local_irq_*() functions from the lowlevel headers. | 8#endif
9 */
10#ifndef _ASM_IRQFLAGS_H
11#define _ASM_IRQFLAGS_H
12
13#ifndef __ASSEMBLY__
14
15extern void raw_local_irq_restore(unsigned long);
16extern unsigned long __raw_local_irq_save(void);
17extern void raw_local_irq_enable(void);
18
19static inline unsigned long getipl(void)
20{
21 unsigned long retval;
22
23 __asm__ __volatile__("rd %%psr, %0" : "=r" (retval));
24 return retval;
25}
26
27#define raw_local_save_flags(flags) ((flags) = getipl())
28#define raw_local_irq_save(flags) ((flags) = __raw_local_irq_save())
29#define raw_local_irq_disable() ((void) __raw_local_irq_save())
30#define raw_irqs_disabled() ((getipl() & PSR_PIL) != 0)
31
32static inline int raw_irqs_disabled_flags(unsigned long flags)
33{
34 return ((flags & PSR_PIL) != 0);
35}
36
37#endif /* (__ASSEMBLY__) */
38
39#endif /* !(_ASM_IRQFLAGS_H) */
diff --git a/include/asm-sparc/irqflags_32.h b/include/asm-sparc/irqflags_32.h
new file mode 100644
index 000000000000..db398fb32826
--- /dev/null
+++ b/include/asm-sparc/irqflags_32.h
@@ -0,0 +1,39 @@
1/*
2 * include/asm-sparc/irqflags.h
3 *
4 * IRQ flags handling
5 *
6 * This file gets included from lowlevel asm headers too, to provide
7 * wrapped versions of the local_irq_*() APIs, based on the
8 * raw_local_irq_*() functions from the lowlevel headers.
9 */
10#ifndef _ASM_IRQFLAGS_H
11#define _ASM_IRQFLAGS_H
12
13#ifndef __ASSEMBLY__
14
15extern void raw_local_irq_restore(unsigned long);
16extern unsigned long __raw_local_irq_save(void);
17extern void raw_local_irq_enable(void);
18
19static inline unsigned long getipl(void)
20{
21 unsigned long retval;
22
23 __asm__ __volatile__("rd %%psr, %0" : "=r" (retval));
24 return retval;
25}
26
27#define raw_local_save_flags(flags) ((flags) = getipl())
28#define raw_local_irq_save(flags) ((flags) = __raw_local_irq_save())
29#define raw_local_irq_disable() ((void) __raw_local_irq_save())
30#define raw_irqs_disabled() ((getipl() & PSR_PIL) != 0)
31
32static inline int raw_irqs_disabled_flags(unsigned long flags)
33{
34 return ((flags & PSR_PIL) != 0);
35}
36
37#endif /* (__ASSEMBLY__) */
38
39#endif /* !(_ASM_IRQFLAGS_H) */
diff --git a/include/asm-sparc/irqflags_64.h b/include/asm-sparc/irqflags_64.h
new file mode 100644
index 000000000000..024fc54d0682
--- /dev/null
+++ b/include/asm-sparc/irqflags_64.h
@@ -0,0 +1,89 @@
1/*
2 * include/asm-sparc64/irqflags.h
3 *
4 * IRQ flags handling
5 *
6 * This file gets included from lowlevel asm headers too, to provide
7 * wrapped versions of the local_irq_*() APIs, based on the
8 * raw_local_irq_*() functions from the lowlevel headers.
9 */
10#ifndef _ASM_IRQFLAGS_H
11#define _ASM_IRQFLAGS_H
12
13#ifndef __ASSEMBLY__
14
15static inline unsigned long __raw_local_save_flags(void)
16{
17 unsigned long flags;
18
19 __asm__ __volatile__(
20 "rdpr %%pil, %0"
21 : "=r" (flags)
22 );
23
24 return flags;
25}
26
27#define raw_local_save_flags(flags) \
28 do { (flags) = __raw_local_save_flags(); } while (0)
29
30static inline void raw_local_irq_restore(unsigned long flags)
31{
32 __asm__ __volatile__(
33 "wrpr %0, %%pil"
34 : /* no output */
35 : "r" (flags)
36 : "memory"
37 );
38}
39
40static inline void raw_local_irq_disable(void)
41{
42 __asm__ __volatile__(
43 "wrpr 15, %%pil"
44 : /* no outputs */
45 : /* no inputs */
46 : "memory"
47 );
48}
49
50static inline void raw_local_irq_enable(void)
51{
52 __asm__ __volatile__(
53 "wrpr 0, %%pil"
54 : /* no outputs */
55 : /* no inputs */
56 : "memory"
57 );
58}
59
60static inline int raw_irqs_disabled_flags(unsigned long flags)
61{
62 return (flags > 0);
63}
64
65static inline int raw_irqs_disabled(void)
66{
67 unsigned long flags = __raw_local_save_flags();
68
69 return raw_irqs_disabled_flags(flags);
70}
71
72/*
73 * For spinlocks, etc:
74 */
75static inline unsigned long __raw_local_irq_save(void)
76{
77 unsigned long flags = __raw_local_save_flags();
78
79 raw_local_irq_disable();
80
81 return flags;
82}
83
84#define raw_local_irq_save(flags) \
85 do { (flags) = __raw_local_irq_save(); } while (0)
86
87#endif /* (__ASSEMBLY__) */
88
89#endif /* !(_ASM_IRQFLAGS_H) */
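
A minimal sketch of how these raw primitives compose; real kernel code normally reaches them through the generic local_irq_save()/local_irq_restore() wrappers rather than calling the raw_* forms directly. example_counter is a hypothetical shared variable.

/* Protect a short critical section by raising %pil to 15, then restore
 * the previously saved level.
 */
static int example_counter;

static void example_increment(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* save old %pil, then disable */
	example_counter++;		/* short critical section */
	raw_local_irq_restore(flags);	/* write the saved %pil back */
}
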
diff --git a/include/asm-sparc/kdebug.h b/include/asm-sparc/kdebug.h
index f69fe7d84b3c..fe07d00d0534 100644
--- a/include/asm-sparc/kdebug.h
+++ b/include/asm-sparc/kdebug.h
@@ -1,73 +1,8 @@
1/* | 1#ifndef ___ASM_SPARC_KDEBUG_H
2 * kdebug.h: Defines and definitions for debugging the Linux kernel | 2#define ___ASM_SPARC_KDEBUG_H
3 * under various kernel debuggers. | 3#if defined(__sparc__) && defined(__arch64__)
4 * | 4#include <asm-sparc/kdebug_64.h>
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | 5#else
6 */ | 6#include <asm-sparc/kdebug_32.h>
7#ifndef _SPARC_KDEBUG_H | 7#endif
8#define _SPARC_KDEBUG_H | 8#endif
9
10#include <asm/openprom.h>
11#include <asm/vaddrs.h>
12
13/* Breakpoints are entered through trap table entry 126. So in sparc assembly
14 * if you want to drop into the debugger you do:
15 *
16 * t DEBUG_BP_TRAP
17 */
18
19#define DEBUG_BP_TRAP 126
20
21#ifndef __ASSEMBLY__
22/* The debug vector is passed in %o1 at boot time. It is a pointer to
23 * a structure in the debuggers address space. Here is its format.
24 */
25
26typedef unsigned int (*debugger_funct)(void);
27
28struct kernel_debug {
29 /* First the entry point into the debugger. You jump here
30 * to give control over to the debugger.
31 */
32 unsigned long kdebug_entry;
33 unsigned long kdebug_trapme; /* Figure out later... */
34 /* The following is the number of pages that the debugger has
35 * taken from the total pool.
36 */
37 unsigned long *kdebug_stolen_pages;
38 /* Ok, after you remap yourself and/or change the trap table
39 * from what you were left with at boot time you have to call
40 * this synchronization function so the debugger can check out
41 * what you have done.
42 */
43 debugger_funct teach_debugger;
44}; /* I think that is it... */
45
46extern struct kernel_debug *linux_dbvec;
47
48/* Use this macro in C-code to enter the debugger. */
49static inline void sp_enter_debugger(void)
50{
51 __asm__ __volatile__("jmpl %0, %%o7\n\t"
52 "nop\n\t" : :
53 "r" (linux_dbvec) : "o7", "memory");
54}
55
56#define SP_ENTER_DEBUGGER do { \
57 if((linux_dbvec!=0) && ((*(short *)linux_dbvec)!=-1)) \
58 sp_enter_debugger(); \
59 } while(0)
60
61enum die_val {
62 DIE_UNUSED,
63};
64
65#endif /* !(__ASSEMBLY__) */
66
67/* Some nice offset defines for assembler code. */
68#define KDEBUG_ENTRY_OFF 0x0
69#define KDEBUG_DUNNO_OFF 0x4
70#define KDEBUG_DUNNO2_OFF 0x8
71#define KDEBUG_TEACH_OFF 0xc
72
73#endif /* !(_SPARC_KDEBUG_H) */
diff --git a/include/asm-sparc/kdebug_32.h b/include/asm-sparc/kdebug_32.h
new file mode 100644
index 000000000000..f69fe7d84b3c
--- /dev/null
+++ b/include/asm-sparc/kdebug_32.h
@@ -0,0 +1,73 @@
1/*
2 * kdebug.h: Defines and definitions for debugging the Linux kernel
3 * under various kernel debuggers.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 */
7#ifndef _SPARC_KDEBUG_H
8#define _SPARC_KDEBUG_H
9
10#include <asm/openprom.h>
11#include <asm/vaddrs.h>
12
13/* Breakpoints are entered through trap table entry 126. So in sparc assembly
14 * if you want to drop into the debugger you do:
15 *
16 * t DEBUG_BP_TRAP
17 */
18
19#define DEBUG_BP_TRAP 126
20
21#ifndef __ASSEMBLY__
22/* The debug vector is passed in %o1 at boot time. It is a pointer to
23 * a structure in the debuggers address space. Here is its format.
24 */
25
26typedef unsigned int (*debugger_funct)(void);
27
28struct kernel_debug {
29 /* First the entry point into the debugger. You jump here
30 * to give control over to the debugger.
31 */
32 unsigned long kdebug_entry;
33 unsigned long kdebug_trapme; /* Figure out later... */
34 /* The following is the number of pages that the debugger has
35 * taken from the total pool.
36 */
37 unsigned long *kdebug_stolen_pages;
38 /* Ok, after you remap yourself and/or change the trap table
39 * from what you were left with at boot time you have to call
40 * this synchronization function so the debugger can check out
41 * what you have done.
42 */
43 debugger_funct teach_debugger;
44}; /* I think that is it... */
45
46extern struct kernel_debug *linux_dbvec;
47
48/* Use this macro in C-code to enter the debugger. */
49static inline void sp_enter_debugger(void)
50{
51 __asm__ __volatile__("jmpl %0, %%o7\n\t"
52 "nop\n\t" : :
53 "r" (linux_dbvec) : "o7", "memory");
54}
55
56#define SP_ENTER_DEBUGGER do { \
57 if((linux_dbvec!=0) && ((*(short *)linux_dbvec)!=-1)) \
58 sp_enter_debugger(); \
59 } while(0)
60
61enum die_val {
62 DIE_UNUSED,
63};
64
65#endif /* !(__ASSEMBLY__) */
66
67/* Some nice offset defines for assembler code. */
68#define KDEBUG_ENTRY_OFF 0x0
69#define KDEBUG_DUNNO_OFF 0x4
70#define KDEBUG_DUNNO2_OFF 0x8
71#define KDEBUG_TEACH_OFF 0xc
72
73#endif /* !(_SPARC_KDEBUG_H) */
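
Illustrative only: the SP_ENTER_DEBUGGER macro above is meant to be dropped into C code at the point where control should be handed to the boot debugger. A hypothetical caller might look like this (example_check_state() is a placeholder):

#include <asm/kdebug.h>

static void example_check_state(int something_is_wrong)
{
	if (something_is_wrong)
		SP_ENTER_DEBUGGER;	/* no-op unless a debug vector is present */
}
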
diff --git a/include/asm-sparc/kdebug_64.h b/include/asm-sparc/kdebug_64.h
new file mode 100644
index 000000000000..f905b773235a
--- /dev/null
+++ b/include/asm-sparc/kdebug_64.h
@@ -0,0 +1,19 @@
1#ifndef _SPARC64_KDEBUG_H
2#define _SPARC64_KDEBUG_H
3
4struct pt_regs;
5
6extern void bad_trap(struct pt_regs *, long);
7
8/* Grossly misnamed. */
9enum die_val {
10 DIE_OOPS = 1,
11 DIE_DEBUG, /* ta 0x70 */
12 DIE_DEBUG_2, /* ta 0x71 */
13 DIE_DIE,
14 DIE_TRAP,
15 DIE_TRAP_TL1,
16 DIE_CALL,
17};
18
19#endif
diff --git a/include/asm-sparc/kmap_types.h b/include/asm-sparc/kmap_types.h
index e215f7104974..602f5e034f7a 100644
--- a/include/asm-sparc/kmap_types.h
+++ b/include/asm-sparc/kmap_types.h
@@ -1,6 +1,10 @@
1#ifndef _ASM_KMAP_TYPES_H | 1#ifndef _ASM_KMAP_TYPES_H
2#define _ASM_KMAP_TYPES_H | 2#define _ASM_KMAP_TYPES_H
3 | 3
4/* Dummy header just to define km_type. None of this
5 * is actually used on sparc. -DaveM
6 */
7
4enum km_type { | 8enum km_type {
5 KM_BOUNCE_READ, | 9 KM_BOUNCE_READ,
6 KM_SKB_SUNRPC_DATA, | 10 KM_SKB_SUNRPC_DATA,
diff --git a/include/asm-sparc/kprobes.h b/include/asm-sparc/kprobes.h
new file mode 100644
index 000000000000..5879d71afdaa
--- /dev/null
+++ b/include/asm-sparc/kprobes.h
@@ -0,0 +1,49 @@
1#ifndef _SPARC64_KPROBES_H
2#define _SPARC64_KPROBES_H
3
4#include <linux/types.h>
5#include <linux/percpu.h>
6
7typedef u32 kprobe_opcode_t;
8
9#define BREAKPOINT_INSTRUCTION 0x91d02070 /* ta 0x70 */
10#define BREAKPOINT_INSTRUCTION_2 0x91d02071 /* ta 0x71 */
11#define MAX_INSN_SIZE 2
12
13#define kretprobe_blacklist_size 0
14
15#define arch_remove_kprobe(p) do {} while (0)
16
17#define flush_insn_slot(p) \
18do { flushi(&(p)->ainsn.insn[0]); \
19 flushi(&(p)->ainsn.insn[1]); \
20} while (0)
21
22void kretprobe_trampoline(void);
23
24/* Architecture specific copy of original instruction*/
25struct arch_specific_insn {
26 /* copy of the original instruction */
27 kprobe_opcode_t insn[MAX_INSN_SIZE];
28};
29
30struct prev_kprobe {
31 struct kprobe *kp;
32 unsigned long status;
33 unsigned long orig_tnpc;
34 unsigned long orig_tstate_pil;
35};
36
37/* per-cpu kprobe control block */
38struct kprobe_ctlblk {
39 unsigned long kprobe_status;
40 unsigned long kprobe_orig_tnpc;
41 unsigned long kprobe_orig_tstate_pil;
42 struct pt_regs jprobe_saved_regs;
43 struct prev_kprobe prev_kprobe;
44};
45
46extern int kprobe_exceptions_notify(struct notifier_block *self,
47 unsigned long val, void *data);
48extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
49#endif /* _SPARC64_KPROBES_H */
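
The structures above are filled in by the generic kprobes core; a consumer never touches them directly. As a hedged sketch, the usual architecture-independent registration path looks roughly like the following, with a placeholder symbol name; in real code the initcall would be wired up via module_init() or similar.

#include <linux/init.h>
#include <linux/kprobes.h>

static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	return 0;	/* let the probed instruction execute normally */
}

static struct kprobe example_kp = {
	.symbol_name	= "do_fork",		/* placeholder probe target */
	.pre_handler	= example_pre_handler,
};

static int __init example_kprobe_init(void)
{
	/* On sparc64 this arms a "ta 0x70" breakpoint at the probed site. */
	return register_kprobe(&example_kp);
}
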
diff --git a/include/asm-sparc/kvm.h b/include/asm-sparc/kvm.h
deleted file mode 100644
index 2e5478da3819..000000000000
--- a/include/asm-sparc/kvm.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __LINUX_KVM_SPARC_H
2#define __LINUX_KVM_SPARC_H
3
4/* sparc does not support KVM */
5
6#endif
diff --git a/include/asm-sparc/ldc.h b/include/asm-sparc/ldc.h
new file mode 100644
index 000000000000..bdb524a7b814
--- /dev/null
+++ b/include/asm-sparc/ldc.h
@@ -0,0 +1,138 @@
1#ifndef _SPARC64_LDC_H
2#define _SPARC64_LDC_H
3
4#include <asm/hypervisor.h>
5
6extern int ldom_domaining_enabled;
7extern void ldom_set_var(const char *var, const char *value);
8extern void ldom_reboot(const char *boot_command);
9extern void ldom_power_off(void);
10
11/* The event handler will be invoked when link state changes
12 * or data becomes available on the receive side.
13 *
14 * For non-RAW links, if the LDC_EVENT_RESET event arrives the
15 * driver should reset all of its internal state and reinvoke
16 * ldc_connect() to try and bring the link up again.
17 *
18 * For RAW links, ldc_connect() is not used. Instead the driver
19 * just waits for the LDC_EVENT_UP event.
20 */
21struct ldc_channel_config {
22 void (*event)(void *arg, int event);
23
24 u32 mtu;
25 unsigned int rx_irq;
26 unsigned int tx_irq;
27 u8 mode;
28#define LDC_MODE_RAW 0x00
29#define LDC_MODE_UNRELIABLE 0x01
30#define LDC_MODE_RESERVED 0x02
31#define LDC_MODE_STREAM 0x03
32
33 u8 debug;
34#define LDC_DEBUG_HS 0x01
35#define LDC_DEBUG_STATE 0x02
36#define LDC_DEBUG_RX 0x04
37#define LDC_DEBUG_TX 0x08
38#define LDC_DEBUG_DATA 0x10
39};
40
41#define LDC_EVENT_RESET 0x01
42#define LDC_EVENT_UP 0x02
43#define LDC_EVENT_DATA_READY 0x04
44
45#define LDC_STATE_INVALID 0x00
46#define LDC_STATE_INIT 0x01
47#define LDC_STATE_BOUND 0x02
48#define LDC_STATE_READY 0x03
49#define LDC_STATE_CONNECTED 0x04
50
51struct ldc_channel;
52
53/* Allocate state for a channel. */
54extern struct ldc_channel *ldc_alloc(unsigned long id,
55 const struct ldc_channel_config *cfgp,
56 void *event_arg);
57
58/* Shut down and free state for a channel. */
59extern void ldc_free(struct ldc_channel *lp);
60
61/* Register TX and RX queues of the link with the hypervisor. */
62extern int ldc_bind(struct ldc_channel *lp, const char *name);
63
64/* For non-RAW protocols we need to complete a handshake before
65 * communication can proceed. ldc_connect() does that; if the
66 * handshake completes successfully, an LDC_EVENT_UP event will
67 * be sent up to the driver.
68 */
69extern int ldc_connect(struct ldc_channel *lp);
70extern int ldc_disconnect(struct ldc_channel *lp);
71
72extern int ldc_state(struct ldc_channel *lp);
73
74/* Read and write operations. Only valid when the link is up. */
75extern int ldc_write(struct ldc_channel *lp, const void *buf,
76 unsigned int size);
77extern int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size);
78
79#define LDC_MAP_SHADOW 0x01
80#define LDC_MAP_DIRECT 0x02
81#define LDC_MAP_IO 0x04
82#define LDC_MAP_R 0x08
83#define LDC_MAP_W 0x10
84#define LDC_MAP_X 0x20
85#define LDC_MAP_RW (LDC_MAP_R | LDC_MAP_W)
86#define LDC_MAP_RWX (LDC_MAP_R | LDC_MAP_W | LDC_MAP_X)
87#define LDC_MAP_ALL 0x03f
88
89struct ldc_trans_cookie {
90 u64 cookie_addr;
91 u64 cookie_size;
92};
93
94struct scatterlist;
95extern int ldc_map_sg(struct ldc_channel *lp,
96 struct scatterlist *sg, int num_sg,
97 struct ldc_trans_cookie *cookies, int ncookies,
98 unsigned int map_perm);
99
100extern int ldc_map_single(struct ldc_channel *lp,
101 void *buf, unsigned int len,
102 struct ldc_trans_cookie *cookies, int ncookies,
103 unsigned int map_perm);
104
105extern void ldc_unmap(struct ldc_channel *lp, struct ldc_trans_cookie *cookies,
106 int ncookies);
107
108extern int ldc_copy(struct ldc_channel *lp, int copy_dir,
109 void *buf, unsigned int len, unsigned long offset,
110 struct ldc_trans_cookie *cookies, int ncookies);
111
112static inline int ldc_get_dring_entry(struct ldc_channel *lp,
113 void *buf, unsigned int len,
114 unsigned long offset,
115 struct ldc_trans_cookie *cookies,
116 int ncookies)
117{
118 return ldc_copy(lp, LDC_COPY_IN, buf, len, offset, cookies, ncookies);
119}
120
121static inline int ldc_put_dring_entry(struct ldc_channel *lp,
122 void *buf, unsigned int len,
123 unsigned long offset,
124 struct ldc_trans_cookie *cookies,
125 int ncookies)
126{
127 return ldc_copy(lp, LDC_COPY_OUT, buf, len, offset, cookies, ncookies);
128}
129
130extern void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len,
131 struct ldc_trans_cookie *cookies,
132 int *ncookies, unsigned int map_perm);
133
134extern void ldc_free_exp_dring(struct ldc_channel *lp, void *buf,
135 unsigned int len,
136 struct ldc_trans_cookie *cookies, int ncookies);
137
138#endif /* _SPARC64_LDC_H */
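
A hedged sketch of the connect flow described above for a non-RAW channel. The channel id, MTU and name are placeholders; a real consumer (for example a virtual-device driver) gets them from the machine description and also fills in rx_irq/tx_irq. It is assumed here that ldc_alloc() reports failure via ERR_PTR(), as its in-tree callers expect.

#include <linux/err.h>
#include <asm/ldc.h>

static void example_event(void *arg, int event)
{
	/* arg is the event_arg passed to ldc_alloc().  A real driver keeps
	 * its per-channel state there and reacts to LDC_EVENT_UP,
	 * LDC_EVENT_RESET and LDC_EVENT_DATA_READY here.
	 */
}

static struct ldc_channel_config example_cfg = {
	.event	= example_event,
	.mtu	= 64,			/* placeholder */
	.mode	= LDC_MODE_UNRELIABLE,
};

static int example_ldc_up(unsigned long chan_id)
{
	struct ldc_channel *lp = ldc_alloc(chan_id, &example_cfg, NULL);
	int err;

	if (IS_ERR(lp))
		return PTR_ERR(lp);

	err = ldc_bind(lp, "example");
	if (!err)
		err = ldc_connect(lp);	/* success => LDC_EVENT_UP later */
	if (err)
		ldc_free(lp);
	return err;
}
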
diff --git a/include/asm-sparc/lmb.h b/include/asm-sparc/lmb.h
new file mode 100644
index 000000000000..6a352cbcf520
--- /dev/null
+++ b/include/asm-sparc/lmb.h
@@ -0,0 +1,10 @@
1#ifndef _SPARC64_LMB_H
2#define _SPARC64_LMB_H
3
4#include <asm/oplib.h>
5
6#define LMB_DBG(fmt...) prom_printf(fmt)
7
8#define LMB_REAL_LIMIT 0
9
10#endif /* !(_SPARC64_LMB_H) */
diff --git a/include/asm-sparc/lsu.h b/include/asm-sparc/lsu.h
new file mode 100644
index 000000000000..7190f8de90a0
--- /dev/null
+++ b/include/asm-sparc/lsu.h
@@ -0,0 +1,19 @@
1#ifndef _SPARC64_LSU_H
2#define _SPARC64_LSU_H
3
4#include <linux/const.h>
5
6/* LSU Control Register */
7#define LSU_CONTROL_PM _AC(0x000001fe00000000,UL) /* Phys-watchpoint byte mask*/
8#define LSU_CONTROL_VM _AC(0x00000001fe000000,UL) /* Virt-watchpoint byte mask*/
9#define LSU_CONTROL_PR _AC(0x0000000001000000,UL) /* Phys-rd watchpoint enable*/
10#define LSU_CONTROL_PW _AC(0x0000000000800000,UL) /* Phys-wr watchpoint enable*/
11#define LSU_CONTROL_VR _AC(0x0000000000400000,UL) /* Virt-rd watchpoint enable*/
12#define LSU_CONTROL_VW _AC(0x0000000000200000,UL) /* Virt-wr watchpoint enable*/
13#define LSU_CONTROL_FM _AC(0x00000000000ffff0,UL) /* Parity mask enables. */
14#define LSU_CONTROL_DM _AC(0x0000000000000008,UL) /* Data MMU enable. */
15#define LSU_CONTROL_IM _AC(0x0000000000000004,UL) /* Instruction MMU enable. */
16#define LSU_CONTROL_DC _AC(0x0000000000000002,UL) /* Data cache enable. */
17#define LSU_CONTROL_IC _AC(0x0000000000000001,UL) /* Instruction cache enable.*/
18
19#endif /* !(_SPARC64_LSU_H) */
diff --git a/include/asm-sparc/machines.h b/include/asm-sparc/machines.h
index d6c6bf836206..c28c2f248794 100644
--- a/include/asm-sparc/machines.h
+++ b/include/asm-sparc/machines.h
@@ -17,8 +17,6 @@ struct Sun_Machine_Models {
17 */ | 17 */
18#define NUM_SUN_MACHINES 15 | 18#define NUM_SUN_MACHINES 15
19 | 19
20extern struct Sun_Machine_Models Sun_Machines[NUM_SUN_MACHINES];
21
22/* The machine type in the idprom area looks like this: | 20/* The machine type in the idprom area looks like this:
23 * | 21 *
24 * --------------- | 22 * ---------------
diff --git a/include/asm-sparc/mbus.h b/include/asm-sparc/mbus.h
index bb5ae614b166..69f07a022ee6 100644
--- a/include/asm-sparc/mbus.h
+++ b/include/asm-sparc/mbus.h
@@ -43,8 +43,6 @@ extern unsigned int viking_rev, swift_rev, cypress_rev;
43#define HWBUG_SUPERSCALAR_BAD 0x00000080 | 43#define HWBUG_SUPERSCALAR_BAD 0x00000080
44#define HWBUG_PACINIT_BITROT 0x00000100 | 44#define HWBUG_PACINIT_BITROT 0x00000100
45 | 45
46extern unsigned int hwbug_bitmask;
47
48/* First the module type values. To find out which you have, just load | 46/* First the module type values. To find out which you have, just load
49 * the mmu control register from ASI_M_MMUREG alternate address space and | 47 * the mmu control register from ASI_M_MMUREG alternate address space and
50 * shift the value right 28 bits. | 48 * shift the value right 28 bits.
diff --git a/include/asm-sparc/mc146818rtc.h b/include/asm-sparc/mc146818rtc.h
index fa7eac926582..9ab65c21e9e4 100644
--- a/include/asm-sparc/mc146818rtc.h
+++ b/include/asm-sparc/mc146818rtc.h
@@ -1,29 +1,8 @@
1/* | 1#ifndef ___ASM_SPARC_MC146818RTC_H
2 * Machine dependent access functions for RTC registers. | 2#define ___ASM_SPARC_MC146818RTC_H
3 */ | 3#if defined(__sparc__) && defined(__arch64__)
4#ifndef __ASM_SPARC_MC146818RTC_H | 4#include <asm-sparc/mc146818rtc_64.h>
5#define __ASM_SPARC_MC146818RTC_H | 5#else
6 | 6#include <asm-sparc/mc146818rtc_32.h>
7#include <asm/io.h> | 7#endif
8
9#ifndef RTC_PORT
10#define RTC_PORT(x) (0x70 + (x))
11#define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */
12#endif | 8#endif
13
14/*
15 * The yet supported machines all access the RTC index register via
16 * an ISA port access but the way to access the date register differs ...
17 */
18#define CMOS_READ(addr) ({ \
19outb_p((addr),RTC_PORT(0)); \
20inb_p(RTC_PORT(1)); \
21})
22#define CMOS_WRITE(val, addr) ({ \
23outb_p((addr),RTC_PORT(0)); \
24outb_p((val),RTC_PORT(1)); \
25})
26
27#define RTC_IRQ 8
28
29#endif /* __ASM_SPARC_MC146818RTC_H */
diff --git a/include/asm-sparc/mc146818rtc_32.h b/include/asm-sparc/mc146818rtc_32.h
new file mode 100644
index 000000000000..fa7eac926582
--- /dev/null
+++ b/include/asm-sparc/mc146818rtc_32.h
@@ -0,0 +1,29 @@
1/*
2 * Machine dependent access functions for RTC registers.
3 */
4#ifndef __ASM_SPARC_MC146818RTC_H
5#define __ASM_SPARC_MC146818RTC_H
6
7#include <asm/io.h>
8
9#ifndef RTC_PORT
10#define RTC_PORT(x) (0x70 + (x))
11#define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */
12#endif
13
14/*
15 * The yet supported machines all access the RTC index register via
16 * an ISA port access but the way to access the date register differs ...
17 */
18#define CMOS_READ(addr) ({ \
19outb_p((addr),RTC_PORT(0)); \
20inb_p(RTC_PORT(1)); \
21})
22#define CMOS_WRITE(val, addr) ({ \
23outb_p((addr),RTC_PORT(0)); \
24outb_p((val),RTC_PORT(1)); \
25})
26
27#define RTC_IRQ 8
28
29#endif /* __ASM_SPARC_MC146818RTC_H */
diff --git a/include/asm-sparc/mc146818rtc_64.h b/include/asm-sparc/mc146818rtc_64.h
new file mode 100644
index 000000000000..e9c0fcc25c6f
--- /dev/null
+++ b/include/asm-sparc/mc146818rtc_64.h
@@ -0,0 +1,34 @@
1/*
2 * Machine dependent access functions for RTC registers.
3 */
4#ifndef __ASM_SPARC64_MC146818RTC_H
5#define __ASM_SPARC64_MC146818RTC_H
6
7#include <asm/io.h>
8
9#ifndef RTC_PORT
10#ifdef CONFIG_PCI
11extern unsigned long ds1287_regs;
12#else
13#define ds1287_regs (0UL)
14#endif
15#define RTC_PORT(x) (ds1287_regs + (x))
16#define RTC_ALWAYS_BCD 0
17#endif
18
19/*
20 * The yet supported machines all access the RTC index register via
21 * an ISA port access but the way to access the date register differs ...
22 */
23#define CMOS_READ(addr) ({ \
24outb_p((addr),RTC_PORT(0)); \
25inb_p(RTC_PORT(1)); \
26})
27#define CMOS_WRITE(val, addr) ({ \
28outb_p((addr),RTC_PORT(0)); \
29outb_p((val),RTC_PORT(1)); \
30})
31
32#define RTC_IRQ 8
33
34#endif /* __ASM_SPARC64_MC146818RTC_H */
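
Illustrative only: reading one register through the CMOS_READ() accessor defined above. RTC_SECONDS and rtc_lock come from <linux/mc146818rtc.h>, and real code holds rtc_lock around the index/data access pair, as sketched here.

#include <linux/spinlock.h>
#include <linux/mc146818rtc.h>

static unsigned char example_rtc_seconds(void)
{
	unsigned long flags;
	unsigned char sec;

	spin_lock_irqsave(&rtc_lock, flags);
	sec = CMOS_READ(RTC_SECONDS);	/* write index port, read data port */
	spin_unlock_irqrestore(&rtc_lock, flags);
	return sec;
}
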
diff --git a/include/asm-sparc/mdesc.h b/include/asm-sparc/mdesc.h
new file mode 100644
index 000000000000..1acc7272e537
--- /dev/null
+++ b/include/asm-sparc/mdesc.h
@@ -0,0 +1,78 @@
1#ifndef _SPARC64_MDESC_H
2#define _SPARC64_MDESC_H
3
4#include <linux/types.h>
5#include <linux/cpumask.h>
6#include <asm/prom.h>
7
8struct mdesc_handle;
9
10/* Machine description operations are to be surrounded by grab and
11 * release calls. The mdesc_handle returned from the grab is
12 * the first argument to all of the operational calls that work
13 * on mdescs.
14 */
15extern struct mdesc_handle *mdesc_grab(void);
16extern void mdesc_release(struct mdesc_handle *);
17
18#define MDESC_NODE_NULL (~(u64)0)
19
20extern u64 mdesc_node_by_name(struct mdesc_handle *handle,
21 u64 from_node, const char *name);
22#define mdesc_for_each_node_by_name(__hdl, __node, __name) \
23 for (__node = mdesc_node_by_name(__hdl, MDESC_NODE_NULL, __name); \
24 (__node) != MDESC_NODE_NULL; \
25 __node = mdesc_node_by_name(__hdl, __node, __name))
26
27/* Property values returned from mdesc_get_property() are only
28 * valid inside of an mdesc_grab()/mdesc_release() sequence.
29 * Once mdesc_release() is called, the memory backing these
30 * pointers may have been freed.
31 *
32 * Therefore callers must make copies of any property values
33 * they need.
34 *
35 * These same rules apply to mdesc_node_name().
36 */
37extern const void *mdesc_get_property(struct mdesc_handle *handle,
38 u64 node, const char *name, int *lenp);
39extern const char *mdesc_node_name(struct mdesc_handle *hp, u64 node);
40
41/* MD arc iteration, the standard sequence is:
42 *
43 * unsigned long arc;
44 * mdesc_for_each_arc(arc, handle, node, MDESC_ARC_TYPE_{FWD,BACK}) {
45 * unsigned long target = mdesc_arc_target(handle, arc);
46 * ...
47 * }
48 */
49
50#define MDESC_ARC_TYPE_FWD "fwd"
51#define MDESC_ARC_TYPE_BACK "back"
52
53extern u64 mdesc_next_arc(struct mdesc_handle *handle, u64 from,
54 const char *arc_type);
55#define mdesc_for_each_arc(__arc, __hdl, __node, __type) \
56 for (__arc = mdesc_next_arc(__hdl, __node, __type); \
57 (__arc) != MDESC_NODE_NULL; \
58 __arc = mdesc_next_arc(__hdl, __arc, __type))
59
60extern u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc);
61
62extern void mdesc_update(void);
63
64struct mdesc_notifier_client {
65 void (*add)(struct mdesc_handle *handle, u64 node);
66 void (*remove)(struct mdesc_handle *handle, u64 node);
67
68 const char *node_name;
69 struct mdesc_notifier_client *next;
70};
71
72extern void mdesc_register_notifier(struct mdesc_notifier_client *client);
73
74extern void mdesc_fill_in_cpu_data(cpumask_t mask);
75
76extern void sun4v_mdesc_init(void);
77
78#endif
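
A hedged sketch of the iteration pattern documented above: walk every "cpu" node in the machine description and print its "id" property. The node and property names follow the usual sun4v MD layout but are assumptions here; error handling is minimal.

#include <linux/kernel.h>
#include <asm/mdesc.h>

static void example_list_cpus(void)
{
	struct mdesc_handle *hp = mdesc_grab();
	u64 node;

	if (!hp)
		return;

	mdesc_for_each_node_by_name(hp, node, "cpu") {
		const u64 *id = mdesc_get_property(hp, node, "id", NULL);

		if (id)
			printk(KERN_INFO "mdesc: cpu node, id %llu\n",
			       (unsigned long long) *id);
	}

	mdesc_release(hp);	/* property pointers are invalid after this */
}
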
diff --git a/include/asm-sparc/mmu.h b/include/asm-sparc/mmu.h
index ccd36d26615a..ee66bf6dcbd6 100644
--- a/include/asm-sparc/mmu.h
+++ b/include/asm-sparc/mmu.h
@@ -1,7 +1,8 @@
1#ifndef __MMU_H | 1#ifndef ___ASM_SPARC_MMU_H
2#define __MMU_H | 2#define ___ASM_SPARC_MMU_H
3 | 3#if defined(__sparc__) && defined(__arch64__)
4/* Default "unsigned long" context */ | 4#include <asm-sparc/mmu_64.h>
5typedef unsigned long mm_context_t; | 5#else
6 | 6#include <asm-sparc/mmu_32.h>
7#endif
7#endif | 8#endif
diff --git a/include/asm-sparc/mmu_32.h b/include/asm-sparc/mmu_32.h
new file mode 100644
index 000000000000..ccd36d26615a
--- /dev/null
+++ b/include/asm-sparc/mmu_32.h
@@ -0,0 +1,7 @@
1#ifndef __MMU_H
2#define __MMU_H
3
4/* Default "unsigned long" context */
5typedef unsigned long mm_context_t;
6
7#endif
diff --git a/include/asm-sparc/mmu_64.h b/include/asm-sparc/mmu_64.h
new file mode 100644
index 000000000000..9067dc500535
--- /dev/null
+++ b/include/asm-sparc/mmu_64.h
@@ -0,0 +1,123 @@
1#ifndef __MMU_H
2#define __MMU_H
3
4#include <linux/const.h>
5#include <asm/page.h>
6#include <asm/hypervisor.h>
7
8#define CTX_NR_BITS 13
9
10#define TAG_CONTEXT_BITS ((_AC(1,UL) << CTX_NR_BITS) - _AC(1,UL))
11
12/* UltraSPARC-III+ and later have a feature whereby you can
13 * select what page size the various Data-TLB instances in the
14 * chip use. In order to gracefully support this, we put the version
15 * field in a spot outside of the areas of the context register
16 * where this parameter is specified.
17 */
18#define CTX_VERSION_SHIFT 22
19#define CTX_VERSION_MASK ((~0UL) << CTX_VERSION_SHIFT)
20
21#define CTX_PGSZ_8KB _AC(0x0,UL)
22#define CTX_PGSZ_64KB _AC(0x1,UL)
23#define CTX_PGSZ_512KB _AC(0x2,UL)
24#define CTX_PGSZ_4MB _AC(0x3,UL)
25#define CTX_PGSZ_BITS _AC(0x7,UL)
26#define CTX_PGSZ0_NUC_SHIFT 61
27#define CTX_PGSZ1_NUC_SHIFT 58
28#define CTX_PGSZ0_SHIFT 16
29#define CTX_PGSZ1_SHIFT 19
30#define CTX_PGSZ_MASK ((CTX_PGSZ_BITS << CTX_PGSZ0_SHIFT) | \
31 (CTX_PGSZ_BITS << CTX_PGSZ1_SHIFT))
32
33#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
34#define CTX_PGSZ_BASE CTX_PGSZ_8KB
35#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
36#define CTX_PGSZ_BASE CTX_PGSZ_64KB
37#else
38#error No page size specified in kernel configuration
39#endif
40
41#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
42#define CTX_PGSZ_HUGE CTX_PGSZ_4MB
43#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
44#define CTX_PGSZ_HUGE CTX_PGSZ_512KB
45#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
46#define CTX_PGSZ_HUGE CTX_PGSZ_64KB
47#endif
48
49#define CTX_PGSZ_KERN CTX_PGSZ_4MB
50
51/* Thus, when running on UltraSPARC-III+ and later, we use the following
52 * PRIMARY_CONTEXT register values for the kernel context.
53 */
54#define CTX_CHEETAH_PLUS_NUC \
55 ((CTX_PGSZ_KERN << CTX_PGSZ0_NUC_SHIFT) | \
56 (CTX_PGSZ_BASE << CTX_PGSZ1_NUC_SHIFT))
57
58#define CTX_CHEETAH_PLUS_CTX0 \
59 ((CTX_PGSZ_KERN << CTX_PGSZ0_SHIFT) | \
60 (CTX_PGSZ_BASE << CTX_PGSZ1_SHIFT))
61
62/* If you want "the TLB context number" use CTX_NR_MASK. If you
63 * want "the bits I program into the context registers" use
64 * CTX_HW_MASK.
65 */
66#define CTX_NR_MASK TAG_CONTEXT_BITS
67#define CTX_HW_MASK (CTX_NR_MASK | CTX_PGSZ_MASK)
68
69#define CTX_FIRST_VERSION ((_AC(1,UL) << CTX_VERSION_SHIFT) + _AC(1,UL))
70#define CTX_VALID(__ctx) \
71 (!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK))
72#define CTX_HWBITS(__ctx) ((__ctx.sparc64_ctx_val) & CTX_HW_MASK)
73#define CTX_NRBITS(__ctx) ((__ctx.sparc64_ctx_val) & CTX_NR_MASK)
74
75#ifndef __ASSEMBLY__
76
77#define TSB_ENTRY_ALIGNMENT 16
78
79struct tsb {
80 unsigned long tag;
81 unsigned long pte;
82} __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));
83
84extern void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte);
85extern void tsb_flush(unsigned long ent, unsigned long tag);
86extern void tsb_init(struct tsb *tsb, unsigned long size);
87
88struct tsb_config {
89 struct tsb *tsb;
90 unsigned long tsb_rss_limit;
91 unsigned long tsb_nentries;
92 unsigned long tsb_reg_val;
93 unsigned long tsb_map_vaddr;
94 unsigned long tsb_map_pte;
95};
96
97#define MM_TSB_BASE 0
98
99#ifdef CONFIG_HUGETLB_PAGE
100#define MM_TSB_HUGE 1
101#define MM_NUM_TSBS 2
102#else
103#define MM_NUM_TSBS 1
104#endif
105
106typedef struct {
107 spinlock_t lock;
108 unsigned long sparc64_ctx_val;
109 unsigned long huge_pte_count;
110 struct tsb_config tsb_block[MM_NUM_TSBS];
111 struct hv_tsb_descr tsb_descr[MM_NUM_TSBS];
112} mm_context_t;
113
114#endif /* !__ASSEMBLY__ */
115
116#define TSB_CONFIG_TSB 0x00
117#define TSB_CONFIG_RSS_LIMIT 0x08
118#define TSB_CONFIG_NENTRIES 0x10
119#define TSB_CONFIG_REG_VAL 0x18
120#define TSB_CONFIG_MAP_VADDR 0x20
121#define TSB_CONFIG_MAP_PTE 0x28
122
123#endif /* __MMU_H */
diff --git a/include/asm-sparc/mmu_context.h b/include/asm-sparc/mmu_context.h
index 671a997b9e69..e14efb9532ff 100644
--- a/include/asm-sparc/mmu_context.h
+++ b/include/asm-sparc/mmu_context.h
@@ -1,42 +1,8 @@
1#ifndef __SPARC_MMU_CONTEXT_H | 1#ifndef ___ASM_SPARC_MMU_CONTEXT_H
2#define __SPARC_MMU_CONTEXT_H | 2#define ___ASM_SPARC_MMU_CONTEXT_H
3 | 3#if defined(__sparc__) && defined(__arch64__)
4#include <asm/btfixup.h> | 4#include <asm-sparc/mmu_context_64.h>
5 | 5#else
6#ifndef __ASSEMBLY__ | 6#include <asm-sparc/mmu_context_32.h>
7 | 7#endif
8#include <asm-generic/mm_hooks.h> | 8#endif
9
10static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
11{
12}
13
14/*
15 * Initialize a new mmu context. This is invoked when a new
16 * address space instance (unique or shared) is instantiated.
17 */
18#define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0)
19
20/*
21 * Destroy a dead context. This occurs when mmput drops the
22 * mm_users count to zero, the mmaps have been released, and
23 * all the page tables have been flushed. Our job is to destroy
24 * any remaining processor-specific state.
25 */
26BTFIXUPDEF_CALL(void, destroy_context, struct mm_struct *)
27
28#define destroy_context(mm) BTFIXUP_CALL(destroy_context)(mm)
29
30/* Switch the current MM context. */
31BTFIXUPDEF_CALL(void, switch_mm, struct mm_struct *, struct mm_struct *, struct task_struct *)
32
33#define switch_mm(old_mm, mm, tsk) BTFIXUP_CALL(switch_mm)(old_mm, mm, tsk)
34
35#define deactivate_mm(tsk,mm) do { } while (0)
36
37/* Activate a new MM instance for the current task. */
38#define activate_mm(active_mm, mm) switch_mm((active_mm), (mm), NULL)
39
40#endif /* !(__ASSEMBLY__) */
41
42#endif /* !(__SPARC_MMU_CONTEXT_H) */
diff --git a/include/asm-sparc/mmu_context_32.h b/include/asm-sparc/mmu_context_32.h
new file mode 100644
index 000000000000..671a997b9e69
--- /dev/null
+++ b/include/asm-sparc/mmu_context_32.h
@@ -0,0 +1,42 @@
1#ifndef __SPARC_MMU_CONTEXT_H
2#define __SPARC_MMU_CONTEXT_H
3
4#include <asm/btfixup.h>
5
6#ifndef __ASSEMBLY__
7
8#include <asm-generic/mm_hooks.h>
9
10static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
11{
12}
13
14/*
15 * Initialize a new mmu context. This is invoked when a new
16 * address space instance (unique or shared) is instantiated.
17 */
18#define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0)
19
20/*
21 * Destroy a dead context. This occurs when mmput drops the
22 * mm_users count to zero, the mmaps have been released, and
23 * all the page tables have been flushed. Our job is to destroy
24 * any remaining processor-specific state.
25 */
26BTFIXUPDEF_CALL(void, destroy_context, struct mm_struct *)
27
28#define destroy_context(mm) BTFIXUP_CALL(destroy_context)(mm)
29
30/* Switch the current MM context. */
31BTFIXUPDEF_CALL(void, switch_mm, struct mm_struct *, struct mm_struct *, struct task_struct *)
32
33#define switch_mm(old_mm, mm, tsk) BTFIXUP_CALL(switch_mm)(old_mm, mm, tsk)
34
35#define deactivate_mm(tsk,mm) do { } while (0)
36
37/* Activate a new MM instance for the current task. */
38#define activate_mm(active_mm, mm) switch_mm((active_mm), (mm), NULL)
39
40#endif /* !(__ASSEMBLY__) */
41
42#endif /* !(__SPARC_MMU_CONTEXT_H) */
diff --git a/include/asm-sparc/mmu_context_64.h b/include/asm-sparc/mmu_context_64.h
new file mode 100644
index 000000000000..5693ab482606
--- /dev/null
+++ b/include/asm-sparc/mmu_context_64.h
@@ -0,0 +1,155 @@
1#ifndef __SPARC64_MMU_CONTEXT_H
2#define __SPARC64_MMU_CONTEXT_H
3
4/* Derived heavily from Linus's Alpha/AXP ASN code... */
5
6#ifndef __ASSEMBLY__
7
8#include <linux/spinlock.h>
9#include <asm/system.h>
10#include <asm/spitfire.h>
11#include <asm-generic/mm_hooks.h>
12
13static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
14{
15}
16
17extern spinlock_t ctx_alloc_lock;
18extern unsigned long tlb_context_cache;
19extern unsigned long mmu_context_bmap[];
20
21extern void get_new_mmu_context(struct mm_struct *mm);
22#ifdef CONFIG_SMP
23extern void smp_new_mmu_context_version(void);
24#else
25#define smp_new_mmu_context_version() do { } while (0)
26#endif
27
28extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
29extern void destroy_context(struct mm_struct *mm);
30
31extern void __tsb_context_switch(unsigned long pgd_pa,
32 struct tsb_config *tsb_base,
33 struct tsb_config *tsb_huge,
34 unsigned long tsb_descr_pa);
35
36static inline void tsb_context_switch(struct mm_struct *mm)
37{
38 __tsb_context_switch(__pa(mm->pgd),
39 &mm->context.tsb_block[0],
40#ifdef CONFIG_HUGETLB_PAGE
41 (mm->context.tsb_block[1].tsb ?
42 &mm->context.tsb_block[1] :
43 NULL)
44#else
45 NULL
46#endif
47 , __pa(&mm->context.tsb_descr[0]));
48}
49
50extern void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long mm_rss);
51#ifdef CONFIG_SMP
52extern void smp_tsb_sync(struct mm_struct *mm);
53#else
54#define smp_tsb_sync(__mm) do { } while (0)
55#endif
56
57/* Set MMU context in the actual hardware. */
58#define load_secondary_context(__mm) \
59 __asm__ __volatile__( \
60 "\n661: stxa %0, [%1] %2\n" \
61 " .section .sun4v_1insn_patch, \"ax\"\n" \
62 " .word 661b\n" \
63 " stxa %0, [%1] %3\n" \
64 " .previous\n" \
65 " flush %%g6\n" \
66 : /* No outputs */ \
67 : "r" (CTX_HWBITS((__mm)->context)), \
68 "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU), "i" (ASI_MMU))
69
70extern void __flush_tlb_mm(unsigned long, unsigned long);
71
72/* Switch the current MM context. Interrupts are disabled. */
73static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
74{
75 unsigned long ctx_valid, flags;
76 int cpu;
77
78 if (unlikely(mm == &init_mm))
79 return;
80
81 spin_lock_irqsave(&mm->context.lock, flags);
82 ctx_valid = CTX_VALID(mm->context);
83 if (!ctx_valid)
84 get_new_mmu_context(mm);
85
86 /* We have to be extremely careful here or else we will miss
87 * a TSB grow if we switch back and forth between a kernel
88 * thread and an address space which has its TSB size increased
89 * on another processor.
90 *
91 * It is possible to play some games in order to optimize the
92 * switch, but the safest thing to do is to unconditionally
93 * perform the secondary context load and the TSB context switch.
94 *
95 * For reference the bad case is, for address space "A":
96 *
97 * CPU 0 CPU 1
98 * run address space A
99 * set cpu0's bits in cpu_vm_mask
100 * switch to kernel thread, borrow
101 * address space A via enter_lazy_tlb
102 * run address space A
103 * set cpu1's bit in cpu_vm_mask
104 * flush_tlb_pending()
105 * reset cpu_vm_mask to just cpu1
106 * TSB grow
107 * run address space A
108 * context was valid, so skip
109 * TSB context switch
110 *
111 * At that point cpu0 continues to use a stale TSB, the one from
112 * before the TSB grow performed on cpu1. cpu1 did not cross-call
113 * cpu0 to update its TSB because at that point the cpu_vm_mask
114 * only had cpu1 set in it.
115 */
116 load_secondary_context(mm);
117 tsb_context_switch(mm);
118
119 /* Any time a processor runs a context on an address space
120 * for the first time, we must flush that context out of the
121 * local TLB.
122 */
123 cpu = smp_processor_id();
124 if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
125 cpu_set(cpu, mm->cpu_vm_mask);
126 __flush_tlb_mm(CTX_HWBITS(mm->context),
127 SECONDARY_CONTEXT);
128 }
129 spin_unlock_irqrestore(&mm->context.lock, flags);
130}
131
132#define deactivate_mm(tsk,mm) do { } while (0)
133
134/* Activate a new MM instance for the current task. */
135static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
136{
137 unsigned long flags;
138 int cpu;
139
140 spin_lock_irqsave(&mm->context.lock, flags);
141 if (!CTX_VALID(mm->context))
142 get_new_mmu_context(mm);
143 cpu = smp_processor_id();
144 if (!cpu_isset(cpu, mm->cpu_vm_mask))
145 cpu_set(cpu, mm->cpu_vm_mask);
146
147 load_secondary_context(mm);
148 __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
149 tsb_context_switch(mm);
150 spin_unlock_irqrestore(&mm->context.lock, flags);
151}
152
153#endif /* !(__ASSEMBLY__) */
154
155#endif /* !(__SPARC64_MMU_CONTEXT_H) */
diff --git a/include/asm-sparc/mmzone.h b/include/asm-sparc/mmzone.h
new file mode 100644
index 000000000000..ebf5986c12ed
--- /dev/null
+++ b/include/asm-sparc/mmzone.h
@@ -0,0 +1,17 @@
1#ifndef _SPARC64_MMZONE_H
2#define _SPARC64_MMZONE_H
3
4#ifdef CONFIG_NEED_MULTIPLE_NODES
5
6extern struct pglist_data *node_data[];
7
8#define NODE_DATA(nid) (node_data[nid])
9#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
10#define node_end_pfn(nid) (NODE_DATA(nid)->node_end_pfn)
11
12extern int numa_cpu_lookup_table[];
13extern cpumask_t numa_cpumask_lookup_table[];
14
15#endif /* CONFIG_NEED_MULTIPLE_NODES */
16
17#endif /* _SPARC64_MMZONE_H */
diff --git a/include/asm-sparc/module.h b/include/asm-sparc/module.h
index cbd9e67b0c0b..516138fe681a 100644
--- a/include/asm-sparc/module.h
+++ b/include/asm-sparc/module.h
@@ -1,7 +1,8 @@
1#ifndef _ASM_SPARC_MODULE_H | 1#ifndef ___ASM_SPARC_MODULE_H
2#define _ASM_SPARC_MODULE_H | 2#define ___ASM_SPARC_MODULE_H
3struct mod_arch_specific { }; | 3#if defined(__sparc__) && defined(__arch64__)
4#define Elf_Shdr Elf32_Shdr | 4#include <asm-sparc/module_64.h>
5#define Elf_Sym Elf32_Sym | 5#else
6#define Elf_Ehdr Elf32_Ehdr | 6#include <asm-sparc/module_32.h>
7#endif /* _ASM_SPARC_MODULE_H */ | 7#endif
8#endif
diff --git a/include/asm-sparc/module_32.h b/include/asm-sparc/module_32.h
new file mode 100644
index 000000000000..cbd9e67b0c0b
--- /dev/null
+++ b/include/asm-sparc/module_32.h
@@ -0,0 +1,7 @@
1#ifndef _ASM_SPARC_MODULE_H
2#define _ASM_SPARC_MODULE_H
3struct mod_arch_specific { };
4#define Elf_Shdr Elf32_Shdr
5#define Elf_Sym Elf32_Sym
6#define Elf_Ehdr Elf32_Ehdr
7#endif /* _ASM_SPARC_MODULE_H */
diff --git a/include/asm-sparc/module_64.h b/include/asm-sparc/module_64.h
new file mode 100644
index 000000000000..3d77ba465783
--- /dev/null
+++ b/include/asm-sparc/module_64.h
@@ -0,0 +1,7 @@
1#ifndef _ASM_SPARC64_MODULE_H
2#define _ASM_SPARC64_MODULE_H
3struct mod_arch_specific { };
4#define Elf_Shdr Elf64_Shdr
5#define Elf_Sym Elf64_Sym
6#define Elf_Ehdr Elf64_Ehdr
7#endif /* _ASM_SPARC64_MODULE_H */
diff --git a/include/asm-sparc/mostek.h b/include/asm-sparc/mostek.h
index 29aad11b8f00..5b9f7fec7ee7 100644
--- a/include/asm-sparc/mostek.h
+++ b/include/asm-sparc/mostek.h
@@ -1,173 +1,8 @@
1/* | 1#ifndef ___ASM_SPARC_MOSTEK_H
2 * mostek.h: Describes the various Mostek time of day clock registers. | 2#define ___ASM_SPARC_MOSTEK_H
3 * | 3#if defined(__sparc__) && defined(__arch64__)
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | 4#include <asm-sparc/mostek_64.h>
5 * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
6 * Added intersil code 05/25/98 Chris Davis (cdavis@cois.on.ca)
7 */
8
9#ifndef _SPARC_MOSTEK_H
10#define _SPARC_MOSTEK_H
11
12#include <asm/idprom.h>
13#include <asm/io.h>
14
15/* M48T02 Register Map (adapted from Sun NVRAM/Hostid FAQ)
16 *
17 * Data
18 * Address Function
19 * Bit 7 Bit 6 Bit 5 Bit 4Bit 3 Bit 2 Bit 1 Bit 0
20 * 7ff - - - - - - - - Year 00-99
21 * 7fe 0 0 0 - - - - - Month 01-12
22 * 7fd 0 0 - - - - - - Date 01-31
23 * 7fc 0 FT 0 0 0 - - - Day 01-07
24 * 7fb KS 0 - - - - - - Hours 00-23
25 * 7fa 0 - - - - - - - Minutes 00-59
26 * 7f9 ST - - - - - - - Seconds 00-59
27 * 7f8 W R S - - - - - Control
28 *
29 * * ST is STOP BIT
30 * * W is WRITE BIT
31 * * R is READ BIT
32 * * S is SIGN BIT
33 * * FT is FREQ TEST BIT
34 * * KS is KICK START BIT
35 */
36
37/* The Mostek 48t02 real time clock and NVRAM chip. The registers
38 * other than the control register are in binary coded decimal. Some
39 * control bits also live outside the control register.
40 */
41#define mostek_read(_addr) readb(_addr)
42#define mostek_write(_addr,_val) writeb(_val, _addr)
43#define MOSTEK_EEPROM 0x0000UL
44#define MOSTEK_IDPROM 0x07d8UL
45#define MOSTEK_CREG 0x07f8UL
46#define MOSTEK_SEC 0x07f9UL
47#define MOSTEK_MIN 0x07faUL
48#define MOSTEK_HOUR 0x07fbUL
49#define MOSTEK_DOW 0x07fcUL
50#define MOSTEK_DOM 0x07fdUL
51#define MOSTEK_MONTH 0x07feUL
52#define MOSTEK_YEAR 0x07ffUL
53
54struct mostek48t02 {
55 volatile char eeprom[2008]; /* This is the eeprom, don't touch! */
56 struct idprom idprom; /* The idprom lives here. */
57 volatile unsigned char creg; /* Control register */
58 volatile unsigned char sec; /* Seconds (0-59) */
59 volatile unsigned char min; /* Minutes (0-59) */
60 volatile unsigned char hour; /* Hour (0-23) */
61 volatile unsigned char dow; /* Day of the week (1-7) */
62 volatile unsigned char dom; /* Day of the month (1-31) */
63 volatile unsigned char month; /* Month of year (1-12) */
64 volatile unsigned char year; /* Year (0-99) */
65};
66
67extern spinlock_t mostek_lock;
68extern void __iomem *mstk48t02_regs;
69
70/* Control register values. */
71#define MSTK_CREG_WRITE 0x80 /* Must set this before placing values. */
72#define MSTK_CREG_READ 0x40 /* Stop updates to allow a clean read. */
73#define MSTK_CREG_SIGN 0x20 /* Slow/speed clock in calibration mode. */
74
75/* Control bits that live in the other registers. */
76#define MSTK_STOP 0x80 /* Stop the clock oscillator. (sec) */
77#define MSTK_KICK_START 0x80 /* Kick start the clock chip. (hour) */
78#define MSTK_FREQ_TEST 0x40 /* Frequency test mode. (day) */
79
80#define MSTK_YEAR_ZERO 1968 /* If year reg has zero, it is 1968. */
81#define MSTK_CVT_YEAR(yr) ((yr) + MSTK_YEAR_ZERO)
82
83/* Masks that define how much space each value takes up. */
84#define MSTK_SEC_MASK 0x7f
85#define MSTK_MIN_MASK 0x7f
86#define MSTK_HOUR_MASK 0x3f
87#define MSTK_DOW_MASK 0x07
88#define MSTK_DOM_MASK 0x3f
89#define MSTK_MONTH_MASK 0x1f
90#define MSTK_YEAR_MASK 0xffU
91
92/* Binary coded decimal conversion macros. */
93#define MSTK_REGVAL_TO_DECIMAL(x) (((x) & 0x0F) + 0x0A * ((x) >> 0x04))
94#define MSTK_DECIMAL_TO_REGVAL(x) ((((x) / 0x0A) << 0x04) + ((x) % 0x0A))
95
96/* Generic register set and get macros for internal use. */
97#define MSTK_GET(regs,var,mask) (MSTK_REGVAL_TO_DECIMAL(((struct mostek48t02 *)regs)->var & MSTK_ ## mask ## _MASK))
98#define MSTK_SET(regs,var,value,mask) do { ((struct mostek48t02 *)regs)->var &= ~(MSTK_ ## mask ## _MASK); ((struct mostek48t02 *)regs)->var |= MSTK_DECIMAL_TO_REGVAL(value) & (MSTK_ ## mask ## _MASK); } while (0)
99
100/* Macros to make register access easier on our fingers. These give you
101 * the decimal value of the register requested if applicable. You pass
102 * a pointer to a 'struct mostek48t02'.
103 */
104#define MSTK_REG_CREG(regs) (((struct mostek48t02 *)regs)->creg)
105#define MSTK_REG_SEC(regs) MSTK_GET(regs,sec,SEC)
106#define MSTK_REG_MIN(regs) MSTK_GET(regs,min,MIN)
107#define MSTK_REG_HOUR(regs) MSTK_GET(regs,hour,HOUR)
108#define MSTK_REG_DOW(regs) MSTK_GET(regs,dow,DOW)
109#define MSTK_REG_DOM(regs) MSTK_GET(regs,dom,DOM)
110#define MSTK_REG_MONTH(regs) MSTK_GET(regs,month,MONTH)
111#define MSTK_REG_YEAR(regs) MSTK_GET(regs,year,YEAR)
112
113#define MSTK_SET_REG_SEC(regs,value) MSTK_SET(regs,sec,value,SEC)
114#define MSTK_SET_REG_MIN(regs,value) MSTK_SET(regs,min,value,MIN)
115#define MSTK_SET_REG_HOUR(regs,value) MSTK_SET(regs,hour,value,HOUR)
116#define MSTK_SET_REG_DOW(regs,value) MSTK_SET(regs,dow,value,DOW)
117#define MSTK_SET_REG_DOM(regs,value) MSTK_SET(regs,dom,value,DOM)
118#define MSTK_SET_REG_MONTH(regs,value) MSTK_SET(regs,month,value,MONTH)
119#define MSTK_SET_REG_YEAR(regs,value) MSTK_SET(regs,year,value,YEAR)
120
121
122/* The Mostek 48t08 clock chip. Found on Sun4m's I think. It has the
123 * same (basically) layout of the 48t02 chip except for the extra
124 * NVRAM on board (8 KB against the 48t02's 2 KB).
125 */
126struct mostek48t08 {
127 char offset[6*1024]; /* Magic things may be here, who knows? */
128 struct mostek48t02 regs; /* Here is what we are interested in. */
129};
130
131extern enum sparc_clock_type sp_clock_typ;
132
133#ifdef CONFIG_SUN4
134enum sparc_clock_type { MSTK48T02, MSTK48T08, \
135INTERSIL, MSTK_INVALID };
136#else | 5#else
137enum sparc_clock_type { MSTK48T02, MSTK48T08, \ | 6#include <asm-sparc/mostek_32.h>
138MSTK_INVALID };
139#endif | 7#endif
140
141#ifdef CONFIG_SUN4
142/* intersil on a sun 4/260 code data from harris doc */
143struct intersil_dt {
144 volatile unsigned char int_csec;
145 volatile unsigned char int_hour;
146 volatile unsigned char int_min;
147 volatile unsigned char int_sec;
148 volatile unsigned char int_month;
149 volatile unsigned char int_day;
150 volatile unsigned char int_year;
151 volatile unsigned char int_dow;
152};
153
154struct intersil {
155 struct intersil_dt clk;
156 struct intersil_dt cmp;
157 volatile unsigned char int_intr_reg;
158 volatile unsigned char int_cmd_reg;
159};
160
161#define INTERSIL_STOP 0x0
162#define INTERSIL_START 0x8
163#define INTERSIL_INTR_DISABLE 0x0
164#define INTERSIL_INTR_ENABLE 0x10
165#define INTERSIL_32K 0x0
166#define INTERSIL_NORMAL 0x0
167#define INTERSIL_24H 0x4
168#define INTERSIL_INT_100HZ 0x2
169
170/* end of intersil info */
171#endif | 8#endif
172
173#endif /* !(_SPARC_MOSTEK_H) */
diff --git a/include/asm-sparc/mostek_32.h b/include/asm-sparc/mostek_32.h
new file mode 100644
index 000000000000..a99590c4c507
--- /dev/null
+++ b/include/asm-sparc/mostek_32.h
@@ -0,0 +1,171 @@
1/*
2 * mostek.h: Describes the various Mostek time of day clock registers.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
6 * Added intersil code 05/25/98 Chris Davis (cdavis@cois.on.ca)
7 */
8
9#ifndef _SPARC_MOSTEK_H
10#define _SPARC_MOSTEK_H
11
12#include <asm/idprom.h>
13#include <asm/io.h>
14
15/* M48T02 Register Map (adapted from Sun NVRAM/Hostid FAQ)
16 *
17 * Data
18 * Address Function
19 * Bit 7 Bit 6 Bit 5 Bit 4Bit 3 Bit 2 Bit 1 Bit 0
20 * 7ff - - - - - - - - Year 00-99
21 * 7fe 0 0 0 - - - - - Month 01-12
22 * 7fd 0 0 - - - - - - Date 01-31
23 * 7fc 0 FT 0 0 0 - - - Day 01-07
24 * 7fb KS 0 - - - - - - Hours 00-23
25 * 7fa 0 - - - - - - - Minutes 00-59
26 * 7f9 ST - - - - - - - Seconds 00-59
27 * 7f8 W R S - - - - - Control
28 *
29 * * ST is STOP BIT
30 * * W is WRITE BIT
31 * * R is READ BIT
32 * * S is SIGN BIT
33 * * FT is FREQ TEST BIT
34 * * KS is KICK START BIT
35 */
36
37/* The Mostek 48t02 real time clock and NVRAM chip. The registers
38 * other than the control register are in binary coded decimal. Some
39 * control bits also live outside the control register.
40 */
41#define mostek_read(_addr) readb(_addr)
42#define mostek_write(_addr,_val) writeb(_val, _addr)
43#define MOSTEK_EEPROM 0x0000UL
44#define MOSTEK_IDPROM 0x07d8UL
45#define MOSTEK_CREG 0x07f8UL
46#define MOSTEK_SEC 0x07f9UL
47#define MOSTEK_MIN 0x07faUL
48#define MOSTEK_HOUR 0x07fbUL
49#define MOSTEK_DOW 0x07fcUL
50#define MOSTEK_DOM 0x07fdUL
51#define MOSTEK_MONTH 0x07feUL
52#define MOSTEK_YEAR 0x07ffUL
53
54struct mostek48t02 {
55 volatile char eeprom[2008]; /* This is the eeprom, don't touch! */
56 struct idprom idprom; /* The idprom lives here. */
57 volatile unsigned char creg; /* Control register */
58 volatile unsigned char sec; /* Seconds (0-59) */
59 volatile unsigned char min; /* Minutes (0-59) */
60 volatile unsigned char hour; /* Hour (0-23) */
61 volatile unsigned char dow; /* Day of the week (1-7) */
62 volatile unsigned char dom; /* Day of the month (1-31) */
63 volatile unsigned char month; /* Month of year (1-12) */
64 volatile unsigned char year; /* Year (0-99) */
65};
66
67extern spinlock_t mostek_lock;
68extern void __iomem *mstk48t02_regs;
69
70/* Control register values. */
71#define MSTK_CREG_WRITE 0x80 /* Must set this before placing values. */
72#define MSTK_CREG_READ 0x40 /* Stop updates to allow a clean read. */
73#define MSTK_CREG_SIGN 0x20 /* Slow/speed clock in calibration mode. */
74
75/* Control bits that live in the other registers. */
76#define MSTK_STOP 0x80 /* Stop the clock oscillator. (sec) */
77#define MSTK_KICK_START 0x80 /* Kick start the clock chip. (hour) */
78#define MSTK_FREQ_TEST 0x40 /* Frequency test mode. (day) */
79
80#define MSTK_YEAR_ZERO 1968 /* If year reg has zero, it is 1968. */
81#define MSTK_CVT_YEAR(yr) ((yr) + MSTK_YEAR_ZERO)
82
83/* Masks that define how much space each value takes up. */
84#define MSTK_SEC_MASK 0x7f
85#define MSTK_MIN_MASK 0x7f
86#define MSTK_HOUR_MASK 0x3f
87#define MSTK_DOW_MASK 0x07
88#define MSTK_DOM_MASK 0x3f
89#define MSTK_MONTH_MASK 0x1f
90#define MSTK_YEAR_MASK 0xffU
91
92/* Binary coded decimal conversion macros. */
93#define MSTK_REGVAL_TO_DECIMAL(x) (((x) & 0x0F) + 0x0A * ((x) >> 0x04))
94#define MSTK_DECIMAL_TO_REGVAL(x) ((((x) / 0x0A) << 0x04) + ((x) % 0x0A))
95
96/* Generic register set and get macros for internal use. */
97#define MSTK_GET(regs,var,mask) (MSTK_REGVAL_TO_DECIMAL(((struct mostek48t02 *)regs)->var & MSTK_ ## mask ## _MASK))
98#define MSTK_SET(regs,var,value,mask) do { ((struct mostek48t02 *)regs)->var &= ~(MSTK_ ## mask ## _MASK); ((struct mostek48t02 *)regs)->var |= MSTK_DECIMAL_TO_REGVAL(value) & (MSTK_ ## mask ## _MASK); } while (0)
99
100/* Macros to make register access easier on our fingers. These give you
101 * the decimal value of the register requested if applicable. You pass
102 * a pointer to a 'struct mostek48t02'.
103 */
104#define MSTK_REG_CREG(regs) (((struct mostek48t02 *)regs)->creg)
105#define MSTK_REG_SEC(regs) MSTK_GET(regs,sec,SEC)
106#define MSTK_REG_MIN(regs) MSTK_GET(regs,min,MIN)
107#define MSTK_REG_HOUR(regs) MSTK_GET(regs,hour,HOUR)
108#define MSTK_REG_DOW(regs) MSTK_GET(regs,dow,DOW)
109#define MSTK_REG_DOM(regs) MSTK_GET(regs,dom,DOM)
110#define MSTK_REG_MONTH(regs) MSTK_GET(regs,month,MONTH)
111#define MSTK_REG_YEAR(regs) MSTK_GET(regs,year,YEAR)
112
113#define MSTK_SET_REG_SEC(regs,value) MSTK_SET(regs,sec,value,SEC)
114#define MSTK_SET_REG_MIN(regs,value) MSTK_SET(regs,min,value,MIN)
115#define MSTK_SET_REG_HOUR(regs,value) MSTK_SET(regs,hour,value,HOUR)
116#define MSTK_SET_REG_DOW(regs,value) MSTK_SET(regs,dow,value,DOW)
117#define MSTK_SET_REG_DOM(regs,value) MSTK_SET(regs,dom,value,DOM)
118#define MSTK_SET_REG_MONTH(regs,value) MSTK_SET(regs,month,value,MONTH)
119#define MSTK_SET_REG_YEAR(regs,value) MSTK_SET(regs,year,value,YEAR)
120
121
122/* The Mostek 48t08 clock chip. Found on Sun4m's I think. It has the
123 * same (basically) layout of the 48t02 chip except for the extra
124 * NVRAM on board (8 KB against the 48t02's 2 KB).
125 */
126struct mostek48t08 {
127 char offset[6*1024]; /* Magic things may be here, who knows? */
128 struct mostek48t02 regs; /* Here is what we are interested in. */
129};
130
131#ifdef CONFIG_SUN4
132enum sparc_clock_type { MSTK48T02, MSTK48T08, \
133INTERSIL, MSTK_INVALID };
134#else
135enum sparc_clock_type { MSTK48T02, MSTK48T08, \
136MSTK_INVALID };
137#endif
138
139#ifdef CONFIG_SUN4
140/* intersil on a sun 4/260 code data from harris doc */
141struct intersil_dt {
142 volatile unsigned char int_csec;
143 volatile unsigned char int_hour;
144 volatile unsigned char int_min;
145 volatile unsigned char int_sec;
146 volatile unsigned char int_month;
147 volatile unsigned char int_day;
148 volatile unsigned char int_year;
149 volatile unsigned char int_dow;
150};
151
152struct intersil {
153 struct intersil_dt clk;
154 struct intersil_dt cmp;
155 volatile unsigned char int_intr_reg;
156 volatile unsigned char int_cmd_reg;
157};
158
159#define INTERSIL_STOP 0x0
160#define INTERSIL_START 0x8
161#define INTERSIL_INTR_DISABLE 0x0
162#define INTERSIL_INTR_ENABLE 0x10
163#define INTERSIL_32K 0x0
164#define INTERSIL_NORMAL 0x0
165#define INTERSIL_24H 0x4
166#define INTERSIL_INT_100HZ 0x2
167
168/* end of intersil info */
169#endif
170
171#endif /* !(_SPARC_MOSTEK_H) */
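
A hedged sketch of the documented access pattern for the macros above: freeze register updates with MSTK_CREG_READ, read the BCD fields through the MSTK_REG_*() helpers, then unfreeze. It assumes mstk48t02_regs points at a mapped 48T02 and that mostek_lock serializes access, as elsewhere in the tree; the function name is a placeholder.

#include <linux/spinlock.h>
#include <asm/mostek.h>

static void example_read_mostek_time(unsigned int *hour, unsigned int *min,
				     unsigned int *sec)
{
	unsigned long flags;
	void __iomem *regs = mstk48t02_regs;

	spin_lock_irqsave(&mostek_lock, flags);
	MSTK_REG_CREG(regs) |= MSTK_CREG_READ;	/* stop updates for a clean read */
	*hour = MSTK_REG_HOUR(regs);		/* values come back in decimal */
	*min  = MSTK_REG_MIN(regs);
	*sec  = MSTK_REG_SEC(regs);
	MSTK_REG_CREG(regs) &= ~MSTK_CREG_READ;	/* resume updates */
	spin_unlock_irqrestore(&mostek_lock, flags);
}
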
diff --git a/include/asm-sparc/mostek_64.h b/include/asm-sparc/mostek_64.h
new file mode 100644
index 000000000000..c5652de2ace2
--- /dev/null
+++ b/include/asm-sparc/mostek_64.h
@@ -0,0 +1,143 @@
1/* mostek.h: Describes the various Mostek time of day clock registers.
2 *
3 * Copyright (C) 1995 David S. Miller (davem@davemloft.net)
4 * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
5 */
6
7#ifndef _SPARC64_MOSTEK_H
8#define _SPARC64_MOSTEK_H
9
10#include <asm/idprom.h>
11
12/* M48T02 Register Map (adapted from Sun NVRAM/Hostid FAQ)
13 *
14 * Data
15 * Address Function
16 * Bit 7 Bit 6 Bit 5 Bit 4Bit 3 Bit 2 Bit 1 Bit 0
17 * 7ff - - - - - - - - Year 00-99
18 * 7fe 0 0 0 - - - - - Month 01-12
19 * 7fd 0 0 - - - - - - Date 01-31
20 * 7fc 0 FT 0 0 0 - - - Day 01-07
21 * 7fb KS 0 - - - - - - Hours 00-23
22 * 7fa 0 - - - - - - - Minutes 00-59
23 * 7f9 ST - - - - - - - Seconds 00-59
24 * 7f8 W R S - - - - - Control
25 *
26 * * ST is STOP BIT
27 * * W is WRITE BIT
28 * * R is READ BIT
29 * * S is SIGN BIT
30 * * FT is FREQ TEST BIT
31 * * KS is KICK START BIT
32 */
33
34/* The Mostek 48t02 real time clock and NVRAM chip. The registers
35 * other than the control register are in binary coded decimal. Some
36 * control bits also live outside the control register.
37 *
38 * We now deal with physical addresses for I/O to the chip. -DaveM
39 */
40static inline u8 mostek_read(void __iomem *addr)
41{
42 u8 ret;
43
44 __asm__ __volatile__("lduba [%1] %2, %0"
45 : "=r" (ret)
46 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
47 return ret;
48}
49
50static inline void mostek_write(void __iomem *addr, u8 val)
51{
52 __asm__ __volatile__("stba %0, [%1] %2"
53 : /* no outputs */
54 : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
55}
56
57#define MOSTEK_EEPROM 0x0000UL
58#define MOSTEK_IDPROM 0x07d8UL
59#define MOSTEK_CREG 0x07f8UL
60#define MOSTEK_SEC 0x07f9UL
61#define MOSTEK_MIN 0x07faUL
62#define MOSTEK_HOUR 0x07fbUL
63#define MOSTEK_DOW 0x07fcUL
64#define MOSTEK_DOM 0x07fdUL
65#define MOSTEK_MONTH 0x07feUL
66#define MOSTEK_YEAR 0x07ffUL
67
68extern spinlock_t mostek_lock;
69extern void __iomem *mstk48t02_regs;
70
71/* Control register values. */
72#define MSTK_CREG_WRITE 0x80 /* Must set this before placing values. */
73#define MSTK_CREG_READ 0x40 /* Stop updates to allow a clean read. */
74#define MSTK_CREG_SIGN 0x20 /* Slow/speed clock in calibration mode. */
75
76/* Control bits that live in the other registers. */
77#define MSTK_STOP 0x80 /* Stop the clock oscillator. (sec) */
78#define MSTK_KICK_START 0x80 /* Kick start the clock chip. (hour) */
79#define MSTK_FREQ_TEST 0x40 /* Frequency test mode. (day) */
80
81#define MSTK_YEAR_ZERO 1968 /* If year reg has zero, it is 1968. */
82#define MSTK_CVT_YEAR(yr) ((yr) + MSTK_YEAR_ZERO)
83
84/* Masks that define how much space each value takes up. */
85#define MSTK_SEC_MASK 0x7f
86#define MSTK_MIN_MASK 0x7f
87#define MSTK_HOUR_MASK 0x3f
88#define MSTK_DOW_MASK 0x07
89#define MSTK_DOM_MASK 0x3f
90#define MSTK_MONTH_MASK 0x1f
91#define MSTK_YEAR_MASK 0xffU
92
93/* Binary coded decimal conversion macros. */
94#define MSTK_REGVAL_TO_DECIMAL(x) (((x) & 0x0F) + 0x0A * ((x) >> 0x04))
95#define MSTK_DECIMAL_TO_REGVAL(x) ((((x) / 0x0A) << 0x04) + ((x) % 0x0A))
96
97/* Generic register set and get macros for internal use. */
98#define MSTK_GET(regs,name) \
99 (MSTK_REGVAL_TO_DECIMAL(mostek_read(regs + MOSTEK_ ## name) & MSTK_ ## name ## _MASK))
100#define MSTK_SET(regs,name,value) \
101do { u8 __val = mostek_read(regs + MOSTEK_ ## name); \
102 __val &= ~(MSTK_ ## name ## _MASK); \
103 __val |= (MSTK_DECIMAL_TO_REGVAL(value) & \
104 (MSTK_ ## name ## _MASK)); \
105 mostek_write(regs + MOSTEK_ ## name, __val); \
106} while(0)
107
108/* Macros to make register access easier on our fingers. These give you
109 * the decimal value of the register requested if applicable. You pass
110 * a pointer to a 'struct mostek48t02'.
111 */
112#define MSTK_REG_CREG(regs) (mostek_read((regs) + MOSTEK_CREG))
113#define MSTK_REG_SEC(regs) MSTK_GET(regs,SEC)
114#define MSTK_REG_MIN(regs) MSTK_GET(regs,MIN)
115#define MSTK_REG_HOUR(regs) MSTK_GET(regs,HOUR)
116#define MSTK_REG_DOW(regs) MSTK_GET(regs,DOW)
117#define MSTK_REG_DOM(regs) MSTK_GET(regs,DOM)
118#define MSTK_REG_MONTH(regs) MSTK_GET(regs,MONTH)
119#define MSTK_REG_YEAR(regs) MSTK_GET(regs,YEAR)
120
121#define MSTK_SET_REG_SEC(regs,value) MSTK_SET(regs,SEC,value)
122#define MSTK_SET_REG_MIN(regs,value) MSTK_SET(regs,MIN,value)
123#define MSTK_SET_REG_HOUR(regs,value) MSTK_SET(regs,HOUR,value)
124#define MSTK_SET_REG_DOW(regs,value) MSTK_SET(regs,DOW,value)
125#define MSTK_SET_REG_DOM(regs,value) MSTK_SET(regs,DOM,value)
126#define MSTK_SET_REG_MONTH(regs,value) MSTK_SET(regs,MONTH,value)
127#define MSTK_SET_REG_YEAR(regs,value) MSTK_SET(regs,YEAR,value)
128
129
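For illustration only: a sketch of a consistent time-of-day read on sparc64 using the physical-address accessors above, serialized with mostek_lock. Century handling and error handling are omitted, and the usual spinlock declarations are assumed to be available.

static void example_read_tod64(unsigned int *year, unsigned int *sec)
{
        void __iomem *regs = mstk48t02_regs;
        unsigned long flags;
        u8 creg;

        spin_lock_irqsave(&mostek_lock, flags);

        /* Set the READ bit so the user registers stop updating. */
        creg = mostek_read(regs + MOSTEK_CREG);
        mostek_write(regs + MOSTEK_CREG, creg | MSTK_CREG_READ);

        *sec  = MSTK_REG_SEC(regs);
        *year = MSTK_CVT_YEAR(MSTK_REG_YEAR(regs));

        /* Clear the READ bit so the clock runs again. */
        creg = mostek_read(regs + MOSTEK_CREG);
        mostek_write(regs + MOSTEK_CREG, creg & ~MSTK_CREG_READ);

        spin_unlock_irqrestore(&mostek_lock, flags);
}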
130/* The Mostek 48t08 clock chip. Found on Sun4m's I think. It has
131 * basically the same layout as the 48t02 chip except for the extra
132 * NVRAM on board (8 KB against the 48t02's 2 KB).
133 */
134#define MOSTEK_48T08_OFFSET 0x0000UL /* Lower NVRAM portions */
135#define MOSTEK_48T08_48T02 0x1800UL /* Offset to 48T02 chip */
136
137/* SUN5 systems usually have 48t59 model clock chipsets. But we keep the older
138 * clock chip definitions around just in case.
139 */
140#define MOSTEK_48T59_OFFSET 0x0000UL /* Lower NVRAM portions */
141#define MOSTEK_48T59_48T02 0x1800UL /* Offset to 48T02 chip */
142
143#endif /* !(_SPARC64_MOSTEK_H) */
diff --git a/include/asm-sparc/msgbuf.h b/include/asm-sparc/msgbuf.h
index 8cec9ad0b825..efc7cbe9788f 100644
--- a/include/asm-sparc/msgbuf.h
+++ b/include/asm-sparc/msgbuf.h
@@ -1,7 +1,7 @@
-#ifndef _SPARC64_MSGBUF_H
-#define _SPARC64_MSGBUF_H
+#ifndef _SPARC_MSGBUF_H
+#define _SPARC_MSGBUF_H
 
 /*
  * The msqid64_ds structure for sparc64 architecture.
  * Note extra padding because this structure is passed back and forth
  * between kernel and user space.
@@ -11,13 +11,20 @@
  * - 2 miscellaneous 32-bit values
  */
 
+#if defined(__sparc__) && defined(__arch64__)
+# define PADDING(x)
+#else
+# define PADDING(x) unsigned int x;
+#endif
+
+
 struct msqid64_ds {
 	struct ipc64_perm msg_perm;
-	unsigned int __pad1;
+	PADDING(__pad1)
 	__kernel_time_t msg_stime;	/* last msgsnd time */
-	unsigned int __pad2;
+	PADDING(__pad2)
 	__kernel_time_t msg_rtime;	/* last msgrcv time */
-	unsigned int __pad3;
+	PADDING(__pad3)
 	__kernel_time_t msg_ctime;	/* last change time */
 	unsigned long msg_cbytes;	/* current number of bytes on queue */
 	unsigned long msg_qnum;	/* number of messages in queue */
@@ -27,5 +34,5 @@ struct msqid64_ds {
 	unsigned long __unused1;
 	unsigned long __unused2;
 };
-
-#endif /* _SPARC64_MSGBUF_H */
+#undef PADDING
+#endif /* _SPARC_MSGBUF_H */
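For clarity (not part of the patch): the PADDING() helper keeps the user-visible layout identical on both ABIs. On 32-bit sparc __kernel_time_t is 32 bits, so each time field keeps an explicit pad word reserving the rest of a 64-bit slot; on sparc64 the field is already 64 bits and the macro expands to nothing. The struct names below are invented for this note only.

/* What the 32-bit build sees: PADDING(__pad1) expands to a pad word. */
struct msqid64_ds_as_seen_on_sparc32 {
        struct ipc64_perm msg_perm;
        unsigned int __pad1;            /* reserves the other half of a 64-bit slot */
        __kernel_time_t msg_stime;      /* 32-bit time_t */
        /* ... */
};

/* What the 64-bit build sees: PADDING(__pad1) expands to nothing. */
struct msqid64_ds_as_seen_on_sparc64 {
        struct ipc64_perm msg_perm;
        __kernel_time_t msg_stime;      /* 64-bit time_t, no pad needed */
        /* ... */
};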
diff --git a/include/asm-sparc/namei.h b/include/asm-sparc/namei.h
deleted file mode 100644
index 0646102fb020..000000000000
--- a/include/asm-sparc/namei.h
+++ /dev/null
@@ -1,13 +0,0 @@
1/*
2 * linux/include/asm-sparc/namei.h
3 *
4 * Routines to handle famous /usr/gnemul/s*.
5 * Included from linux/fs/namei.c
6 */
7
8#ifndef __SPARC_NAMEI_H
9#define __SPARC_NAMEI_H
10
11#define __emul_prefix() NULL
12
13#endif /* __SPARC_NAMEI_H */
diff --git a/include/asm-sparc/ns87303.h b/include/asm-sparc/ns87303.h
new file mode 100644
index 000000000000..686defe6aaa0
--- /dev/null
+++ b/include/asm-sparc/ns87303.h
@@ -0,0 +1,118 @@
1/* ns87303.h: Configuration Register Description for the
2 * National Semiconductor PC87303 (SuperIO).
3 *
4 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
5 */
6
7#ifndef _SPARC_NS87303_H
8#define _SPARC_NS87303_H 1
9
10/*
11 * Control Register Index Values
12 */
13#define FER 0x00
14#define FAR 0x01
15#define PTR 0x02
16#define FCR 0x03
17#define PCR 0x04
18#define KRR 0x05
19#define PMC 0x06
20#define TUP 0x07
21#define SID 0x08
22#define ASC 0x09
23#define CS0CF0 0x0a
24#define CS0CF1 0x0b
25#define CS1CF0 0x0c
26#define CS1CF1 0x0d
27
28/* Function Enable Register (FER) bits */
29#define FER_EDM 0x10 /* Encoded Drive and Motor pin information */
30
31/* Function Address Register (FAR) bits */
32#define FAR_LPT_MASK 0x03
33#define FAR_LPTB 0x00
34#define FAR_LPTA 0x01
35#define FAR_LPTC 0x02
36
37/* Power and Test Register (PTR) bits */
38#define PTR_LPTB_IRQ7 0x08
39#define PTR_LEVEL_IRQ 0x80 /* When not ECP/EPP: Use level IRQ */
40#define PTR_LPT_REG_DIR 0x80 /* When ECP/EPP: LPT CTR controls direction */
41 /* of the parallel port */
42
43/* Function Control Register (FCR) bits */
44#define FCR_LDE 0x10 /* Logical Drive Exchange */
45#define FCR_ZWS_ENA 0x20 /* Enable short host read/write in ECP/EPP */
46
47/* Printer Control Register (PCR) bits */
48#define PCR_EPP_ENABLE 0x01
49#define PCR_EPP_IEEE 0x02 /* Enable EPP Version 1.9 (IEEE 1284) */
50#define PCR_ECP_ENABLE 0x04
51#define PCR_ECP_CLK_ENA 0x08 /* If 0 ECP Clock is stopped on Power down */
52#define PCR_IRQ_POLAR 0x20 /* If 0 IRQ is level high or negative pulse, */
53 /* if 1 polarity is inverted */
54#define PCR_IRQ_ODRAIN 0x40 /* If 1, IRQ is open drain */
55
56/* Tape UARTs and Parallel Port Config Register (TUP) bits */
57#define TUP_EPP_TIMO 0x02 /* Enable EPP timeout IRQ */
58
59/* Advanced SuperIO Config Register (ASC) bits */
60#define ASC_LPT_IRQ7 0x01 /* Always use IRQ7 for LPT */
61#define ASC_DRV2_SEL 0x02 /* Logical Drive Exchange controlled by TDR */
62
63#define FER_RESERVED 0x00
64#define FAR_RESERVED 0x00
65#define PTR_RESERVED 0x73
66#define FCR_RESERVED 0xc4
67#define PCR_RESERVED 0x10
68#define KRR_RESERVED 0x00
69#define PMC_RESERVED 0x98
70#define TUP_RESERVED 0xfb
71#define SIP_RESERVED 0x00
72#define ASC_RESERVED 0x18
73#define CS0CF0_RESERVED 0x00
74#define CS0CF1_RESERVED 0x08
75#define CS1CF0_RESERVED 0x00
76#define CS1CF1_RESERVED 0x08
77
78#ifdef __KERNEL__
79
80#include <linux/spinlock.h>
81
82#include <asm/system.h>
83#include <asm/io.h>
84
85extern spinlock_t ns87303_lock;
86
87static inline int ns87303_modify(unsigned long port, unsigned int index,
88 unsigned char clr, unsigned char set)
89{
90 static unsigned char reserved[] = {
91 FER_RESERVED, FAR_RESERVED, PTR_RESERVED, FCR_RESERVED,
92 PCR_RESERVED, KRR_RESERVED, PMC_RESERVED, TUP_RESERVED,
93 SIP_RESERVED, ASC_RESERVED, CS0CF0_RESERVED, CS0CF1_RESERVED,
94 CS1CF0_RESERVED, CS1CF1_RESERVED
95 };
96 unsigned long flags;
97 unsigned char value;
98
99 if (index > 0x0d)
100 return -EINVAL;
101
102 spin_lock_irqsave(&ns87303_lock, flags);
103
104 outb(index, port);
105 value = inb(port + 1);
106 value &= ~(reserved[index] | clr);
107 value |= set;
108 outb(value, port + 1);
109 outb(value, port + 1);
110
111 spin_unlock_irqrestore(&ns87303_lock, flags);
112
113 return 0;
114}
115
116#endif /* __KERNEL__ */
117
118#endif /* !(_SPARC_NS87303_H) */
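For illustration only: a sketch of how a driver might use ns87303_modify() to switch the SuperIO parallel port into IEEE 1284 EPP mode. The configuration-register base address 'port' is hypothetical and board specific.

static int example_enable_epp(unsigned long port)
{
        /* Clear nothing, set the EPP enable bits in the Printer Control
         * Register; reserved bits are masked off inside ns87303_modify().
         */
        return ns87303_modify(port, PCR, 0, PCR_EPP_ENABLE | PCR_EPP_IEEE);
}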
diff --git a/include/asm-sparc/of_platform.h b/include/asm-sparc/of_platform.h
index 38334351c36b..851eb84d737e 100644
--- a/include/asm-sparc/of_platform.h
+++ b/include/asm-sparc/of_platform.h
@@ -1,24 +1,8 @@
-#ifndef _ASM_SPARC_OF_PLATFORM_H
-#define _ASM_SPARC_OF_PLATFORM_H
-/*
- * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp.
- * <benh@kernel.crashing.org>
- * Modified for Sparc by merging parts of asm-sparc/of_device.h
- * by Stephen Rothwell
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-/* This is just here during the transition */
-#include <linux/of_platform.h>
-
-extern struct bus_type ebus_bus_type;
-extern struct bus_type sbus_bus_type;
-
-#define of_bus_type of_platform_bus_type /* for compatibility */
-
-#endif /* _ASM_SPARC_OF_PLATFORM_H */
+#ifndef ___ASM_SPARC_OF_PLATFORM_H
+#define ___ASM_SPARC_OF_PLATFORM_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm-sparc/of_platform_64.h>
+#else
+#include <asm-sparc/of_platform_32.h>
+#endif
+#endif
diff --git a/include/asm-sparc/of_platform_32.h b/include/asm-sparc/of_platform_32.h
new file mode 100644
index 000000000000..38334351c36b
--- /dev/null
+++ b/include/asm-sparc/of_platform_32.h
@@ -0,0 +1,24 @@
1#ifndef _ASM_SPARC_OF_PLATFORM_H
2#define _ASM_SPARC_OF_PLATFORM_H
3/*
4 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp.
5 * <benh@kernel.crashing.org>
6 * Modified for Sparc by merging parts of asm-sparc/of_device.h
7 * by Stephen Rothwell
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 *
14 */
15
16/* This is just here during the transition */
17#include <linux/of_platform.h>
18
19extern struct bus_type ebus_bus_type;
20extern struct bus_type sbus_bus_type;
21
22#define of_bus_type of_platform_bus_type /* for compatibility */
23
24#endif /* _ASM_SPARC_OF_PLATFORM_H */
diff --git a/include/asm-sparc/of_platform_64.h b/include/asm-sparc/of_platform_64.h
new file mode 100644
index 000000000000..78aa032b674c
--- /dev/null
+++ b/include/asm-sparc/of_platform_64.h
@@ -0,0 +1,25 @@
1#ifndef _ASM_SPARC64_OF_PLATFORM_H
2#define _ASM_SPARC64_OF_PLATFORM_H
3/*
4 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp.
5 * <benh@kernel.crashing.org>
6 * Modified for Sparc by merging parts of asm-sparc/of_device.h
7 * by Stephen Rothwell
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 *
14 */
15
16/* This is just here during the transition */
17#include <linux/of_platform.h>
18
19extern struct bus_type isa_bus_type;
20extern struct bus_type ebus_bus_type;
21extern struct bus_type sbus_bus_type;
22
23#define of_bus_type of_platform_bus_type /* for compatibility */
24
25#endif /* _ASM_SPARC64_OF_PLATFORM_H */
diff --git a/include/asm-sparc/openprom.h b/include/asm-sparc/openprom.h
index ed4b6bc2b102..8c349f061994 100644
--- a/include/asm-sparc/openprom.h
+++ b/include/asm-sparc/openprom.h
@@ -1,257 +1,8 @@
-#ifndef __SPARC_OPENPROM_H
-#define __SPARC_OPENPROM_H
-
-/* openprom.h: Prom structures and defines for access to the OPENBOOT
- * prom routines and data areas.
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- */
+#ifndef ___ASM_SPARC_OPENPROM_H
+#define ___ASM_SPARC_OPENPROM_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm-sparc/openprom_64.h>
+#else
+#include <asm-sparc/openprom_32.h>
+#endif
+#endif
9
10#include <asm/vaddrs.h>
11
12/* Empirical constants... */
13#define LINUX_OPPROM_MAGIC 0x10010407
14
15#ifndef __ASSEMBLY__
16/* V0 prom device operations. */
17struct linux_dev_v0_funcs {
18 int (*v0_devopen)(char *device_str);
19 int (*v0_devclose)(int dev_desc);
20 int (*v0_rdblkdev)(int dev_desc, int num_blks, int blk_st, char *buf);
21 int (*v0_wrblkdev)(int dev_desc, int num_blks, int blk_st, char *buf);
22 int (*v0_wrnetdev)(int dev_desc, int num_bytes, char *buf);
23 int (*v0_rdnetdev)(int dev_desc, int num_bytes, char *buf);
24 int (*v0_rdchardev)(int dev_desc, int num_bytes, int dummy, char *buf);
25 int (*v0_wrchardev)(int dev_desc, int num_bytes, int dummy, char *buf);
26 int (*v0_seekdev)(int dev_desc, long logical_offst, int from);
27};
28
29/* V2 and later prom device operations. */
30struct linux_dev_v2_funcs {
31 int (*v2_inst2pkg)(int d); /* Convert ihandle to phandle */
32 char * (*v2_dumb_mem_alloc)(char *va, unsigned sz);
33 void (*v2_dumb_mem_free)(char *va, unsigned sz);
34
35 /* To map devices into virtual I/O space. */
36 char * (*v2_dumb_mmap)(char *virta, int which_io, unsigned paddr, unsigned sz);
37 void (*v2_dumb_munmap)(char *virta, unsigned size);
38
39 int (*v2_dev_open)(char *devpath);
40 void (*v2_dev_close)(int d);
41 int (*v2_dev_read)(int d, char *buf, int nbytes);
42 int (*v2_dev_write)(int d, char *buf, int nbytes);
43 int (*v2_dev_seek)(int d, int hi, int lo);
44
45 /* Never issued (multistage load support) */
46 void (*v2_wheee2)(void);
47 void (*v2_wheee3)(void);
48};
49
50struct linux_mlist_v0 {
51 struct linux_mlist_v0 *theres_more;
52 char *start_adr;
53 unsigned num_bytes;
54};
55
56struct linux_mem_v0 {
57 struct linux_mlist_v0 **v0_totphys;
58 struct linux_mlist_v0 **v0_prommap;
59 struct linux_mlist_v0 **v0_available; /* What we can use */
60};
61
62/* Arguments sent to the kernel from the boot prompt. */
63struct linux_arguments_v0 {
64 char *argv[8];
65 char args[100];
66 char boot_dev[2];
67 int boot_dev_ctrl;
68 int boot_dev_unit;
69 int dev_partition;
70 char *kernel_file_name;
71 void *aieee1; /* XXX */
72};
73
74/* V2 and up boot things. */
75struct linux_bootargs_v2 {
76 char **bootpath;
77 char **bootargs;
78 int *fd_stdin;
79 int *fd_stdout;
80};
81
82/* The top level PROM vector. */
83struct linux_romvec {
84 /* Version numbers. */
85 unsigned int pv_magic_cookie;
86 unsigned int pv_romvers;
87 unsigned int pv_plugin_revision;
88 unsigned int pv_printrev;
89
90 /* Version 0 memory descriptors. */
91 struct linux_mem_v0 pv_v0mem;
92
93 /* Node operations. */
94 struct linux_nodeops *pv_nodeops;
95
96 char **pv_bootstr;
97 struct linux_dev_v0_funcs pv_v0devops;
98
99 char *pv_stdin;
100 char *pv_stdout;
101#define PROMDEV_KBD 0 /* input from keyboard */
102#define PROMDEV_SCREEN 0 /* output to screen */
103#define PROMDEV_TTYA 1 /* in/out to ttya */
104#define PROMDEV_TTYB 2 /* in/out to ttyb */
105
106 /* Blocking getchar/putchar. NOT REENTRANT! (grr) */
107 int (*pv_getchar)(void);
108 void (*pv_putchar)(int ch);
109
110 /* Non-blocking variants. */
111 int (*pv_nbgetchar)(void);
112 int (*pv_nbputchar)(int ch);
113
114 void (*pv_putstr)(char *str, int len);
115
116 /* Miscellany. */
117 void (*pv_reboot)(char *bootstr);
118 void (*pv_printf)(__const__ char *fmt, ...);
119 void (*pv_abort)(void);
120 __volatile__ int *pv_ticks;
121 void (*pv_halt)(void);
122 void (**pv_synchook)(void);
123
124 /* Evaluate a forth string, note different proto for V0 and V2->up. */
125 union {
126 void (*v0_eval)(int len, char *str);
127 void (*v2_eval)(char *str);
128 } pv_fortheval;
129
130 struct linux_arguments_v0 **pv_v0bootargs;
131
132 /* Get ether address. */
133 unsigned int (*pv_enaddr)(int d, char *enaddr);
134
135 struct linux_bootargs_v2 pv_v2bootargs;
136 struct linux_dev_v2_funcs pv_v2devops;
137
138 int filler[15];
139
140 /* This one is sun4c/sun4 only. */
141 void (*pv_setctxt)(int ctxt, char *va, int pmeg);
142
143 /* Prom version 3 Multiprocessor routines. This stuff is crazy.
144 * No joke. Calling these when there is only one cpu probably
145 * crashes the machine, have to test this. :-)
146 */
147
148 /* v3_cpustart() will start the cpu 'whichcpu' in mmu-context
149 * 'thiscontext' executing at address 'prog_counter'
150 */
151 int (*v3_cpustart)(unsigned int whichcpu, int ctxtbl_ptr,
152 int thiscontext, char *prog_counter);
153
154 /* v3_cpustop() will cause cpu 'whichcpu' to stop executing
155 * until a resume cpu call is made.
156 */
157 int (*v3_cpustop)(unsigned int whichcpu);
158
159 /* v3_cpuidle() will idle cpu 'whichcpu' until a stop or
160 * resume cpu call is made.
161 */
162 int (*v3_cpuidle)(unsigned int whichcpu);
163
164 /* v3_cpuresume() will resume processor 'whichcpu' executing
165 * starting with whatever 'pc' and 'npc' were left at the
166 * last 'idle' or 'stop' call.
167 */
168 int (*v3_cpuresume)(unsigned int whichcpu);
169};
170
171/* Routines for traversing the prom device tree. */
172struct linux_nodeops {
173 int (*no_nextnode)(int node);
174 int (*no_child)(int node);
175 int (*no_proplen)(int node, char *name);
176 int (*no_getprop)(int node, char *name, char *val);
177 int (*no_setprop)(int node, char *name, char *val, int len);
178 char * (*no_nextprop)(int node, char *name);
179};
180
181/* More fun PROM structures for device probing. */
182#define PROMREG_MAX 16
183#define PROMVADDR_MAX 16
184#define PROMINTR_MAX 15
185
186struct linux_prom_registers {
187 unsigned int which_io; /* is this in OBIO space? */
188 unsigned int phys_addr; /* The physical address of this register */
189 unsigned int reg_size; /* How many bytes does this register take up? */
190};
191
192struct linux_prom_irqs {
193 int pri; /* IRQ priority */
194 int vector; /* This is foobar, what does it do? */
195};
196
197/* Element of the "ranges" vector */
198struct linux_prom_ranges {
199 unsigned int ot_child_space;
200 unsigned int ot_child_base; /* Bus feels this */
201 unsigned int ot_parent_space;
202 unsigned int ot_parent_base; /* CPU looks from here */
203 unsigned int or_size;
204};
205
206/* Ranges and reg properties are a bit different for PCI. */
207struct linux_prom_pci_registers {
208 /*
209 * We don't know what information this field contains.
210 * We guess the PCI device function is in bits 15:8.
211 * So, ...
212 */
213 unsigned int which_io; /* Let it be which_io */
214
215 unsigned int phys_hi;
216 unsigned int phys_lo;
217
218 unsigned int size_hi;
219 unsigned int size_lo;
220};
221
222struct linux_prom_pci_ranges {
223 unsigned int child_phys_hi; /* Only certain bits are encoded here. */
224 unsigned int child_phys_mid;
225 unsigned int child_phys_lo;
226
227 unsigned int parent_phys_hi;
228 unsigned int parent_phys_lo;
229
230 unsigned int size_hi;
231 unsigned int size_lo;
232};
233
234struct linux_prom_pci_assigned_addresses {
235 unsigned int which_io;
236
237 unsigned int phys_hi;
238 unsigned int phys_lo;
239
240 unsigned int size_hi;
241 unsigned int size_lo;
242};
243
244struct linux_prom_ebus_ranges {
245 unsigned int child_phys_hi;
246 unsigned int child_phys_lo;
247
248 unsigned int parent_phys_hi;
249 unsigned int parent_phys_mid;
250 unsigned int parent_phys_lo;
251
252 unsigned int size;
253};
254
255#endif /* !(__ASSEMBLY__) */
256
257#endif /* !(__SPARC_OPENPROM_H) */
diff --git a/include/asm-sparc/openprom_32.h b/include/asm-sparc/openprom_32.h
new file mode 100644
index 000000000000..8b1649f29ed9
--- /dev/null
+++ b/include/asm-sparc/openprom_32.h
@@ -0,0 +1,255 @@
1#ifndef __SPARC_OPENPROM_H
2#define __SPARC_OPENPROM_H
3
4/* openprom.h: Prom structures and defines for access to the OPENBOOT
5 * prom routines and data areas.
6 *
7 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
8 */
9
10/* Empirical constants... */
11#define LINUX_OPPROM_MAGIC 0x10010407
12
13#ifndef __ASSEMBLY__
14/* V0 prom device operations. */
15struct linux_dev_v0_funcs {
16 int (*v0_devopen)(char *device_str);
17 int (*v0_devclose)(int dev_desc);
18 int (*v0_rdblkdev)(int dev_desc, int num_blks, int blk_st, char *buf);
19 int (*v0_wrblkdev)(int dev_desc, int num_blks, int blk_st, char *buf);
20 int (*v0_wrnetdev)(int dev_desc, int num_bytes, char *buf);
21 int (*v0_rdnetdev)(int dev_desc, int num_bytes, char *buf);
22 int (*v0_rdchardev)(int dev_desc, int num_bytes, int dummy, char *buf);
23 int (*v0_wrchardev)(int dev_desc, int num_bytes, int dummy, char *buf);
24 int (*v0_seekdev)(int dev_desc, long logical_offst, int from);
25};
26
27/* V2 and later prom device operations. */
28struct linux_dev_v2_funcs {
29 int (*v2_inst2pkg)(int d); /* Convert ihandle to phandle */
30 char * (*v2_dumb_mem_alloc)(char *va, unsigned sz);
31 void (*v2_dumb_mem_free)(char *va, unsigned sz);
32
33 /* To map devices into virtual I/O space. */
34 char * (*v2_dumb_mmap)(char *virta, int which_io, unsigned paddr, unsigned sz);
35 void (*v2_dumb_munmap)(char *virta, unsigned size);
36
37 int (*v2_dev_open)(char *devpath);
38 void (*v2_dev_close)(int d);
39 int (*v2_dev_read)(int d, char *buf, int nbytes);
40 int (*v2_dev_write)(int d, char *buf, int nbytes);
41 int (*v2_dev_seek)(int d, int hi, int lo);
42
43 /* Never issued (multistage load support) */
44 void (*v2_wheee2)(void);
45 void (*v2_wheee3)(void);
46};
47
48struct linux_mlist_v0 {
49 struct linux_mlist_v0 *theres_more;
50 char *start_adr;
51 unsigned num_bytes;
52};
53
54struct linux_mem_v0 {
55 struct linux_mlist_v0 **v0_totphys;
56 struct linux_mlist_v0 **v0_prommap;
57 struct linux_mlist_v0 **v0_available; /* What we can use */
58};
59
60/* Arguments sent to the kernel from the boot prompt. */
61struct linux_arguments_v0 {
62 char *argv[8];
63 char args[100];
64 char boot_dev[2];
65 int boot_dev_ctrl;
66 int boot_dev_unit;
67 int dev_partition;
68 char *kernel_file_name;
69 void *aieee1; /* XXX */
70};
71
72/* V2 and up boot things. */
73struct linux_bootargs_v2 {
74 char **bootpath;
75 char **bootargs;
76 int *fd_stdin;
77 int *fd_stdout;
78};
79
80/* The top level PROM vector. */
81struct linux_romvec {
82 /* Version numbers. */
83 unsigned int pv_magic_cookie;
84 unsigned int pv_romvers;
85 unsigned int pv_plugin_revision;
86 unsigned int pv_printrev;
87
88 /* Version 0 memory descriptors. */
89 struct linux_mem_v0 pv_v0mem;
90
91 /* Node operations. */
92 struct linux_nodeops *pv_nodeops;
93
94 char **pv_bootstr;
95 struct linux_dev_v0_funcs pv_v0devops;
96
97 char *pv_stdin;
98 char *pv_stdout;
99#define PROMDEV_KBD 0 /* input from keyboard */
100#define PROMDEV_SCREEN 0 /* output to screen */
101#define PROMDEV_TTYA 1 /* in/out to ttya */
102#define PROMDEV_TTYB 2 /* in/out to ttyb */
103
104 /* Blocking getchar/putchar. NOT REENTRANT! (grr) */
105 int (*pv_getchar)(void);
106 void (*pv_putchar)(int ch);
107
108 /* Non-blocking variants. */
109 int (*pv_nbgetchar)(void);
110 int (*pv_nbputchar)(int ch);
111
112 void (*pv_putstr)(char *str, int len);
113
114 /* Miscellany. */
115 void (*pv_reboot)(char *bootstr);
116 void (*pv_printf)(__const__ char *fmt, ...);
117 void (*pv_abort)(void);
118 __volatile__ int *pv_ticks;
119 void (*pv_halt)(void);
120 void (**pv_synchook)(void);
121
122 /* Evaluate a forth string, note different proto for V0 and V2->up. */
123 union {
124 void (*v0_eval)(int len, char *str);
125 void (*v2_eval)(char *str);
126 } pv_fortheval;
127
128 struct linux_arguments_v0 **pv_v0bootargs;
129
130 /* Get ether address. */
131 unsigned int (*pv_enaddr)(int d, char *enaddr);
132
133 struct linux_bootargs_v2 pv_v2bootargs;
134 struct linux_dev_v2_funcs pv_v2devops;
135
136 int filler[15];
137
138 /* This one is sun4c/sun4 only. */
139 void (*pv_setctxt)(int ctxt, char *va, int pmeg);
140
141 /* Prom version 3 Multiprocessor routines. This stuff is crazy.
142 * No joke. Calling these when there is only one cpu probably
143 * crashes the machine, have to test this. :-)
144 */
145
146 /* v3_cpustart() will start the cpu 'whichcpu' in mmu-context
147 * 'thiscontext' executing at address 'prog_counter'
148 */
149 int (*v3_cpustart)(unsigned int whichcpu, int ctxtbl_ptr,
150 int thiscontext, char *prog_counter);
151
152 /* v3_cpustop() will cause cpu 'whichcpu' to stop executing
153 * until a resume cpu call is made.
154 */
155 int (*v3_cpustop)(unsigned int whichcpu);
156
157 /* v3_cpuidle() will idle cpu 'whichcpu' until a stop or
158 * resume cpu call is made.
159 */
160 int (*v3_cpuidle)(unsigned int whichcpu);
161
162 /* v3_cpuresume() will resume processor 'whichcpu' executing
163 * starting with whatever 'pc' and 'npc' were left at the
164 * last 'idle' or 'stop' call.
165 */
166 int (*v3_cpuresume)(unsigned int whichcpu);
167};
168
169/* Routines for traversing the prom device tree. */
170struct linux_nodeops {
171 int (*no_nextnode)(int node);
172 int (*no_child)(int node);
173 int (*no_proplen)(int node, char *name);
174 int (*no_getprop)(int node, char *name, char *val);
175 int (*no_setprop)(int node, char *name, char *val, int len);
176 char * (*no_nextprop)(int node, char *name);
177};
178
179/* More fun PROM structures for device probing. */
180#define PROMREG_MAX 16
181#define PROMVADDR_MAX 16
182#define PROMINTR_MAX 15
183
184struct linux_prom_registers {
185 unsigned int which_io; /* is this in OBIO space? */
186 unsigned int phys_addr; /* The physical address of this register */
187 unsigned int reg_size; /* How many bytes does this register take up? */
188};
189
190struct linux_prom_irqs {
191 int pri; /* IRQ priority */
192 int vector; /* This is foobar, what does it do? */
193};
194
195/* Element of the "ranges" vector */
196struct linux_prom_ranges {
197 unsigned int ot_child_space;
198 unsigned int ot_child_base; /* Bus feels this */
199 unsigned int ot_parent_space;
200 unsigned int ot_parent_base; /* CPU looks from here */
201 unsigned int or_size;
202};
203
204/* Ranges and reg properties are a bit different for PCI. */
205struct linux_prom_pci_registers {
206 /*
207 * We don't know what information this field contains.
208 * We guess the PCI device function is in bits 15:8.
209 * So, ...
210 */
211 unsigned int which_io; /* Let it be which_io */
212
213 unsigned int phys_hi;
214 unsigned int phys_lo;
215
216 unsigned int size_hi;
217 unsigned int size_lo;
218};
219
220struct linux_prom_pci_ranges {
221 unsigned int child_phys_hi; /* Only certain bits are encoded here. */
222 unsigned int child_phys_mid;
223 unsigned int child_phys_lo;
224
225 unsigned int parent_phys_hi;
226 unsigned int parent_phys_lo;
227
228 unsigned int size_hi;
229 unsigned int size_lo;
230};
231
232struct linux_prom_pci_assigned_addresses {
233 unsigned int which_io;
234
235 unsigned int phys_hi;
236 unsigned int phys_lo;
237
238 unsigned int size_hi;
239 unsigned int size_lo;
240};
241
242struct linux_prom_ebus_ranges {
243 unsigned int child_phys_hi;
244 unsigned int child_phys_lo;
245
246 unsigned int parent_phys_hi;
247 unsigned int parent_phys_mid;
248 unsigned int parent_phys_lo;
249
250 unsigned int size;
251};
252
253#endif /* !(__ASSEMBLY__) */
254
255#endif /* !(__SPARC_OPENPROM_H) */
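For illustration only: "applying ranges" means translating a child "reg" entry into the parent bus's address space. The sketch below shows the idea behind the prom library's range-application helpers; the function name is local to this note, and the real code simply matches on the space identifier and rebases the address.

static void example_apply_ranges(struct linux_prom_registers *regs, int nregs,
                                 struct linux_prom_ranges *range, int nranges)
{
        int r, i;

        for (r = 0; r < nregs; r++) {
                for (i = 0; i < nranges; i++) {
                        if (regs[r].which_io != range[i].ot_child_space)
                                continue;
                        /* Rebase the child offset into the parent space. */
                        regs[r].which_io   = range[i].ot_parent_space;
                        regs[r].phys_addr -= range[i].ot_child_base;
                        regs[r].phys_addr += range[i].ot_parent_base;
                        break;
                }
        }
}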
diff --git a/include/asm-sparc/openprom_64.h b/include/asm-sparc/openprom_64.h
new file mode 100644
index 000000000000..b69e4a8c9170
--- /dev/null
+++ b/include/asm-sparc/openprom_64.h
@@ -0,0 +1,280 @@
1#ifndef __SPARC64_OPENPROM_H
2#define __SPARC64_OPENPROM_H
3
4/* openprom.h: Prom structures and defines for access to the OPENBOOT
5 * prom routines and data areas.
6 *
7 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
8 */
9
10#ifndef __ASSEMBLY__
11/* V0 prom device operations. */
12struct linux_dev_v0_funcs {
13 int (*v0_devopen)(char *device_str);
14 int (*v0_devclose)(int dev_desc);
15 int (*v0_rdblkdev)(int dev_desc, int num_blks, int blk_st, char *buf);
16 int (*v0_wrblkdev)(int dev_desc, int num_blks, int blk_st, char *buf);
17 int (*v0_wrnetdev)(int dev_desc, int num_bytes, char *buf);
18 int (*v0_rdnetdev)(int dev_desc, int num_bytes, char *buf);
19 int (*v0_rdchardev)(int dev_desc, int num_bytes, int dummy, char *buf);
20 int (*v0_wrchardev)(int dev_desc, int num_bytes, int dummy, char *buf);
21 int (*v0_seekdev)(int dev_desc, long logical_offst, int from);
22};
23
24/* V2 and later prom device operations. */
25struct linux_dev_v2_funcs {
26 int (*v2_inst2pkg)(int d); /* Convert ihandle to phandle */
27 char * (*v2_dumb_mem_alloc)(char *va, unsigned sz);
28 void (*v2_dumb_mem_free)(char *va, unsigned sz);
29
30 /* To map devices into virtual I/O space. */
31 char * (*v2_dumb_mmap)(char *virta, int which_io, unsigned paddr, unsigned sz);
32 void (*v2_dumb_munmap)(char *virta, unsigned size);
33
34 int (*v2_dev_open)(char *devpath);
35 void (*v2_dev_close)(int d);
36 int (*v2_dev_read)(int d, char *buf, int nbytes);
37 int (*v2_dev_write)(int d, char *buf, int nbytes);
38 int (*v2_dev_seek)(int d, int hi, int lo);
39
40 /* Never issued (multistage load support) */
41 void (*v2_wheee2)(void);
42 void (*v2_wheee3)(void);
43};
44
45struct linux_mlist_v0 {
46 struct linux_mlist_v0 *theres_more;
47 unsigned start_adr;
48 unsigned num_bytes;
49};
50
51struct linux_mem_v0 {
52 struct linux_mlist_v0 **v0_totphys;
53 struct linux_mlist_v0 **v0_prommap;
54 struct linux_mlist_v0 **v0_available; /* What we can use */
55};
56
57/* Arguments sent to the kernel from the boot prompt. */
58struct linux_arguments_v0 {
59 char *argv[8];
60 char args[100];
61 char boot_dev[2];
62 int boot_dev_ctrl;
63 int boot_dev_unit;
64 int dev_partition;
65 char *kernel_file_name;
66 void *aieee1; /* XXX */
67};
68
69/* V2 and up boot things. */
70struct linux_bootargs_v2 {
71 char **bootpath;
72 char **bootargs;
73 int *fd_stdin;
74 int *fd_stdout;
75};
76
77/* The top level PROM vector. */
78struct linux_romvec {
79 /* Version numbers. */
80 unsigned int pv_magic_cookie;
81 unsigned int pv_romvers;
82 unsigned int pv_plugin_revision;
83 unsigned int pv_printrev;
84
85 /* Version 0 memory descriptors. */
86 struct linux_mem_v0 pv_v0mem;
87
88 /* Node operations. */
89 struct linux_nodeops *pv_nodeops;
90
91 char **pv_bootstr;
92 struct linux_dev_v0_funcs pv_v0devops;
93
94 char *pv_stdin;
95 char *pv_stdout;
96#define PROMDEV_KBD 0 /* input from keyboard */
97#define PROMDEV_SCREEN 0 /* output to screen */
98#define PROMDEV_TTYA 1 /* in/out to ttya */
99#define PROMDEV_TTYB 2 /* in/out to ttyb */
100
101 /* Blocking getchar/putchar. NOT REENTRANT! (grr) */
102 int (*pv_getchar)(void);
103 void (*pv_putchar)(int ch);
104
105 /* Non-blocking variants. */
106 int (*pv_nbgetchar)(void);
107 int (*pv_nbputchar)(int ch);
108
109 void (*pv_putstr)(char *str, int len);
110
111 /* Miscellany. */
112 void (*pv_reboot)(char *bootstr);
113 void (*pv_printf)(__const__ char *fmt, ...);
114 void (*pv_abort)(void);
115 __volatile__ int *pv_ticks;
116 void (*pv_halt)(void);
117 void (**pv_synchook)(void);
118
119 /* Evaluate a forth string, note different proto for V0 and V2->up. */
120 union {
121 void (*v0_eval)(int len, char *str);
122 void (*v2_eval)(char *str);
123 } pv_fortheval;
124
125 struct linux_arguments_v0 **pv_v0bootargs;
126
127 /* Get ether address. */
128 unsigned int (*pv_enaddr)(int d, char *enaddr);
129
130 struct linux_bootargs_v2 pv_v2bootargs;
131 struct linux_dev_v2_funcs pv_v2devops;
132
133 int filler[15];
134
135 /* This one is sun4c/sun4 only. */
136 void (*pv_setctxt)(int ctxt, char *va, int pmeg);
137
138 /* Prom version 3 Multiprocessor routines. This stuff is crazy.
139 * No joke. Calling these when there is only one cpu probably
140 * crashes the machine, have to test this. :-)
141 */
142
143 /* v3_cpustart() will start the cpu 'whichcpu' in mmu-context
144 * 'thiscontext' executing at address 'prog_counter'
145 */
146 int (*v3_cpustart)(unsigned int whichcpu, int ctxtbl_ptr,
147 int thiscontext, char *prog_counter);
148
149 /* v3_cpustop() will cause cpu 'whichcpu' to stop executing
150 * until a resume cpu call is made.
151 */
152 int (*v3_cpustop)(unsigned int whichcpu);
153
154 /* v3_cpuidle() will idle cpu 'whichcpu' until a stop or
155 * resume cpu call is made.
156 */
157 int (*v3_cpuidle)(unsigned int whichcpu);
158
159 /* v3_cpuresume() will resume processor 'whichcpu' executing
160 * starting with whatever 'pc' and 'npc' were left at the
161 * last 'idle' or 'stop' call.
162 */
163 int (*v3_cpuresume)(unsigned int whichcpu);
164};
165
166/* Routines for traversing the prom device tree. */
167struct linux_nodeops {
168 int (*no_nextnode)(int node);
169 int (*no_child)(int node);
170 int (*no_proplen)(int node, char *name);
171 int (*no_getprop)(int node, char *name, char *val);
172 int (*no_setprop)(int node, char *name, char *val, int len);
173 char * (*no_nextprop)(int node, char *name);
174};
175
176/* More fun PROM structures for device probing. */
177#define PROMREG_MAX 24
178#define PROMVADDR_MAX 16
179#define PROMINTR_MAX 32
180
181struct linux_prom_registers {
182 unsigned which_io; /* hi part of physical address */
183 unsigned phys_addr; /* The physical address of this register */
184 int reg_size; /* How many bytes does this register take up? */
185};
186
187struct linux_prom64_registers {
188 unsigned long phys_addr;
189 unsigned long reg_size;
190};
191
192struct linux_prom_irqs {
193 int pri; /* IRQ priority */
194 int vector; /* This is foobar, what does it do? */
195};
196
197/* Element of the "ranges" vector */
198struct linux_prom_ranges {
199 unsigned int ot_child_space;
200 unsigned int ot_child_base; /* Bus feels this */
201 unsigned int ot_parent_space;
202 unsigned int ot_parent_base; /* CPU looks from here */
203 unsigned int or_size;
204};
205
206struct linux_prom64_ranges {
207 unsigned long ot_child_base; /* Bus feels this */
208 unsigned long ot_parent_base; /* CPU looks from here */
209 unsigned long or_size;
210};
211
212/* Ranges and reg properties are a bit different for PCI. */
213struct linux_prom_pci_registers {
214 unsigned int phys_hi;
215 unsigned int phys_mid;
216 unsigned int phys_lo;
217
218 unsigned int size_hi;
219 unsigned int size_lo;
220};
221
222struct linux_prom_pci_ranges {
223 unsigned int child_phys_hi; /* Only certain bits are encoded here. */
224 unsigned int child_phys_mid;
225 unsigned int child_phys_lo;
226
227 unsigned int parent_phys_hi;
228 unsigned int parent_phys_lo;
229
230 unsigned int size_hi;
231 unsigned int size_lo;
232};
233
234struct linux_prom_pci_intmap {
235 unsigned int phys_hi;
236 unsigned int phys_mid;
237 unsigned int phys_lo;
238
239 unsigned int interrupt;
240
241 int cnode;
242 unsigned int cinterrupt;
243};
244
245struct linux_prom_pci_intmask {
246 unsigned int phys_hi;
247 unsigned int phys_mid;
248 unsigned int phys_lo;
249 unsigned int interrupt;
250};
251
252struct linux_prom_ebus_ranges {
253 unsigned int child_phys_hi;
254 unsigned int child_phys_lo;
255
256 unsigned int parent_phys_hi;
257 unsigned int parent_phys_mid;
258 unsigned int parent_phys_lo;
259
260 unsigned int size;
261};
262
263struct linux_prom_ebus_intmap {
264 unsigned int phys_hi;
265 unsigned int phys_lo;
266
267 unsigned int interrupt;
268
269 int cnode;
270 unsigned int cinterrupt;
271};
272
273struct linux_prom_ebus_intmask {
274 unsigned int phys_hi;
275 unsigned int phys_lo;
276 unsigned int interrupt;
277};
278#endif /* !(__ASSEMBLY__) */
279
280#endif /* !(__SPARC64_OPENPROM_H) */
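For illustration only: a sketch of how the interrupt-map structures above are typically consulted. The child's unit address and interrupt are masked with the interrupt-map mask and compared against each map entry; a match yields the interrupt parent node and its interrupt number. This is simplified from what the PCI controller code does, and the function and its arguments are invented for this note.

static int example_map_irq(struct linux_prom_pci_registers *pregs,
                           unsigned int intno,
                           struct linux_prom_pci_intmap *imap, int num_imap,
                           struct linux_prom_pci_intmask *imask,
                           unsigned int *cinterrupt)
{
        int i;

        for (i = 0; i < num_imap; i++) {
                if (imap[i].phys_hi  == (pregs->phys_hi  & imask->phys_hi) &&
                    imap[i].phys_mid == (pregs->phys_mid & imask->phys_mid) &&
                    imap[i].phys_lo  == (pregs->phys_lo  & imask->phys_lo) &&
                    imap[i].interrupt == (intno & imask->interrupt)) {
                        *cinterrupt = imap[i].cinterrupt;
                        return imap[i].cnode;   /* interrupt parent node */
                }
        }
        return -1;      /* no matching map entry */
}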
diff --git a/include/asm-sparc/oplib.h b/include/asm-sparc/oplib.h
index 61c3ca6a8ac3..e88d7c04a292 100644
--- a/include/asm-sparc/oplib.h
+++ b/include/asm-sparc/oplib.h
@@ -1,273 +1,8 @@
-/*
- * oplib.h: Describes the interface and available routines in the
- * Linux Prom library.
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- */
-
+#ifndef ___ASM_SPARC_OPLIB_H
+#define ___ASM_SPARC_OPLIB_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm-sparc/oplib_64.h>
+#else
+#include <asm-sparc/oplib_32.h>
+#endif
+#endif
8#ifndef __SPARC_OPLIB_H
9#define __SPARC_OPLIB_H
10
11#include <asm/openprom.h>
12#include <linux/spinlock.h>
13#include <linux/compiler.h>
14
15/* The master romvec pointer... */
16extern struct linux_romvec *romvec;
17
18/* Enumeration to describe the prom major version we have detected. */
19enum prom_major_version {
20 PROM_V0, /* Original sun4c V0 prom */
21 PROM_V2, /* sun4c and early sun4m V2 prom */
22 PROM_V3, /* sun4m and later, up to sun4d/sun4e machines V3 */
23 PROM_P1275, /* IEEE compliant ISA based Sun PROM, only sun4u */
24 PROM_AP1000, /* actually no prom at all */
25 PROM_SUN4, /* Old sun4 proms are totally different, but we'll shoehorn it to make it fit */
26};
27
28extern enum prom_major_version prom_vers;
29/* Revision, and firmware revision. */
30extern unsigned int prom_rev, prom_prev;
31
32/* Root node of the prom device tree, this stays constant after
33 * initialization is complete.
34 */
35extern int prom_root_node;
36
37/* Pointer to prom structure containing the device tree traversal
38 * and usage utility functions. Only prom-lib should use these,
39 * users use the interface defined by the library only!
40 */
41extern struct linux_nodeops *prom_nodeops;
42
43/* The functions... */
44
45/* You must call prom_init() before using any of the library services,
46 * preferably as early as possible. Pass it the romvec pointer.
47 */
48extern void prom_init(struct linux_romvec *rom_ptr);
49
50/* Boot argument acquisition, returns the boot command line string. */
51extern char *prom_getbootargs(void);
52
53/* Device utilities. */
54
55/* Map and unmap devices in IO space at virtual addresses. Note that the
56 * virtual address you pass is a request and the prom may put your mappings
57 * somewhere else, so check your return value as that is where your new
58 * mappings really are!
59 *
60 * Another note, these are only available on V2 or higher proms!
61 */
62extern char *prom_mapio(char *virt_hint, int io_space, unsigned int phys_addr, unsigned int num_bytes);
63extern void prom_unmapio(char *virt_addr, unsigned int num_bytes);
64
65/* Device operations. */
66
67/* Open the device described by the passed string. Note, that the format
68 * of the string is different on V0 vs. V2->higher proms. The caller must
69 * know what he/she is doing! Returns the device descriptor, an int.
70 */
71extern int prom_devopen(char *device_string);
72
73/* Close a previously opened device described by the passed integer
74 * descriptor.
75 */
76extern int prom_devclose(int device_handle);
77
78/* Do a seek operation on the device described by the passed integer
79 * descriptor.
80 */
81extern void prom_seek(int device_handle, unsigned int seek_hival,
82 unsigned int seek_lowval);
83
84/* Miscellaneous routines, don't really fit in any category per se. */
85
86/* Reboot the machine with the command line passed. */
87extern void prom_reboot(char *boot_command);
88
89/* Evaluate the forth string passed. */
90extern void prom_feval(char *forth_string);
91
92/* Enter the prom, with possibility of continuation with the 'go'
93 * command in newer proms.
94 */
95extern void prom_cmdline(void);
96
97/* Enter the prom, with no chance of continuation for the stand-alone
98 * which calls this.
99 */
100extern void prom_halt(void) __attribute__ ((noreturn));
101
102/* Set the PROM 'sync' callback function to the passed function pointer.
103 * When the user gives the 'sync' command at the prom prompt while the
104 * kernel is still active, the prom will call this routine.
105 *
106 * XXX The arguments are different on V0 vs. V2->higher proms, grrr! XXX
107 */
108typedef void (*sync_func_t)(void);
109extern void prom_setsync(sync_func_t func_ptr);
110
111/* Acquire the IDPROM of the root node in the prom device tree. This
112 * gets passed a buffer where you would like it stuffed. The return value
113 * is the format type of this idprom or 0xff on error.
114 */
115extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size);
116
117/* Get the prom major version. */
118extern int prom_version(void);
119
120/* Get the prom plugin revision. */
121extern int prom_getrev(void);
122
123/* Get the prom firmware revision. */
124extern int prom_getprev(void);
125
126/* Character operations to/from the console.... */
127
128/* Non-blocking get character from console. */
129extern int prom_nbgetchar(void);
130
131/* Non-blocking put character to console. */
132extern int prom_nbputchar(char character);
133
134/* Blocking get character from console. */
135extern char prom_getchar(void);
136
137/* Blocking put character to console. */
138extern void prom_putchar(char character);
139
140/* Prom's internal routines, don't use in kernel/boot code. */
141extern void prom_printf(char *fmt, ...);
142extern void prom_write(const char *buf, unsigned int len);
143
144/* Multiprocessor operations... */
145
146/* Start the CPU with the given device tree node, context table, and context
147 * at the passed program counter.
148 */
149extern int prom_startcpu(int cpunode, struct linux_prom_registers *context_table,
150 int context, char *program_counter);
151
152/* Stop the CPU with the passed device tree node. */
153extern int prom_stopcpu(int cpunode);
154
155/* Idle the CPU with the passed device tree node. */
156extern int prom_idlecpu(int cpunode);
157
158/* Re-Start the CPU with the passed device tree node. */
159extern int prom_restartcpu(int cpunode);
160
161/* PROM memory allocation facilities... */
162
163/* Allocate a chunk of the indicated size, possibly at the given
164 * virtual address.
165 */
166extern char *prom_alloc(char *virt_hint, unsigned int size);
167
168/* Free a previously allocated chunk. */
169extern void prom_free(char *virt_addr, unsigned int size);
170
171/* Sun4/sun4c specific memory-management startup hook. */
172
173/* Map the passed segment in the given context at the passed
174 * virtual address.
175 */
176extern void prom_putsegment(int context, unsigned long virt_addr,
177 int physical_segment);
178
179
180/* PROM device tree traversal functions... */
181
182#ifdef PROMLIB_INTERNAL
183
184/* Internal version of prom_getchild. */
185extern int __prom_getchild(int parent_node);
186
187/* Internal version of prom_getsibling. */
188extern int __prom_getsibling(int node);
189
190#endif
191
192
193/* Get the child node of the given node, or zero if no child exists. */
194extern int prom_getchild(int parent_node);
195
196/* Get the next sibling node of the given node, or zero if no further
197 * siblings exist.
198 */
199extern int prom_getsibling(int node);
200
201/* Get the length, at the passed node, of the given property type.
202 * Returns -1 on error (ie. no such property at this node).
203 */
204extern int prom_getproplen(int thisnode, char *property);
205
206/* Fetch the requested property using the given buffer. Returns
207 * the number of bytes the prom put into your buffer or -1 on error.
208 */
209extern int __must_check prom_getproperty(int thisnode, char *property,
210 char *prop_buffer, int propbuf_size);
211
212/* Acquire an integer property. */
213extern int prom_getint(int node, char *property);
214
215/* Acquire an integer property, with a default value. */
216extern int prom_getintdefault(int node, char *property, int defval);
217
218/* Acquire a boolean property, 0=FALSE 1=TRUE. */
219extern int prom_getbool(int node, char *prop);
220
221/* Acquire a string property, null string on error. */
222extern void prom_getstring(int node, char *prop, char *buf, int bufsize);
223
224/* Does the passed node have the given "name"? YES=1 NO=0 */
225extern int prom_nodematch(int thisnode, char *name);
226
227/* Search all siblings starting at the passed node for "name" matching
228 * the given string. Returns the node on success, zero on failure.
229 */
230extern int prom_searchsiblings(int node_start, char *name);
231
232/* Return the first property type, as a string, for the given node.
233 * Returns a null string on error.
234 */
235extern char *prom_firstprop(int node, char *buffer);
236
237/* Returns the next property after the passed property for the given
238 * node. Returns null string on failure.
239 */
240extern char *prom_nextprop(int node, char *prev_property, char *buffer);
241
242/* Returns phandle of the path specified */
243extern int prom_finddevice(char *name);
244
245/* Returns 1 if the specified node has given property. */
246extern int prom_node_has_property(int node, char *property);
247
248/* Set the indicated property at the given node with the passed value.
249 * Returns the number of bytes of your value that the prom took.
250 */
251extern int prom_setprop(int node, char *prop_name, char *prop_value,
252 int value_size);
253
254extern int prom_pathtoinode(char *path);
255extern int prom_inst2pkg(int);
256
257/* Dorking with Bus ranges... */
258
259/* Apply promlib probes OBIO ranges to registers. */
260extern void prom_apply_obio_ranges(struct linux_prom_registers *obioregs, int nregs);
261
262/* Apply ranges of any prom node (and optionally parent node as well) to registers. */
263extern void prom_apply_generic_ranges(int node, int parent,
264 struct linux_prom_registers *sbusregs, int nregs);
265
266/* CPU probing helpers. */
267int cpu_find_by_instance(int instance, int *prom_node, int *mid);
268int cpu_find_by_mid(int mid, int *prom_node);
269int cpu_get_hwmid(int prom_node);
270
271extern spinlock_t prom_lock;
272
273#endif /* !(__SPARC_OPLIB_H) */
diff --git a/include/asm-sparc/oplib_32.h b/include/asm-sparc/oplib_32.h
new file mode 100644
index 000000000000..b2631da259e0
--- /dev/null
+++ b/include/asm-sparc/oplib_32.h
@@ -0,0 +1,272 @@
1/*
2 * oplib.h: Describes the interface and available routines in the
3 * Linux Prom library.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#ifndef __SPARC_OPLIB_H
9#define __SPARC_OPLIB_H
10
11#include <asm/openprom.h>
12#include <linux/spinlock.h>
13#include <linux/compiler.h>
14
15/* The master romvec pointer... */
16extern struct linux_romvec *romvec;
17
18/* Enumeration to describe the prom major version we have detected. */
19enum prom_major_version {
20 PROM_V0, /* Original sun4c V0 prom */
21 PROM_V2, /* sun4c and early sun4m V2 prom */
22 PROM_V3, /* sun4m and later, up to sun4d/sun4e machines V3 */
23 PROM_P1275, /* IEEE compliant ISA based Sun PROM, only sun4u */
24 PROM_SUN4, /* Old sun4 proms are totally different, but we'll shoehorn it to make it fit */
25};
26
27extern enum prom_major_version prom_vers;
28/* Revision, and firmware revision. */
29extern unsigned int prom_rev, prom_prev;
30
31/* Root node of the prom device tree, this stays constant after
32 * initialization is complete.
33 */
34extern int prom_root_node;
35
36/* Pointer to prom structure containing the device tree traversal
37 * and usage utility functions. Only prom-lib should use these,
38 * users use the interface defined by the library only!
39 */
40extern struct linux_nodeops *prom_nodeops;
41
42/* The functions... */
43
44/* You must call prom_init() before using any of the library services,
45 * preferably as early as possible. Pass it the romvec pointer.
46 */
47extern void prom_init(struct linux_romvec *rom_ptr);
48
49/* Boot argument acquisition, returns the boot command line string. */
50extern char *prom_getbootargs(void);
51
52/* Device utilities. */
53
54/* Map and unmap devices in IO space at virtual addresses. Note that the
55 * virtual address you pass is a request and the prom may put your mappings
56 * somewhere else, so check your return value as that is where your new
57 * mappings really are!
58 *
59 * Another note, these are only available on V2 or higher proms!
60 */
61extern char *prom_mapio(char *virt_hint, int io_space, unsigned int phys_addr, unsigned int num_bytes);
62extern void prom_unmapio(char *virt_addr, unsigned int num_bytes);
63
64/* Device operations. */
65
66/* Open the device described by the passed string. Note, that the format
67 * of the string is different on V0 vs. V2->higher proms. The caller must
68 * know what he/she is doing! Returns the device descriptor, an int.
69 */
70extern int prom_devopen(char *device_string);
71
72/* Close a previously opened device described by the passed integer
73 * descriptor.
74 */
75extern int prom_devclose(int device_handle);
76
77/* Do a seek operation on the device described by the passed integer
78 * descriptor.
79 */
80extern void prom_seek(int device_handle, unsigned int seek_hival,
81 unsigned int seek_lowval);
82
83/* Miscellaneous routines, don't really fit in any category per se. */
84
85/* Reboot the machine with the command line passed. */
86extern void prom_reboot(char *boot_command);
87
88/* Evaluate the forth string passed. */
89extern void prom_feval(char *forth_string);
90
91/* Enter the prom, with possibility of continuation with the 'go'
92 * command in newer proms.
93 */
94extern void prom_cmdline(void);
95
96/* Enter the prom, with no chance of continuation for the stand-alone
97 * which calls this.
98 */
99extern void prom_halt(void) __attribute__ ((noreturn));
100
101/* Set the PROM 'sync' callback function to the passed function pointer.
102 * When the user gives the 'sync' command at the prom prompt while the
103 * kernel is still active, the prom will call this routine.
104 *
105 * XXX The arguments are different on V0 vs. V2->higher proms, grrr! XXX
106 */
107typedef void (*sync_func_t)(void);
108extern void prom_setsync(sync_func_t func_ptr);
109
110/* Acquire the IDPROM of the root node in the prom device tree. This
111 * gets passed a buffer where you would like it stuffed. The return value
112 * is the format type of this idprom or 0xff on error.
113 */
114extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size);
115
116/* Get the prom major version. */
117extern int prom_version(void);
118
119/* Get the prom plugin revision. */
120extern int prom_getrev(void);
121
122/* Get the prom firmware revision. */
123extern int prom_getprev(void);
124
125/* Character operations to/from the console.... */
126
127/* Non-blocking get character from console. */
128extern int prom_nbgetchar(void);
129
130/* Non-blocking put character to console. */
131extern int prom_nbputchar(char character);
132
133/* Blocking get character from console. */
134extern char prom_getchar(void);
135
136/* Blocking put character to console. */
137extern void prom_putchar(char character);
138
139/* Prom's internal routines, don't use in kernel/boot code. */
140extern void prom_printf(char *fmt, ...);
141extern void prom_write(const char *buf, unsigned int len);
142
143/* Multiprocessor operations... */
144
145/* Start the CPU with the given device tree node, context table, and context
146 * at the passed program counter.
147 */
148extern int prom_startcpu(int cpunode, struct linux_prom_registers *context_table,
149 int context, char *program_counter);
150
151/* Stop the CPU with the passed device tree node. */
152extern int prom_stopcpu(int cpunode);
153
154/* Idle the CPU with the passed device tree node. */
155extern int prom_idlecpu(int cpunode);
156
157/* Re-Start the CPU with the passed device tree node. */
158extern int prom_restartcpu(int cpunode);
159
160/* PROM memory allocation facilities... */
161
162/* Allocate a chunk of the indicated size, possibly at the given
163 * virtual address.
164 */
165extern char *prom_alloc(char *virt_hint, unsigned int size);
166
167/* Free a previously allocated chunk. */
168extern void prom_free(char *virt_addr, unsigned int size);
169
170/* Sun4/sun4c specific memory-management startup hook. */
171
172/* Map the passed segment in the given context at the passed
173 * virtual address.
174 */
175extern void prom_putsegment(int context, unsigned long virt_addr,
176 int physical_segment);
177
178
179/* PROM device tree traversal functions... */
180
181#ifdef PROMLIB_INTERNAL
182
183/* Internal version of prom_getchild. */
184extern int __prom_getchild(int parent_node);
185
186/* Internal version of prom_getsibling. */
187extern int __prom_getsibling(int node);
188
189#endif
190
191
192/* Get the child node of the given node, or zero if no child exists. */
193extern int prom_getchild(int parent_node);
194
195/* Get the next sibling node of the given node, or zero if no further
196 * siblings exist.
197 */
198extern int prom_getsibling(int node);
199
200/* Get the length, at the passed node, of the given property type.
201 * Returns -1 on error (ie. no such property at this node).
202 */
203extern int prom_getproplen(int thisnode, char *property);
204
205/* Fetch the requested property using the given buffer. Returns
206 * the number of bytes the prom put into your buffer or -1 on error.
207 */
208extern int __must_check prom_getproperty(int thisnode, char *property,
209 char *prop_buffer, int propbuf_size);
210
211/* Acquire an integer property. */
212extern int prom_getint(int node, char *property);
213
214/* Acquire an integer property, with a default value. */
215extern int prom_getintdefault(int node, char *property, int defval);
216
217/* Acquire a boolean property, 0=FALSE 1=TRUE. */
218extern int prom_getbool(int node, char *prop);
219
220/* Acquire a string property, null string on error. */
221extern void prom_getstring(int node, char *prop, char *buf, int bufsize);
222
223/* Does the passed node have the given "name"? YES=1 NO=0 */
224extern int prom_nodematch(int thisnode, char *name);
225
226/* Search all siblings starting at the passed node for "name" matching
227 * the given string. Returns the node on success, zero on failure.
228 */
229extern int prom_searchsiblings(int node_start, char *name);
230
231/* Return the first property type, as a string, for the given node.
232 * Returns a null string on error.
233 */
234extern char *prom_firstprop(int node, char *buffer);
235
236/* Returns the next property after the passed property for the given
237 * node. Returns null string on failure.
238 */
239extern char *prom_nextprop(int node, char *prev_property, char *buffer);
240
241/* Returns phandle of the path specified */
242extern int prom_finddevice(char *name);
243
244/* Returns 1 if the specified node has given property. */
245extern int prom_node_has_property(int node, char *property);
246
247/* Set the indicated property at the given node with the passed value.
248 * Returns the number of bytes of your value that the prom took.
249 */
250extern int prom_setprop(int node, char *prop_name, char *prop_value,
251 int value_size);
252
253extern int prom_pathtoinode(char *path);
254extern int prom_inst2pkg(int);
255
256/* Dorking with Bus ranges... */
257
258/* Apply promlib probes OBIO ranges to registers. */
259extern void prom_apply_obio_ranges(struct linux_prom_registers *obioregs, int nregs);
260
261/* Apply ranges of any prom node (and optionally parent node as well) to registers. */
262extern void prom_apply_generic_ranges(int node, int parent,
263 struct linux_prom_registers *sbusregs, int nregs);
264
265/* CPU probing helpers. */
266int cpu_find_by_instance(int instance, int *prom_node, int *mid);
267int cpu_find_by_mid(int mid, int *prom_node);
268int cpu_get_hwmid(int prom_node);
269
270extern spinlock_t prom_lock;
271
272#endif /* !(__SPARC_OPLIB_H) */
diff --git a/include/asm-sparc/oplib_64.h b/include/asm-sparc/oplib_64.h
new file mode 100644
index 000000000000..6d2c2ca98039
--- /dev/null
+++ b/include/asm-sparc/oplib_64.h
@@ -0,0 +1,322 @@
1/* oplib.h: Describes the interface and available routines in the
2 * Linux Prom library.
3 *
4 * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
5 * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 */
7
8#ifndef __SPARC64_OPLIB_H
9#define __SPARC64_OPLIB_H
10
11#include <asm/openprom.h>
12
13/* OBP version string. */
14extern char prom_version[];
15
16/* Root node of the prom device tree, this stays constant after
17 * initialization is complete.
18 */
19extern int prom_root_node;
20
21/* PROM stdin and stdout */
22extern int prom_stdin, prom_stdout;
23
24/* /chosen node of the prom device tree, this stays constant after
25 * initialization is complete.
26 */
27extern int prom_chosen_node;
28
29/* Helper values and strings in arch/sparc64/kernel/head.S */
30extern const char prom_peer_name[];
31extern const char prom_compatible_name[];
32extern const char prom_root_compatible[];
33extern const char prom_cpu_compatible[];
34extern const char prom_finddev_name[];
35extern const char prom_chosen_path[];
36extern const char prom_cpu_path[];
37extern const char prom_getprop_name[];
38extern const char prom_mmu_name[];
39extern const char prom_callmethod_name[];
40extern const char prom_translate_name[];
41extern const char prom_map_name[];
42extern const char prom_unmap_name[];
43extern int prom_mmu_ihandle_cache;
44extern unsigned int prom_boot_mapped_pc;
45extern unsigned int prom_boot_mapping_mode;
46extern unsigned long prom_boot_mapping_phys_high, prom_boot_mapping_phys_low;
47
48struct linux_mlist_p1275 {
49 struct linux_mlist_p1275 *theres_more;
50 unsigned long start_adr;
51 unsigned long num_bytes;
52};
53
54struct linux_mem_p1275 {
55 struct linux_mlist_p1275 **p1275_totphys;
56 struct linux_mlist_p1275 **p1275_prommap;
57 struct linux_mlist_p1275 **p1275_available; /* What we can use */
58};
59
60/* The functions... */
61
62/* You must call prom_init() before using any of the library services,
63 * preferably as early as possible. Pass it the romvec pointer.
64 */
65extern void prom_init(void *cif_handler, void *cif_stack);
66
67/* Boot argument acquisition, returns the boot command line string. */
68extern char *prom_getbootargs(void);
69
70/* Device utilities. */
71
72/* Device operations. */
73
74/* Open the device described by the passed string. Note, that the format
75 * of the string is different on V0 vs. V2->higher proms. The caller must
76 * know what he/she is doing! Returns the device descriptor, an int.
77 */
78extern int prom_devopen(const char *device_string);
79
80/* Close a previously opened device described by the passed integer
81 * descriptor.
82 */
83extern int prom_devclose(int device_handle);
84
85/* Do a seek operation on the device described by the passed integer
86 * descriptor.
87 */
88extern void prom_seek(int device_handle, unsigned int seek_hival,
89 unsigned int seek_lowval);
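A small sketch of the device-handle lifecycle these three calls describe. The OBP path string is invented for the example, and a zero handle is treated as an open failure (an assumption, not something this header states).

static void example_prom_device_io(void)
{
        int dh = prom_devopen("/pci@1f,0/ide@d/disk@0");    /* made-up path */

        if (dh == 0)
                return;                 /* assume 0 means the open failed */

        /* the 64-bit byte offset is passed as two 32-bit halves */
        prom_seek(dh, 0, 0);
        prom_devclose(dh);
}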
90
91/* Miscellaneous routines that don't really fit in any category per se. */
92
93/* Reboot the machine with the command line passed. */
94extern void prom_reboot(const char *boot_command);
95
96/* Evaluate the forth string passed. */
97extern void prom_feval(const char *forth_string);
98
99/* Enter the prom, with possibility of continuation with the 'go'
100 * command in newer proms.
101 */
102extern void prom_cmdline(void);
103
104/* Enter the prom, with no chance of continuation for the stand-alone
105 * which calls this.
106 */
107extern void prom_halt(void) __attribute__ ((noreturn));
108
109/* Halt and power-off the machine. */
110extern void prom_halt_power_off(void) __attribute__ ((noreturn));
111
112/* Set the PROM 'sync' callback function to the passed function pointer.
113 * When the user gives the 'sync' command at the prom prompt while the
114 * kernel is still active, the prom will call this routine.
115 *
116 */
117typedef int (*callback_func_t)(long *cmd);
118extern void prom_setcallback(callback_func_t func_ptr);
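A minimal sketch of wiring up the 'sync' hook described above; the callback body is purely illustrative.

static int example_sync_callback(long *cmd)
{
        /* a real handler would sync filesystems before returning to OBP */
        return 0;
}

static void example_install_callback(void)
{
        prom_setcallback(example_sync_callback);
}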
119
120/* Acquire the IDPROM of the root node in the prom device tree. This
121 * gets passed a buffer where you would like it stuffed. The return value
122 * is the format type of this idprom or 0xff on error.
123 */
124extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size);
125
126/* Character operations to/from the console.... */
127
128/* Non-blocking get character from console. */
129extern int prom_nbgetchar(void);
130
131/* Non-blocking put character to console. */
132extern int prom_nbputchar(char character);
133
134/* Blocking get character from console. */
135extern char prom_getchar(void);
136
137/* Blocking put character to console. */
138extern void prom_putchar(char character);
139
140/* Prom's internal routines, don't use in kernel/boot code. */
141extern void prom_printf(const char *fmt, ...);
142extern void prom_write(const char *buf, unsigned int len);
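A short sketch of the non-blocking console calls, assuming the usual convention that prom_nbgetchar() returns -1 when no character is pending.

static void example_console_poll(void)
{
        int c = prom_nbgetchar();       /* assumed: -1 when nothing pending */

        if (c != -1)
                prom_putchar((char) c); /* echo it back to the console */
}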
143
144/* Multiprocessor operations... */
145#ifdef CONFIG_SMP
146/* Start the CPU with the given device tree node at the passed program
147 * counter with the given arg passed in via register %o0.
148 */
149extern void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg);
150
151/* Start the CPU with the given cpu ID at the passed program
152 * counter with the given arg passed in via register %o0.
153 */
154extern void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg);
155
156/* Stop the CPU with the given cpu ID. */
157extern void prom_stopcpu_cpuid(int cpuid);
158
159/* Stop the current CPU. */
160extern void prom_stopself(void);
161
162/* Idle the current CPU. */
163extern void prom_idleself(void);
164
165/* Resume the CPU with the passed device tree node. */
166extern void prom_resumecpu(int cpunode);
167#endif
168
169/* Power management interfaces. */
170
171/* Put the current CPU to sleep. */
172extern void prom_sleepself(void);
173
174/* Put the entire system to sleep. */
175extern int prom_sleepsystem(void);
176
177/* Initiate a wakeup event. */
178extern int prom_wakeupsystem(void);
179
180/* MMU and memory related OBP interfaces. */
181
182/* Get unique string identifying SIMM at given physical address. */
183extern int prom_getunumber(int syndrome_code,
184 unsigned long phys_addr,
185 char *buf, int buflen);
186
187/* Retain physical memory to the caller across soft resets. */
188extern unsigned long prom_retain(const char *name,
189 unsigned long pa_low, unsigned long pa_high,
190 long size, long align);
191
192/* Load explicit I/D TLB entries into the calling processor. */
193extern long prom_itlb_load(unsigned long index,
194 unsigned long tte_data,
195 unsigned long vaddr);
196
197extern long prom_dtlb_load(unsigned long index,
198 unsigned long tte_data,
199 unsigned long vaddr);
200
201/* Map/Unmap client program address ranges. First the format of
202 * the mapping mode argument.
203 */
204#define PROM_MAP_WRITE 0x0001 /* Writable */
205#define PROM_MAP_READ 0x0002 /* Readable - sw */
206#define PROM_MAP_EXEC 0x0004 /* Executable - sw */
207#define PROM_MAP_LOCKED 0x0010 /* Locked, use i/dtlb load calls for this instead */
208#define PROM_MAP_CACHED 0x0020 /* Cacheable in both L1 and L2 caches */
209#define PROM_MAP_SE 0x0040 /* Side-Effects */
210#define PROM_MAP_GLOB 0x0080 /* Global */
211#define PROM_MAP_IE 0x0100 /* Invert-Endianness */
212#define PROM_MAP_DEFAULT (PROM_MAP_WRITE | PROM_MAP_READ | PROM_MAP_EXEC | PROM_MAP_CACHED)
213
214extern int prom_map(int mode, unsigned long size,
215 unsigned long vaddr, unsigned long paddr);
216extern void prom_unmap(unsigned long size, unsigned long vaddr);
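A sketch of mapping one page with the default attribute mask and tearing it down again. The addresses are placeholders, and the zero-on-success return convention is an assumption for the example.

static void example_prom_map(void)
{
        unsigned long va = 0xfe000000UL;        /* made-up virtual address */
        unsigned long pa = 0x12340000UL;        /* made-up physical address */

        if (prom_map(PROM_MAP_DEFAULT, 8192, va, pa) == 0) {
                /* ... use the mapping ... */
                prom_unmap(8192, va);
        }
}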
217
218
219/* PROM device tree traversal functions... */
220
221#ifdef PROMLIB_INTERNAL
222
223/* Internal version of prom_getchild. */
224extern int __prom_getchild(int parent_node);
225
226/* Internal version of prom_getsibling. */
227extern int __prom_getsibling(int node);
228
229#endif
230
231/* Get the child node of the given node, or zero if no child exists. */
232extern int prom_getchild(int parent_node);
233
234/* Get the next sibling node of the given node, or zero if no further
235 * siblings exist.
236 */
237extern int prom_getsibling(int node);
238
239/* Get the length, at the passed node, of the given property type.
240 * Returns -1 on error (i.e. no such property at this node).
241 */
242extern int prom_getproplen(int thisnode, const char *property);
243
244/* Fetch the requested property using the given buffer. Returns
245 * the number of bytes the prom put into your buffer or -1 on error.
246 */
247extern int prom_getproperty(int thisnode, const char *property,
248 char *prop_buffer, int propbuf_size);
249
250/* Acquire an integer property. */
251extern int prom_getint(int node, const char *property);
252
253/* Acquire an integer property, with a default value. */
254extern int prom_getintdefault(int node, const char *property, int defval);
255
256/* Acquire a boolean property, 0=FALSE 1=TRUE. */
257extern int prom_getbool(int node, const char *prop);
258
259/* Acquire a string property, null string on error. */
260extern void prom_getstring(int node, const char *prop, char *buf, int bufsize);
261
262/* Does the passed node have the given "name"? YES=1 NO=0 */
263extern int prom_nodematch(int thisnode, const char *name);
264
265/* Search all siblings starting at the passed node for "name" matching
266 * the given string. Returns the node on success, zero on failure.
267 */
268extern int prom_searchsiblings(int node_start, const char *name);
269
270/* Return the first property type, as a string, for the given node.
271 * Returns a null string on error. Buffer should be at least 32B long.
272 */
273extern char *prom_firstprop(int node, char *buffer);
274
275/* Returns the next property after the passed property for the given
276 * node. Returns null string on failure. Buffer should be at least 32B long.
277 */
278extern char *prom_nextprop(int node, const char *prev_property, char *buffer);
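A sketch of iterating a node's property names with the two calls above, reusing one 32-byte buffer as the comments suggest.

static int example_count_props(int node)
{
        char name[32];
        char *p = prom_firstprop(node, name);
        int n = 0;

        while (p && *p) {               /* a null string ends the walk */
                n++;
                p = prom_nextprop(node, p, name);
        }
        return n;
}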
279
280/* Returns 1 if the specified node has given property. */
281extern int prom_node_has_property(int node, const char *property);
282
283/* Returns phandle of the path specified */
284extern int prom_finddevice(const char *name);
285
286/* Set the indicated property at the given node with the passed value.
287 * Returns the number of bytes of your value that the prom took.
288 */
289extern int prom_setprop(int node, const char *prop_name, char *prop_value,
290 int value_size);
291
292extern int prom_pathtoinode(const char *path);
293extern int prom_inst2pkg(int);
294extern int prom_service_exists(const char *service_name);
295extern void prom_sun4v_guest_soft_state(void);
296
297extern int prom_ihandle2path(int handle, char *buffer, int bufsize);
298
299/* Client interface level routines. */
300extern long p1275_cmd(const char *, long, ...);
301
302#if 0
303#define P1275_SIZE(x) ((((long)((x) / 32)) << 32) | (x))
304#else
305#define P1275_SIZE(x) x
306#endif
307
308/* We support at most 16 input and 1 output argument */
309#define P1275_ARG_NUMBER 0
310#define P1275_ARG_IN_STRING 1
311#define P1275_ARG_OUT_BUF 2
312#define P1275_ARG_OUT_32B 3
313#define P1275_ARG_IN_FUNCTION 4
314#define P1275_ARG_IN_BUF 5
315#define P1275_ARG_IN_64B 6
316
317#define P1275_IN(x) ((x) & 0xf)
318#define P1275_OUT(x) (((x) << 4) & 0xf0)
319#define P1275_INOUT(i,o) (P1275_IN(i)|P1275_OUT(o))
320#define P1275_ARG(n,x) ((x) << ((n)*3 + 8))
321
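Purely illustrative: how the macros above can encode the argument-format word passed to p1275_cmd(). The argument indices and cell counts below mirror the style of calls made elsewhere in the prom library sources, but treat the exact encoding as an example rather than a reference.

/* Hypothetical getprop-style call: 4 input cells, 1 output cell,
 * argument 1 is a C string, argument 2 is an output buffer.
 */
static int example_getprop(int node, const char *prop, char *buf, int bufsize)
{
        return p1275_cmd(prom_getprop_name,
                         P1275_ARG(1, P1275_ARG_IN_STRING) |
                         P1275_ARG(2, P1275_ARG_OUT_BUF) |
                         P1275_INOUT(4, 1),
                         node, prop, buf, P1275_SIZE(bufsize));
}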
322#endif /* !(__SPARC64_OPLIB_H) */
diff --git a/include/asm-sparc/page.h b/include/asm-sparc/page.h
index 6aa9e4c910cf..f32f49fcf75c 100644
--- a/include/asm-sparc/page.h
+++ b/include/asm-sparc/page.h
@@ -1,165 +1,8 @@
(new file contents, 8 lines)
1#ifndef ___ASM_SPARC_PAGE_H
2#define ___ASM_SPARC_PAGE_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/page_64.h>
5#else
6#include <asm-sparc/page_32.h>
7#endif
8#endif

(previous contents, removed)
1/*
2 * page.h: Various defines and such for MMU operations on the Sparc for
3 * the Linux kernel.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#ifndef _SPARC_PAGE_H
9#define _SPARC_PAGE_H
10
11#ifdef CONFIG_SUN4
12#define PAGE_SHIFT 13
13#else
14#define PAGE_SHIFT 12
15#endif
16#ifndef __ASSEMBLY__
17/* I have my suspicions... -DaveM */
18#define PAGE_SIZE (1UL << PAGE_SHIFT)
19#else
20#define PAGE_SIZE (1 << PAGE_SHIFT)
21#endif
22#define PAGE_MASK (~(PAGE_SIZE-1))
23
24#include <asm/btfixup.h>
25
26#ifndef __ASSEMBLY__
27
28#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
29#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
30#define clear_user_page(addr, vaddr, page) \
31 do { clear_page(addr); \
32 sparc_flush_page_to_ram(page); \
33 } while (0)
34#define copy_user_page(to, from, vaddr, page) \
35 do { copy_page(to, from); \
36 sparc_flush_page_to_ram(page); \
37 } while (0)
38
39/* The following structure is used to hold the physical
40 * memory configuration of the machine. This is filled in
41 * prom_meminit() and is later used by mem_init() to set up
42 * mem_map[]. We statically allocate SPARC_PHYS_BANKS+1 of
43 * these structs, this is arbitrary. The entry after the
44 * last valid one has num_bytes==0.
45 */
46struct sparc_phys_banks {
47 unsigned long base_addr;
48 unsigned long num_bytes;
49};
50
51#define SPARC_PHYS_BANKS 32
52
53extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS+1];
54
55/* Cache alias structure. Entry is valid if context != -1. */
56struct cache_palias {
57 unsigned long vaddr;
58 int context;
59};
60
61extern struct cache_palias *sparc_aliases;
62
63/* passing structs on the Sparc slow us down tremendously... */
64
65/* #define STRICT_MM_TYPECHECKS */
66
67#ifdef STRICT_MM_TYPECHECKS
68/*
69 * These are used to make use of C type-checking..
70 */
71typedef struct { unsigned long pte; } pte_t;
72typedef struct { unsigned long iopte; } iopte_t;
73typedef struct { unsigned long pmdv[16]; } pmd_t;
74typedef struct { unsigned long pgd; } pgd_t;
75typedef struct { unsigned long ctxd; } ctxd_t;
76typedef struct { unsigned long pgprot; } pgprot_t;
77typedef struct { unsigned long iopgprot; } iopgprot_t;
78
79#define pte_val(x) ((x).pte)
80#define iopte_val(x) ((x).iopte)
81#define pmd_val(x) ((x).pmdv[0])
82#define pgd_val(x) ((x).pgd)
83#define ctxd_val(x) ((x).ctxd)
84#define pgprot_val(x) ((x).pgprot)
85#define iopgprot_val(x) ((x).iopgprot)
86
87#define __pte(x) ((pte_t) { (x) } )
88#define __iopte(x) ((iopte_t) { (x) } )
89/* #define __pmd(x) ((pmd_t) { (x) } ) */ /* XXX procedure with loop */
90#define __pgd(x) ((pgd_t) { (x) } )
91#define __ctxd(x) ((ctxd_t) { (x) } )
92#define __pgprot(x) ((pgprot_t) { (x) } )
93#define __iopgprot(x) ((iopgprot_t) { (x) } )
94
95#else
96/*
97 * .. while these make it easier on the compiler
98 */
99typedef unsigned long pte_t;
100typedef unsigned long iopte_t;
101typedef struct { unsigned long pmdv[16]; } pmd_t;
102typedef unsigned long pgd_t;
103typedef unsigned long ctxd_t;
104typedef unsigned long pgprot_t;
105typedef unsigned long iopgprot_t;
106
107#define pte_val(x) (x)
108#define iopte_val(x) (x)
109#define pmd_val(x) ((x).pmdv[0])
110#define pgd_val(x) (x)
111#define ctxd_val(x) (x)
112#define pgprot_val(x) (x)
113#define iopgprot_val(x) (x)
114
115#define __pte(x) (x)
116#define __iopte(x) (x)
117/* #define __pmd(x) (x) */ /* XXX later */
118#define __pgd(x) (x)
119#define __ctxd(x) (x)
120#define __pgprot(x) (x)
121#define __iopgprot(x) (x)
122
123#endif
124
125typedef struct page *pgtable_t;
126
127extern unsigned long sparc_unmapped_base;
128
129BTFIXUPDEF_SETHI(sparc_unmapped_base)
130
131#define TASK_UNMAPPED_BASE BTFIXUP_SETHI(sparc_unmapped_base)
132
133#else /* !(__ASSEMBLY__) */
134
135#define __pgprot(x) (x)
136
137#endif /* !(__ASSEMBLY__) */
138
139/* to align the pointer to the (next) page boundary */
140#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
141
142#define PAGE_OFFSET 0xf0000000
143#ifndef __ASSEMBLY__
144extern unsigned long phys_base;
145extern unsigned long pfn_base;
146#endif
147#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + phys_base)
148#define __va(x) ((void *)((unsigned long) (x) - phys_base + PAGE_OFFSET))
149
150#define virt_to_phys __pa
151#define phys_to_virt __va
152
153#define ARCH_PFN_OFFSET (pfn_base)
154#define virt_to_page(kaddr) (mem_map + ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT)))
155
156#define pfn_valid(pfn) (((pfn) >= (pfn_base)) && (((pfn)-(pfn_base)) < max_mapnr))
157#define virt_addr_valid(kaddr) ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT) < max_mapnr)
158
159#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
160 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
161
162#include <asm-generic/memory_model.h>
163#include <asm-generic/page.h>
164
165#endif /* _SPARC_PAGE_H */
diff --git a/include/asm-sparc/page_32.h b/include/asm-sparc/page_32.h
new file mode 100644
index 000000000000..cf5fb70ca1c1
--- /dev/null
+++ b/include/asm-sparc/page_32.h
@@ -0,0 +1,160 @@
1/*
2 * page.h: Various defines and such for MMU operations on the Sparc for
3 * the Linux kernel.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#ifndef _SPARC_PAGE_H
9#define _SPARC_PAGE_H
10
11#ifdef CONFIG_SUN4
12#define PAGE_SHIFT 13
13#else
14#define PAGE_SHIFT 12
15#endif
16#ifndef __ASSEMBLY__
17/* I have my suspicions... -DaveM */
18#define PAGE_SIZE (1UL << PAGE_SHIFT)
19#else
20#define PAGE_SIZE (1 << PAGE_SHIFT)
21#endif
22#define PAGE_MASK (~(PAGE_SIZE-1))
23
24#include <asm/btfixup.h>
25
26#ifndef __ASSEMBLY__
27
28#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
29#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
30#define clear_user_page(addr, vaddr, page) \
31 do { clear_page(addr); \
32 sparc_flush_page_to_ram(page); \
33 } while (0)
34#define copy_user_page(to, from, vaddr, page) \
35 do { copy_page(to, from); \
36 sparc_flush_page_to_ram(page); \
37 } while (0)
38
39/* The following structure is used to hold the physical
40 * memory configuration of the machine. This is filled in
41 * prom_meminit() and is later used by mem_init() to set up
42 * mem_map[]. We statically allocate SPARC_PHYS_BANKS+1 of
43 * these structs, this is arbitrary. The entry after the
44 * last valid one has num_bytes==0.
45 */
46struct sparc_phys_banks {
47 unsigned long base_addr;
48 unsigned long num_bytes;
49};
50
51#define SPARC_PHYS_BANKS 32
52
53extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS+1];
54
55/* Cache alias structure. Entry is valid if context != -1. */
56struct cache_palias {
57 unsigned long vaddr;
58 int context;
59};
60
61/* passing structs on the Sparc slows us down tremendously... */
62
63/* #define STRICT_MM_TYPECHECKS */
64
65#ifdef STRICT_MM_TYPECHECKS
66/*
67 * These are used to make use of C type-checking..
68 */
69typedef struct { unsigned long pte; } pte_t;
70typedef struct { unsigned long iopte; } iopte_t;
71typedef struct { unsigned long pmdv[16]; } pmd_t;
72typedef struct { unsigned long pgd; } pgd_t;
73typedef struct { unsigned long ctxd; } ctxd_t;
74typedef struct { unsigned long pgprot; } pgprot_t;
75typedef struct { unsigned long iopgprot; } iopgprot_t;
76
77#define pte_val(x) ((x).pte)
78#define iopte_val(x) ((x).iopte)
79#define pmd_val(x) ((x).pmdv[0])
80#define pgd_val(x) ((x).pgd)
81#define ctxd_val(x) ((x).ctxd)
82#define pgprot_val(x) ((x).pgprot)
83#define iopgprot_val(x) ((x).iopgprot)
84
85#define __pte(x) ((pte_t) { (x) } )
86#define __iopte(x) ((iopte_t) { (x) } )
87/* #define __pmd(x) ((pmd_t) { (x) } ) */ /* XXX procedure with loop */
88#define __pgd(x) ((pgd_t) { (x) } )
89#define __ctxd(x) ((ctxd_t) { (x) } )
90#define __pgprot(x) ((pgprot_t) { (x) } )
91#define __iopgprot(x) ((iopgprot_t) { (x) } )
92
93#else
94/*
95 * .. while these make it easier on the compiler
96 */
97typedef unsigned long pte_t;
98typedef unsigned long iopte_t;
99typedef struct { unsigned long pmdv[16]; } pmd_t;
100typedef unsigned long pgd_t;
101typedef unsigned long ctxd_t;
102typedef unsigned long pgprot_t;
103typedef unsigned long iopgprot_t;
104
105#define pte_val(x) (x)
106#define iopte_val(x) (x)
107#define pmd_val(x) ((x).pmdv[0])
108#define pgd_val(x) (x)
109#define ctxd_val(x) (x)
110#define pgprot_val(x) (x)
111#define iopgprot_val(x) (x)
112
113#define __pte(x) (x)
114#define __iopte(x) (x)
115/* #define __pmd(x) (x) */ /* XXX later */
116#define __pgd(x) (x)
117#define __ctxd(x) (x)
118#define __pgprot(x) (x)
119#define __iopgprot(x) (x)
120
121#endif
122
123typedef struct page *pgtable_t;
124
125extern unsigned long sparc_unmapped_base;
126
127BTFIXUPDEF_SETHI(sparc_unmapped_base)
128
129#define TASK_UNMAPPED_BASE BTFIXUP_SETHI(sparc_unmapped_base)
130
131#else /* !(__ASSEMBLY__) */
132
133#define __pgprot(x) (x)
134
135#endif /* !(__ASSEMBLY__) */
136
137#define PAGE_OFFSET 0xf0000000
138#ifndef __ASSEMBLY__
139extern unsigned long phys_base;
140extern unsigned long pfn_base;
141#endif
142#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + phys_base)
143#define __va(x) ((void *)((unsigned long) (x) - phys_base + PAGE_OFFSET))
144
145#define virt_to_phys __pa
146#define phys_to_virt __va
147
148#define ARCH_PFN_OFFSET (pfn_base)
149#define virt_to_page(kaddr) (mem_map + ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT)))
150
151#define pfn_valid(pfn) (((pfn) >= (pfn_base)) && (((pfn)-(pfn_base)) < max_mapnr))
152#define virt_addr_valid(kaddr) ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT) < max_mapnr)
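A sketch of how the address-translation helpers above relate. It assumes mem_map and max_mapnr from <linux/mm.h> and only makes sense for addresses inside the kernel's identity-mapped region.

static struct page *example_kaddr_to_page(void *kaddr)
{
        unsigned long pa  = __pa(kaddr);        /* kernel virtual -> physical */
        unsigned long pfn = pa >> PAGE_SHIFT;

        if (!pfn_valid(pfn))
                return NULL;                    /* outside the known banks */

        return virt_to_page(kaddr);             /* index into mem_map */
}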
153
154#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
155 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
156
157#include <asm-generic/memory_model.h>
158#include <asm-generic/page.h>
159
160#endif /* _SPARC_PAGE_H */
diff --git a/include/asm-sparc/page_64.h b/include/asm-sparc/page_64.h
new file mode 100644
index 000000000000..b579b910ef51
--- /dev/null
+++ b/include/asm-sparc/page_64.h
@@ -0,0 +1,135 @@
1#ifndef _SPARC64_PAGE_H
2#define _SPARC64_PAGE_H
3
4#include <linux/const.h>
5
6#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
7#define PAGE_SHIFT 13
8#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
9#define PAGE_SHIFT 16
10#else
11#error No page size specified in kernel configuration
12#endif
13
14#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
15#define PAGE_MASK (~(PAGE_SIZE-1))
16
17/* Flushing for D-cache alias handling is only needed if
18 * the page size is smaller than 16K.
19 */
20#if PAGE_SHIFT < 14
21#define DCACHE_ALIASING_POSSIBLE
22#endif
23
24#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
25#define HPAGE_SHIFT 22
26#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
27#define HPAGE_SHIFT 19
28#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
29#define HPAGE_SHIFT 16
30#endif
31
32#ifdef CONFIG_HUGETLB_PAGE
33#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
34#define HPAGE_MASK (~(HPAGE_SIZE - 1UL))
35#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
36#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
37#endif
38
39#ifndef __ASSEMBLY__
40
41extern void _clear_page(void *page);
42#define clear_page(X) _clear_page((void *)(X))
43struct page;
44extern void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
45#define copy_page(X,Y) memcpy((void *)(X), (void *)(Y), PAGE_SIZE)
46extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *topage);
47
48/* Unlike sparc32, sparc64's parameter passing API is more
49 * sane in that structures which are small enough are passed
50 * in registers instead of on the stack. Thus, setting
51 * STRICT_MM_TYPECHECKS does not generate worse code so
52 * let's enable it to get the type checking.
53 */
54
55#define STRICT_MM_TYPECHECKS
56
57#ifdef STRICT_MM_TYPECHECKS
58/* These are used to make use of C type-checking.. */
59typedef struct { unsigned long pte; } pte_t;
60typedef struct { unsigned long iopte; } iopte_t;
61typedef struct { unsigned int pmd; } pmd_t;
62typedef struct { unsigned int pgd; } pgd_t;
63typedef struct { unsigned long pgprot; } pgprot_t;
64
65#define pte_val(x) ((x).pte)
66#define iopte_val(x) ((x).iopte)
67#define pmd_val(x) ((x).pmd)
68#define pgd_val(x) ((x).pgd)
69#define pgprot_val(x) ((x).pgprot)
70
71#define __pte(x) ((pte_t) { (x) } )
72#define __iopte(x) ((iopte_t) { (x) } )
73#define __pmd(x) ((pmd_t) { (x) } )
74#define __pgd(x) ((pgd_t) { (x) } )
75#define __pgprot(x) ((pgprot_t) { (x) } )
76
77#else
78/* .. while these make it easier on the compiler */
79typedef unsigned long pte_t;
80typedef unsigned long iopte_t;
81typedef unsigned int pmd_t;
82typedef unsigned int pgd_t;
83typedef unsigned long pgprot_t;
84
85#define pte_val(x) (x)
86#define iopte_val(x) (x)
87#define pmd_val(x) (x)
88#define pgd_val(x) (x)
89#define pgprot_val(x) (x)
90
91#define __pte(x) (x)
92#define __iopte(x) (x)
93#define __pmd(x) (x)
94#define __pgd(x) (x)
95#define __pgprot(x) (x)
96
97#endif /* (STRICT_MM_TYPECHECKS) */
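With STRICT_MM_TYPECHECKS selected above, raw TTE bits must pass through the wrapper macros; a trivial sketch:

static unsigned long example_pte_roundtrip(unsigned long bits)
{
        pte_t pte = __pte(bits);        /* wrap raw bits in the typed form */

        return pte_val(pte);            /* unwrap; returns 'bits' unchanged */
}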
98
99typedef struct page *pgtable_t;
100
101#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_32BIT) ? \
102 (_AC(0x0000000070000000,UL)) : \
103 (_AC(0xfffff80000000000,UL) + (1UL << 32UL)))
104
105#include <asm-generic/memory_model.h>
106
107#endif /* !(__ASSEMBLY__) */
108
109/* We used to stick this into a hard-coded global register (%g4)
110 * but that does not make sense anymore.
111 */
112#define PAGE_OFFSET _AC(0xFFFFF80000000000,UL)
113
114#ifndef __ASSEMBLY__
115
116#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
117#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
118
119#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
120
121#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr)>>PAGE_SHIFT)
122
123#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
124
125#define virt_to_phys __pa
126#define phys_to_virt __va
127
128#endif /* !(__ASSEMBLY__) */
129
130#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
131 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
132
133#include <asm-generic/page.h>
134
135#endif /* _SPARC64_PAGE_H */
diff --git a/include/asm-sparc/parport.h b/include/asm-sparc/parport.h
new file mode 100644
index 000000000000..7818b2523b8d
--- /dev/null
+++ b/include/asm-sparc/parport.h
@@ -0,0 +1,246 @@
1/* parport.h: sparc64 specific parport initialization and dma.
2 *
3 * Copyright (C) 1999 Eddie C. Dost (ecd@skynet.be)
4 */
5
6#ifndef _ASM_SPARC64_PARPORT_H
7#define _ASM_SPARC64_PARPORT_H 1
8
9#include <asm/ebus.h>
10#include <asm/ns87303.h>
11#include <asm/of_device.h>
12#include <asm/prom.h>
13
14#define PARPORT_PC_MAX_PORTS PARPORT_MAX
15
16/*
17 * While sparc64 doesn't have an ISA DMA API, we provide something that looks
18 * close enough to make parport_pc happy
19 */
20#define HAS_DMA
21
22static DEFINE_SPINLOCK(dma_spin_lock);
23
24#define claim_dma_lock() \
25({ unsigned long flags; \
26 spin_lock_irqsave(&dma_spin_lock, flags); \
27 flags; \
28})
29
30#define release_dma_lock(__flags) \
31 spin_unlock_irqrestore(&dma_spin_lock, __flags);
32
33static struct sparc_ebus_info {
34 struct ebus_dma_info info;
35 unsigned int addr;
36 unsigned int count;
37 int lock;
38
39 struct parport *port;
40} sparc_ebus_dmas[PARPORT_PC_MAX_PORTS];
41
42static DECLARE_BITMAP(dma_slot_map, PARPORT_PC_MAX_PORTS);
43
44static inline int request_dma(unsigned int dmanr, const char *device_id)
45{
46 if (dmanr >= PARPORT_PC_MAX_PORTS)
47 return -EINVAL;
48 if (xchg(&sparc_ebus_dmas[dmanr].lock, 1) != 0)
49 return -EBUSY;
50 return 0;
51}
52
53static inline void free_dma(unsigned int dmanr)
54{
55 if (dmanr >= PARPORT_PC_MAX_PORTS) {
56 printk(KERN_WARNING "Trying to free DMA%d\n", dmanr);
57 return;
58 }
59 if (xchg(&sparc_ebus_dmas[dmanr].lock, 0) == 0) {
60 printk(KERN_WARNING "Trying to free free DMA%d\n", dmanr);
61 return;
62 }
63}
64
65static inline void enable_dma(unsigned int dmanr)
66{
67 ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 1);
68
69 if (ebus_dma_request(&sparc_ebus_dmas[dmanr].info,
70 sparc_ebus_dmas[dmanr].addr,
71 sparc_ebus_dmas[dmanr].count))
72 BUG();
73}
74
75static inline void disable_dma(unsigned int dmanr)
76{
77 ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 0);
78}
79
80static inline void clear_dma_ff(unsigned int dmanr)
81{
82 /* nothing */
83}
84
85static inline void set_dma_mode(unsigned int dmanr, char mode)
86{
87 ebus_dma_prepare(&sparc_ebus_dmas[dmanr].info, (mode != DMA_MODE_WRITE));
88}
89
90static inline void set_dma_addr(unsigned int dmanr, unsigned int addr)
91{
92 sparc_ebus_dmas[dmanr].addr = addr;
93}
94
95static inline void set_dma_count(unsigned int dmanr, unsigned int count)
96{
97 sparc_ebus_dmas[dmanr].count = count;
98}
99
100static inline unsigned int get_dma_residue(unsigned int dmanr)
101{
102 return ebus_dma_residue(&sparc_ebus_dmas[dmanr].info);
103}
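A sketch of the calling sequence parport_pc expects from these helpers: program mode, address and count under the DMA lock, then enable. The channel number and buffer values are placeholders.

static void example_start_dma(unsigned int chan, unsigned int bus_addr,
                              unsigned int len)
{
        unsigned long flags = claim_dma_lock();

        set_dma_mode(chan, DMA_MODE_WRITE);     /* host memory -> device */
        set_dma_addr(chan, bus_addr);
        set_dma_count(chan, len);
        enable_dma(chan);                       /* kicks off ebus_dma_request() */

        release_dma_lock(flags);
}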
104
105static int __devinit ecpp_probe(struct of_device *op, const struct of_device_id *match)
106{
107 unsigned long base = op->resource[0].start;
108 unsigned long config = op->resource[1].start;
109 unsigned long d_base = op->resource[2].start;
110 unsigned long d_len;
111 struct device_node *parent;
112 struct parport *p;
113 int slot, err;
114
115 parent = op->node->parent;
116 if (!strcmp(parent->name, "dma")) {
117 p = parport_pc_probe_port(base, base + 0x400,
118 op->irqs[0], PARPORT_DMA_NOFIFO,
119 op->dev.parent->parent);
120 if (!p)
121 return -ENOMEM;
122 dev_set_drvdata(&op->dev, p);
123 return 0;
124 }
125
126 for (slot = 0; slot < PARPORT_PC_MAX_PORTS; slot++) {
127 if (!test_and_set_bit(slot, dma_slot_map))
128 break;
129 }
130 err = -ENODEV;
131 if (slot >= PARPORT_PC_MAX_PORTS)
132 goto out_err;
133
134 spin_lock_init(&sparc_ebus_dmas[slot].info.lock);
135
136 d_len = (op->resource[2].end - d_base) + 1UL;
137 sparc_ebus_dmas[slot].info.regs =
138 of_ioremap(&op->resource[2], 0, d_len, "ECPP DMA");
139
140 if (!sparc_ebus_dmas[slot].info.regs)
141 goto out_clear_map;
142
143 sparc_ebus_dmas[slot].info.flags = 0;
144 sparc_ebus_dmas[slot].info.callback = NULL;
145 sparc_ebus_dmas[slot].info.client_cookie = NULL;
146 sparc_ebus_dmas[slot].info.irq = 0xdeadbeef;
147 strcpy(sparc_ebus_dmas[slot].info.name, "parport");
148 if (ebus_dma_register(&sparc_ebus_dmas[slot].info))
149 goto out_unmap_regs;
150
151 ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 1);
152
153 /* Configure IRQ to Push Pull, Level Low */
154 /* Enable ECP, set bit 2 of the CTR first */
155 outb(0x04, base + 0x02);
156 ns87303_modify(config, PCR,
157 PCR_EPP_ENABLE |
158 PCR_IRQ_ODRAIN,
159 PCR_ECP_ENABLE |
160 PCR_ECP_CLK_ENA |
161 PCR_IRQ_POLAR);
162
163 /* CTR bit 5 controls direction of port */
164 ns87303_modify(config, PTR,
165 0, PTR_LPT_REG_DIR);
166
167 p = parport_pc_probe_port(base, base + 0x400,
168 op->irqs[0],
169 slot,
170 op->dev.parent);
171 err = -ENOMEM;
172 if (!p)
173 goto out_disable_irq;
174
175 dev_set_drvdata(&op->dev, p);
176
177 return 0;
178
179out_disable_irq:
180 ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0);
181 ebus_dma_unregister(&sparc_ebus_dmas[slot].info);
182
183out_unmap_regs:
184 of_iounmap(&op->resource[2], sparc_ebus_dmas[slot].info.regs, d_len);
185
186out_clear_map:
187 clear_bit(slot, dma_slot_map);
188
189out_err:
190 return err;
191}
192
193static int __devexit ecpp_remove(struct of_device *op)
194{
195 struct parport *p = dev_get_drvdata(&op->dev);
196 int slot = p->dma;
197
198 parport_pc_unregister_port(p);
199
200 if (slot != PARPORT_DMA_NOFIFO) {
201 unsigned long d_base = op->resource[2].start;
202 unsigned long d_len;
203
204 d_len = (op->resource[2].end - d_base) + 1UL;
205
206 ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0);
207 ebus_dma_unregister(&sparc_ebus_dmas[slot].info);
208 of_iounmap(&op->resource[2],
209 sparc_ebus_dmas[slot].info.regs,
210 d_len);
211 clear_bit(slot, dma_slot_map);
212 }
213
214 return 0;
215}
216
217static struct of_device_id ecpp_match[] = {
218 {
219 .name = "ecpp",
220 },
221 {
222 .name = "parallel",
223 .compatible = "ecpp",
224 },
225 {
226 .name = "parallel",
227 .compatible = "ns87317-ecpp",
228 },
229 {},
230};
231
232static struct of_platform_driver ecpp_driver = {
233 .name = "ecpp",
234 .match_table = ecpp_match,
235 .probe = ecpp_probe,
236 .remove = __devexit_p(ecpp_remove),
237};
238
239static int parport_pc_find_nonpci_ports(int autoirq, int autodma)
240{
241 of_register_driver(&ecpp_driver, &of_bus_type);
242
243 return 0;
244}
245
246#endif /* !(_ASM_SPARC64_PARPORT_H) */
diff --git a/include/asm-sparc/pci.h b/include/asm-sparc/pci.h
index b93b6c79e08f..b807d52a4809 100644
--- a/include/asm-sparc/pci.h
+++ b/include/asm-sparc/pci.h
@@ -1,170 +1,8 @@
(new file contents, 8 lines)
1#ifndef ___ASM_SPARC_PCI_H
2#define ___ASM_SPARC_PCI_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/pci_64.h>
5#else
6#include <asm-sparc/pci_32.h>
7#endif
8#endif

(previous contents, removed)
1#ifndef __SPARC_PCI_H
2#define __SPARC_PCI_H
3
4#ifdef __KERNEL__
5
6/* Can be used to override the logic in pci_scan_bus for skipping
7 * already-configured bus numbers - to be used for buggy BIOSes
8 * or architectures with incomplete PCI setup by the loader.
9 */
10#define pcibios_assign_all_busses() 0
11#define pcibios_scan_all_fns(a, b) 0
12
13#define PCIBIOS_MIN_IO 0UL
14#define PCIBIOS_MIN_MEM 0UL
15
16#define PCI_IRQ_NONE 0xffffffff
17
18static inline void pcibios_set_master(struct pci_dev *dev)
19{
20 /* No special bus mastering setup handling */
21}
22
23static inline void pcibios_penalize_isa_irq(int irq, int active)
24{
25 /* We don't do dynamic PCI IRQ allocation */
26}
27
28/* Dynamic DMA mapping stuff.
29 */
30#define PCI_DMA_BUS_IS_PHYS (0)
31
32#include <asm/scatterlist.h>
33
34struct pci_dev;
35
36/* Allocate and map kernel buffer using consistent mode DMA for a device.
37 * hwdev should be valid struct pci_dev pointer for PCI devices.
38 */
39extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle);
40
41/* Free and unmap a consistent DMA buffer.
42 * cpu_addr is what was returned from pci_alloc_consistent,
43 * size must be the same as what as passed into pci_alloc_consistent,
44 * and likewise dma_addr must be the same as what *dma_addrp was set to.
45 *
46 * References to the memory and mappings assosciated with cpu_addr/dma_addr
47 * past this call are illegal.
48 */
49extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
50
51/* Map a single buffer of the indicated size for DMA in streaming mode.
52 * The 32-bit bus address to use is returned.
53 *
54 * Once the device is given the dma address, the device owns this memory
55 * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed.
56 */
57extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction);
58
59/* Unmap a single streaming mode DMA translation. The dma_addr and size
60 * must match what was provided for in a previous pci_map_single call. All
61 * other usages are undefined.
62 *
63 * After this call, reads by the cpu to the buffer are guaranteed to see
64 * whatever the device wrote there.
65 */
66extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction);
67
68/* pci_unmap_{single,page} is not a nop, thus... */
69#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
70 dma_addr_t ADDR_NAME;
71#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
72 __u32 LEN_NAME;
73#define pci_unmap_addr(PTR, ADDR_NAME) \
74 ((PTR)->ADDR_NAME)
75#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
76 (((PTR)->ADDR_NAME) = (VAL))
77#define pci_unmap_len(PTR, LEN_NAME) \
78 ((PTR)->LEN_NAME)
79#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
80 (((PTR)->LEN_NAME) = (VAL))
81
82/*
83 * Same as above, only with pages instead of mapped addresses.
84 */
85extern dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
86 unsigned long offset, size_t size, int direction);
87extern void pci_unmap_page(struct pci_dev *hwdev,
88 dma_addr_t dma_address, size_t size, int direction);
89
90/* Map a set of buffers described by scatterlist in streaming
91 * mode for DMA. This is the scather-gather version of the
92 * above pci_map_single interface. Here the scatter gather list
93 * elements are each tagged with the appropriate dma address
94 * and length. They are obtained via sg_dma_{address,length}(SG).
95 *
96 * NOTE: An implementation may be able to use a smaller number of
97 * DMA address/length pairs than there are SG table elements.
98 * (for example via virtual mapping capabilities)
99 * The routine returns the number of addr/length pairs actually
100 * used, at most nents.
101 *
102 * Device ownership issues as mentioned above for pci_map_single are
103 * the same here.
104 */
105extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction);
106
107/* Unmap a set of streaming mode DMA translations.
108 * Again, cpu read rules concerning calls here are the same as for
109 * pci_unmap_single() above.
110 */
111extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nhwents, int direction);
112
113/* Make physical memory consistent for a single
114 * streaming mode DMA translation after a transfer.
115 *
116 * If you perform a pci_map_single() but wish to interrogate the
117 * buffer using the cpu, yet do not wish to teardown the PCI dma
118 * mapping, you must call this function before doing so. At the
119 * next point you give the PCI dma address back to the card, you
120 * must first perform a pci_dma_sync_for_device, and then the device
121 * again owns the buffer.
122 */
123extern void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
124extern void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
125
126/* Make physical memory consistent for a set of streaming
127 * mode DMA translations after a transfer.
128 *
129 * The same as pci_dma_sync_single_* but for a scatter-gather list,
130 * same rules and usage.
131 */
132extern void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);
133extern void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);
134
135/* Return whether the given PCI device DMA address mask can
136 * be supported properly. For example, if your device can
137 * only drive the low 24-bits during PCI bus mastering, then
138 * you would pass 0x00ffffff as the mask to this function.
139 */
140static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
141{
142 return 1;
143}
144
145#ifdef CONFIG_PCI
146static inline void pci_dma_burst_advice(struct pci_dev *pdev,
147 enum pci_dma_burst_strategy *strat,
148 unsigned long *strategy_parameter)
149{
150 *strat = PCI_DMA_BURST_INFINITY;
151 *strategy_parameter = ~0UL;
152}
153#endif
154
155#define PCI_DMA_ERROR_CODE (~(dma_addr_t)0x0)
156
157static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
158{
159 return (dma_addr == PCI_DMA_ERROR_CODE);
160}
161
162struct device_node;
163extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);
164
165#endif /* __KERNEL__ */
166
167/* generic pci stuff */
168#include <asm-generic/pci.h>
169
170#endif /* __SPARC_PCI_H */
diff --git a/include/asm-sparc/pci_32.h b/include/asm-sparc/pci_32.h
new file mode 100644
index 000000000000..0ee949d220c0
--- /dev/null
+++ b/include/asm-sparc/pci_32.h
@@ -0,0 +1,171 @@
1#ifndef __SPARC_PCI_H
2#define __SPARC_PCI_H
3
4#ifdef __KERNEL__
5
6/* Can be used to override the logic in pci_scan_bus for skipping
7 * already-configured bus numbers - to be used for buggy BIOSes
8 * or architectures with incomplete PCI setup by the loader.
9 */
10#define pcibios_assign_all_busses() 0
11#define pcibios_scan_all_fns(a, b) 0
12
13#define PCIBIOS_MIN_IO 0UL
14#define PCIBIOS_MIN_MEM 0UL
15
16#define PCI_IRQ_NONE 0xffffffff
17
18static inline void pcibios_set_master(struct pci_dev *dev)
19{
20 /* No special bus mastering setup handling */
21}
22
23static inline void pcibios_penalize_isa_irq(int irq, int active)
24{
25 /* We don't do dynamic PCI IRQ allocation */
26}
27
28/* Dynamic DMA mapping stuff.
29 */
30#define PCI_DMA_BUS_IS_PHYS (0)
31
32#include <asm/scatterlist.h>
33
34struct pci_dev;
35
36/* Allocate and map kernel buffer using consistent mode DMA for a device.
37 * hwdev should be valid struct pci_dev pointer for PCI devices.
38 */
39extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle);
40
41/* Free and unmap a consistent DMA buffer.
42 * cpu_addr is what was returned from pci_alloc_consistent,
43 * size must be the same as what was passed into pci_alloc_consistent,
44 * and likewise dma_addr must be the same as what *dma_addrp was set to.
45 *
46 * References to the memory and mappings associated with cpu_addr/dma_addr
47 * past this call are illegal.
48 */
49extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
50
51/* Map a single buffer of the indicated size for DMA in streaming mode.
52 * The 32-bit bus address to use is returned.
53 *
54 * Once the device is given the dma address, the device owns this memory
55 * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed.
56 */
57extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction);
58
59/* Unmap a single streaming mode DMA translation. The dma_addr and size
60 * must match what was provided for in a previous pci_map_single call. All
61 * other usages are undefined.
62 *
63 * After this call, reads by the cpu to the buffer are guaranteed to see
64 * whatever the device wrote there.
65 */
66extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction);
67
68/* pci_unmap_{single,page} is not a nop, thus... */
69#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
70 dma_addr_t ADDR_NAME;
71#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
72 __u32 LEN_NAME;
73#define pci_unmap_addr(PTR, ADDR_NAME) \
74 ((PTR)->ADDR_NAME)
75#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
76 (((PTR)->ADDR_NAME) = (VAL))
77#define pci_unmap_len(PTR, LEN_NAME) \
78 ((PTR)->LEN_NAME)
79#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
80 (((PTR)->LEN_NAME) = (VAL))
81
82/*
83 * Same as above, only with pages instead of mapped addresses.
84 */
85extern dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
86 unsigned long offset, size_t size, int direction);
87extern void pci_unmap_page(struct pci_dev *hwdev,
88 dma_addr_t dma_address, size_t size, int direction);
89
90/* Map a set of buffers described by scatterlist in streaming
91 * mode for DMA. This is the scatter-gather version of the
92 * above pci_map_single interface. Here the scatter gather list
93 * elements are each tagged with the appropriate dma address
94 * and length. They are obtained via sg_dma_{address,length}(SG).
95 *
96 * NOTE: An implementation may be able to use a smaller number of
97 * DMA address/length pairs than there are SG table elements.
98 * (for example via virtual mapping capabilities)
99 * The routine returns the number of addr/length pairs actually
100 * used, at most nents.
101 *
102 * Device ownership issues as mentioned above for pci_map_single are
103 * the same here.
104 */
105extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction);
106
107/* Unmap a set of streaming mode DMA translations.
108 * Again, cpu read rules concerning calls here are the same as for
109 * pci_unmap_single() above.
110 */
111extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nhwents, int direction);
112
113/* Make physical memory consistent for a single
114 * streaming mode DMA translation after a transfer.
115 *
116 * If you perform a pci_map_single() but wish to interrogate the
117 * buffer using the cpu, yet do not wish to tear down the PCI dma
118 * mapping, you must call this function before doing so. At the
119 * next point you give the PCI dma address back to the card, you
120 * must first perform a pci_dma_sync_single_for_device, and then the device
121 * again owns the buffer.
122 */
123extern void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
124extern void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
125
126/* Make physical memory consistent for a set of streaming
127 * mode DMA translations after a transfer.
128 *
129 * The same as pci_dma_sync_single_* but for a scatter-gather list,
130 * same rules and usage.
131 */
132extern void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);
133extern void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);
134
135/* Return whether the given PCI device DMA address mask can
136 * be supported properly. For example, if your device can
137 * only drive the low 24-bits during PCI bus mastering, then
138 * you would pass 0x00ffffff as the mask to this function.
139 */
140static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
141{
142 return 1;
143}
144
145#ifdef CONFIG_PCI
146static inline void pci_dma_burst_advice(struct pci_dev *pdev,
147 enum pci_dma_burst_strategy *strat,
148 unsigned long *strategy_parameter)
149{
150 *strat = PCI_DMA_BURST_INFINITY;
151 *strategy_parameter = ~0UL;
152}
153#endif
154
155#define PCI_DMA_ERROR_CODE (~(dma_addr_t)0x0)
156
157static inline int pci_dma_mapping_error(struct pci_dev *pdev,
158 dma_addr_t dma_addr)
159{
160 return (dma_addr == PCI_DMA_ERROR_CODE);
161}
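A sketch of the streaming-DMA lifecycle documented above for a single buffer; the device and buffer are placeholders supplied by the caller.

static int example_stream_to_device(struct pci_dev *pdev, void *buf, size_t len)
{
        dma_addr_t handle = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);

        if (pci_dma_mapping_error(pdev, handle))
                return -1;

        /* ... hand 'handle' to the device and wait for it to finish ... */

        pci_unmap_single(pdev, handle, len, PCI_DMA_TODEVICE);
        return 0;
}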
162
163struct device_node;
164extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);
165
166#endif /* __KERNEL__ */
167
168/* generic pci stuff */
169#include <asm-generic/pci.h>
170
171#endif /* __SPARC_PCI_H */
diff --git a/include/asm-sparc/pci_64.h b/include/asm-sparc/pci_64.h
new file mode 100644
index 000000000000..4f79a54948f6
--- /dev/null
+++ b/include/asm-sparc/pci_64.h
@@ -0,0 +1,210 @@
1#ifndef __SPARC64_PCI_H
2#define __SPARC64_PCI_H
3
4#ifdef __KERNEL__
5
6#include <linux/dma-mapping.h>
7
8/* Can be used to override the logic in pci_scan_bus for skipping
9 * already-configured bus numbers - to be used for buggy BIOSes
10 * or architectures with incomplete PCI setup by the loader.
11 */
12#define pcibios_assign_all_busses() 0
13#define pcibios_scan_all_fns(a, b) 0
14
15#define PCIBIOS_MIN_IO 0UL
16#define PCIBIOS_MIN_MEM 0UL
17
18#define PCI_IRQ_NONE 0xffffffff
19
20#define PCI_CACHE_LINE_BYTES 64
21
22static inline void pcibios_set_master(struct pci_dev *dev)
23{
24 /* No special bus mastering setup handling */
25}
26
27static inline void pcibios_penalize_isa_irq(int irq, int active)
28{
29 /* We don't do dynamic PCI IRQ allocation */
30}
31
32/* The PCI address space does not equal the physical memory
33 * address space. The networking and block device layers use
34 * this boolean for bounce buffer decisions.
35 */
36#define PCI_DMA_BUS_IS_PHYS (0)
37
38static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size,
39 dma_addr_t *dma_handle)
40{
41 return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_ATOMIC);
42}
43
44static inline void pci_free_consistent(struct pci_dev *pdev, size_t size,
45 void *vaddr, dma_addr_t dma_handle)
46{
47 return dma_free_coherent(&pdev->dev, size, vaddr, dma_handle);
48}
49
50static inline dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr,
51 size_t size, int direction)
52{
53 return dma_map_single(&pdev->dev, ptr, size,
54 (enum dma_data_direction) direction);
55}
56
57static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr,
58 size_t size, int direction)
59{
60 dma_unmap_single(&pdev->dev, dma_addr, size,
61 (enum dma_data_direction) direction);
62}
63
64#define pci_map_page(dev, page, off, size, dir) \
65 pci_map_single(dev, (page_address(page) + (off)), size, dir)
66#define pci_unmap_page(dev,addr,sz,dir) \
67 pci_unmap_single(dev,addr,sz,dir)
68
69/* pci_unmap_{single,page} is not a nop, thus... */
70#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
71 dma_addr_t ADDR_NAME;
72#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
73 __u32 LEN_NAME;
74#define pci_unmap_addr(PTR, ADDR_NAME) \
75 ((PTR)->ADDR_NAME)
76#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
77 (((PTR)->ADDR_NAME) = (VAL))
78#define pci_unmap_len(PTR, LEN_NAME) \
79 ((PTR)->LEN_NAME)
80#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
81 (((PTR)->LEN_NAME) = (VAL))
82
83static inline int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg,
84 int nents, int direction)
85{
86 return dma_map_sg(&pdev->dev, sg, nents,
87 (enum dma_data_direction) direction);
88}
89
90static inline void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg,
91 int nents, int direction)
92{
93 dma_unmap_sg(&pdev->dev, sg, nents,
94 (enum dma_data_direction) direction);
95}
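A sketch of the scatter-gather path through the wrappers above, which simply forward to the dma_* API; building and populating the scatterlist is omitted.

static void example_map_sg(struct pci_dev *pdev, struct scatterlist *sg,
                           int nents)
{
        int i, mapped = pci_map_sg(pdev, sg, nents, PCI_DMA_FROMDEVICE);

        for (i = 0; i < mapped; i++) {
                /* program sg_dma_address(&sg[i]) / sg_dma_length(&sg[i])
                 * into the device's descriptor ring here */
        }

        /* unmap with the original nents, not the value pci_map_sg() returned */
        pci_unmap_sg(pdev, sg, nents, PCI_DMA_FROMDEVICE);
}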
96
97static inline void pci_dma_sync_single_for_cpu(struct pci_dev *pdev,
98 dma_addr_t dma_handle,
99 size_t size, int direction)
100{
101 dma_sync_single_for_cpu(&pdev->dev, dma_handle, size,
102 (enum dma_data_direction) direction);
103}
104
105static inline void pci_dma_sync_single_for_device(struct pci_dev *pdev,
106 dma_addr_t dma_handle,
107 size_t size, int direction)
108{
109 /* No flushing needed to sync cpu writes to the device. */
110}
111
112static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev,
113 struct scatterlist *sg,
114 int nents, int direction)
115{
116 dma_sync_sg_for_cpu(&pdev->dev, sg, nents,
117 (enum dma_data_direction) direction);
118}
119
120static inline void pci_dma_sync_sg_for_device(struct pci_dev *pdev,
121 struct scatterlist *sg,
122 int nelems, int direction)
123{
124 /* No flushing needed to sync cpu writes to the device. */
125}
126
127/* Return whether the given PCI device DMA address mask can
128 * be supported properly. For example, if your device can
129 * only drive the low 24-bits during PCI bus mastering, then
130 * you would pass 0x00ffffff as the mask to this function.
131 */
132extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
133
134/* PCI IOMMU mapping bypass support. */
135
136/* PCI 64-bit addressing works for all slots on all controller
137 * types on sparc64. However, it requires that the device
138 * can drive enough of the 64 bits.
139 */
140#define PCI64_REQUIRED_MASK (~(dma64_addr_t)0)
141#define PCI64_ADDR_BASE 0xfffc000000000000UL
142
143static inline int pci_dma_mapping_error(struct pci_dev *pdev,
144 dma_addr_t dma_addr)
145{
146 return dma_mapping_error(&pdev->dev, dma_addr);
147}
148
149#ifdef CONFIG_PCI
150static inline void pci_dma_burst_advice(struct pci_dev *pdev,
151 enum pci_dma_burst_strategy *strat,
152 unsigned long *strategy_parameter)
153{
154 unsigned long cacheline_size;
155 u8 byte;
156
157 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
158 if (byte == 0)
159 cacheline_size = 1024;
160 else
161 cacheline_size = (int) byte * 4;
162
163 *strat = PCI_DMA_BURST_BOUNDARY;
164 *strategy_parameter = cacheline_size;
165}
166#endif
167
168/* Return the index of the PCI controller for device PDEV. */
169
170extern int pci_domain_nr(struct pci_bus *bus);
171static inline int pci_proc_domain(struct pci_bus *bus)
172{
173 return 1;
174}
175
176/* Platform support for /proc/bus/pci/X/Y mmap()s. */
177
178#define HAVE_PCI_MMAP
179#define HAVE_ARCH_PCI_GET_UNMAPPED_AREA
180#define get_pci_unmapped_area get_fb_unmapped_area
181
182extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
183 enum pci_mmap_state mmap_state,
184 int write_combine);
185
186extern void
187pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
188 struct resource *res);
189
190extern void
191pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
192 struct pci_bus_region *region);
193
194extern struct resource *pcibios_select_root(struct pci_dev *, struct resource *);
195
196static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
197{
198 return PCI_IRQ_NONE;
199}
200
201struct device_node;
202extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);
203
204#define HAVE_ARCH_PCI_RESOURCE_TO_USER
205extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
206 const struct resource *rsrc,
207 resource_size_t *start, resource_size_t *end);
208#endif /* __KERNEL__ */
209
210#endif /* __SPARC64_PCI_H */
diff --git a/include/asm-sparc/percpu.h b/include/asm-sparc/percpu.h
index 06066a7aaec3..d98ed6cf2e36 100644
--- a/include/asm-sparc/percpu.h
+++ b/include/asm-sparc/percpu.h
@@ -1,6 +1,8 @@
(new file contents, 8 lines)
1#ifndef ___ASM_SPARC_PERCPU_H
2#define ___ASM_SPARC_PERCPU_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/percpu_64.h>
5#else
6#include <asm-sparc/percpu_32.h>
7#endif
8#endif

(previous contents, removed)
1#ifndef __ARCH_SPARC_PERCPU__
2#define __ARCH_SPARC_PERCPU__
3
4#include <asm-generic/percpu.h>
5
6#endif /* __ARCH_SPARC_PERCPU__ */
diff --git a/include/asm-sparc/percpu_32.h b/include/asm-sparc/percpu_32.h
new file mode 100644
index 000000000000..06066a7aaec3
--- /dev/null
+++ b/include/asm-sparc/percpu_32.h
@@ -0,0 +1,6 @@
1#ifndef __ARCH_SPARC_PERCPU__
2#define __ARCH_SPARC_PERCPU__
3
4#include <asm-generic/percpu.h>
5
6#endif /* __ARCH_SPARC_PERCPU__ */
diff --git a/include/asm-sparc/percpu_64.h b/include/asm-sparc/percpu_64.h
new file mode 100644
index 000000000000..bee64593023e
--- /dev/null
+++ b/include/asm-sparc/percpu_64.h
@@ -0,0 +1,28 @@
1#ifndef __ARCH_SPARC64_PERCPU__
2#define __ARCH_SPARC64_PERCPU__
3
4#include <linux/compiler.h>
5
6register unsigned long __local_per_cpu_offset asm("g5");
7
8#ifdef CONFIG_SMP
9
10extern void real_setup_per_cpu_areas(void);
11
12extern unsigned long __per_cpu_base;
13extern unsigned long __per_cpu_shift;
14#define __per_cpu_offset(__cpu) \
15 (__per_cpu_base + ((unsigned long)(__cpu) << __per_cpu_shift))
16#define per_cpu_offset(x) (__per_cpu_offset(x))
17
18#define __my_cpu_offset __local_per_cpu_offset
19
20#else /* ! SMP */
21
22#define real_setup_per_cpu_areas() do { } while (0)
23
24#endif /* SMP */
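For the SMP case, an illustrative expansion of the offset macros defined above.

static unsigned long example_cpu_offset(int cpu)
{
        /* per_cpu_offset(cpu) == __per_cpu_base + (cpu << __per_cpu_shift);
         * on the local CPU the same value lives in %g5 as __my_cpu_offset.
         */
        return __per_cpu_offset(cpu);
}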
25
26#include <asm-generic/percpu.h>
27
28#endif /* __ARCH_SPARC64_PERCPU__ */
diff --git a/include/asm-sparc/pgalloc.h b/include/asm-sparc/pgalloc.h
index 681582d26969..7fa02b53d392 100644
--- a/include/asm-sparc/pgalloc.h
+++ b/include/asm-sparc/pgalloc.h
@@ -1,68 +1,8 @@
(new file contents, 8 lines)
1#ifndef ___ASM_SPARC_PGALLOC_H
2#define ___ASM_SPARC_PGALLOC_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/pgalloc_64.h>
5#else
6#include <asm-sparc/pgalloc_32.h>
7#endif
8#endif

(previous contents, removed)
1#ifndef _SPARC_PGALLOC_H
2#define _SPARC_PGALLOC_H
3
4#include <linux/kernel.h>
5#include <linux/sched.h>
6
7#include <asm/page.h>
8#include <asm/btfixup.h>
9
10struct page;
11
12extern struct pgtable_cache_struct {
13 unsigned long *pgd_cache;
14 unsigned long *pte_cache;
15 unsigned long pgtable_cache_sz;
16 unsigned long pgd_cache_sz;
17} pgt_quicklists;
18#define pgd_quicklist (pgt_quicklists.pgd_cache)
19#define pmd_quicklist ((unsigned long *)0)
20#define pte_quicklist (pgt_quicklists.pte_cache)
21#define pgtable_cache_size (pgt_quicklists.pgtable_cache_sz)
22#define pgd_cache_size (pgt_quicklists.pgd_cache_sz)
23
24extern void check_pgt_cache(void);
25BTFIXUPDEF_CALL(void, do_check_pgt_cache, int, int)
26#define do_check_pgt_cache(low,high) BTFIXUP_CALL(do_check_pgt_cache)(low,high)
27
28BTFIXUPDEF_CALL(pgd_t *, get_pgd_fast, void)
29#define get_pgd_fast() BTFIXUP_CALL(get_pgd_fast)()
30
31BTFIXUPDEF_CALL(void, free_pgd_fast, pgd_t *)
32#define free_pgd_fast(pgd) BTFIXUP_CALL(free_pgd_fast)(pgd)
33
34#define pgd_free(mm, pgd) free_pgd_fast(pgd)
35#define pgd_alloc(mm) get_pgd_fast()
36
37BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
38#define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
39#define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
40
41BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
42#define pmd_alloc_one(mm, address) BTFIXUP_CALL(pmd_alloc_one)(mm, address)
43
44BTFIXUPDEF_CALL(void, free_pmd_fast, pmd_t *)
45#define free_pmd_fast(pmd) BTFIXUP_CALL(free_pmd_fast)(pmd)
46
47#define pmd_free(mm, pmd) free_pmd_fast(pmd)
48#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd)
49
50BTFIXUPDEF_CALL(void, pmd_populate, pmd_t *, struct page *)
51#define pmd_populate(MM, PMD, PTE) BTFIXUP_CALL(pmd_populate)(PMD, PTE)
52#define pmd_pgtable(pmd) pmd_page(pmd)
53BTFIXUPDEF_CALL(void, pmd_set, pmd_t *, pte_t *)
54#define pmd_populate_kernel(MM, PMD, PTE) BTFIXUP_CALL(pmd_set)(PMD, PTE)
55
56BTFIXUPDEF_CALL(pgtable_t , pte_alloc_one, struct mm_struct *, unsigned long)
57#define pte_alloc_one(mm, address) BTFIXUP_CALL(pte_alloc_one)(mm, address)
58BTFIXUPDEF_CALL(pte_t *, pte_alloc_one_kernel, struct mm_struct *, unsigned long)
59#define pte_alloc_one_kernel(mm, addr) BTFIXUP_CALL(pte_alloc_one_kernel)(mm, addr)
60
61BTFIXUPDEF_CALL(void, free_pte_fast, pte_t *)
62#define pte_free_kernel(mm, pte) BTFIXUP_CALL(free_pte_fast)(pte)
63
64BTFIXUPDEF_CALL(void, pte_free, pgtable_t )
65#define pte_free(mm, pte) BTFIXUP_CALL(pte_free)(pte)
66#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
67
68#endif /* _SPARC_PGALLOC_H */
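
The replacement pgalloc.h above (and the matching pgtable.h further down) is just a compile-time switch: __sparc__ together with __arch64__ selects the 64-bit header, everything else falls through to the 32-bit one. A standalone sketch of the same preprocessor pattern; the VARIANT strings are invented for illustration only.

#include <stdio.h>

#if defined(__sparc__) && defined(__arch64__)
#define VARIANT "sparc64 (would include pgalloc_64.h)"
#else
#define VARIANT "sparc32 or non-sparc (would include pgalloc_32.h)"
#endif

int main(void)
{
        printf("selected variant: %s\n", VARIANT);
        return 0;
}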
diff --git a/include/asm-sparc/pgalloc_32.h b/include/asm-sparc/pgalloc_32.h
new file mode 100644
index 000000000000..681582d26969
--- /dev/null
+++ b/include/asm-sparc/pgalloc_32.h
@@ -0,0 +1,68 @@
1#ifndef _SPARC_PGALLOC_H
2#define _SPARC_PGALLOC_H
3
4#include <linux/kernel.h>
5#include <linux/sched.h>
6
7#include <asm/page.h>
8#include <asm/btfixup.h>
9
10struct page;
11
12extern struct pgtable_cache_struct {
13 unsigned long *pgd_cache;
14 unsigned long *pte_cache;
15 unsigned long pgtable_cache_sz;
16 unsigned long pgd_cache_sz;
17} pgt_quicklists;
18#define pgd_quicklist (pgt_quicklists.pgd_cache)
19#define pmd_quicklist ((unsigned long *)0)
20#define pte_quicklist (pgt_quicklists.pte_cache)
21#define pgtable_cache_size (pgt_quicklists.pgtable_cache_sz)
22#define pgd_cache_size (pgt_quicklists.pgd_cache_sz)
23
24extern void check_pgt_cache(void);
25BTFIXUPDEF_CALL(void, do_check_pgt_cache, int, int)
26#define do_check_pgt_cache(low,high) BTFIXUP_CALL(do_check_pgt_cache)(low,high)
27
28BTFIXUPDEF_CALL(pgd_t *, get_pgd_fast, void)
29#define get_pgd_fast() BTFIXUP_CALL(get_pgd_fast)()
30
31BTFIXUPDEF_CALL(void, free_pgd_fast, pgd_t *)
32#define free_pgd_fast(pgd) BTFIXUP_CALL(free_pgd_fast)(pgd)
33
34#define pgd_free(mm, pgd) free_pgd_fast(pgd)
35#define pgd_alloc(mm) get_pgd_fast()
36
37BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
38#define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
39#define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
40
41BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
42#define pmd_alloc_one(mm, address) BTFIXUP_CALL(pmd_alloc_one)(mm, address)
43
44BTFIXUPDEF_CALL(void, free_pmd_fast, pmd_t *)
45#define free_pmd_fast(pmd) BTFIXUP_CALL(free_pmd_fast)(pmd)
46
47#define pmd_free(mm, pmd) free_pmd_fast(pmd)
48#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd)
49
50BTFIXUPDEF_CALL(void, pmd_populate, pmd_t *, struct page *)
51#define pmd_populate(MM, PMD, PTE) BTFIXUP_CALL(pmd_populate)(PMD, PTE)
52#define pmd_pgtable(pmd) pmd_page(pmd)
53BTFIXUPDEF_CALL(void, pmd_set, pmd_t *, pte_t *)
54#define pmd_populate_kernel(MM, PMD, PTE) BTFIXUP_CALL(pmd_set)(PMD, PTE)
55
56BTFIXUPDEF_CALL(pgtable_t , pte_alloc_one, struct mm_struct *, unsigned long)
57#define pte_alloc_one(mm, address) BTFIXUP_CALL(pte_alloc_one)(mm, address)
58BTFIXUPDEF_CALL(pte_t *, pte_alloc_one_kernel, struct mm_struct *, unsigned long)
59#define pte_alloc_one_kernel(mm, addr) BTFIXUP_CALL(pte_alloc_one_kernel)(mm, addr)
60
61BTFIXUPDEF_CALL(void, free_pte_fast, pte_t *)
62#define pte_free_kernel(mm, pte) BTFIXUP_CALL(free_pte_fast)(pte)
63
64BTFIXUPDEF_CALL(void, pte_free, pgtable_t )
65#define pte_free(mm, pte) BTFIXUP_CALL(pte_free)(pte)
66#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
67
68#endif /* _SPARC_PGALLOC_H */
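
pgalloc_32.h routes every operation through a BTFIXUP slot so the matching sun4c or srmmu implementation can be wired in at boot. The real mechanism patches instructions in place; the sketch below only models the idea with ordinary function pointers, and all names in it are hypothetical.

#include <stdio.h>

/* One implementation per MMU flavour; stand-ins for sun4c/srmmu code. */
static void sun4c_pgd_set(void)  { puts("sun4c pgd_set"); }
static void srmmu_pgd_set(void)  { puts("srmmu pgd_set"); }

/* Model of a BTFIXUP slot: resolved once at "boot", then called through. */
static void (*pgd_set_op)(void);

static void load_mmu_ops(int is_srmmu)
{
        pgd_set_op = is_srmmu ? srmmu_pgd_set : sun4c_pgd_set;
}

#define pgd_set()       pgd_set_op()    /* loosely analogous to BTFIXUP_CALL(pgd_set)() */

int main(void)
{
        load_mmu_ops(1);        /* pretend we booted on an srmmu machine */
        pgd_set();
        return 0;
}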
diff --git a/include/asm-sparc/pgalloc_64.h b/include/asm-sparc/pgalloc_64.h
new file mode 100644
index 000000000000..5bdfa2c6e400
--- /dev/null
+++ b/include/asm-sparc/pgalloc_64.h
@@ -0,0 +1,81 @@
1#ifndef _SPARC64_PGALLOC_H
2#define _SPARC64_PGALLOC_H
3
4#include <linux/kernel.h>
5#include <linux/sched.h>
6#include <linux/mm.h>
7#include <linux/slab.h>
8#include <linux/quicklist.h>
9
10#include <asm/spitfire.h>
11#include <asm/cpudata.h>
12#include <asm/cacheflush.h>
13#include <asm/page.h>
14
15/* Page table allocation/freeing. */
16
17static inline pgd_t *pgd_alloc(struct mm_struct *mm)
18{
19 return quicklist_alloc(0, GFP_KERNEL, NULL);
20}
21
22static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
23{
24 quicklist_free(0, NULL, pgd);
25}
26
27#define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
28
29static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
30{
31 return quicklist_alloc(0, GFP_KERNEL, NULL);
32}
33
34static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
35{
36 quicklist_free(0, NULL, pmd);
37}
38
39static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
40 unsigned long address)
41{
42 return quicklist_alloc(0, GFP_KERNEL, NULL);
43}
44
45static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
46 unsigned long address)
47{
48 struct page *page;
49 void *pg;
50
51 pg = quicklist_alloc(0, GFP_KERNEL, NULL);
52 if (!pg)
53 return NULL;
54 page = virt_to_page(pg);
55 pgtable_page_ctor(page);
56 return page;
57}
58
59static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
60{
61 quicklist_free(0, NULL, pte);
62}
63
64static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
65{
66 pgtable_page_dtor(ptepage);
67 quicklist_free_page(0, NULL, ptepage);
68}
69
70
71#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE)
72#define pmd_populate(MM,PMD,PTE_PAGE) \
73 pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
74#define pmd_pgtable(pmd) pmd_page(pmd)
75
76static inline void check_pgt_cache(void)
77{
78 quicklist_trim(0, NULL, 25, 16);
79}
80
81#endif /* _SPARC64_PGALLOC_H */
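
pgalloc_64.h above feeds every page-table level from quicklist 0 and trims the cache in check_pgt_cache(). The following is a simplified userspace model of that free-list caching idea, not the kernel's quicklist implementation; the 8192-byte "page" size is an assumption.

#include <stdio.h>
#include <stdlib.h>

struct qnode { struct qnode *next; };

static struct qnode *quicklist;         /* head of cached free pages */
static unsigned int quicklist_len;

static void *ql_alloc(size_t size)
{
        if (quicklist) {                /* reuse a cached page if available */
                struct qnode *n = quicklist;
                quicklist = n->next;
                quicklist_len--;
                return n;
        }
        return calloc(1, size);         /* otherwise fall back to the allocator */
}

static void ql_free(void *p)
{
        struct qnode *n = p;            /* park the page for reuse */
        n->next = quicklist;
        quicklist = n;
        quicklist_len++;
}

static void ql_trim(unsigned int max_keep)
{
        while (quicklist_len > max_keep) {      /* analogous to quicklist_trim() */
                struct qnode *n = quicklist;
                quicklist = n->next;
                quicklist_len--;
                free(n);
        }
}

int main(void)
{
        void *pgd = ql_alloc(8192);     /* pgd_alloc()-like path */
        ql_free(pgd);                   /* pgd_free() parks the page */
        void *pmd = ql_alloc(8192);     /* pmd_alloc_one() reuses the cached page */
        printf("reused cached page: %s\n", pmd == pgd ? "yes" : "no");
        ql_free(pmd);
        ql_trim(0);                     /* check_pgt_cache()-style trim */
        return 0;
}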
diff --git a/include/asm-sparc/pgtable.h b/include/asm-sparc/pgtable.h
index 60512296b2ca..63cdef53bc52 100644
--- a/include/asm-sparc/pgtable.h
+++ b/include/asm-sparc/pgtable.h
@@ -1,475 +1,8 @@
+#ifndef ___ASM_SPARC_PGTABLE_H
+#define ___ASM_SPARC_PGTABLE_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm-sparc/pgtable_64.h>
+#else
+#include <asm-sparc/pgtable_32.h>
+#endif
+#endif
1#ifndef _SPARC_PGTABLE_H
2#define _SPARC_PGTABLE_H
3
4/* asm-sparc/pgtable.h: Defines and functions used to work
5 * with Sparc page tables.
6 *
7 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
8 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
9 */
10
11#include <asm-generic/4level-fixup.h>
12
13#include <linux/spinlock.h>
14#include <linux/swap.h>
15#include <asm/types.h>
16#ifdef CONFIG_SUN4
17#include <asm/pgtsun4.h>
18#else
19#include <asm/pgtsun4c.h>
20#endif
21#include <asm/pgtsrmmu.h>
22#include <asm/vac-ops.h>
23#include <asm/oplib.h>
24#include <asm/btfixup.h>
25#include <asm/system.h>
26
27#ifndef __ASSEMBLY__
28
29struct vm_area_struct;
30struct page;
31
32extern void load_mmu(void);
33extern unsigned long calc_highpages(void);
34
35BTFIXUPDEF_SIMM13(pgdir_shift)
36BTFIXUPDEF_SETHI(pgdir_size)
37BTFIXUPDEF_SETHI(pgdir_mask)
38
39BTFIXUPDEF_SIMM13(ptrs_per_pmd)
40BTFIXUPDEF_SIMM13(ptrs_per_pgd)
41BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
42
43#define pte_ERROR(e) __builtin_trap()
44#define pmd_ERROR(e) __builtin_trap()
45#define pgd_ERROR(e) __builtin_trap()
46
47BTFIXUPDEF_INT(page_none)
48BTFIXUPDEF_INT(page_copy)
49BTFIXUPDEF_INT(page_readonly)
50BTFIXUPDEF_INT(page_kernel)
51
52#define PMD_SHIFT SUN4C_PMD_SHIFT
53#define PMD_SIZE (1UL << PMD_SHIFT)
54#define PMD_MASK (~(PMD_SIZE-1))
55#define PMD_ALIGN(__addr) (((__addr) + ~PMD_MASK) & PMD_MASK)
56#define PGDIR_SHIFT BTFIXUP_SIMM13(pgdir_shift)
57#define PGDIR_SIZE BTFIXUP_SETHI(pgdir_size)
58#define PGDIR_MASK BTFIXUP_SETHI(pgdir_mask)
59#define PTRS_PER_PTE 1024
60#define PTRS_PER_PMD BTFIXUP_SIMM13(ptrs_per_pmd)
61#define PTRS_PER_PGD BTFIXUP_SIMM13(ptrs_per_pgd)
62#define USER_PTRS_PER_PGD BTFIXUP_SIMM13(user_ptrs_per_pgd)
63#define FIRST_USER_ADDRESS 0
64#define PTE_SIZE (PTRS_PER_PTE*4)
65
66#define PAGE_NONE __pgprot(BTFIXUP_INT(page_none))
67extern pgprot_t PAGE_SHARED;
68#define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
69#define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
70
71extern unsigned long page_kernel;
72
73#ifdef MODULE
74#define PAGE_KERNEL page_kernel
75#else
76#define PAGE_KERNEL __pgprot(BTFIXUP_INT(page_kernel))
77#endif
78
79/* Top-level page directory */
80extern pgd_t swapper_pg_dir[1024];
81
82extern void paging_init(void);
83
84/* Page table for 0-4MB for everybody, on the Sparc this
85 * holds the same as on the i386.
86 */
87extern pte_t pg0[1024];
88extern pte_t pg1[1024];
89extern pte_t pg2[1024];
90extern pte_t pg3[1024];
91
92extern unsigned long ptr_in_current_pgd;
93
94/* Here is a trick, since mmap.c need the initializer elements for
95 * protection_map[] to be constant at compile time, I set the following
96 * to all zeros. I set it to the real values after I link in the
97 * appropriate MMU page table routines at boot time.
98 */
99#define __P000 __pgprot(0)
100#define __P001 __pgprot(0)
101#define __P010 __pgprot(0)
102#define __P011 __pgprot(0)
103#define __P100 __pgprot(0)
104#define __P101 __pgprot(0)
105#define __P110 __pgprot(0)
106#define __P111 __pgprot(0)
107
108#define __S000 __pgprot(0)
109#define __S001 __pgprot(0)
110#define __S010 __pgprot(0)
111#define __S011 __pgprot(0)
112#define __S100 __pgprot(0)
113#define __S101 __pgprot(0)
114#define __S110 __pgprot(0)
115#define __S111 __pgprot(0)
116
117extern int num_contexts;
118
119/* First physical page can be anywhere, the following is needed so that
120 * va-->pa and vice versa conversions work properly without performance
121 * hit for all __pa()/__va() operations.
122 */
123extern unsigned long phys_base;
124extern unsigned long pfn_base;
125
126/*
127 * BAD_PAGETABLE is used when we need a bogus page-table, while
128 * BAD_PAGE is used for a bogus page.
129 *
130 * ZERO_PAGE is a global shared page that is always zero: used
131 * for zero-mapped memory areas etc..
132 */
133extern pte_t * __bad_pagetable(void);
134extern pte_t __bad_page(void);
135extern unsigned long empty_zero_page;
136
137#define BAD_PAGETABLE __bad_pagetable()
138#define BAD_PAGE __bad_page()
139#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
140
141/*
142 */
143BTFIXUPDEF_CALL_CONST(struct page *, pmd_page, pmd_t)
144BTFIXUPDEF_CALL_CONST(unsigned long, pgd_page_vaddr, pgd_t)
145
146#define pmd_page(pmd) BTFIXUP_CALL(pmd_page)(pmd)
147#define pgd_page_vaddr(pgd) BTFIXUP_CALL(pgd_page_vaddr)(pgd)
148
149BTFIXUPDEF_SETHI(none_mask)
150BTFIXUPDEF_CALL_CONST(int, pte_present, pte_t)
151BTFIXUPDEF_CALL(void, pte_clear, pte_t *)
152
153static inline int pte_none(pte_t pte)
154{
155 return !(pte_val(pte) & ~BTFIXUP_SETHI(none_mask));
156}
157
158#define pte_present(pte) BTFIXUP_CALL(pte_present)(pte)
159#define pte_clear(mm,addr,pte) BTFIXUP_CALL(pte_clear)(pte)
160
161BTFIXUPDEF_CALL_CONST(int, pmd_bad, pmd_t)
162BTFIXUPDEF_CALL_CONST(int, pmd_present, pmd_t)
163BTFIXUPDEF_CALL(void, pmd_clear, pmd_t *)
164
165static inline int pmd_none(pmd_t pmd)
166{
167 return !(pmd_val(pmd) & ~BTFIXUP_SETHI(none_mask));
168}
169
170#define pmd_bad(pmd) BTFIXUP_CALL(pmd_bad)(pmd)
171#define pmd_present(pmd) BTFIXUP_CALL(pmd_present)(pmd)
172#define pmd_clear(pmd) BTFIXUP_CALL(pmd_clear)(pmd)
173
174BTFIXUPDEF_CALL_CONST(int, pgd_none, pgd_t)
175BTFIXUPDEF_CALL_CONST(int, pgd_bad, pgd_t)
176BTFIXUPDEF_CALL_CONST(int, pgd_present, pgd_t)
177BTFIXUPDEF_CALL(void, pgd_clear, pgd_t *)
178
179#define pgd_none(pgd) BTFIXUP_CALL(pgd_none)(pgd)
180#define pgd_bad(pgd) BTFIXUP_CALL(pgd_bad)(pgd)
181#define pgd_present(pgd) BTFIXUP_CALL(pgd_present)(pgd)
182#define pgd_clear(pgd) BTFIXUP_CALL(pgd_clear)(pgd)
183
184/*
185 * The following only work if pte_present() is true.
186 * Undefined behaviour if not..
187 */
188BTFIXUPDEF_HALF(pte_writei)
189BTFIXUPDEF_HALF(pte_dirtyi)
190BTFIXUPDEF_HALF(pte_youngi)
191
192static int pte_write(pte_t pte) __attribute_const__;
193static inline int pte_write(pte_t pte)
194{
195 return pte_val(pte) & BTFIXUP_HALF(pte_writei);
196}
197
198static int pte_dirty(pte_t pte) __attribute_const__;
199static inline int pte_dirty(pte_t pte)
200{
201 return pte_val(pte) & BTFIXUP_HALF(pte_dirtyi);
202}
203
204static int pte_young(pte_t pte) __attribute_const__;
205static inline int pte_young(pte_t pte)
206{
207 return pte_val(pte) & BTFIXUP_HALF(pte_youngi);
208}
209
210/*
211 * The following only work if pte_present() is not true.
212 */
213BTFIXUPDEF_HALF(pte_filei)
214
215static int pte_file(pte_t pte) __attribute_const__;
216static inline int pte_file(pte_t pte)
217{
218 return pte_val(pte) & BTFIXUP_HALF(pte_filei);
219}
220
221static inline int pte_special(pte_t pte)
222{
223 return 0;
224}
225
226/*
227 */
228BTFIXUPDEF_HALF(pte_wrprotecti)
229BTFIXUPDEF_HALF(pte_mkcleani)
230BTFIXUPDEF_HALF(pte_mkoldi)
231
232static pte_t pte_wrprotect(pte_t pte) __attribute_const__;
233static inline pte_t pte_wrprotect(pte_t pte)
234{
235 return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_wrprotecti));
236}
237
238static pte_t pte_mkclean(pte_t pte) __attribute_const__;
239static inline pte_t pte_mkclean(pte_t pte)
240{
241 return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkcleani));
242}
243
244static pte_t pte_mkold(pte_t pte) __attribute_const__;
245static inline pte_t pte_mkold(pte_t pte)
246{
247 return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkoldi));
248}
249
250BTFIXUPDEF_CALL_CONST(pte_t, pte_mkwrite, pte_t)
251BTFIXUPDEF_CALL_CONST(pte_t, pte_mkdirty, pte_t)
252BTFIXUPDEF_CALL_CONST(pte_t, pte_mkyoung, pte_t)
253
254#define pte_mkwrite(pte) BTFIXUP_CALL(pte_mkwrite)(pte)
255#define pte_mkdirty(pte) BTFIXUP_CALL(pte_mkdirty)(pte)
256#define pte_mkyoung(pte) BTFIXUP_CALL(pte_mkyoung)(pte)
257
258#define pte_mkspecial(pte) (pte)
259
260#define pfn_pte(pfn, prot) mk_pte(pfn_to_page(pfn), prot)
261
262BTFIXUPDEF_CALL(unsigned long, pte_pfn, pte_t)
263#define pte_pfn(pte) BTFIXUP_CALL(pte_pfn)(pte)
264#define pte_page(pte) pfn_to_page(pte_pfn(pte))
265
266/*
267 * Conversion functions: convert a page and protection to a page entry,
268 * and a page entry and page directory to the page they refer to.
269 */
270BTFIXUPDEF_CALL_CONST(pte_t, mk_pte, struct page *, pgprot_t)
271
272BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_phys, unsigned long, pgprot_t)
273BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_io, unsigned long, pgprot_t, int)
274BTFIXUPDEF_CALL_CONST(pgprot_t, pgprot_noncached, pgprot_t)
275
276#define mk_pte(page,pgprot) BTFIXUP_CALL(mk_pte)(page,pgprot)
277#define mk_pte_phys(page,pgprot) BTFIXUP_CALL(mk_pte_phys)(page,pgprot)
278#define mk_pte_io(page,pgprot,space) BTFIXUP_CALL(mk_pte_io)(page,pgprot,space)
279
280#define pgprot_noncached(pgprot) BTFIXUP_CALL(pgprot_noncached)(pgprot)
281
282BTFIXUPDEF_INT(pte_modify_mask)
283
284static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
285static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
286{
287 return __pte((pte_val(pte) & BTFIXUP_INT(pte_modify_mask)) |
288 pgprot_val(newprot));
289}
290
291#define pgd_index(address) ((address) >> PGDIR_SHIFT)
292
293/* to find an entry in a page-table-directory */
294#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
295
296/* to find an entry in a kernel page-table-directory */
297#define pgd_offset_k(address) pgd_offset(&init_mm, address)
298
299/* Find an entry in the second-level page table.. */
300BTFIXUPDEF_CALL(pmd_t *, pmd_offset, pgd_t *, unsigned long)
301#define pmd_offset(dir,addr) BTFIXUP_CALL(pmd_offset)(dir,addr)
302
303/* Find an entry in the third-level page table.. */
304BTFIXUPDEF_CALL(pte_t *, pte_offset_kernel, pmd_t *, unsigned long)
305#define pte_offset_kernel(dir,addr) BTFIXUP_CALL(pte_offset_kernel)(dir,addr)
306
307/*
308 * This shortcut works on sun4m (and sun4d) because the nocache area is static,
309 * and sun4c is guaranteed to have no highmem anyway.
310 */
311#define pte_offset_map(d, a) pte_offset_kernel(d,a)
312#define pte_offset_map_nested(d, a) pte_offset_kernel(d,a)
313
314#define pte_unmap(pte) do{}while(0)
315#define pte_unmap_nested(pte) do{}while(0)
316
317/* Certain architectures need to do special things when pte's
318 * within a page table are directly modified. Thus, the following
319 * hook is made available.
320 */
321
322BTFIXUPDEF_CALL(void, set_pte, pte_t *, pte_t)
323
324#define set_pte(ptep,pteval) BTFIXUP_CALL(set_pte)(ptep,pteval)
325#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
326
327struct seq_file;
328BTFIXUPDEF_CALL(void, mmu_info, struct seq_file *)
329
330#define mmu_info(p) BTFIXUP_CALL(mmu_info)(p)
331
332/* Fault handler stuff... */
333#define FAULT_CODE_PROT 0x1
334#define FAULT_CODE_WRITE 0x2
335#define FAULT_CODE_USER 0x4
336
337BTFIXUPDEF_CALL(void, update_mmu_cache, struct vm_area_struct *, unsigned long, pte_t)
338
339#define update_mmu_cache(vma,addr,pte) BTFIXUP_CALL(update_mmu_cache)(vma,addr,pte)
340
341BTFIXUPDEF_CALL(void, sparc_mapiorange, unsigned int, unsigned long,
342 unsigned long, unsigned int)
343BTFIXUPDEF_CALL(void, sparc_unmapiorange, unsigned long, unsigned int)
344#define sparc_mapiorange(bus,pa,va,len) BTFIXUP_CALL(sparc_mapiorange)(bus,pa,va,len)
345#define sparc_unmapiorange(va,len) BTFIXUP_CALL(sparc_unmapiorange)(va,len)
346
347extern int invalid_segment;
348
349/* Encode and de-code a swap entry */
350BTFIXUPDEF_CALL(unsigned long, __swp_type, swp_entry_t)
351BTFIXUPDEF_CALL(unsigned long, __swp_offset, swp_entry_t)
352BTFIXUPDEF_CALL(swp_entry_t, __swp_entry, unsigned long, unsigned long)
353
354#define __swp_type(__x) BTFIXUP_CALL(__swp_type)(__x)
355#define __swp_offset(__x) BTFIXUP_CALL(__swp_offset)(__x)
356#define __swp_entry(__type,__off) BTFIXUP_CALL(__swp_entry)(__type,__off)
357
358#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
359#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
360
361/* file-offset-in-pte helpers */
362BTFIXUPDEF_CALL(unsigned long, pte_to_pgoff, pte_t pte);
363BTFIXUPDEF_CALL(pte_t, pgoff_to_pte, unsigned long pgoff);
364
365#define pte_to_pgoff(pte) BTFIXUP_CALL(pte_to_pgoff)(pte)
366#define pgoff_to_pte(off) BTFIXUP_CALL(pgoff_to_pte)(off)
367
368/*
369 * This is made a constant because mm/fremap.c required a constant.
370 * Note that layout of these bits is different between sun4c.c and srmmu.c.
371 */
372#define PTE_FILE_MAX_BITS 24
373
374/*
375 */
376struct ctx_list {
377 struct ctx_list *next;
378 struct ctx_list *prev;
379 unsigned int ctx_number;
380 struct mm_struct *ctx_mm;
381};
382
383extern struct ctx_list *ctx_list_pool; /* Dynamically allocated */
384extern struct ctx_list ctx_free; /* Head of free list */
385extern struct ctx_list ctx_used; /* Head of used contexts list */
386
387#define NO_CONTEXT -1
388
389static inline void remove_from_ctx_list(struct ctx_list *entry)
390{
391 entry->next->prev = entry->prev;
392 entry->prev->next = entry->next;
393}
394
395static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
396{
397 entry->next = head;
398 (entry->prev = head->prev)->next = entry;
399 head->prev = entry;
400}
401#define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
402#define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)
403
404static inline unsigned long
405__get_phys (unsigned long addr)
406{
407 switch (sparc_cpu_model){
408 case sun4:
409 case sun4c:
410 return sun4c_get_pte (addr) << PAGE_SHIFT;
411 case sun4m:
412 case sun4d:
413 return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
414 default:
415 return 0;
416 }
417}
418
419static inline int
420__get_iospace (unsigned long addr)
421{
422 switch (sparc_cpu_model){
423 case sun4:
424 case sun4c:
425 return -1; /* Don't check iospace on sun4c */
426 case sun4m:
427 case sun4d:
428 return (srmmu_get_pte (addr) >> 28);
429 default:
430 return -1;
431 }
432}
433
434extern unsigned long *sparc_valid_addr_bitmap;
435
436/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
437#define kern_addr_valid(addr) \
438 (test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))
439
440extern int io_remap_pfn_range(struct vm_area_struct *vma,
441 unsigned long from, unsigned long pfn,
442 unsigned long size, pgprot_t prot);
443
444/*
445 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
446 * its high 4 bits. These macros/functions put it there or get it from there.
447 */
448#define MK_IOSPACE_PFN(space, pfn) (pfn | (space << (BITS_PER_LONG - 4)))
449#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
450#define GET_PFN(pfn) (pfn & 0x0fffffffUL)
451
452#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
453#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
454({ \
455 int __changed = !pte_same(*(__ptep), __entry); \
456 if (__changed) { \
457 set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
458 flush_tlb_page(__vma, __address); \
459 } \
460 (sparc_cpu_model == sun4c) || __changed; \
461})
462
463#include <asm-generic/pgtable.h>
464
465#endif /* !(__ASSEMBLY__) */
466
467/* We provide our own get_unmapped_area to cope with VA holes for userland */
468#define HAVE_ARCH_UNMAPPED_AREA
469
470/*
471 * No page table caches to initialise
472 */
473#define pgtable_cache_init() do { } while (0)
474
475#endif /* !(_SPARC_PGTABLE_H) */
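
The MK_IOSPACE_PFN/GET_IOSPACE/GET_PFN macros above stash the 4-bit <iospace> tag in the top bits of the pfn handed to io_remap_pfn_range(). A standalone example of that packing, assuming BITS_PER_LONG is 64 (on sparc32 the same expressions apply with 32); the sample pfn and iospace values are invented.

#include <stdio.h>

#define BITS_PER_LONG 64
#define MK_IOSPACE_PFN(space, pfn)      ((pfn) | ((unsigned long)(space) << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)                ((pfn) >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)                    ((pfn) & 0x0fffffffUL)

int main(void)
{
        unsigned long pfn = 0x12345UL;
        unsigned long packed = MK_IOSPACE_PFN(0x9UL, pfn);

        printf("packed   = 0x%lx\n", packed);
        printf("iospace  = 0x%lx\n", GET_IOSPACE(packed));     /* 0x9 */
        printf("pfn back = 0x%lx\n", GET_PFN(packed));         /* 0x12345 */
        return 0;
}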
diff --git a/include/asm-sparc/pgtable_32.h b/include/asm-sparc/pgtable_32.h
new file mode 100644
index 000000000000..781bd4694a1c
--- /dev/null
+++ b/include/asm-sparc/pgtable_32.h
@@ -0,0 +1,480 @@
1#ifndef _SPARC_PGTABLE_H
2#define _SPARC_PGTABLE_H
3
4/* asm-sparc/pgtable.h: Defines and functions used to work
5 * with Sparc page tables.
6 *
7 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
8 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
9 */
10
11#ifndef __ASSEMBLY__
12#include <asm-generic/4level-fixup.h>
13
14#include <linux/spinlock.h>
15#include <linux/swap.h>
16#include <asm/types.h>
17#ifdef CONFIG_SUN4
18#include <asm/pgtsun4.h>
19#else
20#include <asm/pgtsun4c.h>
21#endif
22#include <asm/pgtsrmmu.h>
23#include <asm/vac-ops.h>
24#include <asm/oplib.h>
25#include <asm/btfixup.h>
26#include <asm/system.h>
27
28
29struct vm_area_struct;
30struct page;
31
32extern void load_mmu(void);
33extern unsigned long calc_highpages(void);
34
35BTFIXUPDEF_SIMM13(pgdir_shift)
36BTFIXUPDEF_SETHI(pgdir_size)
37BTFIXUPDEF_SETHI(pgdir_mask)
38
39BTFIXUPDEF_SIMM13(ptrs_per_pmd)
40BTFIXUPDEF_SIMM13(ptrs_per_pgd)
41BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
42
43#define pte_ERROR(e) __builtin_trap()
44#define pmd_ERROR(e) __builtin_trap()
45#define pgd_ERROR(e) __builtin_trap()
46
47BTFIXUPDEF_INT(page_none)
48BTFIXUPDEF_INT(page_copy)
49BTFIXUPDEF_INT(page_readonly)
50BTFIXUPDEF_INT(page_kernel)
51
52#define PMD_SHIFT SUN4C_PMD_SHIFT
53#define PMD_SIZE (1UL << PMD_SHIFT)
54#define PMD_MASK (~(PMD_SIZE-1))
55#define PMD_ALIGN(__addr) (((__addr) + ~PMD_MASK) & PMD_MASK)
56#define PGDIR_SHIFT BTFIXUP_SIMM13(pgdir_shift)
57#define PGDIR_SIZE BTFIXUP_SETHI(pgdir_size)
58#define PGDIR_MASK BTFIXUP_SETHI(pgdir_mask)
59#define PTRS_PER_PTE 1024
60#define PTRS_PER_PMD BTFIXUP_SIMM13(ptrs_per_pmd)
61#define PTRS_PER_PGD BTFIXUP_SIMM13(ptrs_per_pgd)
62#define USER_PTRS_PER_PGD BTFIXUP_SIMM13(user_ptrs_per_pgd)
63#define FIRST_USER_ADDRESS 0
64#define PTE_SIZE (PTRS_PER_PTE*4)
65
66#define PAGE_NONE __pgprot(BTFIXUP_INT(page_none))
67extern pgprot_t PAGE_SHARED;
68#define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
69#define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
70
71extern unsigned long page_kernel;
72
73#ifdef MODULE
74#define PAGE_KERNEL page_kernel
75#else
76#define PAGE_KERNEL __pgprot(BTFIXUP_INT(page_kernel))
77#endif
78
79/* Top-level page directory */
80extern pgd_t swapper_pg_dir[1024];
81
82extern void paging_init(void);
83
84/* Page table for 0-4MB for everybody, on the Sparc this
85 * holds the same as on the i386.
86 */
87extern pte_t pg0[1024];
88extern pte_t pg1[1024];
89extern pte_t pg2[1024];
90extern pte_t pg3[1024];
91
92extern unsigned long ptr_in_current_pgd;
93
94/* Here is a trick, since mmap.c need the initializer elements for
95 * protection_map[] to be constant at compile time, I set the following
96 * to all zeros. I set it to the real values after I link in the
97 * appropriate MMU page table routines at boot time.
98 */
99#define __P000 __pgprot(0)
100#define __P001 __pgprot(0)
101#define __P010 __pgprot(0)
102#define __P011 __pgprot(0)
103#define __P100 __pgprot(0)
104#define __P101 __pgprot(0)
105#define __P110 __pgprot(0)
106#define __P111 __pgprot(0)
107
108#define __S000 __pgprot(0)
109#define __S001 __pgprot(0)
110#define __S010 __pgprot(0)
111#define __S011 __pgprot(0)
112#define __S100 __pgprot(0)
113#define __S101 __pgprot(0)
114#define __S110 __pgprot(0)
115#define __S111 __pgprot(0)
116
117extern int num_contexts;
118
119/* First physical page can be anywhere, the following is needed so that
120 * va-->pa and vice versa conversions work properly without performance
121 * hit for all __pa()/__va() operations.
122 */
123extern unsigned long phys_base;
124extern unsigned long pfn_base;
125
126/*
127 * BAD_PAGETABLE is used when we need a bogus page-table, while
128 * BAD_PAGE is used for a bogus page.
129 *
130 * ZERO_PAGE is a global shared page that is always zero: used
131 * for zero-mapped memory areas etc..
132 */
133extern pte_t * __bad_pagetable(void);
134extern pte_t __bad_page(void);
135extern unsigned long empty_zero_page;
136
137#define BAD_PAGETABLE __bad_pagetable()
138#define BAD_PAGE __bad_page()
139#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
140
141/*
142 */
143BTFIXUPDEF_CALL_CONST(struct page *, pmd_page, pmd_t)
144BTFIXUPDEF_CALL_CONST(unsigned long, pgd_page_vaddr, pgd_t)
145
146#define pmd_page(pmd) BTFIXUP_CALL(pmd_page)(pmd)
147#define pgd_page_vaddr(pgd) BTFIXUP_CALL(pgd_page_vaddr)(pgd)
148
149BTFIXUPDEF_SETHI(none_mask)
150BTFIXUPDEF_CALL_CONST(int, pte_present, pte_t)
151BTFIXUPDEF_CALL(void, pte_clear, pte_t *)
152
153static inline int pte_none(pte_t pte)
154{
155 return !(pte_val(pte) & ~BTFIXUP_SETHI(none_mask));
156}
157
158#define pte_present(pte) BTFIXUP_CALL(pte_present)(pte)
159#define pte_clear(mm,addr,pte) BTFIXUP_CALL(pte_clear)(pte)
160
161BTFIXUPDEF_CALL_CONST(int, pmd_bad, pmd_t)
162BTFIXUPDEF_CALL_CONST(int, pmd_present, pmd_t)
163BTFIXUPDEF_CALL(void, pmd_clear, pmd_t *)
164
165static inline int pmd_none(pmd_t pmd)
166{
167 return !(pmd_val(pmd) & ~BTFIXUP_SETHI(none_mask));
168}
169
170#define pmd_bad(pmd) BTFIXUP_CALL(pmd_bad)(pmd)
171#define pmd_present(pmd) BTFIXUP_CALL(pmd_present)(pmd)
172#define pmd_clear(pmd) BTFIXUP_CALL(pmd_clear)(pmd)
173
174BTFIXUPDEF_CALL_CONST(int, pgd_none, pgd_t)
175BTFIXUPDEF_CALL_CONST(int, pgd_bad, pgd_t)
176BTFIXUPDEF_CALL_CONST(int, pgd_present, pgd_t)
177BTFIXUPDEF_CALL(void, pgd_clear, pgd_t *)
178
179#define pgd_none(pgd) BTFIXUP_CALL(pgd_none)(pgd)
180#define pgd_bad(pgd) BTFIXUP_CALL(pgd_bad)(pgd)
181#define pgd_present(pgd) BTFIXUP_CALL(pgd_present)(pgd)
182#define pgd_clear(pgd) BTFIXUP_CALL(pgd_clear)(pgd)
183
184/*
185 * The following only work if pte_present() is true.
186 * Undefined behaviour if not..
187 */
188BTFIXUPDEF_HALF(pte_writei)
189BTFIXUPDEF_HALF(pte_dirtyi)
190BTFIXUPDEF_HALF(pte_youngi)
191
192static int pte_write(pte_t pte) __attribute_const__;
193static inline int pte_write(pte_t pte)
194{
195 return pte_val(pte) & BTFIXUP_HALF(pte_writei);
196}
197
198static int pte_dirty(pte_t pte) __attribute_const__;
199static inline int pte_dirty(pte_t pte)
200{
201 return pte_val(pte) & BTFIXUP_HALF(pte_dirtyi);
202}
203
204static int pte_young(pte_t pte) __attribute_const__;
205static inline int pte_young(pte_t pte)
206{
207 return pte_val(pte) & BTFIXUP_HALF(pte_youngi);
208}
209
210/*
211 * The following only work if pte_present() is not true.
212 */
213BTFIXUPDEF_HALF(pte_filei)
214
215static int pte_file(pte_t pte) __attribute_const__;
216static inline int pte_file(pte_t pte)
217{
218 return pte_val(pte) & BTFIXUP_HALF(pte_filei);
219}
220
221static inline int pte_special(pte_t pte)
222{
223 return 0;
224}
225
226/*
227 */
228BTFIXUPDEF_HALF(pte_wrprotecti)
229BTFIXUPDEF_HALF(pte_mkcleani)
230BTFIXUPDEF_HALF(pte_mkoldi)
231
232static pte_t pte_wrprotect(pte_t pte) __attribute_const__;
233static inline pte_t pte_wrprotect(pte_t pte)
234{
235 return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_wrprotecti));
236}
237
238static pte_t pte_mkclean(pte_t pte) __attribute_const__;
239static inline pte_t pte_mkclean(pte_t pte)
240{
241 return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkcleani));
242}
243
244static pte_t pte_mkold(pte_t pte) __attribute_const__;
245static inline pte_t pte_mkold(pte_t pte)
246{
247 return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkoldi));
248}
249
250BTFIXUPDEF_CALL_CONST(pte_t, pte_mkwrite, pte_t)
251BTFIXUPDEF_CALL_CONST(pte_t, pte_mkdirty, pte_t)
252BTFIXUPDEF_CALL_CONST(pte_t, pte_mkyoung, pte_t)
253
254#define pte_mkwrite(pte) BTFIXUP_CALL(pte_mkwrite)(pte)
255#define pte_mkdirty(pte) BTFIXUP_CALL(pte_mkdirty)(pte)
256#define pte_mkyoung(pte) BTFIXUP_CALL(pte_mkyoung)(pte)
257
258#define pte_mkspecial(pte) (pte)
259
260#define pfn_pte(pfn, prot) mk_pte(pfn_to_page(pfn), prot)
261
262BTFIXUPDEF_CALL(unsigned long, pte_pfn, pte_t)
263#define pte_pfn(pte) BTFIXUP_CALL(pte_pfn)(pte)
264#define pte_page(pte) pfn_to_page(pte_pfn(pte))
265
266/*
267 * Conversion functions: convert a page and protection to a page entry,
268 * and a page entry and page directory to the page they refer to.
269 */
270BTFIXUPDEF_CALL_CONST(pte_t, mk_pte, struct page *, pgprot_t)
271
272BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_phys, unsigned long, pgprot_t)
273BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_io, unsigned long, pgprot_t, int)
274BTFIXUPDEF_CALL_CONST(pgprot_t, pgprot_noncached, pgprot_t)
275
276#define mk_pte(page,pgprot) BTFIXUP_CALL(mk_pte)(page,pgprot)
277#define mk_pte_phys(page,pgprot) BTFIXUP_CALL(mk_pte_phys)(page,pgprot)
278#define mk_pte_io(page,pgprot,space) BTFIXUP_CALL(mk_pte_io)(page,pgprot,space)
279
280#define pgprot_noncached(pgprot) BTFIXUP_CALL(pgprot_noncached)(pgprot)
281
282BTFIXUPDEF_INT(pte_modify_mask)
283
284static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
285static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
286{
287 return __pte((pte_val(pte) & BTFIXUP_INT(pte_modify_mask)) |
288 pgprot_val(newprot));
289}
290
291#define pgd_index(address) ((address) >> PGDIR_SHIFT)
292
293/* to find an entry in a page-table-directory */
294#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
295
296/* to find an entry in a kernel page-table-directory */
297#define pgd_offset_k(address) pgd_offset(&init_mm, address)
298
299/* Find an entry in the second-level page table.. */
300BTFIXUPDEF_CALL(pmd_t *, pmd_offset, pgd_t *, unsigned long)
301#define pmd_offset(dir,addr) BTFIXUP_CALL(pmd_offset)(dir,addr)
302
303/* Find an entry in the third-level page table.. */
304BTFIXUPDEF_CALL(pte_t *, pte_offset_kernel, pmd_t *, unsigned long)
305#define pte_offset_kernel(dir,addr) BTFIXUP_CALL(pte_offset_kernel)(dir,addr)
306
307/*
308 * This shortcut works on sun4m (and sun4d) because the nocache area is static,
309 * and sun4c is guaranteed to have no highmem anyway.
310 */
311#define pte_offset_map(d, a) pte_offset_kernel(d,a)
312#define pte_offset_map_nested(d, a) pte_offset_kernel(d,a)
313
314#define pte_unmap(pte) do{}while(0)
315#define pte_unmap_nested(pte) do{}while(0)
316
317/* Certain architectures need to do special things when pte's
318 * within a page table are directly modified. Thus, the following
319 * hook is made available.
320 */
321
322BTFIXUPDEF_CALL(void, set_pte, pte_t *, pte_t)
323
324#define set_pte(ptep,pteval) BTFIXUP_CALL(set_pte)(ptep,pteval)
325#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
326
327struct seq_file;
328BTFIXUPDEF_CALL(void, mmu_info, struct seq_file *)
329
330#define mmu_info(p) BTFIXUP_CALL(mmu_info)(p)
331
332/* Fault handler stuff... */
333#define FAULT_CODE_PROT 0x1
334#define FAULT_CODE_WRITE 0x2
335#define FAULT_CODE_USER 0x4
336
337BTFIXUPDEF_CALL(void, update_mmu_cache, struct vm_area_struct *, unsigned long, pte_t)
338
339#define update_mmu_cache(vma,addr,pte) BTFIXUP_CALL(update_mmu_cache)(vma,addr,pte)
340
341BTFIXUPDEF_CALL(void, sparc_mapiorange, unsigned int, unsigned long,
342 unsigned long, unsigned int)
343BTFIXUPDEF_CALL(void, sparc_unmapiorange, unsigned long, unsigned int)
344#define sparc_mapiorange(bus,pa,va,len) BTFIXUP_CALL(sparc_mapiorange)(bus,pa,va,len)
345#define sparc_unmapiorange(va,len) BTFIXUP_CALL(sparc_unmapiorange)(va,len)
346
347extern int invalid_segment;
348
349/* Encode and de-code a swap entry */
350BTFIXUPDEF_CALL(unsigned long, __swp_type, swp_entry_t)
351BTFIXUPDEF_CALL(unsigned long, __swp_offset, swp_entry_t)
352BTFIXUPDEF_CALL(swp_entry_t, __swp_entry, unsigned long, unsigned long)
353
354#define __swp_type(__x) BTFIXUP_CALL(__swp_type)(__x)
355#define __swp_offset(__x) BTFIXUP_CALL(__swp_offset)(__x)
356#define __swp_entry(__type,__off) BTFIXUP_CALL(__swp_entry)(__type,__off)
357
358#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
359#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
360
361/* file-offset-in-pte helpers */
362BTFIXUPDEF_CALL(unsigned long, pte_to_pgoff, pte_t pte);
363BTFIXUPDEF_CALL(pte_t, pgoff_to_pte, unsigned long pgoff);
364
365#define pte_to_pgoff(pte) BTFIXUP_CALL(pte_to_pgoff)(pte)
366#define pgoff_to_pte(off) BTFIXUP_CALL(pgoff_to_pte)(off)
367
368/*
369 * This is made a constant because mm/fremap.c required a constant.
370 * Note that layout of these bits is different between sun4c.c and srmmu.c.
371 */
372#define PTE_FILE_MAX_BITS 24
373
374/*
375 */
376struct ctx_list {
377 struct ctx_list *next;
378 struct ctx_list *prev;
379 unsigned int ctx_number;
380 struct mm_struct *ctx_mm;
381};
382
383extern struct ctx_list *ctx_list_pool; /* Dynamically allocated */
384extern struct ctx_list ctx_free; /* Head of free list */
385extern struct ctx_list ctx_used; /* Head of used contexts list */
386
387#define NO_CONTEXT -1
388
389static inline void remove_from_ctx_list(struct ctx_list *entry)
390{
391 entry->next->prev = entry->prev;
392 entry->prev->next = entry->next;
393}
394
395static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
396{
397 entry->next = head;
398 (entry->prev = head->prev)->next = entry;
399 head->prev = entry;
400}
401#define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
402#define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)
403
404static inline unsigned long
405__get_phys (unsigned long addr)
406{
407 switch (sparc_cpu_model){
408 case sun4:
409 case sun4c:
410 return sun4c_get_pte (addr) << PAGE_SHIFT;
411 case sun4m:
412 case sun4d:
413 return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
414 default:
415 return 0;
416 }
417}
418
419static inline int
420__get_iospace (unsigned long addr)
421{
422 switch (sparc_cpu_model){
423 case sun4:
424 case sun4c:
425 return -1; /* Don't check iospace on sun4c */
426 case sun4m:
427 case sun4d:
428 return (srmmu_get_pte (addr) >> 28);
429 default:
430 return -1;
431 }
432}
433
434extern unsigned long *sparc_valid_addr_bitmap;
435
436/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
437#define kern_addr_valid(addr) \
438 (test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))
439
440extern int io_remap_pfn_range(struct vm_area_struct *vma,
441 unsigned long from, unsigned long pfn,
442 unsigned long size, pgprot_t prot);
443
444/*
445 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
446 * its high 4 bits. These macros/functions put it there or get it from there.
447 */
448#define MK_IOSPACE_PFN(space, pfn) (pfn | (space << (BITS_PER_LONG - 4)))
449#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
450#define GET_PFN(pfn) (pfn & 0x0fffffffUL)
451
452#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
453#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
454({ \
455 int __changed = !pte_same(*(__ptep), __entry); \
456 if (__changed) { \
457 set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
458 flush_tlb_page(__vma, __address); \
459 } \
460 (sparc_cpu_model == sun4c) || __changed; \
461})
462
463#include <asm-generic/pgtable.h>
464
465#endif /* !(__ASSEMBLY__) */
466
467#define VMALLOC_START 0xfe600000
468/* XXX Alter this when I get around to fixing sun4c - Anton */
469#define VMALLOC_END 0xffc00000
470
471
472/* We provide our own get_unmapped_area to cope with VA holes for userland */
473#define HAVE_ARCH_UNMAPPED_AREA
474
475/*
476 * No page table caches to initialise
477 */
478#define pgtable_cache_init() do { } while (0)
479
480#endif /* !(_SPARC_PGTABLE_H) */
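
The ctx_list helpers above keep MMU contexts on circular doubly linked lists anchored at the dummy heads ctx_free and ctx_used. A self-contained sketch of the same pointer manipulation with a dummy head and two nodes; the init_head() helper and the main() walk are invented for illustration.

#include <stdio.h>

struct ctx_list {
        struct ctx_list *next, *prev;
        unsigned int ctx_number;
};

static void init_head(struct ctx_list *head)
{
        head->next = head->prev = head;         /* empty circular list */
}

static void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
{
        entry->next = head;                     /* same steps as the inline above */
        (entry->prev = head->prev)->next = entry;
        head->prev = entry;
}

static void remove_from_ctx_list(struct ctx_list *entry)
{
        entry->next->prev = entry->prev;
        entry->prev->next = entry->next;
}

int main(void)
{
        struct ctx_list free_head, a = { .ctx_number = 1 }, b = { .ctx_number = 2 };

        init_head(&free_head);
        add_to_ctx_list(&free_head, &a);        /* list: head <-> a */
        add_to_ctx_list(&free_head, &b);        /* list: head <-> a <-> b */
        remove_from_ctx_list(&a);               /* list: head <-> b */

        for (struct ctx_list *p = free_head.next; p != &free_head; p = p->next)
                printf("ctx %u\n", p->ctx_number);
        return 0;
}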
diff --git a/include/asm-sparc/pgtable_64.h b/include/asm-sparc/pgtable_64.h
new file mode 100644
index 000000000000..bb9ec2cce355
--- /dev/null
+++ b/include/asm-sparc/pgtable_64.h
@@ -0,0 +1,775 @@
1/*
2 * pgtable.h: SpitFire page table operations.
3 *
4 * Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 */
7
8#ifndef _SPARC64_PGTABLE_H
9#define _SPARC64_PGTABLE_H
10
11/* This file contains the functions and defines necessary to modify and use
12 * the SpitFire page tables.
13 */
14
15#include <asm-generic/pgtable-nopud.h>
16
17#include <linux/compiler.h>
18#include <linux/const.h>
19#include <asm/types.h>
20#include <asm/spitfire.h>
21#include <asm/asi.h>
22#include <asm/system.h>
23#include <asm/page.h>
24#include <asm/processor.h>
25
26/* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
27 * The page copy blockops can use 0x6000000 to 0x8000000.
28 * The TSB is mapped in the 0x8000000 to 0xa000000 range.
29 * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
30 * The vmalloc area spans 0x100000000 to 0x200000000.
31 * Since modules need to be in the lowest 32-bits of the address space,
32 * we place them right before the OBP area from 0x10000000 to 0xf0000000.
33 * There is a single static kernel PMD which maps from 0x0 to address
34 * 0x400000000.
35 */
36#define TLBTEMP_BASE _AC(0x0000000006000000,UL)
37#define TSBMAP_BASE _AC(0x0000000008000000,UL)
38#define MODULES_VADDR _AC(0x0000000010000000,UL)
39#define MODULES_LEN _AC(0x00000000e0000000,UL)
40#define MODULES_END _AC(0x00000000f0000000,UL)
41#define LOW_OBP_ADDRESS _AC(0x00000000f0000000,UL)
42#define HI_OBP_ADDRESS _AC(0x0000000100000000,UL)
43#define VMALLOC_START _AC(0x0000000100000000,UL)
44#define VMALLOC_END _AC(0x0000000200000000,UL)
45#define VMEMMAP_BASE _AC(0x0000000200000000,UL)
46
47#define vmemmap ((struct page *)VMEMMAP_BASE)
48
49/* XXX All of this needs to be rethought so we can take advantage
50 * XXX cheetah's full 64-bit virtual address space, ie. no more hole
51 * XXX in the middle like on spitfire. -DaveM
52 */
53/*
54 * Given a virtual address, the lowest PAGE_SHIFT bits determine offset
55 * into the page; the next higher PAGE_SHIFT-3 bits determine the pte#
56 * in the proper pagetable (the -3 is from the 8 byte ptes, and each page
57 * table is a single page long). The next higher PMD_BITS determine pmd#
58 * in the proper pmdtable (where we must have PMD_BITS <= (PAGE_SHIFT-2)
59 * since the pmd entries are 4 bytes, and each pmd page is a single page
60 * long). Finally, the higher few bits determine pgde#.
61 */
62
63/* PMD_SHIFT determines the size of the area a second-level page
64 * table can map
65 */
66#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3))
67#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
68#define PMD_MASK (~(PMD_SIZE-1))
69#define PMD_BITS (PAGE_SHIFT - 2)
70
71/* PGDIR_SHIFT determines what a third-level page table entry can map */
72#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3) + PMD_BITS)
73#define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT)
74#define PGDIR_MASK (~(PGDIR_SIZE-1))
75#define PGDIR_BITS (PAGE_SHIFT - 2)
76
77#ifndef __ASSEMBLY__
78
79#include <linux/sched.h>
80
81/* Entries per page directory level. */
82#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3))
83#define PTRS_PER_PMD (1UL << PMD_BITS)
84#define PTRS_PER_PGD (1UL << PGDIR_BITS)
85
86/* Kernel has a separate 44bit address space. */
87#define FIRST_USER_ADDRESS 0
88
89#define pte_ERROR(e) __builtin_trap()
90#define pmd_ERROR(e) __builtin_trap()
91#define pgd_ERROR(e) __builtin_trap()
92
93#endif /* !(__ASSEMBLY__) */
94
95/* PTE bits which are the same in SUN4U and SUN4V format. */
96#define _PAGE_VALID _AC(0x8000000000000000,UL) /* Valid TTE */
97#define _PAGE_R _AC(0x8000000000000000,UL) /* Keep ref bit uptodate*/
98
99/* SUN4U pte bits... */
100#define _PAGE_SZ4MB_4U _AC(0x6000000000000000,UL) /* 4MB Page */
101#define _PAGE_SZ512K_4U _AC(0x4000000000000000,UL) /* 512K Page */
102#define _PAGE_SZ64K_4U _AC(0x2000000000000000,UL) /* 64K Page */
103#define _PAGE_SZ8K_4U _AC(0x0000000000000000,UL) /* 8K Page */
104#define _PAGE_NFO_4U _AC(0x1000000000000000,UL) /* No Fault Only */
105#define _PAGE_IE_4U _AC(0x0800000000000000,UL) /* Invert Endianness */
106#define _PAGE_SOFT2_4U _AC(0x07FC000000000000,UL) /* Software bits, set 2 */
107#define _PAGE_RES1_4U _AC(0x0002000000000000,UL) /* Reserved */
108#define _PAGE_SZ32MB_4U _AC(0x0001000000000000,UL) /* (Panther) 32MB page */
109#define _PAGE_SZ256MB_4U _AC(0x2001000000000000,UL) /* (Panther) 256MB page */
110#define _PAGE_SZALL_4U _AC(0x6001000000000000,UL) /* All pgsz bits */
111#define _PAGE_SN_4U _AC(0x0000800000000000,UL) /* (Cheetah) Snoop */
112#define _PAGE_RES2_4U _AC(0x0000780000000000,UL) /* Reserved */
113#define _PAGE_PADDR_4U _AC(0x000007FFFFFFE000,UL) /* (Cheetah) pa[42:13] */
114#define _PAGE_SOFT_4U _AC(0x0000000000001F80,UL) /* Software bits: */
115#define _PAGE_EXEC_4U _AC(0x0000000000001000,UL) /* Executable SW bit */
116#define _PAGE_MODIFIED_4U _AC(0x0000000000000800,UL) /* Modified (dirty) */
117#define _PAGE_FILE_4U _AC(0x0000000000000800,UL) /* Pagecache page */
118#define _PAGE_ACCESSED_4U _AC(0x0000000000000400,UL) /* Accessed (ref'd) */
119#define _PAGE_READ_4U _AC(0x0000000000000200,UL) /* Readable SW Bit */
120#define _PAGE_WRITE_4U _AC(0x0000000000000100,UL) /* Writable SW Bit */
121#define _PAGE_PRESENT_4U _AC(0x0000000000000080,UL) /* Present */
122#define _PAGE_L_4U _AC(0x0000000000000040,UL) /* Locked TTE */
123#define _PAGE_CP_4U _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */
124#define _PAGE_CV_4U _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */
125#define _PAGE_E_4U _AC(0x0000000000000008,UL) /* side-Effect */
126#define _PAGE_P_4U _AC(0x0000000000000004,UL) /* Privileged Page */
127#define _PAGE_W_4U _AC(0x0000000000000002,UL) /* Writable */
128
129/* SUN4V pte bits... */
130#define _PAGE_NFO_4V _AC(0x4000000000000000,UL) /* No Fault Only */
131#define _PAGE_SOFT2_4V _AC(0x3F00000000000000,UL) /* Software bits, set 2 */
132#define _PAGE_MODIFIED_4V _AC(0x2000000000000000,UL) /* Modified (dirty) */
133#define _PAGE_ACCESSED_4V _AC(0x1000000000000000,UL) /* Accessed (ref'd) */
134#define _PAGE_READ_4V _AC(0x0800000000000000,UL) /* Readable SW Bit */
135#define _PAGE_WRITE_4V _AC(0x0400000000000000,UL) /* Writable SW Bit */
136#define _PAGE_PADDR_4V _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13] */
137#define _PAGE_IE_4V _AC(0x0000000000001000,UL) /* Invert Endianness */
138#define _PAGE_E_4V _AC(0x0000000000000800,UL) /* side-Effect */
139#define _PAGE_CP_4V _AC(0x0000000000000400,UL) /* Cacheable in P-Cache */
140#define _PAGE_CV_4V _AC(0x0000000000000200,UL) /* Cacheable in V-Cache */
141#define _PAGE_P_4V _AC(0x0000000000000100,UL) /* Privileged Page */
142#define _PAGE_EXEC_4V _AC(0x0000000000000080,UL) /* Executable Page */
143#define _PAGE_W_4V _AC(0x0000000000000040,UL) /* Writable */
144#define _PAGE_SOFT_4V _AC(0x0000000000000030,UL) /* Software bits */
145#define _PAGE_FILE_4V _AC(0x0000000000000020,UL) /* Pagecache page */
146#define _PAGE_PRESENT_4V _AC(0x0000000000000010,UL) /* Present */
147#define _PAGE_RESV_4V _AC(0x0000000000000008,UL) /* Reserved */
148#define _PAGE_SZ16GB_4V _AC(0x0000000000000007,UL) /* 16GB Page */
149#define _PAGE_SZ2GB_4V _AC(0x0000000000000006,UL) /* 2GB Page */
150#define _PAGE_SZ256MB_4V _AC(0x0000000000000005,UL) /* 256MB Page */
151#define _PAGE_SZ32MB_4V _AC(0x0000000000000004,UL) /* 32MB Page */
152#define _PAGE_SZ4MB_4V _AC(0x0000000000000003,UL) /* 4MB Page */
153#define _PAGE_SZ512K_4V _AC(0x0000000000000002,UL) /* 512K Page */
154#define _PAGE_SZ64K_4V _AC(0x0000000000000001,UL) /* 64K Page */
155#define _PAGE_SZ8K_4V _AC(0x0000000000000000,UL) /* 8K Page */
156#define _PAGE_SZALL_4V _AC(0x0000000000000007,UL) /* All pgsz bits */
157
158#if PAGE_SHIFT == 13
159#define _PAGE_SZBITS_4U _PAGE_SZ8K_4U
160#define _PAGE_SZBITS_4V _PAGE_SZ8K_4V
161#elif PAGE_SHIFT == 16
162#define _PAGE_SZBITS_4U _PAGE_SZ64K_4U
163#define _PAGE_SZBITS_4V _PAGE_SZ64K_4V
164#else
165#error Wrong PAGE_SHIFT specified
166#endif
167
168#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
169#define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U
170#define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V
171#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
172#define _PAGE_SZHUGE_4U _PAGE_SZ512K_4U
173#define _PAGE_SZHUGE_4V _PAGE_SZ512K_4V
174#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
175#define _PAGE_SZHUGE_4U _PAGE_SZ64K_4U
176#define _PAGE_SZHUGE_4V _PAGE_SZ64K_4V
177#endif
178
179/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
180#define __P000 __pgprot(0)
181#define __P001 __pgprot(0)
182#define __P010 __pgprot(0)
183#define __P011 __pgprot(0)
184#define __P100 __pgprot(0)
185#define __P101 __pgprot(0)
186#define __P110 __pgprot(0)
187#define __P111 __pgprot(0)
188
189#define __S000 __pgprot(0)
190#define __S001 __pgprot(0)
191#define __S010 __pgprot(0)
192#define __S011 __pgprot(0)
193#define __S100 __pgprot(0)
194#define __S101 __pgprot(0)
195#define __S110 __pgprot(0)
196#define __S111 __pgprot(0)
197
198#ifndef __ASSEMBLY__
199
200extern pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);
201
202extern unsigned long pte_sz_bits(unsigned long size);
203
204extern pgprot_t PAGE_KERNEL;
205extern pgprot_t PAGE_KERNEL_LOCKED;
206extern pgprot_t PAGE_COPY;
207extern pgprot_t PAGE_SHARED;
208
209/* XXX This uglyness is for the atyfb driver's sparc mmap() support. XXX */
210extern unsigned long _PAGE_IE;
211extern unsigned long _PAGE_E;
212extern unsigned long _PAGE_CACHE;
213
214extern unsigned long pg_iobits;
215extern unsigned long _PAGE_ALL_SZ_BITS;
216extern unsigned long _PAGE_SZBITS;
217
218extern struct page *mem_map_zero;
219#define ZERO_PAGE(vaddr) (mem_map_zero)
220
221/* PFNs are real physical page numbers. However, mem_map only begins to record
222 * per-page information starting at pfn_base. This is to handle systems where
223 * the first physical page in the machine is at some huge physical address,
224 * such as 4GB. This is common on a partitioned E10000, for example.
225 */
226static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
227{
228 unsigned long paddr = pfn << PAGE_SHIFT;
229 unsigned long sz_bits;
230
231 sz_bits = 0UL;
232 if (_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL) {
233 __asm__ __volatile__(
234 "\n661: sethi %%uhi(%1), %0\n"
235 " sllx %0, 32, %0\n"
236 " .section .sun4v_2insn_patch, \"ax\"\n"
237 " .word 661b\n"
238 " mov %2, %0\n"
239 " nop\n"
240 " .previous\n"
241 : "=r" (sz_bits)
242 : "i" (_PAGE_SZBITS_4U), "i" (_PAGE_SZBITS_4V));
243 }
244 return __pte(paddr | sz_bits | pgprot_val(prot));
245}
246#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
247
248/* This one can be done with two shifts. */
249static inline unsigned long pte_pfn(pte_t pte)
250{
251 unsigned long ret;
252
253 __asm__ __volatile__(
254 "\n661: sllx %1, %2, %0\n"
255 " srlx %0, %3, %0\n"
256 " .section .sun4v_2insn_patch, \"ax\"\n"
257 " .word 661b\n"
258 " sllx %1, %4, %0\n"
259 " srlx %0, %5, %0\n"
260 " .previous\n"
261 : "=r" (ret)
262 : "r" (pte_val(pte)),
263 "i" (21), "i" (21 + PAGE_SHIFT),
264 "i" (8), "i" (8 + PAGE_SHIFT));
265
266 return ret;
267}
268#define pte_page(x) pfn_to_page(pte_pfn(x))
269
270static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
271{
272 unsigned long mask, tmp;
273
274 /* SUN4U: 0x600307ffffffecb8 (negated == 0x9ffcf80000001347)
275 * SUN4V: 0x30ffffffffffee17 (negated == 0xcf000000000011e8)
276 *
277 * Even if we use negation tricks the result is still a 6
278 * instruction sequence, so don't try to play fancy and just
279 * do the most straightforward implementation.
280 *
281 * Note: We encode this into 3 sun4v 2-insn patch sequences.
282 */
283
284 __asm__ __volatile__(
285 "\n661: sethi %%uhi(%2), %1\n"
286 " sethi %%hi(%2), %0\n"
287 "\n662: or %1, %%ulo(%2), %1\n"
288 " or %0, %%lo(%2), %0\n"
289 "\n663: sllx %1, 32, %1\n"
290 " or %0, %1, %0\n"
291 " .section .sun4v_2insn_patch, \"ax\"\n"
292 " .word 661b\n"
293 " sethi %%uhi(%3), %1\n"
294 " sethi %%hi(%3), %0\n"
295 " .word 662b\n"
296 " or %1, %%ulo(%3), %1\n"
297 " or %0, %%lo(%3), %0\n"
298 " .word 663b\n"
299 " sllx %1, 32, %1\n"
300 " or %0, %1, %0\n"
301 " .previous\n"
302 : "=r" (mask), "=r" (tmp)
303 : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
304 _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_PRESENT_4U |
305 _PAGE_SZBITS_4U),
306 "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
307 _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_PRESENT_4V |
308 _PAGE_SZBITS_4V));
309
310 return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
311}
312
313static inline pte_t pgoff_to_pte(unsigned long off)
314{
315 off <<= PAGE_SHIFT;
316
317 __asm__ __volatile__(
318 "\n661: or %0, %2, %0\n"
319 " .section .sun4v_1insn_patch, \"ax\"\n"
320 " .word 661b\n"
321 " or %0, %3, %0\n"
322 " .previous\n"
323 : "=r" (off)
324 : "0" (off), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V));
325
326 return __pte(off);
327}
328
329static inline pgprot_t pgprot_noncached(pgprot_t prot)
330{
331 unsigned long val = pgprot_val(prot);
332
333 __asm__ __volatile__(
334 "\n661: andn %0, %2, %0\n"
335 " or %0, %3, %0\n"
336 " .section .sun4v_2insn_patch, \"ax\"\n"
337 " .word 661b\n"
338 " andn %0, %4, %0\n"
339 " or %0, %5, %0\n"
340 " .previous\n"
341 : "=r" (val)
342 : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U),
343 "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V));
344
345 return __pgprot(val);
346}
347/* Various pieces of code check for platform support by ifdef testing
348 * on "pgprot_noncached". That's broken and should be fixed, but for
349 * now...
350 */
351#define pgprot_noncached pgprot_noncached
352
353#ifdef CONFIG_HUGETLB_PAGE
354static inline pte_t pte_mkhuge(pte_t pte)
355{
356 unsigned long mask;
357
358 __asm__ __volatile__(
359 "\n661: sethi %%uhi(%1), %0\n"
360 " sllx %0, 32, %0\n"
361 " .section .sun4v_2insn_patch, \"ax\"\n"
362 " .word 661b\n"
363 " mov %2, %0\n"
364 " nop\n"
365 " .previous\n"
366 : "=r" (mask)
367 : "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));
368
369 return __pte(pte_val(pte) | mask);
370}
371#endif
372
373static inline pte_t pte_mkdirty(pte_t pte)
374{
375 unsigned long val = pte_val(pte), tmp;
376
377 __asm__ __volatile__(
378 "\n661: or %0, %3, %0\n"
379 " nop\n"
380 "\n662: nop\n"
381 " nop\n"
382 " .section .sun4v_2insn_patch, \"ax\"\n"
383 " .word 661b\n"
384 " sethi %%uhi(%4), %1\n"
385 " sllx %1, 32, %1\n"
386 " .word 662b\n"
387 " or %1, %%lo(%4), %1\n"
388 " or %0, %1, %0\n"
389 " .previous\n"
390 : "=r" (val), "=r" (tmp)
391 : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
392 "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));
393
394 return __pte(val);
395}
396
397static inline pte_t pte_mkclean(pte_t pte)
398{
399 unsigned long val = pte_val(pte), tmp;
400
401 __asm__ __volatile__(
402 "\n661: andn %0, %3, %0\n"
403 " nop\n"
404 "\n662: nop\n"
405 " nop\n"
406 " .section .sun4v_2insn_patch, \"ax\"\n"
407 " .word 661b\n"
408 " sethi %%uhi(%4), %1\n"
409 " sllx %1, 32, %1\n"
410 " .word 662b\n"
411 " or %1, %%lo(%4), %1\n"
412 " andn %0, %1, %0\n"
413 " .previous\n"
414 : "=r" (val), "=r" (tmp)
415 : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
416 "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));
417
418 return __pte(val);
419}
420
421static inline pte_t pte_mkwrite(pte_t pte)
422{
423 unsigned long val = pte_val(pte), mask;
424
425 __asm__ __volatile__(
426 "\n661: mov %1, %0\n"
427 " nop\n"
428 " .section .sun4v_2insn_patch, \"ax\"\n"
429 " .word 661b\n"
430 " sethi %%uhi(%2), %0\n"
431 " sllx %0, 32, %0\n"
432 " .previous\n"
433 : "=r" (mask)
434 : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));
435
436 return __pte(val | mask);
437}
438
439static inline pte_t pte_wrprotect(pte_t pte)
440{
441 unsigned long val = pte_val(pte), tmp;
442
443 __asm__ __volatile__(
444 "\n661: andn %0, %3, %0\n"
445 " nop\n"
446 "\n662: nop\n"
447 " nop\n"
448 " .section .sun4v_2insn_patch, \"ax\"\n"
449 " .word 661b\n"
450 " sethi %%uhi(%4), %1\n"
451 " sllx %1, 32, %1\n"
452 " .word 662b\n"
453 " or %1, %%lo(%4), %1\n"
454 " andn %0, %1, %0\n"
455 " .previous\n"
456 : "=r" (val), "=r" (tmp)
457 : "0" (val), "i" (_PAGE_WRITE_4U | _PAGE_W_4U),
458 "i" (_PAGE_WRITE_4V | _PAGE_W_4V));
459
460 return __pte(val);
461}
462
463static inline pte_t pte_mkold(pte_t pte)
464{
465 unsigned long mask;
466
467 __asm__ __volatile__(
468 "\n661: mov %1, %0\n"
469 " nop\n"
470 " .section .sun4v_2insn_patch, \"ax\"\n"
471 " .word 661b\n"
472 " sethi %%uhi(%2), %0\n"
473 " sllx %0, 32, %0\n"
474 " .previous\n"
475 : "=r" (mask)
476 : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
477
478 mask |= _PAGE_R;
479
480 return __pte(pte_val(pte) & ~mask);
481}
482
483static inline pte_t pte_mkyoung(pte_t pte)
484{
485 unsigned long mask;
486
487 __asm__ __volatile__(
488 "\n661: mov %1, %0\n"
489 " nop\n"
490 " .section .sun4v_2insn_patch, \"ax\"\n"
491 " .word 661b\n"
492 " sethi %%uhi(%2), %0\n"
493 " sllx %0, 32, %0\n"
494 " .previous\n"
495 : "=r" (mask)
496 : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
497
498 mask |= _PAGE_R;
499
500 return __pte(pte_val(pte) | mask);
501}
502
503static inline pte_t pte_mkspecial(pte_t pte)
504{
505 return pte;
506}
507
508static inline unsigned long pte_young(pte_t pte)
509{
510 unsigned long mask;
511
512 __asm__ __volatile__(
513 "\n661: mov %1, %0\n"
514 " nop\n"
515 " .section .sun4v_2insn_patch, \"ax\"\n"
516 " .word 661b\n"
517 " sethi %%uhi(%2), %0\n"
518 " sllx %0, 32, %0\n"
519 " .previous\n"
520 : "=r" (mask)
521 : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));
522
523 return (pte_val(pte) & mask);
524}
525
526static inline unsigned long pte_dirty(pte_t pte)
527{
528 unsigned long mask;
529
530 __asm__ __volatile__(
531 "\n661: mov %1, %0\n"
532 " nop\n"
533 " .section .sun4v_2insn_patch, \"ax\"\n"
534 " .word 661b\n"
535 " sethi %%uhi(%2), %0\n"
536 " sllx %0, 32, %0\n"
537 " .previous\n"
538 : "=r" (mask)
539 : "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V));
540
541 return (pte_val(pte) & mask);
542}
543
544static inline unsigned long pte_write(pte_t pte)
545{
546 unsigned long mask;
547
548 __asm__ __volatile__(
549 "\n661: mov %1, %0\n"
550 " nop\n"
551 " .section .sun4v_2insn_patch, \"ax\"\n"
552 " .word 661b\n"
553 " sethi %%uhi(%2), %0\n"
554 " sllx %0, 32, %0\n"
555 " .previous\n"
556 : "=r" (mask)
557 : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));
558
559 return (pte_val(pte) & mask);
560}
561
562static inline unsigned long pte_exec(pte_t pte)
563{
564 unsigned long mask;
565
566 __asm__ __volatile__(
567 "\n661: sethi %%hi(%1), %0\n"
568 " .section .sun4v_1insn_patch, \"ax\"\n"
569 " .word 661b\n"
570 " mov %2, %0\n"
571 " .previous\n"
572 : "=r" (mask)
573 : "i" (_PAGE_EXEC_4U), "i" (_PAGE_EXEC_4V));
574
575 return (pte_val(pte) & mask);
576}
577
578static inline unsigned long pte_file(pte_t pte)
579{
580 unsigned long val = pte_val(pte);
581
582 __asm__ __volatile__(
583 "\n661: and %0, %2, %0\n"
584 " .section .sun4v_1insn_patch, \"ax\"\n"
585 " .word 661b\n"
586 " and %0, %3, %0\n"
587 " .previous\n"
588 : "=r" (val)
589 : "0" (val), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V));
590
591 return val;
592}
593
594static inline unsigned long pte_present(pte_t pte)
595{
596 unsigned long val = pte_val(pte);
597
598 __asm__ __volatile__(
599 "\n661: and %0, %2, %0\n"
600 " .section .sun4v_1insn_patch, \"ax\"\n"
601 " .word 661b\n"
602 " and %0, %3, %0\n"
603 " .previous\n"
604 : "=r" (val)
605 : "0" (val), "i" (_PAGE_PRESENT_4U), "i" (_PAGE_PRESENT_4V));
606
607 return val;
608}
609
610static inline int pte_special(pte_t pte)
611{
612 return 0;
613}
614
615#define pmd_set(pmdp, ptep) \
616 (pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL))
617#define pud_set(pudp, pmdp) \
618 (pud_val(*(pudp)) = (__pa((unsigned long) (pmdp)) >> 11UL))
619#define __pmd_page(pmd) \
620 ((unsigned long) __va((((unsigned long)pmd_val(pmd))<<11UL)))
621#define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd))
622#define pud_page_vaddr(pud) \
623 ((unsigned long) __va((((unsigned long)pud_val(pud))<<11UL)))
624#define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud))
625#define pmd_none(pmd) (!pmd_val(pmd))
626#define pmd_bad(pmd) (0)
627#define pmd_present(pmd) (pmd_val(pmd) != 0U)
628#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0U)
629#define pud_none(pud) (!pud_val(pud))
630#define pud_bad(pud) (0)
631#define pud_present(pud) (pud_val(pud) != 0U)
632#define pud_clear(pudp) (pud_val(*(pudp)) = 0U)
633
634/* Same in both SUN4V and SUN4U. */
635#define pte_none(pte) (!pte_val(pte))
636
637/* to find an entry in a page-table-directory. */
638#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
639#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
640
641/* to find an entry in a kernel page-table-directory */
642#define pgd_offset_k(address) pgd_offset(&init_mm, address)
643
644/* Find an entry in the second-level page table.. */
645#define pmd_offset(pudp, address) \
646 ((pmd_t *) pud_page_vaddr(*(pudp)) + \
647 (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)))
648
649/* Find an entry in the third-level page table.. */
650#define pte_index(dir, address) \
651 ((pte_t *) __pmd_page(*(dir)) + \
652 ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
653#define pte_offset_kernel pte_index
654#define pte_offset_map pte_index
655#define pte_offset_map_nested pte_index
656#define pte_unmap(pte) do { } while (0)
657#define pte_unmap_nested(pte) do { } while (0)
658
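The pgd_index()/pmd_offset()/pte_index() macros above all reduce to shift-and-mask arithmetic on the virtual address. A stand-alone illustration with made-up shift and table-size values (the real PGDIR_SHIFT/PMD_SHIFT/PTRS_PER_* for this layout are defined earlier in the header and are not reproduced here):

        #include <stdio.h>

        /* Illustrative 3-level split: the EX_* values below are examples only. */
        #define EX_PAGE_SHIFT    13UL
        #define EX_PMD_SHIFT     23UL
        #define EX_PGDIR_SHIFT   33UL
        #define EX_PTRS_PER_PTE  1024UL
        #define EX_PTRS_PER_PMD  1024UL
        #define EX_PTRS_PER_PGD  1024UL

        int main(void)
        {
                unsigned long addr = 0x0000001234567000UL;

                /* Each level indexes its table with a different slice of addr. */
                printf("pgd=%lu pmd=%lu pte=%lu\n",
                       (addr >> EX_PGDIR_SHIFT) & (EX_PTRS_PER_PGD - 1),
                       (addr >> EX_PMD_SHIFT) & (EX_PTRS_PER_PMD - 1),
                       (addr >> EX_PAGE_SHIFT) & (EX_PTRS_PER_PTE - 1));
                return 0;
        }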
659/* Actual page table PTE updates. */
660extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig);
661
662static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
663{
664 pte_t orig = *ptep;
665
666 *ptep = pte;
667
668 /* It is more efficient to let flush_tlb_kernel_range()
669 * handle init_mm tlb flushes.
670 *
671 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
672 * and SUN4V pte layout, so this inline test is fine.
673 */
674 if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID))
675 tlb_batch_add(mm, addr, ptep, orig);
676}
677
678#define pte_clear(mm,addr,ptep) \
679 set_pte_at((mm), (addr), (ptep), __pte(0UL))
680
681#ifdef DCACHE_ALIASING_POSSIBLE
682#define __HAVE_ARCH_MOVE_PTE
683#define move_pte(pte, prot, old_addr, new_addr) \
684({ \
685 pte_t newpte = (pte); \
686 if (tlb_type != hypervisor && pte_present(pte)) { \
687 unsigned long this_pfn = pte_pfn(pte); \
688 \
689 if (pfn_valid(this_pfn) && \
690 (((old_addr) ^ (new_addr)) & (1 << 13))) \
691 flush_dcache_page_all(current->mm, \
692 pfn_to_page(this_pfn)); \
693 } \
694 newpte; \
695})
696#endif
697
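The ((old_addr) ^ (new_addr)) & (1 << 13) test in move_pte() above asks whether the remap changes D-cache colour: with 8K pages, bit 13 is the lowest virtual-address bit that selects the alias in a larger virtually-indexed D-cache, so only a colour change needs the flush. A small stand-alone version of that predicate (the helper name and exact cache geometry are illustrative):

        #include <stdio.h>

        #define DCACHE_COLOR_BIT (1UL << 13)    /* first bit above the 8K page offset */

        /* Nonzero when the old and new mappings land in different colours,
         * i.e. when a remap would have to flush the aliasing D-cache.
         */
        static int changes_dcache_color(unsigned long old_addr, unsigned long new_addr)
        {
                return ((old_addr ^ new_addr) & DCACHE_COLOR_BIT) != 0;
        }

        int main(void)
        {
                printf("%d %d\n",
                       changes_dcache_color(0x0000, 0x2000),   /* 1: colour changes */
                       changes_dcache_color(0x0000, 0x4000));  /* 0: same colour */
                return 0;
        }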
698extern pgd_t swapper_pg_dir[2048];
699extern pmd_t swapper_low_pmd_dir[2048];
700
701extern void paging_init(void);
702extern unsigned long find_ecache_flush_span(unsigned long size);
703
704/* These do nothing with the way I have things setup. */
705#define mmu_lockarea(vaddr, len) (vaddr)
706#define mmu_unlockarea(vaddr, len) do { } while(0)
707
708struct vm_area_struct;
709extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
710
711/* Encode and de-code a swap entry */
712#define __swp_type(entry) (((entry).val >> PAGE_SHIFT) & 0xffUL)
713#define __swp_offset(entry) ((entry).val >> (PAGE_SHIFT + 8UL))
714#define __swp_entry(type, offset) \
715 ( (swp_entry_t) \
716 { \
717 (((long)(type) << PAGE_SHIFT) | \
718 ((long)(offset) << (PAGE_SHIFT + 8UL))) \
719 } )
720#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
721#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
722
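The __swp_type()/__swp_offset()/__swp_entry() macros above pack the swap type into the 8 bits just above PAGE_SHIFT and the offset above that. A user-space sketch of the same bit layout, assuming PAGE_SHIFT == 13 (the 8K page size used here); the function names are illustrative:

        #include <stdio.h>

        #define EX_PAGE_SHIFT 13UL      /* 8K pages */

        /* Pack: type in the 8 bits above EX_PAGE_SHIFT, offset above those. */
        static unsigned long swp_entry(unsigned long type, unsigned long offset)
        {
                return (type << EX_PAGE_SHIFT) | (offset << (EX_PAGE_SHIFT + 8UL));
        }

        static unsigned long swp_type(unsigned long val)
        {
                return (val >> EX_PAGE_SHIFT) & 0xffUL;
        }

        static unsigned long swp_offset(unsigned long val)
        {
                return val >> (EX_PAGE_SHIFT + 8UL);
        }

        int main(void)
        {
                unsigned long e = swp_entry(3, 0x12345);

                /* Round-trips: prints "type=3 offset=12345" */
                printf("type=%lu offset=%lx\n", swp_type(e), swp_offset(e));
                return 0;
        }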
723/* File offset in PTE support. */
724extern unsigned long pte_file(pte_t);
725#define pte_to_pgoff(pte) (pte_val(pte) >> PAGE_SHIFT)
726extern pte_t pgoff_to_pte(unsigned long);
727#define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL)
728
729extern unsigned long *sparc64_valid_addr_bitmap;
730
731/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
732#define kern_addr_valid(addr) \
733 (test_bit(__pa((unsigned long)(addr))>>22, sparc64_valid_addr_bitmap))
734
735extern int page_in_phys_avail(unsigned long paddr);
736
737extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
738 unsigned long pfn,
739 unsigned long size, pgprot_t prot);
740
741/*
742 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
743 * its high 4 bits. These macros/functions put it there or get it from there.
744 */
745#define MK_IOSPACE_PFN(space, pfn) (pfn | (space << (BITS_PER_LONG - 4)))
746#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
747#define GET_PFN(pfn) (pfn & 0x0fffffffffffffffUL)
748
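MK_IOSPACE_PFN()/GET_IOSPACE()/GET_PFN() above keep a 4-bit I/O space id in the top of a 64-bit pfn. A quick user-space check of the packing, with BITS_PER_LONG taken as 64 and the macros re-stated with extra parentheses for stand-alone use:

        #include <stdio.h>

        #define EX_BITS_PER_LONG 64
        #define EX_MK_IOSPACE_PFN(space, pfn) \
                ((pfn) | ((unsigned long)(space) << (EX_BITS_PER_LONG - 4)))
        #define EX_GET_IOSPACE(pfn)  ((pfn) >> (EX_BITS_PER_LONG - 4))
        #define EX_GET_PFN(pfn)      ((pfn) & 0x0fffffffffffffffUL)

        int main(void)
        {
                unsigned long v = EX_MK_IOSPACE_PFN(0xaUL, 0x12345UL);

                /* Prints "iospace=a pfn=12345" */
                printf("iospace=%lx pfn=%lx\n", EX_GET_IOSPACE(v), EX_GET_PFN(v));
                return 0;
        }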
749#include <asm-generic/pgtable.h>
750
751/* We provide our own get_unmapped_area to cope with VA holes and
752 * SHM area cache aliasing for userland.
753 */
754#define HAVE_ARCH_UNMAPPED_AREA
755#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
756
757/* We provide a special get_unmapped_area for framebuffer mmaps to try and use
 758 * the largest alignment possible such that larger PTEs can be used.
759 */
760extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
761 unsigned long, unsigned long,
762 unsigned long);
763#define HAVE_ARCH_FB_UNMAPPED_AREA
764
765extern void pgtable_cache_init(void);
766extern void sun4v_register_fault_status(void);
767extern void sun4v_ktsb_register(void);
768extern void __init cheetah_ecache_flush_init(void);
769extern void sun4v_patch_tlb_handlers(void);
770
771extern unsigned long cmdline_memory_size;
772
773#endif /* !(__ASSEMBLY__) */
774
775#endif /* !(_SPARC64_PGTABLE_H) */
diff --git a/include/asm-sparc/pil.h b/include/asm-sparc/pil.h
new file mode 100644
index 000000000000..71819bb943fc
--- /dev/null
+++ b/include/asm-sparc/pil.h
@@ -0,0 +1,22 @@
1#ifndef _SPARC64_PIL_H
2#define _SPARC64_PIL_H
3
4/* To avoid some locking problems, we hard allocate certain PILs
 5 * for SMP cross call messages that must do an etrap/rtrap.
6 *
7 * A local_irq_disable() does not block the cross call delivery, so
8 * when SMP locking is an issue we reschedule the event into a PIL
9 * interrupt which is blocked by local_irq_disable().
10 *
11 * In fact any XCALL which has to etrap/rtrap has a problem because
12 * it is difficult to prevent rtrap from running BH's, and that would
13 * need to be done if the XCALL arrived while %pil==15.
14 */
15#define PIL_SMP_CALL_FUNC 1
16#define PIL_SMP_RECEIVE_SIGNAL 2
17#define PIL_SMP_CAPTURE 3
18#define PIL_SMP_CTX_NEW_VERSION 4
19#define PIL_DEVICE_IRQ 5
20#define PIL_SMP_CALL_FUNC_SNGL 6
21
22#endif /* !(_SPARC64_PIL_H) */
diff --git a/include/asm-sparc/posix_types.h b/include/asm-sparc/posix_types.h
index dcc07eb5e181..58c820d75e83 100644
--- a/include/asm-sparc/posix_types.h
+++ b/include/asm-sparc/posix_types.h
@@ -1,118 +1,8 @@
 1#ifndef ___ASM_SPARC_POSIX_TYPES_H
 2#define ___ASM_SPARC_POSIX_TYPES_H
 3#if defined(__sparc__) && defined(__arch64__)
 4#include <asm-sparc/posix_types_64.h>
 5#else
 6#include <asm-sparc/posix_types_32.h>
 7#endif
 8#endif
 1#ifndef __ARCH_SPARC_POSIX_TYPES_H
 2#define __ARCH_SPARC_POSIX_TYPES_H
 3
 4/*
 5 * This file is generally used by user-level software, so you need to
 6 * be a little careful about namespace pollution etc. Also, we cannot
 7 * assume GCC is being used.
8 */
9
10typedef unsigned int __kernel_size_t;
11typedef int __kernel_ssize_t;
12typedef long int __kernel_ptrdiff_t;
13typedef long __kernel_time_t;
14typedef long __kernel_suseconds_t;
15typedef long __kernel_clock_t;
16typedef int __kernel_pid_t;
17typedef unsigned short __kernel_ipc_pid_t;
18typedef unsigned short __kernel_uid_t;
19typedef unsigned short __kernel_gid_t;
20typedef unsigned long __kernel_ino_t;
21typedef unsigned short __kernel_mode_t;
22typedef unsigned short __kernel_umode_t;
23typedef short __kernel_nlink_t;
24typedef long __kernel_daddr_t;
25typedef long __kernel_off_t;
26typedef char * __kernel_caddr_t;
27typedef unsigned short __kernel_uid16_t;
28typedef unsigned short __kernel_gid16_t;
29typedef unsigned int __kernel_uid32_t;
30typedef unsigned int __kernel_gid32_t;
31typedef unsigned short __kernel_old_uid_t;
32typedef unsigned short __kernel_old_gid_t;
33typedef unsigned short __kernel_old_dev_t;
34typedef int __kernel_clockid_t;
35typedef int __kernel_timer_t;
36
37#ifdef __GNUC__
38typedef long long __kernel_loff_t;
 39#endif
40
41typedef struct {
42 int val[2];
43} __kernel_fsid_t;
44
45#if defined(__KERNEL__)
46
47#undef __FD_SET
48static inline void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
49{
50 unsigned long _tmp = fd / __NFDBITS;
51 unsigned long _rem = fd % __NFDBITS;
52 fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
53}
54
55#undef __FD_CLR
56static inline void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
57{
58 unsigned long _tmp = fd / __NFDBITS;
59 unsigned long _rem = fd % __NFDBITS;
60 fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
61}
62
63#undef __FD_ISSET
64static inline int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
65{
66 unsigned long _tmp = fd / __NFDBITS;
67 unsigned long _rem = fd % __NFDBITS;
68 return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
69}
70
71/*
72 * This will unroll the loop for the normal constant cases (8 or 32 longs,
73 * for 256 and 1024-bit fd_sets respectively)
74 */
75#undef __FD_ZERO
76static inline void __FD_ZERO(__kernel_fd_set *p)
77{
78 unsigned long *tmp = p->fds_bits;
79 int i;
80
81 if (__builtin_constant_p(__FDSET_LONGS)) {
82 switch (__FDSET_LONGS) {
83 case 32:
84 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
85 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
86 tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
87 tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
88 tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0;
89 tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0;
90 tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0;
91 tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0;
92 return;
93 case 16:
94 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
95 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
96 tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
97 tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
98 return;
99 case 8:
100 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
101 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
102 return;
103 case 4:
104 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
105 return;
106 }
107 }
108 i = __FDSET_LONGS;
109 while (i) {
110 i--;
111 *tmp = 0;
112 tmp++;
113 }
114}
115
116#endif /* defined(__KERNEL__) */
117
118#endif /* !(__ARCH_SPARC_POSIX_TYPES_H) */
diff --git a/include/asm-sparc/posix_types_32.h b/include/asm-sparc/posix_types_32.h
new file mode 100644
index 000000000000..6bb6eb1ca0f2
--- /dev/null
+++ b/include/asm-sparc/posix_types_32.h
@@ -0,0 +1,118 @@
1#ifndef __ARCH_SPARC_POSIX_TYPES_H
2#define __ARCH_SPARC_POSIX_TYPES_H
3
4/*
5 * This file is generally used by user-level software, so you need to
6 * be a little careful about namespace pollution etc. Also, we cannot
7 * assume GCC is being used.
8 */
9
10typedef unsigned int __kernel_size_t;
11typedef int __kernel_ssize_t;
12typedef long int __kernel_ptrdiff_t;
13typedef long __kernel_time_t;
14typedef long __kernel_suseconds_t;
15typedef long __kernel_clock_t;
16typedef int __kernel_pid_t;
17typedef unsigned short __kernel_ipc_pid_t;
18typedef unsigned short __kernel_uid_t;
19typedef unsigned short __kernel_gid_t;
20typedef unsigned long __kernel_ino_t;
21typedef unsigned short __kernel_mode_t;
22typedef unsigned short __kernel_umode_t;
23typedef short __kernel_nlink_t;
24typedef long __kernel_daddr_t;
25typedef long __kernel_off_t;
26typedef char * __kernel_caddr_t;
27typedef unsigned short __kernel_uid16_t;
28typedef unsigned short __kernel_gid16_t;
29typedef unsigned int __kernel_uid32_t;
30typedef unsigned int __kernel_gid32_t;
31typedef unsigned short __kernel_old_uid_t;
32typedef unsigned short __kernel_old_gid_t;
33typedef unsigned short __kernel_old_dev_t;
34typedef int __kernel_clockid_t;
35typedef int __kernel_timer_t;
36
37#ifdef __GNUC__
38typedef long long __kernel_loff_t;
39#endif
40
41typedef struct {
42 int val[2];
43} __kernel_fsid_t;
44
45#if defined(__KERNEL__)
46
47#undef __FD_SET
48static inline void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
49{
50 unsigned long _tmp = fd / __NFDBITS;
51 unsigned long _rem = fd % __NFDBITS;
52 fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
53}
54
55#undef __FD_CLR
56static inline void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
57{
58 unsigned long _tmp = fd / __NFDBITS;
59 unsigned long _rem = fd % __NFDBITS;
60 fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
61}
62
63#undef __FD_ISSET
64static inline int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
65{
66 unsigned long _tmp = fd / __NFDBITS;
67 unsigned long _rem = fd % __NFDBITS;
68 return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
69}
70
71/*
72 * This will unroll the loop for the normal constant cases (8 or 32 longs,
73 * for 256 and 1024-bit fd_sets respectively)
74 */
75#undef __FD_ZERO
76static inline void __FD_ZERO(__kernel_fd_set *p)
77{
78 unsigned long *tmp = p->fds_bits;
79 int i;
80
81 if (__builtin_constant_p(__FDSET_LONGS)) {
82 switch (__FDSET_LONGS) {
83 case 32:
84 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
85 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
86 tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
87 tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
88 tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0;
89 tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0;
90 tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0;
91 tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0;
92 return;
93 case 16:
94 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
95 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
96 tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
97 tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
98 return;
99 case 8:
100 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
101 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
102 return;
103 case 4:
104 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
105 return;
106 }
107 }
108 i = __FDSET_LONGS;
109 while (i) {
110 i--;
111 *tmp = 0;
112 tmp++;
113 }
114}
115
116#endif /* defined(__KERNEL__) */
117
118#endif /* !(__ARCH_SPARC_POSIX_TYPES_H) */
diff --git a/include/asm-sparc/posix_types_64.h b/include/asm-sparc/posix_types_64.h
new file mode 100644
index 000000000000..ba8f93295763
--- /dev/null
+++ b/include/asm-sparc/posix_types_64.h
@@ -0,0 +1,122 @@
1#ifndef __ARCH_SPARC64_POSIX_TYPES_H
2#define __ARCH_SPARC64_POSIX_TYPES_H
3
4/*
5 * This file is generally used by user-level software, so you need to
6 * be a little careful about namespace pollution etc. Also, we cannot
7 * assume GCC is being used.
8 */
9
10typedef unsigned long __kernel_size_t;
11typedef long __kernel_ssize_t;
12typedef long __kernel_ptrdiff_t;
13typedef long __kernel_time_t;
14typedef long __kernel_clock_t;
15typedef int __kernel_pid_t;
16typedef int __kernel_ipc_pid_t;
17typedef unsigned int __kernel_uid_t;
18typedef unsigned int __kernel_gid_t;
19typedef unsigned long __kernel_ino_t;
20typedef unsigned int __kernel_mode_t;
21typedef unsigned short __kernel_umode_t;
22typedef unsigned int __kernel_nlink_t;
23typedef int __kernel_daddr_t;
24typedef long __kernel_off_t;
25typedef char * __kernel_caddr_t;
26typedef unsigned short __kernel_uid16_t;
27typedef unsigned short __kernel_gid16_t;
28typedef int __kernel_clockid_t;
29typedef int __kernel_timer_t;
30
31typedef unsigned short __kernel_old_uid_t;
32typedef unsigned short __kernel_old_gid_t;
33typedef __kernel_uid_t __kernel_uid32_t;
34typedef __kernel_gid_t __kernel_gid32_t;
35
36typedef unsigned int __kernel_old_dev_t;
37
38/* Note this piece of asymmetry from the v9 ABI. */
39typedef int __kernel_suseconds_t;
40
41#ifdef __GNUC__
42typedef long long __kernel_loff_t;
43#endif
44
45typedef struct {
46 int val[2];
47} __kernel_fsid_t;
48
49#if defined(__KERNEL__)
50
51#undef __FD_SET
52static inline void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
53{
54 unsigned long _tmp = fd / __NFDBITS;
55 unsigned long _rem = fd % __NFDBITS;
56 fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
57}
58
59#undef __FD_CLR
60static inline void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
61{
62 unsigned long _tmp = fd / __NFDBITS;
63 unsigned long _rem = fd % __NFDBITS;
64 fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
65}
66
67#undef __FD_ISSET
68static inline int __FD_ISSET(unsigned long fd, __const__ __kernel_fd_set *p)
69{
70 unsigned long _tmp = fd / __NFDBITS;
71 unsigned long _rem = fd % __NFDBITS;
72 return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
73}
74
75/*
76 * This will unroll the loop for the normal constant cases (8 or 32 longs,
77 * for 256 and 1024-bit fd_sets respectively)
78 */
79#undef __FD_ZERO
80static inline void __FD_ZERO(__kernel_fd_set *p)
81{
82 unsigned long *tmp = p->fds_bits;
83 int i;
84
85 if (__builtin_constant_p(__FDSET_LONGS)) {
86 switch (__FDSET_LONGS) {
87 case 32:
88 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
89 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
90 tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
91 tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
92 tmp[16] = 0; tmp[17] = 0; tmp[18] = 0; tmp[19] = 0;
93 tmp[20] = 0; tmp[21] = 0; tmp[22] = 0; tmp[23] = 0;
94 tmp[24] = 0; tmp[25] = 0; tmp[26] = 0; tmp[27] = 0;
95 tmp[28] = 0; tmp[29] = 0; tmp[30] = 0; tmp[31] = 0;
96 return;
97 case 16:
98 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
99 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
100 tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
101 tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
102 return;
103 case 8:
104 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
105 tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
106 return;
107 case 4:
108 tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
109 return;
110 }
111 }
112 i = __FDSET_LONGS;
113 while (i) {
114 i--;
115 *tmp = 0;
116 tmp++;
117 }
118}
119
120#endif /* defined(__KERNEL__) */
121
122#endif /* !(__ARCH_SPARC64_POSIX_TYPES_H) */
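The __FD_SET()/__FD_CLR()/__FD_ISSET() helpers repeated in the 32-bit and 64-bit posix_types headers above are plain bit arithmetic over an array of longs: fd / __NFDBITS selects the word and fd % __NFDBITS the bit, which is also what the unrolled __FD_ZERO() clears. A stand-alone sketch of the same idea (the type and constant names here are made up, not the kernel's):

        #include <stdio.h>

        #define EX_NFDBITS (8 * sizeof(unsigned long))  /* bits per fd_set word */
        #define EX_FDSET_LONGS 16                       /* 1024 fds with 64-bit longs */

        struct ex_fd_set {
                unsigned long bits[EX_FDSET_LONGS];
        };

        static void ex_fd_set_bit(unsigned long fd, struct ex_fd_set *s)
        {
                s->bits[fd / EX_NFDBITS] |= 1UL << (fd % EX_NFDBITS);
        }

        static int ex_fd_isset(unsigned long fd, const struct ex_fd_set *s)
        {
                return (s->bits[fd / EX_NFDBITS] >> (fd % EX_NFDBITS)) & 1UL;
        }

        int main(void)
        {
                struct ex_fd_set s = { { 0 } };

                ex_fd_set_bit(70, &s);          /* word 1, bit 6 with 64-bit longs */
                printf("%d %d\n", ex_fd_isset(70, &s), ex_fd_isset(71, &s)); /* 1 0 */
                return 0;
        }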
diff --git a/include/asm-sparc/processor.h b/include/asm-sparc/processor.h
index 8898efbbbe07..11a66bb02eaa 100644
--- a/include/asm-sparc/processor.h
+++ b/include/asm-sparc/processor.h
@@ -1,128 +1,8 @@
 1#ifndef ___ASM_SPARC_PROCESSOR_H
 2#define ___ASM_SPARC_PROCESSOR_H
 3#if defined(__sparc__) && defined(__arch64__)
 4#include <asm-sparc/processor_64.h>
 5#else
 6#include <asm-sparc/processor_32.h>
 7#endif
 8#endif
 1/* include/asm-sparc/processor.h
 2 *
 3 * Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu)
 4 */
 5
 6#ifndef __ASM_SPARC_PROCESSOR_H
7#define __ASM_SPARC_PROCESSOR_H
8
9/*
10 * Sparc32 implementation of macro that returns current
11 * instruction pointer ("program counter").
12 */
13#define current_text_addr() ({ void *pc; __asm__("sethi %%hi(1f), %0; or %0, %%lo(1f), %0;\n1:" : "=r" (pc)); pc; })
14
15#include <asm/psr.h>
16#include <asm/ptrace.h>
17#include <asm/head.h>
18#include <asm/signal.h>
19#include <asm/btfixup.h>
20#include <asm/page.h>
21
22/*
23 * The sparc has no problems with write protection
24 */
25#define wp_works_ok 1
26#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
27
28/* Whee, this is STACK_TOP + PAGE_SIZE and the lowest kernel address too...
29 * That one page is used to protect kernel from intruders, so that
30 * we can make our access_ok test faster
31 */
32#define TASK_SIZE PAGE_OFFSET
33#ifdef __KERNEL__
34#define STACK_TOP (PAGE_OFFSET - PAGE_SIZE)
35#define STACK_TOP_MAX STACK_TOP
36#endif /* __KERNEL__ */
37
38struct task_struct;
39
40#ifdef __KERNEL__
41struct fpq {
42 unsigned long *insn_addr;
43 unsigned long insn;
44};
 45#endif
46
47typedef struct {
48 int seg;
49} mm_segment_t;
50
51/* The Sparc processor specific thread struct. */
52struct thread_struct {
53 struct pt_regs *kregs;
54 unsigned int _pad1;
55
56 /* Special child fork kpsr/kwim values. */
57 unsigned long fork_kpsr __attribute__ ((aligned (8)));
58 unsigned long fork_kwim;
59
60 /* Floating point regs */
61 unsigned long float_regs[32] __attribute__ ((aligned (8)));
62 unsigned long fsr;
63 unsigned long fpqdepth;
64 struct fpq fpqueue[16];
65 unsigned long flags;
66 mm_segment_t current_ds;
67};
68
69#define SPARC_FLAG_KTHREAD 0x1 /* task is a kernel thread */
70#define SPARC_FLAG_UNALIGNED 0x2 /* is allowed to do unaligned accesses */
71
72#define INIT_THREAD { \
73 .flags = SPARC_FLAG_KTHREAD, \
74 .current_ds = KERNEL_DS, \
75}
76
77/* Return saved PC of a blocked thread. */
78extern unsigned long thread_saved_pc(struct task_struct *t);
79
80/* Do necessary setup to start up a newly executed thread. */
81static inline void start_thread(struct pt_regs * regs, unsigned long pc,
82 unsigned long sp)
83{
84 register unsigned long zero asm("g1");
85
86 regs->psr = (regs->psr & (PSR_CWP)) | PSR_S;
87 regs->pc = ((pc & (~3)) - 4);
88 regs->npc = regs->pc + 4;
89 regs->y = 0;
90 zero = 0;
91 __asm__ __volatile__("std\t%%g0, [%0 + %3 + 0x00]\n\t"
92 "std\t%%g0, [%0 + %3 + 0x08]\n\t"
93 "std\t%%g0, [%0 + %3 + 0x10]\n\t"
94 "std\t%%g0, [%0 + %3 + 0x18]\n\t"
95 "std\t%%g0, [%0 + %3 + 0x20]\n\t"
96 "std\t%%g0, [%0 + %3 + 0x28]\n\t"
97 "std\t%%g0, [%0 + %3 + 0x30]\n\t"
98 "st\t%1, [%0 + %3 + 0x38]\n\t"
99 "st\t%%g0, [%0 + %3 + 0x3c]"
100 : /* no outputs */
101 : "r" (regs),
102 "r" (sp - sizeof(struct reg_window)),
103 "r" (zero),
104 "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))
105 : "memory");
106}
107
108/* Free all resources held by a thread. */
109#define release_thread(tsk) do { } while(0)
110extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
111
112/* Prepare to copy thread state - unlazy all lazy status */
113#define prepare_to_copy(tsk) do { } while (0)
114
115extern unsigned long get_wchan(struct task_struct *);
116
117#define KSTK_EIP(tsk) ((tsk)->thread.kregs->pc)
118#define KSTK_ESP(tsk) ((tsk)->thread.kregs->u_regs[UREG_FP])
119
120#ifdef __KERNEL__
121
122extern struct task_struct *last_task_used_math;
123
124#define cpu_relax() barrier()
125
 126#endif
127
128#endif /* __ASM_SPARC_PROCESSOR_H */
diff --git a/include/asm-sparc/processor_32.h b/include/asm-sparc/processor_32.h
new file mode 100644
index 000000000000..562c0d69c537
--- /dev/null
+++ b/include/asm-sparc/processor_32.h
@@ -0,0 +1,128 @@
1/* include/asm-sparc/processor.h
2 *
3 * Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu)
4 */
5
6#ifndef __ASM_SPARC_PROCESSOR_H
7#define __ASM_SPARC_PROCESSOR_H
8
9/*
10 * Sparc32 implementation of macro that returns current
11 * instruction pointer ("program counter").
12 */
13#define current_text_addr() ({ void *pc; __asm__("sethi %%hi(1f), %0; or %0, %%lo(1f), %0;\n1:" : "=r" (pc)); pc; })
14
15#include <asm/psr.h>
16#include <asm/ptrace.h>
17#include <asm/head.h>
18#include <asm/signal.h>
19#include <asm/btfixup.h>
20#include <asm/page.h>
21
22/*
23 * The sparc has no problems with write protection
24 */
25#define wp_works_ok 1
26#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
27
28/* Whee, this is STACK_TOP + PAGE_SIZE and the lowest kernel address too...
29 * That one page is used to protect kernel from intruders, so that
30 * we can make our access_ok test faster
31 */
32#define TASK_SIZE PAGE_OFFSET
33#ifdef __KERNEL__
34#define STACK_TOP (PAGE_OFFSET - PAGE_SIZE)
35#define STACK_TOP_MAX STACK_TOP
36#endif /* __KERNEL__ */
37
38struct task_struct;
39
40#ifdef __KERNEL__
41struct fpq {
42 unsigned long *insn_addr;
43 unsigned long insn;
44};
45#endif
46
47typedef struct {
48 int seg;
49} mm_segment_t;
50
51/* The Sparc processor specific thread struct. */
52struct thread_struct {
53 struct pt_regs *kregs;
54 unsigned int _pad1;
55
56 /* Special child fork kpsr/kwim values. */
57 unsigned long fork_kpsr __attribute__ ((aligned (8)));
58 unsigned long fork_kwim;
59
60 /* Floating point regs */
61 unsigned long float_regs[32] __attribute__ ((aligned (8)));
62 unsigned long fsr;
63 unsigned long fpqdepth;
64 struct fpq fpqueue[16];
65 unsigned long flags;
66 mm_segment_t current_ds;
67};
68
69#define SPARC_FLAG_KTHREAD 0x1 /* task is a kernel thread */
70#define SPARC_FLAG_UNALIGNED 0x2 /* is allowed to do unaligned accesses */
71
72#define INIT_THREAD { \
73 .flags = SPARC_FLAG_KTHREAD, \
74 .current_ds = KERNEL_DS, \
75}
76
77/* Return saved PC of a blocked thread. */
78extern unsigned long thread_saved_pc(struct task_struct *t);
79
80/* Do necessary setup to start up a newly executed thread. */
81static inline void start_thread(struct pt_regs * regs, unsigned long pc,
82 unsigned long sp)
83{
84 register unsigned long zero asm("g1");
85
86 regs->psr = (regs->psr & (PSR_CWP)) | PSR_S;
87 regs->pc = ((pc & (~3)) - 4);
88 regs->npc = regs->pc + 4;
89 regs->y = 0;
90 zero = 0;
91 __asm__ __volatile__("std\t%%g0, [%0 + %3 + 0x00]\n\t"
92 "std\t%%g0, [%0 + %3 + 0x08]\n\t"
93 "std\t%%g0, [%0 + %3 + 0x10]\n\t"
94 "std\t%%g0, [%0 + %3 + 0x18]\n\t"
95 "std\t%%g0, [%0 + %3 + 0x20]\n\t"
96 "std\t%%g0, [%0 + %3 + 0x28]\n\t"
97 "std\t%%g0, [%0 + %3 + 0x30]\n\t"
98 "st\t%1, [%0 + %3 + 0x38]\n\t"
99 "st\t%%g0, [%0 + %3 + 0x3c]"
100 : /* no outputs */
101 : "r" (regs),
102 "r" (sp - sizeof(struct reg_window)),
103 "r" (zero),
104 "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))
105 : "memory");
106}
107
108/* Free all resources held by a thread. */
109#define release_thread(tsk) do { } while(0)
110extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
111
112/* Prepare to copy thread state - unlazy all lazy status */
113#define prepare_to_copy(tsk) do { } while (0)
114
115extern unsigned long get_wchan(struct task_struct *);
116
117#define KSTK_EIP(tsk) ((tsk)->thread.kregs->pc)
118#define KSTK_ESP(tsk) ((tsk)->thread.kregs->u_regs[UREG_FP])
119
120#ifdef __KERNEL__
121
122extern struct task_struct *last_task_used_math;
123
124#define cpu_relax() barrier()
125
126#endif
127
128#endif /* __ASM_SPARC_PROCESSOR_H */
diff --git a/include/asm-sparc/processor_64.h b/include/asm-sparc/processor_64.h
new file mode 100644
index 000000000000..70d42801a0d2
--- /dev/null
+++ b/include/asm-sparc/processor_64.h
@@ -0,0 +1,237 @@
1/*
2 * include/asm-sparc64/processor.h
3 *
4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#ifndef __ASM_SPARC64_PROCESSOR_H
8#define __ASM_SPARC64_PROCESSOR_H
9
10/*
11 * Sparc64 implementation of macro that returns current
12 * instruction pointer ("program counter").
13 */
14#define current_text_addr() ({ void *pc; __asm__("rd %%pc, %0" : "=r" (pc)); pc; })
15
16#include <asm/asi.h>
17#include <asm/pstate.h>
18#include <asm/ptrace.h>
19#include <asm/page.h>
20
21/* The sparc has no problems with write protection */
22#define wp_works_ok 1
23#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
24
25/*
26 * User lives in his very own context, and cannot reference us. Note
 27 * that TASK_SIZE is a misnomer; it really gives the maximum user virtual
 28 * address that the kernel will hand out.
29 *
30 * XXX No longer using virtual page tables, kill this upper limit...
31 */
32#define VA_BITS 44
33#ifndef __ASSEMBLY__
34#define VPTE_SIZE (1UL << (VA_BITS - PAGE_SHIFT + 3))
35#else
36#define VPTE_SIZE (1 << (VA_BITS - PAGE_SHIFT + 3))
37#endif
38
39#define TASK_SIZE ((unsigned long)-VPTE_SIZE)
40#define TASK_SIZE_OF(tsk) \
41 (test_tsk_thread_flag(tsk,TIF_32BIT) ? \
42 (1UL << 32UL) : TASK_SIZE)
43#ifdef __KERNEL__
44
45#define STACK_TOP32 ((1UL << 32UL) - PAGE_SIZE)
46#define STACK_TOP64 (0x0000080000000000UL - (1UL << 32UL))
47
48#define STACK_TOP (test_thread_flag(TIF_32BIT) ? \
49 STACK_TOP32 : STACK_TOP64)
50
51#define STACK_TOP_MAX STACK_TOP64
52
53#endif
54
55#ifndef __ASSEMBLY__
56
57typedef struct {
58 unsigned char seg;
59} mm_segment_t;
60
61/* The Sparc processor specific thread struct. */
62/* XXX This should die, everything can go into thread_info now. */
63struct thread_struct {
64#ifdef CONFIG_DEBUG_SPINLOCK
65 /* How many spinlocks held by this thread.
66 * Used with spin lock debugging to catch tasks
67 * sleeping illegally with locks held.
68 */
69 int smp_lock_count;
70 unsigned int smp_lock_pc;
71#else
72 int dummy; /* f'in gcc bug... */
73#endif
74};
75
76#endif /* !(__ASSEMBLY__) */
77
78#ifndef CONFIG_DEBUG_SPINLOCK
79#define INIT_THREAD { \
80 0, \
81}
82#else /* CONFIG_DEBUG_SPINLOCK */
83#define INIT_THREAD { \
84/* smp_lock_count, smp_lock_pc, */ \
85 0, 0, \
86}
87#endif /* !(CONFIG_DEBUG_SPINLOCK) */
88
89#ifndef __ASSEMBLY__
90
91#include <linux/types.h>
92
93/* Return saved PC of a blocked thread. */
94struct task_struct;
95extern unsigned long thread_saved_pc(struct task_struct *);
96
 97/* On uniprocessor, processes see TSO semantics even when running in RMO */
98#ifdef CONFIG_SMP
99#define TSTATE_INITIAL_MM TSTATE_TSO
100#else
101#define TSTATE_INITIAL_MM TSTATE_RMO
102#endif
103
104/* Do necessary setup to start up a newly executed thread. */
105#define start_thread(regs, pc, sp) \
106do { \
107 unsigned long __asi = ASI_PNF; \
108 regs->tstate = (regs->tstate & (TSTATE_CWP)) | (TSTATE_INITIAL_MM|TSTATE_IE) | (__asi << 24UL); \
109 regs->tpc = ((pc & (~3)) - 4); \
110 regs->tnpc = regs->tpc + 4; \
111 regs->y = 0; \
112 set_thread_wstate(1 << 3); \
113 if (current_thread_info()->utraps) { \
114 if (*(current_thread_info()->utraps) < 2) \
115 kfree(current_thread_info()->utraps); \
116 else \
117 (*(current_thread_info()->utraps))--; \
118 current_thread_info()->utraps = NULL; \
119 } \
120 __asm__ __volatile__( \
121 "stx %%g0, [%0 + %2 + 0x00]\n\t" \
122 "stx %%g0, [%0 + %2 + 0x08]\n\t" \
123 "stx %%g0, [%0 + %2 + 0x10]\n\t" \
124 "stx %%g0, [%0 + %2 + 0x18]\n\t" \
125 "stx %%g0, [%0 + %2 + 0x20]\n\t" \
126 "stx %%g0, [%0 + %2 + 0x28]\n\t" \
127 "stx %%g0, [%0 + %2 + 0x30]\n\t" \
128 "stx %%g0, [%0 + %2 + 0x38]\n\t" \
129 "stx %%g0, [%0 + %2 + 0x40]\n\t" \
130 "stx %%g0, [%0 + %2 + 0x48]\n\t" \
131 "stx %%g0, [%0 + %2 + 0x50]\n\t" \
132 "stx %%g0, [%0 + %2 + 0x58]\n\t" \
133 "stx %%g0, [%0 + %2 + 0x60]\n\t" \
134 "stx %%g0, [%0 + %2 + 0x68]\n\t" \
135 "stx %1, [%0 + %2 + 0x70]\n\t" \
136 "stx %%g0, [%0 + %2 + 0x78]\n\t" \
137 "wrpr %%g0, (1 << 3), %%wstate\n\t" \
138 : \
139 : "r" (regs), "r" (sp - sizeof(struct reg_window) - STACK_BIAS), \
140 "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))); \
141} while (0)
142
143#define start_thread32(regs, pc, sp) \
144do { \
145 unsigned long __asi = ASI_PNF; \
146 pc &= 0x00000000ffffffffUL; \
147 sp &= 0x00000000ffffffffUL; \
148 regs->tstate = (regs->tstate & (TSTATE_CWP))|(TSTATE_INITIAL_MM|TSTATE_IE|TSTATE_AM) | (__asi << 24UL); \
149 regs->tpc = ((pc & (~3)) - 4); \
150 regs->tnpc = regs->tpc + 4; \
151 regs->y = 0; \
152 set_thread_wstate(2 << 3); \
153 if (current_thread_info()->utraps) { \
154 if (*(current_thread_info()->utraps) < 2) \
155 kfree(current_thread_info()->utraps); \
156 else \
157 (*(current_thread_info()->utraps))--; \
158 current_thread_info()->utraps = NULL; \
159 } \
160 __asm__ __volatile__( \
161 "stx %%g0, [%0 + %2 + 0x00]\n\t" \
162 "stx %%g0, [%0 + %2 + 0x08]\n\t" \
163 "stx %%g0, [%0 + %2 + 0x10]\n\t" \
164 "stx %%g0, [%0 + %2 + 0x18]\n\t" \
165 "stx %%g0, [%0 + %2 + 0x20]\n\t" \
166 "stx %%g0, [%0 + %2 + 0x28]\n\t" \
167 "stx %%g0, [%0 + %2 + 0x30]\n\t" \
168 "stx %%g0, [%0 + %2 + 0x38]\n\t" \
169 "stx %%g0, [%0 + %2 + 0x40]\n\t" \
170 "stx %%g0, [%0 + %2 + 0x48]\n\t" \
171 "stx %%g0, [%0 + %2 + 0x50]\n\t" \
172 "stx %%g0, [%0 + %2 + 0x58]\n\t" \
173 "stx %%g0, [%0 + %2 + 0x60]\n\t" \
174 "stx %%g0, [%0 + %2 + 0x68]\n\t" \
175 "stx %1, [%0 + %2 + 0x70]\n\t" \
176 "stx %%g0, [%0 + %2 + 0x78]\n\t" \
177 "wrpr %%g0, (2 << 3), %%wstate\n\t" \
178 : \
179 : "r" (regs), "r" (sp - sizeof(struct reg_window32)), \
180 "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))); \
181} while (0)
182
183/* Free all resources held by a thread. */
184#define release_thread(tsk) do { } while (0)
185
186/* Prepare to copy thread state - unlazy all lazy status */
187#define prepare_to_copy(tsk) do { } while (0)
188
189extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
190
191extern unsigned long get_wchan(struct task_struct *task);
192
193#define task_pt_regs(tsk) (task_thread_info(tsk)->kregs)
194#define KSTK_EIP(tsk) (task_pt_regs(tsk)->tpc)
195#define KSTK_ESP(tsk) (task_pt_regs(tsk)->u_regs[UREG_FP])
196
197#define cpu_relax() barrier()
198
199/* Prefetch support. This is tuned for UltraSPARC-III and later.
200 * UltraSPARC-I will treat these as nops, and UltraSPARC-II has
201 * a shallower prefetch queue than later chips.
202 */
203#define ARCH_HAS_PREFETCH
204#define ARCH_HAS_PREFETCHW
205#define ARCH_HAS_SPINLOCK_PREFETCH
206
207static inline void prefetch(const void *x)
208{
209 /* We do not use the read prefetch mnemonic because that
 210 * prefetches into the prefetch-cache, which is only accessible
211 * by floating point operations in UltraSPARC-III and later.
212 * By contrast, "#one_write" prefetches into the L2 cache
213 * in shared state.
214 */
215 __asm__ __volatile__("prefetch [%0], #one_write"
216 : /* no outputs */
217 : "r" (x));
218}
219
220static inline void prefetchw(const void *x)
221{
 222 /* The best prefetch to use for writes is
223 * "#n_writes". This brings the cacheline into the
224 * L2 cache in "owned" state.
225 */
226 __asm__ __volatile__("prefetch [%0], #n_writes"
227 : /* no outputs */
228 : "r" (x));
229}
230
231#define spin_lock_prefetch(x) prefetchw(x)
232
233#define HAVE_ARCH_PICK_MMAP_LAYOUT
234
235#endif /* !(__ASSEMBLY__) */
236
237#endif /* !(__ASM_SPARC64_PROCESSOR_H) */
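prefetch()/prefetchw() above are pure cache hints; dropping them never changes results. A hedged sketch of the usual pattern, touching the next list node early while the current one is updated; __builtin_prefetch() stands in for the sparc64 helpers so the example builds anywhere, and the list type is made up:

        #include <stdio.h>

        struct node {
                struct node *next;
                long payload;
        };

        /* Walk a singly linked list, hinting the next node into cache while the
         * current one is being written.  The hint only affects timing.
         */
        static long bump_and_sum(struct node *n)
        {
                long sum = 0;

                while (n) {
                        if (n->next)
                                __builtin_prefetch(n->next, 1); /* 1 = prepare for write */
                        sum += ++n->payload;
                        n = n->next;
                }
                return sum;
        }

        int main(void)
        {
                struct node c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };

                printf("%ld\n", bump_and_sum(&a));      /* prints 9 */
                return 0;
        }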
diff --git a/include/asm-sparc/psrcompat.h b/include/asm-sparc/psrcompat.h
new file mode 100644
index 000000000000..44b6327dbbf5
--- /dev/null
+++ b/include/asm-sparc/psrcompat.h
@@ -0,0 +1,45 @@
1#ifndef _SPARC64_PSRCOMPAT_H
2#define _SPARC64_PSRCOMPAT_H
3
4#include <asm/pstate.h>
5
6/* Old 32-bit PSR fields for the compatibility conversion code. */
7#define PSR_CWP 0x0000001f /* current window pointer */
8#define PSR_ET 0x00000020 /* enable traps field */
9#define PSR_PS 0x00000040 /* previous privilege level */
10#define PSR_S 0x00000080 /* current privilege level */
11#define PSR_PIL 0x00000f00 /* processor interrupt level */
12#define PSR_EF 0x00001000 /* enable floating point */
13#define PSR_EC 0x00002000 /* enable co-processor */
14#define PSR_SYSCALL 0x00004000 /* inside of a syscall */
15#define PSR_LE 0x00008000 /* SuperSparcII little-endian */
16#define PSR_ICC 0x00f00000 /* integer condition codes */
17#define PSR_C 0x00100000 /* carry bit */
18#define PSR_V 0x00200000 /* overflow bit */
19#define PSR_Z 0x00400000 /* zero bit */
20#define PSR_N 0x00800000 /* negative bit */
21#define PSR_VERS 0x0f000000 /* cpu-version field */
22#define PSR_IMPL 0xf0000000 /* cpu-implementation field */
23
24#define PSR_V8PLUS 0xff000000 /* fake impl/ver, meaning a 64bit CPU is present */
25#define PSR_XCC 0x000f0000 /* if PSR_V8PLUS, this is %xcc */
26
27static inline unsigned int tstate_to_psr(unsigned long tstate)
28{
29 return ((tstate & TSTATE_CWP) |
30 PSR_S |
31 ((tstate & TSTATE_ICC) >> 12) |
32 ((tstate & TSTATE_XCC) >> 20) |
33 ((tstate & TSTATE_SYSCALL) ? PSR_SYSCALL : 0) |
34 PSR_V8PLUS);
35}
36
37static inline unsigned long psr_to_tstate_icc(unsigned int psr)
38{
39 unsigned long tstate = ((unsigned long)(psr & PSR_ICC)) << 12;
40 if ((psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS)
41 tstate |= ((unsigned long)(psr & PSR_XCC)) << 20;
42 return tstate;
43}
44
45#endif /* !(_SPARC64_PSRCOMPAT_H) */
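tstate_to_psr()/psr_to_tstate_icc() above only move condition-code bits between the 64-bit TSTATE layout and the old 32-bit PSR layout. A user-space check of the %icc round trip, with the two masks copied from the headers in this patch (only the ICC path is exercised; this is an illustration, not kernel code):

        #include <stdio.h>

        #define EX_TSTATE_ICC 0x0000000f00000000UL  /* %icc in TSTATE */
        #define EX_PSR_ICC    0x00f00000U           /* %icc in PSR */

        /* TSTATE bits 35:32 map onto PSR bits 23:20, i.e. a shift by 12. */
        static unsigned int icc_tstate_to_psr(unsigned long tstate)
        {
                return (tstate & EX_TSTATE_ICC) >> 12;
        }

        static unsigned long icc_psr_to_tstate(unsigned int psr)
        {
                return ((unsigned long)(psr & EX_PSR_ICC)) << 12;
        }

        int main(void)
        {
                unsigned long tstate = 0x0000000500000000UL;    /* %icc Z and C set */

                unsigned int psr = icc_tstate_to_psr(tstate);
                printf("psr icc = %#x, back = %#lx\n", psr, icc_psr_to_tstate(psr));
                return 0;
        }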
diff --git a/include/asm-sparc/pstate.h b/include/asm-sparc/pstate.h
new file mode 100644
index 000000000000..a26a53777bb0
--- /dev/null
+++ b/include/asm-sparc/pstate.h
@@ -0,0 +1,91 @@
1#ifndef _SPARC64_PSTATE_H
2#define _SPARC64_PSTATE_H
3
4#include <linux/const.h>
5
6/* The V9 PSTATE Register (with SpitFire extensions).
7 *
8 * -----------------------------------------------------------------------
9 * | Resv | IG | MG | CLE | TLE | MM | RED | PEF | AM | PRIV | IE | AG |
10 * -----------------------------------------------------------------------
11 * 63 12 11 10 9 8 7 6 5 4 3 2 1 0
12 */
13#define PSTATE_IG _AC(0x0000000000000800,UL) /* Interrupt Globals. */
14#define PSTATE_MG _AC(0x0000000000000400,UL) /* MMU Globals. */
15#define PSTATE_CLE _AC(0x0000000000000200,UL) /* Current Little Endian.*/
16#define PSTATE_TLE _AC(0x0000000000000100,UL) /* Trap Little Endian. */
17#define PSTATE_MM _AC(0x00000000000000c0,UL) /* Memory Model. */
18#define PSTATE_TSO _AC(0x0000000000000000,UL) /* MM: TotalStoreOrder */
19#define PSTATE_PSO _AC(0x0000000000000040,UL) /* MM: PartialStoreOrder */
20#define PSTATE_RMO _AC(0x0000000000000080,UL) /* MM: RelaxedMemoryOrder*/
21#define PSTATE_RED _AC(0x0000000000000020,UL) /* Reset Error Debug. */
22#define PSTATE_PEF _AC(0x0000000000000010,UL) /* Floating Point Enable.*/
23#define PSTATE_AM _AC(0x0000000000000008,UL) /* Address Mask. */
24#define PSTATE_PRIV _AC(0x0000000000000004,UL) /* Privilege. */
25#define PSTATE_IE _AC(0x0000000000000002,UL) /* Interrupt Enable. */
26#define PSTATE_AG _AC(0x0000000000000001,UL) /* Alternate Globals. */
27
28/* The V9 TSTATE Register (with SpitFire and Linux extensions).
29 *
30 * ---------------------------------------------------------------------
31 * | Resv | GL | CCR | ASI | %pil | PSTATE | Resv | CWP |
32 * ---------------------------------------------------------------------
33 * 63 43 42 40 39 32 31 24 23 20 19 8 7 5 4 0
34 */
35#define TSTATE_GL _AC(0x0000070000000000,UL) /* Global reg level */
36#define TSTATE_CCR _AC(0x000000ff00000000,UL) /* Condition Codes. */
37#define TSTATE_XCC _AC(0x000000f000000000,UL) /* Condition Codes. */
38#define TSTATE_XNEG _AC(0x0000008000000000,UL) /* %xcc Negative. */
39#define TSTATE_XZERO _AC(0x0000004000000000,UL) /* %xcc Zero. */
40#define TSTATE_XOVFL _AC(0x0000002000000000,UL) /* %xcc Overflow. */
41#define TSTATE_XCARRY _AC(0x0000001000000000,UL) /* %xcc Carry. */
42#define TSTATE_ICC _AC(0x0000000f00000000,UL) /* Condition Codes. */
43#define TSTATE_INEG _AC(0x0000000800000000,UL) /* %icc Negative. */
44#define TSTATE_IZERO _AC(0x0000000400000000,UL) /* %icc Zero. */
45#define TSTATE_IOVFL _AC(0x0000000200000000,UL) /* %icc Overflow. */
46#define TSTATE_ICARRY _AC(0x0000000100000000,UL) /* %icc Carry. */
47#define TSTATE_ASI _AC(0x00000000ff000000,UL) /* AddrSpace ID. */
48#define TSTATE_PIL _AC(0x0000000000f00000,UL) /* %pil (Linux traps)*/
49#define TSTATE_PSTATE _AC(0x00000000000fff00,UL) /* PSTATE. */
50#define TSTATE_IG _AC(0x0000000000080000,UL) /* Interrupt Globals.*/
51#define TSTATE_MG _AC(0x0000000000040000,UL) /* MMU Globals. */
52#define TSTATE_CLE _AC(0x0000000000020000,UL) /* CurrLittleEndian. */
53#define TSTATE_TLE _AC(0x0000000000010000,UL) /* TrapLittleEndian. */
54#define TSTATE_MM _AC(0x000000000000c000,UL) /* Memory Model. */
55#define TSTATE_TSO _AC(0x0000000000000000,UL) /* MM: TSO */
56#define TSTATE_PSO _AC(0x0000000000004000,UL) /* MM: PSO */
57#define TSTATE_RMO _AC(0x0000000000008000,UL) /* MM: RMO */
58#define TSTATE_RED _AC(0x0000000000002000,UL) /* Reset Error Debug.*/
59#define TSTATE_PEF _AC(0x0000000000001000,UL) /* FPU Enable. */
60#define TSTATE_AM _AC(0x0000000000000800,UL) /* Address Mask. */
61#define TSTATE_PRIV _AC(0x0000000000000400,UL) /* Privilege. */
62#define TSTATE_IE _AC(0x0000000000000200,UL) /* Interrupt Enable. */
63#define TSTATE_AG _AC(0x0000000000000100,UL) /* Alternate Globals.*/
64#define TSTATE_SYSCALL _AC(0x0000000000000020,UL) /* in syscall trap */
65#define TSTATE_CWP _AC(0x000000000000001f,UL) /* Curr Win-Pointer. */
66
67/* Floating-Point Registers State Register.
68 *
69 * --------------------------------
70 * | Resv | FEF | DU | DL |
71 * --------------------------------
72 * 63 3 2 1 0
73 */
74#define FPRS_FEF _AC(0x0000000000000004,UL) /* FPU Enable. */
75#define FPRS_DU _AC(0x0000000000000002,UL) /* Dirty Upper. */
76#define FPRS_DL _AC(0x0000000000000001,UL) /* Dirty Lower. */
77
78/* Version Register.
79 *
80 * ------------------------------------------------------
81 * | MANUF | IMPL | MASK | Resv | MAXTL | Resv | MAXWIN |
82 * ------------------------------------------------------
83 * 63 48 47 32 31 24 23 16 15 8 7 5 4 0
84 */
85#define VERS_MANUF _AC(0xffff000000000000,UL) /* Manufacturer. */
86#define VERS_IMPL _AC(0x0000ffff00000000,UL) /* Implementation. */
87#define VERS_MASK _AC(0x00000000ff000000,UL) /* Mask Set Revision.*/
88#define VERS_MAXTL _AC(0x000000000000ff00,UL) /* Max Trap Level. */
89#define VERS_MAXWIN _AC(0x000000000000001f,UL) /* Max RegWindow Idx.*/
90
91#endif /* !(_SPARC64_PSTATE_H) */
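The masks above are plain bit-fields, so pulling a value out of TSTATE is a mask-and-shift. A small user-space illustration with values copied from the header (the example TSTATE is made up):

        #include <stdio.h>

        #define EX_TSTATE_CWP     0x000000000000001fUL
        #define EX_TSTATE_PSTATE  0x00000000000fff00UL
        #define EX_PSTATE_PRIV    0x0000000000000004UL

        int main(void)
        {
                /* Example TSTATE: CWP = 3, saved PSTATE has PRIV and IE set (0x006). */
                unsigned long tstate = (0x006UL << 8) | 0x3UL;

                unsigned long cwp    = tstate & EX_TSTATE_CWP;
                unsigned long pstate = (tstate & EX_TSTATE_PSTATE) >> 8;

                printf("cwp=%lu pstate=%#lx priv=%d\n",
                       cwp, pstate, (pstate & EX_PSTATE_PRIV) != 0);
                return 0;
        }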
diff --git a/include/asm-sparc/ptrace.h b/include/asm-sparc/ptrace.h
index 11f3bc2bb3f5..f36ab6c30ff3 100644
--- a/include/asm-sparc/ptrace.h
+++ b/include/asm-sparc/ptrace.h
@@ -1,175 +1,8 @@
 1#ifndef ___ASM_SPARC_PTRACE_H
 2#define ___ASM_SPARC_PTRACE_H
 3#if defined(__sparc__) && defined(__arch64__)
 4#include <asm-sparc/ptrace_64.h>
 5#else
 6#include <asm-sparc/ptrace_32.h>
 7#endif
 8#endif
 1#ifndef _SPARC_PTRACE_H
 2#define _SPARC_PTRACE_H
 3
 4#include <asm/psr.h>
 5
 6/* This struct defines the way the registers are stored on the
7 * stack during a system call and basically all traps.
8 */
9
10#ifndef __ASSEMBLY__
11
12#include <linux/types.h>
13
14struct pt_regs {
15 unsigned long psr;
16 unsigned long pc;
17 unsigned long npc;
18 unsigned long y;
19 unsigned long u_regs[16]; /* globals and ins */
20};
21
22#define UREG_G0 0
23#define UREG_G1 1
24#define UREG_G2 2
25#define UREG_G3 3
26#define UREG_G4 4
27#define UREG_G5 5
28#define UREG_G6 6
29#define UREG_G7 7
30#define UREG_I0 8
31#define UREG_I1 9
32#define UREG_I2 10
33#define UREG_I3 11
34#define UREG_I4 12
35#define UREG_I5 13
36#define UREG_I6 14
37#define UREG_I7 15
38#define UREG_WIM UREG_G0
39#define UREG_FADDR UREG_G0
40#define UREG_FP UREG_I6
41#define UREG_RETPC UREG_I7
42
43static inline bool pt_regs_is_syscall(struct pt_regs *regs)
44{
45 return (regs->psr & PSR_SYSCALL);
46}
47
48static inline bool pt_regs_clear_syscall(struct pt_regs *regs)
49{
50 return (regs->psr &= ~PSR_SYSCALL);
51}
52
53/* A register window */
54struct reg_window {
55 unsigned long locals[8];
56 unsigned long ins[8];
57};
58
59/* A Sparc stack frame */
60struct sparc_stackf {
61 unsigned long locals[8];
62 unsigned long ins[6];
63 struct sparc_stackf *fp;
64 unsigned long callers_pc;
65 char *structptr;
66 unsigned long xargs[6];
67 unsigned long xxargs[1];
68};
69
70#define TRACEREG_SZ sizeof(struct pt_regs)
71#define STACKFRAME_SZ sizeof(struct sparc_stackf)
72
73#ifdef __KERNEL__
74
75#define user_mode(regs) (!((regs)->psr & PSR_PS))
76#define instruction_pointer(regs) ((regs)->pc)
77unsigned long profile_pc(struct pt_regs *);
78extern void show_regs(struct pt_regs *);
 79#endif
80
81#else /* __ASSEMBLY__ */
82/* For assembly code. */
83#define TRACEREG_SZ 0x50
84#define STACKFRAME_SZ 0x60
 85#endif
86
87/*
88 * The asm-offsets.h is a generated file, so we cannot include it.
89 * It may be OK for glibc headers, but it's utterly pointless for C code.
90 * The assembly code using those offsets has to include it explicitly.
91 */
92/* #include <asm/asm-offsets.h> */
93
94/* These are for pt_regs. */
95#define PT_PSR 0x0
96#define PT_PC 0x4
97#define PT_NPC 0x8
98#define PT_Y 0xc
99#define PT_G0 0x10
100#define PT_WIM PT_G0
101#define PT_G1 0x14
102#define PT_G2 0x18
103#define PT_G3 0x1c
104#define PT_G4 0x20
105#define PT_G5 0x24
106#define PT_G6 0x28
107#define PT_G7 0x2c
108#define PT_I0 0x30
109#define PT_I1 0x34
110#define PT_I2 0x38
111#define PT_I3 0x3c
112#define PT_I4 0x40
113#define PT_I5 0x44
114#define PT_I6 0x48
115#define PT_FP PT_I6
116#define PT_I7 0x4c
117
118/* Reg_window offsets */
119#define RW_L0 0x00
120#define RW_L1 0x04
121#define RW_L2 0x08
122#define RW_L3 0x0c
123#define RW_L4 0x10
124#define RW_L5 0x14
125#define RW_L6 0x18
126#define RW_L7 0x1c
127#define RW_I0 0x20
128#define RW_I1 0x24
129#define RW_I2 0x28
130#define RW_I3 0x2c
131#define RW_I4 0x30
132#define RW_I5 0x34
133#define RW_I6 0x38
134#define RW_I7 0x3c
135
136/* Stack_frame offsets */
137#define SF_L0 0x00
138#define SF_L1 0x04
139#define SF_L2 0x08
140#define SF_L3 0x0c
141#define SF_L4 0x10
142#define SF_L5 0x14
143#define SF_L6 0x18
144#define SF_L7 0x1c
145#define SF_I0 0x20
146#define SF_I1 0x24
147#define SF_I2 0x28
148#define SF_I3 0x2c
149#define SF_I4 0x30
150#define SF_I5 0x34
151#define SF_FP 0x38
152#define SF_PC 0x3c
153#define SF_RETP 0x40
154#define SF_XARG0 0x44
155#define SF_XARG1 0x48
156#define SF_XARG2 0x4c
157#define SF_XARG3 0x50
158#define SF_XARG4 0x54
159#define SF_XARG5 0x58
160#define SF_XXARG 0x5c
161
162/* Stuff for the ptrace system call */
163#define PTRACE_SPARC_DETACH 11
164#define PTRACE_GETREGS 12
165#define PTRACE_SETREGS 13
166#define PTRACE_GETFPREGS 14
167#define PTRACE_SETFPREGS 15
168#define PTRACE_READDATA 16
169#define PTRACE_WRITEDATA 17
170#define PTRACE_READTEXT 18
171#define PTRACE_WRITETEXT 19
172#define PTRACE_GETFPAREGS 20
173#define PTRACE_SETFPAREGS 21
174
175#endif /* !(_SPARC_PTRACE_H) */
diff --git a/include/asm-sparc/ptrace_32.h b/include/asm-sparc/ptrace_32.h
new file mode 100644
index 000000000000..0401cc7ec38e
--- /dev/null
+++ b/include/asm-sparc/ptrace_32.h
@@ -0,0 +1,175 @@
1#ifndef _SPARC_PTRACE_H
2#define _SPARC_PTRACE_H
3
4#include <asm/psr.h>
5
6/* This struct defines the way the registers are stored on the
7 * stack during a system call and basically all traps.
8 */
9
10#ifndef __ASSEMBLY__
11
12#include <linux/types.h>
13
14struct pt_regs {
15 unsigned long psr;
16 unsigned long pc;
17 unsigned long npc;
18 unsigned long y;
19 unsigned long u_regs[16]; /* globals and ins */
20};
21
22#define UREG_G0 0
23#define UREG_G1 1
24#define UREG_G2 2
25#define UREG_G3 3
26#define UREG_G4 4
27#define UREG_G5 5
28#define UREG_G6 6
29#define UREG_G7 7
30#define UREG_I0 8
31#define UREG_I1 9
32#define UREG_I2 10
33#define UREG_I3 11
34#define UREG_I4 12
35#define UREG_I5 13
36#define UREG_I6 14
37#define UREG_I7 15
38#define UREG_WIM UREG_G0
39#define UREG_FADDR UREG_G0
40#define UREG_FP UREG_I6
41#define UREG_RETPC UREG_I7
42
43static inline bool pt_regs_is_syscall(struct pt_regs *regs)
44{
45 return (regs->psr & PSR_SYSCALL);
46}
47
48static inline bool pt_regs_clear_syscall(struct pt_regs *regs)
49{
50 return (regs->psr &= ~PSR_SYSCALL);
51}
52
53/* A register window */
54struct reg_window {
55 unsigned long locals[8];
56 unsigned long ins[8];
57};
58
59/* A Sparc stack frame */
60struct sparc_stackf {
61 unsigned long locals[8];
62 unsigned long ins[6];
63 struct sparc_stackf *fp;
64 unsigned long callers_pc;
65 char *structptr;
66 unsigned long xargs[6];
67 unsigned long xxargs[1];
68};
69
70#define TRACEREG_SZ sizeof(struct pt_regs)
71#define STACKFRAME_SZ sizeof(struct sparc_stackf)
72
73#ifdef __KERNEL__
74
75#define user_mode(regs) (!((regs)->psr & PSR_PS))
76#define instruction_pointer(regs) ((regs)->pc)
77unsigned long profile_pc(struct pt_regs *);
78extern void show_regs(struct pt_regs *);
79#endif
80
81#else /* __ASSEMBLY__ */
82/* For assembly code. */
83#define TRACEREG_SZ 0x50
84#define STACKFRAME_SZ 0x60
85#endif
86
87/*
88 * The asm-offsets.h is a generated file, so we cannot include it.
89 * It may be OK for glibc headers, but it's utterly pointless for C code.
90 * The assembly code using those offsets has to include it explicitly.
91 */
92/* #include <asm/asm-offsets.h> */
93
94/* These are for pt_regs. */
95#define PT_PSR 0x0
96#define PT_PC 0x4
97#define PT_NPC 0x8
98#define PT_Y 0xc
99#define PT_G0 0x10
100#define PT_WIM PT_G0
101#define PT_G1 0x14
102#define PT_G2 0x18
103#define PT_G3 0x1c
104#define PT_G4 0x20
105#define PT_G5 0x24
106#define PT_G6 0x28
107#define PT_G7 0x2c
108#define PT_I0 0x30
109#define PT_I1 0x34
110#define PT_I2 0x38
111#define PT_I3 0x3c
112#define PT_I4 0x40
113#define PT_I5 0x44
114#define PT_I6 0x48
115#define PT_FP PT_I6
116#define PT_I7 0x4c
117
118/* Reg_window offsets */
119#define RW_L0 0x00
120#define RW_L1 0x04
121#define RW_L2 0x08
122#define RW_L3 0x0c
123#define RW_L4 0x10
124#define RW_L5 0x14
125#define RW_L6 0x18
126#define RW_L7 0x1c
127#define RW_I0 0x20
128#define RW_I1 0x24
129#define RW_I2 0x28
130#define RW_I3 0x2c
131#define RW_I4 0x30
132#define RW_I5 0x34
133#define RW_I6 0x38
134#define RW_I7 0x3c
135
136/* Stack_frame offsets */
137#define SF_L0 0x00
138#define SF_L1 0x04
139#define SF_L2 0x08
140#define SF_L3 0x0c
141#define SF_L4 0x10
142#define SF_L5 0x14
143#define SF_L6 0x18
144#define SF_L7 0x1c
145#define SF_I0 0x20
146#define SF_I1 0x24
147#define SF_I2 0x28
148#define SF_I3 0x2c
149#define SF_I4 0x30
150#define SF_I5 0x34
151#define SF_FP 0x38
152#define SF_PC 0x3c
153#define SF_RETP 0x40
154#define SF_XARG0 0x44
155#define SF_XARG1 0x48
156#define SF_XARG2 0x4c
157#define SF_XARG3 0x50
158#define SF_XARG4 0x54
159#define SF_XARG5 0x58
160#define SF_XXARG 0x5c
161
162/* Stuff for the ptrace system call */
163#define PTRACE_SPARC_DETACH 11
164#define PTRACE_GETREGS 12
165#define PTRACE_SETREGS 13
166#define PTRACE_GETFPREGS 14
167#define PTRACE_SETFPREGS 15
168#define PTRACE_READDATA 16
169#define PTRACE_WRITEDATA 17
170#define PTRACE_READTEXT 18
171#define PTRACE_WRITETEXT 19
172#define PTRACE_GETFPAREGS 20
173#define PTRACE_SETFPAREGS 21
174
175#endif /* !(_SPARC_PTRACE_H) */
diff --git a/include/asm-sparc/ptrace_64.h b/include/asm-sparc/ptrace_64.h
new file mode 100644
index 000000000000..a682e66d5c4a
--- /dev/null
+++ b/include/asm-sparc/ptrace_64.h
@@ -0,0 +1,346 @@
1#ifndef _SPARC64_PTRACE_H
2#define _SPARC64_PTRACE_H
3
4#include <asm/pstate.h>
5
6/* This struct defines the way the registers are stored on the
7 * stack during a system call and basically all traps.
8 */
9
10/* This magic value must have the low 9 bits clear,
11 * as that is where we encode the %tt value, see below.
12 */
13#define PT_REGS_MAGIC 0x57ac6c00
14
15#ifndef __ASSEMBLY__
16
17#include <linux/types.h>
18
19struct pt_regs {
20 unsigned long u_regs[16]; /* globals and ins */
21 unsigned long tstate;
22 unsigned long tpc;
23 unsigned long tnpc;
24 unsigned int y;
25
26 /* We encode a magic number, PT_REGS_MAGIC, along
27 * with the %tt (trap type) register value at trap
28 * entry time. The magic number allows us to identify
29 * accurately a trap stack frame in the stack
30 * unwinder, and the %tt value allows us to test
 31 * things like "in a system call" etc. for an arbitrary
32 * process.
33 *
 34 * The PT_REGS_MAGIC is chosen such that it can be
35 * loaded completely using just a sethi instruction.
36 */
37 unsigned int magic;
38};
39
40static inline int pt_regs_trap_type(struct pt_regs *regs)
41{
42 return regs->magic & 0x1ff;
43}
44
45static inline bool pt_regs_is_syscall(struct pt_regs *regs)
46{
47 return (regs->tstate & TSTATE_SYSCALL);
48}
49
50static inline bool pt_regs_clear_syscall(struct pt_regs *regs)
51{
52 return (regs->tstate &= ~TSTATE_SYSCALL);
53}
54
55struct pt_regs32 {
56 unsigned int psr;
57 unsigned int pc;
58 unsigned int npc;
59 unsigned int y;
60 unsigned int u_regs[16]; /* globals and ins */
61};
62
63#define UREG_G0 0
64#define UREG_G1 1
65#define UREG_G2 2
66#define UREG_G3 3
67#define UREG_G4 4
68#define UREG_G5 5
69#define UREG_G6 6
70#define UREG_G7 7
71#define UREG_I0 8
72#define UREG_I1 9
73#define UREG_I2 10
74#define UREG_I3 11
75#define UREG_I4 12
76#define UREG_I5 13
77#define UREG_I6 14
78#define UREG_I7 15
79#define UREG_FP UREG_I6
80#define UREG_RETPC UREG_I7
81
82/* A V9 register window */
83struct reg_window {
84 unsigned long locals[8];
85 unsigned long ins[8];
86};
87
88/* A 32-bit register window. */
89struct reg_window32 {
90 unsigned int locals[8];
91 unsigned int ins[8];
92};
93
94/* A V9 Sparc stack frame */
95struct sparc_stackf {
96 unsigned long locals[8];
97 unsigned long ins[6];
98 struct sparc_stackf *fp;
99 unsigned long callers_pc;
100 char *structptr;
101 unsigned long xargs[6];
102 unsigned long xxargs[1];
103};
104
105/* A 32-bit Sparc stack frame */
106struct sparc_stackf32 {
107 unsigned int locals[8];
108 unsigned int ins[6];
109 unsigned int fp;
110 unsigned int callers_pc;
111 unsigned int structptr;
112 unsigned int xargs[6];
113 unsigned int xxargs[1];
114};
115
116struct sparc_trapf {
117 unsigned long locals[8];
118 unsigned long ins[8];
119 unsigned long _unused;
120 struct pt_regs *regs;
121};
122
123#define TRACEREG_SZ sizeof(struct pt_regs)
124#define STACKFRAME_SZ sizeof(struct sparc_stackf)
125
126#define TRACEREG32_SZ sizeof(struct pt_regs32)
127#define STACKFRAME32_SZ sizeof(struct sparc_stackf32)
128
129#ifdef __KERNEL__
130
131struct global_reg_snapshot {
132 unsigned long tstate;
133 unsigned long tpc;
134 unsigned long tnpc;
135 unsigned long o7;
136 unsigned long i7;
137 struct thread_info *thread;
138 unsigned long pad1;
139 unsigned long pad2;
140};
141
142#define __ARCH_WANT_COMPAT_SYS_PTRACE
143
144#define force_successful_syscall_return() \
145do { current_thread_info()->syscall_noerror = 1; \
146} while (0)
147#define user_mode(regs) (!((regs)->tstate & TSTATE_PRIV))
148#define instruction_pointer(regs) ((regs)->tpc)
149#define regs_return_value(regs) ((regs)->u_regs[UREG_I0])
150#ifdef CONFIG_SMP
151extern unsigned long profile_pc(struct pt_regs *);
152#else
153#define profile_pc(regs) instruction_pointer(regs)
154#endif
155extern void show_regs(struct pt_regs *);
156extern void __show_regs(struct pt_regs *);
157#endif
158
159#else /* __ASSEMBLY__ */
160/* For assembly code. */
161#define TRACEREG_SZ 0xa0
162#define STACKFRAME_SZ 0xc0
163
164#define TRACEREG32_SZ 0x50
165#define STACKFRAME32_SZ 0x60
166#endif
167
168#ifdef __KERNEL__
169#define STACK_BIAS 2047
170#endif
171
172/* These are for pt_regs. */
173#define PT_V9_G0 0x00
174#define PT_V9_G1 0x08
175#define PT_V9_G2 0x10
176#define PT_V9_G3 0x18
177#define PT_V9_G4 0x20
178#define PT_V9_G5 0x28
179#define PT_V9_G6 0x30
180#define PT_V9_G7 0x38
181#define PT_V9_I0 0x40
182#define PT_V9_I1 0x48
183#define PT_V9_I2 0x50
184#define PT_V9_I3 0x58
185#define PT_V9_I4 0x60
186#define PT_V9_I5 0x68
187#define PT_V9_I6 0x70
188#define PT_V9_FP PT_V9_I6
189#define PT_V9_I7 0x78
190#define PT_V9_TSTATE 0x80
191#define PT_V9_TPC 0x88
192#define PT_V9_TNPC 0x90
193#define PT_V9_Y 0x98
194#define PT_V9_MAGIC 0x9c
195#define PT_TSTATE PT_V9_TSTATE
196#define PT_TPC PT_V9_TPC
197#define PT_TNPC PT_V9_TNPC
198
199/* These for pt_regs32. */
200#define PT_PSR 0x0
201#define PT_PC 0x4
202#define PT_NPC 0x8
203#define PT_Y 0xc
204#define PT_G0 0x10
205#define PT_WIM PT_G0
206#define PT_G1 0x14
207#define PT_G2 0x18
208#define PT_G3 0x1c
209#define PT_G4 0x20
210#define PT_G5 0x24
211#define PT_G6 0x28
212#define PT_G7 0x2c
213#define PT_I0 0x30
214#define PT_I1 0x34
215#define PT_I2 0x38
216#define PT_I3 0x3c
217#define PT_I4 0x40
218#define PT_I5 0x44
219#define PT_I6 0x48
220#define PT_FP PT_I6
221#define PT_I7 0x4c
222
223/* Reg_window offsets */
224#define RW_V9_L0 0x00
225#define RW_V9_L1 0x08
226#define RW_V9_L2 0x10
227#define RW_V9_L3 0x18
228#define RW_V9_L4 0x20
229#define RW_V9_L5 0x28
230#define RW_V9_L6 0x30
231#define RW_V9_L7 0x38
232#define RW_V9_I0 0x40
233#define RW_V9_I1 0x48
234#define RW_V9_I2 0x50
235#define RW_V9_I3 0x58
236#define RW_V9_I4 0x60
237#define RW_V9_I5 0x68
238#define RW_V9_I6 0x70
239#define RW_V9_I7 0x78
240
241#define RW_L0 0x00
242#define RW_L1 0x04
243#define RW_L2 0x08
244#define RW_L3 0x0c
245#define RW_L4 0x10
246#define RW_L5 0x14
247#define RW_L6 0x18
248#define RW_L7 0x1c
249#define RW_I0 0x20
250#define RW_I1 0x24
251#define RW_I2 0x28
252#define RW_I3 0x2c
253#define RW_I4 0x30
254#define RW_I5 0x34
255#define RW_I6 0x38
256#define RW_I7 0x3c
257
258/* Stack_frame offsets */
259#define SF_V9_L0 0x00
260#define SF_V9_L1 0x08
261#define SF_V9_L2 0x10
262#define SF_V9_L3 0x18
263#define SF_V9_L4 0x20
264#define SF_V9_L5 0x28
265#define SF_V9_L6 0x30
266#define SF_V9_L7 0x38
267#define SF_V9_I0 0x40
268#define SF_V9_I1 0x48
269#define SF_V9_I2 0x50
270#define SF_V9_I3 0x58
271#define SF_V9_I4 0x60
272#define SF_V9_I5 0x68
273#define SF_V9_FP 0x70
274#define SF_V9_PC 0x78
275#define SF_V9_RETP 0x80
276#define SF_V9_XARG0 0x88
277#define SF_V9_XARG1 0x90
278#define SF_V9_XARG2 0x98
279#define SF_V9_XARG3 0xa0
280#define SF_V9_XARG4 0xa8
281#define SF_V9_XARG5 0xb0
282#define SF_V9_XXARG 0xb8
283
284#define SF_L0 0x00
285#define SF_L1 0x04
286#define SF_L2 0x08
287#define SF_L3 0x0c
288#define SF_L4 0x10
289#define SF_L5 0x14
290#define SF_L6 0x18
291#define SF_L7 0x1c
292#define SF_I0 0x20
293#define SF_I1 0x24
294#define SF_I2 0x28
295#define SF_I3 0x2c
296#define SF_I4 0x30
297#define SF_I5 0x34
298#define SF_FP 0x38
299#define SF_PC 0x3c
300#define SF_RETP 0x40
301#define SF_XARG0 0x44
302#define SF_XARG1 0x48
303#define SF_XARG2 0x4c
304#define SF_XARG3 0x50
305#define SF_XARG4 0x54
306#define SF_XARG5 0x58
307#define SF_XXARG 0x5c
308
309#ifdef __KERNEL__
310
311/* global_reg_snapshot offsets */
312#define GR_SNAP_TSTATE 0x00
313#define GR_SNAP_TPC 0x08
314#define GR_SNAP_TNPC 0x10
315#define GR_SNAP_O7 0x18
316#define GR_SNAP_I7 0x20
317#define GR_SNAP_THREAD 0x28
318#define GR_SNAP_PAD1 0x30
319#define GR_SNAP_PAD2 0x38
320
321#endif /* __KERNEL__ */
322
323/* Stuff for the ptrace system call */
324#define PTRACE_SPARC_DETACH 11
325#define PTRACE_GETREGS 12
326#define PTRACE_SETREGS 13
327#define PTRACE_GETFPREGS 14
328#define PTRACE_SETFPREGS 15
329#define PTRACE_READDATA 16
330#define PTRACE_WRITEDATA 17
331#define PTRACE_READTEXT 18
332#define PTRACE_WRITETEXT 19
333#define PTRACE_GETFPAREGS 20
334#define PTRACE_SETFPAREGS 21
335
336/* These are for debugging 64-bit processes, either from a 32-bit or 64-bit
337 * parent. Their 32-bit counterparts above are for debugging 32-bit processes only.
338 */
339
340#define PTRACE_GETREGS64 22
341#define PTRACE_SETREGS64 23
342/* PTRACE_SYSCALL is 24 */
343#define PTRACE_GETFPREGS64 25
344#define PTRACE_SETFPREGS64 26
345
346#endif /* !(_SPARC64_PTRACE_H) */
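
For reference, the PT_V9_* values above are byte offsets into the 64-bit pt_regs frame the kernel saves on a trap. Assuming the usual sparc64 layout of sixteen 8-byte u_regs followed by tstate, tpc, tnpc and a 32-bit y/magic pair (an assumption for illustration, not something this patch spells out), the offsets can be cross-checked at compile time on an LP64 target with a sketch like this:

#include <stddef.h>

/* Assumed sparc64 register-frame layout -- illustration only, not taken
 * from this patch.  Requires an LP64 target so unsigned long is 8 bytes.
 */
struct ptregs_sketch {
	unsigned long u_regs[16];	/* %g0-%g7 followed by %i0-%i7 */
	unsigned long tstate;
	unsigned long tpc;
	unsigned long tnpc;
	unsigned int y;
	unsigned int magic;
};

_Static_assert(sizeof(((struct ptregs_sketch *)0)->u_regs) == 0x80,
	       "u_regs span matches PT_V9_G0..PT_V9_I7");
_Static_assert(offsetof(struct ptregs_sketch, tstate) == 0x80, "PT_V9_TSTATE");
_Static_assert(offsetof(struct ptregs_sketch, tpc)    == 0x88, "PT_V9_TPC");
_Static_assert(offsetof(struct ptregs_sketch, tnpc)   == 0x90, "PT_V9_TNPC");
_Static_assert(offsetof(struct ptregs_sketch, y)      == 0x98, "PT_V9_Y");
_Static_assert(offsetof(struct ptregs_sketch, magic)  == 0x9c, "PT_V9_MAGIC");
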
diff --git a/include/asm-sparc/reboot.h b/include/asm-sparc/reboot.h
new file mode 100644
index 000000000000..3f3f43f5be5e
--- /dev/null
+++ b/include/asm-sparc/reboot.h
@@ -0,0 +1,6 @@
1#ifndef _SPARC64_REBOOT_H
2#define _SPARC64_REBOOT_H
3
4extern void machine_alt_power_off(void);
5
6#endif /* _SPARC64_REBOOT_H */
diff --git a/include/asm-sparc/reg.h b/include/asm-sparc/reg.h
index ea0a7e590bb3..cb34b0a49aad 100644
--- a/include/asm-sparc/reg.h
+++ b/include/asm-sparc/reg.h
@@ -1,79 +1,8 @@
+#ifndef ___ASM_SPARC_REG_H
+#define ___ASM_SPARC_REG_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm-sparc/reg_64.h>
+#else
+#include <asm-sparc/reg_32.h>
+#endif
+#endif
1/*
2 * linux/include/asm-sparc/reg.h
3 * Layout of the registers as expected by gdb on the Sparc
4 * we should replace the user.h definitions with those in
5 * this file, we don't even use the other
6 * -miguel
7 *
8 * The names of the structures, constants and aliases in this file
9 * have the same names as the sunos ones, some programs rely on these
10 * names (gdb for example).
11 *
12 */
13
14#ifndef __SPARC_REG_H
15#define __SPARC_REG_H
16
17struct regs {
18 int r_psr;
19#define r_ps r_psr
20 int r_pc;
21 int r_npc;
22 int r_y;
23 int r_g1;
24 int r_g2;
25 int r_g3;
26 int r_g4;
27 int r_g5;
28 int r_g6;
29 int r_g7;
30 int r_o0;
31 int r_o1;
32 int r_o2;
33 int r_o3;
34 int r_o4;
35 int r_o5;
36 int r_o6;
37 int r_o7;
38};
39
40struct fpq {
41 unsigned long *addr;
42 unsigned long instr;
43};
44
45struct fq {
46 union {
47 double whole;
48 struct fpq fpq;
49 } FQu;
50};
51
52#define FPU_REGS_TYPE unsigned int
53#define FPU_FSR_TYPE unsigned
54
55struct fp_status {
56 union {
57 FPU_REGS_TYPE Fpu_regs[32];
58 double Fpu_dregs[16];
59 } fpu_fr;
60 FPU_FSR_TYPE Fpu_fsr;
61 unsigned Fpu_flags;
62 unsigned Fpu_extra;
63 unsigned Fpu_qcnt;
64 struct fq Fpu_q[16];
65};
66
67#define fpu_regs f_fpstatus.fpu_fr.Fpu_regs
68#define fpu_dregs f_fpstatus.fpu_fr.Fpu_dregs
69#define fpu_fsr f_fpstatus.Fpu_fsr
70#define fpu_flags f_fpstatus.Fpu_flags
71#define fpu_extra f_fpstatus.Fpu_extra
72#define fpu_q f_fpstatus.Fpu_q
73#define fpu_qcnt f_fpstatus.Fpu_qcnt
74
75struct fpu {
76 struct fp_status f_fpstatus;
77};
78
79#endif /* __SPARC_REG_H */
diff --git a/include/asm-sparc/reg_32.h b/include/asm-sparc/reg_32.h
new file mode 100644
index 000000000000..42fecfcd97e7
--- /dev/null
+++ b/include/asm-sparc/reg_32.h
@@ -0,0 +1,79 @@
1/*
2 * linux/include/asm-sparc/reg.h
3 * Layout of the registers as expected by gdb on the Sparc
4 * we should replace the user.h definitions with those in
5 * this file, we don't even use the other
6 * -miguel
7 *
8 * The names of the structures, constants and aliases in this file
9 * have the same names as the sunos ones, some programs rely on these
10 * names (gdb for example).
11 *
12 */
13
14#ifndef __SPARC_REG_H
15#define __SPARC_REG_H
16
17struct regs {
18 int r_psr;
19#define r_ps r_psr
20 int r_pc;
21 int r_npc;
22 int r_y;
23 int r_g1;
24 int r_g2;
25 int r_g3;
26 int r_g4;
27 int r_g5;
28 int r_g6;
29 int r_g7;
30 int r_o0;
31 int r_o1;
32 int r_o2;
33 int r_o3;
34 int r_o4;
35 int r_o5;
36 int r_o6;
37 int r_o7;
38};
39
40struct fpq {
41 unsigned long *addr;
42 unsigned long instr;
43};
44
45struct fq {
46 union {
47 double whole;
48 struct fpq fpq;
49 } FQu;
50};
51
52#define FPU_REGS_TYPE unsigned int
53#define FPU_FSR_TYPE unsigned
54
55struct fp_status {
56 union {
57 FPU_REGS_TYPE Fpu_regs[32];
58 double Fpu_dregs[16];
59 } fpu_fr;
60 FPU_FSR_TYPE Fpu_fsr;
61 unsigned Fpu_flags;
62 unsigned Fpu_extra;
63 unsigned Fpu_qcnt;
64 struct fq Fpu_q[16];
65};
66
67#define fpu_regs f_fpstatus.fpu_fr.Fpu_regs
68#define fpu_dregs f_fpstatus.fpu_fr.Fpu_dregs
69#define fpu_fsr f_fpstatus.Fpu_fsr
70#define fpu_flags f_fpstatus.Fpu_flags
71#define fpu_extra f_fpstatus.Fpu_extra
72#define fpu_q f_fpstatus.Fpu_q
73#define fpu_qcnt f_fpstatus.Fpu_qcnt
74
75struct fpu {
76 struct fp_status f_fpstatus;
77};
78
79#endif /* __SPARC_REG_H */
diff --git a/include/asm-sparc/reg_64.h b/include/asm-sparc/reg_64.h
new file mode 100644
index 000000000000..eb24a07ff4d5
--- /dev/null
+++ b/include/asm-sparc/reg_64.h
@@ -0,0 +1,56 @@
1/*
2 * linux/asm-sparc64/reg.h
3 * Layout of the registers as expected by gdb on the Sparc
4 * we should replace the user.h definitions with those in
5 * this file, we don't even use the other
6 * -miguel
7 *
8 * The names of the structures, constants and aliases in this file
9 * have the same names as the sunos ones, some programs rely on these
10 * names (gdb for example).
11 *
12 */
13
14#ifndef __SPARC64_REG_H
15#define __SPARC64_REG_H
16
17struct regs {
18 unsigned long r_g1;
19 unsigned long r_g2;
20 unsigned long r_g3;
21 unsigned long r_g4;
22 unsigned long r_g5;
23 unsigned long r_g6;
24 unsigned long r_g7;
25 unsigned long r_o0;
26 unsigned long r_o1;
27 unsigned long r_o2;
28 unsigned long r_o3;
29 unsigned long r_o4;
30 unsigned long r_o5;
31 unsigned long r_o6;
32 unsigned long r_o7;
33 unsigned long __pad;
34 unsigned long r_tstate;
35 unsigned long r_tpc;
36 unsigned long r_tnpc;
37 unsigned int r_y;
38 unsigned int r_fprs;
39};
40
41#define FPU_REGS_TYPE unsigned int
42#define FPU_FSR_TYPE unsigned long
43
44struct fp_status {
45 unsigned long fpu_fr[32];
46 unsigned long Fpu_fsr;
47};
48
49struct fpu {
50 struct fp_status f_fpstatus;
51};
52
53#define fpu_regs f_fpstatus.fpu_fr
54#define fpu_fsr f_fpstatus.Fpu_fsr
55
56#endif /* __SPARC64_REG_H */
diff --git a/include/asm-sparc/resource.h b/include/asm-sparc/resource.h
index 985948a41299..fe163cafb4c7 100644
--- a/include/asm-sparc/resource.h
+++ b/include/asm-sparc/resource.h
@@ -1,7 +1,7 @@
 /*
  * resource.h: Resource definitions.
  *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
  */
 
 #ifndef _SPARC_RESOURCE_H
@@ -14,12 +14,16 @@
 #define RLIMIT_NOFILE 6 /* max number of open files */
 #define RLIMIT_NPROC 7 /* max number of processes */
 
+#if defined(__sparc__) && defined(__arch64__)
+/* Use generic version */
+#else
 /*
  * SuS says limits have to be unsigned.
  * We make this unsigned, but keep the
  * old value for compatibility:
  */
 #define RLIM_INFINITY 0x7fffffff
+#endif
 
 #include <asm-generic/resource.h>
 
diff --git a/include/asm-sparc/rwsem-const.h b/include/asm-sparc/rwsem-const.h
new file mode 100644
index 000000000000..a303c9d64d84
--- /dev/null
+++ b/include/asm-sparc/rwsem-const.h
@@ -0,0 +1,12 @@
1/* rwsem-const.h: RW semaphore counter constants. */
2#ifndef _SPARC64_RWSEM_CONST_H
3#define _SPARC64_RWSEM_CONST_H
4
5#define RWSEM_UNLOCKED_VALUE 0x00000000
6#define RWSEM_ACTIVE_BIAS 0x00000001
7#define RWSEM_ACTIVE_MASK 0x0000ffff
8#define RWSEM_WAITING_BIAS 0xffff0000
9#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
10#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
11
12#endif /* _SPARC64_RWSEM_CONST_H */
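
The intent behind these constants is easiest to see by working the counter through a couple of states: each reader adds RWSEM_ACTIVE_BIAS, a writer adds RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS, so the count stays positive while only readers hold the semaphore and goes negative as soon as a writer or waiter is involved, with the low 16 bits always holding the active count. A small user-space sketch of the arithmetic (the kernel performs these updates atomically with CAS, of course):

#include <stdio.h>

#define RWSEM_UNLOCKED_VALUE	0x00000000
#define RWSEM_ACTIVE_BIAS	0x00000001
#define RWSEM_ACTIVE_MASK	0x0000ffff
#define RWSEM_WAITING_BIAS	((int)0xffff0000)
#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
	int count = RWSEM_UNLOCKED_VALUE;

	count += RWSEM_ACTIVE_READ_BIAS;	/* first reader takes the lock */
	count += RWSEM_ACTIVE_READ_BIAS;	/* second reader joins it */
	printf("two readers: count=0x%08x active=%d negative=%d\n",
	       (unsigned int)count, count & RWSEM_ACTIVE_MASK, count < 0);

	count = RWSEM_UNLOCKED_VALUE + RWSEM_ACTIVE_WRITE_BIAS;
	printf("one writer:  count=0x%08x active=%d negative=%d\n",
	       (unsigned int)count, count & RWSEM_ACTIVE_MASK, count < 0);
	return 0;
}
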
diff --git a/include/asm-sparc/rwsem.h b/include/asm-sparc/rwsem.h
new file mode 100644
index 000000000000..1dc129ac2feb
--- /dev/null
+++ b/include/asm-sparc/rwsem.h
@@ -0,0 +1,84 @@
1/*
2 * rwsem.h: R/W semaphores implemented using CAS
3 *
4 * Written by David S. Miller (davem@redhat.com), 2001.
5 * Derived from asm-i386/rwsem.h
6 */
7#ifndef _SPARC64_RWSEM_H
8#define _SPARC64_RWSEM_H
9
10#ifndef _LINUX_RWSEM_H
11#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
12#endif
13
14#ifdef __KERNEL__
15
16#include <linux/list.h>
17#include <linux/spinlock.h>
18#include <asm/rwsem-const.h>
19
20struct rwsem_waiter;
21
22struct rw_semaphore {
23 signed int count;
24 spinlock_t wait_lock;
25 struct list_head wait_list;
26#ifdef CONFIG_DEBUG_LOCK_ALLOC
27 struct lockdep_map dep_map;
28#endif
29};
30
31#ifdef CONFIG_DEBUG_LOCK_ALLOC
32# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
33#else
34# define __RWSEM_DEP_MAP_INIT(lockname)
35#endif
36
37#define __RWSEM_INITIALIZER(name) \
38{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
39 __RWSEM_DEP_MAP_INIT(name) }
40
41#define DECLARE_RWSEM(name) \
42 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
43
44extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
45 struct lock_class_key *key);
46
47#define init_rwsem(sem) \
48do { \
49 static struct lock_class_key __key; \
50 \
51 __init_rwsem((sem), #sem, &__key); \
52} while (0)
53
54extern void __down_read(struct rw_semaphore *sem);
55extern int __down_read_trylock(struct rw_semaphore *sem);
56extern void __down_write(struct rw_semaphore *sem);
57extern int __down_write_trylock(struct rw_semaphore *sem);
58extern void __up_read(struct rw_semaphore *sem);
59extern void __up_write(struct rw_semaphore *sem);
60extern void __downgrade_write(struct rw_semaphore *sem);
61
62static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
63{
64 __down_write(sem);
65}
66
67static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
68{
69 return atomic_add_return(delta, (atomic_t *)(&sem->count));
70}
71
72static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
73{
74 atomic_add(delta, (atomic_t *)(&sem->count));
75}
76
77static inline int rwsem_is_locked(struct rw_semaphore *sem)
78{
79 return (sem->count != 0);
80}
81
82#endif /* __KERNEL__ */
83
84#endif /* _SPARC64_RWSEM_H */
diff --git a/include/asm-sparc/sbus.h b/include/asm-sparc/sbus.h
index f1d2fe1c9a30..8f29a1979665 100644
--- a/include/asm-sparc/sbus.h
+++ b/include/asm-sparc/sbus.h
@@ -1,153 +1,8 @@
+#ifndef ___ASM_SPARC_SBUS_H
+#define ___ASM_SPARC_SBUS_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm-sparc/sbus_64.h>
+#else
+#include <asm-sparc/sbus_32.h>
+#endif
+#endif
1/*
2 * sbus.h: Defines for the Sun SBus.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#ifndef _SPARC_SBUS_H
8#define _SPARC_SBUS_H
9
10#include <linux/dma-mapping.h>
11#include <linux/ioport.h>
12
13#include <asm/oplib.h>
14#include <asm/prom.h>
15#include <asm/of_device.h>
16#include <asm/scatterlist.h>
17
18/* We scan which devices are on the SBus using the PROM node device
19 * tree. SBus devices are described in two different ways. You can
20 * either get an absolute address at which to access the device, or
21 * you can get a SBus 'slot' number and an offset within that slot.
22 */
23
24/* The base address at which to calculate device OBIO addresses. */
25#define SUN_SBUS_BVADDR 0xf8000000
26#define SBUS_OFF_MASK 0x01ffffff
27
28/* These routines are used to calculate device address from slot
29 * numbers + offsets, and vice versa.
30 */
31
32static inline unsigned long sbus_devaddr(int slotnum, unsigned long offset)
33{
34 return (unsigned long) (SUN_SBUS_BVADDR+((slotnum)<<25)+(offset));
35}
36
37static inline int sbus_dev_slot(unsigned long dev_addr)
38{
39 return (int) (((dev_addr)-SUN_SBUS_BVADDR)>>25);
40}
41
42struct sbus_bus;
43
44/* Linux SBUS device tables */
45struct sbus_dev {
46 struct of_device ofdev;
47 struct sbus_bus *bus;
48 struct sbus_dev *next;
49 struct sbus_dev *child;
50 struct sbus_dev *parent;
51 int prom_node;
52 char prom_name[64];
53 int slot;
54
55 struct resource resource[PROMREG_MAX];
56
57 struct linux_prom_registers reg_addrs[PROMREG_MAX];
58 int num_registers;
59
60 struct linux_prom_ranges device_ranges[PROMREG_MAX];
61 int num_device_ranges;
62
63 unsigned int irqs[4];
64 int num_irqs;
65};
66#define to_sbus_device(d) container_of(d, struct sbus_dev, ofdev.dev)
67
68/* This struct describes the SBus(s) found on this machine. */
69struct sbus_bus {
70 struct of_device ofdev;
71 struct sbus_dev *devices; /* Link to devices on this SBus */
72 struct sbus_bus *next; /* next SBus, if more than one SBus */
73 int prom_node; /* PROM device tree node for this SBus */
74 char prom_name[64]; /* Usually "sbus" or "sbi" */
75 int clock_freq;
76
77 struct linux_prom_ranges sbus_ranges[PROMREG_MAX];
78 int num_sbus_ranges;
79
80 int devid;
81 int board;
82};
83#define to_sbus(d) container_of(d, struct sbus_bus, ofdev.dev)
84
85extern struct sbus_bus *sbus_root;
86
87static inline int
88sbus_is_slave(struct sbus_dev *dev)
89{
90 /* XXX Have to write this for sun4c's */
91 return 0;
92}
93
94/* Device probing routines could find these handy */
95#define for_each_sbus(bus) \
96 for((bus) = sbus_root; (bus); (bus)=(bus)->next)
97
98#define for_each_sbusdev(device, bus) \
99 for((device) = (bus)->devices; (device); (device)=(device)->next)
100
101#define for_all_sbusdev(device, bus) \
102 for ((bus) = sbus_root; (bus); (bus) = (bus)->next) \
103 for ((device) = (bus)->devices; (device); (device) = (device)->next)
104
105/* Driver DVMA interfaces. */
106#define sbus_can_dma_64bit(sdev) (0) /* actually, sparc_cpu_model==sun4d */
107#define sbus_can_burst64(sdev) (0) /* actually, sparc_cpu_model==sun4d */
108extern void sbus_set_sbus64(struct sbus_dev *, int);
109extern void sbus_fill_device_irq(struct sbus_dev *);
110
111/* These yield IOMMU mappings in consistent mode. */
112extern void *sbus_alloc_consistent(struct sbus_dev *, long, u32 *dma_addrp);
113extern void sbus_free_consistent(struct sbus_dev *, long, void *, u32);
114void prom_adjust_ranges(struct linux_prom_ranges *, int,
115 struct linux_prom_ranges *, int);
116
117#define SBUS_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL
118#define SBUS_DMA_TODEVICE DMA_TO_DEVICE
119#define SBUS_DMA_FROMDEVICE DMA_FROM_DEVICE
120#define SBUS_DMA_NONE DMA_NONE
121
122/* All the rest use streaming mode mappings. */
123extern dma_addr_t sbus_map_single(struct sbus_dev *, void *, size_t, int);
124extern void sbus_unmap_single(struct sbus_dev *, dma_addr_t, size_t, int);
125extern int sbus_map_sg(struct sbus_dev *, struct scatterlist *, int, int);
126extern void sbus_unmap_sg(struct sbus_dev *, struct scatterlist *, int, int);
127
128/* Finally, allow explicit synchronization of streamable mappings. */
129extern void sbus_dma_sync_single_for_cpu(struct sbus_dev *, dma_addr_t, size_t, int);
130#define sbus_dma_sync_single sbus_dma_sync_single_for_cpu
131extern void sbus_dma_sync_single_for_device(struct sbus_dev *, dma_addr_t, size_t, int);
132extern void sbus_dma_sync_sg_for_cpu(struct sbus_dev *, struct scatterlist *, int, int);
133#define sbus_dma_sync_sg sbus_dma_sync_sg_for_cpu
134extern void sbus_dma_sync_sg_for_device(struct sbus_dev *, struct scatterlist *, int, int);
135
136/* Eric Brower (ebrower@usa.net)
137 * Translate SBus interrupt levels to ino values--
138 * this is used when converting sbus "interrupts" OBP
139 * node values to "intr" node values, and is platform
140 * dependent. If only we could call OBP with
141 * "sbus-intr>cpu (sbint -- ino)" from kernel...
142 * See .../drivers/sbus/sbus.c for details.
143 */
144BTFIXUPDEF_CALL(unsigned int, sbint_to_irq, struct sbus_dev *sdev, unsigned int)
145#define sbint_to_irq(sdev, sbint) BTFIXUP_CALL(sbint_to_irq)(sdev, sbint)
146
147extern void sbus_arch_bus_ranges_init(struct device_node *, struct sbus_bus *);
148extern void sbus_setup_iommu(struct sbus_bus *, struct device_node *);
149extern void sbus_setup_arch_props(struct sbus_bus *, struct device_node *);
150extern int sbus_arch_preinit(void);
151extern void sbus_arch_postinit(void);
152
153#endif /* !(_SPARC_SBUS_H) */
diff --git a/include/asm-sparc/sbus_32.h b/include/asm-sparc/sbus_32.h
new file mode 100644
index 000000000000..77b5d3aadc99
--- /dev/null
+++ b/include/asm-sparc/sbus_32.h
@@ -0,0 +1,153 @@
1/*
2 * sbus.h: Defines for the Sun SBus.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#ifndef _SPARC_SBUS_H
8#define _SPARC_SBUS_H
9
10#include <linux/dma-mapping.h>
11#include <linux/ioport.h>
12
13#include <asm/oplib.h>
14#include <asm/prom.h>
15#include <asm/of_device.h>
16#include <asm/scatterlist.h>
17
18/* We scan which devices are on the SBus using the PROM node device
19 * tree. SBus devices are described in two different ways. You can
20 * either get an absolute address at which to access the device, or
21 * you can get a SBus 'slot' number and an offset within that slot.
22 */
23
24/* The base address at which to calculate device OBIO addresses. */
25#define SUN_SBUS_BVADDR 0xf8000000
26#define SBUS_OFF_MASK 0x01ffffff
27
28/* These routines are used to calculate device address from slot
29 * numbers + offsets, and vice versa.
30 */
31
32static inline unsigned long sbus_devaddr(int slotnum, unsigned long offset)
33{
34 return (unsigned long) (SUN_SBUS_BVADDR+((slotnum)<<25)+(offset));
35}
36
37static inline int sbus_dev_slot(unsigned long dev_addr)
38{
39 return (int) (((dev_addr)-SUN_SBUS_BVADDR)>>25);
40}
41
42struct sbus_bus;
43
44/* Linux SBUS device tables */
45struct sbus_dev {
46 struct of_device ofdev;
47 struct sbus_bus *bus;
48 struct sbus_dev *next;
49 struct sbus_dev *child;
50 struct sbus_dev *parent;
51 int prom_node;
52 char prom_name[64];
53 int slot;
54
55 struct resource resource[PROMREG_MAX];
56
57 struct linux_prom_registers reg_addrs[PROMREG_MAX];
58 int num_registers;
59
60 struct linux_prom_ranges device_ranges[PROMREG_MAX];
61 int num_device_ranges;
62
63 unsigned int irqs[4];
64 int num_irqs;
65};
66#define to_sbus_device(d) container_of(d, struct sbus_dev, ofdev.dev)
67
68/* This struct describes the SBus(s) found on this machine. */
69struct sbus_bus {
70 struct of_device ofdev;
71 struct sbus_dev *devices; /* Link to devices on this SBus */
72 struct sbus_bus *next; /* next SBus, if more than one SBus */
73 int prom_node; /* PROM device tree node for this SBus */
74 char prom_name[64]; /* Usually "sbus" or "sbi" */
75 int clock_freq;
76
77 struct linux_prom_ranges sbus_ranges[PROMREG_MAX];
78 int num_sbus_ranges;
79
80 int devid;
81 int board;
82};
83#define to_sbus(d) container_of(d, struct sbus_bus, ofdev.dev)
84
85extern struct sbus_bus *sbus_root;
86
87static inline int
88sbus_is_slave(struct sbus_dev *dev)
89{
90 /* XXX Have to write this for sun4c's */
91 return 0;
92}
93
94/* Device probing routines could find these handy */
95#define for_each_sbus(bus) \
96 for((bus) = sbus_root; (bus); (bus)=(bus)->next)
97
98#define for_each_sbusdev(device, bus) \
99 for((device) = (bus)->devices; (device); (device)=(device)->next)
100
101#define for_all_sbusdev(device, bus) \
102 for ((bus) = sbus_root; (bus); (bus) = (bus)->next) \
103 for ((device) = (bus)->devices; (device); (device) = (device)->next)
104
105/* Driver DVMA interfaces. */
106#define sbus_can_dma_64bit(sdev) (0) /* actually, sparc_cpu_model==sun4d */
107#define sbus_can_burst64(sdev) (0) /* actually, sparc_cpu_model==sun4d */
108extern void sbus_set_sbus64(struct sbus_dev *, int);
109extern void sbus_fill_device_irq(struct sbus_dev *);
110
111/* These yield IOMMU mappings in consistent mode. */
112extern void *sbus_alloc_consistent(struct sbus_dev *, long, u32 *dma_addrp);
113extern void sbus_free_consistent(struct sbus_dev *, long, void *, u32);
114void prom_adjust_ranges(struct linux_prom_ranges *, int,
115 struct linux_prom_ranges *, int);
116
117#define SBUS_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL
118#define SBUS_DMA_TODEVICE DMA_TO_DEVICE
119#define SBUS_DMA_FROMDEVICE DMA_FROM_DEVICE
120#define SBUS_DMA_NONE DMA_NONE
121
122/* All the rest use streaming mode mappings. */
123extern dma_addr_t sbus_map_single(struct sbus_dev *, void *, size_t, int);
124extern void sbus_unmap_single(struct sbus_dev *, dma_addr_t, size_t, int);
125extern int sbus_map_sg(struct sbus_dev *, struct scatterlist *, int, int);
126extern void sbus_unmap_sg(struct sbus_dev *, struct scatterlist *, int, int);
127
128/* Finally, allow explicit synchronization of streamable mappings. */
129extern void sbus_dma_sync_single_for_cpu(struct sbus_dev *, dma_addr_t, size_t, int);
130#define sbus_dma_sync_single sbus_dma_sync_single_for_cpu
131extern void sbus_dma_sync_single_for_device(struct sbus_dev *, dma_addr_t, size_t, int);
132extern void sbus_dma_sync_sg_for_cpu(struct sbus_dev *, struct scatterlist *, int, int);
133#define sbus_dma_sync_sg sbus_dma_sync_sg_for_cpu
134extern void sbus_dma_sync_sg_for_device(struct sbus_dev *, struct scatterlist *, int, int);
135
136/* Eric Brower (ebrower@usa.net)
137 * Translate SBus interrupt levels to ino values--
138 * this is used when converting sbus "interrupts" OBP
139 * node values to "intr" node values, and is platform
140 * dependent. If only we could call OBP with
141 * "sbus-intr>cpu (sbint -- ino)" from kernel...
142 * See .../drivers/sbus/sbus.c for details.
143 */
144BTFIXUPDEF_CALL(unsigned int, sbint_to_irq, struct sbus_dev *sdev, unsigned int)
145#define sbint_to_irq(sdev, sbint) BTFIXUP_CALL(sbint_to_irq)(sdev, sbint)
146
147extern void sbus_arch_bus_ranges_init(struct device_node *, struct sbus_bus *);
148extern void sbus_setup_iommu(struct sbus_bus *, struct device_node *);
149extern void sbus_setup_arch_props(struct sbus_bus *, struct device_node *);
150extern int sbus_arch_preinit(void);
151extern void sbus_arch_postinit(void);
152
153#endif /* !(_SPARC_SBUS_H) */
diff --git a/include/asm-sparc/sbus_64.h b/include/asm-sparc/sbus_64.h
new file mode 100644
index 000000000000..0e16b6dd7e96
--- /dev/null
+++ b/include/asm-sparc/sbus_64.h
@@ -0,0 +1,190 @@
1/* sbus.h: Defines for the Sun SBus.
2 *
3 * Copyright (C) 1996, 1999, 2007 David S. Miller (davem@davemloft.net)
4 */
5
6#ifndef _SPARC64_SBUS_H
7#define _SPARC64_SBUS_H
8
9#include <linux/dma-mapping.h>
10#include <linux/ioport.h>
11
12#include <asm/oplib.h>
13#include <asm/prom.h>
14#include <asm/of_device.h>
15#include <asm/iommu.h>
16#include <asm/scatterlist.h>
17
18/* We scan which devices are on the SBus using the PROM node device
19 * tree. SBus devices are described in two different ways. You can
20 * either get an absolute address at which to access the device, or
21 * you can get a SBus 'slot' number and an offset within that slot.
22 */
23
24/* The base address at which to calculate device OBIO addresses. */
25#define SUN_SBUS_BVADDR 0x00000000
26#define SBUS_OFF_MASK 0x0fffffff
27
28/* These routines are used to calculate device address from slot
29 * numbers + offsets, and vice versa.
30 */
31
32static inline unsigned long sbus_devaddr(int slotnum, unsigned long offset)
33{
34 return (unsigned long) (SUN_SBUS_BVADDR+((slotnum)<<28)+(offset));
35}
36
37static inline int sbus_dev_slot(unsigned long dev_addr)
38{
39 return (int) (((dev_addr)-SUN_SBUS_BVADDR)>>28);
40}
41
42struct sbus_bus;
43
44/* Linux SBUS device tables */
45struct sbus_dev {
46 struct of_device ofdev;
47 struct sbus_bus *bus;
48 struct sbus_dev *next;
49 struct sbus_dev *child;
50 struct sbus_dev *parent;
51 int prom_node;
52 char prom_name[64];
53 int slot;
54
55 struct resource resource[PROMREG_MAX];
56
57 struct linux_prom_registers reg_addrs[PROMREG_MAX];
58 int num_registers;
59
60 struct linux_prom_ranges device_ranges[PROMREG_MAX];
61 int num_device_ranges;
62
63 unsigned int irqs[4];
64 int num_irqs;
65};
66#define to_sbus_device(d) container_of(d, struct sbus_dev, ofdev.dev)
67
68/* This struct describes the SBus(s) found on this machine. */
69struct sbus_bus {
70 struct of_device ofdev;
71 struct sbus_dev *devices; /* Tree of SBUS devices */
72 struct sbus_bus *next; /* Next SBUS in system */
73 int prom_node; /* OBP node of SBUS */
74 char prom_name[64]; /* Usually "sbus" or "sbi" */
75 int clock_freq;
76
77 struct linux_prom_ranges sbus_ranges[PROMREG_MAX];
78 int num_sbus_ranges;
79
80 int portid;
81};
82#define to_sbus(d) container_of(d, struct sbus_bus, ofdev.dev)
83
84extern struct sbus_bus *sbus_root;
85
86/* Device probing routines could find these handy */
87#define for_each_sbus(bus) \
88 for((bus) = sbus_root; (bus); (bus)=(bus)->next)
89
90#define for_each_sbusdev(device, bus) \
91 for((device) = (bus)->devices; (device); (device)=(device)->next)
92
93#define for_all_sbusdev(device, bus) \
94 for ((bus) = sbus_root; (bus); (bus) = (bus)->next) \
95 for ((device) = (bus)->devices; (device); (device) = (device)->next)
96
97/* Driver DVMA interfaces. */
98#define sbus_can_dma_64bit(sdev) (1)
99#define sbus_can_burst64(sdev) (1)
100extern void sbus_set_sbus64(struct sbus_dev *, int);
101extern void sbus_fill_device_irq(struct sbus_dev *);
102
103static inline void *sbus_alloc_consistent(struct sbus_dev *sdev , size_t size,
104 dma_addr_t *dma_handle)
105{
106 return dma_alloc_coherent(&sdev->ofdev.dev, size,
107 dma_handle, GFP_ATOMIC);
108}
109
110static inline void sbus_free_consistent(struct sbus_dev *sdev, size_t size,
111 void *vaddr, dma_addr_t dma_handle)
112{
113 return dma_free_coherent(&sdev->ofdev.dev, size, vaddr, dma_handle);
114}
115
116#define SBUS_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL
117#define SBUS_DMA_TODEVICE DMA_TO_DEVICE
118#define SBUS_DMA_FROMDEVICE DMA_FROM_DEVICE
119#define SBUS_DMA_NONE DMA_NONE
120
121/* All the rest use streaming mode mappings. */
122static inline dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr,
123 size_t size, int direction)
124{
125 return dma_map_single(&sdev->ofdev.dev, ptr, size,
126 (enum dma_data_direction) direction);
127}
128
129static inline void sbus_unmap_single(struct sbus_dev *sdev,
130 dma_addr_t dma_addr, size_t size,
131 int direction)
132{
133 dma_unmap_single(&sdev->ofdev.dev, dma_addr, size,
134 (enum dma_data_direction) direction);
135}
136
137static inline int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg,
138 int nents, int direction)
139{
140 return dma_map_sg(&sdev->ofdev.dev, sg, nents,
141 (enum dma_data_direction) direction);
142}
143
144static inline void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg,
145 int nents, int direction)
146{
147 dma_unmap_sg(&sdev->ofdev.dev, sg, nents,
148 (enum dma_data_direction) direction);
149}
150
151/* Finally, allow explicit synchronization of streamable mappings. */
152static inline void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev,
153 dma_addr_t dma_handle,
154 size_t size, int direction)
155{
156 dma_sync_single_for_cpu(&sdev->ofdev.dev, dma_handle, size,
157 (enum dma_data_direction) direction);
158}
159#define sbus_dma_sync_single sbus_dma_sync_single_for_cpu
160
161static inline void sbus_dma_sync_single_for_device(struct sbus_dev *sdev,
162 dma_addr_t dma_handle,
163 size_t size, int direction)
164{
165 /* No flushing needed to sync cpu writes to the device. */
166}
167
168static inline void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev,
169 struct scatterlist *sg,
170 int nents, int direction)
171{
172 dma_sync_sg_for_cpu(&sdev->ofdev.dev, sg, nents,
173 (enum dma_data_direction) direction);
174}
175#define sbus_dma_sync_sg sbus_dma_sync_sg_for_cpu
176
177static inline void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev,
178 struct scatterlist *sg,
179 int nents, int direction)
180{
181 /* No flushing needed to sync cpu writes to the device. */
182}
183
184extern void sbus_arch_bus_ranges_init(struct device_node *, struct sbus_bus *);
185extern void sbus_setup_iommu(struct sbus_bus *, struct device_node *);
186extern void sbus_setup_arch_props(struct sbus_bus *, struct device_node *);
187extern int sbus_arch_preinit(void);
188extern void sbus_arch_postinit(void);
189
190#endif /* !(_SPARC64_SBUS_H) */
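
Because the 64-bit wrappers above simply forward to the generic DMA API through &sdev->ofdev.dev, a driver using them looks just like any other streaming-DMA user. A hypothetical fragment (the function name and buffer are illustrative assumptions, not part of this patch):

/* Hypothetical driver fragment -- sketch only, error handling omitted. */
static void example_push_buffer(struct sbus_dev *sdev, void *buf, size_t len)
{
	dma_addr_t busaddr;

	/* Hand the CPU buffer to the device for a write-out. */
	busaddr = sbus_map_single(sdev, buf, len, SBUS_DMA_TODEVICE);

	/* ... program the device with busaddr/len and wait for completion ... */

	sbus_unmap_single(sdev, busaddr, len, SBUS_DMA_TODEVICE);
}
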
diff --git a/include/asm-sparc/scatterlist.h b/include/asm-sparc/scatterlist.h
index c82609ca1d0f..b1a0e316c2b6 100644
--- a/include/asm-sparc/scatterlist.h
+++ b/include/asm-sparc/scatterlist.h
@@ -1,26 +1,8 @@
+#ifndef ___ASM_SPARC_SCATTERLIST_H
+#define ___ASM_SPARC_SCATTERLIST_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm-sparc/scatterlist_64.h>
+#else
+#include <asm-sparc/scatterlist_32.h>
+#endif
+#endif
1#ifndef _SPARC_SCATTERLIST_H
2#define _SPARC_SCATTERLIST_H
3
4#include <linux/types.h>
5
6struct scatterlist {
7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
10 unsigned long page_link;
11 unsigned int offset;
12
13 unsigned int length;
14
15 __u32 dvma_address; /* A place to hang host-specific addresses at. */
16 __u32 dvma_length;
17};
18
19#define sg_dma_address(sg) ((sg)->dvma_address)
20#define sg_dma_len(sg) ((sg)->dvma_length)
21
22#define ISA_DMA_THRESHOLD (~0UL)
23
24#define ARCH_HAS_SG_CHAIN
25
26#endif /* !(_SPARC_SCATTERLIST_H) */
diff --git a/include/asm-sparc/scatterlist_32.h b/include/asm-sparc/scatterlist_32.h
new file mode 100644
index 000000000000..c82609ca1d0f
--- /dev/null
+++ b/include/asm-sparc/scatterlist_32.h
@@ -0,0 +1,26 @@
1#ifndef _SPARC_SCATTERLIST_H
2#define _SPARC_SCATTERLIST_H
3
4#include <linux/types.h>
5
6struct scatterlist {
7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
10 unsigned long page_link;
11 unsigned int offset;
12
13 unsigned int length;
14
15 __u32 dvma_address; /* A place to hang host-specific addresses at. */
16 __u32 dvma_length;
17};
18
19#define sg_dma_address(sg) ((sg)->dvma_address)
20#define sg_dma_len(sg) ((sg)->dvma_length)
21
22#define ISA_DMA_THRESHOLD (~0UL)
23
24#define ARCH_HAS_SG_CHAIN
25
26#endif /* !(_SPARC_SCATTERLIST_H) */
diff --git a/include/asm-sparc/scatterlist_64.h b/include/asm-sparc/scatterlist_64.h
new file mode 100644
index 000000000000..81bd058f9382
--- /dev/null
+++ b/include/asm-sparc/scatterlist_64.h
@@ -0,0 +1,27 @@
1#ifndef _SPARC64_SCATTERLIST_H
2#define _SPARC64_SCATTERLIST_H
3
4#include <asm/page.h>
5#include <asm/types.h>
6
7struct scatterlist {
8#ifdef CONFIG_DEBUG_SG
9 unsigned long sg_magic;
10#endif
11 unsigned long page_link;
12 unsigned int offset;
13
14 unsigned int length;
15
16 dma_addr_t dma_address;
17 __u32 dma_length;
18};
19
20#define sg_dma_address(sg) ((sg)->dma_address)
21#define sg_dma_len(sg) ((sg)->dma_length)
22
23#define ISA_DMA_THRESHOLD (~0UL)
24
25#define ARCH_HAS_SG_CHAIN
26
27#endif /* !(_SPARC64_SCATTERLIST_H) */
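
With this layout, sg_dma_address()/sg_dma_len() just read back the dma_address/dma_length fields that the IOMMU mapping code fills in, so after dma_map_sg() a driver walks the mapped entries roughly as below (hypothetical fragment assuming <linux/scatterlist.h> and <linux/dma-mapping.h>; the device-programming step is left as a comment):

/* Hypothetical fragment -- sketch only. */
static void example_program_sg(struct device *dev, struct scatterlist *sgl,
			       int nents)
{
	struct scatterlist *sg;
	int mapped, i;

	/* dma_map_sg() may coalesce entries; use its return value. */
	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

	for_each_sg(sgl, sg, mapped, i) {
		dma_addr_t addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		/* ... write addr/len into the device's descriptor ring ... */
	}

	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
}
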
diff --git a/include/asm-sparc/scratchpad.h b/include/asm-sparc/scratchpad.h
new file mode 100644
index 000000000000..5e8b01fb3343
--- /dev/null
+++ b/include/asm-sparc/scratchpad.h
@@ -0,0 +1,14 @@
1#ifndef _SPARC64_SCRATCHPAD_H
2#define _SPARC64_SCRATCHPAD_H
3
4/* Sun4v scratchpad registers, accessed via ASI_SCRATCHPAD. */
5
6#define SCRATCHPAD_MMU_MISS 0x00 /* Shared with OBP - set by OBP */
7#define SCRATCHPAD_CPUID 0x08 /* Shared with OBP - set by hypervisor */
8#define SCRATCHPAD_UTSBREG1 0x10
9#define SCRATCHPAD_UTSBREG2 0x18
10 /* 0x20 and 0x28, hypervisor only... */
11#define SCRATCHPAD_UNUSED1 0x30
12#define SCRATCHPAD_UNUSED2 0x38 /* Reserved for OBP */
13
14#endif /* !(_SPARC64_SCRATCHPAD_H) */
diff --git a/include/asm-sparc/seccomp.h b/include/asm-sparc/seccomp.h
new file mode 100644
index 000000000000..7fcd9968192b
--- /dev/null
+++ b/include/asm-sparc/seccomp.h
@@ -0,0 +1,21 @@
1#ifndef _ASM_SECCOMP_H
2
3#include <linux/thread_info.h> /* already defines TIF_32BIT */
4
5#ifndef TIF_32BIT
6#error "unexpected TIF_32BIT on sparc64"
7#endif
8
9#include <linux/unistd.h>
10
11#define __NR_seccomp_read __NR_read
12#define __NR_seccomp_write __NR_write
13#define __NR_seccomp_exit __NR_exit
14#define __NR_seccomp_sigreturn __NR_rt_sigreturn
15
16#define __NR_seccomp_read_32 __NR_read
17#define __NR_seccomp_write_32 __NR_write
18#define __NR_seccomp_exit_32 __NR_exit
19#define __NR_seccomp_sigreturn_32 __NR_sigreturn
20
21#endif /* _ASM_SECCOMP_H */
diff --git a/include/asm-sparc/sections.h b/include/asm-sparc/sections.h
index 6832841df051..cbd019162425 100644
--- a/include/asm-sparc/sections.h
+++ b/include/asm-sparc/sections.h
@@ -1,6 +1,8 @@
-#ifndef _SPARC_SECTIONS_H
-#define _SPARC_SECTIONS_H
-
-#include <asm-generic/sections.h>
-
+#ifndef ___ASM_SPARC_SECTIONS_H
+#define ___ASM_SPARC_SECTIONS_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm-sparc/sections_64.h>
+#else
+#include <asm-sparc/sections_32.h>
+#endif
 #endif
diff --git a/include/asm-sparc/sections_32.h b/include/asm-sparc/sections_32.h
new file mode 100644
index 000000000000..6832841df051
--- /dev/null
+++ b/include/asm-sparc/sections_32.h
@@ -0,0 +1,6 @@
1#ifndef _SPARC_SECTIONS_H
2#define _SPARC_SECTIONS_H
3
4#include <asm-generic/sections.h>
5
6#endif
diff --git a/include/asm-sparc/sections_64.h b/include/asm-sparc/sections_64.h
new file mode 100644
index 000000000000..3f4b9fdc28d0
--- /dev/null
+++ b/include/asm-sparc/sections_64.h
@@ -0,0 +1,9 @@
1#ifndef _SPARC64_SECTIONS_H
2#define _SPARC64_SECTIONS_H
3
4/* nothing to see, move along */
5#include <asm-generic/sections.h>
6
7extern char _start[];
8
9#endif
diff --git a/include/asm-sparc/semaphore.h b/include/asm-sparc/semaphore.h
deleted file mode 100644
index d9b2034ed1d2..000000000000
--- a/include/asm-sparc/semaphore.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <linux/semaphore.h>
diff --git a/include/asm-sparc/sembuf.h b/include/asm-sparc/sembuf.h
index a79c4bb3c08a..faee1be08d67 100644
--- a/include/asm-sparc/sembuf.h
+++ b/include/asm-sparc/sembuf.h
@@ -1,7 +1,7 @@
 #ifndef _SPARC_SEMBUF_H
 #define _SPARC_SEMBUF_H
 
 /*
  * The semid64_ds structure for sparc architecture.
  * Note extra padding because this structure is passed back and forth
  * between kernel and user space.
@@ -10,16 +10,22 @@
  * - 64-bit time_t to solve y2038 problem
  * - 2 miscellaneous 32-bit values
  */
+#if defined(__sparc__) && defined(__arch64__)
+# define PADDING(x)
+#else
+# define PADDING(x) unsigned int x;
+#endif
 
 struct semid64_ds {
 	struct ipc64_perm sem_perm;	/* permissions .. see ipc.h */
-	unsigned int	__pad1;
+	PADDING(__pad1)
 	__kernel_time_t	sem_otime;	/* last semop time */
-	unsigned int	__pad2;
+	PADDING(__pad2)
 	__kernel_time_t	sem_ctime;	/* last change time */
 	unsigned long	sem_nsems;	/* no. of semaphores in array */
 	unsigned long	__unused1;
 	unsigned long	__unused2;
 };
+#undef PADDING
 
 #endif /* _SPARC64_SEMBUF_H */
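
The PADDING() macro introduced here exists because __kernel_time_t is 4 bytes on sparc32 but 8 bytes on sparc64, so the explicit pad word is only wanted where the type itself does not already fill the 8-byte slot. As a rough illustration (not from the patch), the 32-bit expansion of the structure is:

/* What the sparc32 expansion looks like; sparc64 simply drops the pads,
 * because its 8-byte __kernel_time_t already occupies the space.
 */
struct semid64_ds_32bit_view {
	struct ipc64_perm sem_perm;	/* permissions .. see ipc.h */
	unsigned int	__pad1;		/* PADDING(__pad1) */
	__kernel_time_t	sem_otime;	/* 4 bytes here, 8 on sparc64 */
	unsigned int	__pad2;		/* PADDING(__pad2) */
	__kernel_time_t	sem_ctime;	/* last change time */
	unsigned long	sem_nsems;	/* no. of semaphores in array */
	unsigned long	__unused1;
	unsigned long	__unused2;
};
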
diff --git a/include/asm-sparc/setup.h b/include/asm-sparc/setup.h
index b3af958a2ad2..2643c62f4ac0 100644
--- a/include/asm-sparc/setup.h
+++ b/include/asm-sparc/setup.h
@@ -5,6 +5,10 @@
 #ifndef _SPARC_SETUP_H
 #define _SPARC_SETUP_H
 
-#define COMMAND_LINE_SIZE 256
+#if defined(__sparc__) && defined(__arch64__)
+# define COMMAND_LINE_SIZE 2048
+#else
+# define COMMAND_LINE_SIZE 256
+#endif
 
 #endif /* _SPARC_SETUP_H */
diff --git a/include/asm-sparc/sfafsr.h b/include/asm-sparc/sfafsr.h
new file mode 100644
index 000000000000..e96137b04a4f
--- /dev/null
+++ b/include/asm-sparc/sfafsr.h
@@ -0,0 +1,82 @@
1#ifndef _SPARC64_SFAFSR_H
2#define _SPARC64_SFAFSR_H
3
4#include <linux/const.h>
5
6/* Spitfire Asynchronous Fault Status register, ASI=0x4C VA<63:0>=0x0 */
7
8#define SFAFSR_ME (_AC(1,UL) << SFAFSR_ME_SHIFT)
9#define SFAFSR_ME_SHIFT 32
10#define SFAFSR_PRIV (_AC(1,UL) << SFAFSR_PRIV_SHIFT)
11#define SFAFSR_PRIV_SHIFT 31
12#define SFAFSR_ISAP (_AC(1,UL) << SFAFSR_ISAP_SHIFT)
13#define SFAFSR_ISAP_SHIFT 30
14#define SFAFSR_ETP (_AC(1,UL) << SFAFSR_ETP_SHIFT)
15#define SFAFSR_ETP_SHIFT 29
16#define SFAFSR_IVUE (_AC(1,UL) << SFAFSR_IVUE_SHIFT)
17#define SFAFSR_IVUE_SHIFT 28
18#define SFAFSR_TO (_AC(1,UL) << SFAFSR_TO_SHIFT)
19#define SFAFSR_TO_SHIFT 27
20#define SFAFSR_BERR (_AC(1,UL) << SFAFSR_BERR_SHIFT)
21#define SFAFSR_BERR_SHIFT 26
22#define SFAFSR_LDP (_AC(1,UL) << SFAFSR_LDP_SHIFT)
23#define SFAFSR_LDP_SHIFT 25
24#define SFAFSR_CP (_AC(1,UL) << SFAFSR_CP_SHIFT)
25#define SFAFSR_CP_SHIFT 24
26#define SFAFSR_WP (_AC(1,UL) << SFAFSR_WP_SHIFT)
27#define SFAFSR_WP_SHIFT 23
28#define SFAFSR_EDP (_AC(1,UL) << SFAFSR_EDP_SHIFT)
29#define SFAFSR_EDP_SHIFT 22
30#define SFAFSR_UE (_AC(1,UL) << SFAFSR_UE_SHIFT)
31#define SFAFSR_UE_SHIFT 21
32#define SFAFSR_CE (_AC(1,UL) << SFAFSR_CE_SHIFT)
33#define SFAFSR_CE_SHIFT 20
34#define SFAFSR_ETS (_AC(0xf,UL) << SFAFSR_ETS_SHIFT)
35#define SFAFSR_ETS_SHIFT 16
36#define SFAFSR_PSYND (_AC(0xffff,UL) << SFAFSR_PSYND_SHIFT)
37#define SFAFSR_PSYND_SHIFT 0
38
39/* UDB Error Register, ASI=0x7f VA<63:0>=0x0(High),0x18(Low) for read
40 * ASI=0x77 VA<63:0>=0x0(High),0x18(Low) for write
41 */
42
43#define UDBE_UE (_AC(1,UL) << 9)
44#define UDBE_CE (_AC(1,UL) << 8)
45#define UDBE_E_SYNDR (_AC(0xff,UL) << 0)
46
47/* The trap handlers for asynchronous errors encode the AFSR and
48 * other pieces of information into a 64-bit argument for C code
49 * encoded as follows:
50 *
51 * -----------------------------------------------
52 * | UDB_H | UDB_L | TL>1 | TT | AFSR |
53 * -----------------------------------------------
54 * 63 54 53 44 42 41 33 32 0
55 *
56 * The AFAR is passed in unchanged.
57 */
58#define SFSTAT_UDBH_MASK (_AC(0x3ff,UL) << SFSTAT_UDBH_SHIFT)
59#define SFSTAT_UDBH_SHIFT 54
60#define SFSTAT_UDBL_MASK (_AC(0x3ff,UL) << SFSTAT_UDBH_SHIFT)
61#define SFSTAT_UDBL_SHIFT 44
62#define SFSTAT_TL_GT_ONE (_AC(1,UL) << SFSTAT_TL_GT_ONE_SHIFT)
63#define SFSTAT_TL_GT_ONE_SHIFT 42
64#define SFSTAT_TRAP_TYPE (_AC(0x1FF,UL) << SFSTAT_TRAP_TYPE_SHIFT)
65#define SFSTAT_TRAP_TYPE_SHIFT 33
66#define SFSTAT_AFSR_MASK (_AC(0x1ffffffff,UL) << SFSTAT_AFSR_SHIFT)
67#define SFSTAT_AFSR_SHIFT 0
68
69/* ESTATE Error Enable Register, ASI=0x4b VA<63:0>=0x0 */
70#define ESTATE_ERR_CE 0x1 /* Correctable errors */
71#define ESTATE_ERR_NCE 0x2 /* TO, BERR, LDP, ETP, EDP, WP, UE, IVUE */
72#define ESTATE_ERR_ISAP 0x4 /* System address parity error */
73#define ESTATE_ERR_ALL (ESTATE_ERR_CE | \
74 ESTATE_ERR_NCE | \
75 ESTATE_ERR_ISAP)
76
77/* The various trap types that report using the above state. */
78#define TRAP_TYPE_IAE 0x09 /* Instruction Access Error */
79#define TRAP_TYPE_DAE 0x32 /* Data Access Error */
80#define TRAP_TYPE_CEE 0x63 /* Correctable ECC Error */
81
82#endif /* _SPARC64_SFAFSR_H */
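
Given the field layout documented in the comment above, recovering the individual pieces from the 64-bit status word is plain mask-and-shift; for example (sketch only, using just the macros defined in this header):

/* Decode helpers for the packed error-status word -- sketch only. */
static inline unsigned long sfstat_udbh(unsigned long sfsr)
{
	return (sfsr & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;
}

static inline unsigned long sfstat_trap_type(unsigned long sfsr)
{
	return (sfsr & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
}

static inline unsigned long sfstat_afsr(unsigned long sfsr)
{
	return (sfsr & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
}

/* e.g. sfstat_trap_type(sfsr) == TRAP_TYPE_CEE for a correctable ECC error */
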
diff --git a/include/asm-sparc/sfp-machine.h b/include/asm-sparc/sfp-machine.h
index 266a42b8f99f..c676fcc2dd27 100644
--- a/include/asm-sparc/sfp-machine.h
+++ b/include/asm-sparc/sfp-machine.h
@@ -1,212 +1,8 @@
+#ifndef ___ASM_SPARC_SFP_MACHINE_H
+#define ___ASM_SPARC_SFP_MACHINE_H
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm-sparc/sfp-machine_64.h>
+#else
+#include <asm-sparc/sfp-machine_32.h>
+#endif
+#endif
1/* Machine-dependent software floating-point definitions.
2 Sparc userland (_Q_*) version.
3 Copyright (C) 1997,1998,1999 Free Software Foundation, Inc.
4 This file is part of the GNU C Library.
5 Contributed by Richard Henderson (rth@cygnus.com),
6 Jakub Jelinek (jj@ultra.linux.cz),
7 David S. Miller (davem@redhat.com) and
8 Peter Maydell (pmaydell@chiark.greenend.org.uk).
9
10 The GNU C Library is free software; you can redistribute it and/or
11 modify it under the terms of the GNU Library General Public License as
12 published by the Free Software Foundation; either version 2 of the
13 License, or (at your option) any later version.
14
15 The GNU C Library is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 Library General Public License for more details.
19
20 You should have received a copy of the GNU Library General Public
21 License along with the GNU C Library; see the file COPYING.LIB. If
22 not, write to the Free Software Foundation, Inc.,
23 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
24
25#ifndef _SFP_MACHINE_H
26#define _SFP_MACHINE_H
27
28
29#define _FP_W_TYPE_SIZE 32
30#define _FP_W_TYPE unsigned long
31#define _FP_WS_TYPE signed long
32#define _FP_I_TYPE long
33
34#define _FP_MUL_MEAT_S(R,X,Y) \
35 _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
36#define _FP_MUL_MEAT_D(R,X,Y) \
37 _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
38#define _FP_MUL_MEAT_Q(R,X,Y) \
39 _FP_MUL_MEAT_4_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
40
41#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv(S,R,X,Y)
42#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y)
43#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_4_udiv(Q,R,X,Y)
44
45#define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1)
46#define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1), -1
47#define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1, -1, -1
48#define _FP_NANSIGN_S 0
49#define _FP_NANSIGN_D 0
50#define _FP_NANSIGN_Q 0
51
52#define _FP_KEEPNANFRACP 1
53
54/* If one NaN is signaling and the other is not,
55 * we choose that one, otherwise we choose X.
56 */
57/* For _Qp_* and _Q_*, this should prefer X, for
58 * CPU instruction emulation this should prefer Y.
59 * (see SPAMv9 B.2.2 section).
60 */
61#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
62 do { \
63 if ((_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs) \
64 && !(_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs)) \
65 { \
66 R##_s = X##_s; \
67 _FP_FRAC_COPY_##wc(R,X); \
68 } \
69 else \
70 { \
71 R##_s = Y##_s; \
72 _FP_FRAC_COPY_##wc(R,Y); \
73 } \
74 R##_c = FP_CLS_NAN; \
75 } while (0)
76
77/* Some assembly to speed things up. */
78#define __FP_FRAC_ADD_3(r2,r1,r0,x2,x1,x0,y2,y1,y0) \
79 __asm__ ("addcc %r7,%8,%2\n\t" \
80 "addxcc %r5,%6,%1\n\t" \
81 "addx %r3,%4,%0\n" \
82 : "=r" ((USItype)(r2)), \
83 "=&r" ((USItype)(r1)), \
84 "=&r" ((USItype)(r0)) \
85 : "%rJ" ((USItype)(x2)), \
86 "rI" ((USItype)(y2)), \
87 "%rJ" ((USItype)(x1)), \
88 "rI" ((USItype)(y1)), \
89 "%rJ" ((USItype)(x0)), \
90 "rI" ((USItype)(y0)) \
91 : "cc")
92
93#define __FP_FRAC_SUB_3(r2,r1,r0,x2,x1,x0,y2,y1,y0) \
94 __asm__ ("subcc %r7,%8,%2\n\t" \
95 "subxcc %r5,%6,%1\n\t" \
96 "subx %r3,%4,%0\n" \
97 : "=r" ((USItype)(r2)), \
98 "=&r" ((USItype)(r1)), \
99 "=&r" ((USItype)(r0)) \
100 : "%rJ" ((USItype)(x2)), \
101 "rI" ((USItype)(y2)), \
102 "%rJ" ((USItype)(x1)), \
103 "rI" ((USItype)(y1)), \
104 "%rJ" ((USItype)(x0)), \
105 "rI" ((USItype)(y0)) \
106 : "cc")
107
108#define __FP_FRAC_ADD_4(r3,r2,r1,r0,x3,x2,x1,x0,y3,y2,y1,y0) \
109 do { \
110 /* We need to fool gcc, as we need to pass more than 10 \
111 input/outputs. */ \
112 register USItype _t1 __asm__ ("g1"), _t2 __asm__ ("g2"); \
113 __asm__ __volatile__ ( \
114 "addcc %r8,%9,%1\n\t" \
115 "addxcc %r6,%7,%0\n\t" \
116 "addxcc %r4,%5,%%g2\n\t" \
117 "addx %r2,%3,%%g1\n\t" \
118 : "=&r" ((USItype)(r1)), \
119 "=&r" ((USItype)(r0)) \
120 : "%rJ" ((USItype)(x3)), \
121 "rI" ((USItype)(y3)), \
122 "%rJ" ((USItype)(x2)), \
123 "rI" ((USItype)(y2)), \
124 "%rJ" ((USItype)(x1)), \
125 "rI" ((USItype)(y1)), \
126 "%rJ" ((USItype)(x0)), \
127 "rI" ((USItype)(y0)) \
128 : "cc", "g1", "g2"); \
129 __asm__ __volatile__ ("" : "=r" (_t1), "=r" (_t2)); \
130 r3 = _t1; r2 = _t2; \
131 } while (0)
132
133#define __FP_FRAC_SUB_4(r3,r2,r1,r0,x3,x2,x1,x0,y3,y2,y1,y0) \
134 do { \
135 /* We need to fool gcc, as we need to pass more than 10 \
136 input/outputs. */ \
137 register USItype _t1 __asm__ ("g1"), _t2 __asm__ ("g2"); \
138 __asm__ __volatile__ ( \
139 "subcc %r8,%9,%1\n\t" \
140 "subxcc %r6,%7,%0\n\t" \
141 "subxcc %r4,%5,%%g2\n\t" \
142 "subx %r2,%3,%%g1\n\t" \
143 : "=&r" ((USItype)(r1)), \
144 "=&r" ((USItype)(r0)) \
145 : "%rJ" ((USItype)(x3)), \
146 "rI" ((USItype)(y3)), \
147 "%rJ" ((USItype)(x2)), \
148 "rI" ((USItype)(y2)), \
149 "%rJ" ((USItype)(x1)), \
150 "rI" ((USItype)(y1)), \
151 "%rJ" ((USItype)(x0)), \
152 "rI" ((USItype)(y0)) \
153 : "cc", "g1", "g2"); \
154 __asm__ __volatile__ ("" : "=r" (_t1), "=r" (_t2)); \
155 r3 = _t1; r2 = _t2; \
156 } while (0)
157
158#define __FP_FRAC_DEC_3(x2,x1,x0,y2,y1,y0) __FP_FRAC_SUB_3(x2,x1,x0,x2,x1,x0,y2,y1,y0)
159
160#define __FP_FRAC_DEC_4(x3,x2,x1,x0,y3,y2,y1,y0) __FP_FRAC_SUB_4(x3,x2,x1,x0,x3,x2,x1,x0,y3,y2,y1,y0)
161
162#define __FP_FRAC_ADDI_4(x3,x2,x1,x0,i) \
163 __asm__ ("addcc %3,%4,%3\n\t" \
164 "addxcc %2,%%g0,%2\n\t" \
165 "addxcc %1,%%g0,%1\n\t" \
166 "addx %0,%%g0,%0\n\t" \
167 : "=&r" ((USItype)(x3)), \
168 "=&r" ((USItype)(x2)), \
169 "=&r" ((USItype)(x1)), \
170 "=&r" ((USItype)(x0)) \
171 : "rI" ((USItype)(i)), \
172 "0" ((USItype)(x3)), \
173 "1" ((USItype)(x2)), \
174 "2" ((USItype)(x1)), \
175 "3" ((USItype)(x0)) \
176 : "cc")
177
178#ifndef CONFIG_SMP
179extern struct task_struct *last_task_used_math;
180#endif
181
182/* Obtain the current rounding mode. */
183#ifndef FP_ROUNDMODE
184#ifdef CONFIG_SMP
185#define FP_ROUNDMODE ((current->thread.fsr >> 30) & 0x3)
186#else
187#define FP_ROUNDMODE ((last_task_used_math->thread.fsr >> 30) & 0x3)
188#endif
189#endif
190
191/* Exception flags. */
192#define FP_EX_INVALID (1 << 4)
193#define FP_EX_OVERFLOW (1 << 3)
194#define FP_EX_UNDERFLOW (1 << 2)
195#define FP_EX_DIVZERO (1 << 1)
196#define FP_EX_INEXACT (1 << 0)
197
198#define FP_HANDLE_EXCEPTIONS return _fex
199
200#ifdef CONFIG_SMP
201#define FP_INHIBIT_RESULTS ((current->thread.fsr >> 23) & _fex)
202#else
203#define FP_INHIBIT_RESULTS ((last_task_used_math->thread.fsr >> 23) & _fex)
204#endif
205
206#ifdef CONFIG_SMP
207#define FP_TRAPPING_EXCEPTIONS ((current->thread.fsr >> 23) & 0x1f)
208#else
209#define FP_TRAPPING_EXCEPTIONS ((last_task_used_math->thread.fsr >> 23) & 0x1f)
210#endif
211
212#endif
diff --git a/include/asm-sparc/sfp-machine_32.h b/include/asm-sparc/sfp-machine_32.h
new file mode 100644
index 000000000000..01d9c3b5a73b
--- /dev/null
+++ b/include/asm-sparc/sfp-machine_32.h
@@ -0,0 +1,212 @@
1/* Machine-dependent software floating-point definitions.
2 Sparc userland (_Q_*) version.
3 Copyright (C) 1997,1998,1999 Free Software Foundation, Inc.
4 This file is part of the GNU C Library.
5 Contributed by Richard Henderson (rth@cygnus.com),
6 Jakub Jelinek (jj@ultra.linux.cz),
7 David S. Miller (davem@redhat.com) and
8 Peter Maydell (pmaydell@chiark.greenend.org.uk).
9
10 The GNU C Library is free software; you can redistribute it and/or
11 modify it under the terms of the GNU Library General Public License as
12 published by the Free Software Foundation; either version 2 of the
13 License, or (at your option) any later version.
14
15 The GNU C Library is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 Library General Public License for more details.
19
20 You should have received a copy of the GNU Library General Public
21 License along with the GNU C Library; see the file COPYING.LIB. If
22 not, write to the Free Software Foundation, Inc.,
23 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
24
25#ifndef _SFP_MACHINE_H
26#define _SFP_MACHINE_H
27
28
29#define _FP_W_TYPE_SIZE 32
30#define _FP_W_TYPE unsigned long
31#define _FP_WS_TYPE signed long
32#define _FP_I_TYPE long
33
34#define _FP_MUL_MEAT_S(R,X,Y) \
35 _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
36#define _FP_MUL_MEAT_D(R,X,Y) \
37 _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
38#define _FP_MUL_MEAT_Q(R,X,Y) \
39 _FP_MUL_MEAT_4_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
40
41#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv(S,R,X,Y)
42#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y)
43#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_4_udiv(Q,R,X,Y)
44
45#define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1)
46#define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1), -1
47#define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1, -1, -1
48#define _FP_NANSIGN_S 0
49#define _FP_NANSIGN_D 0
50#define _FP_NANSIGN_Q 0
51
52#define _FP_KEEPNANFRACP 1
53
54/* If one NaN is signaling and the other is not,
55 * we choose that one, otherwise we choose X.
56 */
57/* For _Qp_* and _Q_*, this should prefer X, for
58 * CPU instruction emulation this should prefer Y.
59 * (see SPARCv9 B.2.2 section).
60 */
61#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
62 do { \
63 if ((_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs) \
64 && !(_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs)) \
65 { \
66 R##_s = X##_s; \
67 _FP_FRAC_COPY_##wc(R,X); \
68 } \
69 else \
70 { \
71 R##_s = Y##_s; \
72 _FP_FRAC_COPY_##wc(R,Y); \
73 } \
74 R##_c = FP_CLS_NAN; \
75 } while (0)
76
77/* Some assembly to speed things up. */
78#define __FP_FRAC_ADD_3(r2,r1,r0,x2,x1,x0,y2,y1,y0) \
79 __asm__ ("addcc %r7,%8,%2\n\t" \
80 "addxcc %r5,%6,%1\n\t" \
81 "addx %r3,%4,%0\n" \
82 : "=r" ((USItype)(r2)), \
83 "=&r" ((USItype)(r1)), \
84 "=&r" ((USItype)(r0)) \
85 : "%rJ" ((USItype)(x2)), \
86 "rI" ((USItype)(y2)), \
87 "%rJ" ((USItype)(x1)), \
88 "rI" ((USItype)(y1)), \
89 "%rJ" ((USItype)(x0)), \
90 "rI" ((USItype)(y0)) \
91 : "cc")
92
93#define __FP_FRAC_SUB_3(r2,r1,r0,x2,x1,x0,y2,y1,y0) \
94 __asm__ ("subcc %r7,%8,%2\n\t" \
95 "subxcc %r5,%6,%1\n\t" \
96 "subx %r3,%4,%0\n" \
97 : "=r" ((USItype)(r2)), \
98 "=&r" ((USItype)(r1)), \
99 "=&r" ((USItype)(r0)) \
100 : "%rJ" ((USItype)(x2)), \
101 "rI" ((USItype)(y2)), \
102 "%rJ" ((USItype)(x1)), \
103 "rI" ((USItype)(y1)), \
104 "%rJ" ((USItype)(x0)), \
105 "rI" ((USItype)(y0)) \
106 : "cc")
107
108#define __FP_FRAC_ADD_4(r3,r2,r1,r0,x3,x2,x1,x0,y3,y2,y1,y0) \
109 do { \
110 /* We need to fool gcc, as we need to pass more than 10 \
111 input/outputs. */ \
112 register USItype _t1 __asm__ ("g1"), _t2 __asm__ ("g2"); \
113 __asm__ __volatile__ ( \
114 "addcc %r8,%9,%1\n\t" \
115 "addxcc %r6,%7,%0\n\t" \
116 "addxcc %r4,%5,%%g2\n\t" \
117 "addx %r2,%3,%%g1\n\t" \
118 : "=&r" ((USItype)(r1)), \
119 "=&r" ((USItype)(r0)) \
120 : "%rJ" ((USItype)(x3)), \
121 "rI" ((USItype)(y3)), \
122 "%rJ" ((USItype)(x2)), \
123 "rI" ((USItype)(y2)), \
124 "%rJ" ((USItype)(x1)), \
125 "rI" ((USItype)(y1)), \
126 "%rJ" ((USItype)(x0)), \
127 "rI" ((USItype)(y0)) \
128 : "cc", "g1", "g2"); \
129 __asm__ __volatile__ ("" : "=r" (_t1), "=r" (_t2)); \
130 r3 = _t1; r2 = _t2; \
131 } while (0)
132
133#define __FP_FRAC_SUB_4(r3,r2,r1,r0,x3,x2,x1,x0,y3,y2,y1,y0) \
134 do { \
135 /* We need to fool gcc, as we need to pass more than 10 \
136 input/outputs. */ \
137 register USItype _t1 __asm__ ("g1"), _t2 __asm__ ("g2"); \
138 __asm__ __volatile__ ( \
139 "subcc %r8,%9,%1\n\t" \
140 "subxcc %r6,%7,%0\n\t" \
141 "subxcc %r4,%5,%%g2\n\t" \
142 "subx %r2,%3,%%g1\n\t" \
143 : "=&r" ((USItype)(r1)), \
144 "=&r" ((USItype)(r0)) \
145 : "%rJ" ((USItype)(x3)), \
146 "rI" ((USItype)(y3)), \
147 "%rJ" ((USItype)(x2)), \
148 "rI" ((USItype)(y2)), \
149 "%rJ" ((USItype)(x1)), \
150 "rI" ((USItype)(y1)), \
151 "%rJ" ((USItype)(x0)), \
152 "rI" ((USItype)(y0)) \
153 : "cc", "g1", "g2"); \
154 __asm__ __volatile__ ("" : "=r" (_t1), "=r" (_t2)); \
155 r3 = _t1; r2 = _t2; \
156 } while (0)
157
158#define __FP_FRAC_DEC_3(x2,x1,x0,y2,y1,y0) __FP_FRAC_SUB_3(x2,x1,x0,x2,x1,x0,y2,y1,y0)
159
160#define __FP_FRAC_DEC_4(x3,x2,x1,x0,y3,y2,y1,y0) __FP_FRAC_SUB_4(x3,x2,x1,x0,x3,x2,x1,x0,y3,y2,y1,y0)
161
162#define __FP_FRAC_ADDI_4(x3,x2,x1,x0,i) \
163 __asm__ ("addcc %3,%4,%3\n\t" \
164 "addxcc %2,%%g0,%2\n\t" \
165 "addxcc %1,%%g0,%1\n\t" \
166 "addx %0,%%g0,%0\n\t" \
167 : "=&r" ((USItype)(x3)), \
168 "=&r" ((USItype)(x2)), \
169 "=&r" ((USItype)(x1)), \
170 "=&r" ((USItype)(x0)) \
171 : "rI" ((USItype)(i)), \
172 "0" ((USItype)(x3)), \
173 "1" ((USItype)(x2)), \
174 "2" ((USItype)(x1)), \
175 "3" ((USItype)(x0)) \
176 : "cc")
177
178#ifndef CONFIG_SMP
179extern struct task_struct *last_task_used_math;
180#endif
181
182/* Obtain the current rounding mode. */
183#ifndef FP_ROUNDMODE
184#ifdef CONFIG_SMP
185#define FP_ROUNDMODE ((current->thread.fsr >> 30) & 0x3)
186#else
187#define FP_ROUNDMODE ((last_task_used_math->thread.fsr >> 30) & 0x3)
188#endif
189#endif
190
191/* Exception flags. */
192#define FP_EX_INVALID (1 << 4)
193#define FP_EX_OVERFLOW (1 << 3)
194#define FP_EX_UNDERFLOW (1 << 2)
195#define FP_EX_DIVZERO (1 << 1)
196#define FP_EX_INEXACT (1 << 0)
197
198#define FP_HANDLE_EXCEPTIONS return _fex
199
200#ifdef CONFIG_SMP
201#define FP_INHIBIT_RESULTS ((current->thread.fsr >> 23) & _fex)
202#else
203#define FP_INHIBIT_RESULTS ((last_task_used_math->thread.fsr >> 23) & _fex)
204#endif
205
206#ifdef CONFIG_SMP
207#define FP_TRAPPING_EXCEPTIONS ((current->thread.fsr >> 23) & 0x1f)
208#else
209#define FP_TRAPPING_EXCEPTIONS ((last_task_used_math->thread.fsr >> 23) & 0x1f)
210#endif
211
212#endif
diff --git a/include/asm-sparc/sfp-machine_64.h b/include/asm-sparc/sfp-machine_64.h
new file mode 100644
index 000000000000..ca913ef40bd5
--- /dev/null
+++ b/include/asm-sparc/sfp-machine_64.h
@@ -0,0 +1,93 @@
1/* Machine-dependent software floating-point definitions.
2 Sparc64 kernel version.
3 Copyright (C) 1997,1998,1999 Free Software Foundation, Inc.
4 This file is part of the GNU C Library.
5 Contributed by Richard Henderson (rth@cygnus.com),
6 Jakub Jelinek (jj@ultra.linux.cz) and
7 David S. Miller (davem@redhat.com).
8
9 The GNU C Library is free software; you can redistribute it and/or
10 modify it under the terms of the GNU Library General Public License as
11 published by the Free Software Foundation; either version 2 of the
12 License, or (at your option) any later version.
13
14 The GNU C Library is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 Library General Public License for more details.
18
19 You should have received a copy of the GNU Library General Public
20 License along with the GNU C Library; see the file COPYING.LIB. If
21 not, write to the Free Software Foundation, Inc.,
22 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
23
24#ifndef _SFP_MACHINE_H
25#define _SFP_MACHINE_H
26
27#define _FP_W_TYPE_SIZE 64
28#define _FP_W_TYPE unsigned long
29#define _FP_WS_TYPE signed long
30#define _FP_I_TYPE long
31
32#define _FP_MUL_MEAT_S(R,X,Y) \
33 _FP_MUL_MEAT_1_imm(_FP_WFRACBITS_S,R,X,Y)
34#define _FP_MUL_MEAT_D(R,X,Y) \
35 _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
36#define _FP_MUL_MEAT_Q(R,X,Y) \
37 _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
38
39#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm)
40#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(D,R,X,Y)
41#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_2_udiv(Q,R,X,Y)
42
43#define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1)
44#define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1)
45#define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1
46#define _FP_NANSIGN_S 0
47#define _FP_NANSIGN_D 0
48#define _FP_NANSIGN_Q 0
49
50#define _FP_KEEPNANFRACP 1
51
52/* If one NaN is signaling and the other is not,
53 * we choose that one, otherwise we choose X.
54 */
55/* For _Qp_* and _Q_*, this should prefer X, for
56 * CPU instruction emulation this should prefer Y.
57 * (see SPARCv9 B.2.2 section).
58 */
59#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
60 do { \
61 if ((_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs) \
62 && !(_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs)) \
63 { \
64 R##_s = X##_s; \
65 _FP_FRAC_COPY_##wc(R,X); \
66 } \
67 else \
68 { \
69 R##_s = Y##_s; \
70 _FP_FRAC_COPY_##wc(R,Y); \
71 } \
72 R##_c = FP_CLS_NAN; \
73 } while (0)
74
75/* Obtain the current rounding mode. */
76#ifndef FP_ROUNDMODE
77#define FP_ROUNDMODE ((current_thread_info()->xfsr[0] >> 30) & 0x3)
78#endif
79
80/* Exception flags. */
81#define FP_EX_INVALID (1 << 4)
82#define FP_EX_OVERFLOW (1 << 3)
83#define FP_EX_UNDERFLOW (1 << 2)
84#define FP_EX_DIVZERO (1 << 1)
85#define FP_EX_INEXACT (1 << 0)
86
87#define FP_HANDLE_EXCEPTIONS return _fex
88
89#define FP_INHIBIT_RESULTS ((current_thread_info()->xfsr[0] >> 23) & _fex)
90
91#define FP_TRAPPING_EXCEPTIONS ((current_thread_info()->xfsr[0] >> 23) & 0x1f)
92
93#endif
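For orientation (an editorial sketch, not part of the patch): the macros above pull two fields out of the raw %fsr/xfsr word, the rounding-direction field in bits 31:30 and the trap-enable mask in bits 27:23. The same decoding on a plain value, with illustrative helper names:

	#include <stdio.h>

	/* Decode the SPARC V9 FSR fields consumed by FP_ROUNDMODE and
	 * FP_TRAPPING_EXCEPTIONS above; helper names are illustrative.
	 */
	static unsigned int fsr_round_mode(unsigned long fsr)
	{
		return (fsr >> 30) & 0x3;	/* 0=nearest 1=zero 2=+inf 3=-inf */
	}

	static unsigned int fsr_trap_enable_mask(unsigned long fsr)
	{
		return (fsr >> 23) & 0x1f;	/* NV, OF, UF, DZ, NX enables */
	}

	int main(void)
	{
		unsigned long fsr = (2UL << 30) | (0x0fUL << 23);

		printf("rd=%u tem=0x%02x\n",
		       fsr_round_mode(fsr), fsr_trap_enable_mask(fsr));
		return 0;
	}

The five enable bits line up with the FP_EX_* flags defined above (bit 4 = invalid down to bit 0 = inexact).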
diff --git a/include/asm-sparc/shmbuf.h b/include/asm-sparc/shmbuf.h
index 1ff9da8bec73..83a16055363f 100644
--- a/include/asm-sparc/shmbuf.h
+++ b/include/asm-sparc/shmbuf.h
@@ -11,13 +11,19 @@
11 * - 2 miscellaneous 32-bit values 11 * - 2 miscellaneous 32-bit values
12 */ 12 */
13 13
14#if defined(__sparc__) && defined(__arch64__)
15# define PADDING(x)
16#else
17# define PADDING(x) unsigned int x;
18#endif
19
14struct shmid64_ds { 20struct shmid64_ds {
15 struct ipc64_perm shm_perm; /* operation perms */ 21 struct ipc64_perm shm_perm; /* operation perms */
16 unsigned int __pad1; 22 PADDING(__pad1)
17 __kernel_time_t shm_atime; /* last attach time */ 23 __kernel_time_t shm_atime; /* last attach time */
18 unsigned int __pad2; 24 PADDING(__pad2)
19 __kernel_time_t shm_dtime; /* last detach time */ 25 __kernel_time_t shm_dtime; /* last detach time */
20 unsigned int __pad3; 26 PADDING(__pad3)
21 __kernel_time_t shm_ctime; /* last change time */ 27 __kernel_time_t shm_ctime; /* last change time */
22 size_t shm_segsz; /* size of segment (bytes) */ 28 size_t shm_segsz; /* size of segment (bytes) */
23 __kernel_pid_t shm_cpid; /* pid of creator */ 29 __kernel_pid_t shm_cpid; /* pid of creator */
@@ -39,4 +45,6 @@ struct shminfo64 {
39 unsigned long __unused4; 45 unsigned long __unused4;
40}; 46};
41 47
48#undef PADDING
49
42#endif /* _SPARC_SHMBUF_H */ 50#endif /* _SPARC_SHMBUF_H */
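For orientation (an editorial sketch, not part of the patch): the PADDING() macro exists because __kernel_time_t is 32 bits on sparc32 but 64 bits on sparc64, so the explicit pad word on 32-bit keeps each time field occupying the same eight bytes as on 64-bit and the following members at the same offsets. A minimal illustration with stand-in types:

	#include <stdint.h>
	#include <stdio.h>

	struct atime_slot32 {		/* 32-bit ABI: explicit pad + 32-bit time */
		uint32_t __pad1;
		uint32_t shm_atime;
	};

	struct atime_slot64 {		/* 64-bit ABI: 64-bit time, no pad needed */
		uint64_t shm_atime;
	};

	int main(void)
	{
		/* Both layouts occupy eight bytes, so the fields that follow
		 * land at the same offsets in shmid64_ds on either ABI.
		 */
		printf("%zu %zu\n", sizeof(struct atime_slot32),
		       sizeof(struct atime_slot64));
		return 0;
	}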
diff --git a/include/asm-sparc/shmparam.h b/include/asm-sparc/shmparam.h
index 59a1243c12f3..16fda7e9acc8 100644
--- a/include/asm-sparc/shmparam.h
+++ b/include/asm-sparc/shmparam.h
@@ -1,11 +1,8 @@
1#ifndef _ASMSPARC_SHMPARAM_H 1#ifndef ___ASM_SPARC_SHMPARAM_H
2#define _ASMSPARC_SHMPARAM_H 2#define ___ASM_SPARC_SHMPARAM_H
3 3#if defined(__sparc__) && defined(__arch64__)
4#define __ARCH_FORCE_SHMLBA 1 4#include <asm-sparc/shmparam_64.h>
5 5#else
6extern int vac_cache_size; 6#include <asm-sparc/shmparam_32.h>
7#define SHMLBA (vac_cache_size ? vac_cache_size : \ 7#endif
8 (sparc_cpu_model == sun4c ? (64 * 1024) : \ 8#endif
9 (sparc_cpu_model == sun4 ? (128 * 1024) : PAGE_SIZE)))
10
11#endif /* _ASMSPARC_SHMPARAM_H */
diff --git a/include/asm-sparc/shmparam_32.h b/include/asm-sparc/shmparam_32.h
new file mode 100644
index 000000000000..59a1243c12f3
--- /dev/null
+++ b/include/asm-sparc/shmparam_32.h
@@ -0,0 +1,11 @@
1#ifndef _ASMSPARC_SHMPARAM_H
2#define _ASMSPARC_SHMPARAM_H
3
4#define __ARCH_FORCE_SHMLBA 1
5
6extern int vac_cache_size;
7#define SHMLBA (vac_cache_size ? vac_cache_size : \
8 (sparc_cpu_model == sun4c ? (64 * 1024) : \
9 (sparc_cpu_model == sun4 ? (128 * 1024) : PAGE_SIZE)))
10
11#endif /* _ASMSPARC_SHMPARAM_H */
diff --git a/include/asm-sparc/shmparam_64.h b/include/asm-sparc/shmparam_64.h
new file mode 100644
index 000000000000..1ed0d6701a9b
--- /dev/null
+++ b/include/asm-sparc/shmparam_64.h
@@ -0,0 +1,10 @@
1#ifndef _ASMSPARC64_SHMPARAM_H
2#define _ASMSPARC64_SHMPARAM_H
3
4#include <asm/spitfire.h>
5
6#define __ARCH_FORCE_SHMLBA 1
7/* attach addr a multiple of this */
8#define SHMLBA ((PAGE_SIZE > L1DCACHE_SIZE) ? PAGE_SIZE : L1DCACHE_SIZE)
9
10#endif /* _ASMSPARC64_SHMPARAM_H */
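For orientation (an editorial sketch, not part of the patch): the two headers compute the shared-memory attach alignment differently -- sparc32 keys off the virtually-addressed cache size and CPU model, while sparc64 simply takes the larger of the page size and the 16 KB L1 D-cache. The selection logic, with the kernel globals passed in as parameters:

	#include <stdio.h>

	enum cpu_model { sun4, sun4c, sun4m_or_later };

	/* sparc32 rule from shmparam_32.h */
	static unsigned long shmlba_32(unsigned long vac_cache_size,
				       enum cpu_model model, unsigned long page_size)
	{
		if (vac_cache_size)
			return vac_cache_size;
		if (model == sun4c)
			return 64 * 1024;
		if (model == sun4)
			return 128 * 1024;
		return page_size;
	}

	/* sparc64 rule from shmparam_64.h; L1DCACHE_SIZE is 0x4000 */
	static unsigned long shmlba_64(unsigned long page_size)
	{
		return page_size > 0x4000 ? page_size : 0x4000;
	}

	int main(void)
	{
		/* sparc64 uses 8K pages by default */
		printf("%lu %lu\n", shmlba_32(0, sun4c, 4096), shmlba_64(8192));
		return 0;
	}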
diff --git a/include/asm-sparc/sigcontext.h b/include/asm-sparc/sigcontext.h
index c5fb60dcbd75..82fc7d54a4fa 100644
--- a/include/asm-sparc/sigcontext.h
+++ b/include/asm-sparc/sigcontext.h
@@ -1,62 +1,8 @@
1#ifndef __SPARC_SIGCONTEXT_H 1#ifndef ___ASM_SPARC_SIGCONTEXT_H
2#define __SPARC_SIGCONTEXT_H 2#define ___ASM_SPARC_SIGCONTEXT_H
3 3#if defined(__sparc__) && defined(__arch64__)
4#ifdef __KERNEL__ 4#include <asm-sparc/sigcontext_64.h>
5#include <asm/ptrace.h> 5#else
6 6#include <asm-sparc/sigcontext_32.h>
7#ifndef __ASSEMBLY__ 7#endif
8 8#endif
9#define __SUNOS_MAXWIN 31
10
11/* This is what SunOS does, so shall I. */
12struct sigcontext {
13 int sigc_onstack; /* state to restore */
14 int sigc_mask; /* sigmask to restore */
15 int sigc_sp; /* stack pointer */
16 int sigc_pc; /* program counter */
17 int sigc_npc; /* next program counter */
18 int sigc_psr; /* for condition codes etc */
19 int sigc_g1; /* User uses these two registers */
20 int sigc_o0; /* within the trampoline code. */
21
22 /* Now comes information regarding the users window set
23 * at the time of the signal.
24 */
25 int sigc_oswins; /* outstanding windows */
26
27 /* stack ptrs for each regwin buf */
28 char *sigc_spbuf[__SUNOS_MAXWIN];
29
30 /* Windows to restore after signal */
31 struct {
32 unsigned long locals[8];
33 unsigned long ins[8];
34 } sigc_wbuf[__SUNOS_MAXWIN];
35};
36
37typedef struct {
38 struct {
39 unsigned long psr;
40 unsigned long pc;
41 unsigned long npc;
42 unsigned long y;
43 unsigned long u_regs[16]; /* globals and ins */
44 } si_regs;
45 int si_mask;
46} __siginfo_t;
47
48typedef struct {
49 unsigned long si_float_regs [32];
50 unsigned long si_fsr;
51 unsigned long si_fpqdepth;
52 struct {
53 unsigned long *insn_addr;
54 unsigned long insn;
55 } si_fpqueue [16];
56} __siginfo_fpu_t;
57
58#endif /* !(__ASSEMBLY__) */
59
60#endif /* (__KERNEL__) */
61
62#endif /* !(__SPARC_SIGCONTEXT_H) */
diff --git a/include/asm-sparc/sigcontext_32.h b/include/asm-sparc/sigcontext_32.h
new file mode 100644
index 000000000000..c5fb60dcbd75
--- /dev/null
+++ b/include/asm-sparc/sigcontext_32.h
@@ -0,0 +1,62 @@
1#ifndef __SPARC_SIGCONTEXT_H
2#define __SPARC_SIGCONTEXT_H
3
4#ifdef __KERNEL__
5#include <asm/ptrace.h>
6
7#ifndef __ASSEMBLY__
8
9#define __SUNOS_MAXWIN 31
10
11/* This is what SunOS does, so shall I. */
12struct sigcontext {
13 int sigc_onstack; /* state to restore */
14 int sigc_mask; /* sigmask to restore */
15 int sigc_sp; /* stack pointer */
16 int sigc_pc; /* program counter */
17 int sigc_npc; /* next program counter */
18 int sigc_psr; /* for condition codes etc */
19 int sigc_g1; /* User uses these two registers */
20 int sigc_o0; /* within the trampoline code. */
21
22 /* Now comes information regarding the users window set
23 * at the time of the signal.
24 */
25 int sigc_oswins; /* outstanding windows */
26
27 /* stack ptrs for each regwin buf */
28 char *sigc_spbuf[__SUNOS_MAXWIN];
29
30 /* Windows to restore after signal */
31 struct {
32 unsigned long locals[8];
33 unsigned long ins[8];
34 } sigc_wbuf[__SUNOS_MAXWIN];
35};
36
37typedef struct {
38 struct {
39 unsigned long psr;
40 unsigned long pc;
41 unsigned long npc;
42 unsigned long y;
43 unsigned long u_regs[16]; /* globals and ins */
44 } si_regs;
45 int si_mask;
46} __siginfo_t;
47
48typedef struct {
49 unsigned long si_float_regs [32];
50 unsigned long si_fsr;
51 unsigned long si_fpqdepth;
52 struct {
53 unsigned long *insn_addr;
54 unsigned long insn;
55 } si_fpqueue [16];
56} __siginfo_fpu_t;
57
58#endif /* !(__ASSEMBLY__) */
59
60#endif /* (__KERNEL__) */
61
62#endif /* !(__SPARC_SIGCONTEXT_H) */
diff --git a/include/asm-sparc/sigcontext_64.h b/include/asm-sparc/sigcontext_64.h
new file mode 100644
index 000000000000..1c868d680cfc
--- /dev/null
+++ b/include/asm-sparc/sigcontext_64.h
@@ -0,0 +1,87 @@
1#ifndef __SPARC64_SIGCONTEXT_H
2#define __SPARC64_SIGCONTEXT_H
3
4#ifdef __KERNEL__
5#include <asm/ptrace.h>
6#endif
7
8#ifndef __ASSEMBLY__
9
10#ifdef __KERNEL__
11
12#define __SUNOS_MAXWIN 31
13
14/* This is what SunOS does, so shall I unless we use new 32bit signals or rt signals. */
15struct sigcontext32 {
16 int sigc_onstack; /* state to restore */
17 int sigc_mask; /* sigmask to restore */
18 int sigc_sp; /* stack pointer */
19 int sigc_pc; /* program counter */
20 int sigc_npc; /* next program counter */
21 int sigc_psr; /* for condition codes etc */
22 int sigc_g1; /* User uses these two registers */
23 int sigc_o0; /* within the trampoline code. */
24
25 /* Now comes information regarding the users window set
26 * at the time of the signal.
27 */
28 int sigc_oswins; /* outstanding windows */
29
30 /* stack ptrs for each regwin buf */
31 unsigned sigc_spbuf[__SUNOS_MAXWIN];
32
33 /* Windows to restore after signal */
34 struct reg_window32 sigc_wbuf[__SUNOS_MAXWIN];
35};
36
37#endif
38
39#ifdef __KERNEL__
40
41/* This is what we use for 32bit new non-rt signals. */
42
43typedef struct {
44 struct {
45 unsigned int psr;
46 unsigned int pc;
47 unsigned int npc;
48 unsigned int y;
49 unsigned int u_regs[16]; /* globals and ins */
50 } si_regs;
51 int si_mask;
52} __siginfo32_t;
53
54#endif
55
56typedef struct {
57 unsigned int si_float_regs [64];
58 unsigned long si_fsr;
59 unsigned long si_gsr;
60 unsigned long si_fprs;
61} __siginfo_fpu_t;
62
63/* This is what SunOS doesn't, so we have to write this alone
64 and do it properly. */
65struct sigcontext {
66 /* The size of this array has to match SI_MAX_SIZE from siginfo.h */
67 char sigc_info[128];
68 struct {
69 unsigned long u_regs[16]; /* globals and ins */
70 unsigned long tstate;
71 unsigned long tpc;
72 unsigned long tnpc;
73 unsigned int y;
74 unsigned int fprs;
75 } sigc_regs;
76 __siginfo_fpu_t * sigc_fpu_save;
77 struct {
78 void * ss_sp;
79 int ss_flags;
80 unsigned long ss_size;
81 } sigc_stack;
82 unsigned long sigc_mask;
83};
84
85#endif /* !(__ASSEMBLY__) */
86
87#endif /* !(__SPARC64_SIGCONTEXT_H) */
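For orientation (an editorial sketch, not part of the patch): the sigc_info[128] array at the top of the 64-bit struct sigcontext is documented to match SI_MAX_SIZE from siginfo.h, which is 128 bytes in the generic definition. A build-time check of that invariant might look like this, with SI_MAX_SIZE written out as its generic value:

	#define SKETCH_SI_MAX_SIZE	128

	struct sigcontext_sketch {
		char sigc_info[128];
		/* remaining sigcontext fields elided */
	};

	/* Fails to compile if the array ever drifts from SI_MAX_SIZE. */
	typedef char sigc_info_size_check[
		sizeof(((struct sigcontext_sketch *)0)->sigc_info)
			== SKETCH_SI_MAX_SIZE ? 1 : -1];

In-tree code would normally express the same check with BUILD_BUG_ON().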
diff --git a/include/asm-sparc/siginfo.h b/include/asm-sparc/siginfo.h
index 3c71af135c52..2c9fccf4ce18 100644
--- a/include/asm-sparc/siginfo.h
+++ b/include/asm-sparc/siginfo.h
@@ -1,17 +1,8 @@
1#ifndef _SPARC_SIGINFO_H 1#ifndef ___ASM_SPARC_SIGINFO_H
2#define _SPARC_SIGINFO_H 2#define ___ASM_SPARC_SIGINFO_H
3 3#if defined(__sparc__) && defined(__arch64__)
4#define __ARCH_SI_UID_T unsigned int 4#include <asm-sparc/siginfo_64.h>
5#define __ARCH_SI_TRAPNO 5#else
6 6#include <asm-sparc/siginfo_32.h>
7#include <asm-generic/siginfo.h> 7#endif
8 8#endif
9#define SI_NOINFO 32767 /* no information in siginfo_t */
10
11/*
12 * SIGEMT si_codes
13 */
14#define EMT_TAGOVF (__SI_FAULT|1) /* tag overflow */
15#define NSIGEMT 1
16
17#endif /* !(_SPARC_SIGINFO_H) */
diff --git a/include/asm-sparc/siginfo_32.h b/include/asm-sparc/siginfo_32.h
new file mode 100644
index 000000000000..3c71af135c52
--- /dev/null
+++ b/include/asm-sparc/siginfo_32.h
@@ -0,0 +1,17 @@
1#ifndef _SPARC_SIGINFO_H
2#define _SPARC_SIGINFO_H
3
4#define __ARCH_SI_UID_T unsigned int
5#define __ARCH_SI_TRAPNO
6
7#include <asm-generic/siginfo.h>
8
9#define SI_NOINFO 32767 /* no information in siginfo_t */
10
11/*
12 * SIGEMT si_codes
13 */
14#define EMT_TAGOVF (__SI_FAULT|1) /* tag overflow */
15#define NSIGEMT 1
16
17#endif /* !(_SPARC_SIGINFO_H) */
diff --git a/include/asm-sparc/siginfo_64.h b/include/asm-sparc/siginfo_64.h
new file mode 100644
index 000000000000..c96e6c30f8b0
--- /dev/null
+++ b/include/asm-sparc/siginfo_64.h
@@ -0,0 +1,32 @@
1#ifndef _SPARC64_SIGINFO_H
2#define _SPARC64_SIGINFO_H
3
4#define SI_PAD_SIZE32 ((SI_MAX_SIZE/sizeof(int)) - 3)
5
6#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
7#define __ARCH_SI_TRAPNO
8#define __ARCH_SI_BAND_T int
9
10#include <asm-generic/siginfo.h>
11
12#ifdef __KERNEL__
13
14#include <linux/compat.h>
15
16#ifdef CONFIG_COMPAT
17
18struct compat_siginfo;
19
20#endif /* CONFIG_COMPAT */
21
22#endif /* __KERNEL__ */
23
24#define SI_NOINFO 32767 /* no information in siginfo_t */
25
26/*
27 * SIGEMT si_codes
28 */
29#define EMT_TAGOVF (__SI_FAULT|1) /* tag overflow */
30#define NSIGEMT 1
31
32#endif
diff --git a/include/asm-sparc/signal.h b/include/asm-sparc/signal.h
index 683657d6e685..36f5f9e482f7 100644
--- a/include/asm-sparc/signal.h
+++ b/include/asm-sparc/signal.h
@@ -1,207 +1,8 @@
1#ifndef _ASMSPARC_SIGNAL_H 1#ifndef ___ASM_SPARC_SIGNAL_H
2#define _ASMSPARC_SIGNAL_H 2#define ___ASM_SPARC_SIGNAL_H
3 3#if defined(__sparc__) && defined(__arch64__)
4#include <asm/sigcontext.h> 4#include <asm-sparc/signal_64.h>
5#include <linux/compiler.h>
6
7#ifdef __KERNEL__
8#ifndef __ASSEMBLY__
9#include <linux/personality.h>
10#include <linux/types.h>
11#endif
12#endif
13
14/* On the Sparc the signal handlers get passed a 'sub-signal' code
15 * for certain signal types, which we document here.
16 */
17#define SIGHUP 1
18#define SIGINT 2
19#define SIGQUIT 3
20#define SIGILL 4
21#define SUBSIG_STACK 0
22#define SUBSIG_ILLINST 2
23#define SUBSIG_PRIVINST 3
24#define SUBSIG_BADTRAP(t) (0x80 + (t))
25
26#define SIGTRAP 5
27#define SIGABRT 6
28#define SIGIOT 6
29
30#define SIGEMT 7
31#define SUBSIG_TAG 10
32
33#define SIGFPE 8
34#define SUBSIG_FPDISABLED 0x400
35#define SUBSIG_FPERROR 0x404
36#define SUBSIG_FPINTOVFL 0x001
37#define SUBSIG_FPSTSIG 0x002
38#define SUBSIG_IDIVZERO 0x014
39#define SUBSIG_FPINEXACT 0x0c4
40#define SUBSIG_FPDIVZERO 0x0c8
41#define SUBSIG_FPUNFLOW 0x0cc
42#define SUBSIG_FPOPERROR 0x0d0
43#define SUBSIG_FPOVFLOW 0x0d4
44
45#define SIGKILL 9
46#define SIGBUS 10
47#define SUBSIG_BUSTIMEOUT 1
48#define SUBSIG_ALIGNMENT 2
49#define SUBSIG_MISCERROR 5
50
51#define SIGSEGV 11
52#define SUBSIG_NOMAPPING 3
53#define SUBSIG_PROTECTION 4
54#define SUBSIG_SEGERROR 5
55
56#define SIGSYS 12
57
58#define SIGPIPE 13
59#define SIGALRM 14
60#define SIGTERM 15
61#define SIGURG 16
62
63/* SunOS values which deviate from the Linux/i386 ones */
64#define SIGSTOP 17
65#define SIGTSTP 18
66#define SIGCONT 19
67#define SIGCHLD 20
68#define SIGTTIN 21
69#define SIGTTOU 22
70#define SIGIO 23
71#define SIGPOLL SIGIO /* SysV name for SIGIO */
72#define SIGXCPU 24
73#define SIGXFSZ 25
74#define SIGVTALRM 26
75#define SIGPROF 27
76#define SIGWINCH 28
77#define SIGLOST 29
78#define SIGPWR SIGLOST
79#define SIGUSR1 30
80#define SIGUSR2 31
81
82/* Most things should be clean enough to redefine this at will, if care
83 * is taken to make libc match.
84 */
85
86#define __OLD_NSIG 32
87#define __NEW_NSIG 64
88#define _NSIG_BPW 32
89#define _NSIG_WORDS (__NEW_NSIG / _NSIG_BPW)
90
91#define SIGRTMIN 32
92#define SIGRTMAX __NEW_NSIG
93
94#if defined(__KERNEL__) || defined(__WANT_POSIX1B_SIGNALS__)
95#define _NSIG __NEW_NSIG
96#define __new_sigset_t sigset_t
97#define __new_sigaction sigaction
98#define __old_sigset_t old_sigset_t
99#define __old_sigaction old_sigaction
100#else 5#else
101#define _NSIG __OLD_NSIG 6#include <asm-sparc/signal_32.h>
102#define __old_sigset_t sigset_t
103#define __old_sigaction sigaction
104#endif
105
106#ifndef __ASSEMBLY__
107
108typedef unsigned long __old_sigset_t;
109
110typedef struct {
111 unsigned long sig[_NSIG_WORDS];
112} __new_sigset_t;
113
114
115#ifdef __KERNEL__
116/* A SunOS sigstack */
117struct sigstack {
118 char *the_stack;
119 int cur_status;
120};
121#endif 7#endif
122
123/* Sigvec flags */
124#define _SV_SSTACK 1u /* This signal handler should use sig-stack */
125#define _SV_INTR 2u /* Sig return should not restart system call */
126#define _SV_RESET 4u /* Set handler to SIG_DFL upon taken signal */
127#define _SV_IGNCHILD 8u /* Do not send SIGCHLD */
128
129/*
130 * sa_flags values: SA_STACK is not currently supported, but will allow the
131 * usage of signal stacks by using the (now obsolete) sa_restorer field in
132 * the sigaction structure as a stack pointer. This is now possible due to
133 * the changes in signal handling. LBT 010493.
134 * SA_RESTART flag to get restarting signals (which were the default long ago)
135 */
136#define SA_NOCLDSTOP _SV_IGNCHILD
137#define SA_STACK _SV_SSTACK
138#define SA_ONSTACK _SV_SSTACK
139#define SA_RESTART _SV_INTR
140#define SA_ONESHOT _SV_RESET
141#define SA_NOMASK 0x20u
142#define SA_NOCLDWAIT 0x100u
143#define SA_SIGINFO 0x200u
144
145#define SIG_BLOCK 0x01 /* for blocking signals */
146#define SIG_UNBLOCK 0x02 /* for unblocking signals */
147#define SIG_SETMASK 0x04 /* for setting the signal mask */
148
149/*
150 * sigaltstack controls
151 */
152#define SS_ONSTACK 1
153#define SS_DISABLE 2
154
155#define MINSIGSTKSZ 4096
156#define SIGSTKSZ 16384
157
158#ifdef __KERNEL__
159/*
160 * DJHR
161 * SA_STATIC_ALLOC is used for the SPARC system to indicate that this
162 * interrupt handler's irq structure should be statically allocated
163 * by the request_irq routine.
164 * The alternative is that arch/sparc/kernel/irq.c has carnal knowledge
165 * of interrupt usage and that sucks. Also without a flag like this
166 * it may be possible for the free_irq routine to attempt to free
167 * statically allocated data.. which is NOT GOOD.
168 *
169 */
170#define SA_STATIC_ALLOC 0x8000
171#endif 8#endif
172
173#include <asm-generic/signal.h>
174
175#ifdef __KERNEL__
176struct __new_sigaction {
177 __sighandler_t sa_handler;
178 unsigned long sa_flags;
179 void (*sa_restorer)(void); /* Not used by Linux/SPARC */
180 __new_sigset_t sa_mask;
181};
182
183struct k_sigaction {
184 struct __new_sigaction sa;
185 void __user *ka_restorer;
186};
187
188struct __old_sigaction {
189 __sighandler_t sa_handler;
190 __old_sigset_t sa_mask;
191 unsigned long sa_flags;
192 void (*sa_restorer) (void); /* not used by Linux/SPARC */
193};
194
195typedef struct sigaltstack {
196 void __user *ss_sp;
197 int ss_flags;
198 size_t ss_size;
199} stack_t;
200
201#define ptrace_signal_deliver(regs, cookie) do { } while (0)
202
203#endif /* !(__KERNEL__) */
204
205#endif /* !(__ASSEMBLY__) */
206
207#endif /* !(_ASMSPARC_SIGNAL_H) */
diff --git a/include/asm-sparc/signal_32.h b/include/asm-sparc/signal_32.h
new file mode 100644
index 000000000000..96a60ab03ca1
--- /dev/null
+++ b/include/asm-sparc/signal_32.h
@@ -0,0 +1,207 @@
1#ifndef _ASMSPARC_SIGNAL_H
2#define _ASMSPARC_SIGNAL_H
3
4#include <asm/sigcontext.h>
5#include <linux/compiler.h>
6
7#ifdef __KERNEL__
8#ifndef __ASSEMBLY__
9#include <linux/personality.h>
10#include <linux/types.h>
11#endif
12#endif
13
14/* On the Sparc the signal handlers get passed a 'sub-signal' code
15 * for certain signal types, which we document here.
16 */
17#define SIGHUP 1
18#define SIGINT 2
19#define SIGQUIT 3
20#define SIGILL 4
21#define SUBSIG_STACK 0
22#define SUBSIG_ILLINST 2
23#define SUBSIG_PRIVINST 3
24#define SUBSIG_BADTRAP(t) (0x80 + (t))
25
26#define SIGTRAP 5
27#define SIGABRT 6
28#define SIGIOT 6
29
30#define SIGEMT 7
31#define SUBSIG_TAG 10
32
33#define SIGFPE 8
34#define SUBSIG_FPDISABLED 0x400
35#define SUBSIG_FPERROR 0x404
36#define SUBSIG_FPINTOVFL 0x001
37#define SUBSIG_FPSTSIG 0x002
38#define SUBSIG_IDIVZERO 0x014
39#define SUBSIG_FPINEXACT 0x0c4
40#define SUBSIG_FPDIVZERO 0x0c8
41#define SUBSIG_FPUNFLOW 0x0cc
42#define SUBSIG_FPOPERROR 0x0d0
43#define SUBSIG_FPOVFLOW 0x0d4
44
45#define SIGKILL 9
46#define SIGBUS 10
47#define SUBSIG_BUSTIMEOUT 1
48#define SUBSIG_ALIGNMENT 2
49#define SUBSIG_MISCERROR 5
50
51#define SIGSEGV 11
52#define SUBSIG_NOMAPPING 3
53#define SUBSIG_PROTECTION 4
54#define SUBSIG_SEGERROR 5
55
56#define SIGSYS 12
57
58#define SIGPIPE 13
59#define SIGALRM 14
60#define SIGTERM 15
61#define SIGURG 16
62
63/* SunOS values which deviate from the Linux/i386 ones */
64#define SIGSTOP 17
65#define SIGTSTP 18
66#define SIGCONT 19
67#define SIGCHLD 20
68#define SIGTTIN 21
69#define SIGTTOU 22
70#define SIGIO 23
71#define SIGPOLL SIGIO /* SysV name for SIGIO */
72#define SIGXCPU 24
73#define SIGXFSZ 25
74#define SIGVTALRM 26
75#define SIGPROF 27
76#define SIGWINCH 28
77#define SIGLOST 29
78#define SIGPWR SIGLOST
79#define SIGUSR1 30
80#define SIGUSR2 31
81
82/* Most things should be clean enough to redefine this at will, if care
83 * is taken to make libc match.
84 */
85
86#define __OLD_NSIG 32
87#define __NEW_NSIG 64
88#define _NSIG_BPW 32
89#define _NSIG_WORDS (__NEW_NSIG / _NSIG_BPW)
90
91#define SIGRTMIN 32
92#define SIGRTMAX __NEW_NSIG
93
94#if defined(__KERNEL__) || defined(__WANT_POSIX1B_SIGNALS__)
95#define _NSIG __NEW_NSIG
96#define __new_sigset_t sigset_t
97#define __new_sigaction sigaction
98#define __old_sigset_t old_sigset_t
99#define __old_sigaction old_sigaction
100#else
101#define _NSIG __OLD_NSIG
102#define __old_sigset_t sigset_t
103#define __old_sigaction sigaction
104#endif
105
106#ifndef __ASSEMBLY__
107
108typedef unsigned long __old_sigset_t;
109
110typedef struct {
111 unsigned long sig[_NSIG_WORDS];
112} __new_sigset_t;
113
114
115#ifdef __KERNEL__
116/* A SunOS sigstack */
117struct sigstack {
118 char *the_stack;
119 int cur_status;
120};
121#endif
122
123/* Sigvec flags */
124#define _SV_SSTACK 1u /* This signal handler should use sig-stack */
125#define _SV_INTR 2u /* Sig return should not restart system call */
126#define _SV_RESET 4u /* Set handler to SIG_DFL upon taken signal */
127#define _SV_IGNCHILD 8u /* Do not send SIGCHLD */
128
129/*
130 * sa_flags values: SA_STACK is not currently supported, but will allow the
131 * usage of signal stacks by using the (now obsolete) sa_restorer field in
132 * the sigaction structure as a stack pointer. This is now possible due to
133 * the changes in signal handling. LBT 010493.
134 * SA_RESTART flag to get restarting signals (which were the default long ago)
135 */
136#define SA_NOCLDSTOP _SV_IGNCHILD
137#define SA_STACK _SV_SSTACK
138#define SA_ONSTACK _SV_SSTACK
139#define SA_RESTART _SV_INTR
140#define SA_ONESHOT _SV_RESET
141#define SA_NOMASK 0x20u
142#define SA_NOCLDWAIT 0x100u
143#define SA_SIGINFO 0x200u
144
145#define SIG_BLOCK 0x01 /* for blocking signals */
146#define SIG_UNBLOCK 0x02 /* for unblocking signals */
147#define SIG_SETMASK 0x04 /* for setting the signal mask */
148
149/*
150 * sigaltstack controls
151 */
152#define SS_ONSTACK 1
153#define SS_DISABLE 2
154
155#define MINSIGSTKSZ 4096
156#define SIGSTKSZ 16384
157
158#ifdef __KERNEL__
159/*
160 * DJHR
161 * SA_STATIC_ALLOC is used for the SPARC system to indicate that this
162 * interrupt handler's irq structure should be statically allocated
163 * by the request_irq routine.
164 * The alternative is that arch/sparc/kernel/irq.c has carnal knowledge
165 * of interrupt usage and that sucks. Also without a flag like this
166 * it may be possible for the free_irq routine to attempt to free
167 * statically allocated data.. which is NOT GOOD.
168 *
169 */
170#define SA_STATIC_ALLOC 0x8000
171#endif
172
173#include <asm-generic/signal.h>
174
175#ifdef __KERNEL__
176struct __new_sigaction {
177 __sighandler_t sa_handler;
178 unsigned long sa_flags;
179 void (*sa_restorer)(void); /* Not used by Linux/SPARC */
180 __new_sigset_t sa_mask;
181};
182
183struct k_sigaction {
184 struct __new_sigaction sa;
185 void __user *ka_restorer;
186};
187
188struct __old_sigaction {
189 __sighandler_t sa_handler;
190 __old_sigset_t sa_mask;
191 unsigned long sa_flags;
192 void (*sa_restorer) (void); /* not used by Linux/SPARC */
193};
194
195typedef struct sigaltstack {
196 void __user *ss_sp;
197 int ss_flags;
198 size_t ss_size;
199} stack_t;
200
201#define ptrace_signal_deliver(regs, cookie) do { } while (0)
202
203#endif /* !(__KERNEL__) */
204
205#endif /* !(__ASSEMBLY__) */
206
207#endif /* !(_ASMSPARC_SIGNAL_H) */
diff --git a/include/asm-sparc/signal_64.h b/include/asm-sparc/signal_64.h
new file mode 100644
index 000000000000..ab1509a101c5
--- /dev/null
+++ b/include/asm-sparc/signal_64.h
@@ -0,0 +1,194 @@
1#ifndef _ASMSPARC64_SIGNAL_H
2#define _ASMSPARC64_SIGNAL_H
3
4#include <asm/sigcontext.h>
5
6#ifdef __KERNEL__
7#ifndef __ASSEMBLY__
8#include <linux/personality.h>
9#include <linux/types.h>
10#endif
11#endif
12
13/* On the Sparc the signal handlers get passed a 'sub-signal' code
14 * for certain signal types, which we document here.
15 */
16#define SIGHUP 1
17#define SIGINT 2
18#define SIGQUIT 3
19#define SIGILL 4
20#define SUBSIG_STACK 0
21#define SUBSIG_ILLINST 2
22#define SUBSIG_PRIVINST 3
23#define SUBSIG_BADTRAP(t) (0x80 + (t))
24
25#define SIGTRAP 5
26#define SIGABRT 6
27#define SIGIOT 6
28
29#define SIGEMT 7
30#define SUBSIG_TAG 10
31
32#define SIGFPE 8
33#define SUBSIG_FPDISABLED 0x400
34#define SUBSIG_FPERROR 0x404
35#define SUBSIG_FPINTOVFL 0x001
36#define SUBSIG_FPSTSIG 0x002
37#define SUBSIG_IDIVZERO 0x014
38#define SUBSIG_FPINEXACT 0x0c4
39#define SUBSIG_FPDIVZERO 0x0c8
40#define SUBSIG_FPUNFLOW 0x0cc
41#define SUBSIG_FPOPERROR 0x0d0
42#define SUBSIG_FPOVFLOW 0x0d4
43
44#define SIGKILL 9
45#define SIGBUS 10
46#define SUBSIG_BUSTIMEOUT 1
47#define SUBSIG_ALIGNMENT 2
48#define SUBSIG_MISCERROR 5
49
50#define SIGSEGV 11
51#define SUBSIG_NOMAPPING 3
52#define SUBSIG_PROTECTION 4
53#define SUBSIG_SEGERROR 5
54
55#define SIGSYS 12
56
57#define SIGPIPE 13
58#define SIGALRM 14
59#define SIGTERM 15
60#define SIGURG 16
61
62/* SunOS values which deviate from the Linux/i386 ones */
63#define SIGSTOP 17
64#define SIGTSTP 18
65#define SIGCONT 19
66#define SIGCHLD 20
67#define SIGTTIN 21
68#define SIGTTOU 22
69#define SIGIO 23
70#define SIGPOLL SIGIO /* SysV name for SIGIO */
71#define SIGXCPU 24
72#define SIGXFSZ 25
73#define SIGVTALRM 26
74#define SIGPROF 27
75#define SIGWINCH 28
76#define SIGLOST 29
77#define SIGPWR SIGLOST
78#define SIGUSR1 30
79#define SIGUSR2 31
80
81/* Most things should be clean enough to redefine this at will, if care
82 is taken to make libc match. */
83
84#define __OLD_NSIG 32
85#define __NEW_NSIG 64
86#define _NSIG_BPW 64
87#define _NSIG_WORDS (__NEW_NSIG / _NSIG_BPW)
88
89#define SIGRTMIN 32
90#define SIGRTMAX __NEW_NSIG
91
92#if defined(__KERNEL__) || defined(__WANT_POSIX1B_SIGNALS__)
93#define _NSIG __NEW_NSIG
94#define __new_sigset_t sigset_t
95#define __new_sigaction sigaction
96#define __new_sigaction32 sigaction32
97#define __old_sigset_t old_sigset_t
98#define __old_sigaction old_sigaction
99#define __old_sigaction32 old_sigaction32
100#else
101#define _NSIG __OLD_NSIG
102#define NSIG _NSIG
103#define __old_sigset_t sigset_t
104#define __old_sigaction sigaction
105#define __old_sigaction32 sigaction32
106#endif
107
108#ifndef __ASSEMBLY__
109
110typedef unsigned long __old_sigset_t; /* at least 32 bits */
111
112typedef struct {
113 unsigned long sig[_NSIG_WORDS];
114} __new_sigset_t;
115
116/* A SunOS sigstack */
117struct sigstack {
118 /* XXX 32-bit pointers pinhead XXX */
119 char *the_stack;
120 int cur_status;
121};
122
123/* Sigvec flags */
124#define _SV_SSTACK 1u /* This signal handler should use sig-stack */
125#define _SV_INTR 2u /* Sig return should not restart system call */
126#define _SV_RESET 4u /* Set handler to SIG_DFL upon taken signal */
127#define _SV_IGNCHILD 8u /* Do not send SIGCHLD */
128
129/*
130 * sa_flags values: SA_STACK is not currently supported, but will allow the
131 * usage of signal stacks by using the (now obsolete) sa_restorer field in
132 * the sigaction structure as a stack pointer. This is now possible due to
133 * the changes in signal handling. LBT 010493.
134 * SA_RESTART flag to get restarting signals (which were the default long ago)
135 */
136#define SA_NOCLDSTOP _SV_IGNCHILD
137#define SA_STACK _SV_SSTACK
138#define SA_ONSTACK _SV_SSTACK
139#define SA_RESTART _SV_INTR
140#define SA_ONESHOT _SV_RESET
141#define SA_NOMASK 0x20u
142#define SA_NOCLDWAIT 0x100u
143#define SA_SIGINFO 0x200u
144
145
146#define SIG_BLOCK 0x01 /* for blocking signals */
147#define SIG_UNBLOCK 0x02 /* for unblocking signals */
148#define SIG_SETMASK 0x04 /* for setting the signal mask */
149
150/*
151 * sigaltstack controls
152 */
153#define SS_ONSTACK 1
154#define SS_DISABLE 2
155
156#define MINSIGSTKSZ 4096
157#define SIGSTKSZ 16384
158
159#include <asm-generic/signal.h>
160
161struct __new_sigaction {
162 __sighandler_t sa_handler;
163 unsigned long sa_flags;
164 __sigrestore_t sa_restorer; /* not used by Linux/SPARC yet */
165 __new_sigset_t sa_mask;
166};
167
168struct __old_sigaction {
169 __sighandler_t sa_handler;
170 __old_sigset_t sa_mask;
171 unsigned long sa_flags;
172 void (*sa_restorer)(void); /* not used by Linux/SPARC yet */
173};
174
175typedef struct sigaltstack {
176 void __user *ss_sp;
177 int ss_flags;
178 size_t ss_size;
179} stack_t;
180
181#ifdef __KERNEL__
182
183struct k_sigaction {
184 struct __new_sigaction sa;
185 void __user *ka_restorer;
186};
187
188#define ptrace_signal_deliver(regs, cookie) do { } while (0)
189
190#endif /* !(__KERNEL__) */
191
192#endif /* !(__ASSEMBLY__) */
193
194#endif /* !(_ASMSPARC64_SIGNAL_H) */
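For orientation (an editorial sketch, not part of the patch): both signal headers define __NEW_NSIG as 64, but _NSIG_BPW is 32 on sparc32 and 64 on sparc64, so the resulting sigset_t is two words on the former and a single word on the latter. The arithmetic, spelled out:

	#include <stdio.h>

	int main(void)
	{
		unsigned int new_nsig = 64;		/* __NEW_NSIG in both headers */
		unsigned int bpw32 = 32, bpw64 = 64;	/* _NSIG_BPW per ABI */

		printf("sparc32 _NSIG_WORDS = %u\n", new_nsig / bpw32);	/* 2 */
		printf("sparc64 _NSIG_WORDS = %u\n", new_nsig / bpw64);	/* 1 */
		return 0;
	}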
diff --git a/include/asm-sparc/smp.h b/include/asm-sparc/smp.h
index e6d561599726..1f9dedfbabd8 100644
--- a/include/asm-sparc/smp.h
+++ b/include/asm-sparc/smp.h
@@ -1,173 +1,8 @@
1/* smp.h: Sparc specific SMP stuff. 1#ifndef ___ASM_SPARC_SMP_H
2 * 2#define ___ASM_SPARC_SMP_H
3 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) 3#if defined(__sparc__) && defined(__arch64__)
4 */ 4#include <asm-sparc/smp_64.h>
5
6#ifndef _SPARC_SMP_H
7#define _SPARC_SMP_H
8
9#include <linux/threads.h>
10#include <asm/head.h>
11#include <asm/btfixup.h>
12
13#ifndef __ASSEMBLY__
14
15#include <linux/cpumask.h>
16
17#endif /* __ASSEMBLY__ */
18
19#ifdef CONFIG_SMP
20
21#ifndef __ASSEMBLY__
22
23#include <asm/ptrace.h>
24#include <asm/asi.h>
25#include <asm/atomic.h>
26
27/*
28 * Private routines/data
29 */
30
31extern unsigned char boot_cpu_id;
32extern cpumask_t phys_cpu_present_map;
33#define cpu_possible_map phys_cpu_present_map
34
35typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
36 unsigned long, unsigned long);
37
38/*
39 * General functions that each host system must provide.
40 */
41
42void sun4m_init_smp(void);
43void sun4d_init_smp(void);
44
45void smp_callin(void);
46void smp_boot_cpus(void);
47void smp_store_cpu_info(int);
48
49struct seq_file;
50void smp_bogo(struct seq_file *);
51void smp_info(struct seq_file *);
52
53BTFIXUPDEF_CALL(void, smp_cross_call, smpfunc_t, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long)
54BTFIXUPDEF_CALL(int, __hard_smp_processor_id, void)
55BTFIXUPDEF_BLACKBOX(hard_smp_processor_id)
56BTFIXUPDEF_BLACKBOX(load_current)
57
58#define smp_cross_call(func,arg1,arg2,arg3,arg4,arg5) BTFIXUP_CALL(smp_cross_call)(func,arg1,arg2,arg3,arg4,arg5)
59
60static inline void xc0(smpfunc_t func) { smp_cross_call(func, 0, 0, 0, 0, 0); }
61static inline void xc1(smpfunc_t func, unsigned long arg1)
62{ smp_cross_call(func, arg1, 0, 0, 0, 0); }
63static inline void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2)
64{ smp_cross_call(func, arg1, arg2, 0, 0, 0); }
65static inline void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2,
66 unsigned long arg3)
67{ smp_cross_call(func, arg1, arg2, arg3, 0, 0); }
68static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
69 unsigned long arg3, unsigned long arg4)
70{ smp_cross_call(func, arg1, arg2, arg3, arg4, 0); }
71static inline void xc5(smpfunc_t func, unsigned long arg1, unsigned long arg2,
72 unsigned long arg3, unsigned long arg4, unsigned long arg5)
73{ smp_cross_call(func, arg1, arg2, arg3, arg4, arg5); }
74
75static inline int smp_call_function(void (*func)(void *info), void *info, int nonatomic, int wait)
76{
77 xc1((smpfunc_t)func, (unsigned long)info);
78 return 0;
79}
80
81static inline int cpu_logical_map(int cpu)
82{
83 return cpu;
84}
85
86static inline int hard_smp4m_processor_id(void)
87{
88 int cpuid;
89
90 __asm__ __volatile__("rd %%tbr, %0\n\t"
91 "srl %0, 12, %0\n\t"
92 "and %0, 3, %0\n\t" :
93 "=&r" (cpuid));
94 return cpuid;
95}
96
97static inline int hard_smp4d_processor_id(void)
98{
99 int cpuid;
100
101 __asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
102 "=&r" (cpuid) : "i" (ASI_M_VIKING_TMP1));
103 return cpuid;
104}
105
106#ifndef MODULE
107static inline int hard_smp_processor_id(void)
108{
109 int cpuid;
110
111 /* Black box - sun4m
112 __asm__ __volatile__("rd %%tbr, %0\n\t"
113 "srl %0, 12, %0\n\t"
114 "and %0, 3, %0\n\t" :
115 "=&r" (cpuid));
116 - sun4d
117 __asm__ __volatile__("lda [%g0] ASI_M_VIKING_TMP1, %0\n\t"
118 "nop; nop" :
119 "=&r" (cpuid));
120 See btfixup.h and btfixupprep.c to understand how a blackbox works.
121 */
122 __asm__ __volatile__("sethi %%hi(___b_hard_smp_processor_id), %0\n\t"
123 "sethi %%hi(boot_cpu_id), %0\n\t"
124 "ldub [%0 + %%lo(boot_cpu_id)], %0\n\t" :
125 "=&r" (cpuid));
126 return cpuid;
127}
128#else 5#else
129static inline int hard_smp_processor_id(void) 6#include <asm-sparc/smp_32.h>
130{ 7#endif
131 int cpuid;
132
133 __asm__ __volatile__("mov %%o7, %%g1\n\t"
134 "call ___f___hard_smp_processor_id\n\t"
135 " nop\n\t"
136 "mov %%g2, %0\n\t" : "=r"(cpuid) : : "g1", "g2");
137 return cpuid;
138}
139#endif 8#endif
140
141#define raw_smp_processor_id() (current_thread_info()->cpu)
142
143#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier
144#define prof_counter(__cpu) cpu_data(__cpu).counter
145
146void smp_setup_cpu_possible_map(void);
147
148#endif /* !(__ASSEMBLY__) */
149
150/* Sparc specific messages. */
151#define MSG_CROSS_CALL 0x0005 /* run func on cpus */
152
153/* Empirical PROM processor mailbox constants. If the per-cpu mailbox
154 * contains something other than one of these then the ipi is from
155 * Linux's active_kernel_processor. This facility exists so that
156 * the boot monitor can capture all the other cpus when one catches
157 * a watchdog reset or the user enters the monitor using L1-A keys.
158 */
159#define MBOX_STOPCPU 0xFB
160#define MBOX_IDLECPU 0xFC
161#define MBOX_IDLECPU2 0xFD
162#define MBOX_STOPCPU2 0xFE
163
164#else /* SMP */
165
166#define hard_smp_processor_id() 0
167#define smp_setup_cpu_possible_map() do { } while (0)
168
169#endif /* !(SMP) */
170
171#define NO_PROC_ID 0xFF
172
173#endif /* !(_SPARC_SMP_H) */
diff --git a/include/asm-sparc/smp_32.h b/include/asm-sparc/smp_32.h
new file mode 100644
index 000000000000..7201752cf934
--- /dev/null
+++ b/include/asm-sparc/smp_32.h
@@ -0,0 +1,173 @@
1/* smp.h: Sparc specific SMP stuff.
2 *
3 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
4 */
5
6#ifndef _SPARC_SMP_H
7#define _SPARC_SMP_H
8
9#include <linux/threads.h>
10#include <asm/head.h>
11#include <asm/btfixup.h>
12
13#ifndef __ASSEMBLY__
14
15#include <linux/cpumask.h>
16
17#endif /* __ASSEMBLY__ */
18
19#ifdef CONFIG_SMP
20
21#ifndef __ASSEMBLY__
22
23#include <asm/ptrace.h>
24#include <asm/asi.h>
25#include <asm/atomic.h>
26
27/*
28 * Private routines/data
29 */
30
31extern unsigned char boot_cpu_id;
32extern cpumask_t phys_cpu_present_map;
33#define cpu_possible_map phys_cpu_present_map
34
35typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
36 unsigned long, unsigned long);
37
38/*
39 * General functions that each host system must provide.
40 */
41
42void sun4m_init_smp(void);
43void sun4d_init_smp(void);
44
45void smp_callin(void);
46void smp_boot_cpus(void);
47void smp_store_cpu_info(int);
48
49struct seq_file;
50void smp_bogo(struct seq_file *);
51void smp_info(struct seq_file *);
52
53BTFIXUPDEF_CALL(void, smp_cross_call, smpfunc_t, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long)
54BTFIXUPDEF_CALL(int, __hard_smp_processor_id, void)
55BTFIXUPDEF_BLACKBOX(hard_smp_processor_id)
56BTFIXUPDEF_BLACKBOX(load_current)
57
58#define smp_cross_call(func,arg1,arg2,arg3,arg4,arg5) BTFIXUP_CALL(smp_cross_call)(func,arg1,arg2,arg3,arg4,arg5)
59
60static inline void xc0(smpfunc_t func) { smp_cross_call(func, 0, 0, 0, 0, 0); }
61static inline void xc1(smpfunc_t func, unsigned long arg1)
62{ smp_cross_call(func, arg1, 0, 0, 0, 0); }
63static inline void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2)
64{ smp_cross_call(func, arg1, arg2, 0, 0, 0); }
65static inline void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2,
66 unsigned long arg3)
67{ smp_cross_call(func, arg1, arg2, arg3, 0, 0); }
68static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
69 unsigned long arg3, unsigned long arg4)
70{ smp_cross_call(func, arg1, arg2, arg3, arg4, 0); }
71static inline void xc5(smpfunc_t func, unsigned long arg1, unsigned long arg2,
72 unsigned long arg3, unsigned long arg4, unsigned long arg5)
73{ smp_cross_call(func, arg1, arg2, arg3, arg4, arg5); }
74
75static inline int smp_call_function(void (*func)(void *info), void *info, int wait)
76{
77 xc1((smpfunc_t)func, (unsigned long)info);
78 return 0;
79}
80
81static inline int cpu_logical_map(int cpu)
82{
83 return cpu;
84}
85
86static inline int hard_smp4m_processor_id(void)
87{
88 int cpuid;
89
90 __asm__ __volatile__("rd %%tbr, %0\n\t"
91 "srl %0, 12, %0\n\t"
92 "and %0, 3, %0\n\t" :
93 "=&r" (cpuid));
94 return cpuid;
95}
96
97static inline int hard_smp4d_processor_id(void)
98{
99 int cpuid;
100
101 __asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
102 "=&r" (cpuid) : "i" (ASI_M_VIKING_TMP1));
103 return cpuid;
104}
105
106#ifndef MODULE
107static inline int hard_smp_processor_id(void)
108{
109 int cpuid;
110
111 /* Black box - sun4m
112 __asm__ __volatile__("rd %%tbr, %0\n\t"
113 "srl %0, 12, %0\n\t"
114 "and %0, 3, %0\n\t" :
115 "=&r" (cpuid));
116 - sun4d
117 __asm__ __volatile__("lda [%g0] ASI_M_VIKING_TMP1, %0\n\t"
118 "nop; nop" :
119 "=&r" (cpuid));
120 See btfixup.h and btfixupprep.c to understand how a blackbox works.
121 */
122 __asm__ __volatile__("sethi %%hi(___b_hard_smp_processor_id), %0\n\t"
123 "sethi %%hi(boot_cpu_id), %0\n\t"
124 "ldub [%0 + %%lo(boot_cpu_id)], %0\n\t" :
125 "=&r" (cpuid));
126 return cpuid;
127}
128#else
129static inline int hard_smp_processor_id(void)
130{
131 int cpuid;
132
133 __asm__ __volatile__("mov %%o7, %%g1\n\t"
134 "call ___f___hard_smp_processor_id\n\t"
135 " nop\n\t"
136 "mov %%g2, %0\n\t" : "=r"(cpuid) : : "g1", "g2");
137 return cpuid;
138}
139#endif
140
141#define raw_smp_processor_id() (current_thread_info()->cpu)
142
143#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier
144#define prof_counter(__cpu) cpu_data(__cpu).counter
145
146void smp_setup_cpu_possible_map(void);
147
148#endif /* !(__ASSEMBLY__) */
149
150/* Sparc specific messages. */
151#define MSG_CROSS_CALL 0x0005 /* run func on cpus */
152
153/* Empirical PROM processor mailbox constants. If the per-cpu mailbox
154 * contains something other than one of these then the ipi is from
155 * Linux's active_kernel_processor. This facility exists so that
156 * the boot monitor can capture all the other cpus when one catches
157 * a watchdog reset or the user enters the monitor using L1-A keys.
158 */
159#define MBOX_STOPCPU 0xFB
160#define MBOX_IDLECPU 0xFC
161#define MBOX_IDLECPU2 0xFD
162#define MBOX_STOPCPU2 0xFE
163
164#else /* SMP */
165
166#define hard_smp_processor_id() 0
167#define smp_setup_cpu_possible_map() do { } while (0)
168
169#endif /* !(SMP) */
170
171#define NO_PROC_ID 0xFF
172
173#endif /* !(_SPARC_SMP_H) */
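For orientation (an editorial sketch, not part of the patch): hard_smp4m_processor_id() above reads the trap base register and keeps bits 13:12, which is where sun4m encodes the CPU number. The same extraction on a plain value:

	#include <stdio.h>

	static int sun4m_cpuid_from_tbr(unsigned long tbr)
	{
		return (tbr >> 12) & 3;		/* matches the rd/srl/and sequence */
	}

	int main(void)
	{
		printf("%d\n", sun4m_cpuid_from_tbr(0x00003000));	/* cpu 3 */
		return 0;
	}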
diff --git a/include/asm-sparc/smp_64.h b/include/asm-sparc/smp_64.h
new file mode 100644
index 000000000000..57224dd37b3a
--- /dev/null
+++ b/include/asm-sparc/smp_64.h
@@ -0,0 +1,67 @@
1/* smp.h: Sparc64 specific SMP stuff.
2 *
3 * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
4 */
5
6#ifndef _SPARC64_SMP_H
7#define _SPARC64_SMP_H
8
9#include <linux/threads.h>
10#include <asm/asi.h>
11#include <asm/starfire.h>
12#include <asm/spitfire.h>
13
14#ifndef __ASSEMBLY__
15
16#include <linux/cpumask.h>
17#include <linux/cache.h>
18
19#endif /* !(__ASSEMBLY__) */
20
21#ifdef CONFIG_SMP
22
23#ifndef __ASSEMBLY__
24
25/*
26 * Private routines/data
27 */
28
29#include <linux/bitops.h>
30#include <asm/atomic.h>
31#include <asm/percpu.h>
32
33DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
34extern cpumask_t cpu_core_map[NR_CPUS];
35extern int sparc64_multi_core;
36
37extern void arch_send_call_function_single_ipi(int cpu);
38extern void arch_send_call_function_ipi(cpumask_t mask);
39
40/*
41 * General functions that each host system must provide.
42 */
43
44extern int hard_smp_processor_id(void);
45#define raw_smp_processor_id() (current_thread_info()->cpu)
46
47extern void smp_fill_in_sib_core_maps(void);
48extern void cpu_play_dead(void);
49
50extern void smp_fetch_global_regs(void);
51
52#ifdef CONFIG_HOTPLUG_CPU
53extern int __cpu_disable(void);
54extern void __cpu_die(unsigned int cpu);
55#endif
56
57#endif /* !(__ASSEMBLY__) */
58
59#else
60
61#define hard_smp_processor_id() 0
62#define smp_fill_in_sib_core_maps() do { } while (0)
63#define smp_fetch_global_regs() do { } while (0)
64
65#endif /* !(CONFIG_SMP) */
66
67#endif /* !(_SPARC64_SMP_H) */
diff --git a/include/asm-sparc/sparsemem.h b/include/asm-sparc/sparsemem.h
new file mode 100644
index 000000000000..b99d4e4b6d28
--- /dev/null
+++ b/include/asm-sparc/sparsemem.h
@@ -0,0 +1,12 @@
1#ifndef _SPARC64_SPARSEMEM_H
2#define _SPARC64_SPARSEMEM_H
3
4#ifdef __KERNEL__
5
6#define SECTION_SIZE_BITS 30
7#define MAX_PHYSADDR_BITS 42
8#define MAX_PHYSMEM_BITS 42
9
10#endif /* !(__KERNEL__) */
11
12#endif /* !(_SPARC64_SPARSEMEM_H) */
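For orientation (an editorial sketch, not part of the patch): with MAX_PHYSMEM_BITS of 42 and SECTION_SIZE_BITS of 30, sparsemem carves the 4 TB physical address space into 1 GB sections, i.e. 2^(42-30) = 4096 of them:

	#include <stdio.h>

	int main(void)
	{
		unsigned int max_physmem_bits = 42, section_size_bits = 30;

		printf("%lu sections of %lu MB\n",
		       1UL << (max_physmem_bits - section_size_bits),
		       (1UL << section_size_bits) >> 20);
		return 0;
	}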
diff --git a/include/asm-sparc/spinlock.h b/include/asm-sparc/spinlock.h
index de2249b267c6..3b71c50b72eb 100644
--- a/include/asm-sparc/spinlock.h
+++ b/include/asm-sparc/spinlock.h
@@ -1,192 +1,8 @@
1/* spinlock.h: 32-bit Sparc spinlock support. 1#ifndef ___ASM_SPARC_SPINLOCK_H
2 * 2#define ___ASM_SPARC_SPINLOCK_H
3 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) 3#if defined(__sparc__) && defined(__arch64__)
4 */ 4#include <asm-sparc/spinlock_64.h>
5 5#else
6#ifndef __SPARC_SPINLOCK_H 6#include <asm-sparc/spinlock_32.h>
7#define __SPARC_SPINLOCK_H 7#endif
8 8#endif
9#include <linux/threads.h> /* For NR_CPUS */
10
11#ifndef __ASSEMBLY__
12
13#include <asm/psr.h>
14
15#define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
16
17#define __raw_spin_unlock_wait(lock) \
18 do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
19
20static inline void __raw_spin_lock(raw_spinlock_t *lock)
21{
22 __asm__ __volatile__(
23 "\n1:\n\t"
24 "ldstub [%0], %%g2\n\t"
25 "orcc %%g2, 0x0, %%g0\n\t"
26 "bne,a 2f\n\t"
27 " ldub [%0], %%g2\n\t"
28 ".subsection 2\n"
29 "2:\n\t"
30 "orcc %%g2, 0x0, %%g0\n\t"
31 "bne,a 2b\n\t"
32 " ldub [%0], %%g2\n\t"
33 "b,a 1b\n\t"
34 ".previous\n"
35 : /* no outputs */
36 : "r" (lock)
37 : "g2", "memory", "cc");
38}
39
40static inline int __raw_spin_trylock(raw_spinlock_t *lock)
41{
42 unsigned int result;
43 __asm__ __volatile__("ldstub [%1], %0"
44 : "=r" (result)
45 : "r" (lock)
46 : "memory");
47 return (result == 0);
48}
49
50static inline void __raw_spin_unlock(raw_spinlock_t *lock)
51{
52 __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
53}
54
55/* Read-write spinlocks, allowing multiple readers
56 * but only one writer.
57 *
58 * NOTE! it is quite common to have readers in interrupts
59 * but no interrupt writers. For those circumstances we
60 * can "mix" irq-safe locks - any writer needs to get a
61 * irq-safe write-lock, but readers can get non-irqsafe
62 * read-locks.
63 *
64 * XXX This might create some problems with my dual spinlock
65 * XXX scheme, deadlocks etc. -DaveM
66 *
67 * Sort of like atomic_t's on Sparc, but even more clever.
68 *
69 * ------------------------------------
70 * | 24-bit counter | wlock | raw_rwlock_t
71 * ------------------------------------
72 * 31 8 7 0
73 *
74 * wlock signifies the one writer is in or somebody is updating
75 * counter. For a writer, if he successfully acquires the wlock,
76 * but counter is non-zero, he has to release the lock and wait,
77 * till both counter and wlock are zero.
78 *
79 * Unfortunately this scheme limits us to ~16,000,000 cpus.
80 */
81static inline void __read_lock(raw_rwlock_t *rw)
82{
83 register raw_rwlock_t *lp asm("g1");
84 lp = rw;
85 __asm__ __volatile__(
86 "mov %%o7, %%g4\n\t"
87 "call ___rw_read_enter\n\t"
88 " ldstub [%%g1 + 3], %%g2\n"
89 : /* no outputs */
90 : "r" (lp)
91 : "g2", "g4", "memory", "cc");
92}
93
94#define __raw_read_lock(lock) \
95do { unsigned long flags; \
96 local_irq_save(flags); \
97 __read_lock(lock); \
98 local_irq_restore(flags); \
99} while(0)
100
101static inline void __read_unlock(raw_rwlock_t *rw)
102{
103 register raw_rwlock_t *lp asm("g1");
104 lp = rw;
105 __asm__ __volatile__(
106 "mov %%o7, %%g4\n\t"
107 "call ___rw_read_exit\n\t"
108 " ldstub [%%g1 + 3], %%g2\n"
109 : /* no outputs */
110 : "r" (lp)
111 : "g2", "g4", "memory", "cc");
112}
113
114#define __raw_read_unlock(lock) \
115do { unsigned long flags; \
116 local_irq_save(flags); \
117 __read_unlock(lock); \
118 local_irq_restore(flags); \
119} while(0)
120
121static inline void __raw_write_lock(raw_rwlock_t *rw)
122{
123 register raw_rwlock_t *lp asm("g1");
124 lp = rw;
125 __asm__ __volatile__(
126 "mov %%o7, %%g4\n\t"
127 "call ___rw_write_enter\n\t"
128 " ldstub [%%g1 + 3], %%g2\n"
129 : /* no outputs */
130 : "r" (lp)
131 : "g2", "g4", "memory", "cc");
132 *(volatile __u32 *)&lp->lock = ~0U;
133}
134
135static inline int __raw_write_trylock(raw_rwlock_t *rw)
136{
137 unsigned int val;
138
139 __asm__ __volatile__("ldstub [%1 + 3], %0"
140 : "=r" (val)
141 : "r" (&rw->lock)
142 : "memory");
143
144 if (val == 0) {
145 val = rw->lock & ~0xff;
146 if (val)
147 ((volatile u8*)&rw->lock)[3] = 0;
148 else
149 *(volatile u32*)&rw->lock = ~0U;
150 }
151
152 return (val == 0);
153}
154
155static inline int __read_trylock(raw_rwlock_t *rw)
156{
157 register raw_rwlock_t *lp asm("g1");
158 register int res asm("o0");
159 lp = rw;
160 __asm__ __volatile__(
161 "mov %%o7, %%g4\n\t"
162 "call ___rw_read_try\n\t"
163 " ldstub [%%g1 + 3], %%g2\n"
164 : "=r" (res)
165 : "r" (lp)
166 : "g2", "g4", "memory", "cc");
167 return res;
168}
169
170#define __raw_read_trylock(lock) \
171({ unsigned long flags; \
172 int res; \
173 local_irq_save(flags); \
174 res = __read_trylock(lock); \
175 local_irq_restore(flags); \
176 res; \
177})
178
179#define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0)
180
181#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
182
183#define _raw_spin_relax(lock) cpu_relax()
184#define _raw_read_relax(lock) cpu_relax()
185#define _raw_write_relax(lock) cpu_relax()
186
187#define __raw_read_can_lock(rw) (!((rw)->lock & 0xff))
188#define __raw_write_can_lock(rw) (!(rw)->lock)
189
190#endif /* !(__ASSEMBLY__) */
191
192#endif /* __SPARC_SPINLOCK_H */
diff --git a/include/asm-sparc/spinlock_32.h b/include/asm-sparc/spinlock_32.h
new file mode 100644
index 000000000000..de2249b267c6
--- /dev/null
+++ b/include/asm-sparc/spinlock_32.h
@@ -0,0 +1,192 @@
1/* spinlock.h: 32-bit Sparc spinlock support.
2 *
3 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
4 */
5
6#ifndef __SPARC_SPINLOCK_H
7#define __SPARC_SPINLOCK_H
8
9#include <linux/threads.h> /* For NR_CPUS */
10
11#ifndef __ASSEMBLY__
12
13#include <asm/psr.h>
14
15#define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
16
17#define __raw_spin_unlock_wait(lock) \
18 do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
19
20static inline void __raw_spin_lock(raw_spinlock_t *lock)
21{
22 __asm__ __volatile__(
23 "\n1:\n\t"
24 "ldstub [%0], %%g2\n\t"
25 "orcc %%g2, 0x0, %%g0\n\t"
26 "bne,a 2f\n\t"
27 " ldub [%0], %%g2\n\t"
28 ".subsection 2\n"
29 "2:\n\t"
30 "orcc %%g2, 0x0, %%g0\n\t"
31 "bne,a 2b\n\t"
32 " ldub [%0], %%g2\n\t"
33 "b,a 1b\n\t"
34 ".previous\n"
35 : /* no outputs */
36 : "r" (lock)
37 : "g2", "memory", "cc");
38}
39
40static inline int __raw_spin_trylock(raw_spinlock_t *lock)
41{
42 unsigned int result;
43 __asm__ __volatile__("ldstub [%1], %0"
44 : "=r" (result)
45 : "r" (lock)
46 : "memory");
47 return (result == 0);
48}
49
50static inline void __raw_spin_unlock(raw_spinlock_t *lock)
51{
52 __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
53}
54
55/* Read-write spinlocks, allowing multiple readers
56 * but only one writer.
57 *
58 * NOTE! it is quite common to have readers in interrupts
59 * but no interrupt writers. For those circumstances we
60 * can "mix" irq-safe locks - any writer needs to get a
61 * irq-safe write-lock, but readers can get non-irqsafe
62 * read-locks.
63 *
64 * XXX This might create some problems with my dual spinlock
65 * XXX scheme, deadlocks etc. -DaveM
66 *
67 * Sort of like atomic_t's on Sparc, but even more clever.
68 *
69 * ------------------------------------
70 * | 24-bit counter | wlock | raw_rwlock_t
71 * ------------------------------------
72 * 31 8 7 0
73 *
74 * wlock signifies the one writer is in or somebody is updating
75 * counter. For a writer, if he successfully acquires the wlock,
76 * but counter is non-zero, he has to release the lock and wait,
77 * till both counter and wlock are zero.
78 *
79 * Unfortunately this scheme limits us to ~16,000,000 cpus.
80 */
81static inline void __read_lock(raw_rwlock_t *rw)
82{
83 register raw_rwlock_t *lp asm("g1");
84 lp = rw;
85 __asm__ __volatile__(
86 "mov %%o7, %%g4\n\t"
87 "call ___rw_read_enter\n\t"
88 " ldstub [%%g1 + 3], %%g2\n"
89 : /* no outputs */
90 : "r" (lp)
91 : "g2", "g4", "memory", "cc");
92}
93
94#define __raw_read_lock(lock) \
95do { unsigned long flags; \
96 local_irq_save(flags); \
97 __read_lock(lock); \
98 local_irq_restore(flags); \
99} while(0)
100
101static inline void __read_unlock(raw_rwlock_t *rw)
102{
103 register raw_rwlock_t *lp asm("g1");
104 lp = rw;
105 __asm__ __volatile__(
106 "mov %%o7, %%g4\n\t"
107 "call ___rw_read_exit\n\t"
108 " ldstub [%%g1 + 3], %%g2\n"
109 : /* no outputs */
110 : "r" (lp)
111 : "g2", "g4", "memory", "cc");
112}
113
114#define __raw_read_unlock(lock) \
115do { unsigned long flags; \
116 local_irq_save(flags); \
117 __read_unlock(lock); \
118 local_irq_restore(flags); \
119} while(0)
120
121static inline void __raw_write_lock(raw_rwlock_t *rw)
122{
123 register raw_rwlock_t *lp asm("g1");
124 lp = rw;
125 __asm__ __volatile__(
126 "mov %%o7, %%g4\n\t"
127 "call ___rw_write_enter\n\t"
128 " ldstub [%%g1 + 3], %%g2\n"
129 : /* no outputs */
130 : "r" (lp)
131 : "g2", "g4", "memory", "cc");
132 *(volatile __u32 *)&lp->lock = ~0U;
133}
134
135static inline int __raw_write_trylock(raw_rwlock_t *rw)
136{
137 unsigned int val;
138
139 __asm__ __volatile__("ldstub [%1 + 3], %0"
140 : "=r" (val)
141 : "r" (&rw->lock)
142 : "memory");
143
144 if (val == 0) {
145 val = rw->lock & ~0xff;
146 if (val)
147 ((volatile u8*)&rw->lock)[3] = 0;
148 else
149 *(volatile u32*)&rw->lock = ~0U;
150 }
151
152 return (val == 0);
153}
154
155static inline int __read_trylock(raw_rwlock_t *rw)
156{
157 register raw_rwlock_t *lp asm("g1");
158 register int res asm("o0");
159 lp = rw;
160 __asm__ __volatile__(
161 "mov %%o7, %%g4\n\t"
162 "call ___rw_read_try\n\t"
163 " ldstub [%%g1 + 3], %%g2\n"
164 : "=r" (res)
165 : "r" (lp)
166 : "g2", "g4", "memory", "cc");
167 return res;
168}
169
170#define __raw_read_trylock(lock) \
171({ unsigned long flags; \
172 int res; \
173 local_irq_save(flags); \
174 res = __read_trylock(lock); \
175 local_irq_restore(flags); \
176 res; \
177})
178
179#define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0)
180
181#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
182
183#define _raw_spin_relax(lock) cpu_relax()
184#define _raw_read_relax(lock) cpu_relax()
185#define _raw_write_relax(lock) cpu_relax()
186
187#define __raw_read_can_lock(rw) (!((rw)->lock & 0xff))
188#define __raw_write_can_lock(rw) (!(rw)->lock)
189
190#endif /* !(__ASSEMBLY__) */
191
192#endif /* __SPARC_SPINLOCK_H */
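For orientation (an editorial sketch, not part of the patch): the comment above describes the 32-bit rwlock word as a 24-bit reader count in bits 31:8 plus a one-byte write lock in bits 7:0 -- the byte that ldstub [%g1 + 3] hits on big-endian sparc. The can-lock tests and the reader bump, modelled on plain integers:

	#include <stdint.h>
	#include <stdio.h>

	static int rw_read_can_lock(uint32_t lock)
	{
		return !(lock & 0xff);		/* low byte = writer/updater flag */
	}

	static int rw_write_can_lock(uint32_t lock)
	{
		return !lock;			/* needs no readers and no writer */
	}

	static uint32_t rw_add_reader(uint32_t lock)
	{
		return lock + 0x100;		/* bump the 24-bit counter field */
	}

	int main(void)
	{
		uint32_t lock = rw_add_reader(0);

		printf("read_can_lock=%d write_can_lock=%d\n",
		       rw_read_can_lock(lock), rw_write_can_lock(lock));
		return 0;
	}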
diff --git a/include/asm-sparc/spinlock_64.h b/include/asm-sparc/spinlock_64.h
new file mode 100644
index 000000000000..0006fe9f8c7a
--- /dev/null
+++ b/include/asm-sparc/spinlock_64.h
@@ -0,0 +1,250 @@
1/* spinlock.h: 64-bit Sparc spinlock support.
2 *
3 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
4 */
5
6#ifndef __SPARC64_SPINLOCK_H
7#define __SPARC64_SPINLOCK_H
8
9#include <linux/threads.h> /* For NR_CPUS */
10
11#ifndef __ASSEMBLY__
12
13/* To get debugging spinlocks which detect and catch
14 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
15 * and rebuild your kernel.
16 */
17
18/* All of these locking primitives are expected to work properly
19 * even in an RMO memory model, which currently is what the kernel
20 * runs in.
21 *
22 * There is another issue. Because we play games to save cycles
23 * in the non-contention case, we need to be extra careful about
24 * branch targets into the "spinning" code. They live in their
25 * own section, but the newer V9 branches have a shorter range
26 * than the traditional 32-bit sparc branch variants. The rule
27 * is that the branches that go into and out of the spinner sections
28 * must be pre-V9 branches.
29 */
30
31#define __raw_spin_is_locked(lp) ((lp)->lock != 0)
32
33#define __raw_spin_unlock_wait(lp) \
34 do { rmb(); \
35 } while((lp)->lock)
36
37static inline void __raw_spin_lock(raw_spinlock_t *lock)
38{
39 unsigned long tmp;
40
41 __asm__ __volatile__(
42"1: ldstub [%1], %0\n"
43" membar #StoreLoad | #StoreStore\n"
44" brnz,pn %0, 2f\n"
45" nop\n"
46" .subsection 2\n"
47"2: ldub [%1], %0\n"
48" membar #LoadLoad\n"
49" brnz,pt %0, 2b\n"
50" nop\n"
51" ba,a,pt %%xcc, 1b\n"
52" .previous"
53 : "=&r" (tmp)
54 : "r" (lock)
55 : "memory");
56}
57
58static inline int __raw_spin_trylock(raw_spinlock_t *lock)
59{
60 unsigned long result;
61
62 __asm__ __volatile__(
63" ldstub [%1], %0\n"
64" membar #StoreLoad | #StoreStore"
65 : "=r" (result)
66 : "r" (lock)
67 : "memory");
68
69 return (result == 0UL);
70}
71
72static inline void __raw_spin_unlock(raw_spinlock_t *lock)
73{
74 __asm__ __volatile__(
75" membar #StoreStore | #LoadStore\n"
76" stb %%g0, [%0]"
77 : /* No outputs */
78 : "r" (lock)
79 : "memory");
80}
81
82static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
83{
84 unsigned long tmp1, tmp2;
85
86 __asm__ __volatile__(
87"1: ldstub [%2], %0\n"
88" membar #StoreLoad | #StoreStore\n"
89" brnz,pn %0, 2f\n"
90" nop\n"
91" .subsection 2\n"
92"2: rdpr %%pil, %1\n"
93" wrpr %3, %%pil\n"
94"3: ldub [%2], %0\n"
95" membar #LoadLoad\n"
96" brnz,pt %0, 3b\n"
97" nop\n"
98" ba,pt %%xcc, 1b\n"
99" wrpr %1, %%pil\n"
100" .previous"
101 : "=&r" (tmp1), "=&r" (tmp2)
102 : "r"(lock), "r"(flags)
103 : "memory");
104}
105
106/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
107
108static void inline __read_lock(raw_rwlock_t *lock)
109{
110 unsigned long tmp1, tmp2;
111
112 __asm__ __volatile__ (
113"1: ldsw [%2], %0\n"
114" brlz,pn %0, 2f\n"
115"4: add %0, 1, %1\n"
116" cas [%2], %0, %1\n"
117" cmp %0, %1\n"
118" membar #StoreLoad | #StoreStore\n"
119" bne,pn %%icc, 1b\n"
120" nop\n"
121" .subsection 2\n"
122"2: ldsw [%2], %0\n"
123" membar #LoadLoad\n"
124" brlz,pt %0, 2b\n"
125" nop\n"
126" ba,a,pt %%xcc, 4b\n"
127" .previous"
128 : "=&r" (tmp1), "=&r" (tmp2)
129 : "r" (lock)
130 : "memory");
131}
132
133static int inline __read_trylock(raw_rwlock_t *lock)
134{
135 int tmp1, tmp2;
136
137 __asm__ __volatile__ (
138"1: ldsw [%2], %0\n"
139" brlz,a,pn %0, 2f\n"
140" mov 0, %0\n"
141" add %0, 1, %1\n"
142" cas [%2], %0, %1\n"
143" cmp %0, %1\n"
144" membar #StoreLoad | #StoreStore\n"
145" bne,pn %%icc, 1b\n"
146" mov 1, %0\n"
147"2:"
148 : "=&r" (tmp1), "=&r" (tmp2)
149 : "r" (lock)
150 : "memory");
151
152 return tmp1;
153}
154
155static void inline __read_unlock(raw_rwlock_t *lock)
156{
157 unsigned long tmp1, tmp2;
158
159 __asm__ __volatile__(
160" membar #StoreLoad | #LoadLoad\n"
161"1: lduw [%2], %0\n"
162" sub %0, 1, %1\n"
163" cas [%2], %0, %1\n"
164" cmp %0, %1\n"
165" bne,pn %%xcc, 1b\n"
166" nop"
167 : "=&r" (tmp1), "=&r" (tmp2)
168 : "r" (lock)
169 : "memory");
170}
171
172static void inline __write_lock(raw_rwlock_t *lock)
173{
174 unsigned long mask, tmp1, tmp2;
175
176 mask = 0x80000000UL;
177
178 __asm__ __volatile__(
179"1: lduw [%2], %0\n"
180" brnz,pn %0, 2f\n"
181"4: or %0, %3, %1\n"
182" cas [%2], %0, %1\n"
183" cmp %0, %1\n"
184" membar #StoreLoad | #StoreStore\n"
185" bne,pn %%icc, 1b\n"
186" nop\n"
187" .subsection 2\n"
188"2: lduw [%2], %0\n"
189" membar #LoadLoad\n"
190" brnz,pt %0, 2b\n"
191" nop\n"
192" ba,a,pt %%xcc, 4b\n"
193" .previous"
194 : "=&r" (tmp1), "=&r" (tmp2)
195 : "r" (lock), "r" (mask)
196 : "memory");
197}
198
199static void inline __write_unlock(raw_rwlock_t *lock)
200{
201 __asm__ __volatile__(
202" membar #LoadStore | #StoreStore\n"
203" stw %%g0, [%0]"
204 : /* no outputs */
205 : "r" (lock)
206 : "memory");
207}
208
209static int inline __write_trylock(raw_rwlock_t *lock)
210{
211 unsigned long mask, tmp1, tmp2, result;
212
213 mask = 0x80000000UL;
214
215 __asm__ __volatile__(
216" mov 0, %2\n"
217"1: lduw [%3], %0\n"
218" brnz,pn %0, 2f\n"
219" or %0, %4, %1\n"
220" cas [%3], %0, %1\n"
221" cmp %0, %1\n"
222" membar #StoreLoad | #StoreStore\n"
223" bne,pn %%icc, 1b\n"
224" nop\n"
225" mov 1, %2\n"
226"2:"
227 : "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
228 : "r" (lock), "r" (mask)
229 : "memory");
230
231 return result;
232}
233
234#define __raw_read_lock(p) __read_lock(p)
235#define __raw_read_trylock(p) __read_trylock(p)
236#define __raw_read_unlock(p) __read_unlock(p)
237#define __raw_write_lock(p) __write_lock(p)
238#define __raw_write_unlock(p) __write_unlock(p)
239#define __raw_write_trylock(p) __write_trylock(p)
240
241#define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
242#define __raw_write_can_lock(rw) (!(rw)->lock)
243
244#define _raw_spin_relax(lock) cpu_relax()
245#define _raw_read_relax(lock) cpu_relax()
246#define _raw_write_relax(lock) cpu_relax()
247
248#endif /* !(__ASSEMBLY__) */
249
250#endif /* !(__SPARC64_SPINLOCK_H) */
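For orientation (an editorial sketch, not part of the patch): both spinlock flavours are built on ldstub, which atomically loads a byte and stores 0xff into it, so the lock is acquired when the old value reads back as zero. The same protocol expressed with GCC __atomic builtins instead of sparc asm -- a portable sketch, not the kernel's implementation:

	static void sketch_spin_lock(volatile unsigned char *lock)
	{
		/* ldstub equivalent: fetch old byte, store 0xff, retry on contention */
		while (__atomic_exchange_n(lock, 0xff, __ATOMIC_ACQUIRE))
			while (__atomic_load_n(lock, __ATOMIC_RELAXED))
				;	/* spin on plain loads until it looks free */
	}

	static int sketch_spin_trylock(volatile unsigned char *lock)
	{
		return __atomic_exchange_n(lock, 0xff, __ATOMIC_ACQUIRE) == 0;
	}

	static void sketch_spin_unlock(volatile unsigned char *lock)
	{
		__atomic_store_n(lock, 0, __ATOMIC_RELEASE);
	}

	int main(void)
	{
		volatile unsigned char lock = 0;

		sketch_spin_lock(&lock);
		/* a second taker would now spin until the unlock below */
		sketch_spin_unlock(&lock);
		return sketch_spin_trylock(&lock) ? 0 : 1;
	}

The inner read-only loop mirrors the .subsection 2 spinner in the kernel code: contenders poll with ordinary loads and only retry the atomic exchange once the byte looks free.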
diff --git a/include/asm-sparc/spinlock_types.h b/include/asm-sparc/spinlock_types.h
index 0a0fb116c4ec..37cbe01c585b 100644
--- a/include/asm-sparc/spinlock_types.h
+++ b/include/asm-sparc/spinlock_types.h
@@ -6,7 +6,7 @@
6#endif 6#endif
7 7
8typedef struct { 8typedef struct {
9 unsigned char lock; 9 volatile unsigned char lock;
10} raw_spinlock_t; 10} raw_spinlock_t;
11 11
12#define __RAW_SPIN_LOCK_UNLOCKED { 0 } 12#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
diff --git a/include/asm-sparc/spitfire.h b/include/asm-sparc/spitfire.h
new file mode 100644
index 000000000000..985ea7e31992
--- /dev/null
+++ b/include/asm-sparc/spitfire.h
@@ -0,0 +1,342 @@
1/* spitfire.h: SpitFire/BlackBird/Cheetah inline MMU operations.
2 *
3 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
4 */
5
6#ifndef _SPARC64_SPITFIRE_H
7#define _SPARC64_SPITFIRE_H
8
9#include <asm/asi.h>
10
11/* The following register addresses are accessible via ASI_DMMU
12 * and ASI_IMMU; that is, there is a distinct and unique copy of
13 * each of these registers for each TLB.
14 */
15#define TSB_TAG_TARGET 0x0000000000000000 /* All chips */
16#define TLB_SFSR 0x0000000000000018 /* All chips */
17#define TSB_REG 0x0000000000000028 /* All chips */
18#define TLB_TAG_ACCESS 0x0000000000000030 /* All chips */
19#define VIRT_WATCHPOINT 0x0000000000000038 /* All chips */
20#define PHYS_WATCHPOINT 0x0000000000000040 /* All chips */
21#define TSB_EXTENSION_P 0x0000000000000048 /* Ultra-III and later */
22#define TSB_EXTENSION_S 0x0000000000000050 /* Ultra-III and later, D-TLB only */
23#define TSB_EXTENSION_N 0x0000000000000058 /* Ultra-III and later */
24#define TLB_TAG_ACCESS_EXT 0x0000000000000060 /* Ultra-III+ and later */
25
26/* These registers only exist as one entity, and are accessed
27 * via ASI_DMMU only.
28 */
29#define PRIMARY_CONTEXT 0x0000000000000008
30#define SECONDARY_CONTEXT 0x0000000000000010
31#define DMMU_SFAR 0x0000000000000020
32#define VIRT_WATCHPOINT 0x0000000000000038
33#define PHYS_WATCHPOINT 0x0000000000000040
34
35#define SPITFIRE_HIGHEST_LOCKED_TLBENT (64 - 1)
36#define CHEETAH_HIGHEST_LOCKED_TLBENT (16 - 1)
37
38#define L1DCACHE_SIZE 0x4000
39
40#define SUN4V_CHIP_INVALID 0x00
41#define SUN4V_CHIP_NIAGARA1 0x01
42#define SUN4V_CHIP_NIAGARA2 0x02
43#define SUN4V_CHIP_UNKNOWN 0xff
44
45#ifndef __ASSEMBLY__
46
47enum ultra_tlb_layout {
48 spitfire = 0,
49 cheetah = 1,
50 cheetah_plus = 2,
51 hypervisor = 3,
52};
53
54extern enum ultra_tlb_layout tlb_type;
55
56extern int sun4v_chip_type;
57
58extern int cheetah_pcache_forced_on;
59extern void cheetah_enable_pcache(void);
60
61#define sparc64_highest_locked_tlbent() \
62 (tlb_type == spitfire ? \
63 SPITFIRE_HIGHEST_LOCKED_TLBENT : \
64 CHEETAH_HIGHEST_LOCKED_TLBENT)
65
66extern int num_kernel_image_mappings;
67
68/* The data cache is write-through, so this just invalidates the
69 * specified line.
70 */
71static inline void spitfire_put_dcache_tag(unsigned long addr, unsigned long tag)
72{
73 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
74 "membar #Sync"
75 : /* No outputs */
76 : "r" (tag), "r" (addr), "i" (ASI_DCACHE_TAG));
77}
78
79/* The instruction cache lines are flushed with this, but note that
80 * this does not flush the pipeline. It is possible for a line to
81 * get flushed yet have stale instructions still in the pipeline;
82 * a flush instruction (to any address) is sufficient to handle
83 * this issue after the line is invalidated.
84 */
85static inline void spitfire_put_icache_tag(unsigned long addr, unsigned long tag)
86{
87 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
88 "membar #Sync"
89 : /* No outputs */
90 : "r" (tag), "r" (addr), "i" (ASI_IC_TAG));
91}
92
93static inline unsigned long spitfire_get_dtlb_data(int entry)
94{
95 unsigned long data;
96
97 __asm__ __volatile__("ldxa [%1] %2, %0"
98 : "=r" (data)
99 : "r" (entry << 3), "i" (ASI_DTLB_DATA_ACCESS));
100
101 /* Clear TTE diag bits. */
102 data &= ~0x0003fe0000000000UL;
103
104 return data;
105}
106
107static inline unsigned long spitfire_get_dtlb_tag(int entry)
108{
109 unsigned long tag;
110
111 __asm__ __volatile__("ldxa [%1] %2, %0"
112 : "=r" (tag)
113 : "r" (entry << 3), "i" (ASI_DTLB_TAG_READ));
114 return tag;
115}
116
117static inline void spitfire_put_dtlb_data(int entry, unsigned long data)
118{
119 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
120 "membar #Sync"
121 : /* No outputs */
122 : "r" (data), "r" (entry << 3),
123 "i" (ASI_DTLB_DATA_ACCESS));
124}
125
126static inline unsigned long spitfire_get_itlb_data(int entry)
127{
128 unsigned long data;
129
130 __asm__ __volatile__("ldxa [%1] %2, %0"
131 : "=r" (data)
132 : "r" (entry << 3), "i" (ASI_ITLB_DATA_ACCESS));
133
134 /* Clear TTE diag bits. */
135 data &= ~0x0003fe0000000000UL;
136
137 return data;
138}
139
140static inline unsigned long spitfire_get_itlb_tag(int entry)
141{
142 unsigned long tag;
143
144 __asm__ __volatile__("ldxa [%1] %2, %0"
145 : "=r" (tag)
146 : "r" (entry << 3), "i" (ASI_ITLB_TAG_READ));
147 return tag;
148}
149
150static inline void spitfire_put_itlb_data(int entry, unsigned long data)
151{
152 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
153 "membar #Sync"
154 : /* No outputs */
155 : "r" (data), "r" (entry << 3),
156 "i" (ASI_ITLB_DATA_ACCESS));
157}
158
159static inline void spitfire_flush_dtlb_nucleus_page(unsigned long page)
160{
161 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
162 "membar #Sync"
163 : /* No outputs */
164 : "r" (page | 0x20), "i" (ASI_DMMU_DEMAP));
165}
166
167static inline void spitfire_flush_itlb_nucleus_page(unsigned long page)
168{
169 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
170 "membar #Sync"
171 : /* No outputs */
172 : "r" (page | 0x20), "i" (ASI_IMMU_DEMAP));
173}
174
175/* Cheetah has "all non-locked" tlb flushes. */
176static inline void cheetah_flush_dtlb_all(void)
177{
178 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
179 "membar #Sync"
180 : /* No outputs */
181 : "r" (0x80), "i" (ASI_DMMU_DEMAP));
182}
183
184static inline void cheetah_flush_itlb_all(void)
185{
186 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
187 "membar #Sync"
188 : /* No outputs */
189 : "r" (0x80), "i" (ASI_IMMU_DEMAP));
190}
191
192/* Cheetah has a 4-TLB layout, so direct access is a bit different.
193 * The first two TLBs are fully associative, hold 16 entries, and are
194 * used only for locked and >8K sized translations. One exists for
195 * data accesses and one for instruction accesses.
196 *
197 * The third TLB is for data accesses to 8K non-locked translations, is
198 * 2-way associative, and holds 512 entries. The fourth TLB is for
199 * instruction accesses to 8K non-locked translations, is 2-way
200 * associative, and holds 128 entries.
201 *
202 * Cheetah has a bug where bogus data can be returned from
203 * ASI_{D,I}TLB_DATA_ACCESS loads; doing the load twice fixes
204 * the problem for me. -DaveM
205 */
206static inline unsigned long cheetah_get_ldtlb_data(int entry)
207{
208 unsigned long data;
209
210 __asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
211 "ldxa [%1] %2, %0"
212 : "=r" (data)
213 : "r" ((0 << 16) | (entry << 3)),
214 "i" (ASI_DTLB_DATA_ACCESS));
215
216 return data;
217}
218
219static inline unsigned long cheetah_get_litlb_data(int entry)
220{
221 unsigned long data;
222
223 __asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
224 "ldxa [%1] %2, %0"
225 : "=r" (data)
226 : "r" ((0 << 16) | (entry << 3)),
227 "i" (ASI_ITLB_DATA_ACCESS));
228
229 return data;
230}
231
232static inline unsigned long cheetah_get_ldtlb_tag(int entry)
233{
234 unsigned long tag;
235
236 __asm__ __volatile__("ldxa [%1] %2, %0"
237 : "=r" (tag)
238 : "r" ((0 << 16) | (entry << 3)),
239 "i" (ASI_DTLB_TAG_READ));
240
241 return tag;
242}
243
244static inline unsigned long cheetah_get_litlb_tag(int entry)
245{
246 unsigned long tag;
247
248 __asm__ __volatile__("ldxa [%1] %2, %0"
249 : "=r" (tag)
250 : "r" ((0 << 16) | (entry << 3)),
251 "i" (ASI_ITLB_TAG_READ));
252
253 return tag;
254}
255
256static inline void cheetah_put_ldtlb_data(int entry, unsigned long data)
257{
258 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
259 "membar #Sync"
260 : /* No outputs */
261 : "r" (data),
262 "r" ((0 << 16) | (entry << 3)),
263 "i" (ASI_DTLB_DATA_ACCESS));
264}
265
266static inline void cheetah_put_litlb_data(int entry, unsigned long data)
267{
268 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
269 "membar #Sync"
270 : /* No outputs */
271 : "r" (data),
272 "r" ((0 << 16) | (entry << 3)),
273 "i" (ASI_ITLB_DATA_ACCESS));
274}
275
276static inline unsigned long cheetah_get_dtlb_data(int entry, int tlb)
277{
278 unsigned long data;
279
280 __asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
281 "ldxa [%1] %2, %0"
282 : "=r" (data)
283 : "r" ((tlb << 16) | (entry << 3)), "i" (ASI_DTLB_DATA_ACCESS));
284
285 return data;
286}
287
288static inline unsigned long cheetah_get_dtlb_tag(int entry, int tlb)
289{
290 unsigned long tag;
291
292 __asm__ __volatile__("ldxa [%1] %2, %0"
293 : "=r" (tag)
294 : "r" ((tlb << 16) | (entry << 3)), "i" (ASI_DTLB_TAG_READ));
295 return tag;
296}
297
298static inline void cheetah_put_dtlb_data(int entry, unsigned long data, int tlb)
299{
300 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
301 "membar #Sync"
302 : /* No outputs */
303 : "r" (data),
304 "r" ((tlb << 16) | (entry << 3)),
305 "i" (ASI_DTLB_DATA_ACCESS));
306}
307
308static inline unsigned long cheetah_get_itlb_data(int entry)
309{
310 unsigned long data;
311
312 __asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
313 "ldxa [%1] %2, %0"
314 : "=r" (data)
315 : "r" ((2 << 16) | (entry << 3)),
316 "i" (ASI_ITLB_DATA_ACCESS));
317
318 return data;
319}
320
321static inline unsigned long cheetah_get_itlb_tag(int entry)
322{
323 unsigned long tag;
324
325 __asm__ __volatile__("ldxa [%1] %2, %0"
326 : "=r" (tag)
327 : "r" ((2 << 16) | (entry << 3)), "i" (ASI_ITLB_TAG_READ));
328 return tag;
329}
330
331static inline void cheetah_put_itlb_data(int entry, unsigned long data)
332{
333 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
334 "membar #Sync"
335 : /* No outputs */
336 : "r" (data), "r" ((2 << 16) | (entry << 3)),
337 "i" (ASI_ITLB_DATA_ACCESS));
338}
339
340#endif /* !(__ASSEMBLY__) */
341
342#endif /* !(_SPARC64_SPITFIRE_H) */
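
All of the cheetah_* accessors above compose the diagnostic address handed to ASI_{D,I}TLB_DATA_ACCESS / _TAG_READ the same way: the TLB selector goes at bit 16 and the entry index at bit 3, since each entry occupies 8 bytes of the diagnostic space (index 0 addresses the small locked TLB, index 2 the larger 2-way one, as used by cheetah_get_itlb_data()). A sketch of that encoding follows; the helper name is invented and is not part of the header.

/* (tlb << 16) | (entry << 3): TLB selector in bits 17:16 for tlb = 0..3,
 * entry index scaled by 8 because each TLB entry is one 64-bit word in
 * the diagnostic address space.
 */
static inline unsigned long cheetah_tlb_diag_addr(int tlb, int entry)
{
	return ((unsigned long)tlb << 16) | ((unsigned long)entry << 3);
}

cheetah_get_dtlb_data(entry, tlb), for example, issues two ldxa loads from this address via ASI_DTLB_DATA_ACCESS, the second load working around the bogus-data erratum noted in the comment above.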
diff --git a/include/asm-sparc/sstate.h b/include/asm-sparc/sstate.h
new file mode 100644
index 000000000000..a7c35dbcb281
--- /dev/null
+++ b/include/asm-sparc/sstate.h
@@ -0,0 +1,13 @@
1#ifndef _SPARC64_SSTATE_H
2#define _SPARC64_SSTATE_H
3
4extern void sstate_booting(void);
5extern void sstate_running(void);
6extern void sstate_halt(void);
7extern void sstate_poweroff(void);
8extern void sstate_panic(void);
9extern void sstate_reboot(void);
10
11extern void sun4v_sstate_init(void);
12
13#endif /* _SPARC64_SSTATE_H */
diff --git a/include/asm-sparc/stacktrace.h b/include/asm-sparc/stacktrace.h
new file mode 100644
index 000000000000..6cee39adf6d6
--- /dev/null
+++ b/include/asm-sparc/stacktrace.h
@@ -0,0 +1,6 @@
1#ifndef _SPARC64_STACKTRACE_H
2#define _SPARC64_STACKTRACE_H
3
4extern void stack_trace_flush(void);
5
6#endif /* _SPARC64_STACKTRACE_H */
diff --git a/include/asm-sparc/starfire.h b/include/asm-sparc/starfire.h
new file mode 100644
index 000000000000..07bafd31e33c
--- /dev/null
+++ b/include/asm-sparc/starfire.h
@@ -0,0 +1,21 @@
1/*
2 * starfire.h: Group all starfire specific code together.
3 *
4 * Copyright (C) 2000 Anton Blanchard (anton@samba.org)
5 */
6
7#ifndef _SPARC64_STARFIRE_H
8#define _SPARC64_STARFIRE_H
9
10#ifndef __ASSEMBLY__
11
12extern int this_is_starfire;
13
14extern void check_if_starfire(void);
15extern void starfire_cpu_setup(void);
16extern int starfire_hard_smp_processor_id(void);
17extern void starfire_hookup(int);
18extern unsigned int starfire_translate(unsigned long imap, unsigned int upaid);
19
20#endif
21#endif
diff --git a/include/asm-sparc/stat.h b/include/asm-sparc/stat.h
index 2299e1d5d94c..9fdcaf8c9cd3 100644
--- a/include/asm-sparc/stat.h
+++ b/include/asm-sparc/stat.h
@@ -1,76 +1,8 @@
1#ifndef _SPARC_STAT_H 1#ifndef ___ASM_SPARC_STAT_H
2#define _SPARC_STAT_H 2#define ___ASM_SPARC_STAT_H
3 3#if defined(__sparc__) && defined(__arch64__)
4#include <linux/types.h> 4#include <asm-sparc/stat_64.h>
5 5#else
6struct __old_kernel_stat { 6#include <asm-sparc/stat_32.h>
7 unsigned short st_dev; 7#endif
8 unsigned short st_ino;
9 unsigned short st_mode;
10 unsigned short st_nlink;
11 unsigned short st_uid;
12 unsigned short st_gid;
13 unsigned short st_rdev;
14 unsigned long st_size;
15 unsigned long st_atime;
16 unsigned long st_mtime;
17 unsigned long st_ctime;
18};
19
20struct stat {
21 unsigned short st_dev;
22 unsigned long st_ino;
23 unsigned short st_mode;
24 short st_nlink;
25 unsigned short st_uid;
26 unsigned short st_gid;
27 unsigned short st_rdev;
28 long st_size;
29 long st_atime;
30 unsigned long st_atime_nsec;
31 long st_mtime;
32 unsigned long st_mtime_nsec;
33 long st_ctime;
34 unsigned long st_ctime_nsec;
35 long st_blksize;
36 long st_blocks;
37 unsigned long __unused4[2];
38};
39
40#define STAT_HAVE_NSEC 1
41
42struct stat64 {
43 unsigned long long st_dev;
44
45 unsigned long long st_ino;
46
47 unsigned int st_mode;
48 unsigned int st_nlink;
49
50 unsigned int st_uid;
51 unsigned int st_gid;
52
53 unsigned long long st_rdev;
54
55 unsigned char __pad3[8];
56
57 long long st_size;
58 unsigned int st_blksize;
59
60 unsigned char __pad4[8];
61 unsigned int st_blocks;
62
63 unsigned int st_atime;
64 unsigned int st_atime_nsec;
65
66 unsigned int st_mtime;
67 unsigned int st_mtime_nsec;
68
69 unsigned int st_ctime;
70 unsigned int st_ctime_nsec;
71
72 unsigned int __unused4;
73 unsigned int __unused5;
74};
75
76#endif 8#endif
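
stat.h above, and every remaining common header converted later in this diff (statfs.h, string.h, system.h, ...), is reduced to the same thin wrapper: select the _64 or _32 variant at preprocess time, keyed on __arch64__, which 64-bit sparc compilations define alongside __sparc__. The pattern is shown once below as a template; FOO/foo are placeholders, not a real header.

#ifndef ___ASM_SPARC_FOO_H
#define ___ASM_SPARC_FOO_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/foo_64.h>	/* 64-bit (sparc64) definitions */
#else
#include <asm-sparc/foo_32.h>	/* 32-bit (sparc32) definitions */
#endif
#endif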
diff --git a/include/asm-sparc/stat_32.h b/include/asm-sparc/stat_32.h
new file mode 100644
index 000000000000..2299e1d5d94c
--- /dev/null
+++ b/include/asm-sparc/stat_32.h
@@ -0,0 +1,76 @@
1#ifndef _SPARC_STAT_H
2#define _SPARC_STAT_H
3
4#include <linux/types.h>
5
6struct __old_kernel_stat {
7 unsigned short st_dev;
8 unsigned short st_ino;
9 unsigned short st_mode;
10 unsigned short st_nlink;
11 unsigned short st_uid;
12 unsigned short st_gid;
13 unsigned short st_rdev;
14 unsigned long st_size;
15 unsigned long st_atime;
16 unsigned long st_mtime;
17 unsigned long st_ctime;
18};
19
20struct stat {
21 unsigned short st_dev;
22 unsigned long st_ino;
23 unsigned short st_mode;
24 short st_nlink;
25 unsigned short st_uid;
26 unsigned short st_gid;
27 unsigned short st_rdev;
28 long st_size;
29 long st_atime;
30 unsigned long st_atime_nsec;
31 long st_mtime;
32 unsigned long st_mtime_nsec;
33 long st_ctime;
34 unsigned long st_ctime_nsec;
35 long st_blksize;
36 long st_blocks;
37 unsigned long __unused4[2];
38};
39
40#define STAT_HAVE_NSEC 1
41
42struct stat64 {
43 unsigned long long st_dev;
44
45 unsigned long long st_ino;
46
47 unsigned int st_mode;
48 unsigned int st_nlink;
49
50 unsigned int st_uid;
51 unsigned int st_gid;
52
53 unsigned long long st_rdev;
54
55 unsigned char __pad3[8];
56
57 long long st_size;
58 unsigned int st_blksize;
59
60 unsigned char __pad4[8];
61 unsigned int st_blocks;
62
63 unsigned int st_atime;
64 unsigned int st_atime_nsec;
65
66 unsigned int st_mtime;
67 unsigned int st_mtime_nsec;
68
69 unsigned int st_ctime;
70 unsigned int st_ctime_nsec;
71
72 unsigned int __unused4;
73 unsigned int __unused5;
74};
75
76#endif
diff --git a/include/asm-sparc/stat_64.h b/include/asm-sparc/stat_64.h
new file mode 100644
index 000000000000..9650fdea847f
--- /dev/null
+++ b/include/asm-sparc/stat_64.h
@@ -0,0 +1,47 @@
1#ifndef _SPARC64_STAT_H
2#define _SPARC64_STAT_H
3
4#include <linux/types.h>
5
6struct stat {
7 unsigned st_dev;
8 ino_t st_ino;
9 mode_t st_mode;
10 short st_nlink;
11 uid_t st_uid;
12 gid_t st_gid;
13 unsigned st_rdev;
14 off_t st_size;
15 time_t st_atime;
16 time_t st_mtime;
17 time_t st_ctime;
18 off_t st_blksize;
19 off_t st_blocks;
20 unsigned long __unused4[2];
21};
22
23struct stat64 {
24 unsigned long st_dev;
25 unsigned long st_ino;
26 unsigned long st_nlink;
27
28 unsigned int st_mode;
29 unsigned int st_uid;
30 unsigned int st_gid;
31 unsigned int __pad0;
32
33 unsigned long st_rdev;
34 long st_size;
35 long st_blksize;
36 long st_blocks;
37
38 unsigned long st_atime;
39 unsigned long st_atime_nsec;
40 unsigned long st_mtime;
41 unsigned long st_mtime_nsec;
42 unsigned long st_ctime;
43 unsigned long st_ctime_nsec;
44 long __unused[3];
45};
46
47#endif
diff --git a/include/asm-sparc/statfs.h b/include/asm-sparc/statfs.h
index 304520fa8863..a70cc52e7018 100644
--- a/include/asm-sparc/statfs.h
+++ b/include/asm-sparc/statfs.h
@@ -1,6 +1,8 @@
1#ifndef _SPARC_STATFS_H 1#ifndef ___ASM_SPARC_STATFS_H
2#define _SPARC_STATFS_H 2#define ___ASM_SPARC_STATFS_H
3 3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-generic/statfs.h> 4#include <asm-sparc/statfs_64.h>
5 5#else
6#include <asm-sparc/statfs_32.h>
7#endif
6#endif 8#endif
diff --git a/include/asm-sparc/statfs_32.h b/include/asm-sparc/statfs_32.h
new file mode 100644
index 000000000000..304520fa8863
--- /dev/null
+++ b/include/asm-sparc/statfs_32.h
@@ -0,0 +1,6 @@
1#ifndef _SPARC_STATFS_H
2#define _SPARC_STATFS_H
3
4#include <asm-generic/statfs.h>
5
6#endif
diff --git a/include/asm-sparc/statfs_64.h b/include/asm-sparc/statfs_64.h
new file mode 100644
index 000000000000..79b3c890a5fa
--- /dev/null
+++ b/include/asm-sparc/statfs_64.h
@@ -0,0 +1,54 @@
1#ifndef _SPARC64_STATFS_H
2#define _SPARC64_STATFS_H
3
4#ifndef __KERNEL_STRICT_NAMES
5
6#include <linux/types.h>
7
8typedef __kernel_fsid_t fsid_t;
9
10#endif
11
12struct statfs {
13 long f_type;
14 long f_bsize;
15 long f_blocks;
16 long f_bfree;
17 long f_bavail;
18 long f_files;
19 long f_ffree;
20 __kernel_fsid_t f_fsid;
21 long f_namelen;
22 long f_frsize;
23 long f_spare[5];
24};
25
26struct statfs64 {
27 long f_type;
28 long f_bsize;
29 long f_blocks;
30 long f_bfree;
31 long f_bavail;
32 long f_files;
33 long f_ffree;
34 __kernel_fsid_t f_fsid;
35 long f_namelen;
36 long f_frsize;
37 long f_spare[5];
38};
39
40struct compat_statfs64 {
41 __u32 f_type;
42 __u32 f_bsize;
43 __u64 f_blocks;
44 __u64 f_bfree;
45 __u64 f_bavail;
46 __u64 f_files;
47 __u64 f_ffree;
48 __kernel_fsid_t f_fsid;
49 __u32 f_namelen;
50 __u32 f_frsize;
51 __u32 f_spare[5];
52};
53
54#endif
diff --git a/include/asm-sparc/string.h b/include/asm-sparc/string.h
index 8d7c0dd4f299..14c04c7697a5 100644
--- a/include/asm-sparc/string.h
+++ b/include/asm-sparc/string.h
@@ -1,205 +1,8 @@
1/* 1#ifndef ___ASM_SPARC_STRING_H
2 * string.h: External definitions for optimized assembly string 2#define ___ASM_SPARC_STRING_H
3 * routines for the Linux Kernel. 3#if defined(__sparc__) && defined(__arch64__)
4 * 4#include <asm-sparc/string_64.h>
5 * Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu) 5#else
6 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) 6#include <asm-sparc/string_32.h>
7 */ 7#endif
8 8#endif
9#ifndef __SPARC_STRING_H__
10#define __SPARC_STRING_H__
11
12#include <asm/page.h>
13
14/* Really, userland/ksyms should not see any of this stuff. */
15
16#ifdef __KERNEL__
17
18extern void __memmove(void *,const void *,__kernel_size_t);
19extern __kernel_size_t __memcpy(void *,const void *,__kernel_size_t);
20extern __kernel_size_t __memset(void *,int,__kernel_size_t);
21
22#ifndef EXPORT_SYMTAB_STROPS
23
24/* First the mem*() things. */
25#define __HAVE_ARCH_MEMMOVE
26#undef memmove
27#define memmove(_to, _from, _n) \
28({ \
29 void *_t = (_to); \
30 __memmove(_t, (_from), (_n)); \
31 _t; \
32})
33
34#define __HAVE_ARCH_MEMCPY
35
36static inline void *__constant_memcpy(void *to, const void *from, __kernel_size_t n)
37{
38 extern void __copy_1page(void *, const void *);
39
40 if(n <= 32) {
41 __builtin_memcpy(to, from, n);
42 } else if (((unsigned int) to & 7) != 0) {
43 /* Destination is not aligned on the double-word boundary */
44 __memcpy(to, from, n);
45 } else {
46 switch(n) {
47 case PAGE_SIZE:
48 __copy_1page(to, from);
49 break;
50 default:
51 __memcpy(to, from, n);
52 break;
53 }
54 }
55 return to;
56}
57
58static inline void *__nonconstant_memcpy(void *to, const void *from, __kernel_size_t n)
59{
60 __memcpy(to, from, n);
61 return to;
62}
63
64#undef memcpy
65#define memcpy(t, f, n) \
66(__builtin_constant_p(n) ? \
67 __constant_memcpy((t),(f),(n)) : \
68 __nonconstant_memcpy((t),(f),(n)))
69
70#define __HAVE_ARCH_MEMSET
71
72static inline void *__constant_c_and_count_memset(void *s, char c, __kernel_size_t count)
73{
74 extern void bzero_1page(void *);
75 extern __kernel_size_t __bzero(void *, __kernel_size_t);
76
77 if(!c) {
78 if(count == PAGE_SIZE)
79 bzero_1page(s);
80 else
81 __bzero(s, count);
82 } else {
83 __memset(s, c, count);
84 }
85 return s;
86}
87
88static inline void *__constant_c_memset(void *s, char c, __kernel_size_t count)
89{
90 extern __kernel_size_t __bzero(void *, __kernel_size_t);
91
92 if(!c)
93 __bzero(s, count);
94 else
95 __memset(s, c, count);
96 return s;
97}
98
99static inline void *__nonconstant_memset(void *s, char c, __kernel_size_t count)
100{
101 __memset(s, c, count);
102 return s;
103}
104
105#undef memset
106#define memset(s, c, count) \
107(__builtin_constant_p(c) ? (__builtin_constant_p(count) ? \
108 __constant_c_and_count_memset((s), (c), (count)) : \
109 __constant_c_memset((s), (c), (count))) \
110 : __nonconstant_memset((s), (c), (count)))
111
112#define __HAVE_ARCH_MEMSCAN
113
114#undef memscan
115#define memscan(__arg0, __char, __arg2) \
116({ \
117 extern void *__memscan_zero(void *, size_t); \
118 extern void *__memscan_generic(void *, int, size_t); \
119 void *__retval, *__addr = (__arg0); \
120 size_t __size = (__arg2); \
121 \
122 if(__builtin_constant_p(__char) && !(__char)) \
123 __retval = __memscan_zero(__addr, __size); \
124 else \
125 __retval = __memscan_generic(__addr, (__char), __size); \
126 \
127 __retval; \
128})
129
130#define __HAVE_ARCH_MEMCMP
131extern int memcmp(const void *,const void *,__kernel_size_t);
132
133/* Now the str*() stuff... */
134#define __HAVE_ARCH_STRLEN
135extern __kernel_size_t strlen(const char *);
136
137#define __HAVE_ARCH_STRNCMP
138
139extern int __strncmp(const char *, const char *, __kernel_size_t);
140
141static inline int __constant_strncmp(const char *src, const char *dest, __kernel_size_t count)
142{
143 register int retval;
144 switch(count) {
145 case 0: return 0;
146 case 1: return (src[0] - dest[0]);
147 case 2: retval = (src[0] - dest[0]);
148 if(!retval && src[0])
149 retval = (src[1] - dest[1]);
150 return retval;
151 case 3: retval = (src[0] - dest[0]);
152 if(!retval && src[0]) {
153 retval = (src[1] - dest[1]);
154 if(!retval && src[1])
155 retval = (src[2] - dest[2]);
156 }
157 return retval;
158 case 4: retval = (src[0] - dest[0]);
159 if(!retval && src[0]) {
160 retval = (src[1] - dest[1]);
161 if(!retval && src[1]) {
162 retval = (src[2] - dest[2]);
163 if (!retval && src[2])
164 retval = (src[3] - dest[3]);
165 }
166 }
167 return retval;
168 case 5: retval = (src[0] - dest[0]);
169 if(!retval && src[0]) {
170 retval = (src[1] - dest[1]);
171 if(!retval && src[1]) {
172 retval = (src[2] - dest[2]);
173 if (!retval && src[2]) {
174 retval = (src[3] - dest[3]);
175 if (!retval && src[3])
176 retval = (src[4] - dest[4]);
177 }
178 }
179 }
180 return retval;
181 default:
182 retval = (src[0] - dest[0]);
183 if(!retval && src[0]) {
184 retval = (src[1] - dest[1]);
185 if(!retval && src[1]) {
186 retval = (src[2] - dest[2]);
187 if(!retval && src[2])
188 retval = __strncmp(src+3,dest+3,count-3);
189 }
190 }
191 return retval;
192 }
193}
194
195#undef strncmp
196#define strncmp(__arg0, __arg1, __arg2) \
197(__builtin_constant_p(__arg2) ? \
198 __constant_strncmp(__arg0, __arg1, __arg2) : \
199 __strncmp(__arg0, __arg1, __arg2))
200
201#endif /* !EXPORT_SYMTAB_STROPS */
202
203#endif /* __KERNEL__ */
204
205#endif /* !(__SPARC_STRING_H__) */
diff --git a/include/asm-sparc/string_32.h b/include/asm-sparc/string_32.h
new file mode 100644
index 000000000000..6c5fddb7e6b5
--- /dev/null
+++ b/include/asm-sparc/string_32.h
@@ -0,0 +1,205 @@
1/*
2 * string.h: External definitions for optimized assembly string
3 * routines for the Linux Kernel.
4 *
5 * Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7 */
8
9#ifndef __SPARC_STRING_H__
10#define __SPARC_STRING_H__
11
12#include <asm/page.h>
13
14/* Really, userland/ksyms should not see any of this stuff. */
15
16#ifdef __KERNEL__
17
18extern void __memmove(void *,const void *,__kernel_size_t);
19extern __kernel_size_t __memcpy(void *,const void *,__kernel_size_t);
20extern __kernel_size_t __memset(void *,int,__kernel_size_t);
21
22#ifndef EXPORT_SYMTAB_STROPS
23
24/* First the mem*() things. */
25#define __HAVE_ARCH_MEMMOVE
26#undef memmove
27#define memmove(_to, _from, _n) \
28({ \
29 void *_t = (_to); \
30 __memmove(_t, (_from), (_n)); \
31 _t; \
32})
33
34#define __HAVE_ARCH_MEMCPY
35
36static inline void *__constant_memcpy(void *to, const void *from, __kernel_size_t n)
37{
38 extern void __copy_1page(void *, const void *);
39
40 if(n <= 32) {
41 __builtin_memcpy(to, from, n);
42 } else if (((unsigned int) to & 7) != 0) {
43 /* Destination is not aligned on the double-word boundary */
44 __memcpy(to, from, n);
45 } else {
46 switch(n) {
47 case PAGE_SIZE:
48 __copy_1page(to, from);
49 break;
50 default:
51 __memcpy(to, from, n);
52 break;
53 }
54 }
55 return to;
56}
57
58static inline void *__nonconstant_memcpy(void *to, const void *from, __kernel_size_t n)
59{
60 __memcpy(to, from, n);
61 return to;
62}
63
64#undef memcpy
65#define memcpy(t, f, n) \
66(__builtin_constant_p(n) ? \
67 __constant_memcpy((t),(f),(n)) : \
68 __nonconstant_memcpy((t),(f),(n)))
69
70#define __HAVE_ARCH_MEMSET
71
72static inline void *__constant_c_and_count_memset(void *s, char c, __kernel_size_t count)
73{
74 extern void bzero_1page(void *);
75 extern __kernel_size_t __bzero(void *, __kernel_size_t);
76
77 if(!c) {
78 if(count == PAGE_SIZE)
79 bzero_1page(s);
80 else
81 __bzero(s, count);
82 } else {
83 __memset(s, c, count);
84 }
85 return s;
86}
87
88static inline void *__constant_c_memset(void *s, char c, __kernel_size_t count)
89{
90 extern __kernel_size_t __bzero(void *, __kernel_size_t);
91
92 if(!c)
93 __bzero(s, count);
94 else
95 __memset(s, c, count);
96 return s;
97}
98
99static inline void *__nonconstant_memset(void *s, char c, __kernel_size_t count)
100{
101 __memset(s, c, count);
102 return s;
103}
104
105#undef memset
106#define memset(s, c, count) \
107(__builtin_constant_p(c) ? (__builtin_constant_p(count) ? \
108 __constant_c_and_count_memset((s), (c), (count)) : \
109 __constant_c_memset((s), (c), (count))) \
110 : __nonconstant_memset((s), (c), (count)))
111
112#define __HAVE_ARCH_MEMSCAN
113
114#undef memscan
115#define memscan(__arg0, __char, __arg2) \
116({ \
117 extern void *__memscan_zero(void *, size_t); \
118 extern void *__memscan_generic(void *, int, size_t); \
119 void *__retval, *__addr = (__arg0); \
120 size_t __size = (__arg2); \
121 \
122 if(__builtin_constant_p(__char) && !(__char)) \
123 __retval = __memscan_zero(__addr, __size); \
124 else \
125 __retval = __memscan_generic(__addr, (__char), __size); \
126 \
127 __retval; \
128})
129
130#define __HAVE_ARCH_MEMCMP
131extern int memcmp(const void *,const void *,__kernel_size_t);
132
133/* Now the str*() stuff... */
134#define __HAVE_ARCH_STRLEN
135extern __kernel_size_t strlen(const char *);
136
137#define __HAVE_ARCH_STRNCMP
138
139extern int __strncmp(const char *, const char *, __kernel_size_t);
140
141static inline int __constant_strncmp(const char *src, const char *dest, __kernel_size_t count)
142{
143 register int retval;
144 switch(count) {
145 case 0: return 0;
146 case 1: return (src[0] - dest[0]);
147 case 2: retval = (src[0] - dest[0]);
148 if(!retval && src[0])
149 retval = (src[1] - dest[1]);
150 return retval;
151 case 3: retval = (src[0] - dest[0]);
152 if(!retval && src[0]) {
153 retval = (src[1] - dest[1]);
154 if(!retval && src[1])
155 retval = (src[2] - dest[2]);
156 }
157 return retval;
158 case 4: retval = (src[0] - dest[0]);
159 if(!retval && src[0]) {
160 retval = (src[1] - dest[1]);
161 if(!retval && src[1]) {
162 retval = (src[2] - dest[2]);
163 if (!retval && src[2])
164 retval = (src[3] - dest[3]);
165 }
166 }
167 return retval;
168 case 5: retval = (src[0] - dest[0]);
169 if(!retval && src[0]) {
170 retval = (src[1] - dest[1]);
171 if(!retval && src[1]) {
172 retval = (src[2] - dest[2]);
173 if (!retval && src[2]) {
174 retval = (src[3] - dest[3]);
175 if (!retval && src[3])
176 retval = (src[4] - dest[4]);
177 }
178 }
179 }
180 return retval;
181 default:
182 retval = (src[0] - dest[0]);
183 if(!retval && src[0]) {
184 retval = (src[1] - dest[1]);
185 if(!retval && src[1]) {
186 retval = (src[2] - dest[2]);
187 if(!retval && src[2])
188 retval = __strncmp(src+3,dest+3,count-3);
189 }
190 }
191 return retval;
192 }
193}
194
195#undef strncmp
196#define strncmp(__arg0, __arg1, __arg2) \
197(__builtin_constant_p(__arg2) ? \
198 __constant_strncmp(__arg0, __arg1, __arg2) : \
199 __strncmp(__arg0, __arg1, __arg2))
200
201#endif /* !EXPORT_SYMTAB_STROPS */
202
203#endif /* __KERNEL__ */
204
205#endif /* !(__SPARC_STRING_H__) */
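
The memcpy/memset/memscan/strncmp macros in string_32.h all rely on the same trick: __builtin_constant_p routes calls with compile-time-constant sizes (or a constant zero fill byte / scan character) to specialized inline or page-sized helpers, while everything else falls through to the out-of-line assembly routines. A compact sketch of that dispatch idiom follows; demo_fill and __demo_fill_rt are invented names, not part of the sparc headers.

#include <stddef.h>
#include <string.h>

/* stands in for the out-of-line __memset/__bzero assembly routines */
static void *__demo_fill_rt(void *s, int c, size_t n)
{
	return memset(s, c, n);
}

/* when n is a literal, the compiler can expand this fully inline */
static inline void *__demo_fill_const(void *s, int c, size_t n)
{
	return memset(s, c, n);
}

#define demo_fill(s, c, n)				\
	(__builtin_constant_p(n) ?			\
		__demo_fill_const((s), (c), (n)) :	\
		__demo_fill_rt((s), (c), (n)))

The header's memset() macro layers a second __builtin_constant_p test on the fill byte, so that a constant zero fill of exactly PAGE_SIZE bytes can be routed to bzero_1page().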
diff --git a/include/asm-sparc/string_64.h b/include/asm-sparc/string_64.h
new file mode 100644
index 000000000000..43161f2d17eb
--- /dev/null
+++ b/include/asm-sparc/string_64.h
@@ -0,0 +1,83 @@
1/*
2 * string.h: External definitions for optimized assembly string
3 * routines for the Linux Kernel.
4 *
5 * Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1996,1997,1999 Jakub Jelinek (jakub@redhat.com)
7 */
8
9#ifndef __SPARC64_STRING_H__
10#define __SPARC64_STRING_H__
11
12/* Really, userland/ksyms should not see any of this stuff. */
13
14#ifdef __KERNEL__
15
16#include <asm/asi.h>
17
18extern void *__memset(void *,int,__kernel_size_t);
19
20#ifndef EXPORT_SYMTAB_STROPS
21
22/* First the mem*() things. */
23#define __HAVE_ARCH_MEMMOVE
24extern void *memmove(void *, const void *, __kernel_size_t);
25
26#define __HAVE_ARCH_MEMCPY
27extern void *memcpy(void *, const void *, __kernel_size_t);
28
29#define __HAVE_ARCH_MEMSET
30extern void *__builtin_memset(void *,int,__kernel_size_t);
31
32static inline void *__constant_memset(void *s, int c, __kernel_size_t count)
33{
34 extern __kernel_size_t __bzero(void *, __kernel_size_t);
35
36 if (!c) {
37 __bzero(s, count);
38 return s;
39 } else
40 return __memset(s, c, count);
41}
42
43#undef memset
44#define memset(s, c, count) \
45((__builtin_constant_p(count) && (count) <= 32) ? \
46 __builtin_memset((s), (c), (count)) : \
47 (__builtin_constant_p(c) ? \
48 __constant_memset((s), (c), (count)) : \
49 __memset((s), (c), (count))))
50
51#define __HAVE_ARCH_MEMSCAN
52
53#undef memscan
54#define memscan(__arg0, __char, __arg2) \
55({ \
56 extern void *__memscan_zero(void *, size_t); \
57 extern void *__memscan_generic(void *, int, size_t); \
58 void *__retval, *__addr = (__arg0); \
59 size_t __size = (__arg2); \
60 \
61 if(__builtin_constant_p(__char) && !(__char)) \
62 __retval = __memscan_zero(__addr, __size); \
63 else \
64 __retval = __memscan_generic(__addr, (__char), __size); \
65 \
66 __retval; \
67})
68
69#define __HAVE_ARCH_MEMCMP
70extern int memcmp(const void *,const void *,__kernel_size_t);
71
72/* Now the str*() stuff... */
73#define __HAVE_ARCH_STRLEN
74extern __kernel_size_t strlen(const char *);
75
76#define __HAVE_ARCH_STRNCMP
77extern int strncmp(const char *, const char *, __kernel_size_t);
78
79#endif /* !EXPORT_SYMTAB_STROPS */
80
81#endif /* __KERNEL__ */
82
83#endif /* !(__SPARC64_STRING_H__) */
diff --git a/include/asm-sparc/syscalls.h b/include/asm-sparc/syscalls.h
new file mode 100644
index 000000000000..45a43f637a14
--- /dev/null
+++ b/include/asm-sparc/syscalls.h
@@ -0,0 +1,13 @@
1#ifndef _SPARC64_SYSCALLS_H
2#define _SPARC64_SYSCALLS_H
3
4struct pt_regs;
5
6extern asmlinkage long sparc_do_fork(unsigned long clone_flags,
7 unsigned long stack_start,
8 struct pt_regs *regs,
9 unsigned long stack_size);
10
11extern asmlinkage int sparc_execve(struct pt_regs *regs);
12
13#endif /* _SPARC64_SYSCALLS_H */
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h
index b4b024445fc9..15e2a3bc4f61 100644
--- a/include/asm-sparc/system.h
+++ b/include/asm-sparc/system.h
@@ -1,288 +1,8 @@
1#ifndef __SPARC_SYSTEM_H 1#ifndef ___ASM_SPARC_SYSTEM_H
2#define __SPARC_SYSTEM_H 2#define ___ASM_SPARC_SYSTEM_H
3 3#if defined(__sparc__) && defined(__arch64__)
4#include <linux/kernel.h> 4#include <asm-sparc/system_64.h>
5#include <linux/threads.h> /* NR_CPUS */
6#include <linux/thread_info.h>
7
8#include <asm/page.h>
9#include <asm/psr.h>
10#include <asm/ptrace.h>
11#include <asm/btfixup.h>
12#include <asm/smp.h>
13
14#ifndef __ASSEMBLY__
15
16#include <linux/irqflags.h>
17
18/*
19 * Sparc (general) CPU types
20 */
21enum sparc_cpu {
22 sun4 = 0x00,
23 sun4c = 0x01,
24 sun4m = 0x02,
25 sun4d = 0x03,
26 sun4e = 0x04,
27 sun4u = 0x05, /* V8 ploos ploos */
28 sun_unknown = 0x06,
29 ap1000 = 0x07, /* almost a sun4m */
30};
31
32/* Really, userland should not be looking at any of this... */
33#ifdef __KERNEL__
34
35extern enum sparc_cpu sparc_cpu_model;
36
37#ifndef CONFIG_SUN4
38#define ARCH_SUN4C_SUN4 (sparc_cpu_model==sun4c)
39#define ARCH_SUN4 0
40#else 5#else
41#define ARCH_SUN4C_SUN4 1 6#include <asm-sparc/system_32.h>
42#define ARCH_SUN4 1
43#endif 7#endif
44
45#define SUN4M_NCPUS 4 /* Architectural limit of sun4m. */
46
47extern char reboot_command[];
48
49extern struct thread_info *current_set[NR_CPUS];
50
51extern unsigned long empty_bad_page;
52extern unsigned long empty_bad_page_table;
53extern unsigned long empty_zero_page;
54
55extern void sun_do_break(void);
56extern int serial_console;
57extern int stop_a_enabled;
58
59static inline int con_is_present(void)
60{
61 return serial_console ? 0 : 1;
62}
63
64/* When a context switch happens we must flush all user windows so that
65 * the windows of the current process are flushed onto its stack. This
66 * way the windows are all clean for the next process and the stack
67 * frames are up to date.
68 */
69extern void flush_user_windows(void);
70extern void kill_user_windows(void);
71extern void synchronize_user_stack(void);
72extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
73 void *fpqueue, unsigned long *fpqdepth);
74
75#ifdef CONFIG_SMP
76#define SWITCH_ENTER(prv) \
77 do { \
78 if (test_tsk_thread_flag(prv, TIF_USEDFPU)) { \
79 put_psr(get_psr() | PSR_EF); \
80 fpsave(&(prv)->thread.float_regs[0], &(prv)->thread.fsr, \
81 &(prv)->thread.fpqueue[0], &(prv)->thread.fpqdepth); \
82 clear_tsk_thread_flag(prv, TIF_USEDFPU); \
83 (prv)->thread.kregs->psr &= ~PSR_EF; \
84 } \
85 } while(0)
86
87#define SWITCH_DO_LAZY_FPU(next) /* */
88#else
89#define SWITCH_ENTER(prv) /* */
90#define SWITCH_DO_LAZY_FPU(nxt) \
91 do { \
92 if (last_task_used_math != (nxt)) \
93 (nxt)->thread.kregs->psr&=~PSR_EF; \
94 } while(0)
95#endif
96
97extern void flushw_all(void);
98
99/*
100 * Flush windows so that the VM switch which follows
101 * would not pull the stack from under us.
102 *
103 * SWITCH_ENTER and SWITCH_DO_LAZY_FPU do not work yet (e.g. SMP does not work)
104 * XXX WTF is the above comment? Found in late teen 2.4.x.
105 */
106#define prepare_arch_switch(next) do { \
107 __asm__ __volatile__( \
108 ".globl\tflush_patch_switch\nflush_patch_switch:\n\t" \
109 "save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
110 "save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
111 "save %sp, -0x40, %sp\n\t" \
112 "restore; restore; restore; restore; restore; restore; restore"); \
113} while(0)
114
115 /* Much care has gone into this code; do not touch it.
116 *
117 * We need to load up regs l0/l1 for the newly forked child
118 * case because the trap return path relies on those registers
119 * holding certain values; gcc is told that they are clobbered.
120 * Gcc needs registers for 3 values in and 1 value out, so we
121 * clobber every non-fixed-usage register besides l2/l3/o4/o5. -DaveM
122 *
123 * Hey Dave, that do not touch sign is too much of an incentive
124 * - Anton & Pete
125 */
126#define switch_to(prev, next, last) do { \
127 SWITCH_ENTER(prev); \
128 SWITCH_DO_LAZY_FPU(next); \
129 cpu_set(smp_processor_id(), next->active_mm->cpu_vm_mask); \
130 __asm__ __volatile__( \
131 "sethi %%hi(here - 0x8), %%o7\n\t" \
132 "mov %%g6, %%g3\n\t" \
133 "or %%o7, %%lo(here - 0x8), %%o7\n\t" \
134 "rd %%psr, %%g4\n\t" \
135 "std %%sp, [%%g6 + %4]\n\t" \
136 "rd %%wim, %%g5\n\t" \
137 "wr %%g4, 0x20, %%psr\n\t" \
138 "nop\n\t" \
139 "std %%g4, [%%g6 + %3]\n\t" \
140 "ldd [%2 + %3], %%g4\n\t" \
141 "mov %2, %%g6\n\t" \
142 ".globl patchme_store_new_current\n" \
143"patchme_store_new_current:\n\t" \
144 "st %2, [%1]\n\t" \
145 "wr %%g4, 0x20, %%psr\n\t" \
146 "nop\n\t" \
147 "nop\n\t" \
148 "nop\n\t" /* LEON needs all 3 nops: load to %sp depends on CWP. */ \
149 "ldd [%%g6 + %4], %%sp\n\t" \
150 "wr %%g5, 0x0, %%wim\n\t" \
151 "ldd [%%sp + 0x00], %%l0\n\t" \
152 "ldd [%%sp + 0x38], %%i6\n\t" \
153 "wr %%g4, 0x0, %%psr\n\t" \
154 "nop\n\t" \
155 "nop\n\t" \
156 "jmpl %%o7 + 0x8, %%g0\n\t" \
157 " ld [%%g3 + %5], %0\n\t" \
158 "here:\n" \
159 : "=&r" (last) \
160 : "r" (&(current_set[hard_smp_processor_id()])), \
161 "r" (task_thread_info(next)), \
162 "i" (TI_KPSR), \
163 "i" (TI_KSP), \
164 "i" (TI_TASK) \
165 : "g1", "g2", "g3", "g4", "g5", "g7", \
166 "l0", "l1", "l3", "l4", "l5", "l6", "l7", \
167 "i0", "i1", "i2", "i3", "i4", "i5", \
168 "o0", "o1", "o2", "o3", "o7"); \
169 } while(0)
170
171/* XXX Change this if we ever use a PSO mode kernel. */
172#define mb() __asm__ __volatile__ ("" : : : "memory")
173#define rmb() mb()
174#define wmb() mb()
175#define read_barrier_depends() do { } while(0)
176#define set_mb(__var, __value) do { __var = __value; mb(); } while(0)
177#define smp_mb() __asm__ __volatile__("":::"memory")
178#define smp_rmb() __asm__ __volatile__("":::"memory")
179#define smp_wmb() __asm__ __volatile__("":::"memory")
180#define smp_read_barrier_depends() do { } while(0)
181
182#define nop() __asm__ __volatile__ ("nop")
183
184/* This has special calling conventions */
185#ifndef CONFIG_SMP
186BTFIXUPDEF_CALL(void, ___xchg32, void)
187#endif
188
189static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
190{
191#ifdef CONFIG_SMP
192 __asm__ __volatile__("swap [%2], %0"
193 : "=&r" (val)
194 : "0" (val), "r" (m)
195 : "memory");
196 return val;
197#else
198 register unsigned long *ptr asm("g1");
199 register unsigned long ret asm("g2");
200
201 ptr = (unsigned long *) m;
202 ret = val;
203
204 /* Note: this is magic and the nop there is
205 really needed. */
206 __asm__ __volatile__(
207 "mov %%o7, %%g4\n\t"
208 "call ___f____xchg32\n\t"
209 " nop\n\t"
210 : "=&r" (ret)
211 : "0" (ret), "r" (ptr)
212 : "g3", "g4", "g7", "memory", "cc");
213
214 return ret;
215#endif 8#endif
216}
217
218#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
219
220extern void __xchg_called_with_bad_pointer(void);
221
222static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
223{
224 switch (size) {
225 case 4:
226 return xchg_u32(ptr, x);
227 };
228 __xchg_called_with_bad_pointer();
229 return x;
230}
231
232/* Emulate cmpxchg() the same way we emulate atomics,
233 * by hashing the object address and indexing into an array
234 * of spinlocks to get a bit of performance...
235 *
236 * See arch/sparc/lib/atomic32.c for implementation.
237 *
238 * Cribbed from <asm-parisc/atomic.h>
239 */
240#define __HAVE_ARCH_CMPXCHG 1
241
242/* bug catcher for when unsupported size is used - won't link */
243extern void __cmpxchg_called_with_bad_pointer(void);
244/* we only need to support cmpxchg of a u32 on sparc */
245extern unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
246
247/* don't worry...optimizer will get rid of most of this */
248static inline unsigned long
249__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
250{
251 switch (size) {
252 case 4:
253 return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_);
254 default:
255 __cmpxchg_called_with_bad_pointer();
256 break;
257 }
258 return old;
259}
260
261#define cmpxchg(ptr, o, n) \
262({ \
263 __typeof__(*(ptr)) _o_ = (o); \
264 __typeof__(*(ptr)) _n_ = (n); \
265 (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
266 (unsigned long)_n_, sizeof(*(ptr))); \
267})
268
269#include <asm-generic/cmpxchg-local.h>
270
271/*
272 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
273 * them available.
274 */
275#define cmpxchg_local(ptr, o, n) \
276 ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
277 (unsigned long)(n), sizeof(*(ptr))))
278#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
279
280extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));
281
282#endif /* __KERNEL__ */
283
284#endif /* __ASSEMBLY__ */
285
286#define arch_align_stack(x) (x)
287
288#endif /* !(__SPARC_SYSTEM_H) */
diff --git a/include/asm-sparc/system_32.h b/include/asm-sparc/system_32.h
new file mode 100644
index 000000000000..b4b024445fc9
--- /dev/null
+++ b/include/asm-sparc/system_32.h
@@ -0,0 +1,288 @@
1#ifndef __SPARC_SYSTEM_H
2#define __SPARC_SYSTEM_H
3
4#include <linux/kernel.h>
5#include <linux/threads.h> /* NR_CPUS */
6#include <linux/thread_info.h>
7
8#include <asm/page.h>
9#include <asm/psr.h>
10#include <asm/ptrace.h>
11#include <asm/btfixup.h>
12#include <asm/smp.h>
13
14#ifndef __ASSEMBLY__
15
16#include <linux/irqflags.h>
17
18/*
19 * Sparc (general) CPU types
20 */
21enum sparc_cpu {
22 sun4 = 0x00,
23 sun4c = 0x01,
24 sun4m = 0x02,
25 sun4d = 0x03,
26 sun4e = 0x04,
27 sun4u = 0x05, /* V8 ploos ploos */
28 sun_unknown = 0x06,
29 ap1000 = 0x07, /* almost a sun4m */
30};
31
32/* Really, userland should not be looking at any of this... */
33#ifdef __KERNEL__
34
35extern enum sparc_cpu sparc_cpu_model;
36
37#ifndef CONFIG_SUN4
38#define ARCH_SUN4C_SUN4 (sparc_cpu_model==sun4c)
39#define ARCH_SUN4 0
40#else
41#define ARCH_SUN4C_SUN4 1
42#define ARCH_SUN4 1
43#endif
44
45#define SUN4M_NCPUS 4 /* Architectural limit of sun4m. */
46
47extern char reboot_command[];
48
49extern struct thread_info *current_set[NR_CPUS];
50
51extern unsigned long empty_bad_page;
52extern unsigned long empty_bad_page_table;
53extern unsigned long empty_zero_page;
54
55extern void sun_do_break(void);
56extern int serial_console;
57extern int stop_a_enabled;
58
59static inline int con_is_present(void)
60{
61 return serial_console ? 0 : 1;
62}
63
64/* When a context switch happens we must flush all user windows so that
65 * the windows of the current process are flushed onto its stack. This
66 * way the windows are all clean for the next process and the stack
67 * frames are up to date.
68 */
69extern void flush_user_windows(void);
70extern void kill_user_windows(void);
71extern void synchronize_user_stack(void);
72extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
73 void *fpqueue, unsigned long *fpqdepth);
74
75#ifdef CONFIG_SMP
76#define SWITCH_ENTER(prv) \
77 do { \
78 if (test_tsk_thread_flag(prv, TIF_USEDFPU)) { \
79 put_psr(get_psr() | PSR_EF); \
80 fpsave(&(prv)->thread.float_regs[0], &(prv)->thread.fsr, \
81 &(prv)->thread.fpqueue[0], &(prv)->thread.fpqdepth); \
82 clear_tsk_thread_flag(prv, TIF_USEDFPU); \
83 (prv)->thread.kregs->psr &= ~PSR_EF; \
84 } \
85 } while(0)
86
87#define SWITCH_DO_LAZY_FPU(next) /* */
88#else
89#define SWITCH_ENTER(prv) /* */
90#define SWITCH_DO_LAZY_FPU(nxt) \
91 do { \
92 if (last_task_used_math != (nxt)) \
93 (nxt)->thread.kregs->psr&=~PSR_EF; \
94 } while(0)
95#endif
96
97extern void flushw_all(void);
98
99/*
100 * Flush windows so that the VM switch which follows
101 * would not pull the stack from under us.
102 *
103 * SWITCH_ENTER and SWITCH_DO_LAZY_FPU do not work yet (e.g. SMP does not work)
104 * XXX WTF is the above comment? Found in late teen 2.4.x.
105 */
106#define prepare_arch_switch(next) do { \
107 __asm__ __volatile__( \
108 ".globl\tflush_patch_switch\nflush_patch_switch:\n\t" \
109 "save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
110 "save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
111 "save %sp, -0x40, %sp\n\t" \
112 "restore; restore; restore; restore; restore; restore; restore"); \
113} while(0)
114
115 /* Much care has gone into this code; do not touch it.
116 *
117 * We need to load up regs l0/l1 for the newly forked child
118 * case because the trap return path relies on those registers
119 * holding certain values; gcc is told that they are clobbered.
120 * Gcc needs registers for 3 values in and 1 value out, so we
121 * clobber every non-fixed-usage register besides l2/l3/o4/o5. -DaveM
122 *
123 * Hey Dave, that do not touch sign is too much of an incentive
124 * - Anton & Pete
125 */
126#define switch_to(prev, next, last) do { \
127 SWITCH_ENTER(prev); \
128 SWITCH_DO_LAZY_FPU(next); \
129 cpu_set(smp_processor_id(), next->active_mm->cpu_vm_mask); \
130 __asm__ __volatile__( \
131 "sethi %%hi(here - 0x8), %%o7\n\t" \
132 "mov %%g6, %%g3\n\t" \
133 "or %%o7, %%lo(here - 0x8), %%o7\n\t" \
134 "rd %%psr, %%g4\n\t" \
135 "std %%sp, [%%g6 + %4]\n\t" \
136 "rd %%wim, %%g5\n\t" \
137 "wr %%g4, 0x20, %%psr\n\t" \
138 "nop\n\t" \
139 "std %%g4, [%%g6 + %3]\n\t" \
140 "ldd [%2 + %3], %%g4\n\t" \
141 "mov %2, %%g6\n\t" \
142 ".globl patchme_store_new_current\n" \
143"patchme_store_new_current:\n\t" \
144 "st %2, [%1]\n\t" \
145 "wr %%g4, 0x20, %%psr\n\t" \
146 "nop\n\t" \
147 "nop\n\t" \
148 "nop\n\t" /* LEON needs all 3 nops: load to %sp depends on CWP. */ \
149 "ldd [%%g6 + %4], %%sp\n\t" \
150 "wr %%g5, 0x0, %%wim\n\t" \
151 "ldd [%%sp + 0x00], %%l0\n\t" \
152 "ldd [%%sp + 0x38], %%i6\n\t" \
153 "wr %%g4, 0x0, %%psr\n\t" \
154 "nop\n\t" \
155 "nop\n\t" \
156 "jmpl %%o7 + 0x8, %%g0\n\t" \
157 " ld [%%g3 + %5], %0\n\t" \
158 "here:\n" \
159 : "=&r" (last) \
160 : "r" (&(current_set[hard_smp_processor_id()])), \
161 "r" (task_thread_info(next)), \
162 "i" (TI_KPSR), \
163 "i" (TI_KSP), \
164 "i" (TI_TASK) \
165 : "g1", "g2", "g3", "g4", "g5", "g7", \
166 "l0", "l1", "l3", "l4", "l5", "l6", "l7", \
167 "i0", "i1", "i2", "i3", "i4", "i5", \
168 "o0", "o1", "o2", "o3", "o7"); \
169 } while(0)
170
171/* XXX Change this if we ever use a PSO mode kernel. */
172#define mb() __asm__ __volatile__ ("" : : : "memory")
173#define rmb() mb()
174#define wmb() mb()
175#define read_barrier_depends() do { } while(0)
176#define set_mb(__var, __value) do { __var = __value; mb(); } while(0)
177#define smp_mb() __asm__ __volatile__("":::"memory")
178#define smp_rmb() __asm__ __volatile__("":::"memory")
179#define smp_wmb() __asm__ __volatile__("":::"memory")
180#define smp_read_barrier_depends() do { } while(0)
181
182#define nop() __asm__ __volatile__ ("nop")
183
184/* This has special calling conventions */
185#ifndef CONFIG_SMP
186BTFIXUPDEF_CALL(void, ___xchg32, void)
187#endif
188
189static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
190{
191#ifdef CONFIG_SMP
192 __asm__ __volatile__("swap [%2], %0"
193 : "=&r" (val)
194 : "0" (val), "r" (m)
195 : "memory");
196 return val;
197#else
198 register unsigned long *ptr asm("g1");
199 register unsigned long ret asm("g2");
200
201 ptr = (unsigned long *) m;
202 ret = val;
203
204 /* Note: this is magic and the nop there is
205 really needed. */
206 __asm__ __volatile__(
207 "mov %%o7, %%g4\n\t"
208 "call ___f____xchg32\n\t"
209 " nop\n\t"
210 : "=&r" (ret)
211 : "0" (ret), "r" (ptr)
212 : "g3", "g4", "g7", "memory", "cc");
213
214 return ret;
215#endif
216}
217
218#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
219
220extern void __xchg_called_with_bad_pointer(void);
221
222static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
223{
224 switch (size) {
225 case 4:
226 return xchg_u32(ptr, x);
227 };
228 __xchg_called_with_bad_pointer();
229 return x;
230}
231
232/* Emulate cmpxchg() the same way we emulate atomics,
233 * by hashing the object address and indexing into an array
234 * of spinlocks to get a bit of performance...
235 *
236 * See arch/sparc/lib/atomic32.c for implementation.
237 *
238 * Cribbed from <asm-parisc/atomic.h>
239 */
240#define __HAVE_ARCH_CMPXCHG 1
241
242/* bug catcher for when unsupported size is used - won't link */
243extern void __cmpxchg_called_with_bad_pointer(void);
244/* we only need to support cmpxchg of a u32 on sparc */
245extern unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
246
247/* don't worry...optimizer will get rid of most of this */
248static inline unsigned long
249__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
250{
251 switch (size) {
252 case 4:
253 return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_);
254 default:
255 __cmpxchg_called_with_bad_pointer();
256 break;
257 }
258 return old;
259}
260
261#define cmpxchg(ptr, o, n) \
262({ \
263 __typeof__(*(ptr)) _o_ = (o); \
264 __typeof__(*(ptr)) _n_ = (n); \
265 (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
266 (unsigned long)_n_, sizeof(*(ptr))); \
267})
268
269#include <asm-generic/cmpxchg-local.h>
270
271/*
272 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
273 * them available.
274 */
275#define cmpxchg_local(ptr, o, n) \
276 ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
277 (unsigned long)(n), sizeof(*(ptr))))
278#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
279
280extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));
281
282#endif /* __KERNEL__ */
283
284#endif /* __ASSEMBLY__ */
285
286#define arch_align_stack(x) (x)
287
288#endif /* !(__SPARC_SYSTEM_H) */
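
The comment above __HAVE_ARCH_CMPXCHG explains that 32-bit sparc has no cmpxchg instruction, so cmpxchg() (like the atomics) is emulated by hashing the object address into a small array of locks; the real implementation lives in arch/sparc/lib/atomic32.c. Here is a user-space sketch of that scheme with pthread mutexes standing in for the kernel's IRQ-disabling spinlocks; the table size, hash, and names are illustrative, not the kernel's.

#include <stdint.h>
#include <pthread.h>

#define EMU_HASH_SIZE	4	/* illustrative; must be a power of two */

static pthread_mutex_t emu_locks[EMU_HASH_SIZE] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

/* hash the object address down to one of the locks */
static pthread_mutex_t *emu_lock_for(volatile void *addr)
{
	return &emu_locks[((uintptr_t)addr >> 2) & (EMU_HASH_SIZE - 1)];
}

static uint32_t emu_cmpxchg_u32(volatile uint32_t *m, uint32_t old, uint32_t new_)
{
	pthread_mutex_t *lk = emu_lock_for(m);
	uint32_t prev;

	pthread_mutex_lock(lk);
	prev = *m;
	if (prev == old)	/* store only when the comparison holds */
		*m = new_;
	pthread_mutex_unlock(lk);

	return prev;		/* caller checks prev == old for success */
}

Because every word that hashes to the same slot serializes on the same lock, a cmpxchg() and an atomic operation on the same word can never interleave, which is all the emulation needs to guarantee.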
diff --git a/include/asm-sparc/system_64.h b/include/asm-sparc/system_64.h
new file mode 100644
index 000000000000..db9e742a406a
--- /dev/null
+++ b/include/asm-sparc/system_64.h
@@ -0,0 +1,355 @@
1#ifndef __SPARC64_SYSTEM_H
2#define __SPARC64_SYSTEM_H
3
4#include <asm/ptrace.h>
5#include <asm/processor.h>
6#include <asm/visasm.h>
7
8#ifndef __ASSEMBLY__
9
10#include <linux/irqflags.h>
11#include <asm-generic/cmpxchg-local.h>
12
13/*
14 * Sparc (general) CPU types
15 */
16enum sparc_cpu {
17 sun4 = 0x00,
18 sun4c = 0x01,
19 sun4m = 0x02,
20 sun4d = 0x03,
21 sun4e = 0x04,
22 sun4u = 0x05, /* V8 ploos ploos */
23 sun_unknown = 0x06,
24 ap1000 = 0x07, /* almost a sun4m */
25};
26
27#define sparc_cpu_model sun4u
28
29/* This cannot ever be a sun4c nor sun4 :) That's just history. */
30#define ARCH_SUN4C_SUN4 0
31#define ARCH_SUN4 0
32
33extern char reboot_command[];
34
35/* These are here in an effort to more fully work around Spitfire Errata
36 * #51. Essentially, if a memory barrier occurs soon after a mispredicted
37 * branch, the chip can stop executing instructions until a trap occurs.
38 * Therefore, if interrupts are disabled, the chip can hang forever.
39 *
40 * It used to be believed that the memory barrier had to be right in the
41 * delay slot, but a case has been traced recently wherein the memory barrier
42 * was one instruction after the branch delay slot and the chip still hung.
43 * The offending sequence was the following in sym_wakeup_done() of the
44 * sym53c8xx_2 driver:
45 *
46 * call sym_ccb_from_dsa, 0
47 * movge %icc, 0, %l0
48 * brz,pn %o0, .LL1303
49 * mov %o0, %l2
50 * membar #LoadLoad
51 *
52 * The branch has to be mispredicted for the bug to occur. Therefore, we put
53 * the memory barrier explicitly into a "branch always, predicted taken"
54 * delay slot to avoid the problem case.
55 */
56#define membar_safe(type) \
57do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
58 " membar " type "\n" \
59 "1:\n" \
60 : : : "memory"); \
61} while (0)
62
63#define mb() \
64 membar_safe("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
65#define rmb() \
66 membar_safe("#LoadLoad")
67#define wmb() \
68 membar_safe("#StoreStore")
69#define membar_storeload() \
70 membar_safe("#StoreLoad")
71#define membar_storeload_storestore() \
72 membar_safe("#StoreLoad | #StoreStore")
73#define membar_storeload_loadload() \
74 membar_safe("#StoreLoad | #LoadLoad")
75#define membar_storestore_loadstore() \
76 membar_safe("#StoreStore | #LoadStore")
77
78#endif
79
80#define nop() __asm__ __volatile__ ("nop")
81
82#define read_barrier_depends() do { } while(0)
83#define set_mb(__var, __value) \
84 do { __var = __value; membar_storeload_storestore(); } while(0)
85
86#ifdef CONFIG_SMP
87#define smp_mb() mb()
88#define smp_rmb() rmb()
89#define smp_wmb() wmb()
90#define smp_read_barrier_depends() read_barrier_depends()
91#else
92#define smp_mb() __asm__ __volatile__("":::"memory")
93#define smp_rmb() __asm__ __volatile__("":::"memory")
94#define smp_wmb() __asm__ __volatile__("":::"memory")
95#define smp_read_barrier_depends() do { } while(0)
96#endif
97
98#define flushi(addr) __asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")
99
100#define flushw_all() __asm__ __volatile__("flushw")
101
102/* Performance counter register access. */
103#define read_pcr(__p) __asm__ __volatile__("rd %%pcr, %0" : "=r" (__p))
104#define write_pcr(__p) __asm__ __volatile__("wr %0, 0x0, %%pcr" : : "r" (__p))
105#define read_pic(__p) __asm__ __volatile__("rd %%pic, %0" : "=r" (__p))
106
107/* Blackbird errata workaround. See commentary in
108 * arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt()
109 * for more information.
110 */
111#define reset_pic() \
112 __asm__ __volatile__("ba,pt %xcc, 99f\n\t" \
113 ".align 64\n" \
114 "99:wr %g0, 0x0, %pic\n\t" \
115 "rd %pic, %g0")
116
117#ifndef __ASSEMBLY__
118
119extern void sun_do_break(void);
120extern int stop_a_enabled;
121
122extern void fault_in_user_windows(void);
123extern void synchronize_user_stack(void);
124
125extern void __flushw_user(void);
126#define flushw_user() __flushw_user()
127
128#define flush_user_windows flushw_user
129#define flush_register_windows flushw_all
130
131/* Don't hold the runqueue lock over context switch */
132#define __ARCH_WANT_UNLOCKED_CTXSW
133#define prepare_arch_switch(next) \
134do { \
135 flushw_all(); \
136} while (0)
137
138 /* See what happens when you design the chip correctly?
139 *
140 * We tell gcc we clobber all non-fixed-usage registers except
141 * for l0/l1. It will use one for 'next' and the other to hold
142 * the output value of 'last'. 'next' is not referenced again
143 * past the invocation of switch_to in the scheduler, so we need
144 * not preserve its value. Hairy, but it lets us remove 2 loads
145 * and 2 stores in this critical code path. -DaveM
146 */
147#define switch_to(prev, next, last) \
148do { if (test_thread_flag(TIF_PERFCTR)) { \
149 unsigned long __tmp; \
150 read_pcr(__tmp); \
151 current_thread_info()->pcr_reg = __tmp; \
152 read_pic(__tmp); \
153 current_thread_info()->kernel_cntd0 += (unsigned int)(__tmp);\
154 current_thread_info()->kernel_cntd1 += ((__tmp) >> 32); \
155 } \
156 flush_tlb_pending(); \
157 save_and_clear_fpu(); \
158 /* If you are tempted to conditionalize the following */ \
159 /* so that ASI is only written if it changes, think again. */ \
160 __asm__ __volatile__("wr %%g0, %0, %%asi" \
161 : : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\
162 trap_block[current_thread_info()->cpu].thread = \
163 task_thread_info(next); \
164 __asm__ __volatile__( \
165 "mov %%g4, %%g7\n\t" \
166 "stx %%i6, [%%sp + 2047 + 0x70]\n\t" \
167 "stx %%i7, [%%sp + 2047 + 0x78]\n\t" \
168 "rdpr %%wstate, %%o5\n\t" \
169 "stx %%o6, [%%g6 + %6]\n\t" \
170 "stb %%o5, [%%g6 + %5]\n\t" \
171 "rdpr %%cwp, %%o5\n\t" \
172 "stb %%o5, [%%g6 + %8]\n\t" \
173 "mov %4, %%g6\n\t" \
174 "ldub [%4 + %8], %%g1\n\t" \
175 "wrpr %%g1, %%cwp\n\t" \
176 "ldx [%%g6 + %6], %%o6\n\t" \
177 "ldub [%%g6 + %5], %%o5\n\t" \
178 "ldub [%%g6 + %7], %%o7\n\t" \
179 "wrpr %%o5, 0x0, %%wstate\n\t" \
180 "ldx [%%sp + 2047 + 0x70], %%i6\n\t" \
181 "ldx [%%sp + 2047 + 0x78], %%i7\n\t" \
182 "ldx [%%g6 + %9], %%g4\n\t" \
183 "brz,pt %%o7, switch_to_pc\n\t" \
184 " mov %%g7, %0\n\t" \
185 "sethi %%hi(ret_from_syscall), %%g1\n\t" \
186 "jmpl %%g1 + %%lo(ret_from_syscall), %%g0\n\t" \
187 " nop\n\t" \
188 ".globl switch_to_pc\n\t" \
189 "switch_to_pc:\n\t" \
190 : "=&r" (last), "=r" (current), "=r" (current_thread_info_reg), \
191 "=r" (__local_per_cpu_offset) \
192 : "0" (task_thread_info(next)), \
193 "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD), \
194 "i" (TI_CWP), "i" (TI_TASK) \
195 : "cc", \
196 "g1", "g2", "g3", "g7", \
197 "l1", "l2", "l3", "l4", "l5", "l6", "l7", \
198 "i0", "i1", "i2", "i3", "i4", "i5", \
199 "o0", "o1", "o2", "o3", "o4", "o5", "o7"); \
 200	/* If you change this, update ret_from_syscall code too. */ \
201 if (test_thread_flag(TIF_PERFCTR)) { \
202 write_pcr(current_thread_info()->pcr_reg); \
203 reset_pic(); \
204 } \
205} while(0)
206
207static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
208{
209 unsigned long tmp1, tmp2;
210
211 __asm__ __volatile__(
212" membar #StoreLoad | #LoadLoad\n"
213" mov %0, %1\n"
214"1: lduw [%4], %2\n"
215" cas [%4], %2, %0\n"
216" cmp %2, %0\n"
217" bne,a,pn %%icc, 1b\n"
218" mov %1, %0\n"
219" membar #StoreLoad | #StoreStore\n"
220 : "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
221 : "0" (val), "r" (m)
222 : "cc", "memory");
223 return val;
224}
225
226static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val)
227{
228 unsigned long tmp1, tmp2;
229
230 __asm__ __volatile__(
231" membar #StoreLoad | #LoadLoad\n"
232" mov %0, %1\n"
233"1: ldx [%4], %2\n"
234" casx [%4], %2, %0\n"
235" cmp %2, %0\n"
236" bne,a,pn %%xcc, 1b\n"
237" mov %1, %0\n"
238" membar #StoreLoad | #StoreStore\n"
239 : "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
240 : "0" (val), "r" (m)
241 : "cc", "memory");
242 return val;
243}
244
245#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
246
247extern void __xchg_called_with_bad_pointer(void);
248
249static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
250 int size)
251{
252 switch (size) {
253 case 4:
254 return xchg32(ptr, x);
255 case 8:
256 return xchg64(ptr, x);
257 };
258 __xchg_called_with_bad_pointer();
259 return x;
260}
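For readers of this hunk: the xchg()/__xchg() machinery above is normally used to swap a new value into a shared word and act on the value that was there before. A minimal, illustrative sketch follows; the lock word and function names are hypothetical and not part of this header.

/* Illustrative sketch, not part of the patch: a trivial test-and-set style
 * lock built on the xchg() macro above. 'example_lock' and the function
 * names are hypothetical.
 */
static unsigned int example_lock;	/* 0 = free, 1 = held */

static inline void example_acquire(void)
{
	/* xchg() returns the previous value; spin until we observe 0. */
	while (xchg(&example_lock, 1) != 0)
		cpu_relax();
}

static inline void example_release(void)
{
	/* The membar instructions in xchg32() above provide the ordering. */
	xchg(&example_lock, 0);
}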
261
262extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));
263
264/*
265 * Atomic compare and exchange. Compare OLD with MEM, if identical,
266 * store NEW in MEM. Return the initial value in MEM. Success is
267 * indicated by comparing RETURN with OLD.
268 */
269
270#define __HAVE_ARCH_CMPXCHG 1
271
272static inline unsigned long
273__cmpxchg_u32(volatile int *m, int old, int new)
274{
275 __asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
276 "cas [%2], %3, %0\n\t"
277 "membar #StoreLoad | #StoreStore"
278 : "=&r" (new)
279 : "0" (new), "r" (m), "r" (old)
280 : "memory");
281
282 return new;
283}
284
285static inline unsigned long
286__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
287{
288 __asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
289 "casx [%2], %3, %0\n\t"
290 "membar #StoreLoad | #StoreStore"
291 : "=&r" (new)
292 : "0" (new), "r" (m), "r" (old)
293 : "memory");
294
295 return new;
296}
297
298/* This function doesn't exist, so you'll get a linker error
299 if something tries to do an invalid cmpxchg(). */
300extern void __cmpxchg_called_with_bad_pointer(void);
301
302static inline unsigned long
303__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
304{
305 switch (size) {
306 case 4:
307 return __cmpxchg_u32(ptr, old, new);
308 case 8:
309 return __cmpxchg_u64(ptr, old, new);
310 }
311 __cmpxchg_called_with_bad_pointer();
312 return old;
313}
314
315#define cmpxchg(ptr,o,n) \
316 ({ \
317 __typeof__(*(ptr)) _o_ = (o); \
318 __typeof__(*(ptr)) _n_ = (n); \
319 (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
320 (unsigned long)_n_, sizeof(*(ptr))); \
321 })
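For context, the cmpxchg() macro above is usually consumed in a read/compute/retry loop: snapshot the location, compute a new value, and retry if another CPU changed the location in the meantime. A hedged sketch with hypothetical names, not part of this header:

/* Illustrative sketch only: lock-free add using cmpxchg(). The variable
 * and function names are hypothetical.
 */
static unsigned long example_counter;

static inline unsigned long example_add_return(unsigned long delta)
{
	unsigned long old, new, prev;

	do {
		old = example_counter;		/* snapshot current value */
		new = old + delta;		/* compute the update */
		/* cmpxchg() returns the value found in memory; success is
		 * indicated by it being equal to 'old'.
		 */
		prev = cmpxchg(&example_counter, old, new);
	} while (prev != old);

	return new;
}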
322
323/*
324 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
325 * them available.
326 */
327
328static inline unsigned long __cmpxchg_local(volatile void *ptr,
329 unsigned long old,
330 unsigned long new, int size)
331{
332 switch (size) {
333 case 4:
334 case 8: return __cmpxchg(ptr, old, new, size);
335 default:
336 return __cmpxchg_local_generic(ptr, old, new, size);
337 }
338
339 return old;
340}
341
342#define cmpxchg_local(ptr, o, n) \
343 ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
344 (unsigned long)(n), sizeof(*(ptr))))
345#define cmpxchg64_local(ptr, o, n) \
346 ({ \
347 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
348 cmpxchg_local((ptr), (o), (n)); \
349 })
350
351#endif /* !(__ASSEMBLY__) */
352
353#define arch_align_stack(x) (x)
354
355#endif /* !(__SPARC64_SYSTEM_H) */
diff --git a/include/asm-sparc/termbits.h b/include/asm-sparc/termbits.h
index 90cf2210118b..d6ca3e2754f5 100644
--- a/include/asm-sparc/termbits.h
+++ b/include/asm-sparc/termbits.h
@@ -5,7 +5,12 @@
5 5
6typedef unsigned char cc_t; 6typedef unsigned char cc_t;
7typedef unsigned int speed_t; 7typedef unsigned int speed_t;
8
9#if defined(__sparc__) && defined(__arch64__)
10typedef unsigned int tcflag_t;
11#else
8typedef unsigned long tcflag_t; 12typedef unsigned long tcflag_t;
13#endif
9 14
10#define NCC 8 15#define NCC 8
11struct termio { 16struct termio {
diff --git a/include/asm-sparc/termios.h b/include/asm-sparc/termios.h
index f7b4409c35ff..e8ba95399643 100644
--- a/include/asm-sparc/termios.h
+++ b/include/asm-sparc/termios.h
@@ -53,7 +53,6 @@ struct winsize {
53#define _VMIN 4 53#define _VMIN 4
54#define _VTIME 5 54#define _VTIME 5
55 55
56
57/* intr=^C quit=^\ erase=del kill=^U 56/* intr=^C quit=^\ erase=del kill=^U
58 eof=^D eol=\0 eol2=\0 sxtc=\0 57 eof=^D eol=\0 eol2=\0 sxtc=\0
59 start=^Q stop=^S susp=^Z dsusp=^Y 58 start=^Q stop=^S susp=^Z dsusp=^Y
@@ -68,16 +67,17 @@ struct winsize {
68#define user_termio_to_kernel_termios(termios, termio) \ 67#define user_termio_to_kernel_termios(termios, termio) \
69({ \ 68({ \
70 unsigned short tmp; \ 69 unsigned short tmp; \
71 get_user(tmp, &(termio)->c_iflag); \ 70 int err; \
71 err = get_user(tmp, &(termio)->c_iflag); \
72 (termios)->c_iflag = (0xffff0000 & ((termios)->c_iflag)) | tmp; \ 72 (termios)->c_iflag = (0xffff0000 & ((termios)->c_iflag)) | tmp; \
73 get_user(tmp, &(termio)->c_oflag); \ 73 err |= get_user(tmp, &(termio)->c_oflag); \
74 (termios)->c_oflag = (0xffff0000 & ((termios)->c_oflag)) | tmp; \ 74 (termios)->c_oflag = (0xffff0000 & ((termios)->c_oflag)) | tmp; \
75 get_user(tmp, &(termio)->c_cflag); \ 75 err |= get_user(tmp, &(termio)->c_cflag); \
76 (termios)->c_cflag = (0xffff0000 & ((termios)->c_cflag)) | tmp; \ 76 (termios)->c_cflag = (0xffff0000 & ((termios)->c_cflag)) | tmp; \
77 get_user(tmp, &(termio)->c_lflag); \ 77 err |= get_user(tmp, &(termio)->c_lflag); \
78 (termios)->c_lflag = (0xffff0000 & ((termios)->c_lflag)) | tmp; \ 78 (termios)->c_lflag = (0xffff0000 & ((termios)->c_lflag)) | tmp; \
79 copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \ 79 err |= copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
80 0; \ 80 err; \
81}) 81})
82 82
83/* 83/*
@@ -87,17 +87,18 @@ struct winsize {
87 */ 87 */
88#define kernel_termios_to_user_termio(termio, termios) \ 88#define kernel_termios_to_user_termio(termio, termios) \
89({ \ 89({ \
90 put_user((termios)->c_iflag, &(termio)->c_iflag); \ 90 int err; \
91 put_user((termios)->c_oflag, &(termio)->c_oflag); \ 91 err = put_user((termios)->c_iflag, &(termio)->c_iflag); \
92 put_user((termios)->c_cflag, &(termio)->c_cflag); \ 92 err |= put_user((termios)->c_oflag, &(termio)->c_oflag); \
93 put_user((termios)->c_lflag, &(termio)->c_lflag); \ 93 err |= put_user((termios)->c_cflag, &(termio)->c_cflag); \
94 put_user((termios)->c_line, &(termio)->c_line); \ 94 err |= put_user((termios)->c_lflag, &(termio)->c_lflag); \
95 copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ 95 err |= put_user((termios)->c_line, &(termio)->c_line); \
96 err |= copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
96 if (!((termios)->c_lflag & ICANON)) { \ 97 if (!((termios)->c_lflag & ICANON)) { \
97 put_user((termios)->c_cc[VMIN], &(termio)->c_cc[_VMIN]); \ 98 err |= put_user((termios)->c_cc[VMIN], &(termio)->c_cc[_VMIN]); \
98 put_user((termios)->c_cc[VTIME], &(termio)->c_cc[_VTIME]); \ 99 err |= put_user((termios)->c_cc[VTIME], &(termio)->c_cc[_VTIME]); \
99 } \ 100 } \
100 0; \ 101 err; \
101}) 102})
102 103
103#define user_termios_to_kernel_termios(k, u) \ 104#define user_termios_to_kernel_termios(k, u) \
@@ -144,38 +145,40 @@ struct winsize {
144 145
145#define user_termios_to_kernel_termios_1(k, u) \ 146#define user_termios_to_kernel_termios_1(k, u) \
146({ \ 147({ \
147 get_user((k)->c_iflag, &(u)->c_iflag); \ 148 int err; \
148 get_user((k)->c_oflag, &(u)->c_oflag); \ 149 err = get_user((k)->c_iflag, &(u)->c_iflag); \
149 get_user((k)->c_cflag, &(u)->c_cflag); \ 150 err |= get_user((k)->c_oflag, &(u)->c_oflag); \
150 get_user((k)->c_lflag, &(u)->c_lflag); \ 151 err |= get_user((k)->c_cflag, &(u)->c_cflag); \
151 get_user((k)->c_line, &(u)->c_line); \ 152 err |= get_user((k)->c_lflag, &(u)->c_lflag); \
152 copy_from_user((k)->c_cc, (u)->c_cc, NCCS); \ 153 err |= get_user((k)->c_line, &(u)->c_line); \
154 err |= copy_from_user((k)->c_cc, (u)->c_cc, NCCS); \
153 if ((k)->c_lflag & ICANON) { \ 155 if ((k)->c_lflag & ICANON) { \
154 get_user((k)->c_cc[VEOF], &(u)->c_cc[VEOF]); \ 156 err |= get_user((k)->c_cc[VEOF], &(u)->c_cc[VEOF]); \
155 get_user((k)->c_cc[VEOL], &(u)->c_cc[VEOL]); \ 157 err |= get_user((k)->c_cc[VEOL], &(u)->c_cc[VEOL]); \
156 } else { \ 158 } else { \
157 get_user((k)->c_cc[VMIN], &(u)->c_cc[_VMIN]); \ 159 err |= get_user((k)->c_cc[VMIN], &(u)->c_cc[_VMIN]); \
158 get_user((k)->c_cc[VTIME], &(u)->c_cc[_VTIME]); \ 160 err |= get_user((k)->c_cc[VTIME], &(u)->c_cc[_VTIME]); \
159 } \ 161 } \
160 0; \ 162 err; \
161}) 163})
162 164
163#define kernel_termios_to_user_termios_1(u, k) \ 165#define kernel_termios_to_user_termios_1(u, k) \
164({ \ 166({ \
165 put_user((k)->c_iflag, &(u)->c_iflag); \ 167 int err; \
166 put_user((k)->c_oflag, &(u)->c_oflag); \ 168 err = put_user((k)->c_iflag, &(u)->c_iflag); \
167 put_user((k)->c_cflag, &(u)->c_cflag); \ 169 err |= put_user((k)->c_oflag, &(u)->c_oflag); \
168 put_user((k)->c_lflag, &(u)->c_lflag); \ 170 err |= put_user((k)->c_cflag, &(u)->c_cflag); \
169 put_user((k)->c_line, &(u)->c_line); \ 171 err |= put_user((k)->c_lflag, &(u)->c_lflag); \
170 copy_to_user((u)->c_cc, (k)->c_cc, NCCS); \ 172 err |= put_user((k)->c_line, &(u)->c_line); \
173 err |= copy_to_user((u)->c_cc, (k)->c_cc, NCCS); \
171 if (!((k)->c_lflag & ICANON)) { \ 174 if (!((k)->c_lflag & ICANON)) { \
172 put_user((k)->c_cc[VMIN], &(u)->c_cc[_VMIN]); \ 175 err |= put_user((k)->c_cc[VMIN], &(u)->c_cc[_VMIN]); \
173 put_user((k)->c_cc[VTIME], &(u)->c_cc[_VTIME]); \ 176 err |= put_user((k)->c_cc[VTIME], &(u)->c_cc[_VTIME]); \
174 } else { \ 177 } else { \
175 put_user((k)->c_cc[VEOF], &(u)->c_cc[VEOF]); \ 178 err |= put_user((k)->c_cc[VEOF], &(u)->c_cc[VEOF]); \
176 put_user((k)->c_cc[VEOL], &(u)->c_cc[VEOL]); \ 179 err |= put_user((k)->c_cc[VEOL], &(u)->c_cc[VEOL]); \
177 } \ 180 } \
178 0; \ 181 err; \
179}) 182})
180 183
181#endif /* __KERNEL__ */ 184#endif /* __KERNEL__ */
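The termios.h hunks above all apply the same fix: instead of discarding the return values of get_user()/put_user()/copy_*_user(), each return value is OR-ed into a single err which the statement-expression then evaluates to. A minimal sketch of that pattern, with a hypothetical macro, structure, and field names:

/* Sketch of the error-accumulation pattern introduced above: every
 * user-space access can fault, so each return value is OR-ed into 'err'
 * and the whole statement-expression evaluates to it (non-zero on any
 * failure). The names here are hypothetical.
 */
#define example_copy_to_user_fields(uptr, kptr)				\
({									\
	int err;							\
	err  = put_user((kptr)->field_a, &(uptr)->field_a);		\
	err |= put_user((kptr)->field_b, &(uptr)->field_b);		\
	err |= copy_to_user((uptr)->buf, (kptr)->buf, sizeof((kptr)->buf)); \
	err;								\
})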
diff --git a/include/asm-sparc/thread_info.h b/include/asm-sparc/thread_info.h
index 91b9f5888c85..64155cf89f37 100644
--- a/include/asm-sparc/thread_info.h
+++ b/include/asm-sparc/thread_info.h
@@ -1,151 +1,8 @@
1/* 1#ifndef ___ASM_SPARC_THREAD_INFO_H
2 * thread_info.h: sparc low-level thread information 2#define ___ASM_SPARC_THREAD_INFO_H
3 * adapted from the ppc version by Pete Zaitcev, which was 3#if defined(__sparc__) && defined(__arch64__)
4 * adapted from the i386 version by Paul Mackerras 4#include <asm-sparc/thread_info_64.h>
5 * 5#else
6 * Copyright (C) 2002 David Howells (dhowells@redhat.com) 6#include <asm-sparc/thread_info_32.h>
7 * Copyright (c) 2002 Pete Zaitcev (zaitcev@yahoo.com) 7#endif
8 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
9 */
10
11#ifndef _ASM_THREAD_INFO_H
12#define _ASM_THREAD_INFO_H
13
14#ifdef __KERNEL__
15
16#ifndef __ASSEMBLY__
17
18#include <asm/btfixup.h>
19#include <asm/ptrace.h>
20#include <asm/page.h>
21
22/*
23 * Low level task data.
24 *
25 * If you change this, change the TI_* offsets below to match.
26 */
27#define NSWINS 8
28struct thread_info {
29 unsigned long uwinmask;
30 struct task_struct *task; /* main task structure */
31 struct exec_domain *exec_domain; /* execution domain */
32 unsigned long flags; /* low level flags */
33 int cpu; /* cpu we're on */
34 int preempt_count; /* 0 => preemptable,
35 <0 => BUG */
36 int softirq_count;
37 int hardirq_count;
38
39 /* Context switch saved kernel state. */
40 unsigned long ksp; /* ... ksp __attribute__ ((aligned (8))); */
41 unsigned long kpc;
42 unsigned long kpsr;
43 unsigned long kwim;
44
45 /* A place to store user windows and stack pointers
46 * when the stack needs inspection.
47 */
48 struct reg_window reg_window[NSWINS]; /* align for ldd! */
49 unsigned long rwbuf_stkptrs[NSWINS];
50 unsigned long w_saved;
51
52 struct restart_block restart_block;
53};
54
55/*
56 * macros/functions for gaining access to the thread information structure
57 *
58 * preempt_count needs to be 1 initially, until the scheduler is functional.
59 */
60#define INIT_THREAD_INFO(tsk) \
61{ \
62 .uwinmask = 0, \
63 .task = &tsk, \
64 .exec_domain = &default_exec_domain, \
65 .flags = 0, \
66 .cpu = 0, \
67 .preempt_count = 1, \
68 .restart_block = { \
69 .fn = do_no_restart_syscall, \
70 }, \
71}
72
73#define init_thread_info (init_thread_union.thread_info)
74#define init_stack (init_thread_union.stack)
75
76/* how to get the thread information struct from C */
77register struct thread_info *current_thread_info_reg asm("g6");
78#define current_thread_info() (current_thread_info_reg)
79
80/*
81 * thread information allocation
82 */
83#if PAGE_SHIFT == 13
84#define THREAD_INFO_ORDER 0
85#else /* PAGE_SHIFT */
86#define THREAD_INFO_ORDER 1
87#endif 8#endif
88
89BTFIXUPDEF_CALL(struct thread_info *, alloc_thread_info, void)
90#define alloc_thread_info(tsk) BTFIXUP_CALL(alloc_thread_info)()
91
92BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
93#define free_thread_info(ti) BTFIXUP_CALL(free_thread_info)(ti)
94
95#endif /* __ASSEMBLY__ */
96
97/*
98 * Size of kernel stack for each process.
99 * Observe the order of get_free_pages() in alloc_thread_info().
100 * The sun4 has 8K stack too, because it's short on memory, and 16K is a waste.
101 */
102#define THREAD_SIZE 8192
103
104/*
105 * Offsets in thread_info structure, used in assembly code
106 * The "#define REGWIN_SZ 0x40" was abolished, so no multiplications.
107 */
108#define TI_UWINMASK 0x00 /* uwinmask */
109#define TI_TASK 0x04
110#define TI_EXECDOMAIN 0x08 /* exec_domain */
111#define TI_FLAGS 0x0c
112#define TI_CPU 0x10
113#define TI_PREEMPT 0x14 /* preempt_count */
114#define TI_SOFTIRQ 0x18 /* softirq_count */
115#define TI_HARDIRQ 0x1c /* hardirq_count */
116#define TI_KSP 0x20 /* ksp */
117#define TI_KPC 0x24 /* kpc (ldd'ed with kpc) */
118#define TI_KPSR 0x28 /* kpsr */
119#define TI_KWIM 0x2c /* kwim (ldd'ed with kpsr) */
120#define TI_REG_WINDOW 0x30
121#define TI_RWIN_SPTRS 0x230
122#define TI_W_SAVED 0x250
123/* #define TI_RESTART_BLOCK 0x25n */ /* Nobody cares */
124
125#define PREEMPT_ACTIVE 0x4000000
126
127/*
128 * thread information flag bit numbers
129 */
130#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
131/* flag bit 1 is available */
132#define TIF_SIGPENDING 2 /* signal pending */
133#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
134#define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */
135#define TIF_USEDFPU 8 /* FPU was used by this task
136 * this quantum (SMP) */
137#define TIF_POLLING_NRFLAG 9 /* true if poll_idle() is polling
138 * TIF_NEED_RESCHED */
139#define TIF_MEMDIE 10
140
141/* as above, but as bit values */
142#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
143#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
144#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
145#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
146#define _TIF_USEDFPU (1<<TIF_USEDFPU)
147#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
148
149#endif /* __KERNEL__ */
150
151#endif /* _ASM_THREAD_INFO_H */
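As with the other headers in this series, thread_info.h is reduced above to a thin wrapper that selects the 32-bit or 64-bit variant at preprocessing time. The generic shape of these wrappers, shown here with a hypothetical header name, is:

/* Generic form of the wrapper headers introduced throughout this diff.
 * 'example.h' and its _32/_64 variants are hypothetical placeholders.
 */
#ifndef ___ASM_SPARC_EXAMPLE_H
#define ___ASM_SPARC_EXAMPLE_H
#if defined(__sparc__) && defined(__arch64__)
#include <asm-sparc/example_64.h>
#else
#include <asm-sparc/example_32.h>
#endif
#endif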
diff --git a/include/asm-sparc/thread_info_32.h b/include/asm-sparc/thread_info_32.h
new file mode 100644
index 000000000000..2cf9db044055
--- /dev/null
+++ b/include/asm-sparc/thread_info_32.h
@@ -0,0 +1,153 @@
1/*
2 * thread_info.h: sparc low-level thread information
3 * adapted from the ppc version by Pete Zaitcev, which was
4 * adapted from the i386 version by Paul Mackerras
5 *
6 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
7 * Copyright (c) 2002 Pete Zaitcev (zaitcev@yahoo.com)
8 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
9 */
10
11#ifndef _ASM_THREAD_INFO_H
12#define _ASM_THREAD_INFO_H
13
14#ifdef __KERNEL__
15
16#ifndef __ASSEMBLY__
17
18#include <asm/btfixup.h>
19#include <asm/ptrace.h>
20#include <asm/page.h>
21
22/*
23 * Low level task data.
24 *
25 * If you change this, change the TI_* offsets below to match.
26 */
27#define NSWINS 8
28struct thread_info {
29 unsigned long uwinmask;
30 struct task_struct *task; /* main task structure */
31 struct exec_domain *exec_domain; /* execution domain */
32 unsigned long flags; /* low level flags */
33 int cpu; /* cpu we're on */
34 int preempt_count; /* 0 => preemptable,
35 <0 => BUG */
36 int softirq_count;
37 int hardirq_count;
38
39 /* Context switch saved kernel state. */
40 unsigned long ksp; /* ... ksp __attribute__ ((aligned (8))); */
41 unsigned long kpc;
42 unsigned long kpsr;
43 unsigned long kwim;
44
45 /* A place to store user windows and stack pointers
46 * when the stack needs inspection.
47 */
48 struct reg_window reg_window[NSWINS]; /* align for ldd! */
49 unsigned long rwbuf_stkptrs[NSWINS];
50 unsigned long w_saved;
51
52 struct restart_block restart_block;
53};
54
55/*
56 * macros/functions for gaining access to the thread information structure
57 *
58 * preempt_count needs to be 1 initially, until the scheduler is functional.
59 */
60#define INIT_THREAD_INFO(tsk) \
61{ \
62 .uwinmask = 0, \
63 .task = &tsk, \
64 .exec_domain = &default_exec_domain, \
65 .flags = 0, \
66 .cpu = 0, \
67 .preempt_count = 1, \
68 .restart_block = { \
69 .fn = do_no_restart_syscall, \
70 }, \
71}
72
73#define init_thread_info (init_thread_union.thread_info)
74#define init_stack (init_thread_union.stack)
75
76/* how to get the thread information struct from C */
77register struct thread_info *current_thread_info_reg asm("g6");
78#define current_thread_info() (current_thread_info_reg)
79
80/*
81 * thread information allocation
82 */
83#if PAGE_SHIFT == 13
84#define THREAD_INFO_ORDER 0
85#else /* PAGE_SHIFT */
86#define THREAD_INFO_ORDER 1
87#endif
88
89#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
90
91BTFIXUPDEF_CALL(struct thread_info *, alloc_thread_info, void)
92#define alloc_thread_info(tsk) BTFIXUP_CALL(alloc_thread_info)()
93
94BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
95#define free_thread_info(ti) BTFIXUP_CALL(free_thread_info)(ti)
96
97#endif /* __ASSEMBLY__ */
98
99/*
100 * Size of kernel stack for each process.
101 * Observe the order of get_free_pages() in alloc_thread_info().
102 * The sun4 has 8K stack too, because it's short on memory, and 16K is a waste.
103 */
104#define THREAD_SIZE 8192
105
106/*
107 * Offsets in thread_info structure, used in assembly code
108 * The "#define REGWIN_SZ 0x40" was abolished, so no multiplications.
109 */
110#define TI_UWINMASK 0x00 /* uwinmask */
111#define TI_TASK 0x04
112#define TI_EXECDOMAIN 0x08 /* exec_domain */
113#define TI_FLAGS 0x0c
114#define TI_CPU 0x10
115#define TI_PREEMPT 0x14 /* preempt_count */
116#define TI_SOFTIRQ 0x18 /* softirq_count */
117#define TI_HARDIRQ 0x1c /* hardirq_count */
118#define TI_KSP 0x20 /* ksp */
119#define TI_KPC 0x24 /* kpc (ldd'ed with kpc) */
120#define TI_KPSR 0x28 /* kpsr */
121#define TI_KWIM 0x2c /* kwim (ldd'ed with kpsr) */
122#define TI_REG_WINDOW 0x30
123#define TI_RWIN_SPTRS 0x230
124#define TI_W_SAVED 0x250
125/* #define TI_RESTART_BLOCK 0x25n */ /* Nobody cares */
126
127#define PREEMPT_ACTIVE 0x4000000
128
129/*
130 * thread information flag bit numbers
131 */
132#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
133/* flag bit 1 is available */
134#define TIF_SIGPENDING 2 /* signal pending */
135#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
136#define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */
137#define TIF_USEDFPU 8 /* FPU was used by this task
138 * this quantum (SMP) */
139#define TIF_POLLING_NRFLAG 9 /* true if poll_idle() is polling
140 * TIF_NEED_RESCHED */
141#define TIF_MEMDIE 10
142
143/* as above, but as bit values */
144#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
145#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
146#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
147#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
148#define _TIF_USEDFPU (1<<TIF_USEDFPU)
149#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
150
151#endif /* __KERNEL__ */
152
153#endif /* _ASM_THREAD_INFO_H */
diff --git a/include/asm-sparc/thread_info_64.h b/include/asm-sparc/thread_info_64.h
new file mode 100644
index 000000000000..960969d5ad06
--- /dev/null
+++ b/include/asm-sparc/thread_info_64.h
@@ -0,0 +1,279 @@
1/* thread_info.h: sparc64 low-level thread information
2 *
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 */
5
6#ifndef _ASM_THREAD_INFO_H
7#define _ASM_THREAD_INFO_H
8
9#ifdef __KERNEL__
10
11#define NSWINS 7
12
13#define TI_FLAG_BYTE_FAULT_CODE 0
14#define TI_FLAG_FAULT_CODE_SHIFT 56
15#define TI_FLAG_BYTE_WSTATE 1
16#define TI_FLAG_WSTATE_SHIFT 48
17#define TI_FLAG_BYTE_CWP 2
18#define TI_FLAG_CWP_SHIFT 40
19#define TI_FLAG_BYTE_CURRENT_DS 3
20#define TI_FLAG_CURRENT_DS_SHIFT 32
21#define TI_FLAG_BYTE_FPDEPTH 4
22#define TI_FLAG_FPDEPTH_SHIFT 24
23#define TI_FLAG_BYTE_WSAVED 5
24#define TI_FLAG_WSAVED_SHIFT 16
25
26#include <asm/page.h>
27
28#ifndef __ASSEMBLY__
29
30#include <asm/ptrace.h>
31#include <asm/types.h>
32
33struct task_struct;
34struct exec_domain;
35
36struct thread_info {
37 /* D$ line 1 */
38 struct task_struct *task;
39 unsigned long flags;
40 __u8 fpsaved[7];
41 __u8 status;
42 unsigned long ksp;
43
44 /* D$ line 2 */
45 unsigned long fault_address;
46 struct pt_regs *kregs;
47 struct exec_domain *exec_domain;
48 int preempt_count; /* 0 => preemptable, <0 => BUG */
49 __u8 new_child;
50 __u8 syscall_noerror;
51 __u16 cpu;
52
53 unsigned long *utraps;
54
55 struct reg_window reg_window[NSWINS];
56 unsigned long rwbuf_stkptrs[NSWINS];
57
58 unsigned long gsr[7];
59 unsigned long xfsr[7];
60
61 __u64 __user *user_cntd0;
62 __u64 __user *user_cntd1;
63 __u64 kernel_cntd0, kernel_cntd1;
64 __u64 pcr_reg;
65
66 struct restart_block restart_block;
67
68 struct pt_regs *kern_una_regs;
69 unsigned int kern_una_insn;
70
71 unsigned long fpregs[0] __attribute__ ((aligned(64)));
72};
73
74#endif /* !(__ASSEMBLY__) */
75
76/* offsets into the thread_info struct for assembly code access */
77#define TI_TASK 0x00000000
78#define TI_FLAGS 0x00000008
79#define TI_FAULT_CODE (TI_FLAGS + TI_FLAG_BYTE_FAULT_CODE)
80#define TI_WSTATE (TI_FLAGS + TI_FLAG_BYTE_WSTATE)
81#define TI_CWP (TI_FLAGS + TI_FLAG_BYTE_CWP)
82#define TI_CURRENT_DS (TI_FLAGS + TI_FLAG_BYTE_CURRENT_DS)
83#define TI_FPDEPTH (TI_FLAGS + TI_FLAG_BYTE_FPDEPTH)
84#define TI_WSAVED (TI_FLAGS + TI_FLAG_BYTE_WSAVED)
85#define TI_FPSAVED 0x00000010
86#define TI_KSP 0x00000018
87#define TI_FAULT_ADDR 0x00000020
88#define TI_KREGS 0x00000028
89#define TI_EXEC_DOMAIN 0x00000030
90#define TI_PRE_COUNT 0x00000038
91#define TI_NEW_CHILD 0x0000003c
92#define TI_SYS_NOERROR 0x0000003d
93#define TI_CPU 0x0000003e
94#define TI_UTRAPS 0x00000040
95#define TI_REG_WINDOW 0x00000048
96#define TI_RWIN_SPTRS 0x000003c8
97#define TI_GSR 0x00000400
98#define TI_XFSR 0x00000438
99#define TI_USER_CNTD0 0x00000470
100#define TI_USER_CNTD1 0x00000478
101#define TI_KERN_CNTD0 0x00000480
102#define TI_KERN_CNTD1 0x00000488
103#define TI_PCR 0x00000490
104#define TI_RESTART_BLOCK 0x00000498
105#define TI_KUNA_REGS 0x000004c0
106#define TI_KUNA_INSN 0x000004c8
107#define TI_FPREGS 0x00000500
108
109/* We embed this in the uppermost byte of thread_info->flags */
110#define FAULT_CODE_WRITE 0x01 /* Write access, implies D-TLB */
111#define FAULT_CODE_DTLB 0x02 /* Miss happened in D-TLB */
112#define FAULT_CODE_ITLB 0x04 /* Miss happened in I-TLB */
113#define FAULT_CODE_WINFIXUP 0x08 /* Miss happened during spill/fill */
114#define FAULT_CODE_BLKCOMMIT 0x10 /* Use blk-commit ASI in copy_page */
115
116#if PAGE_SHIFT == 13
117#define THREAD_SIZE (2*PAGE_SIZE)
118#define THREAD_SHIFT (PAGE_SHIFT + 1)
119#else /* PAGE_SHIFT == 13 */
120#define THREAD_SIZE PAGE_SIZE
121#define THREAD_SHIFT PAGE_SHIFT
122#endif /* PAGE_SHIFT == 13 */
123
124#define PREEMPT_ACTIVE 0x4000000
125
126/*
127 * macros/functions for gaining access to the thread information structure
128 *
129 * preempt_count needs to be 1 initially, until the scheduler is functional.
130 */
131#ifndef __ASSEMBLY__
132
133#define INIT_THREAD_INFO(tsk) \
134{ \
135 .task = &tsk, \
136 .flags = ((unsigned long)ASI_P) << TI_FLAG_CURRENT_DS_SHIFT, \
137 .exec_domain = &default_exec_domain, \
138 .preempt_count = 1, \
139 .restart_block = { \
140 .fn = do_no_restart_syscall, \
141 }, \
142}
143
144#define init_thread_info (init_thread_union.thread_info)
145#define init_stack (init_thread_union.stack)
146
147/* how to get the thread information struct from C */
148register struct thread_info *current_thread_info_reg asm("g6");
149#define current_thread_info() (current_thread_info_reg)
150
151/* thread information allocation */
152#if PAGE_SHIFT == 13
153#define __THREAD_INFO_ORDER 1
154#else /* PAGE_SHIFT == 13 */
155#define __THREAD_INFO_ORDER 0
156#endif /* PAGE_SHIFT == 13 */
157
158#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
159
160#ifdef CONFIG_DEBUG_STACK_USAGE
161#define alloc_thread_info(tsk) \
162({ \
163 struct thread_info *ret; \
164 \
165 ret = (struct thread_info *) \
166 __get_free_pages(GFP_KERNEL, __THREAD_INFO_ORDER); \
167 if (ret) \
168 memset(ret, 0, PAGE_SIZE<<__THREAD_INFO_ORDER); \
169 ret; \
170})
171#else
172#define alloc_thread_info(tsk) \
173 ((struct thread_info *)__get_free_pages(GFP_KERNEL, __THREAD_INFO_ORDER))
174#endif
175
176#define free_thread_info(ti) \
177 free_pages((unsigned long)(ti),__THREAD_INFO_ORDER)
178
179#define __thread_flag_byte_ptr(ti) \
180 ((unsigned char *)(&((ti)->flags)))
181#define __cur_thread_flag_byte_ptr __thread_flag_byte_ptr(current_thread_info())
182
183#define get_thread_fault_code() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_FAULT_CODE])
184#define set_thread_fault_code(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_FAULT_CODE] = (val))
185#define get_thread_wstate() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_WSTATE])
186#define set_thread_wstate(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_WSTATE] = (val))
187#define get_thread_cwp() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_CWP])
188#define set_thread_cwp(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_CWP] = (val))
189#define get_thread_current_ds() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_CURRENT_DS])
190#define set_thread_current_ds(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_CURRENT_DS] = (val))
191#define get_thread_fpdepth() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_FPDEPTH])
192#define set_thread_fpdepth(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_FPDEPTH] = (val))
193#define get_thread_wsaved() (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_WSAVED])
194#define set_thread_wsaved(val) (__cur_thread_flag_byte_ptr[TI_FLAG_BYTE_WSAVED] = (val))
195
196#endif /* !(__ASSEMBLY__) */
197
198/*
199 * Thread information flags, only 16 bits are available as we encode
200 * other values into the upper 6 bytes.
201 *
202 * On trap return we need to test several values:
203 *
204 * user: need_resched, notify_resume, sigpending, wsaved, perfctr
205 * kernel: fpdepth
206 *
207 * So to check for work in the kernel case we simply load the fpdepth
208 * byte out of the flags and test it. For the user case we encode the
209 * lower 3 bytes of flags as follows:
210 * ----------------------------------------
211 * | wsaved | flags byte 1 | flags byte 2 |
212 * ----------------------------------------
213 * This optimizes the user test into:
214 * ldx [%g6 + TI_FLAGS], REG1
215 * sethi %hi(_TIF_USER_WORK_MASK), REG2
216 * or REG2, %lo(_TIF_USER_WORK_MASK), REG2
217 * andcc REG1, REG2, %g0
218 * be,pt no_work_to_do
219 * nop
220 */
221#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
222/* flags bit 1 is available */
223#define TIF_SIGPENDING 2 /* signal pending */
224#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
225#define TIF_PERFCTR 4 /* performance counters active */
226#define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
227/* flag bit 6 is available */
228#define TIF_32BIT 7 /* 32-bit binary */
229/* flag bit 8 is available */
230#define TIF_SECCOMP 9 /* secure computing */
231#define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
232/* flag bit 11 is available */
233/* NOTE: Thread flags >= 12 should be ones we have no interest
234 * in using in assembly, else we can't use the mask as
235 * an immediate value in instructions such as andcc.
236 */
237#define TIF_ABI_PENDING 12
238#define TIF_MEMDIE 13
239#define TIF_POLLING_NRFLAG 14
240
241#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
242#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
243#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
244#define _TIF_PERFCTR (1<<TIF_PERFCTR)
245#define _TIF_UNALIGNED (1<<TIF_UNALIGNED)
246#define _TIF_32BIT (1<<TIF_32BIT)
247#define _TIF_SECCOMP (1<<TIF_SECCOMP)
248#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
249#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
250#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
251
252#define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
253 (_TIF_SIGPENDING | \
254 _TIF_NEED_RESCHED | _TIF_PERFCTR))
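Restating the comment above in C terms: on this big-endian layout, flag byte N of the 64-bit flags word corresponds to a shift of 56 - 8*N (which is why TI_FLAG_BYTE_WSAVED == 5 pairs with TI_FLAG_WSAVED_SHIFT == 16), and the user-return test is a single mask against _TIF_USER_WORK_MASK. Illustrative sketch only; the 'example_' helpers are hypothetical:

/* Sketch, not part of the header: C equivalents of the assembly test and
 * of the __thread_flag_byte_ptr() indexing described above.
 */
static inline int example_user_work_pending(struct thread_info *ti)
{
	/* Saved user windows, or any of the low TIF_* work bits set? */
	return (ti->flags & _TIF_USER_WORK_MASK) != 0;
}

static inline unsigned char example_flag_byte(struct thread_info *ti, int n)
{
	/* Same value as __thread_flag_byte_ptr(ti)[n] on big-endian sparc64. */
	return (ti->flags >> (56 - 8 * n)) & 0xff;
}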
255
256/*
257 * Thread-synchronous status.
258 *
259 * This is different from the flags in that nobody else
260 * ever touches our thread-synchronous status, so we don't
261 * have to worry about atomic accesses.
262 *
263 * Note that there are only 8 bits available.
264 */
265#define TS_RESTORE_SIGMASK 0x0001 /* restore signal mask in do_signal() */
266
267#ifndef __ASSEMBLY__
268#define HAVE_SET_RESTORE_SIGMASK 1
269static inline void set_restore_sigmask(void)
270{
271 struct thread_info *ti = current_thread_info();
272 ti->status |= TS_RESTORE_SIGMASK;
273 set_bit(TIF_SIGPENDING, &ti->flags);
274}
275#endif /* !__ASSEMBLY__ */
276
277#endif /* __KERNEL__ */
278
279#endif /* _ASM_THREAD_INFO_H */
diff --git a/include/asm-sparc/timer.h b/include/asm-sparc/timer.h
index d909565f9410..475baa05a96e 100644
--- a/include/asm-sparc/timer.h
+++ b/include/asm-sparc/timer.h
@@ -1,109 +1,8 @@
1/* 1#ifndef ___ASM_SPARC_TIMER_H
2 * timer.h: Definitions for the timer chips on the Sparc. 2#define ___ASM_SPARC_TIMER_H
3 * 3#if defined(__sparc__) && defined(__arch64__)
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) 4#include <asm-sparc/timer_64.h>
5 */
6
7
8#ifndef _SPARC_TIMER_H
9#define _SPARC_TIMER_H
10
11#include <asm/system.h> /* For SUN4M_NCPUS */
12#include <asm/sun4paddr.h>
13#include <asm/btfixup.h>
14
15/* Timer structures. The interrupt timer has two properties which
16 * are the counter (which is handled in do_timer in sched.c) and the limit.
17 * This limit is where the timer's counter 'wraps' around. Oddly enough,
18 * the sun4c timer when it hits the limit wraps back to 1 and not zero
19 * thus when calculating the value at which it will fire a microsecond you
20 * must adjust by one. Thanks SUN for designing such great hardware ;(
21 */
22
23/* Note that I am only going to use the timer that interrupts at
24 * Sparc IRQ 10. There is another one available that can fire at
25 * IRQ 14. Currently it is left untouched, we keep the PROM's limit
26 * register value and let the prom take these interrupts. This allows
27 * L1-A to work.
28 */
29
30struct sun4c_timer_info {
31 __volatile__ unsigned int cur_count10;
32 __volatile__ unsigned int timer_limit10;
33 __volatile__ unsigned int cur_count14;
34 __volatile__ unsigned int timer_limit14;
35};
36
37#define SUN4C_TIMER_PHYSADDR 0xf3000000
38#ifdef CONFIG_SUN4
39#define SUN_TIMER_PHYSADDR SUN4_300_TIMER_PHYSADDR
40#else 5#else
41#define SUN_TIMER_PHYSADDR SUN4C_TIMER_PHYSADDR 6#include <asm-sparc/timer_32.h>
7#endif
42#endif 8#endif
43
44/* A sun4m has two blocks of registers which are probably of the same
45 * structure. LSI Logic's L64851 is told to _decrement_ from the limit
46 * value. Aurora behaves similarly but its limit value is compacted in
47 * other fashion (it's wider). Documented fields are defined here.
48 */
49
50/* As with the interrupt register, we have two classes of timer registers
51 * which are per-cpu and master. Per-cpu timers only hit that cpu and are
52 * only level 14 ticks, master timer hits all cpus and is level 10.
53 */
54
55#define SUN4M_PRM_CNT_L 0x80000000
56#define SUN4M_PRM_CNT_LVALUE 0x7FFFFC00
57
58struct sun4m_timer_percpu_info {
59 __volatile__ unsigned int l14_timer_limit; /* Initial value is 0x009c4000 */
60 __volatile__ unsigned int l14_cur_count;
61
62 /* This register appears to be write only and/or inaccessible
63 * on Uni-Processor sun4m machines.
64 */
65 __volatile__ unsigned int l14_limit_noclear; /* Data access error is here */
66
67 __volatile__ unsigned int cntrl; /* =1 after POST on Aurora */
68 __volatile__ unsigned char space[PAGE_SIZE - 16];
69};
70
71struct sun4m_timer_regs {
72 struct sun4m_timer_percpu_info cpu_timers[SUN4M_NCPUS];
73 volatile unsigned int l10_timer_limit;
74 volatile unsigned int l10_cur_count;
75
76 /* Again, this appears to be write only and/or inaccessible
77 * on uni-processor sun4m machines.
78 */
79 volatile unsigned int l10_limit_noclear;
80
81 /* This register too, it must be magic. */
82 volatile unsigned int foobar;
83
84 volatile unsigned int cfg; /* equals zero at boot time... */
85};
86
87extern struct sun4m_timer_regs *sun4m_timers;
88
89#define SUN4D_PRM_CNT_L 0x80000000
90#define SUN4D_PRM_CNT_LVALUE 0x7FFFFC00
91
92struct sun4d_timer_regs {
93 volatile unsigned int l10_timer_limit;
94 volatile unsigned int l10_cur_countx;
95 volatile unsigned int l10_limit_noclear;
96 volatile unsigned int ctrl;
97 volatile unsigned int l10_cur_count;
98};
99
100extern struct sun4d_timer_regs *sun4d_timers;
101
102extern __volatile__ unsigned int *master_l10_counter;
103extern __volatile__ unsigned int *master_l10_limit;
104
105/* FIXME: Make do_[gs]ettimeofday btfixup calls */
106BTFIXUPDEF_CALL(int, bus_do_settimeofday, struct timespec *tv)
107#define bus_do_settimeofday(tv) BTFIXUP_CALL(bus_do_settimeofday)(tv)
108
109#endif /* !(_SPARC_TIMER_H) */
diff --git a/include/asm-sparc/timer_32.h b/include/asm-sparc/timer_32.h
new file mode 100644
index 000000000000..361e53898dd7
--- /dev/null
+++ b/include/asm-sparc/timer_32.h
@@ -0,0 +1,107 @@
1/*
2 * timer.h: Definitions for the timer chips on the Sparc.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7
8#ifndef _SPARC_TIMER_H
9#define _SPARC_TIMER_H
10
11#include <asm/system.h> /* For SUN4M_NCPUS */
12#include <asm/sun4paddr.h>
13#include <asm/btfixup.h>
14
15/* Timer structures. The interrupt timer has two properties which
16 * are the counter (which is handled in do_timer in sched.c) and the limit.
17 * This limit is where the timer's counter 'wraps' around. Oddly enough,
18 * the sun4c timer when it hits the limit wraps back to 1 and not zero
19 * thus when calculating the value at which it will fire a microsecond you
20 * must adjust by one. Thanks SUN for designing such great hardware ;(
21 */
22
23/* Note that I am only going to use the timer that interrupts at
24 * Sparc IRQ 10. There is another one available that can fire at
25 * IRQ 14. Currently it is left untouched, we keep the PROM's limit
26 * register value and let the prom take these interrupts. This allows
27 * L1-A to work.
28 */
29
30struct sun4c_timer_info {
31 __volatile__ unsigned int cur_count10;
32 __volatile__ unsigned int timer_limit10;
33 __volatile__ unsigned int cur_count14;
34 __volatile__ unsigned int timer_limit14;
35};
36
37#define SUN4C_TIMER_PHYSADDR 0xf3000000
38#ifdef CONFIG_SUN4
39#define SUN_TIMER_PHYSADDR SUN4_300_TIMER_PHYSADDR
40#else
41#define SUN_TIMER_PHYSADDR SUN4C_TIMER_PHYSADDR
42#endif
43
44/* A sun4m has two blocks of registers which are probably of the same
45 * structure. LSI Logic's L64851 is told to _decrement_ from the limit
46 * value. Aurora behaves similarly but its limit value is compacted in
47 * other fashion (it's wider). Documented fields are defined here.
48 */
49
50/* As with the interrupt register, we have two classes of timer registers
51 * which are per-cpu and master. Per-cpu timers only hit that cpu and are
52 * only level 14 ticks, master timer hits all cpus and is level 10.
53 */
54
55#define SUN4M_PRM_CNT_L 0x80000000
56#define SUN4M_PRM_CNT_LVALUE 0x7FFFFC00
57
58struct sun4m_timer_percpu_info {
59 __volatile__ unsigned int l14_timer_limit; /* Initial value is 0x009c4000 */
60 __volatile__ unsigned int l14_cur_count;
61
62 /* This register appears to be write only and/or inaccessible
63 * on Uni-Processor sun4m machines.
64 */
65 __volatile__ unsigned int l14_limit_noclear; /* Data access error is here */
66
67 __volatile__ unsigned int cntrl; /* =1 after POST on Aurora */
68 __volatile__ unsigned char space[PAGE_SIZE - 16];
69};
70
71struct sun4m_timer_regs {
72 struct sun4m_timer_percpu_info cpu_timers[SUN4M_NCPUS];
73 volatile unsigned int l10_timer_limit;
74 volatile unsigned int l10_cur_count;
75
76 /* Again, this appears to be write only and/or inaccessible
77 * on uni-processor sun4m machines.
78 */
79 volatile unsigned int l10_limit_noclear;
80
81 /* This register too, it must be magic. */
82 volatile unsigned int foobar;
83
84 volatile unsigned int cfg; /* equals zero at boot time... */
85};
86
87#define SUN4D_PRM_CNT_L 0x80000000
88#define SUN4D_PRM_CNT_LVALUE 0x7FFFFC00
89
90struct sun4d_timer_regs {
91 volatile unsigned int l10_timer_limit;
92 volatile unsigned int l10_cur_countx;
93 volatile unsigned int l10_limit_noclear;
94 volatile unsigned int ctrl;
95 volatile unsigned int l10_cur_count;
96};
97
98extern struct sun4d_timer_regs *sun4d_timers;
99
100extern __volatile__ unsigned int *master_l10_counter;
101extern __volatile__ unsigned int *master_l10_limit;
102
103/* FIXME: Make do_[gs]ettimeofday btfixup calls */
104BTFIXUPDEF_CALL(int, bus_do_settimeofday, struct timespec *tv)
105#define bus_do_settimeofday(tv) BTFIXUP_CALL(bus_do_settimeofday)(tv)
106
107#endif /* !(_SPARC_TIMER_H) */
diff --git a/include/asm-sparc/timer_64.h b/include/asm-sparc/timer_64.h
new file mode 100644
index 000000000000..5b779fd1f788
--- /dev/null
+++ b/include/asm-sparc/timer_64.h
@@ -0,0 +1,30 @@
1/* timer.h: System timer definitions for sun5.
2 *
3 * Copyright (C) 1997, 2008 David S. Miller (davem@davemloft.net)
4 */
5
6#ifndef _SPARC64_TIMER_H
7#define _SPARC64_TIMER_H
8
9#include <linux/types.h>
10#include <linux/init.h>
11
12struct sparc64_tick_ops {
13 unsigned long (*get_tick)(void);
14 int (*add_compare)(unsigned long);
15 unsigned long softint_mask;
16 void (*disable_irq)(void);
17
18 void (*init_tick)(void);
19 unsigned long (*add_tick)(unsigned long);
20
21 char *name;
22};
23
24extern struct sparc64_tick_ops *tick_ops;
25
26extern unsigned long sparc64_get_clock_tick(unsigned int cpu);
27extern void __devinit setup_sparc64_timer(void);
28extern void __init time_init(void);
29
30#endif /* _SPARC64_TIMER_H */
diff --git a/include/asm-sparc/timex.h b/include/asm-sparc/timex.h
index 71b45c90ccae..01d9f199d452 100644
--- a/include/asm-sparc/timex.h
+++ b/include/asm-sparc/timex.h
@@ -1,15 +1,8 @@
1/* 1#ifndef ___ASM_SPARC_TIMEX_H
2 * linux/include/asm-sparc/timex.h 2#define ___ASM_SPARC_TIMEX_H
3 * 3#if defined(__sparc__) && defined(__arch64__)
4 * sparc architecture timex specifications 4#include <asm-sparc/timex_64.h>
5 */ 5#else
6#ifndef _ASMsparc_TIMEX_H 6#include <asm-sparc/timex_32.h>
7#define _ASMsparc_TIMEX_H 7#endif
8
9#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
10
11/* XXX Maybe do something better at some point... -DaveM */
12typedef unsigned long cycles_t;
13#define get_cycles() (0)
14
15#endif 8#endif
diff --git a/include/asm-sparc/timex_32.h b/include/asm-sparc/timex_32.h
new file mode 100644
index 000000000000..71b45c90ccae
--- /dev/null
+++ b/include/asm-sparc/timex_32.h
@@ -0,0 +1,15 @@
1/*
2 * linux/include/asm-sparc/timex.h
3 *
4 * sparc architecture timex specifications
5 */
6#ifndef _ASMsparc_TIMEX_H
7#define _ASMsparc_TIMEX_H
8
9#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
10
11/* XXX Maybe do something better at some point... -DaveM */
12typedef unsigned long cycles_t;
13#define get_cycles() (0)
14
15#endif
diff --git a/include/asm-sparc/timex_64.h b/include/asm-sparc/timex_64.h
new file mode 100644
index 000000000000..c622535c4560
--- /dev/null
+++ b/include/asm-sparc/timex_64.h
@@ -0,0 +1,19 @@
1/*
2 * linux/include/asm-sparc64/timex.h
3 *
4 * sparc64 architecture timex specifications
5 */
6#ifndef _ASMsparc64_TIMEX_H
7#define _ASMsparc64_TIMEX_H
8
9#include <asm/timer.h>
10
11#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
12
13/* Getting on the cycle counter on sparc64. */
14typedef unsigned long cycles_t;
15#define get_cycles() tick_ops->get_tick()
16
17#define ARCH_HAS_READ_CURRENT_TIMER
18
19#endif
diff --git a/include/asm-sparc/tlb.h b/include/asm-sparc/tlb.h
index 6d02d1ce53f3..a821057327c4 100644
--- a/include/asm-sparc/tlb.h
+++ b/include/asm-sparc/tlb.h
@@ -1,24 +1,8 @@
1#ifndef _SPARC_TLB_H 1#ifndef ___ASM_SPARC_TLB_H
2#define _SPARC_TLB_H 2#define ___ASM_SPARC_TLB_H
3 3#if defined(__sparc__) && defined(__arch64__)
4#define tlb_start_vma(tlb, vma) \ 4#include <asm-sparc/tlb_64.h>
5do { \ 5#else
6 flush_cache_range(vma, vma->vm_start, vma->vm_end); \ 6#include <asm-sparc/tlb_32.h>
7} while (0) 7#endif
8 8#endif
9#define tlb_end_vma(tlb, vma) \
10do { \
11 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
12} while (0)
13
14#define __tlb_remove_tlb_entry(tlb, pte, address) \
15 do { } while (0)
16
17#define tlb_flush(tlb) \
18do { \
19 flush_tlb_mm((tlb)->mm); \
20} while (0)
21
22#include <asm-generic/tlb.h>
23
24#endif /* _SPARC_TLB_H */
diff --git a/include/asm-sparc/tlb_32.h b/include/asm-sparc/tlb_32.h
new file mode 100644
index 000000000000..6d02d1ce53f3
--- /dev/null
+++ b/include/asm-sparc/tlb_32.h
@@ -0,0 +1,24 @@
1#ifndef _SPARC_TLB_H
2#define _SPARC_TLB_H
3
4#define tlb_start_vma(tlb, vma) \
5do { \
6 flush_cache_range(vma, vma->vm_start, vma->vm_end); \
7} while (0)
8
9#define tlb_end_vma(tlb, vma) \
10do { \
11 flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
12} while (0)
13
14#define __tlb_remove_tlb_entry(tlb, pte, address) \
15 do { } while (0)
16
17#define tlb_flush(tlb) \
18do { \
19 flush_tlb_mm((tlb)->mm); \
20} while (0)
21
22#include <asm-generic/tlb.h>
23
24#endif /* _SPARC_TLB_H */
diff --git a/include/asm-sparc/tlb_64.h b/include/asm-sparc/tlb_64.h
new file mode 100644
index 000000000000..ec81cdedef2c
--- /dev/null
+++ b/include/asm-sparc/tlb_64.h
@@ -0,0 +1,111 @@
1#ifndef _SPARC64_TLB_H
2#define _SPARC64_TLB_H
3
4#include <linux/swap.h>
5#include <linux/pagemap.h>
6#include <asm/pgalloc.h>
7#include <asm/tlbflush.h>
8#include <asm/mmu_context.h>
9
10#define TLB_BATCH_NR 192
11
12/*
13 * For UP we don't need to worry about TLB flush
 14 * and page free order so much.
15 */
16#ifdef CONFIG_SMP
17 #define FREE_PTE_NR 506
18 #define tlb_fast_mode(bp) ((bp)->pages_nr == ~0U)
19#else
20 #define FREE_PTE_NR 1
21 #define tlb_fast_mode(bp) 1
22#endif
23
24struct mmu_gather {
25 struct mm_struct *mm;
26 unsigned int pages_nr;
27 unsigned int need_flush;
28 unsigned int fullmm;
29 unsigned int tlb_nr;
30 unsigned long vaddrs[TLB_BATCH_NR];
31 struct page *pages[FREE_PTE_NR];
32};
33
34DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
35
36#ifdef CONFIG_SMP
37extern void smp_flush_tlb_pending(struct mm_struct *,
38 unsigned long, unsigned long *);
39#endif
40
41extern void __flush_tlb_pending(unsigned long, unsigned long, unsigned long *);
42extern void flush_tlb_pending(void);
43
44static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
45{
46 struct mmu_gather *mp = &get_cpu_var(mmu_gathers);
47
48 BUG_ON(mp->tlb_nr);
49
50 mp->mm = mm;
51 mp->pages_nr = num_online_cpus() > 1 ? 0U : ~0U;
52 mp->fullmm = full_mm_flush;
53
54 return mp;
55}
56
57
58static inline void tlb_flush_mmu(struct mmu_gather *mp)
59{
60 if (mp->need_flush) {
61 free_pages_and_swap_cache(mp->pages, mp->pages_nr);
62 mp->pages_nr = 0;
63 mp->need_flush = 0;
64 }
65
66}
67
68#ifdef CONFIG_SMP
69extern void smp_flush_tlb_mm(struct mm_struct *mm);
70#define do_flush_tlb_mm(mm) smp_flush_tlb_mm(mm)
71#else
72#define do_flush_tlb_mm(mm) __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT)
73#endif
74
75static inline void tlb_finish_mmu(struct mmu_gather *mp, unsigned long start, unsigned long end)
76{
77 tlb_flush_mmu(mp);
78
79 if (mp->fullmm)
80 mp->fullmm = 0;
81 else
82 flush_tlb_pending();
83
84 /* keep the page table cache within bounds */
85 check_pgt_cache();
86
87 put_cpu_var(mmu_gathers);
88}
89
90static inline void tlb_remove_page(struct mmu_gather *mp, struct page *page)
91{
92 if (tlb_fast_mode(mp)) {
93 free_page_and_swap_cache(page);
94 return;
95 }
96 mp->need_flush = 1;
97 mp->pages[mp->pages_nr++] = page;
98 if (mp->pages_nr >= FREE_PTE_NR)
99 tlb_flush_mmu(mp);
100}
101
102#define tlb_remove_tlb_entry(mp,ptep,addr) do { } while (0)
103#define pte_free_tlb(mp, ptepage) pte_free((mp)->mm, ptepage)
104#define pmd_free_tlb(mp, pmdp) pmd_free((mp)->mm, pmdp)
105#define pud_free_tlb(tlb,pudp) __pud_free_tlb(tlb,pudp)
106
107#define tlb_migrate_finish(mm) do { } while (0)
108#define tlb_start_vma(tlb, vma) do { } while (0)
109#define tlb_end_vma(tlb, vma) do { } while (0)
110
111#endif /* _SPARC64_TLB_H */
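For orientation, the mmu_gather interfaces defined above are consumed as a gather/remove/finish sequence by the generic unmap paths. The following is a simplified, hypothetical calling sketch, not the actual mm code:

/* Simplified illustration of how the interfaces above fit together when a
 * batch of user pages is torn down. 'example_unmap_pages' and its
 * arguments are hypothetical.
 */
static void example_unmap_pages(struct mm_struct *mm,
				struct page **pages, unsigned int nr)
{
	struct mmu_gather *mp;
	unsigned int i;

	mp = tlb_gather_mmu(mm, 0);	/* not a full-mm teardown */

	for (i = 0; i < nr; i++)
		tlb_remove_page(mp, pages[i]);	/* batches, may flush */

	/* Flush whatever is still batched and drop the per-cpu gather. */
	tlb_finish_mmu(mp, 0, 0);
}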
diff --git a/include/asm-sparc/tlbflush.h b/include/asm-sparc/tlbflush.h
index b957e29d2ae1..6e6bc12227b8 100644
--- a/include/asm-sparc/tlbflush.h
+++ b/include/asm-sparc/tlbflush.h
@@ -1,60 +1,8 @@
1#ifndef _SPARC_TLBFLUSH_H 1#ifndef ___ASM_SPARC_TLBFLUSH_H
2#define _SPARC_TLBFLUSH_H 2#define ___ASM_SPARC_TLBFLUSH_H
3 3#if defined(__sparc__) && defined(__arch64__)
4#include <linux/mm.h> 4#include <asm-sparc/tlbflush_64.h>
5// #include <asm/processor.h> 5#else
6 6#include <asm-sparc/tlbflush_32.h>
7/* 7#endif
8 * TLB flushing: 8#endif
9 *
10 * - flush_tlb() flushes the current mm struct TLBs XXX Exists?
11 * - flush_tlb_all() flushes all processes TLBs
12 * - flush_tlb_mm(mm) flushes the specified mm context TLB's
13 * - flush_tlb_page(vma, vmaddr) flushes one page
14 * - flush_tlb_range(vma, start, end) flushes a range of pages
15 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
16 */
17
18#ifdef CONFIG_SMP
19
20BTFIXUPDEF_CALL(void, local_flush_tlb_all, void)
21BTFIXUPDEF_CALL(void, local_flush_tlb_mm, struct mm_struct *)
22BTFIXUPDEF_CALL(void, local_flush_tlb_range, struct vm_area_struct *, unsigned long, unsigned long)
23BTFIXUPDEF_CALL(void, local_flush_tlb_page, struct vm_area_struct *, unsigned long)
24
25#define local_flush_tlb_all() BTFIXUP_CALL(local_flush_tlb_all)()
26#define local_flush_tlb_mm(mm) BTFIXUP_CALL(local_flush_tlb_mm)(mm)
27#define local_flush_tlb_range(vma,start,end) BTFIXUP_CALL(local_flush_tlb_range)(vma,start,end)
28#define local_flush_tlb_page(vma,addr) BTFIXUP_CALL(local_flush_tlb_page)(vma,addr)
29
30extern void smp_flush_tlb_all(void);
31extern void smp_flush_tlb_mm(struct mm_struct *mm);
32extern void smp_flush_tlb_range(struct vm_area_struct *vma,
33 unsigned long start,
34 unsigned long end);
35extern void smp_flush_tlb_page(struct vm_area_struct *mm, unsigned long page);
36
37#endif /* CONFIG_SMP */
38
39BTFIXUPDEF_CALL(void, flush_tlb_all, void)
40BTFIXUPDEF_CALL(void, flush_tlb_mm, struct mm_struct *)
41BTFIXUPDEF_CALL(void, flush_tlb_range, struct vm_area_struct *, unsigned long, unsigned long)
42BTFIXUPDEF_CALL(void, flush_tlb_page, struct vm_area_struct *, unsigned long)
43
44#define flush_tlb_all() BTFIXUP_CALL(flush_tlb_all)()
45#define flush_tlb_mm(mm) BTFIXUP_CALL(flush_tlb_mm)(mm)
46#define flush_tlb_range(vma,start,end) BTFIXUP_CALL(flush_tlb_range)(vma,start,end)
47#define flush_tlb_page(vma,addr) BTFIXUP_CALL(flush_tlb_page)(vma,addr)
48
49// #define flush_tlb() flush_tlb_mm(current->active_mm) /* XXX Sure? */
50
51/*
52 * This is a kludge, until I know better. --zaitcev XXX
53 */
54static inline void flush_tlb_kernel_range(unsigned long start,
55 unsigned long end)
56{
57 flush_tlb_all();
58}
59
60#endif /* _SPARC_TLBFLUSH_H */
diff --git a/include/asm-sparc/tlbflush_32.h b/include/asm-sparc/tlbflush_32.h
new file mode 100644
index 000000000000..fe0a71abc9bb
--- /dev/null
+++ b/include/asm-sparc/tlbflush_32.h
@@ -0,0 +1,60 @@
1#ifndef _SPARC_TLBFLUSH_H
2#define _SPARC_TLBFLUSH_H
3
4#include <linux/mm.h>
5// #include <asm/processor.h>
6
7/*
8 * TLB flushing:
9 *
10 * - flush_tlb() flushes the current mm struct TLBs XXX Exists?
11 * - flush_tlb_all() flushes all processes TLBs
12 * - flush_tlb_mm(mm) flushes the specified mm context TLB's
13 * - flush_tlb_page(vma, vmaddr) flushes one page
14 * - flush_tlb_range(vma, start, end) flushes a range of pages
15 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
16 */
17
18#ifdef CONFIG_SMP
19
20BTFIXUPDEF_CALL(void, local_flush_tlb_all, void)
21BTFIXUPDEF_CALL(void, local_flush_tlb_mm, struct mm_struct *)
22BTFIXUPDEF_CALL(void, local_flush_tlb_range, struct vm_area_struct *, unsigned long, unsigned long)
23BTFIXUPDEF_CALL(void, local_flush_tlb_page, struct vm_area_struct *, unsigned long)
24
25#define local_flush_tlb_all() BTFIXUP_CALL(local_flush_tlb_all)()
26#define local_flush_tlb_mm(mm) BTFIXUP_CALL(local_flush_tlb_mm)(mm)
27#define local_flush_tlb_range(vma,start,end) BTFIXUP_CALL(local_flush_tlb_range)(vma,start,end)
28#define local_flush_tlb_page(vma,addr) BTFIXUP_CALL(local_flush_tlb_page)(vma,addr)
29
30extern void smp_flush_tlb_all(void);
31extern void smp_flush_tlb_mm(struct mm_struct *mm);
32extern void smp_flush_tlb_range(struct vm_area_struct *vma,
33 unsigned long start,
34 unsigned long end);
35extern void smp_flush_tlb_page(struct vm_area_struct *mm, unsigned long page);
36
37#endif /* CONFIG_SMP */
38
39BTFIXUPDEF_CALL(void, flush_tlb_all, void)
40BTFIXUPDEF_CALL(void, flush_tlb_mm, struct mm_struct *)
41BTFIXUPDEF_CALL(void, flush_tlb_range, struct vm_area_struct *, unsigned long, unsigned long)
42BTFIXUPDEF_CALL(void, flush_tlb_page, struct vm_area_struct *, unsigned long)
43
44#define flush_tlb_all() BTFIXUP_CALL(flush_tlb_all)()
45#define flush_tlb_mm(mm) BTFIXUP_CALL(flush_tlb_mm)(mm)
46#define flush_tlb_range(vma,start,end) BTFIXUP_CALL(flush_tlb_range)(vma,start,end)
47#define flush_tlb_page(vma,addr) BTFIXUP_CALL(flush_tlb_page)(vma,addr)
48
49// #define flush_tlb() flush_tlb_mm(current->active_mm) /* XXX Sure? */
50
51/*
52 * This is a kludge, until I know better. --zaitcev XXX
53 */
54static inline void flush_tlb_kernel_range(unsigned long start,
55 unsigned long end)
56{
57 flush_tlb_all();
58}
59
60#endif /* _SPARC_TLBFLUSH_H */
diff --git a/include/asm-sparc/tlbflush_64.h b/include/asm-sparc/tlbflush_64.h
new file mode 100644
index 000000000000..fbb675dbe0c9
--- /dev/null
+++ b/include/asm-sparc/tlbflush_64.h
@@ -0,0 +1,44 @@
1#ifndef _SPARC64_TLBFLUSH_H
2#define _SPARC64_TLBFLUSH_H
3
4#include <linux/mm.h>
5#include <asm/mmu_context.h>
6
7/* TSB flush operations. */
8struct mmu_gather;
9extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
10extern void flush_tsb_user(struct mmu_gather *mp);
11
12/* TLB flush operations. */
13
14extern void flush_tlb_pending(void);
15
16#define flush_tlb_range(vma,start,end) \
17 do { (void)(start); flush_tlb_pending(); } while (0)
18#define flush_tlb_page(vma,addr) flush_tlb_pending()
19#define flush_tlb_mm(mm) flush_tlb_pending()
20
21/* Local cpu only. */
22extern void __flush_tlb_all(void);
23
24extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
25
26#ifndef CONFIG_SMP
27
28#define flush_tlb_kernel_range(start,end) \
29do { flush_tsb_kernel_range(start,end); \
30 __flush_tlb_kernel_range(start,end); \
31} while (0)
32
33#else /* CONFIG_SMP */
34
35extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
36
37#define flush_tlb_kernel_range(start, end) \
38do { flush_tsb_kernel_range(start,end); \
39 smp_flush_tlb_kernel_range(start, end); \
40} while (0)
41
42#endif /* ! CONFIG_SMP */
43
44#endif /* _SPARC64_TLBFLUSH_H */
diff --git a/include/asm-sparc/topology.h b/include/asm-sparc/topology.h
index ee5ac9c9da28..ed13630f32e2 100644
--- a/include/asm-sparc/topology.h
+++ b/include/asm-sparc/topology.h
@@ -1,6 +1,8 @@
1#ifndef _ASM_SPARC_TOPOLOGY_H 1#ifndef ___ASM_SPARC_TOPOLOGY_H
2#define _ASM_SPARC_TOPOLOGY_H 2#define ___ASM_SPARC_TOPOLOGY_H
3 3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-generic/topology.h> 4#include <asm-sparc/topology_64.h>
5 5#else
6#endif /* _ASM_SPARC_TOPOLOGY_H */ 6#include <asm-sparc/topology_32.h>
7#endif
8#endif
diff --git a/include/asm-sparc/topology_32.h b/include/asm-sparc/topology_32.h
new file mode 100644
index 000000000000..ee5ac9c9da28
--- /dev/null
+++ b/include/asm-sparc/topology_32.h
@@ -0,0 +1,6 @@
1#ifndef _ASM_SPARC_TOPOLOGY_H
2#define _ASM_SPARC_TOPOLOGY_H
3
4#include <asm-generic/topology.h>
5
6#endif /* _ASM_SPARC_TOPOLOGY_H */
diff --git a/include/asm-sparc/topology_64.h b/include/asm-sparc/topology_64.h
new file mode 100644
index 000000000000..001c04027c82
--- /dev/null
+++ b/include/asm-sparc/topology_64.h
@@ -0,0 +1,86 @@
1#ifndef _ASM_SPARC64_TOPOLOGY_H
2#define _ASM_SPARC64_TOPOLOGY_H
3
4#ifdef CONFIG_NUMA
5
6#include <asm/mmzone.h>
7
8static inline int cpu_to_node(int cpu)
9{
10 return numa_cpu_lookup_table[cpu];
11}
12
13#define parent_node(node) (node)
14
15static inline cpumask_t node_to_cpumask(int node)
16{
17 return numa_cpumask_lookup_table[node];
18}
19
20/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
21#define node_to_cpumask_ptr(v, node) \
22 cpumask_t *v = &(numa_cpumask_lookup_table[node])
23
24#define node_to_cpumask_ptr_next(v, node) \
25 v = &(numa_cpumask_lookup_table[node])
26
27static inline int node_to_first_cpu(int node)
28{
29 cpumask_t tmp;
30 tmp = node_to_cpumask(node);
31 return first_cpu(tmp);
32}
33
34struct pci_bus;
35#ifdef CONFIG_PCI
36extern int pcibus_to_node(struct pci_bus *pbus);
37#else
38static inline int pcibus_to_node(struct pci_bus *pbus)
39{
40 return -1;
41}
42#endif
43
44#define pcibus_to_cpumask(bus) \
45 (pcibus_to_node(bus) == -1 ? \
46 CPU_MASK_ALL : \
47 node_to_cpumask(pcibus_to_node(bus)))
48
49#define SD_NODE_INIT (struct sched_domain) { \
50 .min_interval = 8, \
51 .max_interval = 32, \
52 .busy_factor = 32, \
53 .imbalance_pct = 125, \
54 .cache_nice_tries = 2, \
55 .busy_idx = 3, \
56 .idle_idx = 2, \
57 .newidle_idx = 0, \
58 .wake_idx = 1, \
59 .forkexec_idx = 1, \
60 .flags = SD_LOAD_BALANCE \
61 | SD_BALANCE_FORK \
62 | SD_BALANCE_EXEC \
63 | SD_SERIALIZE \
64 | SD_WAKE_BALANCE, \
65 .last_balance = jiffies, \
66 .balance_interval = 1, \
67}
68
69#else /* CONFIG_NUMA */
70
71#include <asm-generic/topology.h>
72
73#endif /* !(CONFIG_NUMA) */
74
75#ifdef CONFIG_SMP
76#define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id)
77#define topology_core_id(cpu) (cpu_data(cpu).core_id)
78#define topology_core_siblings(cpu) (cpu_core_map[cpu])
79#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
80#define mc_capable() (sparc64_multi_core)
81#define smt_capable() (sparc64_multi_core)
82#endif /* CONFIG_SMP */
83
84#define cpu_coregroup_map(cpu) (cpu_core_map[cpu])
85
86#endif /* _ASM_SPARC64_TOPOLOGY_H */
diff --git a/include/asm-sparc/tsb.h b/include/asm-sparc/tsb.h
new file mode 100644
index 000000000000..76e4299dd9bc
--- /dev/null
+++ b/include/asm-sparc/tsb.h
@@ -0,0 +1,283 @@
1#ifndef _SPARC64_TSB_H
2#define _SPARC64_TSB_H
3
4/* The sparc64 TSB is similar to the powerpc hashtables. It's a
5 * power-of-2 sized table of TAG/PTE pairs. The cpu precomputes
6 * pointers into this table for 8K and 64K page sizes, and also a
7 * comparison TAG based upon the virtual address and context which
8 * faults.
9 *
10 * TLB miss trap handler software does the actual lookup via something
11 * of the form:
12 *
13 * ldxa [%g0] ASI_{D,I}MMU_TSB_8KB_PTR, %g1
14 * ldxa [%g0] ASI_{D,I}MMU, %g6
15 * sllx %g6, 22, %g6
16 * srlx %g6, 22, %g6
17 * ldda [%g1] ASI_NUCLEUS_QUAD_LDD, %g4
18 * cmp %g4, %g6
19 * bne,pn %xcc, tsb_miss_{d,i}tlb
20 * mov FAULT_CODE_{D,I}TLB, %g3
21 * stxa %g5, [%g0] ASI_{D,I}TLB_DATA_IN
22 * retry
23 *
24 *
25 * Each 16-byte slot of the TSB is the 8-byte tag and then the 8-byte
26 * PTE. The TAG is of the same layout as the TLB TAG TARGET mmu
27 * register which is:
28 *
29 * -------------------------------------------------
30 * | - | CONTEXT | - | VADDR bits 63:22 |
31 * -------------------------------------------------
32 * 63 61 60 48 47 42 41 0
33 *
34 * But actually, since we use per-mm TSB's, we zero out the CONTEXT
35 * field.
36 *
37 * Like the powerpc hashtables we need to use locking in order to
38 * synchronize while we update the entries. PTE updates need locking
39 * as well.
40 *
41 * We need to carefully choose a lock bit for the TSB entry. We
42 * choose to use bit 47 in the tag. Also, since we never map anything
43 * at page zero in context zero, we use zero as an invalid tag entry.
44 * When the lock bit is set, this forces a tag comparison failure.
45 */
46
47#define TSB_TAG_LOCK_BIT 47
48#define TSB_TAG_LOCK_HIGH (1 << (TSB_TAG_LOCK_BIT - 32))
49
50#define TSB_TAG_INVALID_BIT 46
51#define TSB_TAG_INVALID_HIGH (1 << (TSB_TAG_INVALID_BIT - 32))
52
53#define TSB_MEMBAR membar #StoreStore
54
55/* Some cpus support physical address quad loads. We want to use
56 * those if possible so we don't need to hard-lock the TSB mapping
57 * into the TLB. We encode some instruction patching in order to
58 * support this.
59 *
60 * The kernel TSB is locked into the TLB by virtue of being in the
61 * kernel image, so we don't play these games for swapper_tsb access.
62 */
63#ifndef __ASSEMBLY__
64struct tsb_ldquad_phys_patch_entry {
65 unsigned int addr;
66 unsigned int sun4u_insn;
67 unsigned int sun4v_insn;
68};
69extern struct tsb_ldquad_phys_patch_entry __tsb_ldquad_phys_patch,
70 __tsb_ldquad_phys_patch_end;
71
72struct tsb_phys_patch_entry {
73 unsigned int addr;
74 unsigned int insn;
75};
76extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
77#endif
78#define TSB_LOAD_QUAD(TSB, REG) \
79661: ldda [TSB] ASI_NUCLEUS_QUAD_LDD, REG; \
80 .section .tsb_ldquad_phys_patch, "ax"; \
81 .word 661b; \
82 ldda [TSB] ASI_QUAD_LDD_PHYS, REG; \
83 ldda [TSB] ASI_QUAD_LDD_PHYS_4V, REG; \
84 .previous
85
86#define TSB_LOAD_TAG_HIGH(TSB, REG) \
87661: lduwa [TSB] ASI_N, REG; \
88 .section .tsb_phys_patch, "ax"; \
89 .word 661b; \
90 lduwa [TSB] ASI_PHYS_USE_EC, REG; \
91 .previous
92
93#define TSB_LOAD_TAG(TSB, REG) \
94661: ldxa [TSB] ASI_N, REG; \
95 .section .tsb_phys_patch, "ax"; \
96 .word 661b; \
97 ldxa [TSB] ASI_PHYS_USE_EC, REG; \
98 .previous
99
100#define TSB_CAS_TAG_HIGH(TSB, REG1, REG2) \
101661: casa [TSB] ASI_N, REG1, REG2; \
102 .section .tsb_phys_patch, "ax"; \
103 .word 661b; \
104 casa [TSB] ASI_PHYS_USE_EC, REG1, REG2; \
105 .previous
106
107#define TSB_CAS_TAG(TSB, REG1, REG2) \
108661: casxa [TSB] ASI_N, REG1, REG2; \
109 .section .tsb_phys_patch, "ax"; \
110 .word 661b; \
111 casxa [TSB] ASI_PHYS_USE_EC, REG1, REG2; \
112 .previous
113
114#define TSB_STORE(ADDR, VAL) \
115661: stxa VAL, [ADDR] ASI_N; \
116 .section .tsb_phys_patch, "ax"; \
117 .word 661b; \
118 stxa VAL, [ADDR] ASI_PHYS_USE_EC; \
119 .previous
120
121#define TSB_LOCK_TAG(TSB, REG1, REG2) \
12299: TSB_LOAD_TAG_HIGH(TSB, REG1); \
123 sethi %hi(TSB_TAG_LOCK_HIGH), REG2;\
124 andcc REG1, REG2, %g0; \
125 bne,pn %icc, 99b; \
126 nop; \
127 TSB_CAS_TAG_HIGH(TSB, REG1, REG2); \
128 cmp REG1, REG2; \
129 bne,pn %icc, 99b; \
130 nop; \
131 TSB_MEMBAR
132
133#define TSB_WRITE(TSB, TTE, TAG) \
134 add TSB, 0x8, TSB; \
135 TSB_STORE(TSB, TTE); \
136 sub TSB, 0x8, TSB; \
137 TSB_MEMBAR; \
138 TSB_STORE(TSB, TAG);
139
140#define KTSB_LOAD_QUAD(TSB, REG) \
141 ldda [TSB] ASI_NUCLEUS_QUAD_LDD, REG;
142
143#define KTSB_STORE(ADDR, VAL) \
144 stxa VAL, [ADDR] ASI_N;
145
146#define KTSB_LOCK_TAG(TSB, REG1, REG2) \
14799: lduwa [TSB] ASI_N, REG1; \
148 sethi %hi(TSB_TAG_LOCK_HIGH), REG2;\
149 andcc REG1, REG2, %g0; \
150 bne,pn %icc, 99b; \
151 nop; \
152 casa [TSB] ASI_N, REG1, REG2;\
153 cmp REG1, REG2; \
154 bne,pn %icc, 99b; \
155 nop; \
156 TSB_MEMBAR
157
158#define KTSB_WRITE(TSB, TTE, TAG) \
159 add TSB, 0x8, TSB; \
160 stxa TTE, [TSB] ASI_N; \
161 sub TSB, 0x8, TSB; \
162 TSB_MEMBAR; \
163 stxa TAG, [TSB] ASI_N;
164
165 /* Do a kernel page table walk. Leaves physical PTE pointer in
166 * REG1. Jumps to FAIL_LABEL on early page table walk termination.
167 * VADDR will not be clobbered, but REG2 will.
168 */
169#define KERN_PGTABLE_WALK(VADDR, REG1, REG2, FAIL_LABEL) \
170 sethi %hi(swapper_pg_dir), REG1; \
171 or REG1, %lo(swapper_pg_dir), REG1; \
172 sllx VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \
173 srlx REG2, 64 - PAGE_SHIFT, REG2; \
174 andn REG2, 0x3, REG2; \
175 lduw [REG1 + REG2], REG1; \
176 brz,pn REG1, FAIL_LABEL; \
177 sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
178 srlx REG2, 64 - PAGE_SHIFT, REG2; \
179 sllx REG1, 11, REG1; \
180 andn REG2, 0x3, REG2; \
181 lduwa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
182 brz,pn REG1, FAIL_LABEL; \
183 sllx VADDR, 64 - PMD_SHIFT, REG2; \
184 srlx REG2, 64 - PAGE_SHIFT, REG2; \
185 sllx REG1, 11, REG1; \
186 andn REG2, 0x7, REG2; \
187 add REG1, REG2, REG1;
188
189 /* Do a user page table walk in MMU globals. Leaves physical PTE
190 * pointer in REG1. Jumps to FAIL_LABEL on early page table walk
191 * termination. Physical base of page tables is in PHYS_PGD which
192 * will not be modified.
193 *
194 * VADDR will not be clobbered, but REG1 and REG2 will.
195 */
196#define USER_PGTABLE_WALK_TL1(VADDR, PHYS_PGD, REG1, REG2, FAIL_LABEL) \
197 sllx VADDR, 64 - (PGDIR_SHIFT + PGDIR_BITS), REG2; \
198 srlx REG2, 64 - PAGE_SHIFT, REG2; \
199 andn REG2, 0x3, REG2; \
200 lduwa [PHYS_PGD + REG2] ASI_PHYS_USE_EC, REG1; \
201 brz,pn REG1, FAIL_LABEL; \
202 sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \
203 srlx REG2, 64 - PAGE_SHIFT, REG2; \
204 sllx REG1, 11, REG1; \
205 andn REG2, 0x3, REG2; \
206 lduwa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \
207 brz,pn REG1, FAIL_LABEL; \
208 sllx VADDR, 64 - PMD_SHIFT, REG2; \
209 srlx REG2, 64 - PAGE_SHIFT, REG2; \
210 sllx REG1, 11, REG1; \
211 andn REG2, 0x7, REG2; \
212 add REG1, REG2, REG1;
213
214/* Lookup a OBP mapping on VADDR in the prom_trans[] table at TL>0.
215 * If no entry is found, FAIL_LABEL will be branched to. On success
216 * the resulting PTE value will be left in REG1. VADDR is preserved
217 * by this routine.
218 */
219#define OBP_TRANS_LOOKUP(VADDR, REG1, REG2, REG3, FAIL_LABEL) \
220 sethi %hi(prom_trans), REG1; \
221 or REG1, %lo(prom_trans), REG1; \
22297: ldx [REG1 + 0x00], REG2; \
223 brz,pn REG2, FAIL_LABEL; \
224 nop; \
225 ldx [REG1 + 0x08], REG3; \
226 add REG2, REG3, REG3; \
227 cmp REG2, VADDR; \
228 bgu,pt %xcc, 98f; \
229 cmp VADDR, REG3; \
230 bgeu,pt %xcc, 98f; \
231 ldx [REG1 + 0x10], REG3; \
232 sub VADDR, REG2, REG2; \
233 ba,pt %xcc, 99f; \
234 add REG3, REG2, REG1; \
23598: ba,pt %xcc, 97b; \
236 add REG1, (3 * 8), REG1; \
23799:
238
239 /* We use a 32K TSB for the whole kernel, which allows us to
240 * handle about 16MB of modules and vmalloc mappings without
241 * incurring many hash conflicts.
242 */
243#define KERNEL_TSB_SIZE_BYTES (32 * 1024)
244#define KERNEL_TSB_NENTRIES \
245 (KERNEL_TSB_SIZE_BYTES / 16)
246#define KERNEL_TSB4M_NENTRIES 4096
247
248 /* Do a kernel TSB lookup at tl>0 on VADDR+TAG, branch to OK_LABEL
249 * on TSB hit. REG1, REG2, REG3, and REG4 are used as temporaries
250 * and the found TTE will be left in REG1. REG3 and REG4 must
251 * be an even/odd pair of registers.
252 *
253 * VADDR and TAG will be preserved and not clobbered by this macro.
254 */
255#define KERN_TSB_LOOKUP_TL1(VADDR, TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
256 sethi %hi(swapper_tsb), REG1; \
257 or REG1, %lo(swapper_tsb), REG1; \
258 srlx VADDR, PAGE_SHIFT, REG2; \
259 and REG2, (KERNEL_TSB_NENTRIES - 1), REG2; \
260 sllx REG2, 4, REG2; \
261 add REG1, REG2, REG2; \
262 KTSB_LOAD_QUAD(REG2, REG3); \
263 cmp REG3, TAG; \
264 be,a,pt %xcc, OK_LABEL; \
265 mov REG4, REG1;
266
267#ifndef CONFIG_DEBUG_PAGEALLOC
268 /* This version uses a trick, the TAG is already (VADDR >> 22) so
269 * we can make use of that for the index computation.
270 */
271#define KERN_TSB4M_LOOKUP_TL1(TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
272 sethi %hi(swapper_4m_tsb), REG1; \
273 or REG1, %lo(swapper_4m_tsb), REG1; \
274 and TAG, (KERNEL_TSB4M_NENTRIES - 1), REG2; \
275 sllx REG2, 4, REG2; \
276 add REG1, REG2, REG2; \
277 KTSB_LOAD_QUAD(REG2, REG3); \
278 cmp REG3, TAG; \
279 be,a,pt %xcc, OK_LABEL; \
280 mov REG4, REG1;
281#endif
282
283#endif /* !(_SPARC64_TSB_H) */
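
The comment at the top of tsb.h describes the TSB as a power-of-2 table of 16-byte TAG/PTE slots indexed from the faulting virtual address. The sketch below redoes the KERN_TSB_LOOKUP_TL1 index and tag arithmetic in plain C so it can be compiled and run stand-alone; the 8K PAGE_SHIFT matches sparc64 base pages, but the sample vaddr and PTE value are invented.

/* Hedged C model of the kernel TSB index/tag computation used by
 * KERN_TSB_LOOKUP_TL1 above.  Not kernel code; the PTE below is fake.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT		13			/* 8K base pages */
#define KERNEL_TSB_SIZE_BYTES	(32 * 1024)
#define KERNEL_TSB_NENTRIES	(KERNEL_TSB_SIZE_BYTES / 16)

struct tsb_entry { uint64_t tag; uint64_t pte; };	/* 16-byte slot */

/* Tag is VADDR >> 22; the CONTEXT field is zeroed for per-mm TSBs. */
static uint64_t tsb_tag(uint64_t vaddr) { return vaddr >> 22; }

static const struct tsb_entry *tsb_lookup(const struct tsb_entry *tsb,
					  uint64_t vaddr)
{
	unsigned long idx = (vaddr >> PAGE_SHIFT) & (KERNEL_TSB_NENTRIES - 1);
	const struct tsb_entry *ent = &tsb[idx];

	return ent->tag == tsb_tag(vaddr) ? ent : NULL;	/* hit or miss */
}

int main(void)
{
	static struct tsb_entry tsb[KERNEL_TSB_NENTRIES];
	uint64_t vaddr = 0x400000ULL;
	unsigned long idx = (vaddr >> PAGE_SHIFT) & (KERNEL_TSB_NENTRIES - 1);

	tsb[idx].tag = tsb_tag(vaddr);
	tsb[idx].pte = 0x8000000000000123ULL;		/* made-up PTE */
	printf("hit: %d\n", tsb_lookup(tsb, vaddr) != NULL);
	return 0;
}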
diff --git a/include/asm-sparc/ttable.h b/include/asm-sparc/ttable.h
new file mode 100644
index 000000000000..5708ba2719fb
--- /dev/null
+++ b/include/asm-sparc/ttable.h
@@ -0,0 +1,658 @@
1#ifndef _SPARC64_TTABLE_H
2#define _SPARC64_TTABLE_H
3
4#include <asm/utrap.h>
5
6#ifdef __ASSEMBLY__
7#include <asm/thread_info.h>
8#endif
9
10#define BOOT_KERNEL b sparc64_boot; nop; nop; nop; nop; nop; nop; nop;
11
12/* We need a "cleaned" instruction... */
13#define CLEAN_WINDOW \
14 rdpr %cleanwin, %l0; add %l0, 1, %l0; \
15 wrpr %l0, 0x0, %cleanwin; \
16 clr %o0; clr %o1; clr %o2; clr %o3; \
17 clr %o4; clr %o5; clr %o6; clr %o7; \
18 clr %l0; clr %l1; clr %l2; clr %l3; \
19 clr %l4; clr %l5; clr %l6; clr %l7; \
20 retry; \
21 nop;nop;nop;nop;nop;nop;nop;nop;nop;nop;nop;nop;
22
23#define TRAP(routine) \
24 sethi %hi(109f), %g7; \
25 ba,pt %xcc, etrap; \
26109: or %g7, %lo(109b), %g7; \
27 call routine; \
28 add %sp, PTREGS_OFF, %o0; \
29 ba,pt %xcc, rtrap; \
30 nop; \
31 nop;
32
33#define TRAP_7INSNS(routine) \
34 sethi %hi(109f), %g7; \
35 ba,pt %xcc, etrap; \
36109: or %g7, %lo(109b), %g7; \
37 call routine; \
38 add %sp, PTREGS_OFF, %o0; \
39 ba,pt %xcc, rtrap; \
40 nop;
41
42#define TRAP_SAVEFPU(routine) \
43 sethi %hi(109f), %g7; \
44 ba,pt %xcc, do_fptrap; \
45109: or %g7, %lo(109b), %g7; \
46 call routine; \
47 add %sp, PTREGS_OFF, %o0; \
48 ba,pt %xcc, rtrap; \
49 nop; \
50 nop;
51
52#define TRAP_NOSAVE(routine) \
53 ba,pt %xcc, routine; \
54 nop; \
55 nop; nop; nop; nop; nop; nop;
56
57#define TRAP_NOSAVE_7INSNS(routine) \
58 ba,pt %xcc, routine; \
59 nop; \
60 nop; nop; nop; nop; nop;
61
62#define TRAPTL1(routine) \
63 sethi %hi(109f), %g7; \
64 ba,pt %xcc, etraptl1; \
65109: or %g7, %lo(109b), %g7; \
66 call routine; \
67 add %sp, PTREGS_OFF, %o0; \
68 ba,pt %xcc, rtrap; \
69 nop; \
70 nop;
71
72#define TRAP_ARG(routine, arg) \
73 sethi %hi(109f), %g7; \
74 ba,pt %xcc, etrap; \
75109: or %g7, %lo(109b), %g7; \
76 add %sp, PTREGS_OFF, %o0; \
77 call routine; \
78 mov arg, %o1; \
79 ba,pt %xcc, rtrap; \
80 nop;
81
82#define TRAPTL1_ARG(routine, arg) \
83 sethi %hi(109f), %g7; \
84 ba,pt %xcc, etraptl1; \
85109: or %g7, %lo(109b), %g7; \
86 add %sp, PTREGS_OFF, %o0; \
87 call routine; \
88 mov arg, %o1; \
89 ba,pt %xcc, rtrap; \
90 nop;
91
92#define SYSCALL_TRAP(routine, systbl) \
93 rdpr %pil, %g2; \
94 mov TSTATE_SYSCALL, %g3; \
95 sethi %hi(109f), %g7; \
96 ba,pt %xcc, etrap_syscall; \
97109: or %g7, %lo(109b), %g7; \
98 sethi %hi(systbl), %l7; \
99 ba,pt %xcc, routine; \
100 or %l7, %lo(systbl), %l7;
101
102#define TRAP_UTRAP(handler,lvl) \
103 mov handler, %g3; \
104 ba,pt %xcc, utrap_trap; \
105 mov lvl, %g4; \
106 nop; \
107 nop; \
108 nop; \
109 nop; \
110 nop;
111
112#ifdef CONFIG_COMPAT
113#define LINUX_32BIT_SYSCALL_TRAP SYSCALL_TRAP(linux_sparc_syscall32, sys_call_table32)
114#else
115#define LINUX_32BIT_SYSCALL_TRAP BTRAP(0x110)
116#endif
117#define LINUX_64BIT_SYSCALL_TRAP SYSCALL_TRAP(linux_sparc_syscall, sys_call_table64)
118#define GETCC_TRAP TRAP(getcc)
119#define SETCC_TRAP TRAP(setcc)
120#define BREAKPOINT_TRAP TRAP(breakpoint_trap)
121
122#ifdef CONFIG_TRACE_IRQFLAGS
123
124#define TRAP_IRQ(routine, level) \
125 rdpr %pil, %g2; \
126 wrpr %g0, 15, %pil; \
127 sethi %hi(1f-4), %g7; \
128 ba,pt %xcc, etrap_irq; \
129 or %g7, %lo(1f-4), %g7; \
130 nop; \
131 nop; \
132 nop; \
133 .subsection 2; \
1341: call trace_hardirqs_off; \
135 nop; \
136 mov level, %o0; \
137 call routine; \
138 add %sp, PTREGS_OFF, %o1; \
139 ba,a,pt %xcc, rtrap_irq; \
140 .previous;
141
142#else
143
144#define TRAP_IRQ(routine, level) \
145 rdpr %pil, %g2; \
146 wrpr %g0, 15, %pil; \
147 ba,pt %xcc, etrap_irq; \
148 rd %pc, %g7; \
149 mov level, %o0; \
150 call routine; \
151 add %sp, PTREGS_OFF, %o1; \
152 ba,a,pt %xcc, rtrap_irq;
153
154#endif
155
156#define TRAP_IVEC TRAP_NOSAVE(do_ivec)
157
158#define BTRAP(lvl) TRAP_ARG(bad_trap, lvl)
159
160#define BTRAPTL1(lvl) TRAPTL1_ARG(bad_trap_tl1, lvl)
161
162#define FLUSH_WINDOW_TRAP \
163 ba,pt %xcc, etrap; \
164 rd %pc, %g7; \
165 flushw; \
166 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1; \
167 add %l1, 4, %l2; \
168 stx %l1, [%sp + PTREGS_OFF + PT_V9_TPC]; \
169 ba,pt %xcc, rtrap; \
170 stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC];
171
172#ifdef CONFIG_KPROBES
173#define KPROBES_TRAP(lvl) TRAP_IRQ(kprobe_trap, lvl)
174#else
175#define KPROBES_TRAP(lvl) TRAP_ARG(bad_trap, lvl)
176#endif
177
178#ifdef CONFIG_KGDB
179#define KGDB_TRAP(lvl) TRAP_IRQ(kgdb_trap, lvl)
180#else
181#define KGDB_TRAP(lvl) TRAP_ARG(bad_trap, lvl)
182#endif
183
184#define SUN4V_ITSB_MISS \
185 ldxa [%g0] ASI_SCRATCHPAD, %g2; \
186 ldx [%g2 + HV_FAULT_I_ADDR_OFFSET], %g4; \
187 ldx [%g2 + HV_FAULT_I_CTX_OFFSET], %g5; \
188 srlx %g4, 22, %g6; \
189 ba,pt %xcc, sun4v_itsb_miss; \
190 nop; \
191 nop; \
192 nop;
193
194#define SUN4V_DTSB_MISS \
195 ldxa [%g0] ASI_SCRATCHPAD, %g2; \
196 ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4; \
197 ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5; \
198 srlx %g4, 22, %g6; \
199 ba,pt %xcc, sun4v_dtsb_miss; \
200 nop; \
201 nop; \
202 nop;
203
204/* Before touching these macros, you owe it to yourself to go and
205 * see how arch/sparc64/kernel/winfixup.S works... -DaveM
206 *
207 * For the user cases we used to use the %asi register, but
208 * it turns out that the "wr xxx, %asi" costs ~5 cycles, so
209 * now we use immediate ASI loads and stores instead. Kudos
210 * to Greg Onufer for pointing out this performance anomaly.
211 *
212 * Further note that we cannot use the g2, g4, g5, and g7 alternate
213 * globals in the spill routines, check out the save instruction in
214 * arch/sparc64/kernel/etrap.S to see what I mean about g2, and
215 * g4/g5 are the globals which are preserved by etrap processing
216 * for the caller of it. The g7 register is the return pc for
217 * etrap. Finally, g6 is the current thread register so we cannot
218 * us it in the spill handlers either. Most of these rules do not
219 * use it in the spill handlers either. Most of these rules do not
220 */
221
222/* Normal kernel spill */
223#define SPILL_0_NORMAL \
224 stx %l0, [%sp + STACK_BIAS + 0x00]; \
225 stx %l1, [%sp + STACK_BIAS + 0x08]; \
226 stx %l2, [%sp + STACK_BIAS + 0x10]; \
227 stx %l3, [%sp + STACK_BIAS + 0x18]; \
228 stx %l4, [%sp + STACK_BIAS + 0x20]; \
229 stx %l5, [%sp + STACK_BIAS + 0x28]; \
230 stx %l6, [%sp + STACK_BIAS + 0x30]; \
231 stx %l7, [%sp + STACK_BIAS + 0x38]; \
232 stx %i0, [%sp + STACK_BIAS + 0x40]; \
233 stx %i1, [%sp + STACK_BIAS + 0x48]; \
234 stx %i2, [%sp + STACK_BIAS + 0x50]; \
235 stx %i3, [%sp + STACK_BIAS + 0x58]; \
236 stx %i4, [%sp + STACK_BIAS + 0x60]; \
237 stx %i5, [%sp + STACK_BIAS + 0x68]; \
238 stx %i6, [%sp + STACK_BIAS + 0x70]; \
239 stx %i7, [%sp + STACK_BIAS + 0x78]; \
240 saved; retry; nop; nop; nop; nop; nop; nop; \
241 nop; nop; nop; nop; nop; nop; nop; nop;
242
243#define SPILL_0_NORMAL_ETRAP \
244etrap_kernel_spill: \
245 stx %l0, [%sp + STACK_BIAS + 0x00]; \
246 stx %l1, [%sp + STACK_BIAS + 0x08]; \
247 stx %l2, [%sp + STACK_BIAS + 0x10]; \
248 stx %l3, [%sp + STACK_BIAS + 0x18]; \
249 stx %l4, [%sp + STACK_BIAS + 0x20]; \
250 stx %l5, [%sp + STACK_BIAS + 0x28]; \
251 stx %l6, [%sp + STACK_BIAS + 0x30]; \
252 stx %l7, [%sp + STACK_BIAS + 0x38]; \
253 stx %i0, [%sp + STACK_BIAS + 0x40]; \
254 stx %i1, [%sp + STACK_BIAS + 0x48]; \
255 stx %i2, [%sp + STACK_BIAS + 0x50]; \
256 stx %i3, [%sp + STACK_BIAS + 0x58]; \
257 stx %i4, [%sp + STACK_BIAS + 0x60]; \
258 stx %i5, [%sp + STACK_BIAS + 0x68]; \
259 stx %i6, [%sp + STACK_BIAS + 0x70]; \
260 stx %i7, [%sp + STACK_BIAS + 0x78]; \
261 saved; \
262 sub %g1, 2, %g1; \
263 ba,pt %xcc, etrap_save; \
264 wrpr %g1, %cwp; \
265 nop; nop; nop; nop; nop; nop; nop; nop; \
266 nop; nop; nop; nop;
267
268/* Normal 64bit spill */
269#define SPILL_1_GENERIC(ASI) \
270 add %sp, STACK_BIAS + 0x00, %g1; \
271 stxa %l0, [%g1 + %g0] ASI; \
272 mov 0x08, %g3; \
273 stxa %l1, [%g1 + %g3] ASI; \
274 add %g1, 0x10, %g1; \
275 stxa %l2, [%g1 + %g0] ASI; \
276 stxa %l3, [%g1 + %g3] ASI; \
277 add %g1, 0x10, %g1; \
278 stxa %l4, [%g1 + %g0] ASI; \
279 stxa %l5, [%g1 + %g3] ASI; \
280 add %g1, 0x10, %g1; \
281 stxa %l6, [%g1 + %g0] ASI; \
282 stxa %l7, [%g1 + %g3] ASI; \
283 add %g1, 0x10, %g1; \
284 stxa %i0, [%g1 + %g0] ASI; \
285 stxa %i1, [%g1 + %g3] ASI; \
286 add %g1, 0x10, %g1; \
287 stxa %i2, [%g1 + %g0] ASI; \
288 stxa %i3, [%g1 + %g3] ASI; \
289 add %g1, 0x10, %g1; \
290 stxa %i4, [%g1 + %g0] ASI; \
291 stxa %i5, [%g1 + %g3] ASI; \
292 add %g1, 0x10, %g1; \
293 stxa %i6, [%g1 + %g0] ASI; \
294 stxa %i7, [%g1 + %g3] ASI; \
295 saved; \
296 retry; nop; nop; \
297 b,a,pt %xcc, spill_fixup_dax; \
298 b,a,pt %xcc, spill_fixup_mna; \
299 b,a,pt %xcc, spill_fixup;
300
301#define SPILL_1_GENERIC_ETRAP \
302etrap_user_spill_64bit: \
303 stxa %l0, [%sp + STACK_BIAS + 0x00] %asi; \
304 stxa %l1, [%sp + STACK_BIAS + 0x08] %asi; \
305 stxa %l2, [%sp + STACK_BIAS + 0x10] %asi; \
306 stxa %l3, [%sp + STACK_BIAS + 0x18] %asi; \
307 stxa %l4, [%sp + STACK_BIAS + 0x20] %asi; \
308 stxa %l5, [%sp + STACK_BIAS + 0x28] %asi; \
309 stxa %l6, [%sp + STACK_BIAS + 0x30] %asi; \
310 stxa %l7, [%sp + STACK_BIAS + 0x38] %asi; \
311 stxa %i0, [%sp + STACK_BIAS + 0x40] %asi; \
312 stxa %i1, [%sp + STACK_BIAS + 0x48] %asi; \
313 stxa %i2, [%sp + STACK_BIAS + 0x50] %asi; \
314 stxa %i3, [%sp + STACK_BIAS + 0x58] %asi; \
315 stxa %i4, [%sp + STACK_BIAS + 0x60] %asi; \
316 stxa %i5, [%sp + STACK_BIAS + 0x68] %asi; \
317 stxa %i6, [%sp + STACK_BIAS + 0x70] %asi; \
318 stxa %i7, [%sp + STACK_BIAS + 0x78] %asi; \
319 saved; \
320 sub %g1, 2, %g1; \
321 ba,pt %xcc, etrap_save; \
322 wrpr %g1, %cwp; \
323 nop; nop; nop; nop; nop; \
324 nop; nop; nop; nop; \
325 ba,a,pt %xcc, etrap_spill_fixup_64bit; \
326 ba,a,pt %xcc, etrap_spill_fixup_64bit; \
327 ba,a,pt %xcc, etrap_spill_fixup_64bit;
328
329#define SPILL_1_GENERIC_ETRAP_FIXUP \
330etrap_spill_fixup_64bit: \
331 ldub [%g6 + TI_WSAVED], %g1; \
332 sll %g1, 3, %g3; \
333 add %g6, %g3, %g3; \
334 stx %sp, [%g3 + TI_RWIN_SPTRS]; \
335 sll %g1, 7, %g3; \
336 add %g6, %g3, %g3; \
337 stx %l0, [%g3 + TI_REG_WINDOW + 0x00]; \
338 stx %l1, [%g3 + TI_REG_WINDOW + 0x08]; \
339 stx %l2, [%g3 + TI_REG_WINDOW + 0x10]; \
340 stx %l3, [%g3 + TI_REG_WINDOW + 0x18]; \
341 stx %l4, [%g3 + TI_REG_WINDOW + 0x20]; \
342 stx %l5, [%g3 + TI_REG_WINDOW + 0x28]; \
343 stx %l6, [%g3 + TI_REG_WINDOW + 0x30]; \
344 stx %l7, [%g3 + TI_REG_WINDOW + 0x38]; \
345 stx %i0, [%g3 + TI_REG_WINDOW + 0x40]; \
346 stx %i1, [%g3 + TI_REG_WINDOW + 0x48]; \
347 stx %i2, [%g3 + TI_REG_WINDOW + 0x50]; \
348 stx %i3, [%g3 + TI_REG_WINDOW + 0x58]; \
349 stx %i4, [%g3 + TI_REG_WINDOW + 0x60]; \
350 stx %i5, [%g3 + TI_REG_WINDOW + 0x68]; \
351 stx %i6, [%g3 + TI_REG_WINDOW + 0x70]; \
352 stx %i7, [%g3 + TI_REG_WINDOW + 0x78]; \
353 add %g1, 1, %g1; \
354 stb %g1, [%g6 + TI_WSAVED]; \
355 saved; \
356 rdpr %cwp, %g1; \
357 sub %g1, 2, %g1; \
358 ba,pt %xcc, etrap_save; \
359 wrpr %g1, %cwp; \
360 nop; nop; nop
361
362/* Normal 32bit spill */
363#define SPILL_2_GENERIC(ASI) \
364 srl %sp, 0, %sp; \
365 stwa %l0, [%sp + %g0] ASI; \
366 mov 0x04, %g3; \
367 stwa %l1, [%sp + %g3] ASI; \
368 add %sp, 0x08, %g1; \
369 stwa %l2, [%g1 + %g0] ASI; \
370 stwa %l3, [%g1 + %g3] ASI; \
371 add %g1, 0x08, %g1; \
372 stwa %l4, [%g1 + %g0] ASI; \
373 stwa %l5, [%g1 + %g3] ASI; \
374 add %g1, 0x08, %g1; \
375 stwa %l6, [%g1 + %g0] ASI; \
376 stwa %l7, [%g1 + %g3] ASI; \
377 add %g1, 0x08, %g1; \
378 stwa %i0, [%g1 + %g0] ASI; \
379 stwa %i1, [%g1 + %g3] ASI; \
380 add %g1, 0x08, %g1; \
381 stwa %i2, [%g1 + %g0] ASI; \
382 stwa %i3, [%g1 + %g3] ASI; \
383 add %g1, 0x08, %g1; \
384 stwa %i4, [%g1 + %g0] ASI; \
385 stwa %i5, [%g1 + %g3] ASI; \
386 add %g1, 0x08, %g1; \
387 stwa %i6, [%g1 + %g0] ASI; \
388 stwa %i7, [%g1 + %g3] ASI; \
389 saved; \
390 retry; nop; nop; \
391 b,a,pt %xcc, spill_fixup_dax; \
392 b,a,pt %xcc, spill_fixup_mna; \
393 b,a,pt %xcc, spill_fixup;
394
395#define SPILL_2_GENERIC_ETRAP \
396etrap_user_spill_32bit: \
397 srl %sp, 0, %sp; \
398 stwa %l0, [%sp + 0x00] %asi; \
399 stwa %l1, [%sp + 0x04] %asi; \
400 stwa %l2, [%sp + 0x08] %asi; \
401 stwa %l3, [%sp + 0x0c] %asi; \
402 stwa %l4, [%sp + 0x10] %asi; \
403 stwa %l5, [%sp + 0x14] %asi; \
404 stwa %l6, [%sp + 0x18] %asi; \
405 stwa %l7, [%sp + 0x1c] %asi; \
406 stwa %i0, [%sp + 0x20] %asi; \
407 stwa %i1, [%sp + 0x24] %asi; \
408 stwa %i2, [%sp + 0x28] %asi; \
409 stwa %i3, [%sp + 0x2c] %asi; \
410 stwa %i4, [%sp + 0x30] %asi; \
411 stwa %i5, [%sp + 0x34] %asi; \
412 stwa %i6, [%sp + 0x38] %asi; \
413 stwa %i7, [%sp + 0x3c] %asi; \
414 saved; \
415 sub %g1, 2, %g1; \
416 ba,pt %xcc, etrap_save; \
417 wrpr %g1, %cwp; \
418 nop; nop; nop; nop; \
419 nop; nop; nop; nop; \
420 ba,a,pt %xcc, etrap_spill_fixup_32bit; \
421 ba,a,pt %xcc, etrap_spill_fixup_32bit; \
422 ba,a,pt %xcc, etrap_spill_fixup_32bit;
423
424#define SPILL_2_GENERIC_ETRAP_FIXUP \
425etrap_spill_fixup_32bit: \
426 ldub [%g6 + TI_WSAVED], %g1; \
427 sll %g1, 3, %g3; \
428 add %g6, %g3, %g3; \
429 stx %sp, [%g3 + TI_RWIN_SPTRS]; \
430 sll %g1, 7, %g3; \
431 add %g6, %g3, %g3; \
432 stw %l0, [%g3 + TI_REG_WINDOW + 0x00]; \
433 stw %l1, [%g3 + TI_REG_WINDOW + 0x04]; \
434 stw %l2, [%g3 + TI_REG_WINDOW + 0x08]; \
435 stw %l3, [%g3 + TI_REG_WINDOW + 0x0c]; \
436 stw %l4, [%g3 + TI_REG_WINDOW + 0x10]; \
437 stw %l5, [%g3 + TI_REG_WINDOW + 0x14]; \
438 stw %l6, [%g3 + TI_REG_WINDOW + 0x18]; \
439 stw %l7, [%g3 + TI_REG_WINDOW + 0x1c]; \
440 stw %i0, [%g3 + TI_REG_WINDOW + 0x20]; \
441 stw %i1, [%g3 + TI_REG_WINDOW + 0x24]; \
442 stw %i2, [%g3 + TI_REG_WINDOW + 0x28]; \
443 stw %i3, [%g3 + TI_REG_WINDOW + 0x2c]; \
444 stw %i4, [%g3 + TI_REG_WINDOW + 0x30]; \
445 stw %i5, [%g3 + TI_REG_WINDOW + 0x34]; \
446 stw %i6, [%g3 + TI_REG_WINDOW + 0x38]; \
447 stw %i7, [%g3 + TI_REG_WINDOW + 0x3c]; \
448 add %g1, 1, %g1; \
449 stb %g1, [%g6 + TI_WSAVED]; \
450 saved; \
451 rdpr %cwp, %g1; \
452 sub %g1, 2, %g1; \
453 ba,pt %xcc, etrap_save; \
454 wrpr %g1, %cwp; \
455 nop; nop; nop
456
457#define SPILL_1_NORMAL SPILL_1_GENERIC(ASI_AIUP)
458#define SPILL_2_NORMAL SPILL_2_GENERIC(ASI_AIUP)
459#define SPILL_3_NORMAL SPILL_0_NORMAL
460#define SPILL_4_NORMAL SPILL_0_NORMAL
461#define SPILL_5_NORMAL SPILL_0_NORMAL
462#define SPILL_6_NORMAL SPILL_0_NORMAL
463#define SPILL_7_NORMAL SPILL_0_NORMAL
464
465#define SPILL_0_OTHER SPILL_0_NORMAL
466#define SPILL_1_OTHER SPILL_1_GENERIC(ASI_AIUS)
467#define SPILL_2_OTHER SPILL_2_GENERIC(ASI_AIUS)
468#define SPILL_3_OTHER SPILL_3_NORMAL
469#define SPILL_4_OTHER SPILL_4_NORMAL
470#define SPILL_5_OTHER SPILL_5_NORMAL
471#define SPILL_6_OTHER SPILL_6_NORMAL
472#define SPILL_7_OTHER SPILL_7_NORMAL
473
474/* Normal kernel fill */
475#define FILL_0_NORMAL \
476 ldx [%sp + STACK_BIAS + 0x00], %l0; \
477 ldx [%sp + STACK_BIAS + 0x08], %l1; \
478 ldx [%sp + STACK_BIAS + 0x10], %l2; \
479 ldx [%sp + STACK_BIAS + 0x18], %l3; \
480 ldx [%sp + STACK_BIAS + 0x20], %l4; \
481 ldx [%sp + STACK_BIAS + 0x28], %l5; \
482 ldx [%sp + STACK_BIAS + 0x30], %l6; \
483 ldx [%sp + STACK_BIAS + 0x38], %l7; \
484 ldx [%sp + STACK_BIAS + 0x40], %i0; \
485 ldx [%sp + STACK_BIAS + 0x48], %i1; \
486 ldx [%sp + STACK_BIAS + 0x50], %i2; \
487 ldx [%sp + STACK_BIAS + 0x58], %i3; \
488 ldx [%sp + STACK_BIAS + 0x60], %i4; \
489 ldx [%sp + STACK_BIAS + 0x68], %i5; \
490 ldx [%sp + STACK_BIAS + 0x70], %i6; \
491 ldx [%sp + STACK_BIAS + 0x78], %i7; \
492 restored; retry; nop; nop; nop; nop; nop; nop; \
493 nop; nop; nop; nop; nop; nop; nop; nop;
494
495#define FILL_0_NORMAL_RTRAP \
496kern_rtt_fill: \
497 rdpr %cwp, %g1; \
498 sub %g1, 1, %g1; \
499 wrpr %g1, %cwp; \
500 ldx [%sp + STACK_BIAS + 0x00], %l0; \
501 ldx [%sp + STACK_BIAS + 0x08], %l1; \
502 ldx [%sp + STACK_BIAS + 0x10], %l2; \
503 ldx [%sp + STACK_BIAS + 0x18], %l3; \
504 ldx [%sp + STACK_BIAS + 0x20], %l4; \
505 ldx [%sp + STACK_BIAS + 0x28], %l5; \
506 ldx [%sp + STACK_BIAS + 0x30], %l6; \
507 ldx [%sp + STACK_BIAS + 0x38], %l7; \
508 ldx [%sp + STACK_BIAS + 0x40], %i0; \
509 ldx [%sp + STACK_BIAS + 0x48], %i1; \
510 ldx [%sp + STACK_BIAS + 0x50], %i2; \
511 ldx [%sp + STACK_BIAS + 0x58], %i3; \
512 ldx [%sp + STACK_BIAS + 0x60], %i4; \
513 ldx [%sp + STACK_BIAS + 0x68], %i5; \
514 ldx [%sp + STACK_BIAS + 0x70], %i6; \
515 ldx [%sp + STACK_BIAS + 0x78], %i7; \
516 restored; \
517 add %g1, 1, %g1; \
518 ba,pt %xcc, kern_rtt_restore; \
519 wrpr %g1, %cwp; \
520 nop; nop; nop; nop; nop; \
521 nop; nop; nop; nop;
522
523
524/* Normal 64bit fill */
525#define FILL_1_GENERIC(ASI) \
526 add %sp, STACK_BIAS + 0x00, %g1; \
527 ldxa [%g1 + %g0] ASI, %l0; \
528 mov 0x08, %g2; \
529 mov 0x10, %g3; \
530 ldxa [%g1 + %g2] ASI, %l1; \
531 mov 0x18, %g5; \
532 ldxa [%g1 + %g3] ASI, %l2; \
533 ldxa [%g1 + %g5] ASI, %l3; \
534 add %g1, 0x20, %g1; \
535 ldxa [%g1 + %g0] ASI, %l4; \
536 ldxa [%g1 + %g2] ASI, %l5; \
537 ldxa [%g1 + %g3] ASI, %l6; \
538 ldxa [%g1 + %g5] ASI, %l7; \
539 add %g1, 0x20, %g1; \
540 ldxa [%g1 + %g0] ASI, %i0; \
541 ldxa [%g1 + %g2] ASI, %i1; \
542 ldxa [%g1 + %g3] ASI, %i2; \
543 ldxa [%g1 + %g5] ASI, %i3; \
544 add %g1, 0x20, %g1; \
545 ldxa [%g1 + %g0] ASI, %i4; \
546 ldxa [%g1 + %g2] ASI, %i5; \
547 ldxa [%g1 + %g3] ASI, %i6; \
548 ldxa [%g1 + %g5] ASI, %i7; \
549 restored; \
550 retry; nop; nop; nop; nop; \
551 b,a,pt %xcc, fill_fixup_dax; \
552 b,a,pt %xcc, fill_fixup_mna; \
553 b,a,pt %xcc, fill_fixup;
554
555#define FILL_1_GENERIC_RTRAP \
556user_rtt_fill_64bit: \
557 ldxa [%sp + STACK_BIAS + 0x00] %asi, %l0; \
558 ldxa [%sp + STACK_BIAS + 0x08] %asi, %l1; \
559 ldxa [%sp + STACK_BIAS + 0x10] %asi, %l2; \
560 ldxa [%sp + STACK_BIAS + 0x18] %asi, %l3; \
561 ldxa [%sp + STACK_BIAS + 0x20] %asi, %l4; \
562 ldxa [%sp + STACK_BIAS + 0x28] %asi, %l5; \
563 ldxa [%sp + STACK_BIAS + 0x30] %asi, %l6; \
564 ldxa [%sp + STACK_BIAS + 0x38] %asi, %l7; \
565 ldxa [%sp + STACK_BIAS + 0x40] %asi, %i0; \
566 ldxa [%sp + STACK_BIAS + 0x48] %asi, %i1; \
567 ldxa [%sp + STACK_BIAS + 0x50] %asi, %i2; \
568 ldxa [%sp + STACK_BIAS + 0x58] %asi, %i3; \
569 ldxa [%sp + STACK_BIAS + 0x60] %asi, %i4; \
570 ldxa [%sp + STACK_BIAS + 0x68] %asi, %i5; \
571 ldxa [%sp + STACK_BIAS + 0x70] %asi, %i6; \
572 ldxa [%sp + STACK_BIAS + 0x78] %asi, %i7; \
573 ba,pt %xcc, user_rtt_pre_restore; \
574 restored; \
575 nop; nop; nop; nop; nop; nop; \
576 nop; nop; nop; nop; nop; \
577 ba,a,pt %xcc, user_rtt_fill_fixup; \
578 ba,a,pt %xcc, user_rtt_fill_fixup; \
579 ba,a,pt %xcc, user_rtt_fill_fixup;
580
581
582/* Normal 32bit fill */
583#define FILL_2_GENERIC(ASI) \
584 srl %sp, 0, %sp; \
585 lduwa [%sp + %g0] ASI, %l0; \
586 mov 0x04, %g2; \
587 mov 0x08, %g3; \
588 lduwa [%sp + %g2] ASI, %l1; \
589 mov 0x0c, %g5; \
590 lduwa [%sp + %g3] ASI, %l2; \
591 lduwa [%sp + %g5] ASI, %l3; \
592 add %sp, 0x10, %g1; \
593 lduwa [%g1 + %g0] ASI, %l4; \
594 lduwa [%g1 + %g2] ASI, %l5; \
595 lduwa [%g1 + %g3] ASI, %l6; \
596 lduwa [%g1 + %g5] ASI, %l7; \
597 add %g1, 0x10, %g1; \
598 lduwa [%g1 + %g0] ASI, %i0; \
599 lduwa [%g1 + %g2] ASI, %i1; \
600 lduwa [%g1 + %g3] ASI, %i2; \
601 lduwa [%g1 + %g5] ASI, %i3; \
602 add %g1, 0x10, %g1; \
603 lduwa [%g1 + %g0] ASI, %i4; \
604 lduwa [%g1 + %g2] ASI, %i5; \
605 lduwa [%g1 + %g3] ASI, %i6; \
606 lduwa [%g1 + %g5] ASI, %i7; \
607 restored; \
608 retry; nop; nop; nop; nop; \
609 b,a,pt %xcc, fill_fixup_dax; \
610 b,a,pt %xcc, fill_fixup_mna; \
611 b,a,pt %xcc, fill_fixup;
612
613#define FILL_2_GENERIC_RTRAP \
614user_rtt_fill_32bit: \
615 srl %sp, 0, %sp; \
616 lduwa [%sp + 0x00] %asi, %l0; \
617 lduwa [%sp + 0x04] %asi, %l1; \
618 lduwa [%sp + 0x08] %asi, %l2; \
619 lduwa [%sp + 0x0c] %asi, %l3; \
620 lduwa [%sp + 0x10] %asi, %l4; \
621 lduwa [%sp + 0x14] %asi, %l5; \
622 lduwa [%sp + 0x18] %asi, %l6; \
623 lduwa [%sp + 0x1c] %asi, %l7; \
624 lduwa [%sp + 0x20] %asi, %i0; \
625 lduwa [%sp + 0x24] %asi, %i1; \
626 lduwa [%sp + 0x28] %asi, %i2; \
627 lduwa [%sp + 0x2c] %asi, %i3; \
628 lduwa [%sp + 0x30] %asi, %i4; \
629 lduwa [%sp + 0x34] %asi, %i5; \
630 lduwa [%sp + 0x38] %asi, %i6; \
631 lduwa [%sp + 0x3c] %asi, %i7; \
632 ba,pt %xcc, user_rtt_pre_restore; \
633 restored; \
634 nop; nop; nop; nop; nop; \
635 nop; nop; nop; nop; nop; \
636 ba,a,pt %xcc, user_rtt_fill_fixup; \
637 ba,a,pt %xcc, user_rtt_fill_fixup; \
638 ba,a,pt %xcc, user_rtt_fill_fixup;
639
640
641#define FILL_1_NORMAL FILL_1_GENERIC(ASI_AIUP)
642#define FILL_2_NORMAL FILL_2_GENERIC(ASI_AIUP)
643#define FILL_3_NORMAL FILL_0_NORMAL
644#define FILL_4_NORMAL FILL_0_NORMAL
645#define FILL_5_NORMAL FILL_0_NORMAL
646#define FILL_6_NORMAL FILL_0_NORMAL
647#define FILL_7_NORMAL FILL_0_NORMAL
648
649#define FILL_0_OTHER FILL_0_NORMAL
650#define FILL_1_OTHER FILL_1_GENERIC(ASI_AIUS)
651#define FILL_2_OTHER FILL_2_GENERIC(ASI_AIUS)
652#define FILL_3_OTHER FILL_3_NORMAL
653#define FILL_4_OTHER FILL_4_NORMAL
654#define FILL_5_OTHER FILL_5_NORMAL
655#define FILL_6_OTHER FILL_6_NORMAL
656#define FILL_7_OTHER FILL_7_NORMAL
657
658#endif /* !(_SPARC64_TTABLE_H) */
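
For context on why every macro above pads itself with nops: on SPARC V9 a trap vectors to TBA plus 32 bytes (8 instructions) per trap type, the TL>0 half of the table sits at offset 0x4000, and the register-window spill/fill traps own four consecutive slots (32 instructions), which is exactly what SPILL_* and FILL_* expand to. The stand-alone C model below only illustrates that vector arithmetic; the TBA value is arbitrary.

/* Hedged sketch of SPARC V9 trap-table vectoring that the padding in
 * the macros above has to satisfy.  Illustration only.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t trap_vector(uint64_t tba, unsigned int tt, int tl_gt_0)
{
	return (tba & ~0x7fffULL)		/* TBA holds bits 63:15 (32K aligned) */
	       | (tl_gt_0 ? 0x4000 : 0)		/* TL > 0 half of the table */
	       | ((uint64_t)tt << 5);		/* 32 bytes per trap type */
}

int main(void)
{
	uint64_t tba = 0x408000ULL;		/* made-up trap table base */

	/* 0x110: 32-bit Linux syscall trap; 0x080: spill_0_normal window trap. */
	printf("tt 0x110 @ TL=0 -> %#llx\n",
	       (unsigned long long)trap_vector(tba, 0x110, 0));
	printf("tt 0x080 @ TL=1 -> %#llx\n",
	       (unsigned long long)trap_vector(tba, 0x080, 1));
	return 0;
}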
diff --git a/include/asm-sparc/types.h b/include/asm-sparc/types.h
index 07734f942405..8c28fde5eaa2 100644
--- a/include/asm-sparc/types.h
+++ b/include/asm-sparc/types.h
@@ -1,6 +1,5 @@
1#ifndef _SPARC_TYPES_H 1#ifndef _SPARC_TYPES_H
2#define _SPARC_TYPES_H 2#define _SPARC_TYPES_H
3
4/* 3/*
5 * This file is never included by application software unless 4 * This file is never included by application software unless
6 * explicitly requested (e.g., via linux/types.h) in which case the 5 * explicitly requested (e.g., via linux/types.h) in which case the
@@ -8,6 +7,35 @@
8 * not a major issue. However, for interoperability, libraries still 7 * not a major issue. However, for interoperability, libraries still
9 * need to be careful to avoid name clashes. 8 * need to be careful to avoid name clashes.
10 */ 9 */
10
11#if defined(__sparc__) && defined(__arch64__)
12
13/*** SPARC 64 bit ***/
14#include <asm-generic/int-l64.h>
15
16#ifndef __ASSEMBLY__
17
18typedef unsigned short umode_t;
19
20#endif /* __ASSEMBLY__ */
21
22#ifdef __KERNEL__
23
24#define BITS_PER_LONG 64
25
26#ifndef __ASSEMBLY__
27
28/* Dma addresses come in generic and 64-bit flavours. */
29
30typedef u32 dma_addr_t;
31typedef u64 dma64_addr_t;
32
33#endif /* __ASSEMBLY__ */
34
35#endif /* __KERNEL__ */
36#else
37
38/*** SPARC 32 bit ***/
11#include <asm-generic/int-ll64.h> 39#include <asm-generic/int-ll64.h>
12 40
13#ifndef __ASSEMBLY__ 41#ifndef __ASSEMBLY__
@@ -29,4 +57,6 @@ typedef u32 dma64_addr_t;
29 57
30#endif /* __KERNEL__ */ 58#endif /* __KERNEL__ */
31 59
60#endif /* defined(__sparc__) && defined(__arch64__) */
61
32#endif /* defined(_SPARC_TYPES_H) */ 62#endif /* defined(_SPARC_TYPES_H) */
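
The merged types.h now keys everything off defined(__sparc__) && defined(__arch64__): the 64-bit branch pulls in asm-generic/int-l64.h (so u64 is unsigned long and BITS_PER_LONG is 64) while the 32-bit branch keeps asm-generic/int-ll64.h, and dma_addr_t stays a u32 in both. A trivial stand-alone probe of the same conditional follows; the SPARC_ prefix is only there to avoid clashing with the real kernel macros, and the -m64/-m32 comparison assumes a sparc toolchain.

/* Hedged probe of the __sparc__/__arch64__ conditional used above. */
#include <stdio.h>

#if defined(__sparc__) && defined(__arch64__)
# define SPARC_BITS_PER_LONG 64	/* int-l64.h flavour: u64 is unsigned long */
#else
# define SPARC_BITS_PER_LONG 32	/* int-ll64.h flavour: u64 is unsigned long long */
#endif

int main(void)
{
	printf("BITS_PER_LONG would be %d\n", SPARC_BITS_PER_LONG);
	printf("sizeof(long) = %zu\n", sizeof(long));
	return 0;
}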
diff --git a/include/asm-sparc/uaccess.h b/include/asm-sparc/uaccess.h
index 47d5619d43fa..424facce5238 100644
--- a/include/asm-sparc/uaccess.h
+++ b/include/asm-sparc/uaccess.h
@@ -1,336 +1,8 @@
1/* 1#ifndef ___ASM_SPARC_UACCESS_H
2 * uaccess.h: User space memore access functions. 2#define ___ASM_SPARC_UACCESS_H
3 * 3#if defined(__sparc__) && defined(__arch64__)
4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) 4#include <asm-sparc/uaccess_64.h>
5 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) 5#else
6 */ 6#include <asm-sparc/uaccess_32.h>
7#ifndef _ASM_UACCESS_H 7#endif
8#define _ASM_UACCESS_H
9
10#ifdef __KERNEL__
11#include <linux/compiler.h>
12#include <linux/sched.h>
13#include <linux/string.h>
14#include <linux/errno.h>
15#include <asm/vac-ops.h>
16#endif 8#endif
17
18#ifndef __ASSEMBLY__
19
20/* Sparc is not segmented, however we need to be able to fool access_ok()
21 * when doing system calls from kernel mode legitimately.
22 *
23 * "For historical reasons, these macros are grossly misnamed." -Linus
24 */
25
26#define KERNEL_DS ((mm_segment_t) { 0 })
27#define USER_DS ((mm_segment_t) { -1 })
28
29#define VERIFY_READ 0
30#define VERIFY_WRITE 1
31
32#define get_ds() (KERNEL_DS)
33#define get_fs() (current->thread.current_ds)
34#define set_fs(val) ((current->thread.current_ds) = (val))
35
36#define segment_eq(a,b) ((a).seg == (b).seg)
37
38/* We have there a nice not-mapped page at PAGE_OFFSET - PAGE_SIZE, so that this test
39 * can be fairly lightweight.
40 * No one can read/write anything from userland in the kernel space by setting
41 * large size and address near to PAGE_OFFSET - a fault will break his intentions.
42 */
43#define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
44#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
45#define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size)))
46#define access_ok(type, addr, size) \
47 ({ (void)(type); __access_ok((unsigned long)(addr), size); })
48
49/*
50 * The exception table consists of pairs of addresses: the first is the
51 * address of an instruction that is allowed to fault, and the second is
52 * the address at which the program should continue. No registers are
53 * modified, so it is entirely up to the continuation code to figure out
54 * what to do.
55 *
56 * All the routines below use bits of fixup code that are out of line
57 * with the main instruction path. This means when everything is well,
58 * we don't even have to jump over them. Further, they do not intrude
59 * on our cache or tlb entries.
60 *
61 * There is a special way how to put a range of potentially faulting
62 * insns (like twenty ldd/std's with now intervening other instructions)
63 * You specify address of first in insn and 0 in fixup and in the next
64 * exception_table_entry you specify last potentially faulting insn + 1
65 * and in fixup the routine which should handle the fault.
66 * That fixup code will get
67 * (faulting_insn_address - first_insn_in_the_range_address)/4
68 * in %g2 (ie. index of the faulting instruction in the range).
69 */
70
71struct exception_table_entry
72{
73 unsigned long insn, fixup;
74};
75
76/* Returns 0 if exception not found and fixup otherwise. */
77extern unsigned long search_extables_range(unsigned long addr, unsigned long *g2);
78
79extern void __ret_efault(void);
80
81/* Uh, these should become the main single-value transfer routines..
82 * They automatically use the right size if we just have the right
83 * pointer type..
84 *
85 * This gets kind of ugly. We want to return _two_ values in "get_user()"
86 * and yet we don't want to do any pointers, because that is too much
87 * of a performance impact. Thus we have a few rather ugly macros here,
88 * and hide all the ugliness from the user.
89 */
90#define put_user(x,ptr) ({ \
91unsigned long __pu_addr = (unsigned long)(ptr); \
92__chk_user_ptr(ptr); \
93__put_user_check((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); })
94
95#define get_user(x,ptr) ({ \
96unsigned long __gu_addr = (unsigned long)(ptr); \
97__chk_user_ptr(ptr); \
98__get_user_check((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); })
99
100/*
101 * The "__xxx" versions do not do address space checking, useful when
102 * doing multiple accesses to the same area (the user has to do the
103 * checks by hand with "access_ok()")
104 */
105#define __put_user(x,ptr) __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
106#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)),__typeof__(*(ptr)))
107
108struct __large_struct { unsigned long buf[100]; };
109#define __m(x) ((struct __large_struct __user *)(x))
110
111#define __put_user_check(x,addr,size) ({ \
112register int __pu_ret; \
113if (__access_ok(addr,size)) { \
114switch (size) { \
115case 1: __put_user_asm(x,b,addr,__pu_ret); break; \
116case 2: __put_user_asm(x,h,addr,__pu_ret); break; \
117case 4: __put_user_asm(x,,addr,__pu_ret); break; \
118case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
119default: __pu_ret = __put_user_bad(); break; \
120} } else { __pu_ret = -EFAULT; } __pu_ret; })
121
122#define __put_user_nocheck(x,addr,size) ({ \
123register int __pu_ret; \
124switch (size) { \
125case 1: __put_user_asm(x,b,addr,__pu_ret); break; \
126case 2: __put_user_asm(x,h,addr,__pu_ret); break; \
127case 4: __put_user_asm(x,,addr,__pu_ret); break; \
128case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
129default: __pu_ret = __put_user_bad(); break; \
130} __pu_ret; })
131
132#define __put_user_asm(x,size,addr,ret) \
133__asm__ __volatile__( \
134 "/* Put user asm, inline. */\n" \
135"1:\t" "st"#size " %1, %2\n\t" \
136 "clr %0\n" \
137"2:\n\n\t" \
138 ".section .fixup,#alloc,#execinstr\n\t" \
139 ".align 4\n" \
140"3:\n\t" \
141 "b 2b\n\t" \
142 " mov %3, %0\n\t" \
143 ".previous\n\n\t" \
144 ".section __ex_table,#alloc\n\t" \
145 ".align 4\n\t" \
146 ".word 1b, 3b\n\t" \
147 ".previous\n\n\t" \
148 : "=&r" (ret) : "r" (x), "m" (*__m(addr)), \
149 "i" (-EFAULT))
150
151extern int __put_user_bad(void);
152
153#define __get_user_check(x,addr,size,type) ({ \
154register int __gu_ret; \
155register unsigned long __gu_val; \
156if (__access_ok(addr,size)) { \
157switch (size) { \
158case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
159case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
160case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \
161case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \
162default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
163} } else { __gu_val = 0; __gu_ret = -EFAULT; } x = (type) __gu_val; __gu_ret; })
164
165#define __get_user_check_ret(x,addr,size,type,retval) ({ \
166register unsigned long __gu_val __asm__ ("l1"); \
167if (__access_ok(addr,size)) { \
168switch (size) { \
169case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
170case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
171case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \
172case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \
173default: if (__get_user_bad()) return retval; \
174} x = (type) __gu_val; } else return retval; })
175
176#define __get_user_nocheck(x,addr,size,type) ({ \
177register int __gu_ret; \
178register unsigned long __gu_val; \
179switch (size) { \
180case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
181case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
182case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \
183case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \
184default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
185} x = (type) __gu_val; __gu_ret; })
186
187#define __get_user_nocheck_ret(x,addr,size,type,retval) ({ \
188register unsigned long __gu_val __asm__ ("l1"); \
189switch (size) { \
190case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
191case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
192case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \
193case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \
194default: if (__get_user_bad()) return retval; \
195} x = (type) __gu_val; })
196
197#define __get_user_asm(x,size,addr,ret) \
198__asm__ __volatile__( \
199 "/* Get user asm, inline. */\n" \
200"1:\t" "ld"#size " %2, %1\n\t" \
201 "clr %0\n" \
202"2:\n\n\t" \
203 ".section .fixup,#alloc,#execinstr\n\t" \
204 ".align 4\n" \
205"3:\n\t" \
206 "clr %1\n\t" \
207 "b 2b\n\t" \
208 " mov %3, %0\n\n\t" \
209 ".previous\n\t" \
210 ".section __ex_table,#alloc\n\t" \
211 ".align 4\n\t" \
212 ".word 1b, 3b\n\n\t" \
213 ".previous\n\t" \
214 : "=&r" (ret), "=&r" (x) : "m" (*__m(addr)), \
215 "i" (-EFAULT))
216
217#define __get_user_asm_ret(x,size,addr,retval) \
218if (__builtin_constant_p(retval) && retval == -EFAULT) \
219__asm__ __volatile__( \
220 "/* Get user asm ret, inline. */\n" \
221"1:\t" "ld"#size " %1, %0\n\n\t" \
222 ".section __ex_table,#alloc\n\t" \
223 ".align 4\n\t" \
224 ".word 1b,__ret_efault\n\n\t" \
225 ".previous\n\t" \
226 : "=&r" (x) : "m" (*__m(addr))); \
227else \
228__asm__ __volatile__( \
229 "/* Get user asm ret, inline. */\n" \
230"1:\t" "ld"#size " %1, %0\n\n\t" \
231 ".section .fixup,#alloc,#execinstr\n\t" \
232 ".align 4\n" \
233"3:\n\t" \
234 "ret\n\t" \
235 " restore %%g0, %2, %%o0\n\n\t" \
236 ".previous\n\t" \
237 ".section __ex_table,#alloc\n\t" \
238 ".align 4\n\t" \
239 ".word 1b, 3b\n\n\t" \
240 ".previous\n\t" \
241 : "=&r" (x) : "m" (*__m(addr)), "i" (retval))
242
243extern int __get_user_bad(void);
244
245extern unsigned long __copy_user(void __user *to, const void __user *from, unsigned long size);
246
247static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
248{
249 if (n && __access_ok((unsigned long) to, n))
250 return __copy_user(to, (__force void __user *) from, n);
251 else
252 return n;
253}
254
255static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
256{
257 return __copy_user(to, (__force void __user *) from, n);
258}
259
260static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
261{
262 if (n && __access_ok((unsigned long) from, n))
263 return __copy_user((__force void __user *) to, from, n);
264 else
265 return n;
266}
267
268static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
269{
270 return __copy_user((__force void __user *) to, from, n);
271}
272
273#define __copy_to_user_inatomic __copy_to_user
274#define __copy_from_user_inatomic __copy_from_user
275
276static inline unsigned long __clear_user(void __user *addr, unsigned long size)
277{
278 unsigned long ret;
279
280 __asm__ __volatile__ (
281 ".section __ex_table,#alloc\n\t"
282 ".align 4\n\t"
283 ".word 1f,3\n\t"
284 ".previous\n\t"
285 "mov %2, %%o1\n"
286 "1:\n\t"
287 "call __bzero\n\t"
288 " mov %1, %%o0\n\t"
289 "mov %%o0, %0\n"
290 : "=r" (ret) : "r" (addr), "r" (size) :
291 "o0", "o1", "o2", "o3", "o4", "o5", "o7",
292 "g1", "g2", "g3", "g4", "g5", "g7", "cc");
293
294 return ret;
295}
296
297static inline unsigned long clear_user(void __user *addr, unsigned long n)
298{
299 if (n && __access_ok((unsigned long) addr, n))
300 return __clear_user(addr, n);
301 else
302 return n;
303}
304
305extern long __strncpy_from_user(char *dest, const char __user *src, long count);
306
307static inline long strncpy_from_user(char *dest, const char __user *src, long count)
308{
309 if (__access_ok((unsigned long) src, count))
310 return __strncpy_from_user(dest, src, count);
311 else
312 return -EFAULT;
313}
314
315extern long __strlen_user(const char __user *);
316extern long __strnlen_user(const char __user *, long len);
317
318static inline long strlen_user(const char __user *str)
319{
320 if (!access_ok(VERIFY_READ, str, 0))
321 return 0;
322 else
323 return __strlen_user(str);
324}
325
326static inline long strnlen_user(const char __user *str, long len)
327{
328 if (!access_ok(VERIFY_READ, str, 0))
329 return 0;
330 else
331 return __strnlen_user(str, len);
332}
333
334#endif /* __ASSEMBLY__ */
335
336#endif /* _ASM_UACCESS_H */
diff --git a/include/asm-sparc/uaccess_32.h b/include/asm-sparc/uaccess_32.h
new file mode 100644
index 000000000000..47d5619d43fa
--- /dev/null
+++ b/include/asm-sparc/uaccess_32.h
@@ -0,0 +1,336 @@
1/*
2 * uaccess.h: User space memory access functions.
3 *
4 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 */
7#ifndef _ASM_UACCESS_H
8#define _ASM_UACCESS_H
9
10#ifdef __KERNEL__
11#include <linux/compiler.h>
12#include <linux/sched.h>
13#include <linux/string.h>
14#include <linux/errno.h>
15#include <asm/vac-ops.h>
16#endif
17
18#ifndef __ASSEMBLY__
19
20/* Sparc is not segmented, however we need to be able to fool access_ok()
21 * when doing system calls from kernel mode legitimately.
22 *
23 * "For historical reasons, these macros are grossly misnamed." -Linus
24 */
25
26#define KERNEL_DS ((mm_segment_t) { 0 })
27#define USER_DS ((mm_segment_t) { -1 })
28
29#define VERIFY_READ 0
30#define VERIFY_WRITE 1
31
32#define get_ds() (KERNEL_DS)
33#define get_fs() (current->thread.current_ds)
34#define set_fs(val) ((current->thread.current_ds) = (val))
35
36#define segment_eq(a,b) ((a).seg == (b).seg)
37
38/* There is a conveniently unmapped page at PAGE_OFFSET - PAGE_SIZE, so this test
39 * can be fairly lightweight.
40 * Userland cannot read or write kernel space by passing a large size and an
41 * address close to PAGE_OFFSET - a fault will break any such attempt.
42 */
43#define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
44#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
45#define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size)))
46#define access_ok(type, addr, size) \
47 ({ (void)(type); __access_ok((unsigned long)(addr), size); })
48
49/*
50 * The exception table consists of pairs of addresses: the first is the
51 * address of an instruction that is allowed to fault, and the second is
52 * the address at which the program should continue. No registers are
53 * modified, so it is entirely up to the continuation code to figure out
54 * what to do.
55 *
56 * All the routines below use bits of fixup code that are out of line
57 * with the main instruction path. This means when everything is well,
58 * we don't even have to jump over them. Further, they do not intrude
59 * on our cache or tlb entries.
60 *
61 * There is a special way to mark a range of potentially faulting
62 * insns (like twenty ldd/std's with no other instructions in between):
63 * the first exception_table_entry holds the address of the first insn and 0 in fixup,
64 * and the next entry holds the last potentially faulting insn + 1
65 * with the fault handling routine in fixup.
66 * That fixup code will get
67 * (faulting_insn_address - first_insn_in_the_range_address)/4
68 * in %g2 (ie. index of the faulting instruction in the range).
69 */
70
71struct exception_table_entry
72{
73 unsigned long insn, fixup;
74};
75
76/* Returns 0 if exception not found and fixup otherwise. */
77extern unsigned long search_extables_range(unsigned long addr, unsigned long *g2);
78
79extern void __ret_efault(void);
80
81/* Uh, these should become the main single-value transfer routines..
82 * They automatically use the right size if we just have the right
83 * pointer type..
84 *
85 * This gets kind of ugly. We want to return _two_ values in "get_user()"
86 * and yet we don't want to do any pointers, because that is too much
87 * of a performance impact. Thus we have a few rather ugly macros here,
88 * and hide all the ugliness from the user.
89 */
90#define put_user(x,ptr) ({ \
91unsigned long __pu_addr = (unsigned long)(ptr); \
92__chk_user_ptr(ptr); \
93__put_user_check((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); })
94
95#define get_user(x,ptr) ({ \
96unsigned long __gu_addr = (unsigned long)(ptr); \
97__chk_user_ptr(ptr); \
98__get_user_check((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); })
99
100/*
101 * The "__xxx" versions do not do address space checking, useful when
102 * doing multiple accesses to the same area (the user has to do the
103 * checks by hand with "access_ok()")
104 */
105#define __put_user(x,ptr) __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
106#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)),__typeof__(*(ptr)))
107
108struct __large_struct { unsigned long buf[100]; };
109#define __m(x) ((struct __large_struct __user *)(x))
110
111#define __put_user_check(x,addr,size) ({ \
112register int __pu_ret; \
113if (__access_ok(addr,size)) { \
114switch (size) { \
115case 1: __put_user_asm(x,b,addr,__pu_ret); break; \
116case 2: __put_user_asm(x,h,addr,__pu_ret); break; \
117case 4: __put_user_asm(x,,addr,__pu_ret); break; \
118case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
119default: __pu_ret = __put_user_bad(); break; \
120} } else { __pu_ret = -EFAULT; } __pu_ret; })
121
122#define __put_user_nocheck(x,addr,size) ({ \
123register int __pu_ret; \
124switch (size) { \
125case 1: __put_user_asm(x,b,addr,__pu_ret); break; \
126case 2: __put_user_asm(x,h,addr,__pu_ret); break; \
127case 4: __put_user_asm(x,,addr,__pu_ret); break; \
128case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
129default: __pu_ret = __put_user_bad(); break; \
130} __pu_ret; })
131
132#define __put_user_asm(x,size,addr,ret) \
133__asm__ __volatile__( \
134 "/* Put user asm, inline. */\n" \
135"1:\t" "st"#size " %1, %2\n\t" \
136 "clr %0\n" \
137"2:\n\n\t" \
138 ".section .fixup,#alloc,#execinstr\n\t" \
139 ".align 4\n" \
140"3:\n\t" \
141 "b 2b\n\t" \
142 " mov %3, %0\n\t" \
143 ".previous\n\n\t" \
144 ".section __ex_table,#alloc\n\t" \
145 ".align 4\n\t" \
146 ".word 1b, 3b\n\t" \
147 ".previous\n\n\t" \
148 : "=&r" (ret) : "r" (x), "m" (*__m(addr)), \
149 "i" (-EFAULT))
150
151extern int __put_user_bad(void);
152
153#define __get_user_check(x,addr,size,type) ({ \
154register int __gu_ret; \
155register unsigned long __gu_val; \
156if (__access_ok(addr,size)) { \
157switch (size) { \
158case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
159case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
160case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \
161case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \
162default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
163} } else { __gu_val = 0; __gu_ret = -EFAULT; } x = (type) __gu_val; __gu_ret; })
164
165#define __get_user_check_ret(x,addr,size,type,retval) ({ \
166register unsigned long __gu_val __asm__ ("l1"); \
167if (__access_ok(addr,size)) { \
168switch (size) { \
169case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
170case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
171case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \
172case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \
173default: if (__get_user_bad()) return retval; \
174} x = (type) __gu_val; } else return retval; })
175
176#define __get_user_nocheck(x,addr,size,type) ({ \
177register int __gu_ret; \
178register unsigned long __gu_val; \
179switch (size) { \
180case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
181case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
182case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \
183case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \
184default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
185} x = (type) __gu_val; __gu_ret; })
186
187#define __get_user_nocheck_ret(x,addr,size,type,retval) ({ \
188register unsigned long __gu_val __asm__ ("l1"); \
189switch (size) { \
190case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
191case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
192case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \
193case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \
194default: if (__get_user_bad()) return retval; \
195} x = (type) __gu_val; })
196
197#define __get_user_asm(x,size,addr,ret) \
198__asm__ __volatile__( \
199 "/* Get user asm, inline. */\n" \
200"1:\t" "ld"#size " %2, %1\n\t" \
201 "clr %0\n" \
202"2:\n\n\t" \
203 ".section .fixup,#alloc,#execinstr\n\t" \
204 ".align 4\n" \
205"3:\n\t" \
206 "clr %1\n\t" \
207 "b 2b\n\t" \
208 " mov %3, %0\n\n\t" \
209 ".previous\n\t" \
210 ".section __ex_table,#alloc\n\t" \
211 ".align 4\n\t" \
212 ".word 1b, 3b\n\n\t" \
213 ".previous\n\t" \
214 : "=&r" (ret), "=&r" (x) : "m" (*__m(addr)), \
215 "i" (-EFAULT))
216
217#define __get_user_asm_ret(x,size,addr,retval) \
218if (__builtin_constant_p(retval) && retval == -EFAULT) \
219__asm__ __volatile__( \
220 "/* Get user asm ret, inline. */\n" \
221"1:\t" "ld"#size " %1, %0\n\n\t" \
222 ".section __ex_table,#alloc\n\t" \
223 ".align 4\n\t" \
224 ".word 1b,__ret_efault\n\n\t" \
225 ".previous\n\t" \
226 : "=&r" (x) : "m" (*__m(addr))); \
227else \
228__asm__ __volatile__( \
229 "/* Get user asm ret, inline. */\n" \
230"1:\t" "ld"#size " %1, %0\n\n\t" \
231 ".section .fixup,#alloc,#execinstr\n\t" \
232 ".align 4\n" \
233"3:\n\t" \
234 "ret\n\t" \
235 " restore %%g0, %2, %%o0\n\n\t" \
236 ".previous\n\t" \
237 ".section __ex_table,#alloc\n\t" \
238 ".align 4\n\t" \
239 ".word 1b, 3b\n\n\t" \
240 ".previous\n\t" \
241 : "=&r" (x) : "m" (*__m(addr)), "i" (retval))
242
243extern int __get_user_bad(void);
244
245extern unsigned long __copy_user(void __user *to, const void __user *from, unsigned long size);
246
247static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
248{
249 if (n && __access_ok((unsigned long) to, n))
250 return __copy_user(to, (__force void __user *) from, n);
251 else
252 return n;
253}
254
255static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
256{
257 return __copy_user(to, (__force void __user *) from, n);
258}
259
260static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
261{
262 if (n && __access_ok((unsigned long) from, n))
263 return __copy_user((__force void __user *) to, from, n);
264 else
265 return n;
266}
267
268static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
269{
270 return __copy_user((__force void __user *) to, from, n);
271}
272
273#define __copy_to_user_inatomic __copy_to_user
274#define __copy_from_user_inatomic __copy_from_user
275
276static inline unsigned long __clear_user(void __user *addr, unsigned long size)
277{
278 unsigned long ret;
279
280 __asm__ __volatile__ (
281 ".section __ex_table,#alloc\n\t"
282 ".align 4\n\t"
283 ".word 1f,3\n\t"
284 ".previous\n\t"
285 "mov %2, %%o1\n"
286 "1:\n\t"
287 "call __bzero\n\t"
288 " mov %1, %%o0\n\t"
289 "mov %%o0, %0\n"
290 : "=r" (ret) : "r" (addr), "r" (size) :
291 "o0", "o1", "o2", "o3", "o4", "o5", "o7",
292 "g1", "g2", "g3", "g4", "g5", "g7", "cc");
293
294 return ret;
295}
296
297static inline unsigned long clear_user(void __user *addr, unsigned long n)
298{
299 if (n && __access_ok((unsigned long) addr, n))
300 return __clear_user(addr, n);
301 else
302 return n;
303}
304
305extern long __strncpy_from_user(char *dest, const char __user *src, long count);
306
307static inline long strncpy_from_user(char *dest, const char __user *src, long count)
308{
309 if (__access_ok((unsigned long) src, count))
310 return __strncpy_from_user(dest, src, count);
311 else
312 return -EFAULT;
313}
314
315extern long __strlen_user(const char __user *);
316extern long __strnlen_user(const char __user *, long len);
317
318static inline long strlen_user(const char __user *str)
319{
320 if (!access_ok(VERIFY_READ, str, 0))
321 return 0;
322 else
323 return __strlen_user(str);
324}
325
326static inline long strnlen_user(const char __user *str, long len)
327{
328 if (!access_ok(VERIFY_READ, str, 0))
329 return 0;
330 else
331 return __strnlen_user(str, len);
332}
333
334#endif /* __ASSEMBLY__ */
335
336#endif /* _ASM_UACCESS_H */
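
The exception-table comment in uaccess_32.h above documents a range encoding: an entry whose fixup is 0 opens a run of faulting instructions, the following entry carries (last insn + 1) and the real fixup, and the fixup code receives (fault - first)/4 in %g2. The stand-alone sketch below only models that search; it is not the kernel's search_extables_range() implementation, and the addresses are arbitrary.

/* Hedged user-space model of the range-encoded exception table search
 * described above.  Entries and addresses are invented for illustration.
 */
#include <stdio.h>

struct exception_table_entry { unsigned long insn, fixup; };

/* Returns the fixup address (0 if none); writes the insn index to *g2. */
static unsigned long search_range(const struct exception_table_entry *tbl,
				  int n, unsigned long fault, unsigned long *g2)
{
	for (int i = 0; i < n; i++) {
		if (tbl[i].fixup == 0 && i + 1 < n &&
		    fault >= tbl[i].insn && fault < tbl[i + 1].insn) {
			*g2 = (fault - tbl[i].insn) / 4;
			return tbl[i + 1].fixup;
		}
		if (tbl[i].insn == fault && tbl[i].fixup)
			return tbl[i].fixup;	/* ordinary single-insn entry */
	}
	return 0;
}

int main(void)
{
	/* Twenty ldd/std's at 0x1000..0x104c, handled by a fixup at 0x2000. */
	struct exception_table_entry tbl[] = {
		{ 0x1000, 0 }, { 0x1050, 0x2000 },
	};
	unsigned long g2 = 0;
	unsigned long fix = search_range(tbl, 2, 0x1020, &g2);

	printf("fixup = %#lx, g2 = %lu\n", fix, g2);	/* 0x2000, 8 */
	return 0;
}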
diff --git a/include/asm-sparc/uaccess_64.h b/include/asm-sparc/uaccess_64.h
new file mode 100644
index 000000000000..296ef30e05c8
--- /dev/null
+++ b/include/asm-sparc/uaccess_64.h
@@ -0,0 +1,273 @@
1#ifndef _ASM_UACCESS_H
2#define _ASM_UACCESS_H
3
4/*
5 * User space memory access functions
6 */
7
8#ifdef __KERNEL__
9#include <linux/compiler.h>
10#include <linux/sched.h>
11#include <linux/string.h>
12#include <asm/asi.h>
13#include <asm/system.h>
14#include <asm/spitfire.h>
15#include <asm-generic/uaccess.h>
16#endif
17
18#ifndef __ASSEMBLY__
19
20/*
21 * Sparc64 is segmented, though more like the M68K than the I386.
22 * We use the secondary ASI to address user memory, which references a
23 * completely different VM map, thus there is zero chance of the user
24 * doing something queer and tricking us into poking kernel memory.
25 *
26 * What is left here is basically what is needed for the other parts of
27 * the kernel that expect to be able to manipulate, erm, "segments".
28 * Or perhaps more properly, permissions.
29 *
30 * "For historical reasons, these macros are grossly misnamed." -Linus
31 */
32
33#define KERNEL_DS ((mm_segment_t) { ASI_P })
34#define USER_DS ((mm_segment_t) { ASI_AIUS }) /* har har har */
35
36#define VERIFY_READ 0
37#define VERIFY_WRITE 1
38
39#define get_fs() ((mm_segment_t) { get_thread_current_ds() })
40#define get_ds() (KERNEL_DS)
41
42#define segment_eq(a,b) ((a).seg == (b).seg)
43
44#define set_fs(val) \
45do { \
46 set_thread_current_ds((val).seg); \
47 __asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \
48} while(0)
49
50static inline int __access_ok(const void __user * addr, unsigned long size)
51{
52 return 1;
53}
54
55static inline int access_ok(int type, const void __user * addr, unsigned long size)
56{
57 return 1;
58}
59
60/*
61 * The exception table consists of pairs of addresses: the first is the
62 * address of an instruction that is allowed to fault, and the second is
63 * the address at which the program should continue. No registers are
64 * modified, so it is entirely up to the continuation code to figure out
65 * what to do.
66 *
67 * All the routines below use bits of fixup code that are out of line
68 * with the main instruction path. This means when everything is well,
69 * we don't even have to jump over them. Further, they do not intrude
70 * on our cache or tlb entries.
71 */
72
73struct exception_table_entry {
74 unsigned int insn, fixup;
75};
76
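Conceptually, the fault handler walks this table with the trapping PC and, on a match, resumes at the fixup address instead of treating the access as a fatal kernel fault. The real lookup lives in the generic extable code; the helper below is only a sketch of the idea, with an invented name:

static unsigned long example_find_fixup(const struct exception_table_entry *start,
					const struct exception_table_entry *end,
					unsigned long fault_pc)
{
	const struct exception_table_entry *e;

	for (e = start; e < end; e++)
		if ((unsigned long) e->insn == fault_pc)
			return (unsigned long) e->fixup;	/* continue here */
	return 0;	/* no fixup registered: a genuine kernel fault */
}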
77extern void __ret_efault(void);
78extern void __retl_efault(void);
79
80/* Uh, these should become the main single-value transfer routines..
81 * They automatically use the right size if we just have the right
82 * pointer type..
83 *
84 * This gets kind of ugly. We want to return _two_ values in "get_user()"
85 * and yet we don't want to do any pointers, because that is too much
86 * of a performance impact. Thus we have a few rather ugly macros here,
87 * and hide all the ugliness from the user.
88 */
89#define put_user(x,ptr) ({ \
90unsigned long __pu_addr = (unsigned long)(ptr); \
91__chk_user_ptr(ptr); \
92__put_user_nocheck((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); })
93
94#define get_user(x,ptr) ({ \
95unsigned long __gu_addr = (unsigned long)(ptr); \
96__chk_user_ptr(ptr); \
97__get_user_nocheck((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); })
98
99#define __put_user(x,ptr) put_user(x,ptr)
100#define __get_user(x,ptr) get_user(x,ptr)
101
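Both macros evaluate to 0 on success and -EFAULT when the access faults, so a typical caller looks like the following sketch (the function itself is illustrative, not part of this header):

static int example_double_it(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))	/* reads *uptr through %asi */
		return -EFAULT;
	return put_user(val * 2, uptr);	/* 0 on success, -EFAULT on fault */
}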
102struct __large_struct { unsigned long buf[100]; };
103#define __m(x) ((struct __large_struct *)(x))
104
105#define __put_user_nocheck(data,addr,size) ({ \
106register int __pu_ret; \
107switch (size) { \
108case 1: __put_user_asm(data,b,addr,__pu_ret); break; \
109case 2: __put_user_asm(data,h,addr,__pu_ret); break; \
110case 4: __put_user_asm(data,w,addr,__pu_ret); break; \
111case 8: __put_user_asm(data,x,addr,__pu_ret); break; \
112default: __pu_ret = __put_user_bad(); break; \
113} __pu_ret; })
114
115#define __put_user_asm(x,size,addr,ret) \
116__asm__ __volatile__( \
117 "/* Put user asm, inline. */\n" \
118"1:\t" "st"#size "a %1, [%2] %%asi\n\t" \
119 "clr %0\n" \
120"2:\n\n\t" \
121 ".section .fixup,#alloc,#execinstr\n\t" \
122 ".align 4\n" \
123"3:\n\t" \
124 "sethi %%hi(2b), %0\n\t" \
125 "jmpl %0 + %%lo(2b), %%g0\n\t" \
126 " mov %3, %0\n\n\t" \
127 ".previous\n\t" \
128 ".section __ex_table,\"a\"\n\t" \
129 ".align 4\n\t" \
130 ".word 1b, 3b\n\t" \
131 ".previous\n\n\t" \
132 : "=r" (ret) : "r" (x), "r" (__m(addr)), \
133 "i" (-EFAULT))
134
135extern int __put_user_bad(void);
136
137#define __get_user_nocheck(data,addr,size,type) ({ \
138register int __gu_ret; \
139register unsigned long __gu_val; \
140switch (size) { \
141case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
142case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
143case 4: __get_user_asm(__gu_val,uw,addr,__gu_ret); break; \
144case 8: __get_user_asm(__gu_val,x,addr,__gu_ret); break; \
145default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
146} data = (type) __gu_val; __gu_ret; })
147
148#define __get_user_nocheck_ret(data,addr,size,type,retval) ({ \
149register unsigned long __gu_val __asm__ ("l1"); \
150switch (size) { \
151case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
152case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
153case 4: __get_user_asm_ret(__gu_val,uw,addr,retval); break; \
154case 8: __get_user_asm_ret(__gu_val,x,addr,retval); break; \
155default: if (__get_user_bad()) return retval; \
156} data = (type) __gu_val; })
157
158#define __get_user_asm(x,size,addr,ret) \
159__asm__ __volatile__( \
160 "/* Get user asm, inline. */\n" \
161"1:\t" "ld"#size "a [%2] %%asi, %1\n\t" \
162 "clr %0\n" \
163"2:\n\n\t" \
164 ".section .fixup,#alloc,#execinstr\n\t" \
165 ".align 4\n" \
166"3:\n\t" \
167 "sethi %%hi(2b), %0\n\t" \
168 "clr %1\n\t" \
169 "jmpl %0 + %%lo(2b), %%g0\n\t" \
170 " mov %3, %0\n\n\t" \
171 ".previous\n\t" \
172 ".section __ex_table,\"a\"\n\t" \
173 ".align 4\n\t" \
174 ".word 1b, 3b\n\n\t" \
175 ".previous\n\t" \
176 : "=r" (ret), "=r" (x) : "r" (__m(addr)), \
177 "i" (-EFAULT))
178
179#define __get_user_asm_ret(x,size,addr,retval) \
180if (__builtin_constant_p(retval) && retval == -EFAULT) \
181__asm__ __volatile__( \
182 "/* Get user asm ret, inline. */\n" \
183"1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \
184 ".section __ex_table,\"a\"\n\t" \
185 ".align 4\n\t" \
186 ".word 1b,__ret_efault\n\n\t" \
187 ".previous\n\t" \
188 : "=r" (x) : "r" (__m(addr))); \
189else \
190__asm__ __volatile__( \
191 "/* Get user asm ret, inline. */\n" \
192"1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \
193 ".section .fixup,#alloc,#execinstr\n\t" \
194 ".align 4\n" \
195"3:\n\t" \
196 "ret\n\t" \
197 " restore %%g0, %2, %%o0\n\n\t" \
198 ".previous\n\t" \
199 ".section __ex_table,\"a\"\n\t" \
200 ".align 4\n\t" \
201 ".word 1b, 3b\n\n\t" \
202 ".previous\n\t" \
203 : "=r" (x) : "r" (__m(addr)), "i" (retval))
204
205extern int __get_user_bad(void);
206
207extern unsigned long __must_check ___copy_from_user(void *to,
208 const void __user *from,
209 unsigned long size);
210extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
211 unsigned long size);
212static inline unsigned long __must_check
213copy_from_user(void *to, const void __user *from, unsigned long size)
214{
215 unsigned long ret = ___copy_from_user(to, from, size);
216
217 if (unlikely(ret))
218 ret = copy_from_user_fixup(to, from, size);
219 return ret;
220}
221#define __copy_from_user copy_from_user
222
223extern unsigned long __must_check ___copy_to_user(void __user *to,
224 const void *from,
225 unsigned long size);
226extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
227 unsigned long size);
228static inline unsigned long __must_check
229copy_to_user(void __user *to, const void *from, unsigned long size)
230{
231 unsigned long ret = ___copy_to_user(to, from, size);
232
233 if (unlikely(ret))
234 ret = copy_to_user_fixup(to, from, size);
235 return ret;
236}
237#define __copy_to_user copy_to_user
238
239extern unsigned long __must_check ___copy_in_user(void __user *to,
240 const void __user *from,
241 unsigned long size);
242extern unsigned long copy_in_user_fixup(void __user *to, void __user *from,
243 unsigned long size);
244static inline unsigned long __must_check
245copy_in_user(void __user *to, void __user *from, unsigned long size)
246{
247 unsigned long ret = ___copy_in_user(to, from, size);
248
249 if (unlikely(ret))
250 ret = copy_in_user_fixup(to, from, size);
251 return ret;
252}
253#define __copy_in_user copy_in_user
254
255extern unsigned long __must_check __clear_user(void __user *, unsigned long);
256
257#define clear_user __clear_user
258
259extern long __must_check __strncpy_from_user(char *dest, const char __user *src, long count);
260
261#define strncpy_from_user __strncpy_from_user
262
263extern long __strlen_user(const char __user *);
264extern long __strnlen_user(const char __user *, long len);
265
266#define strlen_user __strlen_user
267#define strnlen_user __strnlen_user
268#define __copy_to_user_inatomic __copy_to_user
269#define __copy_from_user_inatomic __copy_from_user
270
271#endif /* __ASSEMBLY__ */
272
273#endif /* _ASM_UACCESS_H */
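The copy_{from,to,in}_user() wrappers above try the optimized copy first and fall back to the byte-accurate *_fixup variant only once a fault has been reported; in every case the return value is the number of bytes that could not be copied. A typical caller, sketched with a hypothetical struct name:

struct example_config { int flags; int timeout; };	/* hypothetical */

static int example_get_config(struct example_config *kcfg,
			      const struct example_config __user *ucfg)
{
	if (copy_from_user(kcfg, ucfg, sizeof(*kcfg)))
		return -EFAULT;		/* some bytes were left uncopied */
	return 0;
}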
diff --git a/include/asm-sparc/uctx.h b/include/asm-sparc/uctx.h
new file mode 100644
index 000000000000..dc937c75ffdd
--- /dev/null
+++ b/include/asm-sparc/uctx.h
@@ -0,0 +1,71 @@
1/*
2 * uctx.h: Sparc64 {set,get}context() register state layouts.
3 *
4 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
5 */
6
7#ifndef __SPARC64_UCTX_H
8#define __SPARC64_UCTX_H
9
10#define MC_TSTATE 0
11#define MC_PC 1
12#define MC_NPC 2
13#define MC_Y 3
14#define MC_G1 4
15#define MC_G2 5
16#define MC_G3 6
17#define MC_G4 7
18#define MC_G5 8
19#define MC_G6 9
20#define MC_G7 10
21#define MC_O0 11
22#define MC_O1 12
23#define MC_O2 13
24#define MC_O3 14
25#define MC_O4 15
26#define MC_O5 16
27#define MC_O6 17
28#define MC_O7 18
29#define MC_NGREG 19
30
31typedef unsigned long mc_greg_t;
32typedef mc_greg_t mc_gregset_t[MC_NGREG];
33
34#define MC_MAXFPQ 16
35struct mc_fq {
36 unsigned long *mcfq_addr;
37 unsigned int mcfq_insn;
38};
39
40struct mc_fpu {
41 union {
42 unsigned int sregs[32];
43 unsigned long dregs[32];
44 long double qregs[16];
45 } mcfpu_fregs;
46 unsigned long mcfpu_fsr;
47 unsigned long mcfpu_fprs;
48 unsigned long mcfpu_gsr;
49 struct mc_fq *mcfpu_fq;
50 unsigned char mcfpu_qcnt;
51 unsigned char mcfpu_qentsz;
52 unsigned char mcfpu_enab;
53};
54typedef struct mc_fpu mc_fpu_t;
55
56typedef struct {
57 mc_gregset_t mc_gregs;
58 mc_greg_t mc_fp;
59 mc_greg_t mc_i7;
60 mc_fpu_t mc_fpregs;
61} mcontext_t;
62
63struct ucontext {
64 struct ucontext *uc_link;
65 unsigned long uc_flags;
66 sigset_t uc_sigmask;
67 mcontext_t uc_mcontext;
68};
69typedef struct ucontext ucontext_t;
70
71#endif /* __SPARC64_UCTX_H */
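The MC_* constants above are indices into mc_gregs, so pulling a saved register out of a ucontext is just an array lookup. A hypothetical accessor, for illustration only:

static unsigned long example_saved_pc(const struct ucontext *uc)
{
	/* MC_PC selects the saved program counter slot in mc_gregs. */
	return uc->uc_mcontext.mc_gregs[MC_PC];
}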
diff --git a/include/asm-sparc/unistd.h b/include/asm-sparc/unistd.h
index 2338a0276377..3c2609618a09 100644
--- a/include/asm-sparc/unistd.h
+++ b/include/asm-sparc/unistd.h
@@ -1,378 +1,8 @@
1 #ifndef _SPARC_UNISTD_H                                            | 1 #ifndef ___ASM_SPARC_UNISTD_H
2 #define _SPARC_UNISTD_H                                            | 2 #define ___ASM_SPARC_UNISTD_H
3                                                                    | 3 #if defined(__sparc__) && defined(__arch64__)
4 /*                                                                 | 4 #include <asm-sparc/unistd_64.h>
5  * System calls under the Sparc.                                   | 5 #else
6  *                                                                 | 6 #include <asm-sparc/unistd_32.h>
7  * Don't be scared by the ugly clobbers, it is the only way I can  | 7 #endif
8  * think of right now to force the arguments into fixed registers  | 8 #endif
9 * before the trap into the system call with gcc 'asm' statements.
10 *
11 * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
12 *
13 * SunOS compatibility based upon preliminary work which is:
14 *
15 * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
16 */
17
18#define __NR_restart_syscall 0 /* Linux Specific */
19#define __NR_exit 1 /* Common */
20#define __NR_fork 2 /* Common */
21#define __NR_read 3 /* Common */
22#define __NR_write 4 /* Common */
23#define __NR_open 5 /* Common */
24#define __NR_close 6 /* Common */
25#define __NR_wait4 7 /* Common */
26#define __NR_creat 8 /* Common */
27#define __NR_link 9 /* Common */
28#define __NR_unlink 10 /* Common */
29#define __NR_execv 11 /* SunOS Specific */
30#define __NR_chdir 12 /* Common */
31#define __NR_chown 13 /* Common */
32#define __NR_mknod 14 /* Common */
33#define __NR_chmod 15 /* Common */
34#define __NR_lchown 16 /* Common */
35#define __NR_brk 17 /* Common */
36#define __NR_perfctr 18 /* Performance counter operations */
37#define __NR_lseek 19 /* Common */
38#define __NR_getpid 20 /* Common */
39#define __NR_capget 21 /* Linux Specific */
40#define __NR_capset 22 /* Linux Specific */
41#define __NR_setuid 23 /* Implemented via setreuid in SunOS */
42#define __NR_getuid 24 /* Common */
43#define __NR_vmsplice 25 /* ENOSYS under SunOS */
44#define __NR_ptrace 26 /* Common */
45#define __NR_alarm 27 /* Implemented via setitimer in SunOS */
46#define __NR_sigaltstack 28 /* Common */
47#define __NR_pause 29 /* Is sigblock(0)->sigpause() in SunOS */
48#define __NR_utime 30 /* Implemented via utimes() under SunOS */
49#define __NR_lchown32 31 /* Linux sparc32 specific */
50#define __NR_fchown32 32 /* Linux sparc32 specific */
51#define __NR_access 33 /* Common */
52#define __NR_nice 34 /* Implemented via get/setpriority() in SunOS */
53#define __NR_chown32 35 /* Linux sparc32 specific */
54#define __NR_sync 36 /* Common */
55#define __NR_kill 37 /* Common */
56#define __NR_stat 38 /* Common */
57#define __NR_sendfile 39 /* Linux Specific */
58#define __NR_lstat 40 /* Common */
59#define __NR_dup 41 /* Common */
60#define __NR_pipe 42 /* Common */
61#define __NR_times 43 /* Implemented via getrusage() in SunOS */
62#define __NR_getuid32 44 /* Linux sparc32 specific */
63#define __NR_umount2 45 /* Linux Specific */
64#define __NR_setgid 46 /* Implemented via setregid() in SunOS */
65#define __NR_getgid 47 /* Common */
66#define __NR_signal 48 /* Implemented via sigvec() in SunOS */
67#define __NR_geteuid 49 /* SunOS calls getuid() */
68#define __NR_getegid 50 /* SunOS calls getgid() */
69#define __NR_acct 51 /* Common */
70/* #define __NR_memory_ordering 52 Linux sparc64 specific */
71#define __NR_getgid32 53 /* Linux sparc32 specific */
72#define __NR_ioctl 54 /* Common */
73#define __NR_reboot 55 /* Common */
74#define __NR_mmap2 56 /* Linux sparc32 Specific */
75#define __NR_symlink 57 /* Common */
76#define __NR_readlink 58 /* Common */
77#define __NR_execve 59 /* Common */
78#define __NR_umask 60 /* Common */
79#define __NR_chroot 61 /* Common */
80#define __NR_fstat 62 /* Common */
81#define __NR_fstat64 63 /* Linux Specific */
82#define __NR_getpagesize 64 /* Common */
83#define __NR_msync 65 /* Common in newer 1.3.x revs... */
84#define __NR_vfork 66 /* Common */
85#define __NR_pread64 67 /* Linux Specific */
86#define __NR_pwrite64 68 /* Linux Specific */
87#define __NR_geteuid32 69 /* Linux sparc32, sbrk under SunOS */
88#define __NR_getegid32 70 /* Linux sparc32, sstk under SunOS */
89#define __NR_mmap 71 /* Common */
90#define __NR_setreuid32 72 /* Linux sparc32, vadvise under SunOS */
91#define __NR_munmap 73 /* Common */
92#define __NR_mprotect 74 /* Common */
93#define __NR_madvise 75 /* Common */
94#define __NR_vhangup 76 /* Common */
95#define __NR_truncate64 77 /* Linux sparc32 Specific */
96#define __NR_mincore 78 /* Common */
97#define __NR_getgroups 79 /* Common */
98#define __NR_setgroups 80 /* Common */
99#define __NR_getpgrp 81 /* Common */
100#define __NR_setgroups32 82 /* Linux sparc32, setpgrp under SunOS */
101#define __NR_setitimer 83 /* Common */
102#define __NR_ftruncate64 84 /* Linux sparc32 Specific */
103#define __NR_swapon 85 /* Common */
104#define __NR_getitimer 86 /* Common */
105#define __NR_setuid32 87 /* Linux sparc32, gethostname under SunOS */
106#define __NR_sethostname 88 /* Common */
107#define __NR_setgid32 89 /* Linux sparc32, getdtablesize under SunOS */
108#define __NR_dup2 90 /* Common */
109#define __NR_setfsuid32 91 /* Linux sparc32, getdopt under SunOS */
110#define __NR_fcntl 92 /* Common */
111#define __NR_select 93 /* Common */
112#define __NR_setfsgid32 94 /* Linux sparc32, setdopt under SunOS */
113#define __NR_fsync 95 /* Common */
114#define __NR_setpriority 96 /* Common */
115#define __NR_socket 97 /* Common */
116#define __NR_connect 98 /* Common */
117#define __NR_accept 99 /* Common */
118#define __NR_getpriority 100 /* Common */
119#define __NR_rt_sigreturn 101 /* Linux Specific */
120#define __NR_rt_sigaction 102 /* Linux Specific */
121#define __NR_rt_sigprocmask 103 /* Linux Specific */
122#define __NR_rt_sigpending 104 /* Linux Specific */
123#define __NR_rt_sigtimedwait 105 /* Linux Specific */
124#define __NR_rt_sigqueueinfo 106 /* Linux Specific */
125#define __NR_rt_sigsuspend 107 /* Linux Specific */
126#define __NR_setresuid32 108 /* Linux Specific, sigvec under SunOS */
127#define __NR_getresuid32 109 /* Linux Specific, sigblock under SunOS */
128#define __NR_setresgid32 110 /* Linux Specific, sigsetmask under SunOS */
129#define __NR_getresgid32 111 /* Linux Specific, sigpause under SunOS */
130#define __NR_setregid32 112 /* Linux sparc32, sigstack under SunOS */
131#define __NR_recvmsg 113 /* Common */
132#define __NR_sendmsg 114 /* Common */
133#define __NR_getgroups32 115 /* Linux sparc32, vtrace under SunOS */
134#define __NR_gettimeofday 116 /* Common */
135#define __NR_getrusage 117 /* Common */
136#define __NR_getsockopt 118 /* Common */
137#define __NR_getcwd 119 /* Linux Specific */
138#define __NR_readv 120 /* Common */
139#define __NR_writev 121 /* Common */
140#define __NR_settimeofday 122 /* Common */
141#define __NR_fchown 123 /* Common */
142#define __NR_fchmod 124 /* Common */
143#define __NR_recvfrom 125 /* Common */
144#define __NR_setreuid 126 /* Common */
145#define __NR_setregid 127 /* Common */
146#define __NR_rename 128 /* Common */
147#define __NR_truncate 129 /* Common */
148#define __NR_ftruncate 130 /* Common */
149#define __NR_flock 131 /* Common */
150#define __NR_lstat64 132 /* Linux Specific */
151#define __NR_sendto 133 /* Common */
152#define __NR_shutdown 134 /* Common */
153#define __NR_socketpair 135 /* Common */
154#define __NR_mkdir 136 /* Common */
155#define __NR_rmdir 137 /* Common */
156#define __NR_utimes 138 /* SunOS Specific */
157#define __NR_stat64 139 /* Linux Specific */
158#define __NR_sendfile64 140 /* adjtime under SunOS */
159#define __NR_getpeername 141 /* Common */
160#define __NR_futex 142 /* gethostid under SunOS */
161#define __NR_gettid 143 /* ENOSYS under SunOS */
162#define __NR_getrlimit 144 /* Common */
163#define __NR_setrlimit 145 /* Common */
164#define __NR_pivot_root 146 /* Linux Specific, killpg under SunOS */
165#define __NR_prctl 147 /* ENOSYS under SunOS */
166#define __NR_pciconfig_read 148 /* ENOSYS under SunOS */
167#define __NR_pciconfig_write 149 /* ENOSYS under SunOS */
168#define __NR_getsockname 150 /* Common */
169#define __NR_inotify_init 151 /* Linux specific */
170#define __NR_inotify_add_watch 152 /* Linux specific */
171#define __NR_poll 153 /* Common */
172#define __NR_getdents64 154 /* Linux specific */
173#define __NR_fcntl64 155 /* Linux sparc32 Specific */
174#define __NR_inotify_rm_watch 156 /* Linux specific */
175#define __NR_statfs 157 /* Common */
176#define __NR_fstatfs 158 /* Common */
177#define __NR_umount 159 /* Common */
178#define __NR_sched_set_affinity 160 /* Linux specific, async_daemon under SunOS */
179#define __NR_sched_get_affinity 161 /* Linux specific, getfh under SunOS */
180#define __NR_getdomainname 162 /* SunOS Specific */
181#define __NR_setdomainname 163 /* Common */
182/* #define __NR_utrap_install 164 Linux sparc64 specific */
183#define __NR_quotactl 165 /* Common */
184#define __NR_set_tid_address 166 /* Linux specific, exportfs under SunOS */
185#define __NR_mount 167 /* Common */
186#define __NR_ustat 168 /* Common */
187#define __NR_setxattr 169 /* SunOS: semsys */
188#define __NR_lsetxattr 170 /* SunOS: msgsys */
189#define __NR_fsetxattr 171 /* SunOS: shmsys */
190#define __NR_getxattr 172 /* SunOS: auditsys */
191#define __NR_lgetxattr 173 /* SunOS: rfssys */
192#define __NR_getdents 174 /* Common */
193#define __NR_setsid 175 /* Common */
194#define __NR_fchdir 176 /* Common */
195#define __NR_fgetxattr 177 /* SunOS: fchroot */
196#define __NR_listxattr 178 /* SunOS: vpixsys */
197#define __NR_llistxattr 179 /* SunOS: aioread */
198#define __NR_flistxattr 180 /* SunOS: aiowrite */
199#define __NR_removexattr 181 /* SunOS: aiowait */
200#define __NR_lremovexattr 182 /* SunOS: aiocancel */
201#define __NR_sigpending 183 /* Common */
202#define __NR_query_module 184 /* Linux Specific */
203#define __NR_setpgid 185 /* Common */
204#define __NR_fremovexattr 186 /* SunOS: pathconf */
205#define __NR_tkill 187 /* SunOS: fpathconf */
206#define __NR_exit_group 188 /* Linux specific, sysconf under SunOS */
207#define __NR_uname 189 /* Linux Specific */
208#define __NR_init_module 190 /* Linux Specific */
209#define __NR_personality 191 /* Linux Specific */
210#define __NR_remap_file_pages 192 /* Linux Specific */
211#define __NR_epoll_create 193 /* Linux Specific */
212#define __NR_epoll_ctl 194 /* Linux Specific */
213#define __NR_epoll_wait 195 /* Linux Specific */
214#define __NR_ioprio_set 196 /* Linux Specific */
215#define __NR_getppid 197 /* Linux Specific */
216#define __NR_sigaction 198 /* Linux Specific */
217#define __NR_sgetmask 199 /* Linux Specific */
218#define __NR_ssetmask 200 /* Linux Specific */
219#define __NR_sigsuspend 201 /* Linux Specific */
220#define __NR_oldlstat 202 /* Linux Specific */
221#define __NR_uselib 203 /* Linux Specific */
222#define __NR_readdir 204 /* Linux Specific */
223#define __NR_readahead 205 /* Linux Specific */
224#define __NR_socketcall 206 /* Linux Specific */
225#define __NR_syslog 207 /* Linux Specific */
226#define __NR_lookup_dcookie 208 /* Linux Specific */
227#define __NR_fadvise64 209 /* Linux Specific */
228#define __NR_fadvise64_64 210 /* Linux Specific */
229#define __NR_tgkill 211 /* Linux Specific */
230#define __NR_waitpid 212 /* Linux Specific */
231#define __NR_swapoff 213 /* Linux Specific */
232#define __NR_sysinfo 214 /* Linux Specific */
233#define __NR_ipc 215 /* Linux Specific */
234#define __NR_sigreturn 216 /* Linux Specific */
235#define __NR_clone 217 /* Linux Specific */
236#define __NR_ioprio_get 218 /* Linux Specific */
237#define __NR_adjtimex 219 /* Linux Specific */
238#define __NR_sigprocmask 220 /* Linux Specific */
239#define __NR_create_module 221 /* Linux Specific */
240#define __NR_delete_module 222 /* Linux Specific */
241#define __NR_get_kernel_syms 223 /* Linux Specific */
242#define __NR_getpgid 224 /* Linux Specific */
243#define __NR_bdflush 225 /* Linux Specific */
244#define __NR_sysfs 226 /* Linux Specific */
245#define __NR_afs_syscall 227 /* Linux Specific */
246#define __NR_setfsuid 228 /* Linux Specific */
247#define __NR_setfsgid 229 /* Linux Specific */
248#define __NR__newselect 230 /* Linux Specific */
249#define __NR_time 231 /* Linux Specific */
250#define __NR_splice 232 /* Linux Specific */
251#define __NR_stime 233 /* Linux Specific */
252#define __NR_statfs64 234 /* Linux Specific */
253#define __NR_fstatfs64 235 /* Linux Specific */
254#define __NR__llseek 236 /* Linux Specific */
255#define __NR_mlock 237
256#define __NR_munlock 238
257#define __NR_mlockall 239
258#define __NR_munlockall 240
259#define __NR_sched_setparam 241
260#define __NR_sched_getparam 242
261#define __NR_sched_setscheduler 243
262#define __NR_sched_getscheduler 244
263#define __NR_sched_yield 245
264#define __NR_sched_get_priority_max 246
265#define __NR_sched_get_priority_min 247
266#define __NR_sched_rr_get_interval 248
267#define __NR_nanosleep 249
268#define __NR_mremap 250
269#define __NR__sysctl 251
270#define __NR_getsid 252
271#define __NR_fdatasync 253
272#define __NR_nfsservctl 254
273#define __NR_sync_file_range 255
274#define __NR_clock_settime 256
275#define __NR_clock_gettime 257
276#define __NR_clock_getres 258
277#define __NR_clock_nanosleep 259
278#define __NR_sched_getaffinity 260
279#define __NR_sched_setaffinity 261
280#define __NR_timer_settime 262
281#define __NR_timer_gettime 263
282#define __NR_timer_getoverrun 264
283#define __NR_timer_delete 265
284#define __NR_timer_create 266
285/* #define __NR_vserver 267 Reserved for VSERVER */
286#define __NR_io_setup 268
287#define __NR_io_destroy 269
288#define __NR_io_submit 270
289#define __NR_io_cancel 271
290#define __NR_io_getevents 272
291#define __NR_mq_open 273
292#define __NR_mq_unlink 274
293#define __NR_mq_timedsend 275
294#define __NR_mq_timedreceive 276
295#define __NR_mq_notify 277
296#define __NR_mq_getsetattr 278
297#define __NR_waitid 279
298#define __NR_tee 280
299#define __NR_add_key 281
300#define __NR_request_key 282
301#define __NR_keyctl 283
302#define __NR_openat 284
303#define __NR_mkdirat 285
304#define __NR_mknodat 286
305#define __NR_fchownat 287
306#define __NR_futimesat 288
307#define __NR_fstatat64 289
308#define __NR_unlinkat 290
309#define __NR_renameat 291
310#define __NR_linkat 292
311#define __NR_symlinkat 293
312#define __NR_readlinkat 294
313#define __NR_fchmodat 295
314#define __NR_faccessat 296
315#define __NR_pselect6 297
316#define __NR_ppoll 298
317#define __NR_unshare 299
318#define __NR_set_robust_list 300
319#define __NR_get_robust_list 301
320#define __NR_migrate_pages 302
321#define __NR_mbind 303
322#define __NR_get_mempolicy 304
323#define __NR_set_mempolicy 305
324#define __NR_kexec_load 306
325#define __NR_move_pages 307
326#define __NR_getcpu 308
327#define __NR_epoll_pwait 309
328#define __NR_utimensat 310
329#define __NR_signalfd 311
330#define __NR_timerfd_create 312
331#define __NR_eventfd 313
332#define __NR_fallocate 314
333#define __NR_timerfd_settime 315
334#define __NR_timerfd_gettime 316
335
336#define NR_SYSCALLS 317
337
338/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants;
339 * it never had the plain ones and there is no value in adding those
340 * old versions into the syscall table.
341 */
342#define __IGNORE_setresuid
343#define __IGNORE_getresuid
344#define __IGNORE_setresgid
345#define __IGNORE_getresgid
346
347#ifdef __KERNEL__
348#define __ARCH_WANT_IPC_PARSE_VERSION
349#define __ARCH_WANT_OLD_READDIR
350#define __ARCH_WANT_STAT64
351#define __ARCH_WANT_SYS_ALARM
352#define __ARCH_WANT_SYS_GETHOSTNAME
353#define __ARCH_WANT_SYS_PAUSE
354#define __ARCH_WANT_SYS_SGETMASK
355#define __ARCH_WANT_SYS_SIGNAL
356#define __ARCH_WANT_SYS_TIME
357#define __ARCH_WANT_SYS_UTIME
358#define __ARCH_WANT_SYS_WAITPID
359#define __ARCH_WANT_SYS_SOCKETCALL
360#define __ARCH_WANT_SYS_FADVISE64
361#define __ARCH_WANT_SYS_GETPGRP
362#define __ARCH_WANT_SYS_LLSEEK
363#define __ARCH_WANT_SYS_NICE
364#define __ARCH_WANT_SYS_OLDUMOUNT
365#define __ARCH_WANT_SYS_SIGPENDING
366#define __ARCH_WANT_SYS_SIGPROCMASK
367#define __ARCH_WANT_SYS_RT_SIGSUSPEND
368
369/*
370 * "Conditional" syscalls
371 *
372 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
373 * but it doesn't work on all toolchains, so we just do it by hand
374 */
375#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
376
377#endif /* __KERNEL__ */
378#endif /* _SPARC_UNISTD_H */
diff --git a/include/asm-sparc/unistd_32.h b/include/asm-sparc/unistd_32.h
new file mode 100644
index 000000000000..648643a9f139
--- /dev/null
+++ b/include/asm-sparc/unistd_32.h
@@ -0,0 +1,384 @@
1#ifndef _SPARC_UNISTD_H
2#define _SPARC_UNISTD_H
3
4/*
5 * System calls under the Sparc.
6 *
7 * Don't be scared by the ugly clobbers, it is the only way I can
8 * think of right now to force the arguments into fixed registers
9 * before the trap into the system call with gcc 'asm' statements.
10 *
11 * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
12 *
13 * SunOS compatibility based upon preliminary work which is:
14 *
15 * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
16 */
17
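For illustration, this is roughly what those "ugly clobbers" end up expressing: a sketch of a zero-argument system call, assuming the usual sparc32 Linux convention (number in %g1, arguments in %o0-%o5, trap instruction "ta 0x10", result in %o0 with the carry flag set on error). A real wrapper, as in the C library, declares a longer clobber list.

static inline long example_syscall0(long nr)
{
	register long g1 __asm__("g1") = nr;	/* syscall number */
	register long o0 __asm__("o0");		/* result register */

	__asm__ __volatile__("ta 0x10"
			     : "=r" (o0)
			     : "r" (g1)
			     : "cc", "memory");
	return o0;	/* carry-flag error check omitted for brevity */
}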
18#define __NR_restart_syscall 0 /* Linux Specific */
19#define __NR_exit 1 /* Common */
20#define __NR_fork 2 /* Common */
21#define __NR_read 3 /* Common */
22#define __NR_write 4 /* Common */
23#define __NR_open 5 /* Common */
24#define __NR_close 6 /* Common */
25#define __NR_wait4 7 /* Common */
26#define __NR_creat 8 /* Common */
27#define __NR_link 9 /* Common */
28#define __NR_unlink 10 /* Common */
29#define __NR_execv 11 /* SunOS Specific */
30#define __NR_chdir 12 /* Common */
31#define __NR_chown 13 /* Common */
32#define __NR_mknod 14 /* Common */
33#define __NR_chmod 15 /* Common */
34#define __NR_lchown 16 /* Common */
35#define __NR_brk 17 /* Common */
36#define __NR_perfctr 18 /* Performance counter operations */
37#define __NR_lseek 19 /* Common */
38#define __NR_getpid 20 /* Common */
39#define __NR_capget 21 /* Linux Specific */
40#define __NR_capset 22 /* Linux Specific */
41#define __NR_setuid 23 /* Implemented via setreuid in SunOS */
42#define __NR_getuid 24 /* Common */
43#define __NR_vmsplice 25 /* ENOSYS under SunOS */
44#define __NR_ptrace 26 /* Common */
45#define __NR_alarm 27 /* Implemented via setitimer in SunOS */
46#define __NR_sigaltstack 28 /* Common */
47#define __NR_pause 29 /* Is sigblock(0)->sigpause() in SunOS */
48#define __NR_utime 30 /* Implemented via utimes() under SunOS */
49#define __NR_lchown32 31 /* Linux sparc32 specific */
50#define __NR_fchown32 32 /* Linux sparc32 specific */
51#define __NR_access 33 /* Common */
52#define __NR_nice 34 /* Implemented via get/setpriority() in SunOS */
53#define __NR_chown32 35 /* Linux sparc32 specific */
54#define __NR_sync 36 /* Common */
55#define __NR_kill 37 /* Common */
56#define __NR_stat 38 /* Common */
57#define __NR_sendfile 39 /* Linux Specific */
58#define __NR_lstat 40 /* Common */
59#define __NR_dup 41 /* Common */
60#define __NR_pipe 42 /* Common */
61#define __NR_times 43 /* Implemented via getrusage() in SunOS */
62#define __NR_getuid32 44 /* Linux sparc32 specific */
63#define __NR_umount2 45 /* Linux Specific */
64#define __NR_setgid 46 /* Implemented via setregid() in SunOS */
65#define __NR_getgid 47 /* Common */
66#define __NR_signal 48 /* Implemented via sigvec() in SunOS */
67#define __NR_geteuid 49 /* SunOS calls getuid() */
68#define __NR_getegid 50 /* SunOS calls getgid() */
69#define __NR_acct 51 /* Common */
70/* #define __NR_memory_ordering 52 Linux sparc64 specific */
71#define __NR_getgid32 53 /* Linux sparc32 specific */
72#define __NR_ioctl 54 /* Common */
73#define __NR_reboot 55 /* Common */
74#define __NR_mmap2 56 /* Linux sparc32 Specific */
75#define __NR_symlink 57 /* Common */
76#define __NR_readlink 58 /* Common */
77#define __NR_execve 59 /* Common */
78#define __NR_umask 60 /* Common */
79#define __NR_chroot 61 /* Common */
80#define __NR_fstat 62 /* Common */
81#define __NR_fstat64 63 /* Linux Specific */
82#define __NR_getpagesize 64 /* Common */
83#define __NR_msync 65 /* Common in newer 1.3.x revs... */
84#define __NR_vfork 66 /* Common */
85#define __NR_pread64 67 /* Linux Specific */
86#define __NR_pwrite64 68 /* Linux Specific */
87#define __NR_geteuid32 69 /* Linux sparc32, sbrk under SunOS */
88#define __NR_getegid32 70 /* Linux sparc32, sstk under SunOS */
89#define __NR_mmap 71 /* Common */
90#define __NR_setreuid32 72 /* Linux sparc32, vadvise under SunOS */
91#define __NR_munmap 73 /* Common */
92#define __NR_mprotect 74 /* Common */
93#define __NR_madvise 75 /* Common */
94#define __NR_vhangup 76 /* Common */
95#define __NR_truncate64 77 /* Linux sparc32 Specific */
96#define __NR_mincore 78 /* Common */
97#define __NR_getgroups 79 /* Common */
98#define __NR_setgroups 80 /* Common */
99#define __NR_getpgrp 81 /* Common */
100#define __NR_setgroups32 82 /* Linux sparc32, setpgrp under SunOS */
101#define __NR_setitimer 83 /* Common */
102#define __NR_ftruncate64 84 /* Linux sparc32 Specific */
103#define __NR_swapon 85 /* Common */
104#define __NR_getitimer 86 /* Common */
105#define __NR_setuid32 87 /* Linux sparc32, gethostname under SunOS */
106#define __NR_sethostname 88 /* Common */
107#define __NR_setgid32 89 /* Linux sparc32, getdtablesize under SunOS */
108#define __NR_dup2 90 /* Common */
109#define __NR_setfsuid32 91 /* Linux sparc32, getdopt under SunOS */
110#define __NR_fcntl 92 /* Common */
111#define __NR_select 93 /* Common */
112#define __NR_setfsgid32 94 /* Linux sparc32, setdopt under SunOS */
113#define __NR_fsync 95 /* Common */
114#define __NR_setpriority 96 /* Common */
115#define __NR_socket 97 /* Common */
116#define __NR_connect 98 /* Common */
117#define __NR_accept 99 /* Common */
118#define __NR_getpriority 100 /* Common */
119#define __NR_rt_sigreturn 101 /* Linux Specific */
120#define __NR_rt_sigaction 102 /* Linux Specific */
121#define __NR_rt_sigprocmask 103 /* Linux Specific */
122#define __NR_rt_sigpending 104 /* Linux Specific */
123#define __NR_rt_sigtimedwait 105 /* Linux Specific */
124#define __NR_rt_sigqueueinfo 106 /* Linux Specific */
125#define __NR_rt_sigsuspend 107 /* Linux Specific */
126#define __NR_setresuid32 108 /* Linux Specific, sigvec under SunOS */
127#define __NR_getresuid32 109 /* Linux Specific, sigblock under SunOS */
128#define __NR_setresgid32 110 /* Linux Specific, sigsetmask under SunOS */
129#define __NR_getresgid32 111 /* Linux Specific, sigpause under SunOS */
130#define __NR_setregid32 112 /* Linux sparc32, sigstack under SunOS */
131#define __NR_recvmsg 113 /* Common */
132#define __NR_sendmsg 114 /* Common */
133#define __NR_getgroups32 115 /* Linux sparc32, vtrace under SunOS */
134#define __NR_gettimeofday 116 /* Common */
135#define __NR_getrusage 117 /* Common */
136#define __NR_getsockopt 118 /* Common */
137#define __NR_getcwd 119 /* Linux Specific */
138#define __NR_readv 120 /* Common */
139#define __NR_writev 121 /* Common */
140#define __NR_settimeofday 122 /* Common */
141#define __NR_fchown 123 /* Common */
142#define __NR_fchmod 124 /* Common */
143#define __NR_recvfrom 125 /* Common */
144#define __NR_setreuid 126 /* Common */
145#define __NR_setregid 127 /* Common */
146#define __NR_rename 128 /* Common */
147#define __NR_truncate 129 /* Common */
148#define __NR_ftruncate 130 /* Common */
149#define __NR_flock 131 /* Common */
150#define __NR_lstat64 132 /* Linux Specific */
151#define __NR_sendto 133 /* Common */
152#define __NR_shutdown 134 /* Common */
153#define __NR_socketpair 135 /* Common */
154#define __NR_mkdir 136 /* Common */
155#define __NR_rmdir 137 /* Common */
156#define __NR_utimes 138 /* SunOS Specific */
157#define __NR_stat64 139 /* Linux Specific */
158#define __NR_sendfile64 140 /* adjtime under SunOS */
159#define __NR_getpeername 141 /* Common */
160#define __NR_futex 142 /* gethostid under SunOS */
161#define __NR_gettid 143 /* ENOSYS under SunOS */
162#define __NR_getrlimit 144 /* Common */
163#define __NR_setrlimit 145 /* Common */
164#define __NR_pivot_root 146 /* Linux Specific, killpg under SunOS */
165#define __NR_prctl 147 /* ENOSYS under SunOS */
166#define __NR_pciconfig_read 148 /* ENOSYS under SunOS */
167#define __NR_pciconfig_write 149 /* ENOSYS under SunOS */
168#define __NR_getsockname 150 /* Common */
169#define __NR_inotify_init 151 /* Linux specific */
170#define __NR_inotify_add_watch 152 /* Linux specific */
171#define __NR_poll 153 /* Common */
172#define __NR_getdents64 154 /* Linux specific */
173#define __NR_fcntl64 155 /* Linux sparc32 Specific */
174#define __NR_inotify_rm_watch 156 /* Linux specific */
175#define __NR_statfs 157 /* Common */
176#define __NR_fstatfs 158 /* Common */
177#define __NR_umount 159 /* Common */
178#define __NR_sched_set_affinity 160 /* Linux specific, async_daemon under SunOS */
179#define __NR_sched_get_affinity 161 /* Linux specific, getfh under SunOS */
180#define __NR_getdomainname 162 /* SunOS Specific */
181#define __NR_setdomainname 163 /* Common */
182/* #define __NR_utrap_install 164 Linux sparc64 specific */
183#define __NR_quotactl 165 /* Common */
184#define __NR_set_tid_address 166 /* Linux specific, exportfs under SunOS */
185#define __NR_mount 167 /* Common */
186#define __NR_ustat 168 /* Common */
187#define __NR_setxattr 169 /* SunOS: semsys */
188#define __NR_lsetxattr 170 /* SunOS: msgsys */
189#define __NR_fsetxattr 171 /* SunOS: shmsys */
190#define __NR_getxattr 172 /* SunOS: auditsys */
191#define __NR_lgetxattr 173 /* SunOS: rfssys */
192#define __NR_getdents 174 /* Common */
193#define __NR_setsid 175 /* Common */
194#define __NR_fchdir 176 /* Common */
195#define __NR_fgetxattr 177 /* SunOS: fchroot */
196#define __NR_listxattr 178 /* SunOS: vpixsys */
197#define __NR_llistxattr 179 /* SunOS: aioread */
198#define __NR_flistxattr 180 /* SunOS: aiowrite */
199#define __NR_removexattr 181 /* SunOS: aiowait */
200#define __NR_lremovexattr 182 /* SunOS: aiocancel */
201#define __NR_sigpending 183 /* Common */
202#define __NR_query_module 184 /* Linux Specific */
203#define __NR_setpgid 185 /* Common */
204#define __NR_fremovexattr 186 /* SunOS: pathconf */
205#define __NR_tkill 187 /* SunOS: fpathconf */
206#define __NR_exit_group 188 /* Linux specific, sysconf under SunOS */
207#define __NR_uname 189 /* Linux Specific */
208#define __NR_init_module 190 /* Linux Specific */
209#define __NR_personality 191 /* Linux Specific */
210#define __NR_remap_file_pages 192 /* Linux Specific */
211#define __NR_epoll_create 193 /* Linux Specific */
212#define __NR_epoll_ctl 194 /* Linux Specific */
213#define __NR_epoll_wait 195 /* Linux Specific */
214#define __NR_ioprio_set 196 /* Linux Specific */
215#define __NR_getppid 197 /* Linux Specific */
216#define __NR_sigaction 198 /* Linux Specific */
217#define __NR_sgetmask 199 /* Linux Specific */
218#define __NR_ssetmask 200 /* Linux Specific */
219#define __NR_sigsuspend 201 /* Linux Specific */
220#define __NR_oldlstat 202 /* Linux Specific */
221#define __NR_uselib 203 /* Linux Specific */
222#define __NR_readdir 204 /* Linux Specific */
223#define __NR_readahead 205 /* Linux Specific */
224#define __NR_socketcall 206 /* Linux Specific */
225#define __NR_syslog 207 /* Linux Specific */
226#define __NR_lookup_dcookie 208 /* Linux Specific */
227#define __NR_fadvise64 209 /* Linux Specific */
228#define __NR_fadvise64_64 210 /* Linux Specific */
229#define __NR_tgkill 211 /* Linux Specific */
230#define __NR_waitpid 212 /* Linux Specific */
231#define __NR_swapoff 213 /* Linux Specific */
232#define __NR_sysinfo 214 /* Linux Specific */
233#define __NR_ipc 215 /* Linux Specific */
234#define __NR_sigreturn 216 /* Linux Specific */
235#define __NR_clone 217 /* Linux Specific */
236#define __NR_ioprio_get 218 /* Linux Specific */
237#define __NR_adjtimex 219 /* Linux Specific */
238#define __NR_sigprocmask 220 /* Linux Specific */
239#define __NR_create_module 221 /* Linux Specific */
240#define __NR_delete_module 222 /* Linux Specific */
241#define __NR_get_kernel_syms 223 /* Linux Specific */
242#define __NR_getpgid 224 /* Linux Specific */
243#define __NR_bdflush 225 /* Linux Specific */
244#define __NR_sysfs 226 /* Linux Specific */
245#define __NR_afs_syscall 227 /* Linux Specific */
246#define __NR_setfsuid 228 /* Linux Specific */
247#define __NR_setfsgid 229 /* Linux Specific */
248#define __NR__newselect 230 /* Linux Specific */
249#define __NR_time 231 /* Linux Specific */
250#define __NR_splice 232 /* Linux Specific */
251#define __NR_stime 233 /* Linux Specific */
252#define __NR_statfs64 234 /* Linux Specific */
253#define __NR_fstatfs64 235 /* Linux Specific */
254#define __NR__llseek 236 /* Linux Specific */
255#define __NR_mlock 237
256#define __NR_munlock 238
257#define __NR_mlockall 239
258#define __NR_munlockall 240
259#define __NR_sched_setparam 241
260#define __NR_sched_getparam 242
261#define __NR_sched_setscheduler 243
262#define __NR_sched_getscheduler 244
263#define __NR_sched_yield 245
264#define __NR_sched_get_priority_max 246
265#define __NR_sched_get_priority_min 247
266#define __NR_sched_rr_get_interval 248
267#define __NR_nanosleep 249
268#define __NR_mremap 250
269#define __NR__sysctl 251
270#define __NR_getsid 252
271#define __NR_fdatasync 253
272#define __NR_nfsservctl 254
273#define __NR_sync_file_range 255
274#define __NR_clock_settime 256
275#define __NR_clock_gettime 257
276#define __NR_clock_getres 258
277#define __NR_clock_nanosleep 259
278#define __NR_sched_getaffinity 260
279#define __NR_sched_setaffinity 261
280#define __NR_timer_settime 262
281#define __NR_timer_gettime 263
282#define __NR_timer_getoverrun 264
283#define __NR_timer_delete 265
284#define __NR_timer_create 266
285/* #define __NR_vserver 267 Reserved for VSERVER */
286#define __NR_io_setup 268
287#define __NR_io_destroy 269
288#define __NR_io_submit 270
289#define __NR_io_cancel 271
290#define __NR_io_getevents 272
291#define __NR_mq_open 273
292#define __NR_mq_unlink 274
293#define __NR_mq_timedsend 275
294#define __NR_mq_timedreceive 276
295#define __NR_mq_notify 277
296#define __NR_mq_getsetattr 278
297#define __NR_waitid 279
298#define __NR_tee 280
299#define __NR_add_key 281
300#define __NR_request_key 282
301#define __NR_keyctl 283
302#define __NR_openat 284
303#define __NR_mkdirat 285
304#define __NR_mknodat 286
305#define __NR_fchownat 287
306#define __NR_futimesat 288
307#define __NR_fstatat64 289
308#define __NR_unlinkat 290
309#define __NR_renameat 291
310#define __NR_linkat 292
311#define __NR_symlinkat 293
312#define __NR_readlinkat 294
313#define __NR_fchmodat 295
314#define __NR_faccessat 296
315#define __NR_pselect6 297
316#define __NR_ppoll 298
317#define __NR_unshare 299
318#define __NR_set_robust_list 300
319#define __NR_get_robust_list 301
320#define __NR_migrate_pages 302
321#define __NR_mbind 303
322#define __NR_get_mempolicy 304
323#define __NR_set_mempolicy 305
324#define __NR_kexec_load 306
325#define __NR_move_pages 307
326#define __NR_getcpu 308
327#define __NR_epoll_pwait 309
328#define __NR_utimensat 310
329#define __NR_signalfd 311
330#define __NR_timerfd_create 312
331#define __NR_eventfd 313
332#define __NR_fallocate 314
333#define __NR_timerfd_settime 315
334#define __NR_timerfd_gettime 316
335#define __NR_signalfd4 317
336#define __NR_eventfd2 318
337#define __NR_epoll_create1 319
338#define __NR_dup3 320
339#define __NR_pipe2 321
340#define __NR_inotify_init1 322
341
342#define NR_SYSCALLS 323
343
344/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants;
345 * it never had the plain ones and there is no value in adding those
346 * old versions into the syscall table.
347 */
348#define __IGNORE_setresuid
349#define __IGNORE_getresuid
350#define __IGNORE_setresgid
351#define __IGNORE_getresgid
352
353#ifdef __KERNEL__
354#define __ARCH_WANT_IPC_PARSE_VERSION
355#define __ARCH_WANT_OLD_READDIR
356#define __ARCH_WANT_STAT64
357#define __ARCH_WANT_SYS_ALARM
358#define __ARCH_WANT_SYS_GETHOSTNAME
359#define __ARCH_WANT_SYS_PAUSE
360#define __ARCH_WANT_SYS_SGETMASK
361#define __ARCH_WANT_SYS_SIGNAL
362#define __ARCH_WANT_SYS_TIME
363#define __ARCH_WANT_SYS_UTIME
364#define __ARCH_WANT_SYS_WAITPID
365#define __ARCH_WANT_SYS_SOCKETCALL
366#define __ARCH_WANT_SYS_FADVISE64
367#define __ARCH_WANT_SYS_GETPGRP
368#define __ARCH_WANT_SYS_LLSEEK
369#define __ARCH_WANT_SYS_NICE
370#define __ARCH_WANT_SYS_OLDUMOUNT
371#define __ARCH_WANT_SYS_SIGPENDING
372#define __ARCH_WANT_SYS_SIGPROCMASK
373#define __ARCH_WANT_SYS_RT_SIGSUSPEND
374
375/*
376 * "Conditional" syscalls
377 *
378 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
379 * but it doesn't work on all toolchains, so we just do it by hand
380 */
381#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
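For illustration, the macro is used at file scope in C code elsewhere in the tree (kernel/sys_ni.c is the conventional home); the particular symbol below is just an example:

cond_syscall(sys_quotactl);	/* weak alias to sys_ni_syscall unless the real one is linked in */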
382
383#endif /* __KERNEL__ */
384#endif /* _SPARC_UNISTD_H */
diff --git a/include/asm-sparc/unistd_64.h b/include/asm-sparc/unistd_64.h
new file mode 100644
index 000000000000..c5cc0e052321
--- /dev/null
+++ b/include/asm-sparc/unistd_64.h
@@ -0,0 +1,379 @@
1#ifndef _SPARC64_UNISTD_H
2#define _SPARC64_UNISTD_H
3
4/*
5 * System calls under the Sparc.
6 *
7 * Don't be scared by the ugly clobbers, it is the only way I can
8 * think of right now to force the arguments into fixed registers
9 * before the trap into the system call with gcc 'asm' statements.
10 *
11 * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
12 *
13 * SunOS compatibility based upon preliminary work which is:
14 *
15 * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
16 */
17
18#define __NR_restart_syscall 0 /* Linux Specific */
19#define __NR_exit 1 /* Common */
20#define __NR_fork 2 /* Common */
21#define __NR_read 3 /* Common */
22#define __NR_write 4 /* Common */
23#define __NR_open 5 /* Common */
24#define __NR_close 6 /* Common */
25#define __NR_wait4 7 /* Common */
26#define __NR_creat 8 /* Common */
27#define __NR_link 9 /* Common */
28#define __NR_unlink 10 /* Common */
29#define __NR_execv 11 /* SunOS Specific */
30#define __NR_chdir 12 /* Common */
31#define __NR_chown 13 /* Common */
32#define __NR_mknod 14 /* Common */
33#define __NR_chmod 15 /* Common */
34#define __NR_lchown 16 /* Common */
35#define __NR_brk 17 /* Common */
36#define __NR_perfctr 18 /* Performance counter operations */
37#define __NR_lseek 19 /* Common */
38#define __NR_getpid 20 /* Common */
39#define __NR_capget 21 /* Linux Specific */
40#define __NR_capset 22 /* Linux Specific */
41#define __NR_setuid 23 /* Implemented via setreuid in SunOS */
42#define __NR_getuid 24 /* Common */
43#define __NR_vmsplice 25 /* ENOSYS under SunOS */
44#define __NR_ptrace 26 /* Common */
45#define __NR_alarm 27 /* Implemented via setitimer in SunOS */
46#define __NR_sigaltstack 28 /* Common */
47#define __NR_pause 29 /* Is sigblock(0)->sigpause() in SunOS */
48#define __NR_utime 30 /* Implemented via utimes() under SunOS */
49/* #define __NR_lchown32 31 Linux sparc32 specific */
50/* #define __NR_fchown32 32 Linux sparc32 specific */
51#define __NR_access 33 /* Common */
52#define __NR_nice 34 /* Implemented via get/setpriority() in SunOS */
53/* #define __NR_chown32 35 Linux sparc32 specific */
54#define __NR_sync 36 /* Common */
55#define __NR_kill 37 /* Common */
56#define __NR_stat 38 /* Common */
57#define __NR_sendfile 39 /* Linux Specific */
58#define __NR_lstat 40 /* Common */
59#define __NR_dup 41 /* Common */
60#define __NR_pipe 42 /* Common */
61#define __NR_times 43 /* Implemented via getrusage() in SunOS */
62/* #define __NR_getuid32 44 Linux sparc32 specific */
63#define __NR_umount2 45 /* Linux Specific */
64#define __NR_setgid 46 /* Implemented via setregid() in SunOS */
65#define __NR_getgid 47 /* Common */
66#define __NR_signal 48 /* Implemented via sigvec() in SunOS */
67#define __NR_geteuid 49 /* SunOS calls getuid() */
68#define __NR_getegid 50 /* SunOS calls getgid() */
69#define __NR_acct 51 /* Common */
70#define __NR_memory_ordering 52 /* Linux Specific */
71/* #define __NR_getgid32 53 Linux sparc32 specific */
72#define __NR_ioctl 54 /* Common */
73#define __NR_reboot 55 /* Common */
74/* #define __NR_mmap2 56 Linux sparc32 Specific */
75#define __NR_symlink 57 /* Common */
76#define __NR_readlink 58 /* Common */
77#define __NR_execve 59 /* Common */
78#define __NR_umask 60 /* Common */
79#define __NR_chroot 61 /* Common */
80#define __NR_fstat 62 /* Common */
81#define __NR_fstat64 63 /* Linux Specific */
82#define __NR_getpagesize 64 /* Common */
83#define __NR_msync 65 /* Common in newer 1.3.x revs... */
84#define __NR_vfork 66 /* Common */
85#define __NR_pread64 67 /* Linux Specific */
86#define __NR_pwrite64 68 /* Linux Specific */
87/* #define __NR_geteuid32 69 Linux sparc32, sbrk under SunOS */
88/* #define __NR_getegid32 70 Linux sparc32, sstk under SunOS */
89#define __NR_mmap 71 /* Common */
90/* #define __NR_setreuid32 72 Linux sparc32, vadvise under SunOS */
91#define __NR_munmap 73 /* Common */
92#define __NR_mprotect 74 /* Common */
93#define __NR_madvise 75 /* Common */
94#define __NR_vhangup 76 /* Common */
95/* #define __NR_truncate64 77 Linux sparc32 Specific */
96#define __NR_mincore 78 /* Common */
97#define __NR_getgroups 79 /* Common */
98#define __NR_setgroups 80 /* Common */
99#define __NR_getpgrp 81 /* Common */
100/* #define __NR_setgroups32 82 Linux sparc32, setpgrp under SunOS */
101#define __NR_setitimer 83 /* Common */
102/* #define __NR_ftruncate64 84 Linux sparc32 Specific */
103#define __NR_swapon 85 /* Common */
104#define __NR_getitimer 86 /* Common */
105/* #define __NR_setuid32 87 Linux sparc32, gethostname under SunOS */
106#define __NR_sethostname 88 /* Common */
107/* #define __NR_setgid32 89 Linux sparc32, getdtablesize under SunOS */
108#define __NR_dup2 90 /* Common */
109/* #define __NR_setfsuid32 91 Linux sparc32, getdopt under SunOS */
110#define __NR_fcntl 92 /* Common */
111#define __NR_select 93 /* Common */
112/* #define __NR_setfsgid32 94 Linux sparc32, setdopt under SunOS */
113#define __NR_fsync 95 /* Common */
114#define __NR_setpriority 96 /* Common */
115#define __NR_socket 97 /* Common */
116#define __NR_connect 98 /* Common */
117#define __NR_accept 99 /* Common */
118#define __NR_getpriority 100 /* Common */
119#define __NR_rt_sigreturn 101 /* Linux Specific */
120#define __NR_rt_sigaction 102 /* Linux Specific */
121#define __NR_rt_sigprocmask 103 /* Linux Specific */
122#define __NR_rt_sigpending 104 /* Linux Specific */
123#define __NR_rt_sigtimedwait 105 /* Linux Specific */
124#define __NR_rt_sigqueueinfo 106 /* Linux Specific */
125#define __NR_rt_sigsuspend 107 /* Linux Specific */
126#define __NR_setresuid 108 /* Linux Specific, sigvec under SunOS */
127#define __NR_getresuid 109 /* Linux Specific, sigblock under SunOS */
128#define __NR_setresgid 110 /* Linux Specific, sigsetmask under SunOS */
129#define __NR_getresgid 111 /* Linux Specific, sigpause under SunOS */
130/* #define __NR_setregid32 112 Linux sparc32, sigstack under SunOS */
131#define __NR_recvmsg 113 /* Common */
132#define __NR_sendmsg 114 /* Common */
133/* #define __NR_getgroups32 115 Linux sparc32, vtrace under SunOS */
134#define __NR_gettimeofday 116 /* Common */
135#define __NR_getrusage 117 /* Common */
136#define __NR_getsockopt 118 /* Common */
137#define __NR_getcwd 119 /* Linux Specific */
138#define __NR_readv 120 /* Common */
139#define __NR_writev 121 /* Common */
140#define __NR_settimeofday 122 /* Common */
141#define __NR_fchown 123 /* Common */
142#define __NR_fchmod 124 /* Common */
143#define __NR_recvfrom 125 /* Common */
144#define __NR_setreuid 126 /* Common */
145#define __NR_setregid 127 /* Common */
146#define __NR_rename 128 /* Common */
147#define __NR_truncate 129 /* Common */
148#define __NR_ftruncate 130 /* Common */
149#define __NR_flock 131 /* Common */
150#define __NR_lstat64 132 /* Linux Specific */
151#define __NR_sendto 133 /* Common */
152#define __NR_shutdown 134 /* Common */
153#define __NR_socketpair 135 /* Common */
154#define __NR_mkdir 136 /* Common */
155#define __NR_rmdir 137 /* Common */
156#define __NR_utimes 138 /* SunOS Specific */
157#define __NR_stat64 139 /* Linux Specific */
158#define __NR_sendfile64 140 /* adjtime under SunOS */
159#define __NR_getpeername 141 /* Common */
160#define __NR_futex 142 /* gethostid under SunOS */
161#define __NR_gettid 143 /* ENOSYS under SunOS */
162#define __NR_getrlimit 144 /* Common */
163#define __NR_setrlimit 145 /* Common */
164#define __NR_pivot_root 146 /* Linux Specific, killpg under SunOS */
165#define __NR_prctl 147 /* ENOSYS under SunOS */
166#define __NR_pciconfig_read 148 /* ENOSYS under SunOS */
167#define __NR_pciconfig_write 149 /* ENOSYS under SunOS */
168#define __NR_getsockname 150 /* Common */
169#define __NR_inotify_init 151 /* Linux specific */
170#define __NR_inotify_add_watch 152 /* Linux specific */
171#define __NR_poll 153 /* Common */
172#define __NR_getdents64 154 /* Linux specific */
173/* #define __NR_fcntl64 155 Linux sparc32 Specific */
174#define __NR_inotify_rm_watch 156 /* Linux specific */
175#define __NR_statfs 157 /* Common */
176#define __NR_fstatfs 158 /* Common */
177#define __NR_umount 159 /* Common */
178#define __NR_sched_set_affinity 160 /* Linux specific, async_daemon under SunOS */
179#define __NR_sched_get_affinity 161 /* Linux specific, getfh under SunOS */
180#define __NR_getdomainname 162 /* SunOS Specific */
181#define __NR_setdomainname 163 /* Common */
182#define __NR_utrap_install 164 /* SYSV ABI/v9 required */
183#define __NR_quotactl 165 /* Common */
184#define __NR_set_tid_address 166 /* Linux specific, exportfs under SunOS */
185#define __NR_mount 167 /* Common */
186#define __NR_ustat 168 /* Common */
187#define __NR_setxattr 169 /* SunOS: semsys */
188#define __NR_lsetxattr 170 /* SunOS: msgsys */
189#define __NR_fsetxattr 171 /* SunOS: shmsys */
190#define __NR_getxattr 172 /* SunOS: auditsys */
191#define __NR_lgetxattr 173 /* SunOS: rfssys */
192#define __NR_getdents 174 /* Common */
193#define __NR_setsid 175 /* Common */
194#define __NR_fchdir 176 /* Common */
195#define __NR_fgetxattr 177 /* SunOS: fchroot */
196#define __NR_listxattr 178 /* SunOS: vpixsys */
197#define __NR_llistxattr 179 /* SunOS: aioread */
198#define __NR_flistxattr 180 /* SunOS: aiowrite */
199#define __NR_removexattr 181 /* SunOS: aiowait */
200#define __NR_lremovexattr 182 /* SunOS: aiocancel */
201#define __NR_sigpending 183 /* Common */
202#define __NR_query_module 184 /* Linux Specific */
203#define __NR_setpgid 185 /* Common */
204#define __NR_fremovexattr 186 /* SunOS: pathconf */
205#define __NR_tkill 187 /* SunOS: fpathconf */
206#define __NR_exit_group 188 /* Linux specific, sysconf under SunOS */
207#define __NR_uname 189 /* Linux Specific */
208#define __NR_init_module 190 /* Linux Specific */
209#define __NR_personality 191 /* Linux Specific */
210#define __NR_remap_file_pages 192 /* Linux Specific */
211#define __NR_epoll_create 193 /* Linux Specific */
212#define __NR_epoll_ctl 194 /* Linux Specific */
213#define __NR_epoll_wait 195 /* Linux Specific */
214#define __NR_ioprio_set 196 /* Linux Specific */
215#define __NR_getppid 197 /* Linux Specific */
216#define __NR_sigaction 198 /* Linux Specific */
217#define __NR_sgetmask 199 /* Linux Specific */
218#define __NR_ssetmask 200 /* Linux Specific */
219#define __NR_sigsuspend 201 /* Linux Specific */
220#define __NR_oldlstat 202 /* Linux Specific */
221#define __NR_uselib 203 /* Linux Specific */
222#define __NR_readdir 204 /* Linux Specific */
223#define __NR_readahead 205 /* Linux Specific */
224#define __NR_socketcall 206 /* Linux Specific */
225#define __NR_syslog 207 /* Linux Specific */
226#define __NR_lookup_dcookie 208 /* Linux Specific */
227#define __NR_fadvise64 209 /* Linux Specific */
228#define __NR_fadvise64_64 210 /* Linux Specific */
229#define __NR_tgkill 211 /* Linux Specific */
230#define __NR_waitpid 212 /* Linux Specific */
231#define __NR_swapoff 213 /* Linux Specific */
232#define __NR_sysinfo 214 /* Linux Specific */
233#define __NR_ipc 215 /* Linux Specific */
234#define __NR_sigreturn 216 /* Linux Specific */
235#define __NR_clone 217 /* Linux Specific */
236#define __NR_ioprio_get 218 /* Linux Specific */
237#define __NR_adjtimex 219 /* Linux Specific */
238#define __NR_sigprocmask 220 /* Linux Specific */
239#define __NR_create_module 221 /* Linux Specific */
240#define __NR_delete_module 222 /* Linux Specific */
241#define __NR_get_kernel_syms 223 /* Linux Specific */
242#define __NR_getpgid 224 /* Linux Specific */
243#define __NR_bdflush 225 /* Linux Specific */
244#define __NR_sysfs 226 /* Linux Specific */
245#define __NR_afs_syscall 227 /* Linux Specific */
246#define __NR_setfsuid 228 /* Linux Specific */
247#define __NR_setfsgid 229 /* Linux Specific */
248#define __NR__newselect 230 /* Linux Specific */
249#ifdef __KERNEL__
250#define __NR_time 231 /* Linux sparc32 */
251#endif
252#define __NR_splice 232 /* Linux Specific */
253#define __NR_stime 233 /* Linux Specific */
254#define __NR_statfs64 234 /* Linux Specific */
255#define __NR_fstatfs64 235 /* Linux Specific */
256#define __NR__llseek 236 /* Linux Specific */
257#define __NR_mlock 237
258#define __NR_munlock 238
259#define __NR_mlockall 239
260#define __NR_munlockall 240
261#define __NR_sched_setparam 241
262#define __NR_sched_getparam 242
263#define __NR_sched_setscheduler 243
264#define __NR_sched_getscheduler 244
265#define __NR_sched_yield 245
266#define __NR_sched_get_priority_max 246
267#define __NR_sched_get_priority_min 247
268#define __NR_sched_rr_get_interval 248
269#define __NR_nanosleep 249
270#define __NR_mremap 250
271#define __NR__sysctl 251
272#define __NR_getsid 252
273#define __NR_fdatasync 253
274#define __NR_nfsservctl 254
275#define __NR_sync_file_range 255
276#define __NR_clock_settime 256
277#define __NR_clock_gettime 257
278#define __NR_clock_getres 258
279#define __NR_clock_nanosleep 259
280#define __NR_sched_getaffinity 260
281#define __NR_sched_setaffinity 261
282#define __NR_timer_settime 262
283#define __NR_timer_gettime 263
284#define __NR_timer_getoverrun 264
285#define __NR_timer_delete 265
286#define __NR_timer_create 266
287/* #define __NR_vserver 267 Reserved for VSERVER */
288#define __NR_io_setup 268
289#define __NR_io_destroy 269
290#define __NR_io_submit 270
291#define __NR_io_cancel 271
292#define __NR_io_getevents 272
293#define __NR_mq_open 273
294#define __NR_mq_unlink 274
295#define __NR_mq_timedsend 275
296#define __NR_mq_timedreceive 276
297#define __NR_mq_notify 277
298#define __NR_mq_getsetattr 278
299#define __NR_waitid 279
300#define __NR_tee 280
301#define __NR_add_key 281
302#define __NR_request_key 282
303#define __NR_keyctl 283
304#define __NR_openat 284
305#define __NR_mkdirat 285
306#define __NR_mknodat 286
307#define __NR_fchownat 287
308#define __NR_futimesat 288
309#define __NR_fstatat64 289
310#define __NR_unlinkat 290
311#define __NR_renameat 291
312#define __NR_linkat 292
313#define __NR_symlinkat 293
314#define __NR_readlinkat 294
315#define __NR_fchmodat 295
316#define __NR_faccessat 296
317#define __NR_pselect6 297
318#define __NR_ppoll 298
319#define __NR_unshare 299
320#define __NR_set_robust_list 300
321#define __NR_get_robust_list 301
322#define __NR_migrate_pages 302
323#define __NR_mbind 303
324#define __NR_get_mempolicy 304
325#define __NR_set_mempolicy 305
326#define __NR_kexec_load 306
327#define __NR_move_pages 307
328#define __NR_getcpu 308
329#define __NR_epoll_pwait 309
330#define __NR_utimensat 310
331#define __NR_signalfd 311
332#define __NR_timerfd_create 312
333#define __NR_eventfd 313
334#define __NR_fallocate 314
335#define __NR_timerfd_settime 315
336#define __NR_timerfd_gettime 316
337#define __NR_signalfd4 317
338#define __NR_eventfd2 318
339#define __NR_epoll_create1 319
340#define __NR_dup3 320
341#define __NR_pipe2 321
342#define __NR_inotify_init1 322
343
344#define NR_SYSCALLS 323
345
346#ifdef __KERNEL__
347#define __ARCH_WANT_IPC_PARSE_VERSION
348#define __ARCH_WANT_OLD_READDIR
349#define __ARCH_WANT_STAT64
350#define __ARCH_WANT_SYS_ALARM
351#define __ARCH_WANT_SYS_GETHOSTNAME
352#define __ARCH_WANT_SYS_PAUSE
353#define __ARCH_WANT_SYS_SGETMASK
354#define __ARCH_WANT_SYS_SIGNAL
355#define __ARCH_WANT_SYS_TIME
356#define __ARCH_WANT_COMPAT_SYS_TIME
357#define __ARCH_WANT_SYS_UTIME
358#define __ARCH_WANT_SYS_WAITPID
359#define __ARCH_WANT_SYS_SOCKETCALL
360#define __ARCH_WANT_SYS_FADVISE64
361#define __ARCH_WANT_SYS_GETPGRP
362#define __ARCH_WANT_SYS_LLSEEK
363#define __ARCH_WANT_SYS_NICE
364#define __ARCH_WANT_SYS_OLDUMOUNT
365#define __ARCH_WANT_SYS_SIGPENDING
366#define __ARCH_WANT_SYS_SIGPROCMASK
367#define __ARCH_WANT_SYS_RT_SIGSUSPEND
368#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
369
370/*
371 * "Conditional" syscalls
372 *
373 * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
374 * but it doesn't work on all toolchains, so we just do it by hand
375 */
376#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
377
378#endif /* __KERNEL__ */
379#endif /* _SPARC64_UNISTD_H */
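For illustration (not part of the patch), here is a hedged sketch of what one cond_syscall() use expands to; sys_nfsservctl is only an example symbol name, and the expansion is exactly what the #define above produces.

/*
 * Illustration only: expanding cond_syscall() for a sample optional
 * syscall entry point.
 */
cond_syscall(sys_nfsservctl);

/* ...preprocesses to the file-scope assembler statement: */
asm(".weak\tsys_nfsservctl\n\t.set\tsys_nfsservctl,sys_ni_syscall");

/*
 * So sys_nfsservctl becomes a weak alias for sys_ni_syscall (which just
 * returns -ENOSYS) unless a real definition is linked into the kernel.
 */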
diff --git a/include/asm-sparc/upa.h b/include/asm-sparc/upa.h
new file mode 100644
index 000000000000..5b1633223f92
--- /dev/null
+++ b/include/asm-sparc/upa.h
@@ -0,0 +1,109 @@
1#ifndef _SPARC64_UPA_H
2#define _SPARC64_UPA_H
3
4#include <asm/asi.h>
5
6/* UPA level registers and defines. */
7
8/* UPA Config Register */
9#define UPA_CONFIG_RESV 0xffffffffc0000000 /* Reserved. */
10#define UPA_CONFIG_PCON 0x000000003fc00000 /* Depth of various sys queues. */
11#define UPA_CONFIG_MID 0x00000000003e0000 /* Module ID. */
12#define UPA_CONFIG_PCAP 0x000000000001ffff /* Port Capabilities. */
13
14/* UPA Port ID Register */
15#define UPA_PORTID_FNP 0xff00000000000000 /* Hardcoded to 0xfc on ultra. */
16#define UPA_PORTID_RESV 0x00fffff800000000 /* Reserved. */
17#define UPA_PORTID_ECCVALID 0x0000000400000000 /* Zero if mod can generate ECC */
18#define UPA_PORTID_ONEREAD 0x0000000200000000 /* Set if mod generates P_RASB */
19#define UPA_PORTID_PINTRDQ 0x0000000180000000 /* # outstanding P_INT_REQ's */
20#define UPA_PORTID_PREQDQ 0x000000007e000000 /* slave-wr's to mod supported */
21#define UPA_PORTID_PREQRD 0x0000000001e00000 /* # incoming P_REQ's supported */
22#define UPA_PORTID_UPACAP 0x00000000001f0000 /* UPA capabilities of mod */
23#define UPA_PORTID_ID 0x000000000000ffff /* Module Identification bits */
24
25/* UPA I/O space accessors */
26#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
27static inline unsigned char _upa_readb(unsigned long addr)
28{
29 unsigned char ret;
30
31 __asm__ __volatile__("lduba\t[%1] %2, %0\t/* upa_readb */"
32 : "=r" (ret)
33 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
34
35 return ret;
36}
37
38static inline unsigned short _upa_readw(unsigned long addr)
39{
40 unsigned short ret;
41
42 __asm__ __volatile__("lduha\t[%1] %2, %0\t/* upa_readw */"
43 : "=r" (ret)
44 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
45
46 return ret;
47}
48
49static inline unsigned int _upa_readl(unsigned long addr)
50{
51 unsigned int ret;
52
53 __asm__ __volatile__("lduwa\t[%1] %2, %0\t/* upa_readl */"
54 : "=r" (ret)
55 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
56
57 return ret;
58}
59
60static inline unsigned long _upa_readq(unsigned long addr)
61{
62 unsigned long ret;
63
64 __asm__ __volatile__("ldxa\t[%1] %2, %0\t/* upa_readq */"
65 : "=r" (ret)
66 : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
67
68 return ret;
69}
70
71static inline void _upa_writeb(unsigned char b, unsigned long addr)
72{
73 __asm__ __volatile__("stba\t%0, [%1] %2\t/* upa_writeb */"
74 : /* no outputs */
75 : "r" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
76}
77
78static inline void _upa_writew(unsigned short w, unsigned long addr)
79{
80 __asm__ __volatile__("stha\t%0, [%1] %2\t/* upa_writew */"
81 : /* no outputs */
82 : "r" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
83}
84
85static inline void _upa_writel(unsigned int l, unsigned long addr)
86{
87 __asm__ __volatile__("stwa\t%0, [%1] %2\t/* upa_writel */"
88 : /* no outputs */
89 : "r" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
90}
91
92static inline void _upa_writeq(unsigned long q, unsigned long addr)
93{
94 __asm__ __volatile__("stxa\t%0, [%1] %2\t/* upa_writeq */"
95 : /* no outputs */
96 : "r" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
97}
98
99#define upa_readb(__addr) (_upa_readb((unsigned long)(__addr)))
100#define upa_readw(__addr) (_upa_readw((unsigned long)(__addr)))
101#define upa_readl(__addr) (_upa_readl((unsigned long)(__addr)))
102#define upa_readq(__addr) (_upa_readq((unsigned long)(__addr)))
103#define upa_writeb(__b, __addr) (_upa_writeb((__b), (unsigned long)(__addr)))
104#define upa_writew(__w, __addr) (_upa_writew((__w), (unsigned long)(__addr)))
105#define upa_writel(__l, __addr) (_upa_writel((__l), (unsigned long)(__addr)))
106#define upa_writeq(__q, __addr) (_upa_writeq((__q), (unsigned long)(__addr)))
107#endif /* __KERNEL__ && !__ASSEMBLY__ */
108
109#endif /* !(_SPARC64_UPA_H) */
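For illustration (not part of the patch), a hedged sketch of how a sparc64 driver might combine the accessors and bit masks above; the register physical address is an assumed input, e.g. taken from firmware properties.

/*
 * Hedged sketch: read a UPA port-id register with upa_readq() and pull a
 * field out with the masks defined above.  `portid_pa' is assumed to be
 * the register's physical address.
 */
#include <asm/upa.h>

static unsigned int upa_port_module_id(unsigned long portid_pa)
{
	unsigned long val = upa_readq(portid_pa);

	/* UPA_PORTID_ID is the low 16 bits, so no shift is needed. */
	return val & UPA_PORTID_ID;
}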
diff --git a/include/asm-sparc/utrap.h b/include/asm-sparc/utrap.h
new file mode 100644
index 000000000000..9da37babbe5b
--- /dev/null
+++ b/include/asm-sparc/utrap.h
@@ -0,0 +1,51 @@
1/*
2 * include/asm-sparc64/utrap.h
3 *
4 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
5 */
6
7#ifndef __ASM_SPARC64_UTRAP_H
8#define __ASM_SPARC64_UTRAP_H
9
10#define UT_INSTRUCTION_EXCEPTION 1
11#define UT_INSTRUCTION_ERROR 2
12#define UT_INSTRUCTION_PROTECTION 3
13#define UT_ILLTRAP_INSTRUCTION 4
14#define UT_ILLEGAL_INSTRUCTION 5
15#define UT_PRIVILEGED_OPCODE 6
16#define UT_FP_DISABLED 7
17#define UT_FP_EXCEPTION_IEEE_754 8
18#define UT_FP_EXCEPTION_OTHER 9
19#define UT_TAG_OVERVIEW 10
20#define UT_DIVISION_BY_ZERO 11
21#define UT_DATA_EXCEPTION 12
22#define UT_DATA_ERROR 13
23#define UT_DATA_PROTECTION 14
24#define UT_MEM_ADDRESS_NOT_ALIGNED 15
25#define UT_PRIVILEGED_ACTION 16
26#define UT_ASYNC_DATA_ERROR 17
27#define UT_TRAP_INSTRUCTION_16 18
28#define UT_TRAP_INSTRUCTION_17 19
29#define UT_TRAP_INSTRUCTION_18 20
30#define UT_TRAP_INSTRUCTION_19 21
31#define UT_TRAP_INSTRUCTION_20 22
32#define UT_TRAP_INSTRUCTION_21 23
33#define UT_TRAP_INSTRUCTION_22 24
34#define UT_TRAP_INSTRUCTION_23 25
35#define UT_TRAP_INSTRUCTION_24 26
36#define UT_TRAP_INSTRUCTION_25 27
37#define UT_TRAP_INSTRUCTION_26 28
38#define UT_TRAP_INSTRUCTION_27 29
39#define UT_TRAP_INSTRUCTION_28 30
40#define UT_TRAP_INSTRUCTION_29 31
41#define UT_TRAP_INSTRUCTION_30 32
42#define UT_TRAP_INSTRUCTION_31 33
43
44#define UTH_NOCHANGE (-1)
45
46#ifndef __ASSEMBLY__
47typedef int utrap_entry_t;
48typedef void *utrap_handler_t;
49#endif /* __ASSEMBLY__ */
50
51#endif /* !(__ASM_SPARC64_UTRAP_H) */
diff --git a/include/asm-sparc/vaddrs.h b/include/asm-sparc/vaddrs.h
index f6ca4779056c..a22fed5a3c6b 100644
--- a/include/asm-sparc/vaddrs.h
+++ b/include/asm-sparc/vaddrs.h
@@ -34,11 +34,6 @@
34#define IOBASE_VADDR 0xfe000000
35#define IOBASE_END 0xfe600000
36
37#define VMALLOC_START 0xfe600000
38
39/* XXX Alter this when I get around to fixing sun4c - Anton */
40#define VMALLOC_END 0xffc00000
41
42/*
43 * On the sun4/4c we need a place
44 * to reliably map locked down kernel data. This includes the
diff --git a/include/asm-sparc/vio.h b/include/asm-sparc/vio.h
new file mode 100644
index 000000000000..d4de32f0f8af
--- /dev/null
+++ b/include/asm-sparc/vio.h
@@ -0,0 +1,406 @@
1#ifndef _SPARC64_VIO_H
2#define _SPARC64_VIO_H
3
4#include <linux/kernel.h>
5#include <linux/device.h>
6#include <linux/mod_devicetable.h>
7#include <linux/timer.h>
8#include <linux/spinlock.h>
9#include <linux/completion.h>
10#include <linux/list.h>
11#include <linux/log2.h>
12
13#include <asm/ldc.h>
14#include <asm/mdesc.h>
15
16struct vio_msg_tag {
17 u8 type;
18#define VIO_TYPE_CTRL 0x01
19#define VIO_TYPE_DATA 0x02
20#define VIO_TYPE_ERR 0x04
21
22 u8 stype;
23#define VIO_SUBTYPE_INFO 0x01
24#define VIO_SUBTYPE_ACK 0x02
25#define VIO_SUBTYPE_NACK 0x04
26
27 u16 stype_env;
28#define VIO_VER_INFO 0x0001
29#define VIO_ATTR_INFO 0x0002
30#define VIO_DRING_REG 0x0003
31#define VIO_DRING_UNREG 0x0004
32#define VIO_RDX 0x0005
33#define VIO_PKT_DATA 0x0040
34#define VIO_DESC_DATA 0x0041
35#define VIO_DRING_DATA 0x0042
36#define VNET_MCAST_INFO 0x0101
37
38 u32 sid;
39};
40
41struct vio_rdx {
42 struct vio_msg_tag tag;
43 u64 resv[6];
44};
45
46struct vio_ver_info {
47 struct vio_msg_tag tag;
48 u16 major;
49 u16 minor;
50 u8 dev_class;
51#define VDEV_NETWORK 0x01
52#define VDEV_NETWORK_SWITCH 0x02
53#define VDEV_DISK 0x03
54#define VDEV_DISK_SERVER 0x04
55
56 u8 resv1[3];
57 u64 resv2[5];
58};
59
60struct vio_dring_register {
61 struct vio_msg_tag tag;
62 u64 dring_ident;
63 u32 num_descr;
64 u32 descr_size;
65 u16 options;
66#define VIO_TX_DRING 0x0001
67#define VIO_RX_DRING 0x0002
68 u16 resv;
69 u32 num_cookies;
70 struct ldc_trans_cookie cookies[0];
71};
72
73struct vio_dring_unregister {
74 struct vio_msg_tag tag;
75 u64 dring_ident;
76 u64 resv[5];
77};
78
79/* Data transfer modes */
80#define VIO_PKT_MODE 0x01 /* Packet based transfer */
81#define VIO_DESC_MODE 0x02 /* In-band descriptors */
82#define VIO_DRING_MODE 0x03 /* Descriptor rings */
83
84struct vio_dring_data {
85 struct vio_msg_tag tag;
86 u64 seq;
87 u64 dring_ident;
88 u32 start_idx;
89 u32 end_idx;
90 u8 state;
91#define VIO_DRING_ACTIVE 0x01
92#define VIO_DRING_STOPPED 0x02
93
94 u8 __pad1;
95 u16 __pad2;
96 u32 __pad3;
97 u64 __par4[2];
98};
99
100struct vio_dring_hdr {
101 u8 state;
102#define VIO_DESC_FREE 0x01
103#define VIO_DESC_READY 0x02
104#define VIO_DESC_ACCEPTED 0x03
105#define VIO_DESC_DONE 0x04
106 u8 ack;
107#define VIO_ACK_ENABLE 0x01
108#define VIO_ACK_DISABLE 0x00
109
110 u16 __pad1;
111 u32 __pad2;
112};
113
114/* VIO disk specific structures and defines */
115struct vio_disk_attr_info {
116 struct vio_msg_tag tag;
117 u8 xfer_mode;
118 u8 vdisk_type;
119#define VD_DISK_TYPE_SLICE 0x01 /* Slice in block device */
120#define VD_DISK_TYPE_DISK 0x02 /* Entire block device */
121 u16 resv1;
122 u32 vdisk_block_size;
123 u64 operations;
124 u64 vdisk_size;
125 u64 max_xfer_size;
126 u64 resv2[2];
127};
128
129struct vio_disk_desc {
130 struct vio_dring_hdr hdr;
131 u64 req_id;
132 u8 operation;
133#define VD_OP_BREAD 0x01 /* Block read */
134#define VD_OP_BWRITE 0x02 /* Block write */
135#define VD_OP_FLUSH 0x03 /* Flush disk contents */
136#define VD_OP_GET_WCE 0x04 /* Get write-cache status */
137#define VD_OP_SET_WCE 0x05 /* Enable/disable write-cache */
138#define VD_OP_GET_VTOC 0x06 /* Get VTOC */
139#define VD_OP_SET_VTOC 0x07 /* Set VTOC */
140#define VD_OP_GET_DISKGEOM 0x08 /* Get disk geometry */
141#define VD_OP_SET_DISKGEOM 0x09 /* Set disk geometry */
142#define VD_OP_SCSICMD 0x0a /* SCSI control command */
143#define VD_OP_GET_DEVID 0x0b /* Get device ID */
144#define VD_OP_GET_EFI 0x0c /* Get EFI */
145#define VD_OP_SET_EFI 0x0d /* Set EFI */
146 u8 slice;
147 u16 resv1;
148 u32 status;
149 u64 offset;
150 u64 size;
151 u32 ncookies;
152 u32 resv2;
153 struct ldc_trans_cookie cookies[0];
154};
155
156#define VIO_DISK_VNAME_LEN 8
157#define VIO_DISK_ALABEL_LEN 128
158#define VIO_DISK_NUM_PART 8
159
160struct vio_disk_vtoc {
161 u8 volume_name[VIO_DISK_VNAME_LEN];
162 u16 sector_size;
163 u16 num_partitions;
164 u8 ascii_label[VIO_DISK_ALABEL_LEN];
165 struct {
166 u16 id;
167 u16 perm_flags;
168 u32 resv;
169 u64 start_block;
170 u64 num_blocks;
171 } partitions[VIO_DISK_NUM_PART];
172};
173
174struct vio_disk_geom {
175 u16 num_cyl; /* Num data cylinders */
176 u16 alt_cyl; /* Num alternate cylinders */
177 u16 beg_cyl; /* Cyl off of fixed head area */
178 u16 num_hd; /* Num heads */
179 u16 num_sec; /* Num sectors */
180 u16 ifact; /* Interleave factor */
181 u16 apc; /* Alts per cylinder (SCSI) */
182 u16 rpm; /* Revolutions per minute */
183 u16 phy_cyl; /* Num physical cylinders */
184 u16 wr_skip; /* Num sects to skip, writes */
185 u16 rd_skip; /* Num sects to skip, reads */
186};
187
188struct vio_disk_devid {
189 u16 resv;
190 u16 type;
191 u32 len;
192 char id[0];
193};
194
195struct vio_disk_efi {
196 u64 lba;
197 u64 len;
198 char data[0];
199};
200
201/* VIO net specific structures and defines */
202struct vio_net_attr_info {
203 struct vio_msg_tag tag;
204 u8 xfer_mode;
205 u8 addr_type;
206#define VNET_ADDR_ETHERMAC 0x01
207 u16 ack_freq;
208 u32 resv1;
209 u64 addr;
210 u64 mtu;
211 u64 resv2[3];
212};
213
214#define VNET_NUM_MCAST 7
215
216struct vio_net_mcast_info {
217 struct vio_msg_tag tag;
218 u8 set;
219 u8 count;
220 u8 mcast_addr[VNET_NUM_MCAST * 6];
221 u32 resv;
222};
223
224struct vio_net_desc {
225 struct vio_dring_hdr hdr;
226 u32 size;
227 u32 ncookies;
228 struct ldc_trans_cookie cookies[0];
229};
230
231#define VIO_MAX_RING_COOKIES 24
232
233struct vio_dring_state {
234 u64 ident;
235 void *base;
236 u64 snd_nxt;
237 u64 rcv_nxt;
238 u32 entry_size;
239 u32 num_entries;
240 u32 prod;
241 u32 cons;
242 u32 pending;
243 int ncookies;
244 struct ldc_trans_cookie cookies[VIO_MAX_RING_COOKIES];
245};
246
247static inline void *vio_dring_cur(struct vio_dring_state *dr)
248{
249 return dr->base + (dr->entry_size * dr->prod);
250}
251
252static inline void *vio_dring_entry(struct vio_dring_state *dr,
253 unsigned int index)
254{
255 return dr->base + (dr->entry_size * index);
256}
257
258static inline u32 vio_dring_avail(struct vio_dring_state *dr,
259 unsigned int ring_size)
260{
261 BUILD_BUG_ON(!is_power_of_2(ring_size));
262
263 return (dr->pending -
264 ((dr->prod - dr->cons) & (ring_size - 1)));
265}
266
267#define VIO_MAX_TYPE_LEN 32
268#define VIO_MAX_COMPAT_LEN 64
269
270struct vio_dev {
271 u64 mp;
272 struct device_node *dp;
273
274 char type[VIO_MAX_TYPE_LEN];
275 char compat[VIO_MAX_COMPAT_LEN];
276 int compat_len;
277
278 u64 dev_no;
279
280 unsigned long channel_id;
281
282 unsigned int tx_irq;
283 unsigned int rx_irq;
284
285 struct device dev;
286};
287
288struct vio_driver {
289 struct list_head node;
290 const struct vio_device_id *id_table;
291 int (*probe)(struct vio_dev *dev, const struct vio_device_id *id);
292 int (*remove)(struct vio_dev *dev);
293 void (*shutdown)(struct vio_dev *dev);
294 unsigned long driver_data;
295 struct device_driver driver;
296};
297
298struct vio_version {
299 u16 major;
300 u16 minor;
301};
302
303struct vio_driver_state;
304struct vio_driver_ops {
305 int (*send_attr)(struct vio_driver_state *vio);
306 int (*handle_attr)(struct vio_driver_state *vio, void *pkt);
307 void (*handshake_complete)(struct vio_driver_state *vio);
308};
309
310struct vio_completion {
311 struct completion com;
312 int err;
313 int waiting_for;
314};
315
316struct vio_driver_state {
317 /* Protects VIO handshake and, optionally, driver private state. */
318 spinlock_t lock;
319
320 struct ldc_channel *lp;
321
322 u32 _peer_sid;
323 u32 _local_sid;
324 struct vio_dring_state drings[2];
325#define VIO_DRIVER_TX_RING 0
326#define VIO_DRIVER_RX_RING 1
327
328 u8 hs_state;
329#define VIO_HS_INVALID 0x00
330#define VIO_HS_GOTVERS 0x01
331#define VIO_HS_GOT_ATTR 0x04
332#define VIO_HS_SENT_DREG 0x08
333#define VIO_HS_SENT_RDX 0x10
334#define VIO_HS_GOT_RDX_ACK 0x20
335#define VIO_HS_GOT_RDX 0x40
336#define VIO_HS_SENT_RDX_ACK 0x80
337#define VIO_HS_COMPLETE (VIO_HS_GOT_RDX_ACK | VIO_HS_SENT_RDX_ACK)
338
339 u8 dev_class;
340
341 u8 dr_state;
342#define VIO_DR_STATE_TXREG 0x01
343#define VIO_DR_STATE_RXREG 0x02
344#define VIO_DR_STATE_TXREQ 0x10
345#define VIO_DR_STATE_RXREQ 0x20
346
347 u8 debug;
348#define VIO_DEBUG_HS 0x01
349#define VIO_DEBUG_DATA 0x02
350
351 void *desc_buf;
352 unsigned int desc_buf_len;
353
354 struct vio_completion *cmp;
355
356 struct vio_dev *vdev;
357
358 struct timer_list timer;
359
360 struct vio_version ver;
361
362 struct vio_version *ver_table;
363 int ver_table_entries;
364
365 char *name;
366
367 struct vio_driver_ops *ops;
368};
369
370#define viodbg(TYPE, f, a...) \
371do { if (vio->debug & VIO_DEBUG_##TYPE) \
372 printk(KERN_INFO "vio: ID[%lu] " f, \
373 vio->vdev->channel_id, ## a); \
374} while (0)
375
376extern int vio_register_driver(struct vio_driver *drv);
377extern void vio_unregister_driver(struct vio_driver *drv);
378
379static inline struct vio_driver *to_vio_driver(struct device_driver *drv)
380{
381 return container_of(drv, struct vio_driver, driver);
382}
383
384static inline struct vio_dev *to_vio_dev(struct device *dev)
385{
386 return container_of(dev, struct vio_dev, dev);
387}
388
389extern int vio_ldc_send(struct vio_driver_state *vio, void *data, int len);
390extern void vio_link_state_change(struct vio_driver_state *vio, int event);
391extern void vio_conn_reset(struct vio_driver_state *vio);
392extern int vio_control_pkt_engine(struct vio_driver_state *vio, void *pkt);
393extern int vio_validate_sid(struct vio_driver_state *vio,
394 struct vio_msg_tag *tp);
395extern u32 vio_send_sid(struct vio_driver_state *vio);
396extern int vio_ldc_alloc(struct vio_driver_state *vio,
397 struct ldc_channel_config *base_cfg, void *event_arg);
398extern void vio_ldc_free(struct vio_driver_state *vio);
399extern int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
400 u8 dev_class, struct vio_version *ver_table,
401 int ver_table_size, struct vio_driver_ops *ops,
402 char *name);
403
404extern void vio_port_up(struct vio_driver_state *vio);
405
406#endif /* _SPARC64_VIO_H */
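For illustration (not part of the patch), a hedged sketch of a TX-ring producer built on the helpers above; MY_TX_RING_SIZE and fill_desc() are hypothetical names introduced only for this example.

/*
 * Hedged sketch: reserve and fill one TX descriptor using the ring
 * helpers above.  The ring size must be a power of two, which
 * vio_dring_avail() asserts at build time.
 */
#include <linux/errno.h>
#include <asm/vio.h>

#define MY_TX_RING_SIZE	512		/* hypothetical, power of two */

static int queue_one_desc(struct vio_driver_state *vio,
			  void (*fill_desc)(void *desc))
{
	struct vio_dring_state *dr = &vio->drings[VIO_DRIVER_TX_RING];

	if (vio_dring_avail(dr, MY_TX_RING_SIZE) < 1)
		return -EBUSY;			/* ring is full */

	fill_desc(vio_dring_cur(dr));		/* descriptor at dr->prod */
	dr->prod = (dr->prod + 1) & (MY_TX_RING_SIZE - 1);

	return 0;
}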
diff --git a/include/asm-sparc/visasm.h b/include/asm-sparc/visasm.h
new file mode 100644
index 000000000000..de797b9bf552
--- /dev/null
+++ b/include/asm-sparc/visasm.h
@@ -0,0 +1,62 @@
1#ifndef _SPARC64_VISASM_H
2#define _SPARC64_VISASM_H
3
4/* visasm.h: FPU saving macros for VIS routines
5 *
6 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
7 */
8
9#include <asm/pstate.h>
10#include <asm/ptrace.h>
11
12/* Clobbers %o5, %g1, %g2, %g3, %g7, %icc, %xcc */
13
14#define VISEntry \
15 rd %fprs, %o5; \
16 andcc %o5, (FPRS_FEF|FPRS_DU), %g0; \
17 be,pt %icc, 297f; \
18 sethi %hi(297f), %g7; \
19 sethi %hi(VISenter), %g1; \
20 jmpl %g1 + %lo(VISenter), %g0; \
21 or %g7, %lo(297f), %g7; \
22297: wr %g0, FPRS_FEF, %fprs; \
23
24#define VISExit \
25 wr %g0, 0, %fprs;
26
27/* Clobbers %o5, %g1, %g2, %g3, %g7, %icc, %xcc.
28 * Must preserve %o5 between VISEntryHalf and VISExitHalf */
29
30#define VISEntryHalf \
31 rd %fprs, %o5; \
32 andcc %o5, FPRS_FEF, %g0; \
33 be,pt %icc, 297f; \
34 sethi %hi(298f), %g7; \
35 sethi %hi(VISenterhalf), %g1; \
36 jmpl %g1 + %lo(VISenterhalf), %g0; \
37 or %g7, %lo(298f), %g7; \
38 clr %o5; \
39297: wr %o5, FPRS_FEF, %fprs; \
40298:
41
42#define VISExitHalf \
43 wr %o5, 0, %fprs;
44
45#ifndef __ASSEMBLY__
46static inline void save_and_clear_fpu(void) {
47 __asm__ __volatile__ (
48" rd %%fprs, %%o5\n"
49" andcc %%o5, %0, %%g0\n"
50" be,pt %%icc, 299f\n"
51" sethi %%hi(298f), %%g7\n"
52" sethi %%hi(VISenter), %%g1\n"
53" jmpl %%g1 + %%lo(VISenter), %%g0\n"
54" or %%g7, %%lo(298f), %%g7\n"
55" 298: wr %%g0, 0, %%fprs\n"
56" 299:\n"
57" " : : "i" (FPRS_FEF|FPRS_DU) :
58 "o5", "g1", "g2", "g3", "g7", "cc");
59}
60#endif
61
62#endif /* _SPARC64_VISASM_H */
diff --git a/include/asm-sparc/watchdog.h b/include/asm-sparc/watchdog.h
new file mode 100644
index 000000000000..5baf2d3919cf
--- /dev/null
+++ b/include/asm-sparc/watchdog.h
@@ -0,0 +1,31 @@
1/*
2 *
3 * watchdog - Driver interface for the hardware watchdog timers
4 * present on Sun Microsystems boardsets
5 *
6 * Copyright (c) 2000 Eric Brower <ebrower@usa.net>
7 *
8 */
9
10#ifndef _SPARC64_WATCHDOG_H
11#define _SPARC64_WATCHDOG_H
12
13#include <linux/watchdog.h>
14
15/* Solaris compatibility ioctls--
16 * Ref. <linux/watchdog.h> for standard linux watchdog ioctls
17 */
18#define WIOCSTART _IO (WATCHDOG_IOCTL_BASE, 10) /* Start Timer */
19#define WIOCSTOP _IO (WATCHDOG_IOCTL_BASE, 11) /* Stop Timer */
20#define WIOCGSTAT _IOR(WATCHDOG_IOCTL_BASE, 12, int)/* Get Timer Status */
21
22/* Status flags from WIOCGSTAT ioctl
23 */
24#define WD_FREERUN 0x01 /* timer is running, interrupts disabled */
25#define WD_EXPIRED 0x02 /* timer has expired */
26#define WD_RUNNING 0x04 /* timer is running, interrupts enabled */
27#define WD_STOPPED 0x08 /* timer has not been started */
28#define WD_SERVICED 0x10 /* timer interrupt was serviced */
29
30#endif /* ifndef _SPARC64_WATCHDOG_H */
31
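For illustration (not part of the patch), a hedged user-space sketch exercising the Solaris-compatibility ioctls above; the /dev/watchdog node name is an assumption and the ioctl values simply mirror the definitions in this header.

/* Hedged sketch: start the hardware watchdog and read its status. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

#define WATCHDOG_IOCTL_BASE	'W'	/* mirrors <linux/watchdog.h> */
#define WIOCSTART	_IO (WATCHDOG_IOCTL_BASE, 10)
#define WIOCGSTAT	_IOR(WATCHDOG_IOCTL_BASE, 12, int)

int main(void)
{
	int status, fd = open("/dev/watchdog", O_RDWR);	/* assumed node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, WIOCSTART) == 0 &&		/* start the timer */
	    ioctl(fd, WIOCGSTAT, &status) == 0)
		printf("watchdog status: %#x\n", status);
	close(fd);
	return 0;
}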
diff --git a/include/asm-sparc/xor.h b/include/asm-sparc/xor.h
index f34b2cfa8206..35089a838c3f 100644
--- a/include/asm-sparc/xor.h
+++ b/include/asm-sparc/xor.h
@@ -1,269 +1,8 @@
1#ifndef ___ASM_SPARC_XOR_H
2#define ___ASM_SPARC_XOR_H
3#if defined(__sparc__) && defined(__arch64__)
4#include <asm-sparc/xor_64.h>
5#else
6#include <asm-sparc/xor_32.h>
7#endif
8#endif
1/*
2 * include/asm-sparc/xor.h
3 *
4 * Optimized RAID-5 checksumming functions for 32-bit Sparc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * You should have received a copy of the GNU General Public License
12 * (for example /usr/src/linux/COPYING); if not, write to the Free
13 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
14 */
15
16/*
17 * High speed xor_block operation for RAID4/5 utilizing the
18 * ldd/std SPARC instructions.
19 *
20 * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
21 */
22
23static void
24sparc_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
25{
26 int lines = bytes / (sizeof (long)) / 8;
27
28 do {
29 __asm__ __volatile__(
30 "ldd [%0 + 0x00], %%g2\n\t"
31 "ldd [%0 + 0x08], %%g4\n\t"
32 "ldd [%0 + 0x10], %%o0\n\t"
33 "ldd [%0 + 0x18], %%o2\n\t"
34 "ldd [%1 + 0x00], %%o4\n\t"
35 "ldd [%1 + 0x08], %%l0\n\t"
36 "ldd [%1 + 0x10], %%l2\n\t"
37 "ldd [%1 + 0x18], %%l4\n\t"
38 "xor %%g2, %%o4, %%g2\n\t"
39 "xor %%g3, %%o5, %%g3\n\t"
40 "xor %%g4, %%l0, %%g4\n\t"
41 "xor %%g5, %%l1, %%g5\n\t"
42 "xor %%o0, %%l2, %%o0\n\t"
43 "xor %%o1, %%l3, %%o1\n\t"
44 "xor %%o2, %%l4, %%o2\n\t"
45 "xor %%o3, %%l5, %%o3\n\t"
46 "std %%g2, [%0 + 0x00]\n\t"
47 "std %%g4, [%0 + 0x08]\n\t"
48 "std %%o0, [%0 + 0x10]\n\t"
49 "std %%o2, [%0 + 0x18]\n"
50 :
51 : "r" (p1), "r" (p2)
52 : "g2", "g3", "g4", "g5",
53 "o0", "o1", "o2", "o3", "o4", "o5",
54 "l0", "l1", "l2", "l3", "l4", "l5");
55 p1 += 8;
56 p2 += 8;
57 } while (--lines > 0);
58}
59
60static void
61sparc_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
62 unsigned long *p3)
63{
64 int lines = bytes / (sizeof (long)) / 8;
65
66 do {
67 __asm__ __volatile__(
68 "ldd [%0 + 0x00], %%g2\n\t"
69 "ldd [%0 + 0x08], %%g4\n\t"
70 "ldd [%0 + 0x10], %%o0\n\t"
71 "ldd [%0 + 0x18], %%o2\n\t"
72 "ldd [%1 + 0x00], %%o4\n\t"
73 "ldd [%1 + 0x08], %%l0\n\t"
74 "ldd [%1 + 0x10], %%l2\n\t"
75 "ldd [%1 + 0x18], %%l4\n\t"
76 "xor %%g2, %%o4, %%g2\n\t"
77 "xor %%g3, %%o5, %%g3\n\t"
78 "ldd [%2 + 0x00], %%o4\n\t"
79 "xor %%g4, %%l0, %%g4\n\t"
80 "xor %%g5, %%l1, %%g5\n\t"
81 "ldd [%2 + 0x08], %%l0\n\t"
82 "xor %%o0, %%l2, %%o0\n\t"
83 "xor %%o1, %%l3, %%o1\n\t"
84 "ldd [%2 + 0x10], %%l2\n\t"
85 "xor %%o2, %%l4, %%o2\n\t"
86 "xor %%o3, %%l5, %%o3\n\t"
87 "ldd [%2 + 0x18], %%l4\n\t"
88 "xor %%g2, %%o4, %%g2\n\t"
89 "xor %%g3, %%o5, %%g3\n\t"
90 "xor %%g4, %%l0, %%g4\n\t"
91 "xor %%g5, %%l1, %%g5\n\t"
92 "xor %%o0, %%l2, %%o0\n\t"
93 "xor %%o1, %%l3, %%o1\n\t"
94 "xor %%o2, %%l4, %%o2\n\t"
95 "xor %%o3, %%l5, %%o3\n\t"
96 "std %%g2, [%0 + 0x00]\n\t"
97 "std %%g4, [%0 + 0x08]\n\t"
98 "std %%o0, [%0 + 0x10]\n\t"
99 "std %%o2, [%0 + 0x18]\n"
100 :
101 : "r" (p1), "r" (p2), "r" (p3)
102 : "g2", "g3", "g4", "g5",
103 "o0", "o1", "o2", "o3", "o4", "o5",
104 "l0", "l1", "l2", "l3", "l4", "l5");
105 p1 += 8;
106 p2 += 8;
107 p3 += 8;
108 } while (--lines > 0);
109}
110
111static void
112sparc_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
113 unsigned long *p3, unsigned long *p4)
114{
115 int lines = bytes / (sizeof (long)) / 8;
116
117 do {
118 __asm__ __volatile__(
119 "ldd [%0 + 0x00], %%g2\n\t"
120 "ldd [%0 + 0x08], %%g4\n\t"
121 "ldd [%0 + 0x10], %%o0\n\t"
122 "ldd [%0 + 0x18], %%o2\n\t"
123 "ldd [%1 + 0x00], %%o4\n\t"
124 "ldd [%1 + 0x08], %%l0\n\t"
125 "ldd [%1 + 0x10], %%l2\n\t"
126 "ldd [%1 + 0x18], %%l4\n\t"
127 "xor %%g2, %%o4, %%g2\n\t"
128 "xor %%g3, %%o5, %%g3\n\t"
129 "ldd [%2 + 0x00], %%o4\n\t"
130 "xor %%g4, %%l0, %%g4\n\t"
131 "xor %%g5, %%l1, %%g5\n\t"
132 "ldd [%2 + 0x08], %%l0\n\t"
133 "xor %%o0, %%l2, %%o0\n\t"
134 "xor %%o1, %%l3, %%o1\n\t"
135 "ldd [%2 + 0x10], %%l2\n\t"
136 "xor %%o2, %%l4, %%o2\n\t"
137 "xor %%o3, %%l5, %%o3\n\t"
138 "ldd [%2 + 0x18], %%l4\n\t"
139 "xor %%g2, %%o4, %%g2\n\t"
140 "xor %%g3, %%o5, %%g3\n\t"
141 "ldd [%3 + 0x00], %%o4\n\t"
142 "xor %%g4, %%l0, %%g4\n\t"
143 "xor %%g5, %%l1, %%g5\n\t"
144 "ldd [%3 + 0x08], %%l0\n\t"
145 "xor %%o0, %%l2, %%o0\n\t"
146 "xor %%o1, %%l3, %%o1\n\t"
147 "ldd [%3 + 0x10], %%l2\n\t"
148 "xor %%o2, %%l4, %%o2\n\t"
149 "xor %%o3, %%l5, %%o3\n\t"
150 "ldd [%3 + 0x18], %%l4\n\t"
151 "xor %%g2, %%o4, %%g2\n\t"
152 "xor %%g3, %%o5, %%g3\n\t"
153 "xor %%g4, %%l0, %%g4\n\t"
154 "xor %%g5, %%l1, %%g5\n\t"
155 "xor %%o0, %%l2, %%o0\n\t"
156 "xor %%o1, %%l3, %%o1\n\t"
157 "xor %%o2, %%l4, %%o2\n\t"
158 "xor %%o3, %%l5, %%o3\n\t"
159 "std %%g2, [%0 + 0x00]\n\t"
160 "std %%g4, [%0 + 0x08]\n\t"
161 "std %%o0, [%0 + 0x10]\n\t"
162 "std %%o2, [%0 + 0x18]\n"
163 :
164 : "r" (p1), "r" (p2), "r" (p3), "r" (p4)
165 : "g2", "g3", "g4", "g5",
166 "o0", "o1", "o2", "o3", "o4", "o5",
167 "l0", "l1", "l2", "l3", "l4", "l5");
168 p1 += 8;
169 p2 += 8;
170 p3 += 8;
171 p4 += 8;
172 } while (--lines > 0);
173}
174
175static void
176sparc_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
177 unsigned long *p3, unsigned long *p4, unsigned long *p5)
178{
179 int lines = bytes / (sizeof (long)) / 8;
180
181 do {
182 __asm__ __volatile__(
183 "ldd [%0 + 0x00], %%g2\n\t"
184 "ldd [%0 + 0x08], %%g4\n\t"
185 "ldd [%0 + 0x10], %%o0\n\t"
186 "ldd [%0 + 0x18], %%o2\n\t"
187 "ldd [%1 + 0x00], %%o4\n\t"
188 "ldd [%1 + 0x08], %%l0\n\t"
189 "ldd [%1 + 0x10], %%l2\n\t"
190 "ldd [%1 + 0x18], %%l4\n\t"
191 "xor %%g2, %%o4, %%g2\n\t"
192 "xor %%g3, %%o5, %%g3\n\t"
193 "ldd [%2 + 0x00], %%o4\n\t"
194 "xor %%g4, %%l0, %%g4\n\t"
195 "xor %%g5, %%l1, %%g5\n\t"
196 "ldd [%2 + 0x08], %%l0\n\t"
197 "xor %%o0, %%l2, %%o0\n\t"
198 "xor %%o1, %%l3, %%o1\n\t"
199 "ldd [%2 + 0x10], %%l2\n\t"
200 "xor %%o2, %%l4, %%o2\n\t"
201 "xor %%o3, %%l5, %%o3\n\t"
202 "ldd [%2 + 0x18], %%l4\n\t"
203 "xor %%g2, %%o4, %%g2\n\t"
204 "xor %%g3, %%o5, %%g3\n\t"
205 "ldd [%3 + 0x00], %%o4\n\t"
206 "xor %%g4, %%l0, %%g4\n\t"
207 "xor %%g5, %%l1, %%g5\n\t"
208 "ldd [%3 + 0x08], %%l0\n\t"
209 "xor %%o0, %%l2, %%o0\n\t"
210 "xor %%o1, %%l3, %%o1\n\t"
211 "ldd [%3 + 0x10], %%l2\n\t"
212 "xor %%o2, %%l4, %%o2\n\t"
213 "xor %%o3, %%l5, %%o3\n\t"
214 "ldd [%3 + 0x18], %%l4\n\t"
215 "xor %%g2, %%o4, %%g2\n\t"
216 "xor %%g3, %%o5, %%g3\n\t"
217 "ldd [%4 + 0x00], %%o4\n\t"
218 "xor %%g4, %%l0, %%g4\n\t"
219 "xor %%g5, %%l1, %%g5\n\t"
220 "ldd [%4 + 0x08], %%l0\n\t"
221 "xor %%o0, %%l2, %%o0\n\t"
222 "xor %%o1, %%l3, %%o1\n\t"
223 "ldd [%4 + 0x10], %%l2\n\t"
224 "xor %%o2, %%l4, %%o2\n\t"
225 "xor %%o3, %%l5, %%o3\n\t"
226 "ldd [%4 + 0x18], %%l4\n\t"
227 "xor %%g2, %%o4, %%g2\n\t"
228 "xor %%g3, %%o5, %%g3\n\t"
229 "xor %%g4, %%l0, %%g4\n\t"
230 "xor %%g5, %%l1, %%g5\n\t"
231 "xor %%o0, %%l2, %%o0\n\t"
232 "xor %%o1, %%l3, %%o1\n\t"
233 "xor %%o2, %%l4, %%o2\n\t"
234 "xor %%o3, %%l5, %%o3\n\t"
235 "std %%g2, [%0 + 0x00]\n\t"
236 "std %%g4, [%0 + 0x08]\n\t"
237 "std %%o0, [%0 + 0x10]\n\t"
238 "std %%o2, [%0 + 0x18]\n"
239 :
240 : "r" (p1), "r" (p2), "r" (p3), "r" (p4), "r" (p5)
241 : "g2", "g3", "g4", "g5",
242 "o0", "o1", "o2", "o3", "o4", "o5",
243 "l0", "l1", "l2", "l3", "l4", "l5");
244 p1 += 8;
245 p2 += 8;
246 p3 += 8;
247 p4 += 8;
248 p5 += 8;
249 } while (--lines > 0);
250}
251
252static struct xor_block_template xor_block_SPARC = {
253 .name = "SPARC",
254 .do_2 = sparc_2,
255 .do_3 = sparc_3,
256 .do_4 = sparc_4,
257 .do_5 = sparc_5,
258};
259
260/* For grins, also test the generic routines. */
261#include <asm-generic/xor.h>
262
263#undef XOR_TRY_TEMPLATES
264#define XOR_TRY_TEMPLATES \
265 do { \
266 xor_speed(&xor_block_8regs); \
267 xor_speed(&xor_block_32regs); \
268 xor_speed(&xor_block_SPARC); \
269 } while (0)
diff --git a/include/asm-sparc/xor_32.h b/include/asm-sparc/xor_32.h
new file mode 100644
index 000000000000..f34b2cfa8206
--- /dev/null
+++ b/include/asm-sparc/xor_32.h
@@ -0,0 +1,269 @@
1/*
2 * include/asm-sparc/xor.h
3 *
4 * Optimized RAID-5 checksumming functions for 32-bit Sparc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * You should have received a copy of the GNU General Public License
12 * (for example /usr/src/linux/COPYING); if not, write to the Free
13 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
14 */
15
16/*
17 * High speed xor_block operation for RAID4/5 utilizing the
18 * ldd/std SPARC instructions.
19 *
20 * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
21 */
22
23static void
24sparc_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
25{
26 int lines = bytes / (sizeof (long)) / 8;
27
28 do {
29 __asm__ __volatile__(
30 "ldd [%0 + 0x00], %%g2\n\t"
31 "ldd [%0 + 0x08], %%g4\n\t"
32 "ldd [%0 + 0x10], %%o0\n\t"
33 "ldd [%0 + 0x18], %%o2\n\t"
34 "ldd [%1 + 0x00], %%o4\n\t"
35 "ldd [%1 + 0x08], %%l0\n\t"
36 "ldd [%1 + 0x10], %%l2\n\t"
37 "ldd [%1 + 0x18], %%l4\n\t"
38 "xor %%g2, %%o4, %%g2\n\t"
39 "xor %%g3, %%o5, %%g3\n\t"
40 "xor %%g4, %%l0, %%g4\n\t"
41 "xor %%g5, %%l1, %%g5\n\t"
42 "xor %%o0, %%l2, %%o0\n\t"
43 "xor %%o1, %%l3, %%o1\n\t"
44 "xor %%o2, %%l4, %%o2\n\t"
45 "xor %%o3, %%l5, %%o3\n\t"
46 "std %%g2, [%0 + 0x00]\n\t"
47 "std %%g4, [%0 + 0x08]\n\t"
48 "std %%o0, [%0 + 0x10]\n\t"
49 "std %%o2, [%0 + 0x18]\n"
50 :
51 : "r" (p1), "r" (p2)
52 : "g2", "g3", "g4", "g5",
53 "o0", "o1", "o2", "o3", "o4", "o5",
54 "l0", "l1", "l2", "l3", "l4", "l5");
55 p1 += 8;
56 p2 += 8;
57 } while (--lines > 0);
58}
59
60static void
61sparc_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
62 unsigned long *p3)
63{
64 int lines = bytes / (sizeof (long)) / 8;
65
66 do {
67 __asm__ __volatile__(
68 "ldd [%0 + 0x00], %%g2\n\t"
69 "ldd [%0 + 0x08], %%g4\n\t"
70 "ldd [%0 + 0x10], %%o0\n\t"
71 "ldd [%0 + 0x18], %%o2\n\t"
72 "ldd [%1 + 0x00], %%o4\n\t"
73 "ldd [%1 + 0x08], %%l0\n\t"
74 "ldd [%1 + 0x10], %%l2\n\t"
75 "ldd [%1 + 0x18], %%l4\n\t"
76 "xor %%g2, %%o4, %%g2\n\t"
77 "xor %%g3, %%o5, %%g3\n\t"
78 "ldd [%2 + 0x00], %%o4\n\t"
79 "xor %%g4, %%l0, %%g4\n\t"
80 "xor %%g5, %%l1, %%g5\n\t"
81 "ldd [%2 + 0x08], %%l0\n\t"
82 "xor %%o0, %%l2, %%o0\n\t"
83 "xor %%o1, %%l3, %%o1\n\t"
84 "ldd [%2 + 0x10], %%l2\n\t"
85 "xor %%o2, %%l4, %%o2\n\t"
86 "xor %%o3, %%l5, %%o3\n\t"
87 "ldd [%2 + 0x18], %%l4\n\t"
88 "xor %%g2, %%o4, %%g2\n\t"
89 "xor %%g3, %%o5, %%g3\n\t"
90 "xor %%g4, %%l0, %%g4\n\t"
91 "xor %%g5, %%l1, %%g5\n\t"
92 "xor %%o0, %%l2, %%o0\n\t"
93 "xor %%o1, %%l3, %%o1\n\t"
94 "xor %%o2, %%l4, %%o2\n\t"
95 "xor %%o3, %%l5, %%o3\n\t"
96 "std %%g2, [%0 + 0x00]\n\t"
97 "std %%g4, [%0 + 0x08]\n\t"
98 "std %%o0, [%0 + 0x10]\n\t"
99 "std %%o2, [%0 + 0x18]\n"
100 :
101 : "r" (p1), "r" (p2), "r" (p3)
102 : "g2", "g3", "g4", "g5",
103 "o0", "o1", "o2", "o3", "o4", "o5",
104 "l0", "l1", "l2", "l3", "l4", "l5");
105 p1 += 8;
106 p2 += 8;
107 p3 += 8;
108 } while (--lines > 0);
109}
110
111static void
112sparc_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
113 unsigned long *p3, unsigned long *p4)
114{
115 int lines = bytes / (sizeof (long)) / 8;
116
117 do {
118 __asm__ __volatile__(
119 "ldd [%0 + 0x00], %%g2\n\t"
120 "ldd [%0 + 0x08], %%g4\n\t"
121 "ldd [%0 + 0x10], %%o0\n\t"
122 "ldd [%0 + 0x18], %%o2\n\t"
123 "ldd [%1 + 0x00], %%o4\n\t"
124 "ldd [%1 + 0x08], %%l0\n\t"
125 "ldd [%1 + 0x10], %%l2\n\t"
126 "ldd [%1 + 0x18], %%l4\n\t"
127 "xor %%g2, %%o4, %%g2\n\t"
128 "xor %%g3, %%o5, %%g3\n\t"
129 "ldd [%2 + 0x00], %%o4\n\t"
130 "xor %%g4, %%l0, %%g4\n\t"
131 "xor %%g5, %%l1, %%g5\n\t"
132 "ldd [%2 + 0x08], %%l0\n\t"
133 "xor %%o0, %%l2, %%o0\n\t"
134 "xor %%o1, %%l3, %%o1\n\t"
135 "ldd [%2 + 0x10], %%l2\n\t"
136 "xor %%o2, %%l4, %%o2\n\t"
137 "xor %%o3, %%l5, %%o3\n\t"
138 "ldd [%2 + 0x18], %%l4\n\t"
139 "xor %%g2, %%o4, %%g2\n\t"
140 "xor %%g3, %%o5, %%g3\n\t"
141 "ldd [%3 + 0x00], %%o4\n\t"
142 "xor %%g4, %%l0, %%g4\n\t"
143 "xor %%g5, %%l1, %%g5\n\t"
144 "ldd [%3 + 0x08], %%l0\n\t"
145 "xor %%o0, %%l2, %%o0\n\t"
146 "xor %%o1, %%l3, %%o1\n\t"
147 "ldd [%3 + 0x10], %%l2\n\t"
148 "xor %%o2, %%l4, %%o2\n\t"
149 "xor %%o3, %%l5, %%o3\n\t"
150 "ldd [%3 + 0x18], %%l4\n\t"
151 "xor %%g2, %%o4, %%g2\n\t"
152 "xor %%g3, %%o5, %%g3\n\t"
153 "xor %%g4, %%l0, %%g4\n\t"
154 "xor %%g5, %%l1, %%g5\n\t"
155 "xor %%o0, %%l2, %%o0\n\t"
156 "xor %%o1, %%l3, %%o1\n\t"
157 "xor %%o2, %%l4, %%o2\n\t"
158 "xor %%o3, %%l5, %%o3\n\t"
159 "std %%g2, [%0 + 0x00]\n\t"
160 "std %%g4, [%0 + 0x08]\n\t"
161 "std %%o0, [%0 + 0x10]\n\t"
162 "std %%o2, [%0 + 0x18]\n"
163 :
164 : "r" (p1), "r" (p2), "r" (p3), "r" (p4)
165 : "g2", "g3", "g4", "g5",
166 "o0", "o1", "o2", "o3", "o4", "o5",
167 "l0", "l1", "l2", "l3", "l4", "l5");
168 p1 += 8;
169 p2 += 8;
170 p3 += 8;
171 p4 += 8;
172 } while (--lines > 0);
173}
174
175static void
176sparc_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
177 unsigned long *p3, unsigned long *p4, unsigned long *p5)
178{
179 int lines = bytes / (sizeof (long)) / 8;
180
181 do {
182 __asm__ __volatile__(
183 "ldd [%0 + 0x00], %%g2\n\t"
184 "ldd [%0 + 0x08], %%g4\n\t"
185 "ldd [%0 + 0x10], %%o0\n\t"
186 "ldd [%0 + 0x18], %%o2\n\t"
187 "ldd [%1 + 0x00], %%o4\n\t"
188 "ldd [%1 + 0x08], %%l0\n\t"
189 "ldd [%1 + 0x10], %%l2\n\t"
190 "ldd [%1 + 0x18], %%l4\n\t"
191 "xor %%g2, %%o4, %%g2\n\t"
192 "xor %%g3, %%o5, %%g3\n\t"
193 "ldd [%2 + 0x00], %%o4\n\t"
194 "xor %%g4, %%l0, %%g4\n\t"
195 "xor %%g5, %%l1, %%g5\n\t"
196 "ldd [%2 + 0x08], %%l0\n\t"
197 "xor %%o0, %%l2, %%o0\n\t"
198 "xor %%o1, %%l3, %%o1\n\t"
199 "ldd [%2 + 0x10], %%l2\n\t"
200 "xor %%o2, %%l4, %%o2\n\t"
201 "xor %%o3, %%l5, %%o3\n\t"
202 "ldd [%2 + 0x18], %%l4\n\t"
203 "xor %%g2, %%o4, %%g2\n\t"
204 "xor %%g3, %%o5, %%g3\n\t"
205 "ldd [%3 + 0x00], %%o4\n\t"
206 "xor %%g4, %%l0, %%g4\n\t"
207 "xor %%g5, %%l1, %%g5\n\t"
208 "ldd [%3 + 0x08], %%l0\n\t"
209 "xor %%o0, %%l2, %%o0\n\t"
210 "xor %%o1, %%l3, %%o1\n\t"
211 "ldd [%3 + 0x10], %%l2\n\t"
212 "xor %%o2, %%l4, %%o2\n\t"
213 "xor %%o3, %%l5, %%o3\n\t"
214 "ldd [%3 + 0x18], %%l4\n\t"
215 "xor %%g2, %%o4, %%g2\n\t"
216 "xor %%g3, %%o5, %%g3\n\t"
217 "ldd [%4 + 0x00], %%o4\n\t"
218 "xor %%g4, %%l0, %%g4\n\t"
219 "xor %%g5, %%l1, %%g5\n\t"
220 "ldd [%4 + 0x08], %%l0\n\t"
221 "xor %%o0, %%l2, %%o0\n\t"
222 "xor %%o1, %%l3, %%o1\n\t"
223 "ldd [%4 + 0x10], %%l2\n\t"
224 "xor %%o2, %%l4, %%o2\n\t"
225 "xor %%o3, %%l5, %%o3\n\t"
226 "ldd [%4 + 0x18], %%l4\n\t"
227 "xor %%g2, %%o4, %%g2\n\t"
228 "xor %%g3, %%o5, %%g3\n\t"
229 "xor %%g4, %%l0, %%g4\n\t"
230 "xor %%g5, %%l1, %%g5\n\t"
231 "xor %%o0, %%l2, %%o0\n\t"
232 "xor %%o1, %%l3, %%o1\n\t"
233 "xor %%o2, %%l4, %%o2\n\t"
234 "xor %%o3, %%l5, %%o3\n\t"
235 "std %%g2, [%0 + 0x00]\n\t"
236 "std %%g4, [%0 + 0x08]\n\t"
237 "std %%o0, [%0 + 0x10]\n\t"
238 "std %%o2, [%0 + 0x18]\n"
239 :
240 : "r" (p1), "r" (p2), "r" (p3), "r" (p4), "r" (p5)
241 : "g2", "g3", "g4", "g5",
242 "o0", "o1", "o2", "o3", "o4", "o5",
243 "l0", "l1", "l2", "l3", "l4", "l5");
244 p1 += 8;
245 p2 += 8;
246 p3 += 8;
247 p4 += 8;
248 p5 += 8;
249 } while (--lines > 0);
250}
251
252static struct xor_block_template xor_block_SPARC = {
253 .name = "SPARC",
254 .do_2 = sparc_2,
255 .do_3 = sparc_3,
256 .do_4 = sparc_4,
257 .do_5 = sparc_5,
258};
259
260/* For grins, also test the generic routines. */
261#include <asm-generic/xor.h>
262
263#undef XOR_TRY_TEMPLATES
264#define XOR_TRY_TEMPLATES \
265 do { \
266 xor_speed(&xor_block_8regs); \
267 xor_speed(&xor_block_32regs); \
268 xor_speed(&xor_block_SPARC); \
269 } while (0)
diff --git a/include/asm-sparc/xor_64.h b/include/asm-sparc/xor_64.h
new file mode 100644
index 000000000000..a0233884fc94
--- /dev/null
+++ b/include/asm-sparc/xor_64.h
@@ -0,0 +1,70 @@
1/*
2 * include/asm-sparc64/xor.h
3 *
4 * High speed xor_block operation for RAID4/5 utilizing the
5 * UltraSparc Visual Instruction Set and Niagara block-init
6 * twin-load instructions.
7 *
8 * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
9 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * You should have received a copy of the GNU General Public License
17 * (for example /usr/src/linux/COPYING); if not, write to the Free
18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#include <asm/spitfire.h>
22
23extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
24extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
25 unsigned long *);
26extern void xor_vis_4(unsigned long, unsigned long *, unsigned long *,
27 unsigned long *, unsigned long *);
28extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *,
29 unsigned long *, unsigned long *, unsigned long *);
30
31/* XXX Ugh, write cheetah versions... -DaveM */
32
33static struct xor_block_template xor_block_VIS = {
34 .name = "VIS",
35 .do_2 = xor_vis_2,
36 .do_3 = xor_vis_3,
37 .do_4 = xor_vis_4,
38 .do_5 = xor_vis_5,
39};
40
41extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *);
42extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *,
43 unsigned long *);
44extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *,
45 unsigned long *, unsigned long *);
46extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *,
47 unsigned long *, unsigned long *, unsigned long *);
48
49static struct xor_block_template xor_block_niagara = {
50 .name = "Niagara",
51 .do_2 = xor_niagara_2,
52 .do_3 = xor_niagara_3,
53 .do_4 = xor_niagara_4,
54 .do_5 = xor_niagara_5,
55};
56
57#undef XOR_TRY_TEMPLATES
58#define XOR_TRY_TEMPLATES \
59 do { \
60 xor_speed(&xor_block_VIS); \
61 xor_speed(&xor_block_niagara); \
62 } while (0)
63
64/* Use VIS for everything except Niagara. */
65#define XOR_SELECT_TEMPLATE(FASTEST) \
66 ((tlb_type == hypervisor && \
67 (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 || \
68 sun4v_chip_type == SUN4V_CHIP_NIAGARA2)) ? \
69 &xor_block_niagara : \
70 &xor_block_VIS)
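For illustration (not part of the patch), a hedged sketch of how these templates end up being used: the generic crypto/xor.c code benchmarks them via XOR_TRY_TEMPLATES, and callers then go through xor_blocks(), which dispatches to the selected template. The wrapper name and buffer size below are illustrative only.

/* Hedged sketch: xor one page of `src' into `dest' via the selected
 * template. */
#include <linux/raid/xor.h>
#include <asm/page.h>

static void xor_one_page(void *dest, void *src)
{
	void *srcs[1] = { src };

	/* dest ^= src over PAGE_SIZE bytes. */
	xor_blocks(1, PAGE_SIZE, dest, srcs);
}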