aboutsummaryrefslogtreecommitdiffstats
path: root/include/asm-alpha
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
committerLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
commit1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-alpha
Linux-2.6.12-rc2v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'include/asm-alpha')
-rw-r--r--include/asm-alpha/8253pit.h10
-rw-r--r--include/asm-alpha/a.out.h106
-rw-r--r--include/asm-alpha/agp.h13
-rw-r--r--include/asm-alpha/agp_backend.h42
-rw-r--r--include/asm-alpha/atomic.h198
-rw-r--r--include/asm-alpha/bitops.h507
-rw-r--r--include/asm-alpha/bug.h15
-rw-r--r--include/asm-alpha/bugs.h20
-rw-r--r--include/asm-alpha/byteorder.h47
-rw-r--r--include/asm-alpha/cache.h25
-rw-r--r--include/asm-alpha/cacheflush.h74
-rw-r--r--include/asm-alpha/checksum.h77
-rw-r--r--include/asm-alpha/compiler.h103
-rw-r--r--include/asm-alpha/console.h75
-rw-r--r--include/asm-alpha/core_apecs.h517
-rw-r--r--include/asm-alpha/core_cia.h501
-rw-r--r--include/asm-alpha/core_irongate.h232
-rw-r--r--include/asm-alpha/core_lca.h361
-rw-r--r--include/asm-alpha/core_marvel.h378
-rw-r--r--include/asm-alpha/core_mcpcia.h379
-rw-r--r--include/asm-alpha/core_polaris.h110
-rw-r--r--include/asm-alpha/core_t2.h628
-rw-r--r--include/asm-alpha/core_titan.h415
-rw-r--r--include/asm-alpha/core_tsunami.h344
-rw-r--r--include/asm-alpha/core_wildfire.h318
-rw-r--r--include/asm-alpha/cputime.h6
-rw-r--r--include/asm-alpha/current.h9
-rw-r--r--include/asm-alpha/delay.h10
-rw-r--r--include/asm-alpha/div64.h1
-rw-r--r--include/asm-alpha/dma-mapping.h67
-rw-r--r--include/asm-alpha/dma.h377
-rw-r--r--include/asm-alpha/elf.h185
-rw-r--r--include/asm-alpha/err_common.h118
-rw-r--r--include/asm-alpha/err_ev6.h6
-rw-r--r--include/asm-alpha/err_ev7.h202
-rw-r--r--include/asm-alpha/errno.h119
-rw-r--r--include/asm-alpha/fcntl.h75
-rw-r--r--include/asm-alpha/floppy.h119
-rw-r--r--include/asm-alpha/fpu.h193
-rw-r--r--include/asm-alpha/gct.h58
-rw-r--r--include/asm-alpha/gentrap.h37
-rw-r--r--include/asm-alpha/hardirq.h29
-rw-r--r--include/asm-alpha/hdreg.h1
-rw-r--r--include/asm-alpha/hw_irq.h16
-rw-r--r--include/asm-alpha/hwrpb.h220
-rw-r--r--include/asm-alpha/ide.h61
-rw-r--r--include/asm-alpha/io.h682
-rw-r--r--include/asm-alpha/io_trivial.h127
-rw-r--r--include/asm-alpha/ioctl.h66
-rw-r--r--include/asm-alpha/ioctls.h112
-rw-r--r--include/asm-alpha/ipcbuf.h28
-rw-r--r--include/asm-alpha/irq.h100
-rw-r--r--include/asm-alpha/jensen.h346
-rw-r--r--include/asm-alpha/kmap_types.h33
-rw-r--r--include/asm-alpha/linkage.h6
-rw-r--r--include/asm-alpha/local.h40
-rw-r--r--include/asm-alpha/machvec.h136
-rw-r--r--include/asm-alpha/mc146818rtc.h27
-rw-r--r--include/asm-alpha/md.h13
-rw-r--r--include/asm-alpha/mman.h50
-rw-r--r--include/asm-alpha/mmu.h7
-rw-r--r--include/asm-alpha/mmu_context.h261
-rw-r--r--include/asm-alpha/mmzone.h131
-rw-r--r--include/asm-alpha/module.h23
-rw-r--r--include/asm-alpha/msgbuf.h27
-rw-r--r--include/asm-alpha/namei.h17
-rw-r--r--include/asm-alpha/numnodes.h7
-rw-r--r--include/asm-alpha/page.h115
-rw-r--r--include/asm-alpha/pal.h51
-rw-r--r--include/asm-alpha/param.h32
-rw-r--r--include/asm-alpha/parport.h18
-rw-r--r--include/asm-alpha/pci.h261
-rw-r--r--include/asm-alpha/percpu.h6
-rw-r--r--include/asm-alpha/pgalloc.h78
-rw-r--r--include/asm-alpha/pgtable.h369
-rw-r--r--include/asm-alpha/poll.h23
-rw-r--r--include/asm-alpha/posix_types.h123
-rw-r--r--include/asm-alpha/processor.h118
-rw-r--r--include/asm-alpha/ptrace.h82
-rw-r--r--include/asm-alpha/reg.h52
-rw-r--r--include/asm-alpha/regdef.h44
-rw-r--r--include/asm-alpha/resource.h22
-rw-r--r--include/asm-alpha/rtc.h10
-rw-r--r--include/asm-alpha/rwsem.h266
-rw-r--r--include/asm-alpha/scatterlist.h21
-rw-r--r--include/asm-alpha/sections.h7
-rw-r--r--include/asm-alpha/segment.h6
-rw-r--r--include/asm-alpha/semaphore.h153
-rw-r--r--include/asm-alpha/sembuf.h22
-rw-r--r--include/asm-alpha/serial.h75
-rw-r--r--include/asm-alpha/setup.h6
-rw-r--r--include/asm-alpha/sfp-machine.h82
-rw-r--r--include/asm-alpha/shmbuf.h38
-rw-r--r--include/asm-alpha/shmparam.h6
-rw-r--r--include/asm-alpha/sigcontext.h34
-rw-r--r--include/asm-alpha/siginfo.h11
-rw-r--r--include/asm-alpha/signal.h197
-rw-r--r--include/asm-alpha/smp.h63
-rw-r--r--include/asm-alpha/socket.h58
-rw-r--r--include/asm-alpha/sockios.h15
-rw-r--r--include/asm-alpha/spinlock.h212
-rw-r--r--include/asm-alpha/stat.h48
-rw-r--r--include/asm-alpha/statfs.h6
-rw-r--r--include/asm-alpha/string.h68
-rw-r--r--include/asm-alpha/suspend.h6
-rw-r--r--include/asm-alpha/sysinfo.h39
-rw-r--r--include/asm-alpha/system.h626
-rw-r--r--include/asm-alpha/termbits.h186
-rw-r--r--include/asm-alpha/termios.h164
-rw-r--r--include/asm-alpha/thread_info.h98
-rw-r--r--include/asm-alpha/timex.h31
-rw-r--r--include/asm-alpha/tlb.h15
-rw-r--r--include/asm-alpha/tlbflush.h158
-rw-r--r--include/asm-alpha/topology.h48
-rw-r--r--include/asm-alpha/types.h63
-rw-r--r--include/asm-alpha/uaccess.h517
-rw-r--r--include/asm-alpha/ucontext.h13
-rw-r--r--include/asm-alpha/unaligned.h6
-rw-r--r--include/asm-alpha/unistd.h656
-rw-r--r--include/asm-alpha/user.h53
-rw-r--r--include/asm-alpha/vga.h51
-rw-r--r--include/asm-alpha/xor.h855
122 files changed, 16026 insertions, 0 deletions
diff --git a/include/asm-alpha/8253pit.h b/include/asm-alpha/8253pit.h
new file mode 100644
index 000000000000..fef5c1450e47
--- /dev/null
+++ b/include/asm-alpha/8253pit.h
@@ -0,0 +1,10 @@
/*
 * 8253/8254 Programmable Interval Timer
 */

#ifndef _8253PIT_H
#define _8253PIT_H

/* Input clock of the i8253/i8254 PIT, in Hz (1.19318 MHz). */
#define PIT_TICK_RATE	1193180UL

#endif
diff --git a/include/asm-alpha/a.out.h b/include/asm-alpha/a.out.h
new file mode 100644
index 000000000000..d97daf42753d
--- /dev/null
+++ b/include/asm-alpha/a.out.h
@@ -0,0 +1,106 @@
#ifndef __ALPHA_A_OUT_H__
#define __ALPHA_A_OUT_H__

#include <linux/types.h>

/*
 * OSF/1 ECOFF header structs.  ECOFF files consist of:
 *	- a file header (struct filehdr),
 *	- an a.out header (struct aouthdr),
 *	- one or more section headers (struct scnhdr).
 * The filehdr's "f_nscns" field contains the
 * number of section headers.
 */

struct filehdr
{
	/* OSF/1 "file" header */
	__u16 f_magic, f_nscns;
	__u32 f_timdat;
	__u64 f_symptr;
	__u32 f_nsyms;
	__u16 f_opthdr, f_flags;
};

struct aouthdr
{
	__u64 info;		/* after that it looks quite normal.. */
	__u64 tsize;
	__u64 dsize;
	__u64 bsize;
	__u64 entry;
	__u64 text_start;	/* with a few additions that actually make sense */
	__u64 data_start;
	__u64 bss_start;
	__u32 gprmask, fprmask;	/* bitmask of general & floating point regs used in binary */
	__u64 gpvalue;
};

struct scnhdr
{
	char	s_name[8];
	__u64	s_paddr;
	__u64	s_vaddr;
	__u64	s_size;
	__u64	s_scnptr;
	__u64	s_relptr;
	__u64	s_lnnoptr;
	__u16	s_nreloc;
	__u16	s_nlnno;
	__u32	s_flags;
};

struct exec
{
	/* OSF/1 "file" header */
	struct filehdr fh;
	struct aouthdr ah;
};

/*
 * Define's so that the kernel exec code can access the a.out header
 * fields...
 */
#define a_info		ah.info
#define a_text		ah.tsize
#define a_data		ah.dsize
#define a_bss		ah.bsize
#define a_entry		ah.entry
#define a_textstart	ah.text_start
#define a_datastart	ah.data_start
#define a_bssstart	ah.bss_start
#define a_gprmask	ah.gprmask
#define a_fprmask	ah.fprmask
#define a_gpvalue	ah.gpvalue

#define N_TXTADDR(x) ((x).a_textstart)
#define N_DATADDR(x) ((x).a_datastart)
#define N_BSSADDR(x) ((x).a_bssstart)
#define N_DRSIZE(x) 0
#define N_TRSIZE(x) 0
#define N_SYMSIZE(x) 0

#define AOUTHSZ		sizeof(struct aouthdr)
#define SCNHSZ		sizeof(struct scnhdr)
#define SCNROUND	16

/* File offset of the text segment: 0 for demand-paged (ZMAGIC)
   binaries, otherwise the header area rounded up to SCNROUND.
   N_MAGIC and ZMAGIC come from <linux/a.out.h>.  */
#define N_TXTOFF(x) \
  ((long) N_MAGIC(x) == ZMAGIC ? 0 : \
   (sizeof(struct exec) + (x).fh.f_nscns*SCNHSZ + SCNROUND - 1) & ~(SCNROUND - 1))

#ifdef __KERNEL__

/* Assume that start addresses below 4G belong to a TASO application.
   Unfortunately, there is no proper bit in the exec header to check.
   Worse, we have to notice the start address before swapping to use
   /sbin/loader, which of course is _not_ a TASO application.  */
#define SET_AOUT_PERSONALITY(BFPM, EX) \
	set_personality (((BFPM->sh_bang || EX.ah.entry < 0x100000000L \
			   ? ADDR_LIMIT_32BIT : 0) | PER_OSF4))

#define STACK_TOP \
  (current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL)

#endif

#endif /* __ALPHA_A_OUT_H__ */
diff --git a/include/asm-alpha/agp.h b/include/asm-alpha/agp.h
new file mode 100644
index 000000000000..c99dbbb5bcb5
--- /dev/null
+++ b/include/asm-alpha/agp.h
@@ -0,0 +1,13 @@
1#ifndef AGP_H
2#define AGP_H 1
3
4#include <asm/io.h>
5
6/* dummy for now */
7
8#define map_page_into_agp(page)
9#define unmap_page_from_agp(page)
10#define flush_agp_mappings()
11#define flush_agp_cache() mb()
12
13#endif
diff --git a/include/asm-alpha/agp_backend.h b/include/asm-alpha/agp_backend.h
new file mode 100644
index 000000000000..55dd44a2cea7
--- /dev/null
+++ b/include/asm-alpha/agp_backend.h
@@ -0,0 +1,42 @@
1#ifndef _ALPHA_AGP_BACKEND_H
2#define _ALPHA_AGP_BACKEND_H 1
3
4typedef union _alpha_agp_mode {
5 struct {
6 u32 rate : 3;
7 u32 reserved0 : 1;
8 u32 fw : 1;
9 u32 fourgb : 1;
10 u32 reserved1 : 2;
11 u32 enable : 1;
12 u32 sba : 1;
13 u32 reserved2 : 14;
14 u32 rq : 8;
15 } bits;
16 u32 lw;
17} alpha_agp_mode;
18
19typedef struct _alpha_agp_info {
20 struct pci_controller *hose;
21 struct {
22 dma_addr_t bus_base;
23 unsigned long size;
24 void *sysdata;
25 } aperture;
26 alpha_agp_mode capability;
27 alpha_agp_mode mode;
28 void *private;
29 struct alpha_agp_ops *ops;
30} alpha_agp_info;
31
32struct alpha_agp_ops {
33 int (*setup)(alpha_agp_info *);
34 void (*cleanup)(alpha_agp_info *);
35 int (*configure)(alpha_agp_info *);
36 int (*bind)(alpha_agp_info *, off_t, struct agp_memory *);
37 int (*unbind)(alpha_agp_info *, off_t, struct agp_memory *);
38 unsigned long (*translate)(alpha_agp_info *, dma_addr_t);
39};
40
41
42#endif /* _ALPHA_AGP_BACKEND_H */
diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
new file mode 100644
index 000000000000..1b383e3cb68c
--- /dev/null
+++ b/include/asm-alpha/atomic.h
@@ -0,0 +1,198 @@
#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */


/*
 * Counter is volatile to make sure gcc doesn't try to be clever
 * and move things around on us. We need to use _exactly_ the address
 * the user gave us, not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile long counter; } atomic64_t;

#define ATOMIC_INIT(i)		( (atomic_t) { (i) } )
#define ATOMIC64_INIT(i)	( (atomic64_t) { (i) } )

/* The "+ 0" forces an rvalue read of the volatile counter.  */
#define atomic_read(v)		((v)->counter + 0)
#define atomic64_read(v)	((v)->counter + 0)

#define atomic_set(v,i)		((v)->counter = (i))
#define atomic64_set(v,i)	((v)->counter = (i))

/*
 * All of the ll/sc loops below put the store-conditional retry branch
 * in .subsection 2, out of line: the main path falls straight through
 * on success, which is what the branch predictor assumes.
 */

static __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long tmp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (tmp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_add(long i, atomic64_t * v)
{
	unsigned long tmp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (tmp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic_sub(int i, atomic_t * v)
{
	unsigned long tmp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (tmp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_sub(long i, atomic64_t * v)
{
	unsigned long tmp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (tmp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}


/*
 * Same as above, but return the result value.  These issue an "mb"
 * after the successful store-conditional so the update is ordered
 * with respect to subsequent memory operations.
 */
static __inline__ long atomic_add_return(int i, atomic_t * v)
{
	long tmp, result;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%3,%2\n"
	"	addl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (tmp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	return result;
}

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)

static __inline__ long atomic64_add_return(long i, atomic64_t * v)
{
	long tmp, result;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%3,%2\n"
	"	addq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (tmp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	return result;
}

static __inline__ long atomic_sub_return(int i, atomic_t * v)
{
	long tmp, result;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%3,%2\n"
	"	subl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (tmp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	return result;
}

static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
{
	long tmp, result;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%3,%2\n"
	"	subq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (tmp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	return result;
}

/* Convenience wrappers, all in terms of the primitives above.  */
#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic64_inc(v) atomic64_add(1,(v))

#define atomic_dec(v) atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#endif /* _ALPHA_ATOMIC_H */
diff --git a/include/asm-alpha/bitops.h b/include/asm-alpha/bitops.h
new file mode 100644
index 000000000000..578ed3f1a607
--- /dev/null
+++ b/include/asm-alpha/bitops.h
@@ -0,0 +1,507 @@
1#ifndef _ALPHA_BITOPS_H
2#define _ALPHA_BITOPS_H
3
4#include <linux/config.h>
5#include <asm/compiler.h>
6
7/*
8 * Copyright 1994, Linus Torvalds.
9 */
10
11/*
12 * These have to be done with inline assembly: that way the bit-setting
13 * is guaranteed to be atomic. All bit operations return 0 if the bit
14 * was cleared before the operation and != 0 if it was not.
15 *
16 * To get proper branch prediction for the main line, we must branch
17 * forward to code at the end of this object's .text section, then
18 * branch back to restart the operation.
19 *
20 * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1).
21 */
22
23static inline void
24set_bit(unsigned long nr, volatile void * addr)
25{
26 unsigned long temp;
27 int *m = ((int *) addr) + (nr >> 5);
28
29 __asm__ __volatile__(
30 "1: ldl_l %0,%3\n"
31 " bis %0,%2,%0\n"
32 " stl_c %0,%1\n"
33 " beq %0,2f\n"
34 ".subsection 2\n"
35 "2: br 1b\n"
36 ".previous"
37 :"=&r" (temp), "=m" (*m)
38 :"Ir" (1UL << (nr & 31)), "m" (*m));
39}
40
41/*
42 * WARNING: non atomic version.
43 */
44static inline void
45__set_bit(unsigned long nr, volatile void * addr)
46{
47 int *m = ((int *) addr) + (nr >> 5);
48
49 *m |= 1 << (nr & 31);
50}
51
52#define smp_mb__before_clear_bit() smp_mb()
53#define smp_mb__after_clear_bit() smp_mb()
54
55static inline void
56clear_bit(unsigned long nr, volatile void * addr)
57{
58 unsigned long temp;
59 int *m = ((int *) addr) + (nr >> 5);
60
61 __asm__ __volatile__(
62 "1: ldl_l %0,%3\n"
63 " bic %0,%2,%0\n"
64 " stl_c %0,%1\n"
65 " beq %0,2f\n"
66 ".subsection 2\n"
67 "2: br 1b\n"
68 ".previous"
69 :"=&r" (temp), "=m" (*m)
70 :"Ir" (1UL << (nr & 31)), "m" (*m));
71}
72
73/*
74 * WARNING: non atomic version.
75 */
76static __inline__ void
77__clear_bit(unsigned long nr, volatile void * addr)
78{
79 int *m = ((int *) addr) + (nr >> 5);
80
81 *m &= ~(1 << (nr & 31));
82}
83
84static inline void
85change_bit(unsigned long nr, volatile void * addr)
86{
87 unsigned long temp;
88 int *m = ((int *) addr) + (nr >> 5);
89
90 __asm__ __volatile__(
91 "1: ldl_l %0,%3\n"
92 " xor %0,%2,%0\n"
93 " stl_c %0,%1\n"
94 " beq %0,2f\n"
95 ".subsection 2\n"
96 "2: br 1b\n"
97 ".previous"
98 :"=&r" (temp), "=m" (*m)
99 :"Ir" (1UL << (nr & 31)), "m" (*m));
100}
101
102/*
103 * WARNING: non atomic version.
104 */
105static __inline__ void
106__change_bit(unsigned long nr, volatile void * addr)
107{
108 int *m = ((int *) addr) + (nr >> 5);
109
110 *m ^= 1 << (nr & 31);
111}
112
113static inline int
114test_and_set_bit(unsigned long nr, volatile void *addr)
115{
116 unsigned long oldbit;
117 unsigned long temp;
118 int *m = ((int *) addr) + (nr >> 5);
119
120 __asm__ __volatile__(
121 "1: ldl_l %0,%4\n"
122 " and %0,%3,%2\n"
123 " bne %2,2f\n"
124 " xor %0,%3,%0\n"
125 " stl_c %0,%1\n"
126 " beq %0,3f\n"
127 "2:\n"
128#ifdef CONFIG_SMP
129 " mb\n"
130#endif
131 ".subsection 2\n"
132 "3: br 1b\n"
133 ".previous"
134 :"=&r" (temp), "=m" (*m), "=&r" (oldbit)
135 :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");
136
137 return oldbit != 0;
138}
139
140/*
141 * WARNING: non atomic version.
142 */
143static inline int
144__test_and_set_bit(unsigned long nr, volatile void * addr)
145{
146 unsigned long mask = 1 << (nr & 0x1f);
147 int *m = ((int *) addr) + (nr >> 5);
148 int old = *m;
149
150 *m = old | mask;
151 return (old & mask) != 0;
152}
153
154static inline int
155test_and_clear_bit(unsigned long nr, volatile void * addr)
156{
157 unsigned long oldbit;
158 unsigned long temp;
159 int *m = ((int *) addr) + (nr >> 5);
160
161 __asm__ __volatile__(
162 "1: ldl_l %0,%4\n"
163 " and %0,%3,%2\n"
164 " beq %2,2f\n"
165 " xor %0,%3,%0\n"
166 " stl_c %0,%1\n"
167 " beq %0,3f\n"
168 "2:\n"
169#ifdef CONFIG_SMP
170 " mb\n"
171#endif
172 ".subsection 2\n"
173 "3: br 1b\n"
174 ".previous"
175 :"=&r" (temp), "=m" (*m), "=&r" (oldbit)
176 :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");
177
178 return oldbit != 0;
179}
180
181/*
182 * WARNING: non atomic version.
183 */
184static inline int
185__test_and_clear_bit(unsigned long nr, volatile void * addr)
186{
187 unsigned long mask = 1 << (nr & 0x1f);
188 int *m = ((int *) addr) + (nr >> 5);
189 int old = *m;
190
191 *m = old & ~mask;
192 return (old & mask) != 0;
193}
194
195static inline int
196test_and_change_bit(unsigned long nr, volatile void * addr)
197{
198 unsigned long oldbit;
199 unsigned long temp;
200 int *m = ((int *) addr) + (nr >> 5);
201
202 __asm__ __volatile__(
203 "1: ldl_l %0,%4\n"
204 " and %0,%3,%2\n"
205 " xor %0,%3,%0\n"
206 " stl_c %0,%1\n"
207 " beq %0,3f\n"
208#ifdef CONFIG_SMP
209 " mb\n"
210#endif
211 ".subsection 2\n"
212 "3: br 1b\n"
213 ".previous"
214 :"=&r" (temp), "=m" (*m), "=&r" (oldbit)
215 :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");
216
217 return oldbit != 0;
218}
219
220/*
221 * WARNING: non atomic version.
222 */
223static __inline__ int
224__test_and_change_bit(unsigned long nr, volatile void * addr)
225{
226 unsigned long mask = 1 << (nr & 0x1f);
227 int *m = ((int *) addr) + (nr >> 5);
228 int old = *m;
229
230 *m = old ^ mask;
231 return (old & mask) != 0;
232}
233
234static inline int
235test_bit(int nr, const volatile void * addr)
236{
237 return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
238}
239
240/*
241 * ffz = Find First Zero in word. Undefined if no zero exists,
242 * so code should check against ~0UL first..
243 *
244 * Do a binary search on the bits. Due to the nature of large
245 * constants on the alpha, it is worthwhile to split the search.
246 */
247static inline unsigned long ffz_b(unsigned long x)
248{
249 unsigned long sum, x1, x2, x4;
250
251 x = ~x & -~x; /* set first 0 bit, clear others */
252 x1 = x & 0xAA;
253 x2 = x & 0xCC;
254 x4 = x & 0xF0;
255 sum = x2 ? 2 : 0;
256 sum += (x4 != 0) * 4;
257 sum += (x1 != 0);
258
259 return sum;
260}
261
262static inline unsigned long ffz(unsigned long word)
263{
264#if defined(__alpha_cix__) && defined(__alpha_fix__)
265 /* Whee. EV67 can calculate it directly. */
266 return __kernel_cttz(~word);
267#else
268 unsigned long bits, qofs, bofs;
269
270 bits = __kernel_cmpbge(word, ~0UL);
271 qofs = ffz_b(bits);
272 bits = __kernel_extbl(word, qofs);
273 bofs = ffz_b(bits);
274
275 return qofs*8 + bofs;
276#endif
277}
278
279/*
280 * __ffs = Find First set bit in word. Undefined if no set bit exists.
281 */
282static inline unsigned long __ffs(unsigned long word)
283{
284#if defined(__alpha_cix__) && defined(__alpha_fix__)
285 /* Whee. EV67 can calculate it directly. */
286 return __kernel_cttz(word);
287#else
288 unsigned long bits, qofs, bofs;
289
290 bits = __kernel_cmpbge(0, word);
291 qofs = ffz_b(bits);
292 bits = __kernel_extbl(word, qofs);
293 bofs = ffz_b(~bits);
294
295 return qofs*8 + bofs;
296#endif
297}
298
299#ifdef __KERNEL__
300
301/*
302 * ffs: find first bit set. This is defined the same way as
303 * the libc and compiler builtin ffs routines, therefore
304 * differs in spirit from the above __ffs.
305 */
306
307static inline int ffs(int word)
308{
309 int result = __ffs(word) + 1;
310 return word ? result : 0;
311}
312
313/*
314 * fls: find last bit set.
315 */
316#if defined(__alpha_cix__) && defined(__alpha_fix__)
317static inline int fls(int word)
318{
319 return 64 - __kernel_ctlz(word & 0xffffffff);
320}
321#else
322#define fls generic_fls
323#endif
324
325/* Compute powers of two for the given integer. */
326static inline long floor_log2(unsigned long word)
327{
328#if defined(__alpha_cix__) && defined(__alpha_fix__)
329 return 63 - __kernel_ctlz(word);
330#else
331 long bit;
332 for (bit = -1; word ; bit++)
333 word >>= 1;
334 return bit;
335#endif
336}
337
338static inline long ceil_log2(unsigned long word)
339{
340 long bit = floor_log2(word);
341 return bit + (word > (1UL << bit));
342}
343
344/*
345 * hweightN: returns the hamming weight (i.e. the number
346 * of bits set) of a N-bit word
347 */
348
349#if defined(__alpha_cix__) && defined(__alpha_fix__)
350/* Whee. EV67 can calculate it directly. */
351static inline unsigned long hweight64(unsigned long w)
352{
353 return __kernel_ctpop(w);
354}
355
356#define hweight32(x) (unsigned int) hweight64((x) & 0xfffffffful)
357#define hweight16(x) (unsigned int) hweight64((x) & 0xfffful)
358#define hweight8(x) (unsigned int) hweight64((x) & 0xfful)
359#else
360static inline unsigned long hweight64(unsigned long w)
361{
362 unsigned long result;
363 for (result = 0; w ; w >>= 1)
364 result += (w & 1);
365 return result;
366}
367
368#define hweight32(x) generic_hweight32(x)
369#define hweight16(x) generic_hweight16(x)
370#define hweight8(x) generic_hweight8(x)
371#endif
372
373#endif /* __KERNEL__ */
374
375/*
376 * Find next zero bit in a bitmap reasonably efficiently..
377 */
378static inline unsigned long
379find_next_zero_bit(const void *addr, unsigned long size, unsigned long offset)
380{
381 const unsigned long *p = addr;
382 unsigned long result = offset & ~63UL;
383 unsigned long tmp;
384
385 p += offset >> 6;
386 if (offset >= size)
387 return size;
388 size -= result;
389 offset &= 63UL;
390 if (offset) {
391 tmp = *(p++);
392 tmp |= ~0UL >> (64-offset);
393 if (size < 64)
394 goto found_first;
395 if (~tmp)
396 goto found_middle;
397 size -= 64;
398 result += 64;
399 }
400 while (size & ~63UL) {
401 if (~(tmp = *(p++)))
402 goto found_middle;
403 result += 64;
404 size -= 64;
405 }
406 if (!size)
407 return result;
408 tmp = *p;
409 found_first:
410 tmp |= ~0UL << size;
411 if (tmp == ~0UL) /* Are any bits zero? */
412 return result + size; /* Nope. */
413 found_middle:
414 return result + ffz(tmp);
415}
416
417/*
418 * Find next one bit in a bitmap reasonably efficiently.
419 */
420static inline unsigned long
421find_next_bit(const void * addr, unsigned long size, unsigned long offset)
422{
423 const unsigned long *p = addr;
424 unsigned long result = offset & ~63UL;
425 unsigned long tmp;
426
427 p += offset >> 6;
428 if (offset >= size)
429 return size;
430 size -= result;
431 offset &= 63UL;
432 if (offset) {
433 tmp = *(p++);
434 tmp &= ~0UL << offset;
435 if (size < 64)
436 goto found_first;
437 if (tmp)
438 goto found_middle;
439 size -= 64;
440 result += 64;
441 }
442 while (size & ~63UL) {
443 if ((tmp = *(p++)))
444 goto found_middle;
445 result += 64;
446 size -= 64;
447 }
448 if (!size)
449 return result;
450 tmp = *p;
451 found_first:
452 tmp &= ~0UL >> (64 - size);
453 if (!tmp)
454 return result + size;
455 found_middle:
456 return result + __ffs(tmp);
457}
458
459/*
460 * The optimizer actually does good code for this case.
461 */
462#define find_first_zero_bit(addr, size) \
463 find_next_zero_bit((addr), (size), 0)
464#define find_first_bit(addr, size) \
465 find_next_bit((addr), (size), 0)
466
467#ifdef __KERNEL__
468
469/*
470 * Every architecture must define this function. It's the fastest
471 * way of searching a 140-bit bitmap where the first 100 bits are
472 * unlikely to be set. It's guaranteed that at least one of the 140
473 * bits is set.
474 */
475static inline unsigned long
476sched_find_first_bit(unsigned long b[3])
477{
478 unsigned long b0 = b[0], b1 = b[1], b2 = b[2];
479 unsigned long ofs;
480
481 ofs = (b1 ? 64 : 128);
482 b1 = (b1 ? b1 : b2);
483 ofs = (b0 ? 0 : ofs);
484 b0 = (b0 ? b0 : b1);
485
486 return __ffs(b0) + ofs;
487}
488
489
490#define ext2_set_bit __test_and_set_bit
491#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a)
492#define ext2_clear_bit __test_and_clear_bit
493#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a)
494#define ext2_test_bit test_bit
495#define ext2_find_first_zero_bit find_first_zero_bit
496#define ext2_find_next_zero_bit find_next_zero_bit
497
498/* Bitmap functions for the minix filesystem. */
499#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
500#define minix_set_bit(nr,addr) __set_bit(nr,addr)
501#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
502#define minix_test_bit(nr,addr) test_bit(nr,addr)
503#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
504
505#endif /* __KERNEL__ */
506
507#endif /* _ALPHA_BITOPS_H */
diff --git a/include/asm-alpha/bug.h b/include/asm-alpha/bug.h
new file mode 100644
index 000000000000..ae1e0a5fa492
--- /dev/null
+++ b/include/asm-alpha/bug.h
@@ -0,0 +1,15 @@
1#ifndef _ALPHA_BUG_H
2#define _ALPHA_BUG_H
3
4#include <asm/pal.h>
5
6/* ??? Would be nice to use .gprel32 here, but we can't be sure that the
7 function loaded the GP, so this could fail in modules. */
8#define BUG() \
9 __asm__ __volatile__("call_pal %0 # bugchk\n\t"".long %1\n\t.8byte %2" \
10 : : "i" (PAL_bugchk), "i"(__LINE__), "i"(__FILE__))
11
12#define HAVE_ARCH_BUG
13#include <asm-generic/bug.h>
14
15#endif
diff --git a/include/asm-alpha/bugs.h b/include/asm-alpha/bugs.h
new file mode 100644
index 000000000000..78030d1c7e7e
--- /dev/null
+++ b/include/asm-alpha/bugs.h
@@ -0,0 +1,20 @@
/*
 * include/asm-alpha/bugs.h
 *
 * Copyright (C) 1994  Linus Torvalds
 */

/*
 * This is included by init/main.c to check for architecture-dependent bugs.
 *
 * Needs:
 *	void check_bugs(void);
 */

/*
 * I don't know of any alpha bugs yet..  Nice chip
 */

static void check_bugs(void)
{
	/* Nothing to work around on Alpha.  */
}
diff --git a/include/asm-alpha/byteorder.h b/include/asm-alpha/byteorder.h
new file mode 100644
index 000000000000..7af2b8d25486
--- /dev/null
+++ b/include/asm-alpha/byteorder.h
@@ -0,0 +1,47 @@
1#ifndef _ALPHA_BYTEORDER_H
2#define _ALPHA_BYTEORDER_H
3
4#include <asm/types.h>
5#include <linux/compiler.h>
6#include <asm/compiler.h>
7
8#ifdef __GNUC__
9
10static __inline __attribute_const__ __u32 __arch__swab32(__u32 x)
11{
12 /*
13 * Unfortunately, we can't use the 6 instruction sequence
14 * on ev6 since the latency of the UNPKBW is 3, which is
15 * pretty hard to hide. Just in case a future implementation
16 * has a lower latency, here's the sequence (also by Mike Burrows)
17 *
18 * UNPKBW a0, v0 v0: 00AA00BB00CC00DD
19 * SLL v0, 24, a0 a0: BB00CC00DD000000
20 * BIS v0, a0, a0 a0: BBAACCBBDDCC00DD
21 * EXTWL a0, 6, v0 v0: 000000000000BBAA
22 * ZAP a0, 0xf3, a0 a0: 00000000DDCC0000
23 * ADDL a0, v0, v0 v0: ssssssssDDCCBBAA
24 */
25
26 __u64 t0, t1, t2, t3;
27
28 t0 = __kernel_inslh(x, 7); /* t0 : 0000000000AABBCC */
29 t1 = __kernel_inswl(x, 3); /* t1 : 000000CCDD000000 */
30 t1 |= t0; /* t1 : 000000CCDDAABBCC */
31 t2 = t1 >> 16; /* t2 : 0000000000CCDDAA */
32 t0 = t1 & 0xFF00FF00; /* t0 : 00000000DD00BB00 */
33 t3 = t2 & 0x00FF00FF; /* t3 : 0000000000CC00AA */
34 t1 = t0 + t3; /* t1 : ssssssssDDCCBBAA */
35
36 return t1;
37}
38
39#define __arch__swab32 __arch__swab32
40
41#endif /* __GNUC__ */
42
43#define __BYTEORDER_HAS_U64__
44
45#include <linux/byteorder/little_endian.h>
46
47#endif /* _ALPHA_BYTEORDER_H */
diff --git a/include/asm-alpha/cache.h b/include/asm-alpha/cache.h
new file mode 100644
index 000000000000..e69b29501a5f
--- /dev/null
+++ b/include/asm-alpha/cache.h
@@ -0,0 +1,25 @@
1/*
2 * include/asm-alpha/cache.h
3 */
4#ifndef __ARCH_ALPHA_CACHE_H
5#define __ARCH_ALPHA_CACHE_H
6
7#include <linux/config.h>
8
9/* Bytes per L1 (data) cache line. */
10#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
11# define L1_CACHE_BYTES 64
12# define L1_CACHE_SHIFT 6
13#else
14/* Both EV4 and EV5 are write-through, read-allocate,
15 direct-mapped, physical.
16*/
17# define L1_CACHE_BYTES 32
18# define L1_CACHE_SHIFT 5
19#endif
20
21#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
22#define SMP_CACHE_BYTES L1_CACHE_BYTES
23#define L1_CACHE_SHIFT_MAX L1_CACHE_SHIFT
24
25#endif
diff --git a/include/asm-alpha/cacheflush.h b/include/asm-alpha/cacheflush.h
new file mode 100644
index 000000000000..3fc6ef726d8c
--- /dev/null
+++ b/include/asm-alpha/cacheflush.h
@@ -0,0 +1,74 @@
1#ifndef _ALPHA_CACHEFLUSH_H
2#define _ALPHA_CACHEFLUSH_H
3
4#include <linux/config.h>
5#include <linux/mm.h>
6
7/* Caches aren't brain-dead on the Alpha. */
8#define flush_cache_all() do { } while (0)
9#define flush_cache_mm(mm) do { } while (0)
10#define flush_cache_range(vma, start, end) do { } while (0)
11#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
12#define flush_dcache_page(page) do { } while (0)
13#define flush_dcache_mmap_lock(mapping) do { } while (0)
14#define flush_dcache_mmap_unlock(mapping) do { } while (0)
15#define flush_cache_vmap(start, end) do { } while (0)
16#define flush_cache_vunmap(start, end) do { } while (0)
17
18/* Note that the following two definitions are _highly_ dependent
19 on the contexts in which they are used in the kernel. I personally
20 think it is criminal how loosely defined these macros are. */
21
22/* We need to flush the kernel's icache after loading modules. The
23 only other use of this macro is in load_aout_interp which is not
24 used on Alpha.
25
26 Note that this definition should *not* be used for userspace
27 icache flushing. While functional, it is _way_ overkill. The
28 icache is tagged with ASNs and it suffices to allocate a new ASN
29 for the process. */
30#ifndef CONFIG_SMP
31#define flush_icache_range(start, end) imb()
32#else
33#define flush_icache_range(start, end) smp_imb()
34extern void smp_imb(void);
35#endif
36
37/* We need to flush the userspace icache after setting breakpoints in
38 ptrace.
39
40 Instead of indiscriminately using imb, take advantage of the fact
41 that icache entries are tagged with the ASN and load a new mm context. */
42/* ??? Ought to use this in arch/alpha/kernel/signal.c too. */
43
44#ifndef CONFIG_SMP
45extern void __load_new_mm_context(struct mm_struct *);
/* UP version: make user-visible instruction-stream changes (e.g. ptrace
   breakpoints) take effect without an indiscriminate imb().

   Only executable mappings (VM_EXEC) need any work.  Since icache
   entries are tagged with the ASN, it suffices to switch the mm to a
   new context: if the mm is live on this CPU, load a fresh context
   right away; otherwise zero the saved per-CPU context slot so a new
   ASN is allocated the next time the mm is activated.  The 'page',
   'addr' and 'len' arguments are unused here -- the whole address
   space is revalidated at once. */
static inline void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	if (vma->vm_flags & VM_EXEC) {
		struct mm_struct *mm = vma->vm_mm;
		if (current->active_mm == mm)
			__load_new_mm_context(mm);
		else
			mm->context[smp_processor_id()] = 0;
	}
}
58#else
59extern void flush_icache_user_range(struct vm_area_struct *vma,
60 struct page *page, unsigned long addr, int len);
61#endif
62
63/* This is used only in do_no_page and do_swap_page. */
64#define flush_icache_page(vma, page) \
65 flush_icache_user_range((vma), (page), 0, 0)
66
67#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
68do { memcpy(dst, src, len); \
69 flush_icache_user_range(vma, page, vaddr, len); \
70} while (0)
71#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
72 memcpy(dst, src, len)
73
74#endif /* _ALPHA_CACHEFLUSH_H */
diff --git a/include/asm-alpha/checksum.h b/include/asm-alpha/checksum.h
new file mode 100644
index 000000000000..a5c9f08447fb
--- /dev/null
+++ b/include/asm-alpha/checksum.h
@@ -0,0 +1,77 @@
1#ifndef _ALPHA_CHECKSUM_H
2#define _ALPHA_CHECKSUM_H
3
4#include <linux/in6.h>
5
6/*
7 * This is a version of ip_compute_csum() optimized for IP headers,
8 * which always checksum on 4 octet boundaries.
9 */
10extern unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl);
11
12/*
13 * computes the checksum of the TCP/UDP pseudo-header
14 * returns a 16-bit checksum, already complemented
15 */
16extern unsigned short int csum_tcpudp_magic(unsigned long saddr,
17 unsigned long daddr,
18 unsigned short len,
19 unsigned short proto,
20 unsigned int sum);
21
22unsigned int csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
23 unsigned short len, unsigned short proto,
24 unsigned int sum);
25
26/*
27 * computes the checksum of a memory block at buff, length len,
28 * and adds in "sum" (32-bit)
29 *
30 * returns a 32-bit number suitable for feeding into itself
31 * or csum_tcpudp_magic
32 *
33 * this function must be called with even lengths, except
34 * for the last fragment, which may be odd
35 *
36 * it's best to have buff aligned on a 32-bit boundary
37 */
38extern unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
39
40/*
41 * the same as csum_partial, but copies from src while it
42 * checksums
43 *
44 * here even more important to align src and dst on a 32-bit (or even
45 * better 64-bit) boundary
46 */
47unsigned int csum_partial_copy_from_user(const char __user *src, char *dst, int len, unsigned int sum, int *errp);
48
49unsigned int csum_partial_copy_nocheck(const char *src, char *dst, int len, unsigned int sum);
50
51
52/*
53 * this routine is used for miscellaneous IP-like checksums, mainly
54 * in icmp.c
55 */
56
57extern unsigned short ip_compute_csum(unsigned char * buff, int len);
58
59/*
60 * Fold a partial checksum without adding pseudo headers
61 */
62
/*
 * Fold a 32-bit partial checksum down to 16 bits and return its
 * one's complement.  Two folding steps are needed because adding the
 * two halves can itself carry back into the upper half; the final
 * cast to unsigned short discards anything above bit 15.
 */
static inline unsigned short csum_fold(unsigned int sum)
{
	unsigned int folded;

	folded = (sum >> 16) + (sum & 0xffff);
	folded += folded >> 16;
	return (unsigned short) ~folded;
}
69
70#define _HAVE_ARCH_IPV6_CSUM
71extern unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
72 struct in6_addr *daddr,
73 __u32 len,
74 unsigned short proto,
75 unsigned int sum);
76
77#endif
diff --git a/include/asm-alpha/compiler.h b/include/asm-alpha/compiler.h
new file mode 100644
index 000000000000..399c33b7be51
--- /dev/null
+++ b/include/asm-alpha/compiler.h
@@ -0,0 +1,103 @@
1#ifndef __ALPHA_COMPILER_H
2#define __ALPHA_COMPILER_H
3
4/*
5 * Herein are macros we use when describing various patterns we want to GCC.
6 * In all cases we can get better schedules out of the compiler if we hide
7 * as little as possible inside inline assembly. However, we want to be
8 * able to know what we'll get out before giving up inline assembly. Thus
9 * these tests and macros.
10 */
11
12#if __GNUC__ == 3 && __GNUC_MINOR__ >= 4 || __GNUC__ > 3
13# define __kernel_insbl(val, shift) __builtin_alpha_insbl(val, shift)
14# define __kernel_inswl(val, shift) __builtin_alpha_inswl(val, shift)
15# define __kernel_insql(val, shift) __builtin_alpha_insql(val, shift)
16# define __kernel_inslh(val, shift) __builtin_alpha_inslh(val, shift)
17# define __kernel_extbl(val, shift) __builtin_alpha_extbl(val, shift)
18# define __kernel_extwl(val, shift) __builtin_alpha_extwl(val, shift)
19# define __kernel_cmpbge(a, b) __builtin_alpha_cmpbge(a, b)
20# define __kernel_cttz(x) __builtin_ctzl(x)
21# define __kernel_ctlz(x) __builtin_clzl(x)
22# define __kernel_ctpop(x) __builtin_popcountl(x)
23#else
24# define __kernel_insbl(val, shift) \
25 ({ unsigned long __kir; \
26 __asm__("insbl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
27 __kir; })
28# define __kernel_inswl(val, shift) \
29 ({ unsigned long __kir; \
30 __asm__("inswl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
31 __kir; })
32# define __kernel_insql(val, shift) \
33 ({ unsigned long __kir; \
34 __asm__("insql %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
35 __kir; })
36# define __kernel_inslh(val, shift) \
37 ({ unsigned long __kir; \
38 __asm__("inslh %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
39 __kir; })
40# define __kernel_extbl(val, shift) \
41 ({ unsigned long __kir; \
42 __asm__("extbl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
43 __kir; })
44# define __kernel_extwl(val, shift) \
45 ({ unsigned long __kir; \
46 __asm__("extwl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \
47 __kir; })
48# define __kernel_cmpbge(a, b) \
49 ({ unsigned long __kir; \
50 __asm__("cmpbge %r2,%1,%0" : "=r"(__kir) : "rI"(b), "rJ"(a)); \
51 __kir; })
52# define __kernel_cttz(x) \
53 ({ unsigned long __kir; \
54 __asm__("cttz %1,%0" : "=r"(__kir) : "r"(x)); \
55 __kir; })
56# define __kernel_ctlz(x) \
57 ({ unsigned long __kir; \
58 __asm__("ctlz %1,%0" : "=r"(__kir) : "r"(x)); \
59 __kir; })
60# define __kernel_ctpop(x) \
61 ({ unsigned long __kir; \
62 __asm__("ctpop %1,%0" : "=r"(__kir) : "r"(x)); \
63 __kir; })
64#endif
65
66
67/*
68 * Beginning with EGCS 1.1, GCC defines __alpha_bwx__ when the BWX
69 * extension is enabled. Previous versions did not define anything
70 * we could test during compilation -- too bad, so sad.
71 */
72
73#if defined(__alpha_bwx__)
74#define __kernel_ldbu(mem) (mem)
75#define __kernel_ldwu(mem) (mem)
76#define __kernel_stb(val,mem) ((mem) = (val))
77#define __kernel_stw(val,mem) ((mem) = (val))
78#else
79#define __kernel_ldbu(mem) \
80 ({ unsigned char __kir; \
81 __asm__("ldbu %0,%1" : "=r"(__kir) : "m"(mem)); \
82 __kir; })
83#define __kernel_ldwu(mem) \
84 ({ unsigned short __kir; \
85 __asm__("ldwu %0,%1" : "=r"(__kir) : "m"(mem)); \
86 __kir; })
87#define __kernel_stb(val,mem) \
88 __asm__("stb %1,%0" : "=m"(mem) : "r"(val))
89#define __kernel_stw(val,mem) \
90 __asm__("stw %1,%0" : "=m"(mem) : "r"(val))
91#endif
92
93/* Some idiots over in <linux/compiler.h> thought inline should imply
94 always_inline. This breaks stuff. We'll include this file whenever
95 we run into such problems. */
96
97#include <linux/compiler.h>
98#undef inline
99#undef __inline__
100#undef __inline
101
102
103#endif /* __ALPHA_COMPILER_H */
diff --git a/include/asm-alpha/console.h b/include/asm-alpha/console.h
new file mode 100644
index 000000000000..a3ce4e62249b
--- /dev/null
+++ b/include/asm-alpha/console.h
@@ -0,0 +1,75 @@
1#ifndef __AXP_CONSOLE_H
2#define __AXP_CONSOLE_H
3
4/*
5 * Console callback routine numbers
6 */
7#define CCB_GETC 0x01
8#define CCB_PUTS 0x02
9#define CCB_RESET_TERM 0x03
10#define CCB_SET_TERM_INT 0x04
11#define CCB_SET_TERM_CTL 0x05
12#define CCB_PROCESS_KEYCODE 0x06
13#define CCB_OPEN_CONSOLE 0x07
14#define CCB_CLOSE_CONSOLE 0x08
15
16#define CCB_OPEN 0x10
17#define CCB_CLOSE 0x11
18#define CCB_IOCTL 0x12
19#define CCB_READ 0x13
20#define CCB_WRITE 0x14
21
22#define CCB_SET_ENV 0x20
23#define CCB_RESET_ENV 0x21
24#define CCB_GET_ENV 0x22
25#define CCB_SAVE_ENV 0x23
26
27#define CCB_PSWITCH 0x30
28#define CCB_BIOS_EMUL 0x32
29
30/*
31 * Environment variable numbers
32 */
33#define ENV_AUTO_ACTION 0x01
34#define ENV_BOOT_DEV 0x02
35#define ENV_BOOTDEF_DEV 0x03
36#define ENV_BOOTED_DEV 0x04
37#define ENV_BOOT_FILE 0x05
38#define ENV_BOOTED_FILE 0x06
39#define ENV_BOOT_OSFLAGS 0x07
40#define ENV_BOOTED_OSFLAGS 0x08
41#define ENV_BOOT_RESET 0x09
42#define ENV_DUMP_DEV 0x0A
43#define ENV_ENABLE_AUDIT 0x0B
44#define ENV_LICENSE 0x0C
45#define ENV_CHAR_SET 0x0D
46#define ENV_LANGUAGE 0x0E
47#define ENV_TTY_DEV 0x0F
48
49#ifdef __KERNEL__
50#ifndef __ASSEMBLY__
51extern long callback_puts(long unit, const char *s, long length);
52extern long callback_getc(long unit);
53extern long callback_open_console(void);
54extern long callback_close_console(void);
55extern long callback_open(const char *device, long length);
56extern long callback_close(long unit);
57extern long callback_read(long channel, long count, const char *buf, long lbn);
58extern long callback_getenv(long id, const char *buf, unsigned long buf_size);
59extern long callback_setenv(long id, const char *buf, unsigned long buf_size);
60extern long callback_save_env(void);
61
62extern int srm_fixup(unsigned long new_callback_addr,
63 unsigned long new_hwrpb_addr);
64extern long srm_puts(const char *, long);
65extern long srm_printk(const char *, ...)
66 __attribute__ ((format (printf, 1, 2)));
67
68struct crb_struct;
69struct hwrpb_struct;
70extern int callback_init_done;
71extern void * callback_init(void *);
72#endif /* __ASSEMBLY__ */
73#endif /* __KERNEL__ */
74
75#endif /* __AXP_CONSOLE_H */
diff --git a/include/asm-alpha/core_apecs.h b/include/asm-alpha/core_apecs.h
new file mode 100644
index 000000000000..6785ff7e02bc
--- /dev/null
+++ b/include/asm-alpha/core_apecs.h
@@ -0,0 +1,517 @@
1#ifndef __ALPHA_APECS__H__
2#define __ALPHA_APECS__H__
3
4#include <linux/types.h>
5#include <asm/compiler.h>
6
7/*
8 * APECS is the internal name for the 2107x chipset which provides
9 * memory controller and PCI access for the 21064 chip based systems.
10 *
11 * This file is based on:
12 *
13 * DECchip 21071-AA and DECchip 21072-AA Core Logic Chipsets
14 * Data Sheet
15 *
16 * EC-N0648-72
17 *
18 *
19 * david.rusling@reo.mts.dec.com Initial Version.
20 *
21 */
22
23/*
24 An AVANTI *might* be an XL, and an XL has only 27 bits of ISA address
25 that get passed through the PCI<->ISA bridge chip. So we've gotta use
26 both windows to max out the physical memory we can DMA to. Sigh...
27
28 If we try a window at 0 for 1GB as a work-around, we run into conflicts
29 with ISA/PCI bus memory which can't be relocated, like VGA aperture and
30 BIOS ROMs. So we must put the windows high enough to avoid these areas.
31
32 We put window 1 at BUS 64Mb for 64Mb, mapping physical 0 to 64Mb-1,
33 and window 2 at BUS 1Gb for 1Gb, mapping physical 0 to 1Gb-1.
34 Yes, this does map 0 to 64Mb-1 twice, but only window 1 will actually
35 be used for that range (via virt_to_bus()).
36
37 Note that we actually fudge the window 1 maximum as 48Mb instead of 64Mb,
38 to keep virt_to_bus() from returning an address in the first window, for
39 a data area that goes beyond the 64Mb first DMA window. Sigh...
40 The fudge factor MUST match with <asm/dma.h> MAX_DMA_ADDRESS, but
41 we can't just use that here, because of header file looping... :-(
42
43 Window 1 will be used for all DMA from the ISA bus; yes, that does
44 limit what memory an ISA floppy or sound card or Ethernet can touch, but
45 it's also a known limitation on other platforms as well. We use the
46 same technique that is used on INTEL platforms with similar limitation:
47 set MAX_DMA_ADDRESS and clear some pages' DMAable flags during mem_init().
48 We trust that any ISA bus device drivers will *always* ask for DMAable
49 memory explicitly via kmalloc()/get_free_pages() flags arguments.
50
51 Note that most PCI bus devices' drivers do *not* explicitly ask for
52 DMAable memory; they count on being able to DMA to any memory they
53 get from kmalloc()/get_free_pages(). They will also use window 1 for
54 any physical memory accesses below 64Mb; the rest will be handled by
55 window 2, maxing out at 1Gb of memory. I trust this is enough... :-)
56
57 We hope that the area before the first window is large enough so that
58 there will be no overlap at the top end (64Mb). We *must* locate the
59 PCI cards' memory just below window 1, so that there's still the
60 possibility of being able to access it via SPARSE space. This is
61 important for cards such as the Matrox Millennium, whose Xserver
62 wants to access memory-mapped registers in byte and short lengths.
63
64 Note that the XL is treated differently from the AVANTI, even though
65 for most other things they are identical. It didn't seem reasonable to
66 make the AVANTI support pay for the limitations of the XL. It is true,
67 however, that an XL kernel will run on an AVANTI without problems.
68
69 %%% All of this should be obviated by the ability to route
70 everything through the iommu.
71*/
72
73/*
74 * 21071-DA Control and Status registers.
75 * These are used for PCI memory access.
76 */
77#define APECS_IOC_DCSR (IDENT_ADDR + 0x1A0000000UL)
78#define APECS_IOC_PEAR (IDENT_ADDR + 0x1A0000020UL)
79#define APECS_IOC_SEAR (IDENT_ADDR + 0x1A0000040UL)
80#define APECS_IOC_DR1 (IDENT_ADDR + 0x1A0000060UL)
81#define APECS_IOC_DR2 (IDENT_ADDR + 0x1A0000080UL)
82#define APECS_IOC_DR3 (IDENT_ADDR + 0x1A00000A0UL)
83
84#define APECS_IOC_TB1R (IDENT_ADDR + 0x1A00000C0UL)
85#define APECS_IOC_TB2R (IDENT_ADDR + 0x1A00000E0UL)
86
87#define APECS_IOC_PB1R (IDENT_ADDR + 0x1A0000100UL)
88#define APECS_IOC_PB2R (IDENT_ADDR + 0x1A0000120UL)
89
90#define APECS_IOC_PM1R (IDENT_ADDR + 0x1A0000140UL)
91#define APECS_IOC_PM2R (IDENT_ADDR + 0x1A0000160UL)
92
93#define APECS_IOC_HAXR0 (IDENT_ADDR + 0x1A0000180UL)
94#define APECS_IOC_HAXR1 (IDENT_ADDR + 0x1A00001A0UL)
95#define APECS_IOC_HAXR2 (IDENT_ADDR + 0x1A00001C0UL)
96
97#define APECS_IOC_PMLT (IDENT_ADDR + 0x1A00001E0UL)
98
99#define APECS_IOC_TLBTAG0 (IDENT_ADDR + 0x1A0000200UL)
100#define APECS_IOC_TLBTAG1 (IDENT_ADDR + 0x1A0000220UL)
101#define APECS_IOC_TLBTAG2 (IDENT_ADDR + 0x1A0000240UL)
102#define APECS_IOC_TLBTAG3 (IDENT_ADDR + 0x1A0000260UL)
103#define APECS_IOC_TLBTAG4 (IDENT_ADDR + 0x1A0000280UL)
104#define APECS_IOC_TLBTAG5 (IDENT_ADDR + 0x1A00002A0UL)
105#define APECS_IOC_TLBTAG6 (IDENT_ADDR + 0x1A00002C0UL)
106#define APECS_IOC_TLBTAG7 (IDENT_ADDR + 0x1A00002E0UL)
107
108#define APECS_IOC_TLBDATA0 (IDENT_ADDR + 0x1A0000300UL)
109#define APECS_IOC_TLBDATA1 (IDENT_ADDR + 0x1A0000320UL)
110#define APECS_IOC_TLBDATA2 (IDENT_ADDR + 0x1A0000340UL)
111#define APECS_IOC_TLBDATA3 (IDENT_ADDR + 0x1A0000360UL)
112#define APECS_IOC_TLBDATA4 (IDENT_ADDR + 0x1A0000380UL)
113#define APECS_IOC_TLBDATA5 (IDENT_ADDR + 0x1A00003A0UL)
114#define APECS_IOC_TLBDATA6 (IDENT_ADDR + 0x1A00003C0UL)
115#define APECS_IOC_TLBDATA7 (IDENT_ADDR + 0x1A00003E0UL)
116
117#define APECS_IOC_TBIA (IDENT_ADDR + 0x1A0000400UL)
118
119
120/*
121 * 21071-CA Control and Status registers.
122 * These are used to program memory timing,
123 * configure memory and initialise the B-Cache.
124 */
125#define APECS_MEM_GCR (IDENT_ADDR + 0x180000000UL)
126#define APECS_MEM_EDSR (IDENT_ADDR + 0x180000040UL)
127#define APECS_MEM_TAR (IDENT_ADDR + 0x180000060UL)
128#define APECS_MEM_ELAR (IDENT_ADDR + 0x180000080UL)
129#define APECS_MEM_EHAR (IDENT_ADDR + 0x1800000a0UL)
130#define APECS_MEM_SFT_RST (IDENT_ADDR + 0x1800000c0UL)
131#define APECS_MEM_LDxLAR (IDENT_ADDR + 0x1800000e0UL)
132#define APECS_MEM_LDxHAR (IDENT_ADDR + 0x180000100UL)
133#define APECS_MEM_GTR (IDENT_ADDR + 0x180000200UL)
134#define APECS_MEM_RTR (IDENT_ADDR + 0x180000220UL)
135#define APECS_MEM_VFPR (IDENT_ADDR + 0x180000240UL)
136#define APECS_MEM_PDLDR (IDENT_ADDR + 0x180000260UL)
137#define APECS_MEM_PDhDR (IDENT_ADDR + 0x180000280UL)
138
139/* Bank x Base Address Register */
140#define APECS_MEM_B0BAR (IDENT_ADDR + 0x180000800UL)
141#define APECS_MEM_B1BAR (IDENT_ADDR + 0x180000820UL)
142#define APECS_MEM_B2BAR (IDENT_ADDR + 0x180000840UL)
143#define APECS_MEM_B3BAR (IDENT_ADDR + 0x180000860UL)
144#define APECS_MEM_B4BAR (IDENT_ADDR + 0x180000880UL)
145#define APECS_MEM_B5BAR (IDENT_ADDR + 0x1800008A0UL)
146#define APECS_MEM_B6BAR (IDENT_ADDR + 0x1800008C0UL)
147#define APECS_MEM_B7BAR (IDENT_ADDR + 0x1800008E0UL)
148#define APECS_MEM_B8BAR (IDENT_ADDR + 0x180000900UL)
149
150/* Bank x Configuration Register */
151#define APECS_MEM_B0BCR (IDENT_ADDR + 0x180000A00UL)
152#define APECS_MEM_B1BCR (IDENT_ADDR + 0x180000A20UL)
153#define APECS_MEM_B2BCR (IDENT_ADDR + 0x180000A40UL)
154#define APECS_MEM_B3BCR (IDENT_ADDR + 0x180000A60UL)
155#define APECS_MEM_B4BCR (IDENT_ADDR + 0x180000A80UL)
156#define APECS_MEM_B5BCR (IDENT_ADDR + 0x180000AA0UL)
157#define APECS_MEM_B6BCR (IDENT_ADDR + 0x180000AC0UL)
158#define APECS_MEM_B7BCR (IDENT_ADDR + 0x180000AE0UL)
159#define APECS_MEM_B8BCR (IDENT_ADDR + 0x180000B00UL)
160
161/* Bank x Timing Register A */
162#define APECS_MEM_B0TRA (IDENT_ADDR + 0x180000C00UL)
163#define APECS_MEM_B1TRA (IDENT_ADDR + 0x180000C20UL)
164#define APECS_MEM_B2TRA (IDENT_ADDR + 0x180000C40UL)
165#define APECS_MEM_B3TRA (IDENT_ADDR + 0x180000C60UL)
166#define APECS_MEM_B4TRA (IDENT_ADDR + 0x180000C80UL)
167#define APECS_MEM_B5TRA (IDENT_ADDR + 0x180000CA0UL)
168#define APECS_MEM_B6TRA (IDENT_ADDR + 0x180000CC0UL)
169#define APECS_MEM_B7TRA (IDENT_ADDR + 0x180000CE0UL)
170#define APECS_MEM_B8TRA (IDENT_ADDR + 0x180000D00UL)
171
172/* Bank x Timing Register B */
173#define APECS_MEM_B0TRB (IDENT_ADDR + 0x180000E00UL)
174#define APECS_MEM_B1TRB (IDENT_ADDR + 0x180000E20UL)
175#define APECS_MEM_B2TRB (IDENT_ADDR + 0x180000E40UL)
176#define APECS_MEM_B3TRB (IDENT_ADDR + 0x180000E60UL)
177#define APECS_MEM_B4TRB (IDENT_ADDR + 0x180000E80UL)
178#define APECS_MEM_B5TRB (IDENT_ADDR + 0x180000EA0UL)
179#define APECS_MEM_B6TRB (IDENT_ADDR + 0x180000EC0UL)
180#define APECS_MEM_B7TRB (IDENT_ADDR + 0x180000EE0UL)
181#define APECS_MEM_B8TRB (IDENT_ADDR + 0x180000F00UL)
182
183
184/*
185 * Memory spaces:
186 */
187#define APECS_IACK_SC (IDENT_ADDR + 0x1b0000000UL)
188#define APECS_CONF (IDENT_ADDR + 0x1e0000000UL)
189#define APECS_IO (IDENT_ADDR + 0x1c0000000UL)
190#define APECS_SPARSE_MEM (IDENT_ADDR + 0x200000000UL)
191#define APECS_DENSE_MEM (IDENT_ADDR + 0x300000000UL)
192
193
194/*
195 * Bit definitions for I/O Controller status register 0:
196 */
197#define APECS_IOC_STAT0_CMD 0xf
198#define APECS_IOC_STAT0_ERR (1<<4)
199#define APECS_IOC_STAT0_LOST (1<<5)
200#define APECS_IOC_STAT0_THIT (1<<6)
201#define APECS_IOC_STAT0_TREF (1<<7)
202#define APECS_IOC_STAT0_CODE_SHIFT 8
203#define APECS_IOC_STAT0_CODE_MASK 0x7
204#define APECS_IOC_STAT0_P_NBR_SHIFT 13
205#define APECS_IOC_STAT0_P_NBR_MASK 0x7ffff
206
207#define APECS_HAE_ADDRESS APECS_IOC_HAXR1
208
209
210/*
211 * Data structure for handling APECS machine checks:
212 */
213
/* Machine-check logout frame for Mikasa-class APECS systems.  A
   superset of the plain APECS frame below: chipset state plus
   PCI<->EISA bridge (PCEB/ESC) and interrupt-controller state. */
struct el_apecs_mikasa_sysdata_mcheck
{
	/* Memory-controller ("coma") CSRs -- cf. the APECS_MEM_* list above. */
	unsigned long coma_gcr;
	unsigned long coma_edsr;
	unsigned long coma_ter;
	unsigned long coma_elar;
	unsigned long coma_ehar;
	unsigned long coma_ldlr;
	unsigned long coma_ldhr;
	unsigned long coma_base0;
	unsigned long coma_base1;
	unsigned long coma_base2;
	unsigned long coma_base3;
	unsigned long coma_cnfg0;
	unsigned long coma_cnfg1;
	unsigned long coma_cnfg2;
	unsigned long coma_cnfg3;
	/* PCI-interface ("epic") CSRs -- cf. the APECS_IOC_* list above. */
	unsigned long epic_dcsr;
	unsigned long epic_pear;
	unsigned long epic_sear;
	unsigned long epic_tbr1;
	unsigned long epic_tbr2;
	unsigned long epic_pbr1;
	unsigned long epic_pbr2;
	unsigned long epic_pmr1;
	unsigned long epic_pmr2;
	unsigned long epic_harx1;
	unsigned long epic_harx2;
	unsigned long epic_pmlt;
	unsigned long epic_tag0;
	unsigned long epic_tag1;
	unsigned long epic_tag2;
	unsigned long epic_tag3;
	unsigned long epic_tag4;
	unsigned long epic_tag5;
	unsigned long epic_tag6;
	unsigned long epic_tag7;
	unsigned long epic_data0;
	unsigned long epic_data1;
	unsigned long epic_data2;
	unsigned long epic_data3;
	unsigned long epic_data4;
	unsigned long epic_data5;
	unsigned long epic_data6;
	unsigned long epic_data7;

	/* PCEB (PCI/EISA bridge) config snapshot. */
	unsigned long pceb_vid;
	unsigned long pceb_did;
	unsigned long pceb_revision;
	unsigned long pceb_command;
	unsigned long pceb_status;
	unsigned long pceb_latency;
	unsigned long pceb_control;
	unsigned long pceb_arbcon;
	unsigned long pceb_arbpri;

	/* ESC (EISA system component) state, incl. interrupt controllers. */
	unsigned long esc_id;
	unsigned long esc_revision;
	unsigned long esc_int0;
	unsigned long esc_int1;
	unsigned long esc_elcr0;
	unsigned long esc_elcr1;
	unsigned long esc_last_eisa;
	unsigned long esc_nmi_stat;

	/* PCI interrupt request/mask and server-management state. */
	unsigned long pci_ir;
	unsigned long pci_imr;
	unsigned long svr_mgr;
};
283
284/* This for the normal APECS machines. */
/* This for the normal APECS machines: memory-controller ("coma",
   cf. APECS_MEM_*) and PCI-interface ("epic", cf. APECS_IOC_*)
   chipset state only.  Note fewer base/cnfg banks than the Mikasa
   variant. */
struct el_apecs_sysdata_mcheck
{
	unsigned long coma_gcr;
	unsigned long coma_edsr;
	unsigned long coma_ter;
	unsigned long coma_elar;
	unsigned long coma_ehar;
	unsigned long coma_ldlr;
	unsigned long coma_ldhr;
	unsigned long coma_base0;
	unsigned long coma_base1;
	unsigned long coma_base2;
	unsigned long coma_cnfg0;
	unsigned long coma_cnfg1;
	unsigned long coma_cnfg2;
	unsigned long epic_dcsr;
	unsigned long epic_pear;
	unsigned long epic_sear;
	unsigned long epic_tbr1;
	unsigned long epic_tbr2;
	unsigned long epic_pbr1;
	unsigned long epic_pbr2;
	unsigned long epic_pmr1;
	unsigned long epic_pmr2;
	unsigned long epic_harx1;
	unsigned long epic_harx2;
	unsigned long epic_pmlt;
	unsigned long epic_tag0;
	unsigned long epic_tag1;
	unsigned long epic_tag2;
	unsigned long epic_tag3;
	unsigned long epic_tag4;
	unsigned long epic_tag5;
	unsigned long epic_tag6;
	unsigned long epic_tag7;
	unsigned long epic_data0;
	unsigned long epic_data1;
	unsigned long epic_data2;
	unsigned long epic_data3;
	unsigned long epic_data4;
	unsigned long epic_data5;
	unsigned long epic_data6;
	unsigned long epic_data7;
};
329
/* Per-CPU machine-check logout state: PAL temporaries plus the EV4
   internal-processor registers captured at machine-check time. */
struct el_apecs_procdata
{
	unsigned long paltemp[32];	/* PAL TEMP REGS. */
	/* EV4-specific fields */
	unsigned long exc_addr;	/* Address of excepting instruction. */
	unsigned long exc_sum;	/* Summary of arithmetic traps. */
	unsigned long exc_mask;	/* Exception mask (from exc_sum). */
	unsigned long iccsr;	/* IBox hardware enables. */
	unsigned long pal_base;	/* Base address for PALcode. */
	unsigned long hier;	/* Hardware Interrupt Enable. */
	unsigned long hirr;	/* Hardware Interrupt Request. */
	unsigned long csr;	/* D-stream fault info. */
	unsigned long dc_stat;	/* D-cache status (ECC/Parity Err). */
	unsigned long dc_addr;	/* EV3 Phys Addr for ECC/DPERR. */
	unsigned long abox_ctl;	/* ABox Control Register. */
	unsigned long biu_stat;	/* BIU Status. */
	unsigned long biu_addr;	/* BUI Address. */
	unsigned long biu_ctl;	/* BIU Control. */
	unsigned long fill_syndrome;/* For correcting ECC errors. */
	unsigned long fill_addr;	/* Cache block which was being read */
	unsigned long va;	/* Effective VA of fault or miss. */
	unsigned long bc_tag;	/* Backup Cache Tag Probe Results.*/
};
353
354
355#ifdef __KERNEL__
356
357#ifndef __EXTERN_INLINE
358#define __EXTERN_INLINE extern inline
359#define __IO_EXTERN_INLINE
360#endif
361
362/*
363 * I/O functions:
364 *
365 * Unlike Jensen, the APECS machines have no concept of local
366 * I/O---everything goes over the PCI bus.
367 *
368 * There is plenty room for optimization here. In particular,
369 * the Alpha's insb/insw/extb/extw should be useful in moving
370 * data to/from the right byte-lanes.
371 */
372
373#define vip volatile int __force *
374#define vuip volatile unsigned int __force *
375#define vulp volatile unsigned long __force *
376
/* If the sparse-space offset in the enclosing scope's 'addr' local
   reaches beyond 16MB (1 << 24), park the high bits in the HAE
   register and strip them from 'addr'.  NOTE: this macro both reads
   and REWRITES the caller's 'addr' variable -- callers below rely on
   the adjusted value for the subsequent sparse access. */
#define APECS_SET_HAE \
	do { \
		if (addr >= (1UL << 24)) { \
			unsigned long msb = addr & 0xf8000000; \
			addr -= msb; \
			set_hae(msb); \
		} \
	} while (0)
385
/* Read one byte through sparse space.  Dense-memory cookies are
   converted to a SPARSE_MEM access (APECS_SET_HAE may adjust 'addr'
   and the HAE register first); everything else is treated as port
   I/O.  The '<< 5' expands the byte offset into the sparse encoding,
   '+ 0x00' selects a byte-sized transfer, and the addressed byte is
   then pulled out of the returned longword with __kernel_extbl. */
__EXTERN_INLINE unsigned int apecs_ioread8(void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	unsigned long result, base_and_type;

	if (addr >= APECS_DENSE_MEM) {
		addr -= APECS_DENSE_MEM;
		APECS_SET_HAE;
		base_and_type = APECS_SPARSE_MEM + 0x00;
	} else {
		addr -= APECS_IO;
		base_and_type = APECS_IO + 0x00;
	}

	result = *(vip) ((addr << 5) + base_and_type);
	return __kernel_extbl(result, addr & 3);
}
403
/* Write one byte through sparse space (mirror of apecs_ioread8):
   __kernel_insbl positions the byte in the correct lane of the
   longword, which is then stored at the byte-transfer sparse
   address. */
__EXTERN_INLINE void apecs_iowrite8(u8 b, void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	unsigned long w, base_and_type;

	if (addr >= APECS_DENSE_MEM) {
		addr -= APECS_DENSE_MEM;
		APECS_SET_HAE;
		base_and_type = APECS_SPARSE_MEM + 0x00;
	} else {
		addr -= APECS_IO;
		base_and_type = APECS_IO + 0x00;
	}

	w = __kernel_insbl(b, addr & 3);
	*(vuip) ((addr << 5) + base_and_type) = w;
}
421
/* Read a 16-bit word through sparse space.  Same scheme as
   apecs_ioread8, but '+ 0x08' selects a word-sized transfer and the
   word is extracted from the returned longword with __kernel_extwl. */
__EXTERN_INLINE unsigned int apecs_ioread16(void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	unsigned long result, base_and_type;

	if (addr >= APECS_DENSE_MEM) {
		addr -= APECS_DENSE_MEM;
		APECS_SET_HAE;
		base_and_type = APECS_SPARSE_MEM + 0x08;
	} else {
		addr -= APECS_IO;
		base_and_type = APECS_IO + 0x08;
	}

	result = *(vip) ((addr << 5) + base_and_type);
	return __kernel_extwl(result, addr & 3);
}
439
/* Write a 16-bit word through sparse space (mirror of
   apecs_ioread16): __kernel_inswl positions the word in the correct
   lanes before the store at the word-transfer (+0x08) sparse
   address. */
__EXTERN_INLINE void apecs_iowrite16(u16 b, void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	unsigned long w, base_and_type;

	if (addr >= APECS_DENSE_MEM) {
		addr -= APECS_DENSE_MEM;
		APECS_SET_HAE;
		base_and_type = APECS_SPARSE_MEM + 0x08;
	} else {
		addr -= APECS_IO;
		base_and_type = APECS_IO + 0x08;
	}

	w = __kernel_inswl(b, addr & 3);
	*(vuip) ((addr << 5) + base_and_type) = w;
}
457
/* Read a 32-bit longword.  Dense-memory cookies are dereferenced
   directly; port-I/O cookies are rewritten to the longword-transfer
   (+0x18) sparse I/O address first. */
__EXTERN_INLINE unsigned int apecs_ioread32(void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	if (addr < APECS_DENSE_MEM)
		addr = ((addr - APECS_IO) << 5) + APECS_IO + 0x18;
	return *(vuip)addr;
}
465
/* Write a 32-bit longword (mirror of apecs_ioread32): dense cookies
   store directly, port-I/O cookies via the longword-transfer (+0x18)
   sparse encoding. */
__EXTERN_INLINE void apecs_iowrite32(u32 b, void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	if (addr < APECS_DENSE_MEM)
		addr = ((addr - APECS_IO) << 5) + APECS_IO + 0x18;
	*(vuip)addr = b;
}
473
/* Turn a port number into an I/O-space cookie by adding the APECS_IO
   base; the accessors above translate it back to a real sparse
   address on each use. */
__EXTERN_INLINE void __iomem *apecs_ioportmap(unsigned long addr)
{
	return (void __iomem *)(addr + APECS_IO);
}
478
/* Map a PCI bus address for MMIO by offsetting into dense space.
   'size' is unused -- no page tables are involved, the whole dense
   window is always addressable. */
__EXTERN_INLINE void __iomem *apecs_ioremap(unsigned long addr,
					    unsigned long size)
{
	return (void __iomem *)(addr + APECS_DENSE_MEM);
}
484
/* True for any cookie at or above the chipset region; the constant is
   the lowest APECS address defined above (the 21071-CA CSR base). */
__EXTERN_INLINE int apecs_is_ioaddr(unsigned long addr)
{
	return addr >= IDENT_ADDR + 0x180000000UL;
}
489
/* MMIO cookies live at or above dense memory space; anything below
   that is port I/O. */
__EXTERN_INLINE int apecs_is_mmio(const volatile void __iomem *addr)
{
	return (unsigned long)addr >= APECS_DENSE_MEM;
}
494
495#undef APECS_SET_HAE
496
497#undef vip
498#undef vuip
499#undef vulp
500
501#undef __IO_PREFIX
502#define __IO_PREFIX apecs
503#define apecs_trivial_io_bw 0
504#define apecs_trivial_io_lq 0
505#define apecs_trivial_rw_bw 2
506#define apecs_trivial_rw_lq 1
507#define apecs_trivial_iounmap 1
508#include <asm/io_trivial.h>
509
510#ifdef __IO_EXTERN_INLINE
511#undef __EXTERN_INLINE
512#undef __IO_EXTERN_INLINE
513#endif
514
515#endif /* __KERNEL__ */
516
517#endif /* __ALPHA_APECS__H__ */
diff --git a/include/asm-alpha/core_cia.h b/include/asm-alpha/core_cia.h
new file mode 100644
index 000000000000..3a70d68bfce8
--- /dev/null
+++ b/include/asm-alpha/core_cia.h
@@ -0,0 +1,501 @@
1#ifndef __ALPHA_CIA__H__
2#define __ALPHA_CIA__H__
3
4/* Define to experiment with fitting everything into one 512MB HAE window. */
5#define CIA_ONE_HAE_WINDOW 1
6
7#include <linux/config.h>
8#include <linux/types.h>
9#include <asm/compiler.h>
10
11/*
12 * CIA is the internal name for the 21171 chipset which provides
13 * memory controller and PCI access for the 21164 chip based systems.
14 * Also supported here is the 21172 (CIA-2) and 21174 (PYXIS).
15 *
16 * The lineage is a bit confused, since the 21174 was reportedly started
17 * from the 21171 Pass 1 mask, and so is missing bug fixes that appear
18 * in 21171 Pass 2 and 21172, but it also contains additional features.
19 *
20 * This file is based on:
21 *
22 * DECchip 21171 Core Logic Chipset
23 * Technical Reference Manual
24 *
25 * EC-QE18B-TE
26 *
27 * david.rusling@reo.mts.dec.com Initial Version.
28 *
29 */
30
31/*
32 * CIA ADDRESS BIT DEFINITIONS
33 *
34 * 3333 3333 3322 2222 2222 1111 1111 11
35 * 9876 5432 1098 7654 3210 9876 5432 1098 7654 3210
36 * ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
37 * 1 000
38 * ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
39 * | |\|
40 * | Byte Enable --+ |
41 * | Transfer Length --+
42 * +-- IO space, not cached
43 *
44 * Byte Transfer
45 * Enable Length Transfer Byte Address
46 * adr<6:5> adr<4:3> Length Enable Adder
47 * ---------------------------------------------
48 * 00 00 Byte 1110 0x000
49 * 01 00 Byte 1101 0x020
50 * 10 00 Byte 1011 0x040
51 * 11 00 Byte 0111 0x060
52 *
53 * 00 01 Word 1100 0x008
54 * 01 01 Word 1001 0x028 <= Not supported in this code.
55 * 10 01 Word 0011 0x048
56 *
57 * 00 10 Tribyte 1000 0x010
58 * 01 10 Tribyte 0001 0x030
59 *
60 * 10 11 Longword 0000 0x058
61 *
62 * Note that byte enables are asserted low.
63 *
64 */
65
66#define CIA_MEM_R1_MASK 0x1fffffff /* SPARSE Mem region 1 mask is 29 bits */
67#define CIA_MEM_R2_MASK 0x07ffffff /* SPARSE Mem region 2 mask is 27 bits */
68#define CIA_MEM_R3_MASK 0x03ffffff /* SPARSE Mem region 3 mask is 26 bits */
69
70/*
71 * 21171-CA Control and Status Registers
72 */
73#define CIA_IOC_CIA_REV (IDENT_ADDR + 0x8740000080UL)
74# define CIA_REV_MASK 0xff
75#define CIA_IOC_PCI_LAT (IDENT_ADDR + 0x87400000C0UL)
76#define CIA_IOC_CIA_CTRL (IDENT_ADDR + 0x8740000100UL)
77# define CIA_CTRL_PCI_EN (1 << 0)
78# define CIA_CTRL_PCI_LOCK_EN (1 << 1)
79# define CIA_CTRL_PCI_LOOP_EN (1 << 2)
80# define CIA_CTRL_FST_BB_EN (1 << 3)
81# define CIA_CTRL_PCI_MST_EN (1 << 4)
82# define CIA_CTRL_PCI_MEM_EN (1 << 5)
83# define CIA_CTRL_PCI_REQ64_EN (1 << 6)
84# define CIA_CTRL_PCI_ACK64_EN (1 << 7)
85# define CIA_CTRL_ADDR_PE_EN (1 << 8)
86# define CIA_CTRL_PERR_EN (1 << 9)
87# define CIA_CTRL_FILL_ERR_EN (1 << 10)
88# define CIA_CTRL_MCHK_ERR_EN (1 << 11)
89# define CIA_CTRL_ECC_CHK_EN (1 << 12)
90# define CIA_CTRL_ASSERT_IDLE_BC (1 << 13)
91# define CIA_CTRL_COM_IDLE_BC (1 << 14)
92# define CIA_CTRL_CSR_IOA_BYPASS (1 << 15)
93# define CIA_CTRL_IO_FLUSHREQ_EN (1 << 16)
94# define CIA_CTRL_CPU_FLUSHREQ_EN (1 << 17)
95# define CIA_CTRL_ARB_CPU_EN (1 << 18)
96# define CIA_CTRL_EN_ARB_LINK (1 << 19)
97# define CIA_CTRL_RD_TYPE_SHIFT 20
98# define CIA_CTRL_RL_TYPE_SHIFT 24
99# define CIA_CTRL_RM_TYPE_SHIFT 28
100# define CIA_CTRL_EN_DMA_RD_PERF (1 << 31)
101#define CIA_IOC_CIA_CNFG (IDENT_ADDR + 0x8740000140UL)
102# define CIA_CNFG_IOA_BWEN (1 << 0)
103# define CIA_CNFG_PCI_MWEN (1 << 4)
104# define CIA_CNFG_PCI_DWEN (1 << 5)
105# define CIA_CNFG_PCI_WLEN (1 << 8)
106#define CIA_IOC_FLASH_CTRL (IDENT_ADDR + 0x8740000200UL)
107#define CIA_IOC_HAE_MEM (IDENT_ADDR + 0x8740000400UL)
108#define CIA_IOC_HAE_IO (IDENT_ADDR + 0x8740000440UL)
109#define CIA_IOC_CFG (IDENT_ADDR + 0x8740000480UL)
110#define CIA_IOC_CACK_EN (IDENT_ADDR + 0x8740000600UL)
111# define CIA_CACK_EN_LOCK_EN (1 << 0)
112# define CIA_CACK_EN_MB_EN (1 << 1)
113# define CIA_CACK_EN_SET_DIRTY_EN (1 << 2)
114# define CIA_CACK_EN_BC_VICTIM_EN (1 << 3)
115
116
117/*
118 * 21171-CA Diagnostic Registers
119 */
120#define CIA_IOC_CIA_DIAG (IDENT_ADDR + 0x8740002000UL)
121#define CIA_IOC_DIAG_CHECK (IDENT_ADDR + 0x8740003000UL)
122
123/*
124 * 21171-CA Performance Monitor registers
125 */
126#define CIA_IOC_PERF_MONITOR (IDENT_ADDR + 0x8740004000UL)
127#define CIA_IOC_PERF_CONTROL (IDENT_ADDR + 0x8740004040UL)
128
129/*
130 * 21171-CA Error registers
131 */
132#define CIA_IOC_CPU_ERR0 (IDENT_ADDR + 0x8740008000UL)
133#define CIA_IOC_CPU_ERR1 (IDENT_ADDR + 0x8740008040UL)
134#define CIA_IOC_CIA_ERR (IDENT_ADDR + 0x8740008200UL)
135# define CIA_ERR_COR_ERR (1 << 0)
136# define CIA_ERR_UN_COR_ERR (1 << 1)
137# define CIA_ERR_CPU_PE (1 << 2)
138# define CIA_ERR_MEM_NEM (1 << 3)
139# define CIA_ERR_PCI_SERR (1 << 4)
140# define CIA_ERR_PERR (1 << 5)
141# define CIA_ERR_PCI_ADDR_PE (1 << 6)
142# define CIA_ERR_RCVD_MAS_ABT (1 << 7)
143# define CIA_ERR_RCVD_TAR_ABT (1 << 8)
144# define CIA_ERR_PA_PTE_INV (1 << 9)
145# define CIA_ERR_FROM_WRT_ERR (1 << 10)
146# define CIA_ERR_IOA_TIMEOUT (1 << 11)
147# define CIA_ERR_LOST_CORR_ERR (1 << 16)
148# define CIA_ERR_LOST_UN_CORR_ERR (1 << 17)
149# define CIA_ERR_LOST_CPU_PE (1 << 18)
150# define CIA_ERR_LOST_MEM_NEM (1 << 19)
151# define CIA_ERR_LOST_PERR (1 << 21)
152# define CIA_ERR_LOST_PCI_ADDR_PE (1 << 22)
153# define CIA_ERR_LOST_RCVD_MAS_ABT (1 << 23)
154# define CIA_ERR_LOST_RCVD_TAR_ABT (1 << 24)
155# define CIA_ERR_LOST_PA_PTE_INV (1 << 25)
156# define CIA_ERR_LOST_FROM_WRT_ERR (1 << 26)
157# define CIA_ERR_LOST_IOA_TIMEOUT (1 << 27)
158# define CIA_ERR_VALID (1 << 31)
159#define CIA_IOC_CIA_STAT (IDENT_ADDR + 0x8740008240UL)
160#define CIA_IOC_ERR_MASK (IDENT_ADDR + 0x8740008280UL)
161#define CIA_IOC_CIA_SYN (IDENT_ADDR + 0x8740008300UL)
162#define CIA_IOC_MEM_ERR0 (IDENT_ADDR + 0x8740008400UL)
163#define CIA_IOC_MEM_ERR1 (IDENT_ADDR + 0x8740008440UL)
164#define CIA_IOC_PCI_ERR0 (IDENT_ADDR + 0x8740008800UL)
165#define CIA_IOC_PCI_ERR1 (IDENT_ADDR + 0x8740008840UL)
166#define CIA_IOC_PCI_ERR3 (IDENT_ADDR + 0x8740008880UL)
167
168/*
169 * 21171-CA System configuration registers
170 */
171#define CIA_IOC_MCR (IDENT_ADDR + 0x8750000000UL)
172#define CIA_IOC_MBA0 (IDENT_ADDR + 0x8750000600UL)
173#define CIA_IOC_MBA2 (IDENT_ADDR + 0x8750000680UL)
174#define CIA_IOC_MBA4 (IDENT_ADDR + 0x8750000700UL)
175#define CIA_IOC_MBA6 (IDENT_ADDR + 0x8750000780UL)
176#define CIA_IOC_MBA8 (IDENT_ADDR + 0x8750000800UL)
177#define CIA_IOC_MBAA (IDENT_ADDR + 0x8750000880UL)
178#define CIA_IOC_MBAC (IDENT_ADDR + 0x8750000900UL)
179#define CIA_IOC_MBAE (IDENT_ADDR + 0x8750000980UL)
180#define CIA_IOC_TMG0 (IDENT_ADDR + 0x8750000B00UL)
181#define CIA_IOC_TMG1 (IDENT_ADDR + 0x8750000B40UL)
182#define CIA_IOC_TMG2 (IDENT_ADDR + 0x8750000B80UL)
183
184/*
185 * 2117A-CA PCI Address and Scatter-Gather Registers.
186 */
187#define CIA_IOC_PCI_TBIA (IDENT_ADDR + 0x8760000100UL)
188
189#define CIA_IOC_PCI_W0_BASE (IDENT_ADDR + 0x8760000400UL)
190#define CIA_IOC_PCI_W0_MASK (IDENT_ADDR + 0x8760000440UL)
191#define CIA_IOC_PCI_T0_BASE (IDENT_ADDR + 0x8760000480UL)
192
193#define CIA_IOC_PCI_W1_BASE (IDENT_ADDR + 0x8760000500UL)
194#define CIA_IOC_PCI_W1_MASK (IDENT_ADDR + 0x8760000540UL)
195#define CIA_IOC_PCI_T1_BASE (IDENT_ADDR + 0x8760000580UL)
196
197#define CIA_IOC_PCI_W2_BASE (IDENT_ADDR + 0x8760000600UL)
198#define CIA_IOC_PCI_W2_MASK (IDENT_ADDR + 0x8760000640UL)
199#define CIA_IOC_PCI_T2_BASE (IDENT_ADDR + 0x8760000680UL)
200
201#define CIA_IOC_PCI_W3_BASE (IDENT_ADDR + 0x8760000700UL)
202#define CIA_IOC_PCI_W3_MASK (IDENT_ADDR + 0x8760000740UL)
203#define CIA_IOC_PCI_T3_BASE (IDENT_ADDR + 0x8760000780UL)
204
205#define CIA_IOC_PCI_Wn_BASE(N) (IDENT_ADDR + 0x8760000400UL + (N)*0x100)
206#define CIA_IOC_PCI_Wn_MASK(N) (IDENT_ADDR + 0x8760000440UL + (N)*0x100)
207#define CIA_IOC_PCI_Tn_BASE(N) (IDENT_ADDR + 0x8760000480UL + (N)*0x100)
208
209#define CIA_IOC_PCI_W_DAC (IDENT_ADDR + 0x87600007C0UL)
210
211/*
212 * 2117A-CA Address Translation Registers.
213 */
214
215/* 8 tag registers, the first 4 of which are lockable. */
216#define CIA_IOC_TB_TAGn(n) \
217 (IDENT_ADDR + 0x8760000800UL + (n)*0x40)
218
219/* 4 page registers per tag register. */
220#define CIA_IOC_TBn_PAGEm(n,m) \
221 (IDENT_ADDR + 0x8760001000UL + (n)*0x100 + (m)*0x40)
222
223/*
224 * Memory spaces:
225 */
226#define CIA_IACK_SC (IDENT_ADDR + 0x8720000000UL)
227#define CIA_CONF (IDENT_ADDR + 0x8700000000UL)
228#define CIA_IO (IDENT_ADDR + 0x8580000000UL)
229#define CIA_SPARSE_MEM (IDENT_ADDR + 0x8000000000UL)
230#define CIA_SPARSE_MEM_R2 (IDENT_ADDR + 0x8400000000UL)
231#define CIA_SPARSE_MEM_R3 (IDENT_ADDR + 0x8500000000UL)
232#define CIA_DENSE_MEM (IDENT_ADDR + 0x8600000000UL)
233#define CIA_BW_MEM (IDENT_ADDR + 0x8800000000UL)
234#define CIA_BW_IO (IDENT_ADDR + 0x8900000000UL)
235#define CIA_BW_CFG_0 (IDENT_ADDR + 0x8a00000000UL)
236#define CIA_BW_CFG_1 (IDENT_ADDR + 0x8b00000000UL)
237
238/*
239 * ALCOR's GRU ASIC registers
240 */
241#define GRU_INT_REQ (IDENT_ADDR + 0x8780000000UL)
242#define GRU_INT_MASK (IDENT_ADDR + 0x8780000040UL)
243#define GRU_INT_EDGE (IDENT_ADDR + 0x8780000080UL)
244#define GRU_INT_HILO (IDENT_ADDR + 0x87800000C0UL)
245#define GRU_INT_CLEAR (IDENT_ADDR + 0x8780000100UL)
246
247#define GRU_CACHE_CNFG (IDENT_ADDR + 0x8780000200UL)
248#define GRU_SCR (IDENT_ADDR + 0x8780000300UL)
249#define GRU_LED (IDENT_ADDR + 0x8780000800UL)
250#define GRU_RESET (IDENT_ADDR + 0x8780000900UL)
251
252#define ALCOR_GRU_INT_REQ_BITS 0x800fffffUL
253#define XLT_GRU_INT_REQ_BITS 0x80003fffUL
254#define GRU_INT_REQ_BITS (alpha_mv.sys.cia.gru_int_req_bits+0)
255
256/*
257 * PYXIS interrupt control registers
258 */
259#define PYXIS_INT_REQ (IDENT_ADDR + 0x87A0000000UL)
260#define PYXIS_INT_MASK (IDENT_ADDR + 0x87A0000040UL)
261#define PYXIS_INT_HILO (IDENT_ADDR + 0x87A00000C0UL)
262#define PYXIS_INT_ROUTE (IDENT_ADDR + 0x87A0000140UL)
263#define PYXIS_GPO (IDENT_ADDR + 0x87A0000180UL)
264#define PYXIS_INT_CNFG (IDENT_ADDR + 0x87A00001C0UL)
265#define PYXIS_RT_COUNT (IDENT_ADDR + 0x87A0000200UL)
266#define PYXIS_INT_TIME (IDENT_ADDR + 0x87A0000240UL)
267#define PYXIS_IIC_CTRL (IDENT_ADDR + 0x87A00002C0UL)
268#define PYXIS_RESET (IDENT_ADDR + 0x8780000900UL)
269
270/* Offset between ram physical addresses and pci64 DAC bus addresses. */
271#define PYXIS_DAC_OFFSET (1UL << 40)
272
273/*
274 * Data structure for handling CIA machine checks.
275 */
276
/* System-specific info. */
/* CIA machine-check logout frame; fields mirror (by name) the
   CIA_IOC_* error registers defined above, captured at mcheck time. */
struct el_CIA_sysdata_mcheck {
	unsigned long cpu_err0;		/* CIA_IOC_CPU_ERR0 */
	unsigned long cpu_err1;		/* CIA_IOC_CPU_ERR1 */
	unsigned long cia_err;		/* CIA_IOC_CIA_ERR (CIA_ERR_* bits) */
	unsigned long cia_stat;		/* CIA_IOC_CIA_STAT */
	unsigned long err_mask;		/* CIA_IOC_ERR_MASK */
	unsigned long cia_syn;		/* CIA_IOC_CIA_SYN */
	unsigned long mem_err0;		/* CIA_IOC_MEM_ERR0 */
	unsigned long mem_err1;		/* CIA_IOC_MEM_ERR1 */
	unsigned long pci_err0;		/* CIA_IOC_PCI_ERR0 */
	unsigned long pci_err1;		/* CIA_IOC_PCI_ERR1 */
	unsigned long pci_err2;		/* NOTE(review): no CIA_IOC_PCI_ERR2
					   CSR is defined above, only
					   PCI_ERR3 -- verify mapping */
};
291
292
293#ifdef __KERNEL__
294
295#ifndef __EXTERN_INLINE
296/* Do not touch, this should *NOT* be static inline */
297#define __EXTERN_INLINE extern inline
298#define __IO_EXTERN_INLINE
299#endif
300
301/*
302 * I/O functions:
303 *
304 * CIA (the 2117x PCI/memory support chipset for the EV5 (21164)
 * series of processors) uses a sparse address mapping scheme to
306 * get at PCI memory and I/O.
307 */
308
309/*
310 * Memory functions. 64-bit and 32-bit accesses are done through
311 * dense memory space, everything else through sparse space.
312 *
313 * For reading and writing 8 and 16 bit quantities we need to
314 * go through one of the three sparse address mapping regions
315 * and use the HAE_MEM CSR to provide some bits of the address.
316 * The following few routines use only sparse address region 1
317 * which gives 1Gbyte of accessible space which relates exactly
318 * to the amount of PCI memory mapping *into* system address space.
319 * See p 6-17 of the specification but it looks something like this:
320 *
321 * 21164 Address:
322 *
323 * 3 2 1
324 * 9876543210987654321098765432109876543210
325 * 1ZZZZ0.PCI.QW.Address............BBLL
326 *
327 * ZZ = SBZ
328 * BB = Byte offset
329 * LL = Transfer length
330 *
331 * PCI Address:
332 *
333 * 3 2 1
334 * 10987654321098765432109876543210
335 * HHH....PCI.QW.Address........ 00
336 *
337 * HHH = 31:29 HAE_MEM CSR
338 *
339 */
340
341#define vip volatile int __force *
342#define vuip volatile unsigned int __force *
343#define vulp volatile unsigned long __force *
344
345__EXTERN_INLINE unsigned int cia_ioread8(void __iomem *xaddr)
346{
347 unsigned long addr = (unsigned long) xaddr;
348 unsigned long result, base_and_type;
349
350 if (addr >= CIA_DENSE_MEM)
351 base_and_type = CIA_SPARSE_MEM + 0x00;
352 else
353 base_and_type = CIA_IO + 0x00;
354
355 /* We can use CIA_MEM_R1_MASK for io ports too, since it is large
356 enough to cover all io ports, and smaller than CIA_IO. */
357 addr &= CIA_MEM_R1_MASK;
358 result = *(vip) ((addr << 5) + base_and_type);
359 return __kernel_extbl(result, addr & 3);
360}
361
362__EXTERN_INLINE void cia_iowrite8(u8 b, void __iomem *xaddr)
363{
364 unsigned long addr = (unsigned long) xaddr;
365 unsigned long w, base_and_type;
366
367 if (addr >= CIA_DENSE_MEM)
368 base_and_type = CIA_SPARSE_MEM + 0x00;
369 else
370 base_and_type = CIA_IO + 0x00;
371
372 addr &= CIA_MEM_R1_MASK;
373 w = __kernel_insbl(b, addr & 3);
374 *(vuip) ((addr << 5) + base_and_type) = w;
375}
376
377__EXTERN_INLINE unsigned int cia_ioread16(void __iomem *xaddr)
378{
379 unsigned long addr = (unsigned long) xaddr;
380 unsigned long result, base_and_type;
381
382 if (addr >= CIA_DENSE_MEM)
383 base_and_type = CIA_SPARSE_MEM + 0x08;
384 else
385 base_and_type = CIA_IO + 0x08;
386
387 addr &= CIA_MEM_R1_MASK;
388 result = *(vip) ((addr << 5) + base_and_type);
389 return __kernel_extwl(result, addr & 3);
390}
391
392__EXTERN_INLINE void cia_iowrite16(u16 b, void __iomem *xaddr)
393{
394 unsigned long addr = (unsigned long) xaddr;
395 unsigned long w, base_and_type;
396
397 if (addr >= CIA_DENSE_MEM)
398 base_and_type = CIA_SPARSE_MEM + 0x08;
399 else
400 base_and_type = CIA_IO + 0x08;
401
402 addr &= CIA_MEM_R1_MASK;
403 w = __kernel_inswl(b, addr & 3);
404 *(vuip) ((addr << 5) + base_and_type) = w;
405}
406
407__EXTERN_INLINE unsigned int cia_ioread32(void __iomem *xaddr)
408{
409 unsigned long addr = (unsigned long) xaddr;
410 if (addr < CIA_DENSE_MEM)
411 addr = ((addr - CIA_IO) << 5) + CIA_IO + 0x18;
412 return *(vuip)addr;
413}
414
415__EXTERN_INLINE void cia_iowrite32(u32 b, void __iomem *xaddr)
416{
417 unsigned long addr = (unsigned long) xaddr;
418 if (addr < CIA_DENSE_MEM)
419 addr = ((addr - CIA_IO) << 5) + CIA_IO + 0x18;
420 *(vuip)addr = b;
421}
422
423__EXTERN_INLINE void __iomem *cia_ioportmap(unsigned long addr)
424{
425 return (void __iomem *)(addr + CIA_IO);
426}
427
428__EXTERN_INLINE void __iomem *cia_ioremap(unsigned long addr,
429 unsigned long size)
430{
431 return (void __iomem *)(addr + CIA_DENSE_MEM);
432}
433
434__EXTERN_INLINE int cia_is_ioaddr(unsigned long addr)
435{
436 return addr >= IDENT_ADDR + 0x8000000000UL;
437}
438
439__EXTERN_INLINE int cia_is_mmio(const volatile void __iomem *addr)
440{
441 return (unsigned long)addr >= CIA_DENSE_MEM;
442}
443
444__EXTERN_INLINE void __iomem *cia_bwx_ioportmap(unsigned long addr)
445{
446 return (void __iomem *)(addr + CIA_BW_IO);
447}
448
449__EXTERN_INLINE void __iomem *cia_bwx_ioremap(unsigned long addr,
450 unsigned long size)
451{
452 return (void __iomem *)(addr + CIA_BW_MEM);
453}
454
455__EXTERN_INLINE int cia_bwx_is_ioaddr(unsigned long addr)
456{
457 return addr >= IDENT_ADDR + 0x8000000000UL;
458}
459
460__EXTERN_INLINE int cia_bwx_is_mmio(const volatile void __iomem *addr)
461{
462 return (unsigned long)addr < CIA_BW_IO;
463}
464
465#undef vip
466#undef vuip
467#undef vulp
468
469#undef __IO_PREFIX
470#define __IO_PREFIX cia
471#define cia_trivial_rw_bw 2
472#define cia_trivial_rw_lq 1
473#define cia_trivial_io_bw 0
474#define cia_trivial_io_lq 0
475#define cia_trivial_iounmap 1
476#include <asm/io_trivial.h>
477
478#undef __IO_PREFIX
479#define __IO_PREFIX cia_bwx
480#define cia_bwx_trivial_rw_bw 1
481#define cia_bwx_trivial_rw_lq 1
482#define cia_bwx_trivial_io_bw 1
483#define cia_bwx_trivial_io_lq 1
484#define cia_bwx_trivial_iounmap 1
485#include <asm/io_trivial.h>
486
487#undef __IO_PREFIX
488#ifdef CONFIG_ALPHA_PYXIS
489#define __IO_PREFIX cia_bwx
490#else
491#define __IO_PREFIX cia
492#endif
493
494#ifdef __IO_EXTERN_INLINE
495#undef __EXTERN_INLINE
496#undef __IO_EXTERN_INLINE
497#endif
498
499#endif /* __KERNEL__ */
500
501#endif /* __ALPHA_CIA__H__ */
diff --git a/include/asm-alpha/core_irongate.h b/include/asm-alpha/core_irongate.h
new file mode 100644
index 000000000000..24b2db541501
--- /dev/null
+++ b/include/asm-alpha/core_irongate.h
@@ -0,0 +1,232 @@
1#ifndef __ALPHA_IRONGATE__H__
2#define __ALPHA_IRONGATE__H__
3
4#include <linux/types.h>
5#include <asm/compiler.h>
6
7/*
8 * IRONGATE is the internal name for the AMD-751 K7 core logic chipset
9 * which provides memory controller and PCI access for NAUTILUS-based
10 * EV6 (21264) systems.
11 *
12 * This file is based on:
13 *
14 * IronGate management library, (c) 1999 Alpha Processor, Inc.
15 * Copyright (C) 1999 Alpha Processor, Inc.,
16 * (David Daniel, Stig Telfer, Soohoon Lee)
17 */
18
19/*
20 * The 21264 supports, and internally recognizes, a 44-bit physical
21 * address space that is divided equally between memory address space
22 * and I/O address space. Memory address space resides in the lower
23 * half of the physical address space (PA[43]=0) and I/O address space
24 * resides in the upper half of the physical address space (PA[43]=1).
25 */
26
27/*
28 * Irongate CSR map. Some of the CSRs are 8 or 16 bits, but all access
29 * through the routines given is 32-bit.
30 *
31 * The first 0x40 bytes are standard as per the PCI spec.
32 */
33
34typedef volatile __u32 igcsr32;
35
/* AMD-751 host-bridge (bus 0) CSR layout.  The first 0x40 bytes are
   standard as per the PCI spec; all access is 32-bit (igcsr32).  */
typedef struct {
	igcsr32 dev_vendor;		/* 0x00 - device ID, vendor ID */
	igcsr32 stat_cmd;		/* 0x04 - status, command */
	igcsr32 class;			/* 0x08 - class code, rev ID */
	igcsr32 latency;		/* 0x0C - header type, PCI latency */
	igcsr32 bar0;			/* 0x10 - BAR0 - AGP */
	igcsr32 bar1;			/* 0x14 - BAR1 - GART */
	igcsr32 bar2;			/* 0x18 - Power Management reg block */

	igcsr32 rsrvd0[6];		/* 0x1C-0x33 reserved */

	igcsr32 capptr;			/* 0x34 - Capabilities pointer */

	igcsr32 rsrvd1[2];		/* 0x38-0x3F reserved */

	igcsr32 bacsr10;		/* 0x40 - base address chip selects */
	igcsr32 bacsr32;		/* 0x44 - base address chip selects */
	igcsr32 bacsr54_eccms761;	/* 0x48 - 751: base addr. chip selects
						  761: ECC, mode/status */

	igcsr32 rsrvd2[1];		/* 0x4C-0x4F reserved */

	igcsr32 drammap;		/* 0x50 - address mapping control */
	igcsr32 dramtm;			/* 0x54 - timing, driver strength */
	igcsr32 dramms;			/* 0x58 - DRAM mode/status */

	igcsr32 rsrvd3[1];		/* 0x5C-0x5F reserved */

	igcsr32 biu0;			/* 0x60 - bus interface unit */
	igcsr32 biusip;			/* 0x64 - Serial initialisation pkt */

	igcsr32 rsrvd4[2];		/* 0x68-0x6F reserved */

	igcsr32 mro;			/* 0x70 - memory request optimiser */

	igcsr32 rsrvd5[3];		/* 0x74-0x7F reserved */

	igcsr32 whami;			/* 0x80 - who am I */
	igcsr32 pciarb;			/* 0x84 - PCI arbitration control */
	igcsr32 pcicfg;			/* 0x88 - PCI config status */

	igcsr32 rsrvd6[4];		/* 0x8C-0x9B reserved */

	igcsr32 pci_mem;		/* 0x9C - PCI top of memory,
						  761 only */

	/* AGP (bus 1) control registers */
	igcsr32 agpcap;			/* 0xA0 - AGP Capability Identifier */
	igcsr32 agpstat;		/* 0xA4 - AGP status register */
	igcsr32 agpcmd;			/* 0xA8 - AGP control register */
	igcsr32 agpva;			/* 0xAC - AGP Virtual Address Space */
	igcsr32 agpmode;		/* 0xB0 - AGP/GART mode control */
} Irongate0;
89
90
/* AMD-751 PCI-to-PCI (AGP) bridge CSR layout -- a standard type-1
   configuration header, accessed 32 bits at a time.  */
typedef struct {

	igcsr32 dev_vendor;		/* 0x00 - Device and Vendor IDs */
	igcsr32 stat_cmd;		/* 0x04 - Status and Command regs */
	igcsr32 class;			/* 0x08 - subclass, baseclass etc */
	igcsr32 htype;			/* 0x0C - header type (at 0x0E) */
	igcsr32 rsrvd0[2];		/* 0x10-0x17 reserved */
	igcsr32 busnos;			/* 0x18 - Primary, secondary bus nos */
	igcsr32 io_baselim_regs;	/* 0x1C - IO base, IO lim, AGP status */
	igcsr32	mem_baselim;		/* 0x20 - memory base, memory lim */
	igcsr32 pfmem_baselim;		/* 0x24 - prefetchable base, lim */
	igcsr32 rsrvd1[2];		/* 0x28-0x2F reserved */
	igcsr32 io_baselim;		/* 0x30 - IO base, IO limit */
	igcsr32 rsrvd2[2];		/* 0x34-0x3B - reserved */
	igcsr32 interrupt;		/* 0x3C - interrupt, PCI bridge ctrl */

} Irongate1;
108
109extern igcsr32 *IronECC;
110
111/*
112 * Memory spaces:
113 */
114
115/* Irongate is consistent with a subset of the Tsunami memory map */
116#ifdef USE_48_BIT_KSEG
117#define IRONGATE_BIAS 0x80000000000UL
118#else
119#define IRONGATE_BIAS 0x10000000000UL
120#endif
121
122
123#define IRONGATE_MEM (IDENT_ADDR | IRONGATE_BIAS | 0x000000000UL)
124#define IRONGATE_IACK_SC (IDENT_ADDR | IRONGATE_BIAS | 0x1F8000000UL)
125#define IRONGATE_IO (IDENT_ADDR | IRONGATE_BIAS | 0x1FC000000UL)
126#define IRONGATE_CONF (IDENT_ADDR | IRONGATE_BIAS | 0x1FE000000UL)
127
128/*
129 * PCI Configuration space accesses are formed like so:
130 *
131 * 0x1FE << 24 | : 2 2 2 2 1 1 1 1 : 1 1 1 1 1 1 0 0 : 0 0 0 0 0 0 0 0 :
132 * : 3 2 1 0 9 8 7 6 : 5 4 3 2 1 0 9 8 : 7 6 5 4 3 2 1 0 :
 *          ---bus number--- -device-- -fun- ---register----
134 */
135
136#define IGCSR(dev,fun,reg) ( IRONGATE_CONF | \
137 ((dev)<<11) | \
138 ((fun)<<8) | \
139 (reg) )
140
141#define IRONGATE0 ((Irongate0 *) IGCSR(0, 0, 0))
142#define IRONGATE1 ((Irongate1 *) IGCSR(1, 0, 0))
143
144/*
145 * Data structure for handling IRONGATE machine checks:
146 * This is the standard OSF logout frame
147 */
148
149#define SCB_Q_SYSERR 0x620 /* OSF definitions */
150#define SCB_Q_PROCERR 0x630
151#define SCB_Q_SYSMCHK 0x660
152#define SCB_Q_PROCMCHK 0x670
153
/* Irongate machine-check logout frame -- the standard OSF logout
   frame layout (see SCB_Q_* vector definitions above).  */
struct el_IRONGATE_sysdata_mcheck {
	__u32 FrameSize;                 /* Bytes, including this field */
	__u32 FrameFlags;                /* <31> = Retry, <30> = Second Error */
	__u32 CpuOffset;                 /* Offset to CPU-specific info */
	__u32 SystemOffset;              /* Offset to system-specific info */
	__u32 MCHK_Code;
	__u32 MCHK_Frame_Rev;
	__u64 I_STAT;
	__u64 DC_STAT;
	__u64 C_ADDR;
	__u64 DC1_SYNDROME;
	__u64 DC0_SYNDROME;
	__u64 C_STAT;
	__u64 C_STS;
	__u64 RESERVED0;
	__u64 EXC_ADDR;
	__u64 IER_CM;
	__u64 ISUM;
	__u64 MM_STAT;
	__u64 PAL_BASE;
	__u64 I_CTL;
	__u64 PCTX;
};
177
178
179#ifdef __KERNEL__
180
181#ifndef __EXTERN_INLINE
182#define __EXTERN_INLINE extern inline
183#define __IO_EXTERN_INLINE
184#endif
185
186/*
187 * I/O functions:
188 *
189 * IRONGATE (AMD-751) PCI/memory support chip for the EV6 (21264) and
190 * K7 can only use linear accesses to get at PCI memory and I/O spaces.
191 */
192
193/*
194 * Memory functions. All accesses are done through linear space.
195 */
196
197__EXTERN_INLINE void __iomem *irongate_ioportmap(unsigned long addr)
198{
199 return (void __iomem *)(addr + IRONGATE_IO);
200}
201
202extern void __iomem *irongate_ioremap(unsigned long addr, unsigned long size);
203extern void irongate_iounmap(volatile void __iomem *addr);
204
205__EXTERN_INLINE int irongate_is_ioaddr(unsigned long addr)
206{
207 return addr >= IRONGATE_MEM;
208}
209
210__EXTERN_INLINE int irongate_is_mmio(const volatile void __iomem *xaddr)
211{
212 unsigned long addr = (unsigned long)xaddr;
213 return addr < IRONGATE_IO || addr >= IRONGATE_CONF;
214}
215
216#undef __IO_PREFIX
217#define __IO_PREFIX irongate
218#define irongate_trivial_rw_bw 1
219#define irongate_trivial_rw_lq 1
220#define irongate_trivial_io_bw 1
221#define irongate_trivial_io_lq 1
222#define irongate_trivial_iounmap 0
223#include <asm/io_trivial.h>
224
225#ifdef __IO_EXTERN_INLINE
226#undef __EXTERN_INLINE
227#undef __IO_EXTERN_INLINE
228#endif
229
230#endif /* __KERNEL__ */
231
232#endif /* __ALPHA_IRONGATE__H__ */
diff --git a/include/asm-alpha/core_lca.h b/include/asm-alpha/core_lca.h
new file mode 100644
index 000000000000..f7cb4b460954
--- /dev/null
+++ b/include/asm-alpha/core_lca.h
@@ -0,0 +1,361 @@
1#ifndef __ALPHA_LCA__H__
2#define __ALPHA_LCA__H__
3
4#include <asm/system.h>
5#include <asm/compiler.h>
6
7/*
8 * Low Cost Alpha (LCA) definitions (these apply to 21066 and 21068,
9 * for example).
10 *
11 * This file is based on:
12 *
13 * DECchip 21066 and DECchip 21068 Alpha AXP Microprocessors
14 * Hardware Reference Manual; Digital Equipment Corp.; May 1994;
15 * Maynard, MA; Order Number: EC-N2681-71.
16 */
17
18/*
19 * NOTE: The LCA uses a Host Address Extension (HAE) register to access
20 * PCI addresses that are beyond the first 27 bits of address
21 * space. Updating the HAE requires an external cycle (and
22 * a memory barrier), which tends to be slow. Instead of updating
23 * it on each sparse memory access, we keep the current HAE value
24 * cached in variable cache_hae. Only if the cached HAE differs
 * from the desired HAE value do we actually update the HAE register.
26 * The HAE register is preserved by the interrupt handler entry/exit
27 * code, so this scheme works even in the presence of interrupts.
28 *
29 * Dense memory space doesn't require the HAE, but is restricted to
30 * aligned 32 and 64 bit accesses. Special Cycle and Interrupt
31 * Acknowledge cycles may also require the use of the HAE. The LCA
32 * limits I/O address space to the bottom 24 bits of address space,
33 * but this easily covers the 16 bit ISA I/O address space.
34 */
35
36/*
37 * NOTE 2! The memory operations do not set any memory barriers, as
38 * it's not needed for cases like a frame buffer that is essentially
39 * memory-like. You need to do them by hand if the operations depend
40 * on ordering.
41 *
42 * Similarly, the port I/O operations do a "mb" only after a write
43 * operation: if an mb is needed before (as in the case of doing
44 * memory mapped I/O first, and then a port I/O operation to the same
45 * device), it needs to be done by hand.
46 *
47 * After the above has bitten me 100 times, I'll give up and just do
48 * the mb all the time, but right now I'm hoping this will work out.
49 * Avoiding mb's may potentially be a noticeable speed improvement,
50 * but I can't honestly say I've tested it.
51 *
52 * Handling interrupts that need to do mb's to synchronize to
53 * non-interrupts is another fun race area. Don't do it (because if
54 * you do, I'll have to do *everything* with interrupts disabled,
55 * ugh).
56 */
57
58/*
59 * Memory Controller registers:
60 */
61#define LCA_MEM_BCR0 (IDENT_ADDR + 0x120000000UL)
62#define LCA_MEM_BCR1 (IDENT_ADDR + 0x120000008UL)
63#define LCA_MEM_BCR2 (IDENT_ADDR + 0x120000010UL)
64#define LCA_MEM_BCR3 (IDENT_ADDR + 0x120000018UL)
65#define LCA_MEM_BMR0 (IDENT_ADDR + 0x120000020UL)
66#define LCA_MEM_BMR1 (IDENT_ADDR + 0x120000028UL)
67#define LCA_MEM_BMR2 (IDENT_ADDR + 0x120000030UL)
68#define LCA_MEM_BMR3 (IDENT_ADDR + 0x120000038UL)
69#define LCA_MEM_BTR0 (IDENT_ADDR + 0x120000040UL)
70#define LCA_MEM_BTR1 (IDENT_ADDR + 0x120000048UL)
71#define LCA_MEM_BTR2 (IDENT_ADDR + 0x120000050UL)
72#define LCA_MEM_BTR3 (IDENT_ADDR + 0x120000058UL)
73#define LCA_MEM_GTR (IDENT_ADDR + 0x120000060UL)
74#define LCA_MEM_ESR (IDENT_ADDR + 0x120000068UL)
75#define LCA_MEM_EAR (IDENT_ADDR + 0x120000070UL)
76#define LCA_MEM_CAR (IDENT_ADDR + 0x120000078UL)
77#define LCA_MEM_VGR (IDENT_ADDR + 0x120000080UL)
78#define LCA_MEM_PLM (IDENT_ADDR + 0x120000088UL)
79#define LCA_MEM_FOR (IDENT_ADDR + 0x120000090UL)
80
81/*
82 * I/O Controller registers:
83 */
84#define LCA_IOC_HAE (IDENT_ADDR + 0x180000000UL)
85#define LCA_IOC_CONF (IDENT_ADDR + 0x180000020UL)
86#define LCA_IOC_STAT0 (IDENT_ADDR + 0x180000040UL)
87#define LCA_IOC_STAT1 (IDENT_ADDR + 0x180000060UL)
88#define LCA_IOC_TBIA (IDENT_ADDR + 0x180000080UL)
89#define LCA_IOC_TB_ENA (IDENT_ADDR + 0x1800000a0UL)
90#define LCA_IOC_SFT_RST (IDENT_ADDR + 0x1800000c0UL)
91#define LCA_IOC_PAR_DIS (IDENT_ADDR + 0x1800000e0UL)
92#define LCA_IOC_W_BASE0 (IDENT_ADDR + 0x180000100UL)
93#define LCA_IOC_W_BASE1 (IDENT_ADDR + 0x180000120UL)
94#define LCA_IOC_W_MASK0 (IDENT_ADDR + 0x180000140UL)
95#define LCA_IOC_W_MASK1 (IDENT_ADDR + 0x180000160UL)
96#define LCA_IOC_T_BASE0 (IDENT_ADDR + 0x180000180UL)
97#define LCA_IOC_T_BASE1 (IDENT_ADDR + 0x1800001a0UL)
98#define LCA_IOC_TB_TAG0 (IDENT_ADDR + 0x188000000UL)
99#define LCA_IOC_TB_TAG1 (IDENT_ADDR + 0x188000020UL)
100#define LCA_IOC_TB_TAG2 (IDENT_ADDR + 0x188000040UL)
101#define LCA_IOC_TB_TAG3 (IDENT_ADDR + 0x188000060UL)
102#define LCA_IOC_TB_TAG4 (IDENT_ADDR + 0x188000070UL)
103#define LCA_IOC_TB_TAG5 (IDENT_ADDR + 0x1880000a0UL)
104#define LCA_IOC_TB_TAG6 (IDENT_ADDR + 0x1880000c0UL)
105#define LCA_IOC_TB_TAG7 (IDENT_ADDR + 0x1880000e0UL)
106
107/*
108 * Memory spaces:
109 */
110#define LCA_IACK_SC (IDENT_ADDR + 0x1a0000000UL)
111#define LCA_CONF (IDENT_ADDR + 0x1e0000000UL)
112#define LCA_IO (IDENT_ADDR + 0x1c0000000UL)
113#define LCA_SPARSE_MEM (IDENT_ADDR + 0x200000000UL)
114#define LCA_DENSE_MEM (IDENT_ADDR + 0x300000000UL)
115
116/*
117 * Bit definitions for I/O Controller status register 0:
118 */
119#define LCA_IOC_STAT0_CMD 0xf
120#define LCA_IOC_STAT0_ERR (1<<4)
121#define LCA_IOC_STAT0_LOST (1<<5)
122#define LCA_IOC_STAT0_THIT (1<<6)
123#define LCA_IOC_STAT0_TREF (1<<7)
124#define LCA_IOC_STAT0_CODE_SHIFT 8
125#define LCA_IOC_STAT0_CODE_MASK 0x7
126#define LCA_IOC_STAT0_P_NBR_SHIFT 13
127#define LCA_IOC_STAT0_P_NBR_MASK 0x7ffff
128
129#define LCA_HAE_ADDRESS LCA_IOC_HAE
130
/* LCA PMR Power Management register defines */
#define LCA_PMR_ADDR	(IDENT_ADDR + 0x120000098UL)
#define LCA_PMR_PDIV    0x7                     /* Primary clock divisor */
#define LCA_PMR_ODIV    0x38                    /* Override clock divisor */
#define LCA_PMR_INTO    0x40                    /* Interrupt override */
#define LCA_PMR_DMAO    0x80                    /* DMA override */
#define LCA_PMR_OCCEB   0xffff0000L             /* Override cycle counter - even bits */
#define LCA_PMR_OCCOB   0xffff000000000000L     /* Override cycle counter - odd bits */
#define LCA_PMR_PRIMARY_MASK    0xfffffffffffffff8L
140
141/* LCA PMR Macros */
142
143#define LCA_READ_PMR (*(volatile unsigned long *)LCA_PMR_ADDR)
144#define LCA_WRITE_PMR(d) (*((volatile unsigned long *)LCA_PMR_ADDR) = (d))
145
146#define LCA_GET_PRIMARY(r) ((r) & LCA_PMR_PDIV)
147#define LCA_GET_OVERRIDE(r) (((r) >> 3) & LCA_PMR_PDIV)
148#define LCA_SET_PRIMARY_CLOCK(r, c) ((r) = (((r) & LCA_PMR_PRIMARY_MASK)|(c)))
149
150/* LCA PMR Divisor values */
151#define LCA_PMR_DIV_1 0x0
152#define LCA_PMR_DIV_1_5 0x1
153#define LCA_PMR_DIV_2 0x2
154#define LCA_PMR_DIV_4 0x3
155#define LCA_PMR_DIV_8 0x4
156#define LCA_PMR_DIV_16 0x5
157#define LCA_PMR_DIV_MIN DIV_1
158#define LCA_PMR_DIV_MAX DIV_16
159
160
161/*
162 * Data structure for handling LCA machine checks. Correctable errors
163 * result in a short logout frame, uncorrectable ones in a long one.
164 */
/* Short logout frame, produced for correctable errors. */
struct el_lca_mcheck_short {
	struct el_common h;		/* common logout header */
	unsigned long esr;		/* error-status register */
	unsigned long ear;		/* error-address register */
	unsigned long dc_stat;		/* dcache status register */
	unsigned long ioc_stat0;	/* I/O controller status register 0 */
	unsigned long ioc_stat1;	/* I/O controller status register 1 */
};
173
/* Long logout frame, produced for uncorrectable errors. */
struct el_lca_mcheck_long {
	struct el_common h;		/* common logout header */
	unsigned long pt[31];		/* PAL temps */
	unsigned long exc_addr;		/* exception address */
	unsigned long pad1[3];
	unsigned long pal_base;		/* PALcode base address */
	unsigned long hier;		/* hw interrupt enable */
	unsigned long hirr;		/* hw interrupt request */
	unsigned long mm_csr;		/* MMU control & status */
	unsigned long dc_stat;		/* data cache status */
	unsigned long dc_addr;		/* data cache addr register */
	unsigned long abox_ctl;		/* address box control register */
	unsigned long esr;		/* error status register */
	unsigned long ear;		/* error address register */
	unsigned long car;		/* cache control register */
	unsigned long ioc_stat0;	/* I/O controller status register 0 */
	unsigned long ioc_stat1;	/* I/O controller status register 1 */
	unsigned long va;		/* virtual address register */
};
193
194union el_lca {
195 struct el_common * c;
196 struct el_lca_mcheck_long * l;
197 struct el_lca_mcheck_short * s;
198};
199
200#ifdef __KERNEL__
201
202#ifndef __EXTERN_INLINE
203#define __EXTERN_INLINE extern inline
204#define __IO_EXTERN_INLINE
205#endif
206
207/*
208 * I/O functions:
209 *
210 * Unlike Jensen, the Noname machines have no concept of local
211 * I/O---everything goes over the PCI bus.
212 *
213 * There is plenty room for optimization here. In particular,
214 * the Alpha's insb/insw/extb/extw should be useful in moving
215 * data to/from the right byte-lanes.
216 */
217
218#define vip volatile int __force *
219#define vuip volatile unsigned int __force *
220#define vulp volatile unsigned long __force *
221
/*
 * Program the Hardware Address Extension for a sparse-space access
 * above 16MB: write the high bits to the HAE and strip them from the
 * offset.  NOTE: deliberately non-hygienic -- this macro reads and
 * MODIFIES a local variable named 'addr' in the calling function.
 */
#define LCA_SET_HAE \
	do { \
		if (addr >= (1UL << 24)) { \
			unsigned long msb = addr & 0xf8000000; \
			addr -= msb; \
			set_hae(msb); \
		} \
	} while (0)
230
231
/*
 * Read one byte via sparse space.  A dense-memory cookie is rebased
 * into the sparse MEM region (setting the HAE for offsets above 16MB);
 * anything else is treated as a port number relative to LCA_IO.  The
 * correct byte lane is selected with extbl.
 *
 * NOTE: the local must be named 'addr' -- LCA_SET_HAE modifies it.
 */
__EXTERN_INLINE unsigned int lca_ioread8(void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	unsigned long result, base_and_type;

	if (addr >= LCA_DENSE_MEM) {
		addr -= LCA_DENSE_MEM;
		LCA_SET_HAE;
		base_and_type = LCA_SPARSE_MEM + 0x00;	/* 0x00 = byte length */
	} else {
		addr -= LCA_IO;
		base_and_type = LCA_IO + 0x00;
	}

	/* Sparse encoding: offset << 5; low bits pick the byte lane. */
	result = *(vip) ((addr << 5) + base_and_type);
	return __kernel_extbl(result, addr & 3);
}

/* Write one byte via sparse space; insbl replicates it into the lane. */
__EXTERN_INLINE void lca_iowrite8(u8 b, void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	unsigned long w, base_and_type;

	if (addr >= LCA_DENSE_MEM) {
		addr -= LCA_DENSE_MEM;
		LCA_SET_HAE;
		base_and_type = LCA_SPARSE_MEM + 0x00;
	} else {
		addr -= LCA_IO;
		base_and_type = LCA_IO + 0x00;
	}

	w = __kernel_insbl(b, addr & 3);
	*(vuip) ((addr << 5) + base_and_type) = w;
}

/* Read a 16-bit word via sparse space (0x08 = word transfer length). */
__EXTERN_INLINE unsigned int lca_ioread16(void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	unsigned long result, base_and_type;

	if (addr >= LCA_DENSE_MEM) {
		addr -= LCA_DENSE_MEM;
		LCA_SET_HAE;
		base_and_type = LCA_SPARSE_MEM + 0x08;
	} else {
		addr -= LCA_IO;
		base_and_type = LCA_IO + 0x08;
	}

	result = *(vip) ((addr << 5) + base_and_type);
	return __kernel_extwl(result, addr & 3);
}

/* Write a 16-bit word via sparse space. */
__EXTERN_INLINE void lca_iowrite16(u16 b, void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	unsigned long w, base_and_type;

	if (addr >= LCA_DENSE_MEM) {
		addr -= LCA_DENSE_MEM;
		LCA_SET_HAE;
		base_and_type = LCA_SPARSE_MEM + 0x08;
	} else {
		addr -= LCA_IO;
		base_and_type = LCA_IO + 0x08;
	}

	w = __kernel_inswl(b, addr & 3);
	*(vuip) ((addr << 5) + base_and_type) = w;
}
303
304__EXTERN_INLINE unsigned int lca_ioread32(void __iomem *xaddr)
305{
306 unsigned long addr = (unsigned long) xaddr;
307 if (addr < LCA_DENSE_MEM)
308 addr = ((addr - LCA_IO) << 5) + LCA_IO + 0x18;
309 return *(vuip)addr;
310}
311
312__EXTERN_INLINE void lca_iowrite32(u32 b, void __iomem *xaddr)
313{
314 unsigned long addr = (unsigned long) xaddr;
315 if (addr < LCA_DENSE_MEM)
316 addr = ((addr - LCA_IO) << 5) + LCA_IO + 0x18;
317 *(vuip)addr = b;
318}
319
320__EXTERN_INLINE void __iomem *lca_ioportmap(unsigned long addr)
321{
322 return (void __iomem *)(addr + LCA_IO);
323}
324
325__EXTERN_INLINE void __iomem *lca_ioremap(unsigned long addr,
326 unsigned long size)
327{
328 return (void __iomem *)(addr + LCA_DENSE_MEM);
329}
330
/*
 * True if 'addr' lies in LCA bus (I/O or MMIO) space.
 * NOTE(review): 0x120000000 is presumably the base of the lowest
 * LCA-mapped region above IDENT_ADDR -- confirm against the LCA
 * region definitions earlier in this file.
 */
__EXTERN_INLINE int lca_is_ioaddr(unsigned long addr)
{
	return addr >= IDENT_ADDR + 0x120000000UL;
}

/* True if 'addr' is PCI memory (dense space) rather than port I/O. */
__EXTERN_INLINE int lca_is_mmio(const volatile void __iomem *addr)
{
	return (unsigned long)addr >= LCA_DENSE_MEM;
}
340
341#undef vip
342#undef vuip
343#undef vulp
344
345#undef __IO_PREFIX
346#define __IO_PREFIX lca
347#define lca_trivial_rw_bw 2
348#define lca_trivial_rw_lq 1
349#define lca_trivial_io_bw 0
350#define lca_trivial_io_lq 0
351#define lca_trivial_iounmap 1
352#include <asm/io_trivial.h>
353
354#ifdef __IO_EXTERN_INLINE
355#undef __EXTERN_INLINE
356#undef __IO_EXTERN_INLINE
357#endif
358
359#endif /* __KERNEL__ */
360
361#endif /* __ALPHA_LCA__H__ */
diff --git a/include/asm-alpha/core_marvel.h b/include/asm-alpha/core_marvel.h
new file mode 100644
index 000000000000..30d55fe7aaf6
--- /dev/null
+++ b/include/asm-alpha/core_marvel.h
@@ -0,0 +1,378 @@
1/*
 * Marvel systems use the IO7 I/O chip, which provides PCI/PCIX/AGP access
3 *
4 * This file is based on:
5 *
6 * Marvel / EV7 System Programmer's Manual
7 * Revision 1.00
8 * 14 May 2001
9 */
10
11#ifndef __ALPHA_MARVEL__H__
12#define __ALPHA_MARVEL__H__
13
14#include <linux/types.h>
15#include <linux/pci.h>
16#include <linux/spinlock.h>
17
18#include <asm/compiler.h>
19
20#define MARVEL_MAX_PIDS 32 /* as long as we rely on 43-bit superpage */
21#define MARVEL_IRQ_VEC_PE_SHIFT (10)
22#define MARVEL_IRQ_VEC_IRQ_MASK ((1 << MARVEL_IRQ_VEC_PE_SHIFT) - 1)
23#define MARVEL_NR_IRQS \
24 (16 + (MARVEL_MAX_PIDS * (1 << MARVEL_IRQ_VEC_PE_SHIFT)))
25
26/*
27 * EV7 RBOX Registers
28 */
/* A single EV7 CSR: 8 bytes of data padded to a 16-byte stride. */
typedef struct {
	volatile unsigned long csr __attribute__((aligned(16)));
} ev7_csr;

/* EV7 RBOX register file, in address order (offsets noted per row of 4). */
typedef struct {
	ev7_csr	RBOX_CFG;		/* 0x0000 */
	ev7_csr	RBOX_NSVC;
	ev7_csr	RBOX_EWVC;
	ev7_csr	RBOX_WHAMI;
	ev7_csr	RBOX_TCTL;		/* 0x0040 */
	ev7_csr	RBOX_INT;
	ev7_csr	RBOX_IMASK;
	ev7_csr	RBOX_IREQ;
	ev7_csr	RBOX_INTQ;		/* 0x0080 */
	ev7_csr	RBOX_INTA;
	ev7_csr	RBOX_IT;
	ev7_csr	RBOX_SCRATCH1;
	ev7_csr	RBOX_SCRATCH2;		/* 0x00c0 */
	ev7_csr	RBOX_L_ERR;
} ev7_csrs;

/*
 * EV7 CSR addressing macros
 */
/* NOTE(review): despite the name, this keeps the low 41 bits
   ((1UL << 41) - 1), not 40 -- confirm against the EV7 manual before
   relying on the name. */
#define EV7_MASK40(addr)        ((addr) & ((1UL << 41) - 1))
#define EV7_KERN_ADDR(addr)	((void *)(IDENT_ADDR | EV7_MASK40(addr)))

#define EV7_PE_MASK		0x1ffUL /* 9 bits ( 256 + mem/io ) */
#define EV7_IPE(pe)		((~((long)(pe)) & EV7_PE_MASK) << 35)

#define EV7_CSR_PHYS(pe, off)	(EV7_IPE(pe) | (0x7FFCUL << 20) | (off))
#define EV7_CSRS_PHYS(pe)	(EV7_CSR_PHYS(pe, 0UL))

#define EV7_CSR_KERN(pe, off)	(EV7_KERN_ADDR(EV7_CSR_PHYS(pe, off)))
#define EV7_CSRS_KERN(pe)	(EV7_KERN_ADDR(EV7_CSRS_PHYS(pe)))

/* Byte offset of a named CSR within ev7_csrs (null-pointer offsetof idiom). */
#define EV7_CSR_OFFSET(name)	((unsigned long)&((ev7_csrs *)NULL)->name.csr)
66
67/*
68 * IO7 registers
69 */
/* A single IO7 CSR: 8 bytes of data padded to a 64-byte stride. */
typedef struct {
	volatile unsigned long csr __attribute__((aligned(64)));
} io7_csr;

/* CSR block for one PCI/AGP port (ports 0-3); offsets per the IO7 spec. */
typedef struct {
	/* I/O Port Control Registers */
	io7_csr	POx_CTRL;		/* 0x0000 */
	io7_csr	POx_CACHE_CTL;
	io7_csr	POx_TIMER;
	io7_csr	POx_IO_ADR_EXT;
	io7_csr	POx_MEM_ADR_EXT;	/* 0x0100 */
	io7_csr	POx_XCAL_CTRL;
	io7_csr	rsvd1[2];		/* ?? spec doesn't show 0x180 */
	io7_csr	POx_DM_SOURCE;		/* 0x0200 */
	io7_csr	POx_DM_DEST;
	io7_csr	POx_DM_SIZE;
	io7_csr	POx_DM_CTRL;
	io7_csr	rsvd2[4];		/* 0x0300 */

	/* AGP Control Registers -- port 3 only */
	io7_csr	AGP_CAP_ID;		/* 0x0400 */
	io7_csr	AGP_STAT;
	io7_csr	AGP_CMD;
	io7_csr	rsvd3;

	/* I/O Port Monitor Registers */
	io7_csr	POx_MONCTL;		/* 0x0500 */
	io7_csr	POx_CTRA;
	io7_csr	POx_CTRB;
	io7_csr	POx_CTR56;
	io7_csr	POx_SCRATCH;		/* 0x0600 */
	io7_csr	POx_XTRA_A;
	io7_csr	POx_XTRA_TS;
	io7_csr	POx_XTRA_Z;
	io7_csr	rsvd4;			/* 0x0700 */
	io7_csr	POx_THRESHA;
	io7_csr	POx_THRESHB;
	io7_csr	rsvd5[33];

	/* System Address Space Window Control Registers */

	io7_csr	POx_WBASE[4];		/* 0x1000 */
	io7_csr	POx_WMASK[4];
	io7_csr	POx_TBASE[4];
	io7_csr	POx_SG_TBIA;
	io7_csr	POx_MSI_WBASE;
	io7_csr	rsvd6[50];

	/* I/O Port Error Registers */
	io7_csr	POx_ERR_SUM;
	io7_csr	POx_FIRST_ERR;
	io7_csr	POx_MSK_HEI;
	io7_csr	POx_TLB_ERR;
	io7_csr	POx_SPL_COMPLT;
	io7_csr	POx_TRANS_SUM;
	io7_csr	POx_FRC_PCI_ERR;
	io7_csr	POx_MULT_ERR;
	io7_csr	rsvd7[8];

	/* I/O Port End of Interrupt Registers */
	io7_csr	EOI_DAT;
	io7_csr	rsvd8[7];
	io7_csr	POx_IACK_SPECIAL;
	io7_csr	rsvd9[103];
} io7_ioport_csrs;

/* Chip-global ("port 7") CSR block. */
typedef struct {
	io7_csr	IO_ASIC_REV;		/* 0x30.0000 */
	io7_csr	IO_SYS_REV;
	io7_csr	SER_CHAIN3;
	io7_csr	PO7_RST1;
	io7_csr	PO7_RST2;		/* 0x30.0100 */
	io7_csr	POx_RST[4];
	io7_csr	IO7_DWNH;
	io7_csr	IO7_MAF;
	io7_csr	IO7_MAF_TO;
	io7_csr	IO7_ACC_CLUMP;		/* 0x30.0300 */
	io7_csr	IO7_PMASK;
	io7_csr	IO7_IOMASK;
	io7_csr	IO7_UPH;
	io7_csr	IO7_UPH_TO;		/* 0x30.0400 */
	io7_csr	RBX_IREQ_OFF;
	io7_csr	RBX_INTA_OFF;
	io7_csr	INT_RTY;
	io7_csr	PO7_MONCTL;		/* 0x30.0500 */
	io7_csr	PO7_CTRA;
	io7_csr	PO7_CTRB;
	io7_csr	PO7_CTR56;
	io7_csr	PO7_SCRATCH;		/* 0x30.0600 */
	io7_csr	PO7_XTRA_A;
	io7_csr	PO7_XTRA_TS;
	io7_csr	PO7_XTRA_Z;
	io7_csr	PO7_PMASK;		/* 0x30.0700 */
	io7_csr	PO7_THRESHA;
	io7_csr	PO7_THRESHB;
	io7_csr	rsvd1[97];
	io7_csr	PO7_ERROR_SUM;		/* 0x30.2000 */
	io7_csr	PO7_BHOLE_MASK;
	io7_csr	PO7_HEI_MSK;
	io7_csr	PO7_CRD_MSK;
	io7_csr	PO7_UNCRR_SYM;		/* 0x30.2100 */
	io7_csr	PO7_CRRCT_SYM;
	io7_csr	PO7_ERR_PKT[2];
	io7_csr	PO7_UGBGE_SYM;		/* 0x30.2200 */
	io7_csr	rsbv2[887];		/* NOTE(review): 'rsbv2' looks like a
					   typo for 'rsvd2'; left as-is since
					   renaming could break users. */
	io7_csr	PO7_LSI_CTL[128];	/* 0x31.0000 */
	io7_csr	rsvd3[123];
	io7_csr	HLT_CTL;		/* 0x31.3ec0 */
	io7_csr	HPI_CTL;		/* 0x31.3f00 */
	io7_csr	CRD_CTL;
	io7_csr	STV_CTL;
	io7_csr	HEI_CTL;
	io7_csr	PO7_MSI_CTL[16];	/* 0x31.4000 */
	io7_csr	rsvd4[240];

	/*
	 * Interrupt Diagnostic / Test
	 */
	struct {
		io7_csr	INT_PND;
		io7_csr	INT_CLR;
		io7_csr	INT_EOI;
		io7_csr	rsvd[29];
	} INT_DIAG[4];
	io7_csr	rsvd5[125];		/* 0x31.a000 */
	io7_csr	MISC_PND;		/* 0x31.b800 */
	io7_csr	rsvd6[31];
	io7_csr	MSI_PND[16];		/* 0x31.c000 */
	io7_csr	rsvd7[16];
	io7_csr	MSI_CLR[16];		/* 0x31.c800 */
} io7_port7_csrs;
201
202/*
203 * IO7 DMA Window Base register (POx_WBASEx)
204 */
/* Bit masks for the fields below. */
#define wbase_m_ena  0x1
#define wbase_m_sg   0x2
#define wbase_m_dac  0x4
#define wbase_m_addr 0xFFF00000
union IO7_POx_WBASE {
	struct {
		unsigned ena : 1;	/* <0>			*/
		unsigned sg : 1;	/* <1>			*/
		unsigned dac : 1;	/* <2> -- window 3 only */
		unsigned rsvd1 : 17;
		unsigned addr : 12;	/* <31:20>		*/
		unsigned rsvd2 : 32;
	} bits;
	unsigned as_long[2];
	unsigned as_quad;	/* NOTE(review): only 32 bits wide here,
				   unlike IO7_IID's 'unsigned long as_quad'
				   -- confirm whether that is intentional. */
};

/*
 * IO7 IID (Interrupt IDentifier) format
 *
 * For level-sensitive interrupts, int_num is encoded as:
 *
 *	bus/port	slot/device	INTx
 *	<7:5>		<4:2>		<1:0>
 */
union IO7_IID {
	struct {
		unsigned int_num : 9;		/* <8:0>	*/
		unsigned tpu_mask : 4;		/* <12:9> rsvd	*/
		unsigned msi : 1;		/* 13		*/
		unsigned ipe : 10;		/* <23:14>	*/
		unsigned long rsvd : 40;	/* <63:24>	*/
	} bits;
	unsigned int as_long[2];
	unsigned long as_quad;
};
241
242/*
243 * IO7 addressing macros
244 */
245#define IO7_KERN_ADDR(addr) (EV7_KERN_ADDR(addr))
246
247#define IO7_PORT_MASK 0x07UL /* 3 bits of port */
248
249#define IO7_IPE(pe) (EV7_IPE(pe))
250#define IO7_IPORT(port) ((~((long)(port)) & IO7_PORT_MASK) << 32)
251
252#define IO7_HOSE(pe, port) (IO7_IPE(pe) | IO7_IPORT(port))
253
254#define IO7_MEM_PHYS(pe, port) (IO7_HOSE(pe, port) | 0x00000000UL)
255#define IO7_CONF_PHYS(pe, port) (IO7_HOSE(pe, port) | 0xFE000000UL)
256#define IO7_IO_PHYS(pe, port) (IO7_HOSE(pe, port) | 0xFF000000UL)
257#define IO7_CSR_PHYS(pe, port, off) \
258 (IO7_HOSE(pe, port) | 0xFF800000UL | (off))
259#define IO7_CSRS_PHYS(pe, port) (IO7_CSR_PHYS(pe, port, 0UL))
260#define IO7_PORT7_CSRS_PHYS(pe) (IO7_CSR_PHYS(pe, 7, 0x300000UL))
261
262#define IO7_MEM_KERN(pe, port) (IO7_KERN_ADDR(IO7_MEM_PHYS(pe, port)))
263#define IO7_CONF_KERN(pe, port) (IO7_KERN_ADDR(IO7_CONF_PHYS(pe, port)))
264#define IO7_IO_KERN(pe, port) (IO7_KERN_ADDR(IO7_IO_PHYS(pe, port)))
265#define IO7_CSR_KERN(pe, port, off) (IO7_KERN_ADDR(IO7_CSR_PHYS(pe,port,off)))
266#define IO7_CSRS_KERN(pe, port) (IO7_KERN_ADDR(IO7_CSRS_PHYS(pe, port)))
267#define IO7_PORT7_CSRS_KERN(pe) (IO7_KERN_ADDR(IO7_PORT7_CSRS_PHYS(pe)))
268
269#define IO7_PLL_RNGA(pll) (((pll) >> 3) & 0x7)
270#define IO7_PLL_RNGB(pll) (((pll) >> 6) & 0x7)
271
272#define IO7_MEM_SPACE (2UL * 1024 * 1024 * 1024) /* 2GB MEM */
273#define IO7_IO_SPACE (8UL * 1024 * 1024) /* 8MB I/O */
274
275
276/*
277 * Offset between ram physical addresses and pci64 DAC addresses
278 */
279#define IO7_DAC_OFFSET (1UL << 49)
280
/*
 * This is needed to satisfy the IO() macro used in initializing the machvec
 */
284#define MARVEL_IACK_SC \
285 ((unsigned long) \
286 (&(((io7_ioport_csrs *)IO7_CSRS_KERN(0, 0))->POx_IACK_SPECIAL)))
287
288#ifdef __KERNEL__
289
290/*
291 * IO7 structs
292 */
293#define IO7_NUM_PORTS 4
294#define IO7_AGP_PORT 3
295
/* Per-port (hose) state for one IO7 PCI/AGP port. */
struct io7_port {
	struct io7 *io7;		/* owning IO7 chip */
	struct pci_controller *hose;	/* associated PCI hose */

	int enabled;
	unsigned int port;		/* port number on the chip */
	io7_ioport_csrs *csrs;		/* kernel mapping of this port's CSRs */

	/* Saved copies of the DMA window CSRs (WBASE/WMASK/TBASE 0..3). */
	unsigned long saved_wbase[4];
	unsigned long saved_wmask[4];
	unsigned long saved_tbase[4];
};

/* One IO7 chip: four ports plus the chip-global (port 7) CSRs. */
struct io7 {
	struct io7 *next;		/* singly-linked list of IO7s */

	unsigned int pe;		/* EV7 processor this IO7 attaches to */
	io7_port7_csrs *csrs;		/* chip-global CSR mapping */
	struct io7_port ports[IO7_NUM_PORTS];

	spinlock_t irq_lock;		/* presumably guards this chip's
					   interrupt CSRs -- confirm at
					   the call sites. */
};
318
319#ifndef __EXTERN_INLINE
320# define __EXTERN_INLINE extern inline
321# define __IO_EXTERN_INLINE
322#endif
323
324/*
325 * I/O functions. All access through linear space.
326 */
327
328/*
329 * Memory functions. All accesses through linear space.
330 */
331
332#define vucp volatile unsigned char __force *
333#define vusp volatile unsigned short __force *
334
335extern unsigned int marvel_ioread8(void __iomem *);
336extern void marvel_iowrite8(u8 b, void __iomem *);
337
338__EXTERN_INLINE unsigned int marvel_ioread16(void __iomem *addr)
339{
340 return __kernel_ldwu(*(vusp)addr);
341}
342
343__EXTERN_INLINE void marvel_iowrite16(u16 b, void __iomem *addr)
344{
345 __kernel_stw(b, *(vusp)addr);
346}
347
348extern void __iomem *marvel_ioremap(unsigned long addr, unsigned long size);
349extern void marvel_iounmap(volatile void __iomem *addr);
350extern void __iomem *marvel_ioportmap (unsigned long addr);
351
352__EXTERN_INLINE int marvel_is_ioaddr(unsigned long addr)
353{
354 return (addr >> 40) & 1;
355}
356
357extern int marvel_is_mmio(const volatile void __iomem *);
358
359#undef vucp
360#undef vusp
361
362#undef __IO_PREFIX
363#define __IO_PREFIX marvel
364#define marvel_trivial_rw_bw 1
365#define marvel_trivial_rw_lq 1
366#define marvel_trivial_io_bw 0
367#define marvel_trivial_io_lq 1
368#define marvel_trivial_iounmap 0
369#include <asm/io_trivial.h>
370
371#ifdef __IO_EXTERN_INLINE
372# undef __EXTERN_INLINE
373# undef __IO_EXTERN_INLINE
374#endif
375
376#endif /* __KERNEL__ */
377
378#endif /* __ALPHA_MARVEL__H__ */
diff --git a/include/asm-alpha/core_mcpcia.h b/include/asm-alpha/core_mcpcia.h
new file mode 100644
index 000000000000..980a3c51b18e
--- /dev/null
+++ b/include/asm-alpha/core_mcpcia.h
@@ -0,0 +1,379 @@
1#ifndef __ALPHA_MCPCIA__H__
2#define __ALPHA_MCPCIA__H__
3
4/* Define to experiment with fitting everything into one 128MB HAE window.
5 One window per bus, that is. */
6#define MCPCIA_ONE_HAE_WINDOW 1
7
8#include <linux/types.h>
9#include <linux/pci.h>
10#include <asm/compiler.h>
11
12/*
13 * MCPCIA is the internal name for a core logic chipset which provides
14 * PCI access for the RAWHIDE family of systems.
15 *
16 * This file is based on:
17 *
18 * RAWHIDE System Programmer's Manual
19 * 16-May-96
20 * Rev. 1.4
21 *
22 */
23
24/*------------------------------------------------------------------------**
25** **
26** I/O procedures **
27** **
28** inport[b|w|t|l], outport[b|w|t|l] 8:16:24:32 IO xfers **
29** inportbxt: 8 bits only **
30** inport: alias of inportw **
31** outport: alias of outportw **
32** **
33** inmem[b|w|t|l], outmem[b|w|t|l] 8:16:24:32 ISA memory xfers **
34** inmembxt: 8 bits only **
35** inmem: alias of inmemw **
36** outmem: alias of outmemw **
37** **
38**------------------------------------------------------------------------*/
39
40
41/* MCPCIA ADDRESS BIT DEFINITIONS
42 *
43 * 3333 3333 3322 2222 2222 1111 1111 11
44 * 9876 5432 1098 7654 3210 9876 5432 1098 7654 3210
45 * ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
46 * 1 000
47 * ---- ---- ---- ---- ---- ---- ---- ---- ---- ----
48 * | |\|
49 * | Byte Enable --+ |
50 * | Transfer Length --+
51 * +-- IO space, not cached
52 *
53 * Byte Transfer
54 * Enable Length Transfer Byte Address
55 * adr<6:5> adr<4:3> Length Enable Adder
56 * ---------------------------------------------
57 * 00 00 Byte 1110 0x000
58 * 01 00 Byte 1101 0x020
59 * 10 00 Byte 1011 0x040
60 * 11 00 Byte 0111 0x060
61 *
62 * 00 01 Word 1100 0x008
63 * 01 01 Word 1001 0x028 <= Not supported in this code.
64 * 10 01 Word 0011 0x048
65 *
66 * 00 10 Tribyte 1000 0x010
67 * 01 10 Tribyte 0001 0x030
68 *
69 * 10 11 Longword 0000 0x058
70 *
71 * Note that byte enables are asserted low.
72 *
73 */
74
75#define MCPCIA_MID(m) ((unsigned long)(m) << 33)
76
77/* Dodge has PCI0 and PCI1 at MID 4 and 5 respectively.
78 Durango adds PCI2 and PCI3 at MID 6 and 7 respectively. */
79#define MCPCIA_HOSE2MID(h) ((h) + 4)
80
81#define MCPCIA_MEM_MASK 0x07ffffff /* SPARSE Mem region mask is 27 bits */
82
83/*
84 * Memory spaces:
85 */
86#define MCPCIA_SPARSE(m) (IDENT_ADDR + 0xf000000000UL + MCPCIA_MID(m))
87#define MCPCIA_DENSE(m) (IDENT_ADDR + 0xf100000000UL + MCPCIA_MID(m))
88#define MCPCIA_IO(m) (IDENT_ADDR + 0xf180000000UL + MCPCIA_MID(m))
89#define MCPCIA_CONF(m) (IDENT_ADDR + 0xf1c0000000UL + MCPCIA_MID(m))
90#define MCPCIA_CSR(m) (IDENT_ADDR + 0xf1e0000000UL + MCPCIA_MID(m))
91#define MCPCIA_IO_IACK(m) (IDENT_ADDR + 0xf1f0000000UL + MCPCIA_MID(m))
92#define MCPCIA_DENSE_IO(m) (IDENT_ADDR + 0xe1fc000000UL + MCPCIA_MID(m))
93#define MCPCIA_DENSE_CONF(m) (IDENT_ADDR + 0xe1fe000000UL + MCPCIA_MID(m))
94
95/*
96 * General Registers
97 */
98#define MCPCIA_REV(m) (MCPCIA_CSR(m) + 0x000)
99#define MCPCIA_WHOAMI(m) (MCPCIA_CSR(m) + 0x040)
100#define MCPCIA_PCI_LAT(m) (MCPCIA_CSR(m) + 0x080)
101#define MCPCIA_CAP_CTRL(m) (MCPCIA_CSR(m) + 0x100)
102#define MCPCIA_HAE_MEM(m) (MCPCIA_CSR(m) + 0x400)
103#define MCPCIA_HAE_IO(m) (MCPCIA_CSR(m) + 0x440)
104#define _MCPCIA_IACK_SC(m) (MCPCIA_CSR(m) + 0x480)
105#define MCPCIA_HAE_DENSE(m) (MCPCIA_CSR(m) + 0x4C0)
106
107/*
108 * Interrupt Control registers
109 */
110#define MCPCIA_INT_CTL(m) (MCPCIA_CSR(m) + 0x500)
111#define MCPCIA_INT_REQ(m) (MCPCIA_CSR(m) + 0x540)
112#define MCPCIA_INT_TARG(m) (MCPCIA_CSR(m) + 0x580)
113#define MCPCIA_INT_ADR(m) (MCPCIA_CSR(m) + 0x5C0)
114#define MCPCIA_INT_ADR_EXT(m) (MCPCIA_CSR(m) + 0x600)
115#define MCPCIA_INT_MASK0(m) (MCPCIA_CSR(m) + 0x640)
116#define MCPCIA_INT_MASK1(m) (MCPCIA_CSR(m) + 0x680)
117#define MCPCIA_INT_ACK0(m) (MCPCIA_CSR(m) + 0x10003f00)
118#define MCPCIA_INT_ACK1(m) (MCPCIA_CSR(m) + 0x10003f40)
119
120/*
121 * Performance Monitor registers
122 */
123#define MCPCIA_PERF_MON(m) (MCPCIA_CSR(m) + 0x300)
124#define MCPCIA_PERF_CONT(m) (MCPCIA_CSR(m) + 0x340)
125
126/*
127 * Diagnostic Registers
128 */
129#define MCPCIA_CAP_DIAG(m) (MCPCIA_CSR(m) + 0x700)
130#define MCPCIA_TOP_OF_MEM(m) (MCPCIA_CSR(m) + 0x7C0)
131
132/*
133 * Error registers
134 */
135#define MCPCIA_MC_ERR0(m) (MCPCIA_CSR(m) + 0x800)
136#define MCPCIA_MC_ERR1(m) (MCPCIA_CSR(m) + 0x840)
137#define MCPCIA_CAP_ERR(m) (MCPCIA_CSR(m) + 0x880)
138#define MCPCIA_PCI_ERR1(m) (MCPCIA_CSR(m) + 0x1040)
139#define MCPCIA_MDPA_STAT(m) (MCPCIA_CSR(m) + 0x4000)
140#define MCPCIA_MDPA_SYN(m) (MCPCIA_CSR(m) + 0x4040)
141#define MCPCIA_MDPA_DIAG(m) (MCPCIA_CSR(m) + 0x4080)
142#define MCPCIA_MDPB_STAT(m) (MCPCIA_CSR(m) + 0x8000)
143#define MCPCIA_MDPB_SYN(m) (MCPCIA_CSR(m) + 0x8040)
144#define MCPCIA_MDPB_DIAG(m) (MCPCIA_CSR(m) + 0x8080)
145
146/*
147 * PCI Address Translation Registers.
148 */
149#define MCPCIA_SG_TBIA(m) (MCPCIA_CSR(m) + 0x1300)
150#define MCPCIA_HBASE(m) (MCPCIA_CSR(m) + 0x1340)
151
152#define MCPCIA_W0_BASE(m) (MCPCIA_CSR(m) + 0x1400)
153#define MCPCIA_W0_MASK(m) (MCPCIA_CSR(m) + 0x1440)
154#define MCPCIA_T0_BASE(m) (MCPCIA_CSR(m) + 0x1480)
155
156#define MCPCIA_W1_BASE(m) (MCPCIA_CSR(m) + 0x1500)
157#define MCPCIA_W1_MASK(m) (MCPCIA_CSR(m) + 0x1540)
158#define MCPCIA_T1_BASE(m) (MCPCIA_CSR(m) + 0x1580)
159
160#define MCPCIA_W2_BASE(m) (MCPCIA_CSR(m) + 0x1600)
161#define MCPCIA_W2_MASK(m) (MCPCIA_CSR(m) + 0x1640)
162#define MCPCIA_T2_BASE(m) (MCPCIA_CSR(m) + 0x1680)
163
164#define MCPCIA_W3_BASE(m) (MCPCIA_CSR(m) + 0x1700)
165#define MCPCIA_W3_MASK(m) (MCPCIA_CSR(m) + 0x1740)
166#define MCPCIA_T3_BASE(m) (MCPCIA_CSR(m) + 0x1780)
167
/* Hack!  Only works for bus 0. */
169
170#ifndef MCPCIA_ONE_HAE_WINDOW
171#define MCPCIA_HAE_ADDRESS MCPCIA_HAE_MEM(4)
172#endif
173#define MCPCIA_IACK_SC _MCPCIA_IACK_SC(4)
174
/*
 * The canonical non-remapped I/O and MEM addresses have these values
 * subtracted out.  This is arranged so that folks manipulating ISA
 * devices can use their familiar numbers and have them map to bus 0.
 */
180
181#define MCPCIA_IO_BIAS MCPCIA_IO(4)
182#define MCPCIA_MEM_BIAS MCPCIA_DENSE(4)
183
184/* Offset between ram physical addresses and pci64 DAC bus addresses. */
185#define MCPCIA_DAC_OFFSET (1UL << 40)
186
187/*
188 * Data structure for handling MCPCIA machine checks:
189 */
190struct el_MCPCIA_uncorrected_frame_mcheck {
191 struct el_common header;
192 struct el_common_EV5_uncorrectable_mcheck procdata;
193};
194
195
196#ifdef __KERNEL__
197
198#ifndef __EXTERN_INLINE
199#define __EXTERN_INLINE extern inline
200#define __IO_EXTERN_INLINE
201#endif
202
203/*
204 * I/O functions:
205 *
206 * MCPCIA, the RAWHIDE family PCI/memory support chipset for the EV5 (21164)
207 * and EV56 (21164a) processors, can use either a sparse address mapping
208 * scheme, or the so-called byte-word PCI address space, to get at PCI memory
209 * and I/O.
210 *
211 * Unfortunately, we can't use BWIO with EV5, so for now, we always use SPARSE.
212 */
213
214/*
215 * Memory functions. 64-bit and 32-bit accesses are done through
216 * dense memory space, everything else through sparse space.
217 *
218 * For reading and writing 8 and 16 bit quantities we need to
219 * go through one of the three sparse address mapping regions
220 * and use the HAE_MEM CSR to provide some bits of the address.
221 * The following few routines use only sparse address region 1
222 * which gives 1Gbyte of accessible space which relates exactly
223 * to the amount of PCI memory mapping *into* system address space.
224 * See p 6-17 of the specification but it looks something like this:
225 *
226 * 21164 Address:
227 *
228 * 3 2 1
229 * 9876543210987654321098765432109876543210
230 * 1ZZZZ0.PCI.QW.Address............BBLL
231 *
232 * ZZ = SBZ
233 * BB = Byte offset
234 * LL = Transfer length
235 *
236 * PCI Address:
237 *
238 * 3 2 1
239 * 10987654321098765432109876543210
240 * HHH....PCI.QW.Address........ 00
241 *
242 * HHH = 31:29 HAE_MEM CSR
243 *
244 */
245
246#define vip volatile int __force *
247#define vuip volatile unsigned int __force *
248
#ifdef MCPCIA_ONE_HAE_WINDOW
/*
 * Rebase a dense-space (MMIO) base into sparse space, first programming
 * the HAE with the low 32 address bits.  NOTE: non-hygienic -- this
 * macro reads and MODIFIES a caller local named 'hose'.
 */
#define MCPCIA_FROB_MMIO						\
	if (__mcpcia_is_mmio(hose)) {					\
		set_hae(hose & 0xffffffff);				\
		hose = hose - MCPCIA_DENSE(4) + MCPCIA_SPARSE(4);	\
	}
#else
/* Same rebase without touching the HAE (multi-window configuration). */
#define MCPCIA_FROB_MMIO						\
	if (__mcpcia_is_mmio(hose)) {					\
		hose = hose - MCPCIA_DENSE(4) + MCPCIA_SPARSE(4);	\
	}
#endif
261
/*
 * Distinguish dense PCI memory from sparse I/O within a hose region:
 * dense-memory cookies have bit 31 clear, sparse-I/O cookies have it set
 * (see the MCPCIA_DENSE/MCPCIA_IO region bases above).
 */
static inline int __mcpcia_is_mmio(unsigned long addr)
{
	return !(addr & 0x80000000UL);
}
266
/*
 * Read one byte through sparse space.  The cookie splits into the
 * in-region offset (low 27 bits) and the hose base; dense cookies are
 * rebased into sparse space by MCPCIA_FROB_MMIO.
 *
 * NOTE: the local must be named 'hose' -- MCPCIA_FROB_MMIO modifies it.
 */
__EXTERN_INLINE unsigned int mcpcia_ioread8(void __iomem *xaddr)
{
	unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK;
	unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK;
	unsigned long result;

	MCPCIA_FROB_MMIO;

	/* Sparse encoding: offset << 5; +0x00 = byte transfer length. */
	result = *(vip) ((addr << 5) + hose + 0x00);
	return __kernel_extbl(result, addr & 3);
}

/* Write one byte through sparse space (see mcpcia_ioread8). */
__EXTERN_INLINE void mcpcia_iowrite8(u8 b, void __iomem *xaddr)
{
	unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK;
	unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK;
	unsigned long w;

	MCPCIA_FROB_MMIO;

	w = __kernel_insbl(b, addr & 3);
	*(vuip) ((addr << 5) + hose + 0x00) = w;
}

/* Read a 16-bit word through sparse space (+0x08 = word length). */
__EXTERN_INLINE unsigned int mcpcia_ioread16(void __iomem *xaddr)
{
	unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK;
	unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK;
	unsigned long result;

	MCPCIA_FROB_MMIO;

	result = *(vip) ((addr << 5) + hose + 0x08);
	return __kernel_extwl(result, addr & 3);
}

/* Write a 16-bit word through sparse space. */
__EXTERN_INLINE void mcpcia_iowrite16(u16 b, void __iomem *xaddr)
{
	unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK;
	unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK;
	unsigned long w;

	MCPCIA_FROB_MMIO;

	w = __kernel_inswl(b, addr & 3);
	*(vuip) ((addr << 5) + hose + 0x08) = w;
}
314
315__EXTERN_INLINE unsigned int mcpcia_ioread32(void __iomem *xaddr)
316{
317 unsigned long addr = (unsigned long)xaddr;
318
319 if (!__mcpcia_is_mmio(addr))
320 addr = ((addr & 0xffff) << 5) + (addr & ~0xfffful) + 0x18;
321
322 return *(vuip)addr;
323}
324
325__EXTERN_INLINE void mcpcia_iowrite32(u32 b, void __iomem *xaddr)
326{
327 unsigned long addr = (unsigned long)xaddr;
328
329 if (!__mcpcia_is_mmio(addr))
330 addr = ((addr & 0xffff) << 5) + (addr & ~0xfffful) + 0x18;
331
332 *(vuip)addr = b;
333}
334
335
336__EXTERN_INLINE void __iomem *mcpcia_ioportmap(unsigned long addr)
337{
338 return (void __iomem *)(addr + MCPCIA_IO_BIAS);
339}
340
341__EXTERN_INLINE void __iomem *mcpcia_ioremap(unsigned long addr,
342 unsigned long size)
343{
344 return (void __iomem *)(addr + MCPCIA_MEM_BIAS);
345}
346
/* True if 'addr' lies at/above the lowest MCPCIA region (sparse mem, MID 0). */
__EXTERN_INLINE int mcpcia_is_ioaddr(unsigned long addr)
{
	return addr >= MCPCIA_SPARSE(0);
}

/* True for dense (MMIO) cookies; see __mcpcia_is_mmio. */
__EXTERN_INLINE int mcpcia_is_mmio(const volatile void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	return __mcpcia_is_mmio(addr);
}
357
358#undef MCPCIA_FROB_MMIO
359
360#undef vip
361#undef vuip
362
363#undef __IO_PREFIX
364#define __IO_PREFIX mcpcia
365#define mcpcia_trivial_rw_bw 2
366#define mcpcia_trivial_rw_lq 1
367#define mcpcia_trivial_io_bw 0
368#define mcpcia_trivial_io_lq 0
369#define mcpcia_trivial_iounmap 1
370#include <asm/io_trivial.h>
371
372#ifdef __IO_EXTERN_INLINE
373#undef __EXTERN_INLINE
374#undef __IO_EXTERN_INLINE
375#endif
376
377#endif /* __KERNEL__ */
378
379#endif /* __ALPHA_MCPCIA__H__ */
diff --git a/include/asm-alpha/core_polaris.h b/include/asm-alpha/core_polaris.h
new file mode 100644
index 000000000000..2f966b64659d
--- /dev/null
+++ b/include/asm-alpha/core_polaris.h
@@ -0,0 +1,110 @@
1#ifndef __ALPHA_POLARIS__H__
2#define __ALPHA_POLARIS__H__
3
4#include <linux/types.h>
5#include <asm/compiler.h>
6
7/*
8 * POLARIS is the internal name for a core logic chipset which provides
9 * memory controller and PCI access for the 21164PC chip based systems.
10 *
11 * This file is based on:
12 *
13 * Polaris System Controller
14 * Device Functional Specification
15 * 22-Jan-98
16 * Rev. 4.2
17 *
18 */
19
20/* Polaris memory regions */
21#define POLARIS_SPARSE_MEM_BASE (IDENT_ADDR + 0xf800000000UL)
22#define POLARIS_DENSE_MEM_BASE (IDENT_ADDR + 0xf900000000UL)
23#define POLARIS_SPARSE_IO_BASE (IDENT_ADDR + 0xf980000000UL)
24#define POLARIS_SPARSE_CONFIG_BASE (IDENT_ADDR + 0xf9c0000000UL)
25#define POLARIS_IACK_BASE (IDENT_ADDR + 0xf9f8000000UL)
26#define POLARIS_DENSE_IO_BASE (IDENT_ADDR + 0xf9fc000000UL)
27#define POLARIS_DENSE_CONFIG_BASE (IDENT_ADDR + 0xf9fe000000UL)
28
29#define POLARIS_IACK_SC POLARIS_IACK_BASE
30
31/* The Polaris command/status registers live in PCI Config space for
32 * bus 0/device 0. As such, they may be bytes, words, or doublewords.
33 */
34#define POLARIS_W_VENID (POLARIS_DENSE_CONFIG_BASE)
35#define POLARIS_W_DEVID (POLARIS_DENSE_CONFIG_BASE+2)
36#define POLARIS_W_CMD (POLARIS_DENSE_CONFIG_BASE+4)
37#define POLARIS_W_STATUS (POLARIS_DENSE_CONFIG_BASE+6)
38
39/*
40 * Data structure for handling POLARIS machine checks:
41 */
struct el_POLARIS_sysdata_mcheck {
	u_long	psc_status;	/* Polaris system controller status */
	u_long	psc_pcictl0;	/* PCI control registers 0..2 */
	u_long	psc_pcictl1;
	u_long	psc_pcictl2;
};
48
49#ifdef __KERNEL__
50
51#ifndef __EXTERN_INLINE
52#define __EXTERN_INLINE extern inline
53#define __IO_EXTERN_INLINE
54#endif
55
56/*
57 * I/O functions:
58 *
59 * POLARIS, the PCI/memory support chipset for the PCA56 (21164PC)
60 * processors, can use either a sparse address mapping scheme, or the
61 * so-called byte-word PCI address space, to get at PCI memory and I/O.
62 *
63 * However, we will support only the BWX form.
64 */
65
66/*
67 * Memory functions. Polaris allows all accesses (byte/word
68 * as well as long/quad) to be done through dense space.
69 *
70 * We will only support DENSE access via BWX insns.
71 */
72
73__EXTERN_INLINE void __iomem *polaris_ioportmap(unsigned long addr)
74{
75 return (void __iomem *)(addr + POLARIS_DENSE_IO_BASE);
76}
77
78__EXTERN_INLINE void __iomem *polaris_ioremap(unsigned long addr,
79 unsigned long size)
80{
81 return (void __iomem *)(addr + POLARIS_DENSE_MEM_BASE);
82}
83
/* True if 'addr' falls within any Polaris-mapped region. */
__EXTERN_INLINE int polaris_is_ioaddr(unsigned long addr)
{
	return addr >= POLARIS_SPARSE_MEM_BASE;
}

/* PCI memory (sparse or dense MEM) lies below the sparse I/O base. */
__EXTERN_INLINE int polaris_is_mmio(const volatile void __iomem *addr)
{
	return (unsigned long)addr < POLARIS_SPARSE_IO_BASE;
}
93
94#undef __IO_PREFIX
95#define __IO_PREFIX polaris
96#define polaris_trivial_rw_bw 1
97#define polaris_trivial_rw_lq 1
98#define polaris_trivial_io_bw 1
99#define polaris_trivial_io_lq 1
100#define polaris_trivial_iounmap 1
101#include <asm/io_trivial.h>
102
103#ifdef __IO_EXTERN_INLINE
104#undef __EXTERN_INLINE
105#undef __IO_EXTERN_INLINE
106#endif
107
108#endif /* __KERNEL__ */
109
110#endif /* __ALPHA_POLARIS__H__ */
diff --git a/include/asm-alpha/core_t2.h b/include/asm-alpha/core_t2.h
new file mode 100644
index 000000000000..5c1c40338c82
--- /dev/null
+++ b/include/asm-alpha/core_t2.h
@@ -0,0 +1,628 @@
1#ifndef __ALPHA_T2__H__
2#define __ALPHA_T2__H__
3
4#include <linux/config.h>
5#include <linux/types.h>
6#include <linux/spinlock.h>
7#include <asm/compiler.h>
8#include <asm/system.h>
9
10/*
11 * T2 is the internal name for the core logic chipset which provides
12 * memory controller and PCI access for the SABLE-based systems.
13 *
14 * This file is based on:
15 *
16 * SABLE I/O Specification
17 * Revision/Update Information: 1.3
18 *
19 * jestabro@amt.tay1.dec.com Initial Version.
20 *
21 */
22
23#define T2_MEM_R1_MASK 0x07ffffff /* Mem sparse region 1 mask is 26 bits */
24
25/* GAMMA-SABLE is a SABLE with EV5-based CPUs */
26/* All LYNX machines, EV4 or EV5, use the GAMMA bias also */
27#define _GAMMA_BIAS 0x8000000000UL
28
29#if defined(CONFIG_ALPHA_GENERIC)
30#define GAMMA_BIAS alpha_mv.sys.t2.gamma_bias
31#elif defined(CONFIG_ALPHA_GAMMA)
32#define GAMMA_BIAS _GAMMA_BIAS
33#else
34#define GAMMA_BIAS 0
35#endif
36
37/*
38 * Memory spaces:
39 */
40#define T2_CONF (IDENT_ADDR + GAMMA_BIAS + 0x390000000UL)
41#define T2_IO (IDENT_ADDR + GAMMA_BIAS + 0x3a0000000UL)
42#define T2_SPARSE_MEM (IDENT_ADDR + GAMMA_BIAS + 0x200000000UL)
43#define T2_DENSE_MEM (IDENT_ADDR + GAMMA_BIAS + 0x3c0000000UL)
44
45#define T2_IOCSR (IDENT_ADDR + GAMMA_BIAS + 0x38e000000UL)
46#define T2_CERR1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000020UL)
47#define T2_CERR2 (IDENT_ADDR + GAMMA_BIAS + 0x38e000040UL)
48#define T2_CERR3 (IDENT_ADDR + GAMMA_BIAS + 0x38e000060UL)
49#define T2_PERR1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000080UL)
50#define T2_PERR2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0000a0UL)
51#define T2_PSCR (IDENT_ADDR + GAMMA_BIAS + 0x38e0000c0UL)
52#define T2_HAE_1 (IDENT_ADDR + GAMMA_BIAS + 0x38e0000e0UL)
53#define T2_HAE_2 (IDENT_ADDR + GAMMA_BIAS + 0x38e000100UL)
54#define T2_HBASE (IDENT_ADDR + GAMMA_BIAS + 0x38e000120UL)
55#define T2_WBASE1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000140UL)
56#define T2_WMASK1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000160UL)
57#define T2_TBASE1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000180UL)
58#define T2_WBASE2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0001a0UL)
59#define T2_WMASK2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0001c0UL)
60#define T2_TBASE2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0001e0UL)
61#define T2_TLBBR (IDENT_ADDR + GAMMA_BIAS + 0x38e000200UL)
62#define T2_IVR (IDENT_ADDR + GAMMA_BIAS + 0x38e000220UL)
63#define T2_HAE_3 (IDENT_ADDR + GAMMA_BIAS + 0x38e000240UL)
64#define T2_HAE_4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000260UL)
65
66/* The CSRs below are T3/T4 only */
67#define T2_WBASE3 (IDENT_ADDR + GAMMA_BIAS + 0x38e000280UL)
68#define T2_WMASK3 (IDENT_ADDR + GAMMA_BIAS + 0x38e0002a0UL)
69#define T2_TBASE3 (IDENT_ADDR + GAMMA_BIAS + 0x38e0002c0UL)
70
71#define T2_TDR0 (IDENT_ADDR + GAMMA_BIAS + 0x38e000300UL)
72#define T2_TDR1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000320UL)
73#define T2_TDR2 (IDENT_ADDR + GAMMA_BIAS + 0x38e000340UL)
74#define T2_TDR3 (IDENT_ADDR + GAMMA_BIAS + 0x38e000360UL)
75#define T2_TDR4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000380UL)
76#define T2_TDR5 (IDENT_ADDR + GAMMA_BIAS + 0x38e0003a0UL)
77#define T2_TDR6 (IDENT_ADDR + GAMMA_BIAS + 0x38e0003c0UL)
78#define T2_TDR7 (IDENT_ADDR + GAMMA_BIAS + 0x38e0003e0UL)
79
80#define T2_WBASE4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000400UL)
81#define T2_WMASK4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000420UL)
82#define T2_TBASE4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000440UL)
83
84#define T2_AIR (IDENT_ADDR + GAMMA_BIAS + 0x38e000460UL)
85#define T2_VAR (IDENT_ADDR + GAMMA_BIAS + 0x38e000480UL)
86#define T2_DIR (IDENT_ADDR + GAMMA_BIAS + 0x38e0004a0UL)
87#define T2_ICE (IDENT_ADDR + GAMMA_BIAS + 0x38e0004c0UL)
88
89#define T2_HAE_ADDRESS T2_HAE_1
90
91/* T2 CSRs are in the non-cachable primary IO space from 3.8000.0000 to
92 3.8fff.ffff
93 *
94 * +--------------+ 3 8000 0000
95 * | CPU 0 CSRs |
96 * +--------------+ 3 8100 0000
97 * | CPU 1 CSRs |
98 * +--------------+ 3 8200 0000
99 * | CPU 2 CSRs |
100 * +--------------+ 3 8300 0000
101 * | CPU 3 CSRs |
102 * +--------------+ 3 8400 0000
103 * | CPU Reserved |
104 * +--------------+ 3 8700 0000
105 * | Mem Reserved |
106 * +--------------+ 3 8800 0000
107 * | Mem 0 CSRs |
108 * +--------------+ 3 8900 0000
109 * | Mem 1 CSRs |
110 * +--------------+ 3 8a00 0000
111 * | Mem 2 CSRs |
112 * +--------------+ 3 8b00 0000
113 * | Mem 3 CSRs |
114 * +--------------+ 3 8c00 0000
115 * | Mem Reserved |
116 * +--------------+ 3 8e00 0000
117 * | PCI Bridge |
118 * +--------------+ 3 8f00 0000
119 * | Expansion IO |
120 * +--------------+ 3 9000 0000
121 *
122 *
123 */
124#define T2_CPU0_BASE (IDENT_ADDR + GAMMA_BIAS + 0x380000000L)
125#define T2_CPU1_BASE (IDENT_ADDR + GAMMA_BIAS + 0x381000000L)
126#define T2_CPU2_BASE (IDENT_ADDR + GAMMA_BIAS + 0x382000000L)
127#define T2_CPU3_BASE (IDENT_ADDR + GAMMA_BIAS + 0x383000000L)
128
129#define T2_CPUn_BASE(n) (T2_CPU0_BASE + (((n)&3) * 0x001000000L))
130
131#define T2_MEM0_BASE (IDENT_ADDR + GAMMA_BIAS + 0x388000000L)
132#define T2_MEM1_BASE (IDENT_ADDR + GAMMA_BIAS + 0x389000000L)
133#define T2_MEM2_BASE (IDENT_ADDR + GAMMA_BIAS + 0x38a000000L)
134#define T2_MEM3_BASE (IDENT_ADDR + GAMMA_BIAS + 0x38b000000L)
135
136
137/*
138 * Sable CPU Module CSRS
139 *
140 * These are CSRs for hardware other than the CPU chip on the CPU module.
141 * The CPU module has Backup Cache control logic, Cbus control logic, and
142 * interrupt control logic on it. There is a duplicate tag store to speed
143 * up maintaining cache coherency.
144 */
145
/*
 * CPU-module CSR block.  Each live register is 64 bits and the
 * hardware spaces them 0x20 apart, hence the three quadwords of
 * padding (fill_xx) after every field.  Layout mirrors the bus -
 * do not reorder or repack.
 */
146struct sable_cpu_csr {
147 unsigned long bcc; long fill_00[3]; /* Backup Cache Control */
148 unsigned long bcce; long fill_01[3]; /* Backup Cache Correctable Error */
149 unsigned long bccea; long fill_02[3]; /* B-Cache Corr Err Address Latch */
150 unsigned long bcue; long fill_03[3]; /* B-Cache Uncorrectable Error */
151 unsigned long bcuea; long fill_04[3]; /* B-Cache Uncorr Err Addr Latch */
152 unsigned long dter; long fill_05[3]; /* Duplicate Tag Error */
153 unsigned long cbctl; long fill_06[3]; /* CBus Control */
154 unsigned long cbe; long fill_07[3]; /* CBus Error */
155 unsigned long cbeal; long fill_08[3]; /* CBus Error Addr Latch low */
156 unsigned long cbeah; long fill_09[3]; /* CBus Error Addr Latch high */
157 unsigned long pmbx; long fill_10[3]; /* Processor Mailbox */
158 unsigned long ipir; long fill_11[3]; /* Inter-Processor Int Request */
159 unsigned long sic; long fill_12[3]; /* System Interrupt Clear */
160 unsigned long adlk; long fill_13[3]; /* Address Lock (LDxL/STxC) */
161 unsigned long madrl; long fill_14[3]; /* CBus Miss Address */
162 unsigned long rev; long fill_15[3]; /* CMIC Revision */
163};
164
165/*
166 * Data structure for handling T2 machine checks:
167 */
/* Common header carried by every sub-frame in a T2 logout area. */
168struct el_t2_frame_header {
169 unsigned int elcf_fid; /* Frame ID (from above) */
170 unsigned int elcf_size; /* Size of frame in bytes */
171};
172
/* Per-processor machine-check state saved by PALcode (EV4 layout). */
173struct el_t2_procdata_mcheck {
174 unsigned long elfmc_paltemp[32]; /* PAL TEMP REGS. */
175 /* EV4-specific fields */
176 unsigned long elfmc_exc_addr; /* Addr of excepting insn. */
177 unsigned long elfmc_exc_sum; /* Summary of arith traps. */
178 unsigned long elfmc_exc_mask; /* Exception mask (from exc_sum). */
179 unsigned long elfmc_iccsr; /* IBox hardware enables. */
180 unsigned long elfmc_pal_base; /* Base address for PALcode. */
181 unsigned long elfmc_hier; /* Hardware Interrupt Enable. */
182 unsigned long elfmc_hirr; /* Hardware Interrupt Request. */
183 unsigned long elfmc_mm_csr; /* D-stream fault info. */
184 unsigned long elfmc_dc_stat; /* D-cache status (ECC/Parity Err). */
185 unsigned long elfmc_dc_addr; /* EV3 Phys Addr for ECC/DPERR. */
186 unsigned long elfmc_abox_ctl; /* ABox Control Register. */
187 unsigned long elfmc_biu_stat; /* BIU Status. */
188 unsigned long elfmc_biu_addr; /* BIU Address. */
189 unsigned long elfmc_biu_ctl; /* BIU Control. */
190 unsigned long elfmc_fill_syndrome; /* For correcting ECC errors. */
191 unsigned long elfmc_fill_addr;/* Cache block which was being read. */
192 unsigned long elfmc_va; /* Effective VA of fault or miss. */
193 unsigned long elfmc_bc_tag; /* Backup Cache Tag Probe Results. */
194};
195
196/*
197 * Sable processor specific Machine Check Data segment.
198 */
199
/* Header of the PAL logout area; offsets locate the proc/sys parts. */
200struct el_t2_logout_header {
201 unsigned int elfl_size; /* size in bytes of logout area. */
202 unsigned int elfl_sbz1:31; /* Should be zero. */
203 unsigned int elfl_retry:1; /* Retry flag. */
204 unsigned int elfl_procoffset; /* Processor-specific offset. */
205 unsigned int elfl_sysoffset; /* Offset of system-specific. */
206 unsigned int elfl_error_type; /* PAL error type code. */
207 unsigned int elfl_frame_rev; /* PAL Frame revision. */
208};
/* Snapshot of the 16 CPU-module CSRs (same order as sable_cpu_csr). */
209struct el_t2_sysdata_mcheck {
210 unsigned long elcmc_bcc; /* CSR 0 */
211 unsigned long elcmc_bcce; /* CSR 1 */
212 unsigned long elcmc_bccea; /* CSR 2 */
213 unsigned long elcmc_bcue; /* CSR 3 */
214 unsigned long elcmc_bcuea; /* CSR 4 */
215 unsigned long elcmc_dter; /* CSR 5 */
216 unsigned long elcmc_cbctl; /* CSR 6 */
217 unsigned long elcmc_cbe; /* CSR 7 */
218 unsigned long elcmc_cbeal; /* CSR 8 */
219 unsigned long elcmc_cbeah; /* CSR 9 */
220 unsigned long elcmc_pmbx; /* CSR 10 */
221 unsigned long elcmc_ipir; /* CSR 11 */
222 unsigned long elcmc_sic; /* CSR 12 */
223 unsigned long elcmc_adlk; /* CSR 13 */
224 unsigned long elcmc_madrl; /* CSR 14 */
225 unsigned long elcmc_crrev4; /* CSR 15 */
226};
227
228/*
229 * Sable memory error frame - sable pfms section 3.42
230 */
/* Per-memory-module error snapshot (one of up to four modules). */
231struct el_t2_data_memory {
232 struct el_t2_frame_header elcm_hdr; /* ID$MEM-FERR = 0x08 */
233 unsigned int elcm_module; /* Module id. */
234 unsigned int elcm_res04; /* Reserved. */
235 unsigned long elcm_merr; /* CSR0: Error Reg 1. */
236 unsigned long elcm_mcmd1; /* CSR1: Command Trap 1. */
237 unsigned long elcm_mcmd2; /* CSR2: Command Trap 2. */
238 unsigned long elcm_mconf; /* CSR3: Configuration. */
239 unsigned long elcm_medc1; /* CSR4: EDC Status 1. */
240 unsigned long elcm_medc2; /* CSR5: EDC Status 2. */
241 unsigned long elcm_medcc; /* CSR6: EDC Control. */
242 unsigned long elcm_msctl; /* CSR7: Stream Buffer Control. */
243 unsigned long elcm_mref; /* CSR8: Refresh Control. */
244 unsigned long elcm_filter; /* CSR9: CRD Filter Control. */
245};
246
247
248/*
249 * Sable other CPU error frame - sable pfms section 3.43
250 */
/* CSR snapshot of a CPU module other than the one taking the mcheck. */
251struct el_t2_data_other_cpu {
252 short elco_cpuid; /* CPU ID */
253 short elco_res02[3];
254 unsigned long elco_bcc; /* CSR 0 */
255 unsigned long elco_bcce; /* CSR 1 */
256 unsigned long elco_bccea; /* CSR 2 */
257 unsigned long elco_bcue; /* CSR 3 */
258 unsigned long elco_bcuea; /* CSR 4 */
259 unsigned long elco_dter; /* CSR 5 */
260 unsigned long elco_cbctl; /* CSR 6 */
261 unsigned long elco_cbe; /* CSR 7 */
262 unsigned long elco_cbeal; /* CSR 8 */
263 unsigned long elco_cbeah; /* CSR 9 */
264 unsigned long elco_pmbx; /* CSR 10 */
265 unsigned long elco_ipir; /* CSR 11 */
266 unsigned long elco_sic; /* CSR 12 */
267 unsigned long elco_adlk; /* CSR 13 */
268 unsigned long elco_madrl; /* CSR 14 */
269 unsigned long elco_crrev4; /* CSR 15 */
270};
271
272/*
273 * Sable T2 chipset error frame - sable pfms section 3.44
274 */
/* Snapshot of the T2 chipset CSRs (IOCSR, CBus/PCI errors, windows, TLB). */
275struct el_t2_data_t2{
276 struct el_t2_frame_header elct_hdr; /* ID$T2-FRAME */
277 unsigned long elct_iocsr; /* IO Control and Status Register */
278 unsigned long elct_cerr1; /* Cbus Error Register 1 */
279 unsigned long elct_cerr2; /* Cbus Error Register 2 */
280 unsigned long elct_cerr3; /* Cbus Error Register 3 */
281 unsigned long elct_perr1; /* PCI Error Register 1 */
282 unsigned long elct_perr2; /* PCI Error Register 2 */
283 unsigned long elct_hae0_1; /* High Address Extension Register 1 */
284 unsigned long elct_hae0_2; /* High Address Extension Register 2 */
285 unsigned long elct_hbase; /* High Base Register */
286 unsigned long elct_wbase1; /* Window Base Register 1 */
287 unsigned long elct_wmask1; /* Window Mask Register 1 */
288 unsigned long elct_tbase1; /* Translated Base Register 1 */
289 unsigned long elct_wbase2; /* Window Base Register 2 */
290 unsigned long elct_wmask2; /* Window Mask Register 2 */
291 unsigned long elct_tbase2; /* Translated Base Register 2 */
292 unsigned long elct_tdr0; /* TLB Data Register 0 */
293 unsigned long elct_tdr1; /* TLB Data Register 1 */
294 unsigned long elct_tdr2; /* TLB Data Register 2 */
295 unsigned long elct_tdr3; /* TLB Data Register 3 */
296 unsigned long elct_tdr4; /* TLB Data Register 4 */
297 unsigned long elct_tdr5; /* TLB Data Register 5 */
298 unsigned long elct_tdr6; /* TLB Data Register 6 */
299 unsigned long elct_tdr7; /* TLB Data Register 7 */
300};
301
302/*
303 * Sable error log data structure - sable pfms section 3.40
304 */
/* Correctable (CRD) error snapshot: BIU and B-cache fill state. */
305struct el_t2_data_corrected {
306 unsigned long elcpb_biu_stat;
307 unsigned long elcpb_biu_addr;
308 unsigned long elcpb_biu_ctl;
309 unsigned long elcpb_fill_syndrome;
310 unsigned long elcpb_fill_addr;
311 unsigned long elcpb_bc_tag;
312};
313
314/*
315 * Sable error log data structure
316 * Note there are 4 memory slots on sable (see t2.h)
317 */
/* Complete machine-check frame: proc + sys + T2 + all four memory slots. */
318struct el_t2_frame_mcheck {
319 struct el_t2_frame_header elfmc_header; /* ID$P-FRAME_MCHECK */
320 struct el_t2_logout_header elfmc_hdr;
321 struct el_t2_procdata_mcheck elfmc_procdata;
322 struct el_t2_sysdata_mcheck elfmc_sysdata;
323 struct el_t2_data_t2 elfmc_t2data;
324 struct el_t2_data_memory elfmc_memdata[4];
325 struct el_t2_frame_header elfmc_footer; /* empty */
326};
327
328
329/*
330 * Sable error log data structures on memory errors
331 */
/* Correctable-error frame; the T2/memory sections are not logged here. */
332struct el_t2_frame_corrected {
333 struct el_t2_frame_header elfcc_header; /* ID$P-BC-COR */
334 struct el_t2_logout_header elfcc_hdr;
335 struct el_t2_data_corrected elfcc_procdata;
336/* struct el_t2_data_t2 elfcc_t2data; */
337/* struct el_t2_data_memory elfcc_memdata[4]; */
338 struct el_t2_frame_header elfcc_footer; /* empty */
339};
340
341
342#ifdef __KERNEL__
343
344#ifndef __EXTERN_INLINE
345#define __EXTERN_INLINE extern inline
346#define __IO_EXTERN_INLINE
347#endif
348
349/*
350 * I/O functions:
351 *
352 * T2 (the core logic PCI/memory support chipset for the SABLE
353 * series of processors uses a sparse address mapping scheme to
354 * get at PCI memory and I/O.
355 */
356
357#define vip volatile int *
358#define vuip volatile unsigned int *
359
360static inline u8 t2_inb(unsigned long addr)
361{
362 long result = *(vip) ((addr << 5) + T2_IO + 0x00);
363 return __kernel_extbl(result, addr & 3);
364}
365
366static inline void t2_outb(u8 b, unsigned long addr)
367{
368 unsigned long w;
369
370 w = __kernel_insbl(b, addr & 3);
371 *(vuip) ((addr << 5) + T2_IO + 0x00) = w;
372 mb();
373}
374
375static inline u16 t2_inw(unsigned long addr)
376{
377 long result = *(vip) ((addr << 5) + T2_IO + 0x08);
378 return __kernel_extwl(result, addr & 3);
379}
380
381static inline void t2_outw(u16 b, unsigned long addr)
382{
383 unsigned long w;
384
385 w = __kernel_inswl(b, addr & 3);
386 *(vuip) ((addr << 5) + T2_IO + 0x08) = w;
387 mb();
388}
389
390static inline u32 t2_inl(unsigned long addr)
391{
392 return *(vuip) ((addr << 5) + T2_IO + 0x18);
393}
394
395static inline void t2_outl(u32 b, unsigned long addr)
396{
397 *(vuip) ((addr << 5) + T2_IO + 0x18) = b;
398 mb();
399}
400
401
402/*
403 * Memory functions.
404 *
405 * For reading and writing 8 and 16 bit quantities we need to
406 * go through one of the three sparse address mapping regions
407 * and use the HAE_MEM CSR to provide some bits of the address.
408 * The following few routines use only sparse address region 1
409 * which gives 1Gbyte of accessible space which relates exactly
410 * to the amount of PCI memory mapping *into* system address space.
411 * See p 6-17 of the specification but it looks something like this:
412 *
413 * 21164 Address:
414 *
415 * 3 2 1
416 * 9876543210987654321098765432109876543210
417 * 1ZZZZ0.PCI.QW.Address............BBLL
418 *
419 * ZZ = SBZ
420 * BB = Byte offset
421 * LL = Transfer length
422 *
423 * PCI Address:
424 *
425 * 3 2 1
426 * 10987654321098765432109876543210
427 * HHH....PCI.QW.Address........ 00
428 *
429 * HHH = 31:29 HAE_MEM CSR
430 *
431 */
432
433#define t2_set_hae { \
434 msb = addr >> 27; \
435 addr &= T2_MEM_R1_MASK; \
436 set_hae(msb); \
437}
438
439static spinlock_t t2_hae_lock = SPIN_LOCK_UNLOCKED;
440
/*
 * Sparse-space MMIO readers.  Each takes the HAE lock with interrupts
 * disabled, programs the HAE via t2_set_hae (which reads and clobbers
 * the locals named exactly 'addr' and 'msb'), performs the sparse
 * access, then drops the lock so the HAE cannot change underneath us.
 */
441__EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr)
442{
443 unsigned long addr = (unsigned long) xaddr;
444 unsigned long result, msb;
445 unsigned long flags;
446 spin_lock_irqsave(&t2_hae_lock, flags);
447
448 t2_set_hae;
449
450 result = *(vip) ((addr << 5) + T2_SPARSE_MEM + 0x00);
451 spin_unlock_irqrestore(&t2_hae_lock, flags);
452 return __kernel_extbl(result, addr & 3);
453}
454
455__EXTERN_INLINE u16 t2_readw(const volatile void __iomem *xaddr)
456{
457 unsigned long addr = (unsigned long) xaddr;
458 unsigned long result, msb;
459 unsigned long flags;
460 spin_lock_irqsave(&t2_hae_lock, flags);
461
462 t2_set_hae;
463
464 result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08);
465 spin_unlock_irqrestore(&t2_hae_lock, flags);
466 return __kernel_extwl(result, addr & 3);
467}
468
469/*
470 * On SABLE with T2, we must use SPARSE memory even for 32-bit access,
471 * because we cannot access all of DENSE without changing its HAE.
472 */
473__EXTERN_INLINE u32 t2_readl(const volatile void __iomem *xaddr)
474{
475 unsigned long addr = (unsigned long) xaddr;
476 unsigned long result, msb;
477 unsigned long flags;
478 spin_lock_irqsave(&t2_hae_lock, flags);
479
480 t2_set_hae;
481
482 result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18);
483 spin_unlock_irqrestore(&t2_hae_lock, flags);
484 return result & 0xffffffffUL;
485}
486
/* Quadword read: two longword sparse accesses, low then high half. */
487__EXTERN_INLINE u64 t2_readq(const volatile void __iomem *xaddr)
488{
489 unsigned long addr = (unsigned long) xaddr;
490 unsigned long r0, r1, work, msb;
491 unsigned long flags;
492 spin_lock_irqsave(&t2_hae_lock, flags);
493
494 t2_set_hae;
495
496 work = (addr << 5) + T2_SPARSE_MEM + 0x18;
497 r0 = *(vuip)(work);
498 r1 = *(vuip)(work + (4 << 5));
499 spin_unlock_irqrestore(&t2_hae_lock, flags);
500 return r1 << 32 | r0;
501}
502
/*
 * Sparse-space MMIO writers.  Same discipline as the readers: HAE
 * lock held with interrupts off across the t2_set_hae update (which
 * uses the locals named 'addr' and 'msb') and the sparse store.
 */
503__EXTERN_INLINE void t2_writeb(u8 b, volatile void __iomem *xaddr)
504{
505 unsigned long addr = (unsigned long) xaddr;
506 unsigned long msb, w;
507 unsigned long flags;
508 spin_lock_irqsave(&t2_hae_lock, flags);
509
510 t2_set_hae;
511
512 w = __kernel_insbl(b, addr & 3);
513 *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) = w;
514 spin_unlock_irqrestore(&t2_hae_lock, flags);
515}
516
517__EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr)
518{
519 unsigned long addr = (unsigned long) xaddr;
520 unsigned long msb, w;
521 unsigned long flags;
522 spin_lock_irqsave(&t2_hae_lock, flags);
523
524 t2_set_hae;
525
526 w = __kernel_inswl(b, addr & 3);
527 *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08) = w;
528 spin_unlock_irqrestore(&t2_hae_lock, flags);
529}
530
531/*
532 * On SABLE with T2, we must use SPARSE memory even for 32-bit access,
533 * because we cannot access all of DENSE without changing its HAE.
534 */
535__EXTERN_INLINE void t2_writel(u32 b, volatile void __iomem *xaddr)
536{
537 unsigned long addr = (unsigned long) xaddr;
538 unsigned long msb;
539 unsigned long flags;
540 spin_lock_irqsave(&t2_hae_lock, flags);
541
542 t2_set_hae;
543
544 *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b;
545 spin_unlock_irqrestore(&t2_hae_lock, flags);
546}
547
/* Quadword write: two longword sparse stores, low then high half. */
548__EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr)
549{
550 unsigned long addr = (unsigned long) xaddr;
551 unsigned long msb, work;
552 unsigned long flags;
553 spin_lock_irqsave(&t2_hae_lock, flags);
554
555 t2_set_hae;
556
557 work = (addr << 5) + T2_SPARSE_MEM + 0x18;
558 *(vuip)work = b;
559 *(vuip)(work + (4 << 5)) = b >> 32;
560 spin_unlock_irqrestore(&t2_hae_lock, flags);
561}
562
563__EXTERN_INLINE void __iomem *t2_ioportmap(unsigned long addr)
564{
565 return (void __iomem *)(addr + T2_IO);
566}
567
568__EXTERN_INLINE void __iomem *t2_ioremap(unsigned long addr,
569 unsigned long size)
570{
571 return (void __iomem *)(addr + T2_DENSE_MEM);
572}
573
574__EXTERN_INLINE int t2_is_ioaddr(unsigned long addr)
575{
576 return (long)addr >= 0;
577}
578
579__EXTERN_INLINE int t2_is_mmio(const volatile void __iomem *addr)
580{
581 return (unsigned long)addr >= T2_DENSE_MEM;
582}
583
584/* New-style ioread interface. The mmio routines are so ugly for T2 that
585 it doesn't make sense to merge the pio and mmio routines. */
586
/*
 * Expands to a t2_ioreadN/t2_iowriteN pair for one access width:
 * OS is the old-style suffix (b/w/l), NS the width in bits.  MMIO
 * cookies are rebased against T2_DENSE_MEM, port cookies against
 * T2_IO.  NOTE: 'xaddr - T2_DENSE_MEM' relies on the GCC extension
 * permitting arithmetic on void pointers.
 */
587#define IOPORT(OS, NS) \
588__EXTERN_INLINE unsigned int t2_ioread##NS(void __iomem *xaddr) \
589{ \
590 if (t2_is_mmio(xaddr)) \
591 return t2_read##OS(xaddr - T2_DENSE_MEM); \
592 else \
593 return t2_in##OS((unsigned long)xaddr - T2_IO); \
594} \
595__EXTERN_INLINE void t2_iowrite##NS(u##NS b, void __iomem *xaddr) \
596{ \
597 if (t2_is_mmio(xaddr)) \
598 t2_write##OS(b, xaddr - T2_DENSE_MEM); \
599 else \
600 t2_out##OS(b, (unsigned long)xaddr - T2_IO); \
601}
602
603IOPORT(b, 8)
604IOPORT(w, 16)
605IOPORT(l, 32)
606
607#undef IOPORT
608
609#undef vip
610#undef vuip
611
612#undef __IO_PREFIX
613#define __IO_PREFIX t2
614#define t2_trivial_rw_bw 0
615#define t2_trivial_rw_lq 0
616#define t2_trivial_io_bw 0
617#define t2_trivial_io_lq 0
618#define t2_trivial_iounmap 1
619#include <asm/io_trivial.h>
620
621#ifdef __IO_EXTERN_INLINE
622#undef __EXTERN_INLINE
623#undef __IO_EXTERN_INLINE
624#endif
625
626#endif /* __KERNEL__ */
627
628#endif /* __ALPHA_T2__H__ */
diff --git a/include/asm-alpha/core_titan.h b/include/asm-alpha/core_titan.h
new file mode 100644
index 000000000000..a64ccbff7d98
--- /dev/null
+++ b/include/asm-alpha/core_titan.h
@@ -0,0 +1,415 @@
1#ifndef __ALPHA_TITAN__H__
2#define __ALPHA_TITAN__H__
3
4#include <linux/types.h>
5#include <linux/pci.h>
6#include <asm/compiler.h>
7
8/*
9 * TITAN is the internal name for a core logic chipset which provides
10 * memory controller and PCI/AGP access for 21264 based systems.
11 *
12 * This file is based on:
13 *
14 * Titan Chipset Engineering Specification
15 * Revision 0.12
16 * 13 July 1999
17 *
18 */
19
20/* XXX: Do we need to conditionalize on this? */
21#ifdef USE_48_BIT_KSEG
22#define TI_BIAS 0x80000000000UL
23#else
24#define TI_BIAS 0x10000000000UL
25#endif
26
27/*
28 * CChip, DChip, and PChip registers
29 */
30
/*
 * Chipset CSR maps.  Every CSR is one 64-bit value aligned to a
 * 64-byte boundary (titan_64), so consecutive fields land 0x40
 * apart, matching the hardware register spacing.
 */
31typedef struct {
32 volatile unsigned long csr __attribute__((aligned(64)));
33} titan_64;
34
/* CChip: memory/interrupt controller CSRs. */
35typedef struct {
36 titan_64 csc;
37 titan_64 mtr;
38 titan_64 misc;
39 titan_64 mpd;
40 titan_64 aar0;
41 titan_64 aar1;
42 titan_64 aar2;
43 titan_64 aar3;
44 titan_64 dim0;
45 titan_64 dim1;
46 titan_64 dir0;
47 titan_64 dir1;
48 titan_64 drir;
49 titan_64 prben;
50 titan_64 iic0;
51 titan_64 iic1;
52 titan_64 mpr0;
53 titan_64 mpr1;
54 titan_64 mpr2;
55 titan_64 mpr3;
56 titan_64 rsvd[2];
57 titan_64 ttr;
58 titan_64 tdr;
59 titan_64 dim2;
60 titan_64 dim3;
61 titan_64 dir2;
62 titan_64 dir3;
63 titan_64 iic2;
64 titan_64 iic3;
65 titan_64 pwr;
66 titan_64 reserved[17];
67 titan_64 cmonctla;
68 titan_64 cmonctlb;
69 titan_64 cmoncnt01;
70 titan_64 cmoncnt23;
71 titan_64 cpen;
72} titan_cchip;
73
/* DChip: data-switch CSRs. */
74typedef struct {
75 titan_64 dsc;
76 titan_64 str;
77 titan_64 drev;
78 titan_64 dsc2;
79} titan_dchip;
80
/* One PA-chip port: DMA windows plus a G-port/A-port specific tail. */
81typedef struct {
82 titan_64 wsba[4];
83 titan_64 wsm[4];
84 titan_64 tba[4];
85 titan_64 pctl;
86 titan_64 plat;
87 titan_64 reserved0[2];
88 union {
89 struct {
90 titan_64 serror;
91 titan_64 serren;
92 titan_64 serrset;
93 titan_64 reserved0;
94 titan_64 gperror;
95 titan_64 gperren;
96 titan_64 gperrset;
97 titan_64 reserved1;
98 titan_64 gtlbiv;
99 titan_64 gtlbia;
100 titan_64 reserved2[2];
101 titan_64 sctl;
102 titan_64 reserved3[3];
103 } g;
104 struct {
105 titan_64 agperror;
106 titan_64 agperren;
107 titan_64 agperrset;
108 titan_64 agplastwr;
109 titan_64 aperror;
110 titan_64 aperren;
111 titan_64 aperrset;
112 titan_64 reserved0;
113 titan_64 atlbiv;
114 titan_64 atlbia;
115 titan_64 reserved1[6];
116 } a;
117 } port_specific;
118 titan_64 sprst;
119 titan_64 reserved1[31];
120} titan_pachip_port;
121
/* A PA-chip pairs one G (PCI) port with one A (AGP) port. */
122typedef struct {
123 titan_pachip_port g_port;
124 titan_pachip_port a_port;
125} titan_pachip;
126
127#define TITAN_cchip ((titan_cchip *)(IDENT_ADDR+TI_BIAS+0x1A0000000UL))
128#define TITAN_dchip ((titan_dchip *)(IDENT_ADDR+TI_BIAS+0x1B0000800UL))
129#define TITAN_pachip0 ((titan_pachip *)(IDENT_ADDR+TI_BIAS+0x180000000UL))
130#define TITAN_pachip1 ((titan_pachip *)(IDENT_ADDR+TI_BIAS+0x380000000UL))
131extern unsigned TITAN_agp;
132extern int TITAN_bootcpu;
133
134/*
135 * TITAN PA-chip Window Space Base Address register.
136 * (WSBA[0-2])
137 */
138#define wsba_m_ena 0x1
139#define wsba_m_sg 0x2
140#define wsba_m_addr 0xFFF00000
141#define wmask_k_sz1gb 0x3FF00000
/*
 * Window Space Base Address CSR overlay.  NOTE(review): the bitfield
 * order assumes GCC's LSB-first allocation on little-endian Alpha.
 */
142union TPAchipWSBA {
143 struct {
144 unsigned wsba_v_ena : 1;
145 unsigned wsba_v_sg : 1;
146 unsigned wsba_v_rsvd1 : 18;
147 unsigned wsba_v_addr : 12;
148 unsigned wsba_v_rsvd2 : 32;
149 } wsba_r_bits;
150 int wsba_q_whole [2];
151};
152
153/*
154 * TITAN PA-chip Control Register
155 * This definition covers both the G-Port GPCTL and the A-PORT APCTL.
156 * Bits <51:0> are the same in both cases. APCTL<63:52> are only
157 * applicable to AGP.
158 */
159#define pctl_m_fbtb 0x00000001
160#define pctl_m_thdis 0x00000002
161#define pctl_m_chaindis 0x00000004
162#define pctl_m_tgtlat 0x00000018
163#define pctl_m_hole 0x00000020
164#define pctl_m_mwin 0x00000040
165#define pctl_m_arbena 0x00000080
166#define pctl_m_prigrp 0x0000FF00
167#define pctl_m_ppri 0x00010000
168#define pctl_m_pcispd66 0x00020000
169#define pctl_m_cngstlt 0x003C0000
170#define pctl_m_ptpdesten 0x3FC00000
171#define pctl_m_dpcen 0x40000000
172#define pctl_m_apcen 0x0000000080000000UL
173#define pctl_m_dcrtv 0x0000000300000000UL
174#define pctl_m_en_stepping 0x0000000400000000UL
175#define apctl_m_rsvd1 0x000FFFF800000000UL
176#define apctl_m_agp_rate 0x0030000000000000UL
177#define apctl_m_agp_sba_en 0x0040000000000000UL
178#define apctl_m_agp_en 0x0080000000000000UL
179#define apctl_m_rsvd2 0x0100000000000000UL
180#define apctl_m_agp_present 0x0200000000000000UL
181#define apctl_agp_hp_rd 0x1C00000000000000UL
182#define apctl_agp_lp_rd 0xE000000000000000UL
183#define gpctl_m_rsvd 0xFFFFFFF800000000UL
/*
 * GPCTL/APCTL overlay; the bracketed comments give the hardware bit
 * positions.  NOTE(review): relies on GCC's LSB-first bitfield
 * allocation on little-endian Alpha.
 */
184union TPAchipPCTL {
185 struct {
186 unsigned pctl_v_fbtb : 1; /* A/G [0] */
187 unsigned pctl_v_thdis : 1; /* A/G [1] */
188 unsigned pctl_v_chaindis : 1; /* A/G [2] */
189 unsigned pctl_v_tgtlat : 2; /* A/G [4:3] */
190 unsigned pctl_v_hole : 1; /* A/G [5] */
191 unsigned pctl_v_mwin : 1; /* A/G [6] */
192 unsigned pctl_v_arbena : 1; /* A/G [7] */
193 unsigned pctl_v_prigrp : 8; /* A/G [15:8] */
194 unsigned pctl_v_ppri : 1; /* A/G [16] */
195 unsigned pctl_v_pcispd66 : 1; /* A/G [17] */
196 unsigned pctl_v_cngstlt : 4; /* A/G [21:18] */
197 unsigned pctl_v_ptpdesten : 8; /* A/G [29:22] */
198 unsigned pctl_v_dpcen : 1; /* A/G [30] */
199 unsigned pctl_v_apcen : 1; /* A/G [31] */
200 unsigned pctl_v_dcrtv : 2; /* A/G [33:32] */
201 unsigned pctl_v_en_stepping :1; /* A/G [34] */
202 unsigned apctl_v_rsvd1 : 17; /* A [51:35] */
203 unsigned apctl_v_agp_rate : 2; /* A [53:52] */
204 unsigned apctl_v_agp_sba_en : 1; /* A [54] */
205 unsigned apctl_v_agp_en : 1; /* A [55] */
206 unsigned apctl_v_rsvd2 : 1; /* A [56] */
207 unsigned apctl_v_agp_present : 1; /* A [57] */
208 unsigned apctl_v_agp_hp_rd : 3; /* A [60:58] */
209 unsigned apctl_v_agp_lp_rd : 3; /* A [63:61] */
210 } pctl_r_bits;
211 unsigned int pctl_l_whole [2];
212 unsigned long pctl_q_whole;
213};
214
215/*
216 * SERROR / SERREN / SERRSET
217 */
/* System error CSR overlay (SERROR/SERREN/SERRSET share this layout). */
218union TPAchipSERR {
219 struct {
220 unsigned serr_v_lost_uecc : 1; /* [0] */
221 unsigned serr_v_uecc : 1; /* [1] */
222 unsigned serr_v_cre : 1; /* [2] */
223 unsigned serr_v_nxio : 1; /* [3] */
224 unsigned serr_v_lost_cre : 1; /* [4] */
225 unsigned serr_v_rsvd0 : 10; /* [14:5] */
226 unsigned serr_v_addr : 32; /* [46:15] */
227 unsigned serr_v_rsvd1 : 5; /* [51:47] */
228 unsigned serr_v_source : 2; /* [53:52] */
229 unsigned serr_v_cmd : 2; /* [55:54] */
230 unsigned serr_v_syn : 8; /* [63:56] */
231 } serr_r_bits;
232 unsigned int serr_l_whole[2];
233 unsigned long serr_q_whole;
234};
235
236/*
237 * GPERROR / APERROR / GPERREN / APERREN / GPERRSET / APERRSET
238 */
/* PCI port error CSR overlay (G/A PERROR, PERREN, PERRSET). */
239union TPAchipPERR {
240 struct {
241 unsigned long perr_v_lost : 1; /* [0] */
242 unsigned long perr_v_serr : 1; /* [1] */
243 unsigned long perr_v_perr : 1; /* [2] */
244 unsigned long perr_v_dcrto : 1; /* [3] */
245 unsigned long perr_v_sge : 1; /* [4] */
246 unsigned long perr_v_ape : 1; /* [5] */
247 unsigned long perr_v_ta : 1; /* [6] */
248 unsigned long perr_v_dpe : 1; /* [7] */
249 unsigned long perr_v_nds : 1; /* [8] */
250 unsigned long perr_v_iptpr : 1; /* [9] */
251 unsigned long perr_v_iptpw : 1; /* [10] */
252 unsigned long perr_v_rsvd0 : 3; /* [13:11] */
253 unsigned long perr_v_addr : 33; /* [46:14] */
254 unsigned long perr_v_dac : 1; /* [47] */
255 unsigned long perr_v_mwin : 1; /* [48] */
256 unsigned long perr_v_rsvd1 : 3; /* [51:49] */
257 unsigned long perr_v_cmd : 4; /* [55:52] */
258 unsigned long perr_v_rsvd2 : 8; /* [63:56] */
259 } perr_r_bits;
260 unsigned int perr_l_whole[2];
261 unsigned long perr_q_whole;
262};
263
264/*
265 * AGPERROR / AGPERREN / AGPERRSET
266 */
/* AGP error CSR overlay (AGPERROR/AGPERREN/AGPERRSET). */
267union TPAchipAGPERR {
268 struct {
269 unsigned agperr_v_lost : 1; /* [0] */
270 unsigned agperr_v_lpqfull : 1; /* [1] */
271 unsigned apgerr_v_hpqfull : 1; /* [2] */
/* NOTE(review): 'apgerr_v_hpqfull' transposes agp->apg; kept as-is
   since renaming the field would break any existing users. */
272 unsigned agperr_v_rescmd : 1; /* [3] */
273 unsigned agperr_v_ipte : 1; /* [4] */
274 unsigned agperr_v_ptp : 1; /* [5] */
275 unsigned agperr_v_nowindow : 1; /* [6] */
276 unsigned agperr_v_rsvd0 : 8; /* [14:7] */
277 unsigned agperr_v_addr : 32; /* [46:15] */
278 unsigned agperr_v_rsvd1 : 1; /* [47] */
279 unsigned agperr_v_dac : 1; /* [48] */
280 unsigned agperr_v_mwin : 1; /* [49] */
281 unsigned agperr_v_cmd : 3; /* [52:50] */
282 unsigned agperr_v_length : 6; /* [58:53] */
283 unsigned agperr_v_fence : 1; /* [59] */
284 unsigned agperr_v_rsvd2 : 4; /* [63:60] */
285 } agperr_r_bits;
286 unsigned int agperr_l_whole[2];
287 unsigned long agperr_q_whole;
288};
289/*
290 * Memory spaces:
291 * Hose numbers are assigned as follows:
292 * 0 - pachip 0 / G Port
293 * 1 - pachip 1 / G Port
294 * 2 - pachip 0 / A Port
295 * 3 - pachip 1 / A Port
296 */
297#define TITAN_HOSE_SHIFT (33)
298#define TITAN_HOSE(h) (((unsigned long)(h)) << TITAN_HOSE_SHIFT)
299#define TITAN_BASE (IDENT_ADDR + TI_BIAS)
300#define TITAN_MEM(h) (TITAN_BASE+TITAN_HOSE(h)+0x000000000UL)
301#define _TITAN_IACK_SC(h) (TITAN_BASE+TITAN_HOSE(h)+0x1F8000000UL)
302#define TITAN_IO(h) (TITAN_BASE+TITAN_HOSE(h)+0x1FC000000UL)
303#define TITAN_CONF(h) (TITAN_BASE+TITAN_HOSE(h)+0x1FE000000UL)
304
305#define TITAN_HOSE_MASK TITAN_HOSE(3)
306#define TITAN_IACK_SC _TITAN_IACK_SC(0) /* hack! */
307
308/*
309 * The canonical non-remapped I/O and MEM addresses have these values
310 * subtracted out. This is arranged so that folks manipulating ISA
311 * devices can use their familiar numbers and have them map to bus 0.
312 */
313
314#define TITAN_IO_BIAS TITAN_IO(0)
315#define TITAN_MEM_BIAS TITAN_MEM(0)
316
317/* The IO address space is larger than 0xffff */
318#define TITAN_IO_SPACE (TITAN_CONF(0) - TITAN_IO(0))
319
320/* TIG Space */
321#define TITAN_TIG_SPACE (TITAN_BASE + 0x100000000UL)
322
323/* Offset between ram physical addresses and pci64 DAC bus addresses. */
324/* ??? Just a guess. Ought to confirm it hasn't been moved. */
325#define TITAN_DAC_OFFSET (1UL << 40)
326
327/*
328 * Data structure for handling TITAN machine checks:
329 */
330#define SCB_Q_SYSERR 0x620
331#define SCB_Q_PROCERR 0x630
332#define SCB_Q_SYSMCHK 0x660
333#define SCB_Q_PROCMCHK 0x670
334#define SCB_Q_SYSEVENT 0x680 /* environmental / system management */
/* PAL logout system area: chipset error CSRs for both PA-chip ports. */
335struct el_TITAN_sysdata_mcheck {
336 u64 summary; /* 0x00 */
337 u64 c_dirx; /* 0x08 */
338 u64 c_misc; /* 0x10 */
339 u64 p0_serror; /* 0x18 */
340 u64 p0_gperror; /* 0x20 */
341 u64 p0_aperror; /* 0x28 */
342 u64 p0_agperror;/* 0x30 */
343 u64 p1_serror; /* 0x38 */
344 u64 p1_gperror; /* 0x40 */
345 u64 p1_aperror; /* 0x48 */
346 u64 p1_agperror;/* 0x50 */
347};
348
349/*
350 * System area for a privateer 680 environmental/system management mcheck
351 */
352struct el_PRIVATEER_envdata_mcheck {
353 u64 summary; /* 0x00 */
354 u64 c_dirx; /* 0x08 */
355 u64 smir; /* 0x10 */
356 u64 cpuir; /* 0x18 */
357 u64 psir; /* 0x20 */
358 u64 fault; /* 0x28 */
359 u64 sys_doors; /* 0x30 */
360 u64 temp_warn; /* 0x38 */
361 u64 fan_ctrl; /* 0x40 */
362 u64 code; /* 0x48 */
363 u64 reserved; /* 0x50 */
364};
365
366#ifdef __KERNEL__
367
368#ifndef __EXTERN_INLINE
369#define __EXTERN_INLINE extern inline
370#define __IO_EXTERN_INLINE
371#endif
372
373/*
374 * I/O functions:
375 *
376 * TITAN, a 21??? PCI/memory support chipset for the EV6 (21264)
377 * can only use linear accesses to get at PCI/AGP memory and I/O spaces.
378 */
379
380/*
381 * Memory functions. all accesses are done through linear space.
382 */
383
384__EXTERN_INLINE void __iomem *titan_ioportmap(unsigned long addr)
385{
386 return (void __iomem *)(addr + TITAN_IO_BIAS);
387}
388
389extern void __iomem *titan_ioremap(unsigned long addr, unsigned long size);
390extern void titan_iounmap(volatile void __iomem *addr);
391
392__EXTERN_INLINE int titan_is_ioaddr(unsigned long addr)
393{
394 return addr >= TITAN_BASE;
395}
396
397extern int titan_is_mmio(const volatile void __iomem *addr);
398
399#undef __IO_PREFIX
400#define __IO_PREFIX titan
401#define titan_trivial_rw_bw 1
402#define titan_trivial_rw_lq 1
403#define titan_trivial_io_bw 1
404#define titan_trivial_io_lq 1
405#define titan_trivial_iounmap 0
406#include <asm/io_trivial.h>
407
408#ifdef __IO_EXTERN_INLINE
409#undef __EXTERN_INLINE
410#undef __IO_EXTERN_INLINE
411#endif
412
413#endif /* __KERNEL__ */
414
415#endif /* __ALPHA_TITAN__H__ */
diff --git a/include/asm-alpha/core_tsunami.h b/include/asm-alpha/core_tsunami.h
new file mode 100644
index 000000000000..44e635d2c571
--- /dev/null
+++ b/include/asm-alpha/core_tsunami.h
@@ -0,0 +1,344 @@
1#ifndef __ALPHA_TSUNAMI__H__
2#define __ALPHA_TSUNAMI__H__
3
4#include <linux/types.h>
5#include <asm/compiler.h>
6
7/*
8 * TSUNAMI/TYPHOON are the internal names for the core logic chipset which
9 * provides memory controller and PCI access for the 21264 based systems.
10 *
11 * This file is based on:
12 *
13 * Tsunami System Programmers Manual
14 * Preliminary, Chapters 2-5
15 *
16 */
17
18/* XXX: Do we need to conditionalize on this? */
19#ifdef USE_48_BIT_KSEG
20#define TS_BIAS 0x80000000000UL
21#else
22#define TS_BIAS 0x10000000000UL
23#endif
24
25/*
26 * CChip, DChip, and PChip registers
27 */
28
/*
 * One Tsunami CSR: a 64-bit register occupying a 64-byte-aligned slot,
 * so that an array of these matches the chip's register spacing.
 */
typedef struct {
	volatile unsigned long csr __attribute__((aligned(64)));
} tsunami_64;
32
/*
 * CChip register file.  Field order mirrors the register layout (one
 * tsunami_64 slot each), so the struct is overlaid directly on the CSR
 * space at TSUNAMI_cchip -- do not reorder fields.
 */
typedef struct {
	tsunami_64	csc;
	tsunami_64	mtr;
	tsunami_64	misc;
	tsunami_64	mpd;
	tsunami_64	aar0;
	tsunami_64	aar1;
	tsunami_64	aar2;
	tsunami_64	aar3;
	tsunami_64	dim0;
	tsunami_64	dim1;
	tsunami_64	dir0;
	tsunami_64	dir1;
	tsunami_64	drir;
	tsunami_64	prben;
	tsunami_64	iic;	/* a.k.a. iic0 */
	tsunami_64	wdr;	/* a.k.a. iic1 */
	tsunami_64	mpr0;
	tsunami_64	mpr1;
	tsunami_64	mpr2;
	tsunami_64	mpr3;
	tsunami_64	mctl;
	tsunami_64	__pad1;	/* hole in the register map */
	tsunami_64	ttr;
	tsunami_64	tdr;
	tsunami_64	dim2;
	tsunami_64	dim3;
	tsunami_64	dir2;
	tsunami_64	dir3;
	tsunami_64	iic2;
	tsunami_64	iic3;
} tsunami_cchip;
65
/* DChip register file; overlaid on the CSR space at TSUNAMI_dchip. */
typedef struct {
	tsunami_64	dsc;
	tsunami_64	str;
	tsunami_64	drev;
} tsunami_dchip;
71
/*
 * PChip (PCI host bridge) register file; overlaid at TSUNAMI_pchip0/1.
 * wsba/wsm/tba are the four PCI DMA window base/mask/translation sets.
 */
typedef struct {
	tsunami_64	wsba[4];
	tsunami_64	wsm[4];
	tsunami_64	tba[4];
	tsunami_64	pctl;
	tsunami_64	plat;
	tsunami_64	reserved;
	tsunami_64	perror;
	tsunami_64	perrmask;
	tsunami_64	perrset;
	tsunami_64	tlbiv;
	tsunami_64	tlbia;
	tsunami_64	pmonctl;
	tsunami_64	pmoncnt;
} tsunami_pchip;
87
88#define TSUNAMI_cchip ((tsunami_cchip *)(IDENT_ADDR+TS_BIAS+0x1A0000000UL))
89#define TSUNAMI_dchip ((tsunami_dchip *)(IDENT_ADDR+TS_BIAS+0x1B0000800UL))
90#define TSUNAMI_pchip0 ((tsunami_pchip *)(IDENT_ADDR+TS_BIAS+0x180000000UL))
91#define TSUNAMI_pchip1 ((tsunami_pchip *)(IDENT_ADDR+TS_BIAS+0x380000000UL))
92extern int TSUNAMI_bootcpu;
93
94/*
95 * TSUNAMI Pchip Error register.
96 */
97
98#define perror_m_lost 0x1
99#define perror_m_serr 0x2
100#define perror_m_perr 0x4
101#define perror_m_dcrto 0x8
102#define perror_m_sge 0x10
103#define perror_m_ape 0x20
104#define perror_m_ta 0x40
105#define perror_m_rdpe 0x80
106#define perror_m_nds 0x100
107#define perror_m_rto 0x200
108#define perror_m_uecc 0x400
109#define perror_m_cre 0x800
110#define perror_m_addrl 0xFFFFFFFF0000UL
111#define perror_m_addrh 0x7000000000000UL
112#define perror_m_cmd 0xF0000000000000UL
113#define perror_m_syn 0xFF00000000000000UL
/*
 * Pchip PERROR register, viewed either as bitfields or as raw words.
 * The perror_m_* masks above select the same fields from the raw value.
 * NOTE(review): the bitfield view relies on alpha's low-to-high bit
 * allocation (implementation-defined in C) -- not portable elsewhere.
 */
union TPchipPERROR {
	struct {
		unsigned int perror_v_lost : 1;
		unsigned perror_v_serr : 1;
		unsigned perror_v_perr : 1;
		unsigned perror_v_dcrto : 1;
		unsigned perror_v_sge : 1;
		unsigned perror_v_ape : 1;
		unsigned perror_v_ta : 1;
		unsigned perror_v_rdpe : 1;
		unsigned perror_v_nds : 1;
		unsigned perror_v_rto : 1;
		unsigned perror_v_uecc : 1;
		unsigned perror_v_cre : 1;
		unsigned perror_v_rsvd1 : 4;
		unsigned perror_v_addrl : 32;
		unsigned perror_v_addrh : 3;
		unsigned perror_v_rsvd2 : 1;
		unsigned perror_v_cmd : 4;
		unsigned perror_v_syn : 8;
	} perror_r_bits;
	/* Raw 64-bit value as two 32-bit halves. */
	int perror_q_whole [2];
};
137
138/*
139 * TSUNAMI Pchip Window Space Base Address register.
140 */
141#define wsba_m_ena 0x1
142#define wsba_m_sg 0x2
143#define wsba_m_ptp 0x4
144#define wsba_m_addr 0xFFF00000
145#define wmask_k_sz1gb 0x3FF00000
/*
 * Pchip Window Space Base Address register: enable/scatter-gather/
 * peer-to-peer flags plus the window base address bits (wsba_m_* masks
 * above select the same fields from the raw value).
 */
union TPchipWSBA {
	struct {
		unsigned wsba_v_ena : 1;
		unsigned wsba_v_sg : 1;
		unsigned wsba_v_ptp : 1;
		unsigned wsba_v_rsvd1 : 17;
		unsigned wsba_v_addr : 12;
		unsigned wsba_v_rsvd2 : 32;
	} wsba_r_bits;
	/* Raw 64-bit value as two 32-bit halves. */
	int wsba_q_whole [2];
};
157
158/*
159 * TSUNAMI Pchip Control Register
160 */
161#define pctl_m_fdsc 0x1
162#define pctl_m_fbtb 0x2
163#define pctl_m_thdis 0x4
164#define pctl_m_chaindis 0x8
165#define pctl_m_tgtlat 0x10
166#define pctl_m_hole 0x20
167#define pctl_m_mwin 0x40
168#define pctl_m_arbena 0x80
169#define pctl_m_prigrp 0x7F00
170#define pctl_m_ppri 0x8000
171#define pctl_m_rsvd1 0x30000
172#define pctl_m_eccen 0x40000
173#define pctl_m_padm 0x80000
174#define pctl_m_cdqmax 0xF00000
175#define pctl_m_rev 0xFF000000
176#define pctl_m_crqmax 0xF00000000UL
177#define pctl_m_ptpmax 0xF000000000UL
178#define pctl_m_pclkx 0x30000000000UL
179#define pctl_m_fdsdis 0x40000000000UL
180#define pctl_m_fdwdis 0x80000000000UL
181#define pctl_m_ptevrfy 0x100000000000UL
182#define pctl_m_rpp 0x200000000000UL
183#define pctl_m_pid 0xC00000000000UL
184#define pctl_m_rsvd2 0xFFFF000000000000UL
185
/*
 * Pchip Control register as bitfields / raw words; the pctl_m_* masks
 * above address the same fields.  NOTE(review): bitfield layout assumes
 * alpha's low-to-high bit allocation.
 */
union TPchipPCTL {
	struct {
		unsigned pctl_v_fdsc : 1;
		unsigned pctl_v_fbtb : 1;
		unsigned pctl_v_thdis : 1;
		unsigned pctl_v_chaindis : 1;
		unsigned pctl_v_tgtlat : 1;
		unsigned pctl_v_hole : 1;
		unsigned pctl_v_mwin : 1;
		unsigned pctl_v_arbena : 1;
		unsigned pctl_v_prigrp : 7;
		unsigned pctl_v_ppri : 1;
		unsigned pctl_v_rsvd1 : 2;
		unsigned pctl_v_eccen : 1;
		unsigned pctl_v_padm : 1;
		unsigned pctl_v_cdqmax : 4;
		unsigned pctl_v_rev : 8;
		unsigned pctl_v_crqmax : 4;
		unsigned pctl_v_ptpmax : 4;
		unsigned pctl_v_pclkx : 2;
		unsigned pctl_v_fdsdis : 1;
		unsigned pctl_v_fdwdis : 1;
		unsigned pctl_v_ptevrfy : 1;
		unsigned pctl_v_rpp : 1;
		unsigned pctl_v_pid : 2;
		unsigned pctl_v_rsvd2 : 16;
	} pctl_r_bits;
	/* Raw 64-bit value as two 32-bit halves. */
	int pctl_q_whole [2];
};
215
216/*
217 * TSUNAMI Pchip Error Mask Register.
218 */
219#define perrmask_m_lost 0x1
220#define perrmask_m_serr 0x2
221#define perrmask_m_perr 0x4
222#define perrmask_m_dcrto 0x8
223#define perrmask_m_sge 0x10
224#define perrmask_m_ape 0x20
225#define perrmask_m_ta 0x40
226#define perrmask_m_rdpe 0x80
227#define perrmask_m_nds 0x100
228#define perrmask_m_rto 0x200
229#define perrmask_m_uecc 0x400
230#define perrmask_m_cre 0x800
231#define perrmask_m_rsvd 0xFFFFFFFFFFFFF000UL
/*
 * Pchip Error Mask register: one enable bit per PERROR error source
 * (same bit positions as union TPchipPERROR's low 12 bits).
 */
union TPchipPERRMASK {
	struct {
		unsigned int perrmask_v_lost : 1;
		unsigned perrmask_v_serr : 1;
		unsigned perrmask_v_perr : 1;
		unsigned perrmask_v_dcrto : 1;
		unsigned perrmask_v_sge : 1;
		unsigned perrmask_v_ape : 1;
		unsigned perrmask_v_ta : 1;
		unsigned perrmask_v_rdpe : 1;
		unsigned perrmask_v_nds : 1;
		unsigned perrmask_v_rto : 1;
		unsigned perrmask_v_uecc : 1;
		unsigned perrmask_v_cre : 1;
		unsigned perrmask_v_rsvd1 : 20;
		unsigned perrmask_v_rsvd2 : 32;
	} perrmask_r_bits;
	/* Raw 64-bit value as two 32-bit halves. */
	int perrmask_q_whole [2];
};
251
252/*
253 * Memory spaces:
254 */
255#define TSUNAMI_HOSE(h) (((unsigned long)(h)) << 33)
256#define TSUNAMI_BASE (IDENT_ADDR + TS_BIAS)
257
258#define TSUNAMI_MEM(h) (TSUNAMI_BASE+TSUNAMI_HOSE(h) + 0x000000000UL)
259#define _TSUNAMI_IACK_SC(h) (TSUNAMI_BASE+TSUNAMI_HOSE(h) + 0x1F8000000UL)
260#define TSUNAMI_IO(h) (TSUNAMI_BASE+TSUNAMI_HOSE(h) + 0x1FC000000UL)
261#define TSUNAMI_CONF(h) (TSUNAMI_BASE+TSUNAMI_HOSE(h) + 0x1FE000000UL)
262
263#define TSUNAMI_IACK_SC _TSUNAMI_IACK_SC(0) /* hack! */
264
265
266/*
267 * The canonical non-remaped I/O and MEM addresses have these values
268 * subtracted out. This is arranged so that folks manipulating ISA
269 * devices can use their familiar numbers and have them map to bus 0.
270 */
271
272#define TSUNAMI_IO_BIAS TSUNAMI_IO(0)
273#define TSUNAMI_MEM_BIAS TSUNAMI_MEM(0)
274
275/* The IO address space is larger than 0xffff */
276#define TSUNAMI_IO_SPACE (TSUNAMI_CONF(0) - TSUNAMI_IO(0))
277
278/* Offset between ram physical addresses and pci64 DAC bus addresses. */
279#define TSUNAMI_DAC_OFFSET (1UL << 40)
280
281/*
282 * Data structure for handling TSUNAMI machine checks:
283 */
struct el_TSUNAMI_sysdata_mcheck {
	/* Intentionally empty: no system-specific machine-check logout
	   fields are defined for Tsunami in this header. */
};
286
287
288#ifdef __KERNEL__
289
290#ifndef __EXTERN_INLINE
291#define __EXTERN_INLINE extern inline
292#define __IO_EXTERN_INLINE
293#endif
294
295/*
296 * I/O functions:
297 *
298 * TSUNAMI, the 21??? PCI/memory support chipset for the EV6 (21264)
299 * can only use linear accesses to get at PCI memory and I/O spaces.
300 */
301
302/*
303 * Memory functions. all accesses are done through linear space.
304 */
305
306__EXTERN_INLINE void __iomem *tsunami_ioportmap(unsigned long addr)
307{
308 return (void __iomem *)(addr + TSUNAMI_IO_BIAS);
309}
310
311__EXTERN_INLINE void __iomem *tsunami_ioremap(unsigned long addr,
312 unsigned long size)
313{
314 return (void __iomem *)(addr + TSUNAMI_MEM_BIAS);
315}
316
317__EXTERN_INLINE int tsunami_is_ioaddr(unsigned long addr)
318{
319 return addr >= TSUNAMI_BASE;
320}
321
322__EXTERN_INLINE int tsunami_is_mmio(const volatile void __iomem *xaddr)
323{
324 unsigned long addr = (unsigned long) xaddr;
325 return (addr & 0x100000000UL) == 0;
326}
327
328#undef __IO_PREFIX
329#define __IO_PREFIX tsunami
330#define tsunami_trivial_rw_bw 1
331#define tsunami_trivial_rw_lq 1
332#define tsunami_trivial_io_bw 1
333#define tsunami_trivial_io_lq 1
334#define tsunami_trivial_iounmap 1
335#include <asm/io_trivial.h>
336
337#ifdef __IO_EXTERN_INLINE
338#undef __EXTERN_INLINE
339#undef __IO_EXTERN_INLINE
340#endif
341
342#endif /* __KERNEL__ */
343
344#endif /* __ALPHA_TSUNAMI__H__ */
diff --git a/include/asm-alpha/core_wildfire.h b/include/asm-alpha/core_wildfire.h
new file mode 100644
index 000000000000..12af803d445a
--- /dev/null
+++ b/include/asm-alpha/core_wildfire.h
@@ -0,0 +1,318 @@
1#ifndef __ALPHA_WILDFIRE__H__
2#define __ALPHA_WILDFIRE__H__
3
4#include <linux/types.h>
5#include <asm/compiler.h>
6
7#define WILDFIRE_MAX_QBB 8 /* more than 8 requires other mods */
8#define WILDFIRE_PCA_PER_QBB 4
9#define WILDFIRE_IRQ_PER_PCA 64
10
11#define WILDFIRE_NR_IRQS \
12 (WILDFIRE_MAX_QBB * WILDFIRE_PCA_PER_QBB * WILDFIRE_IRQ_PER_PCA)
13
14extern unsigned char wildfire_hard_qbb_map[WILDFIRE_MAX_QBB];
15extern unsigned char wildfire_soft_qbb_map[WILDFIRE_MAX_QBB];
16#define QBB_MAP_EMPTY 0xff
17
18extern unsigned long wildfire_hard_qbb_mask;
19extern unsigned long wildfire_soft_qbb_mask;
20extern unsigned long wildfire_gp_mask;
21extern unsigned long wildfire_hs_mask;
22extern unsigned long wildfire_iop_mask;
23extern unsigned long wildfire_ior_mask;
24extern unsigned long wildfire_pca_mask;
25extern unsigned long wildfire_cpu_mask;
26extern unsigned long wildfire_mem_mask;
27
28#define WILDFIRE_QBB_EXISTS(qbbno) (wildfire_soft_qbb_mask & (1 << (qbbno)))
29
30#define WILDFIRE_MEM_EXISTS(qbbno) (wildfire_mem_mask & (0xf << ((qbbno) << 2)))
31
32#define WILDFIRE_PCA_EXISTS(qbbno, pcano) \
33 (wildfire_pca_mask & (1 << (((qbbno) << 2) + (pcano))))
34
/*
 * Wildfire CSR slot types: one 64-bit register per 64-, 256-, or
 * 2048-byte-aligned slot, matching the spacing of the various register
 * files below.
 */
typedef struct {
	volatile unsigned long csr __attribute__((aligned(64)));
} wildfire_64;

typedef struct {
	volatile unsigned long csr __attribute__((aligned(256)));
} wildfire_256;

typedef struct {
	volatile unsigned long csr __attribute__((aligned(2048)));
} wildfire_2k;
46
/*
 * QSD register file; overlaid on the CSR space at WILDFIRE_qsd(q).
 * Field order mirrors the hardware layout -- do not reorder.
 */
typedef struct {
	wildfire_64	qsd_whami;
	wildfire_64	qsd_rev;
	wildfire_64	qsd_port_present;
	wildfire_64	qsd_port_active;
	wildfire_64	qsd_fault_ena;
	wildfire_64	qsd_cpu_int_ena;
	wildfire_64	qsd_mem_config;
	wildfire_64	qsd_err_sum;
	wildfire_64	ce_sum[4];
	wildfire_64	dev_init[4];
	wildfire_64	it_int[4];
	wildfire_64	ip_int[4];
	wildfire_64	uce_sum[4];
	wildfire_64	se_sum__non_dev_int[4];
	wildfire_64	scratch[4];
	wildfire_64	qsd_timer;
	wildfire_64	qsd_diag;
} wildfire_qsd;
66
/*
 * Fast-path QSD view (256-byte register spacing); overlaid at
 * WILDFIRE_fast_qsd().
 */
typedef struct {
	wildfire_256	qsd_whami;
	wildfire_256	__pad1;	/* hole in the register map */
	wildfire_256	ce_sum;
	wildfire_256	dev_init;
	wildfire_256	it_int;
	wildfire_256	ip_int;
	wildfire_256	uce_sum;
	wildfire_256	se_sum;
} wildfire_fast_qsd;
77
/*
 * QSA register file (2 KB register spacing); overlaid at
 * WILDFIRE_qsa(q).  __pad fields cover holes in the register map.
 */
typedef struct {
	wildfire_2k	qsa_qbb_id;
	wildfire_2k	__pad1;
	wildfire_2k	qsa_port_ena;
	wildfire_2k	qsa_scratch;
	wildfire_2k	qsa_config[5];
	wildfire_2k	qsa_ref_int;
	wildfire_2k	qsa_qbb_pop[2];
	wildfire_2k	qsa_dtag_fc;
	wildfire_2k	__pad2[3];
	wildfire_2k	qsa_diag;
	wildfire_2k	qsa_diag_lock[4];
	wildfire_2k	__pad3[11];
	wildfire_2k	qsa_cpu_err_sum;
	wildfire_2k	qsa_misc_err_sum;
	wildfire_2k	qsa_tmo_err_sum;
	wildfire_2k	qsa_err_ena;
	wildfire_2k	qsa_tmo_config;
	wildfire_2k	qsa_ill_cmd_err_sum;
	wildfire_2k	__pad4[26];
	wildfire_2k	qsa_busy_mask;
	wildfire_2k	qsa_arr_valid;
	wildfire_2k	__pad5[2];
	wildfire_2k	qsa_port_map[4];
	wildfire_2k	qsa_arr_addr[8];
	wildfire_2k	qsa_arr_mask[8];
} wildfire_qsa;
105
/*
 * IOP register file; overlaid at WILDFIRE_iop(q).  Anonymous inner
 * structs group per-hose and per-device-interrupt register pairs.
 */
typedef struct {
	wildfire_64	ioa_config;
	wildfire_64	iod_config;
	wildfire_64	iop_switch_credits;
	wildfire_64	__pad1;
	wildfire_64	iop_hose_credits;
	wildfire_64	__pad2[11];
	struct {
		wildfire_64	__pad3;
		wildfire_64	init;
	} iop_hose[4];
	wildfire_64	ioa_hose_0_ctrl;
	wildfire_64	iod_hose_0_ctrl;
	wildfire_64	ioa_hose_1_ctrl;
	wildfire_64	iod_hose_1_ctrl;
	wildfire_64	ioa_hose_2_ctrl;
	wildfire_64	iod_hose_2_ctrl;
	wildfire_64	ioa_hose_3_ctrl;
	wildfire_64	iod_hose_3_ctrl;
	struct {
		wildfire_64	target;
		wildfire_64	__pad4;
	} iop_dev_int[4];

	wildfire_64	iop_err_int_target;
	wildfire_64	__pad5[7];
	wildfire_64	iop_qbb_err_sum;
	wildfire_64	__pad6;
	wildfire_64	iop_qbb_se_sum;
	wildfire_64	__pad7;
	wildfire_64	ioa_err_sum;
	wildfire_64	iod_err_sum;
	wildfire_64	__pad8[4];
	wildfire_64	ioa_diag_force_err;
	wildfire_64	iod_diag_force_err;
	wildfire_64	__pad9[4];
	wildfire_64	iop_diag_send_err_int;
	wildfire_64	__pad10[15];
	wildfire_64	ioa_scratch;
	wildfire_64	iod_scratch;
} wildfire_iop;
147
/*
 * GP register file (partial -- see "not complete" below); overlaid at
 * WILDFIRE_gp(q).
 */
typedef struct {
	wildfire_2k	gpa_qbb_map[4];
	wildfire_2k	gpa_mem_pop_map;
	wildfire_2k	gpa_scratch;
	wildfire_2k	gpa_diag;
	wildfire_2k	gpa_config_0;
	wildfire_2k	__pad1;
	wildfire_2k	gpa_init_id;
	wildfire_2k	gpa_config_2;
	/* not complete */
} wildfire_gp;
159
/*
 * PCA register file; overlaid at WILDFIRE_pca(q,pca).  pca_int groups
 * per-interrupt target/enable register pairs.
 */
typedef struct {
	wildfire_64	pca_what_am_i;
	wildfire_64	pca_err_sum;
	wildfire_64	pca_diag_force_err;
	wildfire_64	pca_diag_send_err_int;
	wildfire_64	pca_hose_credits;
	wildfire_64	pca_scratch;
	wildfire_64	pca_micro_addr;
	wildfire_64	pca_micro_data;
	wildfire_64	pca_pend_int;
	wildfire_64	pca_sent_int;
	wildfire_64	__pad1;
	wildfire_64	pca_stdio_edge_level;
	wildfire_64	__pad2[52];
	struct {
		wildfire_64	target;
		wildfire_64	enable;
	} pca_int[4];
	wildfire_64	__pad3[56];
	wildfire_64	pca_alt_sent_int[32];
} wildfire_pca;
181
/* NE register file (only the ID register declared); see WILDFIRE_ne(). */
typedef struct {
	wildfire_64	ne_what_am_i;
	/* not complete */
} wildfire_ne;
186
/* FE register file (only the ID register declared); see WILDFIRE_fe(). */
typedef struct {
	wildfire_64	fe_what_am_i;
	/* not complete */
} wildfire_fe;
191
/*
 * Per-hose PCI register file; overlaid at WILDFIRE_pci(q,h).
 * pci_window holds the four DMA window base/mask/translation sets.
 */
typedef struct {
	wildfire_64	pci_io_addr_ext;
	wildfire_64	pci_ctrl;
	wildfire_64	pci_err_sum;
	wildfire_64	pci_err_addr;
	wildfire_64	pci_stall_cnt;
	wildfire_64	pci_iack_special;
	wildfire_64	__pad1[2];
	wildfire_64	pci_pend_int;
	wildfire_64	pci_sent_int;
	wildfire_64	__pad2[54];
	struct {
		wildfire_64	wbase;
		wildfire_64	wmask;
		wildfire_64	tbase;
	} pci_window[4];
	wildfire_64	pci_flush_tlb;
	wildfire_64	pci_perf_mon;
} wildfire_pci;
211
212#define WILDFIRE_ENTITY_SHIFT 18
213
214#define WILDFIRE_GP_ENTITY (0x10UL << WILDFIRE_ENTITY_SHIFT)
215#define WILDFIRE_IOP_ENTITY (0x08UL << WILDFIRE_ENTITY_SHIFT)
216#define WILDFIRE_QSA_ENTITY (0x04UL << WILDFIRE_ENTITY_SHIFT)
217#define WILDFIRE_QSD_ENTITY_SLOW (0x05UL << WILDFIRE_ENTITY_SHIFT)
218#define WILDFIRE_QSD_ENTITY_FAST (0x01UL << WILDFIRE_ENTITY_SHIFT)
219
220#define WILDFIRE_PCA_ENTITY(pca) ((0xc|(pca))<<WILDFIRE_ENTITY_SHIFT)
221
222#define WILDFIRE_BASE (IDENT_ADDR | (1UL << 40))
223
224#define WILDFIRE_QBB_MASK 0x0fUL /* for now, only 4 bits/16 QBBs */
225
226#define WILDFIRE_QBB(q) ((~((long)(q)) & WILDFIRE_QBB_MASK) << 36)
227#define WILDFIRE_HOSE(h) ((long)(h) << 33)
228
229#define WILDFIRE_QBB_IO(q) (WILDFIRE_BASE | WILDFIRE_QBB(q))
230#define WILDFIRE_QBB_HOSE(q,h) (WILDFIRE_QBB_IO(q) | WILDFIRE_HOSE(h))
231
232#define WILDFIRE_MEM(q,h) (WILDFIRE_QBB_HOSE(q,h) | 0x000000000UL)
233#define WILDFIRE_CONF(q,h) (WILDFIRE_QBB_HOSE(q,h) | 0x1FE000000UL)
234#define WILDFIRE_IO(q,h) (WILDFIRE_QBB_HOSE(q,h) | 0x1FF000000UL)
235
236#define WILDFIRE_qsd(q) \
237 ((wildfire_qsd *)(WILDFIRE_QBB_IO(q)|WILDFIRE_QSD_ENTITY_SLOW|(((1UL<<13)-1)<<23)))
238
239#define WILDFIRE_fast_qsd() \
240 ((wildfire_fast_qsd *)(WILDFIRE_QBB_IO(0)|WILDFIRE_QSD_ENTITY_FAST|(((1UL<<13)-1)<<23)))
241
242#define WILDFIRE_qsa(q) \
243 ((wildfire_qsa *)(WILDFIRE_QBB_IO(q)|WILDFIRE_QSA_ENTITY|(((1UL<<13)-1)<<23)))
244
245#define WILDFIRE_iop(q) \
246 ((wildfire_iop *)(WILDFIRE_QBB_IO(q)|WILDFIRE_IOP_ENTITY|(((1UL<<13)-1)<<23)))
247
248#define WILDFIRE_gp(q) \
249 ((wildfire_gp *)(WILDFIRE_QBB_IO(q)|WILDFIRE_GP_ENTITY|(((1UL<<13)-1)<<23)))
250
251#define WILDFIRE_pca(q,pca) \
252 ((wildfire_pca *)(WILDFIRE_QBB_IO(q)|WILDFIRE_PCA_ENTITY(pca)|(((1UL<<13)-1)<<23)))
253
254#define WILDFIRE_ne(q,pca) \
255 ((wildfire_ne *)(WILDFIRE_QBB_IO(q)|WILDFIRE_PCA_ENTITY(pca)|(((1UL<<13)-1)<<23)|(1UL<<16)))
256
257#define WILDFIRE_fe(q,pca) \
258 ((wildfire_fe *)(WILDFIRE_QBB_IO(q)|WILDFIRE_PCA_ENTITY(pca)|(((1UL<<13)-1)<<23)|(3UL<<15)))
259
260#define WILDFIRE_pci(q,h) \
261 ((wildfire_pci *)(WILDFIRE_QBB_IO(q)|WILDFIRE_PCA_ENTITY(((h)&6)>>1)|((((h)&1)|2)<<16)|(((1UL<<13)-1)<<23)))
262
263#define WILDFIRE_IO_BIAS WILDFIRE_IO(0,0)
264#define WILDFIRE_MEM_BIAS WILDFIRE_MEM(0,0) /* ??? */
265
266/* The IO address space is larger than 0xffff */
267#define WILDFIRE_IO_SPACE (8UL*1024*1024)
268
269#ifdef __KERNEL__
270
271#ifndef __EXTERN_INLINE
272#define __EXTERN_INLINE extern inline
273#define __IO_EXTERN_INLINE
274#endif
275
276/*
277 * Memory functions. all accesses are done through linear space.
278 */
279
280__EXTERN_INLINE void __iomem *wildfire_ioportmap(unsigned long addr)
281{
282 return (void __iomem *)(addr + WILDFIRE_IO_BIAS);
283}
284
285__EXTERN_INLINE void __iomem *wildfire_ioremap(unsigned long addr,
286 unsigned long size)
287{
288 return (void __iomem *)(addr + WILDFIRE_MEM_BIAS);
289}
290
291__EXTERN_INLINE int wildfire_is_ioaddr(unsigned long addr)
292{
293 return addr >= WILDFIRE_BASE;
294}
295
296__EXTERN_INLINE int wildfire_is_mmio(const volatile void __iomem *xaddr)
297{
298 unsigned long addr = (unsigned long)addr;
299 return (addr & 0x100000000UL) == 0;
300}
301
302#undef __IO_PREFIX
303#define __IO_PREFIX wildfire
304#define wildfire_trivial_rw_bw 1
305#define wildfire_trivial_rw_lq 1
306#define wildfire_trivial_io_bw 1
307#define wildfire_trivial_io_lq 1
308#define wildfire_trivial_iounmap 1
309#include <asm/io_trivial.h>
310
311#ifdef __IO_EXTERN_INLINE
312#undef __EXTERN_INLINE
313#undef __IO_EXTERN_INLINE
314#endif
315
316#endif /* __KERNEL__ */
317
318#endif /* __ALPHA_WILDFIRE__H__ */
diff --git a/include/asm-alpha/cputime.h b/include/asm-alpha/cputime.h
new file mode 100644
index 000000000000..19577fd93230
--- /dev/null
+++ b/include/asm-alpha/cputime.h
@@ -0,0 +1,6 @@
1#ifndef __ALPHA_CPUTIME_H
2#define __ALPHA_CPUTIME_H
3
4#include <asm-generic/cputime.h>
5
6#endif /* __ALPHA_CPUTIME_H */
diff --git a/include/asm-alpha/current.h b/include/asm-alpha/current.h
new file mode 100644
index 000000000000..8d88a13c1bec
--- /dev/null
+++ b/include/asm-alpha/current.h
@@ -0,0 +1,9 @@
1#ifndef _ALPHA_CURRENT_H
2#define _ALPHA_CURRENT_H
3
4#include <linux/thread_info.h>
5
6#define get_current() (current_thread_info()->task + 0)
7#define current get_current()
8
9#endif /* _ALPHA_CURRENT_H */
diff --git a/include/asm-alpha/delay.h b/include/asm-alpha/delay.h
new file mode 100644
index 000000000000..2aa3f410f7e6
--- /dev/null
+++ b/include/asm-alpha/delay.h
@@ -0,0 +1,10 @@
1#ifndef __ALPHA_DELAY_H
2#define __ALPHA_DELAY_H
3
4extern void __delay(int loops);
5extern void udelay(unsigned long usecs);
6
7extern void ndelay(unsigned long nsecs);
8#define ndelay ndelay
9
10#endif /* defined(__ALPHA_DELAY_H) */
diff --git a/include/asm-alpha/div64.h b/include/asm-alpha/div64.h
new file mode 100644
index 000000000000..6cd978cefb28
--- /dev/null
+++ b/include/asm-alpha/div64.h
@@ -0,0 +1 @@
#include <asm-generic/div64.h>
diff --git a/include/asm-alpha/dma-mapping.h b/include/asm-alpha/dma-mapping.h
new file mode 100644
index 000000000000..c675f282d6ad
--- /dev/null
+++ b/include/asm-alpha/dma-mapping.h
@@ -0,0 +1,67 @@
1#ifndef _ALPHA_DMA_MAPPING_H
2#define _ALPHA_DMA_MAPPING_H
3
4#include <linux/config.h>
5
6#ifdef CONFIG_PCI
7
8#include <linux/pci.h>
9
10#define dma_map_single(dev, va, size, dir) \
11 pci_map_single(alpha_gendev_to_pci(dev), va, size, dir)
12#define dma_unmap_single(dev, addr, size, dir) \
13 pci_unmap_single(alpha_gendev_to_pci(dev), addr, size, dir)
14#define dma_alloc_coherent(dev, size, addr, gfp) \
15 pci_alloc_consistent(alpha_gendev_to_pci(dev), size, addr)
16#define dma_free_coherent(dev, size, va, addr) \
17 pci_free_consistent(alpha_gendev_to_pci(dev), size, va, addr)
18#define dma_map_page(dev, page, off, size, dir) \
19 pci_map_single(alpha_gendev_to_pci(dev), page, off, size, dir)
20#define dma_unmap_page(dev, addr, size, dir) \
21 pci_unmap_page(alpha_gendev_to_pci(dev), addr, size, dir)
22#define dma_map_sg(dev, sg, nents, dir) \
23 pci_map_sg(alpha_gendev_to_pci(dev), sg, nents, dir)
24#define dma_unmap_sg(dev, sg, nents, dir) \
25 pci_unmap_sg(alpha_gendev_to_pci(dev), sg, nents, dir)
26#define dma_supported(dev, mask) \
27 pci_dma_supported(alpha_gendev_to_pci(dev), mask)
28#define dma_mapping_error(addr) \
29 pci_dma_mapping_error(addr)
30
31#else /* no PCI - no IOMMU. */
32
33void *dma_alloc_coherent(struct device *dev, size_t size,
34 dma_addr_t *dma_handle, int gfp);
35int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
36 enum dma_data_direction direction);
37
38#define dma_free_coherent(dev, size, va, addr) \
39 free_pages((unsigned long)va, get_order(size))
40#define dma_supported(dev, mask) (mask < 0x00ffffffUL ? 0 : 1)
41#define dma_map_single(dev, va, size, dir) virt_to_phys(va)
42#define dma_map_page(dev, page, off, size, dir) (page_to_pa(page) + off)
43
44#define dma_unmap_single(dev, addr, size, dir) do { } while (0)
45#define dma_unmap_page(dev, addr, size, dir) do { } while (0)
46#define dma_unmap_sg(dev, sg, nents, dir) do { } while (0)
47
48#define dma_mapping_error(addr) (0)
49
50#endif /* !CONFIG_PCI */
51
52#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
53#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
54#define dma_is_consistent(dev) (1)
55
56int dma_set_mask(struct device *dev, u64 mask);
57
58#define dma_sync_single_for_cpu(dev, addr, size, dir) do { } while (0)
59#define dma_sync_single_for_device(dev, addr, size, dir) do { } while (0)
60#define dma_sync_single_range(dev, addr, off, size, dir) do { } while (0)
61#define dma_sync_sg_for_cpu(dev, sg, nents, dir) do { } while (0)
62#define dma_sync_sg_for_device(dev, sg, nents, dir) do { } while (0)
63#define dma_cache_sync(va, size, dir) do { } while (0)
64
65#define dma_get_cache_alignment() L1_CACHE_BYTES
66
67#endif /* _ALPHA_DMA_MAPPING_H */
diff --git a/include/asm-alpha/dma.h b/include/asm-alpha/dma.h
new file mode 100644
index 000000000000..683afaa3deed
--- /dev/null
+++ b/include/asm-alpha/dma.h
@@ -0,0 +1,377 @@
1/*
2 * include/asm-alpha/dma.h
3 *
4 * This is essentially the same as the i386 DMA stuff, as the AlphaPCs
5 * use ISA-compatible dma. The only extension is support for high-page
6 * registers that allow to set the top 8 bits of a 32-bit DMA address.
7 * This register should be written last when setting up a DMA address
8 * as this will also enable DMA across 64 KB boundaries.
9 */
10
11/* $Id: dma.h,v 1.7 1992/12/14 00:29:34 root Exp root $
12 * linux/include/asm/dma.h: Defines for using and allocating dma channels.
13 * Written by Hennus Bergman, 1992.
14 * High DMA channel support & info by Hannu Savolainen
15 * and John Boyd, Nov. 1992.
16 */
17
18#ifndef _ASM_DMA_H
19#define _ASM_DMA_H
20
21#include <linux/config.h>
22#include <linux/spinlock.h>
23#include <asm/io.h>
24
25#define dma_outb outb
26#define dma_inb inb
27
28/*
29 * NOTES about DMA transfers:
30 *
31 * controller 1: channels 0-3, byte operations, ports 00-1F
32 * controller 2: channels 4-7, word operations, ports C0-DF
33 *
34 * - ALL registers are 8 bits only, regardless of transfer size
35 * - channel 4 is not used - cascades 1 into 2.
36 * - channels 0-3 are byte - addresses/counts are for physical bytes
37 * - channels 5-7 are word - addresses/counts are for physical words
38 * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
39 * - transfer count loaded to registers is 1 less than actual count
40 * - controller 2 offsets are all even (2x offsets for controller 1)
41 * - page registers for 5-7 don't use data bit 0, represent 128K pages
42 * - page registers for 0-3 use bit 0, represent 64K pages
43 *
44 * DMA transfers are limited to the lower 16MB of _physical_ memory.
45 * Note that addresses loaded into registers must be _physical_ addresses,
46 * not logical addresses (which may differ if paging is active).
47 *
48 * Address mapping for channels 0-3:
49 *
50 * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
51 * | ... | | ... | | ... |
52 * | ... | | ... | | ... |
53 * | ... | | ... | | ... |
54 * P7 ... P0 A7 ... A0 A7 ... A0
55 * | Page | Addr MSB | Addr LSB | (DMA registers)
56 *
57 * Address mapping for channels 5-7:
58 *
59 * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
60 * | ... | \ \ ... \ \ \ ... \ \
61 * | ... | \ \ ... \ \ \ ... \ (not used)
62 * | ... | \ \ ... \ \ \ ... \
63 * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
64 * | Page | Addr MSB | Addr LSB | (DMA registers)
65 *
66 * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
67 * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
68 * the hardware level, so odd-byte transfers aren't possible).
69 *
70 * Transfer count (_not # bytes_) is limited to 64K, represented as actual
71 * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
72 * and up to 128K bytes may be transferred on channels 5-7 in one operation.
73 *
74 */
75
76#define MAX_DMA_CHANNELS 8
77
78/*
79 ISA DMA limitations on Alpha platforms,
80
81 These may be due to SIO (PCI<->ISA bridge) chipset limitation, or
82 just a wiring limit.
83*/
84
85/* The maximum address for ISA DMA transfer on Alpha XL, due to an
86 hardware SIO limitation, is 64MB.
87*/
88#define ALPHA_XL_MAX_ISA_DMA_ADDRESS 0x04000000UL
89
90/* The maximum address for ISA DMA transfer on RUFFIAN,
91 due to an hardware SIO limitation, is 16MB.
92*/
93#define ALPHA_RUFFIAN_MAX_ISA_DMA_ADDRESS 0x01000000UL
94
95/* The maximum address for ISA DMA transfer on SABLE, and some ALCORs,
96 due to an hardware SIO chip limitation, is 2GB.
97*/
98#define ALPHA_SABLE_MAX_ISA_DMA_ADDRESS 0x80000000UL
99#define ALPHA_ALCOR_MAX_ISA_DMA_ADDRESS 0x80000000UL
100
101/*
102 Maximum address for all the others is the complete 32-bit bus
103 address space.
104*/
105#define ALPHA_MAX_ISA_DMA_ADDRESS 0x100000000UL
106
107#ifdef CONFIG_ALPHA_GENERIC
108# define MAX_ISA_DMA_ADDRESS (alpha_mv.max_isa_dma_address)
109#else
110# if defined(CONFIG_ALPHA_XL)
111# define MAX_ISA_DMA_ADDRESS ALPHA_XL_MAX_ISA_DMA_ADDRESS
112# elif defined(CONFIG_ALPHA_RUFFIAN)
113# define MAX_ISA_DMA_ADDRESS ALPHA_RUFFIAN_MAX_ISA_DMA_ADDRESS
114# elif defined(CONFIG_ALPHA_SABLE)
115# define MAX_ISA_DMA_ADDRESS ALPHA_SABLE_MAX_ISA_DMA_ADDRESS
116# elif defined(CONFIG_ALPHA_ALCOR)
117# define MAX_ISA_DMA_ADDRESS ALPHA_ALCOR_MAX_ISA_DMA_ADDRESS
118# else
119# define MAX_ISA_DMA_ADDRESS ALPHA_MAX_ISA_DMA_ADDRESS
120# endif
121#endif
122
123/* If we have the iommu, we don't have any address limitations on DMA.
124 Otherwise (Nautilus, RX164), we have to have 0-16 Mb DMA zone
125 like i386. */
126#define MAX_DMA_ADDRESS (alpha_mv.mv_pci_tbi ? \
127 ~0UL : IDENT_ADDR + 0x01000000)
128
129/* 8237 DMA controllers */
130#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
131#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
132
133/* DMA controller registers */
134#define DMA1_CMD_REG 0x08 /* command register (w) */
135#define DMA1_STAT_REG 0x08 /* status register (r) */
136#define DMA1_REQ_REG 0x09 /* request register (w) */
137#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
138#define DMA1_MODE_REG 0x0B /* mode register (w) */
139#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
140#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
141#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
142#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
143#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
144#define DMA1_EXT_MODE_REG (0x400 | DMA1_MODE_REG)
145
146#define DMA2_CMD_REG 0xD0 /* command register (w) */
147#define DMA2_STAT_REG 0xD0 /* status register (r) */
148#define DMA2_REQ_REG 0xD2 /* request register (w) */
149#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
150#define DMA2_MODE_REG 0xD6 /* mode register (w) */
151#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
152#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
153#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
154#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
155#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
156#define DMA2_EXT_MODE_REG (0x400 | DMA2_MODE_REG)
157
158#define DMA_ADDR_0 0x00 /* DMA address registers */
159#define DMA_ADDR_1 0x02
160#define DMA_ADDR_2 0x04
161#define DMA_ADDR_3 0x06
162#define DMA_ADDR_4 0xC0
163#define DMA_ADDR_5 0xC4
164#define DMA_ADDR_6 0xC8
165#define DMA_ADDR_7 0xCC
166
167#define DMA_CNT_0 0x01 /* DMA count registers */
168#define DMA_CNT_1 0x03
169#define DMA_CNT_2 0x05
170#define DMA_CNT_3 0x07
171#define DMA_CNT_4 0xC2
172#define DMA_CNT_5 0xC6
173#define DMA_CNT_6 0xCA
174#define DMA_CNT_7 0xCE
175
176#define DMA_PAGE_0 0x87 /* DMA page registers */
177#define DMA_PAGE_1 0x83
178#define DMA_PAGE_2 0x81
179#define DMA_PAGE_3 0x82
180#define DMA_PAGE_5 0x8B
181#define DMA_PAGE_6 0x89
182#define DMA_PAGE_7 0x8A
183
184#define DMA_HIPAGE_0 (0x400 | DMA_PAGE_0)
185#define DMA_HIPAGE_1 (0x400 | DMA_PAGE_1)
186#define DMA_HIPAGE_2 (0x400 | DMA_PAGE_2)
187#define DMA_HIPAGE_3 (0x400 | DMA_PAGE_3)
188#define DMA_HIPAGE_4 (0x400 | DMA_PAGE_4)
189#define DMA_HIPAGE_5 (0x400 | DMA_PAGE_5)
190#define DMA_HIPAGE_6 (0x400 | DMA_PAGE_6)
191#define DMA_HIPAGE_7 (0x400 | DMA_PAGE_7)
192
193#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
194#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
195#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
196
197#define DMA_AUTOINIT 0x10
198
199extern spinlock_t dma_spin_lock;
200
/*
 * Take the global ISA DMA controller lock with interrupts disabled.
 * Returns the saved interrupt flags, which the caller must hand back
 * to release_dma_lock().  Note spin_lock_irqsave is a macro that
 * writes 'flags' in place.
 */
static __inline__ unsigned long claim_dma_lock(void)
{
	unsigned long flags;
	spin_lock_irqsave(&dma_spin_lock, flags);
	return flags;
}
207
/*
 * Drop the global ISA DMA lock and restore the interrupt state saved
 * by the matching claim_dma_lock().
 */
static __inline__ void release_dma_lock(unsigned long flags)
{
	spin_unlock_irqrestore(&dma_spin_lock, flags);
}
212
213/* enable/disable a specific DMA channel */
214static __inline__ void enable_dma(unsigned int dmanr)
215{
216 if (dmanr<=3)
217 dma_outb(dmanr, DMA1_MASK_REG);
218 else
219 dma_outb(dmanr & 3, DMA2_MASK_REG);
220}
221
222static __inline__ void disable_dma(unsigned int dmanr)
223{
224 if (dmanr<=3)
225 dma_outb(dmanr | 4, DMA1_MASK_REG);
226 else
227 dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
228}
229
230/* Clear the 'DMA Pointer Flip Flop'.
231 * Write 0 for LSB/MSB, 1 for MSB/LSB access.
232 * Use this once to initialize the FF to a known state.
233 * After that, keep track of it. :-)
234 * --- In order to do that, the DMA routines below should ---
235 * --- only be used while interrupts are disabled! ---
236 */
237static __inline__ void clear_dma_ff(unsigned int dmanr)
238{
239 if (dmanr<=3)
240 dma_outb(0, DMA1_CLEAR_FF_REG);
241 else
242 dma_outb(0, DMA2_CLEAR_FF_REG);
243}
244
245/* set mode (above) for a specific DMA channel */
246static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
247{
248 if (dmanr<=3)
249 dma_outb(mode | dmanr, DMA1_MODE_REG);
250 else
251 dma_outb(mode | (dmanr&3), DMA2_MODE_REG);
252}
253
254/* set extended mode for a specific DMA channel */
255static __inline__ void set_dma_ext_mode(unsigned int dmanr, char ext_mode)
256{
257 if (dmanr<=3)
258 dma_outb(ext_mode | dmanr, DMA1_EXT_MODE_REG);
259 else
260 dma_outb(ext_mode | (dmanr&3), DMA2_EXT_MODE_REG);
261}
262
263/* Set only the page register bits of the transfer address.
264 * This is used for successive transfers when we know the contents of
265 * the lower 16 bits of the DMA current address register.
266 */
267static __inline__ void set_dma_page(unsigned int dmanr, unsigned int pagenr)
268{
269 switch(dmanr) {
270 case 0:
271 dma_outb(pagenr, DMA_PAGE_0);
272 dma_outb((pagenr >> 8), DMA_HIPAGE_0);
273 break;
274 case 1:
275 dma_outb(pagenr, DMA_PAGE_1);
276 dma_outb((pagenr >> 8), DMA_HIPAGE_1);
277 break;
278 case 2:
279 dma_outb(pagenr, DMA_PAGE_2);
280 dma_outb((pagenr >> 8), DMA_HIPAGE_2);
281 break;
282 case 3:
283 dma_outb(pagenr, DMA_PAGE_3);
284 dma_outb((pagenr >> 8), DMA_HIPAGE_3);
285 break;
286 case 5:
287 dma_outb(pagenr & 0xfe, DMA_PAGE_5);
288 dma_outb((pagenr >> 8), DMA_HIPAGE_5);
289 break;
290 case 6:
291 dma_outb(pagenr & 0xfe, DMA_PAGE_6);
292 dma_outb((pagenr >> 8), DMA_HIPAGE_6);
293 break;
294 case 7:
295 dma_outb(pagenr & 0xfe, DMA_PAGE_7);
296 dma_outb((pagenr >> 8), DMA_HIPAGE_7);
297 break;
298 }
299}
300
301
302/* Set transfer address & page bits for specific DMA channel.
303 * Assumes dma flipflop is clear.
304 */
305static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
306{
307 if (dmanr <= 3) {
308 dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
309 dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
310 } else {
311 dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
312 dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
313 }
314 set_dma_page(dmanr, a>>16); /* set hipage last to enable 32-bit mode */
315}
316
317
318/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
319 * a specific DMA channel.
320 * You must ensure the parameters are valid.
321 * NOTE: from a manual: "the number of transfers is one more
322 * than the initial word count"! This is taken into account.
323 * Assumes dma flip-flop is clear.
324 * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
325 */
326static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
327{
328 count--;
329 if (dmanr <= 3) {
330 dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
331 dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
332 } else {
333 dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
334 dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
335 }
336}
337
338
339/* Get DMA residue count. After a DMA transfer, this
340 * should return zero. Reading this while a DMA transfer is
341 * still in progress will return unpredictable results.
342 * If called before the channel has been used, it may return 1.
343 * Otherwise, it returns the number of _bytes_ left to transfer.
344 *
345 * Assumes DMA flip-flop is clear.
346 */
347static __inline__ int get_dma_residue(unsigned int dmanr)
348{
349 unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
350 : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
351
352 /* using short to get 16-bit wrap around */
353 unsigned short count;
354
355 count = 1 + dma_inb(io_port);
356 count += dma_inb(io_port) << 8;
357
358 return (dmanr<=3)? count : (count<<1);
359}
360
361
362/* These are in kernel/dma.c: */
363extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
364extern void free_dma(unsigned int dmanr); /* release it again */
365#define KERNEL_HAVE_CHECK_DMA
366extern int check_dma(unsigned int dmanr);
367
368/* From PCI */
369
370#ifdef CONFIG_PCI
371extern int isa_dma_bridge_buggy;
372#else
373#define isa_dma_bridge_buggy (0)
374#endif
375
376
377#endif /* _ASM_DMA_H */
diff --git a/include/asm-alpha/elf.h b/include/asm-alpha/elf.h
new file mode 100644
index 000000000000..e94a945a2314
--- /dev/null
+++ b/include/asm-alpha/elf.h
@@ -0,0 +1,185 @@
1#ifndef __ASM_ALPHA_ELF_H
2#define __ASM_ALPHA_ELF_H
3
4/* Special values for the st_other field in the symbol table. */
5
6#define STO_ALPHA_NOPV 0x80
7#define STO_ALPHA_STD_GPLOAD 0x88
8
9/*
10 * Alpha ELF relocation types
11 */
12#define R_ALPHA_NONE 0 /* No reloc */
13#define R_ALPHA_REFLONG 1 /* Direct 32 bit */
14#define R_ALPHA_REFQUAD 2 /* Direct 64 bit */
15#define R_ALPHA_GPREL32 3 /* GP relative 32 bit */
16#define R_ALPHA_LITERAL 4 /* GP relative 16 bit w/optimization */
17#define R_ALPHA_LITUSE 5 /* Optimization hint for LITERAL */
18#define R_ALPHA_GPDISP 6 /* Add displacement to GP */
19#define R_ALPHA_BRADDR 7 /* PC+4 relative 23 bit shifted */
20#define R_ALPHA_HINT 8 /* PC+4 relative 16 bit shifted */
21#define R_ALPHA_SREL16 9 /* PC relative 16 bit */
22#define R_ALPHA_SREL32 10 /* PC relative 32 bit */
23#define R_ALPHA_SREL64 11 /* PC relative 64 bit */
24#define R_ALPHA_GPRELHIGH 17 /* GP relative 32 bit, high 16 bits */
25#define R_ALPHA_GPRELLOW 18 /* GP relative 32 bit, low 16 bits */
26#define R_ALPHA_GPREL16 19 /* GP relative 16 bit */
27#define R_ALPHA_COPY 24 /* Copy symbol at runtime */
28#define R_ALPHA_GLOB_DAT 25 /* Create GOT entry */
29#define R_ALPHA_JMP_SLOT 26 /* Create PLT entry */
30#define R_ALPHA_RELATIVE 27 /* Adjust by program base */
31#define R_ALPHA_BRSGP 28
32#define R_ALPHA_TLSGD 29
33#define R_ALPHA_TLS_LDM 30
34#define R_ALPHA_DTPMOD64 31
35#define R_ALPHA_GOTDTPREL 32
36#define R_ALPHA_DTPREL64 33
37#define R_ALPHA_DTPRELHI 34
38#define R_ALPHA_DTPRELLO 35
39#define R_ALPHA_DTPREL16 36
40#define R_ALPHA_GOTTPREL 37
41#define R_ALPHA_TPREL64 38
42#define R_ALPHA_TPRELHI 39
43#define R_ALPHA_TPRELLO 40
44#define R_ALPHA_TPREL16 41
45
46#define SHF_ALPHA_GPREL 0x10000000
47
48/* Legal values for e_flags field of Elf64_Ehdr. */
49
50#define EF_ALPHA_32BIT 1 /* All addresses are below 2GB */
51
52/*
53 * ELF register definitions..
54 */
55
56/*
57 * The OSF/1 version of <sys/procfs.h> makes gregset_t 46 entries long.
58 * I have no idea why that is so. For now, we just leave it at 33
59 * (32 general regs + processor status word).
60 */
61#define ELF_NGREG 33
62#define ELF_NFPREG 32
63
64typedef unsigned long elf_greg_t;
65typedef elf_greg_t elf_gregset_t[ELF_NGREG];
66
67typedef double elf_fpreg_t;
68typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
69
70/*
71 * This is used to ensure we don't load something for the wrong architecture.
72 */
73#define elf_check_arch(x) ((x)->e_machine == EM_ALPHA)
74
75/*
76 * These are used to set parameters in the core dumps.
77 */
78#define ELF_CLASS ELFCLASS64
79#define ELF_DATA ELFDATA2LSB
80#define ELF_ARCH EM_ALPHA
81
82#define USE_ELF_CORE_DUMP
83#define ELF_EXEC_PAGESIZE 8192
84
85/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
86 use of this is to invoke "./ld.so someprog" to test out a new version of
87 the loader. We need to make sure that it is out of the way of the program
88 that it will "exec", and that there is sufficient room for the brk. */
89
90#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
91
92/* $0 is set by ld.so to a pointer to a function which might be
93 registered using atexit. This provides a means for the dynamic
94 linker to call DT_FINI functions for shared libraries that have
95 been loaded before the code runs.
96
97 So that we can use the same startup file with static executables,
98 we start programs with a value of 0 to indicate that there is no
99 such function. */
100
101#define ELF_PLAT_INIT(_r, load_addr) _r->r0 = 0
102
103/* The registers are laid out in pt_regs for PAL and syscall
104 convenience. Re-order them for the linear elf_gregset_t. */
105
106struct pt_regs;
107struct thread_info;
108struct task_struct;
109extern void dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt,
110 struct thread_info *ti);
111#define ELF_CORE_COPY_REGS(DEST, REGS) \
112 dump_elf_thread(DEST, REGS, current_thread_info());
113
114/* Similar, but for a thread other than current. */
115
116extern int dump_elf_task(elf_greg_t *dest, struct task_struct *task);
117#define ELF_CORE_COPY_TASK_REGS(TASK, DEST) \
118 dump_elf_task(*(DEST), TASK)
119
120/* Similar, but for the FP registers. */
121
122extern int dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task);
123#define ELF_CORE_COPY_FPREGS(TASK, DEST) \
124 dump_elf_task_fp(*(DEST), TASK)
125
126/* This yields a mask that user programs can use to figure out what
127 instruction set this CPU supports. This is trivial on Alpha,
128 but not so on other machines. */
129
130#define ELF_HWCAP (~amask(-1))
131
132/* This yields a string that ld.so will use to load implementation
133 specific libraries for optimization. This is more specific in
134 intent than poking at uname or /proc/cpuinfo. */
135
136#define ELF_PLATFORM \
137({ \
138 enum implver_enum i_ = implver(); \
139 ( i_ == IMPLVER_EV4 ? "ev4" \
140 : i_ == IMPLVER_EV5 \
141 ? (amask(AMASK_BWX) ? "ev5" : "ev56") \
142 : amask (AMASK_CIX) ? "ev6" : "ev67"); \
143})
144
145/* Reserve these numbers for any future use of a VDSO. */
146#if 0
147#define AT_SYSINFO 32
148#define AT_SYSINFO_EHDR 33
149#endif
150
151/* More complete cache descriptions than AT_[DIU]CACHEBSIZE. If the
152 value is -1, then the cache doesn't exist. Otherwise:
153
154 bit 0-3: Cache set-associativity; 0 means fully associative.
155 bit 4-7: Log2 of cacheline size.
156 bit 8-31: Size of the entire cache >> 8.
157 bit 32-63: Reserved.
158*/
159
160#define AT_L1I_CACHESHAPE 34
161#define AT_L1D_CACHESHAPE 35
162#define AT_L2_CACHESHAPE 36
163#define AT_L3_CACHESHAPE 37
164
165#ifdef __KERNEL__
166
167#define SET_PERSONALITY(EX, IBCS2) \
168 set_personality(((EX).e_flags & EF_ALPHA_32BIT) \
169 ? PER_LINUX_32BIT : (IBCS2) ? PER_SVR4 : PER_LINUX)
170
171extern int alpha_l1i_cacheshape;
172extern int alpha_l1d_cacheshape;
173extern int alpha_l2_cacheshape;
174extern int alpha_l3_cacheshape;
175
176#define ARCH_DLINFO \
177 do { \
178 NEW_AUX_ENT(AT_L1I_CACHESHAPE, alpha_l1i_cacheshape); \
179 NEW_AUX_ENT(AT_L1D_CACHESHAPE, alpha_l1d_cacheshape); \
180 NEW_AUX_ENT(AT_L2_CACHESHAPE, alpha_l2_cacheshape); \
181 NEW_AUX_ENT(AT_L3_CACHESHAPE, alpha_l3_cacheshape); \
182 } while (0)
183
184#endif /* __KERNEL__ */
185#endif /* __ASM_ALPHA_ELF_H */
diff --git a/include/asm-alpha/err_common.h b/include/asm-alpha/err_common.h
new file mode 100644
index 000000000000..c25095942107
--- /dev/null
+++ b/include/asm-alpha/err_common.h
@@ -0,0 +1,118 @@
1/*
2 * linux/include/asm-alpha/err_common.h
3 *
4 * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation)
5 *
6 * Contains declarations and macros to support Alpha error handling
7 * implementations.
8 */
9
10#ifndef __ALPHA_ERR_COMMON_H
11#define __ALPHA_ERR_COMMON_H 1
12
13/*
14 * SCB Vector definitions
15 */
16#define SCB_Q_SYSERR 0x620
17#define SCB_Q_PROCERR 0x630
18#define SCB_Q_SYSMCHK 0x660
19#define SCB_Q_PROCMCHK 0x670
20#define SCB_Q_SYSEVENT 0x680
21
22/*
23 * Disposition definitions for logout frame parser
24 */
25#define MCHK_DISPOSITION_UNKNOWN_ERROR 0x00
26#define MCHK_DISPOSITION_REPORT 0x01
27#define MCHK_DISPOSITION_DISMISS 0x02
28
29/*
30 * Error Log definitions
31 */
32/*
33 * Types
34 */
35
36#define EL_CLASS__TERMINATION (0)
37# define EL_TYPE__TERMINATION__TERMINATION (0)
38#define EL_CLASS__HEADER (5)
39# define EL_TYPE__HEADER__SYSTEM_ERROR_FRAME (1)
40# define EL_TYPE__HEADER__SYSTEM_EVENT_FRAME (2)
41# define EL_TYPE__HEADER__HALT_FRAME (3)
42# define EL_TYPE__HEADER__LOGOUT_FRAME (19)
43#define EL_CLASS__GENERAL_NOTIFICATION (9)
44#define EL_CLASS__PCI_ERROR_FRAME (11)
45#define EL_CLASS__REGATTA_FAMILY (12)
46# define EL_TYPE__REGATTA__PROCESSOR_ERROR_FRAME (1)
47# define EL_TYPE__REGATTA__SYSTEM_ERROR_FRAME (2)
48# define EL_TYPE__REGATTA__ENVIRONMENTAL_FRAME (3)
49# define EL_TYPE__REGATTA__TITAN_PCHIP0_EXTENDED (8)
50# define EL_TYPE__REGATTA__TITAN_PCHIP1_EXTENDED (9)
51# define EL_TYPE__REGATTA__TITAN_MEMORY_EXTENDED (10)
52# define EL_TYPE__REGATTA__PROCESSOR_DBL_ERROR_HALT (11)
53# define EL_TYPE__REGATTA__SYSTEM_DBL_ERROR_HALT (12)
54#define EL_CLASS__PAL (14)
55# define EL_TYPE__PAL__LOGOUT_FRAME (1)
56# define EL_TYPE__PAL__EV7_PROCESSOR (4)
57# define EL_TYPE__PAL__EV7_ZBOX (5)
58# define EL_TYPE__PAL__EV7_RBOX (6)
59# define EL_TYPE__PAL__EV7_IO (7)
60# define EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE (10)
61# define EL_TYPE__PAL__ENV__AIRMOVER_FAN (11)
62# define EL_TYPE__PAL__ENV__VOLTAGE (12)
63# define EL_TYPE__PAL__ENV__INTRUSION (13)
64# define EL_TYPE__PAL__ENV__POWER_SUPPLY (14)
65# define EL_TYPE__PAL__ENV__LAN (15)
66# define EL_TYPE__PAL__ENV__HOT_PLUG (16)
67
/* Error-log timestamp.  The byte-wise view and the single 64-bit view
 * overlay the same storage, so a frame's timestamp can be copied or
 * compared as one quadword.  (Field encoding is firmware-defined;
 * NOTE(review): assumed plain binary values -- confirm against PAL docs.) */
union el_timestamp {
	struct {
		u8 second;
		u8 minute;
		u8 hour;
		u8 day;
		u8 month;
		u8 year;
	} b;
	u64 as_int;
};
79
/* Generic error-log subpacket.  The class/type pair (EL_CLASS__* /
 * EL_TYPE__* above) selects which member of the by_type union
 * describes the payload that follows the header fields. */
struct el_subpacket {
	u16 length;		/* length of header (in bytes) */
	u16 class;		/* header class and type... */
	u16 type;		/* ...determine content */
	u16 revision;		/* header revision */
	union {
		struct {	/* Class 5, Type 1 - System Error */
			u32 frame_length;
			u32 frame_packet_count;
		} sys_err;
		struct {	/* Class 5, Type 2 - System Event */
			union el_timestamp timestamp;
			u32 frame_length;
			u32 frame_packet_count;
		} sys_event;
		struct {	/* Class 5, Type 3 - Double Error Halt */
			u16 halt_code;
			u16 reserved;
			union el_timestamp timestamp;
			u32 frame_length;
			u32 frame_packet_count;
		} err_halt;
		struct {	/* Class 5, Type 19 - Logout Frame Header */
			u32 frame_length;
			u32 frame_flags;
			u32 cpu_offset;
			u32 system_offset;
		} logout_header;
		struct {	/* Class 12 - Regatta */
			u64 cpuid;
			u64 data_start[1];	/* variable-length payload */
		} regatta_frame;
		struct {	/* Raw */
			u64 data_start[1];	/* variable-length payload */
		} raw;
	} by_type;
};
117
118#endif /* __ALPHA_ERR_COMMON_H */
diff --git a/include/asm-alpha/err_ev6.h b/include/asm-alpha/err_ev6.h
new file mode 100644
index 000000000000..ea637791e4a9
--- /dev/null
+++ b/include/asm-alpha/err_ev6.h
@@ -0,0 +1,6 @@
1#ifndef __ALPHA_ERR_EV6_H
2#define __ALPHA_ERR_EV6_H 1
3
4/* Dummy include for now. */
5
6#endif /* __ALPHA_ERR_EV6_H */
diff --git a/include/asm-alpha/err_ev7.h b/include/asm-alpha/err_ev7.h
new file mode 100644
index 000000000000..87f99777c2e4
--- /dev/null
+++ b/include/asm-alpha/err_ev7.h
@@ -0,0 +1,202 @@
1#ifndef __ALPHA_ERR_EV7_H
2#define __ALPHA_ERR_EV7_H 1
3
4/*
5 * Data for el packet class PAL (14), type LOGOUT_FRAME (1)
6 */
/* PAL logout frame header: identifies the reporting CPU (whami),
 * interrupt source, faulting PC and time, and how many subpackets
 * follow.  Layout is fixed by PAL; do not reorder fields. */
struct ev7_pal_logout_subpacket {
	u32 mchk_code;
	u32 subpacket_count;	/* number of subpackets that follow */
	u64 whami;
	u64 rbox_whami;
	u64 rbox_int;
	u64 exc_addr;
	union el_timestamp timestamp;
	u64 halt_code;
	u64 reserved;
};
18
19/*
20 * Data for el packet class PAL (14), type EV7_PROCESSOR (4)
21 */
/* Raw processor state captured by PAL at machine-check time.
 * NOTE(review): field names appear to mirror EV7 internal registers
 * (I_STAT, DC_STAT, C_ADDR, ...) -- confirm against the EV7 hardware
 * reference before interpreting individual bits.  Layout is fixed by
 * PAL; do not reorder fields. */
struct ev7_pal_processor_subpacket {
	u64 i_stat;
	u64 dc_stat;
	u64 c_addr;
	u64 c_syndrome_1;
	u64 c_syndrome_0;
	u64 c_stat;
	u64 c_sts;
	u64 mm_stat;
	u64 exc_addr;
	u64 ier_cm;
	u64 isum;
	u64 pal_base;
	u64 i_ctl;
	u64 process_context;
	u64 cbox_ctl;
	u64 cbox_stp_ctl;
	u64 cbox_acc_ctl;
	u64 cbox_lcl_set;
	u64 cbox_gbl_set;
	u64 bbox_ctl;
	u64 bbox_err_sts;
	u64 bbox_err_idx;
	u64 cbox_ddp_err_sts;
	u64 bbox_dat_rmp;
	u64 reserved[2];
};
49
50/*
51 * Data for el packet class PAL (14), type EV7_ZBOX (5)
52 */
/* Memory-controller (ZBOX) error state for both ZBOX instances,
 * captured by PAL.  Layout is fixed by PAL; do not reorder fields. */
struct ev7_pal_zbox_subpacket {
	u32 zbox0_dram_err_status_1;
	u32 zbox0_dram_err_status_2;
	u32 zbox0_dram_err_status_3;
	u32 zbox0_dram_err_ctl;
	u32 zbox0_dram_err_adr;
	u32 zbox0_dift_timeout;
	u32 zbox0_dram_mapper_ctl;
	u32 zbox0_frc_err_adr;
	u32 zbox0_dift_err_status;
	u32 reserved1;
	u32 zbox1_dram_err_status_1;
	u32 zbox1_dram_err_status_2;
	u32 zbox1_dram_err_status_3;
	u32 zbox1_dram_err_ctl;
	u32 zbox1_dram_err_adr;
	u32 zbox1_dift_timeout;
	u32 zbox1_dram_mapper_ctl;
	u32 zbox1_frc_err_adr;
	u32 zbox1_dift_err_status;
	u32 reserved2;
	u64 cbox_ctl;
	u64 cbox_stp_ctl;
	u64 zbox0_error_pa;
	u64 zbox1_error_pa;
	u64 zbox0_ored_syndrome;
	u64 zbox1_ored_syndrome;
	u64 reserved3[2];
};
82
83/*
84 * Data for el packet class PAL (14), type EV7_RBOX (6)
85 */
/* Router (RBOX) configuration and per-direction error state captured
 * by PAL.  Layout is fixed by PAL; do not reorder fields. */
struct ev7_pal_rbox_subpacket {
	u64 rbox_cfg;
	u64 rbox_n_cfg;		/* north/south/east/west port config... */
	u64 rbox_s_cfg;
	u64 rbox_e_cfg;
	u64 rbox_w_cfg;
	u64 rbox_n_err;		/* ...and matching per-port error state */
	u64 rbox_s_err;
	u64 rbox_e_err;
	u64 rbox_w_err;
	u64 rbox_io_cfg;
	u64 rbox_io_err;
	u64 rbox_l_err;
	u64 rbox_whoami;
	u64 rbox_imask;
	u64 rbox_intq;
	u64 rbox_int;
	u64 reserved[2];
};
105
106/*
107 * Data for el packet class PAL (14), type EV7_IO (7)
108 */
/* Per-port error state of one IO7 PCI port; four of these are embedded
 * in ev7_pal_io_subpacket below.  Layout is fixed by PAL. */
struct ev7_pal_io_one_port {
	u64 pox_err_sum;
	u64 pox_tlb_err;
	u64 pox_spl_cmplt;
	u64 pox_trans_sum;
	u64 pox_first_err;
	u64 pox_mult_err;
	u64 pox_dm_source;
	u64 pox_dm_dest;
	u64 pox_dm_size;
	u64 pox_dm_ctrl;
	u64 reserved;
};
122
/* IO7 chip-level error state plus the four per-port blocks.
 * Layout is fixed by PAL; do not reorder fields. */
struct ev7_pal_io_subpacket {
	u64 io_asic_rev;
	u64 io_sys_rev;
	u64 io7_uph;
	u64 hpi_ctl;
	u64 crd_ctl;
	u64 hei_ctl;
	u64 po7_error_sum;
	u64 po7_uncrr_sym;
	u64 po7_crrct_sym;
	u64 po7_ugbge_sym;
	u64 po7_err_pkt0;
	u64 po7_err_pkt1;
	u64 reserved[2];
	struct ev7_pal_io_one_port ports[4];	/* one per PCI port */
};
139
140/*
141 * Environmental subpacket. Data used for el packets:
142 * class PAL (14), type AMBIENT_TEMPERATURE (10)
143 * class PAL (14), type AIRMOVER_FAN (11)
144 * class PAL (14), type VOLTAGE (12)
145 * class PAL (14), type INTRUSION (13)
146 * class PAL (14), type POWER_SUPPLY (14)
147 * class PAL (14), type LAN (15)
148 * class PAL (14), type HOT_PLUG (16)
149 */
/* Common body for all environmental subpacket types (10-16); the
 * subpacket type says which sensor class (temperature, fan, voltage,
 * ...) the condition refers to. */
struct ev7_pal_environmental_subpacket {
	u16 cabinet;		/* physical location of the event... */
	u16 drawer;
	u16 reserved1[2];
	u8 module_type;
	u8 unit_id;		/* unit reporting condition */
	u8 reserved2;
	u8 condition;		/* condition reported */
};
159
160/*
161 * Convert environmental type to index
162 */
static inline int ev7_lf_env_index(int type)
{
	/* Only environmental types 10..16 (EL_TYPE__PAL__ENV__*) are
	 * valid here; anything else is a caller bug. */
	BUG_ON((type < EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE)
	       || (type > EL_TYPE__PAL__ENV__HOT_PLUG));

	/* Map to a zero-based index, e.g. for ev7_lf_subpackets.env[]. */
	return type - EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE;
}
170
171/*
172 * Data for generic el packet class PAL.
173 */
/* Payload of a class-PAL el_subpacket, viewed as whichever concrete
 * subpacket the type field indicates (or as raw quadwords). */
struct ev7_pal_subpacket {
	union {
		struct ev7_pal_logout_subpacket logout;		/* Type 1 */
		struct ev7_pal_processor_subpacket ev7;		/* Type 4 */
		struct ev7_pal_zbox_subpacket zbox;		/* Type 5 */
		struct ev7_pal_rbox_subpacket rbox;		/* Type 6 */
		struct ev7_pal_io_subpacket io;			/* Type 7 */
		struct ev7_pal_environmental_subpacket env;	/* Type 10-16 */
		u64 as_quad[1];		/* Raw u64 (variable length) */
	} by_type;
};
185
186/*
187 * Struct to contain collected logout from subpackets.
188 */
/* Pointers to the subpackets collected from one logout frame; a NULL
 * member means that subpacket type was not present in the frame.
 * env[] is indexed by ev7_lf_env_index(). */
struct ev7_lf_subpackets {
	struct ev7_pal_logout_subpacket *logout;		/* Type 1 */
	struct ev7_pal_processor_subpacket *ev7;		/* Type 4 */
	struct ev7_pal_zbox_subpacket *zbox;			/* Type 5 */
	struct ev7_pal_rbox_subpacket *rbox;			/* Type 6 */
	struct ev7_pal_io_subpacket *io;			/* Type 7 */
	struct ev7_pal_environmental_subpacket *env[7];		/* Type 10-16 */

	/* NOTE(review): presumably identifies which IO7 the io subpacket
	 * came from -- confirm against the err_marvel/err_titan users. */
	unsigned int io_pid;
};
199
200#endif /* __ALPHA_ERR_EV7_H */
201
202
diff --git a/include/asm-alpha/errno.h b/include/asm-alpha/errno.h
new file mode 100644
index 000000000000..c85ab6b9d6c6
--- /dev/null
+++ b/include/asm-alpha/errno.h
@@ -0,0 +1,119 @@
1#ifndef _ALPHA_ERRNO_H
2#define _ALPHA_ERRNO_H
3
4#include <asm-generic/errno-base.h>
5
6#undef EAGAIN /* 11 in errno-base.h */
7
8#define EDEADLK 11 /* Resource deadlock would occur */
9
10#define EAGAIN 35 /* Try again */
11#define EWOULDBLOCK EAGAIN /* Operation would block */
12#define EINPROGRESS 36 /* Operation now in progress */
13#define EALREADY 37 /* Operation already in progress */
14#define ENOTSOCK 38 /* Socket operation on non-socket */
15#define EDESTADDRREQ 39 /* Destination address required */
16#define EMSGSIZE 40 /* Message too long */
17#define EPROTOTYPE 41 /* Protocol wrong type for socket */
18#define ENOPROTOOPT 42 /* Protocol not available */
19#define EPROTONOSUPPORT 43 /* Protocol not supported */
20#define ESOCKTNOSUPPORT 44 /* Socket type not supported */
21#define EOPNOTSUPP 45 /* Operation not supported on transport endpoint */
22#define EPFNOSUPPORT 46 /* Protocol family not supported */
23#define EAFNOSUPPORT 47 /* Address family not supported by protocol */
24#define EADDRINUSE 48 /* Address already in use */
25#define EADDRNOTAVAIL 49 /* Cannot assign requested address */
26#define ENETDOWN 50 /* Network is down */
27#define ENETUNREACH 51 /* Network is unreachable */
28#define ENETRESET 52 /* Network dropped connection because of reset */
29#define ECONNABORTED 53 /* Software caused connection abort */
30#define ECONNRESET 54 /* Connection reset by peer */
31#define ENOBUFS 55 /* No buffer space available */
32#define EISCONN 56 /* Transport endpoint is already connected */
33#define ENOTCONN 57 /* Transport endpoint is not connected */
34#define ESHUTDOWN 58 /* Cannot send after transport endpoint shutdown */
35#define ETOOMANYREFS 59 /* Too many references: cannot splice */
36#define ETIMEDOUT 60 /* Connection timed out */
37#define ECONNREFUSED 61 /* Connection refused */
38#define ELOOP 62 /* Too many symbolic links encountered */
39#define ENAMETOOLONG 63 /* File name too long */
40#define EHOSTDOWN 64 /* Host is down */
41#define EHOSTUNREACH 65 /* No route to host */
42#define ENOTEMPTY 66 /* Directory not empty */
43
44#define EUSERS 68 /* Too many users */
45#define EDQUOT 69 /* Quota exceeded */
46#define ESTALE 70 /* Stale NFS file handle */
47#define EREMOTE 71 /* Object is remote */
48
49#define ENOLCK 77 /* No record locks available */
50#define ENOSYS 78 /* Function not implemented */
51
52#define ENOMSG 80 /* No message of desired type */
53#define EIDRM 81 /* Identifier removed */
54#define ENOSR 82 /* Out of streams resources */
55#define ETIME 83 /* Timer expired */
56#define EBADMSG 84 /* Not a data message */
57#define EPROTO 85 /* Protocol error */
58#define ENODATA 86 /* No data available */
59#define ENOSTR 87 /* Device not a stream */
60
61#define ENOPKG 92 /* Package not installed */
62
63#define EILSEQ 116 /* Illegal byte sequence */
64
65/* The following are just random noise.. */
66#define ECHRNG 88 /* Channel number out of range */
67#define EL2NSYNC 89 /* Level 2 not synchronized */
68#define EL3HLT 90 /* Level 3 halted */
69#define EL3RST 91 /* Level 3 reset */
70
71#define ELNRNG 93 /* Link number out of range */
72#define EUNATCH 94 /* Protocol driver not attached */
73#define ENOCSI 95 /* No CSI structure available */
74#define EL2HLT 96 /* Level 2 halted */
75#define EBADE 97 /* Invalid exchange */
76#define EBADR 98 /* Invalid request descriptor */
77#define EXFULL 99 /* Exchange full */
78#define ENOANO 100 /* No anode */
79#define EBADRQC 101 /* Invalid request code */
80#define EBADSLT 102 /* Invalid slot */
81
82#define EDEADLOCK EDEADLK
83
84#define EBFONT 104 /* Bad font file format */
85#define ENONET 105 /* Machine is not on the network */
86#define ENOLINK 106 /* Link has been severed */
87#define EADV 107 /* Advertise error */
88#define ESRMNT 108 /* Srmount error */
89#define ECOMM 109 /* Communication error on send */
90#define EMULTIHOP 110 /* Multihop attempted */
91#define EDOTDOT 111 /* RFS specific error */
92#define EOVERFLOW 112 /* Value too large for defined data type */
93#define ENOTUNIQ 113 /* Name not unique on network */
94#define EBADFD 114 /* File descriptor in bad state */
95#define EREMCHG 115 /* Remote address changed */
96
97#define EUCLEAN 117 /* Structure needs cleaning */
98#define ENOTNAM 118 /* Not a XENIX named type file */
99#define ENAVAIL 119 /* No XENIX semaphores available */
100#define EISNAM 120 /* Is a named type file */
101#define EREMOTEIO 121 /* Remote I/O error */
102
103#define ELIBACC 122 /* Can not access a needed shared library */
104#define ELIBBAD 123 /* Accessing a corrupted shared library */
105#define ELIBSCN 124 /* .lib section in a.out corrupted */
106#define ELIBMAX 125 /* Attempting to link in too many shared libraries */
107#define ELIBEXEC 126 /* Cannot exec a shared library directly */
108#define ERESTART 127 /* Interrupted system call should be restarted */
109#define ESTRPIPE 128 /* Streams pipe error */
110
111#define ENOMEDIUM 129 /* No medium found */
112#define EMEDIUMTYPE 130 /* Wrong medium type */
113#define ECANCELED 131 /* Operation Cancelled */
114#define ENOKEY 132 /* Required key not available */
115#define EKEYEXPIRED 133 /* Key has expired */
116#define EKEYREVOKED 134 /* Key has been revoked */
117#define EKEYREJECTED 135 /* Key was rejected by service */
118
119#endif
diff --git a/include/asm-alpha/fcntl.h b/include/asm-alpha/fcntl.h
new file mode 100644
index 000000000000..6b7d6c1649ce
--- /dev/null
+++ b/include/asm-alpha/fcntl.h
@@ -0,0 +1,75 @@
1#ifndef _ALPHA_FCNTL_H
2#define _ALPHA_FCNTL_H
3
4/* open/fcntl - O_SYNC is only implemented on block devices and on files
5 located on an ext2 file system */
6#define O_ACCMODE 0003
7#define O_RDONLY 00
8#define O_WRONLY 01
9#define O_RDWR 02
10#define O_CREAT 01000 /* not fcntl */
11#define O_TRUNC 02000 /* not fcntl */
12#define O_EXCL 04000 /* not fcntl */
13#define O_NOCTTY 010000 /* not fcntl */
14
15#define O_NONBLOCK 00004
16#define O_APPEND 00010
17#define O_NDELAY O_NONBLOCK
18#define O_SYNC 040000
19#define FASYNC 020000 /* fcntl, for BSD compatibility */
20#define O_DIRECTORY 0100000 /* must be a directory */
21#define O_NOFOLLOW 0200000 /* don't follow links */
22#define O_LARGEFILE 0400000 /* will be set by the kernel on every open */
23#define O_DIRECT 02000000 /* direct disk access - should check with OSF/1 */
24#define O_NOATIME 04000000
25
26#define F_DUPFD 0 /* dup */
27#define F_GETFD 1 /* get close_on_exec */
28#define F_SETFD 2 /* set/clear close_on_exec */
29#define F_GETFL 3 /* get file->f_flags */
30#define F_SETFL 4 /* set file->f_flags */
31#define F_GETLK 7
32#define F_SETLK 8
33#define F_SETLKW 9
34
35#define F_SETOWN 5 /* for sockets. */
36#define F_GETOWN 6 /* for sockets. */
37#define F_SETSIG 10 /* for sockets. */
38#define F_GETSIG 11 /* for sockets. */
39
40/* for F_[GET|SET]FL */
41#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
42
43/* for posix fcntl() and lockf() */
44#define F_RDLCK 1
45#define F_WRLCK 2
46#define F_UNLCK 8
47
48/* for old implementation of bsd flock () */
49#define F_EXLCK 16 /* or 3 */
50#define F_SHLCK 32 /* or 4 */
51
52#define F_INPROGRESS 64
53
54/* operations for bsd flock(), also used by the kernel implementation */
55#define LOCK_SH 1 /* shared lock */
56#define LOCK_EX 2 /* exclusive lock */
57#define LOCK_NB 4 /* or'd with one of the above to prevent
58 blocking */
59#define LOCK_UN 8 /* remove lock */
60#define LOCK_MAND 32 /* This is a mandatory flock */
61#define LOCK_READ 64 /* ... Which allows concurrent read operations */
62#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
63#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
64
/* Record-lock description passed to fcntl(F_GETLK/F_SETLK/F_SETLKW). */
struct flock {
	short l_type;		/* F_RDLCK, F_WRLCK or F_UNLCK */
	short l_whence;		/* base for l_start (SEEK_SET style) */
	__kernel_off_t l_start;	/* start offset of the locked region */
	__kernel_off_t l_len;	/* region length; NOTE(review): 0 conventionally means to EOF -- confirm */
	__kernel_pid_t l_pid;	/* lock owner, as reported by F_GETLK */
};
72
73#define F_LINUX_SPECIFIC_BASE 1024
74
75#endif
diff --git a/include/asm-alpha/floppy.h b/include/asm-alpha/floppy.h
new file mode 100644
index 000000000000..289a00d51a90
--- /dev/null
+++ b/include/asm-alpha/floppy.h
@@ -0,0 +1,119 @@
1/*
2 * Architecture specific parts of the Floppy driver
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1995
9 */
10#ifndef __ASM_ALPHA_FLOPPY_H
11#define __ASM_ALPHA_FLOPPY_H
12
13#include <linux/config.h>
14
15#define fd_inb(port) inb_p(port)
16#define fd_outb(value,port) outb_p(value,port)
17
18#define fd_enable_dma() enable_dma(FLOPPY_DMA)
19#define fd_disable_dma() disable_dma(FLOPPY_DMA)
20#define fd_request_dma() request_dma(FLOPPY_DMA,"floppy")
21#define fd_free_dma() free_dma(FLOPPY_DMA)
22#define fd_clear_dma_ff() clear_dma_ff(FLOPPY_DMA)
23#define fd_set_dma_mode(mode) set_dma_mode(FLOPPY_DMA,mode)
24#define fd_set_dma_addr(addr) set_dma_addr(FLOPPY_DMA,virt_to_bus(addr))
25#define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA,count)
26#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
27#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
28#define fd_cacheflush(addr,size) /* nothing */
29#define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt, \
30 SA_INTERRUPT|SA_SAMPLE_RANDOM, \
31 "floppy", NULL)
32#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL);
33
34#ifdef CONFIG_PCI
35
36#include <linux/pci.h>
37
38#define fd_dma_setup(addr,size,mode,io) alpha_fd_dma_setup(addr,size,mode,io)
39
/* Map the floppy buffer for PCI DMA and program the ISA DMA engine.
 * Always returns 0.  The static locals cache the most recent mapping
 * so a repeat call with identical (addr, size, dir) reuses it instead
 * of unmapping and remapping. */
static __inline__ int
alpha_fd_dma_setup(char *addr, unsigned long size, int mode, int io)
{
	static unsigned long prev_size;
	static dma_addr_t bus_addr = 0;
	static char *prev_addr;
	static int prev_dir;
	int dir;

	/* NOTE(review): DMA_MODE_READ is defined above as "I/O to memory",
	 * which would normally be PCI_DMA_FROMDEVICE, yet READ selects
	 * TODEVICE here.  Looks inverted -- verify (may be harmless on
	 * Alpha where mapping direction does not affect coherence). */
	dir = (mode != DMA_MODE_READ) ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE;

	if (bus_addr
	    && (addr != prev_addr || size != prev_size || dir != prev_dir)) {
		/* different from last time -- unmap prev */
		pci_unmap_single(isa_bridge, bus_addr, prev_size, prev_dir);
		bus_addr = 0;
	}

	if (!bus_addr)	/* need to map it */
		bus_addr = pci_map_single(isa_bridge, addr, size, dir);

	/* remember this one as prev */
	prev_addr = addr;
	prev_size = size;
	prev_dir = dir;

	fd_clear_dma_ff();
	fd_cacheflush(addr, size);
	fd_set_dma_mode(mode);
	set_dma_addr(FLOPPY_DMA, bus_addr);	/* bus address, not virtual */
	fd_set_dma_count(size);
	virtual_dma_port = io;
	fd_enable_dma();

	return 0;
}
76
77#endif /* CONFIG_PCI */
78
/* Hook called by the generic floppy code to set up virtual-DMA state.
 * Alpha uses real ISA DMA for the floppy, so there is nothing to do.
 *
 * Made static to match every other inline helper in this header: a
 * plain (non-static) inline in a header provides no external
 * definition under C99 inline semantics, which can break linking if
 * a call is not inlined. */
static __inline__ void virtual_dma_init(void)
{
	/* Nothing to do on an Alpha */
}
83
84static int FDC1 = 0x3f0;
85static int FDC2 = -1;
86
87/*
88 * Again, the CMOS information doesn't work on the alpha..
89 */
90#define FLOPPY0_TYPE 6
91#define FLOPPY1_TYPE 0
92
93#define N_FDC 2
94#define N_DRIVE 8
95
96#define FLOPPY_MOTOR_MASK 0xf0
97
98/*
99 * Most Alphas have no problems with floppy DMA crossing 64k borders,
100 * except for certain ones, like XL and RUFFIAN.
101 *
102 * However, the test is simple and fast, and this *is* floppy, after all,
103 * so we do it for all platforms, just to make sure.
104 *
105 * This is advantageous in other circumstances as well, as in moving
106 * about the PCI DMA windows and forcing the floppy to start doing
107 * scatter-gather when it never had before, and there *is* a problem
108 * on that platform... ;-}
109 */
110
/* Nonzero iff the buffer [a, a+s) has its first and last bytes in
 * different 64KB-aligned regions, i.e. the transfer would cross a
 * 64KB boundary. */
static inline unsigned long CROSS_64KB(void *a, unsigned long s)
{
	unsigned long first = (unsigned long)a;
	unsigned long last = first + s - 1;

	return (first ^ last) & ~0xffffUL;
}
116
117#define EXTRA_FLOPPY_PARAMS
118
119#endif /* __ASM_ALPHA_FLOPPY_H */
diff --git a/include/asm-alpha/fpu.h b/include/asm-alpha/fpu.h
new file mode 100644
index 000000000000..c203fc2fa5cd
--- /dev/null
+++ b/include/asm-alpha/fpu.h
@@ -0,0 +1,193 @@
1#ifndef __ASM_ALPHA_FPU_H
2#define __ASM_ALPHA_FPU_H
3
4/*
5 * Alpha floating-point control register defines:
6 */
7#define FPCR_DNOD (1UL<<47) /* denorm INV trap disable */
8#define FPCR_DNZ (1UL<<48) /* denorms to zero */
9#define FPCR_INVD (1UL<<49) /* invalid op disable (opt.) */
10#define FPCR_DZED (1UL<<50) /* division by zero disable (opt.) */
11#define FPCR_OVFD (1UL<<51) /* overflow disable (optional) */
12#define FPCR_INV (1UL<<52) /* invalid operation */
13#define FPCR_DZE (1UL<<53) /* division by zero */
14#define FPCR_OVF (1UL<<54) /* overflow */
15#define FPCR_UNF (1UL<<55) /* underflow */
16#define FPCR_INE (1UL<<56) /* inexact */
17#define FPCR_IOV (1UL<<57) /* integer overflow */
18#define FPCR_UNDZ (1UL<<60) /* underflow to zero (opt.) */
19#define FPCR_UNFD (1UL<<61) /* underflow disable (opt.) */
20#define FPCR_INED (1UL<<62) /* inexact disable (opt.) */
21#define FPCR_SUM (1UL<<63) /* summary bit */
22
23#define FPCR_DYN_SHIFT 58 /* first dynamic rounding mode bit */
24#define FPCR_DYN_CHOPPED (0x0UL << FPCR_DYN_SHIFT) /* towards 0 */
25#define FPCR_DYN_MINUS (0x1UL << FPCR_DYN_SHIFT) /* towards -INF */
26#define FPCR_DYN_NORMAL (0x2UL << FPCR_DYN_SHIFT) /* towards nearest */
27#define FPCR_DYN_PLUS (0x3UL << FPCR_DYN_SHIFT) /* towards +INF */
28#define FPCR_DYN_MASK (0x3UL << FPCR_DYN_SHIFT)
29
30#define FPCR_MASK 0xffff800000000000L
31
32/*
33 * IEEE trap enables are implemented in software. These per-thread
34 * bits are stored in the "ieee_state" field of "struct thread_info".
35 * Thus, the bits are defined so as not to conflict with the
36 * floating-point enable bit (which is architected). On top of that,
37 * we want to make these bits compatible with OSF/1 so
38 * ieee_set_fp_control() etc. can be implemented easily and
39 * compatibly. The corresponding definitions are in
40 * /usr/include/machine/fpu.h under OSF/1.
41 */
42#define IEEE_TRAP_ENABLE_INV (1UL<<1) /* invalid op */
43#define IEEE_TRAP_ENABLE_DZE (1UL<<2) /* division by zero */
44#define IEEE_TRAP_ENABLE_OVF (1UL<<3) /* overflow */
45#define IEEE_TRAP_ENABLE_UNF (1UL<<4) /* underflow */
46#define IEEE_TRAP_ENABLE_INE (1UL<<5) /* inexact */
47#define IEEE_TRAP_ENABLE_DNO (1UL<<6) /* denorm */
48#define IEEE_TRAP_ENABLE_MASK (IEEE_TRAP_ENABLE_INV | IEEE_TRAP_ENABLE_DZE |\
49 IEEE_TRAP_ENABLE_OVF | IEEE_TRAP_ENABLE_UNF |\
50 IEEE_TRAP_ENABLE_INE | IEEE_TRAP_ENABLE_DNO)
51
52/* Denorm and Underflow flushing */
53#define IEEE_MAP_DMZ (1UL<<12) /* Map denorm inputs to zero */
54#define IEEE_MAP_UMZ (1UL<<13) /* Map underflowed outputs to zero */
55
56#define IEEE_MAP_MASK (IEEE_MAP_DMZ | IEEE_MAP_UMZ)
57
58/* status bits coming from fpcr: */
59#define IEEE_STATUS_INV (1UL<<17)
60#define IEEE_STATUS_DZE (1UL<<18)
61#define IEEE_STATUS_OVF (1UL<<19)
62#define IEEE_STATUS_UNF (1UL<<20)
63#define IEEE_STATUS_INE (1UL<<21)
64#define IEEE_STATUS_DNO (1UL<<22)
65
66#define IEEE_STATUS_MASK (IEEE_STATUS_INV | IEEE_STATUS_DZE | \
67 IEEE_STATUS_OVF | IEEE_STATUS_UNF | \
68 IEEE_STATUS_INE | IEEE_STATUS_DNO)
69
70#define IEEE_SW_MASK (IEEE_TRAP_ENABLE_MASK | \
71 IEEE_STATUS_MASK | IEEE_MAP_MASK)
72
73#define IEEE_CURRENT_RM_SHIFT 32
74#define IEEE_CURRENT_RM_MASK (3UL<<IEEE_CURRENT_RM_SHIFT)
75
76#define IEEE_STATUS_TO_EXCSUM_SHIFT 16
77
78#define IEEE_INHERIT (1UL<<63) /* inherit on thread create? */
79
80/*
81 * Convert the software IEEE trap enable and status bits into the
82 * hardware fpcr format.
83 *
84 * Digital Unix engineers receive my thanks for not defining the
85 * software bits identical to the hardware bits. The chip designers
86 * receive my thanks for making all the not-implemented fpcr bits
87 * RAZ forcing us to use system calls to read/write this value.
88 */
89
/*
 * Translate the software IEEE control word (swcr) into the hardware
 * FPCR layout.  Status bits move up by 35 and the DMZ bit by 36; the
 * software trap *enable* bits are inverted on the way in because the
 * FPCR holds trap *disable* bits.
 */
static inline unsigned long
ieee_swcr_to_fpcr(unsigned long sw)
{
	unsigned long fp;
	/* accrued status: swcr bits 17-22 -> fpcr bits 52-57 */
	fp = (sw & IEEE_STATUS_MASK) << 35;
	/* denorm-to-zero: IEEE_MAP_DMZ (bit 12) -> FPCR_DNZ (bit 48) */
	fp |= (sw & IEEE_MAP_DMZ) << 36;
	/* summary bit is set whenever any status bit is set */
	fp |= (sw & IEEE_STATUS_MASK ? FPCR_SUM : 0);
	/* enables -> disables: INV/DZE/OVF (bits 1-3) -> bits 49-51 */
	fp |= (~sw & (IEEE_TRAP_ENABLE_INV
		      | IEEE_TRAP_ENABLE_DZE
		      | IEEE_TRAP_ENABLE_OVF)) << 48;
	/* UNF/INE (bits 4-5) -> FPCR_UNFD/FPCR_INED (bits 61-62) */
	fp |= (~sw & (IEEE_TRAP_ENABLE_UNF | IEEE_TRAP_ENABLE_INE)) << 57;
	/* underflow-to-zero needs both UNDZ and UNFD set in the fpcr */
	fp |= (sw & IEEE_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
	/* DNO (bit 6) -> FPCR_DNOD (bit 47) */
	fp |= (~sw & IEEE_TRAP_ENABLE_DNO) << 41;
	return fp;
}
105
/*
 * Inverse of ieee_swcr_to_fpcr: recover the software IEEE control
 * word from the hardware FPCR layout.
 */
static inline unsigned long
ieee_fpcr_to_swcr(unsigned long fp)
{
	unsigned long sw;
	/* status: fpcr bits 52-57 -> swcr bits 17-22 */
	sw = (fp >> 35) & IEEE_STATUS_MASK;
	sw |= (fp >> 36) & IEEE_MAP_DMZ;
	/* hardware trap DISABLES become software trap ENABLES (inverted) */
	sw |= (~fp >> 48) & (IEEE_TRAP_ENABLE_INV
			     | IEEE_TRAP_ENABLE_DZE
			     | IEEE_TRAP_ENABLE_OVF);
	sw |= (~fp >> 57) & (IEEE_TRAP_ENABLE_UNF | IEEE_TRAP_ENABLE_INE);
	/* FPCR_UNDZ (bit 60) -> IEEE_MAP_UMZ (bit 13) */
	sw |= (fp >> 47) & IEEE_MAP_UMZ;
	sw |= (~fp >> 41) & IEEE_TRAP_ENABLE_DNO;
	return sw;
}
120
121#ifdef __KERNEL__
122
123/* The following two functions don't need trapb/excb instructions
124 around the mf_fpcr/mt_fpcr instructions because (a) the kernel
125 never generates arithmetic faults and (b) call_pal instructions
126 are implied trap barriers. */
127
/* Read the hardware floating-point control register. */
static inline unsigned long
rdfpcr(void)
{
	unsigned long tmp, ret;

	/* $f0 is used as scratch for the mf_fpcr result: its previous
	   value is saved first and restored afterwards.  With the
	   CIX/FIX extensions the save/restore goes through integer
	   registers (ftoit/itoft); otherwise it must bounce through
	   memory (stt/ldt). */
#if defined(__alpha_cix__) || defined(__alpha_fix__)
	__asm__ __volatile__ (
		"ftoit $f0,%0\n\t"
		"mf_fpcr $f0\n\t"
		"ftoit $f0,%1\n\t"
		"itoft %0,$f0"
		: "=r"(tmp), "=r"(ret));
#else
	__asm__ __volatile__ (
		"stt $f0,%0\n\t"
		"mf_fpcr $f0\n\t"
		"stt $f0,%1\n\t"
		"ldt $f0,%0"
		: "=m"(tmp), "=m"(ret));
#endif

	return ret;
}
151
/* Write 'val' to the hardware floating-point control register. */
static inline void
wrfpcr(unsigned long val)
{
	unsigned long tmp;

	/* As in rdfpcr(): $f0 is saved and restored around the mt_fpcr,
	   via integer registers when the CIX/FIX extensions are
	   available, via a memory bounce otherwise. */
#if defined(__alpha_cix__) || defined(__alpha_fix__)
	__asm__ __volatile__ (
		"ftoit $f0,%0\n\t"
		"itoft %1,$f0\n\t"
		"mt_fpcr $f0\n\t"
		"itoft %0,$f0"
		: "=&r"(tmp) : "r"(val));
#else
	__asm__ __volatile__ (
		"stt $f0,%0\n\t"
		"ldt $f0,%1\n\t"
		"mt_fpcr $f0\n\t"
		"ldt $f0,%0"
		: "=m"(tmp) : "m"(val));
#endif
}
173
174static inline unsigned long
175swcr_update_status(unsigned long swcr, unsigned long fpcr)
176{
177 /* EV6 implements most of the bits in hardware. Collect
178 the acrued exception bits from the real fpcr. */
179 if (implver() == IMPLVER_EV6) {
180 swcr &= ~IEEE_STATUS_MASK;
181 swcr |= (fpcr >> 35) & IEEE_STATUS_MASK;
182 }
183 return swcr;
184}
185
186extern unsigned long alpha_read_fp_reg (unsigned long reg);
187extern void alpha_write_fp_reg (unsigned long reg, unsigned long val);
188extern unsigned long alpha_read_fp_reg_s (unsigned long reg);
189extern void alpha_write_fp_reg_s (unsigned long reg, unsigned long val);
190
191#endif /* __KERNEL__ */
192
193#endif /* __ASM_ALPHA_FPU_H */
diff --git a/include/asm-alpha/gct.h b/include/asm-alpha/gct.h
new file mode 100644
index 000000000000..3504c704927c
--- /dev/null
+++ b/include/asm-alpha/gct.h
@@ -0,0 +1,58 @@
1#ifndef __ALPHA_GCT_H
2#define __ALPHA_GCT_H
3
4typedef u64 gct_id;
5typedef u64 gct6_handle;
6
/* One node of the configuration tree (GCT), revision 6.  Tree linkage
   is expressed with gct6_handle offsets rather than raw pointers; see
   GCT_NODE_PTR() for the conversion.  Magic 'GLXY' marks a valid node. */
typedef struct __gct6_node {
	u8 type;
	u8 subtype;
	u16 size;
	u32 hd_extension;
	gct6_handle owner;
	gct6_handle active_user;
	gct_id id;
	u64 flags;
	u16 rev;
	u16 change_counter;
	u16 max_child;
	u16 reserved1;
	gct6_handle saved_owner;
	gct6_handle affinity;
	/* tree linkage */
	gct6_handle parent;
	gct6_handle next;
	gct6_handle prev;
	gct6_handle child;
	u64 fw_flags;
	u64 os_usage;
	u64 fru_id;
	u32 checksum;
	u32 magic;	/* 'GLXY' */
} gct6_node;
32
/* Search descriptor for gct6_find_nodes(); presumably 'callout' is
   invoked for nodes matching type/subtype -- confirm against the
   gct6_find_nodes() implementation. */
typedef struct {
	u8 type;
	u8 subtype;
	void (*callout)(gct6_node *);
} gct6_search_struct;
38
39#define GCT_NODE_MAGIC 0x59584c47 /* 'GLXY' */
40
41/*
42 * node types
43 */
44#define GCT_TYPE_HOSE 0x0E
45
46/*
47 * node subtypes
48 */
49#define GCT_SUBTYPE_IO_PORT_MODULE 0x2C
50
/* Convert a GCT handle (a byte offset relative to the HWRPB FRU table)
   into a node pointer. */
#define GCT_NODE_PTR(off)	((gct6_node *)((char *)hwrpb +		\
					       hwrpb->frut_offset +	\
					       (gct6_handle)(off)))
54
55int gct6_find_nodes(gct6_node *, gct6_search_struct *);
56
57#endif /* __ALPHA_GCT_H */
58
diff --git a/include/asm-alpha/gentrap.h b/include/asm-alpha/gentrap.h
new file mode 100644
index 000000000000..ae50cc3192c7
--- /dev/null
+++ b/include/asm-alpha/gentrap.h
@@ -0,0 +1,37 @@
1#ifndef _ASMAXP_GENTRAP_H
2#define _ASMAXP_GENTRAP_H
3
4/*
5 * Definitions for gentrap causes. They are generated by user-level
6 * programs and therefore should be compatible with the corresponding
7 * OSF/1 definitions.
8 */
9#define GEN_INTOVF -1 /* integer overflow */
10#define GEN_INTDIV -2 /* integer division by zero */
11#define GEN_FLTOVF -3 /* fp overflow */
12#define GEN_FLTDIV -4 /* fp division by zero */
13#define GEN_FLTUND -5 /* fp underflow */
14#define GEN_FLTINV -6 /* invalid fp operand */
15#define GEN_FLTINE -7 /* inexact fp operand */
16#define GEN_DECOVF -8 /* decimal overflow (for COBOL??) */
17#define GEN_DECDIV -9 /* decimal division by zero */
18#define GEN_DECINV -10 /* invalid decimal operand */
19#define GEN_ROPRAND -11 /* reserved operand */
20#define GEN_ASSERTERR -12 /* assertion error */
21#define GEN_NULPTRERR -13 /* null pointer error */
22#define GEN_STKOVF -14 /* stack overflow */
23#define GEN_STRLENERR -15 /* string length error */
24#define GEN_SUBSTRERR -16 /* substring error */
25#define GEN_RANGERR -17 /* range error */
26#define GEN_SUBRNG -18
27#define GEN_SUBRNG1 -19
28#define GEN_SUBRNG2 -20
29#define GEN_SUBRNG3 -21 /* these report range errors for */
30#define GEN_SUBRNG4 -22 /* subscripting (indexing) at levels 0..7 */
31#define GEN_SUBRNG5 -23
32#define GEN_SUBRNG6 -24
33#define GEN_SUBRNG7 -25
34
35/* the remaining codes (-26..-1023) are reserved. */
36
37#endif /* _ASMAXP_GENTRAP_H */
diff --git a/include/asm-alpha/hardirq.h b/include/asm-alpha/hardirq.h
new file mode 100644
index 000000000000..c0593f9b21e1
--- /dev/null
+++ b/include/asm-alpha/hardirq.h
@@ -0,0 +1,29 @@
1#ifndef _ALPHA_HARDIRQ_H
2#define _ALPHA_HARDIRQ_H
3
4#include <linux/config.h>
5#include <linux/threads.h>
6#include <linux/cache.h>
7
8
9/* entry.S is sensitive to the offsets of these fields */
10typedef struct {
11 unsigned long __softirq_pending;
12} ____cacheline_aligned irq_cpustat_t;
13
14#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
15
16#define HARDIRQ_BITS 12
17
18/*
19 * The hardirq mask has to be large enough to have
20 * space for potentially nestable IRQ sources in the system
21 * to nest on a single CPU. On Alpha, interrupts are masked at the CPU
22 * by IPL as well as at the system level. We only have 8 IPLs (UNIX PALcode)
23 * so we really only have 8 nestable IRQs, but allow some overhead
24 */
25#if (1 << HARDIRQ_BITS) < 16
26#error HARDIRQ_BITS is too low!
27#endif
28
29#endif /* _ALPHA_HARDIRQ_H */
diff --git a/include/asm-alpha/hdreg.h b/include/asm-alpha/hdreg.h
new file mode 100644
index 000000000000..7f7fd1af0af3
--- /dev/null
+++ b/include/asm-alpha/hdreg.h
@@ -0,0 +1 @@
#include <asm-generic/hdreg.h>
diff --git a/include/asm-alpha/hw_irq.h b/include/asm-alpha/hw_irq.h
new file mode 100644
index 000000000000..a310b9efc906
--- /dev/null
+++ b/include/asm-alpha/hw_irq.h
@@ -0,0 +1,16 @@
1#ifndef _ALPHA_HW_IRQ_H
2#define _ALPHA_HW_IRQ_H
3
4#include <linux/config.h>
5
6static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {}
7
8extern volatile unsigned long irq_err_count;
9
10#ifdef CONFIG_ALPHA_GENERIC
11#define ACTUAL_NR_IRQS alpha_mv.nr_irqs
12#else
13#define ACTUAL_NR_IRQS NR_IRQS
14#endif
15
16#endif
diff --git a/include/asm-alpha/hwrpb.h b/include/asm-alpha/hwrpb.h
new file mode 100644
index 000000000000..8e8f871af7cf
--- /dev/null
+++ b/include/asm-alpha/hwrpb.h
@@ -0,0 +1,220 @@
1#ifndef __ALPHA_HWRPB_H
2#define __ALPHA_HWRPB_H
3
4#define INIT_HWRPB ((struct hwrpb_struct *) 0x10000000)
5
6/*
7 * DEC processor types for Alpha systems. Found in HWRPB.
8 * These values are architected.
9 */
10
11#define EV3_CPU 1 /* EV3 */
12#define EV4_CPU 2 /* EV4 (21064) */
13#define LCA4_CPU 4 /* LCA4 (21066/21068) */
14#define EV5_CPU 5 /* EV5 (21164) */
15#define EV45_CPU 6 /* EV4.5 (21064/xxx) */
16#define EV56_CPU 7 /* EV5.6 (21164) */
17#define EV6_CPU 8 /* EV6 (21264) */
18#define PCA56_CPU 9 /* PCA56 (21164PC) */
19#define PCA57_CPU 10 /* PCA57 (notyet) */
20#define EV67_CPU 11 /* EV67 (21264A) */
21#define EV68CB_CPU 12 /* EV68CB (21264C) */
22#define EV68AL_CPU 13 /* EV68AL (21264B) */
23#define EV68CX_CPU 14 /* EV68CX (21264D) */
24#define EV7_CPU 15 /* EV7 (21364) */
25#define EV79_CPU 16 /* EV79 (21364??) */
26#define EV69_CPU 17 /* EV69 (21264/EV69A) */
27
28/*
29 * DEC system types for Alpha systems. Found in HWRPB.
30 * These values are architected.
31 */
32
33#define ST_ADU 1 /* Alpha ADU systype */
34#define ST_DEC_4000 2 /* Cobra systype */
35#define ST_DEC_7000 3 /* Ruby systype */
36#define ST_DEC_3000_500 4 /* Flamingo systype */
37#define ST_DEC_2000_300 6 /* Jensen systype */
38#define ST_DEC_3000_300 7 /* Pelican systype */
39#define ST_DEC_2100_A500 9 /* Sable systype */
40#define ST_DEC_AXPVME_64 10 /* AXPvme system type */
41#define ST_DEC_AXPPCI_33 11 /* NoName system type */
42#define ST_DEC_TLASER 12 /* Turbolaser systype */
43#define ST_DEC_2100_A50 13 /* Avanti systype */
44#define ST_DEC_MUSTANG 14 /* Mustang systype */
45#define ST_DEC_ALCOR 15 /* Alcor (EV5) systype */
46#define ST_DEC_1000 17 /* Mikasa systype */
47#define ST_DEC_EB64 18 /* EB64 systype */
48#define ST_DEC_EB66 19 /* EB66 systype */
49#define ST_DEC_EB64P 20 /* EB64+ systype */
50#define ST_DEC_BURNS 21 /* laptop systype */
51#define ST_DEC_RAWHIDE 22 /* Rawhide systype */
52#define ST_DEC_K2 23 /* K2 systype */
53#define ST_DEC_LYNX 24 /* Lynx systype */
54#define ST_DEC_XL 25 /* Alpha XL systype */
55#define ST_DEC_EB164 26 /* EB164 systype */
56#define ST_DEC_NORITAKE 27 /* Noritake systype */
57#define ST_DEC_CORTEX 28 /* Cortex systype */
58#define ST_DEC_MIATA 30 /* Miata systype */
59#define ST_DEC_XXM 31 /* XXM systype */
60#define ST_DEC_TAKARA 32 /* Takara systype */
61#define ST_DEC_YUKON 33 /* Yukon systype */
62#define ST_DEC_TSUNAMI 34 /* Tsunami systype */
63#define ST_DEC_WILDFIRE 35 /* Wildfire systype */
64#define ST_DEC_CUSCO 36 /* CUSCO systype */
65#define ST_DEC_EIGER 37 /* Eiger systype */
66#define ST_DEC_TITAN 38 /* Titan systype */
67#define ST_DEC_MARVEL 39 /* Marvel systype */
68
69/* UNOFFICIAL!!! */
70#define ST_UNOFFICIAL_BIAS 100
71#define ST_DTI_RUFFIAN 101 /* RUFFIAN systype */
72
73/* Alpha Processor, Inc. systems */
74#define ST_API_BIAS 200
75#define ST_API_NAUTILUS 201 /* UP1000 systype */
76
/* Privileged Context Block as handed to/from PALcode. */
struct pcb_struct {
	unsigned long ksp;		/* kernel stack pointer */
	unsigned long usp;		/* user stack pointer */
	unsigned long ptbr;		/* page table base register */
	unsigned int pcc;		/* process cycle counter */
	unsigned int asn;		/* address space number */
	unsigned long unique;		/* per-thread unique value */
	unsigned long flags;
	unsigned long res1, res2;	/* reserved */
};
87
88struct percpu_struct {
89 unsigned long hwpcb[16];
90 unsigned long flags;
91 unsigned long pal_mem_size;
92 unsigned long pal_scratch_size;
93 unsigned long pal_mem_pa;
94 unsigned long pal_scratch_pa;
95 unsigned long pal_revision;
96 unsigned long type;
97 unsigned long variation;
98 unsigned long revision;
99 unsigned long serial_no[2];
100 unsigned long logout_area_pa;
101 unsigned long logout_area_len;
102 unsigned long halt_PCBB;
103 unsigned long halt_PC;
104 unsigned long halt_PS;
105 unsigned long halt_arg;
106 unsigned long halt_ra;
107 unsigned long halt_pv;
108 unsigned long halt_reason;
109 unsigned long res;
110 unsigned long ipc_buffer[21];
111 unsigned long palcode_avail[16];
112 unsigned long compatibility;
113 unsigned long console_data_log_pa;
114 unsigned long console_data_log_length;
115 unsigned long bcache_info;
116};
117
118struct procdesc_struct {
119 unsigned long weird_vms_stuff;
120 unsigned long address;
121};
122
123struct vf_map_struct {
124 unsigned long va;
125 unsigned long pa;
126 unsigned long count;
127};
128
129struct crb_struct {
130 struct procdesc_struct * dispatch_va;
131 struct procdesc_struct * dispatch_pa;
132 struct procdesc_struct * fixup_va;
133 struct procdesc_struct * fixup_pa;
134 /* virtual->physical map */
135 unsigned long map_entries;
136 unsigned long map_pages;
137 struct vf_map_struct map[1];
138};
139
140struct memclust_struct {
141 unsigned long start_pfn;
142 unsigned long numpages;
143 unsigned long numtested;
144 unsigned long bitmap_va;
145 unsigned long bitmap_pa;
146 unsigned long bitmap_chksum;
147 unsigned long usage;
148};
149
150struct memdesc_struct {
151 unsigned long chksum;
152 unsigned long optional_pa;
153 unsigned long numclusters;
154 struct memclust_struct cluster[0];
155};
156
157struct dsr_struct {
158 long smm; /* SMM nubber used by LMF */
159 unsigned long lurt_off; /* offset to LURT table */
160 unsigned long sysname_off; /* offset to sysname char count */
161};
162
163struct hwrpb_struct {
164 unsigned long phys_addr; /* check: physical address of the hwrpb */
165 unsigned long id; /* check: "HWRPB\0\0\0" */
166 unsigned long revision;
167 unsigned long size; /* size of hwrpb */
168 unsigned long cpuid;
169 unsigned long pagesize; /* 8192, I hope */
170 unsigned long pa_bits; /* number of physical address bits */
171 unsigned long max_asn;
172 unsigned char ssn[16]; /* system serial number: big bother is watching */
173 unsigned long sys_type;
174 unsigned long sys_variation;
175 unsigned long sys_revision;
176 unsigned long intr_freq; /* interval clock frequency * 4096 */
177 unsigned long cycle_freq; /* cycle counter frequency */
178 unsigned long vptb; /* Virtual Page Table Base address */
179 unsigned long res1;
180 unsigned long tbhb_offset; /* Translation Buffer Hint Block */
181 unsigned long nr_processors;
182 unsigned long processor_size;
183 unsigned long processor_offset;
184 unsigned long ctb_nr;
185 unsigned long ctb_size; /* console terminal block size */
186 unsigned long ctbt_offset; /* console terminal block table offset */
187 unsigned long crb_offset; /* console callback routine block */
188 unsigned long mddt_offset; /* memory data descriptor table */
189 unsigned long cdb_offset; /* configuration data block (or NULL) */
190 unsigned long frut_offset; /* FRU table (or NULL) */
191 void (*save_terminal)(unsigned long);
192 unsigned long save_terminal_data;
193 void (*restore_terminal)(unsigned long);
194 unsigned long restore_terminal_data;
195 void (*CPU_restart)(unsigned long);
196 unsigned long CPU_restart_data;
197 unsigned long res2;
198 unsigned long res3;
199 unsigned long chksum;
200 unsigned long rxrdy;
201 unsigned long txrdy;
202 unsigned long dsr_offset; /* "Dynamic System Recognition Data Block Table" */
203};
204
205#ifdef __KERNEL__
206
207extern struct hwrpb_struct *hwrpb;
208
209static inline void
210hwrpb_update_checksum(struct hwrpb_struct *h)
211{
212 unsigned long sum = 0, *l;
213 for (l = (unsigned long *) h; l < (unsigned long *) &h->chksum; ++l)
214 sum += *l;
215 h->chksum = sum;
216}
217
218#endif /* __KERNEL__ */
219
220#endif /* __ALPHA_HWRPB_H */
diff --git a/include/asm-alpha/ide.h b/include/asm-alpha/ide.h
new file mode 100644
index 000000000000..68934a25931f
--- /dev/null
+++ b/include/asm-alpha/ide.h
@@ -0,0 +1,61 @@
1/*
2 * linux/include/asm-alpha/ide.h
3 *
4 * Copyright (C) 1994-1996 Linus Torvalds & authors
5 */
6
7/*
8 * This file contains the alpha architecture specific IDE code.
9 */
10
11#ifndef __ASMalpha_IDE_H
12#define __ASMalpha_IDE_H
13
14#ifdef __KERNEL__
15
16#include <linux/config.h>
17
18#ifndef MAX_HWIFS
19#define MAX_HWIFS CONFIG_IDE_MAX_HWIFS
20#endif
21
22#define IDE_ARCH_OBSOLETE_DEFAULTS
23
/*
 * Map a legacy IDE I/O base address to its ISA IRQ line.
 * Returns 0 for an unrecognized base.
 */
static inline int ide_default_irq(unsigned long base)
{
	if (base == 0x1f0)
		return 14;
	if (base == 0x170)
		return 15;
	if (base == 0x1e8)
		return 11;
	if (base == 0x168)
		return 10;
	return 0;
}
35
/*
 * Return the legacy I/O base address for IDE interface 'index'
 * (0..3), or 0 for any other index.
 */
static inline unsigned long ide_default_io_base(int index)
{
	static const unsigned long bases[] = { 0x1f0, 0x170, 0x1e8, 0x168 };

	if (index < 0 || index >= (int)(sizeof(bases) / sizeof(bases[0])))
		return 0;
	return bases[index];
}
47
48#define IDE_ARCH_OBSOLETE_INIT
49#define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */
50
51#ifdef CONFIG_PCI
52#define ide_init_default_irq(base) (0)
53#else
54#define ide_init_default_irq(base) ide_default_irq(base)
55#endif
56
57#include <asm-generic/ide_iops.h>
58
59#endif /* __KERNEL__ */
60
61#endif /* __ASMalpha_IDE_H */
diff --git a/include/asm-alpha/io.h b/include/asm-alpha/io.h
new file mode 100644
index 000000000000..871dd7ad909d
--- /dev/null
+++ b/include/asm-alpha/io.h
@@ -0,0 +1,682 @@
1#ifndef __ALPHA_IO_H
2#define __ALPHA_IO_H
3
4#ifdef __KERNEL__
5
6#include <linux/config.h>
7#include <linux/kernel.h>
8#include <asm/compiler.h>
9#include <asm/system.h>
10#include <asm/pgtable.h>
11#include <asm/machvec.h>
12#include <asm/hwrpb.h>
13
14/* The generic header contains only prototypes. Including it ensures that
15 the implementation we have here matches that interface. */
16#include <asm-generic/iomap.h>
17
18/* We don't use IO slowdowns on the Alpha, but.. */
19#define __SLOW_DOWN_IO do { } while (0)
20#define SLOW_DOWN_IO do { } while (0)
21
22/*
23 * Virtual -> physical identity mapping starts at this offset
24 */
25#ifdef USE_48_BIT_KSEG
26#define IDENT_ADDR 0xffff800000000000UL
27#else
28#define IDENT_ADDR 0xfffffc0000000000UL
29#endif
30
31/*
32 * We try to avoid hae updates (thus the cache), but when we
33 * do need to update the hae, we need to do it atomically, so
34 * that any interrupts wouldn't get confused with the hae
35 * register not being up-to-date with respect to the hardware
36 * value.
37 */
/* Unconditionally update the HAE (hardware address extension) cache
   and register. */
static inline void __set_hae(unsigned long new_hae)
{
	unsigned long flags;
	/* Interrupts are blocked so that the cached value and the
	   hardware register are updated as one atomic unit. */
	local_irq_save(flags);

	alpha_mv.hae_cache = new_hae;
	*alpha_mv.hae_register = new_hae;
	mb();
	/* Re-read to make sure it was written. */
	new_hae = *alpha_mv.hae_register;

	local_irq_restore(flags);
}
51
52static inline void set_hae(unsigned long new_hae)
53{
54 if (new_hae != alpha_mv.hae_cache)
55 __set_hae(new_hae);
56}
57
58/*
59 * Change virtual addresses to physical addresses and vv.
60 */
#ifdef USE_48_BIT_KSEG
/* With a 48-bit KSEG the translation is a plain offset in both
   directions. */
static inline unsigned long virt_to_phys(void *address)
{
	return (unsigned long)address - IDENT_ADDR;
}

static inline void * phys_to_virt(unsigned long address)
{
	return (void *) (address + IDENT_ADDR);
}
#else
static inline unsigned long virt_to_phys(void *address)
{
	unsigned long phys = (unsigned long)address;

	/* Sign-extend from bit 41. */
	phys <<= (64 - 41);
	phys = (long)phys >> (64 - 41);

	/* Crop to the physical address width of the processor. */
	phys &= (1ul << hwrpb->pa_bits) - 1;

	return phys;
}

static inline void * phys_to_virt(unsigned long address)
{
	/* Keep the low 41 bits and rebase into the identity-mapped KSEG. */
	return (void *)(IDENT_ADDR + (address & ((1ul << 41) - 1)));
}
#endif
91
92#define page_to_phys(page) page_to_pa(page)
93
94/* This depends on working iommu. */
95#define BIO_VMERGE_BOUNDARY (alpha_mv.mv_pci_tbi ? PAGE_SIZE : 0)
96
97/* Maximum PIO space address supported? */
98#define IO_SPACE_LIMIT 0xffff
99
100/*
101 * Change addresses as seen by the kernel (virtual) to addresses as
102 * seen by a device (bus), and vice versa.
103 *
104 * Note that this only works for a limited range of kernel addresses,
105 * and very well may not span all memory. Consider this interface
106 * deprecated in favour of the mapping functions in <asm/pci.h>.
107 */
108extern unsigned long __direct_map_base;
109extern unsigned long __direct_map_size;
110
/* Translate a kernel virtual address to a bus address through the
   direct-map window; returns 0 when the physical address lies outside
   the window.  NOTE(review): the bound check is
   'phys <= __direct_map_size', so an address exactly equal to the
   window size passes -- confirm whether '<' was intended. */
static inline unsigned long virt_to_bus(void *address)
{
	unsigned long phys = virt_to_phys(address);
	unsigned long bus = phys + __direct_map_base;
	return phys <= __direct_map_size ? bus : 0;
}
117
/* Inverse of virt_to_bus: rebase a bus address out of the direct-map
   window and back into a kernel virtual address. */
static inline void *bus_to_virt(unsigned long address)
{
	void *virt;

	/* This check is a sanity check but also ensures that bus address 0
	   maps to virtual address 0 which is useful to detect null pointers
	   (the NCR driver is much simpler if NULL pointers are preserved). */
	address -= __direct_map_base;
	virt = phys_to_virt(address);
	/* After rebasing, anything at or below zero was outside the window. */
	return (long)address <= 0 ? NULL : virt;
}
129
130/*
131 * There are different chipsets to interface the Alpha CPUs to the world.
132 */
133
134#define IO_CONCAT(a,b) _IO_CONCAT(a,b)
135#define _IO_CONCAT(a,b) a ## _ ## b
136
137#ifdef CONFIG_ALPHA_GENERIC
138
139/* In a generic kernel, we always go through the machine vector. */
140
141#define REMAP1(TYPE, NAME, QUAL) \
142static inline TYPE generic_##NAME(QUAL void __iomem *addr) \
143{ \
144 return alpha_mv.mv_##NAME(addr); \
145}
146
147#define REMAP2(TYPE, NAME, QUAL) \
148static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr) \
149{ \
150 alpha_mv.mv_##NAME(b, addr); \
151}
152
153REMAP1(unsigned int, ioread8, /**/)
154REMAP1(unsigned int, ioread16, /**/)
155REMAP1(unsigned int, ioread32, /**/)
156REMAP1(u8, readb, const volatile)
157REMAP1(u16, readw, const volatile)
158REMAP1(u32, readl, const volatile)
159REMAP1(u64, readq, const volatile)
160
161REMAP2(u8, iowrite8, /**/)
162REMAP2(u16, iowrite16, /**/)
163REMAP2(u32, iowrite32, /**/)
164REMAP2(u8, writeb, volatile)
165REMAP2(u16, writew, volatile)
166REMAP2(u32, writel, volatile)
167REMAP2(u64, writeq, volatile)
168
169#undef REMAP1
170#undef REMAP2
171
/* CONFIG_ALPHA_GENERIC: the remaining I/O primitives also indirect
   through the run-time machine vector, so one kernel binary can drive
   any supported chipset. */
static inline void __iomem *generic_ioportmap(unsigned long a)
{
	return alpha_mv.mv_ioportmap(a);
}

static inline void __iomem *generic_ioremap(unsigned long a, unsigned long s)
{
	return alpha_mv.mv_ioremap(a, s);
}

static inline void generic_iounmap(volatile void __iomem *a)
{
	return alpha_mv.mv_iounmap(a);
}

static inline int generic_is_ioaddr(unsigned long a)
{
	return alpha_mv.mv_is_ioaddr(a);
}

static inline int generic_is_mmio(const volatile void __iomem *a)
{
	return alpha_mv.mv_is_mmio(a);
}
196
197#define __IO_PREFIX generic
198#define generic_trivial_rw_bw 0
199#define generic_trivial_rw_lq 0
200#define generic_trivial_io_bw 0
201#define generic_trivial_io_lq 0
202#define generic_trivial_iounmap 0
203
204#else
205
206#if defined(CONFIG_ALPHA_APECS)
207# include <asm/core_apecs.h>
208#elif defined(CONFIG_ALPHA_CIA)
209# include <asm/core_cia.h>
210#elif defined(CONFIG_ALPHA_IRONGATE)
211# include <asm/core_irongate.h>
212#elif defined(CONFIG_ALPHA_JENSEN)
213# include <asm/jensen.h>
214#elif defined(CONFIG_ALPHA_LCA)
215# include <asm/core_lca.h>
216#elif defined(CONFIG_ALPHA_MARVEL)
217# include <asm/core_marvel.h>
218#elif defined(CONFIG_ALPHA_MCPCIA)
219# include <asm/core_mcpcia.h>
220#elif defined(CONFIG_ALPHA_POLARIS)
221# include <asm/core_polaris.h>
222#elif defined(CONFIG_ALPHA_T2)
223# include <asm/core_t2.h>
224#elif defined(CONFIG_ALPHA_TSUNAMI)
225# include <asm/core_tsunami.h>
226#elif defined(CONFIG_ALPHA_TITAN)
227# include <asm/core_titan.h>
228#elif defined(CONFIG_ALPHA_WILDFIRE)
229# include <asm/core_wildfire.h>
230#else
231#error "What system is this?"
232#endif
233
234#endif /* GENERIC */
235
236/*
237 * We always have external versions of these routines.
238 */
239extern u8 inb(unsigned long port);
240extern u16 inw(unsigned long port);
241extern u32 inl(unsigned long port);
242extern void outb(u8 b, unsigned long port);
243extern void outw(u16 b, unsigned long port);
244extern void outl(u32 b, unsigned long port);
245
246extern u8 readb(const volatile void __iomem *addr);
247extern u16 readw(const volatile void __iomem *addr);
248extern u32 readl(const volatile void __iomem *addr);
249extern u64 readq(const volatile void __iomem *addr);
250extern void writeb(u8 b, volatile void __iomem *addr);
251extern void writew(u16 b, volatile void __iomem *addr);
252extern void writel(u32 b, volatile void __iomem *addr);
253extern void writeq(u64 b, volatile void __iomem *addr);
254
255extern u8 __raw_readb(const volatile void __iomem *addr);
256extern u16 __raw_readw(const volatile void __iomem *addr);
257extern u32 __raw_readl(const volatile void __iomem *addr);
258extern u64 __raw_readq(const volatile void __iomem *addr);
259extern void __raw_writeb(u8 b, volatile void __iomem *addr);
260extern void __raw_writew(u16 b, volatile void __iomem *addr);
261extern void __raw_writel(u32 b, volatile void __iomem *addr);
262extern void __raw_writeq(u64 b, volatile void __iomem *addr);
263
264/*
265 * Mapping from port numbers to __iomem space is pretty easy.
266 */
267
268/* These two have to be extern inline because of the extern prototype from
269 <asm-generic/iomap.h>. It is not legal to mix "extern" and "static" for
270 the same declaration. */
extern inline void __iomem *ioport_map(unsigned long port, unsigned int size)
{
	/* 'size' is unused: the chipset mapping needs only the port. */
	return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
}

extern inline void ioport_unmap(void __iomem *addr)
{
	/* ioport_map allocates nothing, so there is nothing to release. */
}
279
/* Map a bus-address range into __iomem space via the chipset hook. */
static inline void __iomem *ioremap(unsigned long port, unsigned long size)
{
	return IO_CONCAT(__IO_PREFIX,ioremap) (port, size);
}

/* 'flags' (caching attributes) are ignored here. */
static inline void __iomem *__ioremap(unsigned long port, unsigned long size,
				      unsigned long flags)
{
	return ioremap(port, size);
}

/* No distinct uncached variant: identical to ioremap on this port. */
static inline void __iomem * ioremap_nocache(unsigned long offset,
					     unsigned long size)
{
	return ioremap(offset, size);
}

static inline void iounmap(volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,iounmap)(addr);
}

static inline int __is_ioaddr(unsigned long addr)
{
	return IO_CONCAT(__IO_PREFIX,is_ioaddr)(addr);
}
/* Macro wrapper lets callers pass any pointer type. */
#define __is_ioaddr(a)		__is_ioaddr((unsigned long)(a))

static inline int __is_mmio(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,is_mmio)(addr);
}
312
313
314/*
315 * If the actual I/O bits are sufficiently trivial, then expand inline.
316 */
317
318#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
319extern inline unsigned int ioread8(void __iomem *addr)
320{
321 unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
322 mb();
323 return ret;
324}
325
326extern inline unsigned int ioread16(void __iomem *addr)
327{
328 unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
329 mb();
330 return ret;
331}
332
333extern inline void iowrite8(u8 b, void __iomem *addr)
334{
335 IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr);
336 mb();
337}
338
339extern inline void iowrite16(u16 b, void __iomem *addr)
340{
341 IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr);
342 mb();
343}
344
345extern inline u8 inb(unsigned long port)
346{
347 return ioread8(ioport_map(port, 1));
348}
349
350extern inline u16 inw(unsigned long port)
351{
352 return ioread16(ioport_map(port, 2));
353}
354
355extern inline void outb(u8 b, unsigned long port)
356{
357 iowrite8(b, ioport_map(port, 1));
358}
359
360extern inline void outw(u16 b, unsigned long port)
361{
362 iowrite16(b, ioport_map(port, 2));
363}
364#endif
365
366#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
367extern inline unsigned int ioread32(void __iomem *addr)
368{
369 unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
370 mb();
371 return ret;
372}
373
374extern inline void iowrite32(u32 b, void __iomem *addr)
375{
376 IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr);
377 mb();
378}
379
380extern inline u32 inl(unsigned long port)
381{
382 return ioread32(ioport_map(port, 4));
383}
384
385extern inline void outl(u32 b, unsigned long port)
386{
387 iowrite32(b, ioport_map(port, 4));
388}
389#endif
390
#if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1
/*
 * Byte/word MMIO accessors for the fully-trivial case.  The __raw_*
 * forms perform the bare access with no barrier; readb/readw and
 * writeb/writew add the mb() needed to order the access against
 * subsequent I/O.
 */
extern inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readb)(addr);
}

extern inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readw)(addr);
}

extern inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writeb)(b, addr);
}

extern inline void __raw_writew(u16 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writew)(b, addr);
}

extern inline u8 readb(const volatile void __iomem *addr)
{
	u8 ret = __raw_readb(addr);
	mb();
	return ret;
}

extern inline u16 readw(const volatile void __iomem *addr)
{
	u16 ret = __raw_readw(addr);
	mb();
	return ret;
}

extern inline void writeb(u8 b, volatile void __iomem *addr)
{
	__raw_writeb(b, addr);
	mb();
}

extern inline void writew(u16 b, volatile void __iomem *addr)
{
	__raw_writew(b, addr);
	mb();
}
#endif
438
#if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1
/*
 * Longword/quadword MMIO accessors, same structure as the byte/word
 * case above: __raw_* is barrier-free, readl/readq/writel/writeq
 * add the mb().
 */
extern inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readl)(addr);
}

extern inline u64 __raw_readq(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readq)(addr);
}

extern inline void __raw_writel(u32 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writel)(b, addr);
}

extern inline void __raw_writeq(u64 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writeq)(b, addr);
}

extern inline u32 readl(const volatile void __iomem *addr)
{
	u32 ret = __raw_readl(addr);
	mb();
	return ret;
}

extern inline u64 readq(const volatile void __iomem *addr)
{
	u64 ret = __raw_readq(addr);
	mb();
	return ret;
}

extern inline void writel(u32 b, volatile void __iomem *addr)
{
	__raw_writel(b, addr);
	mb();
}

extern inline void writeq(u64 b, volatile void __iomem *addr)
{
	__raw_writeq(b, addr);
	mb();
}
#endif
486
/* The "_p" (pause) variants need no extra delay here; they alias the
   plain port ops. */
#define inb_p		inb
#define inw_p		inw
#define inl_p		inl
#define outb_p		outb
#define outw_p		outw
#define outl_p		outl

/* Relaxed MMIO reads: the bare access with no trailing mb(). */
#define readb_relaxed(addr) __raw_readb(addr)
#define readw_relaxed(addr) __raw_readw(addr)
#define readl_relaxed(addr) __raw_readl(addr)
#define readq_relaxed(addr) __raw_readq(addr)

/* mmiowb is a no-op on this architecture. */
#define mmiowb()
499
/*
 * String version of IO memory access ops:
 */
extern void memcpy_fromio(void *, const volatile void __iomem *, long);
extern void memcpy_toio(volatile void __iomem *, const void *, long);
extern void _memset_c_io(volatile void __iomem *, unsigned long, long);

/* Fill `len' bytes of MMIO space with byte value `c'; the byte is
   replicated across a 64-bit pattern for _memset_c_io. */
static inline void memset_io(volatile void __iomem *addr, u8 c, long len)
{
	_memset_c_io(addr, 0x0101010101010101UL * c, len);
}

#define __HAVE_ARCH_MEMSETW_IO
/* 16-bit variant: the word value is replicated four times across the
   64-bit fill pattern. */
static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len)
{
	_memset_c_io(addr, 0x0001000100010001UL * c, len);
}
517
/*
 * String versions of in/out ops:
 */
extern void insb (unsigned long port, void *dst, unsigned long count);
extern void insw (unsigned long port, void *dst, unsigned long count);
extern void insl (unsigned long port, void *dst, unsigned long count);
extern void outsb (unsigned long port, const void *src, unsigned long count);
extern void outsw (unsigned long port, const void *src, unsigned long count);
extern void outsl (unsigned long port, const void *src, unsigned long count);

/*
 * XXX - We don't have csum_partial_copy_fromio() yet, so we cheat here and
 * just copy it. The net code will then do the checksum later. Presently
 * only used by some shared memory 8390 Ethernet cards anyway.
 */

/* The final argument (a checksum seed in other ports) is ignored here. */
#define eth_io_copy_and_sum(skb,src,len,unused) \
	memcpy_fromio((skb)->data,src,len)

#define isa_eth_io_copy_and_sum(skb,src,len,unused) \
	isa_memcpy_fromio((skb)->data,src,len)
539
/*
 * Compare `length' bytes of MMIO space at io_addr against the expected
 * signature.  Returns 1 on a full match, 0 at the first mismatch.
 *
 * NOTE(review): the do/while executes at least once and loops until
 * --length reaches zero, so this assumes length >= 1; a caller passing
 * length == 0 would read one byte and then spin on the wrapped counter
 * -- confirm all callers pass a positive length.
 */
static inline int
check_signature(const volatile void __iomem *io_addr,
	const unsigned char *signature, int length)
{
	do {
		if (readb(io_addr) != *signature)
			return 0;
		io_addr++;
		signature++;
	} while (--length);
	return 1;
}
552
553
554/*
555 * ISA space is mapped to some machine-specific location on Alpha.
556 * Call into the existing hooks to get the address translated.
557 */
558
559static inline u8
560isa_readb(unsigned long offset)
561{
562 void __iomem *addr = ioremap(offset, 1);
563 u8 ret = readb(addr);
564 iounmap(addr);
565 return ret;
566}
567
568static inline u16
569isa_readw(unsigned long offset)
570{
571 void __iomem *addr = ioremap(offset, 2);
572 u16 ret = readw(addr);
573 iounmap(addr);
574 return ret;
575}
576
577static inline u32
578isa_readl(unsigned long offset)
579{
580 void __iomem *addr = ioremap(offset, 2);
581 u32 ret = readl(addr);
582 iounmap(addr);
583 return ret;
584}
585
586static inline void
587isa_writeb(u8 b, unsigned long offset)
588{
589 void __iomem *addr = ioremap(offset, 2);
590 writeb(b, addr);
591 iounmap(addr);
592}
593
594static inline void
595isa_writew(u16 w, unsigned long offset)
596{
597 void __iomem *addr = ioremap(offset, 2);
598 writew(w, addr);
599 iounmap(addr);
600}
601
602static inline void
603isa_writel(u32 l, unsigned long offset)
604{
605 void __iomem *addr = ioremap(offset, 2);
606 writel(l, addr);
607 iounmap(addr);
608}
609
/* ISA-space string operations: map the window for the full length of
   the transfer, perform it with the MMIO string ops, then unmap. */
static inline void
isa_memset_io(unsigned long offset, u8 val, long n)
{
	void __iomem *addr = ioremap(offset, n);
	memset_io(addr, val, n);
	iounmap(addr);
}

static inline void
isa_memcpy_fromio(void *dest, unsigned long offset, long n)
{
	void __iomem *addr = ioremap(offset, n);
	memcpy_fromio(dest, addr, n);
	iounmap(addr);
}

static inline void
isa_memcpy_toio(unsigned long offset, const void *src, long n)
{
	void __iomem *addr = ioremap(offset, n);
	memcpy_toio(addr, src, n);
	iounmap(addr);
}
633
/*
 * The Alpha Jensen hardware for some rather strange reason puts
 * the RTC clock at 0x170 instead of 0x70. Probably due to some
 * misguided idea about using 0x70 for NMI stuff.
 *
 * These defines will override the defaults when doing RTC queries
 */

#ifdef CONFIG_ALPHA_GENERIC
# define RTC_PORT(x)	((x) + alpha_mv.rtc_port)
#else
# ifdef CONFIG_ALPHA_JENSEN
#  define RTC_PORT(x)	(0x170+(x))
# else
#  define RTC_PORT(x)	(0x70 + (x))
# endif
#endif
#define RTC_ALWAYS_BCD	0

/* Nothing to do: the dma_cache_* maintenance hooks are no-ops here. */

#define dma_cache_inv(_start,_size)		do { } while (0)
#define dma_cache_wback(_start,_size)		do { } while (0)
#define dma_cache_wback_inv(_start,_size)	do { } while (0)

/*
 * Some mucking forons use if[n]def writeq to check if platform has it.
 * It's a bloody bad idea and we probably want ARCH_HAS_WRITEQ for them
 * to play with; for now just use cpp anti-recursion logics and make sure
 * that damn thing is defined and expands to itself.
 */

#define writeq writeq
#define readq readq

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 * (identity mapping here).
 */
#define xlate_dev_kmem_ptr(p)	p

#endif /* __KERNEL__ */

#endif /* __ALPHA_IO_H */
diff --git a/include/asm-alpha/io_trivial.h b/include/asm-alpha/io_trivial.h
new file mode 100644
index 000000000000..b10d1aa4cdd1
--- /dev/null
+++ b/include/asm-alpha/io_trivial.h
@@ -0,0 +1,127 @@
/* Trivial implementations of basic i/o routines.  Assumes that all
   of the hard work has been done by ioremap and ioportmap, and that
   access to i/o space is linear. */

/* This file may be included multiple times.  IO_CONCAT pastes the
   current __IO_PREFIX onto each routine name, so every inclusion
   generates a distinct set of per-platform accessors. */

#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
/* Byte/word port accessors: a plain volatile load or store through
   the cookie; __kernel_ldbu/__kernel_stb &co. are the byte/word
   load/store primitives. */
__EXTERN_INLINE unsigned int
IO_CONCAT(__IO_PREFIX,ioread8)(void __iomem *a)
{
	return __kernel_ldbu(*(volatile u8 __force *)a);
}

__EXTERN_INLINE unsigned int
IO_CONCAT(__IO_PREFIX,ioread16)(void __iomem *a)
{
	return __kernel_ldwu(*(volatile u16 __force *)a);
}

__EXTERN_INLINE void
IO_CONCAT(__IO_PREFIX,iowrite8)(u8 b, void __iomem *a)
{
	__kernel_stb(b, *(volatile u8 __force *)a);
}

__EXTERN_INLINE void
IO_CONCAT(__IO_PREFIX,iowrite16)(u16 b, void __iomem *a)
{
	__kernel_stw(b, *(volatile u16 __force *)a);
}
#endif
32
#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
/* Longword port accessors: direct 32-bit volatile accesses. */
__EXTERN_INLINE unsigned int
IO_CONCAT(__IO_PREFIX,ioread32)(void __iomem *a)
{
	return *(volatile u32 __force *)a;
}

__EXTERN_INLINE void
IO_CONCAT(__IO_PREFIX,iowrite32)(u32 b, void __iomem *a)
{
	*(volatile u32 __force *)a = b;
}
#endif
46
#if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1
/* trivial_rw_bw == 1: byte/word MMIO is a direct memory access. */
__EXTERN_INLINE u8
IO_CONCAT(__IO_PREFIX,readb)(const volatile void __iomem *a)
{
	return __kernel_ldbu(*(const volatile u8 __force *)a);
}

__EXTERN_INLINE u16
IO_CONCAT(__IO_PREFIX,readw)(const volatile void __iomem *a)
{
	return __kernel_ldwu(*(const volatile u16 __force *)a);
}

__EXTERN_INLINE void
IO_CONCAT(__IO_PREFIX,writeb)(u8 b, volatile void __iomem *a)
{
	__kernel_stb(b, *(volatile u8 __force *)a);
}

__EXTERN_INLINE void
IO_CONCAT(__IO_PREFIX,writew)(u16 b, volatile void __iomem *a)
{
	__kernel_stw(b, *(volatile u16 __force *)a);
}
#elif IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 2
/* trivial_rw_bw == 2: byte/word MMIO is routed through the platform's
   ioread/iowrite routines instead of direct loads and stores. */
__EXTERN_INLINE u8
IO_CONCAT(__IO_PREFIX,readb)(const volatile void __iomem *a)
{
	return IO_CONCAT(__IO_PREFIX,ioread8)((void __iomem *)a);
}

__EXTERN_INLINE u16
IO_CONCAT(__IO_PREFIX,readw)(const volatile void __iomem *a)
{
	return IO_CONCAT(__IO_PREFIX,ioread16)((void __iomem *)a);
}

__EXTERN_INLINE void
IO_CONCAT(__IO_PREFIX,writeb)(u8 b, volatile void __iomem *a)
{
	IO_CONCAT(__IO_PREFIX,iowrite8)(b, (void __iomem *)a);
}

__EXTERN_INLINE void
IO_CONCAT(__IO_PREFIX,writew)(u16 b, volatile void __iomem *a)
{
	IO_CONCAT(__IO_PREFIX,iowrite16)(b, (void __iomem *)a);
}
#endif
96
#if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1
/* trivial_rw_lq == 1: long/quad MMIO is a direct memory access. */
__EXTERN_INLINE u32
IO_CONCAT(__IO_PREFIX,readl)(const volatile void __iomem *a)
{
	return *(const volatile u32 __force *)a;
}

__EXTERN_INLINE u64
IO_CONCAT(__IO_PREFIX,readq)(const volatile void __iomem *a)
{
	return *(const volatile u64 __force *)a;
}

__EXTERN_INLINE void
IO_CONCAT(__IO_PREFIX,writel)(u32 b, volatile void __iomem *a)
{
	*(volatile u32 __force *)a = b;
}

__EXTERN_INLINE void
IO_CONCAT(__IO_PREFIX,writeq)(u64 b, volatile void __iomem *a)
{
	*(volatile u64 __force *)a = b;
}
#endif
122
#if IO_CONCAT(__IO_PREFIX,trivial_iounmap)
/* Trivial iounmap: nothing was allocated by the matching ioremap. */
__EXTERN_INLINE void IO_CONCAT(__IO_PREFIX,iounmap)(volatile void __iomem *a)
{
}
#endif
diff --git a/include/asm-alpha/ioctl.h b/include/asm-alpha/ioctl.h
new file mode 100644
index 000000000000..fc63727f4178
--- /dev/null
+++ b/include/asm-alpha/ioctl.h
@@ -0,0 +1,66 @@
#ifndef _ALPHA_IOCTL_H
#define _ALPHA_IOCTL_H

/*
 * The original linux ioctl numbering scheme was just a general
 * "anything goes" setup, where more or less random numbers were
 * assigned.  Sorry, I was clueless when I started out on this.
 *
 * On the alpha, we'll try to clean it up a bit, using a more sane
 * ioctl numbering, and also trying to be compatible with OSF/1 in
 * the process. I'd like to clean it up for the i386 as well, but
 * it's so painful recognizing both the new and the old numbers..
 */

/* Field widths: 8 (nr) + 8 (type) + 13 (size) + 3 (dir) = 32 bits. */
#define _IOC_NRBITS	8
#define _IOC_TYPEBITS	8
#define _IOC_SIZEBITS	13
#define _IOC_DIRBITS	3

#define _IOC_NRMASK	((1 << _IOC_NRBITS)-1)
#define _IOC_TYPEMASK	((1 << _IOC_TYPEBITS)-1)
#define _IOC_SIZEMASK	((1 << _IOC_SIZEBITS)-1)
#define _IOC_DIRMASK	((1 << _IOC_DIRBITS)-1)

#define _IOC_NRSHIFT	0
#define _IOC_TYPESHIFT	(_IOC_NRSHIFT+_IOC_NRBITS)
#define _IOC_SIZESHIFT	(_IOC_TYPESHIFT+_IOC_TYPEBITS)
#define _IOC_DIRSHIFT	(_IOC_SIZESHIFT+_IOC_SIZEBITS)

/*
 * Direction bits _IOC_NONE could be 0, but OSF/1 gives it a bit.
 * And this turns out useful to catch old ioctl numbers in header
 * files for us.
 */
#define _IOC_NONE	1U
#define _IOC_READ	2U
#define _IOC_WRITE	4U

/* Compose an ioctl number from its four fields. */
#define _IOC(dir,type,nr,size)			\
	((unsigned int)				\
	 (((dir) << _IOC_DIRSHIFT) |		\
	  ((type) << _IOC_TYPESHIFT) |		\
	  ((nr) << _IOC_NRSHIFT) |		\
	  ((size) << _IOC_SIZESHIFT)))

/* used to create numbers */
#define _IO(type,nr)		_IOC(_IOC_NONE,(type),(nr),0)
#define _IOR(type,nr,size)	_IOC(_IOC_READ,(type),(nr),sizeof(size))
#define _IOW(type,nr,size)	_IOC(_IOC_WRITE,(type),(nr),sizeof(size))
#define _IOWR(type,nr,size)	_IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))

/* used to decode them.. */
#define _IOC_DIR(nr)	(((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
#define _IOC_TYPE(nr)	(((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
#define _IOC_NR(nr)	(((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
#define _IOC_SIZE(nr)	(((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)

/* ...and for the drivers/sound files... */

#define IOC_IN		(_IOC_WRITE << _IOC_DIRSHIFT)
#define IOC_OUT		(_IOC_READ << _IOC_DIRSHIFT)
#define IOC_INOUT	((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
#define IOCSIZE_MASK	(_IOC_SIZEMASK << _IOC_SIZESHIFT)
#define IOCSIZE_SHIFT	(_IOC_SIZESHIFT)

#endif /* _ALPHA_IOCTL_H */
diff --git a/include/asm-alpha/ioctls.h b/include/asm-alpha/ioctls.h
new file mode 100644
index 000000000000..67bb9f6fdbe4
--- /dev/null
+++ b/include/asm-alpha/ioctls.h
@@ -0,0 +1,112 @@
#ifndef _ASM_ALPHA_IOCTLS_H
#define _ASM_ALPHA_IOCTLS_H

#include <asm/ioctl.h>

/* tty/terminal ioctl numbers.  The 'f' and 't' entries use the _IO*
   encoding macros from asm/ioctl.h; the raw 0x54xx values below are
   fixed numbers rather than _IO() encodings. */

#define FIOCLEX		_IO('f', 1)
#define FIONCLEX	_IO('f', 2)
#define FIOASYNC	_IOW('f', 125, int)
#define FIONBIO		_IOW('f', 126, int)
#define FIONREAD	_IOR('f', 127, int)
#define TIOCINQ		FIONREAD
#define FIOQSIZE	_IOR('f', 128, loff_t)

#define TIOCGETP	_IOR('t', 8, struct sgttyb)
#define TIOCSETP	_IOW('t', 9, struct sgttyb)
#define TIOCSETN	_IOW('t', 10, struct sgttyb)	/* TIOCSETP wo flush */

#define TIOCSETC	_IOW('t', 17, struct tchars)
#define TIOCGETC	_IOR('t', 18, struct tchars)
#define TCGETS		_IOR('t', 19, struct termios)
#define TCSETS		_IOW('t', 20, struct termios)
#define TCSETSW		_IOW('t', 21, struct termios)
#define TCSETSF		_IOW('t', 22, struct termios)

#define TCGETA		_IOR('t', 23, struct termio)
#define TCSETA		_IOW('t', 24, struct termio)
#define TCSETAW		_IOW('t', 25, struct termio)
#define TCSETAF		_IOW('t', 28, struct termio)

#define TCSBRK		_IO('t', 29)
#define TCXONC		_IO('t', 30)
#define TCFLSH		_IO('t', 31)

#define TIOCSWINSZ	_IOW('t', 103, struct winsize)
#define TIOCGWINSZ	_IOR('t', 104, struct winsize)
#define TIOCSTART	_IO('t', 110)		/* start output, like ^Q */
#define TIOCSTOP	_IO('t', 111)		/* stop output, like ^S */
#define TIOCOUTQ	_IOR('t', 115, int)	/* output queue size */

#define TIOCGLTC	_IOR('t', 116, struct ltchars)
#define TIOCSLTC	_IOW('t', 117, struct ltchars)
#define TIOCSPGRP	_IOW('t', 118, int)
#define TIOCGPGRP	_IOR('t', 119, int)

#define TIOCEXCL	0x540C
#define TIOCNXCL	0x540D
#define TIOCSCTTY	0x540E

#define TIOCSTI		0x5412
#define TIOCMGET	0x5415
#define TIOCMBIS	0x5416
#define TIOCMBIC	0x5417
#define TIOCMSET	0x5418
/* Modem line bits for the TIOCM* ioctls above. */
# define TIOCM_LE	0x001
# define TIOCM_DTR	0x002
# define TIOCM_RTS	0x004
# define TIOCM_ST	0x008
# define TIOCM_SR	0x010
# define TIOCM_CTS	0x020
# define TIOCM_CAR	0x040
# define TIOCM_RNG	0x080
# define TIOCM_DSR	0x100
# define TIOCM_CD	TIOCM_CAR
# define TIOCM_RI	TIOCM_RNG
# define TIOCM_OUT1	0x2000
# define TIOCM_OUT2	0x4000
# define TIOCM_LOOP	0x8000

#define TIOCGSOFTCAR	0x5419
#define TIOCSSOFTCAR	0x541A
#define TIOCLINUX	0x541C
#define TIOCCONS	0x541D
#define TIOCGSERIAL	0x541E
#define TIOCSSERIAL	0x541F
#define TIOCPKT		0x5420
/* Packet-mode flags returned by TIOCPKT reads. */
# define TIOCPKT_DATA		 0
# define TIOCPKT_FLUSHREAD	 1
# define TIOCPKT_FLUSHWRITE	 2
# define TIOCPKT_STOP		 4
# define TIOCPKT_START		 8
# define TIOCPKT_NOSTOP		16
# define TIOCPKT_DOSTOP		32


#define TIOCNOTTY	0x5422
#define TIOCSETD	0x5423
#define TIOCGETD	0x5424
#define TCSBRKP		0x5425	/* Needed for POSIX tcsendbreak() */
#define TIOCSBRK	0x5427  /* BSD compatibility */
#define TIOCCBRK	0x5428  /* BSD compatibility */
#define TIOCGSID	0x5429  /* Return the session ID of FD */
#define TIOCGPTN	_IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
#define TIOCSPTLCK	_IOW('T',0x31, int)  /* Lock/unlock Pty */

#define TIOCSERCONFIG	0x5453
#define TIOCSERGWILD	0x5454
#define TIOCSERSWILD	0x5455
#define TIOCGLCKTRMIOS	0x5456
#define TIOCSLCKTRMIOS	0x5457
#define TIOCSERGSTRUCT	0x5458	/* For debugging only */
#define TIOCSERGETLSR   0x5459	/* Get line status register */
  /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
# define TIOCSER_TEMT    0x01	/* Transmitter physically empty */
#define TIOCSERGETMULTI 0x545A	/* Get multiport config  */
#define TIOCSERSETMULTI 0x545B	/* Set multiport config */

#define TIOCMIWAIT	0x545C	/* wait for a change on serial input line(s) */
#define TIOCGICOUNT	0x545D	/* read serial port inline interrupt counts */
#define TIOCGHAYESESP	0x545E  /* Get Hayes ESP configuration */
#define TIOCSHAYESESP	0x545F  /* Set Hayes ESP configuration */

#endif /* _ASM_ALPHA_IOCTLS_H */
diff --git a/include/asm-alpha/ipcbuf.h b/include/asm-alpha/ipcbuf.h
new file mode 100644
index 000000000000..d9c0e1a50702
--- /dev/null
+++ b/include/asm-alpha/ipcbuf.h
@@ -0,0 +1,28 @@
#ifndef _ALPHA_IPCBUF_H
#define _ALPHA_IPCBUF_H

/*
 * The ipc64_perm structure for alpha architecture.
 * Note extra padding because this structure is passed back and forth
 * between kernel and user space.
 *
 * Pad space is left for:
 * - 32-bit seq
 * - 2 miscellaneous 64-bit values
 */

struct ipc64_perm
{
	__kernel_key_t	key;	/* IPC key */
	__kernel_uid_t	uid;	/* owner uid */
	__kernel_gid_t	gid;	/* owner gid */
	__kernel_uid_t	cuid;	/* creator uid */
	__kernel_gid_t	cgid;	/* creator gid */
	__kernel_mode_t	mode;	/* access modes */
	unsigned short	seq;	/* sequence number */
	unsigned short	__pad1;
	unsigned long	__unused1;
	unsigned long	__unused2;
};

#endif /* _ALPHA_IPCBUF_H */
diff --git a/include/asm-alpha/irq.h b/include/asm-alpha/irq.h
new file mode 100644
index 000000000000..566db720000a
--- /dev/null
+++ b/include/asm-alpha/irq.h
@@ -0,0 +1,100 @@
#ifndef _ALPHA_IRQ_H
#define _ALPHA_IRQ_H

/*
 * linux/include/alpha/irq.h
 *
 * (C) 1994 Linus Torvalds
 */

#include <linux/linkage.h>
#include <linux/config.h>

/* Select NR_IRQS by platform configuration. */

#if defined(CONFIG_ALPHA_GENERIC)

/* Here NR_IRQS is not exact, but rather an upper bound.  This is used
   many places throughout the kernel to size static arrays.  That's ok,
   we'll use alpha_mv.nr_irqs when we want the real thing. */

/* When LEGACY_START_ADDRESS is selected, we leave out:
     TITAN
     WILDFIRE
     MARVEL

   This helps keep the kernel object size reasonable for the majority
   of machines.
*/

# if defined(CONFIG_ALPHA_LEGACY_START_ADDRESS)
#  define NR_IRQS	(128)		/* max is RAWHIDE/TAKARA */
# else
#  define NR_IRQS	(32768 + 16)	/* marvel - 32 pids */
# endif

#elif defined(CONFIG_ALPHA_CABRIOLET) || \
      defined(CONFIG_ALPHA_EB66P)     || \
      defined(CONFIG_ALPHA_EB164)     || \
      defined(CONFIG_ALPHA_PC164)     || \
      defined(CONFIG_ALPHA_LX164)
# define NR_IRQS	35

#elif defined(CONFIG_ALPHA_EB66)      || \
      defined(CONFIG_ALPHA_EB64P)     || \
      defined(CONFIG_ALPHA_MIKASA)
# define NR_IRQS	32

#elif defined(CONFIG_ALPHA_ALCOR)     || \
      defined(CONFIG_ALPHA_MIATA)     || \
      defined(CONFIG_ALPHA_RUFFIAN)   || \
      defined(CONFIG_ALPHA_RX164)     || \
      defined(CONFIG_ALPHA_NORITAKE)
# define NR_IRQS	48

#elif defined(CONFIG_ALPHA_SABLE)     || \
      defined(CONFIG_ALPHA_SX164)
# define NR_IRQS	40

#elif defined(CONFIG_ALPHA_DP264)     || \
      defined(CONFIG_ALPHA_LYNX)      || \
      defined(CONFIG_ALPHA_SHARK)     || \
      defined(CONFIG_ALPHA_EIGER)
# define NR_IRQS	64

#elif defined(CONFIG_ALPHA_TITAN)
#define NR_IRQS		80

#elif defined(CONFIG_ALPHA_RAWHIDE) || \
      defined(CONFIG_ALPHA_TAKARA)
# define NR_IRQS	128

#elif defined(CONFIG_ALPHA_WILDFIRE)
# define NR_IRQS	2048	/* enuff for 8 QBBs */

#elif defined(CONFIG_ALPHA_MARVEL)
# define NR_IRQS	(32768 + 16) 	/* marvel - 32 pids*/

#else /* everyone else */
# define NR_IRQS	16
#endif
79
/*
 * Canonicalize an IRQ number: the cascade IRQ 2 is reported as 9;
 * every other number maps to itself.
 */
static __inline__ int irq_canonicalize(int irq)
{
	/*
	 * XXX is this true for all Alpha's?  The old serial driver
	 * did it this way for years without any complaints, so....
	 */
	if (irq == 2)
		return 9;
	return irq;
}
88
/* IRQ enable/disable, implemented in the arch irq code. */
extern void disable_irq(unsigned int);
extern void disable_irq_nosync(unsigned int);
extern void enable_irq(unsigned int);

/* Handler pointer for the performance-counter interrupt; assigned
   elsewhere in the arch code. */
struct pt_regs;
extern void (*perf_irq)(unsigned long, struct pt_regs *);

struct irqaction;
int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);


#endif /* _ALPHA_IRQ_H */
diff --git a/include/asm-alpha/jensen.h b/include/asm-alpha/jensen.h
new file mode 100644
index 000000000000..964b06ead43b
--- /dev/null
+++ b/include/asm-alpha/jensen.h
@@ -0,0 +1,346 @@
#ifndef __ALPHA_JENSEN_H
#define __ALPHA_JENSEN_H

#include <asm/compiler.h>

/*
 * Defines for the AlphaPC EISA IO and memory address space.
 */

/*
 * NOTE! The memory operations do not set any memory barriers, as it's
 * not needed for cases like a frame buffer that is essentially memory-like.
 * You need to do them by hand if the operations depend on ordering.
 *
 * Similarly, the port IO operations do a "mb" only after a write operation:
 * if an mb is needed before (as in the case of doing memory mapped IO
 * first, and then a port IO operation to the same device), it needs to be
 * done by hand.
 *
 * After the above has bitten me 100 times, I'll give up and just do the
 * mb all the time, but right now I'm hoping this will work out.  Avoiding
 * mb's may potentially be a noticeable speed improvement, but I can't
 * honestly say I've tested it.
 *
 * Handling interrupts that need to do mb's to synchronize to non-interrupts
 * is another fun race area.  Don't do it (because if you do, I'll have to
 * do *everything* with interrupts disabled, ugh).
 */

/* All of the following are fixed physical addresses, offsets from
   IDENT_ADDR. */

/*
 * EISA Interrupt Acknowledge address
 */
#define EISA_INTA		(IDENT_ADDR + 0x100000000UL)

/*
 * FEPROM addresses
 */
#define EISA_FEPROM0		(IDENT_ADDR + 0x180000000UL)
#define EISA_FEPROM1		(IDENT_ADDR + 0x1A0000000UL)

/*
 * VL82C106 base address
 */
#define EISA_VL82C106		(IDENT_ADDR + 0x1C0000000UL)

/*
 * EISA "Host Address Extension" address (bits 25-31 of the EISA address)
 */
#define EISA_HAE		(IDENT_ADDR + 0x1D0000000UL)

/*
 * "SYSCTL" register address
 */
#define EISA_SYSCTL		(IDENT_ADDR + 0x1E0000000UL)

/*
 * "spare" register address
 */
#define EISA_SPARE		(IDENT_ADDR + 0x1F0000000UL)

/*
 * EISA memory address offset
 */
#define EISA_MEM		(IDENT_ADDR + 0x200000000UL)

/*
 * EISA IO address offset
 */
#define EISA_IO			(IDENT_ADDR + 0x300000000UL)
70
71
#ifdef __KERNEL__

#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline
#define __IO_EXTERN_INLINE
#endif

/*
 * Handle the "host address register". This needs to be set
 * to the high 7 bits of the EISA address.  This is also needed
 * for EISA IO addresses, which are only 16 bits wide (the
 * hae needs to be set to 0).
 *
 * HAE isn't needed for the local IO operations, though.
 */

#define JENSEN_HAE_ADDRESS	EISA_HAE
#define JENSEN_HAE_MASK		0x1ffffff

/* Update HAE only when the cached value changes, to avoid redundant
   register writes. */
__EXTERN_INLINE void jensen_set_hae(unsigned long addr)
{
	/* hae on the Jensen is bits 31:25 shifted right */
	addr >>= 25;
	if (addr != alpha_mv.hae_cache)
		set_hae(addr);
}
98
#define vuip	volatile unsigned int *

/*
 * IO functions
 *
 * The "local" functions are those that don't go out to the EISA bus,
 * but instead act on the VL82C106 chip directly.. This is mainly the
 * keyboard, RTC, printer and first two serial lines..
 *
 * The local stuff makes for some complications, but it seems to be
 * gone in the PCI version. I hope I can get DEC suckered^H^H^H^H^H^H^H^H
 * convinced that I need one of the newer machines.
 */

/* Local (VL82C106) byte input: the register is selected by shifting
   the port address left 9 bits into the chip's window; only the low
   byte of the 32-bit lane is meaningful. */
static inline unsigned int jensen_local_inb(unsigned long addr)
{
	return 0xff & *(vuip)((addr << 9) + EISA_VL82C106);
}

static inline void jensen_local_outb(u8 b, unsigned long addr)
{
	*(vuip)((addr << 9) + EISA_VL82C106) = b;
	mb();
}
123
/* EISA-bus byte I/O: clear HAE first (EISA I/O addresses are only 16
   bits wide), then access the sparse I/O window; the byte lane is
   selected by the low address bits and extracted with extbl. */
static inline unsigned int jensen_bus_inb(unsigned long addr)
{
	long result;

	jensen_set_hae(0);
	result = *(volatile int *)((addr << 7) + EISA_IO + 0x00);
	return __kernel_extbl(result, addr & 3);
}

/* On output, replicate the byte into all four lanes of the longword. */
static inline void jensen_bus_outb(u8 b, unsigned long addr)
{
	jensen_set_hae(0);
	*(vuip)((addr << 7) + EISA_IO + 0x00) = b * 0x01010101;
	mb();
}
139
/*
 * It seems gcc is not very good at optimizing away logical
 * operations that result in operations across inline functions.
 * Which is why this is a macro.
 */

/* Fixed: 0x3f8-0x3ff is COM1 (0x2f8-0x2ff is COM2); the comment was
   mislabelled. */
#define jensen_is_local(addr) ( \
/* keyboard */	(addr == 0x60 || addr == 0x64) || \
/* RTC */	(addr == 0x170 || addr == 0x171) || \
/* mb COM2 */	(addr >= 0x2f8 && addr <= 0x2ff) || \
/* mb LPT1 */	(addr >= 0x3bc && addr <= 0x3be) || \
/* mb COM1 */	(addr >= 0x3f8 && addr <= 0x3ff))
152
/* Byte port I/O: dispatch to the VL82C106 ("local") path or the EISA
   bus path depending on the port number. */
__EXTERN_INLINE u8 jensen_inb(unsigned long addr)
{
	if (jensen_is_local(addr))
		return jensen_local_inb(addr);
	else
		return jensen_bus_inb(addr);
}

__EXTERN_INLINE void jensen_outb(u8 b, unsigned long addr)
{
	if (jensen_is_local(addr))
		jensen_local_outb(b, addr);
	else
		jensen_bus_outb(b, addr);
}
168
/* Word/longword EISA port I/O.  HAE is cleared first; the sparse-space
   offset selects the access size (0x20 = word, 0x60 = longword), and
   for word reads the low address bits select the lane to shift down. */
__EXTERN_INLINE u16 jensen_inw(unsigned long addr)
{
	long result;

	jensen_set_hae(0);
	result = *(volatile int *) ((addr << 7) + EISA_IO + 0x20);
	result >>= (addr & 3) * 8;
	return 0xffffUL & result;
}

__EXTERN_INLINE u32 jensen_inl(unsigned long addr)
{
	jensen_set_hae(0);
	return *(vuip) ((addr << 7) + EISA_IO + 0x60);
}

/* Word output replicates the value into both halves of the longword. */
__EXTERN_INLINE void jensen_outw(u16 b, unsigned long addr)
{
	jensen_set_hae(0);
	*(vuip) ((addr << 7) + EISA_IO + 0x20) = b * 0x00010001;
	mb();
}

__EXTERN_INLINE void jensen_outl(u32 b, unsigned long addr)
{
	jensen_set_hae(0);
	*(vuip) ((addr << 7) + EISA_IO + 0x60) = b;
	mb();
}
198
/*
 * Memory functions.  Each access sets HAE from the high address bits,
 * masks the address down to the HAE window, and then goes through the
 * sparse memory space (offset 0x00 = byte, 0x20 = word, 0x60 = long).
 * No barriers here -- see the NOTE at the top of this file.
 */

__EXTERN_INLINE u8 jensen_readb(const volatile void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	long result;

	jensen_set_hae(addr);
	addr &= JENSEN_HAE_MASK;
	result = *(volatile int *) ((addr << 7) + EISA_MEM + 0x00);
	result >>= (addr & 3) * 8;
	return 0xffUL & result;
}

__EXTERN_INLINE u16 jensen_readw(const volatile void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	long result;

	jensen_set_hae(addr);
	addr &= JENSEN_HAE_MASK;
	result = *(volatile int *) ((addr << 7) + EISA_MEM + 0x20);
	result >>= (addr & 3) * 8;
	return 0xffffUL & result;
}

__EXTERN_INLINE u32 jensen_readl(const volatile void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	jensen_set_hae(addr);
	addr &= JENSEN_HAE_MASK;
	return *(vuip) ((addr << 7) + EISA_MEM + 0x60);
}

/* Quadwords are composed from two consecutive longword accesses. */
__EXTERN_INLINE u64 jensen_readq(const volatile void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	unsigned long r0, r1;

	jensen_set_hae(addr);
	addr &= JENSEN_HAE_MASK;
	addr = (addr << 7) + EISA_MEM + 0x60;
	r0 = *(vuip) (addr);
	r1 = *(vuip) (addr + (4 << 7));
	return r1 << 32 | r0;
}

/* Byte/word writes replicate the value across the longword lanes. */
__EXTERN_INLINE void jensen_writeb(u8 b, volatile void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	jensen_set_hae(addr);
	addr &= JENSEN_HAE_MASK;
	*(vuip) ((addr << 7) + EISA_MEM + 0x00) = b * 0x01010101;
}

__EXTERN_INLINE void jensen_writew(u16 b, volatile void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	jensen_set_hae(addr);
	addr &= JENSEN_HAE_MASK;
	*(vuip) ((addr << 7) + EISA_MEM + 0x20) = b * 0x00010001;
}

__EXTERN_INLINE void jensen_writel(u32 b, volatile void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	jensen_set_hae(addr);
	addr &= JENSEN_HAE_MASK;
	*(vuip) ((addr << 7) + EISA_MEM + 0x60) = b;
}

__EXTERN_INLINE void jensen_writeq(u64 b, volatile void __iomem *xaddr)
{
	unsigned long addr = (unsigned long) xaddr;
	jensen_set_hae(addr);
	addr &= JENSEN_HAE_MASK;
	addr = (addr << 7) + EISA_MEM + 0x60;
	*(vuip) (addr) = b;
	*(vuip) (addr + (4 << 7)) = b >> 32;
}
281
/* Port "mapping" is the identity: the cookie is just the port number. */
__EXTERN_INLINE void __iomem *jensen_ioportmap(unsigned long addr)
{
	return (void __iomem *)addr;
}

/* MMIO cookies are the address offset by 0x100000000, which is what
   jensen_is_mmio tests for below. */
__EXTERN_INLINE void __iomem *jensen_ioremap(unsigned long addr,
					     unsigned long size)
{
	return (void __iomem *)(addr + 0x100000000ul);
}

/* An address is an I/O address when it is non-negative as a signed
   64-bit value. */
__EXTERN_INLINE int jensen_is_ioaddr(unsigned long addr)
{
	return (long)addr >= 0;
}

/* Distinguish MMIO cookies (>= the ioremap offset) from port cookies. */
__EXTERN_INLINE int jensen_is_mmio(const volatile void __iomem *addr)
{
	return (unsigned long)addr >= 0x100000000ul;
}
302
/* New-style ioread interface.  All the routines are so ugly for Jensen
   that it doesn't make sense to merge them. */

/* Generate jensen_ioread{8,16,32} / jensen_iowrite{8,16,32}: dispatch
   to the MMIO routines (undoing the ioremap offset) or to the port
   routines, depending on the cookie kind. */
#define IOPORT(OS, NS)							\
__EXTERN_INLINE unsigned int jensen_ioread##NS(void __iomem *xaddr)	\
{									\
	if (jensen_is_mmio(xaddr))					\
		return jensen_read##OS(xaddr - 0x100000000ul);		\
	else								\
		return jensen_in##OS((unsigned long)xaddr);		\
}									\
__EXTERN_INLINE void jensen_iowrite##NS(u##NS b, void __iomem *xaddr)	\
{									\
	if (jensen_is_mmio(xaddr))					\
		jensen_write##OS(b, xaddr - 0x100000000ul);		\
	else								\
		jensen_out##OS(b, (unsigned long)xaddr);		\
}

IOPORT(b, 8)
IOPORT(w, 16)
IOPORT(l, 32)

#undef IOPORT
327
#undef vuip

/* Nothing is "trivial" on Jensen except iounmap; io_trivial.h therefore
   only generates jensen_iounmap from this prefix. */
#undef __IO_PREFIX
#define __IO_PREFIX		jensen
#define jensen_trivial_rw_bw	0
#define jensen_trivial_rw_lq	0
#define jensen_trivial_io_bw	0
#define jensen_trivial_io_lq	0
#define jensen_trivial_iounmap	1
#include <asm/io_trivial.h>

#ifdef __IO_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __IO_EXTERN_INLINE
#endif
343
344#endif /* __KERNEL__ */
345
346#endif /* __ALPHA_JENSEN_H */
diff --git a/include/asm-alpha/kmap_types.h b/include/asm-alpha/kmap_types.h
new file mode 100644
index 000000000000..3d10cd3ea75f
--- /dev/null
+++ b/include/asm-alpha/kmap_types.h
@@ -0,0 +1,33 @@
#ifndef _ASM_KMAP_TYPES_H
#define _ASM_KMAP_TYPES_H

/* Dummy header just to define km_type. */

#include <linux/config.h>

/* With CONFIG_DEBUG_HIGHMEM, D(n) inserts a __KM_FENCE_n guard
   enumerator before each real kmap slot; otherwise it expands to
   nothing and the slots get consecutive values. */
#ifdef CONFIG_DEBUG_HIGHMEM
# define D(n) __KM_FENCE_##n ,
#else
# define D(n)
#endif

enum km_type {
D(0) KM_BOUNCE_READ,
D(1) KM_SKB_SUNRPC_DATA,
D(2) KM_SKB_DATA_SOFTIRQ,
D(3) KM_USER0,
D(4) KM_USER1,
D(5) KM_BIO_SRC_IRQ,
D(6) KM_BIO_DST_IRQ,
D(7) KM_PTE0,
D(8) KM_PTE1,
D(9) KM_IRQ0,
D(10) KM_IRQ1,
D(11) KM_SOFTIRQ0,
D(12) KM_SOFTIRQ1,
D(13) KM_TYPE_NR
};

#undef D

#endif
diff --git a/include/asm-alpha/linkage.h b/include/asm-alpha/linkage.h
new file mode 100644
index 000000000000..291c2d01c44f
--- /dev/null
+++ b/include/asm-alpha/linkage.h
@@ -0,0 +1,6 @@
1#ifndef __ASM_LINKAGE_H
2#define __ASM_LINKAGE_H
3
4/* Nothing to see here... */
5
6#endif
diff --git a/include/asm-alpha/local.h b/include/asm-alpha/local.h
new file mode 100644
index 000000000000..90a510fa358e
--- /dev/null
+++ b/include/asm-alpha/local.h
@@ -0,0 +1,40 @@
#ifndef _ALPHA_LOCAL_H
#define _ALPHA_LOCAL_H

#include <linux/percpu.h>
#include <asm/atomic.h>

/* local_t: a per-CPU counter, implemented with the 64-bit atomics. */
typedef atomic64_t local_t;

#define LOCAL_INIT(i)	ATOMIC64_INIT(i)
#define local_read(v)	atomic64_read(v)
#define local_set(v,i)	atomic64_set(v,i)

/* Atomic variants, safe against interrupts on the same CPU. */
#define local_inc(v)	atomic64_inc(v)
#define local_dec(v)	atomic64_dec(v)
#define local_add(i, v)	atomic64_add(i, v)
#define local_sub(i, v)	atomic64_sub(i, v)
17
/*
 * Non-atomic variants, for use when the caller already has exclusive
 * access to the counter.
 *
 * Fix: __local_dec was defined as "((v)->counter++)" -- an apparent
 * copy-and-paste from __local_inc -- so it incremented the counter
 * instead of decrementing it.
 */
#define __local_inc(v)		((v)->counter++)
#define __local_dec(v)		((v)->counter--)
#define __local_add(i,v)	((v)->counter+=(i))
#define __local_sub(i,v)	((v)->counter-=(i))
22
/* Use these for per-cpu local_t variables: on some archs they are
 * much more efficient than these naive implementations.  Note they take
 * a variable, not an address.
 */
#define cpu_local_read(v)	local_read(&__get_cpu_var(v))
#define cpu_local_set(v, i)	local_set(&__get_cpu_var(v), (i))

#define cpu_local_inc(v)	local_inc(&__get_cpu_var(v))
#define cpu_local_dec(v)	local_dec(&__get_cpu_var(v))
#define cpu_local_add(i, v)	local_add((i), &__get_cpu_var(v))
#define cpu_local_sub(i, v)	local_sub((i), &__get_cpu_var(v))

/* Non-atomic per-cpu variants; same caveats as the __local_* ops. */
#define __cpu_local_inc(v)	__local_inc(&__get_cpu_var(v))
#define __cpu_local_dec(v)	__local_dec(&__get_cpu_var(v))
#define __cpu_local_add(i, v)	__local_add((i), &__get_cpu_var(v))
#define __cpu_local_sub(i, v)	__local_sub((i), &__get_cpu_var(v))

#endif /* _ALPHA_LOCAL_H */
diff --git a/include/asm-alpha/machvec.h b/include/asm-alpha/machvec.h
new file mode 100644
index 000000000000..ece166a203ec
--- /dev/null
+++ b/include/asm-alpha/machvec.h
@@ -0,0 +1,136 @@
1#ifndef __ALPHA_MACHVEC_H
2#define __ALPHA_MACHVEC_H 1
3
4#include <linux/config.h>
5#include <linux/types.h>
6
7/*
8 * This file gets pulled in by asm/io.h from user space. We don't
9 * want most of this escaping.
10 */
11
12#ifdef __KERNEL__
13
14/* The following structure vectors all of the I/O and IRQ manipulation
15 from the generic kernel to the hardware specific backend. */
16
17struct task_struct;
18struct mm_struct;
19struct pt_regs;
20struct vm_area_struct;
21struct linux_hose_info;
22struct pci_dev;
23struct pci_ops;
24struct pci_controller;
25struct _alpha_agp_info;
26
27struct alpha_machine_vector
28{
29 /* This "belongs" down below with the rest of the runtime
30 variables, but it is convenient for entry.S if these
31 two slots are at the beginning of the struct. */
32 unsigned long hae_cache;
33 unsigned long *hae_register;
34
35 int nr_irqs;
36 int rtc_port;
37 unsigned int max_asn;
38 unsigned long max_isa_dma_address;
39 unsigned long irq_probe_mask;
40 unsigned long iack_sc;
41 unsigned long min_io_address;
42 unsigned long min_mem_address;
43 unsigned long pci_dac_offset;
44
45 void (*mv_pci_tbi)(struct pci_controller *hose,
46 dma_addr_t start, dma_addr_t end);
47
48 unsigned int (*mv_ioread8)(void __iomem *);
49 unsigned int (*mv_ioread16)(void __iomem *);
50 unsigned int (*mv_ioread32)(void __iomem *);
51
52 void (*mv_iowrite8)(u8, void __iomem *);
53 void (*mv_iowrite16)(u16, void __iomem *);
54 void (*mv_iowrite32)(u32, void __iomem *);
55
56 u8 (*mv_readb)(const volatile void __iomem *);
57 u16 (*mv_readw)(const volatile void __iomem *);
58 u32 (*mv_readl)(const volatile void __iomem *);
59 u64 (*mv_readq)(const volatile void __iomem *);
60
61 void (*mv_writeb)(u8, volatile void __iomem *);
62 void (*mv_writew)(u16, volatile void __iomem *);
63 void (*mv_writel)(u32, volatile void __iomem *);
64 void (*mv_writeq)(u64, volatile void __iomem *);
65
66 void __iomem *(*mv_ioportmap)(unsigned long);
67 void __iomem *(*mv_ioremap)(unsigned long, unsigned long);
68 void (*mv_iounmap)(volatile void __iomem *);
69 int (*mv_is_ioaddr)(unsigned long);
70 int (*mv_is_mmio)(const volatile void __iomem *);
71
72 void (*mv_switch_mm)(struct mm_struct *, struct mm_struct *,
73 struct task_struct *);
74 void (*mv_activate_mm)(struct mm_struct *, struct mm_struct *);
75
76 void (*mv_flush_tlb_current)(struct mm_struct *);
77 void (*mv_flush_tlb_current_page)(struct mm_struct * mm,
78 struct vm_area_struct *vma,
79 unsigned long addr);
80
81 void (*update_irq_hw)(unsigned long, unsigned long, int);
82 void (*ack_irq)(unsigned long);
83 void (*device_interrupt)(unsigned long vector, struct pt_regs *regs);
84 void (*machine_check)(u64 vector, u64 la, struct pt_regs *regs);
85
86 void (*smp_callin)(void);
87 void (*init_arch)(void);
88 void (*init_irq)(void);
89 void (*init_rtc)(void);
90 void (*init_pci)(void);
91 void (*kill_arch)(int);
92
93 u8 (*pci_swizzle)(struct pci_dev *, u8 *);
94 int (*pci_map_irq)(struct pci_dev *, u8, u8);
95 struct pci_ops *pci_ops;
96
97 struct _alpha_agp_info *(*agp_info)(void);
98
99 const char *vector_name;
100
101 /* NUMA information */
102 int (*pa_to_nid)(unsigned long);
103 int (*cpuid_to_nid)(int);
104 unsigned long (*node_mem_start)(int);
105 unsigned long (*node_mem_size)(int);
106
107 /* System specific parameters. */
108 union {
109 struct {
110 unsigned long gru_int_req_bits;
111 } cia;
112
113 struct {
114 unsigned long gamma_bias;
115 } t2;
116
117 struct {
118 unsigned int route_tab;
119 } sio;
120 } sys;
121};
122
123extern struct alpha_machine_vector alpha_mv;
124
125#ifdef CONFIG_ALPHA_GENERIC
126extern int alpha_using_srm;
127#else
128#ifdef CONFIG_ALPHA_SRM
129#define alpha_using_srm 1
130#else
131#define alpha_using_srm 0
132#endif
133#endif /* GENERIC */
134
135#endif
136#endif /* __ALPHA_MACHVEC_H */
diff --git a/include/asm-alpha/mc146818rtc.h b/include/asm-alpha/mc146818rtc.h
new file mode 100644
index 000000000000..097703f1c8cb
--- /dev/null
+++ b/include/asm-alpha/mc146818rtc.h
@@ -0,0 +1,27 @@
1/*
2 * Machine dependent access functions for RTC registers.
3 */
4#ifndef __ASM_ALPHA_MC146818RTC_H
5#define __ASM_ALPHA_MC146818RTC_H
6
7#include <asm/io.h>
8
9#ifndef RTC_PORT
10#define RTC_PORT(x) (0x70 + (x))
11#define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */
12#endif
13
14/*
15 * The yet supported machines all access the RTC index register via
16 * an ISA port access but the way to access the date register differs ...
17 */
18#define CMOS_READ(addr) ({ \
19outb_p((addr),RTC_PORT(0)); \
20inb_p(RTC_PORT(1)); \
21})
22#define CMOS_WRITE(val, addr) ({ \
23outb_p((addr),RTC_PORT(0)); \
24outb_p((val),RTC_PORT(1)); \
25})
26
27#endif /* __ASM_ALPHA_MC146818RTC_H */
diff --git a/include/asm-alpha/md.h b/include/asm-alpha/md.h
new file mode 100644
index 000000000000..6c9b8222a4f2
--- /dev/null
+++ b/include/asm-alpha/md.h
@@ -0,0 +1,13 @@
1/* $Id: md.h,v 1.1 1997/12/15 15:11:48 jj Exp $
2 * md.h: High speed xor_block operation for RAID4/5
3 *
4 */
5
6#ifndef __ASM_MD_H
7#define __ASM_MD_H
8
9/* #define HAVE_ARCH_XORBLOCK */
10
11#define MD_XORBLOCK_ALIGNMENT sizeof(long)
12
13#endif /* __ASM_MD_H */
diff --git a/include/asm-alpha/mman.h b/include/asm-alpha/mman.h
new file mode 100644
index 000000000000..eb9c279045ef
--- /dev/null
+++ b/include/asm-alpha/mman.h
@@ -0,0 +1,50 @@
1#ifndef __ALPHA_MMAN_H__
2#define __ALPHA_MMAN_H__
3
4#define PROT_READ 0x1 /* page can be read */
5#define PROT_WRITE 0x2 /* page can be written */
6#define PROT_EXEC 0x4 /* page can be executed */
7#define PROT_SEM 0x8 /* page may be used for atomic ops */
8#define PROT_NONE 0x0 /* page can not be accessed */
9#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
10#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
11
12#define MAP_SHARED 0x01 /* Share changes */
13#define MAP_PRIVATE 0x02 /* Changes are private */
14#define MAP_TYPE 0x0f /* Mask for type of mapping (OSF/1 is _wrong_) */
15#define MAP_FIXED 0x100 /* Interpret addr exactly */
16#define MAP_ANONYMOUS 0x10 /* don't use a file */
17
18/* not used by linux, but here to make sure we don't clash with OSF/1 defines */
19#define _MAP_HASSEMAPHORE 0x0200
20#define _MAP_INHERIT 0x0400
21#define _MAP_UNALIGNED 0x0800
22
23/* These are linux-specific */
24#define MAP_GROWSDOWN 0x01000 /* stack-like segment */
25#define MAP_DENYWRITE 0x02000 /* ETXTBSY */
26#define MAP_EXECUTABLE 0x04000 /* mark it as an executable */
27#define MAP_LOCKED 0x08000 /* lock the mapping */
28#define MAP_NORESERVE 0x10000 /* don't check for reservations */
29#define MAP_POPULATE 0x20000 /* populate (prefault) pagetables */
30#define MAP_NONBLOCK 0x40000 /* do not block on IO */
31
32#define MS_ASYNC 1 /* sync memory asynchronously */
33#define MS_SYNC 2 /* synchronous memory sync */
34#define MS_INVALIDATE 4 /* invalidate the caches */
35
36#define MCL_CURRENT 8192 /* lock all currently mapped pages */
37#define MCL_FUTURE 16384 /* lock all additions to address space */
38
39#define MADV_NORMAL 0 /* no further special treatment */
40#define MADV_RANDOM 1 /* expect random page references */
41#define MADV_SEQUENTIAL 2 /* expect sequential page references */
42#define MADV_WILLNEED 3 /* will need these pages */
43#define MADV_SPACEAVAIL 5 /* ensure resources are available */
44#define MADV_DONTNEED 6 /* don't need these pages */
45
46/* compatibility flags */
47#define MAP_ANON MAP_ANONYMOUS
48#define MAP_FILE 0
49
50#endif /* __ALPHA_MMAN_H__ */
diff --git a/include/asm-alpha/mmu.h b/include/asm-alpha/mmu.h
new file mode 100644
index 000000000000..3dc127779329
--- /dev/null
+++ b/include/asm-alpha/mmu.h
@@ -0,0 +1,7 @@
1#ifndef __ALPHA_MMU_H
2#define __ALPHA_MMU_H
3
4/* The alpha MMU context is one "unsigned long" bitmap per CPU */
5typedef unsigned long mm_context_t[NR_CPUS];
6
7#endif
diff --git a/include/asm-alpha/mmu_context.h b/include/asm-alpha/mmu_context.h
new file mode 100644
index 000000000000..a714d0cdc204
--- /dev/null
+++ b/include/asm-alpha/mmu_context.h
@@ -0,0 +1,261 @@
1#ifndef __ALPHA_MMU_CONTEXT_H
2#define __ALPHA_MMU_CONTEXT_H
3
4/*
5 * get a new mmu context..
6 *
7 * Copyright (C) 1996, Linus Torvalds
8 */
9
10#include <linux/config.h>
11#include <asm/system.h>
12#include <asm/machvec.h>
13#include <asm/compiler.h>
14
15/*
16 * Force a context reload. This is needed when we change the page
17 * table pointer or when we update the ASN of the current process.
18 */
19
20/* Don't get into trouble with dueling __EXTERN_INLINEs. */
21#ifndef __EXTERN_INLINE
22#include <asm/io.h>
23#endif
24
25
26extern inline unsigned long
27__reload_thread(struct pcb_struct *pcb)
28{
29 register unsigned long a0 __asm__("$16");
30 register unsigned long v0 __asm__("$0");
31
32 a0 = virt_to_phys(pcb);
33 __asm__ __volatile__(
34 "call_pal %2 #__reload_thread"
35 : "=r"(v0), "=r"(a0)
36 : "i"(PAL_swpctx), "r"(a0)
37 : "$1", "$22", "$23", "$24", "$25");
38
39 return v0;
40}
41
42
43/*
44 * The maximum ASN's the processor supports. On the EV4 this is 63
45 * but the PAL-code doesn't actually use this information. On the
46 * EV5 this is 127, and EV6 has 255.
47 *
48 * On the EV4, the ASNs are more-or-less useless anyway, as they are
49 * only used as an icache tag, not for TB entries. On the EV5 and EV6,
50 * ASN's also validate the TB entries, and thus make a lot more sense.
51 *
52 * The EV4 ASN's don't even match the architecture manual, ugh. And
53 * I quote: "If a processor implements address space numbers (ASNs),
54 * and the old PTE has the Address Space Match (ASM) bit clear (ASNs
55 * in use) and the Valid bit set, then entries can also effectively be
56 * made coherent by assigning a new, unused ASN to the currently
57 * running process and not reusing the previous ASN before calling the
58 * appropriate PALcode routine to invalidate the translation buffer (TB)".
59 *
60 * In short, the EV4 has a "kind of" ASN capability, but it doesn't actually
61 * work correctly and can thus not be used (explaining the lack of PAL-code
62 * support).
63 */
64#define EV4_MAX_ASN 63
65#define EV5_MAX_ASN 127
66#define EV6_MAX_ASN 255
67
68#ifdef CONFIG_ALPHA_GENERIC
69# define MAX_ASN (alpha_mv.max_asn)
70#else
71# ifdef CONFIG_ALPHA_EV4
72# define MAX_ASN EV4_MAX_ASN
73# elif defined(CONFIG_ALPHA_EV5)
74# define MAX_ASN EV5_MAX_ASN
75# else
76# define MAX_ASN EV6_MAX_ASN
77# endif
78#endif
79
80/*
81 * cpu_last_asn(processor):
82 * 63 0
83 * +-------------+----------------+--------------+
84 * | asn version | this processor | hardware asn |
85 * +-------------+----------------+--------------+
86 */
87
88#ifdef CONFIG_SMP
89#include <asm/smp.h>
90#define cpu_last_asn(cpuid) (cpu_data[cpuid].last_asn)
91#else
92extern unsigned long last_asn;
93#define cpu_last_asn(cpuid) last_asn
94#endif /* CONFIG_SMP */
95
96#define WIDTH_HARDWARE_ASN 8
97#define ASN_FIRST_VERSION (1UL << WIDTH_HARDWARE_ASN)
98#define HARDWARE_ASN_MASK ((1UL << WIDTH_HARDWARE_ASN) - 1)
99
100/*
101 * NOTE! The way this is set up, the high bits of the "asn_cache" (and
102 * the "mm->context") are the ASN _version_ code. A version of 0 is
103 * always considered invalid, so to invalidate another process you only
104 * need to do "p->mm->context = 0".
105 *
106 * If we need more ASN's than the processor has, we invalidate the old
107 * user TLB's (tbiap()) and start a new ASN version. That will automatically
108 * force a new asn for any other processes the next time they want to
109 * run.
110 */
111
112#ifndef __EXTERN_INLINE
113#define __EXTERN_INLINE extern inline
114#define __MMU_EXTERN_INLINE
115#endif
116
117static inline unsigned long
118__get_new_mm_context(struct mm_struct *mm, long cpu)
119{
120 unsigned long asn = cpu_last_asn(cpu);
121 unsigned long next = asn + 1;
122
123 if ((asn & HARDWARE_ASN_MASK) >= MAX_ASN) {
124 tbiap();
125 imb();
126 next = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
127 }
128 cpu_last_asn(cpu) = next;
129 return next;
130}
131
132__EXTERN_INLINE void
133ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
134 struct task_struct *next)
135{
136 /* Check if our ASN is of an older version, and thus invalid. */
137 unsigned long asn;
138 unsigned long mmc;
139 long cpu = smp_processor_id();
140
141#ifdef CONFIG_SMP
142 cpu_data[cpu].asn_lock = 1;
143 barrier();
144#endif
145 asn = cpu_last_asn(cpu);
146 mmc = next_mm->context[cpu];
147 if ((mmc ^ asn) & ~HARDWARE_ASN_MASK) {
148 mmc = __get_new_mm_context(next_mm, cpu);
149 next_mm->context[cpu] = mmc;
150 }
151#ifdef CONFIG_SMP
152 else
153 cpu_data[cpu].need_new_asn = 1;
154#endif
155
156 /* Always update the PCB ASN. Another thread may have allocated
157 a new mm->context (via flush_tlb_mm) without the ASN serial
158 number wrapping. We have no way to detect when this is needed. */
159 next->thread_info->pcb.asn = mmc & HARDWARE_ASN_MASK;
160}
161
162__EXTERN_INLINE void
163ev4_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
164 struct task_struct *next)
165{
166 /* As described, ASN's are broken for TLB usage. But we can
167 optimize for switching between threads -- if the mm is
168 unchanged from current we needn't flush. */
169 /* ??? May not be needed because EV4 PALcode recognizes that
170 ASN's are broken and does a tbiap itself on swpctx, under
171 the "Must set ASN or flush" rule. At least this is true
172 for a 1992 SRM, reports Joseph Martin (jmartin@hlo.dec.com).
173 I'm going to leave this here anyway, just to Be Sure. -- r~ */
174 if (prev_mm != next_mm)
175 tbiap();
176
177 /* Do continue to allocate ASNs, because we can still use them
178 to avoid flushing the icache. */
179 ev5_switch_mm(prev_mm, next_mm, next);
180}
181
182extern void __load_new_mm_context(struct mm_struct *);
183
184#ifdef CONFIG_SMP
185#define check_mmu_context() \
186do { \
187 int cpu = smp_processor_id(); \
188 cpu_data[cpu].asn_lock = 0; \
189 barrier(); \
190 if (cpu_data[cpu].need_new_asn) { \
191 struct mm_struct * mm = current->active_mm; \
192 cpu_data[cpu].need_new_asn = 0; \
193 if (!mm->context[cpu]) \
194 __load_new_mm_context(mm); \
195 } \
196} while(0)
197#else
198#define check_mmu_context() do { } while(0)
199#endif
200
201__EXTERN_INLINE void
202ev5_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
203{
204 __load_new_mm_context(next_mm);
205}
206
207__EXTERN_INLINE void
208ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
209{
210 __load_new_mm_context(next_mm);
211 tbiap();
212}
213
214#define deactivate_mm(tsk,mm) do { } while (0)
215
216#ifdef CONFIG_ALPHA_GENERIC
217# define switch_mm(a,b,c) alpha_mv.mv_switch_mm((a),(b),(c))
218# define activate_mm(x,y) alpha_mv.mv_activate_mm((x),(y))
219#else
220# ifdef CONFIG_ALPHA_EV4
221# define switch_mm(a,b,c) ev4_switch_mm((a),(b),(c))
222# define activate_mm(x,y) ev4_activate_mm((x),(y))
223# else
224# define switch_mm(a,b,c) ev5_switch_mm((a),(b),(c))
225# define activate_mm(x,y) ev5_activate_mm((x),(y))
226# endif
227#endif
228
229extern inline int
230init_new_context(struct task_struct *tsk, struct mm_struct *mm)
231{
232 int i;
233
234 for (i = 0; i < NR_CPUS; i++)
235 if (cpu_online(i))
236 mm->context[i] = 0;
237 if (tsk != current)
238 tsk->thread_info->pcb.ptbr
239 = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
240 return 0;
241}
242
243extern inline void
244destroy_context(struct mm_struct *mm)
245{
246 /* Nothing to do. */
247}
248
249static inline void
250enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
251{
252 tsk->thread_info->pcb.ptbr
253 = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
254}
255
256#ifdef __MMU_EXTERN_INLINE
257#undef __EXTERN_INLINE
258#undef __MMU_EXTERN_INLINE
259#endif
260
261#endif /* __ALPHA_MMU_CONTEXT_H */
diff --git a/include/asm-alpha/mmzone.h b/include/asm-alpha/mmzone.h
new file mode 100644
index 000000000000..726c150dcbe4
--- /dev/null
+++ b/include/asm-alpha/mmzone.h
@@ -0,0 +1,131 @@
1/*
2 * Written by Kanoj Sarcar (kanoj@sgi.com) Aug 99
3 * Adapted for the alpha wildfire architecture Jan 2001.
4 */
5#ifndef _ASM_MMZONE_H_
6#define _ASM_MMZONE_H_
7
8#include <linux/config.h>
9#include <asm/smp.h>
10
11struct bootmem_data_t; /* stupid forward decl. */
12
13/*
14 * Following are macros that are specific to this numa platform.
15 */
16
17extern pg_data_t node_data[];
18
19#define alpha_pa_to_nid(pa) \
20 (alpha_mv.pa_to_nid \
21 ? alpha_mv.pa_to_nid(pa) \
22 : (0))
23#define node_mem_start(nid) \
24 (alpha_mv.node_mem_start \
25 ? alpha_mv.node_mem_start(nid) \
26 : (0UL))
27#define node_mem_size(nid) \
28 (alpha_mv.node_mem_size \
29 ? alpha_mv.node_mem_size(nid) \
30 : ((nid) ? (0UL) : (~0UL)))
31
32#define pa_to_nid(pa) alpha_pa_to_nid(pa)
33#define NODE_DATA(nid) (&node_data[(nid)])
34
35#define node_localnr(pfn, nid) ((pfn) - NODE_DATA(nid)->node_start_pfn)
36
37#if 1
38#define PLAT_NODE_DATA_LOCALNR(p, n) \
39 (((p) >> PAGE_SHIFT) - PLAT_NODE_DATA(n)->gendata.node_start_pfn)
40#else
41static inline unsigned long
42PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
43{
44 unsigned long temp;
45 temp = p >> PAGE_SHIFT;
46 return temp - PLAT_NODE_DATA(n)->gendata.node_start_pfn;
47}
48#endif
49
50#ifdef CONFIG_DISCONTIGMEM
51
52/*
53 * Following are macros that each numa implementation must define.
54 */
55
56/*
57 * Given a kernel address, find the home node of the underlying memory.
58 */
59#define kvaddr_to_nid(kaddr) pa_to_nid(__pa(kaddr))
60#define node_mem_map(nid) (NODE_DATA(nid)->node_mem_map)
61#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
62
63#define local_mapnr(kvaddr) \
64 ((__pa(kvaddr) >> PAGE_SHIFT) - node_start_pfn(kvaddr_to_nid(kvaddr)))
65
66/*
67 * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory
68 * and returns the kaddr corresponding to first physical page in the
69 * node's mem_map.
70 */
71#define LOCAL_BASE_ADDR(kaddr) \
72 ((unsigned long)__va(NODE_DATA(kvaddr_to_nid(kaddr))->node_start_pfn \
73 << PAGE_SHIFT))
74
75/* XXX: FIXME -- wli */
76#define kern_addr_valid(kaddr) (0)
77
78#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
79
80#define VALID_PAGE(page) (((page) - mem_map) < max_mapnr)
81
82#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> 32))
83#define pte_pfn(pte) (pte_val(pte) >> 32)
84
85#define mk_pte(page, pgprot) \
86({ \
87 pte_t pte; \
88 unsigned long pfn; \
89 \
90 pfn = ((unsigned long)((page)-page_zone(page)->zone_mem_map)) << 32; \
91 pfn += page_zone(page)->zone_start_pfn << 32; \
92 pte_val(pte) = pfn | pgprot_val(pgprot); \
93 \
94 pte; \
95})
96
97#define pte_page(x) \
98({ \
99 unsigned long kvirt; \
100 struct page * __xx; \
101 \
102 kvirt = (unsigned long)__va(pte_val(x) >> (32-PAGE_SHIFT)); \
103 __xx = virt_to_page(kvirt); \
104 \
105 __xx; \
106})
107
108#define pfn_to_page(pfn) \
109({ \
110 unsigned long kaddr = (unsigned long)__va((pfn) << PAGE_SHIFT); \
111 (node_mem_map(kvaddr_to_nid(kaddr)) + local_mapnr(kaddr)); \
112})
113
114#define page_to_pfn(page) \
115 ((page) - page_zone(page)->zone_mem_map + \
116 (page_zone(page)->zone_start_pfn))
117
118#define page_to_pa(page) \
119 ((( (page) - page_zone(page)->zone_mem_map ) \
120 + page_zone(page)->zone_start_pfn) << PAGE_SHIFT)
121
122#define pfn_to_nid(pfn) pa_to_nid(((u64)(pfn) << PAGE_SHIFT))
123#define pfn_valid(pfn) \
124 (((pfn) - node_start_pfn(pfn_to_nid(pfn))) < \
125 node_spanned_pages(pfn_to_nid(pfn))) \
126
127#define virt_addr_valid(kaddr) pfn_valid((__pa(kaddr) >> PAGE_SHIFT))
128
129#endif /* CONFIG_DISCONTIGMEM */
130
131#endif /* _ASM_MMZONE_H_ */
diff --git a/include/asm-alpha/module.h b/include/asm-alpha/module.h
new file mode 100644
index 000000000000..7b63743c534a
--- /dev/null
+++ b/include/asm-alpha/module.h
@@ -0,0 +1,23 @@
1#ifndef _ALPHA_MODULE_H
2#define _ALPHA_MODULE_H
3
4struct mod_arch_specific
5{
6 unsigned int gotsecindex;
7};
8
9#define Elf_Sym Elf64_Sym
10#define Elf_Shdr Elf64_Shdr
11#define Elf_Ehdr Elf64_Ehdr
12#define Elf_Phdr Elf64_Phdr
13#define Elf_Dyn Elf64_Dyn
14#define Elf_Rel Elf64_Rel
15#define Elf_Rela Elf64_Rela
16
17#define ARCH_SHF_SMALL SHF_ALPHA_GPREL
18
19#ifdef MODULE
20asm(".section .got,\"aws\",@progbits; .align 3; .previous");
21#endif
22
23#endif /*_ALPHA_MODULE_H*/
diff --git a/include/asm-alpha/msgbuf.h b/include/asm-alpha/msgbuf.h
new file mode 100644
index 000000000000..98496501a2bb
--- /dev/null
+++ b/include/asm-alpha/msgbuf.h
@@ -0,0 +1,27 @@
1#ifndef _ALPHA_MSGBUF_H
2#define _ALPHA_MSGBUF_H
3
4/*
5 * The msqid64_ds structure for alpha architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space is left for:
10 * - 2 miscellaneous 64-bit values
11 */
12
13struct msqid64_ds {
14 struct ipc64_perm msg_perm;
15 __kernel_time_t msg_stime; /* last msgsnd time */
16 __kernel_time_t msg_rtime; /* last msgrcv time */
17 __kernel_time_t msg_ctime; /* last change time */
18 unsigned long msg_cbytes; /* current number of bytes on queue */
19 unsigned long msg_qnum; /* number of messages in queue */
20 unsigned long msg_qbytes; /* max number of bytes on queue */
21 __kernel_pid_t msg_lspid; /* pid of last msgsnd */
22 __kernel_pid_t msg_lrpid; /* last receive pid */
23 unsigned long __unused1;
24 unsigned long __unused2;
25};
26
27#endif /* _ALPHA_MSGBUF_H */
diff --git a/include/asm-alpha/namei.h b/include/asm-alpha/namei.h
new file mode 100644
index 000000000000..5cc9bb39499d
--- /dev/null
+++ b/include/asm-alpha/namei.h
@@ -0,0 +1,17 @@
1/* $Id: namei.h,v 1.1 1996/12/13 14:48:21 jj Exp $
2 * linux/include/asm-alpha/namei.h
3 *
4 * Included from linux/fs/namei.c
5 */
6
7#ifndef __ALPHA_NAMEI_H
8#define __ALPHA_NAMEI_H
9
10/* This dummy routine maybe changed to something useful
11 * for /usr/gnemul/ emulation stuff.
12 * Look at asm-sparc/namei.h for details.
13 */
14
15#define __emul_prefix() NULL
16
17#endif /* __ALPHA_NAMEI_H */
diff --git a/include/asm-alpha/numnodes.h b/include/asm-alpha/numnodes.h
new file mode 100644
index 000000000000..cd425827e4f3
--- /dev/null
+++ b/include/asm-alpha/numnodes.h
@@ -0,0 +1,7 @@
1#ifndef _ASM_MAX_NUMNODES_H
2#define _ASM_MAX_NUMNODES_H
3
4/* Max 128 Nodes - Marvel */
5#define NODES_SHIFT 7
6
7#endif /* _ASM_MAX_NUMNODES_H */
diff --git a/include/asm-alpha/page.h b/include/asm-alpha/page.h
new file mode 100644
index 000000000000..0577daffc720
--- /dev/null
+++ b/include/asm-alpha/page.h
@@ -0,0 +1,115 @@
1#ifndef _ALPHA_PAGE_H
2#define _ALPHA_PAGE_H
3
4#include <linux/config.h>
5#include <asm/pal.h>
6
7/* PAGE_SHIFT determines the page size */
8#define PAGE_SHIFT 13
9#define PAGE_SIZE (1UL << PAGE_SHIFT)
10#define PAGE_MASK (~(PAGE_SIZE-1))
11
12#ifdef __KERNEL__
13
14#ifndef __ASSEMBLY__
15
16#define STRICT_MM_TYPECHECKS
17
18extern void clear_page(void *page);
19#define clear_user_page(page, vaddr, pg) clear_page(page)
20
21#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vmaddr)
22#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
23
24extern void copy_page(void * _to, void * _from);
25#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
26
27#ifdef STRICT_MM_TYPECHECKS
28/*
29 * These are used to make use of C type-checking..
30 */
31typedef struct { unsigned long pte; } pte_t;
32typedef struct { unsigned long pmd; } pmd_t;
33typedef struct { unsigned long pgd; } pgd_t;
34typedef struct { unsigned long pgprot; } pgprot_t;
35
36#define pte_val(x) ((x).pte)
37#define pmd_val(x) ((x).pmd)
38#define pgd_val(x) ((x).pgd)
39#define pgprot_val(x) ((x).pgprot)
40
41#define __pte(x) ((pte_t) { (x) } )
42#define __pmd(x) ((pmd_t) { (x) } )
43#define __pgd(x) ((pgd_t) { (x) } )
44#define __pgprot(x) ((pgprot_t) { (x) } )
45
46#else
47/*
48 * .. while these make it easier on the compiler
49 */
50typedef unsigned long pte_t;
51typedef unsigned long pmd_t;
52typedef unsigned long pgd_t;
53typedef unsigned long pgprot_t;
54
55#define pte_val(x) (x)
56#define pmd_val(x) (x)
57#define pgd_val(x) (x)
58#define pgprot_val(x) (x)
59
60#define __pte(x) (x)
61#define __pgd(x) (x)
62#define __pgprot(x) (x)
63
64#endif /* STRICT_MM_TYPECHECKS */
65
66/* Pure 2^n version of get_order */
67extern __inline__ int get_order(unsigned long size)
68{
69 int order;
70
71 size = (size-1) >> (PAGE_SHIFT-1);
72 order = -1;
73 do {
74 size >>= 1;
75 order++;
76 } while (size);
77 return order;
78}
79
80#ifdef USE_48_BIT_KSEG
81#define PAGE_OFFSET 0xffff800000000000UL
82#else
83#define PAGE_OFFSET 0xfffffc0000000000UL
84#endif
85
86#else
87
88#ifdef USE_48_BIT_KSEG
89#define PAGE_OFFSET 0xffff800000000000
90#else
91#define PAGE_OFFSET 0xfffffc0000000000
92#endif
93
94#endif /* !__ASSEMBLY__ */
95
96/* to align the pointer to the (next) page boundary */
97#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
98
99#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
100#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
101#ifndef CONFIG_DISCONTIGMEM
102#define pfn_to_page(pfn) (mem_map + (pfn))
103#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
104#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
105
106#define pfn_valid(pfn) ((pfn) < max_mapnr)
107#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
108#endif /* CONFIG_DISCONTIGMEM */
109
110#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
111 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
112
113#endif /* __KERNEL__ */
114
115#endif /* _ALPHA_PAGE_H */
diff --git a/include/asm-alpha/pal.h b/include/asm-alpha/pal.h
new file mode 100644
index 000000000000..9b4ba0d6f00b
--- /dev/null
+++ b/include/asm-alpha/pal.h
@@ -0,0 +1,51 @@
1#ifndef __ALPHA_PAL_H
2#define __ALPHA_PAL_H
3
4/*
5 * Common PAL-code
6 */
7#define PAL_halt 0
8#define PAL_cflush 1
9#define PAL_draina 2
10#define PAL_bpt 128
11#define PAL_bugchk 129
12#define PAL_chmk 131
13#define PAL_callsys 131
14#define PAL_imb 134
15#define PAL_rduniq 158
16#define PAL_wruniq 159
17#define PAL_gentrap 170
18#define PAL_nphalt 190
19
20/*
21 * VMS specific PAL-code
22 */
23#define PAL_swppal 10
24#define PAL_mfpr_vptb 41
25
26/*
27 * OSF specific PAL-code
28 */
29#define PAL_cserve 9
30#define PAL_wripir 13
31#define PAL_rdmces 16
32#define PAL_wrmces 17
33#define PAL_wrfen 43
34#define PAL_wrvptptr 45
35#define PAL_jtopal 46
36#define PAL_swpctx 48
37#define PAL_wrval 49
38#define PAL_rdval 50
39#define PAL_tbi 51
40#define PAL_wrent 52
41#define PAL_swpipl 53
42#define PAL_rdps 54
43#define PAL_wrkgp 55
44#define PAL_wrusp 56
45#define PAL_wrperfmon 57
46#define PAL_rdusp 58
47#define PAL_whami 60
48#define PAL_retsys 61
49#define PAL_rti 63
50
51#endif /* __ALPHA_PAL_H */
diff --git a/include/asm-alpha/param.h b/include/asm-alpha/param.h
new file mode 100644
index 000000000000..3ed0b3b02e52
--- /dev/null
+++ b/include/asm-alpha/param.h
@@ -0,0 +1,32 @@
1#ifndef _ASM_ALPHA_PARAM_H
2#define _ASM_ALPHA_PARAM_H
3
4/* ??? Gross. I don't want to parameterize this, and supposedly the
5 hardware ignores reprogramming. We also need userland buy-in to the
6 change in HZ, since this is visible in the wait4 resources etc. */
7
8#include <linux/config.h>
9
10#ifndef HZ
11# ifndef CONFIG_ALPHA_RAWHIDE
12# define HZ 1024
13# else
14# define HZ 1200
15# endif
16#endif
17
18#define USER_HZ HZ
19
20#define EXEC_PAGESIZE 8192
21
22#ifndef NOGROUP
23#define NOGROUP (-1)
24#endif
25
26#define MAXHOSTNAMELEN 64 /* max length of hostname */
27
28#ifdef __KERNEL__
29# define CLOCKS_PER_SEC HZ /* frequency at which times() counts */
30#endif
31
32#endif /* _ASM_ALPHA_PARAM_H */
diff --git a/include/asm-alpha/parport.h b/include/asm-alpha/parport.h
new file mode 100644
index 000000000000..c5ee7cbb2fcd
--- /dev/null
+++ b/include/asm-alpha/parport.h
@@ -0,0 +1,18 @@
1/*
2 * parport.h: platform-specific PC-style parport initialisation
3 *
4 * Copyright (C) 1999, 2000 Tim Waugh <tim@cyberelk.demon.co.uk>
5 *
6 * This file should only be included by drivers/parport/parport_pc.c.
7 */
8
9#ifndef _ASM_AXP_PARPORT_H
10#define _ASM_AXP_PARPORT_H 1
11
12static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma);
13static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
14{
15 return parport_pc_find_isa_ports (autoirq, autodma);
16}
17
18#endif /* !(_ASM_AXP_PARPORT_H) */
diff --git a/include/asm-alpha/pci.h b/include/asm-alpha/pci.h
new file mode 100644
index 000000000000..0c7b57bc043a
--- /dev/null
+++ b/include/asm-alpha/pci.h
@@ -0,0 +1,261 @@
1#ifndef __ALPHA_PCI_H
2#define __ALPHA_PCI_H
3
4#ifdef __KERNEL__
5
6#include <linux/spinlock.h>
7#include <asm/scatterlist.h>
8#include <asm/machvec.h>
9
10/*
11 * The following structure is used to manage multiple PCI busses.
12 */
13
14struct pci_dev;
15struct pci_bus;
16struct resource;
17struct pci_iommu_arena;
18struct page;
19
20/* A controller. Used to manage multiple PCI busses. */
21
22struct pci_controller {
23	struct pci_controller *next;
24	struct pci_bus *bus;
25	struct resource *io_space;
26	struct resource *mem_space;
27
28	/* The following are for reporting to userland. The invariant is
29	 that if we report a BWX-capable dense memory, we do not report
30	 a sparse memory at all, even if it exists. */
31	unsigned long sparse_mem_base;
32	unsigned long dense_mem_base;
33	unsigned long sparse_io_base;
34	unsigned long dense_io_base;
35
36	/* This one's for the kernel only. It's in KSEG somewhere. */
37	unsigned long config_space_base;
38
39	unsigned int index;
40	/* For compatibility with current (as of July 2003) pciutils
41	 and XFree86. Eventually will be removed. */
42	unsigned int need_domain_info;
43
	/* IOMMU scatter-gather arenas: one for PCI DMA, one for ISA DMA. */
44	struct pci_iommu_arena *sg_pci;
45	struct pci_iommu_arena *sg_isa;
46
47	void *sysdata;
48};
49
50/* Override the logic in pci_scan_bus for skipping already-configured
51 bus numbers. */
52
53#define pcibios_assign_all_busses() 1
54#define pcibios_scan_all_fns(a, b) 0
55
/* Minimum legal I/O and memory addresses come from the machine vector. */
56#define PCIBIOS_MIN_IO alpha_mv.min_io_address
57#define PCIBIOS_MIN_MEM alpha_mv.min_mem_address
58
59extern void pcibios_set_master(struct pci_dev *dev);
60
61extern inline void pcibios_penalize_isa_irq(int irq)
62{
63	/* We don't do dynamic PCI IRQ allocation */
64}
65
66/* IOMMU controls. */
67
68/* The PCI address space does not equal the physical memory address space.
69 The networking and block device layers use this boolean for bounce buffer
70 decisions. */
71#define PCI_DMA_BUS_IS_PHYS 0
72
73/* Allocate and map kernel buffer using consistent mode DMA for PCI
74 device. Returns non-NULL cpu-view pointer to the buffer if
75 successful and sets *DMA_ADDRP to the pci side dma address as well,
76 else DMA_ADDRP is undefined. */
77
78extern void *pci_alloc_consistent(struct pci_dev *, size_t, dma_addr_t *);
79
80/* Free and unmap a consistent DMA buffer. CPU_ADDR and DMA_ADDR must
81 be values that were returned from pci_alloc_consistent. SIZE must
82 be the same as what as passed into pci_alloc_consistent.
83 References to the memory and mappings associated with CPU_ADDR or
84 DMA_ADDR past this call are illegal. */
85
86extern void pci_free_consistent(struct pci_dev *, size_t, void *, dma_addr_t);
87
88/* Map a single buffer of the indicate size for PCI DMA in streaming mode.
89 The 32-bit PCI bus mastering address to use is returned. Once the device
90 is given the dma address, the device owns this memory until either
91 pci_unmap_single or pci_dma_sync_single_for_cpu is performed. */
92
93extern dma_addr_t pci_map_single(struct pci_dev *, void *, size_t, int);
94
95/* Likewise, but for a page instead of an address. */
96extern dma_addr_t pci_map_page(struct pci_dev *, struct page *,
97			 unsigned long, size_t, int);
98
99/* Test for pci_map_single or pci_map_page having generated an error. */
100
101static inline int
102pci_dma_mapping_error(dma_addr_t dma_addr)
103{
	/* A returned bus address of 0 is the error marker for the map
	   routines above. */
104	return dma_addr == 0;
105}
106
107/* Unmap a single streaming mode DMA translation. The DMA_ADDR and
108 SIZE must match what was provided for in a previous pci_map_single
109 call. All other usages are undefined. After this call, reads by
110 the cpu to the buffer are guaranteed to see whatever the device
111 wrote there. */
112
113extern void pci_unmap_single(struct pci_dev *, dma_addr_t, size_t, int);
114extern void pci_unmap_page(struct pci_dev *, dma_addr_t, size_t, int);
115
116/* pci_unmap_{single,page} is not a nop, thus... */
117#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
118 dma_addr_t ADDR_NAME;
119#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
120 __u32 LEN_NAME;
121#define pci_unmap_addr(PTR, ADDR_NAME) \
122 ((PTR)->ADDR_NAME)
123#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
124 (((PTR)->ADDR_NAME) = (VAL))
125#define pci_unmap_len(PTR, LEN_NAME) \
126 ((PTR)->LEN_NAME)
127#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
128 (((PTR)->LEN_NAME) = (VAL))
129
130/* Map a set of buffers described by scatterlist in streaming mode for
131 PCI DMA. This is the scatter-gather version of the above
132 pci_map_single interface. Here the scatter gather list elements
133 are each tagged with the appropriate PCI dma address and length.
134 They are obtained via sg_dma_{address,length}(SG).
135
136 NOTE: An implementation may be able to use a smaller number of DMA
137 address/length pairs than there are SG table elements. (for
138 example via virtual mapping capabilities) The routine returns the
139 number of addr/length pairs actually used, at most nents.
140
141 Device ownership issues as mentioned above for pci_map_single are
142 the same here. */
143
144extern int pci_map_sg(struct pci_dev *, struct scatterlist *, int, int);
145
146/* Unmap a set of streaming mode DMA translations. Again, cpu read
147 rules concerning calls here are the same as for pci_unmap_single()
148 above. */
149
150extern void pci_unmap_sg(struct pci_dev *, struct scatterlist *, int, int);
151
152/* Make physical memory consistent for a single streaming mode DMA
153 translation after a transfer and device currently has ownership
154 of the buffer.
155
156 If you perform a pci_map_single() but wish to interrogate the
157 buffer using the cpu, yet do not wish to teardown the PCI dma
158 mapping, you must call this function before doing so. At the next
159 point you give the PCI dma address back to the card, you must first
160 perform a pci_dma_sync_for_device, and then the device again owns
161 the buffer. */
162
/* All four sync routines are no-ops here: streaming mappings on Alpha
   need no explicit CPU/device cache synchronization. */
163static inline void
164pci_dma_sync_single_for_cpu(struct pci_dev *dev, dma_addr_t dma_addr,
165 long size, int direction)
166{
167	/* Nothing to do. */
168}
169
170static inline void
171pci_dma_sync_single_for_device(struct pci_dev *dev, dma_addr_t dma_addr,
172 size_t size, int direction)
173{
174	/* Nothing to do. */
175}
176
177/* Make physical memory consistent for a set of streaming mode DMA
178 translations after a transfer. The same as pci_dma_sync_single_*
179 but for a scatter-gather list, same rules and usage. */
180
181static inline void
182pci_dma_sync_sg_for_cpu(struct pci_dev *dev, struct scatterlist *sg,
183 int nents, int direction)
184{
185	/* Nothing to do. */
186}
187
188static inline void
189pci_dma_sync_sg_for_device(struct pci_dev *dev, struct scatterlist *sg,
190 int nents, int direction)
191{
192	/* Nothing to do. */
193}
194
195/* Return whether the given PCI device DMA address mask can
196 be supported properly. For example, if your device can
197 only drive the low 24-bits during PCI bus mastering, then
198 you would pass 0x00ffffff as the mask to this function. */
199
200extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
201
202/* True if the machine supports DAC addressing, and DEV can
203 make use of it given MASK. */
204extern int pci_dac_dma_supported(struct pci_dev *hwdev, u64 mask);
205
206/* Convert to/from DAC dma address and struct page. */
207extern dma64_addr_t pci_dac_page_to_dma(struct pci_dev *, struct page *,
208 unsigned long, int);
209extern struct page *pci_dac_dma_to_page(struct pci_dev *, dma64_addr_t);
210extern unsigned long pci_dac_dma_to_offset(struct pci_dev *, dma64_addr_t);
211
212static inline void
213pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr,
214 size_t len, int direction)
215{
216	/* Nothing to do. */
217}
218
219static inline void
220pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr,
221 size_t len, int direction)
222{
223	/* Nothing to do. */
224}
225
226/* TODO: integrate with include/asm-generic/pci.h ? */
/* Legacy IDE uses the standard ISA IRQs: 14 (primary), 15 (secondary). */
227static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
228{
229	return channel ? 15 : 14;
230}
231
232extern void pcibios_resource_to_bus(struct pci_dev *, struct pci_bus_region *,
233 struct resource *);
234
/* The PCI domain number is simply the controller's index. */
235#define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index
236
237static inline int pci_proc_domain(struct pci_bus *bus)
238{
239	struct pci_controller *hose = bus->sysdata;
240	return hose->need_domain_info;
241}
242
243static inline void
244pcibios_add_platform_entries(struct pci_dev *dev)
245{
246}
247
248struct pci_dev *alpha_gendev_to_pci(struct device *dev);
249
250#endif /* __KERNEL__ */
251
252/* Values for the `which' argument to sys_pciconfig_iobase. */
253#define IOBASE_HOSE 0
254#define IOBASE_SPARSE_MEM 1
255#define IOBASE_DENSE_MEM 2
256#define IOBASE_SPARSE_IO 3
257#define IOBASE_DENSE_IO 4
258#define IOBASE_ROOT_BUS 5
259#define IOBASE_FROM_HOSE 0x10000
260
261#endif /* __ALPHA_PCI_H */
diff --git a/include/asm-alpha/percpu.h b/include/asm-alpha/percpu.h
new file mode 100644
index 000000000000..48348fe34c19
--- /dev/null
+++ b/include/asm-alpha/percpu.h
@@ -0,0 +1,6 @@
1#ifndef __ALPHA_PERCPU_H
2#define __ALPHA_PERCPU_H
3
/* No Alpha-specific per-CPU data handling; the generic version suffices. */
4#include <asm-generic/percpu.h>
5
6#endif /* __ALPHA_PERCPU_H */
diff --git a/include/asm-alpha/pgalloc.h b/include/asm-alpha/pgalloc.h
new file mode 100644
index 000000000000..308475642913
--- /dev/null
+++ b/include/asm-alpha/pgalloc.h
@@ -0,0 +1,78 @@
1#ifndef _ALPHA_PGALLOC_H
2#define _ALPHA_PGALLOC_H
3
4#include <linux/config.h>
5#include <linux/mm.h>
6#include <linux/mmzone.h>
7
8/*
9 * Allocate and free page tables. The xxx_kernel() versions are
10 * used to allocate a kernel page table - this turns on ASN bits
11 * if any.
12 */
13
/* The user-level variant gets the pte as a struct page: convert it to
   its KSEG virtual address before installing it in the pmd. */
14static inline void
15pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
16{
17	pmd_set(pmd, (pte_t *)(page_to_pa(pte) + PAGE_OFFSET));
18}
19
/* Kernel variant: the pte is already a kernel virtual pointer. */
20static inline void
21pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
22{
23	pmd_set(pmd, pte);
24}
25
26static inline void
27pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
28{
29	pgd_set(pgd, pmd);
30}
31
32extern pgd_t *pgd_alloc(struct mm_struct *mm);
33
34static inline void
35pgd_free(pgd_t *pgd)
36{
37	free_page((unsigned long)pgd);
38}
39
/* One zeroed page per pmd table (__GFP_ZERO => all entries start none). */
40static inline pmd_t *
41pmd_alloc_one(struct mm_struct *mm, unsigned long address)
42{
43	pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
44	return ret;
45}
46
47static inline void
48pmd_free(pmd_t *pmd)
49{
50	free_page((unsigned long)pmd);
51}
52
53extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
54
55static inline void
56pte_free_kernel(pte_t *pte)
57{
58	free_page((unsigned long)pte);
59}
60
/* User pte tables reuse the kernel allocator and hand back the
   corresponding struct page (or NULL on allocation failure). */
61static inline struct page *
62pte_alloc_one(struct mm_struct *mm, unsigned long addr)
63{
64	pte_t *pte = pte_alloc_one_kernel(mm, addr);
65	if (pte)
66		return virt_to_page(pte);
67	return NULL;
68}
69
70static inline void
71pte_free(struct page *page)
72{
73	__free_page(page);
74}
75
76#define check_pgt_cache() do { } while (0)
77
78#endif /* _ALPHA_PGALLOC_H */
diff --git a/include/asm-alpha/pgtable.h b/include/asm-alpha/pgtable.h
new file mode 100644
index 000000000000..faae196d8377
--- /dev/null
+++ b/include/asm-alpha/pgtable.h
@@ -0,0 +1,369 @@
1#ifndef _ALPHA_PGTABLE_H
2#define _ALPHA_PGTABLE_H
3
4#include <asm-generic/4level-fixup.h>
5
6/*
7 * This file contains the functions and defines necessary to modify and use
8 * the Alpha page table tree.
9 *
10 * This hopefully works with any standard Alpha page-size, as defined
11 * in <asm/page.h> (currently 8192).
12 */
13#include <linux/config.h>
14#include <linux/mmzone.h>
15
16#include <asm/page.h>
17#include <asm/processor.h>	/* For TASK_SIZE */
18#include <asm/machvec.h>
19
20/* Certain architectures need to do special things when PTEs
21 * within a page table are directly modified. Thus, the following
22 * hook is made available.
23 */
24#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
25#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
26
27/* PMD_SHIFT determines the size of the area a second-level page table can map */
28#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3))
29#define PMD_SIZE (1UL << PMD_SHIFT)
30#define PMD_MASK (~(PMD_SIZE-1))
31
32/* PGDIR_SHIFT determines what a third-level page table entry can map */
33#define PGDIR_SHIFT (PAGE_SHIFT + 2*(PAGE_SHIFT-3))
34#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
35#define PGDIR_MASK (~(PGDIR_SIZE-1))
36
37/*
38 * Entries per page directory level: the Alpha is three-level, with
39 * all levels having a one-page page table.
40 */
/* Entries are 8 bytes, hence PAGE_SIZE/8 per one-page table. */
41#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3))
42#define PTRS_PER_PMD (1UL << (PAGE_SHIFT-3))
43#define PTRS_PER_PGD (1UL << (PAGE_SHIFT-3))
44#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
45#define FIRST_USER_PGD_NR 0
46
47/* Number of pointers that fit on a page: this will go away. */
48#define PTRS_PER_PAGE (1UL << (PAGE_SHIFT-3))
49
50#ifdef CONFIG_ALPHA_LARGE_VMALLOC
51#define VMALLOC_START 0xfffffe0000000000
52#else
53#define VMALLOC_START (-2*PGDIR_SIZE)
54#endif
55#define VMALLOC_END (-PGDIR_SIZE)
56
57/*
58 * OSF/1 PAL-code-imposed page table bits
59 */
60#define _PAGE_VALID 0x0001
61#define _PAGE_FOR 0x0002 /* used for page protection (fault on read) */
62#define _PAGE_FOW 0x0004 /* used for page protection (fault on write) */
63#define _PAGE_FOE 0x0008 /* used for page protection (fault on exec) */
64#define _PAGE_ASM 0x0010
65#define _PAGE_KRE 0x0100 /* xxx - see below on the "accessed" bit */
66#define _PAGE_URE 0x0200 /* xxx */
67#define _PAGE_KWE 0x1000 /* used to do the dirty bit in software */
68#define _PAGE_UWE 0x2000 /* used to do the dirty bit in software */
69
70/* .. and these are ours ... */
71#define _PAGE_DIRTY 0x20000
72#define _PAGE_ACCESSED 0x40000
73#define _PAGE_FILE 0x80000 /* set:pagecache, unset:swap */
74
75/*
76 * NOTE! The "accessed" bit isn't necessarily exact: it can be kept exactly
77 * by software (use the KRE/URE/KWE/UWE bits appropriately), but I'll fake it.
78 * Under Linux/AXP, the "accessed" bit just means "read", and I'll just use
79 * the KRE/URE bits to watch for it. That way we don't need to overload the
80 * KWE/UWE bits with both handling dirty and accessed.
81 *
82 * Note that the kernel uses the accessed bit just to check whether to page
83 * out a page or not, so it doesn't have to be exact anyway.
84 */
85
86#define __DIRTY_BITS (_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE)
87#define __ACCESS_BITS (_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE)
88
89#define _PFN_MASK 0xFFFFFFFF00000000UL /* page frame number: high 32 bits of a PTE */
90
91#define _PAGE_TABLE (_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS)
92#define _PAGE_CHG_MASK (_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS)
93
94/*
95 * All the normal masks have the "page accessed" bits on, as any time they are used,
96 * the page is accessed. They are cleared only by the page-out routines
97 */
98#define PAGE_NONE __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE)
99#define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
100#define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
101#define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
102#define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
103
104#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
105
/* _PAGE_P (private/__Pxxx entries) always forces FOW so the first write
   faults; _PAGE_S (shared/__Sxxx entries) takes the bits as given. */
106#define _PAGE_P(x) _PAGE_NORMAL((x) | (((x) & _PAGE_FOW)?0:_PAGE_FOW))
107#define _PAGE_S(x) _PAGE_NORMAL(x)
108
109/*
110 * The hardware can handle write-only mappings, but as the Alpha
111 * architecture does byte-wide writes with a read-modify-write
112 * sequence, it's not practical to have write-without-read privs.
113 * Thus the "-w- -> rw-" and "-wx -> rwx" mapping here (and in
114 * arch/alpha/mm/fault.c)
115 */
116	/* xwr */
117#define __P000 _PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
118#define __P001 _PAGE_P(_PAGE_FOE | _PAGE_FOW)
119#define __P010 _PAGE_P(_PAGE_FOE)
120#define __P011 _PAGE_P(_PAGE_FOE)
121#define __P100 _PAGE_P(_PAGE_FOW | _PAGE_FOR)
122#define __P101 _PAGE_P(_PAGE_FOW)
123#define __P110 _PAGE_P(0)
124#define __P111 _PAGE_P(0)
125
126#define __S000 _PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
127#define __S001 _PAGE_S(_PAGE_FOE | _PAGE_FOW)
128#define __S010 _PAGE_S(_PAGE_FOE)
129#define __S011 _PAGE_S(_PAGE_FOE)
130#define __S100 _PAGE_S(_PAGE_FOW | _PAGE_FOR)
131#define __S101 _PAGE_S(_PAGE_FOW)
132#define __S110 _PAGE_S(0)
133#define __S111 _PAGE_S(0)
134
135/*
136 * BAD_PAGETABLE is used when we need a bogus page-table, while
137 * BAD_PAGE is used for a bogus page.
138 *
139 * ZERO_PAGE is a global shared page that is always zero: used
140 * for zero-mapped memory areas etc..
141 */
142extern pte_t __bad_page(void);
143extern pmd_t * __bad_pagetable(void);
144
145extern unsigned long __zero_page(void);
146
147#define BAD_PAGETABLE __bad_pagetable()
148#define BAD_PAGE __bad_page()
149#define ZERO_PAGE(vaddr) (virt_to_page(ZERO_PGE))
150
151/* number of bits that fit into a memory pointer */
152#define BITS_PER_PTR (8*sizeof(unsigned long))
153
154/* to align the pointer to a pointer address */
155#define PTR_MASK (~(sizeof(void*)-1))
156
157/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
158#define SIZEOF_PTR_LOG2 3
159
160/* to find an entry in a page-table */
161#define PAGE_PTR(address) \
162 ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
163
164/*
165 * On certain platforms whose physical address space can overlap KSEG,
166 * namely EV6 and above, we must re-twiddle the physaddr to restore the
167 * correct high-order bits.
168 *
169 * This is extremely confusing until you realize that this is actually
170 * just working around a userspace bug. The X server was intending to
171 * provide the physical address but instead provided the KSEG address.
172 * Or tried to, except it's not representable.
173 *
174 * On Tsunami there's nothing meaningful at 0x40000000000, so this is
175 * a safe thing to do. Come the first core logic that does put something
176 * in this area -- memory or whathaveyou -- then this hack will have
177 * to go away. So be prepared!
178 */
179
180#if defined(CONFIG_ALPHA_GENERIC) && defined(USE_48_BIT_KSEG)
181#error "EV6-only feature in a generic kernel"
182#endif
183#if defined(CONFIG_ALPHA_GENERIC) || \
184 (defined(CONFIG_ALPHA_EV6) && !defined(USE_48_BIT_KSEG))
185#define KSEG_PFN (0xc0000000000UL >> PAGE_SHIFT)
/* Note: PHYS_TWIDDLE modifies its argument in place (^=) when it matches. */
186#define PHYS_TWIDDLE(pfn) \
187 ((((pfn) & KSEG_PFN) == (0x40000000000UL >> PAGE_SHIFT)) \
188 ? ((pfn) ^= KSEG_PFN) : (pfn))
189#else
190#define PHYS_TWIDDLE(pfn) (pfn)
191#endif
192
193/*
194 * Conversion functions: convert a page and protection to a page entry,
195 * and a page entry and page directory to the page they refer to.
196 */
197#ifndef CONFIG_DISCONTIGMEM
198#define page_to_pa(page) (((page) - mem_map) << PAGE_SHIFT)
199
/* The PFN occupies the high 32 bits of a PTE (see _PFN_MASK above). */
200#define pte_pfn(pte) (pte_val(pte) >> 32)
201#define pte_page(pte) pfn_to_page(pte_pfn(pte))
202#define mk_pte(page, pgprot) \
203({ \
204 pte_t pte; \
205 \
206 pte_val(pte) = (page_to_pfn(page) << 32) | pgprot_val(pgprot); \
207 pte; \
208})
209#endif
210
211extern inline pte_t pfn_pte(unsigned long physpfn, pgprot_t pgprot)
212{ pte_t pte; pte_val(pte) = (PHYS_TWIDDLE(physpfn) << 32) | pgprot_val(pgprot); return pte; }
213
214extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
215{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
216
/* Directory entries hold the PFN of the next-level table in their high
   32 bits: (vaddr - PAGE_OFFSET) << (32-PAGE_SHIFT) == pfn << 32. */
217extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
218{ pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }
219
220extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
221{ pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }
222
223
224extern inline unsigned long
225pmd_page_kernel(pmd_t pmd)
226{
227	return ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)) + PAGE_OFFSET;
228}
229
230#ifndef CONFIG_DISCONTIGMEM
231#define pmd_page(pmd) (mem_map + ((pmd_val(pmd) & _PFN_MASK) >> 32))
232#endif
233
234extern inline unsigned long pgd_page(pgd_t pgd)
235{ return PAGE_OFFSET + ((pgd_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }
236
237extern inline int pte_none(pte_t pte) { return !pte_val(pte); }
238extern inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_VALID; }
239extern inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
240{
241	pte_val(*ptep) = 0;
242}
243
244extern inline int pmd_none(pmd_t pmd) { return !pmd_val(pmd); }
245extern inline int pmd_bad(pmd_t pmd) { return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE; }
246extern inline int pmd_present(pmd_t pmd) { return pmd_val(pmd) & _PAGE_VALID; }
247extern inline void pmd_clear(pmd_t * pmdp) { pmd_val(*pmdp) = 0; }
248
249extern inline int pgd_none(pgd_t pgd) { return !pgd_val(pgd); }
250extern inline int pgd_bad(pgd_t pgd) { return (pgd_val(pgd) & ~_PFN_MASK) != _PAGE_TABLE; }
251extern inline int pgd_present(pgd_t pgd) { return pgd_val(pgd) & _PAGE_VALID; }
252extern inline void pgd_clear(pgd_t * pgdp) { pgd_val(*pgdp) = 0; }
253
254/*
255 * The following only work if pte_present() is true.
256 * Undefined behaviour if not..
257 */
/* Note the inverted sense: readable/writable/executable means the
   corresponding fault-on bit (FOR/FOW/FOE) is CLEAR. */
258extern inline int pte_read(pte_t pte) { return !(pte_val(pte) & _PAGE_FOR); }
259extern inline int pte_write(pte_t pte) { return !(pte_val(pte) & _PAGE_FOW); }
260extern inline int pte_exec(pte_t pte) { return !(pte_val(pte) & _PAGE_FOE); }
261extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
262extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
263extern inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
264
265extern inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_FOW; return pte; }
266extern inline pte_t pte_rdprotect(pte_t pte) { pte_val(pte) |= _PAGE_FOR; return pte; }
267extern inline pte_t pte_exprotect(pte_t pte) { pte_val(pte) |= _PAGE_FOE; return pte; }
268extern inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
269extern inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~(__ACCESS_BITS); return pte; }
270extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) &= ~_PAGE_FOW; return pte; }
271extern inline pte_t pte_mkread(pte_t pte) { pte_val(pte) &= ~_PAGE_FOR; return pte; }
272extern inline pte_t pte_mkexec(pte_t pte) { pte_val(pte) &= ~_PAGE_FOE; return pte; }
273extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= __DIRTY_BITS; return pte; }
274extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= __ACCESS_BITS; return pte; }
275
276#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))
277
278/* to find an entry in a kernel page-table-directory */
279#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
280
281/* to find an entry in a page-table-directory. */
282#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
283#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
284
285/* Find an entry in the second-level page table.. */
286extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
287{
288	return (pmd_t *) pgd_page(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
289}
290
291/* Find an entry in the third-level page table.. */
292extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
293{
294	return (pte_t *) pmd_page_kernel(*dir)
295	 + ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
296}
297
/* All page tables live in the kernel direct map, so the "map" variants
   are plain kernel-address lookups and unmap is a no-op. */
298#define pte_offset_map(dir,addr) pte_offset_kernel((dir),(addr))
299#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir),(addr))
300#define pte_unmap(pte) do { } while (0)
301#define pte_unmap_nested(pte) do { } while (0)
302
303extern pgd_t swapper_pg_dir[1024];
304
305/*
306 * The Alpha doesn't have any external MMU info: the kernel page
307 * tables contain all the necessary information.
308 */
309extern inline void update_mmu_cache(struct vm_area_struct * vma,
310 unsigned long address, pte_t pte)
311{
312}
313
314/*
315 * Non-present pages: high 24 bits are offset, next 8 bits type,
316 * low 32 bits zero.
317 */
318extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
319{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }
320
321#define __swp_type(x) (((x).val >> 32) & 0xff)
322#define __swp_offset(x) ((x).val >> 40)
323#define __swp_entry(type, off) ((swp_entry_t) { pte_val(mk_swap_pte((type), (off))) })
324#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
325#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
326
327#define pte_to_pgoff(pte) (pte_val(pte) >> 32)
328#define pgoff_to_pte(off) ((pte_t) { ((off) << 32) | _PAGE_FILE })
329
330#define PTE_FILE_MAX_BITS 32
331
332#ifndef CONFIG_DISCONTIGMEM
333#define kern_addr_valid(addr) (1)
334#endif
335
336#define io_remap_page_range(vma, start, busaddr, size, prot) \
337({ \
338 void *va = (void __force *)ioremap(busaddr, size); \
339 unsigned long pfn = virt_to_phys(va) >> PAGE_SHIFT; \
340 remap_pfn_range(vma, start, pfn, size, prot); \
341})
342
343#define io_remap_pfn_range(vma, start, pfn, size, prot) \
344 remap_pfn_range(vma, start, pfn, size, prot)
345
346#define MK_IOSPACE_PFN(space, pfn) (pfn)
347#define GET_IOSPACE(pfn) 0
348#define GET_PFN(pfn) (pfn)
349
350#define pte_ERROR(e) \
351 printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
352#define pmd_ERROR(e) \
353 printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
354#define pgd_ERROR(e) \
355 printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
356
357extern void paging_init(void);
358
359#include <asm-generic/pgtable.h>
360
361/*
362 * No page table caches to initialise
363 */
364#define pgtable_cache_init() do { } while (0)
365
366/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT. */
367#define HAVE_ARCH_UNMAPPED_AREA
368
369#endif /* _ALPHA_PGTABLE_H */
diff --git a/include/asm-alpha/poll.h b/include/asm-alpha/poll.h
new file mode 100644
index 000000000000..34f333b762a0
--- /dev/null
+++ b/include/asm-alpha/poll.h
@@ -0,0 +1,23 @@
1#ifndef __ALPHA_POLL_H
2#define __ALPHA_POLL_H
3
/* poll(2) event bits; userland-visible ABI values. */
4#define POLLIN (1 << 0)
5#define POLLPRI (1 << 1)
6#define POLLOUT (1 << 2)
7#define POLLERR (1 << 3)
8#define POLLHUP (1 << 4)
9#define POLLNVAL (1 << 5)
10#define POLLRDNORM (1 << 6)
11#define POLLRDBAND (1 << 7)
12#define POLLWRNORM (1 << 8)
13#define POLLWRBAND (1 << 9)
14#define POLLMSG (1 << 10)
15#define POLLREMOVE (1 << 11)
16
17struct pollfd {
18	int fd;
19	short events;	/* requested events */
20	short revents;	/* returned events */
21};
22
23#endif
diff --git a/include/asm-alpha/posix_types.h b/include/asm-alpha/posix_types.h
new file mode 100644
index 000000000000..c78c04a94f4e
--- /dev/null
+++ b/include/asm-alpha/posix_types.h
@@ -0,0 +1,123 @@
1#ifndef _ALPHA_POSIX_TYPES_H
2#define _ALPHA_POSIX_TYPES_H
3
4/*
5 * This file is generally used by user-level software, so you need to
6 * be a little careful about namespace pollution etc. Also, we cannot
7 * assume GCC is being used.
8 */
9
10typedef unsigned int __kernel_ino_t;
11typedef unsigned int __kernel_mode_t;
12typedef unsigned int __kernel_nlink_t;
13typedef long __kernel_off_t;
14typedef long long __kernel_loff_t;
15typedef int __kernel_pid_t;
16typedef int __kernel_ipc_pid_t;
17typedef unsigned int __kernel_uid_t;
18typedef unsigned int __kernel_gid_t;
19typedef unsigned long __kernel_size_t;
20typedef long __kernel_ssize_t;
21typedef long __kernel_ptrdiff_t;
22typedef long __kernel_time_t;
23typedef long __kernel_suseconds_t;
24typedef long __kernel_clock_t;
25typedef int __kernel_daddr_t;
26typedef char * __kernel_caddr_t;
27typedef unsigned long __kernel_sigset_t;	/* at least 32 bits */
28typedef unsigned short __kernel_uid16_t;
29typedef unsigned short __kernel_gid16_t;
30typedef int __kernel_clockid_t;
31typedef int __kernel_timer_t;
32
33typedef struct {
34	int val[2];
35} __kernel_fsid_t;
36
/* Alpha uids/gids were always 32-bit, so old == 32-bit here. */
37typedef __kernel_uid_t __kernel_old_uid_t;
38typedef __kernel_gid_t __kernel_old_gid_t;
39typedef __kernel_uid_t __kernel_uid32_t;
40typedef __kernel_gid_t __kernel_gid32_t;
41
42typedef unsigned int __kernel_old_dev_t;
43
44#ifdef __KERNEL__
45
46#ifndef __GNUC__
47
48#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
49#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
50#define __FD_ISSET(d, set) (((set)->fds_bits[__FDELT(d)] & __FDMASK(d)) != 0)
51#define __FD_ZERO(set) \
52 ((void) memset ((__ptr_t) (set), 0, sizeof (__kernel_fd_set)))
53
54#else /* __GNUC__ */
55
56/* With GNU C, use inline functions instead so args are evaluated only once: */
57
58#undef __FD_SET
59static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
60{
61	unsigned long _tmp = fd / __NFDBITS;
62	unsigned long _rem = fd % __NFDBITS;
63	fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
64}
65
66#undef __FD_CLR
67static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
68{
69	unsigned long _tmp = fd / __NFDBITS;
70	unsigned long _rem = fd % __NFDBITS;
71	fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
72}
73
74#undef __FD_ISSET
75static __inline__ int __FD_ISSET(unsigned long fd, const __kernel_fd_set *p)
76{
77	unsigned long _tmp = fd / __NFDBITS;
78	unsigned long _rem = fd % __NFDBITS;
79	return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
80}
81
82/*
83 * This will unroll the loop for the normal constant case (8 ints,
84 * for a 256-bit fd_set)
85 */
86#undef __FD_ZERO
87static __inline__ void __FD_ZERO(__kernel_fd_set *p)
88{
89	unsigned long *tmp = p->fds_bits;
90	int i;
91
92	if (__builtin_constant_p(__FDSET_LONGS)) {
93		switch (__FDSET_LONGS) {
94		case 16:
95			tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
96			tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
97			tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0;
98			tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0;
99			return;
100
101		case 8:
102			tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
103			tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0;
104			return;
105
106		case 4:
107			tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0;
108			return;
109		}
110	}
	/* Fallback for non-constant or unusual __FDSET_LONGS values. */
111	i = __FDSET_LONGS;
112	while (i) {
113		i--;
114		*tmp = 0;
115		tmp++;
116	}
117}
118
119#endif /* __GNUC__ */
120
121#endif /* __KERNEL__ */
122
123#endif /* _ALPHA_POSIX_TYPES_H */
diff --git a/include/asm-alpha/processor.h b/include/asm-alpha/processor.h
new file mode 100644
index 000000000000..059780a7d3d7
--- /dev/null
+++ b/include/asm-alpha/processor.h
@@ -0,0 +1,118 @@
1/*
2 * include/asm-alpha/processor.h
3 *
4 * Copyright (C) 1994 Linus Torvalds
5 */
6
#ifndef __ASM_ALPHA_PROCESSOR_H
#define __ASM_ALPHA_PROCESSOR_H

#include <linux/personality.h>	/* for ADDR_LIMIT_32BIT */

/*
 * Returns current instruction pointer ("program counter").
 * "br %0,.+4" stores the address of the following instruction in %0
 * without actually branching anywhere.
 */
#define current_text_addr() \
  ({ void *__pc; __asm__ ("br %0,.+4" : "=r"(__pc)); __pc; })

/*
 * We have a 42-bit user address space: 4TB user VM...
 */
#define TASK_SIZE (0x40000000000UL)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.  32-bit personalities are confined below 1GB.
 */
#define TASK_UNMAPPED_BASE \
  ((current->personality & ADDR_LIMIT_32BIT) ? 0x40000000 : TASK_SIZE / 2)

/* Address-space limit cookie carried by get_fs()/set_fs() style code. */
typedef struct {
	unsigned long seg;
} mm_segment_t;

/* This is dead.  Everything has been moved to thread_info. */
struct thread_struct { };
#define INIT_THREAD  { }

/* Return saved PC of a blocked thread.  */
struct task_struct;
extern unsigned long thread_saved_pc(struct task_struct *);

/* Do necessary setup to start up a newly executed thread.  */
extern void start_thread(struct pt_regs *, unsigned long, unsigned long);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk)	do { } while (0)

/* Create a kernel thread without removing it from tasklists.  */
extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

unsigned long get_wchan(struct task_struct *p);

/* See arch/alpha/kernel/ptrace.c for details.  PT_REG/SW_REG compute the
   offset of a saved register within the 2-page kernel stack, counting
   back from the stack top where pt_regs (and below it, switch_stack)
   are stored.  */
#define PT_REG(reg) \
  (PAGE_SIZE*2 - sizeof(struct pt_regs) + offsetof(struct pt_regs, reg))

#define SW_REG(reg) \
 (PAGE_SIZE*2 - sizeof(struct pt_regs) - sizeof(struct switch_stack) \
  + offsetof(struct switch_stack, reg))

/* Saved user PC, read out of the task's kernel stack via PT_REG. */
#define KSTK_EIP(tsk) \
  (*(unsigned long *)(PT_REG(pc) + (unsigned long) ((tsk)->thread_info)))

/* User stack pointer: live via rdusp() for current, else from the PCB. */
#define KSTK_ESP(tsk) \
  ((tsk) == current ? rdusp () : (tsk)->thread_info->pcb.usp)

#define cpu_relax()	barrier()

#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifndef CONFIG_SMP
/* Nothing to prefetch. */
#define spin_lock_prefetch(lock)  	do { } while (0)
#endif
79
/* Prefetch helpers.  gcc >= 3.1 has __builtin_prefetch; older
   compilers get explicit loads into $31 (the zero register), which
   fetch the cache line but discard the result.  */
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)
extern inline void prefetch(const void *ptr)  
{ 
	__builtin_prefetch(ptr, 0, 3);
}

extern inline void prefetchw(const void *ptr)  
{
	__builtin_prefetch(ptr, 1, 3);
}

#ifdef CONFIG_SMP
extern inline void spin_lock_prefetch(const void *ptr)  
{
	__builtin_prefetch(ptr, 1, 3);
}
#endif

#else
extern inline void prefetch(const void *ptr)  
{ 
	/* Load longword into $31: architecturally a no-op, fetches line. */
	__asm__ ("ldl $31,%0" : : "m"(*(char *)ptr)); 
}

extern inline void prefetchw(const void *ptr)  
{
	__asm__ ("ldq $31,%0" : : "m"(*(char *)ptr)); 
}

#ifdef CONFIG_SMP
extern inline void spin_lock_prefetch(const void *ptr)  
{
	__asm__ ("ldq $31,%0" : : "m"(*(char *)ptr)); 
}
#endif

#endif /* GCC 3.1 */
117
118#endif /* __ASM_ALPHA_PROCESSOR_H */
diff --git a/include/asm-alpha/ptrace.h b/include/asm-alpha/ptrace.h
new file mode 100644
index 000000000000..d462c5e14c13
--- /dev/null
+++ b/include/asm-alpha/ptrace.h
@@ -0,0 +1,82 @@
#ifndef _ASMAXP_PTRACE_H
#define _ASMAXP_PTRACE_H


/*
 * This struct defines the way the registers are stored on the
 * kernel stack during a system call or other kernel entry
 *
 * NOTE! I want to minimize the overhead of system calls, so this
 * struct has as little information as possible.  It does not have
 *
 *  - floating point regs: the kernel doesn't change those
 *  - r9-15: saved by the C compiler
 *
 * This makes "fork()" and "exec()" a bit more complex, but should
 * give us low system call latency.
 */

struct pt_regs {
	unsigned long r0;
	unsigned long r1;
	unsigned long r2;
	unsigned long r3;
	unsigned long r4;
	unsigned long r5;
	unsigned long r6;
	unsigned long r7;
	unsigned long r8;
	unsigned long r19;
	unsigned long r20;
	unsigned long r21;
	unsigned long r22;
	unsigned long r23;
	unsigned long r24;
	unsigned long r25;
	unsigned long r26;
	unsigned long r27;
	unsigned long r28;
	unsigned long hae;
/* JRP - These are the values provided to a0-a2 by PALcode */
	unsigned long trap_a0;
	unsigned long trap_a1;
	unsigned long trap_a2;
/* These are saved by PAL-code: */
	unsigned long ps;
	unsigned long pc;
	unsigned long gp;
	unsigned long r16;
	unsigned long r17;
	unsigned long r18;
};

/*
 * This is the extended stack used by signal handlers and the context
 * switcher: it's pushed after the normal "struct pt_regs".
 */
struct switch_stack {
	unsigned long r9;
	unsigned long r10;
	unsigned long r11;
	unsigned long r12;
	unsigned long r13;
	unsigned long r14;
	unsigned long r15;
	unsigned long r26;
	unsigned long fp[32];	/* fp[31] is fpcr */
};

#ifdef __KERNEL__
/* Bit 3 of the saved PS distinguishes user from kernel mode. */
#define user_mode(regs) (((regs)->ps & 8) != 0)
#define instruction_pointer(regs) ((regs)->pc)
#define profile_pc(regs) instruction_pointer(regs)
extern void show_regs(struct pt_regs *);

/* pt_regs sit at the very top of the task's 2-page kernel stack. */
#define alpha_task_regs(task) \
  ((struct pt_regs *) ((long) (task)->thread_info + 2*PAGE_SIZE) - 1)

/* r0 carries the syscall return value; clearing it signals success. */
#define force_successful_syscall_return() (alpha_task_regs(current)->r0 = 0)

#endif

#endif
diff --git a/include/asm-alpha/reg.h b/include/asm-alpha/reg.h
new file mode 100644
index 000000000000..86ff916fb069
--- /dev/null
+++ b/include/asm-alpha/reg.h
@@ -0,0 +1,52 @@
#ifndef __reg_h__
#define __reg_h__

/*
 * Exception frame offsets.
 * Indices (in quadwords) of each register within the exception frame;
 * the last six entries (PS..A2) form the hardware/PAL frame.
 */
#define EF_V0		0
#define EF_T0		1
#define EF_T1		2
#define EF_T2		3
#define EF_T3		4
#define EF_T4		5
#define EF_T5		6
#define EF_T6		7
#define EF_T7		8
#define EF_S0		9
#define EF_S1		10
#define EF_S2		11
#define EF_S3		12
#define EF_S4		13
#define EF_S5		14
#define EF_S6		15
#define EF_A3		16
#define EF_A4		17
#define EF_A5		18
#define EF_T8		19
#define EF_T9		20
#define EF_T10		21
#define EF_T11		22
#define EF_RA		23
#define EF_T12		24
#define EF_AT		25
#define EF_SP		26
#define EF_PS		27
#define EF_PC		28
#define EF_GP		29
#define EF_A0		30
#define EF_A1		31
#define EF_A2		32

#define EF_SIZE		(33*8)
#define HWEF_SIZE	(6*8)		/* size of PAL frame (PS-A2) */

/* Software-saved portion of the frame (everything but the PAL part). */
#define EF_SSIZE	(EF_SIZE - HWEF_SIZE)

/*
 * Map register number into core file offset.
 */
#define CORE_REG(reg, ubase) \
	(((unsigned long *)((unsigned long)(ubase)))[reg])

#endif /* __reg_h__ */
diff --git a/include/asm-alpha/regdef.h b/include/asm-alpha/regdef.h
new file mode 100644
index 000000000000..142df9c4f8b8
--- /dev/null
+++ b/include/asm-alpha/regdef.h
@@ -0,0 +1,44 @@
#ifndef __alpha_regdef_h__
#define __alpha_regdef_h__

/* Symbolic names for the Alpha integer registers, for use in
   assembly sources following the standard calling convention. */

#define v0	$0	/* function return value */

#define t0	$1	/* temporary registers (caller-saved) */
#define t1	$2
#define t2	$3
#define t3	$4
#define t4	$5
#define t5	$6
#define t6	$7
#define t7	$8

#define	s0	$9	/* saved-registers (callee-saved registers) */
#define	s1	$10
#define	s2	$11
#define	s3	$12
#define	s4	$13
#define	s5	$14
#define	s6	$15
#define	fp	s6	/* frame-pointer (s6 in frame-less procedures) */

#define a0	$16	/* argument registers (caller-saved) */
#define a1	$17
#define a2	$18
#define a3	$19
#define a4	$20
#define a5	$21

#define t8	$22	/* more temps (caller-saved) */
#define t9	$23
#define t10	$24
#define t11	$25
#define ra	$26	/* return address register */
#define t12	$27

#define pv	t12	/* procedure-variable register */
#define AT	$at	/* assembler temporary */
#define gp	$29	/* global pointer */
#define sp	$30	/* stack pointer */
#define zero	$31	/* reads as zero, writes are noops */

#endif /* __alpha_regdef_h__ */
diff --git a/include/asm-alpha/resource.h b/include/asm-alpha/resource.h
new file mode 100644
index 000000000000..c10874ff5973
--- /dev/null
+++ b/include/asm-alpha/resource.h
@@ -0,0 +1,22 @@
#ifndef _ALPHA_RESOURCE_H
#define _ALPHA_RESOURCE_H

/*
 * Alpha/Linux-specific ordering of these four resource limit IDs,
 * the rest comes from the generic header:
 */
#define RLIMIT_NOFILE		6	/* max number of open files */
#define RLIMIT_AS		7	/* address space limit */
#define RLIMIT_NPROC		8	/* max number of processes */
#define RLIMIT_MEMLOCK		9	/* max locked-in-memory address space */

/*
 * SuS says limits have to be unsigned.  Fine, it's unsigned, but
 * we retain the old value for compatibility, especially with DU.
 * When you run into the 2^63 barrier, you call me.
 */
#define RLIM_INFINITY		0x7ffffffffffffffful

#include <asm-generic/resource.h>

#endif /* _ALPHA_RESOURCE_H */
diff --git a/include/asm-alpha/rtc.h b/include/asm-alpha/rtc.h
new file mode 100644
index 000000000000..4e854b1333eb
--- /dev/null
+++ b/include/asm-alpha/rtc.h
@@ -0,0 +1,10 @@
#ifndef _ALPHA_RTC_H
#define _ALPHA_RTC_H

/*
 * Alpha uses the default access methods for the RTC.
 */

#include <asm-generic/rtc.h>

#endif
diff --git a/include/asm-alpha/rwsem.h b/include/asm-alpha/rwsem.h
new file mode 100644
index 000000000000..8e058a67c9a4
--- /dev/null
+++ b/include/asm-alpha/rwsem.h
@@ -0,0 +1,266 @@
#ifndef _ALPHA_RWSEM_H
#define _ALPHA_RWSEM_H

/*
 * Written by Ivan Kokshaysky <ink@jurassic.park.msu.ru>, 2001.
 * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
 */

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct rwsem_waiter;

/* Out-of-line contention slow paths. */
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

/*
 * the semaphore definition
 *
 * "count" packs two fields into one 64-bit word: the low 32 bits
 * hold the active-locker count (see RWSEM_ACTIVE_MASK), while the
 * high 32 bits are biased negative (RWSEM_WAITING_BIAS) while
 * anything is waiting, so a single signed test classifies the state.
 */
struct rw_semaphore {
	long			count;
#define RWSEM_UNLOCKED_VALUE		0x0000000000000000L
#define RWSEM_ACTIVE_BIAS		0x0000000000000001L
#define RWSEM_ACTIVE_MASK		0x00000000ffffffffL
#define RWSEM_WAITING_BIAS		(-0x0000000100000000L)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
	spinlock_t		wait_lock;
	struct list_head	wait_list;
#if RWSEM_DEBUG
	int			debug;
#endif
};

#if RWSEM_DEBUG
#define __RWSEM_DEBUG_INIT      , 0
#else
#define __RWSEM_DEBUG_INIT	/* */
#endif

#define __RWSEM_INITIALIZER(name) \
	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
	LIST_HEAD_INIT((name).wait_list) __RWSEM_DEBUG_INIT }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
56
/* Run-time initialization, equivalent to __RWSEM_INITIALIZER. */
static inline void init_rwsem(struct rw_semaphore *sem)
{
	sem->count = RWSEM_UNLOCKED_VALUE;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#if RWSEM_DEBUG
	sem->debug = 0;
#endif
}
66
/*
 * Lock for reading: atomically add the read bias with an ldq_l/stq_c
 * (load-locked/store-conditional) retry loop; the failed-store branch
 * lives in .subsection 2 to keep the fast path straight-line.  A
 * negative pre-add count means a writer holds or waiters exist, so
 * fall to the slow path.
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	long oldcount;
#ifndef	CONFIG_SMP
	oldcount = sem->count;
	sem->count += RWSEM_ACTIVE_READ_BIAS;
#else
	long temp;
	__asm__ __volatile__(
	"1:	ldq_l	%0,%1\n"
	"	addq	%0,%3,%2\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
#endif
	if (unlikely(oldcount < 0))
		rwsem_down_read_failed(sem);
}
90
91/*
92 * trylock for reading -- returns 1 if successful, 0 if contention
93 */
94static inline int __down_read_trylock(struct rw_semaphore *sem)
95{
96 long old, new, res;
97
98 res = sem->count;
99 do {
100 new = res + RWSEM_ACTIVE_READ_BIAS;
101 if (new <= 0)
102 break;
103 old = res;
104 res = cmpxchg(&sem->count, old, new);
105 } while (res != old);
106 return res >= 0 ? 1 : 0;
107}
108
/*
 * Lock for writing: atomically add the write bias (waiting bias plus
 * one active) via an ll/sc retry loop.  Any non-zero pre-add count
 * means the lock was not free, so take the slow path.
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	long oldcount;
#ifndef	CONFIG_SMP
	oldcount = sem->count;
	sem->count += RWSEM_ACTIVE_WRITE_BIAS;
#else
	long temp;
	__asm__ __volatile__(
	"1:	ldq_l	%0,%1\n"
	"	addq	%0,%3,%2\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
#endif
	if (unlikely(oldcount))
		rwsem_down_write_failed(sem);
}
132
133/*
134 * trylock for writing -- returns 1 if successful, 0 if contention
135 */
136static inline int __down_write_trylock(struct rw_semaphore *sem)
137{
138 long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
139 RWSEM_ACTIVE_WRITE_BIAS);
140 if (ret == RWSEM_UNLOCKED_VALUE)
141 return 1;
142 return 0;
143}
144
/*
 * Release a read lock: atomically subtract the read bias (mb first,
 * so prior critical-section accesses complete before the release is
 * visible).  If the count was negative (waiters present) and we were
 * the last active reader, wake the waiters.
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long oldcount;
#ifndef	CONFIG_SMP
	oldcount = sem->count;
	sem->count -= RWSEM_ACTIVE_READ_BIAS;
#else
	long temp;
	__asm__ __volatile__(
	"	mb\n"
	"1:	ldq_l	%0,%1\n"
	"	subq	%0,%3,%2\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,2f\n"
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
#endif
	if (unlikely(oldcount < 0))
		/* low 32 bits: were we the last active locker? */
		if ((int)oldcount - RWSEM_ACTIVE_READ_BIAS == 0)
			rwsem_wake(sem);
}
169
/*
 * Release the write lock: atomically subtract the write bias.  On SMP
 * the extra "subq" after a successful stq_c recomputes the new count
 * into %0 so the C code below sees the post-subtract value, matching
 * the UP variant.  If the result is non-zero with no active lockers
 * left (low 32 bits zero), there are waiters to wake.
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	long count;
#ifndef	CONFIG_SMP
	sem->count -= RWSEM_ACTIVE_WRITE_BIAS;
	count = sem->count;
#else
	long temp;
	__asm__ __volatile__(
	"	mb\n"
	"1:	ldq_l	%0,%1\n"
	"	subq	%0,%3,%2\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,2f\n"
	"	subq	%0,%3,%0\n"
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	:"=&r" (count), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
#endif
	if (unlikely(count))
		if ((int)count == 0)
			rwsem_wake(sem);
}
195
196/*
197 * downgrade write lock to read lock
198 */
199static inline void __downgrade_write(struct rw_semaphore *sem)
200{
201 long oldcount;
202#ifndef CONFIG_SMP
203 oldcount = sem->count;
204 sem->count -= RWSEM_WAITING_BIAS;
205#else
206 long temp;
207 __asm__ __volatile__(
208 "1: ldq_l %0,%1\n"
209 " addq %0,%3,%2\n"
210 " stq_c %2,%1\n"
211 " beq %2,2f\n"
212 " mb\n"
213 ".subsection 2\n"
214 "2: br 1b\n"
215 ".previous"
216 :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
217 :"Ir" (-RWSEM_WAITING_BIAS), "m" (sem->count) : "memory");
218#endif
219 if (unlikely(oldcount < 0))
220 rwsem_downgrade_wake(sem);
221}
222
/* Atomically add "val" to the count; no return value, no barrier. */
static inline void rwsem_atomic_add(long val, struct rw_semaphore *sem)
{
#ifndef	CONFIG_SMP
	sem->count += val;
#else
	long temp;
	__asm__ __volatile__(
	"1:	ldq_l	%0,%1\n"
	"	addq	%0,%2,%0\n"
	"	stq_c	%0,%1\n"
	"	beq	%0,2f\n"
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	:"=&r" (temp), "=m" (sem->count)
	:"Ir" (val), "m" (sem->count));
#endif
}
241
/* Atomically add "val" to the count and return the new count. */
static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
{
#ifndef	CONFIG_SMP
	sem->count += val;
	return sem->count;
#else
	long ret, temp;
	__asm__ __volatile__(
	"1:	ldq_l	%0,%1\n"
	"	addq 	%0,%3,%2\n"
	"	addq	%0,%3,%0\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,2f\n"
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	:"=&r" (ret), "=m" (sem->count), "=&r" (temp)
	:"Ir" (val), "m" (sem->count));

	return ret;
#endif
}
264
265#endif /* __KERNEL__ */
266#endif /* _ALPHA_RWSEM_H */
diff --git a/include/asm-alpha/scatterlist.h b/include/asm-alpha/scatterlist.h
new file mode 100644
index 000000000000..6afb8bd3aaf9
--- /dev/null
+++ b/include/asm-alpha/scatterlist.h
@@ -0,0 +1,21 @@
#ifndef _ALPHA_SCATTERLIST_H
#define _ALPHA_SCATTERLIST_H

#include <asm/page.h>

/* One entry of a DMA scatter/gather list. */
struct scatterlist {
	struct page *page;	/* source page */
	unsigned int offset;	/* offset of the data within the page */

	unsigned int length;	/* length of the data */

	dma_addr_t dma_address;	/* bus address after mapping */
	__u32 dma_length;	/* mapped length (may differ from length) */
};

#define sg_dma_address(sg)	((sg)->dma_address)
#define sg_dma_len(sg)		((sg)->dma_length)

#define ISA_DMA_THRESHOLD (~0UL)

#endif /* !(_ALPHA_SCATTERLIST_H) */
diff --git a/include/asm-alpha/sections.h b/include/asm-alpha/sections.h
new file mode 100644
index 000000000000..43b40edd6e44
--- /dev/null
+++ b/include/asm-alpha/sections.h
@@ -0,0 +1,7 @@
#ifndef _ALPHA_SECTIONS_H
#define _ALPHA_SECTIONS_H

/* nothing to see, move along */
#include <asm-generic/sections.h>

#endif
diff --git a/include/asm-alpha/segment.h b/include/asm-alpha/segment.h
new file mode 100644
index 000000000000..0453d97daae7
--- /dev/null
+++ b/include/asm-alpha/segment.h
@@ -0,0 +1,6 @@
#ifndef __ALPHA_SEGMENT_H
#define __ALPHA_SEGMENT_H

/* Only here because we have some old header files that expect it.. */

#endif
diff --git a/include/asm-alpha/semaphore.h b/include/asm-alpha/semaphore.h
new file mode 100644
index 000000000000..eb2cbd97d404
--- /dev/null
+++ b/include/asm-alpha/semaphore.h
@@ -0,0 +1,153 @@
#ifndef _ALPHA_SEMAPHORE_H
#define _ALPHA_SEMAPHORE_H

/*
 * SMP- and interrupt-safe semaphores..
 *
 * (C) Copyright 1996 Linus Torvalds
 * (C) Copyright 1996, 2000 Richard Henderson
 */

#include <asm/current.h>
#include <asm/system.h>
#include <asm/atomic.h>
#include <linux/compiler.h>
#include <linux/wait.h>
#include <linux/rwsem.h>

/* Counting semaphore: "count" goes negative when tasks are blocked. */
struct semaphore {
	atomic_t count;
	wait_queue_head_t wait;
};

#define __SEMAPHORE_INITIALIZER(name, n)			\
{								\
	.count	= ATOMIC_INIT(n),				\
	.wait	= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait),	\
}

#define __MUTEX_INITIALIZER(name)			\
	__SEMAPHORE_INITIALIZER(name,1)

#define __DECLARE_SEMAPHORE_GENERIC(name,count)		\
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

#define DECLARE_MUTEX(name)		__DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name)	__DECLARE_SEMAPHORE_GENERIC(name,0)
37
/* Run-time initialization with an arbitrary initial count. */
static inline void sema_init(struct semaphore *sem, int val)
{
	/*
	 * Logically, 
	 *   *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
	 * except that gcc produces better initializing by parts yet.
	 */

	atomic_set(&sem->count, val);
	init_waitqueue_head(&sem->wait);
}
49
/* Initialize as an unlocked mutex (count 1). */
static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}
54
/* Initialize as a locked mutex (count 0). */
static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}
59
extern void down(struct semaphore *);
extern void __down_failed(struct semaphore *);
extern int  down_interruptible(struct semaphore *);
extern int  __down_failed_interruptible(struct semaphore *);
extern int  down_trylock(struct semaphore *);
extern void up(struct semaphore *);
extern void __up_wakeup(struct semaphore *);

/*
 * Hidden out of line code is fun, but extremely messy.  Rely on newer
 * compilers to do a respectable job with this.  The contention cases
 * are handled out of line in arch/alpha/kernel/semaphore.c.
 */
73
74static inline void __down(struct semaphore *sem)
75{
76 long count;
77 might_sleep();
78 count = atomic_dec_return(&sem->count);
79 if (unlikely(count < 0))
80 __down_failed(sem);
81}
82
83static inline int __down_interruptible(struct semaphore *sem)
84{
85 long count;
86 might_sleep();
87 count = atomic_dec_return(&sem->count);
88 if (unlikely(count < 0))
89 return __down_failed_interruptible(sem);
90 return 0;
91}
92
93/*
94 * down_trylock returns 0 on success, 1 if we failed to get the lock.
95 */
96
97static inline int __down_trylock(struct semaphore *sem)
98{
99 long ret;
100
101 /* "Equivalent" C:
102
103 do {
104 ret = ldl_l;
105 --ret;
106 if (ret < 0)
107 break;
108 ret = stl_c = ret;
109 } while (ret == 0);
110 */
111 __asm__ __volatile__(
112 "1: ldl_l %0,%1\n"
113 " subl %0,1,%0\n"
114 " blt %0,2f\n"
115 " stl_c %0,%1\n"
116 " beq %0,3f\n"
117 " mb\n"
118 "2:\n"
119 ".subsection 2\n"
120 "3: br 1b\n"
121 ".previous"
122 : "=&r" (ret), "=m" (sem->count)
123 : "m" (sem->count));
124
125 return ret < 0;
126}
127
128static inline void __up(struct semaphore *sem)
129{
130 if (unlikely(atomic_inc_return(&sem->count) <= 0))
131 __up_wakeup(sem);
132}
133
/* Without semaphore debugging, the public entry points are just the
   inline fast paths; the debug build provides out-of-line versions. */
#if !defined(CONFIG_DEBUG_SEMAPHORE)
extern inline void down(struct semaphore *sem)
{
	__down(sem);
}
extern inline int down_interruptible(struct semaphore *sem)
{
	return __down_interruptible(sem);
}
extern inline int down_trylock(struct semaphore *sem)
{
	return __down_trylock(sem);
}
extern inline void up(struct semaphore *sem)
{
	__up(sem);
}
#endif
152
153#endif
diff --git a/include/asm-alpha/sembuf.h b/include/asm-alpha/sembuf.h
new file mode 100644
index 000000000000..7b38b1534784
--- /dev/null
+++ b/include/asm-alpha/sembuf.h
@@ -0,0 +1,22 @@
#ifndef _ALPHA_SEMBUF_H
#define _ALPHA_SEMBUF_H

/* 
 * The semid64_ds structure for alpha architecture.
 * Note extra padding because this structure is passed back and forth
 * between kernel and user space.
 *
 * Pad space is left for:
 * - 2 miscellaneous 64-bit values
 */

struct semid64_ds {
	struct ipc64_perm sem_perm;		/* permissions .. see ipc.h */
	__kernel_time_t	sem_otime;		/* last semop time */
	__kernel_time_t	sem_ctime;		/* last change time */
	unsigned long	sem_nsems;		/* no. of semaphores in array */
	unsigned long	__unused1;
	unsigned long	__unused2;
};

#endif /* _ALPHA_SEMBUF_H */
diff --git a/include/asm-alpha/serial.h b/include/asm-alpha/serial.h
new file mode 100644
index 000000000000..7b2d9ee95a44
--- /dev/null
+++ b/include/asm-alpha/serial.h
@@ -0,0 +1,75 @@
1/*
2 * include/asm-alpha/serial.h
3 */
4
5#include <linux/config.h>
6
7/*
8 * This assumes you have a 1.8432 MHz clock for your UART.
9 *
10 * It'd be nice if someone built a serial card with a 24.576 MHz
11 * clock, since the 16550A is capable of handling a top speed of 1.5
12 * megabits/second; but this requires the faster clock.
13 */
14#define BASE_BAUD ( 1843200 / 16 )
15
16/* Standard COM flags (except for COM4, because of the 8514 problem) */
17#ifdef CONFIG_SERIAL_DETECT_IRQ
18#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
19#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
20#else
21#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
22#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
23#endif
24
25#ifdef CONFIG_SERIAL_MANY_PORTS
26#define FOURPORT_FLAGS ASYNC_FOURPORT
27#define ACCENT_FLAGS 0
28#define BOCA_FLAGS 0
29#endif
30
31#define STD_SERIAL_PORT_DEFNS \
32 /* UART CLK PORT IRQ FLAGS */ \
33 { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
34 { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \
35 { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
36 { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
37
38
39#ifdef CONFIG_SERIAL_MANY_PORTS
40#define EXTRA_SERIAL_PORT_DEFNS \
41 { 0, BASE_BAUD, 0x1A0, 9, FOURPORT_FLAGS }, /* ttyS4 */ \
42 { 0, BASE_BAUD, 0x1A8, 9, FOURPORT_FLAGS }, /* ttyS5 */ \
43 { 0, BASE_BAUD, 0x1B0, 9, FOURPORT_FLAGS }, /* ttyS6 */ \
44 { 0, BASE_BAUD, 0x1B8, 9, FOURPORT_FLAGS }, /* ttyS7 */ \
45 { 0, BASE_BAUD, 0x2A0, 5, FOURPORT_FLAGS }, /* ttyS8 */ \
46 { 0, BASE_BAUD, 0x2A8, 5, FOURPORT_FLAGS }, /* ttyS9 */ \
47 { 0, BASE_BAUD, 0x2B0, 5, FOURPORT_FLAGS }, /* ttyS10 */ \
48 { 0, BASE_BAUD, 0x2B8, 5, FOURPORT_FLAGS }, /* ttyS11 */ \
49 { 0, BASE_BAUD, 0x330, 4, ACCENT_FLAGS }, /* ttyS12 */ \
50 { 0, BASE_BAUD, 0x338, 4, ACCENT_FLAGS }, /* ttyS13 */ \
51 { 0, BASE_BAUD, 0x000, 0, 0 }, /* ttyS14 (spare) */ \
52 { 0, BASE_BAUD, 0x000, 0, 0 }, /* ttyS15 (spare) */ \
53 { 0, BASE_BAUD, 0x100, 12, BOCA_FLAGS }, /* ttyS16 */ \
54 { 0, BASE_BAUD, 0x108, 12, BOCA_FLAGS }, /* ttyS17 */ \
55 { 0, BASE_BAUD, 0x110, 12, BOCA_FLAGS }, /* ttyS18 */ \
56 { 0, BASE_BAUD, 0x118, 12, BOCA_FLAGS }, /* ttyS19 */ \
57 { 0, BASE_BAUD, 0x120, 12, BOCA_FLAGS }, /* ttyS20 */ \
58 { 0, BASE_BAUD, 0x128, 12, BOCA_FLAGS }, /* ttyS21 */ \
59 { 0, BASE_BAUD, 0x130, 12, BOCA_FLAGS }, /* ttyS22 */ \
60 { 0, BASE_BAUD, 0x138, 12, BOCA_FLAGS }, /* ttyS23 */ \
61 { 0, BASE_BAUD, 0x140, 12, BOCA_FLAGS }, /* ttyS24 */ \
62 { 0, BASE_BAUD, 0x148, 12, BOCA_FLAGS }, /* ttyS25 */ \
63 { 0, BASE_BAUD, 0x150, 12, BOCA_FLAGS }, /* ttyS26 */ \
64 { 0, BASE_BAUD, 0x158, 12, BOCA_FLAGS }, /* ttyS27 */ \
65 { 0, BASE_BAUD, 0x160, 12, BOCA_FLAGS }, /* ttyS28 */ \
66 { 0, BASE_BAUD, 0x168, 12, BOCA_FLAGS }, /* ttyS29 */ \
67 { 0, BASE_BAUD, 0x170, 12, BOCA_FLAGS }, /* ttyS30 */ \
68 { 0, BASE_BAUD, 0x178, 12, BOCA_FLAGS }, /* ttyS31 */
69#else
70#define EXTRA_SERIAL_PORT_DEFNS
71#endif
72
73#define SERIAL_PORT_DFNS \
74 STD_SERIAL_PORT_DEFNS \
75 EXTRA_SERIAL_PORT_DEFNS
diff --git a/include/asm-alpha/setup.h b/include/asm-alpha/setup.h
new file mode 100644
index 000000000000..2e023a4aa317
--- /dev/null
+++ b/include/asm-alpha/setup.h
@@ -0,0 +1,6 @@
#ifndef __ALPHA_SETUP_H
#define __ALPHA_SETUP_H

/* Maximum length of the kernel boot command line. */
#define COMMAND_LINE_SIZE	256

#endif
diff --git a/include/asm-alpha/sfp-machine.h b/include/asm-alpha/sfp-machine.h
new file mode 100644
index 000000000000..5fe63afbd474
--- /dev/null
+++ b/include/asm-alpha/sfp-machine.h
@@ -0,0 +1,82 @@
/* Machine-dependent software floating-point definitions.
   Alpha kernel version.
   Copyright (C) 1997,1998,1999 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Richard Henderson (rth@cygnus.com),
		  Jakub Jelinek (jakub@redhat.com) and
		  David S. Miller (davem@redhat.com).

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, write to the Free Software Foundation, Inc.,
   59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */

#ifndef _SFP_MACHINE_H
#define _SFP_MACHINE_H
   
/* Basic word size for the soft-fp templates: 64-bit longs. */
#define _FP_W_TYPE_SIZE		64
#define _FP_W_TYPE		unsigned long
#define _FP_WS_TYPE		signed long
#define _FP_I_TYPE		long

/* Multiplication/division strategies per precision (S/D/Q). */
#define _FP_MUL_MEAT_S(R,X,Y) \
  _FP_MUL_MEAT_1_imm(_FP_WFRACBITS_S,R,X,Y)
#define _FP_MUL_MEAT_D(R,X,Y) \
  _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
#define _FP_MUL_MEAT_Q(R,X,Y) \
  _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)

#define _FP_DIV_MEAT_S(R,X,Y)	_FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm)
#define _FP_DIV_MEAT_D(R,X,Y)	_FP_DIV_MEAT_1_udiv(D,R,X,Y)
#define _FP_DIV_MEAT_Q(R,X,Y)	_FP_DIV_MEAT_2_udiv(Q,R,X,Y)

/* Default NaN representation: quiet bit set, sign bit set. */
#define _FP_NANFRAC_S		_FP_QNANBIT_S
#define _FP_NANFRAC_D		_FP_QNANBIT_D
#define _FP_NANFRAC_Q		_FP_QNANBIT_Q
#define _FP_NANSIGN_S		1
#define _FP_NANSIGN_D		1
#define _FP_NANSIGN_Q		1

#define _FP_KEEPNANFRACP 1

/* Alpha Architecture Handbook, 4.7.10.4 says that
 * we should prefer any type of NaN in Fb, then Fa.
 */
#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP)			\
  do {								\
    R##_s = Y##_s;						\
    _FP_FRAC_COPY_##wc(R,X);					\
    R##_c = FP_CLS_NAN;						\
  } while (0)

/* Obtain the current rounding mode. */
#define FP_ROUNDMODE	mode
#define FP_RND_NEAREST	(FPCR_DYN_NORMAL >> FPCR_DYN_SHIFT)
#define FP_RND_ZERO	(FPCR_DYN_CHOPPED >> FPCR_DYN_SHIFT)
#define FP_RND_PINF	(FPCR_DYN_PLUS >> FPCR_DYN_SHIFT)
#define FP_RND_MINF	(FPCR_DYN_MINUS >> FPCR_DYN_SHIFT)

/* Exception flags. */
#define FP_EX_INVALID		IEEE_TRAP_ENABLE_INV
#define FP_EX_OVERFLOW		IEEE_TRAP_ENABLE_OVF
#define FP_EX_UNDERFLOW		IEEE_TRAP_ENABLE_UNF
#define FP_EX_DIVZERO		IEEE_TRAP_ENABLE_DZE
#define FP_EX_INEXACT		IEEE_TRAP_ENABLE_INE
#define FP_EX_DENORM		IEEE_TRAP_ENABLE_DNO

#define FP_DENORM_ZERO		(swcr & IEEE_MAP_DMZ)

/* We write the results always */
#define FP_INHIBIT_RESULTS 0

#endif
diff --git a/include/asm-alpha/shmbuf.h b/include/asm-alpha/shmbuf.h
new file mode 100644
index 000000000000..37ee84f05085
--- /dev/null
+++ b/include/asm-alpha/shmbuf.h
@@ -0,0 +1,38 @@
#ifndef _ALPHA_SHMBUF_H
#define _ALPHA_SHMBUF_H

/* 
 * The shmid64_ds structure for alpha architecture.
 * Note extra padding because this structure is passed back and forth
 * between kernel and user space.
 *
 * Pad space is left for:
 * - 2 miscellaneous 64-bit values
 */

struct shmid64_ds {
	struct ipc64_perm	shm_perm;	/* operation perms */
	size_t			shm_segsz;	/* size of segment (bytes) */
	__kernel_time_t		shm_atime;	/* last attach time */
	__kernel_time_t		shm_dtime;	/* last detach time */
	__kernel_time_t		shm_ctime;	/* last change time */
	__kernel_pid_t		shm_cpid;	/* pid of creator */
	__kernel_pid_t		shm_lpid;	/* pid of last operator */
	unsigned long		shm_nattch;	/* no. of current attaches */
	unsigned long		__unused1;
	unsigned long		__unused2;
};

/* System-wide shared-memory limits, as reported to user space. */
struct shminfo64 {
	unsigned long	shmmax;
	unsigned long	shmmin;
	unsigned long	shmmni;
	unsigned long	shmseg;
	unsigned long	shmall;
	unsigned long	__unused1;
	unsigned long	__unused2;
	unsigned long	__unused3;
	unsigned long	__unused4;
};

#endif /* _ALPHA_SHMBUF_H */
diff --git a/include/asm-alpha/shmparam.h b/include/asm-alpha/shmparam.h
new file mode 100644
index 000000000000..cc901d58aebb
--- /dev/null
+++ b/include/asm-alpha/shmparam.h
@@ -0,0 +1,6 @@
#ifndef _ASMAXP_SHMPARAM_H
#define _ASMAXP_SHMPARAM_H

#define	SHMLBA PAGE_SIZE		 /* attach addr a multiple of this */

#endif /* _ASMAXP_SHMPARAM_H */
diff --git a/include/asm-alpha/sigcontext.h b/include/asm-alpha/sigcontext.h
new file mode 100644
index 000000000000..323cdb026198
--- /dev/null
+++ b/include/asm-alpha/sigcontext.h
@@ -0,0 +1,34 @@
#ifndef _ASMAXP_SIGCONTEXT_H
#define _ASMAXP_SIGCONTEXT_H

/* Machine context saved on the user stack for signal delivery. */
struct sigcontext {
	/*
	 * What should we have here? I'd probably better use the same
	 * stack layout as OSF/1, just in case we ever want to try
	 * running their binaries.. 
	 *
	 * This is the basic layout, but I don't know if we'll ever
	 * actually fill in all the values..
	 */
	 long		sc_onstack;
	 long		sc_mask;
	 long		sc_pc;
	 long		sc_ps;
	 long		sc_regs[32];
	 long		sc_ownedfp;
	 long		sc_fpregs[32];
	 unsigned long	sc_fpcr;
	 unsigned long	sc_fp_control;
	 unsigned long	sc_reserved1, sc_reserved2;
	 unsigned long	sc_ssize;
	 char *		sc_sbase;
	 unsigned long	sc_traparg_a0;
	 unsigned long	sc_traparg_a1;
	 unsigned long	sc_traparg_a2;
	 unsigned long	sc_fp_trap_pc;
	 unsigned long	sc_fp_trigger_sum;
	 unsigned long	sc_fp_trigger_inst;
};


#endif
diff --git a/include/asm-alpha/siginfo.h b/include/asm-alpha/siginfo.h
new file mode 100644
index 000000000000..86bcab59c52b
--- /dev/null
+++ b/include/asm-alpha/siginfo.h
@@ -0,0 +1,11 @@
#ifndef _ALPHA_SIGINFO_H
#define _ALPHA_SIGINFO_H

/* Alpha-specific knobs for the generic siginfo layout. */
#define __ARCH_SI_PREAMBLE_SIZE		(4 * sizeof(int))
#define __ARCH_SI_TRAPNO

#define SIGEV_PAD_SIZE			((SIGEV_MAX_SIZE/sizeof(int)) - 4)

#include <asm-generic/siginfo.h>

#endif
diff --git a/include/asm-alpha/signal.h b/include/asm-alpha/signal.h
new file mode 100644
index 000000000000..25f98bc5576f
--- /dev/null
+++ b/include/asm-alpha/signal.h
@@ -0,0 +1,197 @@
1#ifndef _ASMAXP_SIGNAL_H
2#define _ASMAXP_SIGNAL_H
3
4#include <linux/types.h>
5
6/* Avoid too many header ordering problems. */
7struct siginfo;
8
9#ifdef __KERNEL__
10/* Digital Unix defines 64 signals. Most things should be clean enough
11 to redefine this at will, if care is taken to make libc match. */
12
13#define _NSIG 64
14#define _NSIG_BPW 64
15#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
16
17typedef unsigned long old_sigset_t; /* at least 32 bits */
18
19typedef struct {
20 unsigned long sig[_NSIG_WORDS];
21} sigset_t;
22
23#else
24/* Here we must cater to libcs that poke about in kernel headers. */
25
26#define NSIG 32
27typedef unsigned long sigset_t;
28
29#endif /* __KERNEL__ */
30
31
32/*
 * Linux/AXP has different signal numbers than Linux/i386: I'm trying
34 * to make it OSF/1 binary compatible, at least for normal binaries.
35 */
36#define SIGHUP 1
37#define SIGINT 2
38#define SIGQUIT 3
39#define SIGILL 4
40#define SIGTRAP 5
41#define SIGABRT 6
42#define SIGEMT 7
43#define SIGFPE 8
44#define SIGKILL 9
45#define SIGBUS 10
46#define SIGSEGV 11
47#define SIGSYS 12
48#define SIGPIPE 13
49#define SIGALRM 14
50#define SIGTERM 15
51#define SIGURG 16
52#define SIGSTOP 17
53#define SIGTSTP 18
54#define SIGCONT 19
55#define SIGCHLD 20
56#define SIGTTIN 21
57#define SIGTTOU 22
58#define SIGIO 23
59#define SIGXCPU 24
60#define SIGXFSZ 25
61#define SIGVTALRM 26
62#define SIGPROF 27
63#define SIGWINCH 28
64#define SIGINFO 29
65#define SIGUSR1 30
66#define SIGUSR2 31
67
68#define SIGPOLL SIGIO
69#define SIGPWR SIGINFO
70#define SIGIOT SIGABRT
71
72/* These should not be considered constants from userland. */
73#define SIGRTMIN 32
74#define SIGRTMAX _NSIG
75
76/*
77 * SA_FLAGS values:
78 *
79 * SA_ONSTACK indicates that a registered stack_t will be used.
80 * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the
81 * SA_RESTART flag to get restarting signals (which were the default long ago)
82 * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
83 * SA_RESETHAND clears the handler when the signal is delivered.
84 * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
85 * SA_NODEFER prevents the current signal from being masked in the handler.
86 *
87 * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
88 * Unix names RESETHAND and NODEFER respectively.
89 */
90
91#define SA_ONSTACK 0x00000001
92#define SA_RESTART 0x00000002
93#define SA_NOCLDSTOP 0x00000004
94#define SA_NODEFER 0x00000008
95#define SA_RESETHAND 0x00000010
96#define SA_NOCLDWAIT 0x00000020
97#define SA_SIGINFO 0x00000040
98
99#define SA_ONESHOT SA_RESETHAND
100#define SA_NOMASK SA_NODEFER
101#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */
102
103/*
104 * sigaltstack controls
105 */
106#define SS_ONSTACK 1
107#define SS_DISABLE 2
108
109#define MINSIGSTKSZ 4096
110#define SIGSTKSZ 16384
111
112
113#ifdef __KERNEL__
114/*
115 * These values of sa_flags are used only by the kernel as part of the
116 * irq handling routines.
117 *
118 * SA_INTERRUPT is also used by the irq handling routines.
119 * SA_SHIRQ is for shared interrupt support on PCI and EISA.
120 */
121#define SA_PROBE SA_ONESHOT
122#define SA_SAMPLE_RANDOM SA_RESTART
123#define SA_SHIRQ 0x40000000
124#endif
125
126#define SIG_BLOCK 1 /* for blocking signals */
127#define SIG_UNBLOCK 2 /* for unblocking signals */
128#define SIG_SETMASK 3 /* for setting the signal mask */
129
130/* Type of a signal handler. */
131typedef void __signalfn_t(int);
132typedef __signalfn_t __user *__sighandler_t;
133
134typedef void __restorefn_t(void);
135typedef __restorefn_t __user *__sigrestore_t;
136
137#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
138#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
139#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */
140
141#ifdef __KERNEL__
/* OSF/1-compatible sigaction layout, used by the osf_* signal syscalls. */
struct osf_sigaction {
	__sighandler_t sa_handler;
	old_sigset_t sa_mask;	/* old-style (single-word) mask */
	int sa_flags;
};

/* Kernel-internal sigaction: full sigset_t kept last so the struct can
   grow without moving the other fields. */
struct sigaction {
	__sighandler_t sa_handler;
	unsigned long sa_flags;
	sigset_t sa_mask;	/* mask last for extensibility */
};

/* sigaction plus the userspace sigreturn trampoline pointer. */
struct k_sigaction {
	struct sigaction sa;
	__sigrestore_t ka_restorer;	/* restorer registered by libc */
};
158#else
159/* Here we must cater to libcs that poke about in kernel headers. */
160
/* Userspace view of sigaction: classic handler and SA_SIGINFO-style
   handler overlay the same storage via the union. */
struct sigaction {
	union {
		__sighandler_t _sa_handler;
		void (*_sa_sigaction)(int, struct siginfo *, void *);
	} _u;
	sigset_t sa_mask;
	int sa_flags;
};
169
170#define sa_handler _u._sa_handler
171#define sa_sigaction _u._sa_sigaction
172
173#endif /* __KERNEL__ */
174
/* Alternate signal stack descriptor for sigaltstack(2). */
typedef struct sigaltstack {
	void __user *ss_sp;	/* stack base */
	int ss_flags;		/* SS_ONSTACK / SS_DISABLE (defined above) */
	size_t ss_size;		/* stack size in bytes */
} stack_t;
180
181/* sigstack(2) is deprecated, and will be withdrawn in a future version
182 of the X/Open CAE Specification. Use sigaltstack instead. It is only
183 implemented here for OSF/1 compatibility. */
184
/* Deprecated sigstack(2) descriptor, kept only for OSF/1 compatibility
   (see the note above). */
struct sigstack {
	void __user *ss_sp;	/* stack pointer */
	int ss_onstack;		/* non-zero while running on this stack */
};
189
190#ifdef __KERNEL__
191#include <asm/sigcontext.h>
192
193#define ptrace_signal_deliver(regs, cookie) do { } while (0)
194
195#endif
196
197#endif
diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h
new file mode 100644
index 000000000000..cbc173ae45aa
--- /dev/null
+++ b/include/asm-alpha/smp.h
@@ -0,0 +1,63 @@
1#ifndef __ASM_SMP_H
2#define __ASM_SMP_H
3
4#include <linux/config.h>
5#include <linux/threads.h>
6#include <linux/cpumask.h>
7#include <linux/bitops.h>
8#include <asm/pal.h>
9
10/* HACK: Cabrio WHAMI return value is bogus if more than 8 bits used.. :-( */
11
/* Ask PALcode which CPU we are running on via the WHAMI call.
   Result is truncated to 8 bits -- see the Cabrio hack note above. */
static __inline__ unsigned char
__hard_smp_processor_id(void)
{
	register unsigned char __r0 __asm__("$0");	/* PAL result arrives in $0 */
	__asm__ __volatile__(
		"call_pal %1 #whami"
		: "=r"(__r0)
		:"i" (PAL_whami)
		: "$1", "$22", "$23", "$24", "$25");	/* regs the PAL call may clobber */
	return __r0;
}
23
24#ifdef CONFIG_SMP
25
26#include <asm/irq.h>
27
/* Per-CPU bookkeeping; one entry per possible CPU (see cpu_data below).
   Aligned to 64 bytes, presumably so entries sit in separate cache
   lines -- confirm against target cache-line size. */
struct cpuinfo_alpha {
	unsigned long loops_per_jiffy;	/* per-CPU delay-loop calibration */
	unsigned long last_asn;		/* last address-space number used */
	int need_new_asn;		/* ASN rollover bookkeeping */
	int asn_lock;
	unsigned long ipi_count;	/* inter-processor interrupts received */
	unsigned long prof_multiplier;	/* profiling tick state */
	unsigned long prof_counter;
	unsigned char mcheck_expected;	/* machine-check handshake flags */
	unsigned char mcheck_taken;
	unsigned char mcheck_extra;
} __attribute__((aligned(64)));
40
41extern struct cpuinfo_alpha cpu_data[NR_CPUS];
42
43#define PROC_CHANGE_PENALTY 20
44
45#define hard_smp_processor_id() __hard_smp_processor_id()
46#define smp_processor_id() (current_thread_info()->cpu)
47
48extern cpumask_t cpu_present_mask;
49extern cpumask_t cpu_online_map;
50extern int smp_num_cpus;
51#define cpu_possible_map cpu_present_mask
52
53int smp_call_function_on_cpu(void (*func) (void *info), void *info,int retry, int wait, cpumask_t cpu);
54
55#else /* CONFIG_SMP */
56
57#define smp_call_function_on_cpu(func,info,retry,wait,cpu) ({ 0; })
58
59#endif /* CONFIG_SMP */
60
61#define NO_PROC_ID (-1)
62
63#endif
diff --git a/include/asm-alpha/socket.h b/include/asm-alpha/socket.h
new file mode 100644
index 000000000000..d00259d3dc78
--- /dev/null
+++ b/include/asm-alpha/socket.h
@@ -0,0 +1,58 @@
1#ifndef _ASM_SOCKET_H
2#define _ASM_SOCKET_H
3
4#include <asm/sockios.h>
5
6/* For setsockopt(2) */
7/*
8 * Note: we only bother about making the SOL_SOCKET options
9 * same as OSF/1, as that's all that "normal" programs are
10 * likely to set. We don't necessarily want to be binary
11 * compatible with _everything_.
12 */
13#define SOL_SOCKET 0xffff
14
15#define SO_DEBUG 0x0001
16#define SO_REUSEADDR 0x0004
17#define SO_KEEPALIVE 0x0008
18#define SO_DONTROUTE 0x0010
19#define SO_BROADCAST 0x0020
20#define SO_LINGER 0x0080
21#define SO_OOBINLINE 0x0100
22/* To add :#define SO_REUSEPORT 0x0200 */
23
24#define SO_TYPE 0x1008
25#define SO_ERROR 0x1007
26#define SO_SNDBUF 0x1001
27#define SO_RCVBUF 0x1002
28#define SO_RCVLOWAT 0x1010
29#define SO_SNDLOWAT 0x1011
30#define SO_RCVTIMEO 0x1012
31#define SO_SNDTIMEO 0x1013
32#define SO_ACCEPTCONN 0x1014
33
34/* linux-specific, might as well be the same as on i386 */
35#define SO_NO_CHECK 11
36#define SO_PRIORITY 12
37#define SO_BSDCOMPAT 14
38
39#define SO_PASSCRED 17
40#define SO_PEERCRED 18
41#define SO_BINDTODEVICE 25
42
43/* Socket filtering */
44#define SO_ATTACH_FILTER 26
45#define SO_DETACH_FILTER 27
46
47#define SO_PEERNAME 28
48#define SO_TIMESTAMP 29
49#define SCM_TIMESTAMP SO_TIMESTAMP
50
51#define SO_PEERSEC 30
52
53/* Security levels - as per NRL IPv6 - don't actually do anything */
54#define SO_SECURITY_AUTHENTICATION 19
55#define SO_SECURITY_ENCRYPTION_TRANSPORT 20
56#define SO_SECURITY_ENCRYPTION_NETWORK 21
57
58#endif /* _ASM_SOCKET_H */
diff --git a/include/asm-alpha/sockios.h b/include/asm-alpha/sockios.h
new file mode 100644
index 000000000000..e4961a740e5f
--- /dev/null
+++ b/include/asm-alpha/sockios.h
@@ -0,0 +1,15 @@
1#ifndef _ASM_ALPHA_SOCKIOS_H
2#define _ASM_ALPHA_SOCKIOS_H
3
4/* Socket-level I/O control calls. */
5
6#define FIOGETOWN _IOR('f', 123, int)
7#define FIOSETOWN _IOW('f', 124, int)
8
9#define SIOCATMARK _IOR('s', 7, int)
10#define SIOCSPGRP _IOW('s', 8, pid_t)
11#define SIOCGPGRP _IOR('s', 9, pid_t)
12
13#define SIOCGSTAMP 0x8906 /* Get stamp - linux-specific */
14
15#endif /* _ASM_ALPHA_SOCKIOS_H */
diff --git a/include/asm-alpha/spinlock.h b/include/asm-alpha/spinlock.h
new file mode 100644
index 000000000000..80780dba9986
--- /dev/null
+++ b/include/asm-alpha/spinlock.h
@@ -0,0 +1,212 @@
1#ifndef _ALPHA_SPINLOCK_H
2#define _ALPHA_SPINLOCK_H
3
4#include <linux/config.h>
5#include <asm/system.h>
6#include <linux/kernel.h>
7#include <asm/current.h>
8
9
10/*
11 * Simple spin lock operations. There are two variants, one clears IRQ's
12 * on the local processor, one does not.
13 *
14 * We make no fairness assumptions. They have a cost.
15 */
16
typedef struct {
	volatile unsigned int lock;	/* 0 = unlocked, non-zero = held */
#ifdef CONFIG_DEBUG_SPINLOCK
	int on_cpu;			/* CPU currently holding the lock */
	int line_no;			/* where it was last taken */
	void *previous;			/* previous lock-site, for diagnostics */
	struct task_struct * task;	/* task holding the lock */
	const char *base_file;		/* file of the last acquisition */
#endif
} spinlock_t;
27
28#ifdef CONFIG_DEBUG_SPINLOCK
29#define SPIN_LOCK_UNLOCKED (spinlock_t){ 0, -1, 0, NULL, NULL, NULL }
30#else
31#define SPIN_LOCK_UNLOCKED (spinlock_t){ 0 }
32#endif
33
34#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
35#define spin_is_locked(x) ((x)->lock != 0)
36#define spin_unlock_wait(x) do { barrier(); } while ((x)->lock)
37
38#ifdef CONFIG_DEBUG_SPINLOCK
39extern void _raw_spin_unlock(spinlock_t * lock);
40extern void debug_spin_lock(spinlock_t * lock, const char *, int);
41extern int debug_spin_trylock(spinlock_t * lock, const char *, int);
42#define _raw_spin_lock(LOCK) \
43 debug_spin_lock(LOCK, __BASE_FILE__, __LINE__)
44#define _raw_spin_trylock(LOCK) \
45 debug_spin_trylock(LOCK, __BASE_FILE__, __LINE__)
46#else
/* Release the lock: barrier first so all accesses inside the critical
   section are visible before the lock word is cleared. */
static inline void _raw_spin_unlock(spinlock_t * lock)
{
	mb();
	lock->lock = 0;
}
52
/* Acquire the lock with a load-locked/store-conditional loop.  While the
   lock is held we spin on a plain ldl in .subsection 2 (out of line) so
   the hot path stays straight-line code. */
static inline void _raw_spin_lock(spinlock_t * lock)
{
	long tmp;

	__asm__ __volatile__(
	"1: ldl_l %0,%1\n"	/* load-locked the lock word */
	" bne %0,2f\n"		/* non-zero -> held, go spin */
	" lda %0,1\n"
	" stl_c %0,%1\n"	/* try to store 1; %0 becomes 0 on failure */
	" beq %0,2f\n"		/* lost the reservation -> spin */
	" mb\n"			/* acquire barrier */
	".subsection 2\n"
	"2: ldl %0,%1\n"	/* read-only spin until it looks free */
	" bne %0,2b\n"
	" br 1b\n"		/* then retry the locked sequence */
	".previous"
	: "=&r" (tmp), "=m" (lock->lock)
	: "m"(lock->lock) : "memory");
}
72
73static inline int _raw_spin_trylock(spinlock_t *lock)
74{
75 return !test_and_set_bit(0, &lock->lock);
76}
77#endif /* CONFIG_DEBUG_SPINLOCK */
78
79#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
80
81/***********************************************************/
82
typedef struct {
	/* Bit 0 set while a writer holds the lock; each reader subtracts
	   2, so any reader activity makes the word non-zero (see the
	   lock/unlock asm below). */
	volatile unsigned int lock;
} rwlock_t;
86
87#define RW_LOCK_UNLOCKED (rwlock_t){ 0 }
88
89#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
90
91static inline int read_can_lock(rwlock_t *lock)
92{
93 return (lock->lock & 1) == 0;
94}
95
96static inline int write_can_lock(rwlock_t *lock)
97{
98 return lock->lock == 0;
99}
100
101#ifdef CONFIG_DEBUG_RWLOCK
102extern void _raw_write_lock(rwlock_t * lock);
103extern void _raw_read_lock(rwlock_t * lock);
104#else
/* Writer lock: ll/sc loop that stores 1 only when the word is fully
   idle; otherwise spins read-only in .subsection 2. */
static inline void _raw_write_lock(rwlock_t * lock)
{
	long regx;

	__asm__ __volatile__(
	"1: ldl_l %1,%0\n"	/* load-locked the lock word */
	" bne %1,6f\n"		/* readers or a writer present -> spin */
	" lda %1,1\n"
	" stl_c %1,%0\n"	/* claim the writer bit; 0 on SC failure */
	" beq %1,6f\n"
	" mb\n"			/* acquire barrier */
	".subsection 2\n"
	"6: ldl %1,%0\n"	/* out-of-line read-only spin */
	" bne %1,6b\n"
	" br 1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx)
	: "m" (*lock) : "memory");
}
124
/* Reader lock: each reader subtracts 2 from the lock word (keeping the
   low writer bit intact); blbs spins while a writer holds bit 0. */
static inline void _raw_read_lock(rwlock_t * lock)
{
	long regx;

	__asm__ __volatile__(
	"1: ldl_l %1,%0\n"	/* load-locked the lock word */
	" blbs %1,6f\n"		/* low bit set -> writer active, spin */
	" subl %1,2,%1\n"	/* account one more reader */
	" stl_c %1,%0\n"	/* 0 on SC failure */
	" beq %1,6f\n"
	" mb\n"			/* acquire barrier */
	".subsection 2\n"
	"6: ldl %1,%0\n"	/* out-of-line read-only spin */
	" blbs %1,6b\n"
	" br 1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx)
	: "m" (*lock) : "memory");
}
144#endif /* CONFIG_DEBUG_RWLOCK */
145
/* One-shot reader acquisition.  Returns non-zero on success: stl_c
   leaves 1 in %2 after a successful store, 0 after a failed one, and
   success stays 0 when a writer holds the low bit. */
static inline int _raw_read_trylock(rwlock_t * lock)
{
	long regx;
	int success;

	__asm__ __volatile__(
	"1: ldl_l %1,%0\n"	/* load-locked the lock word */
	" lda %2,0\n"		/* default: failure */
	" blbs %1,2f\n"		/* writer holds it -> give up */
	" subl %1,2,%2\n"	/* new value with one more reader */
	" stl_c %2,%0\n"	/* %2 becomes SC success flag (0/1) */
	" beq %2,6f\n"		/* reservation lost -> retry */
	"2: mb\n"		/* acquire barrier */
	".subsection 2\n"
	"6: br 1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx), "=&r" (success)
	: "m" (*lock) : "memory");

	return success;
}
167
/* One-shot writer acquisition.  Succeeds only when the lock word is
   completely idle; returns non-zero on success (SC flag in %2). */
static inline int _raw_write_trylock(rwlock_t * lock)
{
	long regx;
	int success;

	__asm__ __volatile__(
	"1: ldl_l %1,%0\n"	/* load-locked the lock word */
	" lda %2,0\n"		/* default: failure */
	" bne %1,2f\n"		/* readers or writer present -> give up */
	" lda %2,1\n"
	" stl_c %2,%0\n"	/* store 1; %2 becomes SC success flag */
	" beq %2,6f\n"		/* reservation lost -> retry */
	"2: mb\n"		/* acquire barrier */
	".subsection 2\n"
	"6: br 1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx), "=&r" (success)
	: "m" (*lock) : "memory");

	return success;
}
189
/* Writer release: barrier, then clear the whole word (a writer owns it
   exclusively, so a plain store is sufficient). */
static inline void _raw_write_unlock(rwlock_t * lock)
{
	mb();
	lock->lock = 0;
}
195
/* Reader release: add the 2 back atomically (other readers may be
   updating the word concurrently, hence the ll/sc loop). */
static inline void _raw_read_unlock(rwlock_t * lock)
{
	long regx;
	__asm__ __volatile__(
	" mb\n"			/* release barrier before dropping the lock */
	"1: ldl_l %1,%0\n"
	" addl %1,2,%1\n"	/* undo this reader's subl */
	" stl_c %1,%0\n"
	" beq %1,6f\n"		/* SC failed -> retry out of line */
	".subsection 2\n"
	"6: br 1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx)
	: "m" (*lock) : "memory");
}
211
212#endif /* _ALPHA_SPINLOCK_H */
diff --git a/include/asm-alpha/stat.h b/include/asm-alpha/stat.h
new file mode 100644
index 000000000000..07ad3e6b3f3e
--- /dev/null
+++ b/include/asm-alpha/stat.h
@@ -0,0 +1,48 @@
1#ifndef _ALPHA_STAT_H
2#define _ALPHA_STAT_H
3
/* Legacy stat layout: 32-bit dev/ino/mode/nlink/uid/gid/rdev with
   64-bit size and (second-resolution) times.  st_flags/st_gen are
   presumably OSF/1-style extensions -- confirm against the syscall
   fill-in code before relying on them. */
struct stat {
	unsigned int st_dev;
	unsigned int st_ino;
	unsigned int st_mode;
	unsigned int st_nlink;
	unsigned int st_uid;
	unsigned int st_gid;
	unsigned int st_rdev;
	long st_size;
	unsigned long st_atime;
	unsigned long st_mtime;
	unsigned long st_ctime;
	unsigned int st_blksize;
	unsigned int st_blocks;
	unsigned int st_flags;
	unsigned int st_gen;
};
21
22/* The stat64 structure increases the size of dev_t, blkcnt_t, adds
23 nanosecond resolution times, and padding for expansion. */
24
struct stat64 {
	unsigned long st_dev;		/* widened to 64 bits vs. struct stat */
	unsigned long st_ino;
	unsigned long st_rdev;
	long st_size;
	unsigned long st_blocks;	/* widened block count */

	unsigned int st_mode;
	unsigned int st_uid;
	unsigned int st_gid;
	unsigned int st_blksize;
	unsigned int st_nlink;
	unsigned int __pad0;		/* keeps the times 8-byte aligned */

	unsigned long st_atime;
	unsigned long st_atime_nsec;	/* nanosecond part of each timestamp */
	unsigned long st_mtime;
	unsigned long st_mtime_nsec;
	unsigned long st_ctime;
	unsigned long st_ctime_nsec;
	long __unused[3];		/* reserved for future expansion */
};
47
48#endif
diff --git a/include/asm-alpha/statfs.h b/include/asm-alpha/statfs.h
new file mode 100644
index 000000000000..ad15830baefe
--- /dev/null
+++ b/include/asm-alpha/statfs.h
@@ -0,0 +1,6 @@
1#ifndef _ALPHA_STATFS_H
2#define _ALPHA_STATFS_H
3
4#include <asm-generic/statfs.h>
5
6#endif
diff --git a/include/asm-alpha/string.h b/include/asm-alpha/string.h
new file mode 100644
index 000000000000..9e44fea669bf
--- /dev/null
+++ b/include/asm-alpha/string.h
@@ -0,0 +1,68 @@
1#ifndef __ALPHA_STRING_H__
2#define __ALPHA_STRING_H__
3
4#ifdef __KERNEL__
5
6/*
7 * GCC of any recent vintage doesn't do stupid things with bcopy.
8 * EGCS 1.1 knows all about expanding memcpy inline, others don't.
9 *
10 * Similarly for a memset with data = 0.
11 */
12
13#define __HAVE_ARCH_MEMCPY
14extern void * memcpy(void *, const void *, size_t);
15#define __HAVE_ARCH_MEMMOVE
16extern void * memmove(void *, const void *, size_t);
17
18/* For backward compatibility with modules. Unused otherwise. */
19extern void * __memcpy(void *, const void *, size_t);
20
21#define memcpy __builtin_memcpy
22
23#define __HAVE_ARCH_MEMSET
24extern void * __constant_c_memset(void *, unsigned long, size_t);
25extern void * __memset(void *, int, size_t);
26extern void * memset(void *, int, size_t);
27
28#define memset(s, c, n) \
29(__builtin_constant_p(c) \
30 ? (__builtin_constant_p(n) && (c) == 0 \
31 ? __builtin_memset((s),0,(n)) \
32 : __constant_c_memset((s),0x0101010101010101UL*(unsigned char)(c),(n))) \
33 : __memset((s),(c),(n)))
34
35#define __HAVE_ARCH_STRCPY
36extern char * strcpy(char *,const char *);
37#define __HAVE_ARCH_STRNCPY
38extern char * strncpy(char *, const char *, size_t);
39#define __HAVE_ARCH_STRCAT
40extern char * strcat(char *, const char *);
41#define __HAVE_ARCH_STRNCAT
42extern char * strncat(char *, const char *, size_t);
43#define __HAVE_ARCH_STRCHR
44extern char * strchr(const char *,int);
45#define __HAVE_ARCH_STRRCHR
46extern char * strrchr(const char *,int);
47#define __HAVE_ARCH_STRLEN
48extern size_t strlen(const char *);
49#define __HAVE_ARCH_MEMCHR
50extern void * memchr(const void *, int, size_t);
51
52/* The following routine is like memset except that it writes 16-bit
53 aligned values. The DEST and COUNT parameters must be even for
54 correct operation. */
55
56#define __HAVE_ARCH_MEMSETW
57extern void * __memsetw(void *dest, unsigned short, size_t count);
58
59#define memsetw(s, c, n) \
60(__builtin_constant_p(c) \
61 ? __constant_c_memset((s),0x0001000100010001UL*(unsigned short)(c),(n)) \
62 : __memsetw((s),(c),(n)))
63
64extern int strcasecmp(const char *, const char *);
65
66#endif /* __KERNEL__ */
67
68#endif /* __ALPHA_STRING_H__ */
diff --git a/include/asm-alpha/suspend.h b/include/asm-alpha/suspend.h
new file mode 100644
index 000000000000..c7042d575851
--- /dev/null
+++ b/include/asm-alpha/suspend.h
@@ -0,0 +1,6 @@
1#ifndef __ALPHA_SUSPEND_H
2#define __ALPHA_SUSPEND_H
3
4/* Dummy include. */
5
6#endif /* __ALPHA_SUSPEND_H */
diff --git a/include/asm-alpha/sysinfo.h b/include/asm-alpha/sysinfo.h
new file mode 100644
index 000000000000..086aba284df2
--- /dev/null
+++ b/include/asm-alpha/sysinfo.h
@@ -0,0 +1,39 @@
1/*
2 * include/asm-alpha/sysinfo.h
3 */
4
5#ifndef __ASM_ALPHA_SYSINFO_H
6#define __ASM_ALPHA_SYSINFO_H
7
8/* This defines the subset of the OSF/1 getsysinfo/setsysinfo calls
9 that we support. */
10
11#define GSI_UACPROC 8
12#define GSI_IEEE_FP_CONTROL 45
13#define GSI_IEEE_STATE_AT_SIGNAL 46
14#define GSI_PROC_TYPE 60
15#define GSI_GET_HWRPB 101
16
17#define SSI_NVPAIRS 1
18#define SSI_IEEE_FP_CONTROL 14
19#define SSI_IEEE_STATE_AT_SIGNAL 15
20#define SSI_IEEE_IGNORE_STATE_AT_SIGNAL 16
21#define SSI_IEEE_RAISE_EXCEPTION 1001 /* linux specific */
22
23#define SSIN_UACPROC 6
24
25#define UAC_BITMASK 7
26#define UAC_NOPRINT 1
27#define UAC_NOFIX 2
28#define UAC_SIGBUS 4
29
30
31#ifdef __KERNEL__
32
33/* This is the shift that is applied to the UAC bits as stored in the
34 per-thread flags. See thread_info.h. */
35#define UAC_SHIFT 6
36
37#endif
38
39#endif /* __ASM_ALPHA_SYSINFO_H */
diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h
new file mode 100644
index 000000000000..c08ce970ff8c
--- /dev/null
+++ b/include/asm-alpha/system.h
@@ -0,0 +1,626 @@
1#ifndef __ALPHA_SYSTEM_H
2#define __ALPHA_SYSTEM_H
3
4#include <linux/config.h>
5#include <asm/pal.h>
6#include <asm/page.h>
7
8/*
9 * System defines.. Note that this is included both from .c and .S
10 * files, so it does only defines, not any C code.
11 */
12
13/*
14 * We leave one page for the initial stack page, and one page for
15 * the initial process structure. Also, the console eats 3 MB for
16 * the initial bootloader (one of which we can reclaim later).
17 */
18#define BOOT_PCB 0x20000000
19#define BOOT_ADDR 0x20000000
20/* Remove when official MILO sources have ELF support: */
21#define BOOT_SIZE (16*1024)
22
23#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
24#define KERNEL_START_PHYS 0x300000 /* Old bootloaders hardcoded this. */
25#else
26#define KERNEL_START_PHYS 0x1000000 /* required: Wildfire/Titan/Marvel */
27#endif
28
29#define KERNEL_START (PAGE_OFFSET+KERNEL_START_PHYS)
30#define SWAPPER_PGD KERNEL_START
31#define INIT_STACK (PAGE_OFFSET+KERNEL_START_PHYS+0x02000)
32#define EMPTY_PGT (PAGE_OFFSET+KERNEL_START_PHYS+0x04000)
33#define EMPTY_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x08000)
34#define ZERO_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x0A000)
35
36#define START_ADDR (PAGE_OFFSET+KERNEL_START_PHYS+0x10000)
37
38/*
39 * This is setup by the secondary bootstrap loader. Because
40 * the zero page is zeroed out as soon as the vm system is
41 * initialized, we need to copy things out into a more permanent
42 * place.
43 */
44#define PARAM ZERO_PGE
45#define COMMAND_LINE ((char*)(PARAM + 0x0000))
46#define INITRD_START (*(unsigned long *) (PARAM+0x100))
47#define INITRD_SIZE (*(unsigned long *) (PARAM+0x108))
48
49#ifndef __ASSEMBLY__
50#include <linux/kernel.h>
51
52/*
53 * This is the logout header that should be common to all platforms
54 * (assuming they are running OSF/1 PALcode, I guess).
55 */
struct el_common {
	unsigned int size; /* size in bytes of logout area */
	/* NOTE(review): bitfield order here must match the PALcode logout
	   frame layout; do not reorder. */
	unsigned int sbz1 : 30; /* should be zero */
	unsigned int err2 : 1; /* second error */
	unsigned int retry : 1; /* retry flag */
	unsigned int proc_offset; /* processor-specific offset */
	unsigned int sys_offset; /* system-specific offset */
	unsigned int code; /* machine check code */
	unsigned int frame_rev; /* frame revision */
};
66
67/* Machine Check Frame for uncorrectable errors (Large format)
68 * --- This is used to log uncorrectable errors such as
69 * double bit ECC errors.
70 * --- These errors are detected by both processor and systems.
71 */
/* EV5 (21164) uncorrectable machine-check logout frame.  The layout is
   dictated by the PALcode; field comments are the original author's. */
struct el_common_EV5_uncorrectable_mcheck {
	unsigned long shadow[8]; /* Shadow reg. 8-14, 25 */
	unsigned long paltemp[24]; /* PAL TEMP REGS. */
	unsigned long exc_addr; /* Address of excepting instruction*/
	unsigned long exc_sum; /* Summary of arithmetic traps. */
	unsigned long exc_mask; /* Exception mask (from exc_sum). */
	unsigned long pal_base; /* Base address for PALcode. */
	unsigned long isr; /* Interrupt Status Reg. */
	unsigned long icsr; /* CURRENT SETUP OF EV5 IBOX */
	unsigned long ic_perr_stat; /* I-CACHE Reg. <11> set Data parity
					 <12> set TAG parity*/
	unsigned long dc_perr_stat; /* D-CACHE error Reg. Bits set to 1:
					 <2> Data error in bank 0
					 <3> Data error in bank 1
					 <4> Tag error in bank 0
					 <5> Tag error in bank 1 */
	unsigned long va; /* Effective VA of fault or miss. */
	unsigned long mm_stat; /* Holds the reason for D-stream
					 fault or D-cache parity errors */
	unsigned long sc_addr; /* Address that was being accessed
					 when EV5 detected Secondary cache
					 failure. */
	unsigned long sc_stat; /* Helps determine if the error was
					 TAG/Data parity(Secondary Cache)*/
	unsigned long bc_tag_addr; /* Contents of EV5 BC_TAG_ADDR */
	unsigned long ei_addr; /* Physical address of any transfer
					 that is logged in EV5 EI_STAT */
	unsigned long fill_syndrome; /* For correcting ECC errors. */
	unsigned long ei_stat; /* Helps identify reason of any
					 processor uncorrectable error
					 at its external interface. */
	unsigned long ld_lock; /* Contents of EV5 LD_LOCK register*/
};
105
/* EV6 (21264) machine-check logout frame; layout fixed by PALcode. */
struct el_common_EV6_mcheck {
	unsigned int FrameSize; /* Bytes, including this field */
	unsigned int FrameFlags; /* <31> = Retry, <30> = Second Error */
	unsigned int CpuOffset; /* Offset to CPU-specific info */
	unsigned int SystemOffset; /* Offset to system-specific info */
	unsigned int MCHK_Code; /* machine-check reason code */
	unsigned int MCHK_Frame_Rev; /* frame format revision */
	unsigned long I_STAT; /* EV6 Internal Processor Registers */
	unsigned long DC_STAT; /* (See the 21264 Spec) */
	unsigned long C_ADDR;
	unsigned long DC1_SYNDROME;
	unsigned long DC0_SYNDROME;
	unsigned long C_STAT;
	unsigned long C_STS;
	unsigned long MM_STAT;
	unsigned long EXC_ADDR;
	unsigned long IER_CM;
	unsigned long ISUM;
	unsigned long RESERVED0;
	unsigned long PAL_BASE;
	unsigned long I_CTL;
	unsigned long PCTX;
};
129
130extern void halt(void) __attribute__((noreturn));
131#define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt))
132
133#define switch_to(P,N,L) \
134 do { \
135 (L) = alpha_switch_to(virt_to_phys(&(N)->thread_info->pcb), (P)); \
136 check_mmu_context(); \
137 } while (0)
138
139struct task_struct;
140extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*);
141
142#define mb() \
143__asm__ __volatile__("mb": : :"memory")
144
145#define rmb() \
146__asm__ __volatile__("mb": : :"memory")
147
148#define wmb() \
149__asm__ __volatile__("wmb": : :"memory")
150
151#define read_barrier_depends() \
152__asm__ __volatile__("mb": : :"memory")
153
154#ifdef CONFIG_SMP
155#define smp_mb() mb()
156#define smp_rmb() rmb()
157#define smp_wmb() wmb()
158#define smp_read_barrier_depends() read_barrier_depends()
159#else
160#define smp_mb() barrier()
161#define smp_rmb() barrier()
162#define smp_wmb() barrier()
163#define smp_read_barrier_depends() barrier()
164#endif
165
166#define set_mb(var, value) \
167do { var = value; mb(); } while (0)
168
169#define set_wmb(var, value) \
170do { var = value; wmb(); } while (0)
171
172#define imb() \
173__asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
174
175#define draina() \
176__asm__ __volatile__ ("call_pal %0 #draina" : : "i" (PAL_draina) : "memory")
177
178enum implver_enum {
179 IMPLVER_EV4,
180 IMPLVER_EV5,
181 IMPLVER_EV6
182};
183
184#ifdef CONFIG_ALPHA_GENERIC
185#define implver() \
186({ unsigned long __implver; \
187 __asm__ ("implver %0" : "=r"(__implver)); \
188 (enum implver_enum) __implver; })
189#else
190/* Try to eliminate some dead code. */
191#ifdef CONFIG_ALPHA_EV4
192#define implver() IMPLVER_EV4
193#endif
194#ifdef CONFIG_ALPHA_EV5
195#define implver() IMPLVER_EV5
196#endif
197#if defined(CONFIG_ALPHA_EV6)
198#define implver() IMPLVER_EV6
199#endif
200#endif
201
202enum amask_enum {
203 AMASK_BWX = (1UL << 0),
204 AMASK_FIX = (1UL << 1),
205 AMASK_CIX = (1UL << 2),
206 AMASK_MAX = (1UL << 8),
207 AMASK_PRECISE_TRAP = (1UL << 9),
208};
209
210#define amask(mask) \
211({ unsigned long __amask, __input = (mask); \
212 __asm__ ("amask %1,%0" : "=r"(__amask) : "rI"(__input)); \
213 __amask; })
214
215#define __CALL_PAL_R0(NAME, TYPE) \
216static inline TYPE NAME(void) \
217{ \
218 register TYPE __r0 __asm__("$0"); \
219 __asm__ __volatile__( \
220 "call_pal %1 # " #NAME \
221 :"=r" (__r0) \
222 :"i" (PAL_ ## NAME) \
223 :"$1", "$16", "$22", "$23", "$24", "$25"); \
224 return __r0; \
225}
226
227#define __CALL_PAL_W1(NAME, TYPE0) \
228static inline void NAME(TYPE0 arg0) \
229{ \
230 register TYPE0 __r16 __asm__("$16") = arg0; \
231 __asm__ __volatile__( \
232 "call_pal %1 # "#NAME \
233 : "=r"(__r16) \
234 : "i"(PAL_ ## NAME), "0"(__r16) \
235 : "$1", "$22", "$23", "$24", "$25"); \
236}
237
238#define __CALL_PAL_W2(NAME, TYPE0, TYPE1) \
239static inline void NAME(TYPE0 arg0, TYPE1 arg1) \
240{ \
241 register TYPE0 __r16 __asm__("$16") = arg0; \
242 register TYPE1 __r17 __asm__("$17") = arg1; \
243 __asm__ __volatile__( \
244 "call_pal %2 # "#NAME \
245 : "=r"(__r16), "=r"(__r17) \
246 : "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17) \
247 : "$1", "$22", "$23", "$24", "$25"); \
248}
249
250#define __CALL_PAL_RW1(NAME, RTYPE, TYPE0) \
251static inline RTYPE NAME(TYPE0 arg0) \
252{ \
253 register RTYPE __r0 __asm__("$0"); \
254 register TYPE0 __r16 __asm__("$16") = arg0; \
255 __asm__ __volatile__( \
256 "call_pal %2 # "#NAME \
257 : "=r"(__r16), "=r"(__r0) \
258 : "i"(PAL_ ## NAME), "0"(__r16) \
259 : "$1", "$22", "$23", "$24", "$25"); \
260 return __r0; \
261}
262
263#define __CALL_PAL_RW2(NAME, RTYPE, TYPE0, TYPE1) \
264static inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1) \
265{ \
266 register RTYPE __r0 __asm__("$0"); \
267 register TYPE0 __r16 __asm__("$16") = arg0; \
268 register TYPE1 __r17 __asm__("$17") = arg1; \
269 __asm__ __volatile__( \
270 "call_pal %3 # "#NAME \
271 : "=r"(__r16), "=r"(__r17), "=r"(__r0) \
272 : "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17) \
273 : "$1", "$22", "$23", "$24", "$25"); \
274 return __r0; \
275}
276
277__CALL_PAL_W1(cflush, unsigned long);
278__CALL_PAL_R0(rdmces, unsigned long);
279__CALL_PAL_R0(rdps, unsigned long);
280__CALL_PAL_R0(rdusp, unsigned long);
281__CALL_PAL_RW1(swpipl, unsigned long, unsigned long);
282__CALL_PAL_R0(whami, unsigned long);
283__CALL_PAL_W2(wrent, void*, unsigned long);
284__CALL_PAL_W1(wripir, unsigned long);
285__CALL_PAL_W1(wrkgp, unsigned long);
286__CALL_PAL_W1(wrmces, unsigned long);
287__CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long);
288__CALL_PAL_W1(wrusp, unsigned long);
289__CALL_PAL_W1(wrvptptr, unsigned long);
290
291#define IPL_MIN 0
292#define IPL_SW0 1
293#define IPL_SW1 2
294#define IPL_DEV0 3
295#define IPL_DEV1 4
296#define IPL_TIMER 5
297#define IPL_PERF 6
298#define IPL_POWERFAIL 6
299#define IPL_MCHECK 7
300#define IPL_MAX 7
301
302#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK
303#undef IPL_MIN
304#define IPL_MIN __min_ipl
305extern int __min_ipl;
306#endif
307
308#define getipl() (rdps() & 7)
309#define setipl(ipl) ((void) swpipl(ipl))
310
311#define local_irq_disable() do { setipl(IPL_MAX); barrier(); } while(0)
312#define local_irq_enable() do { barrier(); setipl(IPL_MIN); } while(0)
313#define local_save_flags(flags) ((flags) = rdps())
314#define local_irq_save(flags) do { (flags) = swpipl(IPL_MAX); barrier(); } while(0)
315#define local_irq_restore(flags) do { barrier(); setipl(flags); barrier(); } while(0)
316
317#define irqs_disabled() (getipl() == IPL_MAX)
318
319/*
320 * TB routines..
321 */
322#define __tbi(nr,arg,arg1...) \
323({ \
324 register unsigned long __r16 __asm__("$16") = (nr); \
325 register unsigned long __r17 __asm__("$17"); arg; \
326 __asm__ __volatile__( \
327 "call_pal %3 #__tbi" \
328 :"=r" (__r16),"=r" (__r17) \
329 :"0" (__r16),"i" (PAL_tbi) ,##arg1 \
330 :"$0", "$1", "$22", "$23", "$24", "$25"); \
331})
332
333#define tbi(x,y) __tbi(x,__r17=(y),"1" (__r17))
334#define tbisi(x) __tbi(1,__r17=(x),"1" (__r17))
335#define tbisd(x) __tbi(2,__r17=(x),"1" (__r17))
336#define tbis(x) __tbi(3,__r17=(x),"1" (__r17))
337#define tbiap() __tbi(-1, /* no second argument */)
338#define tbia() __tbi(-2, /* no second argument */)
339
340/*
341 * Atomic exchange.
342 * Since it can be used to implement critical sections
343 * it must clobber "memory" (also for interrupts in UP).
344 */
345
/* Atomically exchange the byte at *m with VAL; returns the old byte.
   There is no byte-wide store-conditional, so this operates on the
   containing aligned quadword: insbl/mskbl splice the new byte in and
   stq_c retries (out of line) if the reservation is lost. */
static inline unsigned long
__xchg_u8(volatile char *m, unsigned long val)
{
	unsigned long ret, tmp, addr64;

	__asm__ __volatile__(
	" andnot %4,7,%3\n"	/* addr64 = m rounded down to a quadword */
	" insbl %1,%4,%1\n"	/* position the new byte within the quad */
	"1: ldq_l %2,0(%3)\n"	/* load-locked the whole quadword */
	" extbl %2,%4,%0\n"	/* extract the old byte -> ret */
	" mskbl %2,%4,%2\n"	/* clear that byte's slot */
	" or %1,%2,%2\n"	/* merge in the new byte */
	" stq_c %2,0(%3)\n"	/* store-conditional; 0 on failure */
	" beq %2,2f\n"
#ifdef CONFIG_SMP
	" mb\n"			/* full barrier on SMP */
#endif
	".subsection 2\n"
	"2: br 1b\n"		/* out-of-line retry */
	".previous"
	: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
	: "r" ((long)m), "1" (val) : "memory");

	return ret;
}
371
/* 16-bit exchange; same quadword-splicing technique as __xchg_u8 but
   with the word-sized insert/extract/mask instructions. */
static inline unsigned long
__xchg_u16(volatile short *m, unsigned long val)
{
	unsigned long ret, tmp, addr64;

	__asm__ __volatile__(
	" andnot %4,7,%3\n"	/* addr64 = m rounded down to a quadword */
	" inswl %1,%4,%1\n"	/* position the new 16-bit word */
	"1: ldq_l %2,0(%3)\n"	/* load-locked the whole quadword */
	" extwl %2,%4,%0\n"	/* extract the old word -> ret */
	" mskwl %2,%4,%2\n"	/* clear that word's slot */
	" or %1,%2,%2\n"	/* merge in the new word */
	" stq_c %2,0(%3)\n"	/* store-conditional; 0 on failure */
	" beq %2,2f\n"
#ifdef CONFIG_SMP
	" mb\n"			/* full barrier on SMP */
#endif
	".subsection 2\n"
	"2: br 1b\n"		/* out-of-line retry */
	".previous"
	: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
	: "r" ((long)m), "1" (val) : "memory");

	return ret;
}
397
/*
 * Atomically exchange the 32-bit value at *m with 'val'; returns the
 * old value.  Direct ldl_l/stl_c loop -- "bis $31,x,y" is the
 * canonical Alpha register move ($31 always reads as zero).
 */
static inline unsigned long
__xchg_u32(volatile int *m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
	"1:	ldl_l	%0,%4\n"	/* val = old *m (locked load) */
	"	bis	$31,%3,%1\n"	/* dummy = new value */
	"	stl_c	%1,%2\n"
	"	beq	%1,2f\n"	/* store-conditional failed: retry */
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	: "=&r" (val), "=&r" (dummy), "=m" (*m)
	: "rI" (val), "m" (*m) : "memory");

	return val;
}
419
/*
 * Atomically exchange the 64-bit value at *m with 'val'; returns the
 * old value.  Identical to __xchg_u32 but with the quadword
 * ldq_l/stq_c pair.
 */
static inline unsigned long
__xchg_u64(volatile long *m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
	"1:	ldq_l	%0,%4\n"	/* val = old *m (locked load) */
	"	bis	$31,%3,%1\n"	/* dummy = new value */
	"	stq_c	%1,%2\n"
	"	beq	%1,2f\n"	/* store-conditional failed: retry */
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	: "=&r" (val), "=&r" (dummy), "=m" (*m)
	: "rI" (val), "m" (*m) : "memory");

	return val;
}
441
442/* This function doesn't exist, so you'll get a linker error
443 if something tries to do an invalid xchg(). */
444extern void __xchg_called_with_bad_pointer(void);
445
/*
 * Size dispatcher for xchg().  Routes to the width-specific helper;
 * any size other than 1/2/4/8 deliberately references the undefined
 * __xchg_called_with_bad_pointer() so the build fails at link time.
 */
static inline unsigned long
__xchg(volatile void *ptr, unsigned long x, int size)
{
	if (size == 1)
		return __xchg_u8(ptr, x);
	if (size == 2)
		return __xchg_u16(ptr, x);
	if (size == 4)
		return __xchg_u32(ptr, x);
	if (size == 8)
		return __xchg_u64(ptr, x);

	/* Invalid size: force a link error. */
	__xchg_called_with_bad_pointer();
	return x;
}
462
/*
 * Type-preserving front end: evaluates 'x' once, dispatches on
 * sizeof(*ptr), and casts the old value back to the pointee type.
 */
#define xchg(ptr,x)						     \
  ({								     \
     __typeof__(*(ptr)) _x_ = (x);				     \
     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
  })

/* Classic test-and-set: atomically store 1, return the previous value. */
#define tas(ptr) (xchg((ptr),1))
470
471
472/*
473 * Atomic compare and exchange. Compare OLD with MEM, if identical,
474 * store NEW in MEM. Return the initial value in MEM. Success is
475 * indicated by comparing RETURN with OLD.
476 *
477 * The memory barrier should be placed in SMP only when we actually
478 * make the change. If we don't change anything (so if the returned
479 * prev is equal to old) then we aren't acquiring anything new and
480 * we don't need any memory barrier as far I can tell.
481 */
482
483#define __HAVE_ARCH_CMPXCHG 1
484
/*
 * Byte compare-and-exchange: if the byte at *m equals 'old', store
 * 'new'; always return the previous byte.  Uses the same aligned-
 * quadword splicing as __xchg_u8.  On a compare miss we branch
 * straight to 2: -- no store and (per the comment above) no SMP
 * barrier.  A failed stq_c retries via the out-of-line 3:.
 */
static inline unsigned long
__cmpxchg_u8(volatile char *m, long old, long new)
{
	unsigned long prev, tmp, cmp, addr64;

	__asm__ __volatile__(
	"	andnot	%5,7,%4\n"	/* addr64 = containing aligned quadword */
	"	insbl	%1,%5,%1\n"	/* shift new byte into its lane */
	"1:	ldq_l	%2,0(%4)\n"
	"	extbl	%2,%5,%0\n"	/* prev = current byte */
	"	cmpeq	%0,%6,%3\n"
	"	beq	%3,2f\n"	/* prev != old: done, no store */
	"	mskbl	%2,%5,%2\n"
	"	or	%1,%2,%2\n"
	"	stq_c	%2,0(%4)\n"
	"	beq	%2,3f\n"	/* store-conditional failed: retry */
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
	: "r" ((long)m), "Ir" (old), "1" (new) : "memory");

	return prev;
}
513
/*
 * 16-bit compare-and-exchange: if the word at *m equals 'old', store
 * 'new'; always return the previous word.  Word-lane variant of
 * __cmpxchg_u8 (inswl/extwl/mskwl).
 */
static inline unsigned long
__cmpxchg_u16(volatile short *m, long old, long new)
{
	unsigned long prev, tmp, cmp, addr64;

	__asm__ __volatile__(
	"	andnot	%5,7,%4\n"	/* addr64 = containing aligned quadword */
	"	inswl	%1,%5,%1\n"	/* shift new word into its lane */
	"1:	ldq_l	%2,0(%4)\n"
	"	extwl	%2,%5,%0\n"	/* prev = current word */
	"	cmpeq	%0,%6,%3\n"
	"	beq	%3,2f\n"	/* prev != old: done, no store */
	"	mskwl	%2,%5,%2\n"
	"	or	%1,%2,%2\n"
	"	stq_c	%2,0(%4)\n"
	"	beq	%2,3f\n"	/* store-conditional failed: retry */
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
	: "r" ((long)m), "Ir" (old), "1" (new) : "memory");

	return prev;
}
542
/*
 * 32-bit compare-and-exchange: if *m == old, store new.  Returns the
 * value previously in *m.  A compare miss exits at 2: with no store
 * and no barrier; a failed stl_c retries via 3:.
 */
static inline unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
	unsigned long prev, cmp;

	__asm__ __volatile__(
	"1:	ldl_l	%0,%5\n"	/* prev = *m (locked load) */
	"	cmpeq	%0,%3,%1\n"
	"	beq	%1,2f\n"	/* prev != old: done, no store */
	"	mov	%4,%1\n"
	"	stl_c	%1,%2\n"
	"	beq	%1,3f\n"	/* store-conditional failed: retry */
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
	: "r"((long) old), "r"(new), "m"(*m) : "memory");

	return prev;
}
567
/*
 * 64-bit compare-and-exchange: if *m == old, store new.  Returns the
 * value previously in *m.  Quadword (ldq_l/stq_c) twin of
 * __cmpxchg_u32.
 */
static inline unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
	unsigned long prev, cmp;

	__asm__ __volatile__(
	"1:	ldq_l	%0,%5\n"	/* prev = *m (locked load) */
	"	cmpeq	%0,%3,%1\n"
	"	beq	%1,2f\n"	/* prev != old: done, no store */
	"	mov	%4,%1\n"
	"	stq_c	%1,%2\n"
	"	beq	%1,3f\n"	/* store-conditional failed: retry */
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
	: "r"((long) old), "r"(new), "m"(*m) : "memory");

	return prev;
}
592
593/* This function doesn't exist, so you'll get a linker error
594 if something tries to do an invalid cmpxchg(). */
595extern void __cmpxchg_called_with_bad_pointer(void);
596
/*
 * Size dispatcher for cmpxchg().  Routes to the width-specific helper;
 * any size other than 1/2/4/8 deliberately references the undefined
 * __cmpxchg_called_with_bad_pointer() so the build fails at link time.
 */
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	if (size == 1)
		return __cmpxchg_u8(ptr, old, new);
	if (size == 2)
		return __cmpxchg_u16(ptr, old, new);
	if (size == 4)
		return __cmpxchg_u32(ptr, old, new);
	if (size == 8)
		return __cmpxchg_u64(ptr, old, new);

	/* Invalid size: force a link error. */
	__cmpxchg_called_with_bad_pointer();
	return old;
}
613
/*
 * Type-preserving front end: evaluates 'o' and 'n' once each,
 * dispatches on sizeof(*ptr), and casts the previous value back to
 * the pointee type.  Success is detected by comparing the result
 * with 'o' (see the comment block above).
 */
#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
621
622#endif /* __ASSEMBLY__ */
623
624#define arch_align_stack(x) (x)
625
626#endif
diff --git a/include/asm-alpha/termbits.h b/include/asm-alpha/termbits.h
new file mode 100644
index 000000000000..f4837fa29420
--- /dev/null
+++ b/include/asm-alpha/termbits.h
@@ -0,0 +1,186 @@
1#ifndef _ALPHA_TERMBITS_H
2#define _ALPHA_TERMBITS_H
3
4#include <linux/posix_types.h>
5
6typedef unsigned char cc_t;
7typedef unsigned int speed_t;
8typedef unsigned int tcflag_t;
9
10/*
11 * termios type and macro definitions. Be careful about adding stuff
12 * to this file since it's used in GNU libc and there are strict rules
13 * concerning namespace pollution.
14 */
15
#define NCCS 19
/*
 * Alpha/OSF-style termios: input/output speeds are stored explicitly
 * in c_ispeed/c_ospeed (speed_t fields), and c_line sits after the
 * control-character array rather than before it.
 */
struct termios {
	tcflag_t c_iflag;		/* input mode flags */
	tcflag_t c_oflag;		/* output mode flags */
	tcflag_t c_cflag;		/* control mode flags */
	tcflag_t c_lflag;		/* local mode flags */
	cc_t c_cc[NCCS];		/* control characters */
	cc_t c_line;			/* line discipline (== c_cc[19]) */
	speed_t c_ispeed;		/* input speed */
	speed_t c_ospeed;		/* output speed */
};
27
28/* c_cc characters */
29#define VEOF 0
30#define VEOL 1
31#define VEOL2 2
32#define VERASE 3
33#define VWERASE 4
34#define VKILL 5
35#define VREPRINT 6
36#define VSWTC 7
37#define VINTR 8
38#define VQUIT 9
39#define VSUSP 10
40#define VSTART 12
41#define VSTOP 13
42#define VLNEXT 14
43#define VDISCARD 15
44#define VMIN 16
45#define VTIME 17
46
47/* c_iflag bits */
48#define IGNBRK 0000001
49#define BRKINT 0000002
50#define IGNPAR 0000004
51#define PARMRK 0000010
52#define INPCK 0000020
53#define ISTRIP 0000040
54#define INLCR 0000100
55#define IGNCR 0000200
56#define ICRNL 0000400
57#define IXON 0001000
58#define IXOFF 0002000
59#define IXANY 0004000
60#define IUCLC 0010000
61#define IMAXBEL 0020000
62#define IUTF8 0040000
63
64/* c_oflag bits */
65#define OPOST 0000001
66#define ONLCR 0000002
67#define OLCUC 0000004
68
69#define OCRNL 0000010
70#define ONOCR 0000020
71#define ONLRET 0000040
72
73#define OFILL 00000100
74#define OFDEL 00000200
75#define NLDLY 00001400
76#define NL0 00000000
77#define NL1 00000400
78#define NL2 00001000
79#define NL3 00001400
80#define TABDLY 00006000
81#define TAB0 00000000
82#define TAB1 00002000
83#define TAB2 00004000
84#define TAB3 00006000
85#define CRDLY 00030000
86#define CR0 00000000
87#define CR1 00010000
88#define CR2 00020000
89#define CR3 00030000
90#define FFDLY 00040000
91#define FF0 00000000
92#define FF1 00040000
93#define BSDLY 00100000
94#define BS0 00000000
95#define BS1 00100000
96#define VTDLY 00200000
97#define VT0 00000000
98#define VT1 00200000
99#define XTABS 01000000 /* Hmm.. Linux/i386 considers this part of TABDLY.. */
100
101/* c_cflag bit meaning */
102#define CBAUD 0000037
103#define B0 0000000 /* hang up */
104#define B50 0000001
105#define B75 0000002
106#define B110 0000003
107#define B134 0000004
108#define B150 0000005
109#define B200 0000006
110#define B300 0000007
111#define B600 0000010
112#define B1200 0000011
113#define B1800 0000012
114#define B2400 0000013
115#define B4800 0000014
116#define B9600 0000015
117#define B19200 0000016
118#define B38400 0000017
119#define EXTA B19200
120#define EXTB B38400
121#define CBAUDEX 0000000
122#define B57600 00020
123#define B115200 00021
124#define B230400 00022
125#define B460800 00023
126#define B500000 00024
127#define B576000 00025
128#define B921600 00026
129#define B1000000 00027
130#define B1152000 00030
131#define B1500000 00031
132#define B2000000 00032
133#define B2500000 00033
134#define B3000000 00034
135#define B3500000 00035
136#define B4000000 00036
137
138#define CSIZE 00001400
139#define CS5 00000000
140#define CS6 00000400
141#define CS7 00001000
142#define CS8 00001400
143
144#define CSTOPB 00002000
145#define CREAD 00004000
146#define PARENB 00010000
147#define PARODD 00020000
148#define HUPCL 00040000
149
150#define CLOCAL 00100000
151#define CRTSCTS 020000000000 /* flow control */
152
153/* c_lflag bits */
154#define ISIG 0x00000080
155#define ICANON 0x00000100
156#define XCASE 0x00004000
157#define ECHO 0x00000008
158#define ECHOE 0x00000002
159#define ECHOK 0x00000004
160#define ECHONL 0x00000010
161#define NOFLSH 0x80000000
162#define TOSTOP 0x00400000
163#define ECHOCTL 0x00000040
164#define ECHOPRT 0x00000020
165#define ECHOKE 0x00000001
166#define FLUSHO 0x00800000
167#define PENDIN 0x20000000
168#define IEXTEN 0x00000400
169
170/* Values for the ACTION argument to `tcflow'. */
171#define TCOOFF 0
172#define TCOON 1
173#define TCIOFF 2
174#define TCION 3
175
176/* Values for the QUEUE_SELECTOR argument to `tcflush'. */
177#define TCIFLUSH 0
178#define TCOFLUSH 1
179#define TCIOFLUSH 2
180
181/* Values for the OPTIONAL_ACTIONS argument to `tcsetattr'. */
182#define TCSANOW 0
183#define TCSADRAIN 1
184#define TCSAFLUSH 2
185
186#endif /* _ALPHA_TERMBITS_H */
diff --git a/include/asm-alpha/termios.h b/include/asm-alpha/termios.h
new file mode 100644
index 000000000000..1cfd27f0ad73
--- /dev/null
+++ b/include/asm-alpha/termios.h
@@ -0,0 +1,164 @@
1#ifndef _ALPHA_TERMIOS_H
2#define _ALPHA_TERMIOS_H
3
4#include <asm/ioctls.h>
5#include <asm/termbits.h>
6
/* Legacy sgtty-style terminal state (old BSD TIOC* compatibility). */
struct sgttyb {
	char	sg_ispeed;	/* input speed */
	char	sg_ospeed;	/* output speed */
	char	sg_erase;	/* erase character */
	char	sg_kill;	/* kill character */
	short	sg_flags;	/* mode flags */
};
14
/* Legacy sgtty special characters (interrupt/quit/flow/EOF). */
struct tchars {
	char	t_intrc;	/* interrupt */
	char	t_quitc;	/* quit */
	char	t_startc;	/* start output */
	char	t_stopc;	/* stop output */
	char	t_eofc;		/* end-of-file */
	char	t_brkc;		/* input delimiter (like nl) */
};
23
/* Legacy sgtty "local" special characters (job control / editing). */
struct ltchars {
	char	t_suspc;	/* stop process signal */
	char	t_dsuspc;	/* delayed stop process signal */
	char	t_rprntc;	/* reprint line */
	char	t_flushc;	/* flush output */
	char	t_werasc;	/* word erase */
	char	t_lnextc;	/* literal next character */
};
32
/* Terminal window size, as reported/set by TIOCGWINSZ/TIOCSWINSZ. */
struct winsize {
	unsigned short ws_row;		/* rows, in characters */
	unsigned short ws_col;		/* columns, in characters */
	unsigned short ws_xpixel;	/* horizontal size, pixels */
	unsigned short ws_ypixel;	/* vertical size, pixels */
};
39
#define NCC 8
/*
 * Old SYSV "termio" structure: 16-bit flag words and only 8 control
 * characters.  Converted to/from the full termios by the macros below.
 */
struct termio {
	unsigned short c_iflag;		/* input mode flags */
	unsigned short c_oflag;		/* output mode flags */
	unsigned short c_cflag;		/* control mode flags */
	unsigned short c_lflag;		/* local mode flags */
	unsigned char c_line;		/* line discipline */
	unsigned char c_cc[NCC];	/* control characters */
};
49
50/*
51 * c_cc characters in the termio structure. Oh, how I love being
52 * backwardly compatible. Notice that character 4 and 5 are
53 * interpreted differently depending on whether ICANON is set in
54 * c_lflag. If it's set, they are used as _VEOF and _VEOL, otherwise
55 * as _VMIN and V_TIME. This is for compatibility with OSF/1 (which
56 * is compatible with sysV)...
57 */
58#define _VINTR 0
59#define _VQUIT 1
60#define _VERASE 2
61#define _VKILL 3
62#define _VEOF 4
63#define _VMIN 4
64#define _VEOL 5
65#define _VTIME 5
66#define _VEOL2 6
67#define _VSWTC 7
68
69/* line disciplines */
70#define N_TTY 0
71#define N_SLIP 1
72#define N_MOUSE 2
73#define N_PPP 3
74#define N_STRIP 4
75#define N_AX25 5
76#define N_X25 6 /* X.25 async */
77#define N_6PACK 7
78#define N_MASC 8 /* Reserved for Mobitex module <kaz@cafe.net> */
79#define N_R3964 9 /* Reserved for Simatic R3964 module */
80#define N_PROFIBUS_FDL 10 /* Reserved for Profibus <Dave@mvhi.com> */
81#define N_IRDA 11 /* Linux IrDa - http://irda.sourceforge.net/ */
82#define N_SMSBLOCK 12 /* SMS block mode - for talking to GSM data cards about SMS messages */
83#define N_HDLC 13 /* synchronous HDLC */
84#define N_SYNC_PPP 14
85#define N_HCI 15 /* Bluetooth HCI UART */
86
87#ifdef __KERNEL__
88/* eof=^D eol=\0 eol2=\0 erase=del
89 werase=^W kill=^U reprint=^R sxtc=\0
90 intr=^C quit=^\ susp=^Z <OSF/1 VDSUSP>
91 start=^Q stop=^S lnext=^V discard=^U
92 vmin=\1 vtime=\0
93*/
94#define INIT_C_CC "\004\000\000\177\027\025\022\000\003\034\032\000\021\023\026\025\001\000"
95
96/*
97 * Translate a "termio" structure into a "termios". Ugh.
98 */
99
/*
 * Copy a userspace "struct termio" into a kernel "struct termios".
 * Only the low 16 bits of each flag word are overwritten; the upper
 * bits of the existing termios are preserved.  The _VEOF/_VMIN and
 * _VEOL/_VTIME slots are demultiplexed according to ICANON (see the
 * comment above the _V* definitions).  Evaluates to 0 on success, or
 * copy_from_user()'s nonzero uncopied-byte count on fault.
 */
#define user_termio_to_kernel_termios(a_termios, u_termio)		\
({									\
	struct termios *k_termios = (a_termios);			\
	struct termio k_termio;						\
	int canon, ret;							\
									\
	ret = copy_from_user(&k_termio, u_termio, sizeof(k_termio));	\
	if (!ret) {							\
		/* Overwrite only the low bits.  */			\
		*(unsigned short *)&k_termios->c_iflag = k_termio.c_iflag; \
		*(unsigned short *)&k_termios->c_oflag = k_termio.c_oflag; \
		*(unsigned short *)&k_termios->c_cflag = k_termio.c_cflag; \
		*(unsigned short *)&k_termios->c_lflag = k_termio.c_lflag; \
		canon = k_termio.c_lflag & ICANON;			\
									\
		k_termios->c_cc[VINTR]  = k_termio.c_cc[_VINTR];	\
		k_termios->c_cc[VQUIT]  = k_termio.c_cc[_VQUIT];	\
		k_termios->c_cc[VERASE] = k_termio.c_cc[_VERASE];	\
		k_termios->c_cc[VKILL]  = k_termio.c_cc[_VKILL];	\
		k_termios->c_cc[VEOL2]  = k_termio.c_cc[_VEOL2];	\
		k_termios->c_cc[VSWTC]  = k_termio.c_cc[_VSWTC];	\
		k_termios->c_cc[canon ? VEOF : VMIN] = k_termio.c_cc[_VEOF]; \
		k_termios->c_cc[canon ? VEOL : VTIME] = k_termio.c_cc[_VEOL]; \
	}								\
	ret;								\
})
126
127/*
128 * Translate a "termios" structure into a "termio". Ugh.
129 *
130 * Note the "fun" _VMIN overloading.
131 */
/*
 * Copy a kernel "struct termios" out to a userspace "struct termio".
 * Flag words are truncated to 16 bits; the shared _VEOF/_VMIN and
 * _VEOL/_VTIME slots are filled according to ICANON.  Evaluates to
 * copy_to_user()'s result (0 on success, nonzero on fault).
 */
#define kernel_termios_to_user_termio(u_termio, a_termios)		\
({									\
	struct termios *k_termios = (a_termios);			\
	struct termio k_termio;						\
	int canon;							\
									\
	k_termio.c_iflag = k_termios->c_iflag;				\
	k_termio.c_oflag = k_termios->c_oflag;				\
	k_termio.c_cflag = k_termios->c_cflag;				\
	canon = (k_termio.c_lflag = k_termios->c_lflag) & ICANON;	\
									\
	k_termio.c_line = k_termios->c_line;				\
	k_termio.c_cc[_VINTR]  = k_termios->c_cc[VINTR];		\
	k_termio.c_cc[_VQUIT]  = k_termios->c_cc[VQUIT];		\
	k_termio.c_cc[_VERASE] = k_termios->c_cc[VERASE];		\
	k_termio.c_cc[_VKILL]  = k_termios->c_cc[VKILL];		\
	k_termio.c_cc[_VEOF]   = k_termios->c_cc[canon ? VEOF : VMIN];	\
	k_termio.c_cc[_VEOL]   = k_termios->c_cc[canon ? VEOL : VTIME];	\
	k_termio.c_cc[_VEOL2]  = k_termios->c_cc[VEOL2];		\
	k_termio.c_cc[_VSWTC]  = k_termios->c_cc[VSWTC];		\
									\
	copy_to_user(u_termio, &k_termio, sizeof(k_termio));		\
})
155
/*
 * Full termios transfers are plain struct copies: the kernel and user
 * layouts are identical on Alpha.  Both evaluate to the uncopied-byte
 * count (0 on success).
 */
#define user_termios_to_kernel_termios(k, u) \
	copy_from_user(k, u, sizeof(struct termios))

#define kernel_termios_to_user_termios(u, k) \
	copy_to_user(u, k, sizeof(struct termios))
161
162#endif /* __KERNEL__ */
163
164#endif /* _ALPHA_TERMIOS_H */
diff --git a/include/asm-alpha/thread_info.h b/include/asm-alpha/thread_info.h
new file mode 100644
index 000000000000..d51491ed00b8
--- /dev/null
+++ b/include/asm-alpha/thread_info.h
@@ -0,0 +1,98 @@
1#ifndef _ALPHA_THREAD_INFO_H
2#define _ALPHA_THREAD_INFO_H
3
4#ifdef __KERNEL__
5
6#ifndef __ASSEMBLY__
7#include <asm/processor.h>
8#include <asm/types.h>
9#include <asm/hwrpb.h>
10#endif
11
12#ifndef __ASSEMBLY__
/*
 * Low-level per-thread state, kept at the base of the kernel stack
 * (see THREAD_SIZE/alloc_thread_info below) and reachable via $8.
 * NOTE(review): pcb is placed first -- asm/PALcode likely assumes it
 * is at offset 0 of thread_info; confirm before reordering fields.
 */
struct thread_info {
	struct pcb_struct	pcb;		/* palcode state */

	struct task_struct	*task;		/* main task structure */
	unsigned int		flags;		/* low level flags */
	unsigned int		ieee_state;	/* see fpu.h */

	struct exec_domain	*exec_domain;	/* execution domain */
	mm_segment_t		addr_limit;	/* thread address space */
	unsigned		cpu;		/* current CPU */
	int			preempt_count; /* 0 => preemptable, <0 => BUG */

	int bpt_nsaved;
	unsigned long bpt_addr[2];		/* breakpoint handling */
	unsigned int bpt_insn[2];
};
31
32/*
33 * Macros/functions for gaining access to the thread information structure.
34 */
/*
 * Static initializer for the boot thread's thread_info: kernel
 * address limit, default exec domain, and a no-op restart block.
 * Fields not named here (pcb, flags, ...) are zero-initialized.
 */
#define INIT_THREAD_INFO(tsk)			\
{						\
	.task		= &tsk,			\
	.exec_domain	= &default_exec_domain,	\
	.addr_limit	= KERNEL_DS,		\
	.restart_block = {			\
		.fn = do_no_restart_syscall,	\
	},					\
}
44
#define init_thread_info	(init_thread_union.thread_info)
#define init_stack		(init_thread_union.stack)

/* How to get the thread information struct from C.
   Register $8 is permanently dedicated to the current thread_info
   pointer, so no load is needed. */
register struct thread_info *__current_thread_info __asm__("$8");
#define current_thread_info()  __current_thread_info

/* Thread information allocation: thread_info + kernel stack share one
   order-1 (two-page) allocation; get/put pin the owning task_struct. */
#define THREAD_SIZE (2*PAGE_SIZE)
#define alloc_thread_info(tsk) \
  ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
#define get_thread_info(ti) get_task_struct((ti)->task)
#define put_thread_info(ti) put_task_struct((ti)->task)
59
60#endif /* __ASSEMBLY__ */
61
62#define PREEMPT_ACTIVE 0x40000000
63
64/*
65 * Thread information flags:
66 * - these are process state flags and used from assembly
67 * - pending work-to-be-done flags come first to fit in and immediate operand.
68 *
69 * TIF_SYSCALL_TRACE is known to be 0 via blbs.
70 */
71#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
72#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
73#define TIF_SIGPENDING 2 /* signal pending */
74#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
75#define TIF_POLLING_NRFLAG 4 /* poll_idle is polling NEED_RESCHED */
76#define TIF_DIE_IF_KERNEL 5 /* dik recursion lock */
77#define TIF_UAC_NOPRINT 6 /* see sysinfo.h */
78#define TIF_UAC_NOFIX 7
79#define TIF_UAC_SIGBUS 8
80#define TIF_MEMDIE 9
81
82#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
83#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
84#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
85#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
86#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
87
88/* Work to do on interrupt/exception return. */
89#define _TIF_WORK_MASK (_TIF_NOTIFY_RESUME \
90 | _TIF_SIGPENDING \
91 | _TIF_NEED_RESCHED)
92
93/* Work to do on any return to userspace. */
94#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK \
95 | _TIF_SYSCALL_TRACE)
96
97#endif /* __KERNEL__ */
98#endif /* _ALPHA_THREAD_INFO_H */
diff --git a/include/asm-alpha/timex.h b/include/asm-alpha/timex.h
new file mode 100644
index 000000000000..afa0c45e3e98
--- /dev/null
+++ b/include/asm-alpha/timex.h
@@ -0,0 +1,31 @@
1/*
2 * linux/include/asm-alpha/timex.h
3 *
4 * ALPHA architecture timex specifications
5 */
6#ifndef _ASMALPHA_TIMEX_H
7#define _ASMALPHA_TIMEX_H
8
9/* With only one or two oddballs, we use the RTC as the ticker, selecting
10 the 32.768kHz reference clock, which nicely divides down to our HZ. */
11#define CLOCK_TICK_RATE 32768
12
13/*
14 * Standard way to access the cycle counter.
15 * Currently only used on SMP for scheduling.
16 *
17 * Only the low 32 bits are available as a continuously counting entity.
18 * But this only means we'll force a reschedule every 8 seconds or so,
19 * which isn't an evil thing.
20 */
21
22typedef unsigned int cycles_t;
23
/*
 * Read the processor cycle counter with the rpcc instruction.  Only
 * the low 32 bits count continuously (see the note above), hence the
 * 32-bit cycles_t.
 */
static inline cycles_t get_cycles (void)
{
	cycles_t ret;
	__asm__ __volatile__ ("rpcc %0" : "=r"(ret));
	return ret;
}
30
31#endif
diff --git a/include/asm-alpha/tlb.h b/include/asm-alpha/tlb.h
new file mode 100644
index 000000000000..aa91335533e0
--- /dev/null
+++ b/include/asm-alpha/tlb.h
@@ -0,0 +1,15 @@
1#ifndef _ALPHA_TLB_H
2#define _ALPHA_TLB_H
3
/* Per-VMA and per-PTE mmu_gather hooks are no-ops on Alpha: the only
   flush granularity used is the whole mm (tlb_flush below). */
#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)
#define __tlb_remove_tlb_entry(tlb, pte, addr)	do { } while (0)

/* Flush the whole address space of the mm being gathered. */
#define tlb_flush(tlb)				flush_tlb_mm((tlb)->mm)

#include <asm-generic/tlb.h>

/* Page-table pages need no deferred TLB treatment; free immediately. */
#define __pte_free_tlb(tlb,pte)			pte_free(pte)
#define __pmd_free_tlb(tlb,pmd)			pmd_free(pmd)
14
15#endif
diff --git a/include/asm-alpha/tlbflush.h b/include/asm-alpha/tlbflush.h
new file mode 100644
index 000000000000..9d484c1fdc82
--- /dev/null
+++ b/include/asm-alpha/tlbflush.h
@@ -0,0 +1,158 @@
1#ifndef _ALPHA_TLBFLUSH_H
2#define _ALPHA_TLBFLUSH_H
3
4#include <linux/config.h>
5#include <linux/mm.h>
6#include <asm/compiler.h>
7
8#ifndef __EXTERN_INLINE
9#define __EXTERN_INLINE extern inline
10#define __MMU_EXTERN_INLINE
11#endif
12
13extern void __load_new_mm_context(struct mm_struct *);
14
15
16/* Use a few helper functions to hide the ugly broken ASN
17 numbers on early Alphas (ev4 and ev45). */
18
/* EV4/EV45 (broken ASNs): after loading a fresh context, explicitly
   invalidate all per-process TLB entries with tbiap. */
__EXTERN_INLINE void
ev4_flush_tlb_current(struct mm_struct *mm)
{
	__load_new_mm_context(mm);
	tbiap();
}
25
/* EV5+: ASNs work, so allocating a new mm context is sufficient --
   stale entries belong to the old ASN and need no explicit tbi. */
__EXTERN_INLINE void
ev5_flush_tlb_current(struct mm_struct *mm)
{
	__load_new_mm_context(mm);
}
31
32/* Flush just one page in the current TLB set. We need to be very
33 careful about the icache here, there is no way to invalidate a
34 specific icache page. */
35
/* EV4 single-page flush.  tbi selector 2 = D-stream only; for
   executable mappings we must also reload the context and use
   selector 3 (I+D), since the icache page cannot be invalidated
   individually (see comment above). */
__EXTERN_INLINE void
ev4_flush_tlb_current_page(struct mm_struct * mm,
			   struct vm_area_struct *vma,
			   unsigned long addr)
{
	int tbi_flag = 2;
	if (vma->vm_flags & VM_EXEC) {
		__load_new_mm_context(mm);
		tbi_flag = 3;
	}
	tbi(tbi_flag, addr);
}
48
/* EV5 single-page flush.  Executable pages get a whole new context
   (icache concerns, as above); plain data pages need only a
   D-stream tbi (selector 2). */
__EXTERN_INLINE void
ev5_flush_tlb_current_page(struct mm_struct * mm,
			   struct vm_area_struct *vma,
			   unsigned long addr)
{
	if (vma->vm_flags & VM_EXEC)
		__load_new_mm_context(mm);
	else
		tbi(2, addr);
}
59
60
61#ifdef CONFIG_ALPHA_GENERIC
62# define flush_tlb_current alpha_mv.mv_flush_tlb_current
63# define flush_tlb_current_page alpha_mv.mv_flush_tlb_current_page
64#else
65# ifdef CONFIG_ALPHA_EV4
66# define flush_tlb_current ev4_flush_tlb_current
67# define flush_tlb_current_page ev4_flush_tlb_current_page
68# else
69# define flush_tlb_current ev5_flush_tlb_current
70# define flush_tlb_current_page ev5_flush_tlb_current_page
71# endif
72#endif
73
74#ifdef __MMU_EXTERN_INLINE
75#undef __EXTERN_INLINE
76#undef __MMU_EXTERN_INLINE
77#endif
78
/* Flush the current task's user mapping (its active mm). */
static inline void
flush_tlb(void)
{
	flush_tlb_current(current->active_mm);
}
85
/* Flush someone else's user mapping.  Zeroing this CPU's slot of
   mm->context invalidates the mm's ASN here, so a fresh context is
   loaded the next time the mm is activated on this CPU --
   NOTE(review): inferred from __load_new_mm_context usage; confirm. */
static inline void
flush_tlb_other(struct mm_struct *mm)
{
	unsigned long *mmc = &mm->context[smp_processor_id()];
	/* Check it's not zero first to avoid cacheline ping pong
	   when possible.  */
	if (*mmc) *mmc = 0;
}
95
96/* Flush a specified range of user mapping page tables from TLB.
97 Although Alpha uses VPTE caches, this can be a nop, as Alpha does
98 not have finegrained tlb flushing, so it will flush VPTE stuff
99 during next flush_tlb_range. */
100
static inline void
flush_tlb_pgtables(struct mm_struct *mm, unsigned long start,
		   unsigned long end)
{
	/* Intentionally empty -- see the comment block above: the VPTE
	   mappings get flushed along with the next flush_tlb_range.  */
}
106
#ifndef CONFIG_SMP
/* Flush everything (kernel mapping may also have changed
   due to vmalloc/vfree).  tbia invalidates the entire TLB. */
static inline void flush_tlb_all(void)
{
	tbia();
}
114
/* Flush a specified user mapping: the fast path (context reload /
   tbiap) if it is the active mm, otherwise just kill its ASN. */
static inline void
flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}
124
/* Page-granular tlb flush.  Only the active mm can be flushed by
   page; for any other mm we fall back to invalidating its ASN. */
static inline void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm == current->active_mm)
		flush_tlb_current_page(mm, vma, addr);
	else
		flush_tlb_other(mm);
}
136
/* Flush a specified range of user mapping.  On the Alpha we flush
   the whole user tlb -- there is no ranged invalidate, so the
   start/end arguments are ignored. */
static inline void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
145
146#else /* CONFIG_SMP */
147
148extern void flush_tlb_all(void);
149extern void flush_tlb_mm(struct mm_struct *);
150extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
151extern void flush_tlb_range(struct vm_area_struct *, unsigned long,
152 unsigned long);
153
154#endif /* CONFIG_SMP */
155
156#define flush_tlb_kernel_range(start, end) flush_tlb_all()
157
158#endif /* _ALPHA_TLBFLUSH_H */
diff --git a/include/asm-alpha/topology.h b/include/asm-alpha/topology.h
new file mode 100644
index 000000000000..eb740e280d9c
--- /dev/null
+++ b/include/asm-alpha/topology.h
@@ -0,0 +1,48 @@
1#ifndef _ASM_ALPHA_TOPOLOGY_H
2#define _ASM_ALPHA_TOPOLOGY_H
3
4#include <linux/smp.h>
5#include <linux/threads.h>
6#include <asm/machvec.h>
7
8#ifdef CONFIG_NUMA
/* Map a CPU number to its NUMA node via the machine vector.
   Platforms without a cpuid_to_nid hook are treated as single-node
   (node 0). */
static inline int cpu_to_node(int cpu)
{
	int node;

	if (!alpha_mv.cpuid_to_nid)
		return 0;

	node = alpha_mv.cpuid_to_nid(cpu);

#ifdef DEBUG_NUMA
	BUG_ON(node < 0);
#endif

	return node;
}
24
/* Build the mask of online CPUs that belong to 'node' by scanning
   all possible CPU numbers. */
static inline cpumask_t node_to_cpumask(int node)
{
	cpumask_t node_cpu_mask = CPU_MASK_NONE;
	int cpu;

	for(cpu = 0; cpu < NR_CPUS; cpu++) {
		if (cpu_online(cpu) && (cpu_to_node(cpu) == node))
			cpu_set(cpu, node_cpu_mask);
	}

#ifdef DEBUG_NUMA
	/* NOTE(review): printing a cpumask_t with %016lx assumes the
	   mask fits in a single long -- confirm for NR_CPUS > 64. */
	printk("node %d: cpu_mask: %016lx\n", node, node_cpu_mask);
#endif

	return node_cpu_mask;
}
41
42#define pcibus_to_cpumask(bus) (cpu_online_map)
43
44#else /* CONFIG_NUMA */
45# include <asm-generic/topology.h>
46#endif /* !CONFIG_NUMA */
47
48#endif /* _ASM_ALPHA_TOPOLOGY_H */
diff --git a/include/asm-alpha/types.h b/include/asm-alpha/types.h
new file mode 100644
index 000000000000..43264d219246
--- /dev/null
+++ b/include/asm-alpha/types.h
@@ -0,0 +1,63 @@
1#ifndef _ALPHA_TYPES_H
2#define _ALPHA_TYPES_H
3
4/*
5 * This file is never included by application software unless
6 * explicitly requested (e.g., via linux/types.h) in which case the
7 * application is Linux specific so (user-) name space pollution is
8 * not a major issue. However, for interoperability, libraries still
9 * need to be careful to avoid a name clashes.
10 */
11
12#ifndef __ASSEMBLY__
13
14typedef unsigned int umode_t;
15
16/*
17 * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
18 * header files exported to user space
19 */
20
21typedef __signed__ char __s8;
22typedef unsigned char __u8;
23
24typedef __signed__ short __s16;
25typedef unsigned short __u16;
26
27typedef __signed__ int __s32;
28typedef unsigned int __u32;
29
30typedef __signed__ long __s64;
31typedef unsigned long __u64;
32
33#endif /* __ASSEMBLY__ */
34
35/*
36 * These aren't exported outside the kernel to avoid name space clashes
37 */
38#ifdef __KERNEL__
39
40#define BITS_PER_LONG 64
41
42#ifndef __ASSEMBLY__
43
44typedef signed char s8;
45typedef unsigned char u8;
46
47typedef signed short s16;
48typedef unsigned short u16;
49
50typedef signed int s32;
51typedef unsigned int u32;
52
53typedef signed long s64;
54typedef unsigned long u64;
55
56typedef u64 dma_addr_t;
57typedef u64 dma64_addr_t;
58
59typedef unsigned short kmem_bufctl_t;
60
61#endif /* __ASSEMBLY__ */
62#endif /* __KERNEL__ */
63#endif /* _ALPHA_TYPES_H */
diff --git a/include/asm-alpha/uaccess.h b/include/asm-alpha/uaccess.h
new file mode 100644
index 000000000000..4c39ee750f38
--- /dev/null
+++ b/include/asm-alpha/uaccess.h
@@ -0,0 +1,517 @@
1#ifndef __ALPHA_UACCESS_H
2#define __ALPHA_UACCESS_H
3
4#include <linux/errno.h>
5#include <linux/sched.h>
6
7
8/*
9 * The fs value determines whether argument validity checking should be
10 * performed or not. If get_fs() == USER_DS, checking is performed, with
11 * get_fs() == KERNEL_DS, checking is bypassed.
12 *
13 * Or at least it did once upon a time. Nowadays it is a mask that
14 * defines which bits of the address space are off limits. This is a
15 * wee bit faster than the above.
16 *
17 * For historical reasons, these macros are grossly misnamed.
18 */
19
20#define KERNEL_DS ((mm_segment_t) { 0UL })
21#define USER_DS ((mm_segment_t) { -0x40000000000UL })
22
23#define VERIFY_READ 0
24#define VERIFY_WRITE 1
25
26#define get_fs() (current_thread_info()->addr_limit)
27#define get_ds() (KERNEL_DS)
28#define set_fs(x) (current_thread_info()->addr_limit = (x))
29
30#define segment_eq(a,b) ((a).seg == (b).seg)
31
32/*
33 * Is an address valid? This does a straightforward calculation rather
34 * than tests.
35 *
36 * Address valid if:
37 * - "addr" doesn't have any high-bits set
38 * - AND "size" doesn't have any high-bits set
39 * - AND "addr+size" doesn't have any high-bits set
40 * - OR we are in kernel mode.
41 */
42#define __access_ok(addr,size,segment) \
43 (((segment).seg & (addr | size | (addr+size))) == 0)
44
45#define access_ok(type,addr,size) \
46({ \
47 __chk_user_ptr(addr); \
48 __access_ok(((unsigned long)(addr)),(size),get_fs()); \
49})
50
51/* this function will go away soon - use access_ok() instead */
/* Returns 0 when the range passes access_ok(), -EFAULT otherwise. */
52extern inline int __deprecated verify_area(int type, const void __user * addr, unsigned long size)
53{
	/* `type` is effectively ignored: access_ok() on alpha is a pure
	   address-range test against the current segment mask. */
54	return access_ok(type,addr,size) ? 0 : -EFAULT;
55}
56
57/*
58 * These are the main single-value transfer routines. They automatically
59 * use the right size if we just have the right pointer type.
60 *
61 * As the alpha uses the same address space for kernel and user
62 * data, we can just do these as direct assignments. (Of course, the
63 * exception handling means that it's no longer "just"...)
64 *
65 * Careful to not
66 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
67 * (b) require any knowledge of processes at this stage
68 */
/* Checked single-value transfers: validate `ptr` against the current
   address limit (get_fs()), dispatch on sizeof(*ptr), and evaluate to
   0 on success or -EFAULT on a bad pointer. */
69#define put_user(x,ptr) \
70  __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)),get_fs())
71#define get_user(x,ptr) \
72  __get_user_check((x),(ptr),sizeof(*(ptr)),get_fs())
73
74/*
75 * The "__xxx" versions do not do address space checking, useful when
76 * doing multiple accesses to the same area (the programmer has to do the
77 * checks by hand with "access_ok()")
78 */
79#define __put_user(x,ptr) \
80  __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
81#define __get_user(x,ptr) \
82  __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
83
84/*
85 * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to
86 * encode the bits we need for resolving the exception. See the
87 * more extensive comments with fixup_inline_exception below for
88 * more information.
89 */
90
91extern void __get_user_unknown(void);
92
93#define __get_user_nocheck(x,ptr,size) \
94({ \
95 long __gu_err = 0; \
96 unsigned long __gu_val; \
97 __chk_user_ptr(ptr); \
98 switch (size) { \
99 case 1: __get_user_8(ptr); break; \
100 case 2: __get_user_16(ptr); break; \
101 case 4: __get_user_32(ptr); break; \
102 case 8: __get_user_64(ptr); break; \
103 default: __get_user_unknown(); break; \
104 } \
105 (x) = (__typeof__(*(ptr))) __gu_val; \
106 __gu_err; \
107})
108
109#define __get_user_check(x,ptr,size,segment) \
110({ \
111 long __gu_err = -EFAULT; \
112 unsigned long __gu_val = 0; \
113 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
114 if (__access_ok((unsigned long)__gu_addr,size,segment)) { \
115 __gu_err = 0; \
116 switch (size) { \
117 case 1: __get_user_8(__gu_addr); break; \
118 case 2: __get_user_16(__gu_addr); break; \
119 case 4: __get_user_32(__gu_addr); break; \
120 case 8: __get_user_64(__gu_addr); break; \
121 default: __get_user_unknown(); break; \
122 } \
123 } \
124 (x) = (__typeof__(*(ptr))) __gu_val; \
125 __gu_err; \
126})
127
/* Fictitious wide type used to build "m" memory operands for the inline
   asm below (see the "m"(__m(addr)) constraints), so the compiler treats
   the referenced user memory as the operand. */
128struct __large_struct { unsigned long buf[100]; };
129#define __m(x) (*(struct __large_struct __user *)(x))
130
131#define __get_user_64(addr) \
132 __asm__("1: ldq %0,%2\n" \
133 "2:\n" \
134 ".section __ex_table,\"a\"\n" \
135 " .long 1b - .\n" \
136 " lda %0, 2b-1b(%1)\n" \
137 ".previous" \
138 : "=r"(__gu_val), "=r"(__gu_err) \
139 : "m"(__m(addr)), "1"(__gu_err))
140
141#define __get_user_32(addr) \
142 __asm__("1: ldl %0,%2\n" \
143 "2:\n" \
144 ".section __ex_table,\"a\"\n" \
145 " .long 1b - .\n" \
146 " lda %0, 2b-1b(%1)\n" \
147 ".previous" \
148 : "=r"(__gu_val), "=r"(__gu_err) \
149 : "m"(__m(addr)), "1"(__gu_err))
150
151#ifdef __alpha_bwx__
152/* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */
153
154#define __get_user_16(addr) \
155 __asm__("1: ldwu %0,%2\n" \
156 "2:\n" \
157 ".section __ex_table,\"a\"\n" \
158 " .long 1b - .\n" \
159 " lda %0, 2b-1b(%1)\n" \
160 ".previous" \
161 : "=r"(__gu_val), "=r"(__gu_err) \
162 : "m"(__m(addr)), "1"(__gu_err))
163
164#define __get_user_8(addr) \
165 __asm__("1: ldbu %0,%2\n" \
166 "2:\n" \
167 ".section __ex_table,\"a\"\n" \
168 " .long 1b - .\n" \
169 " lda %0, 2b-1b(%1)\n" \
170 ".previous" \
171 : "=r"(__gu_val), "=r"(__gu_err) \
172 : "m"(__m(addr)), "1"(__gu_err))
173#else
174/* Unfortunately, we can't get an unaligned access trap for the sub-word
175 load, so we have to do a general unaligned operation. */
176
177#define __get_user_16(addr) \
178{ \
179 long __gu_tmp; \
180 __asm__("1: ldq_u %0,0(%3)\n" \
181 "2: ldq_u %1,1(%3)\n" \
182 " extwl %0,%3,%0\n" \
183 " extwh %1,%3,%1\n" \
184 " or %0,%1,%0\n" \
185 "3:\n" \
186 ".section __ex_table,\"a\"\n" \
187 " .long 1b - .\n" \
188 " lda %0, 3b-1b(%2)\n" \
189 " .long 2b - .\n" \
190 " lda %0, 3b-2b(%2)\n" \
191 ".previous" \
192 : "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err) \
193 : "r"(addr), "2"(__gu_err)); \
194}
195
196#define __get_user_8(addr) \
197 __asm__("1: ldq_u %0,0(%2)\n" \
198 " extbl %0,%2,%0\n" \
199 "2:\n" \
200 ".section __ex_table,\"a\"\n" \
201 " .long 1b - .\n" \
202 " lda %0, 2b-1b(%1)\n" \
203 ".previous" \
204 : "=&r"(__gu_val), "=r"(__gu_err) \
205 : "r"(addr), "1"(__gu_err))
206#endif
207
208extern void __put_user_unknown(void);
209
210#define __put_user_nocheck(x,ptr,size) \
211({ \
212 long __pu_err = 0; \
213 __chk_user_ptr(ptr); \
214 switch (size) { \
215 case 1: __put_user_8(x,ptr); break; \
216 case 2: __put_user_16(x,ptr); break; \
217 case 4: __put_user_32(x,ptr); break; \
218 case 8: __put_user_64(x,ptr); break; \
219 default: __put_user_unknown(); break; \
220 } \
221 __pu_err; \
222})
223
224#define __put_user_check(x,ptr,size,segment) \
225({ \
226 long __pu_err = -EFAULT; \
227 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
228 if (__access_ok((unsigned long)__pu_addr,size,segment)) { \
229 __pu_err = 0; \
230 switch (size) { \
231 case 1: __put_user_8(x,__pu_addr); break; \
232 case 2: __put_user_16(x,__pu_addr); break; \
233 case 4: __put_user_32(x,__pu_addr); break; \
234 case 8: __put_user_64(x,__pu_addr); break; \
235 default: __put_user_unknown(); break; \
236 } \
237 } \
238 __pu_err; \
239})
240
241/*
242 * The "__put_user_xx()" macros tell gcc they read from memory
243 * instead of writing: this is because they do not write to
244 * any memory gcc knows about, so there are no aliasing issues
245 */
246#define __put_user_64(x,addr) \
247__asm__ __volatile__("1: stq %r2,%1\n" \
248 "2:\n" \
249 ".section __ex_table,\"a\"\n" \
250 " .long 1b - .\n" \
251 " lda $31,2b-1b(%0)\n" \
252 ".previous" \
253 : "=r"(__pu_err) \
254 : "m" (__m(addr)), "rJ" (x), "0"(__pu_err))
255
256#define __put_user_32(x,addr) \
257__asm__ __volatile__("1: stl %r2,%1\n" \
258 "2:\n" \
259 ".section __ex_table,\"a\"\n" \
260 " .long 1b - .\n" \
261 " lda $31,2b-1b(%0)\n" \
262 ".previous" \
263 : "=r"(__pu_err) \
264 : "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
265
266#ifdef __alpha_bwx__
267/* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */
268
269#define __put_user_16(x,addr) \
270__asm__ __volatile__("1: stw %r2,%1\n" \
271 "2:\n" \
272 ".section __ex_table,\"a\"\n" \
273 " .long 1b - .\n" \
274 " lda $31,2b-1b(%0)\n" \
275 ".previous" \
276 : "=r"(__pu_err) \
277 : "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
278
279#define __put_user_8(x,addr) \
280__asm__ __volatile__("1: stb %r2,%1\n" \
281 "2:\n" \
282 ".section __ex_table,\"a\"\n" \
283 " .long 1b - .\n" \
284 " lda $31,2b-1b(%0)\n" \
285 ".previous" \
286 : "=r"(__pu_err) \
287 : "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
288#else
289/* Unfortunately, we can't get an unaligned access trap for the sub-word
290 write, so we have to do a general unaligned operation. */
291
292#define __put_user_16(x,addr) \
293{ \
294 long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4; \
295 __asm__ __volatile__( \
296 "1: ldq_u %2,1(%5)\n" \
297 "2: ldq_u %1,0(%5)\n" \
298 " inswh %6,%5,%4\n" \
299 " inswl %6,%5,%3\n" \
300 " mskwh %2,%5,%2\n" \
301 " mskwl %1,%5,%1\n" \
302 " or %2,%4,%2\n" \
303 " or %1,%3,%1\n" \
304 "3: stq_u %2,1(%5)\n" \
305 "4: stq_u %1,0(%5)\n" \
306 "5:\n" \
307 ".section __ex_table,\"a\"\n" \
308 " .long 1b - .\n" \
309 " lda $31, 5b-1b(%0)\n" \
310 " .long 2b - .\n" \
311 " lda $31, 5b-2b(%0)\n" \
312 " .long 3b - .\n" \
313 " lda $31, 5b-3b(%0)\n" \
314 " .long 4b - .\n" \
315 " lda $31, 5b-4b(%0)\n" \
316 ".previous" \
317 : "=r"(__pu_err), "=&r"(__pu_tmp1), \
318 "=&r"(__pu_tmp2), "=&r"(__pu_tmp3), \
319 "=&r"(__pu_tmp4) \
320 : "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \
321}
322
323#define __put_user_8(x,addr) \
324{ \
325 long __pu_tmp1, __pu_tmp2; \
326 __asm__ __volatile__( \
327 "1: ldq_u %1,0(%4)\n" \
328 " insbl %3,%4,%2\n" \
329 " mskbl %1,%4,%1\n" \
330 " or %1,%2,%1\n" \
331 "2: stq_u %1,0(%4)\n" \
332 "3:\n" \
333 ".section __ex_table,\"a\"\n" \
334 " .long 1b - .\n" \
335 " lda $31, 3b-1b(%0)\n" \
336 " .long 2b - .\n" \
337 " lda $31, 3b-2b(%0)\n" \
338 ".previous" \
339 : "=r"(__pu_err), \
340 "=&r"(__pu_tmp1), "=&r"(__pu_tmp2) \
341 : "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
342}
343#endif
344
345
346/*
347 * Complex access routines
348 */
349
350/* This little bit of silliness is to get the GP loaded for a function
351 that ordinarily wouldn't. Otherwise we could have it done by the macro
352   directly, which can be optimized away by the linker. */
353#ifdef MODULE
354#define __module_address(sym) "r"(sym),
355#define __module_call(ra, arg, sym) "jsr $" #ra ",(%" #arg ")," #sym
356#else
357#define __module_address(sym)
358#define __module_call(ra, arg, sym) "bsr $" #ra "," #sym " !samegp"
359#endif
360
361extern void __copy_user(void);
362
/*
 * Unchecked bulk copy.  The arguments are pinned in the registers the
 * out-of-line __copy_user helper reads: $6 = to, $7 = from, $0 = len.
 * Returns whatever __copy_user leaves in $0 — presumably the count of
 * bytes left uncopied (0 on full success); confirm against the
 * __copy_user implementation.
 */
363extern inline long
364__copy_tofrom_user_nocheck(void *to, const void *from, long len)
365{
366	register void * __cu_to __asm__("$6") = to;
367	register const void * __cu_from __asm__("$7") = from;
368	register long __cu_len __asm__("$0") = len;
369
	/* __module_call expands to a jsr/bsr to __copy_user with $28 as the
	   return-address register, hence $28 in the clobber list. */
370	__asm__ __volatile__(
371		__module_call(28, 3, __copy_user)
372		: "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to)
373		: __module_address(__copy_user)
374		  "0" (__cu_len), "1" (__cu_from), "2" (__cu_to)
375		: "$1","$2","$3","$4","$5","$28","memory");
376
377	return __cu_len;
378}
379
/*
 * Range-checked copy.  `validate` is whichever of to/from is the user
 * pointer (see copy_to_user/copy_from_user below).  If the range check
 * fails, len is returned unchanged — i.e. no bytes were copied.
 */
380extern inline long
381__copy_tofrom_user(void *to, const void *from, long len, const void __user *validate)
382{
383	if (__access_ok((unsigned long)validate, len, get_fs()))
384		len = __copy_tofrom_user_nocheck(to, from, len);
385	return len;
386}
387
388#define __copy_to_user(to,from,n) \
389({ \
390 __chk_user_ptr(to); \
391 __copy_tofrom_user_nocheck((__force void *)(to),(from),(n)); \
392})
393#define __copy_from_user(to,from,n) \
394({ \
395 __chk_user_ptr(from); \
396 __copy_tofrom_user_nocheck((to),(__force void *)(from),(n)); \
397})
398
399#define __copy_to_user_inatomic __copy_to_user
400#define __copy_from_user_inatomic __copy_from_user
401
402
/* Copy n bytes from kernel `from` to user `to`; the user pointer (`to`)
   is the one range-checked.  Returns n when the check fails. */
403extern inline long
404copy_to_user(void __user *to, const void *from, long n)
405{
406	return __copy_tofrom_user((__force void *)to, from, n, to);
407}
408
/* Copy n bytes from user `from` to kernel `to`; the user pointer (`from`)
   is the one range-checked.  Returns n when the check fails. */
409extern inline long
410copy_from_user(void *to, const void __user *from, long n)
411{
412	return __copy_tofrom_user(to, (__force void *)from, n, from);
413}
414
415extern void __do_clear_user(void);
416
/* Unchecked zero-fill of user memory: arguments pinned for the
   out-of-line __do_clear_user helper ($6 = to, $0 = len); returns what
   the helper leaves in $0.  $28 is the call's return-address register
   and is clobbered. */
417extern inline long
418__clear_user(void __user *to, long len)
419{
420	register void __user * __cl_to __asm__("$6") = to;
421	register long __cl_len __asm__("$0") = len;
422	__asm__ __volatile__(
423		__module_call(28, 2, __do_clear_user)
424		: "=r"(__cl_len), "=r"(__cl_to)
425		: __module_address(__do_clear_user)
426		  "0"(__cl_len), "1"(__cl_to)
427		: "$1","$2","$3","$4","$5","$28","memory");
428	return __cl_len;
429}
430
/* Range-checked zero-fill; on a failed check returns len untouched
   (nothing was cleared). */
431extern inline long
432clear_user(void __user *to, long len)
433{
434	if (__access_ok((unsigned long)to, len, get_fs()))
435		len = __clear_user(to, len);
436	return len;
437}
438
439#undef __module_address
440#undef __module_call
441
442/* Returns: -EFAULT if exception before terminator, N if the entire
443 buffer filled, else strlen. */
444
445extern long __strncpy_from_user(char *__to, const char __user *__from, long __to_len);
446
/* Checked strncpy from user space.  NOTE(review): the size passed to
   __access_ok() is 0, so only the start address is range-checked — the
   out-of-line helper is presumably relied on to fault safely past the
   limit; confirm against __strncpy_from_user. */
447extern inline long
448strncpy_from_user(char *to, const char __user *from, long n)
449{
450	long ret = -EFAULT;
451	if (__access_ok((unsigned long)from, 0, get_fs()))
452		ret = __strncpy_from_user(to, from, n);
453	return ret;
454}
455
456/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
457extern long __strlen_user(const char __user *);
458
/* Checked user strlen; returns 0 on a bad pointer.  Size-0 access_ok:
   only the start address is validated (see strncpy_from_user). */
459extern inline long strlen_user(const char __user *str)
460{
461	return access_ok(VERIFY_READ,str,0) ? __strlen_user(str) : 0;
462}
463
464/* Returns: 0 if exception before NUL or reaching the supplied limit (N),
465 * a value greater than N if the limit would be exceeded, else strlen. */
466extern long __strnlen_user(const char __user *, long);
467
/* Checked, bounded user strlen; returns 0 on a bad pointer.  Size-0
   access_ok: only the start address is validated. */
468extern inline long strnlen_user(const char __user *str, long n)
469{
470	return access_ok(VERIFY_READ,str,0) ? __strnlen_user(str, n) : 0;
471}
472
473/*
474 * About the exception table:
475 *
476 * - insn is a 32-bit pc-relative offset from the faulting insn.
477 * - nextinsn is a 16-bit offset off of the faulting instruction
478 * (not off of the *next* instruction as branches are).
479 * - errreg is the register in which to place -EFAULT.
480 * - valreg is the final target register for the load sequence
481 * and will be zeroed.
482 *
483 * Either errreg or valreg may be $31, in which case nothing happens.
484 *
485 * The exception fixup information "just so happens" to be arranged
486 * as in a MEM format instruction. This lets us emit our three
487 * values like so:
488 *
489 * lda valreg, nextinsn(errreg)
490 *
491 */
492
493struct exception_table_entry
494{
	/* 32-bit pc-relative offset of the faulting instruction. */
495	signed int insn;
496	union exception_fixup {
		/* Raw view: the whole fixup packed as one word, encoded by
		   the "lda valreg, nextinsn(errreg)" trick described above. */
497		unsigned unit;
498		struct {
			/* Signed 16-bit offset from the *faulting* insn to the
			   continuation point (not from the next insn). */
499			signed int nextinsn : 16;
			/* Register to receive -EFAULT; 31 means "none". */
500			unsigned int errreg : 5;
			/* Load-target register to be zeroed; 31 means "none". */
501			unsigned int valreg : 5;
502		} bits;
503	} fixup;
504};
505
506/* Apply a fixup after a fault: zero valreg and store -EFAULT into errreg
   (register number 31 disables either action), then evaluate to the new
   pc.  `map_reg` maps a register number to an lvalue for that register.
   Returns the new pc. */
507#define fixup_exception(map_reg, fixup, pc)			\
508({								\
509	if ((fixup)->fixup.bits.valreg != 31)			\
510		map_reg((fixup)->fixup.bits.valreg) = 0;	\
511	if ((fixup)->fixup.bits.errreg != 31)			\
512		map_reg((fixup)->fixup.bits.errreg) = -EFAULT;	\
513	(pc) + (fixup)->fixup.bits.nextinsn;			\
514})
515
516
517#endif /* __ALPHA_UACCESS_H */
diff --git a/include/asm-alpha/ucontext.h b/include/asm-alpha/ucontext.h
new file mode 100644
index 000000000000..47578ab42152
--- /dev/null
+++ b/include/asm-alpha/ucontext.h
@@ -0,0 +1,13 @@
1#ifndef _ASMAXP_UCONTEXT_H
2#define _ASMAXP_UCONTEXT_H
3
4struct ucontext {
5	unsigned long	  uc_flags;
6	struct ucontext	 *uc_link;
	/* Legacy old_sigset_t mask; the `osf` name suggests OSF/1
	   compatibility — confirm against the signal-delivery code. */
7	old_sigset_t	  uc_osf_sigmask;
8	stack_t		  uc_stack;
9	struct sigcontext uc_mcontext;
10	sigset_t	  uc_sigmask;	/* mask last for extensibility */
11};
12
13#endif /* !_ASMAXP_UCONTEXT_H */
diff --git a/include/asm-alpha/unaligned.h b/include/asm-alpha/unaligned.h
new file mode 100644
index 000000000000..a1d72846f61c
--- /dev/null
+++ b/include/asm-alpha/unaligned.h
@@ -0,0 +1,6 @@
1#ifndef __ALPHA_UNALIGNED_H
2#define __ALPHA_UNALIGNED_H
3
4#include <asm-generic/unaligned.h>
5
6#endif
diff --git a/include/asm-alpha/unistd.h b/include/asm-alpha/unistd.h
new file mode 100644
index 000000000000..c4e70e8617eb
--- /dev/null
+++ b/include/asm-alpha/unistd.h
@@ -0,0 +1,656 @@
1#ifndef _ALPHA_UNISTD_H
2#define _ALPHA_UNISTD_H
3
4#define __NR_osf_syscall 0 /* not implemented */
5#define __NR_exit 1
6#define __NR_fork 2
7#define __NR_read 3
8#define __NR_write 4
9#define __NR_osf_old_open 5 /* not implemented */
10#define __NR_close 6
11#define __NR_osf_wait4 7
12#define __NR_osf_old_creat 8 /* not implemented */
13#define __NR_link 9
14#define __NR_unlink 10
15#define __NR_osf_execve 11 /* not implemented */
16#define __NR_chdir 12
17#define __NR_fchdir 13
18#define __NR_mknod 14
19#define __NR_chmod 15
20#define __NR_chown 16
21#define __NR_brk 17
22#define __NR_osf_getfsstat 18 /* not implemented */
23#define __NR_lseek 19
24#define __NR_getxpid 20
25#define __NR_osf_mount 21
26#define __NR_umount 22
27#define __NR_setuid 23
28#define __NR_getxuid 24
29#define __NR_exec_with_loader 25 /* not implemented */
30#define __NR_ptrace 26
31#define __NR_osf_nrecvmsg 27 /* not implemented */
32#define __NR_osf_nsendmsg 28 /* not implemented */
33#define __NR_osf_nrecvfrom 29 /* not implemented */
34#define __NR_osf_naccept 30 /* not implemented */
35#define __NR_osf_ngetpeername 31 /* not implemented */
36#define __NR_osf_ngetsockname 32 /* not implemented */
37#define __NR_access 33
38#define __NR_osf_chflags 34 /* not implemented */
39#define __NR_osf_fchflags 35 /* not implemented */
40#define __NR_sync 36
41#define __NR_kill 37
42#define __NR_osf_old_stat 38 /* not implemented */
43#define __NR_setpgid 39
44#define __NR_osf_old_lstat 40 /* not implemented */
45#define __NR_dup 41
46#define __NR_pipe 42
47#define __NR_osf_set_program_attributes 43
48#define __NR_osf_profil 44 /* not implemented */
49#define __NR_open 45
50#define __NR_osf_old_sigaction 46 /* not implemented */
51#define __NR_getxgid 47
52#define __NR_osf_sigprocmask 48
53#define __NR_osf_getlogin 49 /* not implemented */
54#define __NR_osf_setlogin 50 /* not implemented */
55#define __NR_acct 51
56#define __NR_sigpending 52
57
58#define __NR_ioctl 54
59#define __NR_osf_reboot 55 /* not implemented */
60#define __NR_osf_revoke 56 /* not implemented */
61#define __NR_symlink 57
62#define __NR_readlink 58
63#define __NR_execve 59
64#define __NR_umask 60
65#define __NR_chroot 61
66#define __NR_osf_old_fstat 62 /* not implemented */
67#define __NR_getpgrp 63
68#define __NR_getpagesize 64
69#define __NR_osf_mremap 65 /* not implemented */
70#define __NR_vfork 66
71#define __NR_stat 67
72#define __NR_lstat 68
73#define __NR_osf_sbrk 69 /* not implemented */
74#define __NR_osf_sstk 70 /* not implemented */
75#define __NR_mmap 71 /* OSF/1 mmap is superset of Linux */
76#define __NR_osf_old_vadvise 72 /* not implemented */
77#define __NR_munmap 73
78#define __NR_mprotect 74
79#define __NR_madvise 75
80#define __NR_vhangup 76
81#define __NR_osf_kmodcall 77 /* not implemented */
82#define __NR_osf_mincore 78 /* not implemented */
83#define __NR_getgroups 79
84#define __NR_setgroups 80
85#define __NR_osf_old_getpgrp 81 /* not implemented */
86#define __NR_setpgrp 82 /* BSD alias for setpgid */
87#define __NR_osf_setitimer 83
88#define __NR_osf_old_wait 84 /* not implemented */
89#define __NR_osf_table 85 /* not implemented */
90#define __NR_osf_getitimer 86
91#define __NR_gethostname 87
92#define __NR_sethostname 88
93#define __NR_getdtablesize 89
94#define __NR_dup2 90
95#define __NR_fstat 91
96#define __NR_fcntl 92
97#define __NR_osf_select 93
98#define __NR_poll 94
99#define __NR_fsync 95
100#define __NR_setpriority 96
101#define __NR_socket 97
102#define __NR_connect 98
103#define __NR_accept 99
104#define __NR_getpriority 100
105#define __NR_send 101
106#define __NR_recv 102
107#define __NR_sigreturn 103
108#define __NR_bind 104
109#define __NR_setsockopt 105
110#define __NR_listen 106
111#define __NR_osf_plock 107 /* not implemented */
112#define __NR_osf_old_sigvec 108 /* not implemented */
113#define __NR_osf_old_sigblock 109 /* not implemented */
114#define __NR_osf_old_sigsetmask 110 /* not implemented */
115#define __NR_sigsuspend 111
116#define __NR_osf_sigstack 112
117#define __NR_recvmsg 113
118#define __NR_sendmsg 114
119#define __NR_osf_old_vtrace 115 /* not implemented */
120#define __NR_osf_gettimeofday 116
121#define __NR_osf_getrusage 117
122#define __NR_getsockopt 118
123
124#define __NR_readv 120
125#define __NR_writev 121
126#define __NR_osf_settimeofday 122
127#define __NR_fchown 123
128#define __NR_fchmod 124
129#define __NR_recvfrom 125
130#define __NR_setreuid 126
131#define __NR_setregid 127
132#define __NR_rename 128
133#define __NR_truncate 129
134#define __NR_ftruncate 130
135#define __NR_flock 131
136#define __NR_setgid 132
137#define __NR_sendto 133
138#define __NR_shutdown 134
139#define __NR_socketpair 135
140#define __NR_mkdir 136
141#define __NR_rmdir 137
142#define __NR_osf_utimes 138
143#define __NR_osf_old_sigreturn 139 /* not implemented */
144#define __NR_osf_adjtime 140 /* not implemented */
145#define __NR_getpeername 141
146#define __NR_osf_gethostid 142 /* not implemented */
147#define __NR_osf_sethostid 143 /* not implemented */
148#define __NR_getrlimit 144
149#define __NR_setrlimit 145
150#define __NR_osf_old_killpg 146 /* not implemented */
151#define __NR_setsid 147
152#define __NR_quotactl 148
153#define __NR_osf_oldquota 149 /* not implemented */
154#define __NR_getsockname 150
155
156#define __NR_osf_pid_block 153 /* not implemented */
157#define __NR_osf_pid_unblock 154 /* not implemented */
158
159#define __NR_sigaction 156
160#define __NR_osf_sigwaitprim 157 /* not implemented */
161#define __NR_osf_nfssvc 158 /* not implemented */
162#define __NR_osf_getdirentries 159
163#define __NR_osf_statfs 160
164#define __NR_osf_fstatfs 161
165
166#define __NR_osf_asynch_daemon 163 /* not implemented */
167#define __NR_osf_getfh 164 /* not implemented */
168#define __NR_osf_getdomainname 165
169#define __NR_setdomainname 166
170
171#define __NR_osf_exportfs 169 /* not implemented */
172
173#define __NR_osf_alt_plock 181 /* not implemented */
174
175#define __NR_osf_getmnt 184 /* not implemented */
176
177#define __NR_osf_alt_sigpending 187 /* not implemented */
178#define __NR_osf_alt_setsid 188 /* not implemented */
179
180#define __NR_osf_swapon 199
181#define __NR_msgctl 200
182#define __NR_msgget 201
183#define __NR_msgrcv 202
184#define __NR_msgsnd 203
185#define __NR_semctl 204
186#define __NR_semget 205
187#define __NR_semop 206
188#define __NR_osf_utsname 207
189#define __NR_lchown 208
190#define __NR_osf_shmat 209
191#define __NR_shmctl 210
192#define __NR_shmdt 211
193#define __NR_shmget 212
194#define __NR_osf_mvalid 213 /* not implemented */
195#define __NR_osf_getaddressconf 214 /* not implemented */
196#define __NR_osf_msleep 215 /* not implemented */
197#define __NR_osf_mwakeup 216 /* not implemented */
198#define __NR_msync 217
199#define __NR_osf_signal 218 /* not implemented */
200#define __NR_osf_utc_gettime 219 /* not implemented */
201#define __NR_osf_utc_adjtime 220 /* not implemented */
202
203#define __NR_osf_security 222 /* not implemented */
204#define __NR_osf_kloadcall 223 /* not implemented */
205
206#define __NR_getpgid 233
207#define __NR_getsid 234
208#define __NR_sigaltstack 235
209#define __NR_osf_waitid 236 /* not implemented */
210#define __NR_osf_priocntlset 237 /* not implemented */
211#define __NR_osf_sigsendset 238 /* not implemented */
212#define __NR_osf_set_speculative 239 /* not implemented */
213#define __NR_osf_msfs_syscall 240 /* not implemented */
214#define __NR_osf_sysinfo 241
215#define __NR_osf_uadmin 242 /* not implemented */
216#define __NR_osf_fuser 243 /* not implemented */
217#define __NR_osf_proplist_syscall 244
218#define __NR_osf_ntp_adjtime 245 /* not implemented */
219#define __NR_osf_ntp_gettime 246 /* not implemented */
220#define __NR_osf_pathconf 247 /* not implemented */
221#define __NR_osf_fpathconf 248 /* not implemented */
222
223#define __NR_osf_uswitch 250 /* not implemented */
224#define __NR_osf_usleep_thread 251
225#define __NR_osf_audcntl 252 /* not implemented */
226#define __NR_osf_audgen 253 /* not implemented */
227#define __NR_sysfs 254
228#define __NR_osf_subsys_info 255 /* not implemented */
229#define __NR_osf_getsysinfo 256
230#define __NR_osf_setsysinfo 257
231#define __NR_osf_afs_syscall 258 /* not implemented */
232#define __NR_osf_swapctl 259 /* not implemented */
233#define __NR_osf_memcntl 260 /* not implemented */
234#define __NR_osf_fdatasync 261 /* not implemented */
235
236
237/*
238 * Linux-specific system calls begin at 300
239 */
240#define __NR_bdflush 300
241#define __NR_sethae 301
242#define __NR_mount 302
243#define __NR_old_adjtimex 303
244#define __NR_swapoff 304
245#define __NR_getdents 305
246#define __NR_create_module 306
247#define __NR_init_module 307
248#define __NR_delete_module 308
249#define __NR_get_kernel_syms 309
250#define __NR_syslog 310
251#define __NR_reboot 311
252#define __NR_clone 312
253#define __NR_uselib 313
254#define __NR_mlock 314
255#define __NR_munlock 315
256#define __NR_mlockall 316
257#define __NR_munlockall 317
258#define __NR_sysinfo 318
259#define __NR__sysctl 319
260/* 320 was sys_idle. */
261#define __NR_oldumount 321
262#define __NR_swapon 322
263#define __NR_times 323
264#define __NR_personality 324
265#define __NR_setfsuid 325
266#define __NR_setfsgid 326
267#define __NR_ustat 327
268#define __NR_statfs 328
269#define __NR_fstatfs 329
270#define __NR_sched_setparam 330
271#define __NR_sched_getparam 331
272#define __NR_sched_setscheduler 332
273#define __NR_sched_getscheduler 333
274#define __NR_sched_yield 334
275#define __NR_sched_get_priority_max 335
276#define __NR_sched_get_priority_min 336
277#define __NR_sched_rr_get_interval 337
278#define __NR_afs_syscall 338
279#define __NR_uname 339
280#define __NR_nanosleep 340
281#define __NR_mremap 341
282#define __NR_nfsservctl 342
283#define __NR_setresuid 343
284#define __NR_getresuid 344
285#define __NR_pciconfig_read 345
286#define __NR_pciconfig_write 346
287#define __NR_query_module 347
288#define __NR_prctl 348
289#define __NR_pread64 349
290#define __NR_pwrite64 350
291#define __NR_rt_sigreturn 351
292#define __NR_rt_sigaction 352
293#define __NR_rt_sigprocmask 353
294#define __NR_rt_sigpending 354
295#define __NR_rt_sigtimedwait 355
296#define __NR_rt_sigqueueinfo 356
297#define __NR_rt_sigsuspend 357
298#define __NR_select 358
299#define __NR_gettimeofday 359
300#define __NR_settimeofday 360
301#define __NR_getitimer 361
302#define __NR_setitimer 362
303#define __NR_utimes 363
304#define __NR_getrusage 364
305#define __NR_wait4 365
306#define __NR_adjtimex 366
307#define __NR_getcwd 367
308#define __NR_capget 368
309#define __NR_capset 369
310#define __NR_sendfile 370
311#define __NR_setresgid 371
312#define __NR_getresgid 372
313#define __NR_dipc 373
314#define __NR_pivot_root 374
315#define __NR_mincore 375
316#define __NR_pciconfig_iobase 376
317#define __NR_getdents64 377
318#define __NR_gettid 378
319#define __NR_readahead 379
320/* 380 is unused */
321#define __NR_tkill 381
322#define __NR_setxattr 382
323#define __NR_lsetxattr 383
324#define __NR_fsetxattr 384
325#define __NR_getxattr 385
326#define __NR_lgetxattr 386
327#define __NR_fgetxattr 387
328#define __NR_listxattr 388
329#define __NR_llistxattr 389
330#define __NR_flistxattr 390
331#define __NR_removexattr 391
332#define __NR_lremovexattr 392
333#define __NR_fremovexattr 393
334#define __NR_futex 394
335#define __NR_sched_setaffinity 395
336#define __NR_sched_getaffinity 396
337#define __NR_tuxcall 397
338#define __NR_io_setup 398
339#define __NR_io_destroy 399
340#define __NR_io_getevents 400
341#define __NR_io_submit 401
342#define __NR_io_cancel 402
343#define __NR_exit_group 405
344#define __NR_lookup_dcookie 406
345#define __NR_sys_epoll_create 407
346#define __NR_sys_epoll_ctl 408
347#define __NR_sys_epoll_wait 409
348#define __NR_remap_file_pages 410
349#define __NR_set_tid_address 411
350#define __NR_restart_syscall 412
351#define __NR_fadvise64 413
352#define __NR_timer_create 414
353#define __NR_timer_settime 415
354#define __NR_timer_gettime 416
355#define __NR_timer_getoverrun 417
356#define __NR_timer_delete 418
357#define __NR_clock_settime 419
358#define __NR_clock_gettime 420
359#define __NR_clock_getres 421
360#define __NR_clock_nanosleep 422
361#define __NR_semtimedop 423
362#define __NR_tgkill 424
363#define __NR_stat64 425
364#define __NR_lstat64 426
365#define __NR_fstat64 427
366#define __NR_vserver 428
367#define __NR_mbind 429
368#define __NR_get_mempolicy 430
369#define __NR_set_mempolicy 431
370#define __NR_mq_open 432
371#define __NR_mq_unlink 433
372#define __NR_mq_timedsend 434
373#define __NR_mq_timedreceive 435
374#define __NR_mq_notify 436
375#define __NR_mq_getsetattr 437
376#define __NR_waitid 438
377
378#define NR_SYSCALLS 439
379
380#if defined(__GNUC__)
381
382#define _syscall_return(type) \
383 return (_sc_err ? errno = _sc_ret, _sc_ret = -1L : 0), (type) _sc_ret
384
385#define _syscall_clobbers \
386 "$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8", \
387 "$22", "$23", "$24", "$25", "$27", "$28" \
388
389#define _syscall0(type, name) \
390type name(void) \
391{ \
392 long _sc_ret, _sc_err; \
393 { \
394 register long _sc_0 __asm__("$0"); \
395 register long _sc_19 __asm__("$19"); \
396 \
397 _sc_0 = __NR_##name; \
398 __asm__("callsys # %0 %1 %2" \
399 : "=r"(_sc_0), "=r"(_sc_19) \
400 : "0"(_sc_0) \
401 : _syscall_clobbers); \
402 _sc_ret = _sc_0, _sc_err = _sc_19; \
403 } \
404 _syscall_return(type); \
405}
406
407#define _syscall1(type,name,type1,arg1) \
408type name(type1 arg1) \
409{ \
410 long _sc_ret, _sc_err; \
411 { \
412 register long _sc_0 __asm__("$0"); \
413 register long _sc_16 __asm__("$16"); \
414 register long _sc_19 __asm__("$19"); \
415 \
416 _sc_0 = __NR_##name; \
417 _sc_16 = (long) (arg1); \
418 __asm__("callsys # %0 %1 %2 %3" \
419 : "=r"(_sc_0), "=r"(_sc_19) \
420 : "0"(_sc_0), "r"(_sc_16) \
421 : _syscall_clobbers); \
422 _sc_ret = _sc_0, _sc_err = _sc_19; \
423 } \
424 _syscall_return(type); \
425}
426
427#define _syscall2(type,name,type1,arg1,type2,arg2) \
428type name(type1 arg1,type2 arg2) \
429{ \
430 long _sc_ret, _sc_err; \
431 { \
432 register long _sc_0 __asm__("$0"); \
433 register long _sc_16 __asm__("$16"); \
434 register long _sc_17 __asm__("$17"); \
435 register long _sc_19 __asm__("$19"); \
436 \
437 _sc_0 = __NR_##name; \
438 _sc_16 = (long) (arg1); \
439 _sc_17 = (long) (arg2); \
440 __asm__("callsys # %0 %1 %2 %3 %4" \
441 : "=r"(_sc_0), "=r"(_sc_19) \
442 : "0"(_sc_0), "r"(_sc_16), "r"(_sc_17) \
443 : _syscall_clobbers); \
444 _sc_ret = _sc_0, _sc_err = _sc_19; \
445 } \
446 _syscall_return(type); \
447}
448
449#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
450type name(type1 arg1,type2 arg2,type3 arg3) \
451{ \
452 long _sc_ret, _sc_err; \
453 { \
454 register long _sc_0 __asm__("$0"); \
455 register long _sc_16 __asm__("$16"); \
456 register long _sc_17 __asm__("$17"); \
457 register long _sc_18 __asm__("$18"); \
458 register long _sc_19 __asm__("$19"); \
459 \
460 _sc_0 = __NR_##name; \
461 _sc_16 = (long) (arg1); \
462 _sc_17 = (long) (arg2); \
463 _sc_18 = (long) (arg3); \
464 __asm__("callsys # %0 %1 %2 %3 %4 %5" \
465 : "=r"(_sc_0), "=r"(_sc_19) \
466 : "0"(_sc_0), "r"(_sc_16), "r"(_sc_17), \
467 "r"(_sc_18) \
468 : _syscall_clobbers); \
469 _sc_ret = _sc_0, _sc_err = _sc_19; \
470 } \
471 _syscall_return(type); \
472}
473
/*
 * Stub generator for a four-argument system call "name".
 * Note that arg4 lives in $19, which doubles as the error-flag
 * output register: the "1"(_sc_19) matching constraint ties the
 * input to output operand %1 so gcc keeps both in $19.
 */
#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
	long _sc_ret, _sc_err; \
	{ \
		register long _sc_0 __asm__("$0"); \
		register long _sc_16 __asm__("$16"); \
		register long _sc_17 __asm__("$17"); \
		register long _sc_18 __asm__("$18"); \
		register long _sc_19 __asm__("$19"); \
	\
		_sc_0 = __NR_##name; \
		_sc_16 = (long) (arg1); \
		_sc_17 = (long) (arg2); \
		_sc_18 = (long) (arg3); \
		_sc_19 = (long) (arg4); \
		__asm__("callsys # %0 %1 %2 %3 %4 %5 %6" \
			: "=r"(_sc_0), "=r"(_sc_19) \
			: "0"(_sc_0), "r"(_sc_16), "r"(_sc_17), \
			  "r"(_sc_18), "1"(_sc_19) \
			: _syscall_clobbers); \
		_sc_ret = _sc_0, _sc_err = _sc_19; \
	} \
	_syscall_return(type); \
}
499
/*
 * Stub generator for a five-argument system call "name".
 * Arguments go in $16-$20; as in _syscall4, arg4 shares $19 with the
 * error-flag output via the "1" matching constraint.
 */
#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
	type5,arg5) \
type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
	long _sc_ret, _sc_err; \
	{ \
		register long _sc_0 __asm__("$0"); \
		register long _sc_16 __asm__("$16"); \
		register long _sc_17 __asm__("$17"); \
		register long _sc_18 __asm__("$18"); \
		register long _sc_19 __asm__("$19"); \
		register long _sc_20 __asm__("$20"); \
	\
		_sc_0 = __NR_##name; \
		_sc_16 = (long) (arg1); \
		_sc_17 = (long) (arg2); \
		_sc_18 = (long) (arg3); \
		_sc_19 = (long) (arg4); \
		_sc_20 = (long) (arg5); \
		__asm__("callsys # %0 %1 %2 %3 %4 %5 %6 %7" \
			: "=r"(_sc_0), "=r"(_sc_19) \
			: "0"(_sc_0), "r"(_sc_16), "r"(_sc_17), \
			  "r"(_sc_18), "1"(_sc_19), "r"(_sc_20) \
			: _syscall_clobbers); \
		_sc_ret = _sc_0, _sc_err = _sc_19; \
	} \
	_syscall_return(type); \
}
528
/*
 * Stub generator for a six-argument system call "name".
 * Arguments go in $16-$21; as in _syscall4, arg4 shares $19 with the
 * error-flag output via the "1" matching constraint.
 */
#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
	type5,arg5,type6,arg6) \
type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, type6 arg6)\
{ \
	long _sc_ret, _sc_err; \
	{ \
		register long _sc_0 __asm__("$0"); \
		register long _sc_16 __asm__("$16"); \
		register long _sc_17 __asm__("$17"); \
		register long _sc_18 __asm__("$18"); \
		register long _sc_19 __asm__("$19"); \
		register long _sc_20 __asm__("$20"); \
		register long _sc_21 __asm__("$21"); \
	\
		_sc_0 = __NR_##name; \
		_sc_16 = (long) (arg1); \
		_sc_17 = (long) (arg2); \
		_sc_18 = (long) (arg3); \
		_sc_19 = (long) (arg4); \
		_sc_20 = (long) (arg5); \
		_sc_21 = (long) (arg6); \
		__asm__("callsys # %0 %1 %2 %3 %4 %5 %6 %7 %8" \
			: "=r"(_sc_0), "=r"(_sc_19) \
			: "0"(_sc_0), "r"(_sc_16), "r"(_sc_17), \
			  "r"(_sc_18), "1"(_sc_19), "r"(_sc_20), "r"(_sc_21) \
			: _syscall_clobbers); \
		_sc_ret = _sc_0, _sc_err = _sc_19; \
	} \
	_syscall_return(type); \
}
559
560#endif /* __LIBRARY__ && __GNUC__ */
561
#ifdef __KERNEL__
/*
 * Opt in to the generic implementations of various legacy/compat
 * system calls; these macros are consumed by architecture-independent
 * kernel code elsewhere.
 */
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_SOCKETCALL
#define __ARCH_WANT_SYS_FADVISE64
#define __ARCH_WANT_SYS_GETPGRP
#define __ARCH_WANT_SYS_OLD_GETRLIMIT
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
#endif
574
575#ifdef __KERNEL_SYSCALLS__
576
577#include <linux/compiler.h>
578#include <linux/types.h>
579#include <linux/string.h>
580#include <linux/signal.h>
581#include <linux/syscalls.h>
582#include <asm/ptrace.h>
583
/*
 * In-kernel wrapper for the open(2) system call.
 *
 * The second positional argument sys_open() receives is the open
 * flags word (O_RDONLY etc.) and the third is the creation mode, so
 * the parameters are named accordingly here.  (The original
 * declaration labelled them "mode, flags"; that was misleading -- C
 * arguments are passed by position, and position 2 is the flags.)
 * Behavior is unchanged: all arguments pass straight through.
 */
static inline long open(const char * name, int flags, int mode)
{
	return sys_open(name, flags, mode);
}
588
/* In-kernel wrapper: duplicate file descriptor fd via sys_dup(). */
static inline long dup(int fd)
{
	return sys_dup(fd);
}
593
/* In-kernel wrapper: close file descriptor fd via sys_close(). */
static inline long close(int fd)
{
	return sys_close(fd);
}
598
/* In-kernel wrapper: reposition the file offset of fd via sys_lseek(). */
static inline off_t lseek(int fd, off_t off, int whence)
{
	return sys_lseek(fd, off, whence);
}
603
/*
 * Terminate the current task with the given exit code.
 * sys_exit() never returns, so neither does this.
 */
static inline void _exit(int value)
{
	sys_exit(value);
}

/* Let in-kernel callers use the conventional exit() spelling. */
#define exit(x) _exit(x)
610
/* In-kernel wrapper: write nr bytes from buf to fd via sys_write(). */
static inline long write(int fd, const char * buf, size_t nr)
{
	return sys_write(fd, buf, nr);
}
615
/* In-kernel wrapper: read up to nr bytes from fd into buf via sys_read(). */
static inline long read(int fd, char * buf, size_t nr)
{
	return sys_read(fd, buf, nr);
}
620
621extern int execve(char *, char **, char **);
622
/* In-kernel wrapper: start a new session via sys_setsid(). */
static inline long setsid(void)
{
	return sys_setsid();
}
627
/*
 * Minimal waitpid(): implemented on top of sys_wait4() with a NULL
 * rusage pointer; "flags" is forwarded as the wait options argument.
 */
static inline pid_t waitpid(int pid, int * wait_stat, int flags)
{
	return sys_wait4(pid, wait_stat, flags, NULL);
}
632
633asmlinkage int sys_execve(char *ufilename, char **argv, char **envp,
634 unsigned long a3, unsigned long a4, unsigned long a5,
635 struct pt_regs regs);
636asmlinkage long sys_rt_sigaction(int sig,
637 const struct sigaction __user *act,
638 struct sigaction __user *oact,
639 size_t sigsetsize,
640 void *restorer);
641
642#endif /* __KERNEL_SYSCALLS__ */
643
/* "Conditional" syscalls. What we want is

   __attribute__((weak,alias("sys_ni_syscall")))

   but that raises the problem of what type to give the symbol. If we use
   a prototype, it'll conflict with the definition given in this file and
   others. If we use __typeof, we discover that not all symbols actually
   have declarations. If we use no prototype, then we get warnings from
   -Wstrict-prototypes. Ho hum. */

/* So: define "x" as an assembler-level weak alias of sys_ni_syscall,
   sidestepping the C-level typing problem described above entirely. */
#define cond_syscall(x) asm(".weak\t" #x "\n" #x " = sys_ni_syscall")
655
656#endif /* _ALPHA_UNISTD_H */
diff --git a/include/asm-alpha/user.h b/include/asm-alpha/user.h
new file mode 100644
index 000000000000..7e417fc9d491
--- /dev/null
+++ b/include/asm-alpha/user.h
@@ -0,0 +1,53 @@
1#ifndef _ALPHA_USER_H
2#define _ALPHA_USER_H
3
4#include <linux/sched.h>
5#include <linux/ptrace.h>
6
7#include <asm/page.h>
8#include <asm/reg.h>
9
10/*
11 * Core file format: The core file is written in such a way that gdb
12 * can understand it and provide useful information to the user (under
13 * linux we use the `trad-core' bfd, NOT the osf-core). The file contents
14 * are as follows:
15 *
16 * upage: 1 page consisting of a user struct that tells gdb
17 * what is present in the file. Directly after this is a
18 * copy of the task_struct, which is currently not used by gdb,
19 * but it may come in handy at some point. All of the registers
20 * are stored as part of the upage. The upage should always be
21 * only one page long.
22 * data: The data segment follows next. We use current->end_text to
23 * current->brk to pick up all of the user variables, plus any memory
24 * that may have been sbrk'ed. No attempt is made to determine if a
25 * page is demand-zero or if a page is totally unused, we just cover
26 * the entire range. All of the addresses are rounded in such a way
27 * that an integral number of pages is written.
28 * stack: We need the stack information in order to get a meaningful
29 * backtrace. We need to write the data from usp to
30 * current->start_stack, so we round each of these in order to be able
31 * to write an integer number of pages.
32 */
/*
 * The user struct that begins the "upage" of a core dump (see the
 * layout description above).  NOTE(review): the field order and sizes
 * form part of the core-file format that gdb's trad-core reader
 * consumes -- do not reorder or resize fields.
 */
struct user {
	unsigned long regs[EF_SIZE/8+32];	/* integer and fp regs */
	size_t u_tsize;			/* text size (pages) */
	size_t u_dsize;			/* data size (pages) */
	size_t u_ssize;			/* stack size (pages) */
	unsigned long start_code;	/* text starting address */
	unsigned long start_data;	/* data starting address */
	unsigned long start_stack;	/* stack starting address */
	long int signal;		/* signal causing core dump */
	struct regs * u_ar0;		/* help gdb find registers */
	unsigned long magic;		/* identifies a core file */
	char u_comm[32];		/* user command name */
};
46
/* Core-dump layout parameters derived from the struct above. */
#define NBPG PAGE_SIZE			/* bytes per "page" in the dump */
#define UPAGES 1			/* the upage is exactly one page */
#define HOST_TEXT_START_ADDR (u.start_code)
#define HOST_DATA_START_ADDR (u.start_data)
#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
52
53#endif /* _ALPHA_USER_H */
diff --git a/include/asm-alpha/vga.h b/include/asm-alpha/vga.h
new file mode 100644
index 000000000000..8ca4f6b2da19
--- /dev/null
+++ b/include/asm-alpha/vga.h
@@ -0,0 +1,51 @@
1/*
2 * Access to VGA videoram
3 *
4 * (c) 1998 Martin Mares <mj@ucw.cz>
5 */
6
7#ifndef _LINUX_ASM_VGA_H_
8#define _LINUX_ASM_VGA_H_
9
10#include <asm/io.h>
11
12#define VT_BUF_HAVE_RW
13#define VT_BUF_HAVE_MEMSETW
14#define VT_BUF_HAVE_MEMCPYW
15
16extern inline void scr_writew(u16 val, volatile u16 *addr)
17{
18 if (__is_ioaddr(addr))
19 __raw_writew(val, (volatile u16 __iomem *) addr);
20 else
21 *addr = val;
22}
23
24extern inline u16 scr_readw(volatile const u16 *addr)
25{
26 if (__is_ioaddr(addr))
27 return __raw_readw((volatile const u16 __iomem *) addr);
28 else
29 return *addr;
30}
31
32extern inline void scr_memsetw(u16 *s, u16 c, unsigned int count)
33{
34 if (__is_ioaddr(s))
35 memsetw_io((u16 __iomem *) s, c, count);
36 else
37 memsetw(s, c, count);
38}
39
40/* Do not trust that the usage will be correct; analyze the arguments. */
41extern void scr_memcpyw(u16 *d, const u16 *s, unsigned int count);
42
/* ??? These are currently only used for downloading character sets. As
   such, they don't need memory barriers. Is this all they are intended
   to be used for? */
#define vga_readb(a) readb((u8 __iomem *)(a))
#define vga_writeb(v,a) writeb(v, (u8 __iomem *)(a))

/* NOTE(review): ioremap() is called with a length of 0 here;
   presumably the alpha ioremap ignores the size argument -- confirm
   before reusing this pattern elsewhere. */
#define VGA_MAP_MEM(x) ((unsigned long) ioremap(x, 0))
50
51#endif
diff --git a/include/asm-alpha/xor.h b/include/asm-alpha/xor.h
new file mode 100644
index 000000000000..5ee1c2bc0499
--- /dev/null
+++ b/include/asm-alpha/xor.h
@@ -0,0 +1,855 @@
1/*
2 * include/asm-alpha/xor.h
3 *
4 * Optimized RAID-5 checksumming functions for alpha EV5 and EV6
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2, or (at your option)
9 * any later version.
10 *
11 * You should have received a copy of the GNU General Public License
12 * (for example /usr/src/linux/COPYING); if not, write to the Free
13 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
14 */
15
16extern void xor_alpha_2(unsigned long, unsigned long *, unsigned long *);
17extern void xor_alpha_3(unsigned long, unsigned long *, unsigned long *,
18 unsigned long *);
19extern void xor_alpha_4(unsigned long, unsigned long *, unsigned long *,
20 unsigned long *, unsigned long *);
21extern void xor_alpha_5(unsigned long, unsigned long *, unsigned long *,
22 unsigned long *, unsigned long *, unsigned long *);
23
24extern void xor_alpha_prefetch_2(unsigned long, unsigned long *,
25 unsigned long *);
26extern void xor_alpha_prefetch_3(unsigned long, unsigned long *,
27 unsigned long *, unsigned long *);
28extern void xor_alpha_prefetch_4(unsigned long, unsigned long *,
29 unsigned long *, unsigned long *,
30 unsigned long *);
31extern void xor_alpha_prefetch_5(unsigned long, unsigned long *,
32 unsigned long *, unsigned long *,
33 unsigned long *, unsigned long *);
34
35asm(" \n\
36 .text \n\
37 .align 3 \n\
38 .ent xor_alpha_2 \n\
39xor_alpha_2: \n\
40 .prologue 0 \n\
41 srl $16, 6, $16 \n\
42 .align 4 \n\
432: \n\
44 ldq $0,0($17) \n\
45 ldq $1,0($18) \n\
46 ldq $2,8($17) \n\
47 ldq $3,8($18) \n\
48 \n\
49 ldq $4,16($17) \n\
50 ldq $5,16($18) \n\
51 ldq $6,24($17) \n\
52 ldq $7,24($18) \n\
53 \n\
54 ldq $19,32($17) \n\
55 ldq $20,32($18) \n\
56 ldq $21,40($17) \n\
57 ldq $22,40($18) \n\
58 \n\
59 ldq $23,48($17) \n\
60 ldq $24,48($18) \n\
61 ldq $25,56($17) \n\
62 xor $0,$1,$0 # 7 cycles from $1 load \n\
63 \n\
64 ldq $27,56($18) \n\
65 xor $2,$3,$2 \n\
66 stq $0,0($17) \n\
67 xor $4,$5,$4 \n\
68 \n\
69 stq $2,8($17) \n\
70 xor $6,$7,$6 \n\
71 stq $4,16($17) \n\
72 xor $19,$20,$19 \n\
73 \n\
74 stq $6,24($17) \n\
75 xor $21,$22,$21 \n\
76 stq $19,32($17) \n\
77 xor $23,$24,$23 \n\
78 \n\
79 stq $21,40($17) \n\
80 xor $25,$27,$25 \n\
81 stq $23,48($17) \n\
82 subq $16,1,$16 \n\
83 \n\
84 stq $25,56($17) \n\
85 addq $17,64,$17 \n\
86 addq $18,64,$18 \n\
87 bgt $16,2b \n\
88 \n\
89 ret \n\
90 .end xor_alpha_2 \n\
91 \n\
92 .align 3 \n\
93 .ent xor_alpha_3 \n\
94xor_alpha_3: \n\
95 .prologue 0 \n\
96 srl $16, 6, $16 \n\
97 .align 4 \n\
983: \n\
99 ldq $0,0($17) \n\
100 ldq $1,0($18) \n\
101 ldq $2,0($19) \n\
102 ldq $3,8($17) \n\
103 \n\
104 ldq $4,8($18) \n\
105 ldq $6,16($17) \n\
106 ldq $7,16($18) \n\
107 ldq $21,24($17) \n\
108 \n\
109 ldq $22,24($18) \n\
110 ldq $24,32($17) \n\
111 ldq $25,32($18) \n\
112 ldq $5,8($19) \n\
113 \n\
114 ldq $20,16($19) \n\
115 ldq $23,24($19) \n\
116 ldq $27,32($19) \n\
117 nop \n\
118 \n\
119 xor $0,$1,$1 # 8 cycles from $0 load \n\
120 xor $3,$4,$4 # 6 cycles from $4 load \n\
121 xor $6,$7,$7 # 6 cycles from $7 load \n\
122 xor $21,$22,$22 # 5 cycles from $22 load \n\
123 \n\
124 xor $1,$2,$2 # 9 cycles from $2 load \n\
125 xor $24,$25,$25 # 5 cycles from $25 load \n\
126 stq $2,0($17) \n\
127 xor $4,$5,$5 # 6 cycles from $5 load \n\
128 \n\
129 stq $5,8($17) \n\
130 xor $7,$20,$20 # 7 cycles from $20 load \n\
131 stq $20,16($17) \n\
132 xor $22,$23,$23 # 7 cycles from $23 load \n\
133 \n\
134 stq $23,24($17) \n\
135 xor $25,$27,$27 # 7 cycles from $27 load \n\
136 stq $27,32($17) \n\
137 nop \n\
138 \n\
139 ldq $0,40($17) \n\
140 ldq $1,40($18) \n\
141 ldq $3,48($17) \n\
142 ldq $4,48($18) \n\
143 \n\
144 ldq $6,56($17) \n\
145 ldq $7,56($18) \n\
146 ldq $2,40($19) \n\
147 ldq $5,48($19) \n\
148 \n\
149 ldq $20,56($19) \n\
150 xor $0,$1,$1 # 4 cycles from $1 load \n\
151 xor $3,$4,$4 # 5 cycles from $4 load \n\
152 xor $6,$7,$7 # 5 cycles from $7 load \n\
153 \n\
154 xor $1,$2,$2 # 4 cycles from $2 load \n\
155 xor $4,$5,$5 # 5 cycles from $5 load \n\
156 stq $2,40($17) \n\
157 xor $7,$20,$20 # 4 cycles from $20 load \n\
158 \n\
159 stq $5,48($17) \n\
160 subq $16,1,$16 \n\
161 stq $20,56($17) \n\
162 addq $19,64,$19 \n\
163 \n\
164 addq $18,64,$18 \n\
165 addq $17,64,$17 \n\
166 bgt $16,3b \n\
167 ret \n\
168 .end xor_alpha_3 \n\
169 \n\
170 .align 3 \n\
171 .ent xor_alpha_4 \n\
172xor_alpha_4: \n\
173 .prologue 0 \n\
174 srl $16, 6, $16 \n\
175 .align 4 \n\
1764: \n\
177 ldq $0,0($17) \n\
178 ldq $1,0($18) \n\
179 ldq $2,0($19) \n\
180 ldq $3,0($20) \n\
181 \n\
182 ldq $4,8($17) \n\
183 ldq $5,8($18) \n\
184 ldq $6,8($19) \n\
185 ldq $7,8($20) \n\
186 \n\
187 ldq $21,16($17) \n\
188 ldq $22,16($18) \n\
189 ldq $23,16($19) \n\
190 ldq $24,16($20) \n\
191 \n\
192 ldq $25,24($17) \n\
193 xor $0,$1,$1 # 6 cycles from $1 load \n\
194 ldq $27,24($18) \n\
195 xor $2,$3,$3 # 6 cycles from $3 load \n\
196 \n\
197 ldq $0,24($19) \n\
198 xor $1,$3,$3 \n\
199 ldq $1,24($20) \n\
200 xor $4,$5,$5 # 7 cycles from $5 load \n\
201 \n\
202 stq $3,0($17) \n\
203 xor $6,$7,$7 \n\
204 xor $21,$22,$22 # 7 cycles from $22 load \n\
205 xor $5,$7,$7 \n\
206 \n\
207 stq $7,8($17) \n\
208 xor $23,$24,$24 # 7 cycles from $24 load \n\
209 ldq $2,32($17) \n\
210 xor $22,$24,$24 \n\
211 \n\
212 ldq $3,32($18) \n\
213 ldq $4,32($19) \n\
214 ldq $5,32($20) \n\
215 xor $25,$27,$27 # 8 cycles from $27 load \n\
216 \n\
217 ldq $6,40($17) \n\
218 ldq $7,40($18) \n\
219 ldq $21,40($19) \n\
220 ldq $22,40($20) \n\
221 \n\
222 stq $24,16($17) \n\
223 xor $0,$1,$1 # 9 cycles from $1 load \n\
224 xor $2,$3,$3 # 5 cycles from $3 load \n\
225 xor $27,$1,$1 \n\
226 \n\
227 stq $1,24($17) \n\
228 xor $4,$5,$5 # 5 cycles from $5 load \n\
229 ldq $23,48($17) \n\
230 ldq $24,48($18) \n\
231 \n\
232 ldq $25,48($19) \n\
233 xor $3,$5,$5 \n\
234 ldq $27,48($20) \n\
235 ldq $0,56($17) \n\
236 \n\
237 ldq $1,56($18) \n\
238 ldq $2,56($19) \n\
239 xor $6,$7,$7 # 8 cycles from $6 load \n\
240 ldq $3,56($20) \n\
241 \n\
242 stq $5,32($17) \n\
243 xor $21,$22,$22 # 8 cycles from $22 load \n\
244 xor $7,$22,$22 \n\
245 xor $23,$24,$24 # 5 cycles from $24 load \n\
246 \n\
247 stq $22,40($17) \n\
248 xor $25,$27,$27 # 5 cycles from $27 load \n\
249 xor $24,$27,$27 \n\
250 xor $0,$1,$1 # 5 cycles from $1 load \n\
251 \n\
252 stq $27,48($17) \n\
253 xor $2,$3,$3 # 4 cycles from $3 load \n\
254 xor $1,$3,$3 \n\
255 subq $16,1,$16 \n\
256 \n\
257 stq $3,56($17) \n\
258 addq $20,64,$20 \n\
259 addq $19,64,$19 \n\
260 addq $18,64,$18 \n\
261 \n\
262 addq $17,64,$17 \n\
263 bgt $16,4b \n\
264 ret \n\
265 .end xor_alpha_4 \n\
266 \n\
267 .align 3 \n\
268 .ent xor_alpha_5 \n\
269xor_alpha_5: \n\
270 .prologue 0 \n\
271 srl $16, 6, $16 \n\
272 .align 4 \n\
2735: \n\
274 ldq $0,0($17) \n\
275 ldq $1,0($18) \n\
276 ldq $2,0($19) \n\
277 ldq $3,0($20) \n\
278 \n\
279 ldq $4,0($21) \n\
280 ldq $5,8($17) \n\
281 ldq $6,8($18) \n\
282 ldq $7,8($19) \n\
283 \n\
284 ldq $22,8($20) \n\
285 ldq $23,8($21) \n\
286 ldq $24,16($17) \n\
287 ldq $25,16($18) \n\
288 \n\
289 ldq $27,16($19) \n\
290 xor $0,$1,$1 # 6 cycles from $1 load \n\
291 ldq $28,16($20) \n\
292 xor $2,$3,$3 # 6 cycles from $3 load \n\
293 \n\
294 ldq $0,16($21) \n\
295 xor $1,$3,$3 \n\
296 ldq $1,24($17) \n\
297 xor $3,$4,$4 # 7 cycles from $4 load \n\
298 \n\
299 stq $4,0($17) \n\
300 xor $5,$6,$6 # 7 cycles from $6 load \n\
301 xor $7,$22,$22 # 7 cycles from $22 load \n\
302 xor $6,$23,$23 # 7 cycles from $23 load \n\
303 \n\
304 ldq $2,24($18) \n\
305 xor $22,$23,$23 \n\
306 ldq $3,24($19) \n\
307 xor $24,$25,$25 # 8 cycles from $25 load \n\
308 \n\
309 stq $23,8($17) \n\
310 xor $25,$27,$27 # 8 cycles from $27 load \n\
311 ldq $4,24($20) \n\
312 xor $28,$0,$0 # 7 cycles from $0 load \n\
313 \n\
314 ldq $5,24($21) \n\
315 xor $27,$0,$0 \n\
316 ldq $6,32($17) \n\
317 ldq $7,32($18) \n\
318 \n\
319 stq $0,16($17) \n\
320 xor $1,$2,$2 # 6 cycles from $2 load \n\
321 ldq $22,32($19) \n\
322 xor $3,$4,$4 # 4 cycles from $4 load \n\
323 \n\
324 ldq $23,32($20) \n\
325 xor $2,$4,$4 \n\
326 ldq $24,32($21) \n\
327 ldq $25,40($17) \n\
328 \n\
329 ldq $27,40($18) \n\
330 ldq $28,40($19) \n\
331 ldq $0,40($20) \n\
332 xor $4,$5,$5 # 7 cycles from $5 load \n\
333 \n\
334 stq $5,24($17) \n\
335 xor $6,$7,$7 # 7 cycles from $7 load \n\
336 ldq $1,40($21) \n\
337 ldq $2,48($17) \n\
338 \n\
339 ldq $3,48($18) \n\
340 xor $7,$22,$22 # 7 cycles from $22 load \n\
341 ldq $4,48($19) \n\
342 xor $23,$24,$24 # 6 cycles from $24 load \n\
343 \n\
344 ldq $5,48($20) \n\
345 xor $22,$24,$24 \n\
346 ldq $6,48($21) \n\
347 xor $25,$27,$27 # 7 cycles from $27 load \n\
348 \n\
349 stq $24,32($17) \n\
350 xor $27,$28,$28 # 8 cycles from $28 load \n\
351 ldq $7,56($17) \n\
352 xor $0,$1,$1 # 6 cycles from $1 load \n\
353 \n\
354 ldq $22,56($18) \n\
355 ldq $23,56($19) \n\
356 ldq $24,56($20) \n\
357 ldq $25,56($21) \n\
358 \n\
359 xor $28,$1,$1 \n\
360 xor $2,$3,$3 # 9 cycles from $3 load \n\
361 xor $3,$4,$4 # 9 cycles from $4 load \n\
362 xor $5,$6,$6 # 8 cycles from $6 load \n\
363 \n\
364 stq $1,40($17) \n\
365 xor $4,$6,$6 \n\
366 xor $7,$22,$22 # 7 cycles from $22 load \n\
367 xor $23,$24,$24 # 6 cycles from $24 load \n\
368 \n\
369 stq $6,48($17) \n\
370 xor $22,$24,$24 \n\
371 subq $16,1,$16 \n\
372 xor $24,$25,$25 # 8 cycles from $25 load \n\
373 \n\
374 stq $25,56($17) \n\
375 addq $21,64,$21 \n\
376 addq $20,64,$20 \n\
377 addq $19,64,$19 \n\
378 \n\
379 addq $18,64,$18 \n\
380 addq $17,64,$17 \n\
381 bgt $16,5b \n\
382 ret \n\
383 .end xor_alpha_5 \n\
384 \n\
385 .align 3 \n\
386 .ent xor_alpha_prefetch_2 \n\
387xor_alpha_prefetch_2: \n\
388 .prologue 0 \n\
389 srl $16, 6, $16 \n\
390 \n\
391 ldq $31, 0($17) \n\
392 ldq $31, 0($18) \n\
393 \n\
394 ldq $31, 64($17) \n\
395 ldq $31, 64($18) \n\
396 \n\
397 ldq $31, 128($17) \n\
398 ldq $31, 128($18) \n\
399 \n\
400 ldq $31, 192($17) \n\
401 ldq $31, 192($18) \n\
402 .align 4 \n\
4032: \n\
404 ldq $0,0($17) \n\
405 ldq $1,0($18) \n\
406 ldq $2,8($17) \n\
407 ldq $3,8($18) \n\
408 \n\
409 ldq $4,16($17) \n\
410 ldq $5,16($18) \n\
411 ldq $6,24($17) \n\
412 ldq $7,24($18) \n\
413 \n\
414 ldq $19,32($17) \n\
415 ldq $20,32($18) \n\
416 ldq $21,40($17) \n\
417 ldq $22,40($18) \n\
418 \n\
419 ldq $23,48($17) \n\
420 ldq $24,48($18) \n\
421 ldq $25,56($17) \n\
422 ldq $27,56($18) \n\
423 \n\
424 ldq $31,256($17) \n\
425 xor $0,$1,$0 # 8 cycles from $1 load \n\
426 ldq $31,256($18) \n\
427 xor $2,$3,$2 \n\
428 \n\
429 stq $0,0($17) \n\
430 xor $4,$5,$4 \n\
431 stq $2,8($17) \n\
432 xor $6,$7,$6 \n\
433 \n\
434 stq $4,16($17) \n\
435 xor $19,$20,$19 \n\
436 stq $6,24($17) \n\
437 xor $21,$22,$21 \n\
438 \n\
439 stq $19,32($17) \n\
440 xor $23,$24,$23 \n\
441 stq $21,40($17) \n\
442 xor $25,$27,$25 \n\
443 \n\
444 stq $23,48($17) \n\
445 subq $16,1,$16 \n\
446 stq $25,56($17) \n\
447 addq $17,64,$17 \n\
448 \n\
449 addq $18,64,$18 \n\
450 bgt $16,2b \n\
451 ret \n\
452 .end xor_alpha_prefetch_2 \n\
453 \n\
454 .align 3 \n\
455 .ent xor_alpha_prefetch_3 \n\
456xor_alpha_prefetch_3: \n\
457 .prologue 0 \n\
458 srl $16, 6, $16 \n\
459 \n\
460 ldq $31, 0($17) \n\
461 ldq $31, 0($18) \n\
462 ldq $31, 0($19) \n\
463 \n\
464 ldq $31, 64($17) \n\
465 ldq $31, 64($18) \n\
466 ldq $31, 64($19) \n\
467 \n\
468 ldq $31, 128($17) \n\
469 ldq $31, 128($18) \n\
470 ldq $31, 128($19) \n\
471 \n\
472 ldq $31, 192($17) \n\
473 ldq $31, 192($18) \n\
474 ldq $31, 192($19) \n\
475 .align 4 \n\
4763: \n\
477 ldq $0,0($17) \n\
478 ldq $1,0($18) \n\
479 ldq $2,0($19) \n\
480 ldq $3,8($17) \n\
481 \n\
482 ldq $4,8($18) \n\
483 ldq $6,16($17) \n\
484 ldq $7,16($18) \n\
485 ldq $21,24($17) \n\
486 \n\
487 ldq $22,24($18) \n\
488 ldq $24,32($17) \n\
489 ldq $25,32($18) \n\
490 ldq $5,8($19) \n\
491 \n\
492 ldq $20,16($19) \n\
493 ldq $23,24($19) \n\
494 ldq $27,32($19) \n\
495 nop \n\
496 \n\
497 xor $0,$1,$1 # 8 cycles from $0 load \n\
498 xor $3,$4,$4 # 7 cycles from $4 load \n\
499 xor $6,$7,$7 # 6 cycles from $7 load \n\
500 xor $21,$22,$22 # 5 cycles from $22 load \n\
501 \n\
502 xor $1,$2,$2 # 9 cycles from $2 load \n\
503 xor $24,$25,$25 # 5 cycles from $25 load \n\
504 stq $2,0($17) \n\
505 xor $4,$5,$5 # 6 cycles from $5 load \n\
506 \n\
507 stq $5,8($17) \n\
508 xor $7,$20,$20 # 7 cycles from $20 load \n\
509 stq $20,16($17) \n\
510 xor $22,$23,$23 # 7 cycles from $23 load \n\
511 \n\
512 stq $23,24($17) \n\
513 xor $25,$27,$27 # 7 cycles from $27 load \n\
514 stq $27,32($17) \n\
515 nop \n\
516 \n\
517 ldq $0,40($17) \n\
518 ldq $1,40($18) \n\
519 ldq $3,48($17) \n\
520 ldq $4,48($18) \n\
521 \n\
522 ldq $6,56($17) \n\
523 ldq $7,56($18) \n\
524 ldq $2,40($19) \n\
525 ldq $5,48($19) \n\
526 \n\
527 ldq $20,56($19) \n\
528 ldq $31,256($17) \n\
529 ldq $31,256($18) \n\
530 ldq $31,256($19) \n\
531 \n\
532 xor $0,$1,$1 # 6 cycles from $1 load \n\
533 xor $3,$4,$4 # 5 cycles from $4 load \n\
534 xor $6,$7,$7 # 5 cycles from $7 load \n\
535 xor $1,$2,$2 # 4 cycles from $2 load \n\
536 \n\
537 xor $4,$5,$5 # 5 cycles from $5 load \n\
538 xor $7,$20,$20 # 4 cycles from $20 load \n\
539 stq $2,40($17) \n\
540 subq $16,1,$16 \n\
541 \n\
542 stq $5,48($17) \n\
543 addq $19,64,$19 \n\
544 stq $20,56($17) \n\
545 addq $18,64,$18 \n\
546 \n\
547 addq $17,64,$17 \n\
548 bgt $16,3b \n\
549 ret \n\
550 .end xor_alpha_prefetch_3 \n\
551 \n\
552 .align 3 \n\
553 .ent xor_alpha_prefetch_4 \n\
554xor_alpha_prefetch_4: \n\
555 .prologue 0 \n\
556 srl $16, 6, $16 \n\
557 \n\
558 ldq $31, 0($17) \n\
559 ldq $31, 0($18) \n\
560 ldq $31, 0($19) \n\
561 ldq $31, 0($20) \n\
562 \n\
563 ldq $31, 64($17) \n\
564 ldq $31, 64($18) \n\
565 ldq $31, 64($19) \n\
566 ldq $31, 64($20) \n\
567 \n\
568 ldq $31, 128($17) \n\
569 ldq $31, 128($18) \n\
570 ldq $31, 128($19) \n\
571 ldq $31, 128($20) \n\
572 \n\
573 ldq $31, 192($17) \n\
574 ldq $31, 192($18) \n\
575 ldq $31, 192($19) \n\
576 ldq $31, 192($20) \n\
577 .align 4 \n\
5784: \n\
579 ldq $0,0($17) \n\
580 ldq $1,0($18) \n\
581 ldq $2,0($19) \n\
582 ldq $3,0($20) \n\
583 \n\
584 ldq $4,8($17) \n\
585 ldq $5,8($18) \n\
586 ldq $6,8($19) \n\
587 ldq $7,8($20) \n\
588 \n\
589 ldq $21,16($17) \n\
590 ldq $22,16($18) \n\
591 ldq $23,16($19) \n\
592 ldq $24,16($20) \n\
593 \n\
594 ldq $25,24($17) \n\
595 xor $0,$1,$1 # 6 cycles from $1 load \n\
596 ldq $27,24($18) \n\
597 xor $2,$3,$3 # 6 cycles from $3 load \n\
598 \n\
599 ldq $0,24($19) \n\
600 xor $1,$3,$3 \n\
601 ldq $1,24($20) \n\
602 xor $4,$5,$5 # 7 cycles from $5 load \n\
603 \n\
604 stq $3,0($17) \n\
605 xor $6,$7,$7 \n\
606 xor $21,$22,$22 # 7 cycles from $22 load \n\
607 xor $5,$7,$7 \n\
608 \n\
609 stq $7,8($17) \n\
610 xor $23,$24,$24 # 7 cycles from $24 load \n\
611 ldq $2,32($17) \n\
612 xor $22,$24,$24 \n\
613 \n\
614 ldq $3,32($18) \n\
615 ldq $4,32($19) \n\
616 ldq $5,32($20) \n\
617 xor $25,$27,$27 # 8 cycles from $27 load \n\
618 \n\
619 ldq $6,40($17) \n\
620 ldq $7,40($18) \n\
621 ldq $21,40($19) \n\
622 ldq $22,40($20) \n\
623 \n\
624 stq $24,16($17) \n\
625 xor $0,$1,$1 # 9 cycles from $1 load \n\
626 xor $2,$3,$3 # 5 cycles from $3 load \n\
627 xor $27,$1,$1 \n\
628 \n\
629 stq $1,24($17) \n\
630 xor $4,$5,$5 # 5 cycles from $5 load \n\
631 ldq $23,48($17) \n\
632 xor $3,$5,$5 \n\
633 \n\
634 ldq $24,48($18) \n\
635 ldq $25,48($19) \n\
636 ldq $27,48($20) \n\
637 ldq $0,56($17) \n\
638 \n\
639 ldq $1,56($18) \n\
640 ldq $2,56($19) \n\
641 ldq $3,56($20) \n\
642 xor $6,$7,$7 # 8 cycles from $6 load \n\
643 \n\
644 ldq $31,256($17) \n\
645 xor $21,$22,$22 # 8 cycles from $22 load \n\
646 ldq $31,256($18) \n\
647 xor $7,$22,$22 \n\
648 \n\
649 ldq $31,256($19) \n\
650 xor $23,$24,$24 # 6 cycles from $24 load \n\
651 ldq $31,256($20) \n\
652 xor $25,$27,$27 # 6 cycles from $27 load \n\
653 \n\
654 stq $5,32($17) \n\
655 xor $24,$27,$27 \n\
656 xor $0,$1,$1 # 7 cycles from $1 load \n\
657 xor $2,$3,$3 # 6 cycles from $3 load \n\
658 \n\
659 stq $22,40($17) \n\
660 xor $1,$3,$3 \n\
661 stq $27,48($17) \n\
662 subq $16,1,$16 \n\
663 \n\
664 stq $3,56($17) \n\
665 addq $20,64,$20 \n\
666 addq $19,64,$19 \n\
667 addq $18,64,$18 \n\
668 \n\
669 addq $17,64,$17 \n\
670 bgt $16,4b \n\
671 ret \n\
672 .end xor_alpha_prefetch_4 \n\
673 \n\
674 .align 3 \n\
675 .ent xor_alpha_prefetch_5 \n\
676xor_alpha_prefetch_5: \n\
677 .prologue 0 \n\
678 srl $16, 6, $16 \n\
679 \n\
680 ldq $31, 0($17) \n\
681 ldq $31, 0($18) \n\
682 ldq $31, 0($19) \n\
683 ldq $31, 0($20) \n\
684 ldq $31, 0($21) \n\
685 \n\
686 ldq $31, 64($17) \n\
687 ldq $31, 64($18) \n\
688 ldq $31, 64($19) \n\
689 ldq $31, 64($20) \n\
690 ldq $31, 64($21) \n\
691 \n\
692 ldq $31, 128($17) \n\
693 ldq $31, 128($18) \n\
694 ldq $31, 128($19) \n\
695 ldq $31, 128($20) \n\
696 ldq $31, 128($21) \n\
697 \n\
698 ldq $31, 192($17) \n\
699 ldq $31, 192($18) \n\
700 ldq $31, 192($19) \n\
701 ldq $31, 192($20) \n\
702 ldq $31, 192($21) \n\
703 .align 4 \n\
7045: \n\
705 ldq $0,0($17) \n\
706 ldq $1,0($18) \n\
707 ldq $2,0($19) \n\
708 ldq $3,0($20) \n\
709 \n\
710 ldq $4,0($21) \n\
711 ldq $5,8($17) \n\
712 ldq $6,8($18) \n\
713 ldq $7,8($19) \n\
714 \n\
715 ldq $22,8($20) \n\
716 ldq $23,8($21) \n\
717 ldq $24,16($17) \n\
718 ldq $25,16($18) \n\
719 \n\
720 ldq $27,16($19) \n\
721 xor $0,$1,$1 # 6 cycles from $1 load \n\
722 ldq $28,16($20) \n\
723 xor $2,$3,$3 # 6 cycles from $3 load \n\
724 \n\
725 ldq $0,16($21) \n\
726 xor $1,$3,$3 \n\
727 ldq $1,24($17) \n\
728 xor $3,$4,$4 # 7 cycles from $4 load \n\
729 \n\
730 stq $4,0($17) \n\
731 xor $5,$6,$6 # 7 cycles from $6 load \n\
732 xor $7,$22,$22 # 7 cycles from $22 load \n\
733 xor $6,$23,$23 # 7 cycles from $23 load \n\
734 \n\
735 ldq $2,24($18) \n\
736 xor $22,$23,$23 \n\
737 ldq $3,24($19) \n\
738 xor $24,$25,$25 # 8 cycles from $25 load \n\
739 \n\
740 stq $23,8($17) \n\
741 xor $25,$27,$27 # 8 cycles from $27 load \n\
742 ldq $4,24($20) \n\
743 xor $28,$0,$0 # 7 cycles from $0 load \n\
744 \n\
745 ldq $5,24($21) \n\
746 xor $27,$0,$0 \n\
747 ldq $6,32($17) \n\
748 ldq $7,32($18) \n\
749 \n\
750 stq $0,16($17) \n\
751 xor $1,$2,$2 # 6 cycles from $2 load \n\
752 ldq $22,32($19) \n\
753 xor $3,$4,$4 # 4 cycles from $4 load \n\
754 \n\
755 ldq $23,32($20) \n\
756 xor $2,$4,$4 \n\
757 ldq $24,32($21) \n\
758 ldq $25,40($17) \n\
759 \n\
760 ldq $27,40($18) \n\
761 ldq $28,40($19) \n\
762 ldq $0,40($20) \n\
763 xor $4,$5,$5 # 7 cycles from $5 load \n\
764 \n\
765 stq $5,24($17) \n\
766 xor $6,$7,$7 # 7 cycles from $7 load \n\
767 ldq $1,40($21) \n\
768 ldq $2,48($17) \n\
769 \n\
770 ldq $3,48($18) \n\
771 xor $7,$22,$22 # 7 cycles from $22 load \n\
772 ldq $4,48($19) \n\
773 xor $23,$24,$24 # 6 cycles from $24 load \n\
774 \n\
775 ldq $5,48($20) \n\
776 xor $22,$24,$24 \n\
777 ldq $6,48($21) \n\
778 xor $25,$27,$27 # 7 cycles from $27 load \n\
779 \n\
780 stq $24,32($17) \n\
781 xor $27,$28,$28 # 8 cycles from $28 load \n\
782 ldq $7,56($17) \n\
783 xor $0,$1,$1 # 6 cycles from $1 load \n\
784 \n\
785 ldq $22,56($18) \n\
786 ldq $23,56($19) \n\
787 ldq $24,56($20) \n\
788 ldq $25,56($21) \n\
789 \n\
790 ldq $31,256($17) \n\
791 xor $28,$1,$1 \n\
792 ldq $31,256($18) \n\
793 xor $2,$3,$3 # 9 cycles from $3 load \n\
794 \n\
795 ldq $31,256($19) \n\
796 xor $3,$4,$4 # 9 cycles from $4 load \n\
797 ldq $31,256($20) \n\
798 xor $5,$6,$6 # 8 cycles from $6 load \n\
799 \n\
800 stq $1,40($17) \n\
801 xor $4,$6,$6 \n\
802 xor $7,$22,$22 # 7 cycles from $22 load \n\
803 xor $23,$24,$24 # 6 cycles from $24 load \n\
804 \n\
805 stq $6,48($17) \n\
806 xor $22,$24,$24 \n\
807 ldq $31,256($21) \n\
808 xor $24,$25,$25 # 8 cycles from $25 load \n\
809 \n\
810 stq $25,56($17) \n\
811 subq $16,1,$16 \n\
812 addq $21,64,$21 \n\
813 addq $20,64,$20 \n\
814 \n\
815 addq $19,64,$19 \n\
816 addq $18,64,$18 \n\
817 addq $17,64,$17 \n\
818 bgt $16,5b \n\
819 \n\
820 ret \n\
821 .end xor_alpha_prefetch_5 \n\
822");
823
/* Dispatch table exposing the non-prefetching hand-scheduled XOR
   routines defined in the asm block above. */
static struct xor_block_template xor_block_alpha = {
	.name = "alpha",
	.do_2 = xor_alpha_2,
	.do_3 = xor_alpha_3,
	.do_4 = xor_alpha_4,
	.do_5 = xor_alpha_5,
};
831
/* Dispatch table for the variants that issue explicit prefetch loads
   ("ldq $31, ...") ahead of each 64-byte chunk. */
static struct xor_block_template xor_block_alpha_prefetch = {
	.name = "alpha prefetch",
	.do_2 = xor_alpha_prefetch_2,
	.do_3 = xor_alpha_prefetch_3,
	.do_4 = xor_alpha_prefetch_4,
	.do_5 = xor_alpha_prefetch_5,
};
839
840/* For grins, also test the generic routines. */
841#include <asm-generic/xor.h>
842
/* Benchmark the generic templates alongside the alpha-specific ones so
   the RAID xor code can select the fastest at boot. */
#undef XOR_TRY_TEMPLATES
#define XOR_TRY_TEMPLATES				\
	do {						\
		xor_speed(&xor_block_8regs);		\
		xor_speed(&xor_block_32regs);		\
		xor_speed(&xor_block_alpha);		\
		xor_speed(&xor_block_alpha_prefetch);	\
	} while (0)
851
/* Force the use of alpha_prefetch if EV6, as it is significantly
   faster in the cold cache case.  implver() probes the CPU
   implementation version at run time. */
#define XOR_SELECT_TEMPLATE(FASTEST) \
	(implver() == IMPLVER_EV6 ? &xor_block_alpha_prefetch : FASTEST)