author    | Linus Torvalds <torvalds@linux-foundation.org> | 2008-08-15 12:19:40 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-08-15 12:19:40 -0400
commit    | 024b246ed24492d6c2ee14c34d742b137fce1b94 (patch)
tree      | 428444950025503218c96b03c86f749403626dec /arch/alpha/include
parent    | 9419fc1c957d600093baaea247fef23cca3b4e93 (diff)
alpha: move include/asm-alpha to arch/alpha/include/asm
Sam Ravnborg did the build-test that the direct header file move works,
I'm just committing it.
This is a pure move:
mkdir arch/alpha/include
git mv include/asm-alpha arch/alpha/include/asm
with no other changes.
Requested-and-tested-by: Sam Ravnborg <sam@ravnborg.org>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/alpha/include')
129 files changed, 15903 insertions, 0 deletions
diff --git a/arch/alpha/include/asm/8253pit.h b/arch/alpha/include/asm/8253pit.h
new file mode 100644
index 000000000000..fef5c1450e47
--- /dev/null
+++ b/arch/alpha/include/asm/8253pit.h
@@ -0,0 +1,10 @@
1 | /* | ||
2 | * 8253/8254 Programmable Interval Timer | ||
3 | */ | ||
4 | |||
5 | #ifndef _8253PIT_H | ||
6 | #define _8253PIT_H | ||
7 | |||
8 | #define PIT_TICK_RATE 1193180UL | ||
9 | |||
10 | #endif | ||
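PIT_TICK_RATE above is the input clock of the 8253/8254 timer in Hz. As a rough, hedged illustration of how a periodic-timer setup derives its reload count from it (HZ is not defined in this header; EXAMPLE_HZ stands in for the kernel's HZ, with 1024 being a common Alpha configuration):

/* Illustrative sketch only; EXAMPLE_HZ stands in for the kernel's HZ. */
#define EXAMPLE_HZ	1024
#define EXAMPLE_LATCH	((PIT_TICK_RATE + EXAMPLE_HZ / 2) / EXAMPLE_HZ)	/* ~1165 */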
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild
new file mode 100644
index 000000000000..b7c8f188b313
--- /dev/null
+++ b/arch/alpha/include/asm/Kbuild
@@ -0,0 +1,11 @@
1 | include include/asm-generic/Kbuild.asm | ||
2 | |||
3 | header-y += gentrap.h | ||
4 | header-y += regdef.h | ||
5 | header-y += pal.h | ||
6 | header-y += reg.h | ||
7 | |||
8 | unifdef-y += console.h | ||
9 | unifdef-y += fpu.h | ||
10 | unifdef-y += sysinfo.h | ||
11 | unifdef-y += compiler.h | ||
diff --git a/arch/alpha/include/asm/a.out-core.h b/arch/alpha/include/asm/a.out-core.h
new file mode 100644
index 000000000000..9e33e92e524c
--- /dev/null
+++ b/arch/alpha/include/asm/a.out-core.h
@@ -0,0 +1,80 @@
1 | /* a.out coredump register dumper | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef _ASM_A_OUT_CORE_H | ||
13 | #define _ASM_A_OUT_CORE_H | ||
14 | |||
15 | #ifdef __KERNEL__ | ||
16 | |||
17 | #include <linux/user.h> | ||
18 | |||
19 | /* | ||
20 | * Fill in the user structure for an ECOFF core dump. | ||
21 | */ | ||
22 | static inline void aout_dump_thread(struct pt_regs *pt, struct user *dump) | ||
23 | { | ||
24 | /* switch stack follows right below pt_regs: */ | ||
25 | struct switch_stack * sw = ((struct switch_stack *) pt) - 1; | ||
26 | |||
27 | dump->magic = CMAGIC; | ||
28 | dump->start_code = current->mm->start_code; | ||
29 | dump->start_data = current->mm->start_data; | ||
30 | dump->start_stack = rdusp() & ~(PAGE_SIZE - 1); | ||
31 | dump->u_tsize = ((current->mm->end_code - dump->start_code) | ||
32 | >> PAGE_SHIFT); | ||
33 | dump->u_dsize = ((current->mm->brk + PAGE_SIZE-1 - dump->start_data) | ||
34 | >> PAGE_SHIFT); | ||
35 | dump->u_ssize = (current->mm->start_stack - dump->start_stack | ||
36 | + PAGE_SIZE-1) >> PAGE_SHIFT; | ||
37 | |||
38 | /* | ||
39 | * We store the registers in an order/format that is | ||
40 | * compatible with DEC Unix/OSF/1 as this makes life easier | ||
41 | * for gdb. | ||
42 | */ | ||
43 | dump->regs[EF_V0] = pt->r0; | ||
44 | dump->regs[EF_T0] = pt->r1; | ||
45 | dump->regs[EF_T1] = pt->r2; | ||
46 | dump->regs[EF_T2] = pt->r3; | ||
47 | dump->regs[EF_T3] = pt->r4; | ||
48 | dump->regs[EF_T4] = pt->r5; | ||
49 | dump->regs[EF_T5] = pt->r6; | ||
50 | dump->regs[EF_T6] = pt->r7; | ||
51 | dump->regs[EF_T7] = pt->r8; | ||
52 | dump->regs[EF_S0] = sw->r9; | ||
53 | dump->regs[EF_S1] = sw->r10; | ||
54 | dump->regs[EF_S2] = sw->r11; | ||
55 | dump->regs[EF_S3] = sw->r12; | ||
56 | dump->regs[EF_S4] = sw->r13; | ||
57 | dump->regs[EF_S5] = sw->r14; | ||
58 | dump->regs[EF_S6] = sw->r15; | ||
59 | dump->regs[EF_A3] = pt->r19; | ||
60 | dump->regs[EF_A4] = pt->r20; | ||
61 | dump->regs[EF_A5] = pt->r21; | ||
62 | dump->regs[EF_T8] = pt->r22; | ||
63 | dump->regs[EF_T9] = pt->r23; | ||
64 | dump->regs[EF_T10] = pt->r24; | ||
65 | dump->regs[EF_T11] = pt->r25; | ||
66 | dump->regs[EF_RA] = pt->r26; | ||
67 | dump->regs[EF_T12] = pt->r27; | ||
68 | dump->regs[EF_AT] = pt->r28; | ||
69 | dump->regs[EF_SP] = rdusp(); | ||
70 | dump->regs[EF_PS] = pt->ps; | ||
71 | dump->regs[EF_PC] = pt->pc; | ||
72 | dump->regs[EF_GP] = pt->gp; | ||
73 | dump->regs[EF_A0] = pt->r16; | ||
74 | dump->regs[EF_A1] = pt->r17; | ||
75 | dump->regs[EF_A2] = pt->r18; | ||
76 | memcpy((char *)dump->regs + EF_SIZE, sw->fp, 32 * 8); | ||
77 | } | ||
78 | |||
79 | #endif /* __KERNEL__ */ | ||
80 | #endif /* _ASM_A_OUT_CORE_H */ | ||
diff --git a/arch/alpha/include/asm/a.out.h b/arch/alpha/include/asm/a.out.h
new file mode 100644
index 000000000000..02ce8473870a
--- /dev/null
+++ b/arch/alpha/include/asm/a.out.h
@@ -0,0 +1,102 @@
1 | #ifndef __ALPHA_A_OUT_H__ | ||
2 | #define __ALPHA_A_OUT_H__ | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | |||
6 | /* | ||
7 | * OSF/1 ECOFF header structs. ECOFF files consist of: | ||
8 | * - a file header (struct filehdr), | ||
9 | * - an a.out header (struct aouthdr), | ||
10 | * - one or more section headers (struct scnhdr). | ||
11 |  * The filehdr's "f_nscns" field contains the | ||
12 | * number of section headers. | ||
13 | */ | ||
14 | |||
15 | struct filehdr | ||
16 | { | ||
17 | /* OSF/1 "file" header */ | ||
18 | __u16 f_magic, f_nscns; | ||
19 | __u32 f_timdat; | ||
20 | __u64 f_symptr; | ||
21 | __u32 f_nsyms; | ||
22 | __u16 f_opthdr, f_flags; | ||
23 | }; | ||
24 | |||
25 | struct aouthdr | ||
26 | { | ||
27 | __u64 info; /* after that it looks quite normal.. */ | ||
28 | __u64 tsize; | ||
29 | __u64 dsize; | ||
30 | __u64 bsize; | ||
31 | __u64 entry; | ||
32 | __u64 text_start; /* with a few additions that actually make sense */ | ||
33 | __u64 data_start; | ||
34 | __u64 bss_start; | ||
35 | __u32 gprmask, fprmask; /* bitmask of general & floating point regs used in binary */ | ||
36 | __u64 gpvalue; | ||
37 | }; | ||
38 | |||
39 | struct scnhdr | ||
40 | { | ||
41 | char s_name[8]; | ||
42 | __u64 s_paddr; | ||
43 | __u64 s_vaddr; | ||
44 | __u64 s_size; | ||
45 | __u64 s_scnptr; | ||
46 | __u64 s_relptr; | ||
47 | __u64 s_lnnoptr; | ||
48 | __u16 s_nreloc; | ||
49 | __u16 s_nlnno; | ||
50 | __u32 s_flags; | ||
51 | }; | ||
52 | |||
53 | struct exec | ||
54 | { | ||
55 | /* OSF/1 "file" header */ | ||
56 | struct filehdr fh; | ||
57 | struct aouthdr ah; | ||
58 | }; | ||
59 | |||
60 | /* | ||
61 | * Define's so that the kernel exec code can access the a.out header | ||
62 | * fields... | ||
63 | */ | ||
64 | #define a_info ah.info | ||
65 | #define a_text ah.tsize | ||
66 | #define a_data ah.dsize | ||
67 | #define a_bss ah.bsize | ||
68 | #define a_entry ah.entry | ||
69 | #define a_textstart ah.text_start | ||
70 | #define a_datastart ah.data_start | ||
71 | #define a_bssstart ah.bss_start | ||
72 | #define a_gprmask ah.gprmask | ||
73 | #define a_fprmask ah.fprmask | ||
74 | #define a_gpvalue ah.gpvalue | ||
75 | |||
76 | #define N_TXTADDR(x) ((x).a_textstart) | ||
77 | #define N_DATADDR(x) ((x).a_datastart) | ||
78 | #define N_BSSADDR(x) ((x).a_bssstart) | ||
79 | #define N_DRSIZE(x) 0 | ||
80 | #define N_TRSIZE(x) 0 | ||
81 | #define N_SYMSIZE(x) 0 | ||
82 | |||
83 | #define AOUTHSZ sizeof(struct aouthdr) | ||
84 | #define SCNHSZ sizeof(struct scnhdr) | ||
85 | #define SCNROUND 16 | ||
86 | |||
87 | #define N_TXTOFF(x) \ | ||
88 | ((long) N_MAGIC(x) == ZMAGIC ? 0 : \ | ||
89 | (sizeof(struct exec) + (x).fh.f_nscns*SCNHSZ + SCNROUND - 1) & ~(SCNROUND - 1)) | ||
90 | |||
91 | #ifdef __KERNEL__ | ||
92 | |||
93 | /* Assume that start addresses below 4G belong to a TASO application. | ||
94 | Unfortunately, there is no proper bit in the exec header to check. | ||
95 | Worse, we have to notice the start address before swapping to use | ||
96 | /sbin/loader, which of course is _not_ a TASO application. */ | ||
97 | #define SET_AOUT_PERSONALITY(BFPM, EX) \ | ||
98 | set_personality (((BFPM->sh_bang || EX.ah.entry < 0x100000000L \ | ||
99 | ? ADDR_LIMIT_32BIT : 0) | PER_OSF4)) | ||
100 | |||
101 | #endif /* __KERNEL__ */ | ||
102 | #endif /* __ALPHA_A_OUT_H__ */ | ||
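To make the N_TXTOFF rounding concrete: assuming only natural alignment of the structures above, sizeof(struct exec) works out to 104 bytes and SCNHSZ to 64, so a non-ZMAGIC image with f_nscns = 2 gets N_TXTOFF = (104 + 2*64 + 15) & ~15 = 240, i.e. the text section starts at the first 16-byte boundary past the headers, while a ZMAGIC image gets offset 0.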
diff --git a/arch/alpha/include/asm/agp.h b/arch/alpha/include/asm/agp.h
new file mode 100644
index 000000000000..26c179135293
--- /dev/null
+++ b/arch/alpha/include/asm/agp.h
@@ -0,0 +1,22 @@
1 | #ifndef AGP_H | ||
2 | #define AGP_H 1 | ||
3 | |||
4 | #include <asm/io.h> | ||
5 | |||
6 | /* dummy for now */ | ||
7 | |||
8 | #define map_page_into_agp(page) | ||
9 | #define unmap_page_from_agp(page) | ||
10 | #define flush_agp_cache() mb() | ||
11 | |||
12 | /* Convert a physical address to an address suitable for the GART. */ | ||
13 | #define phys_to_gart(x) (x) | ||
14 | #define gart_to_phys(x) (x) | ||
15 | |||
16 | /* GATT allocation. Returns/accepts GATT kernel virtual address. */ | ||
17 | #define alloc_gatt_pages(order) \ | ||
18 | ((char *)__get_free_pages(GFP_KERNEL, (order))) | ||
19 | #define free_gatt_pages(table, order) \ | ||
20 | free_pages((unsigned long)(table), (order)) | ||
21 | |||
22 | #endif | ||
diff --git a/arch/alpha/include/asm/agp_backend.h b/arch/alpha/include/asm/agp_backend.h
new file mode 100644
index 000000000000..55dd44a2cea7
--- /dev/null
+++ b/arch/alpha/include/asm/agp_backend.h
@@ -0,0 +1,42 @@
1 | #ifndef _ALPHA_AGP_BACKEND_H | ||
2 | #define _ALPHA_AGP_BACKEND_H 1 | ||
3 | |||
4 | typedef union _alpha_agp_mode { | ||
5 | struct { | ||
6 | u32 rate : 3; | ||
7 | u32 reserved0 : 1; | ||
8 | u32 fw : 1; | ||
9 | u32 fourgb : 1; | ||
10 | u32 reserved1 : 2; | ||
11 | u32 enable : 1; | ||
12 | u32 sba : 1; | ||
13 | u32 reserved2 : 14; | ||
14 | u32 rq : 8; | ||
15 | } bits; | ||
16 | u32 lw; | ||
17 | } alpha_agp_mode; | ||
18 | |||
19 | typedef struct _alpha_agp_info { | ||
20 | struct pci_controller *hose; | ||
21 | struct { | ||
22 | dma_addr_t bus_base; | ||
23 | unsigned long size; | ||
24 | void *sysdata; | ||
25 | } aperture; | ||
26 | alpha_agp_mode capability; | ||
27 | alpha_agp_mode mode; | ||
28 | void *private; | ||
29 | struct alpha_agp_ops *ops; | ||
30 | } alpha_agp_info; | ||
31 | |||
32 | struct alpha_agp_ops { | ||
33 | int (*setup)(alpha_agp_info *); | ||
34 | void (*cleanup)(alpha_agp_info *); | ||
35 | int (*configure)(alpha_agp_info *); | ||
36 | int (*bind)(alpha_agp_info *, off_t, struct agp_memory *); | ||
37 | int (*unbind)(alpha_agp_info *, off_t, struct agp_memory *); | ||
38 | unsigned long (*translate)(alpha_agp_info *, dma_addr_t); | ||
39 | }; | ||
40 | |||
41 | |||
42 | #endif /* _ALPHA_AGP_BACKEND_H */ | ||
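The alpha_agp_mode union above lets the same AGP mode longword be viewed either as the raw 32-bit value or field by field. A hedged sketch of how a chipset driver might decode a raw value through it (the helper name and example value are made up; printk assumes <linux/kernel.h>):

/* Illustrative only: decode a raw AGP mode longword via the union above. */
static void example_decode_agp_mode(u32 raw)
{
	alpha_agp_mode mode;

	mode.lw = raw;
	if (mode.bits.enable)
		printk("AGP enabled: rate=%u sba=%u rq=%u\n",
		       mode.bits.rate, mode.bits.sba, mode.bits.rq);
}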
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
new file mode 100644
index 000000000000..ca88e54dec93
--- /dev/null
+++ b/arch/alpha/include/asm/atomic.h
@@ -0,0 +1,267 @@
1 | #ifndef _ALPHA_ATOMIC_H | ||
2 | #define _ALPHA_ATOMIC_H | ||
3 | |||
4 | #include <asm/barrier.h> | ||
5 | #include <asm/system.h> | ||
6 | |||
7 | /* | ||
8 | * Atomic operations that C can't guarantee us. Useful for | ||
9 | * resource counting etc... | ||
10 | * | ||
11 | * But use these as seldom as possible since they are much slower | ||
12 | * than regular operations. | ||
13 | */ | ||
14 | |||
15 | |||
16 | /* | ||
17 | * Counter is volatile to make sure gcc doesn't try to be clever | ||
18 | * and move things around on us. We need to use _exactly_ the address | ||
19 | * the user gave us, not some alias that contains the same information. | ||
20 | */ | ||
21 | typedef struct { volatile int counter; } atomic_t; | ||
22 | typedef struct { volatile long counter; } atomic64_t; | ||
23 | |||
24 | #define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) | ||
25 | #define ATOMIC64_INIT(i) ( (atomic64_t) { (i) } ) | ||
26 | |||
27 | #define atomic_read(v) ((v)->counter + 0) | ||
28 | #define atomic64_read(v) ((v)->counter + 0) | ||
29 | |||
30 | #define atomic_set(v,i) ((v)->counter = (i)) | ||
31 | #define atomic64_set(v,i) ((v)->counter = (i)) | ||
32 | |||
33 | /* | ||
34 | * To get proper branch prediction for the main line, we must branch | ||
35 | * forward to code at the end of this object's .text section, then | ||
36 | * branch back to restart the operation. | ||
37 | */ | ||
38 | |||
39 | static __inline__ void atomic_add(int i, atomic_t * v) | ||
40 | { | ||
41 | unsigned long temp; | ||
42 | __asm__ __volatile__( | ||
43 | "1: ldl_l %0,%1\n" | ||
44 | " addl %0,%2,%0\n" | ||
45 | " stl_c %0,%1\n" | ||
46 | " beq %0,2f\n" | ||
47 | ".subsection 2\n" | ||
48 | "2: br 1b\n" | ||
49 | ".previous" | ||
50 | :"=&r" (temp), "=m" (v->counter) | ||
51 | :"Ir" (i), "m" (v->counter)); | ||
52 | } | ||
53 | |||
54 | static __inline__ void atomic64_add(long i, atomic64_t * v) | ||
55 | { | ||
56 | unsigned long temp; | ||
57 | __asm__ __volatile__( | ||
58 | "1: ldq_l %0,%1\n" | ||
59 | " addq %0,%2,%0\n" | ||
60 | " stq_c %0,%1\n" | ||
61 | " beq %0,2f\n" | ||
62 | ".subsection 2\n" | ||
63 | "2: br 1b\n" | ||
64 | ".previous" | ||
65 | :"=&r" (temp), "=m" (v->counter) | ||
66 | :"Ir" (i), "m" (v->counter)); | ||
67 | } | ||
68 | |||
69 | static __inline__ void atomic_sub(int i, atomic_t * v) | ||
70 | { | ||
71 | unsigned long temp; | ||
72 | __asm__ __volatile__( | ||
73 | "1: ldl_l %0,%1\n" | ||
74 | " subl %0,%2,%0\n" | ||
75 | " stl_c %0,%1\n" | ||
76 | " beq %0,2f\n" | ||
77 | ".subsection 2\n" | ||
78 | "2: br 1b\n" | ||
79 | ".previous" | ||
80 | :"=&r" (temp), "=m" (v->counter) | ||
81 | :"Ir" (i), "m" (v->counter)); | ||
82 | } | ||
83 | |||
84 | static __inline__ void atomic64_sub(long i, atomic64_t * v) | ||
85 | { | ||
86 | unsigned long temp; | ||
87 | __asm__ __volatile__( | ||
88 | "1: ldq_l %0,%1\n" | ||
89 | " subq %0,%2,%0\n" | ||
90 | " stq_c %0,%1\n" | ||
91 | " beq %0,2f\n" | ||
92 | ".subsection 2\n" | ||
93 | "2: br 1b\n" | ||
94 | ".previous" | ||
95 | :"=&r" (temp), "=m" (v->counter) | ||
96 | :"Ir" (i), "m" (v->counter)); | ||
97 | } | ||
98 | |||
99 | |||
100 | /* | ||
101 | * Same as above, but return the result value | ||
102 | */ | ||
103 | static inline int atomic_add_return(int i, atomic_t *v) | ||
104 | { | ||
105 | long temp, result; | ||
106 | smp_mb(); | ||
107 | __asm__ __volatile__( | ||
108 | "1: ldl_l %0,%1\n" | ||
109 | " addl %0,%3,%2\n" | ||
110 | " addl %0,%3,%0\n" | ||
111 | " stl_c %0,%1\n" | ||
112 | " beq %0,2f\n" | ||
113 | ".subsection 2\n" | ||
114 | "2: br 1b\n" | ||
115 | ".previous" | ||
116 | :"=&r" (temp), "=m" (v->counter), "=&r" (result) | ||
117 | :"Ir" (i), "m" (v->counter) : "memory"); | ||
118 | smp_mb(); | ||
119 | return result; | ||
120 | } | ||
121 | |||
122 | static __inline__ long atomic64_add_return(long i, atomic64_t * v) | ||
123 | { | ||
124 | long temp, result; | ||
125 | smp_mb(); | ||
126 | __asm__ __volatile__( | ||
127 | "1: ldq_l %0,%1\n" | ||
128 | " addq %0,%3,%2\n" | ||
129 | " addq %0,%3,%0\n" | ||
130 | " stq_c %0,%1\n" | ||
131 | " beq %0,2f\n" | ||
132 | ".subsection 2\n" | ||
133 | "2: br 1b\n" | ||
134 | ".previous" | ||
135 | :"=&r" (temp), "=m" (v->counter), "=&r" (result) | ||
136 | :"Ir" (i), "m" (v->counter) : "memory"); | ||
137 | smp_mb(); | ||
138 | return result; | ||
139 | } | ||
140 | |||
141 | static __inline__ long atomic_sub_return(int i, atomic_t * v) | ||
142 | { | ||
143 | long temp, result; | ||
144 | smp_mb(); | ||
145 | __asm__ __volatile__( | ||
146 | "1: ldl_l %0,%1\n" | ||
147 | " subl %0,%3,%2\n" | ||
148 | " subl %0,%3,%0\n" | ||
149 | " stl_c %0,%1\n" | ||
150 | " beq %0,2f\n" | ||
151 | ".subsection 2\n" | ||
152 | "2: br 1b\n" | ||
153 | ".previous" | ||
154 | :"=&r" (temp), "=m" (v->counter), "=&r" (result) | ||
155 | :"Ir" (i), "m" (v->counter) : "memory"); | ||
156 | smp_mb(); | ||
157 | return result; | ||
158 | } | ||
159 | |||
160 | static __inline__ long atomic64_sub_return(long i, atomic64_t * v) | ||
161 | { | ||
162 | long temp, result; | ||
163 | smp_mb(); | ||
164 | __asm__ __volatile__( | ||
165 | "1: ldq_l %0,%1\n" | ||
166 | " subq %0,%3,%2\n" | ||
167 | " subq %0,%3,%0\n" | ||
168 | " stq_c %0,%1\n" | ||
169 | " beq %0,2f\n" | ||
170 | ".subsection 2\n" | ||
171 | "2: br 1b\n" | ||
172 | ".previous" | ||
173 | :"=&r" (temp), "=m" (v->counter), "=&r" (result) | ||
174 | :"Ir" (i), "m" (v->counter) : "memory"); | ||
175 | smp_mb(); | ||
176 | return result; | ||
177 | } | ||
178 | |||
179 | #define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) | ||
180 | #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) | ||
181 | |||
182 | #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) | ||
183 | #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) | ||
184 | |||
185 | /** | ||
186 | * atomic_add_unless - add unless the number is a given value | ||
187 | * @v: pointer of type atomic_t | ||
188 | * @a: the amount to add to v... | ||
189 | * @u: ...unless v is equal to u. | ||
190 | * | ||
191 | * Atomically adds @a to @v, so long as it was not @u. | ||
192 | * Returns non-zero if @v was not @u, and zero otherwise. | ||
193 | */ | ||
194 | static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) | ||
195 | { | ||
196 | int c, old; | ||
197 | c = atomic_read(v); | ||
198 | for (;;) { | ||
199 | if (unlikely(c == (u))) | ||
200 | break; | ||
201 | old = atomic_cmpxchg((v), c, c + (a)); | ||
202 | if (likely(old == c)) | ||
203 | break; | ||
204 | c = old; | ||
205 | } | ||
206 | return c != (u); | ||
207 | } | ||
208 | |||
209 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) | ||
210 | |||
211 | /** | ||
212 | * atomic64_add_unless - add unless the number is a given value | ||
213 | * @v: pointer of type atomic64_t | ||
214 | * @a: the amount to add to v... | ||
215 | * @u: ...unless v is equal to u. | ||
216 | * | ||
217 | * Atomically adds @a to @v, so long as it was not @u. | ||
218 | * Returns non-zero if @v was not @u, and zero otherwise. | ||
219 | */ | ||
220 | static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) | ||
221 | { | ||
222 | long c, old; | ||
223 | c = atomic64_read(v); | ||
224 | for (;;) { | ||
225 | if (unlikely(c == (u))) | ||
226 | break; | ||
227 | old = atomic64_cmpxchg((v), c, c + (a)); | ||
228 | if (likely(old == c)) | ||
229 | break; | ||
230 | c = old; | ||
231 | } | ||
232 | return c != (u); | ||
233 | } | ||
234 | |||
235 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) | ||
236 | |||
237 | #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) | ||
238 | #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) | ||
239 | |||
240 | #define atomic_dec_return(v) atomic_sub_return(1,(v)) | ||
241 | #define atomic64_dec_return(v) atomic64_sub_return(1,(v)) | ||
242 | |||
243 | #define atomic_inc_return(v) atomic_add_return(1,(v)) | ||
244 | #define atomic64_inc_return(v) atomic64_add_return(1,(v)) | ||
245 | |||
246 | #define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0) | ||
247 | #define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0) | ||
248 | |||
249 | #define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0) | ||
250 | #define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0) | ||
251 | |||
252 | #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) | ||
253 | #define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0) | ||
254 | |||
255 | #define atomic_inc(v) atomic_add(1,(v)) | ||
256 | #define atomic64_inc(v) atomic64_add(1,(v)) | ||
257 | |||
258 | #define atomic_dec(v) atomic_sub(1,(v)) | ||
259 | #define atomic64_dec(v) atomic64_sub(1,(v)) | ||
260 | |||
261 | #define smp_mb__before_atomic_dec() smp_mb() | ||
262 | #define smp_mb__after_atomic_dec() smp_mb() | ||
263 | #define smp_mb__before_atomic_inc() smp_mb() | ||
264 | #define smp_mb__after_atomic_inc() smp_mb() | ||
265 | |||
266 | #include <asm-generic/atomic.h> | ||
267 | #endif /* _ALPHA_ATOMIC_H */ | ||
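atomic_add_unless() and the atomic_inc_not_zero() wrapper defined above are the usual way to take a reference only while an object is still live. A hedged usage sketch (the structure and helper are made up for illustration):

struct example_obj {
	atomic_t refcount;
};

/* Returns non-zero if a reference was taken, zero if the count was
   already zero (object on its way out). */
static int example_obj_get(struct example_obj *obj)
{
	return atomic_inc_not_zero(&obj->refcount);
}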
diff --git a/arch/alpha/include/asm/auxvec.h b/arch/alpha/include/asm/auxvec.h
new file mode 100644
index 000000000000..e96fe880e310
--- /dev/null
+++ b/arch/alpha/include/asm/auxvec.h
@@ -0,0 +1,24 @@
1 | #ifndef __ASM_ALPHA_AUXVEC_H | ||
2 | #define __ASM_ALPHA_AUXVEC_H | ||
3 | |||
4 | /* Reserve these numbers for any future use of a VDSO. */ | ||
5 | #if 0 | ||
6 | #define AT_SYSINFO 32 | ||
7 | #define AT_SYSINFO_EHDR 33 | ||
8 | #endif | ||
9 | |||
10 | /* More complete cache descriptions than AT_[DIU]CACHEBSIZE. If the | ||
11 | value is -1, then the cache doesn't exist. Otherwise: | ||
12 | |||
13 | bit 0-3: Cache set-associativity; 0 means fully associative. | ||
14 | bit 4-7: Log2 of cacheline size. | ||
15 | bit 8-31: Size of the entire cache >> 8. | ||
16 | bit 32-63: Reserved. | ||
17 | */ | ||
18 | |||
19 | #define AT_L1I_CACHESHAPE 34 | ||
20 | #define AT_L1D_CACHESHAPE 35 | ||
21 | #define AT_L2_CACHESHAPE 36 | ||
22 | #define AT_L3_CACHESHAPE 37 | ||
23 | |||
24 | #endif /* __ASM_ALPHA_AUXVEC_H */ | ||
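The cache-shape encoding described above (a value of -1 meaning "no such cache") can be unpacked as follows; this decoder is purely illustrative and not part of the header:

static inline unsigned long cacheshape_assoc(unsigned long v)
{
	return v & 0xf;				/* 0 = fully associative */
}

static inline unsigned long cacheshape_line_bytes(unsigned long v)
{
	return 1UL << ((v >> 4) & 0xf);
}

static inline unsigned long cacheshape_total_bytes(unsigned long v)
{
	return ((v >> 8) & 0xffffff) << 8;
}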
diff --git a/arch/alpha/include/asm/barrier.h b/arch/alpha/include/asm/barrier.h
new file mode 100644
index 000000000000..ac78eba909bc
--- /dev/null
+++ b/arch/alpha/include/asm/barrier.h
@@ -0,0 +1,33 @@
1 | #ifndef __BARRIER_H | ||
2 | #define __BARRIER_H | ||
3 | |||
4 | #include <asm/compiler.h> | ||
5 | |||
6 | #define mb() \ | ||
7 | __asm__ __volatile__("mb": : :"memory") | ||
8 | |||
9 | #define rmb() \ | ||
10 | __asm__ __volatile__("mb": : :"memory") | ||
11 | |||
12 | #define wmb() \ | ||
13 | __asm__ __volatile__("wmb": : :"memory") | ||
14 | |||
15 | #define read_barrier_depends() \ | ||
16 | __asm__ __volatile__("mb": : :"memory") | ||
17 | |||
18 | #ifdef CONFIG_SMP | ||
19 | #define smp_mb() mb() | ||
20 | #define smp_rmb() rmb() | ||
21 | #define smp_wmb() wmb() | ||
22 | #define smp_read_barrier_depends() read_barrier_depends() | ||
23 | #else | ||
24 | #define smp_mb() barrier() | ||
25 | #define smp_rmb() barrier() | ||
26 | #define smp_wmb() barrier() | ||
27 | #define smp_read_barrier_depends() do { } while (0) | ||
28 | #endif | ||
29 | |||
30 | #define set_mb(var, value) \ | ||
31 | do { var = value; mb(); } while (0) | ||
32 | |||
33 | #endif /* __BARRIER_H */ | ||
diff --git a/arch/alpha/include/asm/bitops.h b/arch/alpha/include/asm/bitops.h
new file mode 100644
index 000000000000..15f3ae25c511
--- /dev/null
+++ b/arch/alpha/include/asm/bitops.h
@@ -0,0 +1,466 @@
1 | #ifndef _ALPHA_BITOPS_H | ||
2 | #define _ALPHA_BITOPS_H | ||
3 | |||
4 | #ifndef _LINUX_BITOPS_H | ||
5 | #error only <linux/bitops.h> can be included directly | ||
6 | #endif | ||
7 | |||
8 | #include <asm/compiler.h> | ||
9 | #include <asm/barrier.h> | ||
10 | |||
11 | /* | ||
12 | * Copyright 1994, Linus Torvalds. | ||
13 | */ | ||
14 | |||
15 | /* | ||
16 | * These have to be done with inline assembly: that way the bit-setting | ||
17 | * is guaranteed to be atomic. All bit operations return 0 if the bit | ||
18 | * was cleared before the operation and != 0 if it was not. | ||
19 | * | ||
20 | * To get proper branch prediction for the main line, we must branch | ||
21 | * forward to code at the end of this object's .text section, then | ||
22 | * branch back to restart the operation. | ||
23 | * | ||
24 | * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1). | ||
25 | */ | ||
26 | |||
27 | static inline void | ||
28 | set_bit(unsigned long nr, volatile void * addr) | ||
29 | { | ||
30 | unsigned long temp; | ||
31 | int *m = ((int *) addr) + (nr >> 5); | ||
32 | |||
33 | __asm__ __volatile__( | ||
34 | "1: ldl_l %0,%3\n" | ||
35 | " bis %0,%2,%0\n" | ||
36 | " stl_c %0,%1\n" | ||
37 | " beq %0,2f\n" | ||
38 | ".subsection 2\n" | ||
39 | "2: br 1b\n" | ||
40 | ".previous" | ||
41 | :"=&r" (temp), "=m" (*m) | ||
42 | :"Ir" (1UL << (nr & 31)), "m" (*m)); | ||
43 | } | ||
44 | |||
45 | /* | ||
46 | * WARNING: non atomic version. | ||
47 | */ | ||
48 | static inline void | ||
49 | __set_bit(unsigned long nr, volatile void * addr) | ||
50 | { | ||
51 | int *m = ((int *) addr) + (nr >> 5); | ||
52 | |||
53 | *m |= 1 << (nr & 31); | ||
54 | } | ||
55 | |||
56 | #define smp_mb__before_clear_bit() smp_mb() | ||
57 | #define smp_mb__after_clear_bit() smp_mb() | ||
58 | |||
59 | static inline void | ||
60 | clear_bit(unsigned long nr, volatile void * addr) | ||
61 | { | ||
62 | unsigned long temp; | ||
63 | int *m = ((int *) addr) + (nr >> 5); | ||
64 | |||
65 | __asm__ __volatile__( | ||
66 | "1: ldl_l %0,%3\n" | ||
67 | " bic %0,%2,%0\n" | ||
68 | " stl_c %0,%1\n" | ||
69 | " beq %0,2f\n" | ||
70 | ".subsection 2\n" | ||
71 | "2: br 1b\n" | ||
72 | ".previous" | ||
73 | :"=&r" (temp), "=m" (*m) | ||
74 | :"Ir" (1UL << (nr & 31)), "m" (*m)); | ||
75 | } | ||
76 | |||
77 | static inline void | ||
78 | clear_bit_unlock(unsigned long nr, volatile void * addr) | ||
79 | { | ||
80 | smp_mb(); | ||
81 | clear_bit(nr, addr); | ||
82 | } | ||
83 | |||
84 | /* | ||
85 | * WARNING: non atomic version. | ||
86 | */ | ||
87 | static __inline__ void | ||
88 | __clear_bit(unsigned long nr, volatile void * addr) | ||
89 | { | ||
90 | int *m = ((int *) addr) + (nr >> 5); | ||
91 | |||
92 | *m &= ~(1 << (nr & 31)); | ||
93 | } | ||
94 | |||
95 | static inline void | ||
96 | __clear_bit_unlock(unsigned long nr, volatile void * addr) | ||
97 | { | ||
98 | smp_mb(); | ||
99 | __clear_bit(nr, addr); | ||
100 | } | ||
101 | |||
102 | static inline void | ||
103 | change_bit(unsigned long nr, volatile void * addr) | ||
104 | { | ||
105 | unsigned long temp; | ||
106 | int *m = ((int *) addr) + (nr >> 5); | ||
107 | |||
108 | __asm__ __volatile__( | ||
109 | "1: ldl_l %0,%3\n" | ||
110 | " xor %0,%2,%0\n" | ||
111 | " stl_c %0,%1\n" | ||
112 | " beq %0,2f\n" | ||
113 | ".subsection 2\n" | ||
114 | "2: br 1b\n" | ||
115 | ".previous" | ||
116 | :"=&r" (temp), "=m" (*m) | ||
117 | :"Ir" (1UL << (nr & 31)), "m" (*m)); | ||
118 | } | ||
119 | |||
120 | /* | ||
121 | * WARNING: non atomic version. | ||
122 | */ | ||
123 | static __inline__ void | ||
124 | __change_bit(unsigned long nr, volatile void * addr) | ||
125 | { | ||
126 | int *m = ((int *) addr) + (nr >> 5); | ||
127 | |||
128 | *m ^= 1 << (nr & 31); | ||
129 | } | ||
130 | |||
131 | static inline int | ||
132 | test_and_set_bit(unsigned long nr, volatile void *addr) | ||
133 | { | ||
134 | unsigned long oldbit; | ||
135 | unsigned long temp; | ||
136 | int *m = ((int *) addr) + (nr >> 5); | ||
137 | |||
138 | __asm__ __volatile__( | ||
139 | #ifdef CONFIG_SMP | ||
140 | " mb\n" | ||
141 | #endif | ||
142 | "1: ldl_l %0,%4\n" | ||
143 | " and %0,%3,%2\n" | ||
144 | " bne %2,2f\n" | ||
145 | " xor %0,%3,%0\n" | ||
146 | " stl_c %0,%1\n" | ||
147 | " beq %0,3f\n" | ||
148 | "2:\n" | ||
149 | #ifdef CONFIG_SMP | ||
150 | " mb\n" | ||
151 | #endif | ||
152 | ".subsection 2\n" | ||
153 | "3: br 1b\n" | ||
154 | ".previous" | ||
155 | :"=&r" (temp), "=m" (*m), "=&r" (oldbit) | ||
156 | :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); | ||
157 | |||
158 | return oldbit != 0; | ||
159 | } | ||
160 | |||
161 | static inline int | ||
162 | test_and_set_bit_lock(unsigned long nr, volatile void *addr) | ||
163 | { | ||
164 | unsigned long oldbit; | ||
165 | unsigned long temp; | ||
166 | int *m = ((int *) addr) + (nr >> 5); | ||
167 | |||
168 | __asm__ __volatile__( | ||
169 | "1: ldl_l %0,%4\n" | ||
170 | " and %0,%3,%2\n" | ||
171 | " bne %2,2f\n" | ||
172 | " xor %0,%3,%0\n" | ||
173 | " stl_c %0,%1\n" | ||
174 | " beq %0,3f\n" | ||
175 | "2:\n" | ||
176 | #ifdef CONFIG_SMP | ||
177 | " mb\n" | ||
178 | #endif | ||
179 | ".subsection 2\n" | ||
180 | "3: br 1b\n" | ||
181 | ".previous" | ||
182 | :"=&r" (temp), "=m" (*m), "=&r" (oldbit) | ||
183 | :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); | ||
184 | |||
185 | return oldbit != 0; | ||
186 | } | ||
187 | |||
188 | /* | ||
189 | * WARNING: non atomic version. | ||
190 | */ | ||
191 | static inline int | ||
192 | __test_and_set_bit(unsigned long nr, volatile void * addr) | ||
193 | { | ||
194 | unsigned long mask = 1 << (nr & 0x1f); | ||
195 | int *m = ((int *) addr) + (nr >> 5); | ||
196 | int old = *m; | ||
197 | |||
198 | *m = old | mask; | ||
199 | return (old & mask) != 0; | ||
200 | } | ||
201 | |||
202 | static inline int | ||
203 | test_and_clear_bit(unsigned long nr, volatile void * addr) | ||
204 | { | ||
205 | unsigned long oldbit; | ||
206 | unsigned long temp; | ||
207 | int *m = ((int *) addr) + (nr >> 5); | ||
208 | |||
209 | __asm__ __volatile__( | ||
210 | #ifdef CONFIG_SMP | ||
211 | " mb\n" | ||
212 | #endif | ||
213 | "1: ldl_l %0,%4\n" | ||
214 | " and %0,%3,%2\n" | ||
215 | " beq %2,2f\n" | ||
216 | " xor %0,%3,%0\n" | ||
217 | " stl_c %0,%1\n" | ||
218 | " beq %0,3f\n" | ||
219 | "2:\n" | ||
220 | #ifdef CONFIG_SMP | ||
221 | " mb\n" | ||
222 | #endif | ||
223 | ".subsection 2\n" | ||
224 | "3: br 1b\n" | ||
225 | ".previous" | ||
226 | :"=&r" (temp), "=m" (*m), "=&r" (oldbit) | ||
227 | :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); | ||
228 | |||
229 | return oldbit != 0; | ||
230 | } | ||
231 | |||
232 | /* | ||
233 | * WARNING: non atomic version. | ||
234 | */ | ||
235 | static inline int | ||
236 | __test_and_clear_bit(unsigned long nr, volatile void * addr) | ||
237 | { | ||
238 | unsigned long mask = 1 << (nr & 0x1f); | ||
239 | int *m = ((int *) addr) + (nr >> 5); | ||
240 | int old = *m; | ||
241 | |||
242 | *m = old & ~mask; | ||
243 | return (old & mask) != 0; | ||
244 | } | ||
245 | |||
246 | static inline int | ||
247 | test_and_change_bit(unsigned long nr, volatile void * addr) | ||
248 | { | ||
249 | unsigned long oldbit; | ||
250 | unsigned long temp; | ||
251 | int *m = ((int *) addr) + (nr >> 5); | ||
252 | |||
253 | __asm__ __volatile__( | ||
254 | #ifdef CONFIG_SMP | ||
255 | " mb\n" | ||
256 | #endif | ||
257 | "1: ldl_l %0,%4\n" | ||
258 | " and %0,%3,%2\n" | ||
259 | " xor %0,%3,%0\n" | ||
260 | " stl_c %0,%1\n" | ||
261 | " beq %0,3f\n" | ||
262 | #ifdef CONFIG_SMP | ||
263 | " mb\n" | ||
264 | #endif | ||
265 | ".subsection 2\n" | ||
266 | "3: br 1b\n" | ||
267 | ".previous" | ||
268 | :"=&r" (temp), "=m" (*m), "=&r" (oldbit) | ||
269 | :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); | ||
270 | |||
271 | return oldbit != 0; | ||
272 | } | ||
273 | |||
274 | /* | ||
275 | * WARNING: non atomic version. | ||
276 | */ | ||
277 | static __inline__ int | ||
278 | __test_and_change_bit(unsigned long nr, volatile void * addr) | ||
279 | { | ||
280 | unsigned long mask = 1 << (nr & 0x1f); | ||
281 | int *m = ((int *) addr) + (nr >> 5); | ||
282 | int old = *m; | ||
283 | |||
284 | *m = old ^ mask; | ||
285 | return (old & mask) != 0; | ||
286 | } | ||
287 | |||
288 | static inline int | ||
289 | test_bit(int nr, const volatile void * addr) | ||
290 | { | ||
291 | return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL; | ||
292 | } | ||
293 | |||
294 | /* | ||
295 | * ffz = Find First Zero in word. Undefined if no zero exists, | ||
296 | * so code should check against ~0UL first.. | ||
297 | * | ||
298 | * Do a binary search on the bits. Due to the nature of large | ||
299 | * constants on the alpha, it is worthwhile to split the search. | ||
300 | */ | ||
301 | static inline unsigned long ffz_b(unsigned long x) | ||
302 | { | ||
303 | unsigned long sum, x1, x2, x4; | ||
304 | |||
305 | x = ~x & -~x; /* set first 0 bit, clear others */ | ||
306 | x1 = x & 0xAA; | ||
307 | x2 = x & 0xCC; | ||
308 | x4 = x & 0xF0; | ||
309 | sum = x2 ? 2 : 0; | ||
310 | sum += (x4 != 0) * 4; | ||
311 | sum += (x1 != 0); | ||
312 | |||
313 | return sum; | ||
314 | } | ||
315 | |||
316 | static inline unsigned long ffz(unsigned long word) | ||
317 | { | ||
318 | #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) | ||
319 | /* Whee. EV67 can calculate it directly. */ | ||
320 | return __kernel_cttz(~word); | ||
321 | #else | ||
322 | unsigned long bits, qofs, bofs; | ||
323 | |||
324 | bits = __kernel_cmpbge(word, ~0UL); | ||
325 | qofs = ffz_b(bits); | ||
326 | bits = __kernel_extbl(word, qofs); | ||
327 | bofs = ffz_b(bits); | ||
328 | |||
329 | return qofs*8 + bofs; | ||
330 | #endif | ||
331 | } | ||
332 | |||
333 | /* | ||
334 | * __ffs = Find First set bit in word. Undefined if no set bit exists. | ||
335 | */ | ||
336 | static inline unsigned long __ffs(unsigned long word) | ||
337 | { | ||
338 | #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) | ||
339 | /* Whee. EV67 can calculate it directly. */ | ||
340 | return __kernel_cttz(word); | ||
341 | #else | ||
342 | unsigned long bits, qofs, bofs; | ||
343 | |||
344 | bits = __kernel_cmpbge(0, word); | ||
345 | qofs = ffz_b(bits); | ||
346 | bits = __kernel_extbl(word, qofs); | ||
347 | bofs = ffz_b(~bits); | ||
348 | |||
349 | return qofs*8 + bofs; | ||
350 | #endif | ||
351 | } | ||
352 | |||
353 | #ifdef __KERNEL__ | ||
354 | |||
355 | /* | ||
356 | * ffs: find first bit set. This is defined the same way as | ||
357 | * the libc and compiler builtin ffs routines, therefore | ||
358 | * differs in spirit from the above __ffs. | ||
359 | */ | ||
360 | |||
361 | static inline int ffs(int word) | ||
362 | { | ||
363 | int result = __ffs(word) + 1; | ||
364 | return word ? result : 0; | ||
365 | } | ||
366 | |||
367 | /* | ||
368 | * fls: find last bit set. | ||
369 | */ | ||
370 | #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) | ||
371 | static inline int fls64(unsigned long word) | ||
372 | { | ||
373 | return 64 - __kernel_ctlz(word); | ||
374 | } | ||
375 | #else | ||
376 | extern const unsigned char __flsm1_tab[256]; | ||
377 | |||
378 | static inline int fls64(unsigned long x) | ||
379 | { | ||
380 | unsigned long t, a, r; | ||
381 | |||
382 | t = __kernel_cmpbge (x, 0x0101010101010101UL); | ||
383 | a = __flsm1_tab[t]; | ||
384 | t = __kernel_extbl (x, a); | ||
385 | r = a*8 + __flsm1_tab[t] + (x != 0); | ||
386 | |||
387 | return r; | ||
388 | } | ||
389 | #endif | ||
390 | |||
391 | static inline unsigned long __fls(unsigned long x) | ||
392 | { | ||
393 | return fls64(x) - 1; | ||
394 | } | ||
395 | |||
396 | static inline int fls(int x) | ||
397 | { | ||
398 | return fls64((unsigned int) x); | ||
399 | } | ||
400 | |||
401 | /* | ||
402 | * hweightN: returns the hamming weight (i.e. the number | ||
403 | * of bits set) of a N-bit word | ||
404 | */ | ||
405 | |||
406 | #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) | ||
407 | /* Whee. EV67 can calculate it directly. */ | ||
408 | static inline unsigned long hweight64(unsigned long w) | ||
409 | { | ||
410 | return __kernel_ctpop(w); | ||
411 | } | ||
412 | |||
413 | static inline unsigned int hweight32(unsigned int w) | ||
414 | { | ||
415 | return hweight64(w); | ||
416 | } | ||
417 | |||
418 | static inline unsigned int hweight16(unsigned int w) | ||
419 | { | ||
420 | return hweight64(w & 0xffff); | ||
421 | } | ||
422 | |||
423 | static inline unsigned int hweight8(unsigned int w) | ||
424 | { | ||
425 | return hweight64(w & 0xff); | ||
426 | } | ||
427 | #else | ||
428 | #include <asm-generic/bitops/hweight.h> | ||
429 | #endif | ||
430 | |||
431 | #endif /* __KERNEL__ */ | ||
432 | |||
433 | #include <asm-generic/bitops/find.h> | ||
434 | |||
435 | #ifdef __KERNEL__ | ||
436 | |||
437 | /* | ||
438 | * Every architecture must define this function. It's the fastest | ||
439 | * way of searching a 140-bit bitmap where the first 100 bits are | ||
440 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
441 | * bits is set. | ||
442 | */ | ||
443 | static inline unsigned long | ||
444 | sched_find_first_bit(unsigned long b[3]) | ||
445 | { | ||
446 | unsigned long b0 = b[0], b1 = b[1], b2 = b[2]; | ||
447 | unsigned long ofs; | ||
448 | |||
449 | ofs = (b1 ? 64 : 128); | ||
450 | b1 = (b1 ? b1 : b2); | ||
451 | ofs = (b0 ? 0 : ofs); | ||
452 | b0 = (b0 ? b0 : b1); | ||
453 | |||
454 | return __ffs(b0) + ofs; | ||
455 | } | ||
456 | |||
457 | #include <asm-generic/bitops/ext2-non-atomic.h> | ||
458 | |||
459 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) | ||
460 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) | ||
461 | |||
462 | #include <asm-generic/bitops/minix.h> | ||
463 | |||
464 | #endif /* __KERNEL__ */ | ||
465 | |||
466 | #endif /* _ALPHA_BITOPS_H */ | ||
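To trace sched_find_first_bit() above with a worked example: if b[0] and b[1] are zero and b[2] has only bit 5 set, then ofs first becomes 128 (because b1 is zero), b1 is replaced by b2, b0 then picks up that value, and the function returns __ffs(b2) + 128 = 133 — bit 133 of the 140-bit bitmap, as expected given the guarantee that at least one bit is set.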
diff --git a/arch/alpha/include/asm/bug.h b/arch/alpha/include/asm/bug.h
new file mode 100644
index 000000000000..695a5ee4b5d3
--- /dev/null
+++ b/arch/alpha/include/asm/bug.h
@@ -0,0 +1,28 @@
1 | #ifndef _ALPHA_BUG_H | ||
2 | #define _ALPHA_BUG_H | ||
3 | |||
4 | #include <linux/linkage.h> | ||
5 | |||
6 | #ifdef CONFIG_BUG | ||
7 | #include <asm/pal.h> | ||
8 | |||
9 | /* ??? Would be nice to use .gprel32 here, but we can't be sure that the | ||
10 | function loaded the GP, so this could fail in modules. */ | ||
11 | static inline void ATTRIB_NORET __BUG(const char *file, int line) | ||
12 | { | ||
13 | __asm__ __volatile__( | ||
14 | "call_pal %0 # bugchk\n\t" | ||
15 | ".long %1\n\t.8byte %2" | ||
16 | : : "i" (PAL_bugchk), "i"(line), "i"(file)); | ||
17 | for ( ; ; ) | ||
18 | ; | ||
19 | } | ||
20 | |||
21 | #define BUG() __BUG(__FILE__, __LINE__) | ||
22 | |||
23 | #define HAVE_ARCH_BUG | ||
24 | #endif | ||
25 | |||
26 | #include <asm-generic/bug.h> | ||
27 | |||
28 | #endif | ||
diff --git a/arch/alpha/include/asm/bugs.h b/arch/alpha/include/asm/bugs.h
new file mode 100644
index 000000000000..78030d1c7e7e
--- /dev/null
+++ b/arch/alpha/include/asm/bugs.h
@@ -0,0 +1,20 @@
1 | /* | ||
2 | * include/asm-alpha/bugs.h | ||
3 | * | ||
4 | * Copyright (C) 1994 Linus Torvalds | ||
5 | */ | ||
6 | |||
7 | /* | ||
8 | * This is included by init/main.c to check for architecture-dependent bugs. | ||
9 | * | ||
10 | * Needs: | ||
11 | * void check_bugs(void); | ||
12 | */ | ||
13 | |||
14 | /* | ||
15 | * I don't know of any alpha bugs yet.. Nice chip | ||
16 | */ | ||
17 | |||
18 | static void check_bugs(void) | ||
19 | { | ||
20 | } | ||
diff --git a/arch/alpha/include/asm/byteorder.h b/arch/alpha/include/asm/byteorder.h
new file mode 100644
index 000000000000..58e958fc7f1b
--- /dev/null
+++ b/arch/alpha/include/asm/byteorder.h
@@ -0,0 +1,47 @@
1 | #ifndef _ALPHA_BYTEORDER_H | ||
2 | #define _ALPHA_BYTEORDER_H | ||
3 | |||
4 | #include <asm/types.h> | ||
5 | #include <linux/compiler.h> | ||
6 | #include <asm/compiler.h> | ||
7 | |||
8 | #ifdef __GNUC__ | ||
9 | |||
10 | static inline __attribute_const__ __u32 __arch__swab32(__u32 x) | ||
11 | { | ||
12 | /* | ||
13 | * Unfortunately, we can't use the 6 instruction sequence | ||
14 | * on ev6 since the latency of the UNPKBW is 3, which is | ||
15 | * pretty hard to hide. Just in case a future implementation | ||
16 | * has a lower latency, here's the sequence (also by Mike Burrows) | ||
17 | * | ||
18 | * UNPKBW a0, v0 v0: 00AA00BB00CC00DD | ||
19 | * SLL v0, 24, a0 a0: BB00CC00DD000000 | ||
20 | * BIS v0, a0, a0 a0: BBAACCBBDDCC00DD | ||
21 | * EXTWL a0, 6, v0 v0: 000000000000BBAA | ||
22 | * ZAP a0, 0xf3, a0 a0: 00000000DDCC0000 | ||
23 | * ADDL a0, v0, v0 v0: ssssssssDDCCBBAA | ||
24 | */ | ||
25 | |||
26 | __u64 t0, t1, t2, t3; | ||
27 | |||
28 | t0 = __kernel_inslh(x, 7); /* t0 : 0000000000AABBCC */ | ||
29 | t1 = __kernel_inswl(x, 3); /* t1 : 000000CCDD000000 */ | ||
30 | t1 |= t0; /* t1 : 000000CCDDAABBCC */ | ||
31 | t2 = t1 >> 16; /* t2 : 0000000000CCDDAA */ | ||
32 | t0 = t1 & 0xFF00FF00; /* t0 : 00000000DD00BB00 */ | ||
33 | t3 = t2 & 0x00FF00FF; /* t3 : 0000000000CC00AA */ | ||
34 | t1 = t0 + t3; /* t1 : ssssssssDDCCBBAA */ | ||
35 | |||
36 | return t1; | ||
37 | } | ||
38 | |||
39 | #define __arch__swab32 __arch__swab32 | ||
40 | |||
41 | #endif /* __GNUC__ */ | ||
42 | |||
43 | #define __BYTEORDER_HAS_U64__ | ||
44 | |||
45 | #include <linux/byteorder/little_endian.h> | ||
46 | |||
47 | #endif /* _ALPHA_BYTEORDER_H */ | ||
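For reference, a plain-C statement of the byte swap that __arch__swab32 computes (0xAABBCCDD becomes 0xDDCCBBAA); this is only an illustration of the result, not an alternative implementation used by the kernel:

static inline __u32 example_swab32(__u32 x)
{
	return ((x & 0x000000ffU) << 24) |
	       ((x & 0x0000ff00U) <<  8) |
	       ((x & 0x00ff0000U) >>  8) |
	       ((x & 0xff000000U) >> 24);
}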
diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
new file mode 100644
index 000000000000..f199e69a5d0b
--- /dev/null
+++ b/arch/alpha/include/asm/cache.h
@@ -0,0 +1,23 @@
1 | /* | ||
2 | * include/asm-alpha/cache.h | ||
3 | */ | ||
4 | #ifndef __ARCH_ALPHA_CACHE_H | ||
5 | #define __ARCH_ALPHA_CACHE_H | ||
6 | |||
7 | |||
8 | /* Bytes per L1 (data) cache line. */ | ||
9 | #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6) | ||
10 | # define L1_CACHE_BYTES 64 | ||
11 | # define L1_CACHE_SHIFT 6 | ||
12 | #else | ||
13 | /* Both EV4 and EV5 are write-through, read-allocate, | ||
14 | direct-mapped, physical. | ||
15 | */ | ||
16 | # define L1_CACHE_BYTES 32 | ||
17 | # define L1_CACHE_SHIFT 5 | ||
18 | #endif | ||
19 | |||
20 | #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)) | ||
21 | #define SMP_CACHE_BYTES L1_CACHE_BYTES | ||
22 | |||
23 | #endif | ||
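As a worked example of the macro above: with L1_CACHE_BYTES = 64 (EV6 or generic builds), L1_CACHE_ALIGN(100) = (100 + 63) & ~63 = 128, while an already-aligned value such as 128 is returned unchanged.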
diff --git a/arch/alpha/include/asm/cacheflush.h b/arch/alpha/include/asm/cacheflush.h
new file mode 100644
index 000000000000..b686cc7fc44e
--- /dev/null
+++ b/arch/alpha/include/asm/cacheflush.h
@@ -0,0 +1,74 @@
1 | #ifndef _ALPHA_CACHEFLUSH_H | ||
2 | #define _ALPHA_CACHEFLUSH_H | ||
3 | |||
4 | #include <linux/mm.h> | ||
5 | |||
6 | /* Caches aren't brain-dead on the Alpha. */ | ||
7 | #define flush_cache_all() do { } while (0) | ||
8 | #define flush_cache_mm(mm) do { } while (0) | ||
9 | #define flush_cache_dup_mm(mm) do { } while (0) | ||
10 | #define flush_cache_range(vma, start, end) do { } while (0) | ||
11 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) | ||
12 | #define flush_dcache_page(page) do { } while (0) | ||
13 | #define flush_dcache_mmap_lock(mapping) do { } while (0) | ||
14 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) | ||
15 | #define flush_cache_vmap(start, end) do { } while (0) | ||
16 | #define flush_cache_vunmap(start, end) do { } while (0) | ||
17 | |||
18 | /* Note that the following two definitions are _highly_ dependent | ||
19 | on the contexts in which they are used in the kernel. I personally | ||
20 | think it is criminal how loosely defined these macros are. */ | ||
21 | |||
22 | /* We need to flush the kernel's icache after loading modules. The | ||
23 | only other use of this macro is in load_aout_interp which is not | ||
24 | used on Alpha. | ||
25 | |||
26 | Note that this definition should *not* be used for userspace | ||
27 | icache flushing. While functional, it is _way_ overkill. The | ||
28 | icache is tagged with ASNs and it suffices to allocate a new ASN | ||
29 | for the process. */ | ||
30 | #ifndef CONFIG_SMP | ||
31 | #define flush_icache_range(start, end) imb() | ||
32 | #else | ||
33 | #define flush_icache_range(start, end) smp_imb() | ||
34 | extern void smp_imb(void); | ||
35 | #endif | ||
36 | |||
37 | /* We need to flush the userspace icache after setting breakpoints in | ||
38 | ptrace. | ||
39 | |||
40 | Instead of indiscriminately using imb, take advantage of the fact | ||
41 | that icache entries are tagged with the ASN and load a new mm context. */ | ||
42 | /* ??? Ought to use this in arch/alpha/kernel/signal.c too. */ | ||
43 | |||
44 | #ifndef CONFIG_SMP | ||
45 | extern void __load_new_mm_context(struct mm_struct *); | ||
46 | static inline void | ||
47 | flush_icache_user_range(struct vm_area_struct *vma, struct page *page, | ||
48 | unsigned long addr, int len) | ||
49 | { | ||
50 | if (vma->vm_flags & VM_EXEC) { | ||
51 | struct mm_struct *mm = vma->vm_mm; | ||
52 | if (current->active_mm == mm) | ||
53 | __load_new_mm_context(mm); | ||
54 | else | ||
55 | mm->context[smp_processor_id()] = 0; | ||
56 | } | ||
57 | } | ||
58 | #else | ||
59 | extern void flush_icache_user_range(struct vm_area_struct *vma, | ||
60 | struct page *page, unsigned long addr, int len); | ||
61 | #endif | ||
62 | |||
63 | /* This is used only in do_no_page and do_swap_page. */ | ||
64 | #define flush_icache_page(vma, page) \ | ||
65 | flush_icache_user_range((vma), (page), 0, 0) | ||
66 | |||
67 | #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ | ||
68 | do { memcpy(dst, src, len); \ | ||
69 | flush_icache_user_range(vma, page, vaddr, len); \ | ||
70 | } while (0) | ||
71 | #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ | ||
72 | memcpy(dst, src, len) | ||
73 | |||
74 | #endif /* _ALPHA_CACHEFLUSH_H */ | ||
diff --git a/arch/alpha/include/asm/checksum.h b/arch/alpha/include/asm/checksum.h
new file mode 100644
index 000000000000..d3854bbf0a9e
--- /dev/null
+++ b/arch/alpha/include/asm/checksum.h
@@ -0,0 +1,75 @@
1 | #ifndef _ALPHA_CHECKSUM_H | ||
2 | #define _ALPHA_CHECKSUM_H | ||
3 | |||
4 | #include <linux/in6.h> | ||
5 | |||
6 | /* | ||
7 | * This is a version of ip_compute_csum() optimized for IP headers, | ||
8 | * which always checksum on 4 octet boundaries. | ||
9 | */ | ||
10 | extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl); | ||
11 | |||
12 | /* | ||
13 | * computes the checksum of the TCP/UDP pseudo-header | ||
14 | * returns a 16-bit checksum, already complemented | ||
15 | */ | ||
16 | extern __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, | ||
17 | unsigned short len, | ||
18 | unsigned short proto, | ||
19 | __wsum sum); | ||
20 | |||
21 | __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, | ||
22 | unsigned short len, unsigned short proto, | ||
23 | __wsum sum); | ||
24 | |||
25 | /* | ||
26 | * computes the checksum of a memory block at buff, length len, | ||
27 | * and adds in "sum" (32-bit) | ||
28 | * | ||
29 | * returns a 32-bit number suitable for feeding into itself | ||
30 | * or csum_tcpudp_magic | ||
31 | * | ||
32 | * this function must be called with even lengths, except | ||
33 | * for the last fragment, which may be odd | ||
34 | * | ||
35 | * it's best to have buff aligned on a 32-bit boundary | ||
36 | */ | ||
37 | extern __wsum csum_partial(const void *buff, int len, __wsum sum); | ||
38 | |||
39 | /* | ||
40 | * the same as csum_partial, but copies from src while it | ||
41 | * checksums | ||
42 | * | ||
43 | * here even more important to align src and dst on a 32-bit (or even | ||
44 | * better 64-bit) boundary | ||
45 | */ | ||
46 | __wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *errp); | ||
47 | |||
48 | __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum); | ||
49 | |||
50 | |||
51 | /* | ||
52 | * this routine is used for miscellaneous IP-like checksums, mainly | ||
53 | * in icmp.c | ||
54 | */ | ||
55 | |||
56 | extern __sum16 ip_compute_csum(const void *buff, int len); | ||
57 | |||
58 | /* | ||
59 | * Fold a partial checksum without adding pseudo headers | ||
60 | */ | ||
61 | |||
62 | static inline __sum16 csum_fold(__wsum csum) | ||
63 | { | ||
64 | u32 sum = (__force u32)csum; | ||
65 | sum = (sum & 0xffff) + (sum >> 16); | ||
66 | sum = (sum & 0xffff) + (sum >> 16); | ||
67 | return (__force __sum16)~sum; | ||
68 | } | ||
69 | |||
70 | #define _HAVE_ARCH_IPV6_CSUM | ||
71 | extern __sum16 csum_ipv6_magic(const struct in6_addr *saddr, | ||
72 | const struct in6_addr *daddr, | ||
73 | __u32 len, unsigned short proto, | ||
74 | __wsum sum); | ||
75 | #endif | ||
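A short worked example of csum_fold(): for a partial sum of 0xFFFF0001 the first fold gives 0x0001 + 0xFFFF = 0x10000, the second fold gives 0x0000 + 0x0001 = 0x0001, and the complement yields 0xFFFE — the second fold is what absorbs the carry produced by the first.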
diff --git a/arch/alpha/include/asm/compiler.h b/arch/alpha/include/asm/compiler.h
new file mode 100644
index 000000000000..da6bb199839c
--- /dev/null
+++ b/arch/alpha/include/asm/compiler.h
@@ -0,0 +1,130 @@
1 | #ifndef __ALPHA_COMPILER_H | ||
2 | #define __ALPHA_COMPILER_H | ||
3 | |||
4 | /* | ||
5 | * Herein are macros we use when describing various patterns we want to GCC. | ||
6 | * In all cases we can get better schedules out of the compiler if we hide | ||
7 | * as little as possible inside inline assembly. However, we want to be | ||
8 | * able to know what we'll get out before giving up inline assembly. Thus | ||
9 | * these tests and macros. | ||
10 | */ | ||
11 | |||
12 | #if __GNUC__ == 3 && __GNUC_MINOR__ >= 4 || __GNUC__ > 3 | ||
13 | # define __kernel_insbl(val, shift) __builtin_alpha_insbl(val, shift) | ||
14 | # define __kernel_inswl(val, shift) __builtin_alpha_inswl(val, shift) | ||
15 | # define __kernel_insql(val, shift) __builtin_alpha_insql(val, shift) | ||
16 | # define __kernel_inslh(val, shift) __builtin_alpha_inslh(val, shift) | ||
17 | # define __kernel_extbl(val, shift) __builtin_alpha_extbl(val, shift) | ||
18 | # define __kernel_extwl(val, shift) __builtin_alpha_extwl(val, shift) | ||
19 | # define __kernel_cmpbge(a, b) __builtin_alpha_cmpbge(a, b) | ||
20 | #else | ||
21 | # define __kernel_insbl(val, shift) \ | ||
22 | ({ unsigned long __kir; \ | ||
23 | __asm__("insbl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \ | ||
24 | __kir; }) | ||
25 | # define __kernel_inswl(val, shift) \ | ||
26 | ({ unsigned long __kir; \ | ||
27 | __asm__("inswl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \ | ||
28 | __kir; }) | ||
29 | # define __kernel_insql(val, shift) \ | ||
30 | ({ unsigned long __kir; \ | ||
31 | __asm__("insql %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \ | ||
32 | __kir; }) | ||
33 | # define __kernel_inslh(val, shift) \ | ||
34 | ({ unsigned long __kir; \ | ||
35 | __asm__("inslh %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \ | ||
36 | __kir; }) | ||
37 | # define __kernel_extbl(val, shift) \ | ||
38 | ({ unsigned long __kir; \ | ||
39 | __asm__("extbl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \ | ||
40 | __kir; }) | ||
41 | # define __kernel_extwl(val, shift) \ | ||
42 | ({ unsigned long __kir; \ | ||
43 | __asm__("extwl %2,%1,%0" : "=r"(__kir) : "rI"(shift), "r"(val)); \ | ||
44 | __kir; }) | ||
45 | # define __kernel_cmpbge(a, b) \ | ||
46 | ({ unsigned long __kir; \ | ||
47 | __asm__("cmpbge %r2,%1,%0" : "=r"(__kir) : "rI"(b), "rJ"(a)); \ | ||
48 | __kir; }) | ||
49 | #endif | ||
50 | |||
51 | #ifdef __alpha_cix__ | ||
52 | # if __GNUC__ == 3 && __GNUC_MINOR__ >= 4 || __GNUC__ > 3 | ||
53 | # define __kernel_cttz(x) __builtin_ctzl(x) | ||
54 | # define __kernel_ctlz(x) __builtin_clzl(x) | ||
55 | # define __kernel_ctpop(x) __builtin_popcountl(x) | ||
56 | # else | ||
57 | # define __kernel_cttz(x) \ | ||
58 | ({ unsigned long __kir; \ | ||
59 | __asm__("cttz %1,%0" : "=r"(__kir) : "r"(x)); \ | ||
60 | __kir; }) | ||
61 | # define __kernel_ctlz(x) \ | ||
62 | ({ unsigned long __kir; \ | ||
63 | __asm__("ctlz %1,%0" : "=r"(__kir) : "r"(x)); \ | ||
64 | __kir; }) | ||
65 | # define __kernel_ctpop(x) \ | ||
66 | ({ unsigned long __kir; \ | ||
67 | __asm__("ctpop %1,%0" : "=r"(__kir) : "r"(x)); \ | ||
68 | __kir; }) | ||
69 | # endif | ||
70 | #else | ||
71 | # define __kernel_cttz(x) \ | ||
72 | ({ unsigned long __kir; \ | ||
73 | __asm__(".arch ev67; cttz %1,%0" : "=r"(__kir) : "r"(x)); \ | ||
74 | __kir; }) | ||
75 | # define __kernel_ctlz(x) \ | ||
76 | ({ unsigned long __kir; \ | ||
77 | __asm__(".arch ev67; ctlz %1,%0" : "=r"(__kir) : "r"(x)); \ | ||
78 | __kir; }) | ||
79 | # define __kernel_ctpop(x) \ | ||
80 | ({ unsigned long __kir; \ | ||
81 | __asm__(".arch ev67; ctpop %1,%0" : "=r"(__kir) : "r"(x)); \ | ||
82 | __kir; }) | ||
83 | #endif | ||
84 | |||
85 | |||
86 | /* | ||
87 | * Beginning with EGCS 1.1, GCC defines __alpha_bwx__ when the BWX | ||
88 | * extension is enabled. Previous versions did not define anything | ||
89 | * we could test during compilation -- too bad, so sad. | ||
90 | */ | ||
91 | |||
92 | #if defined(__alpha_bwx__) | ||
93 | #define __kernel_ldbu(mem) (mem) | ||
94 | #define __kernel_ldwu(mem) (mem) | ||
95 | #define __kernel_stb(val,mem) ((mem) = (val)) | ||
96 | #define __kernel_stw(val,mem) ((mem) = (val)) | ||
97 | #else | ||
98 | #define __kernel_ldbu(mem) \ | ||
99 | ({ unsigned char __kir; \ | ||
100 | __asm__(".arch ev56; \ | ||
101 | ldbu %0,%1" : "=r"(__kir) : "m"(mem)); \ | ||
102 | __kir; }) | ||
103 | #define __kernel_ldwu(mem) \ | ||
104 | ({ unsigned short __kir; \ | ||
105 | __asm__(".arch ev56; \ | ||
106 | ldwu %0,%1" : "=r"(__kir) : "m"(mem)); \ | ||
107 | __kir; }) | ||
108 | #define __kernel_stb(val,mem) \ | ||
109 | __asm__(".arch ev56; \ | ||
110 | stb %1,%0" : "=m"(mem) : "r"(val)) | ||
111 | #define __kernel_stw(val,mem) \ | ||
112 | __asm__(".arch ev56; \ | ||
113 | stw %1,%0" : "=m"(mem) : "r"(val)) | ||
114 | #endif | ||
115 | |||
116 | #ifdef __KERNEL__ | ||
117 | /* Some idiots over in <linux/compiler.h> thought inline should imply | ||
118 | always_inline. This breaks stuff. We'll include this file whenever | ||
119 | we run into such problems. */ | ||
120 | |||
121 | #include <linux/compiler.h> | ||
122 | #undef inline | ||
123 | #undef __inline__ | ||
124 | #undef __inline | ||
125 | #undef __always_inline | ||
126 | #define __always_inline inline __attribute__((always_inline)) | ||
127 | |||
128 | #endif /* __KERNEL__ */ | ||
129 | |||
130 | #endif /* __ALPHA_COMPILER_H */ | ||
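The __kernel_extbl/__kernel_cmpbge helpers above map to the Alpha EXTBL and CMPBGE instructions. As a hedged aid to reading their callers (e.g. ffz() in bitops.h), here are plain-C models of the two operations, based on the architectural definitions rather than anything stated in this header:

static inline unsigned long example_extbl(unsigned long val, unsigned long n)
{
	return (val >> ((n & 7) * 8)) & 0xff;	/* byte n of val, zero-extended */
}

static inline unsigned long example_cmpbge(unsigned long a, unsigned long b)
{
	unsigned long mask = 0;
	int i;

	/* bit i set iff byte i of a >= byte i of b (unsigned compare) */
	for (i = 0; i < 8; i++)
		if (((a >> (i * 8)) & 0xff) >= ((b >> (i * 8)) & 0xff))
			mask |= 1UL << i;
	return mask;
}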
diff --git a/arch/alpha/include/asm/console.h b/arch/alpha/include/asm/console.h
new file mode 100644
index 000000000000..a3ce4e62249b
--- /dev/null
+++ b/arch/alpha/include/asm/console.h
@@ -0,0 +1,75 @@
1 | #ifndef __AXP_CONSOLE_H | ||
2 | #define __AXP_CONSOLE_H | ||
3 | |||
4 | /* | ||
5 | * Console callback routine numbers | ||
6 | */ | ||
7 | #define CCB_GETC 0x01 | ||
8 | #define CCB_PUTS 0x02 | ||
9 | #define CCB_RESET_TERM 0x03 | ||
10 | #define CCB_SET_TERM_INT 0x04 | ||
11 | #define CCB_SET_TERM_CTL 0x05 | ||
12 | #define CCB_PROCESS_KEYCODE 0x06 | ||
13 | #define CCB_OPEN_CONSOLE 0x07 | ||
14 | #define CCB_CLOSE_CONSOLE 0x08 | ||
15 | |||
16 | #define CCB_OPEN 0x10 | ||
17 | #define CCB_CLOSE 0x11 | ||
18 | #define CCB_IOCTL 0x12 | ||
19 | #define CCB_READ 0x13 | ||
20 | #define CCB_WRITE 0x14 | ||
21 | |||
22 | #define CCB_SET_ENV 0x20 | ||
23 | #define CCB_RESET_ENV 0x21 | ||
24 | #define CCB_GET_ENV 0x22 | ||
25 | #define CCB_SAVE_ENV 0x23 | ||
26 | |||
27 | #define CCB_PSWITCH 0x30 | ||
28 | #define CCB_BIOS_EMUL 0x32 | ||
29 | |||
30 | /* | ||
31 | * Environment variable numbers | ||
32 | */ | ||
33 | #define ENV_AUTO_ACTION 0x01 | ||
34 | #define ENV_BOOT_DEV 0x02 | ||
35 | #define ENV_BOOTDEF_DEV 0x03 | ||
36 | #define ENV_BOOTED_DEV 0x04 | ||
37 | #define ENV_BOOT_FILE 0x05 | ||
38 | #define ENV_BOOTED_FILE 0x06 | ||
39 | #define ENV_BOOT_OSFLAGS 0x07 | ||
40 | #define ENV_BOOTED_OSFLAGS 0x08 | ||
41 | #define ENV_BOOT_RESET 0x09 | ||
42 | #define ENV_DUMP_DEV 0x0A | ||
43 | #define ENV_ENABLE_AUDIT 0x0B | ||
44 | #define ENV_LICENSE 0x0C | ||
45 | #define ENV_CHAR_SET 0x0D | ||
46 | #define ENV_LANGUAGE 0x0E | ||
47 | #define ENV_TTY_DEV 0x0F | ||
48 | |||
49 | #ifdef __KERNEL__ | ||
50 | #ifndef __ASSEMBLY__ | ||
51 | extern long callback_puts(long unit, const char *s, long length); | ||
52 | extern long callback_getc(long unit); | ||
53 | extern long callback_open_console(void); | ||
54 | extern long callback_close_console(void); | ||
55 | extern long callback_open(const char *device, long length); | ||
56 | extern long callback_close(long unit); | ||
57 | extern long callback_read(long channel, long count, const char *buf, long lbn); | ||
58 | extern long callback_getenv(long id, const char *buf, unsigned long buf_size); | ||
59 | extern long callback_setenv(long id, const char *buf, unsigned long buf_size); | ||
60 | extern long callback_save_env(void); | ||
61 | |||
62 | extern int srm_fixup(unsigned long new_callback_addr, | ||
63 | unsigned long new_hwrpb_addr); | ||
64 | extern long srm_puts(const char *, long); | ||
65 | extern long srm_printk(const char *, ...) | ||
66 | __attribute__ ((format (printf, 1, 2))); | ||
67 | |||
68 | struct crb_struct; | ||
69 | struct hwrpb_struct; | ||
70 | extern int callback_init_done; | ||
71 | extern void * callback_init(void *); | ||
72 | #endif /* __ASSEMBLY__ */ | ||
73 | #endif /* __KERNEL__ */ | ||
74 | |||
75 | #endif /* __AXP_CONSOLE_H */ | ||
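A hedged sketch of how the SRM callbacks declared above might be used early in boot to read one of the environment variables defined earlier; the buffer size and the success check are simplified, and the exact return convention of callback_getenv is not spelled out in this header:

/* Illustrative only. */
static void example_print_booted_dev(void)
{
	static char buf[256];

	if (callback_getenv(ENV_BOOTED_DEV, buf, sizeof(buf)) >= 0)
		srm_printk("booted_dev: %s\n", buf);
}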
diff --git a/arch/alpha/include/asm/core_apecs.h b/arch/alpha/include/asm/core_apecs.h
new file mode 100644
index 000000000000..6785ff7e02bc
--- /dev/null
+++ b/arch/alpha/include/asm/core_apecs.h
@@ -0,0 +1,517 @@
1 | #ifndef __ALPHA_APECS__H__ | ||
2 | #define __ALPHA_APECS__H__ | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <asm/compiler.h> | ||
6 | |||
7 | /* | ||
8 | * APECS is the internal name for the 2107x chipset which provides | ||
9 | * memory controller and PCI access for the 21064 chip based systems. | ||
10 | * | ||
11 | * This file is based on: | ||
12 | * | ||
13 | * DECchip 21071-AA and DECchip 21072-AA Core Logic Chipsets | ||
14 | * Data Sheet | ||
15 | * | ||
16 | * EC-N0648-72 | ||
17 | * | ||
18 | * | ||
19 | * david.rusling@reo.mts.dec.com Initial Version. | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | /* | ||
24 | An AVANTI *might* be an XL, and an XL has only 27 bits of ISA address | ||
25 | that get passed through the PCI<->ISA bridge chip. So we've gotta use | ||
26 | both windows to max out the physical memory we can DMA to. Sigh... | ||
27 | |||
28 | If we try a window at 0 for 1GB as a work-around, we run into conflicts | ||
29 | with ISA/PCI bus memory which can't be relocated, like VGA aperture and | ||
30 | BIOS ROMs. So we must put the windows high enough to avoid these areas. | ||
31 | |||
32 | We put window 1 at BUS 64Mb for 64Mb, mapping physical 0 to 64Mb-1, | ||
33 | and window 2 at BUS 1Gb for 1Gb, mapping physical 0 to 1Gb-1. | ||
34 | Yes, this does map 0 to 64Mb-1 twice, but only window 1 will actually | ||
35 | be used for that range (via virt_to_bus()). | ||
36 | |||
37 | Note that we actually fudge the window 1 maximum as 48Mb instead of 64Mb, | ||
38 | to keep virt_to_bus() from returning an address in the first window, for | ||
39 | a data area that goes beyond the 64Mb first DMA window. Sigh... | ||
40 | The fudge factor MUST match with <asm/dma.h> MAX_DMA_ADDRESS, but | ||
41 | we can't just use that here, because of header file looping... :-( | ||
42 | |||
43 | Window 1 will be used for all DMA from the ISA bus; yes, that does | ||
44 | limit what memory an ISA floppy or sound card or Ethernet can touch, but | ||
45 | it's also a known limitation on other platforms as well. We use the | ||
46 | same technique that is used on INTEL platforms with similar limitation: | ||
47 | set MAX_DMA_ADDRESS and clear some pages' DMAable flags during mem_init(). | ||
48 | We trust that any ISA bus device drivers will *always* ask for DMAable | ||
49 | memory explicitly via kmalloc()/get_free_pages() flags arguments. | ||
50 | |||
51 | Note that most PCI bus devices' drivers do *not* explicitly ask for | ||
52 | DMAable memory; they count on being able to DMA to any memory they | ||
53 | get from kmalloc()/get_free_pages(). They will also use window 1 for | ||
54 | any physical memory accesses below 64Mb; the rest will be handled by | ||
55 | window 2, maxing out at 1Gb of memory. I trust this is enough... :-) | ||
56 | |||
57 | We hope that the area before the first window is large enough so that | ||
58 | there will be no overlap at the top end (64Mb). We *must* locate the | ||
59 | PCI cards' memory just below window 1, so that there's still the | ||
60 | possibility of being able to access it via SPARSE space. This is | ||
61 | important for cards such as the Matrox Millennium, whose Xserver | ||
62 | wants to access memory-mapped registers in byte and short lengths. | ||
63 | |||
64 | Note that the XL is treated differently from the AVANTI, even though | ||
65 | for most other things they are identical. It didn't seem reasonable to | ||
66 | make the AVANTI support pay for the limitations of the XL. It is true, | ||
67 | however, that an XL kernel will run on an AVANTI without problems. | ||
68 | |||
69 | %%% All of this should be obviated by the ability to route | ||
70 | everything through the iommu. | ||
71 | */ | ||
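A rough sketch of the window placement this comment describes (illustration only; the helper name and the bare arithmetic are assumptions, not the kernel's actual virt_to_bus(), and the 48MB figure is the fudged window-1 limit the text mentions):

static unsigned long apecs_example_phys_to_bus(unsigned long phys)
{
	if (phys < (48UL << 20))		/* fits under the fudged window-1 limit */
		return phys + (64UL << 20);	/* window 1: BUS 64MB..128MB -> phys 0..64MB */
	return phys + (1UL << 30);		/* window 2: BUS 1GB..2GB -> phys 0..1GB */
}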
72 | |||
73 | /* | ||
74 | * 21071-DA Control and Status registers. | ||
75 | * These are used for PCI memory access. | ||
76 | */ | ||
77 | #define APECS_IOC_DCSR (IDENT_ADDR + 0x1A0000000UL) | ||
78 | #define APECS_IOC_PEAR (IDENT_ADDR + 0x1A0000020UL) | ||
79 | #define APECS_IOC_SEAR (IDENT_ADDR + 0x1A0000040UL) | ||
80 | #define APECS_IOC_DR1 (IDENT_ADDR + 0x1A0000060UL) | ||
81 | #define APECS_IOC_DR2 (IDENT_ADDR + 0x1A0000080UL) | ||
82 | #define APECS_IOC_DR3 (IDENT_ADDR + 0x1A00000A0UL) | ||
83 | |||
84 | #define APECS_IOC_TB1R (IDENT_ADDR + 0x1A00000C0UL) | ||
85 | #define APECS_IOC_TB2R (IDENT_ADDR + 0x1A00000E0UL) | ||
86 | |||
87 | #define APECS_IOC_PB1R (IDENT_ADDR + 0x1A0000100UL) | ||
88 | #define APECS_IOC_PB2R (IDENT_ADDR + 0x1A0000120UL) | ||
89 | |||
90 | #define APECS_IOC_PM1R (IDENT_ADDR + 0x1A0000140UL) | ||
91 | #define APECS_IOC_PM2R (IDENT_ADDR + 0x1A0000160UL) | ||
92 | |||
93 | #define APECS_IOC_HAXR0 (IDENT_ADDR + 0x1A0000180UL) | ||
94 | #define APECS_IOC_HAXR1 (IDENT_ADDR + 0x1A00001A0UL) | ||
95 | #define APECS_IOC_HAXR2 (IDENT_ADDR + 0x1A00001C0UL) | ||
96 | |||
97 | #define APECS_IOC_PMLT (IDENT_ADDR + 0x1A00001E0UL) | ||
98 | |||
99 | #define APECS_IOC_TLBTAG0 (IDENT_ADDR + 0x1A0000200UL) | ||
100 | #define APECS_IOC_TLBTAG1 (IDENT_ADDR + 0x1A0000220UL) | ||
101 | #define APECS_IOC_TLBTAG2 (IDENT_ADDR + 0x1A0000240UL) | ||
102 | #define APECS_IOC_TLBTAG3 (IDENT_ADDR + 0x1A0000260UL) | ||
103 | #define APECS_IOC_TLBTAG4 (IDENT_ADDR + 0x1A0000280UL) | ||
104 | #define APECS_IOC_TLBTAG5 (IDENT_ADDR + 0x1A00002A0UL) | ||
105 | #define APECS_IOC_TLBTAG6 (IDENT_ADDR + 0x1A00002C0UL) | ||
106 | #define APECS_IOC_TLBTAG7 (IDENT_ADDR + 0x1A00002E0UL) | ||
107 | |||
108 | #define APECS_IOC_TLBDATA0 (IDENT_ADDR + 0x1A0000300UL) | ||
109 | #define APECS_IOC_TLBDATA1 (IDENT_ADDR + 0x1A0000320UL) | ||
110 | #define APECS_IOC_TLBDATA2 (IDENT_ADDR + 0x1A0000340UL) | ||
111 | #define APECS_IOC_TLBDATA3 (IDENT_ADDR + 0x1A0000360UL) | ||
112 | #define APECS_IOC_TLBDATA4 (IDENT_ADDR + 0x1A0000380UL) | ||
113 | #define APECS_IOC_TLBDATA5 (IDENT_ADDR + 0x1A00003A0UL) | ||
114 | #define APECS_IOC_TLBDATA6 (IDENT_ADDR + 0x1A00003C0UL) | ||
115 | #define APECS_IOC_TLBDATA7 (IDENT_ADDR + 0x1A00003E0UL) | ||
116 | |||
117 | #define APECS_IOC_TBIA (IDENT_ADDR + 0x1A0000400UL) | ||
118 | |||
119 | |||
120 | /* | ||
121 | * 21071-CA Control and Status registers. | ||
122 | * These are used to program memory timing, | ||
123 | * configure memory and initialise the B-Cache. | ||
124 | */ | ||
125 | #define APECS_MEM_GCR (IDENT_ADDR + 0x180000000UL) | ||
126 | #define APECS_MEM_EDSR (IDENT_ADDR + 0x180000040UL) | ||
127 | #define APECS_MEM_TAR (IDENT_ADDR + 0x180000060UL) | ||
128 | #define APECS_MEM_ELAR (IDENT_ADDR + 0x180000080UL) | ||
129 | #define APECS_MEM_EHAR (IDENT_ADDR + 0x1800000a0UL) | ||
130 | #define APECS_MEM_SFT_RST (IDENT_ADDR + 0x1800000c0UL) | ||
131 | #define APECS_MEM_LDxLAR (IDENT_ADDR + 0x1800000e0UL) | ||
132 | #define APECS_MEM_LDxHAR (IDENT_ADDR + 0x180000100UL) | ||
133 | #define APECS_MEM_GTR (IDENT_ADDR + 0x180000200UL) | ||
134 | #define APECS_MEM_RTR (IDENT_ADDR + 0x180000220UL) | ||
135 | #define APECS_MEM_VFPR (IDENT_ADDR + 0x180000240UL) | ||
136 | #define APECS_MEM_PDLDR (IDENT_ADDR + 0x180000260UL) | ||
137 | #define APECS_MEM_PDhDR (IDENT_ADDR + 0x180000280UL) | ||
138 | |||
139 | /* Bank x Base Address Register */ | ||
140 | #define APECS_MEM_B0BAR (IDENT_ADDR + 0x180000800UL) | ||
141 | #define APECS_MEM_B1BAR (IDENT_ADDR + 0x180000820UL) | ||
142 | #define APECS_MEM_B2BAR (IDENT_ADDR + 0x180000840UL) | ||
143 | #define APECS_MEM_B3BAR (IDENT_ADDR + 0x180000860UL) | ||
144 | #define APECS_MEM_B4BAR (IDENT_ADDR + 0x180000880UL) | ||
145 | #define APECS_MEM_B5BAR (IDENT_ADDR + 0x1800008A0UL) | ||
146 | #define APECS_MEM_B6BAR (IDENT_ADDR + 0x1800008C0UL) | ||
147 | #define APECS_MEM_B7BAR (IDENT_ADDR + 0x1800008E0UL) | ||
148 | #define APECS_MEM_B8BAR (IDENT_ADDR + 0x180000900UL) | ||
149 | |||
150 | /* Bank x Configuration Register */ | ||
151 | #define APECS_MEM_B0BCR (IDENT_ADDR + 0x180000A00UL) | ||
152 | #define APECS_MEM_B1BCR (IDENT_ADDR + 0x180000A20UL) | ||
153 | #define APECS_MEM_B2BCR (IDENT_ADDR + 0x180000A40UL) | ||
154 | #define APECS_MEM_B3BCR (IDENT_ADDR + 0x180000A60UL) | ||
155 | #define APECS_MEM_B4BCR (IDENT_ADDR + 0x180000A80UL) | ||
156 | #define APECS_MEM_B5BCR (IDENT_ADDR + 0x180000AA0UL) | ||
157 | #define APECS_MEM_B6BCR (IDENT_ADDR + 0x180000AC0UL) | ||
158 | #define APECS_MEM_B7BCR (IDENT_ADDR + 0x180000AE0UL) | ||
159 | #define APECS_MEM_B8BCR (IDENT_ADDR + 0x180000B00UL) | ||
160 | |||
161 | /* Bank x Timing Register A */ | ||
162 | #define APECS_MEM_B0TRA (IDENT_ADDR + 0x180000C00UL) | ||
163 | #define APECS_MEM_B1TRA (IDENT_ADDR + 0x180000C20UL) | ||
164 | #define APECS_MEM_B2TRA (IDENT_ADDR + 0x180000C40UL) | ||
165 | #define APECS_MEM_B3TRA (IDENT_ADDR + 0x180000C60UL) | ||
166 | #define APECS_MEM_B4TRA (IDENT_ADDR + 0x180000C80UL) | ||
167 | #define APECS_MEM_B5TRA (IDENT_ADDR + 0x180000CA0UL) | ||
168 | #define APECS_MEM_B6TRA (IDENT_ADDR + 0x180000CC0UL) | ||
169 | #define APECS_MEM_B7TRA (IDENT_ADDR + 0x180000CE0UL) | ||
170 | #define APECS_MEM_B8TRA (IDENT_ADDR + 0x180000D00UL) | ||
171 | |||
172 | /* Bank x Timing Register B */ | ||
173 | #define APECS_MEM_B0TRB (IDENT_ADDR + 0x180000E00UL) | ||
174 | #define APECS_MEM_B1TRB (IDENT_ADDR + 0x180000E20UL) | ||
175 | #define APECS_MEM_B2TRB (IDENT_ADDR + 0x180000E40UL) | ||
176 | #define APECS_MEM_B3TRB (IDENT_ADDR + 0x180000E60UL) | ||
177 | #define APECS_MEM_B4TRB (IDENT_ADDR + 0x180000E80UL) | ||
178 | #define APECS_MEM_B5TRB (IDENT_ADDR + 0x180000EA0UL) | ||
179 | #define APECS_MEM_B6TRB (IDENT_ADDR + 0x180000EC0UL) | ||
180 | #define APECS_MEM_B7TRB (IDENT_ADDR + 0x180000EE0UL) | ||
181 | #define APECS_MEM_B8TRB (IDENT_ADDR + 0x180000F00UL) | ||
182 | |||
183 | |||
184 | /* | ||
185 | * Memory spaces: | ||
186 | */ | ||
187 | #define APECS_IACK_SC (IDENT_ADDR + 0x1b0000000UL) | ||
188 | #define APECS_CONF (IDENT_ADDR + 0x1e0000000UL) | ||
189 | #define APECS_IO (IDENT_ADDR + 0x1c0000000UL) | ||
190 | #define APECS_SPARSE_MEM (IDENT_ADDR + 0x200000000UL) | ||
191 | #define APECS_DENSE_MEM (IDENT_ADDR + 0x300000000UL) | ||
192 | |||
193 | |||
194 | /* | ||
195 | * Bit definitions for I/O Controller status register 0: | ||
196 | */ | ||
197 | #define APECS_IOC_STAT0_CMD 0xf | ||
198 | #define APECS_IOC_STAT0_ERR (1<<4) | ||
199 | #define APECS_IOC_STAT0_LOST (1<<5) | ||
200 | #define APECS_IOC_STAT0_THIT (1<<6) | ||
201 | #define APECS_IOC_STAT0_TREF (1<<7) | ||
202 | #define APECS_IOC_STAT0_CODE_SHIFT 8 | ||
203 | #define APECS_IOC_STAT0_CODE_MASK 0x7 | ||
204 | #define APECS_IOC_STAT0_P_NBR_SHIFT 13 | ||
205 | #define APECS_IOC_STAT0_P_NBR_MASK 0x7ffff | ||
206 | |||
207 | #define APECS_HAE_ADDRESS APECS_IOC_HAXR1 | ||
208 | |||
209 | |||
210 | /* | ||
211 | * Data structure for handling APECS machine checks: | ||
212 | */ | ||
213 | |||
214 | struct el_apecs_mikasa_sysdata_mcheck | ||
215 | { | ||
216 | unsigned long coma_gcr; | ||
217 | unsigned long coma_edsr; | ||
218 | unsigned long coma_ter; | ||
219 | unsigned long coma_elar; | ||
220 | unsigned long coma_ehar; | ||
221 | unsigned long coma_ldlr; | ||
222 | unsigned long coma_ldhr; | ||
223 | unsigned long coma_base0; | ||
224 | unsigned long coma_base1; | ||
225 | unsigned long coma_base2; | ||
226 | unsigned long coma_base3; | ||
227 | unsigned long coma_cnfg0; | ||
228 | unsigned long coma_cnfg1; | ||
229 | unsigned long coma_cnfg2; | ||
230 | unsigned long coma_cnfg3; | ||
231 | unsigned long epic_dcsr; | ||
232 | unsigned long epic_pear; | ||
233 | unsigned long epic_sear; | ||
234 | unsigned long epic_tbr1; | ||
235 | unsigned long epic_tbr2; | ||
236 | unsigned long epic_pbr1; | ||
237 | unsigned long epic_pbr2; | ||
238 | unsigned long epic_pmr1; | ||
239 | unsigned long epic_pmr2; | ||
240 | unsigned long epic_harx1; | ||
241 | unsigned long epic_harx2; | ||
242 | unsigned long epic_pmlt; | ||
243 | unsigned long epic_tag0; | ||
244 | unsigned long epic_tag1; | ||
245 | unsigned long epic_tag2; | ||
246 | unsigned long epic_tag3; | ||
247 | unsigned long epic_tag4; | ||
248 | unsigned long epic_tag5; | ||
249 | unsigned long epic_tag6; | ||
250 | unsigned long epic_tag7; | ||
251 | unsigned long epic_data0; | ||
252 | unsigned long epic_data1; | ||
253 | unsigned long epic_data2; | ||
254 | unsigned long epic_data3; | ||
255 | unsigned long epic_data4; | ||
256 | unsigned long epic_data5; | ||
257 | unsigned long epic_data6; | ||
258 | unsigned long epic_data7; | ||
259 | |||
260 | unsigned long pceb_vid; | ||
261 | unsigned long pceb_did; | ||
262 | unsigned long pceb_revision; | ||
263 | unsigned long pceb_command; | ||
264 | unsigned long pceb_status; | ||
265 | unsigned long pceb_latency; | ||
266 | unsigned long pceb_control; | ||
267 | unsigned long pceb_arbcon; | ||
268 | unsigned long pceb_arbpri; | ||
269 | |||
270 | unsigned long esc_id; | ||
271 | unsigned long esc_revision; | ||
272 | unsigned long esc_int0; | ||
273 | unsigned long esc_int1; | ||
274 | unsigned long esc_elcr0; | ||
275 | unsigned long esc_elcr1; | ||
276 | unsigned long esc_last_eisa; | ||
277 | unsigned long esc_nmi_stat; | ||
278 | |||
279 | unsigned long pci_ir; | ||
280 | unsigned long pci_imr; | ||
281 | unsigned long svr_mgr; | ||
282 | }; | ||
283 | |||
284 | /* This for the normal APECS machines. */ | ||
285 | struct el_apecs_sysdata_mcheck | ||
286 | { | ||
287 | unsigned long coma_gcr; | ||
288 | unsigned long coma_edsr; | ||
289 | unsigned long coma_ter; | ||
290 | unsigned long coma_elar; | ||
291 | unsigned long coma_ehar; | ||
292 | unsigned long coma_ldlr; | ||
293 | unsigned long coma_ldhr; | ||
294 | unsigned long coma_base0; | ||
295 | unsigned long coma_base1; | ||
296 | unsigned long coma_base2; | ||
297 | unsigned long coma_cnfg0; | ||
298 | unsigned long coma_cnfg1; | ||
299 | unsigned long coma_cnfg2; | ||
300 | unsigned long epic_dcsr; | ||
301 | unsigned long epic_pear; | ||
302 | unsigned long epic_sear; | ||
303 | unsigned long epic_tbr1; | ||
304 | unsigned long epic_tbr2; | ||
305 | unsigned long epic_pbr1; | ||
306 | unsigned long epic_pbr2; | ||
307 | unsigned long epic_pmr1; | ||
308 | unsigned long epic_pmr2; | ||
309 | unsigned long epic_harx1; | ||
310 | unsigned long epic_harx2; | ||
311 | unsigned long epic_pmlt; | ||
312 | unsigned long epic_tag0; | ||
313 | unsigned long epic_tag1; | ||
314 | unsigned long epic_tag2; | ||
315 | unsigned long epic_tag3; | ||
316 | unsigned long epic_tag4; | ||
317 | unsigned long epic_tag5; | ||
318 | unsigned long epic_tag6; | ||
319 | unsigned long epic_tag7; | ||
320 | unsigned long epic_data0; | ||
321 | unsigned long epic_data1; | ||
322 | unsigned long epic_data2; | ||
323 | unsigned long epic_data3; | ||
324 | unsigned long epic_data4; | ||
325 | unsigned long epic_data5; | ||
326 | unsigned long epic_data6; | ||
327 | unsigned long epic_data7; | ||
328 | }; | ||
329 | |||
330 | struct el_apecs_procdata | ||
331 | { | ||
332 | unsigned long paltemp[32]; /* PAL TEMP REGS. */ | ||
333 | /* EV4-specific fields */ | ||
334 | unsigned long exc_addr; /* Address of excepting instruction. */ | ||
335 | unsigned long exc_sum; /* Summary of arithmetic traps. */ | ||
336 | unsigned long exc_mask; /* Exception mask (from exc_sum). */ | ||
337 | unsigned long iccsr; /* IBox hardware enables. */ | ||
338 | unsigned long pal_base; /* Base address for PALcode. */ | ||
339 | unsigned long hier; /* Hardware Interrupt Enable. */ | ||
340 | unsigned long hirr; /* Hardware Interrupt Request. */ | ||
341 | unsigned long csr; /* D-stream fault info. */ | ||
342 | unsigned long dc_stat; /* D-cache status (ECC/Parity Err). */ | ||
343 | unsigned long dc_addr; /* EV3 Phys Addr for ECC/DPERR. */ | ||
344 | unsigned long abox_ctl; /* ABox Control Register. */ | ||
345 | unsigned long biu_stat; /* BIU Status. */ | ||
346 | unsigned long biu_addr; /* BIU Address. */ | ||
347 | unsigned long biu_ctl; /* BIU Control. */ | ||
348 | unsigned long fill_syndrome;/* For correcting ECC errors. */ | ||
349 | unsigned long fill_addr; /* Cache block which was being read */ | ||
350 | unsigned long va; /* Effective VA of fault or miss. */ | ||
351 | unsigned long bc_tag; /* Backup Cache Tag Probe Results.*/ | ||
352 | }; | ||
353 | |||
354 | |||
355 | #ifdef __KERNEL__ | ||
356 | |||
357 | #ifndef __EXTERN_INLINE | ||
358 | #define __EXTERN_INLINE extern inline | ||
359 | #define __IO_EXTERN_INLINE | ||
360 | #endif | ||
361 | |||
362 | /* | ||
363 | * I/O functions: | ||
364 | * | ||
365 | * Unlike Jensen, the APECS machines have no concept of local | ||
366 | * I/O---everything goes over the PCI bus. | ||
367 | * | ||
368 | * There is plenty room for optimization here. In particular, | ||
369 | * the Alpha's insb/insw/extb/extw should be useful in moving | ||
370 | * data to/from the right byte-lanes. | ||
371 | */ | ||
372 | |||
373 | #define vip volatile int __force * | ||
374 | #define vuip volatile unsigned int __force * | ||
375 | #define vulp volatile unsigned long __force * | ||
376 | |||
377 | #define APECS_SET_HAE \ | ||
378 | do { \ | ||
379 | if (addr >= (1UL << 24)) { \ | ||
380 | unsigned long msb = addr & 0xf8000000; \ | ||
381 | addr -= msb; \ | ||
382 | set_hae(msb); \ | ||
383 | } \ | ||
384 | } while (0) | ||
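A worked trace of the split APECS_SET_HAE performs (illustration only, mirroring the macro above): an offset at or above 1&lt;&lt;24 has its bits 31:27 latched via set_hae(), and only the remainder is left to be folded into the sparse address.

static void apecs_hae_split_example(void)
{
	unsigned long addr = 0x09000000UL;	/* >= 1UL << 24, so the HAE is needed */
	unsigned long msb  = addr & 0xf8000000;	/* 0x08000000: would go to set_hae() */

	addr -= msb;				/* 0x01000000 remains for the sparse encoding */
}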
385 | |||
386 | __EXTERN_INLINE unsigned int apecs_ioread8(void __iomem *xaddr) | ||
387 | { | ||
388 | unsigned long addr = (unsigned long) xaddr; | ||
389 | unsigned long result, base_and_type; | ||
390 | |||
391 | if (addr >= APECS_DENSE_MEM) { | ||
392 | addr -= APECS_DENSE_MEM; | ||
393 | APECS_SET_HAE; | ||
394 | base_and_type = APECS_SPARSE_MEM + 0x00; | ||
395 | } else { | ||
396 | addr -= APECS_IO; | ||
397 | base_and_type = APECS_IO + 0x00; | ||
398 | } | ||
399 | |||
400 | result = *(vip) ((addr << 5) + base_and_type); | ||
401 | return __kernel_extbl(result, addr & 3); | ||
402 | } | ||
403 | |||
404 | __EXTERN_INLINE void apecs_iowrite8(u8 b, void __iomem *xaddr) | ||
405 | { | ||
406 | unsigned long addr = (unsigned long) xaddr; | ||
407 | unsigned long w, base_and_type; | ||
408 | |||
409 | if (addr >= APECS_DENSE_MEM) { | ||
410 | addr -= APECS_DENSE_MEM; | ||
411 | APECS_SET_HAE; | ||
412 | base_and_type = APECS_SPARSE_MEM + 0x00; | ||
413 | } else { | ||
414 | addr -= APECS_IO; | ||
415 | base_and_type = APECS_IO + 0x00; | ||
416 | } | ||
417 | |||
418 | w = __kernel_insbl(b, addr & 3); | ||
419 | *(vuip) ((addr << 5) + base_and_type) = w; | ||
420 | } | ||
421 | |||
422 | __EXTERN_INLINE unsigned int apecs_ioread16(void __iomem *xaddr) | ||
423 | { | ||
424 | unsigned long addr = (unsigned long) xaddr; | ||
425 | unsigned long result, base_and_type; | ||
426 | |||
427 | if (addr >= APECS_DENSE_MEM) { | ||
428 | addr -= APECS_DENSE_MEM; | ||
429 | APECS_SET_HAE; | ||
430 | base_and_type = APECS_SPARSE_MEM + 0x08; | ||
431 | } else { | ||
432 | addr -= APECS_IO; | ||
433 | base_and_type = APECS_IO + 0x08; | ||
434 | } | ||
435 | |||
436 | result = *(vip) ((addr << 5) + base_and_type); | ||
437 | return __kernel_extwl(result, addr & 3); | ||
438 | } | ||
439 | |||
440 | __EXTERN_INLINE void apecs_iowrite16(u16 b, void __iomem *xaddr) | ||
441 | { | ||
442 | unsigned long addr = (unsigned long) xaddr; | ||
443 | unsigned long w, base_and_type; | ||
444 | |||
445 | if (addr >= APECS_DENSE_MEM) { | ||
446 | addr -= APECS_DENSE_MEM; | ||
447 | APECS_SET_HAE; | ||
448 | base_and_type = APECS_SPARSE_MEM + 0x08; | ||
449 | } else { | ||
450 | addr -= APECS_IO; | ||
451 | base_and_type = APECS_IO + 0x08; | ||
452 | } | ||
453 | |||
454 | w = __kernel_inswl(b, addr & 3); | ||
455 | *(vuip) ((addr << 5) + base_and_type) = w; | ||
456 | } | ||
457 | |||
458 | __EXTERN_INLINE unsigned int apecs_ioread32(void __iomem *xaddr) | ||
459 | { | ||
460 | unsigned long addr = (unsigned long) xaddr; | ||
461 | if (addr < APECS_DENSE_MEM) | ||
462 | addr = ((addr - APECS_IO) << 5) + APECS_IO + 0x18; | ||
463 | return *(vuip)addr; | ||
464 | } | ||
465 | |||
466 | __EXTERN_INLINE void apecs_iowrite32(u32 b, void __iomem *xaddr) | ||
467 | { | ||
468 | unsigned long addr = (unsigned long) xaddr; | ||
469 | if (addr < APECS_DENSE_MEM) | ||
470 | addr = ((addr - APECS_IO) << 5) + APECS_IO + 0x18; | ||
471 | *(vuip)addr = b; | ||
472 | } | ||
473 | |||
474 | __EXTERN_INLINE void __iomem *apecs_ioportmap(unsigned long addr) | ||
475 | { | ||
476 | return (void __iomem *)(addr + APECS_IO); | ||
477 | } | ||
478 | |||
479 | __EXTERN_INLINE void __iomem *apecs_ioremap(unsigned long addr, | ||
480 | unsigned long size) | ||
481 | { | ||
482 | return (void __iomem *)(addr + APECS_DENSE_MEM); | ||
483 | } | ||
484 | |||
485 | __EXTERN_INLINE int apecs_is_ioaddr(unsigned long addr) | ||
486 | { | ||
487 | return addr >= IDENT_ADDR + 0x180000000UL; | ||
488 | } | ||
489 | |||
490 | __EXTERN_INLINE int apecs_is_mmio(const volatile void __iomem *addr) | ||
491 | { | ||
492 | return (unsigned long)addr >= APECS_DENSE_MEM; | ||
493 | } | ||
494 | |||
495 | #undef APECS_SET_HAE | ||
496 | |||
497 | #undef vip | ||
498 | #undef vuip | ||
499 | #undef vulp | ||
500 | |||
501 | #undef __IO_PREFIX | ||
502 | #define __IO_PREFIX apecs | ||
503 | #define apecs_trivial_io_bw 0 | ||
504 | #define apecs_trivial_io_lq 0 | ||
505 | #define apecs_trivial_rw_bw 2 | ||
506 | #define apecs_trivial_rw_lq 1 | ||
507 | #define apecs_trivial_iounmap 1 | ||
508 | #include <asm/io_trivial.h> | ||
509 | |||
510 | #ifdef __IO_EXTERN_INLINE | ||
511 | #undef __EXTERN_INLINE | ||
512 | #undef __IO_EXTERN_INLINE | ||
513 | #endif | ||
514 | |||
515 | #endif /* __KERNEL__ */ | ||
516 | |||
517 | #endif /* __ALPHA_APECS__H__ */ | ||
diff --git a/arch/alpha/include/asm/core_cia.h b/arch/alpha/include/asm/core_cia.h new file mode 100644 index 000000000000..9e0516c0ca27 --- /dev/null +++ b/arch/alpha/include/asm/core_cia.h | |||
@@ -0,0 +1,500 @@ | |||
1 | #ifndef __ALPHA_CIA__H__ | ||
2 | #define __ALPHA_CIA__H__ | ||
3 | |||
4 | /* Define to experiment with fitting everything into one 512MB HAE window. */ | ||
5 | #define CIA_ONE_HAE_WINDOW 1 | ||
6 | |||
7 | #include <linux/types.h> | ||
8 | #include <asm/compiler.h> | ||
9 | |||
10 | /* | ||
11 | * CIA is the internal name for the 21171 chipset which provides | ||
12 | * memory controller and PCI access for the 21164 chip based systems. | ||
13 | * Also supported here is the 21172 (CIA-2) and 21174 (PYXIS). | ||
14 | * | ||
15 | * The lineage is a bit confused, since the 21174 was reportedly started | ||
16 | * from the 21171 Pass 1 mask, and so is missing bug fixes that appear | ||
17 | * in 21171 Pass 2 and 21172, but it also contains additional features. | ||
18 | * | ||
19 | * This file is based on: | ||
20 | * | ||
21 | * DECchip 21171 Core Logic Chipset | ||
22 | * Technical Reference Manual | ||
23 | * | ||
24 | * EC-QE18B-TE | ||
25 | * | ||
26 | * david.rusling@reo.mts.dec.com Initial Version. | ||
27 | * | ||
28 | */ | ||
29 | |||
30 | /* | ||
31 | * CIA ADDRESS BIT DEFINITIONS | ||
32 | * | ||
33 | * 3333 3333 3322 2222 2222 1111 1111 11 | ||
34 | * 9876 5432 1098 7654 3210 9876 5432 1098 7654 3210 | ||
35 | * ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- | ||
36 | * 1 000 | ||
37 | * ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- | ||
38 | * | |\| | ||
39 | * | Byte Enable --+ | | ||
40 | * | Transfer Length --+ | ||
41 | * +-- IO space, not cached | ||
42 | * | ||
43 | * Byte Transfer | ||
44 | * Enable Length Transfer Byte Address | ||
45 | * adr<6:5> adr<4:3> Length Enable Adder | ||
46 | * --------------------------------------------- | ||
47 | * 00 00 Byte 1110 0x000 | ||
48 | * 01 00 Byte 1101 0x020 | ||
49 | * 10 00 Byte 1011 0x040 | ||
50 | * 11 00 Byte 0111 0x060 | ||
51 | * | ||
52 | * 00 01 Word 1100 0x008 | ||
53 | * 01 01 Word 1001 0x028 <= Not supported in this code. | ||
54 | * 10 01 Word 0011 0x048 | ||
55 | * | ||
56 | * 00 10 Tribyte 1000 0x010 | ||
57 | * 01 10 Tribyte 0001 0x030 | ||
58 | * | ||
59 | * 10 11 Longword 0000 0x058 | ||
60 | * | ||
61 | * Note that byte enables are asserted low. | ||
62 | * | ||
63 | */ | ||
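The "Byte Address Adder" column above is simply the two low fields packed back into the address; a minimal sketch (illustrative helper, not part of the header): the byte-enable selector adr&lt;6:5&gt; lands at bit 5 and the transfer-length code adr&lt;4:3&gt; at bit 3, so the byte-at-lane-2 row gives (2 &lt;&lt; 5) | (0 &lt;&lt; 3) == 0x040 and the longword row gives (2 &lt;&lt; 5) | (3 &lt;&lt; 3) == 0x058, matching the table.

static unsigned long cia_sparse_adder(unsigned int byte_en_sel, unsigned int len_code)
{
	return (byte_en_sel << 5) | (len_code << 3);	/* adr<6:5> and adr<4:3> */
}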
64 | |||
65 | #define CIA_MEM_R1_MASK 0x1fffffff /* SPARSE Mem region 1 mask is 29 bits */ | ||
66 | #define CIA_MEM_R2_MASK 0x07ffffff /* SPARSE Mem region 2 mask is 27 bits */ | ||
67 | #define CIA_MEM_R3_MASK 0x03ffffff /* SPARSE Mem region 3 mask is 26 bits */ | ||
68 | |||
69 | /* | ||
70 | * 21171-CA Control and Status Registers | ||
71 | */ | ||
72 | #define CIA_IOC_CIA_REV (IDENT_ADDR + 0x8740000080UL) | ||
73 | # define CIA_REV_MASK 0xff | ||
74 | #define CIA_IOC_PCI_LAT (IDENT_ADDR + 0x87400000C0UL) | ||
75 | #define CIA_IOC_CIA_CTRL (IDENT_ADDR + 0x8740000100UL) | ||
76 | # define CIA_CTRL_PCI_EN (1 << 0) | ||
77 | # define CIA_CTRL_PCI_LOCK_EN (1 << 1) | ||
78 | # define CIA_CTRL_PCI_LOOP_EN (1 << 2) | ||
79 | # define CIA_CTRL_FST_BB_EN (1 << 3) | ||
80 | # define CIA_CTRL_PCI_MST_EN (1 << 4) | ||
81 | # define CIA_CTRL_PCI_MEM_EN (1 << 5) | ||
82 | # define CIA_CTRL_PCI_REQ64_EN (1 << 6) | ||
83 | # define CIA_CTRL_PCI_ACK64_EN (1 << 7) | ||
84 | # define CIA_CTRL_ADDR_PE_EN (1 << 8) | ||
85 | # define CIA_CTRL_PERR_EN (1 << 9) | ||
86 | # define CIA_CTRL_FILL_ERR_EN (1 << 10) | ||
87 | # define CIA_CTRL_MCHK_ERR_EN (1 << 11) | ||
88 | # define CIA_CTRL_ECC_CHK_EN (1 << 12) | ||
89 | # define CIA_CTRL_ASSERT_IDLE_BC (1 << 13) | ||
90 | # define CIA_CTRL_COM_IDLE_BC (1 << 14) | ||
91 | # define CIA_CTRL_CSR_IOA_BYPASS (1 << 15) | ||
92 | # define CIA_CTRL_IO_FLUSHREQ_EN (1 << 16) | ||
93 | # define CIA_CTRL_CPU_FLUSHREQ_EN (1 << 17) | ||
94 | # define CIA_CTRL_ARB_CPU_EN (1 << 18) | ||
95 | # define CIA_CTRL_EN_ARB_LINK (1 << 19) | ||
96 | # define CIA_CTRL_RD_TYPE_SHIFT 20 | ||
97 | # define CIA_CTRL_RL_TYPE_SHIFT 24 | ||
98 | # define CIA_CTRL_RM_TYPE_SHIFT 28 | ||
99 | # define CIA_CTRL_EN_DMA_RD_PERF (1 << 31) | ||
100 | #define CIA_IOC_CIA_CNFG (IDENT_ADDR + 0x8740000140UL) | ||
101 | # define CIA_CNFG_IOA_BWEN (1 << 0) | ||
102 | # define CIA_CNFG_PCI_MWEN (1 << 4) | ||
103 | # define CIA_CNFG_PCI_DWEN (1 << 5) | ||
104 | # define CIA_CNFG_PCI_WLEN (1 << 8) | ||
105 | #define CIA_IOC_FLASH_CTRL (IDENT_ADDR + 0x8740000200UL) | ||
106 | #define CIA_IOC_HAE_MEM (IDENT_ADDR + 0x8740000400UL) | ||
107 | #define CIA_IOC_HAE_IO (IDENT_ADDR + 0x8740000440UL) | ||
108 | #define CIA_IOC_CFG (IDENT_ADDR + 0x8740000480UL) | ||
109 | #define CIA_IOC_CACK_EN (IDENT_ADDR + 0x8740000600UL) | ||
110 | # define CIA_CACK_EN_LOCK_EN (1 << 0) | ||
111 | # define CIA_CACK_EN_MB_EN (1 << 1) | ||
112 | # define CIA_CACK_EN_SET_DIRTY_EN (1 << 2) | ||
113 | # define CIA_CACK_EN_BC_VICTIM_EN (1 << 3) | ||
114 | |||
115 | |||
116 | /* | ||
117 | * 21171-CA Diagnostic Registers | ||
118 | */ | ||
119 | #define CIA_IOC_CIA_DIAG (IDENT_ADDR + 0x8740002000UL) | ||
120 | #define CIA_IOC_DIAG_CHECK (IDENT_ADDR + 0x8740003000UL) | ||
121 | |||
122 | /* | ||
123 | * 21171-CA Performance Monitor registers | ||
124 | */ | ||
125 | #define CIA_IOC_PERF_MONITOR (IDENT_ADDR + 0x8740004000UL) | ||
126 | #define CIA_IOC_PERF_CONTROL (IDENT_ADDR + 0x8740004040UL) | ||
127 | |||
128 | /* | ||
129 | * 21171-CA Error registers | ||
130 | */ | ||
131 | #define CIA_IOC_CPU_ERR0 (IDENT_ADDR + 0x8740008000UL) | ||
132 | #define CIA_IOC_CPU_ERR1 (IDENT_ADDR + 0x8740008040UL) | ||
133 | #define CIA_IOC_CIA_ERR (IDENT_ADDR + 0x8740008200UL) | ||
134 | # define CIA_ERR_COR_ERR (1 << 0) | ||
135 | # define CIA_ERR_UN_COR_ERR (1 << 1) | ||
136 | # define CIA_ERR_CPU_PE (1 << 2) | ||
137 | # define CIA_ERR_MEM_NEM (1 << 3) | ||
138 | # define CIA_ERR_PCI_SERR (1 << 4) | ||
139 | # define CIA_ERR_PERR (1 << 5) | ||
140 | # define CIA_ERR_PCI_ADDR_PE (1 << 6) | ||
141 | # define CIA_ERR_RCVD_MAS_ABT (1 << 7) | ||
142 | # define CIA_ERR_RCVD_TAR_ABT (1 << 8) | ||
143 | # define CIA_ERR_PA_PTE_INV (1 << 9) | ||
144 | # define CIA_ERR_FROM_WRT_ERR (1 << 10) | ||
145 | # define CIA_ERR_IOA_TIMEOUT (1 << 11) | ||
146 | # define CIA_ERR_LOST_CORR_ERR (1 << 16) | ||
147 | # define CIA_ERR_LOST_UN_CORR_ERR (1 << 17) | ||
148 | # define CIA_ERR_LOST_CPU_PE (1 << 18) | ||
149 | # define CIA_ERR_LOST_MEM_NEM (1 << 19) | ||
150 | # define CIA_ERR_LOST_PERR (1 << 21) | ||
151 | # define CIA_ERR_LOST_PCI_ADDR_PE (1 << 22) | ||
152 | # define CIA_ERR_LOST_RCVD_MAS_ABT (1 << 23) | ||
153 | # define CIA_ERR_LOST_RCVD_TAR_ABT (1 << 24) | ||
154 | # define CIA_ERR_LOST_PA_PTE_INV (1 << 25) | ||
155 | # define CIA_ERR_LOST_FROM_WRT_ERR (1 << 26) | ||
156 | # define CIA_ERR_LOST_IOA_TIMEOUT (1 << 27) | ||
157 | # define CIA_ERR_VALID (1 << 31) | ||
158 | #define CIA_IOC_CIA_STAT (IDENT_ADDR + 0x8740008240UL) | ||
159 | #define CIA_IOC_ERR_MASK (IDENT_ADDR + 0x8740008280UL) | ||
160 | #define CIA_IOC_CIA_SYN (IDENT_ADDR + 0x8740008300UL) | ||
161 | #define CIA_IOC_MEM_ERR0 (IDENT_ADDR + 0x8740008400UL) | ||
162 | #define CIA_IOC_MEM_ERR1 (IDENT_ADDR + 0x8740008440UL) | ||
163 | #define CIA_IOC_PCI_ERR0 (IDENT_ADDR + 0x8740008800UL) | ||
164 | #define CIA_IOC_PCI_ERR1 (IDENT_ADDR + 0x8740008840UL) | ||
165 | #define CIA_IOC_PCI_ERR3 (IDENT_ADDR + 0x8740008880UL) | ||
166 | |||
167 | /* | ||
168 | * 21171-CA System configuration registers | ||
169 | */ | ||
170 | #define CIA_IOC_MCR (IDENT_ADDR + 0x8750000000UL) | ||
171 | #define CIA_IOC_MBA0 (IDENT_ADDR + 0x8750000600UL) | ||
172 | #define CIA_IOC_MBA2 (IDENT_ADDR + 0x8750000680UL) | ||
173 | #define CIA_IOC_MBA4 (IDENT_ADDR + 0x8750000700UL) | ||
174 | #define CIA_IOC_MBA6 (IDENT_ADDR + 0x8750000780UL) | ||
175 | #define CIA_IOC_MBA8 (IDENT_ADDR + 0x8750000800UL) | ||
176 | #define CIA_IOC_MBAA (IDENT_ADDR + 0x8750000880UL) | ||
177 | #define CIA_IOC_MBAC (IDENT_ADDR + 0x8750000900UL) | ||
178 | #define CIA_IOC_MBAE (IDENT_ADDR + 0x8750000980UL) | ||
179 | #define CIA_IOC_TMG0 (IDENT_ADDR + 0x8750000B00UL) | ||
180 | #define CIA_IOC_TMG1 (IDENT_ADDR + 0x8750000B40UL) | ||
181 | #define CIA_IOC_TMG2 (IDENT_ADDR + 0x8750000B80UL) | ||
182 | |||
183 | /* | ||
184 | * 2117A-CA PCI Address and Scatter-Gather Registers. | ||
185 | */ | ||
186 | #define CIA_IOC_PCI_TBIA (IDENT_ADDR + 0x8760000100UL) | ||
187 | |||
188 | #define CIA_IOC_PCI_W0_BASE (IDENT_ADDR + 0x8760000400UL) | ||
189 | #define CIA_IOC_PCI_W0_MASK (IDENT_ADDR + 0x8760000440UL) | ||
190 | #define CIA_IOC_PCI_T0_BASE (IDENT_ADDR + 0x8760000480UL) | ||
191 | |||
192 | #define CIA_IOC_PCI_W1_BASE (IDENT_ADDR + 0x8760000500UL) | ||
193 | #define CIA_IOC_PCI_W1_MASK (IDENT_ADDR + 0x8760000540UL) | ||
194 | #define CIA_IOC_PCI_T1_BASE (IDENT_ADDR + 0x8760000580UL) | ||
195 | |||
196 | #define CIA_IOC_PCI_W2_BASE (IDENT_ADDR + 0x8760000600UL) | ||
197 | #define CIA_IOC_PCI_W2_MASK (IDENT_ADDR + 0x8760000640UL) | ||
198 | #define CIA_IOC_PCI_T2_BASE (IDENT_ADDR + 0x8760000680UL) | ||
199 | |||
200 | #define CIA_IOC_PCI_W3_BASE (IDENT_ADDR + 0x8760000700UL) | ||
201 | #define CIA_IOC_PCI_W3_MASK (IDENT_ADDR + 0x8760000740UL) | ||
202 | #define CIA_IOC_PCI_T3_BASE (IDENT_ADDR + 0x8760000780UL) | ||
203 | |||
204 | #define CIA_IOC_PCI_Wn_BASE(N) (IDENT_ADDR + 0x8760000400UL + (N)*0x100) | ||
205 | #define CIA_IOC_PCI_Wn_MASK(N) (IDENT_ADDR + 0x8760000440UL + (N)*0x100) | ||
206 | #define CIA_IOC_PCI_Tn_BASE(N) (IDENT_ADDR + 0x8760000480UL + (N)*0x100) | ||
207 | |||
208 | #define CIA_IOC_PCI_W_DAC (IDENT_ADDR + 0x87600007C0UL) | ||
209 | |||
210 | /* | ||
211 | * 2117A-CA Address Translation Registers. | ||
212 | */ | ||
213 | |||
214 | /* 8 tag registers, the first 4 of which are lockable. */ | ||
215 | #define CIA_IOC_TB_TAGn(n) \ | ||
216 | (IDENT_ADDR + 0x8760000800UL + (n)*0x40) | ||
217 | |||
218 | /* 4 page registers per tag register. */ | ||
219 | #define CIA_IOC_TBn_PAGEm(n,m) \ | ||
220 | (IDENT_ADDR + 0x8760001000UL + (n)*0x100 + (m)*0x40) | ||
221 | |||
222 | /* | ||
223 | * Memory spaces: | ||
224 | */ | ||
225 | #define CIA_IACK_SC (IDENT_ADDR + 0x8720000000UL) | ||
226 | #define CIA_CONF (IDENT_ADDR + 0x8700000000UL) | ||
227 | #define CIA_IO (IDENT_ADDR + 0x8580000000UL) | ||
228 | #define CIA_SPARSE_MEM (IDENT_ADDR + 0x8000000000UL) | ||
229 | #define CIA_SPARSE_MEM_R2 (IDENT_ADDR + 0x8400000000UL) | ||
230 | #define CIA_SPARSE_MEM_R3 (IDENT_ADDR + 0x8500000000UL) | ||
231 | #define CIA_DENSE_MEM (IDENT_ADDR + 0x8600000000UL) | ||
232 | #define CIA_BW_MEM (IDENT_ADDR + 0x8800000000UL) | ||
233 | #define CIA_BW_IO (IDENT_ADDR + 0x8900000000UL) | ||
234 | #define CIA_BW_CFG_0 (IDENT_ADDR + 0x8a00000000UL) | ||
235 | #define CIA_BW_CFG_1 (IDENT_ADDR + 0x8b00000000UL) | ||
236 | |||
237 | /* | ||
238 | * ALCOR's GRU ASIC registers | ||
239 | */ | ||
240 | #define GRU_INT_REQ (IDENT_ADDR + 0x8780000000UL) | ||
241 | #define GRU_INT_MASK (IDENT_ADDR + 0x8780000040UL) | ||
242 | #define GRU_INT_EDGE (IDENT_ADDR + 0x8780000080UL) | ||
243 | #define GRU_INT_HILO (IDENT_ADDR + 0x87800000C0UL) | ||
244 | #define GRU_INT_CLEAR (IDENT_ADDR + 0x8780000100UL) | ||
245 | |||
246 | #define GRU_CACHE_CNFG (IDENT_ADDR + 0x8780000200UL) | ||
247 | #define GRU_SCR (IDENT_ADDR + 0x8780000300UL) | ||
248 | #define GRU_LED (IDENT_ADDR + 0x8780000800UL) | ||
249 | #define GRU_RESET (IDENT_ADDR + 0x8780000900UL) | ||
250 | |||
251 | #define ALCOR_GRU_INT_REQ_BITS 0x800fffffUL | ||
252 | #define XLT_GRU_INT_REQ_BITS 0x80003fffUL | ||
253 | #define GRU_INT_REQ_BITS (alpha_mv.sys.cia.gru_int_req_bits+0) | ||
254 | |||
255 | /* | ||
256 | * PYXIS interrupt control registers | ||
257 | */ | ||
258 | #define PYXIS_INT_REQ (IDENT_ADDR + 0x87A0000000UL) | ||
259 | #define PYXIS_INT_MASK (IDENT_ADDR + 0x87A0000040UL) | ||
260 | #define PYXIS_INT_HILO (IDENT_ADDR + 0x87A00000C0UL) | ||
261 | #define PYXIS_INT_ROUTE (IDENT_ADDR + 0x87A0000140UL) | ||
262 | #define PYXIS_GPO (IDENT_ADDR + 0x87A0000180UL) | ||
263 | #define PYXIS_INT_CNFG (IDENT_ADDR + 0x87A00001C0UL) | ||
264 | #define PYXIS_RT_COUNT (IDENT_ADDR + 0x87A0000200UL) | ||
265 | #define PYXIS_INT_TIME (IDENT_ADDR + 0x87A0000240UL) | ||
266 | #define PYXIS_IIC_CTRL (IDENT_ADDR + 0x87A00002C0UL) | ||
267 | #define PYXIS_RESET (IDENT_ADDR + 0x8780000900UL) | ||
268 | |||
269 | /* Offset between ram physical addresses and pci64 DAC bus addresses. */ | ||
270 | #define PYXIS_DAC_OFFSET (1UL << 40) | ||
271 | |||
272 | /* | ||
273 | * Data structure for handling CIA machine checks. | ||
274 | */ | ||
275 | |||
276 | /* System-specific info. */ | ||
277 | struct el_CIA_sysdata_mcheck { | ||
278 | unsigned long cpu_err0; | ||
279 | unsigned long cpu_err1; | ||
280 | unsigned long cia_err; | ||
281 | unsigned long cia_stat; | ||
282 | unsigned long err_mask; | ||
283 | unsigned long cia_syn; | ||
284 | unsigned long mem_err0; | ||
285 | unsigned long mem_err1; | ||
286 | unsigned long pci_err0; | ||
287 | unsigned long pci_err1; | ||
288 | unsigned long pci_err2; | ||
289 | }; | ||
290 | |||
291 | |||
292 | #ifdef __KERNEL__ | ||
293 | |||
294 | #ifndef __EXTERN_INLINE | ||
295 | /* Do not touch, this should *NOT* be static inline */ | ||
296 | #define __EXTERN_INLINE extern inline | ||
297 | #define __IO_EXTERN_INLINE | ||
298 | #endif | ||
299 | |||
300 | /* | ||
301 | * I/O functions: | ||
302 | * | ||
303 | * CIA (the 2117x PCI/memory support chipset for the EV5 (21164) | ||
304 | * series of processors) uses a sparse address mapping scheme to | ||
305 | * get at PCI memory and I/O. | ||
306 | */ | ||
307 | |||
308 | /* | ||
309 | * Memory functions. 64-bit and 32-bit accesses are done through | ||
310 | * dense memory space, everything else through sparse space. | ||
311 | * | ||
312 | * For reading and writing 8 and 16 bit quantities we need to | ||
313 | * go through one of the three sparse address mapping regions | ||
314 | * and use the HAE_MEM CSR to provide some bits of the address. | ||
315 | * The following few routines use only sparse address region 1 | ||
316 | * which gives 1Gbyte of accessible space which relates exactly | ||
317 | * to the amount of PCI memory mapping *into* system address space. | ||
318 | * See p 6-17 of the specification but it looks something like this: | ||
319 | * | ||
320 | * 21164 Address: | ||
321 | * | ||
322 | * 3 2 1 | ||
323 | * 9876543210987654321098765432109876543210 | ||
324 | * 1ZZZZ0.PCI.QW.Address............BBLL | ||
325 | * | ||
326 | * ZZ = SBZ | ||
327 | * BB = Byte offset | ||
328 | * LL = Transfer length | ||
329 | * | ||
330 | * PCI Address: | ||
331 | * | ||
332 | * 3 2 1 | ||
333 | * 10987654321098765432109876543210 | ||
334 | * HHH....PCI.QW.Address........ 00 | ||
335 | * | ||
336 | * HHH = 31:29 HAE_MEM CSR | ||
337 | * | ||
338 | */ | ||
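A minimal sketch of the address formation the accessors below perform (illustration only; the helper name is an assumption): an 8-bit access to sparse-region-1 offset `off` turns into a 32-bit load or store at ((off &amp; CIA_MEM_R1_MASK) &lt;&lt; 5) + CIA_SPARSE_MEM plus the length code (0x00 byte, 0x08 word, 0x18 longword), with the wanted byte then taken from lane (off &amp; 3).

static unsigned long cia_sparse_byte_addr(unsigned long off)
{
	/* region-1 mask, quadword-address shift, byte-length code 0x00 */
	return ((off & CIA_MEM_R1_MASK) << 5) + CIA_SPARSE_MEM + 0x00;
}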
339 | |||
340 | #define vip volatile int __force * | ||
341 | #define vuip volatile unsigned int __force * | ||
342 | #define vulp volatile unsigned long __force * | ||
343 | |||
344 | __EXTERN_INLINE unsigned int cia_ioread8(void __iomem *xaddr) | ||
345 | { | ||
346 | unsigned long addr = (unsigned long) xaddr; | ||
347 | unsigned long result, base_and_type; | ||
348 | |||
349 | if (addr >= CIA_DENSE_MEM) | ||
350 | base_and_type = CIA_SPARSE_MEM + 0x00; | ||
351 | else | ||
352 | base_and_type = CIA_IO + 0x00; | ||
353 | |||
354 | /* We can use CIA_MEM_R1_MASK for io ports too, since it is large | ||
355 | enough to cover all io ports, and smaller than CIA_IO. */ | ||
356 | addr &= CIA_MEM_R1_MASK; | ||
357 | result = *(vip) ((addr << 5) + base_and_type); | ||
358 | return __kernel_extbl(result, addr & 3); | ||
359 | } | ||
360 | |||
361 | __EXTERN_INLINE void cia_iowrite8(u8 b, void __iomem *xaddr) | ||
362 | { | ||
363 | unsigned long addr = (unsigned long) xaddr; | ||
364 | unsigned long w, base_and_type; | ||
365 | |||
366 | if (addr >= CIA_DENSE_MEM) | ||
367 | base_and_type = CIA_SPARSE_MEM + 0x00; | ||
368 | else | ||
369 | base_and_type = CIA_IO + 0x00; | ||
370 | |||
371 | addr &= CIA_MEM_R1_MASK; | ||
372 | w = __kernel_insbl(b, addr & 3); | ||
373 | *(vuip) ((addr << 5) + base_and_type) = w; | ||
374 | } | ||
375 | |||
376 | __EXTERN_INLINE unsigned int cia_ioread16(void __iomem *xaddr) | ||
377 | { | ||
378 | unsigned long addr = (unsigned long) xaddr; | ||
379 | unsigned long result, base_and_type; | ||
380 | |||
381 | if (addr >= CIA_DENSE_MEM) | ||
382 | base_and_type = CIA_SPARSE_MEM + 0x08; | ||
383 | else | ||
384 | base_and_type = CIA_IO + 0x08; | ||
385 | |||
386 | addr &= CIA_MEM_R1_MASK; | ||
387 | result = *(vip) ((addr << 5) + base_and_type); | ||
388 | return __kernel_extwl(result, addr & 3); | ||
389 | } | ||
390 | |||
391 | __EXTERN_INLINE void cia_iowrite16(u16 b, void __iomem *xaddr) | ||
392 | { | ||
393 | unsigned long addr = (unsigned long) xaddr; | ||
394 | unsigned long w, base_and_type; | ||
395 | |||
396 | if (addr >= CIA_DENSE_MEM) | ||
397 | base_and_type = CIA_SPARSE_MEM + 0x08; | ||
398 | else | ||
399 | base_and_type = CIA_IO + 0x08; | ||
400 | |||
401 | addr &= CIA_MEM_R1_MASK; | ||
402 | w = __kernel_inswl(b, addr & 3); | ||
403 | *(vuip) ((addr << 5) + base_and_type) = w; | ||
404 | } | ||
405 | |||
406 | __EXTERN_INLINE unsigned int cia_ioread32(void __iomem *xaddr) | ||
407 | { | ||
408 | unsigned long addr = (unsigned long) xaddr; | ||
409 | if (addr < CIA_DENSE_MEM) | ||
410 | addr = ((addr - CIA_IO) << 5) + CIA_IO + 0x18; | ||
411 | return *(vuip)addr; | ||
412 | } | ||
413 | |||
414 | __EXTERN_INLINE void cia_iowrite32(u32 b, void __iomem *xaddr) | ||
415 | { | ||
416 | unsigned long addr = (unsigned long) xaddr; | ||
417 | if (addr < CIA_DENSE_MEM) | ||
418 | addr = ((addr - CIA_IO) << 5) + CIA_IO + 0x18; | ||
419 | *(vuip)addr = b; | ||
420 | } | ||
421 | |||
422 | __EXTERN_INLINE void __iomem *cia_ioportmap(unsigned long addr) | ||
423 | { | ||
424 | return (void __iomem *)(addr + CIA_IO); | ||
425 | } | ||
426 | |||
427 | __EXTERN_INLINE void __iomem *cia_ioremap(unsigned long addr, | ||
428 | unsigned long size) | ||
429 | { | ||
430 | return (void __iomem *)(addr + CIA_DENSE_MEM); | ||
431 | } | ||
432 | |||
433 | __EXTERN_INLINE int cia_is_ioaddr(unsigned long addr) | ||
434 | { | ||
435 | return addr >= IDENT_ADDR + 0x8000000000UL; | ||
436 | } | ||
437 | |||
438 | __EXTERN_INLINE int cia_is_mmio(const volatile void __iomem *addr) | ||
439 | { | ||
440 | return (unsigned long)addr >= CIA_DENSE_MEM; | ||
441 | } | ||
442 | |||
443 | __EXTERN_INLINE void __iomem *cia_bwx_ioportmap(unsigned long addr) | ||
444 | { | ||
445 | return (void __iomem *)(addr + CIA_BW_IO); | ||
446 | } | ||
447 | |||
448 | __EXTERN_INLINE void __iomem *cia_bwx_ioremap(unsigned long addr, | ||
449 | unsigned long size) | ||
450 | { | ||
451 | return (void __iomem *)(addr + CIA_BW_MEM); | ||
452 | } | ||
453 | |||
454 | __EXTERN_INLINE int cia_bwx_is_ioaddr(unsigned long addr) | ||
455 | { | ||
456 | return addr >= IDENT_ADDR + 0x8000000000UL; | ||
457 | } | ||
458 | |||
459 | __EXTERN_INLINE int cia_bwx_is_mmio(const volatile void __iomem *addr) | ||
460 | { | ||
461 | return (unsigned long)addr < CIA_BW_IO; | ||
462 | } | ||
463 | |||
464 | #undef vip | ||
465 | #undef vuip | ||
466 | #undef vulp | ||
467 | |||
468 | #undef __IO_PREFIX | ||
469 | #define __IO_PREFIX cia | ||
470 | #define cia_trivial_rw_bw 2 | ||
471 | #define cia_trivial_rw_lq 1 | ||
472 | #define cia_trivial_io_bw 0 | ||
473 | #define cia_trivial_io_lq 0 | ||
474 | #define cia_trivial_iounmap 1 | ||
475 | #include <asm/io_trivial.h> | ||
476 | |||
477 | #undef __IO_PREFIX | ||
478 | #define __IO_PREFIX cia_bwx | ||
479 | #define cia_bwx_trivial_rw_bw 1 | ||
480 | #define cia_bwx_trivial_rw_lq 1 | ||
481 | #define cia_bwx_trivial_io_bw 1 | ||
482 | #define cia_bwx_trivial_io_lq 1 | ||
483 | #define cia_bwx_trivial_iounmap 1 | ||
484 | #include <asm/io_trivial.h> | ||
485 | |||
486 | #undef __IO_PREFIX | ||
487 | #ifdef CONFIG_ALPHA_PYXIS | ||
488 | #define __IO_PREFIX cia_bwx | ||
489 | #else | ||
490 | #define __IO_PREFIX cia | ||
491 | #endif | ||
492 | |||
493 | #ifdef __IO_EXTERN_INLINE | ||
494 | #undef __EXTERN_INLINE | ||
495 | #undef __IO_EXTERN_INLINE | ||
496 | #endif | ||
497 | |||
498 | #endif /* __KERNEL__ */ | ||
499 | |||
500 | #endif /* __ALPHA_CIA__H__ */ | ||
diff --git a/arch/alpha/include/asm/core_irongate.h b/arch/alpha/include/asm/core_irongate.h new file mode 100644 index 000000000000..24b2db541501 --- /dev/null +++ b/arch/alpha/include/asm/core_irongate.h | |||
@@ -0,0 +1,232 @@ | |||
1 | #ifndef __ALPHA_IRONGATE__H__ | ||
2 | #define __ALPHA_IRONGATE__H__ | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <asm/compiler.h> | ||
6 | |||
7 | /* | ||
8 | * IRONGATE is the internal name for the AMD-751 K7 core logic chipset | ||
9 | * which provides memory controller and PCI access for NAUTILUS-based | ||
10 | * EV6 (21264) systems. | ||
11 | * | ||
12 | * This file is based on: | ||
13 | * | ||
14 | * IronGate management library, (c) 1999 Alpha Processor, Inc. | ||
15 | * Copyright (C) 1999 Alpha Processor, Inc., | ||
16 | * (David Daniel, Stig Telfer, Soohoon Lee) | ||
17 | */ | ||
18 | |||
19 | /* | ||
20 | * The 21264 supports, and internally recognizes, a 44-bit physical | ||
21 | * address space that is divided equally between memory address space | ||
22 | * and I/O address space. Memory address space resides in the lower | ||
23 | * half of the physical address space (PA[43]=0) and I/O address space | ||
24 | * resides in the upper half of the physical address space (PA[43]=1). | ||
25 | */ | ||
26 | |||
27 | /* | ||
28 | * Irongate CSR map. Some of the CSRs are 8 or 16 bits, but all access | ||
29 | * through the routines given is 32-bit. | ||
30 | * | ||
31 | * The first 0x40 bytes are standard as per the PCI spec. | ||
32 | */ | ||
33 | |||
34 | typedef volatile __u32 igcsr32; | ||
35 | |||
36 | typedef struct { | ||
37 | igcsr32 dev_vendor; /* 0x00 - device ID, vendor ID */ | ||
38 | igcsr32 stat_cmd; /* 0x04 - status, command */ | ||
39 | igcsr32 class; /* 0x08 - class code, rev ID */ | ||
40 | igcsr32 latency; /* 0x0C - header type, PCI latency */ | ||
41 | igcsr32 bar0; /* 0x10 - BAR0 - AGP */ | ||
42 | igcsr32 bar1; /* 0x14 - BAR1 - GART */ | ||
43 | igcsr32 bar2; /* 0x18 - Power Management reg block */ | ||
44 | |||
45 | igcsr32 rsrvd0[6]; /* 0x1C-0x33 reserved */ | ||
46 | |||
47 | igcsr32 capptr; /* 0x34 - Capabilities pointer */ | ||
48 | |||
49 | igcsr32 rsrvd1[2]; /* 0x38-0x3F reserved */ | ||
50 | |||
51 | igcsr32 bacsr10; /* 0x40 - base address chip selects */ | ||
52 | igcsr32 bacsr32; /* 0x44 - base address chip selects */ | ||
53 | igcsr32 bacsr54_eccms761; /* 0x48 - 751: base addr. chip selects | ||
54 | 761: ECC, mode/status */ | ||
55 | |||
56 | igcsr32 rsrvd2[1]; /* 0x4C-0x4F reserved */ | ||
57 | |||
58 | igcsr32 drammap; /* 0x50 - address mapping control */ | ||
59 | igcsr32 dramtm; /* 0x54 - timing, driver strength */ | ||
60 | igcsr32 dramms; /* 0x58 - DRAM mode/status */ | ||
61 | |||
62 | igcsr32 rsrvd3[1]; /* 0x5C-0x5F reserved */ | ||
63 | |||
64 | igcsr32 biu0; /* 0x60 - bus interface unit */ | ||
65 | igcsr32 biusip; /* 0x64 - Serial initialisation pkt */ | ||
66 | |||
67 | igcsr32 rsrvd4[2]; /* 0x68-0x6F reserved */ | ||
68 | |||
69 | igcsr32 mro; /* 0x70 - memory request optimiser */ | ||
70 | |||
71 | igcsr32 rsrvd5[3]; /* 0x74-0x7F reserved */ | ||
72 | |||
73 | igcsr32 whami; /* 0x80 - who am I */ | ||
74 | igcsr32 pciarb; /* 0x84 - PCI arbitration control */ | ||
75 | igcsr32 pcicfg; /* 0x88 - PCI config status */ | ||
76 | |||
77 | igcsr32 rsrvd6[4]; /* 0x8C-0x9B reserved */ | ||
78 | |||
79 | igcsr32 pci_mem; /* 0x9C - PCI top of memory, | ||
80 | 761 only */ | ||
81 | |||
82 | /* AGP (bus 1) control registers */ | ||
83 | igcsr32 agpcap; /* 0xA0 - AGP Capability Identifier */ | ||
84 | igcsr32 agpstat; /* 0xA4 - AGP status register */ | ||
85 | igcsr32 agpcmd; /* 0xA8 - AGP control register */ | ||
86 | igcsr32 agpva; /* 0xAC - AGP Virtual Address Space */ | ||
87 | igcsr32 agpmode; /* 0xB0 - AGP/GART mode control */ | ||
88 | } Irongate0; | ||
89 | |||
90 | |||
91 | typedef struct { | ||
92 | |||
93 | igcsr32 dev_vendor; /* 0x00 - Device and Vendor IDs */ | ||
94 | igcsr32 stat_cmd; /* 0x04 - Status and Command regs */ | ||
95 | igcsr32 class; /* 0x08 - subclass, baseclass etc */ | ||
96 | igcsr32 htype; /* 0x0C - header type (at 0x0E) */ | ||
97 | igcsr32 rsrvd0[2]; /* 0x10-0x17 reserved */ | ||
98 | igcsr32 busnos; /* 0x18 - Primary, secondary bus nos */ | ||
99 | igcsr32 io_baselim_regs; /* 0x1C - IO base, IO lim, AGP status */ | ||
100 | igcsr32 mem_baselim; /* 0x20 - memory base, memory lim */ | ||
101 | igcsr32 pfmem_baselim; /* 0x24 - prefetchable base, lim */ | ||
102 | igcsr32 rsrvd1[2]; /* 0x28-0x2F reserved */ | ||
103 | igcsr32 io_baselim; /* 0x30 - IO base, IO limit */ | ||
104 | igcsr32 rsrvd2[2]; /* 0x34-0x3B - reserved */ | ||
105 | igcsr32 interrupt; /* 0x3C - interrupt, PCI bridge ctrl */ | ||
106 | |||
107 | } Irongate1; | ||
108 | |||
109 | extern igcsr32 *IronECC; | ||
110 | |||
111 | /* | ||
112 | * Memory spaces: | ||
113 | */ | ||
114 | |||
115 | /* Irongate is consistent with a subset of the Tsunami memory map */ | ||
116 | #ifdef USE_48_BIT_KSEG | ||
117 | #define IRONGATE_BIAS 0x80000000000UL | ||
118 | #else | ||
119 | #define IRONGATE_BIAS 0x10000000000UL | ||
120 | #endif | ||
121 | |||
122 | |||
123 | #define IRONGATE_MEM (IDENT_ADDR | IRONGATE_BIAS | 0x000000000UL) | ||
124 | #define IRONGATE_IACK_SC (IDENT_ADDR | IRONGATE_BIAS | 0x1F8000000UL) | ||
125 | #define IRONGATE_IO (IDENT_ADDR | IRONGATE_BIAS | 0x1FC000000UL) | ||
126 | #define IRONGATE_CONF (IDENT_ADDR | IRONGATE_BIAS | 0x1FE000000UL) | ||
127 | |||
128 | /* | ||
129 | * PCI Configuration space accesses are formed like so: | ||
130 | * | ||
131 | * 0x1FE << 24 | : 2 2 2 2 1 1 1 1 : 1 1 1 1 1 1 0 0 : 0 0 0 0 0 0 0 0 : | ||
132 | * : 3 2 1 0 9 8 7 6 : 5 4 3 2 1 0 9 8 : 7 6 5 4 3 2 1 0 : | ||
133 | * ---bus number--- -device-- -fun- ---register---- | ||
134 | */ | ||
135 | |||
136 | #define IGCSR(dev,fun,reg) ( IRONGATE_CONF | \ | ||
137 | ((dev)<<11) | \ | ||
138 | ((fun)<<8) | \ | ||
139 | (reg) ) | ||
140 | |||
141 | #define IRONGATE0 ((Irongate0 *) IGCSR(0, 0, 0)) | ||
142 | #define IRONGATE1 ((Irongate1 *) IGCSR(1, 0, 0)) | ||
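Usage sketch built on the definitions above (illustrative, not part of the header): because Irongate configuration space is linearly mapped, the AMD-751's own PCI header can be read by dereferencing the typed pointer, with the vendor ID in the low 16 bits and the device ID in the high 16 bits of the 32-bit word.

static inline unsigned int irongate_vendor_device_example(void)
{
	return IRONGATE0->dev_vendor;	/* 0x00: device ID / vendor ID word */
}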
143 | |||
144 | /* | ||
145 | * Data structure for handling IRONGATE machine checks: | ||
146 | * This is the standard OSF logout frame | ||
147 | */ | ||
148 | |||
149 | #define SCB_Q_SYSERR 0x620 /* OSF definitions */ | ||
150 | #define SCB_Q_PROCERR 0x630 | ||
151 | #define SCB_Q_SYSMCHK 0x660 | ||
152 | #define SCB_Q_PROCMCHK 0x670 | ||
153 | |||
154 | struct el_IRONGATE_sysdata_mcheck { | ||
155 | __u32 FrameSize; /* Bytes, including this field */ | ||
156 | __u32 FrameFlags; /* <31> = Retry, <30> = Second Error */ | ||
157 | __u32 CpuOffset; /* Offset to CPU-specific info */ | ||
158 | __u32 SystemOffset; /* Offset to system-specific info */ | ||
159 | __u32 MCHK_Code; | ||
160 | __u32 MCHK_Frame_Rev; | ||
161 | __u64 I_STAT; | ||
162 | __u64 DC_STAT; | ||
163 | __u64 C_ADDR; | ||
164 | __u64 DC1_SYNDROME; | ||
165 | __u64 DC0_SYNDROME; | ||
166 | __u64 C_STAT; | ||
167 | __u64 C_STS; | ||
168 | __u64 RESERVED0; | ||
169 | __u64 EXC_ADDR; | ||
170 | __u64 IER_CM; | ||
171 | __u64 ISUM; | ||
172 | __u64 MM_STAT; | ||
173 | __u64 PAL_BASE; | ||
174 | __u64 I_CTL; | ||
175 | __u64 PCTX; | ||
176 | }; | ||
177 | |||
178 | |||
179 | #ifdef __KERNEL__ | ||
180 | |||
181 | #ifndef __EXTERN_INLINE | ||
182 | #define __EXTERN_INLINE extern inline | ||
183 | #define __IO_EXTERN_INLINE | ||
184 | #endif | ||
185 | |||
186 | /* | ||
187 | * I/O functions: | ||
188 | * | ||
189 | * IRONGATE (AMD-751) PCI/memory support chip for the EV6 (21264) and | ||
190 | * K7 can only use linear accesses to get at PCI memory and I/O spaces. | ||
191 | */ | ||
192 | |||
193 | /* | ||
194 | * Memory functions. All accesses are done through linear space. | ||
195 | */ | ||
196 | |||
197 | __EXTERN_INLINE void __iomem *irongate_ioportmap(unsigned long addr) | ||
198 | { | ||
199 | return (void __iomem *)(addr + IRONGATE_IO); | ||
200 | } | ||
201 | |||
202 | extern void __iomem *irongate_ioremap(unsigned long addr, unsigned long size); | ||
203 | extern void irongate_iounmap(volatile void __iomem *addr); | ||
204 | |||
205 | __EXTERN_INLINE int irongate_is_ioaddr(unsigned long addr) | ||
206 | { | ||
207 | return addr >= IRONGATE_MEM; | ||
208 | } | ||
209 | |||
210 | __EXTERN_INLINE int irongate_is_mmio(const volatile void __iomem *xaddr) | ||
211 | { | ||
212 | unsigned long addr = (unsigned long)xaddr; | ||
213 | return addr < IRONGATE_IO || addr >= IRONGATE_CONF; | ||
214 | } | ||
215 | |||
216 | #undef __IO_PREFIX | ||
217 | #define __IO_PREFIX irongate | ||
218 | #define irongate_trivial_rw_bw 1 | ||
219 | #define irongate_trivial_rw_lq 1 | ||
220 | #define irongate_trivial_io_bw 1 | ||
221 | #define irongate_trivial_io_lq 1 | ||
222 | #define irongate_trivial_iounmap 0 | ||
223 | #include <asm/io_trivial.h> | ||
224 | |||
225 | #ifdef __IO_EXTERN_INLINE | ||
226 | #undef __EXTERN_INLINE | ||
227 | #undef __IO_EXTERN_INLINE | ||
228 | #endif | ||
229 | |||
230 | #endif /* __KERNEL__ */ | ||
231 | |||
232 | #endif /* __ALPHA_IRONGATE__H__ */ | ||
diff --git a/arch/alpha/include/asm/core_lca.h b/arch/alpha/include/asm/core_lca.h new file mode 100644 index 000000000000..f7cb4b460954 --- /dev/null +++ b/arch/alpha/include/asm/core_lca.h | |||
@@ -0,0 +1,361 @@ | |||
1 | #ifndef __ALPHA_LCA__H__ | ||
2 | #define __ALPHA_LCA__H__ | ||
3 | |||
4 | #include <asm/system.h> | ||
5 | #include <asm/compiler.h> | ||
6 | |||
7 | /* | ||
8 | * Low Cost Alpha (LCA) definitions (these apply to 21066 and 21068, | ||
9 | * for example). | ||
10 | * | ||
11 | * This file is based on: | ||
12 | * | ||
13 | * DECchip 21066 and DECchip 21068 Alpha AXP Microprocessors | ||
14 | * Hardware Reference Manual; Digital Equipment Corp.; May 1994; | ||
15 | * Maynard, MA; Order Number: EC-N2681-71. | ||
16 | */ | ||
17 | |||
18 | /* | ||
19 | * NOTE: The LCA uses a Host Address Extension (HAE) register to access | ||
20 | * PCI addresses that are beyond the first 27 bits of address | ||
21 | * space. Updating the HAE requires an external cycle (and | ||
22 | * a memory barrier), which tends to be slow. Instead of updating | ||
23 | * it on each sparse memory access, we keep the current HAE value | ||
24 | * cached in variable cache_hae. Only if the cached HAE differs | ||
25 | * from the desired HAE value do we actually update the HAE register. | ||
26 | * The HAE register is preserved by the interrupt handler entry/exit | ||
27 | * code, so this scheme works even in the presence of interrupts. | ||
28 | * | ||
29 | * Dense memory space doesn't require the HAE, but is restricted to | ||
30 | * aligned 32 and 64 bit accesses. Special Cycle and Interrupt | ||
31 | * Acknowledge cycles may also require the use of the HAE. The LCA | ||
32 | * limits I/O address space to the bottom 24 bits of address space, | ||
33 | * but this easily covers the 16 bit ISA I/O address space. | ||
34 | */ | ||
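A self-contained sketch of the cached-HAE scheme this note describes (the variable and helper names are hypothetical stand-ins for the real implementation): the slow external cycle is only paid when the wanted HAE value actually changes.

static unsigned long lca_cached_hae;

static void lca_maybe_update_hae(volatile unsigned long *hae_csr, unsigned long wanted)
{
	if (wanted != lca_cached_hae) {	/* only pay the external cycle on a change */
		lca_cached_hae = wanted;
		*hae_csr = wanted;	/* real code follows this with a memory barrier */
	}
}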
35 | |||
36 | /* | ||
37 | * NOTE 2! The memory operations do not set any memory barriers, as | ||
38 | * it's not needed for cases like a frame buffer that is essentially | ||
39 | * memory-like. You need to do them by hand if the operations depend | ||
40 | * on ordering. | ||
41 | * | ||
42 | * Similarly, the port I/O operations do a "mb" only after a write | ||
43 | * operation: if an mb is needed before (as in the case of doing | ||
44 | * memory mapped I/O first, and then a port I/O operation to the same | ||
45 | * device), it needs to be done by hand. | ||
46 | * | ||
47 | * After the above has bitten me 100 times, I'll give up and just do | ||
48 | * the mb all the time, but right now I'm hoping this will work out. | ||
49 | * Avoiding mb's may potentially be a noticeable speed improvement, | ||
50 | * but I can't honestly say I've tested it. | ||
51 | * | ||
52 | * Handling interrupts that need to do mb's to synchronize to | ||
53 | * non-interrupts is another fun race area. Don't do it (because if | ||
54 | * you do, I'll have to do *everything* with interrupts disabled, | ||
55 | * ugh). | ||
56 | */ | ||
57 | |||
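A small sketch of the ordering rule above (names and register layout are illustrative; writel/outb/mb are the standard kernel primitives): a memory-like MMIO write followed by port I/O to the same device needs an explicit barrier in between, since the MMIO write itself does not imply one.

#include <linux/types.h>
#include <asm/io.h>

static void mmio_then_port_example(void __iomem *fb_reg, unsigned long port,
				   u32 pixel, u8 cmd)
{
	writel(pixel, fb_reg);	/* memory-like write: no barrier implied */
	mb();			/* must be done by hand before the port access */
	outb(cmd, port);	/* port write: a barrier follows it, none precedes it */
}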
58 | /* | ||
59 | * Memory Controller registers: | ||
60 | */ | ||
61 | #define LCA_MEM_BCR0 (IDENT_ADDR + 0x120000000UL) | ||
62 | #define LCA_MEM_BCR1 (IDENT_ADDR + 0x120000008UL) | ||
63 | #define LCA_MEM_BCR2 (IDENT_ADDR + 0x120000010UL) | ||
64 | #define LCA_MEM_BCR3 (IDENT_ADDR + 0x120000018UL) | ||
65 | #define LCA_MEM_BMR0 (IDENT_ADDR + 0x120000020UL) | ||
66 | #define LCA_MEM_BMR1 (IDENT_ADDR + 0x120000028UL) | ||
67 | #define LCA_MEM_BMR2 (IDENT_ADDR + 0x120000030UL) | ||
68 | #define LCA_MEM_BMR3 (IDENT_ADDR + 0x120000038UL) | ||
69 | #define LCA_MEM_BTR0 (IDENT_ADDR + 0x120000040UL) | ||
70 | #define LCA_MEM_BTR1 (IDENT_ADDR + 0x120000048UL) | ||
71 | #define LCA_MEM_BTR2 (IDENT_ADDR + 0x120000050UL) | ||
72 | #define LCA_MEM_BTR3 (IDENT_ADDR + 0x120000058UL) | ||
73 | #define LCA_MEM_GTR (IDENT_ADDR + 0x120000060UL) | ||
74 | #define LCA_MEM_ESR (IDENT_ADDR + 0x120000068UL) | ||
75 | #define LCA_MEM_EAR (IDENT_ADDR + 0x120000070UL) | ||
76 | #define LCA_MEM_CAR (IDENT_ADDR + 0x120000078UL) | ||
77 | #define LCA_MEM_VGR (IDENT_ADDR + 0x120000080UL) | ||
78 | #define LCA_MEM_PLM (IDENT_ADDR + 0x120000088UL) | ||
79 | #define LCA_MEM_FOR (IDENT_ADDR + 0x120000090UL) | ||
80 | |||
81 | /* | ||
82 | * I/O Controller registers: | ||
83 | */ | ||
84 | #define LCA_IOC_HAE (IDENT_ADDR + 0x180000000UL) | ||
85 | #define LCA_IOC_CONF (IDENT_ADDR + 0x180000020UL) | ||
86 | #define LCA_IOC_STAT0 (IDENT_ADDR + 0x180000040UL) | ||
87 | #define LCA_IOC_STAT1 (IDENT_ADDR + 0x180000060UL) | ||
88 | #define LCA_IOC_TBIA (IDENT_ADDR + 0x180000080UL) | ||
89 | #define LCA_IOC_TB_ENA (IDENT_ADDR + 0x1800000a0UL) | ||
90 | #define LCA_IOC_SFT_RST (IDENT_ADDR + 0x1800000c0UL) | ||
91 | #define LCA_IOC_PAR_DIS (IDENT_ADDR + 0x1800000e0UL) | ||
92 | #define LCA_IOC_W_BASE0 (IDENT_ADDR + 0x180000100UL) | ||
93 | #define LCA_IOC_W_BASE1 (IDENT_ADDR + 0x180000120UL) | ||
94 | #define LCA_IOC_W_MASK0 (IDENT_ADDR + 0x180000140UL) | ||
95 | #define LCA_IOC_W_MASK1 (IDENT_ADDR + 0x180000160UL) | ||
96 | #define LCA_IOC_T_BASE0 (IDENT_ADDR + 0x180000180UL) | ||
97 | #define LCA_IOC_T_BASE1 (IDENT_ADDR + 0x1800001a0UL) | ||
98 | #define LCA_IOC_TB_TAG0 (IDENT_ADDR + 0x188000000UL) | ||
99 | #define LCA_IOC_TB_TAG1 (IDENT_ADDR + 0x188000020UL) | ||
100 | #define LCA_IOC_TB_TAG2 (IDENT_ADDR + 0x188000040UL) | ||
101 | #define LCA_IOC_TB_TAG3 (IDENT_ADDR + 0x188000060UL) | ||
102 | #define LCA_IOC_TB_TAG4 (IDENT_ADDR + 0x188000070UL) | ||
103 | #define LCA_IOC_TB_TAG5 (IDENT_ADDR + 0x1880000a0UL) | ||
104 | #define LCA_IOC_TB_TAG6 (IDENT_ADDR + 0x1880000c0UL) | ||
105 | #define LCA_IOC_TB_TAG7 (IDENT_ADDR + 0x1880000e0UL) | ||
106 | |||
107 | /* | ||
108 | * Memory spaces: | ||
109 | */ | ||
110 | #define LCA_IACK_SC (IDENT_ADDR + 0x1a0000000UL) | ||
111 | #define LCA_CONF (IDENT_ADDR + 0x1e0000000UL) | ||
112 | #define LCA_IO (IDENT_ADDR + 0x1c0000000UL) | ||
113 | #define LCA_SPARSE_MEM (IDENT_ADDR + 0x200000000UL) | ||
114 | #define LCA_DENSE_MEM (IDENT_ADDR + 0x300000000UL) | ||
115 | |||
116 | /* | ||
117 | * Bit definitions for I/O Controller status register 0: | ||
118 | */ | ||
119 | #define LCA_IOC_STAT0_CMD 0xf | ||
120 | #define LCA_IOC_STAT0_ERR (1<<4) | ||
121 | #define LCA_IOC_STAT0_LOST (1<<5) | ||
122 | #define LCA_IOC_STAT0_THIT (1<<6) | ||
123 | #define LCA_IOC_STAT0_TREF (1<<7) | ||
124 | #define LCA_IOC_STAT0_CODE_SHIFT 8 | ||
125 | #define LCA_IOC_STAT0_CODE_MASK 0x7 | ||
126 | #define LCA_IOC_STAT0_P_NBR_SHIFT 13 | ||
127 | #define LCA_IOC_STAT0_P_NBR_MASK 0x7ffff | ||
128 | |||
129 | #define LCA_HAE_ADDRESS LCA_IOC_HAE | ||
130 | |||
131 | /* LCA PMR Power Management register defines */ | ||
132 | #define LCA_PMR_ADDR (IDENT_ADDR + 0x120000098UL) | ||
133 | #define LCA_PMR_PDIV 0x7 /* Primary clock divisor */ | ||
134 | #define LCA_PMR_ODIV 0x38 /* Override clock divisor */ | ||
135 | #define LCA_PMR_INTO 0x40 /* Interrupt override */ | ||
136 | #define LCA_PMR_DMAO 0x80 /* DMA override */ | ||
137 | #define LCA_PMR_OCCEB 0xffff0000L /* Override cycle counter - even bits */ | ||
138 | #define LCA_PMR_OCCOB 0xffff000000000000L /* Override cycle counter - odd bits */ | ||
139 | #define LCA_PMR_PRIMARY_MASK 0xfffffffffffffff8L | ||
140 | |||
141 | /* LCA PMR Macros */ | ||
142 | |||
143 | #define LCA_READ_PMR (*(volatile unsigned long *)LCA_PMR_ADDR) | ||
144 | #define LCA_WRITE_PMR(d) (*((volatile unsigned long *)LCA_PMR_ADDR) = (d)) | ||
145 | |||
146 | #define LCA_GET_PRIMARY(r) ((r) & LCA_PMR_PDIV) | ||
147 | #define LCA_GET_OVERRIDE(r) (((r) >> 3) & LCA_PMR_PDIV) | ||
148 | #define LCA_SET_PRIMARY_CLOCK(r, c) ((r) = (((r) & LCA_PMR_PRIMARY_MASK)|(c))) | ||
149 | |||
150 | /* LCA PMR Divisor values */ | ||
151 | #define LCA_PMR_DIV_1 0x0 | ||
152 | #define LCA_PMR_DIV_1_5 0x1 | ||
153 | #define LCA_PMR_DIV_2 0x2 | ||
154 | #define LCA_PMR_DIV_4 0x3 | ||
155 | #define LCA_PMR_DIV_8 0x4 | ||
156 | #define LCA_PMR_DIV_16 0x5 | ||
157 | #define LCA_PMR_DIV_MIN LCA_PMR_DIV_1 | ||
158 | #define LCA_PMR_DIV_MAX LCA_PMR_DIV_16 | ||
159 | |||
160 | |||
161 | /* | ||
162 | * Data structure for handling LCA machine checks. Correctable errors | ||
163 | * result in a short logout frame, uncorrectable ones in a long one. | ||
164 | */ | ||
165 | struct el_lca_mcheck_short { | ||
166 | struct el_common h; /* common logout header */ | ||
167 | unsigned long esr; /* error-status register */ | ||
168 | unsigned long ear; /* error-address register */ | ||
169 | unsigned long dc_stat; /* dcache status register */ | ||
170 | unsigned long ioc_stat0; /* I/O controller status register 0 */ | ||
171 | unsigned long ioc_stat1; /* I/O controller status register 1 */ | ||
172 | }; | ||
173 | |||
174 | struct el_lca_mcheck_long { | ||
175 | struct el_common h; /* common logout header */ | ||
176 | unsigned long pt[31]; /* PAL temps */ | ||
177 | unsigned long exc_addr; /* exception address */ | ||
178 | unsigned long pad1[3]; | ||
179 | unsigned long pal_base; /* PALcode base address */ | ||
180 | unsigned long hier; /* hw interrupt enable */ | ||
181 | unsigned long hirr; /* hw interrupt request */ | ||
182 | unsigned long mm_csr; /* MMU control & status */ | ||
183 | unsigned long dc_stat; /* data cache status */ | ||
184 | unsigned long dc_addr; /* data cache addr register */ | ||
185 | unsigned long abox_ctl; /* address box control register */ | ||
186 | unsigned long esr; /* error status register */ | ||
187 | unsigned long ear; /* error address register */ | ||
188 | unsigned long car; /* cache control register */ | ||
189 | unsigned long ioc_stat0; /* I/O controller status register 0 */ | ||
190 | unsigned long ioc_stat1; /* I/O controller status register 1 */ | ||
191 | unsigned long va; /* virtual address register */ | ||
192 | }; | ||
193 | |||
194 | union el_lca { | ||
195 | struct el_common * c; | ||
196 | struct el_lca_mcheck_long * l; | ||
197 | struct el_lca_mcheck_short * s; | ||
198 | }; | ||
199 | |||
200 | #ifdef __KERNEL__ | ||
201 | |||
202 | #ifndef __EXTERN_INLINE | ||
203 | #define __EXTERN_INLINE extern inline | ||
204 | #define __IO_EXTERN_INLINE | ||
205 | #endif | ||
206 | |||
207 | /* | ||
208 | * I/O functions: | ||
209 | * | ||
210 | * Unlike Jensen, the Noname machines have no concept of local | ||
211 | * I/O---everything goes over the PCI bus. | ||
212 | * | ||
213 | * There is plenty of room for optimization here. In particular, | ||
214 | * the Alpha's insb/insw/extb/extw should be useful in moving | ||
215 | * data to/from the right byte-lanes. | ||
216 | */ | ||
217 | |||
218 | #define vip volatile int __force * | ||
219 | #define vuip volatile unsigned int __force * | ||
220 | #define vulp volatile unsigned long __force * | ||
221 | |||
222 | #define LCA_SET_HAE \ | ||
223 | do { \ | ||
224 | if (addr >= (1UL << 24)) { \ | ||
225 | unsigned long msb = addr & 0xf8000000; \ | ||
226 | addr -= msb; \ | ||
227 | set_hae(msb); \ | ||
228 | } \ | ||
229 | } while (0) | ||
230 | |||
231 | |||
232 | __EXTERN_INLINE unsigned int lca_ioread8(void __iomem *xaddr) | ||
233 | { | ||
234 | unsigned long addr = (unsigned long) xaddr; | ||
235 | unsigned long result, base_and_type; | ||
236 | |||
237 | if (addr >= LCA_DENSE_MEM) { | ||
238 | addr -= LCA_DENSE_MEM; | ||
239 | LCA_SET_HAE; | ||
240 | base_and_type = LCA_SPARSE_MEM + 0x00; | ||
241 | } else { | ||
242 | addr -= LCA_IO; | ||
243 | base_and_type = LCA_IO + 0x00; | ||
244 | } | ||
245 | |||
246 | result = *(vip) ((addr << 5) + base_and_type); | ||
247 | return __kernel_extbl(result, addr & 3); | ||
248 | } | ||
249 | |||
250 | __EXTERN_INLINE void lca_iowrite8(u8 b, void __iomem *xaddr) | ||
251 | { | ||
252 | unsigned long addr = (unsigned long) xaddr; | ||
253 | unsigned long w, base_and_type; | ||
254 | |||
255 | if (addr >= LCA_DENSE_MEM) { | ||
256 | addr -= LCA_DENSE_MEM; | ||
257 | LCA_SET_HAE; | ||
258 | base_and_type = LCA_SPARSE_MEM + 0x00; | ||
259 | } else { | ||
260 | addr -= LCA_IO; | ||
261 | base_and_type = LCA_IO + 0x00; | ||
262 | } | ||
263 | |||
264 | w = __kernel_insbl(b, addr & 3); | ||
265 | *(vuip) ((addr << 5) + base_and_type) = w; | ||
266 | } | ||
267 | |||
268 | __EXTERN_INLINE unsigned int lca_ioread16(void __iomem *xaddr) | ||
269 | { | ||
270 | unsigned long addr = (unsigned long) xaddr; | ||
271 | unsigned long result, base_and_type; | ||
272 | |||
273 | if (addr >= LCA_DENSE_MEM) { | ||
274 | addr -= LCA_DENSE_MEM; | ||
275 | LCA_SET_HAE; | ||
276 | base_and_type = LCA_SPARSE_MEM + 0x08; | ||
277 | } else { | ||
278 | addr -= LCA_IO; | ||
279 | base_and_type = LCA_IO + 0x08; | ||
280 | } | ||
281 | |||
282 | result = *(vip) ((addr << 5) + base_and_type); | ||
283 | return __kernel_extwl(result, addr & 3); | ||
284 | } | ||
285 | |||
286 | __EXTERN_INLINE void lca_iowrite16(u16 b, void __iomem *xaddr) | ||
287 | { | ||
288 | unsigned long addr = (unsigned long) xaddr; | ||
289 | unsigned long w, base_and_type; | ||
290 | |||
291 | if (addr >= LCA_DENSE_MEM) { | ||
292 | addr -= LCA_DENSE_MEM; | ||
293 | LCA_SET_HAE; | ||
294 | base_and_type = LCA_SPARSE_MEM + 0x08; | ||
295 | } else { | ||
296 | addr -= LCA_IO; | ||
297 | base_and_type = LCA_IO + 0x08; | ||
298 | } | ||
299 | |||
300 | w = __kernel_inswl(b, addr & 3); | ||
301 | *(vuip) ((addr << 5) + base_and_type) = w; | ||
302 | } | ||
303 | |||
304 | __EXTERN_INLINE unsigned int lca_ioread32(void __iomem *xaddr) | ||
305 | { | ||
306 | unsigned long addr = (unsigned long) xaddr; | ||
307 | if (addr < LCA_DENSE_MEM) | ||
308 | addr = ((addr - LCA_IO) << 5) + LCA_IO + 0x18; | ||
309 | return *(vuip)addr; | ||
310 | } | ||
311 | |||
312 | __EXTERN_INLINE void lca_iowrite32(u32 b, void __iomem *xaddr) | ||
313 | { | ||
314 | unsigned long addr = (unsigned long) xaddr; | ||
315 | if (addr < LCA_DENSE_MEM) | ||
316 | addr = ((addr - LCA_IO) << 5) + LCA_IO + 0x18; | ||
317 | *(vuip)addr = b; | ||
318 | } | ||
319 | |||
320 | __EXTERN_INLINE void __iomem *lca_ioportmap(unsigned long addr) | ||
321 | { | ||
322 | return (void __iomem *)(addr + LCA_IO); | ||
323 | } | ||
324 | |||
325 | __EXTERN_INLINE void __iomem *lca_ioremap(unsigned long addr, | ||
326 | unsigned long size) | ||
327 | { | ||
328 | return (void __iomem *)(addr + LCA_DENSE_MEM); | ||
329 | } | ||
330 | |||
331 | __EXTERN_INLINE int lca_is_ioaddr(unsigned long addr) | ||
332 | { | ||
333 | return addr >= IDENT_ADDR + 0x120000000UL; | ||
334 | } | ||
335 | |||
336 | __EXTERN_INLINE int lca_is_mmio(const volatile void __iomem *addr) | ||
337 | { | ||
338 | return (unsigned long)addr >= LCA_DENSE_MEM; | ||
339 | } | ||
340 | |||
341 | #undef vip | ||
342 | #undef vuip | ||
343 | #undef vulp | ||
344 | |||
345 | #undef __IO_PREFIX | ||
346 | #define __IO_PREFIX lca | ||
347 | #define lca_trivial_rw_bw 2 | ||
348 | #define lca_trivial_rw_lq 1 | ||
349 | #define lca_trivial_io_bw 0 | ||
350 | #define lca_trivial_io_lq 0 | ||
351 | #define lca_trivial_iounmap 1 | ||
352 | #include <asm/io_trivial.h> | ||
353 | |||
354 | #ifdef __IO_EXTERN_INLINE | ||
355 | #undef __EXTERN_INLINE | ||
356 | #undef __IO_EXTERN_INLINE | ||
357 | #endif | ||
358 | |||
359 | #endif /* __KERNEL__ */ | ||
360 | |||
361 | #endif /* __ALPHA_LCA__H__ */ | ||
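For readers unfamiliar with Alpha sparse space, here is a rough stand-alone sketch (not part of the header; the IDENT_ADDR value is assumed, and fake_set_hae is a hypothetical stand-in for the kernel's set_hae) of the address arithmetic used by LCA_SET_HAE and lca_ioread8() above: the quadword offset is shifted left by 5, the low size bits select a byte transfer, and offsets at or above 16MB are routed through the HAE register.

/* Sketch only -- mirrors the LCA sparse-space byte-read address math. */
#include <stdio.h>

#define IDENT_ADDR      0xfffffc0000000000UL   /* assumed KSEG base */
#define LCA_SPARSE_MEM  (IDENT_ADDR + 0x200000000UL)

static unsigned long fake_hae;                 /* stand-in for the HAE CSR */
static void fake_set_hae(unsigned long msb) { fake_hae = msb; }

static unsigned long lca_sparse_byte_addr(unsigned long offset)
{
        if (offset >= (1UL << 24)) {           /* beyond 16MB: use the HAE */
                unsigned long msb = offset & 0xf8000000;
                offset -= msb;
                fake_set_hae(msb);
        }
        /* quadword offset in the middle bits, +0x00 selects a byte transfer */
        return LCA_SPARSE_MEM + (offset << 5) + 0x00;
}

int main(void)
{
        unsigned long a = lca_sparse_byte_addr(0x9abcdef);
        printf("sparse byte address %#lx, HAE %#lx\n", a, fake_hae);
        return 0;
}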
diff --git a/arch/alpha/include/asm/core_marvel.h b/arch/alpha/include/asm/core_marvel.h new file mode 100644 index 000000000000..30d55fe7aaf6 --- /dev/null +++ b/arch/alpha/include/asm/core_marvel.h | |||
@@ -0,0 +1,378 @@ | |||
1 | /* | ||
2 | * Marvel systems use the IO7 I/O chip, which provides PCI/PCIX/AGP access | ||
3 | * | ||
4 | * This file is based on: | ||
5 | * | ||
6 | * Marvel / EV7 System Programmer's Manual | ||
7 | * Revision 1.00 | ||
8 | * 14 May 2001 | ||
9 | */ | ||
10 | |||
11 | #ifndef __ALPHA_MARVEL__H__ | ||
12 | #define __ALPHA_MARVEL__H__ | ||
13 | |||
14 | #include <linux/types.h> | ||
15 | #include <linux/pci.h> | ||
16 | #include <linux/spinlock.h> | ||
17 | |||
18 | #include <asm/compiler.h> | ||
19 | |||
20 | #define MARVEL_MAX_PIDS 32 /* as long as we rely on 43-bit superpage */ | ||
21 | #define MARVEL_IRQ_VEC_PE_SHIFT (10) | ||
22 | #define MARVEL_IRQ_VEC_IRQ_MASK ((1 << MARVEL_IRQ_VEC_PE_SHIFT) - 1) | ||
23 | #define MARVEL_NR_IRQS \ | ||
24 | (16 + (MARVEL_MAX_PIDS * (1 << MARVEL_IRQ_VEC_PE_SHIFT))) | ||
25 | |||
26 | /* | ||
27 | * EV7 RBOX Registers | ||
28 | */ | ||
29 | typedef struct { | ||
30 | volatile unsigned long csr __attribute__((aligned(16))); | ||
31 | } ev7_csr; | ||
32 | |||
33 | typedef struct { | ||
34 | ev7_csr RBOX_CFG; /* 0x0000 */ | ||
35 | ev7_csr RBOX_NSVC; | ||
36 | ev7_csr RBOX_EWVC; | ||
37 | ev7_csr RBOX_WHAMI; | ||
38 | ev7_csr RBOX_TCTL; /* 0x0040 */ | ||
39 | ev7_csr RBOX_INT; | ||
40 | ev7_csr RBOX_IMASK; | ||
41 | ev7_csr RBOX_IREQ; | ||
42 | ev7_csr RBOX_INTQ; /* 0x0080 */ | ||
43 | ev7_csr RBOX_INTA; | ||
44 | ev7_csr RBOX_IT; | ||
45 | ev7_csr RBOX_SCRATCH1; | ||
46 | ev7_csr RBOX_SCRATCH2; /* 0x00c0 */ | ||
47 | ev7_csr RBOX_L_ERR; | ||
48 | } ev7_csrs; | ||
49 | |||
50 | /* | ||
51 | * EV7 CSR addressing macros | ||
52 | */ | ||
53 | #define EV7_MASK40(addr) ((addr) & ((1UL << 41) - 1)) | ||
54 | #define EV7_KERN_ADDR(addr) ((void *)(IDENT_ADDR | EV7_MASK40(addr))) | ||
55 | |||
56 | #define EV7_PE_MASK 0x1ffUL /* 9 bits ( 256 + mem/io ) */ | ||
57 | #define EV7_IPE(pe) ((~((long)(pe)) & EV7_PE_MASK) << 35) | ||
58 | |||
59 | #define EV7_CSR_PHYS(pe, off) (EV7_IPE(pe) | (0x7FFCUL << 20) | (off)) | ||
60 | #define EV7_CSRS_PHYS(pe) (EV7_CSR_PHYS(pe, 0UL)) | ||
61 | |||
62 | #define EV7_CSR_KERN(pe, off) (EV7_KERN_ADDR(EV7_CSR_PHYS(pe, off))) | ||
63 | #define EV7_CSRS_KERN(pe) (EV7_KERN_ADDR(EV7_CSRS_PHYS(pe))) | ||
64 | |||
65 | #define EV7_CSR_OFFSET(name) ((unsigned long)&((ev7_csrs *)NULL)->name.csr) | ||
66 | |||
67 | /* | ||
68 | * IO7 registers | ||
69 | */ | ||
70 | typedef struct { | ||
71 | volatile unsigned long csr __attribute__((aligned(64))); | ||
72 | } io7_csr; | ||
73 | |||
74 | typedef struct { | ||
75 | /* I/O Port Control Registers */ | ||
76 | io7_csr POx_CTRL; /* 0x0000 */ | ||
77 | io7_csr POx_CACHE_CTL; | ||
78 | io7_csr POx_TIMER; | ||
79 | io7_csr POx_IO_ADR_EXT; | ||
80 | io7_csr POx_MEM_ADR_EXT; /* 0x0100 */ | ||
81 | io7_csr POx_XCAL_CTRL; | ||
82 | io7_csr rsvd1[2]; /* ?? spec doesn't show 0x180 */ | ||
83 | io7_csr POx_DM_SOURCE; /* 0x0200 */ | ||
84 | io7_csr POx_DM_DEST; | ||
85 | io7_csr POx_DM_SIZE; | ||
86 | io7_csr POx_DM_CTRL; | ||
87 | io7_csr rsvd2[4]; /* 0x0300 */ | ||
88 | |||
89 | /* AGP Control Registers -- port 3 only */ | ||
90 | io7_csr AGP_CAP_ID; /* 0x0400 */ | ||
91 | io7_csr AGP_STAT; | ||
92 | io7_csr AGP_CMD; | ||
93 | io7_csr rsvd3; | ||
94 | |||
95 | /* I/O Port Monitor Registers */ | ||
96 | io7_csr POx_MONCTL; /* 0x0500 */ | ||
97 | io7_csr POx_CTRA; | ||
98 | io7_csr POx_CTRB; | ||
99 | io7_csr POx_CTR56; | ||
100 | io7_csr POx_SCRATCH; /* 0x0600 */ | ||
101 | io7_csr POx_XTRA_A; | ||
102 | io7_csr POx_XTRA_TS; | ||
103 | io7_csr POx_XTRA_Z; | ||
104 | io7_csr rsvd4; /* 0x0700 */ | ||
105 | io7_csr POx_THRESHA; | ||
106 | io7_csr POx_THRESHB; | ||
107 | io7_csr rsvd5[33]; | ||
108 | |||
109 | /* System Address Space Window Control Registers */ | ||
110 | |||
111 | io7_csr POx_WBASE[4]; /* 0x1000 */ | ||
112 | io7_csr POx_WMASK[4]; | ||
113 | io7_csr POx_TBASE[4]; | ||
114 | io7_csr POx_SG_TBIA; | ||
115 | io7_csr POx_MSI_WBASE; | ||
116 | io7_csr rsvd6[50]; | ||
117 | |||
118 | /* I/O Port Error Registers */ | ||
119 | io7_csr POx_ERR_SUM; | ||
120 | io7_csr POx_FIRST_ERR; | ||
121 | io7_csr POx_MSK_HEI; | ||
122 | io7_csr POx_TLB_ERR; | ||
123 | io7_csr POx_SPL_COMPLT; | ||
124 | io7_csr POx_TRANS_SUM; | ||
125 | io7_csr POx_FRC_PCI_ERR; | ||
126 | io7_csr POx_MULT_ERR; | ||
127 | io7_csr rsvd7[8]; | ||
128 | |||
129 | /* I/O Port End of Interrupt Registers */ | ||
130 | io7_csr EOI_DAT; | ||
131 | io7_csr rsvd8[7]; | ||
132 | io7_csr POx_IACK_SPECIAL; | ||
133 | io7_csr rsvd9[103]; | ||
134 | } io7_ioport_csrs; | ||
135 | |||
136 | typedef struct { | ||
137 | io7_csr IO_ASIC_REV; /* 0x30.0000 */ | ||
138 | io7_csr IO_SYS_REV; | ||
139 | io7_csr SER_CHAIN3; | ||
140 | io7_csr PO7_RST1; | ||
141 | io7_csr PO7_RST2; /* 0x30.0100 */ | ||
142 | io7_csr POx_RST[4]; | ||
143 | io7_csr IO7_DWNH; | ||
144 | io7_csr IO7_MAF; | ||
145 | io7_csr IO7_MAF_TO; | ||
146 | io7_csr IO7_ACC_CLUMP; /* 0x30.0300 */ | ||
147 | io7_csr IO7_PMASK; | ||
148 | io7_csr IO7_IOMASK; | ||
149 | io7_csr IO7_UPH; | ||
150 | io7_csr IO7_UPH_TO; /* 0x30.0400 */ | ||
151 | io7_csr RBX_IREQ_OFF; | ||
152 | io7_csr RBX_INTA_OFF; | ||
153 | io7_csr INT_RTY; | ||
154 | io7_csr PO7_MONCTL; /* 0x30.0500 */ | ||
155 | io7_csr PO7_CTRA; | ||
156 | io7_csr PO7_CTRB; | ||
157 | io7_csr PO7_CTR56; | ||
158 | io7_csr PO7_SCRATCH; /* 0x30.0600 */ | ||
159 | io7_csr PO7_XTRA_A; | ||
160 | io7_csr PO7_XTRA_TS; | ||
161 | io7_csr PO7_XTRA_Z; | ||
162 | io7_csr PO7_PMASK; /* 0x30.0700 */ | ||
163 | io7_csr PO7_THRESHA; | ||
164 | io7_csr PO7_THRESHB; | ||
165 | io7_csr rsvd1[97]; | ||
166 | io7_csr PO7_ERROR_SUM; /* 0x30.2000 */ | ||
167 | io7_csr PO7_BHOLE_MASK; | ||
168 | io7_csr PO7_HEI_MSK; | ||
169 | io7_csr PO7_CRD_MSK; | ||
170 | io7_csr PO7_UNCRR_SYM; /* 0x30.2100 */ | ||
171 | io7_csr PO7_CRRCT_SYM; | ||
172 | io7_csr PO7_ERR_PKT[2]; | ||
173 | io7_csr PO7_UGBGE_SYM; /* 0x30.2200 */ | ||
174 | io7_csr rsvd2[887]; | ||
175 | io7_csr PO7_LSI_CTL[128]; /* 0x31.0000 */ | ||
176 | io7_csr rsvd3[123]; | ||
177 | io7_csr HLT_CTL; /* 0x31.3ec0 */ | ||
178 | io7_csr HPI_CTL; /* 0x31.3f00 */ | ||
179 | io7_csr CRD_CTL; | ||
180 | io7_csr STV_CTL; | ||
181 | io7_csr HEI_CTL; | ||
182 | io7_csr PO7_MSI_CTL[16]; /* 0x31.4000 */ | ||
183 | io7_csr rsvd4[240]; | ||
184 | |||
185 | /* | ||
186 | * Interrupt Diagnostic / Test | ||
187 | */ | ||
188 | struct { | ||
189 | io7_csr INT_PND; | ||
190 | io7_csr INT_CLR; | ||
191 | io7_csr INT_EOI; | ||
192 | io7_csr rsvd[29]; | ||
193 | } INT_DIAG[4]; | ||
194 | io7_csr rsvd5[125]; /* 0x31.a000 */ | ||
195 | io7_csr MISC_PND; /* 0x31.b800 */ | ||
196 | io7_csr rsvd6[31]; | ||
197 | io7_csr MSI_PND[16]; /* 0x31.c000 */ | ||
198 | io7_csr rsvd7[16]; | ||
199 | io7_csr MSI_CLR[16]; /* 0x31.c800 */ | ||
200 | } io7_port7_csrs; | ||
201 | |||
202 | /* | ||
203 | * IO7 DMA Window Base register (POx_WBASEx) | ||
204 | */ | ||
205 | #define wbase_m_ena 0x1 | ||
206 | #define wbase_m_sg 0x2 | ||
207 | #define wbase_m_dac 0x4 | ||
208 | #define wbase_m_addr 0xFFF00000 | ||
209 | union IO7_POx_WBASE { | ||
210 | struct { | ||
211 | unsigned ena : 1; /* <0> */ | ||
212 | unsigned sg : 1; /* <1> */ | ||
213 | unsigned dac : 1; /* <2> -- window 3 only */ | ||
214 | unsigned rsvd1 : 17; | ||
215 | unsigned addr : 12; /* <31:20> */ | ||
216 | unsigned rsvd2 : 32; | ||
217 | } bits; | ||
218 | unsigned as_long[2]; | ||
219 | unsigned long as_quad; | ||
220 | }; | ||
221 | |||
222 | /* | ||
223 | * IO7 IID (Interrupt IDentifier) format | ||
224 | * | ||
225 | * For level-sensitive interrupts, int_num is encoded as: | ||
226 | * | ||
227 | * bus/port slot/device INTx | ||
228 | * <7:5> <4:2> <1:0> | ||
229 | */ | ||
230 | union IO7_IID { | ||
231 | struct { | ||
232 | unsigned int_num : 9; /* <8:0> */ | ||
233 | unsigned tpu_mask : 4; /* <12:9> rsvd */ | ||
234 | unsigned msi : 1; /* 13 */ | ||
235 | unsigned ipe : 10; /* <23:14> */ | ||
236 | unsigned long rsvd : 40; | ||
237 | } bits; | ||
238 | unsigned int as_long[2]; | ||
239 | unsigned long as_quad; | ||
240 | }; | ||
241 | |||
242 | /* | ||
243 | * IO7 addressing macros | ||
244 | */ | ||
245 | #define IO7_KERN_ADDR(addr) (EV7_KERN_ADDR(addr)) | ||
246 | |||
247 | #define IO7_PORT_MASK 0x07UL /* 3 bits of port */ | ||
248 | |||
249 | #define IO7_IPE(pe) (EV7_IPE(pe)) | ||
250 | #define IO7_IPORT(port) ((~((long)(port)) & IO7_PORT_MASK) << 32) | ||
251 | |||
252 | #define IO7_HOSE(pe, port) (IO7_IPE(pe) | IO7_IPORT(port)) | ||
253 | |||
254 | #define IO7_MEM_PHYS(pe, port) (IO7_HOSE(pe, port) | 0x00000000UL) | ||
255 | #define IO7_CONF_PHYS(pe, port) (IO7_HOSE(pe, port) | 0xFE000000UL) | ||
256 | #define IO7_IO_PHYS(pe, port) (IO7_HOSE(pe, port) | 0xFF000000UL) | ||
257 | #define IO7_CSR_PHYS(pe, port, off) \ | ||
258 | (IO7_HOSE(pe, port) | 0xFF800000UL | (off)) | ||
259 | #define IO7_CSRS_PHYS(pe, port) (IO7_CSR_PHYS(pe, port, 0UL)) | ||
260 | #define IO7_PORT7_CSRS_PHYS(pe) (IO7_CSR_PHYS(pe, 7, 0x300000UL)) | ||
261 | |||
262 | #define IO7_MEM_KERN(pe, port) (IO7_KERN_ADDR(IO7_MEM_PHYS(pe, port))) | ||
263 | #define IO7_CONF_KERN(pe, port) (IO7_KERN_ADDR(IO7_CONF_PHYS(pe, port))) | ||
264 | #define IO7_IO_KERN(pe, port) (IO7_KERN_ADDR(IO7_IO_PHYS(pe, port))) | ||
265 | #define IO7_CSR_KERN(pe, port, off) (IO7_KERN_ADDR(IO7_CSR_PHYS(pe,port,off))) | ||
266 | #define IO7_CSRS_KERN(pe, port) (IO7_KERN_ADDR(IO7_CSRS_PHYS(pe, port))) | ||
267 | #define IO7_PORT7_CSRS_KERN(pe) (IO7_KERN_ADDR(IO7_PORT7_CSRS_PHYS(pe))) | ||
268 | |||
269 | #define IO7_PLL_RNGA(pll) (((pll) >> 3) & 0x7) | ||
270 | #define IO7_PLL_RNGB(pll) (((pll) >> 6) & 0x7) | ||
271 | |||
272 | #define IO7_MEM_SPACE (2UL * 1024 * 1024 * 1024) /* 2GB MEM */ | ||
273 | #define IO7_IO_SPACE (8UL * 1024 * 1024) /* 8MB I/O */ | ||
274 | |||
275 | |||
276 | /* | ||
277 | * Offset between ram physical addresses and pci64 DAC addresses | ||
278 | */ | ||
279 | #define IO7_DAC_OFFSET (1UL << 49) | ||
280 | |||
281 | /* | ||
282 | * This is needed to satisfy the IO() macro used in initializing the machvec | ||
283 | */ | ||
284 | #define MARVEL_IACK_SC \ | ||
285 | ((unsigned long) \ | ||
286 | (&(((io7_ioport_csrs *)IO7_CSRS_KERN(0, 0))->POx_IACK_SPECIAL))) | ||
287 | |||
288 | #ifdef __KERNEL__ | ||
289 | |||
290 | /* | ||
291 | * IO7 structs | ||
292 | */ | ||
293 | #define IO7_NUM_PORTS 4 | ||
294 | #define IO7_AGP_PORT 3 | ||
295 | |||
296 | struct io7_port { | ||
297 | struct io7 *io7; | ||
298 | struct pci_controller *hose; | ||
299 | |||
300 | int enabled; | ||
301 | unsigned int port; | ||
302 | io7_ioport_csrs *csrs; | ||
303 | |||
304 | unsigned long saved_wbase[4]; | ||
305 | unsigned long saved_wmask[4]; | ||
306 | unsigned long saved_tbase[4]; | ||
307 | }; | ||
308 | |||
309 | struct io7 { | ||
310 | struct io7 *next; | ||
311 | |||
312 | unsigned int pe; | ||
313 | io7_port7_csrs *csrs; | ||
314 | struct io7_port ports[IO7_NUM_PORTS]; | ||
315 | |||
316 | spinlock_t irq_lock; | ||
317 | }; | ||
318 | |||
319 | #ifndef __EXTERN_INLINE | ||
320 | # define __EXTERN_INLINE extern inline | ||
321 | # define __IO_EXTERN_INLINE | ||
322 | #endif | ||
323 | |||
324 | /* | ||
325 | * I/O functions. All access through linear space. | ||
326 | */ | ||
327 | |||
328 | /* | ||
329 | * Memory functions. All accesses through linear space. | ||
330 | */ | ||
331 | |||
332 | #define vucp volatile unsigned char __force * | ||
333 | #define vusp volatile unsigned short __force * | ||
334 | |||
335 | extern unsigned int marvel_ioread8(void __iomem *); | ||
336 | extern void marvel_iowrite8(u8 b, void __iomem *); | ||
337 | |||
338 | __EXTERN_INLINE unsigned int marvel_ioread16(void __iomem *addr) | ||
339 | { | ||
340 | return __kernel_ldwu(*(vusp)addr); | ||
341 | } | ||
342 | |||
343 | __EXTERN_INLINE void marvel_iowrite16(u16 b, void __iomem *addr) | ||
344 | { | ||
345 | __kernel_stw(b, *(vusp)addr); | ||
346 | } | ||
347 | |||
348 | extern void __iomem *marvel_ioremap(unsigned long addr, unsigned long size); | ||
349 | extern void marvel_iounmap(volatile void __iomem *addr); | ||
350 | extern void __iomem *marvel_ioportmap (unsigned long addr); | ||
351 | |||
352 | __EXTERN_INLINE int marvel_is_ioaddr(unsigned long addr) | ||
353 | { | ||
354 | return (addr >> 40) & 1; | ||
355 | } | ||
356 | |||
357 | extern int marvel_is_mmio(const volatile void __iomem *); | ||
358 | |||
359 | #undef vucp | ||
360 | #undef vusp | ||
361 | |||
362 | #undef __IO_PREFIX | ||
363 | #define __IO_PREFIX marvel | ||
364 | #define marvel_trivial_rw_bw 1 | ||
365 | #define marvel_trivial_rw_lq 1 | ||
366 | #define marvel_trivial_io_bw 0 | ||
367 | #define marvel_trivial_io_lq 1 | ||
368 | #define marvel_trivial_iounmap 0 | ||
369 | #include <asm/io_trivial.h> | ||
370 | |||
371 | #ifdef __IO_EXTERN_INLINE | ||
372 | # undef __EXTERN_INLINE | ||
373 | # undef __IO_EXTERN_INLINE | ||
374 | #endif | ||
375 | |||
376 | #endif /* __KERNEL__ */ | ||
377 | |||
378 | #endif /* __ALPHA_MARVEL__H__ */ | ||
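The EV7/IO7 addressing macros above build physical addresses by shifting the one's-complement of the PE and port numbers into the high bits. A small stand-alone sketch (constants copied from the header; the printed values are only for inspection and are not quoted from any manual) that evaluates them in user space:

/* Sketch only -- evaluates the Marvel CSR addressing macros in user space. */
#include <stdio.h>

#define EV7_PE_MASK     0x1ffUL
#define EV7_IPE(pe)     ((~((long)(pe)) & EV7_PE_MASK) << 35)

#define IO7_PORT_MASK   0x07UL
#define IO7_IPE(pe)     (EV7_IPE(pe))
#define IO7_IPORT(port) ((~((long)(port)) & IO7_PORT_MASK) << 32)
#define IO7_HOSE(pe, port)          (IO7_IPE(pe) | IO7_IPORT(port))
#define IO7_CSR_PHYS(pe, port, off) (IO7_HOSE(pe, port) | 0xFF800000UL | (off))

int main(void)
{
        /* CSR space of PE 2, port 1, register offset 0x40 */
        printf("EV7_IPE(2)             = %#lx\n", (unsigned long)EV7_IPE(2));
        printf("IO7_CSR_PHYS(2,1,0x40) = %#lx\n",
               (unsigned long)IO7_CSR_PHYS(2, 1, 0x40));
        return 0;
}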
diff --git a/arch/alpha/include/asm/core_mcpcia.h b/arch/alpha/include/asm/core_mcpcia.h new file mode 100644 index 000000000000..acf55b483472 --- /dev/null +++ b/arch/alpha/include/asm/core_mcpcia.h | |||
@@ -0,0 +1,381 @@ | |||
1 | #ifndef __ALPHA_MCPCIA__H__ | ||
2 | #define __ALPHA_MCPCIA__H__ | ||
3 | |||
4 | /* Define to experiment with fitting everything into one 128MB HAE window. | ||
5 | One window per bus, that is. */ | ||
6 | #define MCPCIA_ONE_HAE_WINDOW 1 | ||
7 | |||
8 | #include <linux/types.h> | ||
9 | #include <linux/pci.h> | ||
10 | #include <asm/compiler.h> | ||
11 | |||
12 | /* | ||
13 | * MCPCIA is the internal name for a core logic chipset which provides | ||
14 | * PCI access for the RAWHIDE family of systems. | ||
15 | * | ||
16 | * This file is based on: | ||
17 | * | ||
18 | * RAWHIDE System Programmer's Manual | ||
19 | * 16-May-96 | ||
20 | * Rev. 1.4 | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | /*------------------------------------------------------------------------** | ||
25 | ** ** | ||
26 | ** I/O procedures ** | ||
27 | ** ** | ||
28 | ** inport[b|w|t|l], outport[b|w|t|l] 8:16:24:32 IO xfers ** | ||
29 | ** inportbxt: 8 bits only ** | ||
30 | ** inport: alias of inportw ** | ||
31 | ** outport: alias of outportw ** | ||
32 | ** ** | ||
33 | ** inmem[b|w|t|l], outmem[b|w|t|l] 8:16:24:32 ISA memory xfers ** | ||
34 | ** inmembxt: 8 bits only ** | ||
35 | ** inmem: alias of inmemw ** | ||
36 | ** outmem: alias of outmemw ** | ||
37 | ** ** | ||
38 | **------------------------------------------------------------------------*/ | ||
39 | |||
40 | |||
41 | /* MCPCIA ADDRESS BIT DEFINITIONS | ||
42 | * | ||
43 | * 3333 3333 3322 2222 2222 1111 1111 11 | ||
44 | * 9876 5432 1098 7654 3210 9876 5432 1098 7654 3210 | ||
45 | * ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- | ||
46 | * 1 000 | ||
47 | * ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- | ||
48 | * | |\| | ||
49 | * | Byte Enable --+ | | ||
50 | * | Transfer Length --+ | ||
51 | * +-- IO space, not cached | ||
52 | * | ||
53 | * Byte Transfer | ||
54 | * Enable Length Transfer Byte Address | ||
55 | * adr<6:5> adr<4:3> Length Enable Adder | ||
56 | * --------------------------------------------- | ||
57 | * 00 00 Byte 1110 0x000 | ||
58 | * 01 00 Byte 1101 0x020 | ||
59 | * 10 00 Byte 1011 0x040 | ||
60 | * 11 00 Byte 0111 0x060 | ||
61 | * | ||
62 | * 00 01 Word 1100 0x008 | ||
63 | * 01 01 Word 1001 0x028 <= Not supported in this code. | ||
64 | * 10 01 Word 0011 0x048 | ||
65 | * | ||
66 | * 00 10 Tribyte 1000 0x010 | ||
67 | * 01 10 Tribyte 0001 0x030 | ||
68 | * | ||
69 | * 10 11 Longword 0000 0x058 | ||
70 | * | ||
71 | * Note that byte enables are asserted low. | ||
72 | * | ||
73 | */ | ||
74 | |||
75 | #define MCPCIA_MAX_HOSES 4 | ||
76 | |||
77 | #define MCPCIA_MID(m) ((unsigned long)(m) << 33) | ||
78 | |||
79 | /* Dodge has PCI0 and PCI1 at MID 4 and 5 respectively. | ||
80 | Durango adds PCI2 and PCI3 at MID 6 and 7 respectively. */ | ||
81 | #define MCPCIA_HOSE2MID(h) ((h) + 4) | ||
82 | |||
83 | #define MCPCIA_MEM_MASK 0x07ffffff /* SPARSE Mem region mask is 27 bits */ | ||
84 | |||
85 | /* | ||
86 | * Memory spaces: | ||
87 | */ | ||
88 | #define MCPCIA_SPARSE(m) (IDENT_ADDR + 0xf000000000UL + MCPCIA_MID(m)) | ||
89 | #define MCPCIA_DENSE(m) (IDENT_ADDR + 0xf100000000UL + MCPCIA_MID(m)) | ||
90 | #define MCPCIA_IO(m) (IDENT_ADDR + 0xf180000000UL + MCPCIA_MID(m)) | ||
91 | #define MCPCIA_CONF(m) (IDENT_ADDR + 0xf1c0000000UL + MCPCIA_MID(m)) | ||
92 | #define MCPCIA_CSR(m) (IDENT_ADDR + 0xf1e0000000UL + MCPCIA_MID(m)) | ||
93 | #define MCPCIA_IO_IACK(m) (IDENT_ADDR + 0xf1f0000000UL + MCPCIA_MID(m)) | ||
94 | #define MCPCIA_DENSE_IO(m) (IDENT_ADDR + 0xe1fc000000UL + MCPCIA_MID(m)) | ||
95 | #define MCPCIA_DENSE_CONF(m) (IDENT_ADDR + 0xe1fe000000UL + MCPCIA_MID(m)) | ||
96 | |||
97 | /* | ||
98 | * General Registers | ||
99 | */ | ||
100 | #define MCPCIA_REV(m) (MCPCIA_CSR(m) + 0x000) | ||
101 | #define MCPCIA_WHOAMI(m) (MCPCIA_CSR(m) + 0x040) | ||
102 | #define MCPCIA_PCI_LAT(m) (MCPCIA_CSR(m) + 0x080) | ||
103 | #define MCPCIA_CAP_CTRL(m) (MCPCIA_CSR(m) + 0x100) | ||
104 | #define MCPCIA_HAE_MEM(m) (MCPCIA_CSR(m) + 0x400) | ||
105 | #define MCPCIA_HAE_IO(m) (MCPCIA_CSR(m) + 0x440) | ||
106 | #define _MCPCIA_IACK_SC(m) (MCPCIA_CSR(m) + 0x480) | ||
107 | #define MCPCIA_HAE_DENSE(m) (MCPCIA_CSR(m) + 0x4C0) | ||
108 | |||
109 | /* | ||
110 | * Interrupt Control registers | ||
111 | */ | ||
112 | #define MCPCIA_INT_CTL(m) (MCPCIA_CSR(m) + 0x500) | ||
113 | #define MCPCIA_INT_REQ(m) (MCPCIA_CSR(m) + 0x540) | ||
114 | #define MCPCIA_INT_TARG(m) (MCPCIA_CSR(m) + 0x580) | ||
115 | #define MCPCIA_INT_ADR(m) (MCPCIA_CSR(m) + 0x5C0) | ||
116 | #define MCPCIA_INT_ADR_EXT(m) (MCPCIA_CSR(m) + 0x600) | ||
117 | #define MCPCIA_INT_MASK0(m) (MCPCIA_CSR(m) + 0x640) | ||
118 | #define MCPCIA_INT_MASK1(m) (MCPCIA_CSR(m) + 0x680) | ||
119 | #define MCPCIA_INT_ACK0(m) (MCPCIA_CSR(m) + 0x10003f00) | ||
120 | #define MCPCIA_INT_ACK1(m) (MCPCIA_CSR(m) + 0x10003f40) | ||
121 | |||
122 | /* | ||
123 | * Performance Monitor registers | ||
124 | */ | ||
125 | #define MCPCIA_PERF_MON(m) (MCPCIA_CSR(m) + 0x300) | ||
126 | #define MCPCIA_PERF_CONT(m) (MCPCIA_CSR(m) + 0x340) | ||
127 | |||
128 | /* | ||
129 | * Diagnostic Registers | ||
130 | */ | ||
131 | #define MCPCIA_CAP_DIAG(m) (MCPCIA_CSR(m) + 0x700) | ||
132 | #define MCPCIA_TOP_OF_MEM(m) (MCPCIA_CSR(m) + 0x7C0) | ||
133 | |||
134 | /* | ||
135 | * Error registers | ||
136 | */ | ||
137 | #define MCPCIA_MC_ERR0(m) (MCPCIA_CSR(m) + 0x800) | ||
138 | #define MCPCIA_MC_ERR1(m) (MCPCIA_CSR(m) + 0x840) | ||
139 | #define MCPCIA_CAP_ERR(m) (MCPCIA_CSR(m) + 0x880) | ||
140 | #define MCPCIA_PCI_ERR1(m) (MCPCIA_CSR(m) + 0x1040) | ||
141 | #define MCPCIA_MDPA_STAT(m) (MCPCIA_CSR(m) + 0x4000) | ||
142 | #define MCPCIA_MDPA_SYN(m) (MCPCIA_CSR(m) + 0x4040) | ||
143 | #define MCPCIA_MDPA_DIAG(m) (MCPCIA_CSR(m) + 0x4080) | ||
144 | #define MCPCIA_MDPB_STAT(m) (MCPCIA_CSR(m) + 0x8000) | ||
145 | #define MCPCIA_MDPB_SYN(m) (MCPCIA_CSR(m) + 0x8040) | ||
146 | #define MCPCIA_MDPB_DIAG(m) (MCPCIA_CSR(m) + 0x8080) | ||
147 | |||
148 | /* | ||
149 | * PCI Address Translation Registers. | ||
150 | */ | ||
151 | #define MCPCIA_SG_TBIA(m) (MCPCIA_CSR(m) + 0x1300) | ||
152 | #define MCPCIA_HBASE(m) (MCPCIA_CSR(m) + 0x1340) | ||
153 | |||
154 | #define MCPCIA_W0_BASE(m) (MCPCIA_CSR(m) + 0x1400) | ||
155 | #define MCPCIA_W0_MASK(m) (MCPCIA_CSR(m) + 0x1440) | ||
156 | #define MCPCIA_T0_BASE(m) (MCPCIA_CSR(m) + 0x1480) | ||
157 | |||
158 | #define MCPCIA_W1_BASE(m) (MCPCIA_CSR(m) + 0x1500) | ||
159 | #define MCPCIA_W1_MASK(m) (MCPCIA_CSR(m) + 0x1540) | ||
160 | #define MCPCIA_T1_BASE(m) (MCPCIA_CSR(m) + 0x1580) | ||
161 | |||
162 | #define MCPCIA_W2_BASE(m) (MCPCIA_CSR(m) + 0x1600) | ||
163 | #define MCPCIA_W2_MASK(m) (MCPCIA_CSR(m) + 0x1640) | ||
164 | #define MCPCIA_T2_BASE(m) (MCPCIA_CSR(m) + 0x1680) | ||
165 | |||
166 | #define MCPCIA_W3_BASE(m) (MCPCIA_CSR(m) + 0x1700) | ||
167 | #define MCPCIA_W3_MASK(m) (MCPCIA_CSR(m) + 0x1740) | ||
168 | #define MCPCIA_T3_BASE(m) (MCPCIA_CSR(m) + 0x1780) | ||
169 | |||
170 | /* Hack! Only works for bus 0. */ | ||
171 | |||
172 | #ifndef MCPCIA_ONE_HAE_WINDOW | ||
173 | #define MCPCIA_HAE_ADDRESS MCPCIA_HAE_MEM(4) | ||
174 | #endif | ||
175 | #define MCPCIA_IACK_SC _MCPCIA_IACK_SC(4) | ||
176 | |||
177 | /* | ||
178 | * The canonical non-remapped I/O and MEM addresses have these values | ||
179 | * subtracted out. This is arranged so that folks manipulating ISA | ||
180 | * devices can use their familiar numbers and have them map to bus 0. | ||
181 | */ | ||
182 | |||
183 | #define MCPCIA_IO_BIAS MCPCIA_IO(4) | ||
184 | #define MCPCIA_MEM_BIAS MCPCIA_DENSE(4) | ||
185 | |||
186 | /* Offset between ram physical addresses and pci64 DAC bus addresses. */ | ||
187 | #define MCPCIA_DAC_OFFSET (1UL << 40) | ||
188 | |||
189 | /* | ||
190 | * Data structure for handling MCPCIA machine checks: | ||
191 | */ | ||
192 | struct el_MCPCIA_uncorrected_frame_mcheck { | ||
193 | struct el_common header; | ||
194 | struct el_common_EV5_uncorrectable_mcheck procdata; | ||
195 | }; | ||
196 | |||
197 | |||
198 | #ifdef __KERNEL__ | ||
199 | |||
200 | #ifndef __EXTERN_INLINE | ||
201 | #define __EXTERN_INLINE extern inline | ||
202 | #define __IO_EXTERN_INLINE | ||
203 | #endif | ||
204 | |||
205 | /* | ||
206 | * I/O functions: | ||
207 | * | ||
208 | * MCPCIA, the RAWHIDE family PCI/memory support chipset for the EV5 (21164) | ||
209 | * and EV56 (21164a) processors, can use either a sparse address mapping | ||
210 | * scheme, or the so-called byte-word PCI address space, to get at PCI memory | ||
211 | * and I/O. | ||
212 | * | ||
213 | * Unfortunately, we can't use BWIO with EV5, so for now, we always use SPARSE. | ||
214 | */ | ||
215 | |||
216 | /* | ||
217 | * Memory functions. 64-bit and 32-bit accesses are done through | ||
218 | * dense memory space, everything else through sparse space. | ||
219 | * | ||
220 | * For reading and writing 8 and 16 bit quantities we need to | ||
221 | * go through one of the three sparse address mapping regions | ||
222 | * and use the HAE_MEM CSR to provide some bits of the address. | ||
223 | * The following few routines use only sparse address region 1 | ||
224 | * which gives 1Gbyte of accessible space which relates exactly | ||
225 | * to the amount of PCI memory mapping *into* system address space. | ||
226 | * See p 6-17 of the specification but it looks something like this: | ||
227 | * | ||
228 | * 21164 Address: | ||
229 | * | ||
230 | * 3 2 1 | ||
231 | * 9876543210987654321098765432109876543210 | ||
232 | * 1ZZZZ0.PCI.QW.Address............BBLL | ||
233 | * | ||
234 | * ZZ = SBZ | ||
235 | * BB = Byte offset | ||
236 | * LL = Transfer length | ||
237 | * | ||
238 | * PCI Address: | ||
239 | * | ||
240 | * 3 2 1 | ||
241 | * 10987654321098765432109876543210 | ||
242 | * HHH....PCI.QW.Address........ 00 | ||
243 | * | ||
244 | * HHH = 31:29 HAE_MEM CSR | ||
245 | * | ||
246 | */ | ||
247 | |||
248 | #define vip volatile int __force * | ||
249 | #define vuip volatile unsigned int __force * | ||
250 | |||
251 | #ifdef MCPCIA_ONE_HAE_WINDOW | ||
252 | #define MCPCIA_FROB_MMIO \ | ||
253 | if (__mcpcia_is_mmio(hose)) { \ | ||
254 | set_hae(hose & 0xffffffff); \ | ||
255 | hose = hose - MCPCIA_DENSE(4) + MCPCIA_SPARSE(4); \ | ||
256 | } | ||
257 | #else | ||
258 | #define MCPCIA_FROB_MMIO \ | ||
259 | if (__mcpcia_is_mmio(hose)) { \ | ||
260 | hose = hose - MCPCIA_DENSE(4) + MCPCIA_SPARSE(4); \ | ||
261 | } | ||
262 | #endif | ||
263 | |||
264 | extern inline int __mcpcia_is_mmio(unsigned long addr) | ||
265 | { | ||
266 | return (addr & 0x80000000UL) == 0; | ||
267 | } | ||
268 | |||
269 | __EXTERN_INLINE unsigned int mcpcia_ioread8(void __iomem *xaddr) | ||
270 | { | ||
271 | unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK; | ||
272 | unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK; | ||
273 | unsigned long result; | ||
274 | |||
275 | MCPCIA_FROB_MMIO; | ||
276 | |||
277 | result = *(vip) ((addr << 5) + hose + 0x00); | ||
278 | return __kernel_extbl(result, addr & 3); | ||
279 | } | ||
280 | |||
281 | __EXTERN_INLINE void mcpcia_iowrite8(u8 b, void __iomem *xaddr) | ||
282 | { | ||
283 | unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK; | ||
284 | unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK; | ||
285 | unsigned long w; | ||
286 | |||
287 | MCPCIA_FROB_MMIO; | ||
288 | |||
289 | w = __kernel_insbl(b, addr & 3); | ||
290 | *(vuip) ((addr << 5) + hose + 0x00) = w; | ||
291 | } | ||
292 | |||
293 | __EXTERN_INLINE unsigned int mcpcia_ioread16(void __iomem *xaddr) | ||
294 | { | ||
295 | unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK; | ||
296 | unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK; | ||
297 | unsigned long result; | ||
298 | |||
299 | MCPCIA_FROB_MMIO; | ||
300 | |||
301 | result = *(vip) ((addr << 5) + hose + 0x08); | ||
302 | return __kernel_extwl(result, addr & 3); | ||
303 | } | ||
304 | |||
305 | __EXTERN_INLINE void mcpcia_iowrite16(u16 b, void __iomem *xaddr) | ||
306 | { | ||
307 | unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK; | ||
308 | unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK; | ||
309 | unsigned long w; | ||
310 | |||
311 | MCPCIA_FROB_MMIO; | ||
312 | |||
313 | w = __kernel_inswl(b, addr & 3); | ||
314 | *(vuip) ((addr << 5) + hose + 0x08) = w; | ||
315 | } | ||
316 | |||
317 | __EXTERN_INLINE unsigned int mcpcia_ioread32(void __iomem *xaddr) | ||
318 | { | ||
319 | unsigned long addr = (unsigned long)xaddr; | ||
320 | |||
321 | if (!__mcpcia_is_mmio(addr)) | ||
322 | addr = ((addr & 0xffff) << 5) + (addr & ~0xfffful) + 0x18; | ||
323 | |||
324 | return *(vuip)addr; | ||
325 | } | ||
326 | |||
327 | __EXTERN_INLINE void mcpcia_iowrite32(u32 b, void __iomem *xaddr) | ||
328 | { | ||
329 | unsigned long addr = (unsigned long)xaddr; | ||
330 | |||
331 | if (!__mcpcia_is_mmio(addr)) | ||
332 | addr = ((addr & 0xffff) << 5) + (addr & ~0xfffful) + 0x18; | ||
333 | |||
334 | *(vuip)addr = b; | ||
335 | } | ||
336 | |||
337 | |||
338 | __EXTERN_INLINE void __iomem *mcpcia_ioportmap(unsigned long addr) | ||
339 | { | ||
340 | return (void __iomem *)(addr + MCPCIA_IO_BIAS); | ||
341 | } | ||
342 | |||
343 | __EXTERN_INLINE void __iomem *mcpcia_ioremap(unsigned long addr, | ||
344 | unsigned long size) | ||
345 | { | ||
346 | return (void __iomem *)(addr + MCPCIA_MEM_BIAS); | ||
347 | } | ||
348 | |||
349 | __EXTERN_INLINE int mcpcia_is_ioaddr(unsigned long addr) | ||
350 | { | ||
351 | return addr >= MCPCIA_SPARSE(0); | ||
352 | } | ||
353 | |||
354 | __EXTERN_INLINE int mcpcia_is_mmio(const volatile void __iomem *xaddr) | ||
355 | { | ||
356 | unsigned long addr = (unsigned long) xaddr; | ||
357 | return __mcpcia_is_mmio(addr); | ||
358 | } | ||
359 | |||
360 | #undef MCPCIA_FROB_MMIO | ||
361 | |||
362 | #undef vip | ||
363 | #undef vuip | ||
364 | |||
365 | #undef __IO_PREFIX | ||
366 | #define __IO_PREFIX mcpcia | ||
367 | #define mcpcia_trivial_rw_bw 2 | ||
368 | #define mcpcia_trivial_rw_lq 1 | ||
369 | #define mcpcia_trivial_io_bw 0 | ||
370 | #define mcpcia_trivial_io_lq 0 | ||
371 | #define mcpcia_trivial_iounmap 1 | ||
372 | #include <asm/io_trivial.h> | ||
373 | |||
374 | #ifdef __IO_EXTERN_INLINE | ||
375 | #undef __EXTERN_INLINE | ||
376 | #undef __IO_EXTERN_INLINE | ||
377 | #endif | ||
378 | |||
379 | #endif /* __KERNEL__ */ | ||
380 | |||
381 | #endif /* __ALPHA_MCPCIA__H__ */ | ||
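The address table in the core_mcpcia.h comment block maps the transfer length onto low address bits added after the <<5 shift (0x00 byte, 0x08 word, 0x18 longword), which is exactly what mcpcia_ioread8/16/32 do. A rough stand-alone sketch of that arithmetic for bus 0, which sits at MID 4 (IDENT_ADDR value assumed; mcpcia_sparse_addr is a hypothetical helper, not a kernel function):

/* Sketch only -- builds MCPCIA sparse-space addresses for each width. */
#include <stdio.h>

#define IDENT_ADDR        0xfffffc0000000000UL          /* assumed KSEG base */
#define MCPCIA_MID(m)     ((unsigned long)(m) << 33)
#define MCPCIA_SPARSE(m)  (IDENT_ADDR + 0xf000000000UL + MCPCIA_MID(m))

/* transfer-length adders from the header's address table */
enum { MCPCIA_BYTE = 0x00, MCPCIA_WORD = 0x08, MCPCIA_LONG = 0x18 };

static unsigned long mcpcia_sparse_addr(int mid, unsigned long off, int size)
{
        /* quadword offset in the middle bits, size bits pick the byte enables */
        return MCPCIA_SPARSE(mid) + (off << 5) + size;
}

int main(void)
{
        printf("byte @0x100: %#lx\n", mcpcia_sparse_addr(4, 0x100, MCPCIA_BYTE));
        printf("word @0x100: %#lx\n", mcpcia_sparse_addr(4, 0x100, MCPCIA_WORD));
        printf("long @0x100: %#lx\n", mcpcia_sparse_addr(4, 0x100, MCPCIA_LONG));
        return 0;
}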
diff --git a/arch/alpha/include/asm/core_polaris.h b/arch/alpha/include/asm/core_polaris.h new file mode 100644 index 000000000000..2f966b64659d --- /dev/null +++ b/arch/alpha/include/asm/core_polaris.h | |||
@@ -0,0 +1,110 @@ | |||
1 | #ifndef __ALPHA_POLARIS__H__ | ||
2 | #define __ALPHA_POLARIS__H__ | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <asm/compiler.h> | ||
6 | |||
7 | /* | ||
8 | * POLARIS is the internal name for a core logic chipset which provides | ||
9 | * memory controller and PCI access for the 21164PC chip based systems. | ||
10 | * | ||
11 | * This file is based on: | ||
12 | * | ||
13 | * Polaris System Controller | ||
14 | * Device Functional Specification | ||
15 | * 22-Jan-98 | ||
16 | * Rev. 4.2 | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | /* Polaris memory regions */ | ||
21 | #define POLARIS_SPARSE_MEM_BASE (IDENT_ADDR + 0xf800000000UL) | ||
22 | #define POLARIS_DENSE_MEM_BASE (IDENT_ADDR + 0xf900000000UL) | ||
23 | #define POLARIS_SPARSE_IO_BASE (IDENT_ADDR + 0xf980000000UL) | ||
24 | #define POLARIS_SPARSE_CONFIG_BASE (IDENT_ADDR + 0xf9c0000000UL) | ||
25 | #define POLARIS_IACK_BASE (IDENT_ADDR + 0xf9f8000000UL) | ||
26 | #define POLARIS_DENSE_IO_BASE (IDENT_ADDR + 0xf9fc000000UL) | ||
27 | #define POLARIS_DENSE_CONFIG_BASE (IDENT_ADDR + 0xf9fe000000UL) | ||
28 | |||
29 | #define POLARIS_IACK_SC POLARIS_IACK_BASE | ||
30 | |||
31 | /* The Polaris command/status registers live in PCI Config space for | ||
32 | * bus 0/device 0. As such, they may be bytes, words, or doublewords. | ||
33 | */ | ||
34 | #define POLARIS_W_VENID (POLARIS_DENSE_CONFIG_BASE) | ||
35 | #define POLARIS_W_DEVID (POLARIS_DENSE_CONFIG_BASE+2) | ||
36 | #define POLARIS_W_CMD (POLARIS_DENSE_CONFIG_BASE+4) | ||
37 | #define POLARIS_W_STATUS (POLARIS_DENSE_CONFIG_BASE+6) | ||
38 | |||
39 | /* | ||
40 | * Data structure for handling POLARIS machine checks: | ||
41 | */ | ||
42 | struct el_POLARIS_sysdata_mcheck { | ||
43 | u_long psc_status; | ||
44 | u_long psc_pcictl0; | ||
45 | u_long psc_pcictl1; | ||
46 | u_long psc_pcictl2; | ||
47 | }; | ||
48 | |||
49 | #ifdef __KERNEL__ | ||
50 | |||
51 | #ifndef __EXTERN_INLINE | ||
52 | #define __EXTERN_INLINE extern inline | ||
53 | #define __IO_EXTERN_INLINE | ||
54 | #endif | ||
55 | |||
56 | /* | ||
57 | * I/O functions: | ||
58 | * | ||
59 | * POLARIS, the PCI/memory support chipset for the PCA56 (21164PC) | ||
60 | * processors, can use either a sparse address mapping scheme, or the | ||
61 | * so-called byte-word PCI address space, to get at PCI memory and I/O. | ||
62 | * | ||
63 | * However, we will support only the BWX form. | ||
64 | */ | ||
65 | |||
66 | /* | ||
67 | * Memory functions. Polaris allows all accesses (byte/word | ||
68 | * as well as long/quad) to be done through dense space. | ||
69 | * | ||
70 | * We will only support DENSE access via BWX insns. | ||
71 | */ | ||
72 | |||
73 | __EXTERN_INLINE void __iomem *polaris_ioportmap(unsigned long addr) | ||
74 | { | ||
75 | return (void __iomem *)(addr + POLARIS_DENSE_IO_BASE); | ||
76 | } | ||
77 | |||
78 | __EXTERN_INLINE void __iomem *polaris_ioremap(unsigned long addr, | ||
79 | unsigned long size) | ||
80 | { | ||
81 | return (void __iomem *)(addr + POLARIS_DENSE_MEM_BASE); | ||
82 | } | ||
83 | |||
84 | __EXTERN_INLINE int polaris_is_ioaddr(unsigned long addr) | ||
85 | { | ||
86 | return addr >= POLARIS_SPARSE_MEM_BASE; | ||
87 | } | ||
88 | |||
89 | __EXTERN_INLINE int polaris_is_mmio(const volatile void __iomem *addr) | ||
90 | { | ||
91 | return (unsigned long)addr < POLARIS_SPARSE_IO_BASE; | ||
92 | } | ||
93 | |||
94 | #undef __IO_PREFIX | ||
95 | #define __IO_PREFIX polaris | ||
96 | #define polaris_trivial_rw_bw 1 | ||
97 | #define polaris_trivial_rw_lq 1 | ||
98 | #define polaris_trivial_io_bw 1 | ||
99 | #define polaris_trivial_io_lq 1 | ||
100 | #define polaris_trivial_iounmap 1 | ||
101 | #include <asm/io_trivial.h> | ||
102 | |||
103 | #ifdef __IO_EXTERN_INLINE | ||
104 | #undef __EXTERN_INLINE | ||
105 | #undef __IO_EXTERN_INLINE | ||
106 | #endif | ||
107 | |||
108 | #endif /* __KERNEL__ */ | ||
109 | |||
110 | #endif /* __ALPHA_POLARIS__H__ */ | ||
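Since Polaris is used BWX-only here, its I/O helpers reduce to adding a fixed dense-space base and comparing against region boundaries. A small stand-alone sketch of the checks made by polaris_is_ioaddr() and polaris_is_mmio() above (base constants copied from the header, IDENT_ADDR assumed; polaris_classify is a hypothetical helper):

/* Sketch only -- classifies a KSEG address against the Polaris regions. */
#include <stdio.h>

#define IDENT_ADDR                  0xfffffc0000000000UL  /* assumed KSEG base */
#define POLARIS_SPARSE_MEM_BASE     (IDENT_ADDR + 0xf800000000UL)
#define POLARIS_SPARSE_IO_BASE      (IDENT_ADDR + 0xf980000000UL)
#define POLARIS_DENSE_MEM_BASE      (IDENT_ADDR + 0xf900000000UL)

static const char *polaris_classify(unsigned long addr)
{
        if (addr < POLARIS_SPARSE_MEM_BASE)
                return "not an I/O address (ordinary memory)";
        if (addr < POLARIS_SPARSE_IO_BASE)
                return "MMIO (PCI memory space)";
        return "PCI I/O or config space";
}

int main(void)
{
        printf("%s\n", polaris_classify(POLARIS_DENSE_MEM_BASE + 0x1000));
        printf("%s\n", polaris_classify(POLARIS_SPARSE_IO_BASE + 0x20));
        return 0;
}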
diff --git a/arch/alpha/include/asm/core_t2.h b/arch/alpha/include/asm/core_t2.h new file mode 100644 index 000000000000..46bfff58f670 --- /dev/null +++ b/arch/alpha/include/asm/core_t2.h | |||
@@ -0,0 +1,633 @@ | |||
1 | #ifndef __ALPHA_T2__H__ | ||
2 | #define __ALPHA_T2__H__ | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <linux/spinlock.h> | ||
6 | #include <asm/compiler.h> | ||
7 | #include <asm/system.h> | ||
8 | |||
9 | /* | ||
10 | * T2 is the internal name for the core logic chipset which provides | ||
11 | * memory controller and PCI access for the SABLE-based systems. | ||
12 | * | ||
13 | * This file is based on: | ||
14 | * | ||
15 | * SABLE I/O Specification | ||
16 | * Revision/Update Information: 1.3 | ||
17 | * | ||
18 | * jestabro@amt.tay1.dec.com Initial Version. | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #define T2_MEM_R1_MASK 0x07ffffff /* Mem sparse region 1 mask is 27 bits */ | ||
23 | |||
24 | /* GAMMA-SABLE is a SABLE with EV5-based CPUs */ | ||
25 | /* All LYNX machines, EV4 or EV5, use the GAMMA bias also */ | ||
26 | #define _GAMMA_BIAS 0x8000000000UL | ||
27 | |||
28 | #if defined(CONFIG_ALPHA_GENERIC) | ||
29 | #define GAMMA_BIAS alpha_mv.sys.t2.gamma_bias | ||
30 | #elif defined(CONFIG_ALPHA_GAMMA) | ||
31 | #define GAMMA_BIAS _GAMMA_BIAS | ||
32 | #else | ||
33 | #define GAMMA_BIAS 0 | ||
34 | #endif | ||
35 | |||
36 | /* | ||
37 | * Memory spaces: | ||
38 | */ | ||
39 | #define T2_CONF (IDENT_ADDR + GAMMA_BIAS + 0x390000000UL) | ||
40 | #define T2_IO (IDENT_ADDR + GAMMA_BIAS + 0x3a0000000UL) | ||
41 | #define T2_SPARSE_MEM (IDENT_ADDR + GAMMA_BIAS + 0x200000000UL) | ||
42 | #define T2_DENSE_MEM (IDENT_ADDR + GAMMA_BIAS + 0x3c0000000UL) | ||
43 | |||
44 | #define T2_IOCSR (IDENT_ADDR + GAMMA_BIAS + 0x38e000000UL) | ||
45 | #define T2_CERR1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000020UL) | ||
46 | #define T2_CERR2 (IDENT_ADDR + GAMMA_BIAS + 0x38e000040UL) | ||
47 | #define T2_CERR3 (IDENT_ADDR + GAMMA_BIAS + 0x38e000060UL) | ||
48 | #define T2_PERR1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000080UL) | ||
49 | #define T2_PERR2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0000a0UL) | ||
50 | #define T2_PSCR (IDENT_ADDR + GAMMA_BIAS + 0x38e0000c0UL) | ||
51 | #define T2_HAE_1 (IDENT_ADDR + GAMMA_BIAS + 0x38e0000e0UL) | ||
52 | #define T2_HAE_2 (IDENT_ADDR + GAMMA_BIAS + 0x38e000100UL) | ||
53 | #define T2_HBASE (IDENT_ADDR + GAMMA_BIAS + 0x38e000120UL) | ||
54 | #define T2_WBASE1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000140UL) | ||
55 | #define T2_WMASK1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000160UL) | ||
56 | #define T2_TBASE1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000180UL) | ||
57 | #define T2_WBASE2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0001a0UL) | ||
58 | #define T2_WMASK2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0001c0UL) | ||
59 | #define T2_TBASE2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0001e0UL) | ||
60 | #define T2_TLBBR (IDENT_ADDR + GAMMA_BIAS + 0x38e000200UL) | ||
61 | #define T2_IVR (IDENT_ADDR + GAMMA_BIAS + 0x38e000220UL) | ||
62 | #define T2_HAE_3 (IDENT_ADDR + GAMMA_BIAS + 0x38e000240UL) | ||
63 | #define T2_HAE_4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000260UL) | ||
64 | |||
65 | /* The CSRs below are T3/T4 only */ | ||
66 | #define T2_WBASE3 (IDENT_ADDR + GAMMA_BIAS + 0x38e000280UL) | ||
67 | #define T2_WMASK3 (IDENT_ADDR + GAMMA_BIAS + 0x38e0002a0UL) | ||
68 | #define T2_TBASE3 (IDENT_ADDR + GAMMA_BIAS + 0x38e0002c0UL) | ||
69 | |||
70 | #define T2_TDR0 (IDENT_ADDR + GAMMA_BIAS + 0x38e000300UL) | ||
71 | #define T2_TDR1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000320UL) | ||
72 | #define T2_TDR2 (IDENT_ADDR + GAMMA_BIAS + 0x38e000340UL) | ||
73 | #define T2_TDR3 (IDENT_ADDR + GAMMA_BIAS + 0x38e000360UL) | ||
74 | #define T2_TDR4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000380UL) | ||
75 | #define T2_TDR5 (IDENT_ADDR + GAMMA_BIAS + 0x38e0003a0UL) | ||
76 | #define T2_TDR6 (IDENT_ADDR + GAMMA_BIAS + 0x38e0003c0UL) | ||
77 | #define T2_TDR7 (IDENT_ADDR + GAMMA_BIAS + 0x38e0003e0UL) | ||
78 | |||
79 | #define T2_WBASE4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000400UL) | ||
80 | #define T2_WMASK4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000420UL) | ||
81 | #define T2_TBASE4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000440UL) | ||
82 | |||
83 | #define T2_AIR (IDENT_ADDR + GAMMA_BIAS + 0x38e000460UL) | ||
84 | #define T2_VAR (IDENT_ADDR + GAMMA_BIAS + 0x38e000480UL) | ||
85 | #define T2_DIR (IDENT_ADDR + GAMMA_BIAS + 0x38e0004a0UL) | ||
86 | #define T2_ICE (IDENT_ADDR + GAMMA_BIAS + 0x38e0004c0UL) | ||
87 | |||
88 | #define T2_HAE_ADDRESS T2_HAE_1 | ||
89 | |||
90 | /* T2 CSRs are in the non-cacheable primary I/O space from 3.8000.0000 | ||
91 | * to 3.8fff.ffff. | ||
92 | * | ||
93 | * +--------------+ 3 8000 0000 | ||
94 | * | CPU 0 CSRs | | ||
95 | * +--------------+ 3 8100 0000 | ||
96 | * | CPU 1 CSRs | | ||
97 | * +--------------+ 3 8200 0000 | ||
98 | * | CPU 2 CSRs | | ||
99 | * +--------------+ 3 8300 0000 | ||
100 | * | CPU 3 CSRs | | ||
101 | * +--------------+ 3 8400 0000 | ||
102 | * | CPU Reserved | | ||
103 | * +--------------+ 3 8700 0000 | ||
104 | * | Mem Reserved | | ||
105 | * +--------------+ 3 8800 0000 | ||
106 | * | Mem 0 CSRs | | ||
107 | * +--------------+ 3 8900 0000 | ||
108 | * | Mem 1 CSRs | | ||
109 | * +--------------+ 3 8a00 0000 | ||
110 | * | Mem 2 CSRs | | ||
111 | * +--------------+ 3 8b00 0000 | ||
112 | * | Mem 3 CSRs | | ||
113 | * +--------------+ 3 8c00 0000 | ||
114 | * | Mem Reserved | | ||
115 | * +--------------+ 3 8e00 0000 | ||
116 | * | PCI Bridge | | ||
117 | * +--------------+ 3 8f00 0000 | ||
118 | * | Expansion IO | | ||
119 | * +--------------+ 3 9000 0000 | ||
120 | * | ||
121 | * | ||
122 | */ | ||
123 | #define T2_CPU0_BASE (IDENT_ADDR + GAMMA_BIAS + 0x380000000L) | ||
124 | #define T2_CPU1_BASE (IDENT_ADDR + GAMMA_BIAS + 0x381000000L) | ||
125 | #define T2_CPU2_BASE (IDENT_ADDR + GAMMA_BIAS + 0x382000000L) | ||
126 | #define T2_CPU3_BASE (IDENT_ADDR + GAMMA_BIAS + 0x383000000L) | ||
127 | |||
128 | #define T2_CPUn_BASE(n) (T2_CPU0_BASE + (((n)&3) * 0x001000000L)) | ||
129 | |||
130 | #define T2_MEM0_BASE (IDENT_ADDR + GAMMA_BIAS + 0x388000000L) | ||
131 | #define T2_MEM1_BASE (IDENT_ADDR + GAMMA_BIAS + 0x389000000L) | ||
132 | #define T2_MEM2_BASE (IDENT_ADDR + GAMMA_BIAS + 0x38a000000L) | ||
133 | #define T2_MEM3_BASE (IDENT_ADDR + GAMMA_BIAS + 0x38b000000L) | ||
134 | |||
135 | |||
136 | /* | ||
137 | * Sable CPU Module CSRS | ||
138 | * | ||
139 | * These are CSRs for hardware other than the CPU chip on the CPU module. | ||
140 | * The CPU module has Backup Cache control logic, Cbus control logic, and | ||
141 | * interrupt control logic on it. There is a duplicate tag store to speed | ||
142 | * up maintaining cache coherency. | ||
143 | */ | ||
144 | |||
145 | struct sable_cpu_csr { | ||
146 | unsigned long bcc; long fill_00[3]; /* Backup Cache Control */ | ||
147 | unsigned long bcce; long fill_01[3]; /* Backup Cache Correctable Error */ | ||
148 | unsigned long bccea; long fill_02[3]; /* B-Cache Corr Err Address Latch */ | ||
149 | unsigned long bcue; long fill_03[3]; /* B-Cache Uncorrectable Error */ | ||
150 | unsigned long bcuea; long fill_04[3]; /* B-Cache Uncorr Err Addr Latch */ | ||
151 | unsigned long dter; long fill_05[3]; /* Duplicate Tag Error */ | ||
152 | unsigned long cbctl; long fill_06[3]; /* CBus Control */ | ||
153 | unsigned long cbe; long fill_07[3]; /* CBus Error */ | ||
154 | unsigned long cbeal; long fill_08[3]; /* CBus Error Addr Latch low */ | ||
155 | unsigned long cbeah; long fill_09[3]; /* CBus Error Addr Latch high */ | ||
156 | unsigned long pmbx; long fill_10[3]; /* Processor Mailbox */ | ||
157 | unsigned long ipir; long fill_11[3]; /* Inter-Processor Int Request */ | ||
158 | unsigned long sic; long fill_12[3]; /* System Interrupt Clear */ | ||
159 | unsigned long adlk; long fill_13[3]; /* Address Lock (LDxL/STxC) */ | ||
160 | unsigned long madrl; long fill_14[3]; /* CBus Miss Address */ | ||
161 | unsigned long rev; long fill_15[3]; /* CMIC Revision */ | ||
162 | }; | ||
163 | |||
164 | /* | ||
165 | * Data structure for handling T2 machine checks: | ||
166 | */ | ||
167 | struct el_t2_frame_header { | ||
168 | unsigned int elcf_fid; /* Frame ID (from above) */ | ||
169 | unsigned int elcf_size; /* Size of frame in bytes */ | ||
170 | }; | ||
171 | |||
172 | struct el_t2_procdata_mcheck { | ||
173 | unsigned long elfmc_paltemp[32]; /* PAL TEMP REGS. */ | ||
174 | /* EV4-specific fields */ | ||
175 | unsigned long elfmc_exc_addr; /* Addr of excepting insn. */ | ||
176 | unsigned long elfmc_exc_sum; /* Summary of arith traps. */ | ||
177 | unsigned long elfmc_exc_mask; /* Exception mask (from exc_sum). */ | ||
178 | unsigned long elfmc_iccsr; /* IBox hardware enables. */ | ||
179 | unsigned long elfmc_pal_base; /* Base address for PALcode. */ | ||
180 | unsigned long elfmc_hier; /* Hardware Interrupt Enable. */ | ||
181 | unsigned long elfmc_hirr; /* Hardware Interrupt Request. */ | ||
182 | unsigned long elfmc_mm_csr; /* D-stream fault info. */ | ||
183 | unsigned long elfmc_dc_stat; /* D-cache status (ECC/Parity Err). */ | ||
184 | unsigned long elfmc_dc_addr; /* EV3 Phys Addr for ECC/DPERR. */ | ||
185 | unsigned long elfmc_abox_ctl; /* ABox Control Register. */ | ||
186 | unsigned long elfmc_biu_stat; /* BIU Status. */ | ||
187 | unsigned long elfmc_biu_addr; /* BIU Address. */ | ||
188 | unsigned long elfmc_biu_ctl; /* BIU Control. */ | ||
189 | unsigned long elfmc_fill_syndrome; /* For correcting ECC errors. */ | ||
190 | unsigned long elfmc_fill_addr;/* Cache block which was being read. */ | ||
191 | unsigned long elfmc_va; /* Effective VA of fault or miss. */ | ||
192 | unsigned long elfmc_bc_tag; /* Backup Cache Tag Probe Results. */ | ||
193 | }; | ||
194 | |||
195 | /* | ||
196 | * Sable processor specific Machine Check Data segment. | ||
197 | */ | ||
198 | |||
199 | struct el_t2_logout_header { | ||
200 | unsigned int elfl_size; /* size in bytes of logout area. */ | ||
201 | unsigned int elfl_sbz1:31; /* Should be zero. */ | ||
202 | unsigned int elfl_retry:1; /* Retry flag. */ | ||
203 | unsigned int elfl_procoffset; /* Processor-specific offset. */ | ||
204 | unsigned int elfl_sysoffset; /* Offset of system-specific. */ | ||
205 | unsigned int elfl_error_type; /* PAL error type code. */ | ||
206 | unsigned int elfl_frame_rev; /* PAL Frame revision. */ | ||
207 | }; | ||
208 | struct el_t2_sysdata_mcheck { | ||
209 | unsigned long elcmc_bcc; /* CSR 0 */ | ||
210 | unsigned long elcmc_bcce; /* CSR 1 */ | ||
211 | unsigned long elcmc_bccea; /* CSR 2 */ | ||
212 | unsigned long elcmc_bcue; /* CSR 3 */ | ||
213 | unsigned long elcmc_bcuea; /* CSR 4 */ | ||
214 | unsigned long elcmc_dter; /* CSR 5 */ | ||
215 | unsigned long elcmc_cbctl; /* CSR 6 */ | ||
216 | unsigned long elcmc_cbe; /* CSR 7 */ | ||
217 | unsigned long elcmc_cbeal; /* CSR 8 */ | ||
218 | unsigned long elcmc_cbeah; /* CSR 9 */ | ||
219 | unsigned long elcmc_pmbx; /* CSR 10 */ | ||
220 | unsigned long elcmc_ipir; /* CSR 11 */ | ||
221 | unsigned long elcmc_sic; /* CSR 12 */ | ||
222 | unsigned long elcmc_adlk; /* CSR 13 */ | ||
223 | unsigned long elcmc_madrl; /* CSR 14 */ | ||
224 | unsigned long elcmc_crrev4; /* CSR 15 */ | ||
225 | }; | ||
226 | |||
227 | /* | ||
228 | * Sable memory error frame - sable pfms section 3.42 | ||
229 | */ | ||
230 | struct el_t2_data_memory { | ||
231 | struct el_t2_frame_header elcm_hdr; /* ID$MEM-FERR = 0x08 */ | ||
232 | unsigned int elcm_module; /* Module id. */ | ||
233 | unsigned int elcm_res04; /* Reserved. */ | ||
234 | unsigned long elcm_merr; /* CSR0: Error Reg 1. */ | ||
235 | unsigned long elcm_mcmd1; /* CSR1: Command Trap 1. */ | ||
236 | unsigned long elcm_mcmd2; /* CSR2: Command Trap 2. */ | ||
237 | unsigned long elcm_mconf; /* CSR3: Configuration. */ | ||
238 | unsigned long elcm_medc1; /* CSR4: EDC Status 1. */ | ||
239 | unsigned long elcm_medc2; /* CSR5: EDC Status 2. */ | ||
240 | unsigned long elcm_medcc; /* CSR6: EDC Control. */ | ||
241 | unsigned long elcm_msctl; /* CSR7: Stream Buffer Control. */ | ||
242 | unsigned long elcm_mref; /* CSR8: Refresh Control. */ | ||
243 | unsigned long elcm_filter; /* CSR9: CRD Filter Control. */ | ||
244 | }; | ||
245 | |||
246 | |||
247 | /* | ||
248 | * Sable other CPU error frame - sable pfms section 3.43 | ||
249 | */ | ||
250 | struct el_t2_data_other_cpu { | ||
251 | short elco_cpuid; /* CPU ID */ | ||
252 | short elco_res02[3]; | ||
253 | unsigned long elco_bcc; /* CSR 0 */ | ||
254 | unsigned long elco_bcce; /* CSR 1 */ | ||
255 | unsigned long elco_bccea; /* CSR 2 */ | ||
256 | unsigned long elco_bcue; /* CSR 3 */ | ||
257 | unsigned long elco_bcuea; /* CSR 4 */ | ||
258 | unsigned long elco_dter; /* CSR 5 */ | ||
259 | unsigned long elco_cbctl; /* CSR 6 */ | ||
260 | unsigned long elco_cbe; /* CSR 7 */ | ||
261 | unsigned long elco_cbeal; /* CSR 8 */ | ||
262 | unsigned long elco_cbeah; /* CSR 9 */ | ||
263 | unsigned long elco_pmbx; /* CSR 10 */ | ||
264 | unsigned long elco_ipir; /* CSR 11 */ | ||
265 | unsigned long elco_sic; /* CSR 12 */ | ||
266 | unsigned long elco_adlk; /* CSR 13 */ | ||
267 | unsigned long elco_madrl; /* CSR 14 */ | ||
268 | unsigned long elco_crrev4; /* CSR 15 */ | ||
269 | }; | ||
270 | |||
271 | /* | ||
272 | * Sable T2 error frame - sable pfms section 3.44 | ||
273 | */ | ||
274 | struct el_t2_data_t2 { | ||
275 | struct el_t2_frame_header elct_hdr; /* ID$T2-FRAME */ | ||
276 | unsigned long elct_iocsr; /* IO Control and Status Register */ | ||
277 | unsigned long elct_cerr1; /* Cbus Error Register 1 */ | ||
278 | unsigned long elct_cerr2; /* Cbus Error Register 2 */ | ||
279 | unsigned long elct_cerr3; /* Cbus Error Register 3 */ | ||
280 | unsigned long elct_perr1; /* PCI Error Register 1 */ | ||
281 | unsigned long elct_perr2; /* PCI Error Register 2 */ | ||
282 | unsigned long elct_hae0_1; /* High Address Extension Register 1 */ | ||
283 | unsigned long elct_hae0_2; /* High Address Extension Register 2 */ | ||
284 | unsigned long elct_hbase; /* High Base Register */ | ||
285 | unsigned long elct_wbase1; /* Window Base Register 1 */ | ||
286 | unsigned long elct_wmask1; /* Window Mask Register 1 */ | ||
287 | unsigned long elct_tbase1; /* Translated Base Register 1 */ | ||
288 | unsigned long elct_wbase2; /* Window Base Register 2 */ | ||
289 | unsigned long elct_wmask2; /* Window Mask Register 2 */ | ||
290 | unsigned long elct_tbase2; /* Translated Base Register 2 */ | ||
291 | unsigned long elct_tdr0; /* TLB Data Register 0 */ | ||
292 | unsigned long elct_tdr1; /* TLB Data Register 1 */ | ||
293 | unsigned long elct_tdr2; /* TLB Data Register 2 */ | ||
294 | unsigned long elct_tdr3; /* TLB Data Register 3 */ | ||
295 | unsigned long elct_tdr4; /* TLB Data Register 4 */ | ||
296 | unsigned long elct_tdr5; /* TLB Data Register 5 */ | ||
297 | unsigned long elct_tdr6; /* TLB Data Register 6 */ | ||
298 | unsigned long elct_tdr7; /* TLB Data Register 7 */ | ||
299 | }; | ||
300 | |||
301 | /* | ||
302 | * Sable error log data structure - sable pfms section 3.40 | ||
303 | */ | ||
304 | struct el_t2_data_corrected { | ||
305 | unsigned long elcpb_biu_stat; | ||
306 | unsigned long elcpb_biu_addr; | ||
307 | unsigned long elcpb_biu_ctl; | ||
308 | unsigned long elcpb_fill_syndrome; | ||
309 | unsigned long elcpb_fill_addr; | ||
310 | unsigned long elcpb_bc_tag; | ||
311 | }; | ||
312 | |||
313 | /* | ||
314 | * Sable error log data structure | ||
315 | * Note there are 4 memory slots on sable (see t2.h) | ||
316 | */ | ||
317 | struct el_t2_frame_mcheck { | ||
318 | struct el_t2_frame_header elfmc_header; /* ID$P-FRAME_MCHECK */ | ||
319 | struct el_t2_logout_header elfmc_hdr; | ||
320 | struct el_t2_procdata_mcheck elfmc_procdata; | ||
321 | struct el_t2_sysdata_mcheck elfmc_sysdata; | ||
322 | struct el_t2_data_t2 elfmc_t2data; | ||
323 | struct el_t2_data_memory elfmc_memdata[4]; | ||
324 | struct el_t2_frame_header elfmc_footer; /* empty */ | ||
325 | }; | ||
326 | |||
327 | |||
328 | /* | ||
329 | * Sable error log data structures on memory errors | ||
330 | */ | ||
331 | struct el_t2_frame_corrected { | ||
332 | struct el_t2_frame_header elfcc_header; /* ID$P-BC-COR */ | ||
333 | struct el_t2_logout_header elfcc_hdr; | ||
334 | struct el_t2_data_corrected elfcc_procdata; | ||
335 | /* struct el_t2_data_t2 elfcc_t2data; */ | ||
336 | /* struct el_t2_data_memory elfcc_memdata[4]; */ | ||
337 | struct el_t2_frame_header elfcc_footer; /* empty */ | ||
338 | }; | ||
339 | |||
340 | |||
341 | #ifdef __KERNEL__ | ||
342 | |||
343 | #ifndef __EXTERN_INLINE | ||
344 | #define __EXTERN_INLINE extern inline | ||
345 | #define __IO_EXTERN_INLINE | ||
346 | #endif | ||
347 | |||
348 | /* | ||
349 | * I/O functions: | ||
350 | * | ||
351 | * T2 (the core logic PCI/memory support chipset for the SABLE | ||
352 | * series of processors) uses a sparse address mapping scheme to | ||
353 | * get at PCI memory and I/O. | ||
354 | */ | ||
355 | |||
356 | #define vip volatile int * | ||
357 | #define vuip volatile unsigned int * | ||
358 | |||
359 | extern inline u8 t2_inb(unsigned long addr) | ||
360 | { | ||
361 | long result = *(vip) ((addr << 5) + T2_IO + 0x00); | ||
362 | return __kernel_extbl(result, addr & 3); | ||
363 | } | ||
364 | |||
365 | extern inline void t2_outb(u8 b, unsigned long addr) | ||
366 | { | ||
367 | unsigned long w; | ||
368 | |||
369 | w = __kernel_insbl(b, addr & 3); | ||
370 | *(vuip) ((addr << 5) + T2_IO + 0x00) = w; | ||
371 | mb(); | ||
372 | } | ||
373 | |||
374 | extern inline u16 t2_inw(unsigned long addr) | ||
375 | { | ||
376 | long result = *(vip) ((addr << 5) + T2_IO + 0x08); | ||
377 | return __kernel_extwl(result, addr & 3); | ||
378 | } | ||
379 | |||
380 | extern inline void t2_outw(u16 b, unsigned long addr) | ||
381 | { | ||
382 | unsigned long w; | ||
383 | |||
384 | w = __kernel_inswl(b, addr & 3); | ||
385 | *(vuip) ((addr << 5) + T2_IO + 0x08) = w; | ||
386 | mb(); | ||
387 | } | ||
388 | |||
389 | extern inline u32 t2_inl(unsigned long addr) | ||
390 | { | ||
391 | return *(vuip) ((addr << 5) + T2_IO + 0x18); | ||
392 | } | ||
393 | |||
394 | extern inline void t2_outl(u32 b, unsigned long addr) | ||
395 | { | ||
396 | *(vuip) ((addr << 5) + T2_IO + 0x18) = b; | ||
397 | mb(); | ||
398 | } | ||
399 | |||
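For readers new to Alpha sparse space, a minimal user-space sketch of the arithmetic t2_inb() above performs may help: the port number is widened by a 5-bit shift and the addressed byte is picked out of the returned longword by its lane. The base constant below is a placeholder, not the kernel's real T2_IO value, and the PALcode-backed __kernel_extbl helper is only imitated by the lane computation.

/* Illustrative only: mirrors the sparse I/O encoding used by t2_inb(),
 * with a placeholder base instead of the real T2_IO constant. */
#include <stdio.h>

#define FAKE_T2_IO 0x0UL			/* placeholder for the real T2_IO */

int main(void)
{
	unsigned long port = 0x3f9;		/* example ISA port */
	unsigned long sparse = (port << 5) + FAKE_T2_IO + 0x00;
	unsigned long lane = port & 3;		/* byte lane __kernel_extbl would select */

	printf("sparse address = %#lx, byte lane = %lu\n", sparse, lane);
	return 0;
}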
400 | |||
401 | /* | ||
402 | * Memory functions. | ||
403 | * | ||
404 | * For reading and writing 8 and 16 bit quantities we need to | ||
405 | * go through one of the three sparse address mapping regions | ||
406 | * and use the HAE_MEM CSR to provide some bits of the address. | ||
407 | * The following few routines use only sparse address region 1 | ||
408 | * which gives 1 Gbyte of accessible space, corresponding exactly | ||
409 | * to the amount of PCI memory mapped *into* the system address space. | ||
410 | * See p 6-17 of the specification; it looks something like this: | ||
411 | * | ||
412 | * 21164 Address: | ||
413 | * | ||
414 | * 3 2 1 | ||
415 | * 9876543210987654321098765432109876543210 | ||
416 | * 1ZZZZ0.PCI.QW.Address............BBLL | ||
417 | * | ||
418 | * ZZ = SBZ | ||
419 | * BB = Byte offset | ||
420 | * LL = Transfer length | ||
421 | * | ||
422 | * PCI Address: | ||
423 | * | ||
424 | * 3 2 1 | ||
425 | * 10987654321098765432109876543210 | ||
426 | * HHH....PCI.QW.Address........ 00 | ||
427 | * | ||
428 | * HHH = 31:29 HAE_MEM CSR | ||
429 | * | ||
430 | */ | ||
431 | |||
432 | #define t2_set_hae { \ | ||
433 | msb = addr >> 27; \ | ||
434 | addr &= T2_MEM_R1_MASK; \ | ||
435 | set_hae(msb); \ | ||
436 | } | ||
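A worked example of the split t2_set_hae performs: the top bits of the dense-space offset are written to the HAE_MEM CSR and only the low bits survive into the sparse access. The 27-bit mask below is assumed to stand in for T2_MEM_R1_MASK purely for illustration; the authoritative value is defined elsewhere in this header.

/* Illustrative only: plain-C version of the t2_set_hae split above. */
#include <stdio.h>

int main(void)
{
	unsigned long addr = 0x18904abcUL;		/* example dense-space offset */
	unsigned long msb  = addr >> 27;		/* value that would go into HAE_MEM */
	unsigned long low  = addr & ((1UL << 27) - 1);	/* offset within the region */

	printf("HAE = %#lx, in-region offset = %#lx\n", msb, low);
	return 0;
}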
437 | |||
438 | extern spinlock_t t2_hae_lock; | ||
439 | |||
440 | /* | ||
441 | * NOTE: take T2_DENSE_MEM off in each readX/writeX routine, since | ||
442 | * they may be called directly, rather than through the | ||
443 | * ioreadNN/iowriteNN routines. | ||
444 | */ | ||
445 | |||
446 | __EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr) | ||
447 | { | ||
448 | unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; | ||
449 | unsigned long result, msb; | ||
450 | unsigned long flags; | ||
451 | spin_lock_irqsave(&t2_hae_lock, flags); | ||
452 | |||
453 | t2_set_hae; | ||
454 | |||
455 | result = *(vip) ((addr << 5) + T2_SPARSE_MEM + 0x00); | ||
456 | spin_unlock_irqrestore(&t2_hae_lock, flags); | ||
457 | return __kernel_extbl(result, addr & 3); | ||
458 | } | ||
459 | |||
460 | __EXTERN_INLINE u16 t2_readw(const volatile void __iomem *xaddr) | ||
461 | { | ||
462 | unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; | ||
463 | unsigned long result, msb; | ||
464 | unsigned long flags; | ||
465 | spin_lock_irqsave(&t2_hae_lock, flags); | ||
466 | |||
467 | t2_set_hae; | ||
468 | |||
469 | result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08); | ||
470 | spin_unlock_irqrestore(&t2_hae_lock, flags); | ||
471 | return __kernel_extwl(result, addr & 3); | ||
472 | } | ||
473 | |||
474 | /* | ||
475 | * On SABLE with T2, we must use SPARSE memory even for 32-bit access, | ||
476 | * because we cannot access all of DENSE without changing its HAE. | ||
477 | */ | ||
478 | __EXTERN_INLINE u32 t2_readl(const volatile void __iomem *xaddr) | ||
479 | { | ||
480 | unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; | ||
481 | unsigned long result, msb; | ||
482 | unsigned long flags; | ||
483 | spin_lock_irqsave(&t2_hae_lock, flags); | ||
484 | |||
485 | t2_set_hae; | ||
486 | |||
487 | result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18); | ||
488 | spin_unlock_irqrestore(&t2_hae_lock, flags); | ||
489 | return result & 0xffffffffUL; | ||
490 | } | ||
491 | |||
492 | __EXTERN_INLINE u64 t2_readq(const volatile void __iomem *xaddr) | ||
493 | { | ||
494 | unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; | ||
495 | unsigned long r0, r1, work, msb; | ||
496 | unsigned long flags; | ||
497 | spin_lock_irqsave(&t2_hae_lock, flags); | ||
498 | |||
499 | t2_set_hae; | ||
500 | |||
501 | work = (addr << 5) + T2_SPARSE_MEM + 0x18; | ||
502 | r0 = *(vuip)(work); | ||
503 | r1 = *(vuip)(work + (4 << 5)); | ||
504 | spin_unlock_irqrestore(&t2_hae_lock, flags); | ||
505 | return r1 << 32 | r0; | ||
506 | } | ||
507 | |||
508 | __EXTERN_INLINE void t2_writeb(u8 b, volatile void __iomem *xaddr) | ||
509 | { | ||
510 | unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; | ||
511 | unsigned long msb, w; | ||
512 | unsigned long flags; | ||
513 | spin_lock_irqsave(&t2_hae_lock, flags); | ||
514 | |||
515 | t2_set_hae; | ||
516 | |||
517 | w = __kernel_insbl(b, addr & 3); | ||
518 | *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) = w; | ||
519 | spin_unlock_irqrestore(&t2_hae_lock, flags); | ||
520 | } | ||
521 | |||
522 | __EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr) | ||
523 | { | ||
524 | unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; | ||
525 | unsigned long msb, w; | ||
526 | unsigned long flags; | ||
527 | spin_lock_irqsave(&t2_hae_lock, flags); | ||
528 | |||
529 | t2_set_hae; | ||
530 | |||
531 | w = __kernel_inswl(b, addr & 3); | ||
532 | *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08) = w; | ||
533 | spin_unlock_irqrestore(&t2_hae_lock, flags); | ||
534 | } | ||
535 | |||
536 | /* | ||
537 | * On SABLE with T2, we must use SPARSE memory even for 32-bit access, | ||
538 | * because we cannot access all of DENSE without changing its HAE. | ||
539 | */ | ||
540 | __EXTERN_INLINE void t2_writel(u32 b, volatile void __iomem *xaddr) | ||
541 | { | ||
542 | unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; | ||
543 | unsigned long msb; | ||
544 | unsigned long flags; | ||
545 | spin_lock_irqsave(&t2_hae_lock, flags); | ||
546 | |||
547 | t2_set_hae; | ||
548 | |||
549 | *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b; | ||
550 | spin_unlock_irqrestore(&t2_hae_lock, flags); | ||
551 | } | ||
552 | |||
553 | __EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr) | ||
554 | { | ||
555 | unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; | ||
556 | unsigned long msb, work; | ||
557 | unsigned long flags; | ||
558 | spin_lock_irqsave(&t2_hae_lock, flags); | ||
559 | |||
560 | t2_set_hae; | ||
561 | |||
562 | work = (addr << 5) + T2_SPARSE_MEM + 0x18; | ||
563 | *(vuip)work = b; | ||
564 | *(vuip)(work + (4 << 5)) = b >> 32; | ||
565 | spin_unlock_irqrestore(&t2_hae_lock, flags); | ||
566 | } | ||
567 | |||
568 | __EXTERN_INLINE void __iomem *t2_ioportmap(unsigned long addr) | ||
569 | { | ||
570 | return (void __iomem *)(addr + T2_IO); | ||
571 | } | ||
572 | |||
573 | __EXTERN_INLINE void __iomem *t2_ioremap(unsigned long addr, | ||
574 | unsigned long size) | ||
575 | { | ||
576 | return (void __iomem *)(addr + T2_DENSE_MEM); | ||
577 | } | ||
578 | |||
579 | __EXTERN_INLINE int t2_is_ioaddr(unsigned long addr) | ||
580 | { | ||
581 | return (long)addr >= 0; | ||
582 | } | ||
583 | |||
584 | __EXTERN_INLINE int t2_is_mmio(const volatile void __iomem *addr) | ||
585 | { | ||
586 | return (unsigned long)addr >= T2_DENSE_MEM; | ||
587 | } | ||
588 | |||
589 | /* New-style ioread interface. The mmio routines are so ugly for T2 that | ||
590 | it doesn't make sense to merge the pio and mmio routines. */ | ||
591 | |||
592 | #define IOPORT(OS, NS) \ | ||
593 | __EXTERN_INLINE unsigned int t2_ioread##NS(void __iomem *xaddr) \ | ||
594 | { \ | ||
595 | if (t2_is_mmio(xaddr)) \ | ||
596 | return t2_read##OS(xaddr); \ | ||
597 | else \ | ||
598 | return t2_in##OS((unsigned long)xaddr - T2_IO); \ | ||
599 | } \ | ||
600 | __EXTERN_INLINE void t2_iowrite##NS(u##NS b, void __iomem *xaddr) \ | ||
601 | { \ | ||
602 | if (t2_is_mmio(xaddr)) \ | ||
603 | t2_write##OS(b, xaddr); \ | ||
604 | else \ | ||
605 | t2_out##OS(b, (unsigned long)xaddr - T2_IO); \ | ||
606 | } | ||
607 | |||
608 | IOPORT(b, 8) | ||
609 | IOPORT(w, 16) | ||
610 | IOPORT(l, 32) | ||
611 | |||
612 | #undef IOPORT | ||
613 | |||
614 | #undef vip | ||
615 | #undef vuip | ||
616 | |||
617 | #undef __IO_PREFIX | ||
618 | #define __IO_PREFIX t2 | ||
619 | #define t2_trivial_rw_bw 0 | ||
620 | #define t2_trivial_rw_lq 0 | ||
621 | #define t2_trivial_io_bw 0 | ||
622 | #define t2_trivial_io_lq 0 | ||
623 | #define t2_trivial_iounmap 1 | ||
624 | #include <asm/io_trivial.h> | ||
625 | |||
626 | #ifdef __IO_EXTERN_INLINE | ||
627 | #undef __EXTERN_INLINE | ||
628 | #undef __IO_EXTERN_INLINE | ||
629 | #endif | ||
630 | |||
631 | #endif /* __KERNEL__ */ | ||
632 | |||
633 | #endif /* __ALPHA_T2__H__ */ | ||
diff --git a/arch/alpha/include/asm/core_titan.h b/arch/alpha/include/asm/core_titan.h new file mode 100644 index 000000000000..a17f6f33b68e --- /dev/null +++ b/arch/alpha/include/asm/core_titan.h | |||
@@ -0,0 +1,410 @@ | |||
1 | #ifndef __ALPHA_TITAN__H__ | ||
2 | #define __ALPHA_TITAN__H__ | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <linux/pci.h> | ||
6 | #include <asm/compiler.h> | ||
7 | |||
8 | /* | ||
9 | * TITAN is the internal name for a core logic chipset which provides | ||
10 | * memory controller and PCI/AGP access for 21264 based systems. | ||
11 | * | ||
12 | * This file is based on: | ||
13 | * | ||
14 | * Titan Chipset Engineering Specification | ||
15 | * Revision 0.12 | ||
16 | * 13 July 1999 | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | /* XXX: Do we need to conditionalize on this? */ | ||
21 | #ifdef USE_48_BIT_KSEG | ||
22 | #define TI_BIAS 0x80000000000UL | ||
23 | #else | ||
24 | #define TI_BIAS 0x10000000000UL | ||
25 | #endif | ||
26 | |||
27 | /* | ||
28 | * CChip, DChip, and PChip registers | ||
29 | */ | ||
30 | |||
31 | typedef struct { | ||
32 | volatile unsigned long csr __attribute__((aligned(64))); | ||
33 | } titan_64; | ||
34 | |||
35 | typedef struct { | ||
36 | titan_64 csc; | ||
37 | titan_64 mtr; | ||
38 | titan_64 misc; | ||
39 | titan_64 mpd; | ||
40 | titan_64 aar0; | ||
41 | titan_64 aar1; | ||
42 | titan_64 aar2; | ||
43 | titan_64 aar3; | ||
44 | titan_64 dim0; | ||
45 | titan_64 dim1; | ||
46 | titan_64 dir0; | ||
47 | titan_64 dir1; | ||
48 | titan_64 drir; | ||
49 | titan_64 prben; | ||
50 | titan_64 iic0; | ||
51 | titan_64 iic1; | ||
52 | titan_64 mpr0; | ||
53 | titan_64 mpr1; | ||
54 | titan_64 mpr2; | ||
55 | titan_64 mpr3; | ||
56 | titan_64 rsvd[2]; | ||
57 | titan_64 ttr; | ||
58 | titan_64 tdr; | ||
59 | titan_64 dim2; | ||
60 | titan_64 dim3; | ||
61 | titan_64 dir2; | ||
62 | titan_64 dir3; | ||
63 | titan_64 iic2; | ||
64 | titan_64 iic3; | ||
65 | titan_64 pwr; | ||
66 | titan_64 reserved[17]; | ||
67 | titan_64 cmonctla; | ||
68 | titan_64 cmonctlb; | ||
69 | titan_64 cmoncnt01; | ||
70 | titan_64 cmoncnt23; | ||
71 | titan_64 cpen; | ||
72 | } titan_cchip; | ||
73 | |||
74 | typedef struct { | ||
75 | titan_64 dsc; | ||
76 | titan_64 str; | ||
77 | titan_64 drev; | ||
78 | titan_64 dsc2; | ||
79 | } titan_dchip; | ||
80 | |||
81 | typedef struct { | ||
82 | titan_64 wsba[4]; | ||
83 | titan_64 wsm[4]; | ||
84 | titan_64 tba[4]; | ||
85 | titan_64 pctl; | ||
86 | titan_64 plat; | ||
87 | titan_64 reserved0[2]; | ||
88 | union { | ||
89 | struct { | ||
90 | titan_64 serror; | ||
91 | titan_64 serren; | ||
92 | titan_64 serrset; | ||
93 | titan_64 reserved0; | ||
94 | titan_64 gperror; | ||
95 | titan_64 gperren; | ||
96 | titan_64 gperrset; | ||
97 | titan_64 reserved1; | ||
98 | titan_64 gtlbiv; | ||
99 | titan_64 gtlbia; | ||
100 | titan_64 reserved2[2]; | ||
101 | titan_64 sctl; | ||
102 | titan_64 reserved3[3]; | ||
103 | } g; | ||
104 | struct { | ||
105 | titan_64 agperror; | ||
106 | titan_64 agperren; | ||
107 | titan_64 agperrset; | ||
108 | titan_64 agplastwr; | ||
109 | titan_64 aperror; | ||
110 | titan_64 aperren; | ||
111 | titan_64 aperrset; | ||
112 | titan_64 reserved0; | ||
113 | titan_64 atlbiv; | ||
114 | titan_64 atlbia; | ||
115 | titan_64 reserved1[6]; | ||
116 | } a; | ||
117 | } port_specific; | ||
118 | titan_64 sprst; | ||
119 | titan_64 reserved1[31]; | ||
120 | } titan_pachip_port; | ||
121 | |||
122 | typedef struct { | ||
123 | titan_pachip_port g_port; | ||
124 | titan_pachip_port a_port; | ||
125 | } titan_pachip; | ||
126 | |||
127 | #define TITAN_cchip ((titan_cchip *)(IDENT_ADDR+TI_BIAS+0x1A0000000UL)) | ||
128 | #define TITAN_dchip ((titan_dchip *)(IDENT_ADDR+TI_BIAS+0x1B0000800UL)) | ||
129 | #define TITAN_pachip0 ((titan_pachip *)(IDENT_ADDR+TI_BIAS+0x180000000UL)) | ||
130 | #define TITAN_pachip1 ((titan_pachip *)(IDENT_ADDR+TI_BIAS+0x380000000UL)) | ||
131 | extern unsigned TITAN_agp; | ||
132 | extern int TITAN_bootcpu; | ||
133 | |||
134 | /* | ||
135 | * TITAN PA-chip Window Space Base Address register. | ||
136 | * (WSBA[0-2]) | ||
137 | */ | ||
138 | #define wsba_m_ena 0x1 | ||
139 | #define wsba_m_sg 0x2 | ||
140 | #define wsba_m_addr 0xFFF00000 | ||
141 | #define wmask_k_sz1gb 0x3FF00000 | ||
142 | union TPAchipWSBA { | ||
143 | struct { | ||
144 | unsigned wsba_v_ena : 1; | ||
145 | unsigned wsba_v_sg : 1; | ||
146 | unsigned wsba_v_rsvd1 : 18; | ||
147 | unsigned wsba_v_addr : 12; | ||
148 | unsigned wsba_v_rsvd2 : 32; | ||
149 | } wsba_r_bits; | ||
150 | int wsba_q_whole [2]; | ||
151 | }; | ||
152 | |||
153 | /* | ||
154 | * TITAN PA-chip Control Register | ||
155 | * This definition covers both the G-Port GPCTL and the A-PORT APCTL. | ||
156 | * Bits <51:0> are the same in both cases. APCTL<63:52> are only | ||
157 | * applicable to AGP. | ||
158 | */ | ||
159 | #define pctl_m_fbtb 0x00000001 | ||
160 | #define pctl_m_thdis 0x00000002 | ||
161 | #define pctl_m_chaindis 0x00000004 | ||
162 | #define pctl_m_tgtlat 0x00000018 | ||
163 | #define pctl_m_hole 0x00000020 | ||
164 | #define pctl_m_mwin 0x00000040 | ||
165 | #define pctl_m_arbena 0x00000080 | ||
166 | #define pctl_m_prigrp 0x0000FF00 | ||
167 | #define pctl_m_ppri 0x00010000 | ||
168 | #define pctl_m_pcispd66 0x00020000 | ||
169 | #define pctl_m_cngstlt 0x003C0000 | ||
170 | #define pctl_m_ptpdesten 0x3FC00000 | ||
171 | #define pctl_m_dpcen 0x40000000 | ||
172 | #define pctl_m_apcen 0x0000000080000000UL | ||
173 | #define pctl_m_dcrtv 0x0000000300000000UL | ||
174 | #define pctl_m_en_stepping 0x0000000400000000UL | ||
175 | #define apctl_m_rsvd1 0x000FFFF800000000UL | ||
176 | #define apctl_m_agp_rate 0x0030000000000000UL | ||
177 | #define apctl_m_agp_sba_en 0x0040000000000000UL | ||
178 | #define apctl_m_agp_en 0x0080000000000000UL | ||
179 | #define apctl_m_rsvd2 0x0100000000000000UL | ||
180 | #define apctl_m_agp_present 0x0200000000000000UL | ||
181 | #define apctl_agp_hp_rd 0x1C00000000000000UL | ||
182 | #define apctl_agp_lp_rd 0xE000000000000000UL | ||
183 | #define gpctl_m_rsvd 0xFFFFFFF800000000UL | ||
184 | union TPAchipPCTL { | ||
185 | struct { | ||
186 | unsigned pctl_v_fbtb : 1; /* A/G [0] */ | ||
187 | unsigned pctl_v_thdis : 1; /* A/G [1] */ | ||
188 | unsigned pctl_v_chaindis : 1; /* A/G [2] */ | ||
189 | unsigned pctl_v_tgtlat : 2; /* A/G [4:3] */ | ||
190 | unsigned pctl_v_hole : 1; /* A/G [5] */ | ||
191 | unsigned pctl_v_mwin : 1; /* A/G [6] */ | ||
192 | unsigned pctl_v_arbena : 1; /* A/G [7] */ | ||
193 | unsigned pctl_v_prigrp : 8; /* A/G [15:8] */ | ||
194 | unsigned pctl_v_ppri : 1; /* A/G [16] */ | ||
195 | unsigned pctl_v_pcispd66 : 1; /* A/G [17] */ | ||
196 | unsigned pctl_v_cngstlt : 4; /* A/G [21:18] */ | ||
197 | unsigned pctl_v_ptpdesten : 8; /* A/G [29:22] */ | ||
198 | unsigned pctl_v_dpcen : 1; /* A/G [30] */ | ||
199 | unsigned pctl_v_apcen : 1; /* A/G [31] */ | ||
200 | unsigned pctl_v_dcrtv : 2; /* A/G [33:32] */ | ||
201 | unsigned pctl_v_en_stepping :1; /* A/G [34] */ | ||
202 | unsigned apctl_v_rsvd1 : 17; /* A [51:35] */ | ||
203 | unsigned apctl_v_agp_rate : 2; /* A [53:52] */ | ||
204 | unsigned apctl_v_agp_sba_en : 1; /* A [54] */ | ||
205 | unsigned apctl_v_agp_en : 1; /* A [55] */ | ||
206 | unsigned apctl_v_rsvd2 : 1; /* A [56] */ | ||
207 | unsigned apctl_v_agp_present : 1; /* A [57] */ | ||
208 | unsigned apctl_v_agp_hp_rd : 3; /* A [60:58] */ | ||
209 | unsigned apctl_v_agp_lp_rd : 3; /* A [63:61] */ | ||
210 | } pctl_r_bits; | ||
211 | unsigned int pctl_l_whole [2]; | ||
212 | unsigned long pctl_q_whole; | ||
213 | }; | ||
214 | |||
215 | /* | ||
216 | * SERROR / SERREN / SERRSET | ||
217 | */ | ||
218 | union TPAchipSERR { | ||
219 | struct { | ||
220 | unsigned serr_v_lost_uecc : 1; /* [0] */ | ||
221 | unsigned serr_v_uecc : 1; /* [1] */ | ||
222 | unsigned serr_v_cre : 1; /* [2] */ | ||
223 | unsigned serr_v_nxio : 1; /* [3] */ | ||
224 | unsigned serr_v_lost_cre : 1; /* [4] */ | ||
225 | unsigned serr_v_rsvd0 : 10; /* [14:5] */ | ||
226 | unsigned serr_v_addr : 32; /* [46:15] */ | ||
227 | unsigned serr_v_rsvd1 : 5; /* [51:47] */ | ||
228 | unsigned serr_v_source : 2; /* [53:52] */ | ||
229 | unsigned serr_v_cmd : 2; /* [55:54] */ | ||
230 | unsigned serr_v_syn : 8; /* [63:56] */ | ||
231 | } serr_r_bits; | ||
232 | unsigned int serr_l_whole[2]; | ||
233 | unsigned long serr_q_whole; | ||
234 | }; | ||
235 | |||
236 | /* | ||
237 | * GPERROR / APERROR / GPERREN / APERREN / GPERRSET / APERRSET | ||
238 | */ | ||
239 | union TPAchipPERR { | ||
240 | struct { | ||
241 | unsigned long perr_v_lost : 1; /* [0] */ | ||
242 | unsigned long perr_v_serr : 1; /* [1] */ | ||
243 | unsigned long perr_v_perr : 1; /* [2] */ | ||
244 | unsigned long perr_v_dcrto : 1; /* [3] */ | ||
245 | unsigned long perr_v_sge : 1; /* [4] */ | ||
246 | unsigned long perr_v_ape : 1; /* [5] */ | ||
247 | unsigned long perr_v_ta : 1; /* [6] */ | ||
248 | unsigned long perr_v_dpe : 1; /* [7] */ | ||
249 | unsigned long perr_v_nds : 1; /* [8] */ | ||
250 | unsigned long perr_v_iptpr : 1; /* [9] */ | ||
251 | unsigned long perr_v_iptpw : 1; /* [10] */ | ||
252 | unsigned long perr_v_rsvd0 : 3; /* [13:11] */ | ||
253 | unsigned long perr_v_addr : 33; /* [46:14] */ | ||
254 | unsigned long perr_v_dac : 1; /* [47] */ | ||
255 | unsigned long perr_v_mwin : 1; /* [48] */ | ||
256 | unsigned long perr_v_rsvd1 : 3; /* [51:49] */ | ||
257 | unsigned long perr_v_cmd : 4; /* [55:52] */ | ||
258 | unsigned long perr_v_rsvd2 : 8; /* [63:56] */ | ||
259 | } perr_r_bits; | ||
260 | unsigned int perr_l_whole[2]; | ||
261 | unsigned long perr_q_whole; | ||
262 | }; | ||
263 | |||
264 | /* | ||
265 | * AGPERROR / AGPERREN / AGPERRSET | ||
266 | */ | ||
267 | union TPAchipAGPERR { | ||
268 | struct { | ||
269 | unsigned agperr_v_lost : 1; /* [0] */ | ||
270 | unsigned agperr_v_lpqfull : 1; /* [1] */ | ||
271 | unsigned agperr_v_hpqfull : 1; /* [2] */ | ||
272 | unsigned agperr_v_rescmd : 1; /* [3] */ | ||
273 | unsigned agperr_v_ipte : 1; /* [4] */ | ||
274 | unsigned agperr_v_ptp : 1; /* [5] */ | ||
275 | unsigned agperr_v_nowindow : 1; /* [6] */ | ||
276 | unsigned agperr_v_rsvd0 : 8; /* [14:7] */ | ||
277 | unsigned agperr_v_addr : 32; /* [46:15] */ | ||
278 | unsigned agperr_v_rsvd1 : 1; /* [47] */ | ||
279 | unsigned agperr_v_dac : 1; /* [48] */ | ||
280 | unsigned agperr_v_mwin : 1; /* [49] */ | ||
281 | unsigned agperr_v_cmd : 3; /* [52:50] */ | ||
282 | unsigned agperr_v_length : 6; /* [58:53] */ | ||
283 | unsigned agperr_v_fence : 1; /* [59] */ | ||
284 | unsigned agperr_v_rsvd2 : 4; /* [63:60] */ | ||
285 | } agperr_r_bits; | ||
286 | unsigned int agperr_l_whole[2]; | ||
287 | unsigned long agperr_q_whole; | ||
288 | }; | ||
289 | /* | ||
290 | * Memory spaces: | ||
291 | * Hose numbers are assigned as follows: | ||
292 | * 0 - pachip 0 / G Port | ||
293 | * 1 - pachip 1 / G Port | ||
294 | * 2 - pachip 0 / A Port | ||
295 | * 3 - pachip 1 / A Port | ||
296 | */ | ||
297 | #define TITAN_HOSE_SHIFT (33) | ||
298 | #define TITAN_HOSE(h) (((unsigned long)(h)) << TITAN_HOSE_SHIFT) | ||
299 | #define TITAN_BASE (IDENT_ADDR + TI_BIAS) | ||
300 | #define TITAN_MEM(h) (TITAN_BASE+TITAN_HOSE(h)+0x000000000UL) | ||
301 | #define _TITAN_IACK_SC(h) (TITAN_BASE+TITAN_HOSE(h)+0x1F8000000UL) | ||
302 | #define TITAN_IO(h) (TITAN_BASE+TITAN_HOSE(h)+0x1FC000000UL) | ||
303 | #define TITAN_CONF(h) (TITAN_BASE+TITAN_HOSE(h)+0x1FE000000UL) | ||
304 | |||
305 | #define TITAN_HOSE_MASK TITAN_HOSE(3) | ||
306 | #define TITAN_IACK_SC _TITAN_IACK_SC(0) /* hack! */ | ||
307 | |||
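The hose layout above is easiest to see with numbers plugged in. A minimal sketch follows; TITAN_BASE is replaced by a placeholder because it depends on IDENT_ADDR and TI_BIAS, so only the per-hose offsets are meaningful.

/* Illustrative only: per-hose window layout implied by the macros above. */
#include <stdio.h>

#define FAKE_TITAN_BASE	0x0UL			/* placeholder for IDENT_ADDR + TI_BIAS */
#define HOSE(h)		((unsigned long)(h) << 33)

int main(void)
{
	int h = 2;				/* hose 2: pachip 0 / A Port */
	unsigned long mem  = FAKE_TITAN_BASE + HOSE(h) + 0x000000000UL;
	unsigned long io   = FAKE_TITAN_BASE + HOSE(h) + 0x1FC000000UL;
	unsigned long conf = FAKE_TITAN_BASE + HOSE(h) + 0x1FE000000UL;

	printf("hose %d: mem=%#lx io=%#lx conf=%#lx\n", h, mem, io, conf);
	return 0;
}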
308 | /* | ||
309 | * The canonical non-remapped I/O and MEM addresses have these values | ||
310 | * subtracted out. This is arranged so that folks manipulating ISA | ||
311 | * devices can use their familiar numbers and have them map to bus 0. | ||
312 | */ | ||
313 | |||
314 | #define TITAN_IO_BIAS TITAN_IO(0) | ||
315 | #define TITAN_MEM_BIAS TITAN_MEM(0) | ||
316 | |||
317 | /* The IO address space is larger than 0xffff */ | ||
318 | #define TITAN_IO_SPACE (TITAN_CONF(0) - TITAN_IO(0)) | ||
319 | |||
320 | /* TIG Space */ | ||
321 | #define TITAN_TIG_SPACE (TITAN_BASE + 0x100000000UL) | ||
322 | |||
323 | /* Offset between ram physical addresses and pci64 DAC bus addresses. */ | ||
324 | /* ??? Just a guess. Ought to confirm it hasn't been moved. */ | ||
325 | #define TITAN_DAC_OFFSET (1UL << 40) | ||
326 | |||
327 | /* | ||
328 | * Data structure for handling TITAN machine checks: | ||
329 | */ | ||
330 | #define SCB_Q_SYSERR 0x620 | ||
331 | #define SCB_Q_PROCERR 0x630 | ||
332 | #define SCB_Q_SYSMCHK 0x660 | ||
333 | #define SCB_Q_PROCMCHK 0x670 | ||
334 | #define SCB_Q_SYSEVENT 0x680 /* environmental / system management */ | ||
335 | struct el_TITAN_sysdata_mcheck { | ||
336 | u64 summary; /* 0x00 */ | ||
337 | u64 c_dirx; /* 0x08 */ | ||
338 | u64 c_misc; /* 0x10 */ | ||
339 | u64 p0_serror; /* 0x18 */ | ||
340 | u64 p0_gperror; /* 0x20 */ | ||
341 | u64 p0_aperror; /* 0x28 */ | ||
342 | u64 p0_agperror;/* 0x30 */ | ||
343 | u64 p1_serror; /* 0x38 */ | ||
344 | u64 p1_gperror; /* 0x40 */ | ||
345 | u64 p1_aperror; /* 0x48 */ | ||
346 | u64 p1_agperror;/* 0x50 */ | ||
347 | }; | ||
348 | |||
349 | /* | ||
350 | * System area for a Privateer 680 environmental/system management mcheck | ||
351 | */ | ||
352 | struct el_PRIVATEER_envdata_mcheck { | ||
353 | u64 summary; /* 0x00 */ | ||
354 | u64 c_dirx; /* 0x08 */ | ||
355 | u64 smir; /* 0x10 */ | ||
356 | u64 cpuir; /* 0x18 */ | ||
357 | u64 psir; /* 0x20 */ | ||
358 | u64 fault; /* 0x28 */ | ||
359 | u64 sys_doors; /* 0x30 */ | ||
360 | u64 temp_warn; /* 0x38 */ | ||
361 | u64 fan_ctrl; /* 0x40 */ | ||
362 | u64 code; /* 0x48 */ | ||
363 | u64 reserved; /* 0x50 */ | ||
364 | }; | ||
365 | |||
366 | #ifdef __KERNEL__ | ||
367 | |||
368 | #ifndef __EXTERN_INLINE | ||
369 | #define __EXTERN_INLINE extern inline | ||
370 | #define __IO_EXTERN_INLINE | ||
371 | #endif | ||
372 | |||
373 | /* | ||
374 | * I/O functions: | ||
375 | * | ||
376 | * TITAN, a 21??? PCI/memory support chipset for the EV6 (21264), | ||
377 | * can only use linear accesses to get at PCI/AGP memory and I/O spaces. | ||
378 | */ | ||
379 | |||
380 | /* | ||
381 | * Memory functions. All accesses are done through linear space. | ||
382 | */ | ||
383 | extern void __iomem *titan_ioportmap(unsigned long addr); | ||
384 | extern void __iomem *titan_ioremap(unsigned long addr, unsigned long size); | ||
385 | extern void titan_iounmap(volatile void __iomem *addr); | ||
386 | |||
387 | __EXTERN_INLINE int titan_is_ioaddr(unsigned long addr) | ||
388 | { | ||
389 | return addr >= TITAN_BASE; | ||
390 | } | ||
391 | |||
392 | extern int titan_is_mmio(const volatile void __iomem *addr); | ||
393 | |||
394 | #undef __IO_PREFIX | ||
395 | #define __IO_PREFIX titan | ||
396 | #define titan_trivial_rw_bw 1 | ||
397 | #define titan_trivial_rw_lq 1 | ||
398 | #define titan_trivial_io_bw 1 | ||
399 | #define titan_trivial_io_lq 1 | ||
400 | #define titan_trivial_iounmap 0 | ||
401 | #include <asm/io_trivial.h> | ||
402 | |||
403 | #ifdef __IO_EXTERN_INLINE | ||
404 | #undef __EXTERN_INLINE | ||
405 | #undef __IO_EXTERN_INLINE | ||
406 | #endif | ||
407 | |||
408 | #endif /* __KERNEL__ */ | ||
409 | |||
410 | #endif /* __ALPHA_TITAN__H__ */ | ||
diff --git a/arch/alpha/include/asm/core_tsunami.h b/arch/alpha/include/asm/core_tsunami.h new file mode 100644 index 000000000000..58d4fe48742c --- /dev/null +++ b/arch/alpha/include/asm/core_tsunami.h | |||
@@ -0,0 +1,335 @@ | |||
1 | #ifndef __ALPHA_TSUNAMI__H__ | ||
2 | #define __ALPHA_TSUNAMI__H__ | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <linux/pci.h> | ||
6 | #include <asm/compiler.h> | ||
7 | |||
8 | /* | ||
9 | * TSUNAMI/TYPHOON are the internal names for the core logic chipset which | ||
10 | * provides memory controller and PCI access for the 21264 based systems. | ||
11 | * | ||
12 | * This file is based on: | ||
13 | * | ||
14 | * Tsunami System Programmers Manual | ||
15 | * Preliminary, Chapters 2-5 | ||
16 | * | ||
17 | */ | ||
18 | |||
19 | /* XXX: Do we need to conditionalize on this? */ | ||
20 | #ifdef USE_48_BIT_KSEG | ||
21 | #define TS_BIAS 0x80000000000UL | ||
22 | #else | ||
23 | #define TS_BIAS 0x10000000000UL | ||
24 | #endif | ||
25 | |||
26 | /* | ||
27 | * CChip, DChip, and PChip registers | ||
28 | */ | ||
29 | |||
30 | typedef struct { | ||
31 | volatile unsigned long csr __attribute__((aligned(64))); | ||
32 | } tsunami_64; | ||
33 | |||
34 | typedef struct { | ||
35 | tsunami_64 csc; | ||
36 | tsunami_64 mtr; | ||
37 | tsunami_64 misc; | ||
38 | tsunami_64 mpd; | ||
39 | tsunami_64 aar0; | ||
40 | tsunami_64 aar1; | ||
41 | tsunami_64 aar2; | ||
42 | tsunami_64 aar3; | ||
43 | tsunami_64 dim0; | ||
44 | tsunami_64 dim1; | ||
45 | tsunami_64 dir0; | ||
46 | tsunami_64 dir1; | ||
47 | tsunami_64 drir; | ||
48 | tsunami_64 prben; | ||
49 | tsunami_64 iic; /* a.k.a. iic0 */ | ||
50 | tsunami_64 wdr; /* a.k.a. iic1 */ | ||
51 | tsunami_64 mpr0; | ||
52 | tsunami_64 mpr1; | ||
53 | tsunami_64 mpr2; | ||
54 | tsunami_64 mpr3; | ||
55 | tsunami_64 mctl; | ||
56 | tsunami_64 __pad1; | ||
57 | tsunami_64 ttr; | ||
58 | tsunami_64 tdr; | ||
59 | tsunami_64 dim2; | ||
60 | tsunami_64 dim3; | ||
61 | tsunami_64 dir2; | ||
62 | tsunami_64 dir3; | ||
63 | tsunami_64 iic2; | ||
64 | tsunami_64 iic3; | ||
65 | } tsunami_cchip; | ||
66 | |||
67 | typedef struct { | ||
68 | tsunami_64 dsc; | ||
69 | tsunami_64 str; | ||
70 | tsunami_64 drev; | ||
71 | } tsunami_dchip; | ||
72 | |||
73 | typedef struct { | ||
74 | tsunami_64 wsba[4]; | ||
75 | tsunami_64 wsm[4]; | ||
76 | tsunami_64 tba[4]; | ||
77 | tsunami_64 pctl; | ||
78 | tsunami_64 plat; | ||
79 | tsunami_64 reserved; | ||
80 | tsunami_64 perror; | ||
81 | tsunami_64 perrmask; | ||
82 | tsunami_64 perrset; | ||
83 | tsunami_64 tlbiv; | ||
84 | tsunami_64 tlbia; | ||
85 | tsunami_64 pmonctl; | ||
86 | tsunami_64 pmoncnt; | ||
87 | } tsunami_pchip; | ||
88 | |||
89 | #define TSUNAMI_cchip ((tsunami_cchip *)(IDENT_ADDR+TS_BIAS+0x1A0000000UL)) | ||
90 | #define TSUNAMI_dchip ((tsunami_dchip *)(IDENT_ADDR+TS_BIAS+0x1B0000800UL)) | ||
91 | #define TSUNAMI_pchip0 ((tsunami_pchip *)(IDENT_ADDR+TS_BIAS+0x180000000UL)) | ||
92 | #define TSUNAMI_pchip1 ((tsunami_pchip *)(IDENT_ADDR+TS_BIAS+0x380000000UL)) | ||
93 | extern int TSUNAMI_bootcpu; | ||
94 | |||
95 | /* | ||
96 | * TSUNAMI Pchip Error register. | ||
97 | */ | ||
98 | |||
99 | #define perror_m_lost 0x1 | ||
100 | #define perror_m_serr 0x2 | ||
101 | #define perror_m_perr 0x4 | ||
102 | #define perror_m_dcrto 0x8 | ||
103 | #define perror_m_sge 0x10 | ||
104 | #define perror_m_ape 0x20 | ||
105 | #define perror_m_ta 0x40 | ||
106 | #define perror_m_rdpe 0x80 | ||
107 | #define perror_m_nds 0x100 | ||
108 | #define perror_m_rto 0x200 | ||
109 | #define perror_m_uecc 0x400 | ||
110 | #define perror_m_cre 0x800 | ||
111 | #define perror_m_addrl 0xFFFFFFFF0000UL | ||
112 | #define perror_m_addrh 0x7000000000000UL | ||
113 | #define perror_m_cmd 0xF0000000000000UL | ||
114 | #define perror_m_syn 0xFF00000000000000UL | ||
115 | union TPchipPERROR { | ||
116 | struct { | ||
117 | unsigned int perror_v_lost : 1; | ||
118 | unsigned perror_v_serr : 1; | ||
119 | unsigned perror_v_perr : 1; | ||
120 | unsigned perror_v_dcrto : 1; | ||
121 | unsigned perror_v_sge : 1; | ||
122 | unsigned perror_v_ape : 1; | ||
123 | unsigned perror_v_ta : 1; | ||
124 | unsigned perror_v_rdpe : 1; | ||
125 | unsigned perror_v_nds : 1; | ||
126 | unsigned perror_v_rto : 1; | ||
127 | unsigned perror_v_uecc : 1; | ||
128 | unsigned perror_v_cre : 1; | ||
129 | unsigned perror_v_rsvd1 : 4; | ||
130 | unsigned perror_v_addrl : 32; | ||
131 | unsigned perror_v_addrh : 3; | ||
132 | unsigned perror_v_rsvd2 : 1; | ||
133 | unsigned perror_v_cmd : 4; | ||
134 | unsigned perror_v_syn : 8; | ||
135 | } perror_r_bits; | ||
136 | int perror_q_whole [2]; | ||
137 | }; | ||
138 | |||
139 | /* | ||
140 | * TSUNAMI Pchip Window Space Base Address register. | ||
141 | */ | ||
142 | #define wsba_m_ena 0x1 | ||
143 | #define wsba_m_sg 0x2 | ||
144 | #define wsba_m_ptp 0x4 | ||
145 | #define wsba_m_addr 0xFFF00000 | ||
146 | #define wmask_k_sz1gb 0x3FF00000 | ||
147 | union TPchipWSBA { | ||
148 | struct { | ||
149 | unsigned wsba_v_ena : 1; | ||
150 | unsigned wsba_v_sg : 1; | ||
151 | unsigned wsba_v_ptp : 1; | ||
152 | unsigned wsba_v_rsvd1 : 17; | ||
153 | unsigned wsba_v_addr : 12; | ||
154 | unsigned wsba_v_rsvd2 : 32; | ||
155 | } wsba_r_bits; | ||
156 | int wsba_q_whole [2]; | ||
157 | }; | ||
158 | |||
159 | /* | ||
160 | * TSUNAMI Pchip Control Register | ||
161 | */ | ||
162 | #define pctl_m_fdsc 0x1 | ||
163 | #define pctl_m_fbtb 0x2 | ||
164 | #define pctl_m_thdis 0x4 | ||
165 | #define pctl_m_chaindis 0x8 | ||
166 | #define pctl_m_tgtlat 0x10 | ||
167 | #define pctl_m_hole 0x20 | ||
168 | #define pctl_m_mwin 0x40 | ||
169 | #define pctl_m_arbena 0x80 | ||
170 | #define pctl_m_prigrp 0x7F00 | ||
171 | #define pctl_m_ppri 0x8000 | ||
172 | #define pctl_m_rsvd1 0x30000 | ||
173 | #define pctl_m_eccen 0x40000 | ||
174 | #define pctl_m_padm 0x80000 | ||
175 | #define pctl_m_cdqmax 0xF00000 | ||
176 | #define pctl_m_rev 0xFF000000 | ||
177 | #define pctl_m_crqmax 0xF00000000UL | ||
178 | #define pctl_m_ptpmax 0xF000000000UL | ||
179 | #define pctl_m_pclkx 0x30000000000UL | ||
180 | #define pctl_m_fdsdis 0x40000000000UL | ||
181 | #define pctl_m_fdwdis 0x80000000000UL | ||
182 | #define pctl_m_ptevrfy 0x100000000000UL | ||
183 | #define pctl_m_rpp 0x200000000000UL | ||
184 | #define pctl_m_pid 0xC00000000000UL | ||
185 | #define pctl_m_rsvd2 0xFFFF000000000000UL | ||
186 | |||
187 | union TPchipPCTL { | ||
188 | struct { | ||
189 | unsigned pctl_v_fdsc : 1; | ||
190 | unsigned pctl_v_fbtb : 1; | ||
191 | unsigned pctl_v_thdis : 1; | ||
192 | unsigned pctl_v_chaindis : 1; | ||
193 | unsigned pctl_v_tgtlat : 1; | ||
194 | unsigned pctl_v_hole : 1; | ||
195 | unsigned pctl_v_mwin : 1; | ||
196 | unsigned pctl_v_arbena : 1; | ||
197 | unsigned pctl_v_prigrp : 7; | ||
198 | unsigned pctl_v_ppri : 1; | ||
199 | unsigned pctl_v_rsvd1 : 2; | ||
200 | unsigned pctl_v_eccen : 1; | ||
201 | unsigned pctl_v_padm : 1; | ||
202 | unsigned pctl_v_cdqmax : 4; | ||
203 | unsigned pctl_v_rev : 8; | ||
204 | unsigned pctl_v_crqmax : 4; | ||
205 | unsigned pctl_v_ptpmax : 4; | ||
206 | unsigned pctl_v_pclkx : 2; | ||
207 | unsigned pctl_v_fdsdis : 1; | ||
208 | unsigned pctl_v_fdwdis : 1; | ||
209 | unsigned pctl_v_ptevrfy : 1; | ||
210 | unsigned pctl_v_rpp : 1; | ||
211 | unsigned pctl_v_pid : 2; | ||
212 | unsigned pctl_v_rsvd2 : 16; | ||
213 | } pctl_r_bits; | ||
214 | int pctl_q_whole [2]; | ||
215 | }; | ||
216 | |||
217 | /* | ||
218 | * TSUNAMI Pchip Error Mask Register. | ||
219 | */ | ||
220 | #define perrmask_m_lost 0x1 | ||
221 | #define perrmask_m_serr 0x2 | ||
222 | #define perrmask_m_perr 0x4 | ||
223 | #define perrmask_m_dcrto 0x8 | ||
224 | #define perrmask_m_sge 0x10 | ||
225 | #define perrmask_m_ape 0x20 | ||
226 | #define perrmask_m_ta 0x40 | ||
227 | #define perrmask_m_rdpe 0x80 | ||
228 | #define perrmask_m_nds 0x100 | ||
229 | #define perrmask_m_rto 0x200 | ||
230 | #define perrmask_m_uecc 0x400 | ||
231 | #define perrmask_m_cre 0x800 | ||
232 | #define perrmask_m_rsvd 0xFFFFFFFFFFFFF000UL | ||
233 | union TPchipPERRMASK { | ||
234 | struct { | ||
235 | unsigned int perrmask_v_lost : 1; | ||
236 | unsigned perrmask_v_serr : 1; | ||
237 | unsigned perrmask_v_perr : 1; | ||
238 | unsigned perrmask_v_dcrto : 1; | ||
239 | unsigned perrmask_v_sge : 1; | ||
240 | unsigned perrmask_v_ape : 1; | ||
241 | unsigned perrmask_v_ta : 1; | ||
242 | unsigned perrmask_v_rdpe : 1; | ||
243 | unsigned perrmask_v_nds : 1; | ||
244 | unsigned perrmask_v_rto : 1; | ||
245 | unsigned perrmask_v_uecc : 1; | ||
246 | unsigned perrmask_v_cre : 1; | ||
247 | unsigned perrmask_v_rsvd1 : 20; | ||
248 | unsigned perrmask_v_rsvd2 : 32; | ||
249 | } perrmask_r_bits; | ||
250 | int perrmask_q_whole [2]; | ||
251 | }; | ||
252 | |||
253 | /* | ||
254 | * Memory spaces: | ||
255 | */ | ||
256 | #define TSUNAMI_HOSE(h) (((unsigned long)(h)) << 33) | ||
257 | #define TSUNAMI_BASE (IDENT_ADDR + TS_BIAS) | ||
258 | |||
259 | #define TSUNAMI_MEM(h) (TSUNAMI_BASE+TSUNAMI_HOSE(h) + 0x000000000UL) | ||
260 | #define _TSUNAMI_IACK_SC(h) (TSUNAMI_BASE+TSUNAMI_HOSE(h) + 0x1F8000000UL) | ||
261 | #define TSUNAMI_IO(h) (TSUNAMI_BASE+TSUNAMI_HOSE(h) + 0x1FC000000UL) | ||
262 | #define TSUNAMI_CONF(h) (TSUNAMI_BASE+TSUNAMI_HOSE(h) + 0x1FE000000UL) | ||
263 | |||
264 | #define TSUNAMI_IACK_SC _TSUNAMI_IACK_SC(0) /* hack! */ | ||
265 | |||
266 | |||
267 | /* | ||
268 | * The canonical non-remapped I/O and MEM addresses have these values | ||
269 | * subtracted out. This is arranged so that folks manipulating ISA | ||
270 | * devices can use their familiar numbers and have them map to bus 0. | ||
271 | */ | ||
272 | |||
273 | #define TSUNAMI_IO_BIAS TSUNAMI_IO(0) | ||
274 | #define TSUNAMI_MEM_BIAS TSUNAMI_MEM(0) | ||
275 | |||
276 | /* The IO address space is larger than 0xffff */ | ||
277 | #define TSUNAMI_IO_SPACE (TSUNAMI_CONF(0) - TSUNAMI_IO(0)) | ||
278 | |||
279 | /* Offset between ram physical addresses and pci64 DAC bus addresses. */ | ||
280 | #define TSUNAMI_DAC_OFFSET (1UL << 40) | ||
281 | |||
282 | /* | ||
283 | * Data structure for handling TSUNAMI machine checks: | ||
284 | */ | ||
285 | struct el_TSUNAMI_sysdata_mcheck { | ||
286 | }; | ||
287 | |||
288 | |||
289 | #ifdef __KERNEL__ | ||
290 | |||
291 | #ifndef __EXTERN_INLINE | ||
292 | #define __EXTERN_INLINE extern inline | ||
293 | #define __IO_EXTERN_INLINE | ||
294 | #endif | ||
295 | |||
296 | /* | ||
297 | * I/O functions: | ||
298 | * | ||
299 | * TSUNAMI, the 21??? PCI/memory support chipset for the EV6 (21264), | ||
300 | * can only use linear accesses to get at PCI memory and I/O spaces. | ||
301 | */ | ||
302 | |||
303 | /* | ||
304 | * Memory functions. All accesses are done through linear space. | ||
305 | */ | ||
306 | extern void __iomem *tsunami_ioportmap(unsigned long addr); | ||
307 | extern void __iomem *tsunami_ioremap(unsigned long addr, unsigned long size); | ||
308 | __EXTERN_INLINE int tsunami_is_ioaddr(unsigned long addr) | ||
309 | { | ||
310 | return addr >= TSUNAMI_BASE; | ||
311 | } | ||
312 | |||
313 | __EXTERN_INLINE int tsunami_is_mmio(const volatile void __iomem *xaddr) | ||
314 | { | ||
315 | unsigned long addr = (unsigned long) xaddr; | ||
316 | return (addr & 0x100000000UL) == 0; | ||
317 | } | ||
318 | |||
319 | #undef __IO_PREFIX | ||
320 | #define __IO_PREFIX tsunami | ||
321 | #define tsunami_trivial_rw_bw 1 | ||
322 | #define tsunami_trivial_rw_lq 1 | ||
323 | #define tsunami_trivial_io_bw 1 | ||
324 | #define tsunami_trivial_io_lq 1 | ||
325 | #define tsunami_trivial_iounmap 1 | ||
326 | #include <asm/io_trivial.h> | ||
327 | |||
328 | #ifdef __IO_EXTERN_INLINE | ||
329 | #undef __EXTERN_INLINE | ||
330 | #undef __IO_EXTERN_INLINE | ||
331 | #endif | ||
332 | |||
333 | #endif /* __KERNEL__ */ | ||
334 | |||
335 | #endif /* __ALPHA_TSUNAMI__H__ */ | ||
diff --git a/arch/alpha/include/asm/core_wildfire.h b/arch/alpha/include/asm/core_wildfire.h new file mode 100644 index 000000000000..cd562f544ba2 --- /dev/null +++ b/arch/alpha/include/asm/core_wildfire.h | |||
@@ -0,0 +1,318 @@ | |||
1 | #ifndef __ALPHA_WILDFIRE__H__ | ||
2 | #define __ALPHA_WILDFIRE__H__ | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <asm/compiler.h> | ||
6 | |||
7 | #define WILDFIRE_MAX_QBB 8 /* more than 8 requires other mods */ | ||
8 | #define WILDFIRE_PCA_PER_QBB 4 | ||
9 | #define WILDFIRE_IRQ_PER_PCA 64 | ||
10 | |||
11 | #define WILDFIRE_NR_IRQS \ | ||
12 | (WILDFIRE_MAX_QBB * WILDFIRE_PCA_PER_QBB * WILDFIRE_IRQ_PER_PCA) | ||
13 | |||
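With the limits above, WILDFIRE_NR_IRQS works out to 8 * 4 * 64 = 2048 possible interrupt sources.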
14 | extern unsigned char wildfire_hard_qbb_map[WILDFIRE_MAX_QBB]; | ||
15 | extern unsigned char wildfire_soft_qbb_map[WILDFIRE_MAX_QBB]; | ||
16 | #define QBB_MAP_EMPTY 0xff | ||
17 | |||
18 | extern unsigned long wildfire_hard_qbb_mask; | ||
19 | extern unsigned long wildfire_soft_qbb_mask; | ||
20 | extern unsigned long wildfire_gp_mask; | ||
21 | extern unsigned long wildfire_hs_mask; | ||
22 | extern unsigned long wildfire_iop_mask; | ||
23 | extern unsigned long wildfire_ior_mask; | ||
24 | extern unsigned long wildfire_pca_mask; | ||
25 | extern unsigned long wildfire_cpu_mask; | ||
26 | extern unsigned long wildfire_mem_mask; | ||
27 | |||
28 | #define WILDFIRE_QBB_EXISTS(qbbno) (wildfire_soft_qbb_mask & (1 << (qbbno))) | ||
29 | |||
30 | #define WILDFIRE_MEM_EXISTS(qbbno) (wildfire_mem_mask & (0xf << ((qbbno) << 2))) | ||
31 | |||
32 | #define WILDFIRE_PCA_EXISTS(qbbno, pcano) \ | ||
33 | (wildfire_pca_mask & (1 << (((qbbno) << 2) + (pcano)))) | ||
34 | |||
35 | typedef struct { | ||
36 | volatile unsigned long csr __attribute__((aligned(64))); | ||
37 | } wildfire_64; | ||
38 | |||
39 | typedef struct { | ||
40 | volatile unsigned long csr __attribute__((aligned(256))); | ||
41 | } wildfire_256; | ||
42 | |||
43 | typedef struct { | ||
44 | volatile unsigned long csr __attribute__((aligned(2048))); | ||
45 | } wildfire_2k; | ||
46 | |||
47 | typedef struct { | ||
48 | wildfire_64 qsd_whami; | ||
49 | wildfire_64 qsd_rev; | ||
50 | wildfire_64 qsd_port_present; | ||
51 | wildfire_64 qsd_port_active; | ||
52 | wildfire_64 qsd_fault_ena; | ||
53 | wildfire_64 qsd_cpu_int_ena; | ||
54 | wildfire_64 qsd_mem_config; | ||
55 | wildfire_64 qsd_err_sum; | ||
56 | wildfire_64 ce_sum[4]; | ||
57 | wildfire_64 dev_init[4]; | ||
58 | wildfire_64 it_int[4]; | ||
59 | wildfire_64 ip_int[4]; | ||
60 | wildfire_64 uce_sum[4]; | ||
61 | wildfire_64 se_sum__non_dev_int[4]; | ||
62 | wildfire_64 scratch[4]; | ||
63 | wildfire_64 qsd_timer; | ||
64 | wildfire_64 qsd_diag; | ||
65 | } wildfire_qsd; | ||
66 | |||
67 | typedef struct { | ||
68 | wildfire_256 qsd_whami; | ||
69 | wildfire_256 __pad1; | ||
70 | wildfire_256 ce_sum; | ||
71 | wildfire_256 dev_init; | ||
72 | wildfire_256 it_int; | ||
73 | wildfire_256 ip_int; | ||
74 | wildfire_256 uce_sum; | ||
75 | wildfire_256 se_sum; | ||
76 | } wildfire_fast_qsd; | ||
77 | |||
78 | typedef struct { | ||
79 | wildfire_2k qsa_qbb_id; | ||
80 | wildfire_2k __pad1; | ||
81 | wildfire_2k qsa_port_ena; | ||
82 | wildfire_2k qsa_scratch; | ||
83 | wildfire_2k qsa_config[5]; | ||
84 | wildfire_2k qsa_ref_int; | ||
85 | wildfire_2k qsa_qbb_pop[2]; | ||
86 | wildfire_2k qsa_dtag_fc; | ||
87 | wildfire_2k __pad2[3]; | ||
88 | wildfire_2k qsa_diag; | ||
89 | wildfire_2k qsa_diag_lock[4]; | ||
90 | wildfire_2k __pad3[11]; | ||
91 | wildfire_2k qsa_cpu_err_sum; | ||
92 | wildfire_2k qsa_misc_err_sum; | ||
93 | wildfire_2k qsa_tmo_err_sum; | ||
94 | wildfire_2k qsa_err_ena; | ||
95 | wildfire_2k qsa_tmo_config; | ||
96 | wildfire_2k qsa_ill_cmd_err_sum; | ||
97 | wildfire_2k __pad4[26]; | ||
98 | wildfire_2k qsa_busy_mask; | ||
99 | wildfire_2k qsa_arr_valid; | ||
100 | wildfire_2k __pad5[2]; | ||
101 | wildfire_2k qsa_port_map[4]; | ||
102 | wildfire_2k qsa_arr_addr[8]; | ||
103 | wildfire_2k qsa_arr_mask[8]; | ||
104 | } wildfire_qsa; | ||
105 | |||
106 | typedef struct { | ||
107 | wildfire_64 ioa_config; | ||
108 | wildfire_64 iod_config; | ||
109 | wildfire_64 iop_switch_credits; | ||
110 | wildfire_64 __pad1; | ||
111 | wildfire_64 iop_hose_credits; | ||
112 | wildfire_64 __pad2[11]; | ||
113 | struct { | ||
114 | wildfire_64 __pad3; | ||
115 | wildfire_64 init; | ||
116 | } iop_hose[4]; | ||
117 | wildfire_64 ioa_hose_0_ctrl; | ||
118 | wildfire_64 iod_hose_0_ctrl; | ||
119 | wildfire_64 ioa_hose_1_ctrl; | ||
120 | wildfire_64 iod_hose_1_ctrl; | ||
121 | wildfire_64 ioa_hose_2_ctrl; | ||
122 | wildfire_64 iod_hose_2_ctrl; | ||
123 | wildfire_64 ioa_hose_3_ctrl; | ||
124 | wildfire_64 iod_hose_3_ctrl; | ||
125 | struct { | ||
126 | wildfire_64 target; | ||
127 | wildfire_64 __pad4; | ||
128 | } iop_dev_int[4]; | ||
129 | |||
130 | wildfire_64 iop_err_int_target; | ||
131 | wildfire_64 __pad5[7]; | ||
132 | wildfire_64 iop_qbb_err_sum; | ||
133 | wildfire_64 __pad6; | ||
134 | wildfire_64 iop_qbb_se_sum; | ||
135 | wildfire_64 __pad7; | ||
136 | wildfire_64 ioa_err_sum; | ||
137 | wildfire_64 iod_err_sum; | ||
138 | wildfire_64 __pad8[4]; | ||
139 | wildfire_64 ioa_diag_force_err; | ||
140 | wildfire_64 iod_diag_force_err; | ||
141 | wildfire_64 __pad9[4]; | ||
142 | wildfire_64 iop_diag_send_err_int; | ||
143 | wildfire_64 __pad10[15]; | ||
144 | wildfire_64 ioa_scratch; | ||
145 | wildfire_64 iod_scratch; | ||
146 | } wildfire_iop; | ||
147 | |||
148 | typedef struct { | ||
149 | wildfire_2k gpa_qbb_map[4]; | ||
150 | wildfire_2k gpa_mem_pop_map; | ||
151 | wildfire_2k gpa_scratch; | ||
152 | wildfire_2k gpa_diag; | ||
153 | wildfire_2k gpa_config_0; | ||
154 | wildfire_2k __pad1; | ||
155 | wildfire_2k gpa_init_id; | ||
156 | wildfire_2k gpa_config_2; | ||
157 | /* not complete */ | ||
158 | } wildfire_gp; | ||
159 | |||
160 | typedef struct { | ||
161 | wildfire_64 pca_what_am_i; | ||
162 | wildfire_64 pca_err_sum; | ||
163 | wildfire_64 pca_diag_force_err; | ||
164 | wildfire_64 pca_diag_send_err_int; | ||
165 | wildfire_64 pca_hose_credits; | ||
166 | wildfire_64 pca_scratch; | ||
167 | wildfire_64 pca_micro_addr; | ||
168 | wildfire_64 pca_micro_data; | ||
169 | wildfire_64 pca_pend_int; | ||
170 | wildfire_64 pca_sent_int; | ||
171 | wildfire_64 __pad1; | ||
172 | wildfire_64 pca_stdio_edge_level; | ||
173 | wildfire_64 __pad2[52]; | ||
174 | struct { | ||
175 | wildfire_64 target; | ||
176 | wildfire_64 enable; | ||
177 | } pca_int[4]; | ||
178 | wildfire_64 __pad3[56]; | ||
179 | wildfire_64 pca_alt_sent_int[32]; | ||
180 | } wildfire_pca; | ||
181 | |||
182 | typedef struct { | ||
183 | wildfire_64 ne_what_am_i; | ||
184 | /* not complete */ | ||
185 | } wildfire_ne; | ||
186 | |||
187 | typedef struct { | ||
188 | wildfire_64 fe_what_am_i; | ||
189 | /* not complete */ | ||
190 | } wildfire_fe; | ||
191 | |||
192 | typedef struct { | ||
193 | wildfire_64 pci_io_addr_ext; | ||
194 | wildfire_64 pci_ctrl; | ||
195 | wildfire_64 pci_err_sum; | ||
196 | wildfire_64 pci_err_addr; | ||
197 | wildfire_64 pci_stall_cnt; | ||
198 | wildfire_64 pci_iack_special; | ||
199 | wildfire_64 __pad1[2]; | ||
200 | wildfire_64 pci_pend_int; | ||
201 | wildfire_64 pci_sent_int; | ||
202 | wildfire_64 __pad2[54]; | ||
203 | struct { | ||
204 | wildfire_64 wbase; | ||
205 | wildfire_64 wmask; | ||
206 | wildfire_64 tbase; | ||
207 | } pci_window[4]; | ||
208 | wildfire_64 pci_flush_tlb; | ||
209 | wildfire_64 pci_perf_mon; | ||
210 | } wildfire_pci; | ||
211 | |||
212 | #define WILDFIRE_ENTITY_SHIFT 18 | ||
213 | |||
214 | #define WILDFIRE_GP_ENTITY (0x10UL << WILDFIRE_ENTITY_SHIFT) | ||
215 | #define WILDFIRE_IOP_ENTITY (0x08UL << WILDFIRE_ENTITY_SHIFT) | ||
216 | #define WILDFIRE_QSA_ENTITY (0x04UL << WILDFIRE_ENTITY_SHIFT) | ||
217 | #define WILDFIRE_QSD_ENTITY_SLOW (0x05UL << WILDFIRE_ENTITY_SHIFT) | ||
218 | #define WILDFIRE_QSD_ENTITY_FAST (0x01UL << WILDFIRE_ENTITY_SHIFT) | ||
219 | |||
220 | #define WILDFIRE_PCA_ENTITY(pca) ((0xc|(pca))<<WILDFIRE_ENTITY_SHIFT) | ||
221 | |||
222 | #define WILDFIRE_BASE (IDENT_ADDR | (1UL << 40)) | ||
223 | |||
224 | #define WILDFIRE_QBB_MASK 0x0fUL /* for now, only 4 bits/16 QBBs */ | ||
225 | |||
226 | #define WILDFIRE_QBB(q) ((~((long)(q)) & WILDFIRE_QBB_MASK) << 36) | ||
227 | #define WILDFIRE_HOSE(h) ((long)(h) << 33) | ||
228 | |||
229 | #define WILDFIRE_QBB_IO(q) (WILDFIRE_BASE | WILDFIRE_QBB(q)) | ||
230 | #define WILDFIRE_QBB_HOSE(q,h) (WILDFIRE_QBB_IO(q) | WILDFIRE_HOSE(h)) | ||
231 | |||
232 | #define WILDFIRE_MEM(q,h) (WILDFIRE_QBB_HOSE(q,h) | 0x000000000UL) | ||
233 | #define WILDFIRE_CONF(q,h) (WILDFIRE_QBB_HOSE(q,h) | 0x1FE000000UL) | ||
234 | #define WILDFIRE_IO(q,h) (WILDFIRE_QBB_HOSE(q,h) | 0x1FF000000UL) | ||
235 | |||
236 | #define WILDFIRE_qsd(q) \ | ||
237 | ((wildfire_qsd *)(WILDFIRE_QBB_IO(q)|WILDFIRE_QSD_ENTITY_SLOW|(((1UL<<13)-1)<<23))) | ||
238 | |||
239 | #define WILDFIRE_fast_qsd() \ | ||
240 | ((wildfire_fast_qsd *)(WILDFIRE_QBB_IO(0)|WILDFIRE_QSD_ENTITY_FAST|(((1UL<<13)-1)<<23))) | ||
241 | |||
242 | #define WILDFIRE_qsa(q) \ | ||
243 | ((wildfire_qsa *)(WILDFIRE_QBB_IO(q)|WILDFIRE_QSA_ENTITY|(((1UL<<13)-1)<<23))) | ||
244 | |||
245 | #define WILDFIRE_iop(q) \ | ||
246 | ((wildfire_iop *)(WILDFIRE_QBB_IO(q)|WILDFIRE_IOP_ENTITY|(((1UL<<13)-1)<<23))) | ||
247 | |||
248 | #define WILDFIRE_gp(q) \ | ||
249 | ((wildfire_gp *)(WILDFIRE_QBB_IO(q)|WILDFIRE_GP_ENTITY|(((1UL<<13)-1)<<23))) | ||
250 | |||
251 | #define WILDFIRE_pca(q,pca) \ | ||
252 | ((wildfire_pca *)(WILDFIRE_QBB_IO(q)|WILDFIRE_PCA_ENTITY(pca)|(((1UL<<13)-1)<<23))) | ||
253 | |||
254 | #define WILDFIRE_ne(q,pca) \ | ||
255 | ((wildfire_ne *)(WILDFIRE_QBB_IO(q)|WILDFIRE_PCA_ENTITY(pca)|(((1UL<<13)-1)<<23)|(1UL<<16))) | ||
256 | |||
257 | #define WILDFIRE_fe(q,pca) \ | ||
258 | ((wildfire_fe *)(WILDFIRE_QBB_IO(q)|WILDFIRE_PCA_ENTITY(pca)|(((1UL<<13)-1)<<23)|(3UL<<15))) | ||
259 | |||
260 | #define WILDFIRE_pci(q,h) \ | ||
261 | ((wildfire_pci *)(WILDFIRE_QBB_IO(q)|WILDFIRE_PCA_ENTITY(((h)&6)>>1)|((((h)&1)|2)<<16)|(((1UL<<13)-1)<<23))) | ||
262 | |||
263 | #define WILDFIRE_IO_BIAS WILDFIRE_IO(0,0) | ||
264 | #define WILDFIRE_MEM_BIAS WILDFIRE_MEM(0,0) /* ??? */ | ||
265 | |||
266 | /* The IO address space is larger than 0xffff */ | ||
267 | #define WILDFIRE_IO_SPACE (8UL*1024*1024) | ||
268 | |||
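A small sketch of the QBB/hose address construction defined above; note that the QBB number is complemented within the 4-bit mask before being shifted. WILDFIRE_BASE is reduced to its (1UL << 40) term here because IDENT_ADDR is not reproduced, so only the relative layout is meaningful.

/* Illustrative only: mirrors WILDFIRE_QBB_HOSE()/WILDFIRE_IO() above. */
#include <stdio.h>

#define FAKE_BASE	(1UL << 40)		/* placeholder for IDENT_ADDR | (1UL << 40) */
#define QBB_MASK	0x0fUL
#define QBB(q)		((~((long)(q)) & QBB_MASK) << 36)
#define HOSE(h)		((long)(h) << 33)

int main(void)
{
	int q = 1, h = 3;
	unsigned long io = (FAKE_BASE | QBB(q) | HOSE(h)) | 0x1FF000000UL;

	printf("QBB %d, hose %d: io=%#lx\n", q, h, io);
	return 0;
}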
269 | #ifdef __KERNEL__ | ||
270 | |||
271 | #ifndef __EXTERN_INLINE | ||
272 | #define __EXTERN_INLINE extern inline | ||
273 | #define __IO_EXTERN_INLINE | ||
274 | #endif | ||
275 | |||
276 | /* | ||
277 | * Memory functions. All accesses are done through linear space. | ||
278 | */ | ||
279 | |||
280 | __EXTERN_INLINE void __iomem *wildfire_ioportmap(unsigned long addr) | ||
281 | { | ||
282 | return (void __iomem *)(addr + WILDFIRE_IO_BIAS); | ||
283 | } | ||
284 | |||
285 | __EXTERN_INLINE void __iomem *wildfire_ioremap(unsigned long addr, | ||
286 | unsigned long size) | ||
287 | { | ||
288 | return (void __iomem *)(addr + WILDFIRE_MEM_BIAS); | ||
289 | } | ||
290 | |||
291 | __EXTERN_INLINE int wildfire_is_ioaddr(unsigned long addr) | ||
292 | { | ||
293 | return addr >= WILDFIRE_BASE; | ||
294 | } | ||
295 | |||
296 | __EXTERN_INLINE int wildfire_is_mmio(const volatile void __iomem *xaddr) | ||
297 | { | ||
298 | unsigned long addr = (unsigned long)xaddr; | ||
299 | return (addr & 0x100000000UL) == 0; | ||
300 | } | ||
301 | |||
302 | #undef __IO_PREFIX | ||
303 | #define __IO_PREFIX wildfire | ||
304 | #define wildfire_trivial_rw_bw 1 | ||
305 | #define wildfire_trivial_rw_lq 1 | ||
306 | #define wildfire_trivial_io_bw 1 | ||
307 | #define wildfire_trivial_io_lq 1 | ||
308 | #define wildfire_trivial_iounmap 1 | ||
309 | #include <asm/io_trivial.h> | ||
310 | |||
311 | #ifdef __IO_EXTERN_INLINE | ||
312 | #undef __EXTERN_INLINE | ||
313 | #undef __IO_EXTERN_INLINE | ||
314 | #endif | ||
315 | |||
316 | #endif /* __KERNEL__ */ | ||
317 | |||
318 | #endif /* __ALPHA_WILDFIRE__H__ */ | ||
diff --git a/arch/alpha/include/asm/cputime.h b/arch/alpha/include/asm/cputime.h new file mode 100644 index 000000000000..19577fd93230 --- /dev/null +++ b/arch/alpha/include/asm/cputime.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef __ALPHA_CPUTIME_H | ||
2 | #define __ALPHA_CPUTIME_H | ||
3 | |||
4 | #include <asm-generic/cputime.h> | ||
5 | |||
6 | #endif /* __ALPHA_CPUTIME_H */ | ||
diff --git a/arch/alpha/include/asm/current.h b/arch/alpha/include/asm/current.h new file mode 100644 index 000000000000..094d285a1b34 --- /dev/null +++ b/arch/alpha/include/asm/current.h | |||
@@ -0,0 +1,9 @@ | |||
1 | #ifndef _ALPHA_CURRENT_H | ||
2 | #define _ALPHA_CURRENT_H | ||
3 | |||
4 | #include <linux/thread_info.h> | ||
5 | |||
6 | #define get_current() (current_thread_info()->task) | ||
7 | #define current get_current() | ||
8 | |||
9 | #endif /* _ALPHA_CURRENT_H */ | ||
diff --git a/arch/alpha/include/asm/delay.h b/arch/alpha/include/asm/delay.h new file mode 100644 index 000000000000..2aa3f410f7e6 --- /dev/null +++ b/arch/alpha/include/asm/delay.h | |||
@@ -0,0 +1,10 @@ | |||
1 | #ifndef __ALPHA_DELAY_H | ||
2 | #define __ALPHA_DELAY_H | ||
3 | |||
4 | extern void __delay(int loops); | ||
5 | extern void udelay(unsigned long usecs); | ||
6 | |||
7 | extern void ndelay(unsigned long nsecs); | ||
8 | #define ndelay ndelay | ||
9 | |||
10 | #endif /* defined(__ALPHA_DELAY_H) */ | ||
diff --git a/arch/alpha/include/asm/device.h b/arch/alpha/include/asm/device.h new file mode 100644 index 000000000000..d8f9872b0e2d --- /dev/null +++ b/arch/alpha/include/asm/device.h | |||
@@ -0,0 +1,7 @@ | |||
1 | /* | ||
2 | * Arch specific extensions to struct device | ||
3 | * | ||
4 | * This file is released under the GPLv2 | ||
5 | */ | ||
6 | #include <asm-generic/device.h> | ||
7 | |||
diff --git a/arch/alpha/include/asm/div64.h b/arch/alpha/include/asm/div64.h new file mode 100644 index 000000000000..6cd978cefb28 --- /dev/null +++ b/arch/alpha/include/asm/div64.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/div64.h> | |||
diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h new file mode 100644 index 000000000000..a5801ae02e4b --- /dev/null +++ b/arch/alpha/include/asm/dma-mapping.h | |||
@@ -0,0 +1,69 @@ | |||
1 | #ifndef _ALPHA_DMA_MAPPING_H | ||
2 | #define _ALPHA_DMA_MAPPING_H | ||
3 | |||
4 | |||
5 | #ifdef CONFIG_PCI | ||
6 | |||
7 | #include <linux/pci.h> | ||
8 | |||
9 | #define dma_map_single(dev, va, size, dir) \ | ||
10 | pci_map_single(alpha_gendev_to_pci(dev), va, size, dir) | ||
11 | #define dma_unmap_single(dev, addr, size, dir) \ | ||
12 | pci_unmap_single(alpha_gendev_to_pci(dev), addr, size, dir) | ||
13 | #define dma_alloc_coherent(dev, size, addr, gfp) \ | ||
14 | __pci_alloc_consistent(alpha_gendev_to_pci(dev), size, addr, gfp) | ||
15 | #define dma_free_coherent(dev, size, va, addr) \ | ||
16 | pci_free_consistent(alpha_gendev_to_pci(dev), size, va, addr) | ||
17 | #define dma_map_page(dev, page, off, size, dir) \ | ||
18 | pci_map_page(alpha_gendev_to_pci(dev), page, off, size, dir) | ||
19 | #define dma_unmap_page(dev, addr, size, dir) \ | ||
20 | pci_unmap_page(alpha_gendev_to_pci(dev), addr, size, dir) | ||
21 | #define dma_map_sg(dev, sg, nents, dir) \ | ||
22 | pci_map_sg(alpha_gendev_to_pci(dev), sg, nents, dir) | ||
23 | #define dma_unmap_sg(dev, sg, nents, dir) \ | ||
24 | pci_unmap_sg(alpha_gendev_to_pci(dev), sg, nents, dir) | ||
25 | #define dma_supported(dev, mask) \ | ||
26 | pci_dma_supported(alpha_gendev_to_pci(dev), mask) | ||
27 | #define dma_mapping_error(dev, addr) \ | ||
28 | pci_dma_mapping_error(alpha_gendev_to_pci(dev), addr) | ||
29 | |||
30 | #else /* no PCI - no IOMMU. */ | ||
31 | |||
32 | struct scatterlist; | ||
33 | void *dma_alloc_coherent(struct device *dev, size_t size, | ||
34 | dma_addr_t *dma_handle, gfp_t gfp); | ||
35 | int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | ||
36 | enum dma_data_direction direction); | ||
37 | |||
38 | #define dma_free_coherent(dev, size, va, addr) \ | ||
39 | free_pages((unsigned long)va, get_order(size)) | ||
40 | #define dma_supported(dev, mask) (mask < 0x00ffffffUL ? 0 : 1) | ||
41 | #define dma_map_single(dev, va, size, dir) virt_to_phys(va) | ||
42 | #define dma_map_page(dev, page, off, size, dir) (page_to_pa(page) + off) | ||
43 | |||
44 | #define dma_unmap_single(dev, addr, size, dir) ((void)0) | ||
45 | #define dma_unmap_page(dev, addr, size, dir) ((void)0) | ||
46 | #define dma_unmap_sg(dev, sg, nents, dir) ((void)0) | ||
47 | |||
48 | #define dma_mapping_error(dev, addr) (0) | ||
49 | |||
50 | #endif /* !CONFIG_PCI */ | ||
51 | |||
52 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | ||
53 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | ||
54 | #define dma_is_consistent(d, h) (1) | ||
55 | |||
56 | int dma_set_mask(struct device *dev, u64 mask); | ||
57 | |||
58 | #define dma_sync_single_for_cpu(dev, addr, size, dir) ((void)0) | ||
59 | #define dma_sync_single_for_device(dev, addr, size, dir) ((void)0) | ||
60 | #define dma_sync_single_range(dev, addr, off, size, dir) ((void)0) | ||
61 | #define dma_sync_sg_for_cpu(dev, sg, nents, dir) ((void)0) | ||
62 | #define dma_sync_sg_for_device(dev, sg, nents, dir) ((void)0) | ||
63 | #define dma_cache_sync(dev, va, size, dir) ((void)0) | ||
64 | #define dma_sync_single_range_for_cpu(dev, addr, offset, size, dir) ((void)0) | ||
65 | #define dma_sync_single_range_for_device(dev, addr, offset, size, dir) ((void)0) | ||
66 | |||
67 | #define dma_get_cache_alignment() L1_CACHE_BYTES | ||
68 | |||
69 | #endif /* _ALPHA_DMA_MAPPING_H */ | ||
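A hedged sketch of how a driver would sit on top of the wrappers above: on CONFIG_PCI kernels each call forwards to the corresponding pci_* routine through alpha_gendev_to_pci(), while on PCI-less machines the trivial definitions apply. The function name, buffer and length below are invented for illustration.

/* Illustrative only: a hypothetical driver mapping one buffer for a
 * device-bound DMA, using the wrappers defined above. */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_map_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -EIO;			/* mapping failed */

	/* ... hand "handle" to the hardware, wait for completion ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}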
diff --git a/arch/alpha/include/asm/dma.h b/arch/alpha/include/asm/dma.h new file mode 100644 index 000000000000..87cfdbdf08fc --- /dev/null +++ b/arch/alpha/include/asm/dma.h | |||
@@ -0,0 +1,376 @@ | |||
1 | /* | ||
2 | * include/asm-alpha/dma.h | ||
3 | * | ||
4 | * This is essentially the same as the i386 DMA stuff, as the AlphaPCs | ||
5 | * use ISA-compatible DMA. The only extension is support for high-page | ||
6 | * registers that allow setting the top 8 bits of a 32-bit DMA address. | ||
7 | * The high-page register should be written last when setting up a DMA address | ||
8 | * as this will also enable DMA across 64 KB boundaries. | ||
9 | */ | ||
10 | |||
11 | /* $Id: dma.h,v 1.7 1992/12/14 00:29:34 root Exp root $ | ||
12 | * linux/include/asm/dma.h: Defines for using and allocating dma channels. | ||
13 | * Written by Hennus Bergman, 1992. | ||
14 | * High DMA channel support & info by Hannu Savolainen | ||
15 | * and John Boyd, Nov. 1992. | ||
16 | */ | ||
17 | |||
18 | #ifndef _ASM_DMA_H | ||
19 | #define _ASM_DMA_H | ||
20 | |||
21 | #include <linux/spinlock.h> | ||
22 | #include <asm/io.h> | ||
23 | |||
24 | #define dma_outb outb | ||
25 | #define dma_inb inb | ||
26 | |||
27 | /* | ||
28 | * NOTES about DMA transfers: | ||
29 | * | ||
30 | * controller 1: channels 0-3, byte operations, ports 00-1F | ||
31 | * controller 2: channels 4-7, word operations, ports C0-DF | ||
32 | * | ||
33 | * - ALL registers are 8 bits only, regardless of transfer size | ||
34 | * - channel 4 is not used - cascades 1 into 2. | ||
35 | * - channels 0-3 are byte - addresses/counts are for physical bytes | ||
36 | * - channels 5-7 are word - addresses/counts are for physical words | ||
37 | * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries | ||
38 | * - transfer count loaded to registers is 1 less than actual count | ||
39 | * - controller 2 offsets are all even (2x offsets for controller 1) | ||
40 | * - page registers for 5-7 don't use data bit 0, represent 128K pages | ||
41 | * - page registers for 0-3 use bit 0, represent 64K pages | ||
42 | * | ||
43 | * DMA transfers are limited to the lower 16MB of _physical_ memory. | ||
44 | * Note that addresses loaded into registers must be _physical_ addresses, | ||
45 | * not logical addresses (which may differ if paging is active). | ||
46 | * | ||
47 | * Address mapping for channels 0-3: | ||
48 | * | ||
49 | * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses) | ||
50 | * | ... | | ... | | ... | | ||
51 | * | ... | | ... | | ... | | ||
52 | * | ... | | ... | | ... | | ||
53 | * P7 ... P0 A7 ... A0 A7 ... A0 | ||
54 | * | Page | Addr MSB | Addr LSB | (DMA registers) | ||
55 | * | ||
56 | * Address mapping for channels 5-7: | ||
57 | * | ||
58 | * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses) | ||
59 | * | ... | \ \ ... \ \ \ ... \ \ | ||
60 | * | ... | \ \ ... \ \ \ ... \ (not used) | ||
61 | * | ... | \ \ ... \ \ \ ... \ | ||
62 | * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0 | ||
63 | * | Page | Addr MSB | Addr LSB | (DMA registers) | ||
64 | * | ||
65 | * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses | ||
66 | * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at | ||
67 | * the hardware level, so odd-byte transfers aren't possible). | ||
68 | * | ||
69 | * Transfer count (_not # bytes_) is limited to 64K, represented as actual | ||
70 | * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more, | ||
71 | * and up to 128K bytes may be transferred on channels 5-7 in one operation. | ||
72 | * | ||
73 | */ | ||
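/*
 * A worked example of the mapping above (the address is chosen purely for
 * illustration): to program physical address 0x345678 on a byte channel
 * (0-3), the page register gets 0x34 (A23..A16), the address MSB register
 * gets 0x56 (A15..A8) and the address LSB register gets 0x78 (A7..A0).
 * On a word channel (5-7) the same address is programmed as a word
 * address, so the low registers receive (0x345678 >> 1) & 0xffff and the
 * page register carries A23..A17 in its upper seven bits, with bit 0
 * left clear.
 */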
74 | |||
75 | #define MAX_DMA_CHANNELS 8 | ||
76 | |||
77 | /* | ||
78 | ISA DMA limitations on Alpha platforms. | ||
79 | |||
80 | These may be due to a SIO (PCI<->ISA bridge) chipset limitation, or | ||
81 | just a wiring limit. | ||
82 | */ | ||
83 | |||
84 | /* The maximum address for ISA DMA transfer on Alpha XL, due to a | ||
85 | hardware SIO limitation, is 64MB. | ||
86 | */ | ||
87 | #define ALPHA_XL_MAX_ISA_DMA_ADDRESS 0x04000000UL | ||
88 | |||
89 | /* The maximum address for ISA DMA transfer on RUFFIAN, | ||
90 | due to a hardware SIO limitation, is 16MB. | ||
91 | */ | ||
92 | #define ALPHA_RUFFIAN_MAX_ISA_DMA_ADDRESS 0x01000000UL | ||
93 | |||
94 | /* The maximum address for ISA DMA transfer on SABLE, and some ALCORs, | ||
95 | due to a hardware SIO chip limitation, is 2GB. | ||
96 | */ | ||
97 | #define ALPHA_SABLE_MAX_ISA_DMA_ADDRESS 0x80000000UL | ||
98 | #define ALPHA_ALCOR_MAX_ISA_DMA_ADDRESS 0x80000000UL | ||
99 | |||
100 | /* | ||
101 | Maximum address for all the others is the complete 32-bit bus | ||
102 | address space. | ||
103 | */ | ||
104 | #define ALPHA_MAX_ISA_DMA_ADDRESS 0x100000000UL | ||
105 | |||
106 | #ifdef CONFIG_ALPHA_GENERIC | ||
107 | # define MAX_ISA_DMA_ADDRESS (alpha_mv.max_isa_dma_address) | ||
108 | #else | ||
109 | # if defined(CONFIG_ALPHA_XL) | ||
110 | # define MAX_ISA_DMA_ADDRESS ALPHA_XL_MAX_ISA_DMA_ADDRESS | ||
111 | # elif defined(CONFIG_ALPHA_RUFFIAN) | ||
112 | # define MAX_ISA_DMA_ADDRESS ALPHA_RUFFIAN_MAX_ISA_DMA_ADDRESS | ||
113 | # elif defined(CONFIG_ALPHA_SABLE) | ||
114 | # define MAX_ISA_DMA_ADDRESS ALPHA_SABLE_MAX_ISA_DMA_ADDRESS | ||
115 | # elif defined(CONFIG_ALPHA_ALCOR) | ||
116 | # define MAX_ISA_DMA_ADDRESS ALPHA_ALCOR_MAX_ISA_DMA_ADDRESS | ||
117 | # else | ||
118 | # define MAX_ISA_DMA_ADDRESS ALPHA_MAX_ISA_DMA_ADDRESS | ||
119 | # endif | ||
120 | #endif | ||
121 | |||
122 | /* If we have the iommu, we don't have any address limitations on DMA. | ||
123 | Otherwise (Nautilus, RX164), we have to have a 0-16 MB DMA zone | ||
124 | like i386. */ | ||
125 | #define MAX_DMA_ADDRESS (alpha_mv.mv_pci_tbi ? \ | ||
126 | ~0UL : IDENT_ADDR + 0x01000000) | ||
127 | |||
128 | /* 8237 DMA controllers */ | ||
129 | #define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */ | ||
130 | #define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */ | ||
131 | |||
132 | /* DMA controller registers */ | ||
133 | #define DMA1_CMD_REG 0x08 /* command register (w) */ | ||
134 | #define DMA1_STAT_REG 0x08 /* status register (r) */ | ||
135 | #define DMA1_REQ_REG 0x09 /* request register (w) */ | ||
136 | #define DMA1_MASK_REG 0x0A /* single-channel mask (w) */ | ||
137 | #define DMA1_MODE_REG 0x0B /* mode register (w) */ | ||
138 | #define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */ | ||
139 | #define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */ | ||
140 | #define DMA1_RESET_REG 0x0D /* Master Clear (w) */ | ||
141 | #define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */ | ||
142 | #define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */ | ||
143 | #define DMA1_EXT_MODE_REG (0x400 | DMA1_MODE_REG) | ||
144 | |||
145 | #define DMA2_CMD_REG 0xD0 /* command register (w) */ | ||
146 | #define DMA2_STAT_REG 0xD0 /* status register (r) */ | ||
147 | #define DMA2_REQ_REG 0xD2 /* request register (w) */ | ||
148 | #define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */ | ||
149 | #define DMA2_MODE_REG 0xD6 /* mode register (w) */ | ||
150 | #define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */ | ||
151 | #define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */ | ||
152 | #define DMA2_RESET_REG 0xDA /* Master Clear (w) */ | ||
153 | #define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */ | ||
154 | #define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */ | ||
155 | #define DMA2_EXT_MODE_REG (0x400 | DMA2_MODE_REG) | ||
156 | |||
157 | #define DMA_ADDR_0 0x00 /* DMA address registers */ | ||
158 | #define DMA_ADDR_1 0x02 | ||
159 | #define DMA_ADDR_2 0x04 | ||
160 | #define DMA_ADDR_3 0x06 | ||
161 | #define DMA_ADDR_4 0xC0 | ||
162 | #define DMA_ADDR_5 0xC4 | ||
163 | #define DMA_ADDR_6 0xC8 | ||
164 | #define DMA_ADDR_7 0xCC | ||
165 | |||
166 | #define DMA_CNT_0 0x01 /* DMA count registers */ | ||
167 | #define DMA_CNT_1 0x03 | ||
168 | #define DMA_CNT_2 0x05 | ||
169 | #define DMA_CNT_3 0x07 | ||
170 | #define DMA_CNT_4 0xC2 | ||
171 | #define DMA_CNT_5 0xC6 | ||
172 | #define DMA_CNT_6 0xCA | ||
173 | #define DMA_CNT_7 0xCE | ||
174 | |||
175 | #define DMA_PAGE_0 0x87 /* DMA page registers */ | ||
176 | #define DMA_PAGE_1 0x83 | ||
177 | #define DMA_PAGE_2 0x81 | ||
178 | #define DMA_PAGE_3 0x82 | ||
179 | #define DMA_PAGE_5 0x8B | ||
180 | #define DMA_PAGE_6 0x89 | ||
181 | #define DMA_PAGE_7 0x8A | ||
182 | |||
183 | #define DMA_HIPAGE_0 (0x400 | DMA_PAGE_0) | ||
184 | #define DMA_HIPAGE_1 (0x400 | DMA_PAGE_1) | ||
185 | #define DMA_HIPAGE_2 (0x400 | DMA_PAGE_2) | ||
186 | #define DMA_HIPAGE_3 (0x400 | DMA_PAGE_3) | ||
187 | #define DMA_HIPAGE_4 (0x400 | DMA_PAGE_4) | ||
188 | #define DMA_HIPAGE_5 (0x400 | DMA_PAGE_5) | ||
189 | #define DMA_HIPAGE_6 (0x400 | DMA_PAGE_6) | ||
190 | #define DMA_HIPAGE_7 (0x400 | DMA_PAGE_7) | ||
191 | |||
192 | #define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */ | ||
193 | #define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */ | ||
194 | #define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */ | ||
195 | |||
196 | #define DMA_AUTOINIT 0x10 | ||
197 | |||
198 | extern spinlock_t dma_spin_lock; | ||
199 | |||
200 | static __inline__ unsigned long claim_dma_lock(void) | ||
201 | { | ||
202 | unsigned long flags; | ||
203 | spin_lock_irqsave(&dma_spin_lock, flags); | ||
204 | return flags; | ||
205 | } | ||
206 | |||
207 | static __inline__ void release_dma_lock(unsigned long flags) | ||
208 | { | ||
209 | spin_unlock_irqrestore(&dma_spin_lock, flags); | ||
210 | } | ||
211 | |||
212 | /* enable/disable a specific DMA channel */ | ||
213 | static __inline__ void enable_dma(unsigned int dmanr) | ||
214 | { | ||
215 | if (dmanr<=3) | ||
216 | dma_outb(dmanr, DMA1_MASK_REG); | ||
217 | else | ||
218 | dma_outb(dmanr & 3, DMA2_MASK_REG); | ||
219 | } | ||
220 | |||
221 | static __inline__ void disable_dma(unsigned int dmanr) | ||
222 | { | ||
223 | if (dmanr<=3) | ||
224 | dma_outb(dmanr | 4, DMA1_MASK_REG); | ||
225 | else | ||
226 | dma_outb((dmanr & 3) | 4, DMA2_MASK_REG); | ||
227 | } | ||
228 | |||
229 | /* Clear the 'DMA Pointer Flip Flop'. | ||
230 | * Write 0 for LSB/MSB, 1 for MSB/LSB access. | ||
231 | * Use this once to initialize the FF to a known state. | ||
232 | * After that, keep track of it. :-) | ||
233 | * --- In order to do that, the DMA routines below should --- | ||
234 | * --- only be used while interrupts are disabled! --- | ||
235 | */ | ||
236 | static __inline__ void clear_dma_ff(unsigned int dmanr) | ||
237 | { | ||
238 | if (dmanr<=3) | ||
239 | dma_outb(0, DMA1_CLEAR_FF_REG); | ||
240 | else | ||
241 | dma_outb(0, DMA2_CLEAR_FF_REG); | ||
242 | } | ||
243 | |||
244 | /* set mode (above) for a specific DMA channel */ | ||
245 | static __inline__ void set_dma_mode(unsigned int dmanr, char mode) | ||
246 | { | ||
247 | if (dmanr<=3) | ||
248 | dma_outb(mode | dmanr, DMA1_MODE_REG); | ||
249 | else | ||
250 | dma_outb(mode | (dmanr&3), DMA2_MODE_REG); | ||
251 | } | ||
252 | |||
253 | /* set extended mode for a specific DMA channel */ | ||
254 | static __inline__ void set_dma_ext_mode(unsigned int dmanr, char ext_mode) | ||
255 | { | ||
256 | if (dmanr<=3) | ||
257 | dma_outb(ext_mode | dmanr, DMA1_EXT_MODE_REG); | ||
258 | else | ||
259 | dma_outb(ext_mode | (dmanr&3), DMA2_EXT_MODE_REG); | ||
260 | } | ||
261 | |||
262 | /* Set only the page register bits of the transfer address. | ||
263 | * This is used for successive transfers when we know the contents of | ||
264 | * the lower 16 bits of the DMA current address register. | ||
265 | */ | ||
266 | static __inline__ void set_dma_page(unsigned int dmanr, unsigned int pagenr) | ||
267 | { | ||
268 | switch(dmanr) { | ||
269 | case 0: | ||
270 | dma_outb(pagenr, DMA_PAGE_0); | ||
271 | dma_outb((pagenr >> 8), DMA_HIPAGE_0); | ||
272 | break; | ||
273 | case 1: | ||
274 | dma_outb(pagenr, DMA_PAGE_1); | ||
275 | dma_outb((pagenr >> 8), DMA_HIPAGE_1); | ||
276 | break; | ||
277 | case 2: | ||
278 | dma_outb(pagenr, DMA_PAGE_2); | ||
279 | dma_outb((pagenr >> 8), DMA_HIPAGE_2); | ||
280 | break; | ||
281 | case 3: | ||
282 | dma_outb(pagenr, DMA_PAGE_3); | ||
283 | dma_outb((pagenr >> 8), DMA_HIPAGE_3); | ||
284 | break; | ||
285 | case 5: | ||
286 | dma_outb(pagenr & 0xfe, DMA_PAGE_5); | ||
287 | dma_outb((pagenr >> 8), DMA_HIPAGE_5); | ||
288 | break; | ||
289 | case 6: | ||
290 | dma_outb(pagenr & 0xfe, DMA_PAGE_6); | ||
291 | dma_outb((pagenr >> 8), DMA_HIPAGE_6); | ||
292 | break; | ||
293 | case 7: | ||
294 | dma_outb(pagenr & 0xfe, DMA_PAGE_7); | ||
295 | dma_outb((pagenr >> 8), DMA_HIPAGE_7); | ||
296 | break; | ||
297 | } | ||
298 | } | ||
299 | |||
300 | |||
301 | /* Set transfer address & page bits for specific DMA channel. | ||
302 | * Assumes dma flipflop is clear. | ||
303 | */ | ||
304 | static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a) | ||
305 | { | ||
306 | if (dmanr <= 3) { | ||
307 | dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); | ||
308 | dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); | ||
309 | } else { | ||
310 | dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); | ||
311 | dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); | ||
312 | } | ||
313 | set_dma_page(dmanr, a>>16); /* set hipage last to enable 32-bit mode */ | ||
314 | } | ||
315 | |||
316 | |||
317 | /* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for | ||
318 | * a specific DMA channel. | ||
319 | * You must ensure the parameters are valid. | ||
320 | * NOTE: from a manual: "the number of transfers is one more | ||
321 | * than the initial word count"! This is taken into account. | ||
322 | * Assumes dma flip-flop is clear. | ||
323 | * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. | ||
324 | */ | ||
325 | static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) | ||
326 | { | ||
327 | count--; | ||
328 | if (dmanr <= 3) { | ||
329 | dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); | ||
330 | dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); | ||
331 | } else { | ||
332 | dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); | ||
333 | dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); | ||
334 | } | ||
335 | } | ||
336 | |||
337 | |||
338 | /* Get DMA residue count. After a DMA transfer, this | ||
339 | * should return zero. Reading this while a DMA transfer is | ||
340 | * still in progress will return unpredictable results. | ||
341 | * If called before the channel has been used, it may return 1. | ||
342 | * Otherwise, it returns the number of _bytes_ left to transfer. | ||
343 | * | ||
344 | * Assumes DMA flip-flop is clear. | ||
345 | */ | ||
346 | static __inline__ int get_dma_residue(unsigned int dmanr) | ||
347 | { | ||
348 | unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE | ||
349 | : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE; | ||
350 | |||
351 | /* using short to get 16-bit wrap around */ | ||
352 | unsigned short count; | ||
353 | |||
354 | count = 1 + dma_inb(io_port); | ||
355 | count += dma_inb(io_port) << 8; | ||
356 | |||
357 | return (dmanr<=3)? count : (count<<1); | ||
358 | } | ||
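/*
 * A worked example of the 16-bit wrap-around above (values purely
 * illustrative): once a transfer on a byte channel has completed, the
 * current-count register reads back 0xFFFF, so 1 + 0xFF + (0xFF << 8)
 * gives 0x10000, which truncates to 0 in the unsigned short -- the
 * expected "nothing left to transfer" residue.
 */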
359 | |||
360 | |||
361 | /* These are in kernel/dma.c: */ | ||
362 | extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */ | ||
363 | extern void free_dma(unsigned int dmanr); /* release it again */ | ||
364 | #define KERNEL_HAVE_CHECK_DMA | ||
365 | extern int check_dma(unsigned int dmanr); | ||
366 | |||
367 | /* From PCI */ | ||
368 | |||
369 | #ifdef CONFIG_PCI | ||
370 | extern int isa_dma_bridge_buggy; | ||
371 | #else | ||
372 | #define isa_dma_bridge_buggy (0) | ||
373 | #endif | ||
374 | |||
375 | |||
376 | #endif /* _ASM_DMA_H */ | ||
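Taken together, the helpers above make up the usual 8237 programming sequence. Below is a minimal sketch of how a driver might combine them for one transfer; the function name, channel and buffer arguments are illustrative only, the channel is assumed to have been reserved with request_dma(), and bus_addr is assumed to be a bus address below MAX_ISA_DMA_ADDRESS.

static void example_program_isa_dma(unsigned int chan,
				    unsigned int bus_addr, unsigned int len)
{
	unsigned long flags;

	flags = claim_dma_lock();		/* serialize access to the 8237s */
	disable_dma(chan);
	clear_dma_ff(chan);			/* put the flip-flop in a known state */
	set_dma_mode(chan, DMA_MODE_READ);	/* device -> memory, single mode */
	set_dma_addr(chan, bus_addr);		/* also writes the (hi)page registers */
	set_dma_count(chan, len);		/* in bytes; must be even on channels 5-7 */
	enable_dma(chan);
	release_dma_lock(flags);
}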
diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h new file mode 100644 index 000000000000..fc1002ea1e0c --- /dev/null +++ b/arch/alpha/include/asm/elf.h | |||
@@ -0,0 +1,165 @@ | |||
1 | #ifndef __ASM_ALPHA_ELF_H | ||
2 | #define __ASM_ALPHA_ELF_H | ||
3 | |||
4 | #include <asm/auxvec.h> | ||
5 | |||
6 | /* Special values for the st_other field in the symbol table. */ | ||
7 | |||
8 | #define STO_ALPHA_NOPV 0x80 | ||
9 | #define STO_ALPHA_STD_GPLOAD 0x88 | ||
10 | |||
11 | /* | ||
12 | * Alpha ELF relocation types | ||
13 | */ | ||
14 | #define R_ALPHA_NONE 0 /* No reloc */ | ||
15 | #define R_ALPHA_REFLONG 1 /* Direct 32 bit */ | ||
16 | #define R_ALPHA_REFQUAD 2 /* Direct 64 bit */ | ||
17 | #define R_ALPHA_GPREL32 3 /* GP relative 32 bit */ | ||
18 | #define R_ALPHA_LITERAL 4 /* GP relative 16 bit w/optimization */ | ||
19 | #define R_ALPHA_LITUSE 5 /* Optimization hint for LITERAL */ | ||
20 | #define R_ALPHA_GPDISP 6 /* Add displacement to GP */ | ||
21 | #define R_ALPHA_BRADDR 7 /* PC+4 relative 23 bit shifted */ | ||
22 | #define R_ALPHA_HINT 8 /* PC+4 relative 16 bit shifted */ | ||
23 | #define R_ALPHA_SREL16 9 /* PC relative 16 bit */ | ||
24 | #define R_ALPHA_SREL32 10 /* PC relative 32 bit */ | ||
25 | #define R_ALPHA_SREL64 11 /* PC relative 64 bit */ | ||
26 | #define R_ALPHA_GPRELHIGH 17 /* GP relative 32 bit, high 16 bits */ | ||
27 | #define R_ALPHA_GPRELLOW 18 /* GP relative 32 bit, low 16 bits */ | ||
28 | #define R_ALPHA_GPREL16 19 /* GP relative 16 bit */ | ||
29 | #define R_ALPHA_COPY 24 /* Copy symbol at runtime */ | ||
30 | #define R_ALPHA_GLOB_DAT 25 /* Create GOT entry */ | ||
31 | #define R_ALPHA_JMP_SLOT 26 /* Create PLT entry */ | ||
32 | #define R_ALPHA_RELATIVE 27 /* Adjust by program base */ | ||
33 | #define R_ALPHA_BRSGP 28 | ||
34 | #define R_ALPHA_TLSGD 29 | ||
35 | #define R_ALPHA_TLS_LDM 30 | ||
36 | #define R_ALPHA_DTPMOD64 31 | ||
37 | #define R_ALPHA_GOTDTPREL 32 | ||
38 | #define R_ALPHA_DTPREL64 33 | ||
39 | #define R_ALPHA_DTPRELHI 34 | ||
40 | #define R_ALPHA_DTPRELLO 35 | ||
41 | #define R_ALPHA_DTPREL16 36 | ||
42 | #define R_ALPHA_GOTTPREL 37 | ||
43 | #define R_ALPHA_TPREL64 38 | ||
44 | #define R_ALPHA_TPRELHI 39 | ||
45 | #define R_ALPHA_TPRELLO 40 | ||
46 | #define R_ALPHA_TPREL16 41 | ||
47 | |||
48 | #define SHF_ALPHA_GPREL 0x10000000 | ||
49 | |||
50 | /* Legal values for e_flags field of Elf64_Ehdr. */ | ||
51 | |||
52 | #define EF_ALPHA_32BIT 1 /* All addresses are below 2GB */ | ||
53 | |||
54 | /* | ||
55 | * ELF register definitions.. | ||
56 | */ | ||
57 | |||
58 | /* | ||
59 | * The OSF/1 version of <sys/procfs.h> makes gregset_t 46 entries long. | ||
60 | * I have no idea why that is so. For now, we just leave it at 33 | ||
61 | * (32 general regs + processor status word). | ||
62 | */ | ||
63 | #define ELF_NGREG 33 | ||
64 | #define ELF_NFPREG 32 | ||
65 | |||
66 | typedef unsigned long elf_greg_t; | ||
67 | typedef elf_greg_t elf_gregset_t[ELF_NGREG]; | ||
68 | |||
69 | typedef double elf_fpreg_t; | ||
70 | typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; | ||
71 | |||
72 | /* | ||
73 | * This is used to ensure we don't load something for the wrong architecture. | ||
74 | */ | ||
75 | #define elf_check_arch(x) ((x)->e_machine == EM_ALPHA) | ||
76 | |||
77 | /* | ||
78 | * These are used to set parameters in the core dumps. | ||
79 | */ | ||
80 | #define ELF_CLASS ELFCLASS64 | ||
81 | #define ELF_DATA ELFDATA2LSB | ||
82 | #define ELF_ARCH EM_ALPHA | ||
83 | |||
84 | #define USE_ELF_CORE_DUMP | ||
85 | #define ELF_EXEC_PAGESIZE 8192 | ||
86 | |||
87 | /* This is the location that an ET_DYN program is loaded if exec'ed. Typical | ||
88 | use of this is to invoke "./ld.so someprog" to test out a new version of | ||
89 | the loader. We need to make sure that it is out of the way of the program | ||
90 | that it will "exec", and that there is sufficient room for the brk. */ | ||
91 | |||
92 | #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000) | ||
93 | |||
94 | /* $0 is set by ld.so to a pointer to a function which might be | ||
95 | registered using atexit. This provides a means for the dynamic | ||
96 | linker to call DT_FINI functions for shared libraries that have | ||
97 | been loaded before the code runs. | ||
98 | |||
99 | So that we can use the same startup file with static executables, | ||
100 | we start programs with a value of 0 to indicate that there is no | ||
101 | such function. */ | ||
102 | |||
103 | #define ELF_PLAT_INIT(_r, load_addr) _r->r0 = 0 | ||
104 | |||
105 | /* The registers are laid out in pt_regs for PAL and syscall | ||
106 | convenience. Re-order them for the linear elf_gregset_t. */ | ||
107 | |||
108 | struct pt_regs; | ||
109 | struct thread_info; | ||
110 | struct task_struct; | ||
111 | extern void dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, | ||
112 | struct thread_info *ti); | ||
113 | #define ELF_CORE_COPY_REGS(DEST, REGS) \ | ||
114 | dump_elf_thread(DEST, REGS, current_thread_info()); | ||
115 | |||
116 | /* Similar, but for a thread other than current. */ | ||
117 | |||
118 | extern int dump_elf_task(elf_greg_t *dest, struct task_struct *task); | ||
119 | #define ELF_CORE_COPY_TASK_REGS(TASK, DEST) \ | ||
120 | dump_elf_task(*(DEST), TASK) | ||
121 | |||
122 | /* Similar, but for the FP registers. */ | ||
123 | |||
124 | extern int dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task); | ||
125 | #define ELF_CORE_COPY_FPREGS(TASK, DEST) \ | ||
126 | dump_elf_task_fp(*(DEST), TASK) | ||
127 | |||
128 | /* This yields a mask that user programs can use to figure out what | ||
129 | instruction set this CPU supports. This is trivial on Alpha, | ||
130 | but not so on other machines. */ | ||
131 | |||
132 | #define ELF_HWCAP (~amask(-1)) | ||
133 | |||
134 | /* This yields a string that ld.so will use to load implementation | ||
135 | specific libraries for optimization. This is more specific in | ||
136 | intent than poking at uname or /proc/cpuinfo. */ | ||
137 | |||
138 | #define ELF_PLATFORM \ | ||
139 | ({ \ | ||
140 | enum implver_enum i_ = implver(); \ | ||
141 | ( i_ == IMPLVER_EV4 ? "ev4" \ | ||
142 | : i_ == IMPLVER_EV5 \ | ||
143 | ? (amask(AMASK_BWX) ? "ev5" : "ev56") \ | ||
144 | : amask (AMASK_CIX) ? "ev6" : "ev67"); \ | ||
145 | }) | ||
146 | |||
147 | #define SET_PERSONALITY(EX, IBCS2) \ | ||
148 | set_personality(((EX).e_flags & EF_ALPHA_32BIT) \ | ||
149 | ? PER_LINUX_32BIT : (IBCS2) ? PER_SVR4 : PER_LINUX) | ||
150 | |||
151 | extern int alpha_l1i_cacheshape; | ||
152 | extern int alpha_l1d_cacheshape; | ||
153 | extern int alpha_l2_cacheshape; | ||
154 | extern int alpha_l3_cacheshape; | ||
155 | |||
156 | /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ | ||
157 | #define ARCH_DLINFO \ | ||
158 | do { \ | ||
159 | NEW_AUX_ENT(AT_L1I_CACHESHAPE, alpha_l1i_cacheshape); \ | ||
160 | NEW_AUX_ENT(AT_L1D_CACHESHAPE, alpha_l1d_cacheshape); \ | ||
161 | NEW_AUX_ENT(AT_L2_CACHESHAPE, alpha_l2_cacheshape); \ | ||
162 | NEW_AUX_ENT(AT_L3_CACHESHAPE, alpha_l3_cacheshape); \ | ||
163 | } while (0) | ||
164 | |||
165 | #endif /* __ASM_ALPHA_ELF_H */ | ||
diff --git a/arch/alpha/include/asm/emergency-restart.h b/arch/alpha/include/asm/emergency-restart.h new file mode 100644 index 000000000000..108d8c48e42e --- /dev/null +++ b/arch/alpha/include/asm/emergency-restart.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef _ASM_EMERGENCY_RESTART_H | ||
2 | #define _ASM_EMERGENCY_RESTART_H | ||
3 | |||
4 | #include <asm-generic/emergency-restart.h> | ||
5 | |||
6 | #endif /* _ASM_EMERGENCY_RESTART_H */ | ||
diff --git a/arch/alpha/include/asm/err_common.h b/arch/alpha/include/asm/err_common.h new file mode 100644 index 000000000000..c25095942107 --- /dev/null +++ b/arch/alpha/include/asm/err_common.h | |||
@@ -0,0 +1,118 @@ | |||
1 | /* | ||
2 | * linux/include/asm-alpha/err_common.h | ||
3 | * | ||
4 | * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation) | ||
5 | * | ||
6 | * Contains declarations and macros to support Alpha error handling | ||
7 | * implementations. | ||
8 | */ | ||
9 | |||
10 | #ifndef __ALPHA_ERR_COMMON_H | ||
11 | #define __ALPHA_ERR_COMMON_H 1 | ||
12 | |||
13 | /* | ||
14 | * SCB Vector definitions | ||
15 | */ | ||
16 | #define SCB_Q_SYSERR 0x620 | ||
17 | #define SCB_Q_PROCERR 0x630 | ||
18 | #define SCB_Q_SYSMCHK 0x660 | ||
19 | #define SCB_Q_PROCMCHK 0x670 | ||
20 | #define SCB_Q_SYSEVENT 0x680 | ||
21 | |||
22 | /* | ||
23 | * Disposition definitions for logout frame parser | ||
24 | */ | ||
25 | #define MCHK_DISPOSITION_UNKNOWN_ERROR 0x00 | ||
26 | #define MCHK_DISPOSITION_REPORT 0x01 | ||
27 | #define MCHK_DISPOSITION_DISMISS 0x02 | ||
28 | |||
29 | /* | ||
30 | * Error Log definitions | ||
31 | */ | ||
32 | /* | ||
33 | * Types | ||
34 | */ | ||
35 | |||
36 | #define EL_CLASS__TERMINATION (0) | ||
37 | # define EL_TYPE__TERMINATION__TERMINATION (0) | ||
38 | #define EL_CLASS__HEADER (5) | ||
39 | # define EL_TYPE__HEADER__SYSTEM_ERROR_FRAME (1) | ||
40 | # define EL_TYPE__HEADER__SYSTEM_EVENT_FRAME (2) | ||
41 | # define EL_TYPE__HEADER__HALT_FRAME (3) | ||
42 | # define EL_TYPE__HEADER__LOGOUT_FRAME (19) | ||
43 | #define EL_CLASS__GENERAL_NOTIFICATION (9) | ||
44 | #define EL_CLASS__PCI_ERROR_FRAME (11) | ||
45 | #define EL_CLASS__REGATTA_FAMILY (12) | ||
46 | # define EL_TYPE__REGATTA__PROCESSOR_ERROR_FRAME (1) | ||
47 | # define EL_TYPE__REGATTA__SYSTEM_ERROR_FRAME (2) | ||
48 | # define EL_TYPE__REGATTA__ENVIRONMENTAL_FRAME (3) | ||
49 | # define EL_TYPE__REGATTA__TITAN_PCHIP0_EXTENDED (8) | ||
50 | # define EL_TYPE__REGATTA__TITAN_PCHIP1_EXTENDED (9) | ||
51 | # define EL_TYPE__REGATTA__TITAN_MEMORY_EXTENDED (10) | ||
52 | # define EL_TYPE__REGATTA__PROCESSOR_DBL_ERROR_HALT (11) | ||
53 | # define EL_TYPE__REGATTA__SYSTEM_DBL_ERROR_HALT (12) | ||
54 | #define EL_CLASS__PAL (14) | ||
55 | # define EL_TYPE__PAL__LOGOUT_FRAME (1) | ||
56 | # define EL_TYPE__PAL__EV7_PROCESSOR (4) | ||
57 | # define EL_TYPE__PAL__EV7_ZBOX (5) | ||
58 | # define EL_TYPE__PAL__EV7_RBOX (6) | ||
59 | # define EL_TYPE__PAL__EV7_IO (7) | ||
60 | # define EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE (10) | ||
61 | # define EL_TYPE__PAL__ENV__AIRMOVER_FAN (11) | ||
62 | # define EL_TYPE__PAL__ENV__VOLTAGE (12) | ||
63 | # define EL_TYPE__PAL__ENV__INTRUSION (13) | ||
64 | # define EL_TYPE__PAL__ENV__POWER_SUPPLY (14) | ||
65 | # define EL_TYPE__PAL__ENV__LAN (15) | ||
66 | # define EL_TYPE__PAL__ENV__HOT_PLUG (16) | ||
67 | |||
68 | union el_timestamp { | ||
69 | struct { | ||
70 | u8 second; | ||
71 | u8 minute; | ||
72 | u8 hour; | ||
73 | u8 day; | ||
74 | u8 month; | ||
75 | u8 year; | ||
76 | } b; | ||
77 | u64 as_int; | ||
78 | }; | ||
79 | |||
80 | struct el_subpacket { | ||
81 | u16 length; /* length of header (in bytes) */ | ||
82 | u16 class; /* header class and type... */ | ||
83 | u16 type; /* ...determine content */ | ||
84 | u16 revision; /* header revision */ | ||
85 | union { | ||
86 | struct { /* Class 5, Type 1 - System Error */ | ||
87 | u32 frame_length; | ||
88 | u32 frame_packet_count; | ||
89 | } sys_err; | ||
90 | struct { /* Class 5, Type 2 - System Event */ | ||
91 | union el_timestamp timestamp; | ||
92 | u32 frame_length; | ||
93 | u32 frame_packet_count; | ||
94 | } sys_event; | ||
95 | struct { /* Class 5, Type 3 - Double Error Halt */ | ||
96 | u16 halt_code; | ||
97 | u16 reserved; | ||
98 | union el_timestamp timestamp; | ||
99 | u32 frame_length; | ||
100 | u32 frame_packet_count; | ||
101 | } err_halt; | ||
102 | struct { /* Class 5, Type 19 - Logout Frame Header */ | ||
103 | u32 frame_length; | ||
104 | u32 frame_flags; | ||
105 | u32 cpu_offset; | ||
106 | u32 system_offset; | ||
107 | } logout_header; | ||
108 | struct { /* Class 12 - Regatta */ | ||
109 | u64 cpuid; | ||
110 | u64 data_start[1]; | ||
111 | } regatta_frame; | ||
112 | struct { /* Raw */ | ||
113 | u64 data_start[1]; | ||
114 | } raw; | ||
115 | } by_type; | ||
116 | }; | ||
117 | |||
118 | #endif /* __ALPHA_ERR_COMMON_H */ | ||
diff --git a/arch/alpha/include/asm/err_ev6.h b/arch/alpha/include/asm/err_ev6.h new file mode 100644 index 000000000000..ea637791e4a9 --- /dev/null +++ b/arch/alpha/include/asm/err_ev6.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef __ALPHA_ERR_EV6_H | ||
2 | #define __ALPHA_ERR_EV6_H 1 | ||
3 | |||
4 | /* Dummy include for now. */ | ||
5 | |||
6 | #endif /* __ALPHA_ERR_EV6_H */ | ||
diff --git a/arch/alpha/include/asm/err_ev7.h b/arch/alpha/include/asm/err_ev7.h new file mode 100644 index 000000000000..87f99777c2e4 --- /dev/null +++ b/arch/alpha/include/asm/err_ev7.h | |||
@@ -0,0 +1,202 @@ | |||
1 | #ifndef __ALPHA_ERR_EV7_H | ||
2 | #define __ALPHA_ERR_EV7_H 1 | ||
3 | |||
4 | /* | ||
5 | * Data for el packet class PAL (14), type LOGOUT_FRAME (1) | ||
6 | */ | ||
7 | struct ev7_pal_logout_subpacket { | ||
8 | u32 mchk_code; | ||
9 | u32 subpacket_count; | ||
10 | u64 whami; | ||
11 | u64 rbox_whami; | ||
12 | u64 rbox_int; | ||
13 | u64 exc_addr; | ||
14 | union el_timestamp timestamp; | ||
15 | u64 halt_code; | ||
16 | u64 reserved; | ||
17 | }; | ||
18 | |||
19 | /* | ||
20 | * Data for el packet class PAL (14), type EV7_PROCESSOR (4) | ||
21 | */ | ||
22 | struct ev7_pal_processor_subpacket { | ||
23 | u64 i_stat; | ||
24 | u64 dc_stat; | ||
25 | u64 c_addr; | ||
26 | u64 c_syndrome_1; | ||
27 | u64 c_syndrome_0; | ||
28 | u64 c_stat; | ||
29 | u64 c_sts; | ||
30 | u64 mm_stat; | ||
31 | u64 exc_addr; | ||
32 | u64 ier_cm; | ||
33 | u64 isum; | ||
34 | u64 pal_base; | ||
35 | u64 i_ctl; | ||
36 | u64 process_context; | ||
37 | u64 cbox_ctl; | ||
38 | u64 cbox_stp_ctl; | ||
39 | u64 cbox_acc_ctl; | ||
40 | u64 cbox_lcl_set; | ||
41 | u64 cbox_gbl_set; | ||
42 | u64 bbox_ctl; | ||
43 | u64 bbox_err_sts; | ||
44 | u64 bbox_err_idx; | ||
45 | u64 cbox_ddp_err_sts; | ||
46 | u64 bbox_dat_rmp; | ||
47 | u64 reserved[2]; | ||
48 | }; | ||
49 | |||
50 | /* | ||
51 | * Data for el packet class PAL (14), type EV7_ZBOX (5) | ||
52 | */ | ||
53 | struct ev7_pal_zbox_subpacket { | ||
54 | u32 zbox0_dram_err_status_1; | ||
55 | u32 zbox0_dram_err_status_2; | ||
56 | u32 zbox0_dram_err_status_3; | ||
57 | u32 zbox0_dram_err_ctl; | ||
58 | u32 zbox0_dram_err_adr; | ||
59 | u32 zbox0_dift_timeout; | ||
60 | u32 zbox0_dram_mapper_ctl; | ||
61 | u32 zbox0_frc_err_adr; | ||
62 | u32 zbox0_dift_err_status; | ||
63 | u32 reserved1; | ||
64 | u32 zbox1_dram_err_status_1; | ||
65 | u32 zbox1_dram_err_status_2; | ||
66 | u32 zbox1_dram_err_status_3; | ||
67 | u32 zbox1_dram_err_ctl; | ||
68 | u32 zbox1_dram_err_adr; | ||
69 | u32 zbox1_dift_timeout; | ||
70 | u32 zbox1_dram_mapper_ctl; | ||
71 | u32 zbox1_frc_err_adr; | ||
72 | u32 zbox1_dift_err_status; | ||
73 | u32 reserved2; | ||
74 | u64 cbox_ctl; | ||
75 | u64 cbox_stp_ctl; | ||
76 | u64 zbox0_error_pa; | ||
77 | u64 zbox1_error_pa; | ||
78 | u64 zbox0_ored_syndrome; | ||
79 | u64 zbox1_ored_syndrome; | ||
80 | u64 reserved3[2]; | ||
81 | }; | ||
82 | |||
83 | /* | ||
84 | * Data for el packet class PAL (14), type EV7_RBOX (6) | ||
85 | */ | ||
86 | struct ev7_pal_rbox_subpacket { | ||
87 | u64 rbox_cfg; | ||
88 | u64 rbox_n_cfg; | ||
89 | u64 rbox_s_cfg; | ||
90 | u64 rbox_e_cfg; | ||
91 | u64 rbox_w_cfg; | ||
92 | u64 rbox_n_err; | ||
93 | u64 rbox_s_err; | ||
94 | u64 rbox_e_err; | ||
95 | u64 rbox_w_err; | ||
96 | u64 rbox_io_cfg; | ||
97 | u64 rbox_io_err; | ||
98 | u64 rbox_l_err; | ||
99 | u64 rbox_whoami; | ||
100 | u64 rbox_imask; | ||
101 | u64 rbox_intq; | ||
102 | u64 rbox_int; | ||
103 | u64 reserved[2]; | ||
104 | }; | ||
105 | |||
106 | /* | ||
107 | * Data for el packet class PAL (14), type EV7_IO (7) | ||
108 | */ | ||
109 | struct ev7_pal_io_one_port { | ||
110 | u64 pox_err_sum; | ||
111 | u64 pox_tlb_err; | ||
112 | u64 pox_spl_cmplt; | ||
113 | u64 pox_trans_sum; | ||
114 | u64 pox_first_err; | ||
115 | u64 pox_mult_err; | ||
116 | u64 pox_dm_source; | ||
117 | u64 pox_dm_dest; | ||
118 | u64 pox_dm_size; | ||
119 | u64 pox_dm_ctrl; | ||
120 | u64 reserved; | ||
121 | }; | ||
122 | |||
123 | struct ev7_pal_io_subpacket { | ||
124 | u64 io_asic_rev; | ||
125 | u64 io_sys_rev; | ||
126 | u64 io7_uph; | ||
127 | u64 hpi_ctl; | ||
128 | u64 crd_ctl; | ||
129 | u64 hei_ctl; | ||
130 | u64 po7_error_sum; | ||
131 | u64 po7_uncrr_sym; | ||
132 | u64 po7_crrct_sym; | ||
133 | u64 po7_ugbge_sym; | ||
134 | u64 po7_err_pkt0; | ||
135 | u64 po7_err_pkt1; | ||
136 | u64 reserved[2]; | ||
137 | struct ev7_pal_io_one_port ports[4]; | ||
138 | }; | ||
139 | |||
140 | /* | ||
141 | * Environmental subpacket. Data used for el packets: | ||
142 | * class PAL (14), type AMBIENT_TEMPERATURE (10) | ||
143 | * class PAL (14), type AIRMOVER_FAN (11) | ||
144 | * class PAL (14), type VOLTAGE (12) | ||
145 | * class PAL (14), type INTRUSION (13) | ||
146 | * class PAL (14), type POWER_SUPPLY (14) | ||
147 | * class PAL (14), type LAN (15) | ||
148 | * class PAL (14), type HOT_PLUG (16) | ||
149 | */ | ||
150 | struct ev7_pal_environmental_subpacket { | ||
151 | u16 cabinet; | ||
152 | u16 drawer; | ||
153 | u16 reserved1[2]; | ||
154 | u8 module_type; | ||
155 | u8 unit_id; /* unit reporting condition */ | ||
156 | u8 reserved2; | ||
157 | u8 condition; /* condition reported */ | ||
158 | }; | ||
159 | |||
160 | /* | ||
161 | * Convert environmental type to index | ||
162 | */ | ||
163 | static inline int ev7_lf_env_index(int type) | ||
164 | { | ||
165 | BUG_ON((type < EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE) | ||
166 | || (type > EL_TYPE__PAL__ENV__HOT_PLUG)); | ||
167 | |||
168 | return type - EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE; | ||
169 | } | ||
170 | |||
171 | /* | ||
172 | * Data for generic el packet class PAL. | ||
173 | */ | ||
174 | struct ev7_pal_subpacket { | ||
175 | union { | ||
176 | struct ev7_pal_logout_subpacket logout; /* Type 1 */ | ||
177 | struct ev7_pal_processor_subpacket ev7; /* Type 4 */ | ||
178 | struct ev7_pal_zbox_subpacket zbox; /* Type 5 */ | ||
179 | struct ev7_pal_rbox_subpacket rbox; /* Type 6 */ | ||
180 | struct ev7_pal_io_subpacket io; /* Type 7 */ | ||
181 | struct ev7_pal_environmental_subpacket env; /* Type 10-16 */ | ||
182 | u64 as_quad[1]; /* Raw u64 */ | ||
183 | } by_type; | ||
184 | }; | ||
185 | |||
186 | /* | ||
187 | * Struct to contain collected logout from subpackets. | ||
188 | */ | ||
189 | struct ev7_lf_subpackets { | ||
190 | struct ev7_pal_logout_subpacket *logout; /* Type 1 */ | ||
191 | struct ev7_pal_processor_subpacket *ev7; /* Type 4 */ | ||
192 | struct ev7_pal_zbox_subpacket *zbox; /* Type 5 */ | ||
193 | struct ev7_pal_rbox_subpacket *rbox; /* Type 6 */ | ||
194 | struct ev7_pal_io_subpacket *io; /* Type 7 */ | ||
195 | struct ev7_pal_environmental_subpacket *env[7]; /* Type 10-16 */ | ||
196 | |||
197 | unsigned int io_pid; | ||
198 | }; | ||
199 | |||
200 | #endif /* __ALPHA_ERR_EV7_H */ | ||
201 | |||
202 | |||
diff --git a/arch/alpha/include/asm/errno.h b/arch/alpha/include/asm/errno.h new file mode 100644 index 000000000000..69e2655249d2 --- /dev/null +++ b/arch/alpha/include/asm/errno.h | |||
@@ -0,0 +1,123 @@ | |||
1 | #ifndef _ALPHA_ERRNO_H | ||
2 | #define _ALPHA_ERRNO_H | ||
3 | |||
4 | #include <asm-generic/errno-base.h> | ||
5 | |||
6 | #undef EAGAIN /* 11 in errno-base.h */ | ||
7 | |||
8 | #define EDEADLK 11 /* Resource deadlock would occur */ | ||
9 | |||
10 | #define EAGAIN 35 /* Try again */ | ||
11 | #define EWOULDBLOCK EAGAIN /* Operation would block */ | ||
12 | #define EINPROGRESS 36 /* Operation now in progress */ | ||
13 | #define EALREADY 37 /* Operation already in progress */ | ||
14 | #define ENOTSOCK 38 /* Socket operation on non-socket */ | ||
15 | #define EDESTADDRREQ 39 /* Destination address required */ | ||
16 | #define EMSGSIZE 40 /* Message too long */ | ||
17 | #define EPROTOTYPE 41 /* Protocol wrong type for socket */ | ||
18 | #define ENOPROTOOPT 42 /* Protocol not available */ | ||
19 | #define EPROTONOSUPPORT 43 /* Protocol not supported */ | ||
20 | #define ESOCKTNOSUPPORT 44 /* Socket type not supported */ | ||
21 | #define EOPNOTSUPP 45 /* Operation not supported on transport endpoint */ | ||
22 | #define EPFNOSUPPORT 46 /* Protocol family not supported */ | ||
23 | #define EAFNOSUPPORT 47 /* Address family not supported by protocol */ | ||
24 | #define EADDRINUSE 48 /* Address already in use */ | ||
25 | #define EADDRNOTAVAIL 49 /* Cannot assign requested address */ | ||
26 | #define ENETDOWN 50 /* Network is down */ | ||
27 | #define ENETUNREACH 51 /* Network is unreachable */ | ||
28 | #define ENETRESET 52 /* Network dropped connection because of reset */ | ||
29 | #define ECONNABORTED 53 /* Software caused connection abort */ | ||
30 | #define ECONNRESET 54 /* Connection reset by peer */ | ||
31 | #define ENOBUFS 55 /* No buffer space available */ | ||
32 | #define EISCONN 56 /* Transport endpoint is already connected */ | ||
33 | #define ENOTCONN 57 /* Transport endpoint is not connected */ | ||
34 | #define ESHUTDOWN 58 /* Cannot send after transport endpoint shutdown */ | ||
35 | #define ETOOMANYREFS 59 /* Too many references: cannot splice */ | ||
36 | #define ETIMEDOUT 60 /* Connection timed out */ | ||
37 | #define ECONNREFUSED 61 /* Connection refused */ | ||
38 | #define ELOOP 62 /* Too many symbolic links encountered */ | ||
39 | #define ENAMETOOLONG 63 /* File name too long */ | ||
40 | #define EHOSTDOWN 64 /* Host is down */ | ||
41 | #define EHOSTUNREACH 65 /* No route to host */ | ||
42 | #define ENOTEMPTY 66 /* Directory not empty */ | ||
43 | |||
44 | #define EUSERS 68 /* Too many users */ | ||
45 | #define EDQUOT 69 /* Quota exceeded */ | ||
46 | #define ESTALE 70 /* Stale NFS file handle */ | ||
47 | #define EREMOTE 71 /* Object is remote */ | ||
48 | |||
49 | #define ENOLCK 77 /* No record locks available */ | ||
50 | #define ENOSYS 78 /* Function not implemented */ | ||
51 | |||
52 | #define ENOMSG 80 /* No message of desired type */ | ||
53 | #define EIDRM 81 /* Identifier removed */ | ||
54 | #define ENOSR 82 /* Out of streams resources */ | ||
55 | #define ETIME 83 /* Timer expired */ | ||
56 | #define EBADMSG 84 /* Not a data message */ | ||
57 | #define EPROTO 85 /* Protocol error */ | ||
58 | #define ENODATA 86 /* No data available */ | ||
59 | #define ENOSTR 87 /* Device not a stream */ | ||
60 | |||
61 | #define ENOPKG 92 /* Package not installed */ | ||
62 | |||
63 | #define EILSEQ 116 /* Illegal byte sequence */ | ||
64 | |||
65 | /* The following are just random noise.. */ | ||
66 | #define ECHRNG 88 /* Channel number out of range */ | ||
67 | #define EL2NSYNC 89 /* Level 2 not synchronized */ | ||
68 | #define EL3HLT 90 /* Level 3 halted */ | ||
69 | #define EL3RST 91 /* Level 3 reset */ | ||
70 | |||
71 | #define ELNRNG 93 /* Link number out of range */ | ||
72 | #define EUNATCH 94 /* Protocol driver not attached */ | ||
73 | #define ENOCSI 95 /* No CSI structure available */ | ||
74 | #define EL2HLT 96 /* Level 2 halted */ | ||
75 | #define EBADE 97 /* Invalid exchange */ | ||
76 | #define EBADR 98 /* Invalid request descriptor */ | ||
77 | #define EXFULL 99 /* Exchange full */ | ||
78 | #define ENOANO 100 /* No anode */ | ||
79 | #define EBADRQC 101 /* Invalid request code */ | ||
80 | #define EBADSLT 102 /* Invalid slot */ | ||
81 | |||
82 | #define EDEADLOCK EDEADLK | ||
83 | |||
84 | #define EBFONT 104 /* Bad font file format */ | ||
85 | #define ENONET 105 /* Machine is not on the network */ | ||
86 | #define ENOLINK 106 /* Link has been severed */ | ||
87 | #define EADV 107 /* Advertise error */ | ||
88 | #define ESRMNT 108 /* Srmount error */ | ||
89 | #define ECOMM 109 /* Communication error on send */ | ||
90 | #define EMULTIHOP 110 /* Multihop attempted */ | ||
91 | #define EDOTDOT 111 /* RFS specific error */ | ||
92 | #define EOVERFLOW 112 /* Value too large for defined data type */ | ||
93 | #define ENOTUNIQ 113 /* Name not unique on network */ | ||
94 | #define EBADFD 114 /* File descriptor in bad state */ | ||
95 | #define EREMCHG 115 /* Remote address changed */ | ||
96 | |||
97 | #define EUCLEAN 117 /* Structure needs cleaning */ | ||
98 | #define ENOTNAM 118 /* Not a XENIX named type file */ | ||
99 | #define ENAVAIL 119 /* No XENIX semaphores available */ | ||
100 | #define EISNAM 120 /* Is a named type file */ | ||
101 | #define EREMOTEIO 121 /* Remote I/O error */ | ||
102 | |||
103 | #define ELIBACC 122 /* Can not access a needed shared library */ | ||
104 | #define ELIBBAD 123 /* Accessing a corrupted shared library */ | ||
105 | #define ELIBSCN 124 /* .lib section in a.out corrupted */ | ||
106 | #define ELIBMAX 125 /* Attempting to link in too many shared libraries */ | ||
107 | #define ELIBEXEC 126 /* Cannot exec a shared library directly */ | ||
108 | #define ERESTART 127 /* Interrupted system call should be restarted */ | ||
109 | #define ESTRPIPE 128 /* Streams pipe error */ | ||
110 | |||
111 | #define ENOMEDIUM 129 /* No medium found */ | ||
112 | #define EMEDIUMTYPE 130 /* Wrong medium type */ | ||
113 | #define ECANCELED 131 /* Operation Cancelled */ | ||
114 | #define ENOKEY 132 /* Required key not available */ | ||
115 | #define EKEYEXPIRED 133 /* Key has expired */ | ||
116 | #define EKEYREVOKED 134 /* Key has been revoked */ | ||
117 | #define EKEYREJECTED 135 /* Key was rejected by service */ | ||
118 | |||
119 | /* for robust mutexes */ | ||
120 | #define EOWNERDEAD 136 /* Owner died */ | ||
121 | #define ENOTRECOVERABLE 137 /* State not recoverable */ | ||
122 | |||
123 | #endif | ||
diff --git a/arch/alpha/include/asm/fb.h b/arch/alpha/include/asm/fb.h new file mode 100644 index 000000000000..fa9bbb96b2b3 --- /dev/null +++ b/arch/alpha/include/asm/fb.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef _ASM_FB_H_ | ||
2 | #define _ASM_FB_H_ | ||
3 | #include <linux/device.h> | ||
4 | |||
5 | /* Caching is off in the I/O space quadrant by design. */ | ||
6 | #define fb_pgprotect(...) do {} while (0) | ||
7 | |||
8 | static inline int fb_is_primary_device(struct fb_info *info) | ||
9 | { | ||
10 | return 0; | ||
11 | } | ||
12 | |||
13 | #endif /* _ASM_FB_H_ */ | ||
diff --git a/arch/alpha/include/asm/fcntl.h b/arch/alpha/include/asm/fcntl.h new file mode 100644 index 000000000000..25da0017ec87 --- /dev/null +++ b/arch/alpha/include/asm/fcntl.h | |||
@@ -0,0 +1,43 @@ | |||
1 | #ifndef _ALPHA_FCNTL_H | ||
2 | #define _ALPHA_FCNTL_H | ||
3 | |||
4 | /* open/fcntl - O_SYNC is only implemented on block devices and on files | ||
5 | located on an ext2 file system */ | ||
6 | #define O_CREAT 01000 /* not fcntl */ | ||
7 | #define O_TRUNC 02000 /* not fcntl */ | ||
8 | #define O_EXCL 04000 /* not fcntl */ | ||
9 | #define O_NOCTTY 010000 /* not fcntl */ | ||
10 | |||
11 | #define O_NONBLOCK 00004 | ||
12 | #define O_APPEND 00010 | ||
13 | #define O_SYNC 040000 | ||
14 | #define O_DIRECTORY 0100000 /* must be a directory */ | ||
15 | #define O_NOFOLLOW 0200000 /* don't follow links */ | ||
16 | #define O_LARGEFILE 0400000 /* will be set by the kernel on every open */ | ||
17 | #define O_DIRECT 02000000 /* direct disk access - should check with OSF/1 */ | ||
18 | #define O_NOATIME 04000000 | ||
19 | #define O_CLOEXEC 010000000 /* set close_on_exec */ | ||
20 | |||
21 | #define F_GETLK 7 | ||
22 | #define F_SETLK 8 | ||
23 | #define F_SETLKW 9 | ||
24 | |||
25 | #define F_SETOWN 5 /* for sockets. */ | ||
26 | #define F_GETOWN 6 /* for sockets. */ | ||
27 | #define F_SETSIG 10 /* for sockets. */ | ||
28 | #define F_GETSIG 11 /* for sockets. */ | ||
29 | |||
30 | /* for posix fcntl() and lockf() */ | ||
31 | #define F_RDLCK 1 | ||
32 | #define F_WRLCK 2 | ||
33 | #define F_UNLCK 8 | ||
34 | |||
35 | /* for old implementation of bsd flock () */ | ||
36 | #define F_EXLCK 16 /* or 3 */ | ||
37 | #define F_SHLCK 32 /* or 4 */ | ||
38 | |||
39 | #define F_INPROGRESS 64 | ||
40 | |||
41 | #include <asm-generic/fcntl.h> | ||
42 | |||
43 | #endif | ||
diff --git a/arch/alpha/include/asm/floppy.h b/arch/alpha/include/asm/floppy.h new file mode 100644 index 000000000000..0be50413b2b5 --- /dev/null +++ b/arch/alpha/include/asm/floppy.h | |||
@@ -0,0 +1,115 @@ | |||
1 | /* | ||
2 | * Architecture specific parts of the Floppy driver | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * Copyright (C) 1995 | ||
9 | */ | ||
10 | #ifndef __ASM_ALPHA_FLOPPY_H | ||
11 | #define __ASM_ALPHA_FLOPPY_H | ||
12 | |||
13 | |||
14 | #define fd_inb(port) inb_p(port) | ||
15 | #define fd_outb(value,port) outb_p(value,port) | ||
16 | |||
17 | #define fd_enable_dma() enable_dma(FLOPPY_DMA) | ||
18 | #define fd_disable_dma() disable_dma(FLOPPY_DMA) | ||
19 | #define fd_request_dma() request_dma(FLOPPY_DMA,"floppy") | ||
20 | #define fd_free_dma() free_dma(FLOPPY_DMA) | ||
21 | #define fd_clear_dma_ff() clear_dma_ff(FLOPPY_DMA) | ||
22 | #define fd_set_dma_mode(mode) set_dma_mode(FLOPPY_DMA,mode) | ||
23 | #define fd_set_dma_addr(addr) set_dma_addr(FLOPPY_DMA,virt_to_bus(addr)) | ||
24 | #define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA,count) | ||
25 | #define fd_enable_irq() enable_irq(FLOPPY_IRQ) | ||
26 | #define fd_disable_irq() disable_irq(FLOPPY_IRQ) | ||
27 | #define fd_cacheflush(addr,size) /* nothing */ | ||
28 | #define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt,\ | ||
29 | IRQF_DISABLED, "floppy", NULL) | ||
30 | #define fd_free_irq() free_irq(FLOPPY_IRQ, NULL); | ||
31 | |||
32 | #ifdef CONFIG_PCI | ||
33 | |||
34 | #include <linux/pci.h> | ||
35 | |||
36 | #define fd_dma_setup(addr,size,mode,io) alpha_fd_dma_setup(addr,size,mode,io) | ||
37 | |||
38 | static __inline__ int | ||
39 | alpha_fd_dma_setup(char *addr, unsigned long size, int mode, int io) | ||
40 | { | ||
41 | static unsigned long prev_size; | ||
42 | static dma_addr_t bus_addr = 0; | ||
43 | static char *prev_addr; | ||
44 | static int prev_dir; | ||
45 | int dir; | ||
46 | |||
47 | dir = (mode != DMA_MODE_READ) ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE; | ||
48 | |||
49 | if (bus_addr | ||
50 | && (addr != prev_addr || size != prev_size || dir != prev_dir)) { | ||
51 | /* different from last time -- unmap prev */ | ||
52 | pci_unmap_single(isa_bridge, bus_addr, prev_size, prev_dir); | ||
53 | bus_addr = 0; | ||
54 | } | ||
55 | |||
56 | if (!bus_addr) /* need to map it */ | ||
57 | bus_addr = pci_map_single(isa_bridge, addr, size, dir); | ||
58 | |||
59 | /* remember this one as prev */ | ||
60 | prev_addr = addr; | ||
61 | prev_size = size; | ||
62 | prev_dir = dir; | ||
63 | |||
64 | fd_clear_dma_ff(); | ||
65 | fd_cacheflush(addr, size); | ||
66 | fd_set_dma_mode(mode); | ||
67 | set_dma_addr(FLOPPY_DMA, bus_addr); | ||
68 | fd_set_dma_count(size); | ||
69 | virtual_dma_port = io; | ||
70 | fd_enable_dma(); | ||
71 | |||
72 | return 0; | ||
73 | } | ||
74 | |||
75 | #endif /* CONFIG_PCI */ | ||
76 | |||
77 | __inline__ void virtual_dma_init(void) | ||
78 | { | ||
79 | /* Nothing to do on an Alpha */ | ||
80 | } | ||
81 | |||
82 | static int FDC1 = 0x3f0; | ||
83 | static int FDC2 = -1; | ||
84 | |||
85 | /* | ||
86 | * Again, the CMOS information doesn't work on the Alpha. | ||
87 | */ | ||
88 | #define FLOPPY0_TYPE 6 | ||
89 | #define FLOPPY1_TYPE 0 | ||
90 | |||
91 | #define N_FDC 2 | ||
92 | #define N_DRIVE 8 | ||
93 | |||
94 | /* | ||
95 | * Most Alphas have no problems with floppy DMA crossing 64k borders, | ||
96 | * except for certain ones, like XL and RUFFIAN. | ||
97 | * | ||
98 | * However, the test is simple and fast, and this *is* floppy, after all, | ||
99 | * so we do it for all platforms, just to make sure. | ||
100 | * | ||
101 | * This is advantageous in other circumstances as well, as in moving | ||
102 | * about the PCI DMA windows and forcing the floppy to start doing | ||
103 | * scatter-gather when it never had before, and there *is* a problem | ||
104 | * on that platform... ;-} | ||
105 | */ | ||
106 | |||
107 | static inline unsigned long CROSS_64KB(void *a, unsigned long s) | ||
108 | { | ||
109 | unsigned long p = (unsigned long)a; | ||
110 | return ((p + s - 1) ^ p) & ~0xffffUL; | ||
111 | } | ||
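/*
 * For example (addresses purely illustrative): an 8 KB buffer starting at
 * 0xF000 spans 0xF000..0x10FFF and therefore crosses a 64 KB boundary, so
 * CROSS_64KB() returns a non-zero value; the same buffer starting at
 * 0x10000 lies entirely within one 64 KB region and returns 0.
 */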
112 | |||
113 | #define EXTRA_FLOPPY_PARAMS | ||
114 | |||
115 | #endif /* __ASM_ALPHA_FLOPPY_H */ | ||
diff --git a/arch/alpha/include/asm/fpu.h b/arch/alpha/include/asm/fpu.h new file mode 100644 index 000000000000..ecb17a72acc3 --- /dev/null +++ b/arch/alpha/include/asm/fpu.h | |||
@@ -0,0 +1,193 @@ | |||
1 | #ifndef __ASM_ALPHA_FPU_H | ||
2 | #define __ASM_ALPHA_FPU_H | ||
3 | |||
4 | /* | ||
5 | * Alpha floating-point control register defines: | ||
6 | */ | ||
7 | #define FPCR_DNOD (1UL<<47) /* denorm INV trap disable */ | ||
8 | #define FPCR_DNZ (1UL<<48) /* denorms to zero */ | ||
9 | #define FPCR_INVD (1UL<<49) /* invalid op disable (opt.) */ | ||
10 | #define FPCR_DZED (1UL<<50) /* division by zero disable (opt.) */ | ||
11 | #define FPCR_OVFD (1UL<<51) /* overflow disable (optional) */ | ||
12 | #define FPCR_INV (1UL<<52) /* invalid operation */ | ||
13 | #define FPCR_DZE (1UL<<53) /* division by zero */ | ||
14 | #define FPCR_OVF (1UL<<54) /* overflow */ | ||
15 | #define FPCR_UNF (1UL<<55) /* underflow */ | ||
16 | #define FPCR_INE (1UL<<56) /* inexact */ | ||
17 | #define FPCR_IOV (1UL<<57) /* integer overflow */ | ||
18 | #define FPCR_UNDZ (1UL<<60) /* underflow to zero (opt.) */ | ||
19 | #define FPCR_UNFD (1UL<<61) /* underflow disable (opt.) */ | ||
20 | #define FPCR_INED (1UL<<62) /* inexact disable (opt.) */ | ||
21 | #define FPCR_SUM (1UL<<63) /* summary bit */ | ||
22 | |||
23 | #define FPCR_DYN_SHIFT 58 /* first dynamic rounding mode bit */ | ||
24 | #define FPCR_DYN_CHOPPED (0x0UL << FPCR_DYN_SHIFT) /* towards 0 */ | ||
25 | #define FPCR_DYN_MINUS (0x1UL << FPCR_DYN_SHIFT) /* towards -INF */ | ||
26 | #define FPCR_DYN_NORMAL (0x2UL << FPCR_DYN_SHIFT) /* towards nearest */ | ||
27 | #define FPCR_DYN_PLUS (0x3UL << FPCR_DYN_SHIFT) /* towards +INF */ | ||
28 | #define FPCR_DYN_MASK (0x3UL << FPCR_DYN_SHIFT) | ||
29 | |||
30 | #define FPCR_MASK 0xffff800000000000L | ||
31 | |||
32 | /* | ||
33 | * IEEE trap enables are implemented in software. These per-thread | ||
34 | * bits are stored in the "ieee_state" field of "struct thread_info". | ||
35 | * Thus, the bits are defined so as not to conflict with the | ||
36 | * floating-point enable bit (which is architected). On top of that, | ||
37 | * we want to make these bits compatible with OSF/1 so | ||
38 | * ieee_set_fp_control() etc. can be implemented easily and | ||
39 | * compatibly. The corresponding definitions are in | ||
40 | * /usr/include/machine/fpu.h under OSF/1. | ||
41 | */ | ||
42 | #define IEEE_TRAP_ENABLE_INV (1UL<<1) /* invalid op */ | ||
43 | #define IEEE_TRAP_ENABLE_DZE (1UL<<2) /* division by zero */ | ||
44 | #define IEEE_TRAP_ENABLE_OVF (1UL<<3) /* overflow */ | ||
45 | #define IEEE_TRAP_ENABLE_UNF (1UL<<4) /* underflow */ | ||
46 | #define IEEE_TRAP_ENABLE_INE (1UL<<5) /* inexact */ | ||
47 | #define IEEE_TRAP_ENABLE_DNO (1UL<<6) /* denorm */ | ||
48 | #define IEEE_TRAP_ENABLE_MASK (IEEE_TRAP_ENABLE_INV | IEEE_TRAP_ENABLE_DZE |\ | ||
49 | IEEE_TRAP_ENABLE_OVF | IEEE_TRAP_ENABLE_UNF |\ | ||
50 | IEEE_TRAP_ENABLE_INE | IEEE_TRAP_ENABLE_DNO) | ||
51 | |||
52 | /* Denorm and Underflow flushing */ | ||
53 | #define IEEE_MAP_DMZ (1UL<<12) /* Map denorm inputs to zero */ | ||
54 | #define IEEE_MAP_UMZ (1UL<<13) /* Map underflowed outputs to zero */ | ||
55 | |||
56 | #define IEEE_MAP_MASK (IEEE_MAP_DMZ | IEEE_MAP_UMZ) | ||
57 | |||
58 | /* status bits coming from fpcr: */ | ||
59 | #define IEEE_STATUS_INV (1UL<<17) | ||
60 | #define IEEE_STATUS_DZE (1UL<<18) | ||
61 | #define IEEE_STATUS_OVF (1UL<<19) | ||
62 | #define IEEE_STATUS_UNF (1UL<<20) | ||
63 | #define IEEE_STATUS_INE (1UL<<21) | ||
64 | #define IEEE_STATUS_DNO (1UL<<22) | ||
65 | |||
66 | #define IEEE_STATUS_MASK (IEEE_STATUS_INV | IEEE_STATUS_DZE | \ | ||
67 | IEEE_STATUS_OVF | IEEE_STATUS_UNF | \ | ||
68 | IEEE_STATUS_INE | IEEE_STATUS_DNO) | ||
69 | |||
70 | #define IEEE_SW_MASK (IEEE_TRAP_ENABLE_MASK | \ | ||
71 | IEEE_STATUS_MASK | IEEE_MAP_MASK) | ||
72 | |||
73 | #define IEEE_CURRENT_RM_SHIFT 32 | ||
74 | #define IEEE_CURRENT_RM_MASK (3UL<<IEEE_CURRENT_RM_SHIFT) | ||
75 | |||
76 | #define IEEE_STATUS_TO_EXCSUM_SHIFT 16 | ||
77 | |||
78 | #define IEEE_INHERIT (1UL<<63) /* inherit on thread create? */ | ||
79 | |||
80 | /* | ||
81 | * Convert the software IEEE trap enable and status bits into the | ||
82 | * hardware fpcr format. | ||
83 | * | ||
84 | * Digital Unix engineers receive my thanks for not defining the | ||
85 | * software bits identical to the hardware bits. The chip designers | ||
86 | * receive my thanks for making all the not-implemented fpcr bits | ||
87 | * RAZ, forcing us to use system calls to read/write this value. | ||
88 | */ | ||
89 | |||
90 | static inline unsigned long | ||
91 | ieee_swcr_to_fpcr(unsigned long sw) | ||
92 | { | ||
93 | unsigned long fp; | ||
94 | fp = (sw & IEEE_STATUS_MASK) << 35; | ||
95 | fp |= (sw & IEEE_MAP_DMZ) << 36; | ||
96 | fp |= (sw & IEEE_STATUS_MASK ? FPCR_SUM : 0); | ||
97 | fp |= (~sw & (IEEE_TRAP_ENABLE_INV | ||
98 | | IEEE_TRAP_ENABLE_DZE | ||
99 | | IEEE_TRAP_ENABLE_OVF)) << 48; | ||
100 | fp |= (~sw & (IEEE_TRAP_ENABLE_UNF | IEEE_TRAP_ENABLE_INE)) << 57; | ||
101 | fp |= (sw & IEEE_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0); | ||
102 | fp |= (~sw & IEEE_TRAP_ENABLE_DNO) << 41; | ||
103 | return fp; | ||
104 | } | ||
105 | |||
106 | static inline unsigned long | ||
107 | ieee_fpcr_to_swcr(unsigned long fp) | ||
108 | { | ||
109 | unsigned long sw; | ||
110 | sw = (fp >> 35) & IEEE_STATUS_MASK; | ||
111 | sw |= (fp >> 36) & IEEE_MAP_DMZ; | ||
112 | sw |= (~fp >> 48) & (IEEE_TRAP_ENABLE_INV | ||
113 | | IEEE_TRAP_ENABLE_DZE | ||
114 | | IEEE_TRAP_ENABLE_OVF); | ||
115 | sw |= (~fp >> 57) & (IEEE_TRAP_ENABLE_UNF | IEEE_TRAP_ENABLE_INE); | ||
116 | sw |= (fp >> 47) & IEEE_MAP_UMZ; | ||
117 | sw |= (~fp >> 41) & IEEE_TRAP_ENABLE_DNO; | ||
118 | return sw; | ||
119 | } | ||
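/*
 * A concrete instance of the mapping implemented above: the software
 * status bit IEEE_STATUS_INV (bit 17) shifted left by 35 lands on bit 52,
 * which is FPCR_INV, while the software enable bit IEEE_TRAP_ENABLE_INV
 * (bit 1) is inverted and shifted left by 48 to bit 49, which is
 * FPCR_INVD -- the hardware bit is a trap *disable*, hence the inversion.
 */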
120 | |||
121 | #ifdef __KERNEL__ | ||
122 | |||
123 | /* The following two functions don't need trapb/excb instructions | ||
124 | around the mf_fpcr/mt_fpcr instructions because (a) the kernel | ||
125 | never generates arithmetic faults and (b) call_pal instructions | ||
126 | are implied trap barriers. */ | ||
127 | |||
128 | static inline unsigned long | ||
129 | rdfpcr(void) | ||
130 | { | ||
131 | unsigned long tmp, ret; | ||
132 | |||
133 | #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) | ||
134 | __asm__ __volatile__ ( | ||
135 | "ftoit $f0,%0\n\t" | ||
136 | "mf_fpcr $f0\n\t" | ||
137 | "ftoit $f0,%1\n\t" | ||
138 | "itoft %0,$f0" | ||
139 | : "=r"(tmp), "=r"(ret)); | ||
140 | #else | ||
141 | __asm__ __volatile__ ( | ||
142 | "stt $f0,%0\n\t" | ||
143 | "mf_fpcr $f0\n\t" | ||
144 | "stt $f0,%1\n\t" | ||
145 | "ldt $f0,%0" | ||
146 | : "=m"(tmp), "=m"(ret)); | ||
147 | #endif | ||
148 | |||
149 | return ret; | ||
150 | } | ||
151 | |||
152 | static inline void | ||
153 | wrfpcr(unsigned long val) | ||
154 | { | ||
155 | unsigned long tmp; | ||
156 | |||
157 | #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) | ||
158 | __asm__ __volatile__ ( | ||
159 | "ftoit $f0,%0\n\t" | ||
160 | "itoft %1,$f0\n\t" | ||
161 | "mt_fpcr $f0\n\t" | ||
162 | "itoft %0,$f0" | ||
163 | : "=&r"(tmp) : "r"(val)); | ||
164 | #else | ||
165 | __asm__ __volatile__ ( | ||
166 | "stt $f0,%0\n\t" | ||
167 | "ldt $f0,%1\n\t" | ||
168 | "mt_fpcr $f0\n\t" | ||
169 | "ldt $f0,%0" | ||
170 | : "=m"(tmp) : "m"(val)); | ||
171 | #endif | ||
172 | } | ||
173 | |||
174 | static inline unsigned long | ||
175 | swcr_update_status(unsigned long swcr, unsigned long fpcr) | ||
176 | { | ||
177 | /* EV6 implements most of the bits in hardware. Collect | ||
178 | the accrued exception bits from the real fpcr. */ | ||
179 | if (implver() == IMPLVER_EV6) { | ||
180 | swcr &= ~IEEE_STATUS_MASK; | ||
181 | swcr |= (fpcr >> 35) & IEEE_STATUS_MASK; | ||
182 | } | ||
183 | return swcr; | ||
184 | } | ||
185 | |||
186 | extern unsigned long alpha_read_fp_reg (unsigned long reg); | ||
187 | extern void alpha_write_fp_reg (unsigned long reg, unsigned long val); | ||
188 | extern unsigned long alpha_read_fp_reg_s (unsigned long reg); | ||
189 | extern void alpha_write_fp_reg_s (unsigned long reg, unsigned long val); | ||
190 | |||
191 | #endif /* __KERNEL__ */ | ||
192 | |||
193 | #endif /* __ASM_ALPHA_FPU_H */ | ||
diff --git a/arch/alpha/include/asm/futex.h b/arch/alpha/include/asm/futex.h new file mode 100644 index 000000000000..6a332a9f099c --- /dev/null +++ b/arch/alpha/include/asm/futex.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef _ASM_FUTEX_H | ||
2 | #define _ASM_FUTEX_H | ||
3 | |||
4 | #include <asm-generic/futex.h> | ||
5 | |||
6 | #endif | ||
diff --git a/arch/alpha/include/asm/gct.h b/arch/alpha/include/asm/gct.h new file mode 100644 index 000000000000..3504c704927c --- /dev/null +++ b/arch/alpha/include/asm/gct.h | |||
@@ -0,0 +1,58 @@ | |||
1 | #ifndef __ALPHA_GCT_H | ||
2 | #define __ALPHA_GCT_H | ||
3 | |||
4 | typedef u64 gct_id; | ||
5 | typedef u64 gct6_handle; | ||
6 | |||
7 | typedef struct __gct6_node { | ||
8 | u8 type; | ||
9 | u8 subtype; | ||
10 | u16 size; | ||
11 | u32 hd_extension; | ||
12 | gct6_handle owner; | ||
13 | gct6_handle active_user; | ||
14 | gct_id id; | ||
15 | u64 flags; | ||
16 | u16 rev; | ||
17 | u16 change_counter; | ||
18 | u16 max_child; | ||
19 | u16 reserved1; | ||
20 | gct6_handle saved_owner; | ||
21 | gct6_handle affinity; | ||
22 | gct6_handle parent; | ||
23 | gct6_handle next; | ||
24 | gct6_handle prev; | ||
25 | gct6_handle child; | ||
26 | u64 fw_flags; | ||
27 | u64 os_usage; | ||
28 | u64 fru_id; | ||
29 | u32 checksum; | ||
30 | u32 magic; /* 'GLXY' */ | ||
31 | } gct6_node; | ||
32 | |||
33 | typedef struct { | ||
34 | u8 type; | ||
35 | u8 subtype; | ||
36 | void (*callout)(gct6_node *); | ||
37 | } gct6_search_struct; | ||
38 | |||
39 | #define GCT_NODE_MAGIC 0x59584c47 /* 'GLXY' */ | ||
40 | |||
41 | /* | ||
42 | * node types | ||
43 | */ | ||
44 | #define GCT_TYPE_HOSE 0x0E | ||
45 | |||
46 | /* | ||
47 | * node subtypes | ||
48 | */ | ||
49 | #define GCT_SUBTYPE_IO_PORT_MODULE 0x2C | ||
50 | |||
51 | #define GCT_NODE_PTR(off) ((gct6_node *)((char *)hwrpb + \ | ||
52 | hwrpb->frut_offset + \ | ||
53 | (gct6_handle)(off))) \ | ||
54 | |||
55 | int gct6_find_nodes(gct6_node *, gct6_search_struct *); | ||
56 | |||
57 | #endif /* __ALPHA_GCT_H */ | ||
58 | |||
diff --git a/arch/alpha/include/asm/gentrap.h b/arch/alpha/include/asm/gentrap.h new file mode 100644 index 000000000000..ae50cc3192c7 --- /dev/null +++ b/arch/alpha/include/asm/gentrap.h | |||
@@ -0,0 +1,37 @@ | |||
1 | #ifndef _ASMAXP_GENTRAP_H | ||
2 | #define _ASMAXP_GENTRAP_H | ||
3 | |||
4 | /* | ||
5 | * Definitions for gentrap causes. They are generated by user-level | ||
6 | * programs and therefore should be compatible with the corresponding | ||
7 | * OSF/1 definitions. | ||
8 | */ | ||
9 | #define GEN_INTOVF -1 /* integer overflow */ | ||
10 | #define GEN_INTDIV -2 /* integer division by zero */ | ||
11 | #define GEN_FLTOVF -3 /* fp overflow */ | ||
12 | #define GEN_FLTDIV -4 /* fp division by zero */ | ||
13 | #define GEN_FLTUND -5 /* fp underflow */ | ||
14 | #define GEN_FLTINV -6 /* invalid fp operand */ | ||
15 | #define GEN_FLTINE -7 /* inexact fp operand */ | ||
16 | #define GEN_DECOVF -8 /* decimal overflow (for COBOL??) */ | ||
17 | #define GEN_DECDIV -9 /* decimal division by zero */ | ||
18 | #define GEN_DECINV -10 /* invalid decimal operand */ | ||
19 | #define GEN_ROPRAND -11 /* reserved operand */ | ||
20 | #define GEN_ASSERTERR -12 /* assertion error */ | ||
21 | #define GEN_NULPTRERR -13 /* null pointer error */ | ||
22 | #define GEN_STKOVF -14 /* stack overflow */ | ||
23 | #define GEN_STRLENERR -15 /* string length error */ | ||
24 | #define GEN_SUBSTRERR -16 /* substring error */ | ||
25 | #define GEN_RANGERR -17 /* range error */ | ||
26 | #define GEN_SUBRNG -18 | ||
27 | #define GEN_SUBRNG1 -19 | ||
28 | #define GEN_SUBRNG2 -20 | ||
29 | #define GEN_SUBRNG3 -21 /* these report range errors for */ | ||
30 | #define GEN_SUBRNG4 -22 /* subscripting (indexing) at levels 0..7 */ | ||
31 | #define GEN_SUBRNG5 -23 | ||
32 | #define GEN_SUBRNG6 -24 | ||
33 | #define GEN_SUBRNG7 -25 | ||
34 | |||
35 | /* the remaining codes (-26..-1023) are reserved. */ | ||
36 | |||
37 | #endif /* _ASMAXP_GENTRAP_H */ | ||
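The GEN_* values are the codes a user program hands to the gentrap PALcode call; whatever catches the resulting trap can translate them for display. A minimal, purely illustrative translation using only the codes defined above:

	/* Sketch: turn a gentrap cause code into a printable label. */
	static const char *gentrap_name(long code)
	{
		switch (code) {
		case GEN_INTOVF:	return "integer overflow";
		case GEN_INTDIV:	return "integer divide by zero";
		case GEN_FLTOVF:	return "floating-point overflow";
		case GEN_FLTDIV:	return "floating-point divide by zero";
		case GEN_FLTUND:	return "floating-point underflow";
		case GEN_FLTINV:	return "invalid floating-point operand";
		case GEN_ROPRAND:	return "reserved operand";
		default:		return "unknown gentrap code";
		}
	}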
diff --git a/arch/alpha/include/asm/hardirq.h b/arch/alpha/include/asm/hardirq.h new file mode 100644 index 000000000000..d953e234daa8 --- /dev/null +++ b/arch/alpha/include/asm/hardirq.h | |||
@@ -0,0 +1,30 @@ | |||
1 | #ifndef _ALPHA_HARDIRQ_H | ||
2 | #define _ALPHA_HARDIRQ_H | ||
3 | |||
4 | #include <linux/threads.h> | ||
5 | #include <linux/cache.h> | ||
6 | |||
7 | |||
8 | /* entry.S is sensitive to the offsets of these fields */ | ||
9 | typedef struct { | ||
10 | unsigned long __softirq_pending; | ||
11 | } ____cacheline_aligned irq_cpustat_t; | ||
12 | |||
13 | #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ | ||
14 | |||
15 | void ack_bad_irq(unsigned int irq); | ||
16 | |||
17 | #define HARDIRQ_BITS 12 | ||
18 | |||
19 | /* | ||
20 | * The hardirq mask has to be large enough to have | ||
21 | * space for potentially nestable IRQ sources in the system | ||
22 | * to nest on a single CPU. On Alpha, interrupts are masked at the CPU | ||
23 | * by IPL as well as at the system level. We only have 8 IPLs (UNIX PALcode), | ||
24 | * so we really only have 8 nestable IRQs, but allow some overhead. | ||
25 | */ | ||
26 | #if (1 << HARDIRQ_BITS) < 16 | ||
27 | #error HARDIRQ_BITS is too low! | ||
28 | #endif | ||
29 | |||
30 | #endif /* _ALPHA_HARDIRQ_H */ | ||
diff --git a/arch/alpha/include/asm/hw_irq.h b/arch/alpha/include/asm/hw_irq.h new file mode 100644 index 000000000000..a37db0f95092 --- /dev/null +++ b/arch/alpha/include/asm/hw_irq.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef _ALPHA_HW_IRQ_H | ||
2 | #define _ALPHA_HW_IRQ_H | ||
3 | |||
4 | |||
5 | extern volatile unsigned long irq_err_count; | ||
6 | |||
7 | #ifdef CONFIG_ALPHA_GENERIC | ||
8 | #define ACTUAL_NR_IRQS alpha_mv.nr_irqs | ||
9 | #else | ||
10 | #define ACTUAL_NR_IRQS NR_IRQS | ||
11 | #endif | ||
12 | |||
13 | #endif | ||
diff --git a/arch/alpha/include/asm/hwrpb.h b/arch/alpha/include/asm/hwrpb.h new file mode 100644 index 000000000000..8e8f871af7cf --- /dev/null +++ b/arch/alpha/include/asm/hwrpb.h | |||
@@ -0,0 +1,220 @@ | |||
1 | #ifndef __ALPHA_HWRPB_H | ||
2 | #define __ALPHA_HWRPB_H | ||
3 | |||
4 | #define INIT_HWRPB ((struct hwrpb_struct *) 0x10000000) | ||
5 | |||
6 | /* | ||
7 | * DEC processor types for Alpha systems. Found in HWRPB. | ||
8 | * These values are architected. | ||
9 | */ | ||
10 | |||
11 | #define EV3_CPU 1 /* EV3 */ | ||
12 | #define EV4_CPU 2 /* EV4 (21064) */ | ||
13 | #define LCA4_CPU 4 /* LCA4 (21066/21068) */ | ||
14 | #define EV5_CPU 5 /* EV5 (21164) */ | ||
15 | #define EV45_CPU 6 /* EV4.5 (21064/xxx) */ | ||
16 | #define EV56_CPU 7 /* EV5.6 (21164) */ | ||
17 | #define EV6_CPU 8 /* EV6 (21264) */ | ||
18 | #define PCA56_CPU 9 /* PCA56 (21164PC) */ | ||
19 | #define PCA57_CPU 10 /* PCA57 (notyet) */ | ||
20 | #define EV67_CPU 11 /* EV67 (21264A) */ | ||
21 | #define EV68CB_CPU 12 /* EV68CB (21264C) */ | ||
22 | #define EV68AL_CPU 13 /* EV68AL (21264B) */ | ||
23 | #define EV68CX_CPU 14 /* EV68CX (21264D) */ | ||
24 | #define EV7_CPU 15 /* EV7 (21364) */ | ||
25 | #define EV79_CPU 16 /* EV79 (21364??) */ | ||
26 | #define EV69_CPU 17 /* EV69 (21264/EV69A) */ | ||
27 | |||
28 | /* | ||
29 | * DEC system types for Alpha systems. Found in HWRPB. | ||
30 | * These values are architected. | ||
31 | */ | ||
32 | |||
33 | #define ST_ADU 1 /* Alpha ADU systype */ | ||
34 | #define ST_DEC_4000 2 /* Cobra systype */ | ||
35 | #define ST_DEC_7000 3 /* Ruby systype */ | ||
36 | #define ST_DEC_3000_500 4 /* Flamingo systype */ | ||
37 | #define ST_DEC_2000_300 6 /* Jensen systype */ | ||
38 | #define ST_DEC_3000_300 7 /* Pelican systype */ | ||
39 | #define ST_DEC_2100_A500 9 /* Sable systype */ | ||
40 | #define ST_DEC_AXPVME_64 10 /* AXPvme system type */ | ||
41 | #define ST_DEC_AXPPCI_33 11 /* NoName system type */ | ||
42 | #define ST_DEC_TLASER 12 /* Turbolaser systype */ | ||
43 | #define ST_DEC_2100_A50 13 /* Avanti systype */ | ||
44 | #define ST_DEC_MUSTANG 14 /* Mustang systype */ | ||
45 | #define ST_DEC_ALCOR 15 /* Alcor (EV5) systype */ | ||
46 | #define ST_DEC_1000 17 /* Mikasa systype */ | ||
47 | #define ST_DEC_EB64 18 /* EB64 systype */ | ||
48 | #define ST_DEC_EB66 19 /* EB66 systype */ | ||
49 | #define ST_DEC_EB64P 20 /* EB64+ systype */ | ||
50 | #define ST_DEC_BURNS 21 /* laptop systype */ | ||
51 | #define ST_DEC_RAWHIDE 22 /* Rawhide systype */ | ||
52 | #define ST_DEC_K2 23 /* K2 systype */ | ||
53 | #define ST_DEC_LYNX 24 /* Lynx systype */ | ||
54 | #define ST_DEC_XL 25 /* Alpha XL systype */ | ||
55 | #define ST_DEC_EB164 26 /* EB164 systype */ | ||
56 | #define ST_DEC_NORITAKE 27 /* Noritake systype */ | ||
57 | #define ST_DEC_CORTEX 28 /* Cortex systype */ | ||
58 | #define ST_DEC_MIATA 30 /* Miata systype */ | ||
59 | #define ST_DEC_XXM 31 /* XXM systype */ | ||
60 | #define ST_DEC_TAKARA 32 /* Takara systype */ | ||
61 | #define ST_DEC_YUKON 33 /* Yukon systype */ | ||
62 | #define ST_DEC_TSUNAMI 34 /* Tsunami systype */ | ||
63 | #define ST_DEC_WILDFIRE 35 /* Wildfire systype */ | ||
64 | #define ST_DEC_CUSCO 36 /* CUSCO systype */ | ||
65 | #define ST_DEC_EIGER 37 /* Eiger systype */ | ||
66 | #define ST_DEC_TITAN 38 /* Titan systype */ | ||
67 | #define ST_DEC_MARVEL 39 /* Marvel systype */ | ||
68 | |||
69 | /* UNOFFICIAL!!! */ | ||
70 | #define ST_UNOFFICIAL_BIAS 100 | ||
71 | #define ST_DTI_RUFFIAN 101 /* RUFFIAN systype */ | ||
72 | |||
73 | /* Alpha Processor, Inc. systems */ | ||
74 | #define ST_API_BIAS 200 | ||
75 | #define ST_API_NAUTILUS 201 /* UP1000 systype */ | ||
76 | |||
77 | struct pcb_struct { | ||
78 | unsigned long ksp; | ||
79 | unsigned long usp; | ||
80 | unsigned long ptbr; | ||
81 | unsigned int pcc; | ||
82 | unsigned int asn; | ||
83 | unsigned long unique; | ||
84 | unsigned long flags; | ||
85 | unsigned long res1, res2; | ||
86 | }; | ||
87 | |||
88 | struct percpu_struct { | ||
89 | unsigned long hwpcb[16]; | ||
90 | unsigned long flags; | ||
91 | unsigned long pal_mem_size; | ||
92 | unsigned long pal_scratch_size; | ||
93 | unsigned long pal_mem_pa; | ||
94 | unsigned long pal_scratch_pa; | ||
95 | unsigned long pal_revision; | ||
96 | unsigned long type; | ||
97 | unsigned long variation; | ||
98 | unsigned long revision; | ||
99 | unsigned long serial_no[2]; | ||
100 | unsigned long logout_area_pa; | ||
101 | unsigned long logout_area_len; | ||
102 | unsigned long halt_PCBB; | ||
103 | unsigned long halt_PC; | ||
104 | unsigned long halt_PS; | ||
105 | unsigned long halt_arg; | ||
106 | unsigned long halt_ra; | ||
107 | unsigned long halt_pv; | ||
108 | unsigned long halt_reason; | ||
109 | unsigned long res; | ||
110 | unsigned long ipc_buffer[21]; | ||
111 | unsigned long palcode_avail[16]; | ||
112 | unsigned long compatibility; | ||
113 | unsigned long console_data_log_pa; | ||
114 | unsigned long console_data_log_length; | ||
115 | unsigned long bcache_info; | ||
116 | }; | ||
117 | |||
118 | struct procdesc_struct { | ||
119 | unsigned long weird_vms_stuff; | ||
120 | unsigned long address; | ||
121 | }; | ||
122 | |||
123 | struct vf_map_struct { | ||
124 | unsigned long va; | ||
125 | unsigned long pa; | ||
126 | unsigned long count; | ||
127 | }; | ||
128 | |||
129 | struct crb_struct { | ||
130 | struct procdesc_struct * dispatch_va; | ||
131 | struct procdesc_struct * dispatch_pa; | ||
132 | struct procdesc_struct * fixup_va; | ||
133 | struct procdesc_struct * fixup_pa; | ||
134 | /* virtual->physical map */ | ||
135 | unsigned long map_entries; | ||
136 | unsigned long map_pages; | ||
137 | struct vf_map_struct map[1]; | ||
138 | }; | ||
139 | |||
140 | struct memclust_struct { | ||
141 | unsigned long start_pfn; | ||
142 | unsigned long numpages; | ||
143 | unsigned long numtested; | ||
144 | unsigned long bitmap_va; | ||
145 | unsigned long bitmap_pa; | ||
146 | unsigned long bitmap_chksum; | ||
147 | unsigned long usage; | ||
148 | }; | ||
149 | |||
150 | struct memdesc_struct { | ||
151 | unsigned long chksum; | ||
152 | unsigned long optional_pa; | ||
153 | unsigned long numclusters; | ||
154 | struct memclust_struct cluster[0]; | ||
155 | }; | ||
156 | |||
157 | struct dsr_struct { | ||
158 | long smm; /* SMM number used by LMF */ | ||
159 | unsigned long lurt_off; /* offset to LURT table */ | ||
160 | unsigned long sysname_off; /* offset to sysname char count */ | ||
161 | }; | ||
162 | |||
163 | struct hwrpb_struct { | ||
164 | unsigned long phys_addr; /* check: physical address of the hwrpb */ | ||
165 | unsigned long id; /* check: "HWRPB\0\0\0" */ | ||
166 | unsigned long revision; | ||
167 | unsigned long size; /* size of hwrpb */ | ||
168 | unsigned long cpuid; | ||
169 | unsigned long pagesize; /* 8192, I hope */ | ||
170 | unsigned long pa_bits; /* number of physical address bits */ | ||
171 | unsigned long max_asn; | ||
172 | unsigned char ssn[16]; /* system serial number: big brother is watching */ | ||
173 | unsigned long sys_type; | ||
174 | unsigned long sys_variation; | ||
175 | unsigned long sys_revision; | ||
176 | unsigned long intr_freq; /* interval clock frequency * 4096 */ | ||
177 | unsigned long cycle_freq; /* cycle counter frequency */ | ||
178 | unsigned long vptb; /* Virtual Page Table Base address */ | ||
179 | unsigned long res1; | ||
180 | unsigned long tbhb_offset; /* Translation Buffer Hint Block */ | ||
181 | unsigned long nr_processors; | ||
182 | unsigned long processor_size; | ||
183 | unsigned long processor_offset; | ||
184 | unsigned long ctb_nr; | ||
185 | unsigned long ctb_size; /* console terminal block size */ | ||
186 | unsigned long ctbt_offset; /* console terminal block table offset */ | ||
187 | unsigned long crb_offset; /* console callback routine block */ | ||
188 | unsigned long mddt_offset; /* memory data descriptor table */ | ||
189 | unsigned long cdb_offset; /* configuration data block (or NULL) */ | ||
190 | unsigned long frut_offset; /* FRU table (or NULL) */ | ||
191 | void (*save_terminal)(unsigned long); | ||
192 | unsigned long save_terminal_data; | ||
193 | void (*restore_terminal)(unsigned long); | ||
194 | unsigned long restore_terminal_data; | ||
195 | void (*CPU_restart)(unsigned long); | ||
196 | unsigned long CPU_restart_data; | ||
197 | unsigned long res2; | ||
198 | unsigned long res3; | ||
199 | unsigned long chksum; | ||
200 | unsigned long rxrdy; | ||
201 | unsigned long txrdy; | ||
202 | unsigned long dsr_offset; /* "Dynamic System Recognition Data Block Table" */ | ||
203 | }; | ||
204 | |||
205 | #ifdef __KERNEL__ | ||
206 | |||
207 | extern struct hwrpb_struct *hwrpb; | ||
208 | |||
209 | static inline void | ||
210 | hwrpb_update_checksum(struct hwrpb_struct *h) | ||
211 | { | ||
212 | unsigned long sum = 0, *l; | ||
213 | for (l = (unsigned long *) h; l < (unsigned long *) &h->chksum; ++l) | ||
214 | sum += *l; | ||
215 | h->chksum = sum; | ||
216 | } | ||
217 | |||
218 | #endif /* __KERNEL__ */ | ||
219 | |||
220 | #endif /* __ALPHA_HWRPB_H */ | ||
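The per-CPU slots sit processor_offset bytes past the HWRPB and are processor_size bytes apart, and any change to an HWRPB field has to be followed by hwrpb_update_checksum() so the console still accepts the block. A hedged sketch of walking the slots (kernel context, hwrpb already set up):

	/* Sketch: visit each per-CPU slot described by the HWRPB, using the
	 * processor_offset/processor_size/nr_processors fields above. */
	static void
	for_each_percpu_slot(void (*fn)(struct percpu_struct *, unsigned long))
	{
		unsigned long i;

		for (i = 0; i < hwrpb->nr_processors; i++) {
			struct percpu_struct *cpu = (struct percpu_struct *)
				((char *)hwrpb + hwrpb->processor_offset
					       + i * hwrpb->processor_size);
			fn(cpu, i);
		}
	}

	/* After modifying an HWRPB field (say the CPU_restart vector),
	 * re-seal the block: */
	/* hwrpb_update_checksum(hwrpb); */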
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h new file mode 100644 index 000000000000..e971ab000f95 --- /dev/null +++ b/arch/alpha/include/asm/io.h | |||
@@ -0,0 +1,577 @@ | |||
1 | #ifndef __ALPHA_IO_H | ||
2 | #define __ALPHA_IO_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/mm.h> | ||
8 | #include <asm/compiler.h> | ||
9 | #include <asm/system.h> | ||
10 | #include <asm/pgtable.h> | ||
11 | #include <asm/machvec.h> | ||
12 | #include <asm/hwrpb.h> | ||
13 | |||
14 | /* The generic header contains only prototypes. Including it ensures that | ||
15 | the implementation we have here matches that interface. */ | ||
16 | #include <asm-generic/iomap.h> | ||
17 | |||
18 | /* We don't use IO slowdowns on the Alpha, but.. */ | ||
19 | #define __SLOW_DOWN_IO do { } while (0) | ||
20 | #define SLOW_DOWN_IO do { } while (0) | ||
21 | |||
22 | /* | ||
23 | * Virtual -> physical identity mapping starts at this offset | ||
24 | */ | ||
25 | #ifdef USE_48_BIT_KSEG | ||
26 | #define IDENT_ADDR 0xffff800000000000UL | ||
27 | #else | ||
28 | #define IDENT_ADDR 0xfffffc0000000000UL | ||
29 | #endif | ||
30 | |||
31 | /* | ||
32 | * We try to avoid hae updates (thus the cache), but when we | ||
33 | * do need to update the hae, we need to do it atomically, so | ||
34 | * that any interrupts wouldn't get confused with the hae | ||
35 | * register not being up-to-date with respect to the hardware | ||
36 | * value. | ||
37 | */ | ||
38 | extern inline void __set_hae(unsigned long new_hae) | ||
39 | { | ||
40 | unsigned long flags; | ||
41 | local_irq_save(flags); | ||
42 | |||
43 | alpha_mv.hae_cache = new_hae; | ||
44 | *alpha_mv.hae_register = new_hae; | ||
45 | mb(); | ||
46 | /* Re-read to make sure it was written. */ | ||
47 | new_hae = *alpha_mv.hae_register; | ||
48 | |||
49 | local_irq_restore(flags); | ||
50 | } | ||
51 | |||
52 | extern inline void set_hae(unsigned long new_hae) | ||
53 | { | ||
54 | if (new_hae != alpha_mv.hae_cache) | ||
55 | __set_hae(new_hae); | ||
56 | } | ||
57 | |||
58 | /* | ||
59 | * Change virtual addresses to physical addresses and vice versa. | ||
60 | */ | ||
61 | #ifdef USE_48_BIT_KSEG | ||
62 | static inline unsigned long virt_to_phys(void *address) | ||
63 | { | ||
64 | return (unsigned long)address - IDENT_ADDR; | ||
65 | } | ||
66 | |||
67 | static inline void * phys_to_virt(unsigned long address) | ||
68 | { | ||
69 | return (void *) (address + IDENT_ADDR); | ||
70 | } | ||
71 | #else | ||
72 | static inline unsigned long virt_to_phys(void *address) | ||
73 | { | ||
74 | unsigned long phys = (unsigned long)address; | ||
75 | |||
76 | /* Sign-extend from bit 41. */ | ||
77 | phys <<= (64 - 41); | ||
78 | phys = (long)phys >> (64 - 41); | ||
79 | |||
80 | /* Crop to the physical address width of the processor. */ | ||
81 | phys &= (1ul << hwrpb->pa_bits) - 1; | ||
82 | |||
83 | return phys; | ||
84 | } | ||
85 | |||
86 | static inline void * phys_to_virt(unsigned long address) | ||
87 | { | ||
88 | return (void *)(IDENT_ADDR + (address & ((1ul << 41) - 1))); | ||
89 | } | ||
90 | #endif | ||
91 | |||
92 | #define page_to_phys(page) page_to_pa(page) | ||
93 | |||
94 | static inline dma_addr_t __deprecated isa_page_to_bus(struct page *page) | ||
95 | { | ||
96 | return page_to_phys(page); | ||
97 | } | ||
98 | |||
99 | /* This depends on working iommu. */ | ||
100 | #define BIO_VMERGE_BOUNDARY (alpha_mv.mv_pci_tbi ? PAGE_SIZE : 0) | ||
101 | |||
102 | /* Maximum PIO space address supported? */ | ||
103 | #define IO_SPACE_LIMIT 0xffff | ||
104 | |||
105 | /* | ||
106 | * Change addresses as seen by the kernel (virtual) to addresses as | ||
107 | * seen by a device (bus), and vice versa. | ||
108 | * | ||
109 | * Note that this only works for a limited range of kernel addresses, | ||
110 | * and very well may not span all memory. Consider this interface | ||
111 | * deprecated in favour of the DMA-mapping API. | ||
112 | */ | ||
113 | extern unsigned long __direct_map_base; | ||
114 | extern unsigned long __direct_map_size; | ||
115 | |||
116 | static inline unsigned long __deprecated virt_to_bus(void *address) | ||
117 | { | ||
118 | unsigned long phys = virt_to_phys(address); | ||
119 | unsigned long bus = phys + __direct_map_base; | ||
120 | return phys <= __direct_map_size ? bus : 0; | ||
121 | } | ||
122 | #define isa_virt_to_bus virt_to_bus | ||
123 | |||
124 | static inline void * __deprecated bus_to_virt(unsigned long address) | ||
125 | { | ||
126 | void *virt; | ||
127 | |||
128 | /* This check is a sanity check but also ensures that bus address 0 | ||
129 | maps to virtual address 0 which is useful to detect null pointers | ||
130 | (the NCR driver is much simpler if NULL pointers are preserved). */ | ||
131 | address -= __direct_map_base; | ||
132 | virt = phys_to_virt(address); | ||
133 | return (long)address <= 0 ? NULL : virt; | ||
134 | } | ||
135 | #define isa_bus_to_virt bus_to_virt | ||
136 | |||
137 | /* | ||
138 | * There are different chipsets to interface the Alpha CPUs to the world. | ||
139 | */ | ||
140 | |||
141 | #define IO_CONCAT(a,b) _IO_CONCAT(a,b) | ||
142 | #define _IO_CONCAT(a,b) a ## _ ## b | ||
143 | |||
144 | #ifdef CONFIG_ALPHA_GENERIC | ||
145 | |||
146 | /* In a generic kernel, we always go through the machine vector. */ | ||
147 | |||
148 | #define REMAP1(TYPE, NAME, QUAL) \ | ||
149 | static inline TYPE generic_##NAME(QUAL void __iomem *addr) \ | ||
150 | { \ | ||
151 | return alpha_mv.mv_##NAME(addr); \ | ||
152 | } | ||
153 | |||
154 | #define REMAP2(TYPE, NAME, QUAL) \ | ||
155 | static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr) \ | ||
156 | { \ | ||
157 | alpha_mv.mv_##NAME(b, addr); \ | ||
158 | } | ||
159 | |||
160 | REMAP1(unsigned int, ioread8, /**/) | ||
161 | REMAP1(unsigned int, ioread16, /**/) | ||
162 | REMAP1(unsigned int, ioread32, /**/) | ||
163 | REMAP1(u8, readb, const volatile) | ||
164 | REMAP1(u16, readw, const volatile) | ||
165 | REMAP1(u32, readl, const volatile) | ||
166 | REMAP1(u64, readq, const volatile) | ||
167 | |||
168 | REMAP2(u8, iowrite8, /**/) | ||
169 | REMAP2(u16, iowrite16, /**/) | ||
170 | REMAP2(u32, iowrite32, /**/) | ||
171 | REMAP2(u8, writeb, volatile) | ||
172 | REMAP2(u16, writew, volatile) | ||
173 | REMAP2(u32, writel, volatile) | ||
174 | REMAP2(u64, writeq, volatile) | ||
175 | |||
176 | #undef REMAP1 | ||
177 | #undef REMAP2 | ||
178 | |||
179 | extern inline void __iomem *generic_ioportmap(unsigned long a) | ||
180 | { | ||
181 | return alpha_mv.mv_ioportmap(a); | ||
182 | } | ||
183 | |||
184 | static inline void __iomem *generic_ioremap(unsigned long a, unsigned long s) | ||
185 | { | ||
186 | return alpha_mv.mv_ioremap(a, s); | ||
187 | } | ||
188 | |||
189 | static inline void generic_iounmap(volatile void __iomem *a) | ||
190 | { | ||
191 | return alpha_mv.mv_iounmap(a); | ||
192 | } | ||
193 | |||
194 | static inline int generic_is_ioaddr(unsigned long a) | ||
195 | { | ||
196 | return alpha_mv.mv_is_ioaddr(a); | ||
197 | } | ||
198 | |||
199 | static inline int generic_is_mmio(const volatile void __iomem *a) | ||
200 | { | ||
201 | return alpha_mv.mv_is_mmio(a); | ||
202 | } | ||
203 | |||
204 | #define __IO_PREFIX generic | ||
205 | #define generic_trivial_rw_bw 0 | ||
206 | #define generic_trivial_rw_lq 0 | ||
207 | #define generic_trivial_io_bw 0 | ||
208 | #define generic_trivial_io_lq 0 | ||
209 | #define generic_trivial_iounmap 0 | ||
210 | |||
211 | #else | ||
212 | |||
213 | #if defined(CONFIG_ALPHA_APECS) | ||
214 | # include <asm/core_apecs.h> | ||
215 | #elif defined(CONFIG_ALPHA_CIA) | ||
216 | # include <asm/core_cia.h> | ||
217 | #elif defined(CONFIG_ALPHA_IRONGATE) | ||
218 | # include <asm/core_irongate.h> | ||
219 | #elif defined(CONFIG_ALPHA_JENSEN) | ||
220 | # include <asm/jensen.h> | ||
221 | #elif defined(CONFIG_ALPHA_LCA) | ||
222 | # include <asm/core_lca.h> | ||
223 | #elif defined(CONFIG_ALPHA_MARVEL) | ||
224 | # include <asm/core_marvel.h> | ||
225 | #elif defined(CONFIG_ALPHA_MCPCIA) | ||
226 | # include <asm/core_mcpcia.h> | ||
227 | #elif defined(CONFIG_ALPHA_POLARIS) | ||
228 | # include <asm/core_polaris.h> | ||
229 | #elif defined(CONFIG_ALPHA_T2) | ||
230 | # include <asm/core_t2.h> | ||
231 | #elif defined(CONFIG_ALPHA_TSUNAMI) | ||
232 | # include <asm/core_tsunami.h> | ||
233 | #elif defined(CONFIG_ALPHA_TITAN) | ||
234 | # include <asm/core_titan.h> | ||
235 | #elif defined(CONFIG_ALPHA_WILDFIRE) | ||
236 | # include <asm/core_wildfire.h> | ||
237 | #else | ||
238 | #error "What system is this?" | ||
239 | #endif | ||
240 | |||
241 | #endif /* GENERIC */ | ||
242 | |||
243 | /* | ||
244 | * We always have external versions of these routines. | ||
245 | */ | ||
246 | extern u8 inb(unsigned long port); | ||
247 | extern u16 inw(unsigned long port); | ||
248 | extern u32 inl(unsigned long port); | ||
249 | extern void outb(u8 b, unsigned long port); | ||
250 | extern void outw(u16 b, unsigned long port); | ||
251 | extern void outl(u32 b, unsigned long port); | ||
252 | |||
253 | extern u8 readb(const volatile void __iomem *addr); | ||
254 | extern u16 readw(const volatile void __iomem *addr); | ||
255 | extern u32 readl(const volatile void __iomem *addr); | ||
256 | extern u64 readq(const volatile void __iomem *addr); | ||
257 | extern void writeb(u8 b, volatile void __iomem *addr); | ||
258 | extern void writew(u16 b, volatile void __iomem *addr); | ||
259 | extern void writel(u32 b, volatile void __iomem *addr); | ||
260 | extern void writeq(u64 b, volatile void __iomem *addr); | ||
261 | |||
262 | extern u8 __raw_readb(const volatile void __iomem *addr); | ||
263 | extern u16 __raw_readw(const volatile void __iomem *addr); | ||
264 | extern u32 __raw_readl(const volatile void __iomem *addr); | ||
265 | extern u64 __raw_readq(const volatile void __iomem *addr); | ||
266 | extern void __raw_writeb(u8 b, volatile void __iomem *addr); | ||
267 | extern void __raw_writew(u16 b, volatile void __iomem *addr); | ||
268 | extern void __raw_writel(u32 b, volatile void __iomem *addr); | ||
269 | extern void __raw_writeq(u64 b, volatile void __iomem *addr); | ||
270 | |||
271 | /* | ||
272 | * Mapping from port numbers to __iomem space is pretty easy. | ||
273 | */ | ||
274 | |||
275 | /* These two have to be extern inline because of the extern prototype from | ||
276 | <asm-generic/iomap.h>. It is not legal to mix "extern" and "static" for | ||
277 | the same declaration. */ | ||
278 | extern inline void __iomem *ioport_map(unsigned long port, unsigned int size) | ||
279 | { | ||
280 | return IO_CONCAT(__IO_PREFIX,ioportmap) (port); | ||
281 | } | ||
282 | |||
283 | extern inline void ioport_unmap(void __iomem *addr) | ||
284 | { | ||
285 | } | ||
286 | |||
287 | static inline void __iomem *ioremap(unsigned long port, unsigned long size) | ||
288 | { | ||
289 | return IO_CONCAT(__IO_PREFIX,ioremap) (port, size); | ||
290 | } | ||
291 | |||
292 | static inline void __iomem *__ioremap(unsigned long port, unsigned long size, | ||
293 | unsigned long flags) | ||
294 | { | ||
295 | return ioremap(port, size); | ||
296 | } | ||
297 | |||
298 | static inline void __iomem * ioremap_nocache(unsigned long offset, | ||
299 | unsigned long size) | ||
300 | { | ||
301 | return ioremap(offset, size); | ||
302 | } | ||
303 | |||
304 | static inline void iounmap(volatile void __iomem *addr) | ||
305 | { | ||
306 | IO_CONCAT(__IO_PREFIX,iounmap)(addr); | ||
307 | } | ||
308 | |||
309 | static inline int __is_ioaddr(unsigned long addr) | ||
310 | { | ||
311 | return IO_CONCAT(__IO_PREFIX,is_ioaddr)(addr); | ||
312 | } | ||
313 | #define __is_ioaddr(a) __is_ioaddr((unsigned long)(a)) | ||
314 | |||
315 | static inline int __is_mmio(const volatile void __iomem *addr) | ||
316 | { | ||
317 | return IO_CONCAT(__IO_PREFIX,is_mmio)(addr); | ||
318 | } | ||
319 | |||
320 | |||
321 | /* | ||
322 | * If the actual I/O bits are sufficiently trivial, then expand inline. | ||
323 | */ | ||
324 | |||
325 | #if IO_CONCAT(__IO_PREFIX,trivial_io_bw) | ||
326 | extern inline unsigned int ioread8(void __iomem *addr) | ||
327 | { | ||
328 | unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr); | ||
329 | mb(); | ||
330 | return ret; | ||
331 | } | ||
332 | |||
333 | extern inline unsigned int ioread16(void __iomem *addr) | ||
334 | { | ||
335 | unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr); | ||
336 | mb(); | ||
337 | return ret; | ||
338 | } | ||
339 | |||
340 | extern inline void iowrite8(u8 b, void __iomem *addr) | ||
341 | { | ||
342 | IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr); | ||
343 | mb(); | ||
344 | } | ||
345 | |||
346 | extern inline void iowrite16(u16 b, void __iomem *addr) | ||
347 | { | ||
348 | IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr); | ||
349 | mb(); | ||
350 | } | ||
351 | |||
352 | extern inline u8 inb(unsigned long port) | ||
353 | { | ||
354 | return ioread8(ioport_map(port, 1)); | ||
355 | } | ||
356 | |||
357 | extern inline u16 inw(unsigned long port) | ||
358 | { | ||
359 | return ioread16(ioport_map(port, 2)); | ||
360 | } | ||
361 | |||
362 | extern inline void outb(u8 b, unsigned long port) | ||
363 | { | ||
364 | iowrite8(b, ioport_map(port, 1)); | ||
365 | } | ||
366 | |||
367 | extern inline void outw(u16 b, unsigned long port) | ||
368 | { | ||
369 | iowrite16(b, ioport_map(port, 2)); | ||
370 | } | ||
371 | #endif | ||
372 | |||
373 | #if IO_CONCAT(__IO_PREFIX,trivial_io_lq) | ||
374 | extern inline unsigned int ioread32(void __iomem *addr) | ||
375 | { | ||
376 | unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr); | ||
377 | mb(); | ||
378 | return ret; | ||
379 | } | ||
380 | |||
381 | extern inline void iowrite32(u32 b, void __iomem *addr) | ||
382 | { | ||
383 | IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr); | ||
384 | mb(); | ||
385 | } | ||
386 | |||
387 | extern inline u32 inl(unsigned long port) | ||
388 | { | ||
389 | return ioread32(ioport_map(port, 4)); | ||
390 | } | ||
391 | |||
392 | extern inline void outl(u32 b, unsigned long port) | ||
393 | { | ||
394 | iowrite32(b, ioport_map(port, 4)); | ||
395 | } | ||
396 | #endif | ||
397 | |||
398 | #if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1 | ||
399 | extern inline u8 __raw_readb(const volatile void __iomem *addr) | ||
400 | { | ||
401 | return IO_CONCAT(__IO_PREFIX,readb)(addr); | ||
402 | } | ||
403 | |||
404 | extern inline u16 __raw_readw(const volatile void __iomem *addr) | ||
405 | { | ||
406 | return IO_CONCAT(__IO_PREFIX,readw)(addr); | ||
407 | } | ||
408 | |||
409 | extern inline void __raw_writeb(u8 b, volatile void __iomem *addr) | ||
410 | { | ||
411 | IO_CONCAT(__IO_PREFIX,writeb)(b, addr); | ||
412 | } | ||
413 | |||
414 | extern inline void __raw_writew(u16 b, volatile void __iomem *addr) | ||
415 | { | ||
416 | IO_CONCAT(__IO_PREFIX,writew)(b, addr); | ||
417 | } | ||
418 | |||
419 | extern inline u8 readb(const volatile void __iomem *addr) | ||
420 | { | ||
421 | u8 ret = __raw_readb(addr); | ||
422 | mb(); | ||
423 | return ret; | ||
424 | } | ||
425 | |||
426 | extern inline u16 readw(const volatile void __iomem *addr) | ||
427 | { | ||
428 | u16 ret = __raw_readw(addr); | ||
429 | mb(); | ||
430 | return ret; | ||
431 | } | ||
432 | |||
433 | extern inline void writeb(u8 b, volatile void __iomem *addr) | ||
434 | { | ||
435 | __raw_writeb(b, addr); | ||
436 | mb(); | ||
437 | } | ||
438 | |||
439 | extern inline void writew(u16 b, volatile void __iomem *addr) | ||
440 | { | ||
441 | __raw_writew(b, addr); | ||
442 | mb(); | ||
443 | } | ||
444 | #endif | ||
445 | |||
446 | #if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1 | ||
447 | extern inline u32 __raw_readl(const volatile void __iomem *addr) | ||
448 | { | ||
449 | return IO_CONCAT(__IO_PREFIX,readl)(addr); | ||
450 | } | ||
451 | |||
452 | extern inline u64 __raw_readq(const volatile void __iomem *addr) | ||
453 | { | ||
454 | return IO_CONCAT(__IO_PREFIX,readq)(addr); | ||
455 | } | ||
456 | |||
457 | extern inline void __raw_writel(u32 b, volatile void __iomem *addr) | ||
458 | { | ||
459 | IO_CONCAT(__IO_PREFIX,writel)(b, addr); | ||
460 | } | ||
461 | |||
462 | extern inline void __raw_writeq(u64 b, volatile void __iomem *addr) | ||
463 | { | ||
464 | IO_CONCAT(__IO_PREFIX,writeq)(b, addr); | ||
465 | } | ||
466 | |||
467 | extern inline u32 readl(const volatile void __iomem *addr) | ||
468 | { | ||
469 | u32 ret = __raw_readl(addr); | ||
470 | mb(); | ||
471 | return ret; | ||
472 | } | ||
473 | |||
474 | extern inline u64 readq(const volatile void __iomem *addr) | ||
475 | { | ||
476 | u64 ret = __raw_readq(addr); | ||
477 | mb(); | ||
478 | return ret; | ||
479 | } | ||
480 | |||
481 | extern inline void writel(u32 b, volatile void __iomem *addr) | ||
482 | { | ||
483 | __raw_writel(b, addr); | ||
484 | mb(); | ||
485 | } | ||
486 | |||
487 | extern inline void writeq(u64 b, volatile void __iomem *addr) | ||
488 | { | ||
489 | __raw_writeq(b, addr); | ||
490 | mb(); | ||
491 | } | ||
492 | #endif | ||
493 | |||
494 | #define inb_p inb | ||
495 | #define inw_p inw | ||
496 | #define inl_p inl | ||
497 | #define outb_p outb | ||
498 | #define outw_p outw | ||
499 | #define outl_p outl | ||
500 | #define readb_relaxed(addr) __raw_readb(addr) | ||
501 | #define readw_relaxed(addr) __raw_readw(addr) | ||
502 | #define readl_relaxed(addr) __raw_readl(addr) | ||
503 | #define readq_relaxed(addr) __raw_readq(addr) | ||
504 | |||
505 | #define mmiowb() | ||
506 | |||
507 | /* | ||
508 | * String version of IO memory access ops: | ||
509 | */ | ||
510 | extern void memcpy_fromio(void *, const volatile void __iomem *, long); | ||
511 | extern void memcpy_toio(volatile void __iomem *, const void *, long); | ||
512 | extern void _memset_c_io(volatile void __iomem *, unsigned long, long); | ||
513 | |||
514 | static inline void memset_io(volatile void __iomem *addr, u8 c, long len) | ||
515 | { | ||
516 | _memset_c_io(addr, 0x0101010101010101UL * c, len); | ||
517 | } | ||
518 | |||
519 | #define __HAVE_ARCH_MEMSETW_IO | ||
520 | static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len) | ||
521 | { | ||
522 | _memset_c_io(addr, 0x0001000100010001UL * c, len); | ||
523 | } | ||
524 | |||
525 | /* | ||
526 | * String versions of in/out ops: | ||
527 | */ | ||
528 | extern void insb (unsigned long port, void *dst, unsigned long count); | ||
529 | extern void insw (unsigned long port, void *dst, unsigned long count); | ||
530 | extern void insl (unsigned long port, void *dst, unsigned long count); | ||
531 | extern void outsb (unsigned long port, const void *src, unsigned long count); | ||
532 | extern void outsw (unsigned long port, const void *src, unsigned long count); | ||
533 | extern void outsl (unsigned long port, const void *src, unsigned long count); | ||
534 | |||
535 | /* | ||
536 | * The Alpha Jensen hardware for some rather strange reason puts | ||
537 | * the RTC clock at 0x170 instead of 0x70. Probably due to some | ||
538 | * misguided idea about using 0x70 for NMI stuff. | ||
539 | * | ||
540 | * These defines will override the defaults when doing RTC queries | ||
541 | */ | ||
542 | |||
543 | #ifdef CONFIG_ALPHA_GENERIC | ||
544 | # define RTC_PORT(x) ((x) + alpha_mv.rtc_port) | ||
545 | #else | ||
546 | # ifdef CONFIG_ALPHA_JENSEN | ||
547 | # define RTC_PORT(x) (0x170+(x)) | ||
548 | # else | ||
549 | # define RTC_PORT(x) (0x70 + (x)) | ||
550 | # endif | ||
551 | #endif | ||
552 | #define RTC_ALWAYS_BCD 0 | ||
553 | |||
554 | /* | ||
555 | * Some mucking forons use if[n]def writeq to check if platform has it. | ||
556 | * It's a bloody bad idea and we probably want ARCH_HAS_WRITEQ for them | ||
557 | * to play with; for now just use cpp anti-recursion logics and make sure | ||
558 | * that damn thing is defined and expands to itself. | ||
559 | */ | ||
560 | |||
561 | #define writeq writeq | ||
562 | #define readq readq | ||
563 | |||
564 | /* | ||
565 | * Convert a physical pointer to a virtual kernel pointer for /dev/mem | ||
566 | * access | ||
567 | */ | ||
568 | #define xlate_dev_mem_ptr(p) __va(p) | ||
569 | |||
570 | /* | ||
571 | * Convert a virtual cached pointer to an uncached pointer | ||
572 | */ | ||
573 | #define xlate_dev_kmem_ptr(p) p | ||
574 | |||
575 | #endif /* __KERNEL__ */ | ||
576 | |||
577 | #endif /* __ALPHA_IO_H */ | ||
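Regardless of whether the chipset accessors are expanded inline or routed through the machine vector, a driver sees the portable pattern below. The base address and register offsets are invented for the example; writel() itself supplies the trailing mb() as shown above.

	/* Sketch: ordinary MMIO access through ioremap()/readl()/writel(). */
	#define EXAMPLE_MMIO_BASE	0x9000000UL	/* hypothetical bus address */
	#define EXAMPLE_REG_CTRL	0x00
	#define EXAMPLE_REG_STATUS	0x04

	static int example_probe(void)
	{
		void __iomem *regs = ioremap(EXAMPLE_MMIO_BASE, 0x100);
		u32 status;

		if (!regs)
			return -ENOMEM;

		writel(0x1, regs + EXAMPLE_REG_CTRL);	/* kick the device */
		status = readl(regs + EXAMPLE_REG_STATUS);

		iounmap(regs);
		return status ? 0 : -EIO;
	}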
diff --git a/arch/alpha/include/asm/io_trivial.h b/arch/alpha/include/asm/io_trivial.h new file mode 100644 index 000000000000..1c77f10b4b36 --- /dev/null +++ b/arch/alpha/include/asm/io_trivial.h | |||
@@ -0,0 +1,131 @@ | |||
1 | /* Trivial implementations of basic i/o routines. Assumes that all | ||
2 | of the hard work has been done by ioremap and ioportmap, and that | ||
3 | access to i/o space is linear. */ | ||
4 | |||
5 | /* This file may be included multiple times. */ | ||
6 | |||
7 | #if IO_CONCAT(__IO_PREFIX,trivial_io_bw) | ||
8 | __EXTERN_INLINE unsigned int | ||
9 | IO_CONCAT(__IO_PREFIX,ioread8)(void __iomem *a) | ||
10 | { | ||
11 | return __kernel_ldbu(*(volatile u8 __force *)a); | ||
12 | } | ||
13 | |||
14 | __EXTERN_INLINE unsigned int | ||
15 | IO_CONCAT(__IO_PREFIX,ioread16)(void __iomem *a) | ||
16 | { | ||
17 | return __kernel_ldwu(*(volatile u16 __force *)a); | ||
18 | } | ||
19 | |||
20 | __EXTERN_INLINE void | ||
21 | IO_CONCAT(__IO_PREFIX,iowrite8)(u8 b, void __iomem *a) | ||
22 | { | ||
23 | __kernel_stb(b, *(volatile u8 __force *)a); | ||
24 | } | ||
25 | |||
26 | __EXTERN_INLINE void | ||
27 | IO_CONCAT(__IO_PREFIX,iowrite16)(u16 b, void __iomem *a) | ||
28 | { | ||
29 | __kernel_stw(b, *(volatile u16 __force *)a); | ||
30 | } | ||
31 | #endif | ||
32 | |||
33 | #if IO_CONCAT(__IO_PREFIX,trivial_io_lq) | ||
34 | __EXTERN_INLINE unsigned int | ||
35 | IO_CONCAT(__IO_PREFIX,ioread32)(void __iomem *a) | ||
36 | { | ||
37 | return *(volatile u32 __force *)a; | ||
38 | } | ||
39 | |||
40 | __EXTERN_INLINE void | ||
41 | IO_CONCAT(__IO_PREFIX,iowrite32)(u32 b, void __iomem *a) | ||
42 | { | ||
43 | *(volatile u32 __force *)a = b; | ||
44 | } | ||
45 | #endif | ||
46 | |||
47 | #if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1 | ||
48 | __EXTERN_INLINE u8 | ||
49 | IO_CONCAT(__IO_PREFIX,readb)(const volatile void __iomem *a) | ||
50 | { | ||
51 | return __kernel_ldbu(*(const volatile u8 __force *)a); | ||
52 | } | ||
53 | |||
54 | __EXTERN_INLINE u16 | ||
55 | IO_CONCAT(__IO_PREFIX,readw)(const volatile void __iomem *a) | ||
56 | { | ||
57 | return __kernel_ldwu(*(const volatile u16 __force *)a); | ||
58 | } | ||
59 | |||
60 | __EXTERN_INLINE void | ||
61 | IO_CONCAT(__IO_PREFIX,writeb)(u8 b, volatile void __iomem *a) | ||
62 | { | ||
63 | __kernel_stb(b, *(volatile u8 __force *)a); | ||
64 | } | ||
65 | |||
66 | __EXTERN_INLINE void | ||
67 | IO_CONCAT(__IO_PREFIX,writew)(u16 b, volatile void __iomem *a) | ||
68 | { | ||
69 | __kernel_stw(b, *(volatile u16 __force *)a); | ||
70 | } | ||
71 | #elif IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 2 | ||
72 | __EXTERN_INLINE u8 | ||
73 | IO_CONCAT(__IO_PREFIX,readb)(const volatile void __iomem *a) | ||
74 | { | ||
75 | void __iomem *addr = (void __iomem *)a; | ||
76 | return IO_CONCAT(__IO_PREFIX,ioread8)(addr); | ||
77 | } | ||
78 | |||
79 | __EXTERN_INLINE u16 | ||
80 | IO_CONCAT(__IO_PREFIX,readw)(const volatile void __iomem *a) | ||
81 | { | ||
82 | void __iomem *addr = (void __iomem *)a; | ||
83 | return IO_CONCAT(__IO_PREFIX,ioread16)(addr); | ||
84 | } | ||
85 | |||
86 | __EXTERN_INLINE void | ||
87 | IO_CONCAT(__IO_PREFIX,writeb)(u8 b, volatile void __iomem *a) | ||
88 | { | ||
89 | void __iomem *addr = (void __iomem *)a; | ||
90 | IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr); | ||
91 | } | ||
92 | |||
93 | __EXTERN_INLINE void | ||
94 | IO_CONCAT(__IO_PREFIX,writew)(u16 b, volatile void __iomem *a) | ||
95 | { | ||
96 | void __iomem *addr = (void __iomem *)a; | ||
97 | IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr); | ||
98 | } | ||
99 | #endif | ||
100 | |||
101 | #if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1 | ||
102 | __EXTERN_INLINE u32 | ||
103 | IO_CONCAT(__IO_PREFIX,readl)(const volatile void __iomem *a) | ||
104 | { | ||
105 | return *(const volatile u32 __force *)a; | ||
106 | } | ||
107 | |||
108 | __EXTERN_INLINE u64 | ||
109 | IO_CONCAT(__IO_PREFIX,readq)(const volatile void __iomem *a) | ||
110 | { | ||
111 | return *(const volatile u64 __force *)a; | ||
112 | } | ||
113 | |||
114 | __EXTERN_INLINE void | ||
115 | IO_CONCAT(__IO_PREFIX,writel)(u32 b, volatile void __iomem *a) | ||
116 | { | ||
117 | *(volatile u32 __force *)a = b; | ||
118 | } | ||
119 | |||
120 | __EXTERN_INLINE void | ||
121 | IO_CONCAT(__IO_PREFIX,writeq)(u64 b, volatile void __iomem *a) | ||
122 | { | ||
123 | *(volatile u64 __force *)a = b; | ||
124 | } | ||
125 | #endif | ||
126 | |||
127 | #if IO_CONCAT(__IO_PREFIX,trivial_iounmap) | ||
128 | __EXTERN_INLINE void IO_CONCAT(__IO_PREFIX,iounmap)(volatile void __iomem *a) | ||
129 | { | ||
130 | } | ||
131 | #endif | ||
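io_trivial.h is meant to be included more than once: a chipset header defines __IO_PREFIX and the <prefix>_trivial_* flags and then includes it to stamp out whichever accessors really are plain loads and stores. A sketch of that wiring for a made-up "foo" chipset (the flag values are illustrative, not taken from any real core_*.h):

	#define __IO_PREFIX		foo
	#define foo_trivial_rw_bw	1	/* readb/readw are plain accesses */
	#define foo_trivial_rw_lq	1	/* readl/readq likewise */
	#define foo_trivial_io_bw	0	/* ioread8/16 need real helpers */
	#define foo_trivial_io_lq	0
	#define foo_trivial_iounmap	1	/* iounmap is a no-op */

	#include <asm/io_trivial.h>	/* emits foo_readb(), foo_writel(), ... */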
diff --git a/arch/alpha/include/asm/ioctl.h b/arch/alpha/include/asm/ioctl.h new file mode 100644 index 000000000000..fc63727f4178 --- /dev/null +++ b/arch/alpha/include/asm/ioctl.h | |||
@@ -0,0 +1,66 @@ | |||
1 | #ifndef _ALPHA_IOCTL_H | ||
2 | #define _ALPHA_IOCTL_H | ||
3 | |||
4 | /* | ||
5 | * The original linux ioctl numbering scheme was just a general | ||
6 | * "anything goes" setup, where more or less random numbers were | ||
7 | * assigned. Sorry, I was clueless when I started out on this. | ||
8 | * | ||
9 | * On the alpha, we'll try to clean it up a bit, using a more sane | ||
10 | * ioctl numbering, and also trying to be compatible with OSF/1 in | ||
11 | * the process. I'd like to clean it up for the i386 as well, but | ||
12 | * it's so painful recognizing both the new and the old numbers.. | ||
13 | */ | ||
14 | |||
15 | #define _IOC_NRBITS 8 | ||
16 | #define _IOC_TYPEBITS 8 | ||
17 | #define _IOC_SIZEBITS 13 | ||
18 | #define _IOC_DIRBITS 3 | ||
19 | |||
20 | #define _IOC_NRMASK ((1 << _IOC_NRBITS)-1) | ||
21 | #define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1) | ||
22 | #define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1) | ||
23 | #define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1) | ||
24 | |||
25 | #define _IOC_NRSHIFT 0 | ||
26 | #define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS) | ||
27 | #define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS) | ||
28 | #define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS) | ||
29 | |||
30 | /* | ||
31 | * Direction bits _IOC_NONE could be 0, but OSF/1 gives it a bit. | ||
32 | * And this turns out to be useful for catching old ioctl numbers in header | ||
33 | * files for us. | ||
34 | */ | ||
35 | #define _IOC_NONE 1U | ||
36 | #define _IOC_READ 2U | ||
37 | #define _IOC_WRITE 4U | ||
38 | |||
39 | #define _IOC(dir,type,nr,size) \ | ||
40 | ((unsigned int) \ | ||
41 | (((dir) << _IOC_DIRSHIFT) | \ | ||
42 | ((type) << _IOC_TYPESHIFT) | \ | ||
43 | ((nr) << _IOC_NRSHIFT) | \ | ||
44 | ((size) << _IOC_SIZESHIFT))) | ||
45 | |||
46 | /* used to create numbers */ | ||
47 | #define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0) | ||
48 | #define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size)) | ||
49 | #define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size)) | ||
50 | #define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size)) | ||
51 | |||
52 | /* used to decode them.. */ | ||
53 | #define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK) | ||
54 | #define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK) | ||
55 | #define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK) | ||
56 | #define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK) | ||
57 | |||
58 | /* ...and for the drivers/sound files... */ | ||
59 | |||
60 | #define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT) | ||
61 | #define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT) | ||
62 | #define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT) | ||
63 | #define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT) | ||
64 | #define IOCSIZE_SHIFT (_IOC_SIZESHIFT) | ||
65 | |||
66 | #endif /* _ALPHA_IOCTL_H */ | ||
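With 8 bits of number, 8 of type, 13 of size and 3 of direction, an ioctl value both names a request and records the size and direction of its argument. A hedged sketch of defining and decoding one; the FOO_* names and the structure are hypothetical.

	struct foo_config {
		int rate;
		int channels;
	};

	#define FOO_IOC_MAGIC	'F'
	#define FOO_GET_CONFIG	_IOR(FOO_IOC_MAGIC, 1, struct foo_config)
	#define FOO_SET_CONFIG	_IOW(FOO_IOC_MAGIC, 2, struct foo_config)

	/* Decoding in a generic dispatcher:
	 *   _IOC_DIR(FOO_SET_CONFIG)  == _IOC_WRITE
	 *   _IOC_TYPE(FOO_SET_CONFIG) == 'F'
	 *   _IOC_NR(FOO_SET_CONFIG)   == 2
	 *   _IOC_SIZE(FOO_SET_CONFIG) == sizeof(struct foo_config)
	 */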
diff --git a/arch/alpha/include/asm/ioctls.h b/arch/alpha/include/asm/ioctls.h new file mode 100644 index 000000000000..67bb9f6fdbe4 --- /dev/null +++ b/arch/alpha/include/asm/ioctls.h | |||
@@ -0,0 +1,112 @@ | |||
1 | #ifndef _ASM_ALPHA_IOCTLS_H | ||
2 | #define _ASM_ALPHA_IOCTLS_H | ||
3 | |||
4 | #include <asm/ioctl.h> | ||
5 | |||
6 | #define FIOCLEX _IO('f', 1) | ||
7 | #define FIONCLEX _IO('f', 2) | ||
8 | #define FIOASYNC _IOW('f', 125, int) | ||
9 | #define FIONBIO _IOW('f', 126, int) | ||
10 | #define FIONREAD _IOR('f', 127, int) | ||
11 | #define TIOCINQ FIONREAD | ||
12 | #define FIOQSIZE _IOR('f', 128, loff_t) | ||
13 | |||
14 | #define TIOCGETP _IOR('t', 8, struct sgttyb) | ||
15 | #define TIOCSETP _IOW('t', 9, struct sgttyb) | ||
16 | #define TIOCSETN _IOW('t', 10, struct sgttyb) /* TIOCSETP w/o flush */ | ||
17 | |||
18 | #define TIOCSETC _IOW('t', 17, struct tchars) | ||
19 | #define TIOCGETC _IOR('t', 18, struct tchars) | ||
20 | #define TCGETS _IOR('t', 19, struct termios) | ||
21 | #define TCSETS _IOW('t', 20, struct termios) | ||
22 | #define TCSETSW _IOW('t', 21, struct termios) | ||
23 | #define TCSETSF _IOW('t', 22, struct termios) | ||
24 | |||
25 | #define TCGETA _IOR('t', 23, struct termio) | ||
26 | #define TCSETA _IOW('t', 24, struct termio) | ||
27 | #define TCSETAW _IOW('t', 25, struct termio) | ||
28 | #define TCSETAF _IOW('t', 28, struct termio) | ||
29 | |||
30 | #define TCSBRK _IO('t', 29) | ||
31 | #define TCXONC _IO('t', 30) | ||
32 | #define TCFLSH _IO('t', 31) | ||
33 | |||
34 | #define TIOCSWINSZ _IOW('t', 103, struct winsize) | ||
35 | #define TIOCGWINSZ _IOR('t', 104, struct winsize) | ||
36 | #define TIOCSTART _IO('t', 110) /* start output, like ^Q */ | ||
37 | #define TIOCSTOP _IO('t', 111) /* stop output, like ^S */ | ||
38 | #define TIOCOUTQ _IOR('t', 115, int) /* output queue size */ | ||
39 | |||
40 | #define TIOCGLTC _IOR('t', 116, struct ltchars) | ||
41 | #define TIOCSLTC _IOW('t', 117, struct ltchars) | ||
42 | #define TIOCSPGRP _IOW('t', 118, int) | ||
43 | #define TIOCGPGRP _IOR('t', 119, int) | ||
44 | |||
45 | #define TIOCEXCL 0x540C | ||
46 | #define TIOCNXCL 0x540D | ||
47 | #define TIOCSCTTY 0x540E | ||
48 | |||
49 | #define TIOCSTI 0x5412 | ||
50 | #define TIOCMGET 0x5415 | ||
51 | #define TIOCMBIS 0x5416 | ||
52 | #define TIOCMBIC 0x5417 | ||
53 | #define TIOCMSET 0x5418 | ||
54 | # define TIOCM_LE 0x001 | ||
55 | # define TIOCM_DTR 0x002 | ||
56 | # define TIOCM_RTS 0x004 | ||
57 | # define TIOCM_ST 0x008 | ||
58 | # define TIOCM_SR 0x010 | ||
59 | # define TIOCM_CTS 0x020 | ||
60 | # define TIOCM_CAR 0x040 | ||
61 | # define TIOCM_RNG 0x080 | ||
62 | # define TIOCM_DSR 0x100 | ||
63 | # define TIOCM_CD TIOCM_CAR | ||
64 | # define TIOCM_RI TIOCM_RNG | ||
65 | # define TIOCM_OUT1 0x2000 | ||
66 | # define TIOCM_OUT2 0x4000 | ||
67 | # define TIOCM_LOOP 0x8000 | ||
68 | |||
69 | #define TIOCGSOFTCAR 0x5419 | ||
70 | #define TIOCSSOFTCAR 0x541A | ||
71 | #define TIOCLINUX 0x541C | ||
72 | #define TIOCCONS 0x541D | ||
73 | #define TIOCGSERIAL 0x541E | ||
74 | #define TIOCSSERIAL 0x541F | ||
75 | #define TIOCPKT 0x5420 | ||
76 | # define TIOCPKT_DATA 0 | ||
77 | # define TIOCPKT_FLUSHREAD 1 | ||
78 | # define TIOCPKT_FLUSHWRITE 2 | ||
79 | # define TIOCPKT_STOP 4 | ||
80 | # define TIOCPKT_START 8 | ||
81 | # define TIOCPKT_NOSTOP 16 | ||
82 | # define TIOCPKT_DOSTOP 32 | ||
83 | |||
84 | |||
85 | #define TIOCNOTTY 0x5422 | ||
86 | #define TIOCSETD 0x5423 | ||
87 | #define TIOCGETD 0x5424 | ||
88 | #define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ | ||
89 | #define TIOCSBRK 0x5427 /* BSD compatibility */ | ||
90 | #define TIOCCBRK 0x5428 /* BSD compatibility */ | ||
91 | #define TIOCGSID 0x5429 /* Return the session ID of FD */ | ||
92 | #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ | ||
93 | #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ | ||
94 | |||
95 | #define TIOCSERCONFIG 0x5453 | ||
96 | #define TIOCSERGWILD 0x5454 | ||
97 | #define TIOCSERSWILD 0x5455 | ||
98 | #define TIOCGLCKTRMIOS 0x5456 | ||
99 | #define TIOCSLCKTRMIOS 0x5457 | ||
100 | #define TIOCSERGSTRUCT 0x5458 /* For debugging only */ | ||
101 | #define TIOCSERGETLSR 0x5459 /* Get line status register */ | ||
102 | /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ | ||
103 | # define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ | ||
104 | #define TIOCSERGETMULTI 0x545A /* Get multiport config */ | ||
105 | #define TIOCSERSETMULTI 0x545B /* Set multiport config */ | ||
106 | |||
107 | #define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ | ||
108 | #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ | ||
109 | #define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ | ||
110 | #define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ | ||
111 | |||
112 | #endif /* _ASM_ALPHA_IOCTLS_H */ | ||
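User space passes these request numbers to ioctl(2) on a tty descriptor. A small, self-contained example using the TIOCGWINSZ request defined above to read the terminal size (standard POSIX usage, nothing Alpha-specific):

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		struct winsize ws;

		if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) == -1) {
			perror("TIOCGWINSZ");
			return 1;
		}
		printf("%d rows x %d cols\n", ws.ws_row, ws.ws_col);
		return 0;
	}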
diff --git a/arch/alpha/include/asm/ipcbuf.h b/arch/alpha/include/asm/ipcbuf.h new file mode 100644 index 000000000000..d9c0e1a50702 --- /dev/null +++ b/arch/alpha/include/asm/ipcbuf.h | |||
@@ -0,0 +1,28 @@ | |||
1 | #ifndef _ALPHA_IPCBUF_H | ||
2 | #define _ALPHA_IPCBUF_H | ||
3 | |||
4 | /* | ||
5 | * The ipc64_perm structure for alpha architecture. | ||
6 | * Note extra padding because this structure is passed back and forth | ||
7 | * between kernel and user space. | ||
8 | * | ||
9 | * Pad space is left for: | ||
10 | * - 32-bit seq | ||
11 | * - 2 miscellaneous 64-bit values | ||
12 | */ | ||
13 | |||
14 | struct ipc64_perm | ||
15 | { | ||
16 | __kernel_key_t key; | ||
17 | __kernel_uid_t uid; | ||
18 | __kernel_gid_t gid; | ||
19 | __kernel_uid_t cuid; | ||
20 | __kernel_gid_t cgid; | ||
21 | __kernel_mode_t mode; | ||
22 | unsigned short seq; | ||
23 | unsigned short __pad1; | ||
24 | unsigned long __unused1; | ||
25 | unsigned long __unused2; | ||
26 | }; | ||
27 | |||
28 | #endif /* _ALPHA_IPCBUF_H */ | ||
diff --git a/arch/alpha/include/asm/irq.h b/arch/alpha/include/asm/irq.h new file mode 100644 index 000000000000..06377400dc09 --- /dev/null +++ b/arch/alpha/include/asm/irq.h | |||
@@ -0,0 +1,91 @@ | |||
1 | #ifndef _ALPHA_IRQ_H | ||
2 | #define _ALPHA_IRQ_H | ||
3 | |||
4 | /* | ||
5 | * linux/include/alpha/irq.h | ||
6 | * | ||
7 | * (C) 1994 Linus Torvalds | ||
8 | */ | ||
9 | |||
10 | #include <linux/linkage.h> | ||
11 | |||
12 | #if defined(CONFIG_ALPHA_GENERIC) | ||
13 | |||
14 | /* Here NR_IRQS is not exact, but rather an upper bound. This is used | ||
15 | in many places throughout the kernel to size static arrays. That's ok, | ||
16 | we'll use alpha_mv.nr_irqs when we want the real thing. */ | ||
17 | |||
18 | /* When LEGACY_START_ADDRESS is selected, we leave out: | ||
19 | TITAN | ||
20 | WILDFIRE | ||
21 | MARVEL | ||
22 | |||
23 | This helps keep the kernel object size reasonable for the majority | ||
24 | of machines. | ||
25 | */ | ||
26 | |||
27 | # if defined(CONFIG_ALPHA_LEGACY_START_ADDRESS) | ||
28 | # define NR_IRQS (128) /* max is RAWHIDE/TAKARA */ | ||
29 | # else | ||
30 | # define NR_IRQS (32768 + 16) /* marvel - 32 pids */ | ||
31 | # endif | ||
32 | |||
33 | #elif defined(CONFIG_ALPHA_CABRIOLET) || \ | ||
34 | defined(CONFIG_ALPHA_EB66P) || \ | ||
35 | defined(CONFIG_ALPHA_EB164) || \ | ||
36 | defined(CONFIG_ALPHA_PC164) || \ | ||
37 | defined(CONFIG_ALPHA_LX164) | ||
38 | # define NR_IRQS 35 | ||
39 | |||
40 | #elif defined(CONFIG_ALPHA_EB66) || \ | ||
41 | defined(CONFIG_ALPHA_EB64P) || \ | ||
42 | defined(CONFIG_ALPHA_MIKASA) | ||
43 | # define NR_IRQS 32 | ||
44 | |||
45 | #elif defined(CONFIG_ALPHA_ALCOR) || \ | ||
46 | defined(CONFIG_ALPHA_MIATA) || \ | ||
47 | defined(CONFIG_ALPHA_RUFFIAN) || \ | ||
48 | defined(CONFIG_ALPHA_RX164) || \ | ||
49 | defined(CONFIG_ALPHA_NORITAKE) | ||
50 | # define NR_IRQS 48 | ||
51 | |||
52 | #elif defined(CONFIG_ALPHA_SABLE) || \ | ||
53 | defined(CONFIG_ALPHA_SX164) | ||
54 | # define NR_IRQS 40 | ||
55 | |||
56 | #elif defined(CONFIG_ALPHA_DP264) || \ | ||
57 | defined(CONFIG_ALPHA_LYNX) || \ | ||
58 | defined(CONFIG_ALPHA_SHARK) || \ | ||
59 | defined(CONFIG_ALPHA_EIGER) | ||
60 | # define NR_IRQS 64 | ||
61 | |||
62 | #elif defined(CONFIG_ALPHA_TITAN) | ||
63 | #define NR_IRQS 80 | ||
64 | |||
65 | #elif defined(CONFIG_ALPHA_RAWHIDE) || \ | ||
66 | defined(CONFIG_ALPHA_TAKARA) | ||
67 | # define NR_IRQS 128 | ||
68 | |||
69 | #elif defined(CONFIG_ALPHA_WILDFIRE) | ||
70 | # define NR_IRQS 2048 /* enuff for 8 QBBs */ | ||
71 | |||
72 | #elif defined(CONFIG_ALPHA_MARVEL) | ||
73 | # define NR_IRQS (32768 + 16) /* marvel - 32 pids*/ | ||
74 | |||
75 | #else /* everyone else */ | ||
76 | # define NR_IRQS 16 | ||
77 | #endif | ||
78 | |||
79 | static __inline__ int irq_canonicalize(int irq) | ||
80 | { | ||
81 | /* | ||
82 | * XXX is this true for all Alphas? The old serial driver | ||
83 | * did it this way for years without any complaints, so.... | ||
84 | */ | ||
85 | return ((irq == 2) ? 9 : irq); | ||
86 | } | ||
87 | |||
88 | struct pt_regs; | ||
89 | extern void (*perf_irq)(unsigned long, struct pt_regs *); | ||
90 | |||
91 | #endif /* _ALPHA_IRQ_H */ | ||
diff --git a/arch/alpha/include/asm/irq_regs.h b/arch/alpha/include/asm/irq_regs.h new file mode 100644 index 000000000000..3dd9c0b70270 --- /dev/null +++ b/arch/alpha/include/asm/irq_regs.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/irq_regs.h> | |||
diff --git a/arch/alpha/include/asm/jensen.h b/arch/alpha/include/asm/jensen.h new file mode 100644 index 000000000000..964b06ead43b --- /dev/null +++ b/arch/alpha/include/asm/jensen.h | |||
@@ -0,0 +1,346 @@ | |||
1 | #ifndef __ALPHA_JENSEN_H | ||
2 | #define __ALPHA_JENSEN_H | ||
3 | |||
4 | #include <asm/compiler.h> | ||
5 | |||
6 | /* | ||
7 | * Defines for the AlphaPC EISA IO and memory address space. | ||
8 | */ | ||
9 | |||
10 | /* | ||
11 | * NOTE! The memory operations do not set any memory barriers, as it's | ||
12 | * not needed for cases like a frame buffer that is essentially memory-like. | ||
13 | * You need to do them by hand if the operations depend on ordering. | ||
14 | * | ||
15 | * Similarly, the port IO operations do a "mb" only after a write operation: | ||
16 | * if an mb is needed before (as in the case of doing memory mapped IO | ||
17 | * first, and then a port IO operation to the same device), it needs to be | ||
18 | * done by hand. | ||
19 | * | ||
20 | * After the above has bitten me 100 times, I'll give up and just do the | ||
21 | * mb all the time, but right now I'm hoping this will work out. Avoiding | ||
22 | * mb's may potentially be a noticeable speed improvement, but I can't | ||
23 | * honestly say I've tested it. | ||
24 | * | ||
25 | * Handling interrupts that need to do mb's to synchronize to non-interrupts | ||
26 | * is another fun race area. Don't do it (because if you do, I'll have to | ||
27 | * do *everything* with interrupts disabled, ugh). | ||
28 | */ | ||
29 | |||
30 | /* | ||
31 | * EISA Interrupt Acknowledge address | ||
32 | */ | ||
33 | #define EISA_INTA (IDENT_ADDR + 0x100000000UL) | ||
34 | |||
35 | /* | ||
36 | * FEPROM addresses | ||
37 | */ | ||
38 | #define EISA_FEPROM0 (IDENT_ADDR + 0x180000000UL) | ||
39 | #define EISA_FEPROM1 (IDENT_ADDR + 0x1A0000000UL) | ||
40 | |||
41 | /* | ||
42 | * VL82C106 base address | ||
43 | */ | ||
44 | #define EISA_VL82C106 (IDENT_ADDR + 0x1C0000000UL) | ||
45 | |||
46 | /* | ||
47 | * EISA "Host Address Extension" address (bits 25-31 of the EISA address) | ||
48 | */ | ||
49 | #define EISA_HAE (IDENT_ADDR + 0x1D0000000UL) | ||
50 | |||
51 | /* | ||
52 | * "SYSCTL" register address | ||
53 | */ | ||
54 | #define EISA_SYSCTL (IDENT_ADDR + 0x1E0000000UL) | ||
55 | |||
56 | /* | ||
57 | * "spare" register address | ||
58 | */ | ||
59 | #define EISA_SPARE (IDENT_ADDR + 0x1F0000000UL) | ||
60 | |||
61 | /* | ||
62 | * EISA memory address offset | ||
63 | */ | ||
64 | #define EISA_MEM (IDENT_ADDR + 0x200000000UL) | ||
65 | |||
66 | /* | ||
67 | * EISA IO address offset | ||
68 | */ | ||
69 | #define EISA_IO (IDENT_ADDR + 0x300000000UL) | ||
70 | |||
71 | |||
72 | #ifdef __KERNEL__ | ||
73 | |||
74 | #ifndef __EXTERN_INLINE | ||
75 | #define __EXTERN_INLINE extern inline | ||
76 | #define __IO_EXTERN_INLINE | ||
77 | #endif | ||
78 | |||
79 | /* | ||
80 | * Handle the "host address register". This needs to be set | ||
81 | * to the high 7 bits of the EISA address. This is also needed | ||
82 | * for EISA IO addresses, which are only 16 bits wide (the | ||
83 | * hae needs to be set to 0). | ||
84 | * | ||
85 | * HAE isn't needed for the local IO operations, though. | ||
86 | */ | ||
87 | |||
88 | #define JENSEN_HAE_ADDRESS EISA_HAE | ||
89 | #define JENSEN_HAE_MASK 0x1ffffff | ||
90 | |||
91 | __EXTERN_INLINE void jensen_set_hae(unsigned long addr) | ||
92 | { | ||
93 | /* hae on the Jensen is bits 31:25 shifted right */ | ||
94 | addr >>= 25; | ||
95 | if (addr != alpha_mv.hae_cache) | ||
96 | set_hae(addr); | ||
97 | } | ||
98 | |||
99 | #define vuip volatile unsigned int * | ||
100 | |||
101 | /* | ||
102 | * IO functions | ||
103 | * | ||
104 | * The "local" functions are those that don't go out to the EISA bus, | ||
105 | * but instead act on the VL82C106 chip directly.. This is mainly the | ||
106 | * keyboard, RTC, printer and first two serial lines.. | ||
107 | * | ||
108 | * The local stuff makes for some complications, but it seems to be | ||
109 | * gone in the PCI version. I hope I can get DEC suckered^H^H^H^H^H^H^H^H | ||
110 | * convinced that I need one of the newer machines. | ||
111 | */ | ||
112 | |||
113 | static inline unsigned int jensen_local_inb(unsigned long addr) | ||
114 | { | ||
115 | return 0xff & *(vuip)((addr << 9) + EISA_VL82C106); | ||
116 | } | ||
117 | |||
118 | static inline void jensen_local_outb(u8 b, unsigned long addr) | ||
119 | { | ||
120 | *(vuip)((addr << 9) + EISA_VL82C106) = b; | ||
121 | mb(); | ||
122 | } | ||
123 | |||
124 | static inline unsigned int jensen_bus_inb(unsigned long addr) | ||
125 | { | ||
126 | long result; | ||
127 | |||
128 | jensen_set_hae(0); | ||
129 | result = *(volatile int *)((addr << 7) + EISA_IO + 0x00); | ||
130 | return __kernel_extbl(result, addr & 3); | ||
131 | } | ||
132 | |||
133 | static inline void jensen_bus_outb(u8 b, unsigned long addr) | ||
134 | { | ||
135 | jensen_set_hae(0); | ||
136 | *(vuip)((addr << 7) + EISA_IO + 0x00) = b * 0x01010101; | ||
137 | mb(); | ||
138 | } | ||
139 | |||
140 | /* | ||
141 | * It seems gcc is not very good at optimizing away logical | ||
142 | * operations that result in operations across inline functions. | ||
143 | * Which is why this is a macro. | ||
144 | */ | ||
145 | |||
146 | #define jensen_is_local(addr) ( \ | ||
147 | /* keyboard */ (addr == 0x60 || addr == 0x64) || \ | ||
148 | /* RTC */ (addr == 0x170 || addr == 0x171) || \ | ||
149 | /* mb COM2 */ (addr >= 0x2f8 && addr <= 0x2ff) || \ | ||
150 | /* mb LPT1 */ (addr >= 0x3bc && addr <= 0x3be) || \ | ||
151 | /* mb COM1 */ (addr >= 0x3f8 && addr <= 0x3ff)) | ||
152 | |||
153 | __EXTERN_INLINE u8 jensen_inb(unsigned long addr) | ||
154 | { | ||
155 | if (jensen_is_local(addr)) | ||
156 | return jensen_local_inb(addr); | ||
157 | else | ||
158 | return jensen_bus_inb(addr); | ||
159 | } | ||
160 | |||
161 | __EXTERN_INLINE void jensen_outb(u8 b, unsigned long addr) | ||
162 | { | ||
163 | if (jensen_is_local(addr)) | ||
164 | jensen_local_outb(b, addr); | ||
165 | else | ||
166 | jensen_bus_outb(b, addr); | ||
167 | } | ||
168 | |||
169 | __EXTERN_INLINE u16 jensen_inw(unsigned long addr) | ||
170 | { | ||
171 | long result; | ||
172 | |||
173 | jensen_set_hae(0); | ||
174 | result = *(volatile int *) ((addr << 7) + EISA_IO + 0x20); | ||
175 | result >>= (addr & 3) * 8; | ||
176 | return 0xffffUL & result; | ||
177 | } | ||
178 | |||
179 | __EXTERN_INLINE u32 jensen_inl(unsigned long addr) | ||
180 | { | ||
181 | jensen_set_hae(0); | ||
182 | return *(vuip) ((addr << 7) + EISA_IO + 0x60); | ||
183 | } | ||
184 | |||
185 | __EXTERN_INLINE void jensen_outw(u16 b, unsigned long addr) | ||
186 | { | ||
187 | jensen_set_hae(0); | ||
188 | *(vuip) ((addr << 7) + EISA_IO + 0x20) = b * 0x00010001; | ||
189 | mb(); | ||
190 | } | ||
191 | |||
192 | __EXTERN_INLINE void jensen_outl(u32 b, unsigned long addr) | ||
193 | { | ||
194 | jensen_set_hae(0); | ||
195 | *(vuip) ((addr << 7) + EISA_IO + 0x60) = b; | ||
196 | mb(); | ||
197 | } | ||
198 | |||
199 | /* | ||
200 | * Memory functions. | ||
201 | */ | ||
202 | |||
203 | __EXTERN_INLINE u8 jensen_readb(const volatile void __iomem *xaddr) | ||
204 | { | ||
205 | unsigned long addr = (unsigned long) xaddr; | ||
206 | long result; | ||
207 | |||
208 | jensen_set_hae(addr); | ||
209 | addr &= JENSEN_HAE_MASK; | ||
210 | result = *(volatile int *) ((addr << 7) + EISA_MEM + 0x00); | ||
211 | result >>= (addr & 3) * 8; | ||
212 | return 0xffUL & result; | ||
213 | } | ||
214 | |||
215 | __EXTERN_INLINE u16 jensen_readw(const volatile void __iomem *xaddr) | ||
216 | { | ||
217 | unsigned long addr = (unsigned long) xaddr; | ||
218 | long result; | ||
219 | |||
220 | jensen_set_hae(addr); | ||
221 | addr &= JENSEN_HAE_MASK; | ||
222 | result = *(volatile int *) ((addr << 7) + EISA_MEM + 0x20); | ||
223 | result >>= (addr & 3) * 8; | ||
224 | return 0xffffUL & result; | ||
225 | } | ||
226 | |||
227 | __EXTERN_INLINE u32 jensen_readl(const volatile void __iomem *xaddr) | ||
228 | { | ||
229 | unsigned long addr = (unsigned long) xaddr; | ||
230 | jensen_set_hae(addr); | ||
231 | addr &= JENSEN_HAE_MASK; | ||
232 | return *(vuip) ((addr << 7) + EISA_MEM + 0x60); | ||
233 | } | ||
234 | |||
235 | __EXTERN_INLINE u64 jensen_readq(const volatile void __iomem *xaddr) | ||
236 | { | ||
237 | unsigned long addr = (unsigned long) xaddr; | ||
238 | unsigned long r0, r1; | ||
239 | |||
240 | jensen_set_hae(addr); | ||
241 | addr &= JENSEN_HAE_MASK; | ||
242 | addr = (addr << 7) + EISA_MEM + 0x60; | ||
243 | r0 = *(vuip) (addr); | ||
244 | r1 = *(vuip) (addr + (4 << 7)); | ||
245 | return r1 << 32 | r0; | ||
246 | } | ||
247 | |||
248 | __EXTERN_INLINE void jensen_writeb(u8 b, volatile void __iomem *xaddr) | ||
249 | { | ||
250 | unsigned long addr = (unsigned long) xaddr; | ||
251 | jensen_set_hae(addr); | ||
252 | addr &= JENSEN_HAE_MASK; | ||
253 | *(vuip) ((addr << 7) + EISA_MEM + 0x00) = b * 0x01010101; | ||
254 | } | ||
255 | |||
256 | __EXTERN_INLINE void jensen_writew(u16 b, volatile void __iomem *xaddr) | ||
257 | { | ||
258 | unsigned long addr = (unsigned long) xaddr; | ||
259 | jensen_set_hae(addr); | ||
260 | addr &= JENSEN_HAE_MASK; | ||
261 | *(vuip) ((addr << 7) + EISA_MEM + 0x20) = b * 0x00010001; | ||
262 | } | ||
263 | |||
264 | __EXTERN_INLINE void jensen_writel(u32 b, volatile void __iomem *xaddr) | ||
265 | { | ||
266 | unsigned long addr = (unsigned long) xaddr; | ||
267 | jensen_set_hae(addr); | ||
268 | addr &= JENSEN_HAE_MASK; | ||
269 | *(vuip) ((addr << 7) + EISA_MEM + 0x60) = b; | ||
270 | } | ||
271 | |||
272 | __EXTERN_INLINE void jensen_writeq(u64 b, volatile void __iomem *xaddr) | ||
273 | { | ||
274 | unsigned long addr = (unsigned long) xaddr; | ||
275 | jensen_set_hae(addr); | ||
276 | addr &= JENSEN_HAE_MASK; | ||
277 | addr = (addr << 7) + EISA_MEM + 0x60; | ||
278 | *(vuip) (addr) = b; | ||
279 | *(vuip) (addr + (4 << 7)) = b >> 32; | ||
280 | } | ||
281 | |||
282 | __EXTERN_INLINE void __iomem *jensen_ioportmap(unsigned long addr) | ||
283 | { | ||
284 | return (void __iomem *)addr; | ||
285 | } | ||
286 | |||
287 | __EXTERN_INLINE void __iomem *jensen_ioremap(unsigned long addr, | ||
288 | unsigned long size) | ||
289 | { | ||
290 | return (void __iomem *)(addr + 0x100000000ul); | ||
291 | } | ||
292 | |||
293 | __EXTERN_INLINE int jensen_is_ioaddr(unsigned long addr) | ||
294 | { | ||
295 | return (long)addr >= 0; | ||
296 | } | ||
297 | |||
298 | __EXTERN_INLINE int jensen_is_mmio(const volatile void __iomem *addr) | ||
299 | { | ||
300 | return (unsigned long)addr >= 0x100000000ul; | ||
301 | } | ||
302 | |||
303 | /* New-style ioread interface. All the routines are so ugly for Jensen | ||
304 | that it doesn't make sense to merge them. */ | ||
305 | |||
306 | #define IOPORT(OS, NS) \ | ||
307 | __EXTERN_INLINE unsigned int jensen_ioread##NS(void __iomem *xaddr) \ | ||
308 | { \ | ||
309 | if (jensen_is_mmio(xaddr)) \ | ||
310 | return jensen_read##OS(xaddr - 0x100000000ul); \ | ||
311 | else \ | ||
312 | return jensen_in##OS((unsigned long)xaddr); \ | ||
313 | } \ | ||
314 | __EXTERN_INLINE void jensen_iowrite##NS(u##NS b, void __iomem *xaddr) \ | ||
315 | { \ | ||
316 | if (jensen_is_mmio(xaddr)) \ | ||
317 | jensen_write##OS(b, xaddr - 0x100000000ul); \ | ||
318 | else \ | ||
319 | jensen_out##OS(b, (unsigned long)xaddr); \ | ||
320 | } | ||
321 | |||
322 | IOPORT(b, 8) | ||
323 | IOPORT(w, 16) | ||
324 | IOPORT(l, 32) | ||
325 | |||
326 | #undef IOPORT | ||
327 | |||
328 | #undef vuip | ||
329 | |||
330 | #undef __IO_PREFIX | ||
331 | #define __IO_PREFIX jensen | ||
332 | #define jensen_trivial_rw_bw 0 | ||
333 | #define jensen_trivial_rw_lq 0 | ||
334 | #define jensen_trivial_io_bw 0 | ||
335 | #define jensen_trivial_io_lq 0 | ||
336 | #define jensen_trivial_iounmap 1 | ||
337 | #include <asm/io_trivial.h> | ||
338 | |||
339 | #ifdef __IO_EXTERN_INLINE | ||
340 | #undef __EXTERN_INLINE | ||
341 | #undef __IO_EXTERN_INLINE | ||
342 | #endif | ||
343 | |||
344 | #endif /* __KERNEL__ */ | ||
345 | |||
346 | #endif /* __ALPHA_JENSEN_H */ | ||
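
The sparse-space accessors above encode everything in the address: the port or memory offset is shifted left by 7, a lane offset (0x00, 0x20 or 0x60) selects byte, word or long width, and the low two address bits pick the byte lane out of the 32-bit value the bus returns. The short user-space sketch below models only that last step, the byte extraction jensen_bus_inb() performs with __kernel_extbl(); the values are made up and nothing here is kernel code.

    /* Standalone model of the byte-lane extraction used by jensen_bus_inb().
     * __kernel_extbl(q, n) extracts byte n of q on Alpha; a plain shift and
     * mask is used instead so this compiles anywhere. */
    #include <stdio.h>
    #include <stdint.h>

    static unsigned int extract_byte(uint64_t lanes, unsigned long addr)
    {
        return (lanes >> ((addr & 3) * 8)) & 0xff;
    }

    int main(void)
    {
        uint64_t lanes = 0x44332211;    /* bytes 0x11..0x44 in lanes 0..3 */
        unsigned long a;

        for (a = 0x3f8; a < 0x3fc; a++)
            printf("port 0x%lx -> 0x%02x\n", a, extract_byte(lanes, a));
        return 0;
    }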
diff --git a/arch/alpha/include/asm/kdebug.h b/arch/alpha/include/asm/kdebug.h new file mode 100644 index 000000000000..6ece1b037665 --- /dev/null +++ b/arch/alpha/include/asm/kdebug.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/kdebug.h> | |||
diff --git a/arch/alpha/include/asm/kmap_types.h b/arch/alpha/include/asm/kmap_types.h new file mode 100644 index 000000000000..3e6735a34c57 --- /dev/null +++ b/arch/alpha/include/asm/kmap_types.h | |||
@@ -0,0 +1,32 @@ | |||
1 | #ifndef _ASM_KMAP_TYPES_H | ||
2 | #define _ASM_KMAP_TYPES_H | ||
3 | |||
4 | /* Dummy header just to define km_type. */ | ||
5 | |||
6 | |||
7 | #ifdef CONFIG_DEBUG_HIGHMEM | ||
8 | # define D(n) __KM_FENCE_##n , | ||
9 | #else | ||
10 | # define D(n) | ||
11 | #endif | ||
12 | |||
13 | enum km_type { | ||
14 | D(0) KM_BOUNCE_READ, | ||
15 | D(1) KM_SKB_SUNRPC_DATA, | ||
16 | D(2) KM_SKB_DATA_SOFTIRQ, | ||
17 | D(3) KM_USER0, | ||
18 | D(4) KM_USER1, | ||
19 | D(5) KM_BIO_SRC_IRQ, | ||
20 | D(6) KM_BIO_DST_IRQ, | ||
21 | D(7) KM_PTE0, | ||
22 | D(8) KM_PTE1, | ||
23 | D(9) KM_IRQ0, | ||
24 | D(10) KM_IRQ1, | ||
25 | D(11) KM_SOFTIRQ0, | ||
26 | D(12) KM_SOFTIRQ1, | ||
27 | D(13) KM_TYPE_NR | ||
28 | }; | ||
29 | |||
30 | #undef D | ||
31 | |||
32 | #endif | ||
diff --git a/arch/alpha/include/asm/linkage.h b/arch/alpha/include/asm/linkage.h new file mode 100644 index 000000000000..291c2d01c44f --- /dev/null +++ b/arch/alpha/include/asm/linkage.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef __ASM_LINKAGE_H | ||
2 | #define __ASM_LINKAGE_H | ||
3 | |||
4 | /* Nothing to see here... */ | ||
5 | |||
6 | #endif | ||
diff --git a/arch/alpha/include/asm/local.h b/arch/alpha/include/asm/local.h new file mode 100644 index 000000000000..6ad3ea696421 --- /dev/null +++ b/arch/alpha/include/asm/local.h | |||
@@ -0,0 +1,118 @@ | |||
1 | #ifndef _ALPHA_LOCAL_H | ||
2 | #define _ALPHA_LOCAL_H | ||
3 | |||
4 | #include <linux/percpu.h> | ||
5 | #include <asm/atomic.h> | ||
6 | |||
7 | typedef struct | ||
8 | { | ||
9 | atomic_long_t a; | ||
10 | } local_t; | ||
11 | |||
12 | #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } | ||
13 | #define local_read(l) atomic_long_read(&(l)->a) | ||
14 | #define local_set(l,i) atomic_long_set(&(l)->a, (i)) | ||
15 | #define local_inc(l) atomic_long_inc(&(l)->a) | ||
16 | #define local_dec(l) atomic_long_dec(&(l)->a) | ||
17 | #define local_add(i,l) atomic_long_add((i),(&(l)->a)) | ||
18 | #define local_sub(i,l) atomic_long_sub((i),(&(l)->a)) | ||
19 | |||
20 | static __inline__ long local_add_return(long i, local_t * l) | ||
21 | { | ||
22 | long temp, result; | ||
23 | __asm__ __volatile__( | ||
24 | "1: ldq_l %0,%1\n" | ||
25 | " addq %0,%3,%2\n" | ||
26 | " addq %0,%3,%0\n" | ||
27 | " stq_c %0,%1\n" | ||
28 | " beq %0,2f\n" | ||
29 | ".subsection 2\n" | ||
30 | "2: br 1b\n" | ||
31 | ".previous" | ||
32 | :"=&r" (temp), "=m" (l->a.counter), "=&r" (result) | ||
33 | :"Ir" (i), "m" (l->a.counter) : "memory"); | ||
34 | return result; | ||
35 | } | ||
36 | |||
37 | static __inline__ long local_sub_return(long i, local_t * l) | ||
38 | { | ||
39 | long temp, result; | ||
40 | __asm__ __volatile__( | ||
41 | "1: ldq_l %0,%1\n" | ||
42 | " subq %0,%3,%2\n" | ||
43 | " subq %0,%3,%0\n" | ||
44 | " stq_c %0,%1\n" | ||
45 | " beq %0,2f\n" | ||
46 | ".subsection 2\n" | ||
47 | "2: br 1b\n" | ||
48 | ".previous" | ||
49 | :"=&r" (temp), "=m" (l->a.counter), "=&r" (result) | ||
50 | :"Ir" (i), "m" (l->a.counter) : "memory"); | ||
51 | return result; | ||
52 | } | ||
53 | |||
54 | #define local_cmpxchg(l, o, n) \ | ||
55 | (cmpxchg_local(&((l)->a.counter), (o), (n))) | ||
56 | #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n))) | ||
57 | |||
58 | /** | ||
59 | * local_add_unless - add unless the number is a given value | ||
60 | * @l: pointer of type local_t | ||
61 | * @a: the amount to add to l... | ||
62 | * @u: ...unless l is equal to u. | ||
63 | * | ||
64 | * Atomically adds @a to @l, so long as it was not @u. | ||
65 | * Returns non-zero if @l was not @u, and zero otherwise. | ||
66 | */ | ||
67 | #define local_add_unless(l, a, u) \ | ||
68 | ({ \ | ||
69 | long c, old; \ | ||
70 | c = local_read(l); \ | ||
71 | for (;;) { \ | ||
72 | if (unlikely(c == (u))) \ | ||
73 | break; \ | ||
74 | old = local_cmpxchg((l), c, c + (a)); \ | ||
75 | if (likely(old == c)) \ | ||
76 | break; \ | ||
77 | c = old; \ | ||
78 | } \ | ||
79 | c != (u); \ | ||
80 | }) | ||
81 | #define local_inc_not_zero(l) local_add_unless((l), 1, 0) | ||
82 | |||
83 | #define local_add_negative(a, l) (local_add_return((a), (l)) < 0) | ||
84 | |||
85 | #define local_dec_return(l) local_sub_return(1,(l)) | ||
86 | |||
87 | #define local_inc_return(l) local_add_return(1,(l)) | ||
88 | |||
89 | #define local_sub_and_test(i,l) (local_sub_return((i), (l)) == 0) | ||
90 | |||
91 | #define local_inc_and_test(l) (local_add_return(1, (l)) == 0) | ||
92 | |||
93 | #define local_dec_and_test(l) (local_sub_return(1, (l)) == 0) | ||
94 | |||
95 | /* Verify if faster than atomic ops */ | ||
96 | #define __local_inc(l) ((l)->a.counter++) | ||
97 | #define __local_dec(l) ((l)->a.counter--) | ||
98 | #define __local_add(i,l) ((l)->a.counter+=(i)) | ||
99 | #define __local_sub(i,l) ((l)->a.counter-=(i)) | ||
100 | |||
101 | /* Use these for per-cpu local_t variables: on some archs they are | ||
102 | * much more efficient than these naive implementations. Note they take | ||
103 | * a variable, not an address. | ||
104 | */ | ||
105 | #define cpu_local_read(l) local_read(&__get_cpu_var(l)) | ||
106 | #define cpu_local_set(l, i) local_set(&__get_cpu_var(l), (i)) | ||
107 | |||
108 | #define cpu_local_inc(l) local_inc(&__get_cpu_var(l)) | ||
109 | #define cpu_local_dec(l) local_dec(&__get_cpu_var(l)) | ||
110 | #define cpu_local_add(i, l) local_add((i), &__get_cpu_var(l)) | ||
111 | #define cpu_local_sub(i, l) local_sub((i), &__get_cpu_var(l)) | ||
112 | |||
113 | #define __cpu_local_inc(l) __local_inc(&__get_cpu_var(l)) | ||
114 | #define __cpu_local_dec(l) __local_dec(&__get_cpu_var(l)) | ||
115 | #define __cpu_local_add(i, l) __local_add((i), &__get_cpu_var(l)) | ||
116 | #define __cpu_local_sub(i, l) __local_sub((i), &__get_cpu_var(l)) | ||
117 | |||
118 | #endif /* _ALPHA_LOCAL_H */ | ||
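
local_add_unless() above is a classic compare-and-swap retry loop: read the counter, bail out if it already equals @u, otherwise try to install c + a, and retry if someone else changed the value in between. The sketch below is a plain user-space model of that loop with a fake, non-atomic cmpxchg helper; it illustrates the control flow only, not the atomicity the ldq_l/stq_c sequences provide.

    /* Standalone model of the local_add_unless() retry loop. */
    #include <stdio.h>

    static long fake_cmpxchg(long *p, long old, long new) /* returns previous value */
    {
        long prev = *p;
        if (prev == old)
            *p = new;
        return prev;
    }

    static int add_unless(long *counter, long a, long u)
    {
        long c = *counter, old;
        for (;;) {
            if (c == u)
                return 0;          /* already equal to u: no add, report failure */
            old = fake_cmpxchg(counter, c, c + a);
            if (old == c)
                return 1;          /* swap succeeded */
            c = old;               /* value changed underneath us: retry */
        }
    }

    int main(void)
    {
        long v = 0;
        printf("inc_not_zero on 0 -> %d (v=%ld)\n", add_unless(&v, 1, 0), v);
        v = 3;
        printf("inc_not_zero on 3 -> %d (v=%ld)\n", add_unless(&v, 1, 0), v);
        return 0;
    }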
diff --git a/arch/alpha/include/asm/machvec.h b/arch/alpha/include/asm/machvec.h new file mode 100644 index 000000000000..a86c083cdf7f --- /dev/null +++ b/arch/alpha/include/asm/machvec.h | |||
@@ -0,0 +1,134 @@ | |||
1 | #ifndef __ALPHA_MACHVEC_H | ||
2 | #define __ALPHA_MACHVEC_H 1 | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | |||
6 | /* | ||
7 | * This file gets pulled in by asm/io.h from user space. We don't | ||
8 | * want most of this escaping. | ||
9 | */ | ||
10 | |||
11 | #ifdef __KERNEL__ | ||
12 | |||
13 | /* The following structure vectors all of the I/O and IRQ manipulation | ||
14 | from the generic kernel to the hardware specific backend. */ | ||
15 | |||
16 | struct task_struct; | ||
17 | struct mm_struct; | ||
18 | struct vm_area_struct; | ||
19 | struct linux_hose_info; | ||
20 | struct pci_dev; | ||
21 | struct pci_ops; | ||
22 | struct pci_controller; | ||
23 | struct _alpha_agp_info; | ||
24 | |||
25 | struct alpha_machine_vector | ||
26 | { | ||
27 | /* This "belongs" down below with the rest of the runtime | ||
28 | variables, but it is convenient for entry.S if these | ||
29 | two slots are at the beginning of the struct. */ | ||
30 | unsigned long hae_cache; | ||
31 | unsigned long *hae_register; | ||
32 | |||
33 | int nr_irqs; | ||
34 | int rtc_port; | ||
35 | unsigned int max_asn; | ||
36 | unsigned long max_isa_dma_address; | ||
37 | unsigned long irq_probe_mask; | ||
38 | unsigned long iack_sc; | ||
39 | unsigned long min_io_address; | ||
40 | unsigned long min_mem_address; | ||
41 | unsigned long pci_dac_offset; | ||
42 | |||
43 | void (*mv_pci_tbi)(struct pci_controller *hose, | ||
44 | dma_addr_t start, dma_addr_t end); | ||
45 | |||
46 | unsigned int (*mv_ioread8)(void __iomem *); | ||
47 | unsigned int (*mv_ioread16)(void __iomem *); | ||
48 | unsigned int (*mv_ioread32)(void __iomem *); | ||
49 | |||
50 | void (*mv_iowrite8)(u8, void __iomem *); | ||
51 | void (*mv_iowrite16)(u16, void __iomem *); | ||
52 | void (*mv_iowrite32)(u32, void __iomem *); | ||
53 | |||
54 | u8 (*mv_readb)(const volatile void __iomem *); | ||
55 | u16 (*mv_readw)(const volatile void __iomem *); | ||
56 | u32 (*mv_readl)(const volatile void __iomem *); | ||
57 | u64 (*mv_readq)(const volatile void __iomem *); | ||
58 | |||
59 | void (*mv_writeb)(u8, volatile void __iomem *); | ||
60 | void (*mv_writew)(u16, volatile void __iomem *); | ||
61 | void (*mv_writel)(u32, volatile void __iomem *); | ||
62 | void (*mv_writeq)(u64, volatile void __iomem *); | ||
63 | |||
64 | void __iomem *(*mv_ioportmap)(unsigned long); | ||
65 | void __iomem *(*mv_ioremap)(unsigned long, unsigned long); | ||
66 | void (*mv_iounmap)(volatile void __iomem *); | ||
67 | int (*mv_is_ioaddr)(unsigned long); | ||
68 | int (*mv_is_mmio)(const volatile void __iomem *); | ||
69 | |||
70 | void (*mv_switch_mm)(struct mm_struct *, struct mm_struct *, | ||
71 | struct task_struct *); | ||
72 | void (*mv_activate_mm)(struct mm_struct *, struct mm_struct *); | ||
73 | |||
74 | void (*mv_flush_tlb_current)(struct mm_struct *); | ||
75 | void (*mv_flush_tlb_current_page)(struct mm_struct * mm, | ||
76 | struct vm_area_struct *vma, | ||
77 | unsigned long addr); | ||
78 | |||
79 | void (*update_irq_hw)(unsigned long, unsigned long, int); | ||
80 | void (*ack_irq)(unsigned long); | ||
81 | void (*device_interrupt)(unsigned long vector); | ||
82 | void (*machine_check)(u64 vector, u64 la); | ||
83 | |||
84 | void (*smp_callin)(void); | ||
85 | void (*init_arch)(void); | ||
86 | void (*init_irq)(void); | ||
87 | void (*init_rtc)(void); | ||
88 | void (*init_pci)(void); | ||
89 | void (*kill_arch)(int); | ||
90 | |||
91 | u8 (*pci_swizzle)(struct pci_dev *, u8 *); | ||
92 | int (*pci_map_irq)(struct pci_dev *, u8, u8); | ||
93 | struct pci_ops *pci_ops; | ||
94 | |||
95 | struct _alpha_agp_info *(*agp_info)(void); | ||
96 | |||
97 | const char *vector_name; | ||
98 | |||
99 | /* NUMA information */ | ||
100 | int (*pa_to_nid)(unsigned long); | ||
101 | int (*cpuid_to_nid)(int); | ||
102 | unsigned long (*node_mem_start)(int); | ||
103 | unsigned long (*node_mem_size)(int); | ||
104 | |||
105 | /* System specific parameters. */ | ||
106 | union { | ||
107 | struct { | ||
108 | unsigned long gru_int_req_bits; | ||
109 | } cia; | ||
110 | |||
111 | struct { | ||
112 | unsigned long gamma_bias; | ||
113 | } t2; | ||
114 | |||
115 | struct { | ||
116 | unsigned int route_tab; | ||
117 | } sio; | ||
118 | } sys; | ||
119 | }; | ||
120 | |||
121 | extern struct alpha_machine_vector alpha_mv; | ||
122 | |||
123 | #ifdef CONFIG_ALPHA_GENERIC | ||
124 | extern int alpha_using_srm; | ||
125 | #else | ||
126 | #ifdef CONFIG_ALPHA_SRM | ||
127 | #define alpha_using_srm 1 | ||
128 | #else | ||
129 | #define alpha_using_srm 0 | ||
130 | #endif | ||
131 | #endif /* GENERIC */ | ||
132 | |||
133 | #endif | ||
134 | #endif /* __ALPHA_MACHVEC_H */ | ||
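
alpha_machine_vector is a table of function pointers: a generic kernel fills it in at boot and dispatches I/O and IRQ operations through it, while a machine-specific build can resolve the same operations statically. The toy program below shows the dispatch pattern only; the names toy_mv, toy_readl and jensen_like_readl are invented for the example and are not kernel symbols.

    /* Standalone sketch of the machine-vector dispatch pattern. */
    #include <stdio.h>
    #include <stdint.h>

    struct toy_machine_vector {
        uint32_t (*mv_readl)(const volatile void *);
    };

    static uint32_t jensen_like_readl(const volatile void *addr)
    {
        (void)addr;
        return 0xdeadbeef;              /* stand-in for a real MMIO access */
    }

    static struct toy_machine_vector toy_mv = { .mv_readl = jensen_like_readl };

    #define toy_readl(a) toy_mv.mv_readl(a)   /* mirrors the CONFIG_ALPHA_GENERIC case */

    int main(void)
    {
        printf("readl -> 0x%x\n", (unsigned)toy_readl((void *)0));
        return 0;
    }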
diff --git a/arch/alpha/include/asm/mc146818rtc.h b/arch/alpha/include/asm/mc146818rtc.h new file mode 100644 index 000000000000..097703f1c8cb --- /dev/null +++ b/arch/alpha/include/asm/mc146818rtc.h | |||
@@ -0,0 +1,27 @@ | |||
1 | /* | ||
2 | * Machine dependent access functions for RTC registers. | ||
3 | */ | ||
4 | #ifndef __ASM_ALPHA_MC146818RTC_H | ||
5 | #define __ASM_ALPHA_MC146818RTC_H | ||
6 | |||
7 | #include <asm/io.h> | ||
8 | |||
9 | #ifndef RTC_PORT | ||
10 | #define RTC_PORT(x) (0x70 + (x)) | ||
11 | #define RTC_ALWAYS_BCD 1 /* RTC register values are in BCD */ | ||
12 | #endif | ||
13 | |||
14 | /* | ||
15 | * All machines supported so far access the RTC index register via | ||
16 | * an ISA port access, but the way to access the data register differs ... | ||
17 | */ | ||
18 | #define CMOS_READ(addr) ({ \ | ||
19 | outb_p((addr),RTC_PORT(0)); \ | ||
20 | inb_p(RTC_PORT(1)); \ | ||
21 | }) | ||
22 | #define CMOS_WRITE(val, addr) ({ \ | ||
23 | outb_p((addr),RTC_PORT(0)); \ | ||
24 | outb_p((val),RTC_PORT(1)); \ | ||
25 | }) | ||
26 | |||
27 | #endif /* __ASM_ALPHA_MC146818RTC_H */ | ||
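
CMOS_READ()/CMOS_WRITE() follow the usual MC146818 protocol: write the register number to the index port (0x70), then read or write the data port (0x71). The standalone sketch below fakes the two ports in memory so it can run anywhere, and decodes a BCD seconds value the way an RTC driver typically would; BCD2BIN here is a local helper, not taken from this header.

    /* Standalone model of the index/data port protocol behind CMOS_READ(). */
    #include <stdio.h>

    static unsigned char fake_cmos[128] = { [0x00] = 0x37 };  /* seconds = BCD 37 */
    static unsigned char fake_index;

    static void outb_p(unsigned char v, unsigned short port)
    {
        if (port == 0x70)
            fake_index = v & 0x7f;
    }

    static unsigned char inb_p(unsigned short port)
    {
        return port == 0x71 ? fake_cmos[fake_index] : 0xff;
    }

    #define RTC_PORT(x)      (0x70 + (x))
    #define CMOS_READ(addr)  ({ outb_p((addr), RTC_PORT(0)); inb_p(RTC_PORT(1)); })
    #define BCD2BIN(v)       (((v) & 0x0f) + ((v) >> 4) * 10)

    int main(void)
    {
        unsigned char sec = CMOS_READ(0x00);
        printf("seconds: raw 0x%02x, decoded %d\n", sec, BCD2BIN(sec));
        return 0;
    }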
diff --git a/arch/alpha/include/asm/md.h b/arch/alpha/include/asm/md.h new file mode 100644 index 000000000000..6c9b8222a4f2 --- /dev/null +++ b/arch/alpha/include/asm/md.h | |||
@@ -0,0 +1,13 @@ | |||
1 | /* $Id: md.h,v 1.1 1997/12/15 15:11:48 jj Exp $ | ||
2 | * md.h: High speed xor_block operation for RAID4/5 | ||
3 | * | ||
4 | */ | ||
5 | |||
6 | #ifndef __ASM_MD_H | ||
7 | #define __ASM_MD_H | ||
8 | |||
9 | /* #define HAVE_ARCH_XORBLOCK */ | ||
10 | |||
11 | #define MD_XORBLOCK_ALIGNMENT sizeof(long) | ||
12 | |||
13 | #endif /* __ASM_MD_H */ | ||
diff --git a/arch/alpha/include/asm/mman.h b/arch/alpha/include/asm/mman.h new file mode 100644 index 000000000000..90d7c35d2867 --- /dev/null +++ b/arch/alpha/include/asm/mman.h | |||
@@ -0,0 +1,54 @@ | |||
1 | #ifndef __ALPHA_MMAN_H__ | ||
2 | #define __ALPHA_MMAN_H__ | ||
3 | |||
4 | #define PROT_READ 0x1 /* page can be read */ | ||
5 | #define PROT_WRITE 0x2 /* page can be written */ | ||
6 | #define PROT_EXEC 0x4 /* page can be executed */ | ||
7 | #define PROT_SEM 0x8 /* page may be used for atomic ops */ | ||
8 | #define PROT_NONE 0x0 /* page can not be accessed */ | ||
9 | #define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ | ||
10 | #define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ | ||
11 | |||
12 | #define MAP_SHARED 0x01 /* Share changes */ | ||
13 | #define MAP_PRIVATE 0x02 /* Changes are private */ | ||
14 | #define MAP_TYPE 0x0f /* Mask for type of mapping (OSF/1 is _wrong_) */ | ||
15 | #define MAP_FIXED 0x100 /* Interpret addr exactly */ | ||
16 | #define MAP_ANONYMOUS 0x10 /* don't use a file */ | ||
17 | |||
18 | /* not used by linux, but here to make sure we don't clash with OSF/1 defines */ | ||
19 | #define _MAP_HASSEMAPHORE 0x0200 | ||
20 | #define _MAP_INHERIT 0x0400 | ||
21 | #define _MAP_UNALIGNED 0x0800 | ||
22 | |||
23 | /* These are linux-specific */ | ||
24 | #define MAP_GROWSDOWN 0x01000 /* stack-like segment */ | ||
25 | #define MAP_DENYWRITE 0x02000 /* ETXTBSY */ | ||
26 | #define MAP_EXECUTABLE 0x04000 /* mark it as an executable */ | ||
27 | #define MAP_LOCKED 0x08000 /* lock the mapping */ | ||
28 | #define MAP_NORESERVE 0x10000 /* don't check for reservations */ | ||
29 | #define MAP_POPULATE 0x20000 /* populate (prefault) pagetables */ | ||
30 | #define MAP_NONBLOCK 0x40000 /* do not block on IO */ | ||
31 | |||
32 | #define MS_ASYNC 1 /* sync memory asynchronously */ | ||
33 | #define MS_SYNC 2 /* synchronous memory sync */ | ||
34 | #define MS_INVALIDATE 4 /* invalidate the caches */ | ||
35 | |||
36 | #define MCL_CURRENT 8192 /* lock all currently mapped pages */ | ||
37 | #define MCL_FUTURE 16384 /* lock all additions to address space */ | ||
38 | |||
39 | #define MADV_NORMAL 0 /* no further special treatment */ | ||
40 | #define MADV_RANDOM 1 /* expect random page references */ | ||
41 | #define MADV_SEQUENTIAL 2 /* expect sequential page references */ | ||
42 | #define MADV_WILLNEED 3 /* will need these pages */ | ||
43 | #define MADV_SPACEAVAIL 5 /* ensure resources are available */ | ||
44 | #define MADV_DONTNEED 6 /* don't need these pages */ | ||
45 | |||
46 | /* common/generic parameters */ | ||
47 | #define MADV_REMOVE 9 /* remove these pages & resources */ | ||
48 | #define MADV_DONTFORK 10 /* don't inherit across fork */ | ||
49 | #define MADV_DOFORK 11 /* do inherit across fork */ | ||
50 | |||
51 | /* compatibility flags */ | ||
52 | #define MAP_FILE 0 | ||
53 | |||
54 | #endif /* __ALPHA_MMAN_H__ */ | ||
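
These constants are what <sys/mman.h> resolves to on Alpha (several values, e.g. MAP_ANONYMOUS = 0x10 and the MCL_* values, differ from other architectures). Portable code only ever uses the symbolic names, as in the small user-space example below; the 8 KB figure reflects Alpha's page size defined in page.h further down.

    /* Minimal anonymous private mapping, using only the symbolic flag names. */
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t len = 2 * 8192;                      /* two 8 KB Alpha pages */
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        memset(p, 0, len);
        printf("mapped %zu bytes at %p\n", len, p);
        munmap(p, len);
        return 0;
    }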
diff --git a/arch/alpha/include/asm/mmu.h b/arch/alpha/include/asm/mmu.h new file mode 100644 index 000000000000..3dc127779329 --- /dev/null +++ b/arch/alpha/include/asm/mmu.h | |||
@@ -0,0 +1,7 @@ | |||
1 | #ifndef __ALPHA_MMU_H | ||
2 | #define __ALPHA_MMU_H | ||
3 | |||
4 | /* The alpha MMU context is one "unsigned long" bitmap per CPU */ | ||
5 | typedef unsigned long mm_context_t[NR_CPUS]; | ||
6 | |||
7 | #endif | ||
diff --git a/arch/alpha/include/asm/mmu_context.h b/arch/alpha/include/asm/mmu_context.h new file mode 100644 index 000000000000..86c08a02d239 --- /dev/null +++ b/arch/alpha/include/asm/mmu_context.h | |||
@@ -0,0 +1,260 @@ | |||
1 | #ifndef __ALPHA_MMU_CONTEXT_H | ||
2 | #define __ALPHA_MMU_CONTEXT_H | ||
3 | |||
4 | /* | ||
5 | * get a new mmu context.. | ||
6 | * | ||
7 | * Copyright (C) 1996, Linus Torvalds | ||
8 | */ | ||
9 | |||
10 | #include <asm/system.h> | ||
11 | #include <asm/machvec.h> | ||
12 | #include <asm/compiler.h> | ||
13 | #include <asm-generic/mm_hooks.h> | ||
14 | |||
15 | /* | ||
16 | * Force a context reload. This is needed when we change the page | ||
17 | * table pointer or when we update the ASN of the current process. | ||
18 | */ | ||
19 | |||
20 | /* Don't get into trouble with dueling __EXTERN_INLINEs. */ | ||
21 | #ifndef __EXTERN_INLINE | ||
22 | #include <asm/io.h> | ||
23 | #endif | ||
24 | |||
25 | |||
26 | static inline unsigned long | ||
27 | __reload_thread(struct pcb_struct *pcb) | ||
28 | { | ||
29 | register unsigned long a0 __asm__("$16"); | ||
30 | register unsigned long v0 __asm__("$0"); | ||
31 | |||
32 | a0 = virt_to_phys(pcb); | ||
33 | __asm__ __volatile__( | ||
34 | "call_pal %2 #__reload_thread" | ||
35 | : "=r"(v0), "=r"(a0) | ||
36 | : "i"(PAL_swpctx), "r"(a0) | ||
37 | : "$1", "$22", "$23", "$24", "$25"); | ||
38 | |||
39 | return v0; | ||
40 | } | ||
41 | |||
42 | |||
43 | /* | ||
44 | * The maximum ASN's the processor supports. On the EV4 this is 63 | ||
45 | * but the PAL-code doesn't actually use this information. On the | ||
46 | * EV5 this is 127, and EV6 has 255. | ||
47 | * | ||
48 | * On the EV4, the ASNs are more-or-less useless anyway, as they are | ||
49 | * only used as an icache tag, not for TB entries. On the EV5 and EV6, | ||
50 | * ASN's also validate the TB entries, and thus make a lot more sense. | ||
51 | * | ||
52 | * The EV4 ASN's don't even match the architecture manual, ugh. And | ||
53 | * I quote: "If a processor implements address space numbers (ASNs), | ||
54 | * and the old PTE has the Address Space Match (ASM) bit clear (ASNs | ||
55 | * in use) and the Valid bit set, then entries can also effectively be | ||
56 | * made coherent by assigning a new, unused ASN to the currently | ||
57 | * running process and not reusing the previous ASN before calling the | ||
58 | * appropriate PALcode routine to invalidate the translation buffer (TB)". | ||
59 | * | ||
60 | * In short, the EV4 has a "kind of" ASN capability, but it doesn't actually | ||
61 | * work correctly and can thus not be used (explaining the lack of PAL-code | ||
62 | * support). | ||
63 | */ | ||
64 | #define EV4_MAX_ASN 63 | ||
65 | #define EV5_MAX_ASN 127 | ||
66 | #define EV6_MAX_ASN 255 | ||
67 | |||
68 | #ifdef CONFIG_ALPHA_GENERIC | ||
69 | # define MAX_ASN (alpha_mv.max_asn) | ||
70 | #else | ||
71 | # ifdef CONFIG_ALPHA_EV4 | ||
72 | # define MAX_ASN EV4_MAX_ASN | ||
73 | # elif defined(CONFIG_ALPHA_EV5) | ||
74 | # define MAX_ASN EV5_MAX_ASN | ||
75 | # else | ||
76 | # define MAX_ASN EV6_MAX_ASN | ||
77 | # endif | ||
78 | #endif | ||
79 | |||
80 | /* | ||
81 | * cpu_last_asn(processor): | ||
82 | * 63 0 | ||
83 | * +-------------+----------------+--------------+ | ||
84 | * | asn version | this processor | hardware asn | | ||
85 | * +-------------+----------------+--------------+ | ||
86 | */ | ||
87 | |||
88 | #include <asm/smp.h> | ||
89 | #ifdef CONFIG_SMP | ||
90 | #define cpu_last_asn(cpuid) (cpu_data[cpuid].last_asn) | ||
91 | #else | ||
92 | extern unsigned long last_asn; | ||
93 | #define cpu_last_asn(cpuid) last_asn | ||
94 | #endif /* CONFIG_SMP */ | ||
95 | |||
96 | #define WIDTH_HARDWARE_ASN 8 | ||
97 | #define ASN_FIRST_VERSION (1UL << WIDTH_HARDWARE_ASN) | ||
98 | #define HARDWARE_ASN_MASK ((1UL << WIDTH_HARDWARE_ASN) - 1) | ||
99 | |||
100 | /* | ||
101 | * NOTE! The way this is set up, the high bits of the "asn_cache" (and | ||
102 | * the "mm->context") are the ASN _version_ code. A version of 0 is | ||
103 | * always considered invalid, so to invalidate another process you only | ||
104 | * need to do "p->mm->context = 0". | ||
105 | * | ||
106 | * If we need more ASN's than the processor has, we invalidate the old | ||
107 | * user TLB's (tbiap()) and start a new ASN version. That will automatically | ||
108 | * force a new asn for any other processes the next time they want to | ||
109 | * run. | ||
110 | */ | ||
111 | |||
112 | #ifndef __EXTERN_INLINE | ||
113 | #define __EXTERN_INLINE extern inline | ||
114 | #define __MMU_EXTERN_INLINE | ||
115 | #endif | ||
116 | |||
117 | extern inline unsigned long | ||
118 | __get_new_mm_context(struct mm_struct *mm, long cpu) | ||
119 | { | ||
120 | unsigned long asn = cpu_last_asn(cpu); | ||
121 | unsigned long next = asn + 1; | ||
122 | |||
123 | if ((asn & HARDWARE_ASN_MASK) >= MAX_ASN) { | ||
124 | tbiap(); | ||
125 | imb(); | ||
126 | next = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION; | ||
127 | } | ||
128 | cpu_last_asn(cpu) = next; | ||
129 | return next; | ||
130 | } | ||
131 | |||
132 | __EXTERN_INLINE void | ||
133 | ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm, | ||
134 | struct task_struct *next) | ||
135 | { | ||
136 | /* Check if our ASN is of an older version, and thus invalid. */ | ||
137 | unsigned long asn; | ||
138 | unsigned long mmc; | ||
139 | long cpu = smp_processor_id(); | ||
140 | |||
141 | #ifdef CONFIG_SMP | ||
142 | cpu_data[cpu].asn_lock = 1; | ||
143 | barrier(); | ||
144 | #endif | ||
145 | asn = cpu_last_asn(cpu); | ||
146 | mmc = next_mm->context[cpu]; | ||
147 | if ((mmc ^ asn) & ~HARDWARE_ASN_MASK) { | ||
148 | mmc = __get_new_mm_context(next_mm, cpu); | ||
149 | next_mm->context[cpu] = mmc; | ||
150 | } | ||
151 | #ifdef CONFIG_SMP | ||
152 | else | ||
153 | cpu_data[cpu].need_new_asn = 1; | ||
154 | #endif | ||
155 | |||
156 | /* Always update the PCB ASN. Another thread may have allocated | ||
157 | a new mm->context (via flush_tlb_mm) without the ASN serial | ||
158 | number wrapping. We have no way to detect when this is needed. */ | ||
159 | task_thread_info(next)->pcb.asn = mmc & HARDWARE_ASN_MASK; | ||
160 | } | ||
161 | |||
162 | __EXTERN_INLINE void | ||
163 | ev4_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm, | ||
164 | struct task_struct *next) | ||
165 | { | ||
166 | /* As described, ASN's are broken for TLB usage. But we can | ||
167 | optimize for switching between threads -- if the mm is | ||
168 | unchanged from current we needn't flush. */ | ||
169 | /* ??? May not be needed because EV4 PALcode recognizes that | ||
170 | ASN's are broken and does a tbiap itself on swpctx, under | ||
171 | the "Must set ASN or flush" rule. At least this is true | ||
172 | for a 1992 SRM, reports Joseph Martin (jmartin@hlo.dec.com). | ||
173 | I'm going to leave this here anyway, just to Be Sure. -- r~ */ | ||
174 | if (prev_mm != next_mm) | ||
175 | tbiap(); | ||
176 | |||
177 | /* Do continue to allocate ASNs, because we can still use them | ||
178 | to avoid flushing the icache. */ | ||
179 | ev5_switch_mm(prev_mm, next_mm, next); | ||
180 | } | ||
181 | |||
182 | extern void __load_new_mm_context(struct mm_struct *); | ||
183 | |||
184 | #ifdef CONFIG_SMP | ||
185 | #define check_mmu_context() \ | ||
186 | do { \ | ||
187 | int cpu = smp_processor_id(); \ | ||
188 | cpu_data[cpu].asn_lock = 0; \ | ||
189 | barrier(); \ | ||
190 | if (cpu_data[cpu].need_new_asn) { \ | ||
191 | struct mm_struct * mm = current->active_mm; \ | ||
192 | cpu_data[cpu].need_new_asn = 0; \ | ||
193 | if (!mm->context[cpu]) \ | ||
194 | __load_new_mm_context(mm); \ | ||
195 | } \ | ||
196 | } while(0) | ||
197 | #else | ||
198 | #define check_mmu_context() do { } while(0) | ||
199 | #endif | ||
200 | |||
201 | __EXTERN_INLINE void | ||
202 | ev5_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm) | ||
203 | { | ||
204 | __load_new_mm_context(next_mm); | ||
205 | } | ||
206 | |||
207 | __EXTERN_INLINE void | ||
208 | ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm) | ||
209 | { | ||
210 | __load_new_mm_context(next_mm); | ||
211 | tbiap(); | ||
212 | } | ||
213 | |||
214 | #define deactivate_mm(tsk,mm) do { } while (0) | ||
215 | |||
216 | #ifdef CONFIG_ALPHA_GENERIC | ||
217 | # define switch_mm(a,b,c) alpha_mv.mv_switch_mm((a),(b),(c)) | ||
218 | # define activate_mm(x,y) alpha_mv.mv_activate_mm((x),(y)) | ||
219 | #else | ||
220 | # ifdef CONFIG_ALPHA_EV4 | ||
221 | # define switch_mm(a,b,c) ev4_switch_mm((a),(b),(c)) | ||
222 | # define activate_mm(x,y) ev4_activate_mm((x),(y)) | ||
223 | # else | ||
224 | # define switch_mm(a,b,c) ev5_switch_mm((a),(b),(c)) | ||
225 | # define activate_mm(x,y) ev5_activate_mm((x),(y)) | ||
226 | # endif | ||
227 | #endif | ||
228 | |||
229 | static inline int | ||
230 | init_new_context(struct task_struct *tsk, struct mm_struct *mm) | ||
231 | { | ||
232 | int i; | ||
233 | |||
234 | for_each_online_cpu(i) | ||
235 | mm->context[i] = 0; | ||
236 | if (tsk != current) | ||
237 | task_thread_info(tsk)->pcb.ptbr | ||
238 | = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT; | ||
239 | return 0; | ||
240 | } | ||
241 | |||
242 | extern inline void | ||
243 | destroy_context(struct mm_struct *mm) | ||
244 | { | ||
245 | /* Nothing to do. */ | ||
246 | } | ||
247 | |||
248 | static inline void | ||
249 | enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | ||
250 | { | ||
251 | task_thread_info(tsk)->pcb.ptbr | ||
252 | = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT; | ||
253 | } | ||
254 | |||
255 | #ifdef __MMU_EXTERN_INLINE | ||
256 | #undef __EXTERN_INLINE | ||
257 | #undef __MMU_EXTERN_INLINE | ||
258 | #endif | ||
259 | |||
260 | #endif /* __ALPHA_MMU_CONTEXT_H */ | ||
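
The ASN scheme above packs a hardware ASN into the low 8 bits and a generation ("version") counter into the high bits, so the staleness test in ev5_switch_mm() is just (mmc ^ asn) & ~HARDWARE_ASN_MASK. The standalone model below reproduces that arithmetic with an ordinary variable in place of cpu_last_asn(); it is only meant to show how a generation bump invalidates every context from the previous generation.

    /* Standalone model of the ASN versioning used by __get_new_mm_context(). */
    #include <stdio.h>

    #define WIDTH_HARDWARE_ASN 8
    #define ASN_FIRST_VERSION  (1UL << WIDTH_HARDWARE_ASN)
    #define HARDWARE_ASN_MASK  ((1UL << WIDTH_HARDWARE_ASN) - 1)
    #define MAX_ASN            255UL                 /* EV6 value */

    static unsigned long last_asn = ASN_FIRST_VERSION;

    static unsigned long get_new_context(void)
    {
        unsigned long asn = last_asn, next = asn + 1;

        if ((asn & HARDWARE_ASN_MASK) >= MAX_ASN)    /* hardware ASNs exhausted */
            next = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
        return last_asn = next;
    }

    int main(void)
    {
        unsigned long mm_context = get_new_context();

        /* Same generation: high bits match, context still valid. */
        printf("stale? %s\n",
               ((mm_context ^ last_asn) & ~HARDWARE_ASN_MASK) ? "yes" : "no");

        /* Force a wrap and allocate again: old context belongs to a past generation. */
        last_asn |= HARDWARE_ASN_MASK;
        get_new_context();
        printf("stale after wrap? %s\n",
               ((mm_context ^ last_asn) & ~HARDWARE_ASN_MASK) ? "yes" : "no");
        return 0;
    }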
diff --git a/arch/alpha/include/asm/mmzone.h b/arch/alpha/include/asm/mmzone.h new file mode 100644 index 000000000000..8af56ce346ad --- /dev/null +++ b/arch/alpha/include/asm/mmzone.h | |||
@@ -0,0 +1,115 @@ | |||
1 | /* | ||
2 | * Written by Kanoj Sarcar (kanoj@sgi.com) Aug 99 | ||
3 | * Adapted for the alpha wildfire architecture Jan 2001. | ||
4 | */ | ||
5 | #ifndef _ASM_MMZONE_H_ | ||
6 | #define _ASM_MMZONE_H_ | ||
7 | |||
8 | #include <asm/smp.h> | ||
9 | |||
10 | struct bootmem_data_t; /* stupid forward decl. */ | ||
11 | |||
12 | /* | ||
13 | * Following are macros that are specific to this numa platform. | ||
14 | */ | ||
15 | |||
16 | extern pg_data_t node_data[]; | ||
17 | |||
18 | #define alpha_pa_to_nid(pa) \ | ||
19 | (alpha_mv.pa_to_nid \ | ||
20 | ? alpha_mv.pa_to_nid(pa) \ | ||
21 | : (0)) | ||
22 | #define node_mem_start(nid) \ | ||
23 | (alpha_mv.node_mem_start \ | ||
24 | ? alpha_mv.node_mem_start(nid) \ | ||
25 | : (0UL)) | ||
26 | #define node_mem_size(nid) \ | ||
27 | (alpha_mv.node_mem_size \ | ||
28 | ? alpha_mv.node_mem_size(nid) \ | ||
29 | : ((nid) ? (0UL) : (~0UL))) | ||
30 | |||
31 | #define pa_to_nid(pa) alpha_pa_to_nid(pa) | ||
32 | #define NODE_DATA(nid) (&node_data[(nid)]) | ||
33 | |||
34 | #define node_localnr(pfn, nid) ((pfn) - NODE_DATA(nid)->node_start_pfn) | ||
35 | |||
36 | #if 1 | ||
37 | #define PLAT_NODE_DATA_LOCALNR(p, n) \ | ||
38 | (((p) >> PAGE_SHIFT) - PLAT_NODE_DATA(n)->gendata.node_start_pfn) | ||
39 | #else | ||
40 | static inline unsigned long | ||
41 | PLAT_NODE_DATA_LOCALNR(unsigned long p, int n) | ||
42 | { | ||
43 | unsigned long temp; | ||
44 | temp = p >> PAGE_SHIFT; | ||
45 | return temp - PLAT_NODE_DATA(n)->gendata.node_start_pfn; | ||
46 | } | ||
47 | #endif | ||
48 | |||
49 | #ifdef CONFIG_DISCONTIGMEM | ||
50 | |||
51 | /* | ||
52 | * Following are macros that each numa implementation must define. | ||
53 | */ | ||
54 | |||
55 | /* | ||
56 | * Given a kernel address, find the home node of the underlying memory. | ||
57 | */ | ||
58 | #define kvaddr_to_nid(kaddr) pa_to_nid(__pa(kaddr)) | ||
59 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) | ||
60 | |||
61 | /* | ||
62 | * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory | ||
63 | * and returns the kaddr corresponding to first physical page in the | ||
64 | * node's mem_map. | ||
65 | */ | ||
66 | #define LOCAL_BASE_ADDR(kaddr) \ | ||
67 | ((unsigned long)__va(NODE_DATA(kvaddr_to_nid(kaddr))->node_start_pfn \ | ||
68 | << PAGE_SHIFT)) | ||
69 | |||
70 | /* XXX: FIXME -- wli */ | ||
71 | #define kern_addr_valid(kaddr) (0) | ||
72 | |||
73 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) | ||
74 | |||
75 | #define VALID_PAGE(page) (((page) - mem_map) < max_mapnr) | ||
76 | |||
77 | #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> 32)) | ||
78 | #define pgd_page(pgd) (pfn_to_page(pgd_val(pgd) >> 32)) | ||
79 | #define pte_pfn(pte) (pte_val(pte) >> 32) | ||
80 | |||
81 | #define mk_pte(page, pgprot) \ | ||
82 | ({ \ | ||
83 | pte_t pte; \ | ||
84 | unsigned long pfn; \ | ||
85 | \ | ||
86 | pfn = page_to_pfn(page) << 32; \ | ||
87 | pte_val(pte) = pfn | pgprot_val(pgprot); \ | ||
88 | \ | ||
89 | pte; \ | ||
90 | }) | ||
91 | |||
92 | #define pte_page(x) \ | ||
93 | ({ \ | ||
94 | unsigned long kvirt; \ | ||
95 | struct page * __xx; \ | ||
96 | \ | ||
97 | kvirt = (unsigned long)__va(pte_val(x) >> (32-PAGE_SHIFT)); \ | ||
98 | __xx = virt_to_page(kvirt); \ | ||
99 | \ | ||
100 | __xx; \ | ||
101 | }) | ||
102 | |||
103 | #define page_to_pa(page) \ | ||
104 | (page_to_pfn(page) << PAGE_SHIFT) | ||
105 | |||
106 | #define pfn_to_nid(pfn) pa_to_nid(((u64)(pfn) << PAGE_SHIFT)) | ||
107 | #define pfn_valid(pfn) \ | ||
108 | (((pfn) - node_start_pfn(pfn_to_nid(pfn))) < \ | ||
109 | node_spanned_pages(pfn_to_nid(pfn))) \ | ||
110 | |||
111 | #define virt_addr_valid(kaddr) pfn_valid((__pa(kaddr) >> PAGE_SHIFT)) | ||
112 | |||
113 | #endif /* CONFIG_DISCONTIGMEM */ | ||
114 | |||
115 | #endif /* _ASM_MMZONE_H_ */ | ||
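
In the DISCONTIGMEM pte layout used by mk_pte() and pte_pfn() above, the page frame number occupies the upper 32 bits of the pte and the protection bits the lower half. The tiny program below only demonstrates that packing, with made-up numbers.

    /* Standalone model of the pfn/pgprot packing above. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t pfn = 0x12345, prot = 0x111;          /* illustrative values */
        uint64_t pte = (pfn << 32) | prot;             /* mk_pte() packing */

        printf("pte=0x%016llx pfn=0x%llx prot=0x%llx\n",
               (unsigned long long)pte,
               (unsigned long long)(pte >> 32),        /* pte_pfn() */
               (unsigned long long)(pte & 0xffffffffu));
        return 0;
    }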
diff --git a/arch/alpha/include/asm/module.h b/arch/alpha/include/asm/module.h new file mode 100644 index 000000000000..7b63743c534a --- /dev/null +++ b/arch/alpha/include/asm/module.h | |||
@@ -0,0 +1,23 @@ | |||
1 | #ifndef _ALPHA_MODULE_H | ||
2 | #define _ALPHA_MODULE_H | ||
3 | |||
4 | struct mod_arch_specific | ||
5 | { | ||
6 | unsigned int gotsecindex; | ||
7 | }; | ||
8 | |||
9 | #define Elf_Sym Elf64_Sym | ||
10 | #define Elf_Shdr Elf64_Shdr | ||
11 | #define Elf_Ehdr Elf64_Ehdr | ||
12 | #define Elf_Phdr Elf64_Phdr | ||
13 | #define Elf_Dyn Elf64_Dyn | ||
14 | #define Elf_Rel Elf64_Rel | ||
15 | #define Elf_Rela Elf64_Rela | ||
16 | |||
17 | #define ARCH_SHF_SMALL SHF_ALPHA_GPREL | ||
18 | |||
19 | #ifdef MODULE | ||
20 | asm(".section .got,\"aws\",@progbits; .align 3; .previous"); | ||
21 | #endif | ||
22 | |||
23 | #endif /*_ALPHA_MODULE_H*/ | ||
diff --git a/arch/alpha/include/asm/msgbuf.h b/arch/alpha/include/asm/msgbuf.h new file mode 100644 index 000000000000..98496501a2bb --- /dev/null +++ b/arch/alpha/include/asm/msgbuf.h | |||
@@ -0,0 +1,27 @@ | |||
1 | #ifndef _ALPHA_MSGBUF_H | ||
2 | #define _ALPHA_MSGBUF_H | ||
3 | |||
4 | /* | ||
5 | * The msqid64_ds structure for alpha architecture. | ||
6 | * Note extra padding because this structure is passed back and forth | ||
7 | * between kernel and user space. | ||
8 | * | ||
9 | * Pad space is left for: | ||
10 | * - 2 miscellaneous 64-bit values | ||
11 | */ | ||
12 | |||
13 | struct msqid64_ds { | ||
14 | struct ipc64_perm msg_perm; | ||
15 | __kernel_time_t msg_stime; /* last msgsnd time */ | ||
16 | __kernel_time_t msg_rtime; /* last msgrcv time */ | ||
17 | __kernel_time_t msg_ctime; /* last change time */ | ||
18 | unsigned long msg_cbytes; /* current number of bytes on queue */ | ||
19 | unsigned long msg_qnum; /* number of messages in queue */ | ||
20 | unsigned long msg_qbytes; /* max number of bytes on queue */ | ||
21 | __kernel_pid_t msg_lspid; /* pid of last msgsnd */ | ||
22 | __kernel_pid_t msg_lrpid; /* last receive pid */ | ||
23 | unsigned long __unused1; | ||
24 | unsigned long __unused2; | ||
25 | }; | ||
26 | |||
27 | #endif /* _ALPHA_MSGBUF_H */ | ||
diff --git a/arch/alpha/include/asm/mutex.h b/arch/alpha/include/asm/mutex.h new file mode 100644 index 000000000000..458c1f7fbc18 --- /dev/null +++ b/arch/alpha/include/asm/mutex.h | |||
@@ -0,0 +1,9 @@ | |||
1 | /* | ||
2 | * Pull in the generic implementation for the mutex fastpath. | ||
3 | * | ||
4 | * TODO: implement optimized primitives instead, or leave the generic | ||
5 | * implementation in place, or pick the atomic_xchg() based generic | ||
6 | * implementation. (see asm-generic/mutex-xchg.h for details) | ||
7 | */ | ||
8 | |||
9 | #include <asm-generic/mutex-dec.h> | ||
diff --git a/arch/alpha/include/asm/page.h b/arch/alpha/include/asm/page.h new file mode 100644 index 000000000000..0995f9d13417 --- /dev/null +++ b/arch/alpha/include/asm/page.h | |||
@@ -0,0 +1,98 @@ | |||
1 | #ifndef _ALPHA_PAGE_H | ||
2 | #define _ALPHA_PAGE_H | ||
3 | |||
4 | #include <linux/const.h> | ||
5 | #include <asm/pal.h> | ||
6 | |||
7 | /* PAGE_SHIFT determines the page size */ | ||
8 | #define PAGE_SHIFT 13 | ||
9 | #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) | ||
10 | #define PAGE_MASK (~(PAGE_SIZE-1)) | ||
11 | |||
12 | #ifndef __ASSEMBLY__ | ||
13 | |||
14 | #define STRICT_MM_TYPECHECKS | ||
15 | |||
16 | extern void clear_page(void *page); | ||
17 | #define clear_user_page(page, vaddr, pg) clear_page(page) | ||
18 | |||
19 | #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ | ||
20 | alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) | ||
21 | #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE | ||
22 | |||
23 | extern void copy_page(void * _to, void * _from); | ||
24 | #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) | ||
25 | |||
26 | #ifdef STRICT_MM_TYPECHECKS | ||
27 | /* | ||
28 | * These are used to make use of C type-checking.. | ||
29 | */ | ||
30 | typedef struct { unsigned long pte; } pte_t; | ||
31 | typedef struct { unsigned long pmd; } pmd_t; | ||
32 | typedef struct { unsigned long pgd; } pgd_t; | ||
33 | typedef struct { unsigned long pgprot; } pgprot_t; | ||
34 | |||
35 | #define pte_val(x) ((x).pte) | ||
36 | #define pmd_val(x) ((x).pmd) | ||
37 | #define pgd_val(x) ((x).pgd) | ||
38 | #define pgprot_val(x) ((x).pgprot) | ||
39 | |||
40 | #define __pte(x) ((pte_t) { (x) } ) | ||
41 | #define __pmd(x) ((pmd_t) { (x) } ) | ||
42 | #define __pgd(x) ((pgd_t) { (x) } ) | ||
43 | #define __pgprot(x) ((pgprot_t) { (x) } ) | ||
44 | |||
45 | #else | ||
46 | /* | ||
47 | * .. while these make it easier on the compiler | ||
48 | */ | ||
49 | typedef unsigned long pte_t; | ||
50 | typedef unsigned long pmd_t; | ||
51 | typedef unsigned long pgd_t; | ||
52 | typedef unsigned long pgprot_t; | ||
53 | |||
54 | #define pte_val(x) (x) | ||
55 | #define pmd_val(x) (x) | ||
56 | #define pgd_val(x) (x) | ||
57 | #define pgprot_val(x) (x) | ||
58 | |||
59 | #define __pte(x) (x) | ||
60 | #define __pgd(x) (x) | ||
61 | #define __pgprot(x) (x) | ||
62 | |||
63 | #endif /* STRICT_MM_TYPECHECKS */ | ||
64 | |||
65 | typedef struct page *pgtable_t; | ||
66 | |||
67 | #ifdef USE_48_BIT_KSEG | ||
68 | #define PAGE_OFFSET 0xffff800000000000UL | ||
69 | #else | ||
70 | #define PAGE_OFFSET 0xfffffc0000000000UL | ||
71 | #endif | ||
72 | |||
73 | #else | ||
74 | |||
75 | #ifdef USE_48_BIT_KSEG | ||
76 | #define PAGE_OFFSET 0xffff800000000000 | ||
77 | #else | ||
78 | #define PAGE_OFFSET 0xfffffc0000000000 | ||
79 | #endif | ||
80 | |||
81 | #endif /* !__ASSEMBLY__ */ | ||
82 | |||
83 | #define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) | ||
84 | #define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) | ||
85 | #ifndef CONFIG_DISCONTIGMEM | ||
86 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) | ||
87 | |||
88 | #define pfn_valid(pfn) ((pfn) < max_mapnr) | ||
89 | #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) | ||
90 | #endif /* CONFIG_DISCONTIGMEM */ | ||
91 | |||
92 | #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ | ||
93 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) | ||
94 | |||
95 | #include <asm-generic/memory_model.h> | ||
96 | #include <asm-generic/page.h> | ||
97 | |||
98 | #endif /* _ALPHA_PAGE_H */ | ||
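
STRICT_MM_TYPECHECKS wraps each page-table word in a single-member struct so that pte, pmd and pgd values cannot be mixed up silently, at no runtime cost. The user-space sketch below shows the idea with just pte_t and pgd_t; it is not the kernel's definition, only an illustration of the trade-off the comments describe.

    /* Standalone sketch of the STRICT_MM_TYPECHECKS wrapper idea. */
    #include <stdio.h>

    typedef struct { unsigned long pte; } pte_t;
    typedef struct { unsigned long pgd; } pgd_t;

    #define pte_val(x) ((x).pte)
    #define __pte(x)   ((pte_t) { (x) })

    int main(void)
    {
        pte_t p = __pte(0x1234);

        printf("pte value: 0x%lx (sizeof(pte_t)=%zu)\n", pte_val(p), sizeof(pte_t));
        /* pgd_t q = p;   would not compile: pte_t and pgd_t are distinct types */
        return 0;
    }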
diff --git a/arch/alpha/include/asm/pal.h b/arch/alpha/include/asm/pal.h new file mode 100644 index 000000000000..9b4ba0d6f00b --- /dev/null +++ b/arch/alpha/include/asm/pal.h | |||
@@ -0,0 +1,51 @@ | |||
1 | #ifndef __ALPHA_PAL_H | ||
2 | #define __ALPHA_PAL_H | ||
3 | |||
4 | /* | ||
5 | * Common PAL-code | ||
6 | */ | ||
7 | #define PAL_halt 0 | ||
8 | #define PAL_cflush 1 | ||
9 | #define PAL_draina 2 | ||
10 | #define PAL_bpt 128 | ||
11 | #define PAL_bugchk 129 | ||
12 | #define PAL_chmk 131 | ||
13 | #define PAL_callsys 131 | ||
14 | #define PAL_imb 134 | ||
15 | #define PAL_rduniq 158 | ||
16 | #define PAL_wruniq 159 | ||
17 | #define PAL_gentrap 170 | ||
18 | #define PAL_nphalt 190 | ||
19 | |||
20 | /* | ||
21 | * VMS specific PAL-code | ||
22 | */ | ||
23 | #define PAL_swppal 10 | ||
24 | #define PAL_mfpr_vptb 41 | ||
25 | |||
26 | /* | ||
27 | * OSF specific PAL-code | ||
28 | */ | ||
29 | #define PAL_cserve 9 | ||
30 | #define PAL_wripir 13 | ||
31 | #define PAL_rdmces 16 | ||
32 | #define PAL_wrmces 17 | ||
33 | #define PAL_wrfen 43 | ||
34 | #define PAL_wrvptptr 45 | ||
35 | #define PAL_jtopal 46 | ||
36 | #define PAL_swpctx 48 | ||
37 | #define PAL_wrval 49 | ||
38 | #define PAL_rdval 50 | ||
39 | #define PAL_tbi 51 | ||
40 | #define PAL_wrent 52 | ||
41 | #define PAL_swpipl 53 | ||
42 | #define PAL_rdps 54 | ||
43 | #define PAL_wrkgp 55 | ||
44 | #define PAL_wrusp 56 | ||
45 | #define PAL_wrperfmon 57 | ||
46 | #define PAL_rdusp 58 | ||
47 | #define PAL_whami 60 | ||
48 | #define PAL_retsys 61 | ||
49 | #define PAL_rti 63 | ||
50 | |||
51 | #endif /* __ALPHA_PAL_H */ | ||
diff --git a/arch/alpha/include/asm/param.h b/arch/alpha/include/asm/param.h new file mode 100644 index 000000000000..e691ecfedb2c --- /dev/null +++ b/arch/alpha/include/asm/param.h | |||
@@ -0,0 +1,27 @@ | |||
1 | #ifndef _ASM_ALPHA_PARAM_H | ||
2 | #define _ASM_ALPHA_PARAM_H | ||
3 | |||
4 | /* ??? Gross. I don't want to parameterize this, and supposedly the | ||
5 | hardware ignores reprogramming. We also need userland buy-in to the | ||
6 | change in HZ, since this is visible in the wait4 resources etc. */ | ||
7 | |||
8 | #ifdef __KERNEL__ | ||
9 | #define HZ CONFIG_HZ | ||
10 | #define USER_HZ HZ | ||
11 | #else | ||
12 | #define HZ 1024 | ||
13 | #endif | ||
14 | |||
15 | #define EXEC_PAGESIZE 8192 | ||
16 | |||
17 | #ifndef NOGROUP | ||
18 | #define NOGROUP (-1) | ||
19 | #endif | ||
20 | |||
21 | #define MAXHOSTNAMELEN 64 /* max length of hostname */ | ||
22 | |||
23 | #ifdef __KERNEL__ | ||
24 | # define CLOCKS_PER_SEC HZ /* frequency at which times() counts */ | ||
25 | #endif | ||
26 | |||
27 | #endif /* _ASM_ALPHA_PARAM_H */ | ||
diff --git a/arch/alpha/include/asm/parport.h b/arch/alpha/include/asm/parport.h new file mode 100644 index 000000000000..c5ee7cbb2fcd --- /dev/null +++ b/arch/alpha/include/asm/parport.h | |||
@@ -0,0 +1,18 @@ | |||
1 | /* | ||
2 | * parport.h: platform-specific PC-style parport initialisation | ||
3 | * | ||
4 | * Copyright (C) 1999, 2000 Tim Waugh <tim@cyberelk.demon.co.uk> | ||
5 | * | ||
6 | * This file should only be included by drivers/parport/parport_pc.c. | ||
7 | */ | ||
8 | |||
9 | #ifndef _ASM_AXP_PARPORT_H | ||
10 | #define _ASM_AXP_PARPORT_H 1 | ||
11 | |||
12 | static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma); | ||
13 | static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma) | ||
14 | { | ||
15 | return parport_pc_find_isa_ports (autoirq, autodma); | ||
16 | } | ||
17 | |||
18 | #endif /* !(_ASM_AXP_PARPORT_H) */ | ||
diff --git a/arch/alpha/include/asm/pci.h b/arch/alpha/include/asm/pci.h new file mode 100644 index 000000000000..2a14302c17a3 --- /dev/null +++ b/arch/alpha/include/asm/pci.h | |||
@@ -0,0 +1,276 @@ | |||
1 | #ifndef __ALPHA_PCI_H | ||
2 | #define __ALPHA_PCI_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | |||
6 | #include <linux/spinlock.h> | ||
7 | #include <linux/dma-mapping.h> | ||
8 | #include <asm/scatterlist.h> | ||
9 | #include <asm/machvec.h> | ||
10 | |||
11 | /* | ||
12 | * The following structure is used to manage multiple PCI busses. | ||
13 | */ | ||
14 | |||
15 | struct pci_dev; | ||
16 | struct pci_bus; | ||
17 | struct resource; | ||
18 | struct pci_iommu_arena; | ||
19 | struct page; | ||
20 | |||
21 | /* A controller. Used to manage multiple PCI busses. */ | ||
22 | |||
23 | struct pci_controller { | ||
24 | struct pci_controller *next; | ||
25 | struct pci_bus *bus; | ||
26 | struct resource *io_space; | ||
27 | struct resource *mem_space; | ||
28 | |||
29 | /* The following are for reporting to userland. The invariant is | ||
30 | that if we report a BWX-capable dense memory, we do not report | ||
31 | a sparse memory at all, even if it exists. */ | ||
32 | unsigned long sparse_mem_base; | ||
33 | unsigned long dense_mem_base; | ||
34 | unsigned long sparse_io_base; | ||
35 | unsigned long dense_io_base; | ||
36 | |||
37 | /* This one's for the kernel only. It's in KSEG somewhere. */ | ||
38 | unsigned long config_space_base; | ||
39 | |||
40 | unsigned int index; | ||
41 | /* For compatibility with current (as of July 2003) pciutils | ||
42 | and XFree86. Eventually will be removed. */ | ||
43 | unsigned int need_domain_info; | ||
44 | |||
45 | struct pci_iommu_arena *sg_pci; | ||
46 | struct pci_iommu_arena *sg_isa; | ||
47 | |||
48 | void *sysdata; | ||
49 | }; | ||
50 | |||
51 | /* Override the logic in pci_scan_bus for skipping already-configured | ||
52 | bus numbers. */ | ||
53 | |||
54 | #define pcibios_assign_all_busses() 1 | ||
55 | #define pcibios_scan_all_fns(a, b) 0 | ||
56 | |||
57 | #define PCIBIOS_MIN_IO alpha_mv.min_io_address | ||
58 | #define PCIBIOS_MIN_MEM alpha_mv.min_mem_address | ||
59 | |||
60 | extern void pcibios_set_master(struct pci_dev *dev); | ||
61 | |||
62 | extern inline void pcibios_penalize_isa_irq(int irq, int active) | ||
63 | { | ||
64 | /* We don't do dynamic PCI IRQ allocation */ | ||
65 | } | ||
66 | |||
67 | /* IOMMU controls. */ | ||
68 | |||
69 | /* The PCI address space does not equal the physical memory address space. | ||
70 | The networking and block device layers use this boolean for bounce buffer | ||
71 | decisions. */ | ||
72 | #define PCI_DMA_BUS_IS_PHYS 0 | ||
73 | |||
74 | /* Allocate and map kernel buffer using consistent mode DMA for PCI | ||
75 | device. Returns non-NULL cpu-view pointer to the buffer if | ||
76 | successful and sets *DMA_ADDRP to the pci side dma address as well, | ||
77 | else DMA_ADDRP is undefined. */ | ||
78 | |||
79 | extern void *__pci_alloc_consistent(struct pci_dev *, size_t, | ||
80 | dma_addr_t *, gfp_t); | ||
81 | static inline void * | ||
82 | pci_alloc_consistent(struct pci_dev *dev, size_t size, dma_addr_t *dma) | ||
83 | { | ||
84 | return __pci_alloc_consistent(dev, size, dma, GFP_ATOMIC); | ||
85 | } | ||
86 | |||
87 | /* Free and unmap a consistent DMA buffer. CPU_ADDR and DMA_ADDR must | ||
88 | be values that were returned from pci_alloc_consistent. SIZE must | ||
89 | be the same as what was passed into pci_alloc_consistent. | ||
90 | References to the memory and mappings associated with CPU_ADDR or | ||
91 | DMA_ADDR past this call are illegal. */ | ||
92 | |||
93 | extern void pci_free_consistent(struct pci_dev *, size_t, void *, dma_addr_t); | ||
94 | |||
95 | /* Map a single buffer of the indicated size for PCI DMA in streaming mode. | ||
96 | The 32-bit PCI bus mastering address to use is returned. Once the device | ||
97 | is given the dma address, the device owns this memory until either | ||
98 | pci_unmap_single or pci_dma_sync_single_for_cpu is performed. */ | ||
99 | |||
100 | extern dma_addr_t pci_map_single(struct pci_dev *, void *, size_t, int); | ||
101 | |||
102 | /* Likewise, but for a page instead of an address. */ | ||
103 | extern dma_addr_t pci_map_page(struct pci_dev *, struct page *, | ||
104 | unsigned long, size_t, int); | ||
105 | |||
106 | /* Test for pci_map_single or pci_map_page having generated an error. */ | ||
107 | |||
108 | static inline int | ||
109 | pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr) | ||
110 | { | ||
111 | return dma_addr == 0; | ||
112 | } | ||
113 | |||
114 | /* Unmap a single streaming mode DMA translation. The DMA_ADDR and | ||
115 | SIZE must match what was provided for in a previous pci_map_single | ||
116 | call. All other usages are undefined. After this call, reads by | ||
117 | the cpu to the buffer are guaranteed to see whatever the device | ||
118 | wrote there. */ | ||
119 | |||
120 | extern void pci_unmap_single(struct pci_dev *, dma_addr_t, size_t, int); | ||
121 | extern void pci_unmap_page(struct pci_dev *, dma_addr_t, size_t, int); | ||
122 | |||
123 | /* pci_unmap_{single,page} is not a nop, thus... */ | ||
124 | #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ | ||
125 | dma_addr_t ADDR_NAME; | ||
126 | #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ | ||
127 | __u32 LEN_NAME; | ||
128 | #define pci_unmap_addr(PTR, ADDR_NAME) \ | ||
129 | ((PTR)->ADDR_NAME) | ||
130 | #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ | ||
131 | (((PTR)->ADDR_NAME) = (VAL)) | ||
132 | #define pci_unmap_len(PTR, LEN_NAME) \ | ||
133 | ((PTR)->LEN_NAME) | ||
134 | #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ | ||
135 | (((PTR)->LEN_NAME) = (VAL)) | ||
136 | |||
137 | /* Map a set of buffers described by scatterlist in streaming mode for | ||
138 | PCI DMA. This is the scatter-gather version of the above | ||
139 | pci_map_single interface. Here the scatter gather list elements | ||
140 | are each tagged with the appropriate PCI dma address and length. | ||
141 | They are obtained via sg_dma_{address,length}(SG). | ||
142 | |||
143 | NOTE: An implementation may be able to use a smaller number of DMA | ||
144 | address/length pairs than there are SG table elements. (for | ||
145 | example via virtual mapping capabilities) The routine returns the | ||
146 | number of addr/length pairs actually used, at most nents. | ||
147 | |||
148 | Device ownership issues as mentioned above for pci_map_single are | ||
149 | the same here. */ | ||
150 | |||
151 | extern int pci_map_sg(struct pci_dev *, struct scatterlist *, int, int); | ||
152 | |||
153 | /* Unmap a set of streaming mode DMA translations. Again, cpu read | ||
154 | rules concerning calls here are the same as for pci_unmap_single() | ||
155 | above. */ | ||
156 | |||
157 | extern void pci_unmap_sg(struct pci_dev *, struct scatterlist *, int, int); | ||
158 | |||
159 | /* Make physical memory consistent for a single streaming mode DMA | ||
160 | translation after a transfer and device currently has ownership | ||
161 | of the buffer. | ||
162 | |||
163 | If you perform a pci_map_single() but wish to interrogate the | ||
164 | buffer using the cpu, yet do not wish to tear down the PCI dma | ||
165 | mapping, you must call this function before doing so. At the next | ||
166 | point you give the PCI dma address back to the card, you must first | ||
167 | perform a pci_dma_sync_for_device, and then the device again owns | ||
168 | the buffer. */ | ||
169 | |||
170 | static inline void | ||
171 | pci_dma_sync_single_for_cpu(struct pci_dev *dev, dma_addr_t dma_addr, | ||
172 | long size, int direction) | ||
173 | { | ||
174 | /* Nothing to do. */ | ||
175 | } | ||
176 | |||
177 | static inline void | ||
178 | pci_dma_sync_single_for_device(struct pci_dev *dev, dma_addr_t dma_addr, | ||
179 | size_t size, int direction) | ||
180 | { | ||
181 | /* Nothing to do. */ | ||
182 | } | ||
183 | |||
184 | /* Make physical memory consistent for a set of streaming mode DMA | ||
185 | translations after a transfer. The same as pci_dma_sync_single_* | ||
186 | but for a scatter-gather list, same rules and usage. */ | ||
187 | |||
188 | static inline void | ||
189 | pci_dma_sync_sg_for_cpu(struct pci_dev *dev, struct scatterlist *sg, | ||
190 | int nents, int direction) | ||
191 | { | ||
192 | /* Nothing to do. */ | ||
193 | } | ||
194 | |||
195 | static inline void | ||
196 | pci_dma_sync_sg_for_device(struct pci_dev *dev, struct scatterlist *sg, | ||
197 | int nents, int direction) | ||
198 | { | ||
199 | /* Nothing to do. */ | ||
200 | } | ||
201 | |||
202 | /* Return whether the given PCI device DMA address mask can | ||
203 | be supported properly. For example, if your device can | ||
204 | only drive the low 24-bits during PCI bus mastering, then | ||
205 | you would pass 0x00ffffff as the mask to this function. */ | ||
206 | |||
207 | extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask); | ||
208 | |||
209 | #ifdef CONFIG_PCI | ||
210 | static inline void pci_dma_burst_advice(struct pci_dev *pdev, | ||
211 | enum pci_dma_burst_strategy *strat, | ||
212 | unsigned long *strategy_parameter) | ||
213 | { | ||
214 | unsigned long cacheline_size; | ||
215 | u8 byte; | ||
216 | |||
217 | pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte); | ||
218 | if (byte == 0) | ||
219 | cacheline_size = 1024; | ||
220 | else | ||
221 | cacheline_size = (int) byte * 4; | ||
222 | |||
223 | *strat = PCI_DMA_BURST_BOUNDARY; | ||
224 | *strategy_parameter = cacheline_size; | ||
225 | } | ||
226 | #endif | ||
227 | |||
228 | /* TODO: integrate with include/asm-generic/pci.h ? */ | ||
229 | static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) | ||
230 | { | ||
231 | return channel ? 15 : 14; | ||
232 | } | ||
233 | |||
234 | extern void pcibios_resource_to_bus(struct pci_dev *, struct pci_bus_region *, | ||
235 | struct resource *); | ||
236 | |||
237 | extern void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res, | ||
238 | struct pci_bus_region *region); | ||
239 | |||
240 | static inline struct resource * | ||
241 | pcibios_select_root(struct pci_dev *pdev, struct resource *res) | ||
242 | { | ||
243 | struct resource *root = NULL; | ||
244 | |||
245 | if (res->flags & IORESOURCE_IO) | ||
246 | root = &ioport_resource; | ||
247 | if (res->flags & IORESOURCE_MEM) | ||
248 | root = &iomem_resource; | ||
249 | |||
250 | return root; | ||
251 | } | ||
252 | |||
253 | #define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index | ||
254 | |||
255 | static inline int pci_proc_domain(struct pci_bus *bus) | ||
256 | { | ||
257 | struct pci_controller *hose = bus->sysdata; | ||
258 | return hose->need_domain_info; | ||
259 | } | ||
260 | |||
261 | struct pci_dev *alpha_gendev_to_pci(struct device *dev); | ||
262 | |||
263 | #endif /* __KERNEL__ */ | ||
264 | |||
265 | /* Values for the `which' argument to sys_pciconfig_iobase. */ | ||
266 | #define IOBASE_HOSE 0 | ||
267 | #define IOBASE_SPARSE_MEM 1 | ||
268 | #define IOBASE_DENSE_MEM 2 | ||
269 | #define IOBASE_SPARSE_IO 3 | ||
270 | #define IOBASE_DENSE_IO 4 | ||
271 | #define IOBASE_ROOT_BUS 5 | ||
272 | #define IOBASE_FROM_HOSE 0x10000 | ||
273 | |||
274 | extern struct pci_dev *isa_bridge; | ||
275 | |||
276 | #endif /* __ALPHA_PCI_H */ | ||
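
The ownership rules spelled out in the comments above translate into a fixed call sequence in drivers: map, hand the bus address to the device, sync back to the CPU before touching the buffer, unmap when done. The fragment below is only a schematic sketch of that sequence using the functions declared in this header; it assumes kernel context (struct pci_dev, PCI_DMA_FROMDEVICE, a caller-supplied buffer), and the names example_rx, pdev, buf and BUF_LEN are invented, so it is not compilable on its own.

    /* Schematic driver-side use of the streaming DMA interface above. */
    static int example_rx(struct pci_dev *pdev, void *buf)
    {
            dma_addr_t dma;

            dma = pci_map_single(pdev, buf, BUF_LEN, PCI_DMA_FROMDEVICE);
            if (pci_dma_mapping_error(pdev, dma))
                    return -EIO;

            /* ... point the device at "dma" and wait for the transfer ... */

            /* Device is done: give ownership back to the CPU before reading buf. */
            pci_dma_sync_single_for_cpu(pdev, dma, BUF_LEN, PCI_DMA_FROMDEVICE);
            /* ... inspect buf ... */

            pci_unmap_single(pdev, dma, BUF_LEN, PCI_DMA_FROMDEVICE);
            return 0;
    }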
diff --git a/arch/alpha/include/asm/percpu.h b/arch/alpha/include/asm/percpu.h new file mode 100644 index 000000000000..3495e8e00d70 --- /dev/null +++ b/arch/alpha/include/asm/percpu.h | |||
@@ -0,0 +1,78 @@ | |||
1 | #ifndef __ALPHA_PERCPU_H | ||
2 | #define __ALPHA_PERCPU_H | ||
3 | #include <linux/compiler.h> | ||
4 | #include <linux/threads.h> | ||
5 | |||
6 | /* | ||
7 | * Determine the real variable name from the name visible in the | ||
8 | * kernel sources. | ||
9 | */ | ||
10 | #define per_cpu_var(var) per_cpu__##var | ||
11 | |||
12 | #ifdef CONFIG_SMP | ||
13 | |||
14 | /* | ||
15 | * per_cpu_offset() is the offset that has to be added to a | ||
16 | * percpu variable to get to the instance for a certain processor. | ||
17 | */ | ||
18 | extern unsigned long __per_cpu_offset[NR_CPUS]; | ||
19 | |||
20 | #define per_cpu_offset(x) (__per_cpu_offset[x]) | ||
21 | |||
22 | #define __my_cpu_offset per_cpu_offset(raw_smp_processor_id()) | ||
23 | #ifdef CONFIG_DEBUG_PREEMPT | ||
24 | #define my_cpu_offset per_cpu_offset(smp_processor_id()) | ||
25 | #else | ||
26 | #define my_cpu_offset __my_cpu_offset | ||
27 | #endif | ||
28 | |||
29 | #ifndef MODULE | ||
30 | #define SHIFT_PERCPU_PTR(var, offset) RELOC_HIDE(&per_cpu_var(var), (offset)) | ||
31 | #define PER_CPU_ATTRIBUTES | ||
32 | #else | ||
33 | /* | ||
34 | * To calculate addresses of locally defined variables, GCC uses a 32-bit | ||
35 | * displacement from the GP, which doesn't work for per-cpu variables in | ||
36 | * modules, as the offset to the kernel per-cpu area is way above 4G. | ||
37 | * | ||
38 | * This forces allocation of a GOT entry for the per-cpu variable using | ||
39 | * an ldq instruction with a 'literal' relocation. | ||
40 | */ | ||
41 | #define SHIFT_PERCPU_PTR(var, offset) ({ \ | ||
42 | extern int simple_identifier_##var(void); \ | ||
43 | unsigned long __ptr, tmp_gp; \ | ||
44 | asm ( "br %1, 1f \n\ | ||
45 | 1: ldgp %1, 0(%1) \n\ | ||
46 | ldq %0, per_cpu__" #var"(%1)\t!literal" \ | ||
47 | : "=&r"(__ptr), "=&r"(tmp_gp)); \ | ||
48 | (typeof(&per_cpu_var(var)))(__ptr + (offset)); }) | ||
49 | |||
50 | #define PER_CPU_ATTRIBUTES __used | ||
51 | |||
52 | #endif /* MODULE */ | ||
53 | |||
54 | /* | ||
55 | * A percpu variable may point to a discarded region. The following are | ||
56 | * established ways to produce a usable pointer from the percpu variable | ||
57 | * offset. | ||
58 | */ | ||
59 | #define per_cpu(var, cpu) \ | ||
60 | (*SHIFT_PERCPU_PTR(var, per_cpu_offset(cpu))) | ||
61 | #define __get_cpu_var(var) \ | ||
62 | (*SHIFT_PERCPU_PTR(var, my_cpu_offset)) | ||
63 | #define __raw_get_cpu_var(var) \ | ||
64 | (*SHIFT_PERCPU_PTR(var, __my_cpu_offset)) | ||
65 | |||
66 | #else /* ! SMP */ | ||
67 | |||
68 | #define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var))) | ||
69 | #define __get_cpu_var(var) per_cpu_var(var) | ||
70 | #define __raw_get_cpu_var(var) per_cpu_var(var) | ||
71 | |||
72 | #define PER_CPU_ATTRIBUTES | ||
73 | |||
74 | #endif /* SMP */ | ||
75 | |||
76 | #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu_var(name) | ||
77 | |||
78 | #endif /* __ALPHA_PERCPU_H */ | ||
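For orientation, a minimal sketch of how these accessors are typically used by generic code, assuming a hypothetical per-cpu variable (DEFINE_PER_CPU and for_each_possible_cpu come from linux/percpu.h and linux/cpumask.h, not from this header):

	/* Illustrative usage only -- the variable name is hypothetical. */
	DEFINE_PER_CPU(unsigned long, my_counter);

	static void bump_counters(void)
	{
		int cpu;

		/* this CPU's instance; callers normally hold preemption off */
		__get_cpu_var(my_counter)++;

		/* every CPU's instance, by explicit CPU number */
		for_each_possible_cpu(cpu)
			printk(KERN_INFO "cpu%d: %lu\n",
			       cpu, per_cpu(my_counter, cpu));
	}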
diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h new file mode 100644 index 000000000000..fd090155dccd --- /dev/null +++ b/arch/alpha/include/asm/pgalloc.h | |||
@@ -0,0 +1,83 @@ | |||
1 | #ifndef _ALPHA_PGALLOC_H | ||
2 | #define _ALPHA_PGALLOC_H | ||
3 | |||
4 | #include <linux/mm.h> | ||
5 | #include <linux/mmzone.h> | ||
6 | |||
7 | /* | ||
8 | * Allocate and free page tables. The xxx_kernel() versions are | ||
9 | * used to allocate a kernel page table - this turns on ASN bits | ||
10 | * if any. | ||
11 | */ | ||
12 | |||
13 | static inline void | ||
14 | pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte) | ||
15 | { | ||
16 | pmd_set(pmd, (pte_t *)(page_to_pa(pte) + PAGE_OFFSET)); | ||
17 | } | ||
18 | #define pmd_pgtable(pmd) pmd_page(pmd) | ||
19 | |||
20 | static inline void | ||
21 | pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) | ||
22 | { | ||
23 | pmd_set(pmd, pte); | ||
24 | } | ||
25 | |||
26 | static inline void | ||
27 | pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) | ||
28 | { | ||
29 | pgd_set(pgd, pmd); | ||
30 | } | ||
31 | |||
32 | extern pgd_t *pgd_alloc(struct mm_struct *mm); | ||
33 | |||
34 | static inline void | ||
35 | pgd_free(struct mm_struct *mm, pgd_t *pgd) | ||
36 | { | ||
37 | free_page((unsigned long)pgd); | ||
38 | } | ||
39 | |||
40 | static inline pmd_t * | ||
41 | pmd_alloc_one(struct mm_struct *mm, unsigned long address) | ||
42 | { | ||
43 | pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); | ||
44 | return ret; | ||
45 | } | ||
46 | |||
47 | static inline void | ||
48 | pmd_free(struct mm_struct *mm, pmd_t *pmd) | ||
49 | { | ||
50 | free_page((unsigned long)pmd); | ||
51 | } | ||
52 | |||
53 | extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr); | ||
54 | |||
55 | static inline void | ||
56 | pte_free_kernel(struct mm_struct *mm, pte_t *pte) | ||
57 | { | ||
58 | free_page((unsigned long)pte); | ||
59 | } | ||
60 | |||
61 | static inline pgtable_t | ||
62 | pte_alloc_one(struct mm_struct *mm, unsigned long address) | ||
63 | { | ||
64 | pte_t *pte = pte_alloc_one_kernel(mm, address); | ||
65 | struct page *page; | ||
66 | |||
67 | if (!pte) | ||
68 | return NULL; | ||
69 | page = virt_to_page(pte); | ||
70 | pgtable_page_ctor(page); | ||
71 | return page; | ||
72 | } | ||
73 | |||
74 | static inline void | ||
75 | pte_free(struct mm_struct *mm, pgtable_t page) | ||
76 | { | ||
77 | pgtable_page_dtor(page); | ||
78 | __free_page(page); | ||
79 | } | ||
80 | |||
81 | #define check_pgt_cache() do { } while (0) | ||
82 | |||
83 | #endif /* _ALPHA_PGALLOC_H */ | ||
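A hedged sketch of how the allocation and populate helpers above pair up, in the spirit of what mm/memory.c does (the function is illustrative, error handling and locking are omitted):

	/* Illustrative only: allocate a pte page and hook it into a pmd slot. */
	static int example_map_pte(struct mm_struct *mm, pmd_t *pmd,
				   unsigned long address)
	{
		pgtable_t new = pte_alloc_one(mm, address);	/* zeroed page + ctor */

		if (!new)
			return -ENOMEM;

		if (pmd_none(*pmd))
			pmd_populate(mm, pmd, new);		/* install the page   */
		else
			pte_free(mm, new);			/* lost a race: free  */

		return 0;
	}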
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h new file mode 100644 index 000000000000..3f0c59f6d8aa --- /dev/null +++ b/arch/alpha/include/asm/pgtable.h | |||
@@ -0,0 +1,380 @@ | |||
1 | #ifndef _ALPHA_PGTABLE_H | ||
2 | #define _ALPHA_PGTABLE_H | ||
3 | |||
4 | #include <asm-generic/4level-fixup.h> | ||
5 | |||
6 | /* | ||
7 | * This file contains the functions and defines necessary to modify and use | ||
8 | * the Alpha page table tree. | ||
9 | * | ||
10 | * This hopefully works with any standard Alpha page-size, as defined | ||
11 | * in <asm/page.h> (currently 8192). | ||
12 | */ | ||
13 | #include <linux/mmzone.h> | ||
14 | |||
15 | #include <asm/page.h> | ||
16 | #include <asm/processor.h> /* For TASK_SIZE */ | ||
17 | #include <asm/machvec.h> | ||
18 | |||
19 | struct mm_struct; | ||
20 | struct vm_area_struct; | ||
21 | |||
22 | /* Certain architectures need to do special things when PTEs | ||
23 | * within a page table are directly modified. Thus, the following | ||
24 | * hook is made available. | ||
25 | */ | ||
26 | #define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval)) | ||
27 | #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) | ||
28 | |||
29 | /* PMD_SHIFT determines the size of the area a second-level page table can map */ | ||
30 | #define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3)) | ||
31 | #define PMD_SIZE (1UL << PMD_SHIFT) | ||
32 | #define PMD_MASK (~(PMD_SIZE-1)) | ||
33 | |||
34 | /* PGDIR_SHIFT determines what a third-level page table entry can map */ | ||
35 | #define PGDIR_SHIFT (PAGE_SHIFT + 2*(PAGE_SHIFT-3)) | ||
36 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | ||
37 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | ||
38 | |||
39 | /* | ||
40 | * Entries per page directory level: the Alpha is three-level, with | ||
41 | * all levels having a one-page page table. | ||
42 | */ | ||
43 | #define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3)) | ||
44 | #define PTRS_PER_PMD (1UL << (PAGE_SHIFT-3)) | ||
45 | #define PTRS_PER_PGD (1UL << (PAGE_SHIFT-3)) | ||
46 | #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) | ||
47 | #define FIRST_USER_ADDRESS 0 | ||
48 | |||
49 | /* Number of pointers that fit on a page: this will go away. */ | ||
50 | #define PTRS_PER_PAGE (1UL << (PAGE_SHIFT-3)) | ||
51 | |||
52 | #ifdef CONFIG_ALPHA_LARGE_VMALLOC | ||
53 | #define VMALLOC_START 0xfffffe0000000000 | ||
54 | #else | ||
55 | #define VMALLOC_START (-2*PGDIR_SIZE) | ||
56 | #endif | ||
57 | #define VMALLOC_END (-PGDIR_SIZE) | ||
58 | |||
59 | /* | ||
60 | * OSF/1 PAL-code-imposed page table bits | ||
61 | */ | ||
62 | #define _PAGE_VALID 0x0001 | ||
63 | #define _PAGE_FOR 0x0002 /* used for page protection (fault on read) */ | ||
64 | #define _PAGE_FOW 0x0004 /* used for page protection (fault on write) */ | ||
65 | #define _PAGE_FOE 0x0008 /* used for page protection (fault on exec) */ | ||
66 | #define _PAGE_ASM 0x0010 | ||
67 | #define _PAGE_KRE 0x0100 /* xxx - see below on the "accessed" bit */ | ||
68 | #define _PAGE_URE 0x0200 /* xxx */ | ||
69 | #define _PAGE_KWE 0x1000 /* used to do the dirty bit in software */ | ||
70 | #define _PAGE_UWE 0x2000 /* used to do the dirty bit in software */ | ||
71 | |||
72 | /* .. and these are ours ... */ | ||
73 | #define _PAGE_DIRTY 0x20000 | ||
74 | #define _PAGE_ACCESSED 0x40000 | ||
75 | #define _PAGE_FILE 0x80000 /* set:pagecache, unset:swap */ | ||
76 | |||
77 | /* | ||
78 | * NOTE! The "accessed" bit isn't necessarily exact: it can be kept exactly | ||
79 | * by software (use the KRE/URE/KWE/UWE bits appropriately), but I'll fake it. | ||
80 | * Under Linux/AXP, the "accessed" bit just means "read", and I'll just use | ||
81 | * the KRE/URE bits to watch for it. That way we don't need to overload the | ||
82 | * KWE/UWE bits with both handling dirty and accessed. | ||
83 | * | ||
84 | * Note that the kernel uses the accessed bit just to check whether to page | ||
85 | * out a page or not, so it doesn't have to be exact anyway. | ||
86 | */ | ||
87 | |||
88 | #define __DIRTY_BITS (_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE) | ||
89 | #define __ACCESS_BITS (_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE) | ||
90 | |||
91 | #define _PFN_MASK 0xFFFFFFFF00000000UL | ||
92 | |||
93 | #define _PAGE_TABLE (_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS) | ||
94 | #define _PAGE_CHG_MASK (_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS) | ||
95 | |||
96 | /* | ||
97 | * All the normal masks have the "page accessed" bits on, as any time they are used, | ||
98 | * the page is accessed. They are cleared only by the page-out routines | ||
99 | */ | ||
100 | #define PAGE_NONE __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE) | ||
101 | #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS) | ||
102 | #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) | ||
103 | #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) | ||
104 | #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE) | ||
105 | |||
106 | #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x)) | ||
107 | |||
108 | #define _PAGE_P(x) _PAGE_NORMAL((x) | (((x) & _PAGE_FOW)?0:_PAGE_FOW)) | ||
109 | #define _PAGE_S(x) _PAGE_NORMAL(x) | ||
110 | |||
111 | /* | ||
112 | * The hardware can handle write-only mappings, but as the Alpha | ||
113 | * architecture does byte-wide writes with a read-modify-write | ||
114 | * sequence, it's not practical to have write-without-read privs. | ||
115 | * Thus the "-w- -> rw-" and "-wx -> rwx" mapping here (and in | ||
116 | * arch/alpha/mm/fault.c) | ||
117 | */ | ||
118 | /* xwr */ | ||
119 | #define __P000 _PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR) | ||
120 | #define __P001 _PAGE_P(_PAGE_FOE | _PAGE_FOW) | ||
121 | #define __P010 _PAGE_P(_PAGE_FOE) | ||
122 | #define __P011 _PAGE_P(_PAGE_FOE) | ||
123 | #define __P100 _PAGE_P(_PAGE_FOW | _PAGE_FOR) | ||
124 | #define __P101 _PAGE_P(_PAGE_FOW) | ||
125 | #define __P110 _PAGE_P(0) | ||
126 | #define __P111 _PAGE_P(0) | ||
127 | |||
128 | #define __S000 _PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR) | ||
129 | #define __S001 _PAGE_S(_PAGE_FOE | _PAGE_FOW) | ||
130 | #define __S010 _PAGE_S(_PAGE_FOE) | ||
131 | #define __S011 _PAGE_S(_PAGE_FOE) | ||
132 | #define __S100 _PAGE_S(_PAGE_FOW | _PAGE_FOR) | ||
133 | #define __S101 _PAGE_S(_PAGE_FOW) | ||
134 | #define __S110 _PAGE_S(0) | ||
135 | #define __S111 _PAGE_S(0) | ||
136 | |||
137 | /* | ||
138 | * pgprot_noncached() is only for infiniband pci support, and a real | ||
139 | * implementation for RAM would be more complicated. | ||
140 | */ | ||
141 | #define pgprot_noncached(prot) (prot) | ||
142 | |||
143 | /* | ||
144 | * BAD_PAGETABLE is used when we need a bogus page-table, while | ||
145 | * BAD_PAGE is used for a bogus page. | ||
146 | * | ||
147 | * ZERO_PAGE is a global shared page that is always zero: used | ||
148 | * for zero-mapped memory areas etc.. | ||
149 | */ | ||
150 | extern pte_t __bad_page(void); | ||
151 | extern pmd_t * __bad_pagetable(void); | ||
152 | |||
153 | extern unsigned long __zero_page(void); | ||
154 | |||
155 | #define BAD_PAGETABLE __bad_pagetable() | ||
156 | #define BAD_PAGE __bad_page() | ||
157 | #define ZERO_PAGE(vaddr) (virt_to_page(ZERO_PGE)) | ||
158 | |||
159 | /* number of bits that fit into a memory pointer */ | ||
160 | #define BITS_PER_PTR (8*sizeof(unsigned long)) | ||
161 | |||
162 | /* to align the pointer to a pointer address */ | ||
163 | #define PTR_MASK (~(sizeof(void*)-1)) | ||
164 | |||
165 | /* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */ | ||
166 | #define SIZEOF_PTR_LOG2 3 | ||
167 | |||
168 | /* to find an entry in a page-table */ | ||
169 | #define PAGE_PTR(address) \ | ||
170 | ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK) | ||
171 | |||
172 | /* | ||
173 | * On certain platforms whose physical address space can overlap KSEG, | ||
174 | * namely EV6 and above, we must re-twiddle the physaddr to restore the | ||
175 | * correct high-order bits. | ||
176 | * | ||
177 | * This is extremely confusing until you realize that this is actually | ||
178 | * just working around a userspace bug. The X server was intending to | ||
179 | * provide the physical address but instead provided the KSEG address. | ||
180 | * Or tried to, except it's not representable. | ||
181 | * | ||
182 | * On Tsunami there's nothing meaningful at 0x40000000000, so this is | ||
183 | * a safe thing to do. Come the first core logic that does put something | ||
184 | * in this area -- memory or whathaveyou -- then this hack will have | ||
185 | * to go away. So be prepared! | ||
186 | */ | ||
187 | |||
188 | #if defined(CONFIG_ALPHA_GENERIC) && defined(USE_48_BIT_KSEG) | ||
189 | #error "EV6-only feature in a generic kernel" | ||
190 | #endif | ||
191 | #if defined(CONFIG_ALPHA_GENERIC) || \ | ||
192 | (defined(CONFIG_ALPHA_EV6) && !defined(USE_48_BIT_KSEG)) | ||
193 | #define KSEG_PFN (0xc0000000000UL >> PAGE_SHIFT) | ||
194 | #define PHYS_TWIDDLE(pfn) \ | ||
195 | ((((pfn) & KSEG_PFN) == (0x40000000000UL >> PAGE_SHIFT)) \ | ||
196 | ? ((pfn) ^= KSEG_PFN) : (pfn)) | ||
197 | #else | ||
198 | #define PHYS_TWIDDLE(pfn) (pfn) | ||
199 | #endif | ||
200 | |||
201 | /* | ||
202 | * Conversion functions: convert a page and protection to a page entry, | ||
203 | * and a page entry and page directory to the page they refer to. | ||
204 | */ | ||
205 | #ifndef CONFIG_DISCONTIGMEM | ||
206 | #define page_to_pa(page) (((page) - mem_map) << PAGE_SHIFT) | ||
207 | |||
208 | #define pte_pfn(pte) (pte_val(pte) >> 32) | ||
209 | #define pte_page(pte) pfn_to_page(pte_pfn(pte)) | ||
210 | #define mk_pte(page, pgprot) \ | ||
211 | ({ \ | ||
212 | pte_t pte; \ | ||
213 | \ | ||
214 | pte_val(pte) = (page_to_pfn(page) << 32) | pgprot_val(pgprot); \ | ||
215 | pte; \ | ||
216 | }) | ||
217 | #endif | ||
218 | |||
219 | extern inline pte_t pfn_pte(unsigned long physpfn, pgprot_t pgprot) | ||
220 | { pte_t pte; pte_val(pte) = (PHYS_TWIDDLE(physpfn) << 32) | pgprot_val(pgprot); return pte; } | ||
221 | |||
222 | extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | ||
223 | { pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; } | ||
224 | |||
225 | extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep) | ||
226 | { pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); } | ||
227 | |||
228 | extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp) | ||
229 | { pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); } | ||
230 | |||
231 | |||
232 | extern inline unsigned long | ||
233 | pmd_page_vaddr(pmd_t pmd) | ||
234 | { | ||
235 | return ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)) + PAGE_OFFSET; | ||
236 | } | ||
237 | |||
238 | #ifndef CONFIG_DISCONTIGMEM | ||
239 | #define pmd_page(pmd) (mem_map + ((pmd_val(pmd) & _PFN_MASK) >> 32)) | ||
240 | #define pgd_page(pgd) (mem_map + ((pgd_val(pgd) & _PFN_MASK) >> 32)) | ||
241 | #endif | ||
242 | |||
243 | extern inline unsigned long pgd_page_vaddr(pgd_t pgd) | ||
244 | { return PAGE_OFFSET + ((pgd_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); } | ||
245 | |||
246 | extern inline int pte_none(pte_t pte) { return !pte_val(pte); } | ||
247 | extern inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_VALID; } | ||
248 | extern inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | ||
249 | { | ||
250 | pte_val(*ptep) = 0; | ||
251 | } | ||
252 | |||
253 | extern inline int pmd_none(pmd_t pmd) { return !pmd_val(pmd); } | ||
254 | extern inline int pmd_bad(pmd_t pmd) { return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE; } | ||
255 | extern inline int pmd_present(pmd_t pmd) { return pmd_val(pmd) & _PAGE_VALID; } | ||
256 | extern inline void pmd_clear(pmd_t * pmdp) { pmd_val(*pmdp) = 0; } | ||
257 | |||
258 | extern inline int pgd_none(pgd_t pgd) { return !pgd_val(pgd); } | ||
259 | extern inline int pgd_bad(pgd_t pgd) { return (pgd_val(pgd) & ~_PFN_MASK) != _PAGE_TABLE; } | ||
260 | extern inline int pgd_present(pgd_t pgd) { return pgd_val(pgd) & _PAGE_VALID; } | ||
261 | extern inline void pgd_clear(pgd_t * pgdp) { pgd_val(*pgdp) = 0; } | ||
262 | |||
263 | /* | ||
264 | * The following only work if pte_present() is true. | ||
265 | * Undefined behaviour if not.. | ||
266 | */ | ||
267 | extern inline int pte_write(pte_t pte) { return !(pte_val(pte) & _PAGE_FOW); } | ||
268 | extern inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } | ||
269 | extern inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } | ||
270 | extern inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } | ||
271 | extern inline int pte_special(pte_t pte) { return 0; } | ||
272 | |||
273 | extern inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_FOW; return pte; } | ||
274 | extern inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~(__DIRTY_BITS); return pte; } | ||
275 | extern inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~(__ACCESS_BITS); return pte; } | ||
276 | extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) &= ~_PAGE_FOW; return pte; } | ||
277 | extern inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= __DIRTY_BITS; return pte; } | ||
278 | extern inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= __ACCESS_BITS; return pte; } | ||
279 | extern inline pte_t pte_mkspecial(pte_t pte) { return pte; } | ||
280 | |||
281 | #define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address)) | ||
282 | |||
283 | /* to find an entry in a kernel page-table-directory */ | ||
284 | #define pgd_offset_k(address) pgd_offset(&init_mm, (address)) | ||
285 | |||
286 | /* to find an entry in a page-table-directory. */ | ||
287 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) | ||
288 | #define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address)) | ||
289 | |||
290 | /* | ||
291 | * The smp_read_barrier_depends() in the following functions are required to | ||
292 | * order the load of *dir (the pointer in the top level page table) with any | ||
293 | * subsequent load of the returned pmd_t *ret (ret is data dependent on *dir). | ||
294 | * | ||
295 | * If this ordering is not enforced, the CPU might load an older value of | ||
296 | * *ret, which may be uninitialized data. See mm/memory.c:__pte_alloc for | ||
297 | * more details. | ||
298 | * | ||
299 | * Note that we never change the mm->pgd pointer after the task is running, so | ||
300 | * pgd_offset does not require such a barrier. | ||
301 | */ | ||
302 | |||
303 | /* Find an entry in the second-level page table.. */ | ||
304 | extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address) | ||
305 | { | ||
306 | pmd_t *ret = (pmd_t *) pgd_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1)); | ||
307 | smp_read_barrier_depends(); /* see above */ | ||
308 | return ret; | ||
309 | } | ||
310 | |||
311 | /* Find an entry in the third-level page table.. */ | ||
312 | extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address) | ||
313 | { | ||
314 | pte_t *ret = (pte_t *) pmd_page_vaddr(*dir) | ||
315 | + ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1)); | ||
316 | smp_read_barrier_depends(); /* see above */ | ||
317 | return ret; | ||
318 | } | ||
319 | |||
320 | #define pte_offset_map(dir,addr) pte_offset_kernel((dir),(addr)) | ||
321 | #define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir),(addr)) | ||
322 | #define pte_unmap(pte) do { } while (0) | ||
323 | #define pte_unmap_nested(pte) do { } while (0) | ||
324 | |||
325 | extern pgd_t swapper_pg_dir[1024]; | ||
326 | |||
327 | /* | ||
328 | * The Alpha doesn't have any external MMU info: the kernel page | ||
329 | * tables contain all the necessary information. | ||
330 | */ | ||
331 | extern inline void update_mmu_cache(struct vm_area_struct * vma, | ||
332 | unsigned long address, pte_t pte) | ||
333 | { | ||
334 | } | ||
335 | |||
336 | /* | ||
337 | * Non-present pages: high 24 bits are offset, next 8 bits type, | ||
338 | * low 32 bits zero. | ||
339 | */ | ||
340 | extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) | ||
341 | { pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; } | ||
342 | |||
343 | #define __swp_type(x) (((x).val >> 32) & 0xff) | ||
344 | #define __swp_offset(x) ((x).val >> 40) | ||
345 | #define __swp_entry(type, off) ((swp_entry_t) { pte_val(mk_swap_pte((type), (off))) }) | ||
346 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) | ||
347 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) | ||
348 | |||
349 | #define pte_to_pgoff(pte) (pte_val(pte) >> 32) | ||
350 | #define pgoff_to_pte(off) ((pte_t) { ((off) << 32) | _PAGE_FILE }) | ||
351 | |||
352 | #define PTE_FILE_MAX_BITS 32 | ||
353 | |||
354 | #ifndef CONFIG_DISCONTIGMEM | ||
355 | #define kern_addr_valid(addr) (1) | ||
356 | #endif | ||
357 | |||
358 | #define io_remap_pfn_range(vma, start, pfn, size, prot) \ | ||
359 | remap_pfn_range(vma, start, pfn, size, prot) | ||
360 | |||
361 | #define pte_ERROR(e) \ | ||
362 | printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e)) | ||
363 | #define pmd_ERROR(e) \ | ||
364 | printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e)) | ||
365 | #define pgd_ERROR(e) \ | ||
366 | printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e)) | ||
367 | |||
368 | extern void paging_init(void); | ||
369 | |||
370 | #include <asm-generic/pgtable.h> | ||
371 | |||
372 | /* | ||
373 | * No page table caches to initialise | ||
374 | */ | ||
375 | #define pgtable_cache_init() do { } while (0) | ||
376 | |||
377 | /* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT. */ | ||
378 | #define HAVE_ARCH_UNMAPPED_AREA | ||
379 | |||
380 | #endif /* _ALPHA_PGTABLE_H */ | ||
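As a worked example of the level arithmetic above, assuming the usual 8 KB Alpha page (PAGE_SHIFT = 13): each level holds 1024 eight-byte entries, a PMD entry maps 8 MB, a PGD entry maps 8 GB, and the full tree spans 8 TB, of which TASK_SIZE (4 TB) uses 512 PGD slots. A small stand-alone sketch that just recomputes those constants:

	/* Stand-alone recomputation of the page-table geometry for PAGE_SHIFT = 13. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long page_shift  = 13;				/* 8 KB pages        */
		unsigned long ptrs        = 1UL << (page_shift - 3);	/* 1024 entries      */
		unsigned long pmd_shift   = page_shift + (page_shift - 3);	/* 23        */
		unsigned long pgdir_shift = page_shift + 2 * (page_shift - 3);	/* 33        */

		printf("entries per level: %lu\n", ptrs);			/* 1024      */
		printf("PMD_SIZE:   %lu MB\n", (1UL << pmd_shift) >> 20);	/* 8 MB      */
		printf("PGDIR_SIZE: %lu GB\n", (1UL << pgdir_shift) >> 30);	/* 8 GB      */
		printf("total span: %lu TB\n", (ptrs << pgdir_shift) >> 40);	/* 8 TB      */
		return 0;
	}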
diff --git a/arch/alpha/include/asm/poll.h b/arch/alpha/include/asm/poll.h new file mode 100644 index 000000000000..c98509d3149e --- /dev/null +++ b/arch/alpha/include/asm/poll.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/poll.h> | |||
diff --git a/arch/alpha/include/asm/posix_types.h b/arch/alpha/include/asm/posix_types.h new file mode 100644 index 000000000000..db167413300b --- /dev/null +++ b/arch/alpha/include/asm/posix_types.h | |||
@@ -0,0 +1,123 @@ | |||
1 | #ifndef _ALPHA_POSIX_TYPES_H | ||
2 | #define _ALPHA_POSIX_TYPES_H | ||
3 | |||
4 | /* | ||
5 | * This file is generally used by user-level software, so you need to | ||
6 | * be a little careful about namespace pollution etc. Also, we cannot | ||
7 | * assume GCC is being used. | ||
8 | */ | ||
9 | |||
10 | typedef unsigned int __kernel_ino_t; | ||
11 | typedef unsigned int __kernel_mode_t; | ||
12 | typedef unsigned int __kernel_nlink_t; | ||
13 | typedef long __kernel_off_t; | ||
14 | typedef long long __kernel_loff_t; | ||
15 | typedef int __kernel_pid_t; | ||
16 | typedef int __kernel_ipc_pid_t; | ||
17 | typedef unsigned int __kernel_uid_t; | ||
18 | typedef unsigned int __kernel_gid_t; | ||
19 | typedef unsigned long __kernel_size_t; | ||
20 | typedef long __kernel_ssize_t; | ||
21 | typedef long __kernel_ptrdiff_t; | ||
22 | typedef long __kernel_time_t; | ||
23 | typedef long __kernel_suseconds_t; | ||
24 | typedef long __kernel_clock_t; | ||
25 | typedef int __kernel_daddr_t; | ||
26 | typedef char * __kernel_caddr_t; | ||
27 | typedef unsigned long __kernel_sigset_t; /* at least 32 bits */ | ||
28 | typedef unsigned short __kernel_uid16_t; | ||
29 | typedef unsigned short __kernel_gid16_t; | ||
30 | typedef int __kernel_clockid_t; | ||
31 | typedef int __kernel_timer_t; | ||
32 | |||
33 | typedef struct { | ||
34 | int val[2]; | ||
35 | } __kernel_fsid_t; | ||
36 | |||
37 | typedef __kernel_uid_t __kernel_old_uid_t; | ||
38 | typedef __kernel_gid_t __kernel_old_gid_t; | ||
39 | typedef __kernel_uid_t __kernel_uid32_t; | ||
40 | typedef __kernel_gid_t __kernel_gid32_t; | ||
41 | |||
42 | typedef unsigned int __kernel_old_dev_t; | ||
43 | |||
44 | #ifdef __KERNEL__ | ||
45 | |||
46 | #ifndef __GNUC__ | ||
47 | |||
48 | #define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d)) | ||
49 | #define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d)) | ||
50 | #define __FD_ISSET(d, set) (((set)->fds_bits[__FDELT(d)] & __FDMASK(d)) != 0) | ||
51 | #define __FD_ZERO(set) \ | ||
52 | ((void) memset ((void *) (set), 0, sizeof (__kernel_fd_set))) | ||
53 | |||
54 | #else /* __GNUC__ */ | ||
55 | |||
56 | /* With GNU C, use inline functions instead so args are evaluated only once: */ | ||
57 | |||
58 | #undef __FD_SET | ||
59 | static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp) | ||
60 | { | ||
61 | unsigned long _tmp = fd / __NFDBITS; | ||
62 | unsigned long _rem = fd % __NFDBITS; | ||
63 | fdsetp->fds_bits[_tmp] |= (1UL<<_rem); | ||
64 | } | ||
65 | |||
66 | #undef __FD_CLR | ||
67 | static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp) | ||
68 | { | ||
69 | unsigned long _tmp = fd / __NFDBITS; | ||
70 | unsigned long _rem = fd % __NFDBITS; | ||
71 | fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem); | ||
72 | } | ||
73 | |||
74 | #undef __FD_ISSET | ||
75 | static __inline__ int __FD_ISSET(unsigned long fd, const __kernel_fd_set *p) | ||
76 | { | ||
77 | unsigned long _tmp = fd / __NFDBITS; | ||
78 | unsigned long _rem = fd % __NFDBITS; | ||
79 | return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0; | ||
80 | } | ||
81 | |||
82 | /* | ||
83 | * This will unroll the loop for the normal constant case (8 ints, | ||
84 | * for a 256-bit fd_set) | ||
85 | */ | ||
86 | #undef __FD_ZERO | ||
87 | static __inline__ void __FD_ZERO(__kernel_fd_set *p) | ||
88 | { | ||
89 | unsigned long *tmp = p->fds_bits; | ||
90 | int i; | ||
91 | |||
92 | if (__builtin_constant_p(__FDSET_LONGS)) { | ||
93 | switch (__FDSET_LONGS) { | ||
94 | case 16: | ||
95 | tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; | ||
96 | tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0; | ||
97 | tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0; | ||
98 | tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0; | ||
99 | return; | ||
100 | |||
101 | case 8: | ||
102 | tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; | ||
103 | tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0; | ||
104 | return; | ||
105 | |||
106 | case 4: | ||
107 | tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; | ||
108 | return; | ||
109 | } | ||
110 | } | ||
111 | i = __FDSET_LONGS; | ||
112 | while (i) { | ||
113 | i--; | ||
114 | *tmp = 0; | ||
115 | tmp++; | ||
116 | } | ||
117 | } | ||
118 | |||
119 | #endif /* __GNUC__ */ | ||
120 | |||
121 | #endif /* __KERNEL__ */ | ||
122 | |||
123 | #endif /* _ALPHA_POSIX_TYPES_H */ | ||
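To make the fd_set index arithmetic above concrete: with 64-bit longs, __NFDBITS is 64, so descriptor 70 lands in word 1, bit 6. A small user-space sketch of the same computation, with hypothetical names (the fd value is just an example):

	/* Illustrative user-space recreation of the __FD_SET() index math. */
	#include <stdio.h>

	#define NFDBITS_EXAMPLE 64UL	/* bits per long on Alpha */

	int main(void)
	{
		unsigned long fd   = 70;			/* assumed example descriptor */
		unsigned long word = fd / NFDBITS_EXAMPLE;	/* 1 */
		unsigned long bit  = fd % NFDBITS_EXAMPLE;	/* 6 */

		printf("fd %lu -> fds_bits[%lu] |= 1UL << %lu\n", fd, word, bit);
		return 0;
	}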
diff --git a/arch/alpha/include/asm/processor.h b/arch/alpha/include/asm/processor.h new file mode 100644 index 000000000000..94afe5859301 --- /dev/null +++ b/arch/alpha/include/asm/processor.h | |||
@@ -0,0 +1,93 @@ | |||
1 | /* | ||
2 | * include/asm-alpha/processor.h | ||
3 | * | ||
4 | * Copyright (C) 1994 Linus Torvalds | ||
5 | */ | ||
6 | |||
7 | #ifndef __ASM_ALPHA_PROCESSOR_H | ||
8 | #define __ASM_ALPHA_PROCESSOR_H | ||
9 | |||
10 | #include <linux/personality.h> /* for ADDR_LIMIT_32BIT */ | ||
11 | |||
12 | /* | ||
13 | * Returns current instruction pointer ("program counter"). | ||
14 | */ | ||
15 | #define current_text_addr() \ | ||
16 | ({ void *__pc; __asm__ ("br %0,.+4" : "=r"(__pc)); __pc; }) | ||
17 | |||
18 | /* | ||
19 | * We have a 42-bit user address space: 4TB user VM... | ||
20 | */ | ||
21 | #define TASK_SIZE (0x40000000000UL) | ||
22 | |||
23 | #define STACK_TOP \ | ||
24 | (current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL) | ||
25 | |||
26 | #define STACK_TOP_MAX 0x00120000000UL | ||
27 | |||
28 | /* This decides where the kernel will search for a free chunk of vm | ||
29 | * space during mmap's. | ||
30 | */ | ||
31 | #define TASK_UNMAPPED_BASE \ | ||
32 | ((current->personality & ADDR_LIMIT_32BIT) ? 0x40000000 : TASK_SIZE / 2) | ||
33 | |||
34 | typedef struct { | ||
35 | unsigned long seg; | ||
36 | } mm_segment_t; | ||
37 | |||
38 | /* This is dead. Everything has been moved to thread_info. */ | ||
39 | struct thread_struct { }; | ||
40 | #define INIT_THREAD { } | ||
41 | |||
42 | /* Return saved PC of a blocked thread. */ | ||
43 | struct task_struct; | ||
44 | extern unsigned long thread_saved_pc(struct task_struct *); | ||
45 | |||
46 | /* Do necessary setup to start up a newly executed thread. */ | ||
47 | extern void start_thread(struct pt_regs *, unsigned long, unsigned long); | ||
48 | |||
49 | /* Free all resources held by a thread. */ | ||
50 | extern void release_thread(struct task_struct *); | ||
51 | |||
52 | /* Prepare to copy thread state - unlazy all lazy status */ | ||
53 | #define prepare_to_copy(tsk) do { } while (0) | ||
54 | |||
55 | /* Create a kernel thread without removing it from tasklists. */ | ||
56 | extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); | ||
57 | |||
58 | unsigned long get_wchan(struct task_struct *p); | ||
59 | |||
60 | #define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc) | ||
61 | |||
62 | #define KSTK_ESP(tsk) \ | ||
63 | ((tsk) == current ? rdusp() : task_thread_info(tsk)->pcb.usp) | ||
64 | |||
65 | #define cpu_relax() barrier() | ||
66 | |||
67 | #define ARCH_HAS_PREFETCH | ||
68 | #define ARCH_HAS_PREFETCHW | ||
69 | #define ARCH_HAS_SPINLOCK_PREFETCH | ||
70 | |||
71 | #ifndef CONFIG_SMP | ||
72 | /* Nothing to prefetch. */ | ||
73 | #define spin_lock_prefetch(lock) do { } while (0) | ||
74 | #endif | ||
75 | |||
76 | extern inline void prefetch(const void *ptr) | ||
77 | { | ||
78 | __builtin_prefetch(ptr, 0, 3); | ||
79 | } | ||
80 | |||
81 | extern inline void prefetchw(const void *ptr) | ||
82 | { | ||
83 | __builtin_prefetch(ptr, 1, 3); | ||
84 | } | ||
85 | |||
86 | #ifdef CONFIG_SMP | ||
87 | extern inline void spin_lock_prefetch(const void *ptr) | ||
88 | { | ||
89 | __builtin_prefetch(ptr, 1, 3); | ||
90 | } | ||
91 | #endif | ||
92 | |||
93 | #endif /* __ASM_ALPHA_PROCESSOR_H */ | ||
diff --git a/arch/alpha/include/asm/ptrace.h b/arch/alpha/include/asm/ptrace.h new file mode 100644 index 000000000000..32c7a5cddd59 --- /dev/null +++ b/arch/alpha/include/asm/ptrace.h | |||
@@ -0,0 +1,83 @@ | |||
1 | #ifndef _ASMAXP_PTRACE_H | ||
2 | #define _ASMAXP_PTRACE_H | ||
3 | |||
4 | |||
5 | /* | ||
6 | * This struct defines the way the registers are stored on the | ||
7 | * kernel stack during a system call or other kernel entry | ||
8 | * | ||
9 | * NOTE! I want to minimize the overhead of system calls, so this | ||
10 | * struct has as little information as possible. It does not have | ||
11 | * | ||
12 | * - floating point regs: the kernel doesn't change those | ||
13 | * - r9-15: saved by the C compiler | ||
14 | * | ||
15 | * This makes "fork()" and "exec()" a bit more complex, but should | ||
16 | * give us low system call latency. | ||
17 | */ | ||
18 | |||
19 | struct pt_regs { | ||
20 | unsigned long r0; | ||
21 | unsigned long r1; | ||
22 | unsigned long r2; | ||
23 | unsigned long r3; | ||
24 | unsigned long r4; | ||
25 | unsigned long r5; | ||
26 | unsigned long r6; | ||
27 | unsigned long r7; | ||
28 | unsigned long r8; | ||
29 | unsigned long r19; | ||
30 | unsigned long r20; | ||
31 | unsigned long r21; | ||
32 | unsigned long r22; | ||
33 | unsigned long r23; | ||
34 | unsigned long r24; | ||
35 | unsigned long r25; | ||
36 | unsigned long r26; | ||
37 | unsigned long r27; | ||
38 | unsigned long r28; | ||
39 | unsigned long hae; | ||
40 | /* JRP - These are the values provided to a0-a2 by PALcode */ | ||
41 | unsigned long trap_a0; | ||
42 | unsigned long trap_a1; | ||
43 | unsigned long trap_a2; | ||
44 | /* These are saved by PAL-code: */ | ||
45 | unsigned long ps; | ||
46 | unsigned long pc; | ||
47 | unsigned long gp; | ||
48 | unsigned long r16; | ||
49 | unsigned long r17; | ||
50 | unsigned long r18; | ||
51 | }; | ||
52 | |||
53 | /* | ||
54 | * This is the extended stack used by signal handlers and the context | ||
55 | * switcher: it's pushed after the normal "struct pt_regs". | ||
56 | */ | ||
57 | struct switch_stack { | ||
58 | unsigned long r9; | ||
59 | unsigned long r10; | ||
60 | unsigned long r11; | ||
61 | unsigned long r12; | ||
62 | unsigned long r13; | ||
63 | unsigned long r14; | ||
64 | unsigned long r15; | ||
65 | unsigned long r26; | ||
66 | unsigned long fp[32]; /* fp[31] is fpcr */ | ||
67 | }; | ||
68 | |||
69 | #ifdef __KERNEL__ | ||
70 | |||
71 | #define user_mode(regs) (((regs)->ps & 8) != 0) | ||
72 | #define instruction_pointer(regs) ((regs)->pc) | ||
73 | #define profile_pc(regs) instruction_pointer(regs) | ||
74 | extern void show_regs(struct pt_regs *); | ||
75 | |||
76 | #define task_pt_regs(task) \ | ||
77 | ((struct pt_regs *) (task_stack_page(task) + 2*PAGE_SIZE) - 1) | ||
78 | |||
79 | #define force_successful_syscall_return() (task_pt_regs(current)->r0 = 0) | ||
80 | |||
81 | #endif | ||
82 | |||
83 | #endif | ||
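The task_pt_regs() definition above encodes the kernel-stack layout: the stack is two pages and the saved pt_regs frame is the last thing before its top. A hedged sketch of the equivalent address arithmetic, with placeholder names rather than the real kernel helpers:

	/* Illustrative only: how task_pt_regs() locates the saved register frame. */
	struct pt_regs *example_pt_regs(void *stack_page, unsigned long page_size)
	{
		/* top of the 2*PAGE_SIZE stack, minus one pt_regs frame */
		return (struct pt_regs *)((char *)stack_page + 2 * page_size) - 1;
	}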
diff --git a/arch/alpha/include/asm/reg.h b/arch/alpha/include/asm/reg.h new file mode 100644 index 000000000000..86ff916fb069 --- /dev/null +++ b/arch/alpha/include/asm/reg.h | |||
@@ -0,0 +1,52 @@ | |||
1 | #ifndef __reg_h__ | ||
2 | #define __reg_h__ | ||
3 | |||
4 | /* | ||
5 | * Exception frame offsets. | ||
6 | */ | ||
7 | #define EF_V0 0 | ||
8 | #define EF_T0 1 | ||
9 | #define EF_T1 2 | ||
10 | #define EF_T2 3 | ||
11 | #define EF_T3 4 | ||
12 | #define EF_T4 5 | ||
13 | #define EF_T5 6 | ||
14 | #define EF_T6 7 | ||
15 | #define EF_T7 8 | ||
16 | #define EF_S0 9 | ||
17 | #define EF_S1 10 | ||
18 | #define EF_S2 11 | ||
19 | #define EF_S3 12 | ||
20 | #define EF_S4 13 | ||
21 | #define EF_S5 14 | ||
22 | #define EF_S6 15 | ||
23 | #define EF_A3 16 | ||
24 | #define EF_A4 17 | ||
25 | #define EF_A5 18 | ||
26 | #define EF_T8 19 | ||
27 | #define EF_T9 20 | ||
28 | #define EF_T10 21 | ||
29 | #define EF_T11 22 | ||
30 | #define EF_RA 23 | ||
31 | #define EF_T12 24 | ||
32 | #define EF_AT 25 | ||
33 | #define EF_SP 26 | ||
34 | #define EF_PS 27 | ||
35 | #define EF_PC 28 | ||
36 | #define EF_GP 29 | ||
37 | #define EF_A0 30 | ||
38 | #define EF_A1 31 | ||
39 | #define EF_A2 32 | ||
40 | |||
41 | #define EF_SIZE (33*8) | ||
42 | #define HWEF_SIZE (6*8) /* size of PAL frame (PS-A2) */ | ||
43 | |||
44 | #define EF_SSIZE (EF_SIZE - HWEF_SIZE) | ||
45 | |||
46 | /* | ||
47 | * Map register number into core file offset. | ||
48 | */ | ||
49 | #define CORE_REG(reg, ubase) \ | ||
50 | (((unsigned long *)((unsigned long)(ubase)))[reg]) | ||
51 | |||
52 | #endif /* __reg_h__ */ | ||
diff --git a/arch/alpha/include/asm/regdef.h b/arch/alpha/include/asm/regdef.h new file mode 100644 index 000000000000..142df9c4f8b8 --- /dev/null +++ b/arch/alpha/include/asm/regdef.h | |||
@@ -0,0 +1,44 @@ | |||
1 | #ifndef __alpha_regdef_h__ | ||
2 | #define __alpha_regdef_h__ | ||
3 | |||
4 | #define v0 $0 /* function return value */ | ||
5 | |||
6 | #define t0 $1 /* temporary registers (caller-saved) */ | ||
7 | #define t1 $2 | ||
8 | #define t2 $3 | ||
9 | #define t3 $4 | ||
10 | #define t4 $5 | ||
11 | #define t5 $6 | ||
12 | #define t6 $7 | ||
13 | #define t7 $8 | ||
14 | |||
15 | #define s0 $9 /* saved-registers (callee-saved registers) */ | ||
16 | #define s1 $10 | ||
17 | #define s2 $11 | ||
18 | #define s3 $12 | ||
19 | #define s4 $13 | ||
20 | #define s5 $14 | ||
21 | #define s6 $15 | ||
22 | #define fp s6 /* frame-pointer (s6 in frame-less procedures) */ | ||
23 | |||
24 | #define a0 $16 /* argument registers (caller-saved) */ | ||
25 | #define a1 $17 | ||
26 | #define a2 $18 | ||
27 | #define a3 $19 | ||
28 | #define a4 $20 | ||
29 | #define a5 $21 | ||
30 | |||
31 | #define t8 $22 /* more temps (caller-saved) */ | ||
32 | #define t9 $23 | ||
33 | #define t10 $24 | ||
34 | #define t11 $25 | ||
35 | #define ra $26 /* return address register */ | ||
36 | #define t12 $27 | ||
37 | |||
38 | #define pv t12 /* procedure-variable register */ | ||
39 | #define AT $at /* assembler temporary */ | ||
40 | #define gp $29 /* global pointer */ | ||
41 | #define sp $30 /* stack pointer */ | ||
42 | #define zero $31 /* reads as zero, writes are noops */ | ||
43 | |||
44 | #endif /* __alpha_regdef_h__ */ | ||
diff --git a/arch/alpha/include/asm/resource.h b/arch/alpha/include/asm/resource.h new file mode 100644 index 000000000000..c10874ff5973 --- /dev/null +++ b/arch/alpha/include/asm/resource.h | |||
@@ -0,0 +1,22 @@ | |||
1 | #ifndef _ALPHA_RESOURCE_H | ||
2 | #define _ALPHA_RESOURCE_H | ||
3 | |||
4 | /* | ||
5 | * Alpha/Linux-specific ordering of these four resource limit IDs, | ||
6 | * the rest comes from the generic header: | ||
7 | */ | ||
8 | #define RLIMIT_NOFILE 6 /* max number of open files */ | ||
9 | #define RLIMIT_AS 7 /* address space limit */ | ||
10 | #define RLIMIT_NPROC 8 /* max number of processes */ | ||
11 | #define RLIMIT_MEMLOCK 9 /* max locked-in-memory address space */ | ||
12 | |||
13 | /* | ||
14 | * SuS says limits have to be unsigned. Fine, it's unsigned, but | ||
15 | * we retain the old value for compatibility, especially with DU. | ||
16 | * When you run into the 2^63 barrier, you call me. | ||
17 | */ | ||
18 | #define RLIM_INFINITY 0x7ffffffffffffffful | ||
19 | |||
20 | #include <asm-generic/resource.h> | ||
21 | |||
22 | #endif /* _ALPHA_RESOURCE_H */ | ||
diff --git a/arch/alpha/include/asm/rtc.h b/arch/alpha/include/asm/rtc.h new file mode 100644 index 000000000000..4e854b1333eb --- /dev/null +++ b/arch/alpha/include/asm/rtc.h | |||
@@ -0,0 +1,10 @@ | |||
1 | #ifndef _ALPHA_RTC_H | ||
2 | #define _ALPHA_RTC_H | ||
3 | |||
4 | /* | ||
5 | * Alpha uses the default access methods for the RTC. | ||
6 | */ | ||
7 | |||
8 | #include <asm-generic/rtc.h> | ||
9 | |||
10 | #endif | ||
diff --git a/arch/alpha/include/asm/rwsem.h b/arch/alpha/include/asm/rwsem.h new file mode 100644 index 000000000000..1570c0b54336 --- /dev/null +++ b/arch/alpha/include/asm/rwsem.h | |||
@@ -0,0 +1,259 @@ | |||
1 | #ifndef _ALPHA_RWSEM_H | ||
2 | #define _ALPHA_RWSEM_H | ||
3 | |||
4 | /* | ||
5 | * Written by Ivan Kokshaysky <ink@jurassic.park.msu.ru>, 2001. | ||
6 | * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h | ||
7 | */ | ||
8 | |||
9 | #ifndef _LINUX_RWSEM_H | ||
10 | #error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead" | ||
11 | #endif | ||
12 | |||
13 | #ifdef __KERNEL__ | ||
14 | |||
15 | #include <linux/compiler.h> | ||
16 | #include <linux/list.h> | ||
17 | #include <linux/spinlock.h> | ||
18 | |||
19 | struct rwsem_waiter; | ||
20 | |||
21 | extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); | ||
22 | extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); | ||
23 | extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *); | ||
24 | extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); | ||
25 | |||
26 | /* | ||
27 | * the semaphore definition | ||
28 | */ | ||
29 | struct rw_semaphore { | ||
30 | long count; | ||
31 | #define RWSEM_UNLOCKED_VALUE 0x0000000000000000L | ||
32 | #define RWSEM_ACTIVE_BIAS 0x0000000000000001L | ||
33 | #define RWSEM_ACTIVE_MASK 0x00000000ffffffffL | ||
34 | #define RWSEM_WAITING_BIAS (-0x0000000100000000L) | ||
35 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS | ||
36 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) | ||
37 | spinlock_t wait_lock; | ||
38 | struct list_head wait_list; | ||
39 | }; | ||
40 | |||
41 | #define __RWSEM_INITIALIZER(name) \ | ||
42 | { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ | ||
43 | LIST_HEAD_INIT((name).wait_list) } | ||
44 | |||
45 | #define DECLARE_RWSEM(name) \ | ||
46 | struct rw_semaphore name = __RWSEM_INITIALIZER(name) | ||
47 | |||
48 | static inline void init_rwsem(struct rw_semaphore *sem) | ||
49 | { | ||
50 | sem->count = RWSEM_UNLOCKED_VALUE; | ||
51 | spin_lock_init(&sem->wait_lock); | ||
52 | INIT_LIST_HEAD(&sem->wait_list); | ||
53 | } | ||
54 | |||
55 | static inline void __down_read(struct rw_semaphore *sem) | ||
56 | { | ||
57 | long oldcount; | ||
58 | #ifndef CONFIG_SMP | ||
59 | oldcount = sem->count; | ||
60 | sem->count += RWSEM_ACTIVE_READ_BIAS; | ||
61 | #else | ||
62 | long temp; | ||
63 | __asm__ __volatile__( | ||
64 | "1: ldq_l %0,%1\n" | ||
65 | " addq %0,%3,%2\n" | ||
66 | " stq_c %2,%1\n" | ||
67 | " beq %2,2f\n" | ||
68 | " mb\n" | ||
69 | ".subsection 2\n" | ||
70 | "2: br 1b\n" | ||
71 | ".previous" | ||
72 | :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp) | ||
73 | :"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory"); | ||
74 | #endif | ||
75 | if (unlikely(oldcount < 0)) | ||
76 | rwsem_down_read_failed(sem); | ||
77 | } | ||
78 | |||
79 | /* | ||
80 | * trylock for reading -- returns 1 if successful, 0 if contention | ||
81 | */ | ||
82 | static inline int __down_read_trylock(struct rw_semaphore *sem) | ||
83 | { | ||
84 | long old, new, res; | ||
85 | |||
86 | res = sem->count; | ||
87 | do { | ||
88 | new = res + RWSEM_ACTIVE_READ_BIAS; | ||
89 | if (new <= 0) | ||
90 | break; | ||
91 | old = res; | ||
92 | res = cmpxchg(&sem->count, old, new); | ||
93 | } while (res != old); | ||
94 | return res >= 0 ? 1 : 0; | ||
95 | } | ||
96 | |||
97 | static inline void __down_write(struct rw_semaphore *sem) | ||
98 | { | ||
99 | long oldcount; | ||
100 | #ifndef CONFIG_SMP | ||
101 | oldcount = sem->count; | ||
102 | sem->count += RWSEM_ACTIVE_WRITE_BIAS; | ||
103 | #else | ||
104 | long temp; | ||
105 | __asm__ __volatile__( | ||
106 | "1: ldq_l %0,%1\n" | ||
107 | " addq %0,%3,%2\n" | ||
108 | " stq_c %2,%1\n" | ||
109 | " beq %2,2f\n" | ||
110 | " mb\n" | ||
111 | ".subsection 2\n" | ||
112 | "2: br 1b\n" | ||
113 | ".previous" | ||
114 | :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp) | ||
115 | :"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory"); | ||
116 | #endif | ||
117 | if (unlikely(oldcount)) | ||
118 | rwsem_down_write_failed(sem); | ||
119 | } | ||
120 | |||
121 | /* | ||
122 | * trylock for writing -- returns 1 if successful, 0 if contention | ||
123 | */ | ||
124 | static inline int __down_write_trylock(struct rw_semaphore *sem) | ||
125 | { | ||
126 | long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE, | ||
127 | RWSEM_ACTIVE_WRITE_BIAS); | ||
128 | if (ret == RWSEM_UNLOCKED_VALUE) | ||
129 | return 1; | ||
130 | return 0; | ||
131 | } | ||
132 | |||
133 | static inline void __up_read(struct rw_semaphore *sem) | ||
134 | { | ||
135 | long oldcount; | ||
136 | #ifndef CONFIG_SMP | ||
137 | oldcount = sem->count; | ||
138 | sem->count -= RWSEM_ACTIVE_READ_BIAS; | ||
139 | #else | ||
140 | long temp; | ||
141 | __asm__ __volatile__( | ||
142 | " mb\n" | ||
143 | "1: ldq_l %0,%1\n" | ||
144 | " subq %0,%3,%2\n" | ||
145 | " stq_c %2,%1\n" | ||
146 | " beq %2,2f\n" | ||
147 | ".subsection 2\n" | ||
148 | "2: br 1b\n" | ||
149 | ".previous" | ||
150 | :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp) | ||
151 | :"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory"); | ||
152 | #endif | ||
153 | if (unlikely(oldcount < 0)) | ||
154 | if ((int)oldcount - RWSEM_ACTIVE_READ_BIAS == 0) | ||
155 | rwsem_wake(sem); | ||
156 | } | ||
157 | |||
158 | static inline void __up_write(struct rw_semaphore *sem) | ||
159 | { | ||
160 | long count; | ||
161 | #ifndef CONFIG_SMP | ||
162 | sem->count -= RWSEM_ACTIVE_WRITE_BIAS; | ||
163 | count = sem->count; | ||
164 | #else | ||
165 | long temp; | ||
166 | __asm__ __volatile__( | ||
167 | " mb\n" | ||
168 | "1: ldq_l %0,%1\n" | ||
169 | " subq %0,%3,%2\n" | ||
170 | " stq_c %2,%1\n" | ||
171 | " beq %2,2f\n" | ||
172 | " subq %0,%3,%0\n" | ||
173 | ".subsection 2\n" | ||
174 | "2: br 1b\n" | ||
175 | ".previous" | ||
176 | :"=&r" (count), "=m" (sem->count), "=&r" (temp) | ||
177 | :"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory"); | ||
178 | #endif | ||
179 | if (unlikely(count)) | ||
180 | if ((int)count == 0) | ||
181 | rwsem_wake(sem); | ||
182 | } | ||
183 | |||
184 | /* | ||
185 | * downgrade write lock to read lock | ||
186 | */ | ||
187 | static inline void __downgrade_write(struct rw_semaphore *sem) | ||
188 | { | ||
189 | long oldcount; | ||
190 | #ifndef CONFIG_SMP | ||
191 | oldcount = sem->count; | ||
192 | sem->count -= RWSEM_WAITING_BIAS; | ||
193 | #else | ||
194 | long temp; | ||
195 | __asm__ __volatile__( | ||
196 | "1: ldq_l %0,%1\n" | ||
197 | " addq %0,%3,%2\n" | ||
198 | " stq_c %2,%1\n" | ||
199 | " beq %2,2f\n" | ||
200 | " mb\n" | ||
201 | ".subsection 2\n" | ||
202 | "2: br 1b\n" | ||
203 | ".previous" | ||
204 | :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp) | ||
205 | :"Ir" (-RWSEM_WAITING_BIAS), "m" (sem->count) : "memory"); | ||
206 | #endif | ||
207 | if (unlikely(oldcount < 0)) | ||
208 | rwsem_downgrade_wake(sem); | ||
209 | } | ||
210 | |||
211 | static inline void rwsem_atomic_add(long val, struct rw_semaphore *sem) | ||
212 | { | ||
213 | #ifndef CONFIG_SMP | ||
214 | sem->count += val; | ||
215 | #else | ||
216 | long temp; | ||
217 | __asm__ __volatile__( | ||
218 | "1: ldq_l %0,%1\n" | ||
219 | " addq %0,%2,%0\n" | ||
220 | " stq_c %0,%1\n" | ||
221 | " beq %0,2f\n" | ||
222 | ".subsection 2\n" | ||
223 | "2: br 1b\n" | ||
224 | ".previous" | ||
225 | :"=&r" (temp), "=m" (sem->count) | ||
226 | :"Ir" (val), "m" (sem->count)); | ||
227 | #endif | ||
228 | } | ||
229 | |||
230 | static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem) | ||
231 | { | ||
232 | #ifndef CONFIG_SMP | ||
233 | sem->count += val; | ||
234 | return sem->count; | ||
235 | #else | ||
236 | long ret, temp; | ||
237 | __asm__ __volatile__( | ||
238 | "1: ldq_l %0,%1\n" | ||
239 | " addq %0,%3,%2\n" | ||
240 | " addq %0,%3,%0\n" | ||
241 | " stq_c %2,%1\n" | ||
242 | " beq %2,2f\n" | ||
243 | ".subsection 2\n" | ||
244 | "2: br 1b\n" | ||
245 | ".previous" | ||
246 | :"=&r" (ret), "=m" (sem->count), "=&r" (temp) | ||
247 | :"Ir" (val), "m" (sem->count)); | ||
248 | |||
249 | return ret; | ||
250 | #endif | ||
251 | } | ||
252 | |||
253 | static inline int rwsem_is_locked(struct rw_semaphore *sem) | ||
254 | { | ||
255 | return (sem->count != 0); | ||
256 | } | ||
257 | |||
258 | #endif /* __KERNEL__ */ | ||
259 | #endif /* _ALPHA_RWSEM_H */ | ||
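The count encoding above becomes clearer with a worked example: each reader adds 1, a writer adds RWSEM_WAITING_BIAS + 1, so the count stays positive while only readers hold the lock and goes negative as soon as a writer is involved. A minimal stand-alone illustration of those values (not kernel code):

	/* Stand-alone illustration of the rw_semaphore count encoding. */
	#include <stdio.h>

	int main(void)
	{
		long active_bias  = 0x0000000000000001L;
		long waiting_bias = -0x0000000100000000L;
		long count = 0;				/* RWSEM_UNLOCKED_VALUE           */

		count += active_bias;			/* one reader: 1 (>= 0, no block) */
		printf("one reader: %ld\n", count);

		count = waiting_bias + active_bias;	/* one writer holds the lock      */
		printf("one writer: %ld (negative => new readers block)\n", count);

		count += active_bias;			/* a reader arrives while held    */
		printf("writer + contending reader: %ld\n", count);
		return 0;
	}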
diff --git a/arch/alpha/include/asm/scatterlist.h b/arch/alpha/include/asm/scatterlist.h new file mode 100644 index 000000000000..440747ca6349 --- /dev/null +++ b/arch/alpha/include/asm/scatterlist.h | |||
@@ -0,0 +1,25 @@ | |||
1 | #ifndef _ALPHA_SCATTERLIST_H | ||
2 | #define _ALPHA_SCATTERLIST_H | ||
3 | |||
4 | #include <asm/page.h> | ||
5 | #include <asm/types.h> | ||
6 | |||
7 | struct scatterlist { | ||
8 | #ifdef CONFIG_DEBUG_SG | ||
9 | unsigned long sg_magic; | ||
10 | #endif | ||
11 | unsigned long page_link; | ||
12 | unsigned int offset; | ||
13 | |||
14 | unsigned int length; | ||
15 | |||
16 | dma_addr_t dma_address; | ||
17 | __u32 dma_length; | ||
18 | }; | ||
19 | |||
20 | #define sg_dma_address(sg) ((sg)->dma_address) | ||
21 | #define sg_dma_len(sg) ((sg)->dma_length) | ||
22 | |||
23 | #define ISA_DMA_THRESHOLD (~0UL) | ||
24 | |||
25 | #endif /* !(_ALPHA_SCATTERLIST_H) */ | ||
diff --git a/arch/alpha/include/asm/sections.h b/arch/alpha/include/asm/sections.h new file mode 100644 index 000000000000..43b40edd6e44 --- /dev/null +++ b/arch/alpha/include/asm/sections.h | |||
@@ -0,0 +1,7 @@ | |||
1 | #ifndef _ALPHA_SECTIONS_H | ||
2 | #define _ALPHA_SECTIONS_H | ||
3 | |||
4 | /* nothing to see, move along */ | ||
5 | #include <asm-generic/sections.h> | ||
6 | |||
7 | #endif | ||
diff --git a/arch/alpha/include/asm/segment.h b/arch/alpha/include/asm/segment.h new file mode 100644 index 000000000000..0453d97daae7 --- /dev/null +++ b/arch/alpha/include/asm/segment.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef __ALPHA_SEGMENT_H | ||
2 | #define __ALPHA_SEGMENT_H | ||
3 | |||
4 | /* Only here because we have some old header files that expect it.. */ | ||
5 | |||
6 | #endif | ||
diff --git a/arch/alpha/include/asm/sembuf.h b/arch/alpha/include/asm/sembuf.h new file mode 100644 index 000000000000..7b38b1534784 --- /dev/null +++ b/arch/alpha/include/asm/sembuf.h | |||
@@ -0,0 +1,22 @@ | |||
1 | #ifndef _ALPHA_SEMBUF_H | ||
2 | #define _ALPHA_SEMBUF_H | ||
3 | |||
4 | /* | ||
5 | * The semid64_ds structure for alpha architecture. | ||
6 | * Note extra padding because this structure is passed back and forth | ||
7 | * between kernel and user space. | ||
8 | * | ||
9 | * Pad space is left for: | ||
10 | * - 2 miscellaneous 64-bit values | ||
11 | */ | ||
12 | |||
13 | struct semid64_ds { | ||
14 | struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ | ||
15 | __kernel_time_t sem_otime; /* last semop time */ | ||
16 | __kernel_time_t sem_ctime; /* last change time */ | ||
17 | unsigned long sem_nsems; /* no. of semaphores in array */ | ||
18 | unsigned long __unused1; | ||
19 | unsigned long __unused2; | ||
20 | }; | ||
21 | |||
22 | #endif /* _ALPHA_SEMBUF_H */ | ||
diff --git a/arch/alpha/include/asm/serial.h b/arch/alpha/include/asm/serial.h new file mode 100644 index 000000000000..9d263e8d8ccc --- /dev/null +++ b/arch/alpha/include/asm/serial.h | |||
@@ -0,0 +1,29 @@ | |||
1 | /* | ||
2 | * include/asm-alpha/serial.h | ||
3 | */ | ||
4 | |||
5 | |||
6 | /* | ||
7 | * This assumes you have a 1.8432 MHz clock for your UART. | ||
8 | * | ||
9 | * It'd be nice if someone built a serial card with a 24.576 MHz | ||
10 | * clock, since the 16550A is capable of handling a top speed of 1.5 | ||
11 | * megabits/second; but this requires the faster clock. | ||
12 | */ | ||
13 | #define BASE_BAUD ( 1843200 / 16 ) | ||
14 | |||
15 | /* Standard COM flags (except for COM4, because of the 8514 problem) */ | ||
16 | #ifdef CONFIG_SERIAL_DETECT_IRQ | ||
17 | #define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ) | ||
18 | #define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ) | ||
19 | #else | ||
20 | #define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST) | ||
21 | #define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF | ||
22 | #endif | ||
23 | |||
24 | #define SERIAL_PORT_DFNS \ | ||
25 | /* UART CLK PORT IRQ FLAGS */ \ | ||
26 | { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \ | ||
27 | { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \ | ||
28 | { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \ | ||
29 | { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */ | ||
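For reference, BASE_BAUD above works out to 1843200 / 16 = 115200, and a 16550-style UART divisor for a given line speed is BASE_BAUD divided by that speed. A one-off sketch of the arithmetic, using 9600 baud purely as an example:

	/* Illustrative: UART divisor from BASE_BAUD for an example line speed. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int base_baud = 1843200 / 16;	/* 115200 */
		unsigned int baud = 9600;		/* assumed example speed */

		printf("divisor for %u baud: %u\n", baud, base_baud / baud);	/* 12 */
		return 0;
	}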
diff --git a/arch/alpha/include/asm/setup.h b/arch/alpha/include/asm/setup.h new file mode 100644 index 000000000000..2e023a4aa317 --- /dev/null +++ b/arch/alpha/include/asm/setup.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef __ALPHA_SETUP_H | ||
2 | #define __ALPHA_SETUP_H | ||
3 | |||
4 | #define COMMAND_LINE_SIZE 256 | ||
5 | |||
6 | #endif | ||
diff --git a/arch/alpha/include/asm/sfp-machine.h b/arch/alpha/include/asm/sfp-machine.h new file mode 100644 index 000000000000..5fe63afbd474 --- /dev/null +++ b/arch/alpha/include/asm/sfp-machine.h | |||
@@ -0,0 +1,82 @@ | |||
1 | /* Machine-dependent software floating-point definitions. | ||
2 | Alpha kernel version. | ||
3 | Copyright (C) 1997,1998,1999 Free Software Foundation, Inc. | ||
4 | This file is part of the GNU C Library. | ||
5 | Contributed by Richard Henderson (rth@cygnus.com), | ||
6 | Jakub Jelinek (jakub@redhat.com) and | ||
7 | David S. Miller (davem@redhat.com). | ||
8 | |||
9 | The GNU C Library is free software; you can redistribute it and/or | ||
10 | modify it under the terms of the GNU Library General Public License as | ||
11 | published by the Free Software Foundation; either version 2 of the | ||
12 | License, or (at your option) any later version. | ||
13 | |||
14 | The GNU C Library is distributed in the hope that it will be useful, | ||
15 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
17 | Library General Public License for more details. | ||
18 | |||
19 | You should have received a copy of the GNU Library General Public | ||
20 | License along with the GNU C Library; see the file COPYING.LIB. If | ||
21 | not, write to the Free Software Foundation, Inc., | ||
22 | 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ | ||
23 | |||
24 | #ifndef _SFP_MACHINE_H | ||
25 | #define _SFP_MACHINE_H | ||
26 | |||
27 | #define _FP_W_TYPE_SIZE 64 | ||
28 | #define _FP_W_TYPE unsigned long | ||
29 | #define _FP_WS_TYPE signed long | ||
30 | #define _FP_I_TYPE long | ||
31 | |||
32 | #define _FP_MUL_MEAT_S(R,X,Y) \ | ||
33 | _FP_MUL_MEAT_1_imm(_FP_WFRACBITS_S,R,X,Y) | ||
34 | #define _FP_MUL_MEAT_D(R,X,Y) \ | ||
35 | _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm) | ||
36 | #define _FP_MUL_MEAT_Q(R,X,Y) \ | ||
37 | _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm) | ||
38 | |||
39 | #define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm) | ||
40 | #define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_1_udiv(D,R,X,Y) | ||
41 | #define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_2_udiv(Q,R,X,Y) | ||
42 | |||
43 | #define _FP_NANFRAC_S _FP_QNANBIT_S | ||
44 | #define _FP_NANFRAC_D _FP_QNANBIT_D | ||
45 | #define _FP_NANFRAC_Q _FP_QNANBIT_Q | ||
46 | #define _FP_NANSIGN_S 1 | ||
47 | #define _FP_NANSIGN_D 1 | ||
48 | #define _FP_NANSIGN_Q 1 | ||
49 | |||
50 | #define _FP_KEEPNANFRACP 1 | ||
51 | |||
52 | /* Alpha Architecture Handbook, 4.7.10.4 says that | ||
53 | * we should prefer any type of NaN in Fb, then Fa. | ||
54 | */ | ||
55 | #define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \ | ||
56 | do { \ | ||
57 | R##_s = Y##_s; \ | ||
58 | _FP_FRAC_COPY_##wc(R,X); \ | ||
59 | R##_c = FP_CLS_NAN; \ | ||
60 | } while (0) | ||
61 | |||
62 | /* Obtain the current rounding mode. */ | ||
63 | #define FP_ROUNDMODE mode | ||
64 | #define FP_RND_NEAREST (FPCR_DYN_NORMAL >> FPCR_DYN_SHIFT) | ||
65 | #define FP_RND_ZERO (FPCR_DYN_CHOPPED >> FPCR_DYN_SHIFT) | ||
66 | #define FP_RND_PINF (FPCR_DYN_PLUS >> FPCR_DYN_SHIFT) | ||
67 | #define FP_RND_MINF (FPCR_DYN_MINUS >> FPCR_DYN_SHIFT) | ||
68 | |||
69 | /* Exception flags. */ | ||
70 | #define FP_EX_INVALID IEEE_TRAP_ENABLE_INV | ||
71 | #define FP_EX_OVERFLOW IEEE_TRAP_ENABLE_OVF | ||
72 | #define FP_EX_UNDERFLOW IEEE_TRAP_ENABLE_UNF | ||
73 | #define FP_EX_DIVZERO IEEE_TRAP_ENABLE_DZE | ||
74 | #define FP_EX_INEXACT IEEE_TRAP_ENABLE_INE | ||
75 | #define FP_EX_DENORM IEEE_TRAP_ENABLE_DNO | ||
76 | |||
77 | #define FP_DENORM_ZERO (swcr & IEEE_MAP_DMZ) | ||
78 | |||
79 | /* We write the results always */ | ||
80 | #define FP_INHIBIT_RESULTS 0 | ||
81 | |||
82 | #endif | ||
diff --git a/arch/alpha/include/asm/shmbuf.h b/arch/alpha/include/asm/shmbuf.h new file mode 100644 index 000000000000..37ee84f05085 --- /dev/null +++ b/arch/alpha/include/asm/shmbuf.h | |||
@@ -0,0 +1,38 @@ | |||
1 | #ifndef _ALPHA_SHMBUF_H | ||
2 | #define _ALPHA_SHMBUF_H | ||
3 | |||
4 | /* | ||
5 | * The shmid64_ds structure for alpha architecture. | ||
6 | * Note extra padding because this structure is passed back and forth | ||
7 | * between kernel and user space. | ||
8 | * | ||
9 | * Pad space is left for: | ||
10 | * - 2 miscellaneous 64-bit values | ||
11 | */ | ||
12 | |||
13 | struct shmid64_ds { | ||
14 | struct ipc64_perm shm_perm; /* operation perms */ | ||
15 | size_t shm_segsz; /* size of segment (bytes) */ | ||
16 | __kernel_time_t shm_atime; /* last attach time */ | ||
17 | __kernel_time_t shm_dtime; /* last detach time */ | ||
18 | __kernel_time_t shm_ctime; /* last change time */ | ||
19 | __kernel_pid_t shm_cpid; /* pid of creator */ | ||
20 | __kernel_pid_t shm_lpid; /* pid of last operator */ | ||
21 | unsigned long shm_nattch; /* no. of current attaches */ | ||
22 | unsigned long __unused1; | ||
23 | unsigned long __unused2; | ||
24 | }; | ||
25 | |||
26 | struct shminfo64 { | ||
27 | unsigned long shmmax; | ||
28 | unsigned long shmmin; | ||
29 | unsigned long shmmni; | ||
30 | unsigned long shmseg; | ||
31 | unsigned long shmall; | ||
32 | unsigned long __unused1; | ||
33 | unsigned long __unused2; | ||
34 | unsigned long __unused3; | ||
35 | unsigned long __unused4; | ||
36 | }; | ||
37 | |||
38 | #endif /* _ALPHA_SHMBUF_H */ | ||
diff --git a/arch/alpha/include/asm/shmparam.h b/arch/alpha/include/asm/shmparam.h new file mode 100644 index 000000000000..cc901d58aebb --- /dev/null +++ b/arch/alpha/include/asm/shmparam.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef _ASMAXP_SHMPARAM_H | ||
2 | #define _ASMAXP_SHMPARAM_H | ||
3 | |||
4 | #define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ | ||
5 | |||
6 | #endif /* _ASMAXP_SHMPARAM_H */ | ||
diff --git a/arch/alpha/include/asm/sigcontext.h b/arch/alpha/include/asm/sigcontext.h new file mode 100644 index 000000000000..323cdb026198 --- /dev/null +++ b/arch/alpha/include/asm/sigcontext.h | |||
@@ -0,0 +1,34 @@ | |||
1 | #ifndef _ASMAXP_SIGCONTEXT_H | ||
2 | #define _ASMAXP_SIGCONTEXT_H | ||
3 | |||
4 | struct sigcontext { | ||
5 | /* | ||
6 | * What should we have here? I'd probably better use the same | ||
7 | * stack layout as OSF/1, just in case we ever want to try | ||
8 | * running their binaries.. | ||
9 | * | ||
10 | * This is the basic layout, but I don't know if we'll ever | ||
11 | * actually fill in all the values.. | ||
12 | */ | ||
13 | long sc_onstack; | ||
14 | long sc_mask; | ||
15 | long sc_pc; | ||
16 | long sc_ps; | ||
17 | long sc_regs[32]; | ||
18 | long sc_ownedfp; | ||
19 | long sc_fpregs[32]; | ||
20 | unsigned long sc_fpcr; | ||
21 | unsigned long sc_fp_control; | ||
22 | unsigned long sc_reserved1, sc_reserved2; | ||
23 | unsigned long sc_ssize; | ||
24 | char * sc_sbase; | ||
25 | unsigned long sc_traparg_a0; | ||
26 | unsigned long sc_traparg_a1; | ||
27 | unsigned long sc_traparg_a2; | ||
28 | unsigned long sc_fp_trap_pc; | ||
29 | unsigned long sc_fp_trigger_sum; | ||
30 | unsigned long sc_fp_trigger_inst; | ||
31 | }; | ||
32 | |||
33 | |||
34 | #endif | ||
diff --git a/arch/alpha/include/asm/siginfo.h b/arch/alpha/include/asm/siginfo.h new file mode 100644 index 000000000000..9822362a8424 --- /dev/null +++ b/arch/alpha/include/asm/siginfo.h | |||
@@ -0,0 +1,9 @@ | |||
1 | #ifndef _ALPHA_SIGINFO_H | ||
2 | #define _ALPHA_SIGINFO_H | ||
3 | |||
4 | #define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) | ||
5 | #define __ARCH_SI_TRAPNO | ||
6 | |||
7 | #include <asm-generic/siginfo.h> | ||
8 | |||
9 | #endif | ||
diff --git a/arch/alpha/include/asm/signal.h b/arch/alpha/include/asm/signal.h new file mode 100644 index 000000000000..13c2305d35ef --- /dev/null +++ b/arch/alpha/include/asm/signal.h | |||
@@ -0,0 +1,172 @@ | |||
1 | #ifndef _ASMAXP_SIGNAL_H | ||
2 | #define _ASMAXP_SIGNAL_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | |||
6 | /* Avoid too many header ordering problems. */ | ||
7 | struct siginfo; | ||
8 | |||
9 | #ifdef __KERNEL__ | ||
10 | /* Digital Unix defines 64 signals. Most things should be clean enough | ||
11 | to redefine this at will, if care is taken to make libc match. */ | ||
12 | |||
13 | #define _NSIG 64 | ||
14 | #define _NSIG_BPW 64 | ||
15 | #define _NSIG_WORDS (_NSIG / _NSIG_BPW) | ||
16 | |||
17 | typedef unsigned long old_sigset_t; /* at least 32 bits */ | ||
18 | |||
19 | typedef struct { | ||
20 | unsigned long sig[_NSIG_WORDS]; | ||
21 | } sigset_t; | ||
22 | |||
23 | #else | ||
24 | /* Here we must cater to libcs that poke about in kernel headers. */ | ||
25 | |||
26 | #define NSIG 32 | ||
27 | typedef unsigned long sigset_t; | ||
28 | |||
29 | #endif /* __KERNEL__ */ | ||
30 | |||
31 | |||
32 | /* | ||
33 | * Linux/AXP has different signal numbers than Linux/i386: I'm trying | ||
34 | * to make it OSF/1 binary compatible, at least for normal binaries. | ||
35 | */ | ||
36 | #define SIGHUP 1 | ||
37 | #define SIGINT 2 | ||
38 | #define SIGQUIT 3 | ||
39 | #define SIGILL 4 | ||
40 | #define SIGTRAP 5 | ||
41 | #define SIGABRT 6 | ||
42 | #define SIGEMT 7 | ||
43 | #define SIGFPE 8 | ||
44 | #define SIGKILL 9 | ||
45 | #define SIGBUS 10 | ||
46 | #define SIGSEGV 11 | ||
47 | #define SIGSYS 12 | ||
48 | #define SIGPIPE 13 | ||
49 | #define SIGALRM 14 | ||
50 | #define SIGTERM 15 | ||
51 | #define SIGURG 16 | ||
52 | #define SIGSTOP 17 | ||
53 | #define SIGTSTP 18 | ||
54 | #define SIGCONT 19 | ||
55 | #define SIGCHLD 20 | ||
56 | #define SIGTTIN 21 | ||
57 | #define SIGTTOU 22 | ||
58 | #define SIGIO 23 | ||
59 | #define SIGXCPU 24 | ||
60 | #define SIGXFSZ 25 | ||
61 | #define SIGVTALRM 26 | ||
62 | #define SIGPROF 27 | ||
63 | #define SIGWINCH 28 | ||
64 | #define SIGINFO 29 | ||
65 | #define SIGUSR1 30 | ||
66 | #define SIGUSR2 31 | ||
67 | |||
68 | #define SIGPOLL SIGIO | ||
69 | #define SIGPWR SIGINFO | ||
70 | #define SIGIOT SIGABRT | ||
71 | |||
72 | /* These should not be considered constants from userland. */ | ||
73 | #define SIGRTMIN 32 | ||
74 | #define SIGRTMAX _NSIG | ||
75 | |||
76 | /* | ||
77 | * SA_FLAGS values: | ||
78 | * | ||
79 | * SA_ONSTACK indicates that a registered stack_t will be used. | ||
80 | * SA_RESTART flag to get restarting signals (which were the default long ago) | ||
81 | * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. | ||
82 | * SA_RESETHAND clears the handler when the signal is delivered. | ||
83 | * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. | ||
84 | * SA_NODEFER prevents the current signal from being masked in the handler. | ||
85 | * | ||
86 | * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single | ||
87 | * Unix names RESETHAND and NODEFER respectively. | ||
88 | */ | ||
89 | |||
90 | #define SA_ONSTACK 0x00000001 | ||
91 | #define SA_RESTART 0x00000002 | ||
92 | #define SA_NOCLDSTOP 0x00000004 | ||
93 | #define SA_NODEFER 0x00000008 | ||
94 | #define SA_RESETHAND 0x00000010 | ||
95 | #define SA_NOCLDWAIT 0x00000020 | ||
96 | #define SA_SIGINFO 0x00000040 | ||
97 | |||
98 | #define SA_ONESHOT SA_RESETHAND | ||
99 | #define SA_NOMASK SA_NODEFER | ||
100 | |||
101 | /* | ||
102 | * sigaltstack controls | ||
103 | */ | ||
104 | #define SS_ONSTACK 1 | ||
105 | #define SS_DISABLE 2 | ||
106 | |||
107 | #define MINSIGSTKSZ 4096 | ||
108 | #define SIGSTKSZ 16384 | ||
109 | |||
110 | #define SIG_BLOCK 1 /* for blocking signals */ | ||
111 | #define SIG_UNBLOCK 2 /* for unblocking signals */ | ||
112 | #define SIG_SETMASK 3 /* for setting the signal mask */ | ||
113 | |||
114 | #include <asm-generic/signal.h> | ||
115 | |||
116 | #ifdef __KERNEL__ | ||
117 | struct osf_sigaction { | ||
118 | __sighandler_t sa_handler; | ||
119 | old_sigset_t sa_mask; | ||
120 | int sa_flags; | ||
121 | }; | ||
122 | |||
123 | struct sigaction { | ||
124 | __sighandler_t sa_handler; | ||
125 | unsigned long sa_flags; | ||
126 | sigset_t sa_mask; /* mask last for extensibility */ | ||
127 | }; | ||
128 | |||
129 | struct k_sigaction { | ||
130 | struct sigaction sa; | ||
131 | __sigrestore_t ka_restorer; | ||
132 | }; | ||
133 | #else | ||
134 | /* Here we must cater to libcs that poke about in kernel headers. */ | ||
135 | |||
136 | struct sigaction { | ||
137 | union { | ||
138 | __sighandler_t _sa_handler; | ||
139 | void (*_sa_sigaction)(int, struct siginfo *, void *); | ||
140 | } _u; | ||
141 | sigset_t sa_mask; | ||
142 | int sa_flags; | ||
143 | }; | ||
144 | |||
145 | #define sa_handler _u._sa_handler | ||
146 | #define sa_sigaction _u._sa_sigaction | ||
147 | |||
148 | #endif /* __KERNEL__ */ | ||
149 | |||
150 | typedef struct sigaltstack { | ||
151 | void __user *ss_sp; | ||
152 | int ss_flags; | ||
153 | size_t ss_size; | ||
154 | } stack_t; | ||
155 | |||
156 | /* sigstack(2) is deprecated, and will be withdrawn in a future version | ||
157 | of the X/Open CAE Specification. Use sigaltstack instead. It is only | ||
158 | implemented here for OSF/1 compatibility. */ | ||
159 | |||
160 | struct sigstack { | ||
161 | void __user *ss_sp; | ||
162 | int ss_onstack; | ||
163 | }; | ||
164 | |||
165 | #ifdef __KERNEL__ | ||
166 | #include <asm/sigcontext.h> | ||
167 | |||
168 | #define ptrace_signal_deliver(regs, cookie) do { } while (0) | ||
169 | |||
170 | #endif | ||
171 | |||
172 | #endif | ||
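Aside: the SA_* flag comments above describe generic sigaction(2) behavior rather than anything Alpha-specific. As a hedged userspace sketch (standard POSIX usage, not part of this header), installing a SIGCHLD handler that combines several of those flags might look like:

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static void on_sigchld(int sig, siginfo_t *info, void *ctx)
    {
            /* Only async-signal-safe work belongs here; info->si_pid
               identifies the child whose state changed. */
            (void)sig; (void)info; (void)ctx;
    }

    int main(void)
    {
            struct sigaction sa = { 0 };

            sa.sa_sigaction = on_sigchld;
            /* SA_SIGINFO selects the three-argument handler, SA_RESTART
               restarts interrupted syscalls, and SA_NOCLDSTOP suppresses
               SIGCHLD for stopped (as opposed to exited) children. */
            sa.sa_flags = SA_SIGINFO | SA_RESTART | SA_NOCLDSTOP;
            sigemptyset(&sa.sa_mask);

            if (sigaction(SIGCHLD, &sa, NULL) < 0) {
                    perror("sigaction");
                    return 1;
            }
            pause();        /* wait for a signal to arrive */
            return 0;
    }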
diff --git a/arch/alpha/include/asm/smp.h b/arch/alpha/include/asm/smp.h new file mode 100644 index 000000000000..544c69af8168 --- /dev/null +++ b/arch/alpha/include/asm/smp.h | |||
@@ -0,0 +1,62 @@ | |||
1 | #ifndef __ASM_SMP_H | ||
2 | #define __ASM_SMP_H | ||
3 | |||
4 | #include <linux/threads.h> | ||
5 | #include <linux/cpumask.h> | ||
6 | #include <linux/bitops.h> | ||
7 | #include <asm/pal.h> | ||
8 | |||
9 | /* HACK: Cabrio WHAMI return value is bogus if more than 8 bits used.. :-( */ | ||
10 | |||
11 | static __inline__ unsigned char | ||
12 | __hard_smp_processor_id(void) | ||
13 | { | ||
14 | register unsigned char __r0 __asm__("$0"); | ||
15 | __asm__ __volatile__( | ||
16 | "call_pal %1 #whami" | ||
17 | : "=r"(__r0) | ||
18 | :"i" (PAL_whami) | ||
19 | : "$1", "$22", "$23", "$24", "$25"); | ||
20 | return __r0; | ||
21 | } | ||
22 | |||
23 | #ifdef CONFIG_SMP | ||
24 | |||
25 | #include <asm/irq.h> | ||
26 | |||
27 | struct cpuinfo_alpha { | ||
28 | unsigned long loops_per_jiffy; | ||
29 | unsigned long last_asn; | ||
30 | int need_new_asn; | ||
31 | int asn_lock; | ||
32 | unsigned long ipi_count; | ||
33 | unsigned long prof_multiplier; | ||
34 | unsigned long prof_counter; | ||
35 | unsigned char mcheck_expected; | ||
36 | unsigned char mcheck_taken; | ||
37 | unsigned char mcheck_extra; | ||
38 | } __attribute__((aligned(64))); | ||
39 | |||
40 | extern struct cpuinfo_alpha cpu_data[NR_CPUS]; | ||
41 | |||
42 | #define PROC_CHANGE_PENALTY 20 | ||
43 | |||
44 | #define hard_smp_processor_id() __hard_smp_processor_id() | ||
45 | #define raw_smp_processor_id() (current_thread_info()->cpu) | ||
46 | |||
47 | extern int smp_num_cpus; | ||
48 | #define cpu_possible_map cpu_present_map | ||
49 | |||
50 | extern void arch_send_call_function_single_ipi(int cpu); | ||
51 | extern void arch_send_call_function_ipi(cpumask_t mask); | ||
52 | |||
53 | #else /* CONFIG_SMP */ | ||
54 | |||
55 | #define hard_smp_processor_id() 0 | ||
56 | #define smp_call_function_on_cpu(func,info,wait,cpu) ({ 0; }) | ||
57 | |||
58 | #endif /* CONFIG_SMP */ | ||
59 | |||
60 | #define NO_PROC_ID (-1) | ||
61 | |||
62 | #endif | ||
diff --git a/arch/alpha/include/asm/socket.h b/arch/alpha/include/asm/socket.h new file mode 100644 index 000000000000..a1057c2d95e7 --- /dev/null +++ b/arch/alpha/include/asm/socket.h | |||
@@ -0,0 +1,70 @@ | |||
1 | #ifndef _ASM_SOCKET_H | ||
2 | #define _ASM_SOCKET_H | ||
3 | |||
4 | #include <asm/sockios.h> | ||
5 | |||
6 | /* For setsockopt(2) */ | ||
7 | /* | ||
8 | * Note: we only bother about making the SOL_SOCKET options | ||
9 | * same as OSF/1, as that's all that "normal" programs are | ||
10 | * likely to set. We don't necessarily want to be binary | ||
11 | * compatible with _everything_. | ||
12 | */ | ||
13 | #define SOL_SOCKET 0xffff | ||
14 | |||
15 | #define SO_DEBUG 0x0001 | ||
16 | #define SO_REUSEADDR 0x0004 | ||
17 | #define SO_KEEPALIVE 0x0008 | ||
18 | #define SO_DONTROUTE 0x0010 | ||
19 | #define SO_BROADCAST 0x0020 | ||
20 | #define SO_LINGER 0x0080 | ||
21 | #define SO_OOBINLINE 0x0100 | ||
22 | /* To add: #define SO_REUSEPORT 0x0200 */ | ||
23 | |||
24 | #define SO_TYPE 0x1008 | ||
25 | #define SO_ERROR 0x1007 | ||
26 | #define SO_SNDBUF 0x1001 | ||
27 | #define SO_RCVBUF 0x1002 | ||
28 | #define SO_SNDBUFFORCE 0x100a | ||
29 | #define SO_RCVBUFFORCE 0x100b | ||
30 | #define SO_RCVLOWAT 0x1010 | ||
31 | #define SO_SNDLOWAT 0x1011 | ||
32 | #define SO_RCVTIMEO 0x1012 | ||
33 | #define SO_SNDTIMEO 0x1013 | ||
34 | #define SO_ACCEPTCONN 0x1014 | ||
35 | |||
36 | /* linux-specific, might as well be the same as on i386 */ | ||
37 | #define SO_NO_CHECK 11 | ||
38 | #define SO_PRIORITY 12 | ||
39 | #define SO_BSDCOMPAT 14 | ||
40 | |||
41 | #define SO_PASSCRED 17 | ||
42 | #define SO_PEERCRED 18 | ||
43 | #define SO_BINDTODEVICE 25 | ||
44 | |||
45 | /* Socket filtering */ | ||
46 | #define SO_ATTACH_FILTER 26 | ||
47 | #define SO_DETACH_FILTER 27 | ||
48 | |||
49 | #define SO_PEERNAME 28 | ||
50 | #define SO_TIMESTAMP 29 | ||
51 | #define SCM_TIMESTAMP SO_TIMESTAMP | ||
52 | |||
53 | #define SO_PEERSEC 30 | ||
54 | #define SO_PASSSEC 34 | ||
55 | #define SO_TIMESTAMPNS 35 | ||
56 | #define SCM_TIMESTAMPNS SO_TIMESTAMPNS | ||
57 | |||
58 | /* Security levels - as per NRL IPv6 - don't actually do anything */ | ||
59 | #define SO_SECURITY_AUTHENTICATION 19 | ||
60 | #define SO_SECURITY_ENCRYPTION_TRANSPORT 20 | ||
61 | #define SO_SECURITY_ENCRYPTION_NETWORK 21 | ||
62 | |||
63 | #define SO_MARK 36 | ||
64 | |||
65 | /* O_NONBLOCK clashes with the bits used for socket types. Therefore we | ||
66 | * have to define SOCK_NONBLOCK to a different value here. | ||
67 | */ | ||
68 | #define SOCK_NONBLOCK 0x40000000 | ||
69 | |||
70 | #endif /* _ASM_SOCKET_H */ | ||
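Aside: SOCK_NONBLOCK above exists so userspace can OR a non-blocking request into the socket type even though Alpha's O_NONBLOCK value overlaps the type bits. A minimal sketch of the intended use, assuming a kernel and libc new enough to accept type flags in socket(2):

    #include <sys/socket.h>
    #include <stdio.h>

    int main(void)
    {
            /* Request a TCP socket that starts out non-blocking. */
            int fd = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);

            if (fd < 0) {
                    perror("socket");       /* older kernels reject the flag */
                    return 1;
            }
            return 0;
    }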
diff --git a/arch/alpha/include/asm/sockios.h b/arch/alpha/include/asm/sockios.h new file mode 100644 index 000000000000..7932c7ab4a4d --- /dev/null +++ b/arch/alpha/include/asm/sockios.h | |||
@@ -0,0 +1,16 @@ | |||
1 | #ifndef _ASM_ALPHA_SOCKIOS_H | ||
2 | #define _ASM_ALPHA_SOCKIOS_H | ||
3 | |||
4 | /* Socket-level I/O control calls. */ | ||
5 | |||
6 | #define FIOGETOWN _IOR('f', 123, int) | ||
7 | #define FIOSETOWN _IOW('f', 124, int) | ||
8 | |||
9 | #define SIOCATMARK _IOR('s', 7, int) | ||
10 | #define SIOCSPGRP _IOW('s', 8, pid_t) | ||
11 | #define SIOCGPGRP _IOR('s', 9, pid_t) | ||
12 | |||
13 | #define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ | ||
14 | #define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ | ||
15 | |||
16 | #endif /* _ASM_ALPHA_SOCKIOS_H */ | ||
diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h new file mode 100644 index 000000000000..aeeb125f6851 --- /dev/null +++ b/arch/alpha/include/asm/spinlock.h | |||
@@ -0,0 +1,173 @@ | |||
1 | #ifndef _ALPHA_SPINLOCK_H | ||
2 | #define _ALPHA_SPINLOCK_H | ||
3 | |||
4 | #include <asm/system.h> | ||
5 | #include <linux/kernel.h> | ||
6 | #include <asm/current.h> | ||
7 | |||
8 | /* | ||
9 | * Simple spin lock operations. There are two variants, one clears IRQ's | ||
10 | * on the local processor, one does not. | ||
11 | * | ||
12 | * We make no fairness assumptions. They have a cost. | ||
13 | */ | ||
14 | |||
15 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | ||
16 | #define __raw_spin_is_locked(x) ((x)->lock != 0) | ||
17 | #define __raw_spin_unlock_wait(x) \ | ||
18 | do { cpu_relax(); } while ((x)->lock) | ||
19 | |||
20 | static inline void __raw_spin_unlock(raw_spinlock_t * lock) | ||
21 | { | ||
22 | mb(); | ||
23 | lock->lock = 0; | ||
24 | } | ||
25 | |||
26 | static inline void __raw_spin_lock(raw_spinlock_t * lock) | ||
27 | { | ||
28 | long tmp; | ||
29 | |||
30 | __asm__ __volatile__( | ||
31 | "1: ldl_l %0,%1\n" | ||
32 | " bne %0,2f\n" | ||
33 | " lda %0,1\n" | ||
34 | " stl_c %0,%1\n" | ||
35 | " beq %0,2f\n" | ||
36 | " mb\n" | ||
37 | ".subsection 2\n" | ||
38 | "2: ldl %0,%1\n" | ||
39 | " bne %0,2b\n" | ||
40 | " br 1b\n" | ||
41 | ".previous" | ||
42 | : "=&r" (tmp), "=m" (lock->lock) | ||
43 | : "m"(lock->lock) : "memory"); | ||
44 | } | ||
45 | |||
46 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | ||
47 | { | ||
48 | return !test_and_set_bit(0, &lock->lock); | ||
49 | } | ||
50 | |||
51 | /***********************************************************/ | ||
52 | |||
53 | static inline int __raw_read_can_lock(raw_rwlock_t *lock) | ||
54 | { | ||
55 | return (lock->lock & 1) == 0; | ||
56 | } | ||
57 | |||
58 | static inline int __raw_write_can_lock(raw_rwlock_t *lock) | ||
59 | { | ||
60 | return lock->lock == 0; | ||
61 | } | ||
62 | |||
63 | static inline void __raw_read_lock(raw_rwlock_t *lock) | ||
64 | { | ||
65 | long regx; | ||
66 | |||
67 | __asm__ __volatile__( | ||
68 | "1: ldl_l %1,%0\n" | ||
69 | " blbs %1,6f\n" | ||
70 | " subl %1,2,%1\n" | ||
71 | " stl_c %1,%0\n" | ||
72 | " beq %1,6f\n" | ||
73 | " mb\n" | ||
74 | ".subsection 2\n" | ||
75 | "6: ldl %1,%0\n" | ||
76 | " blbs %1,6b\n" | ||
77 | " br 1b\n" | ||
78 | ".previous" | ||
79 | : "=m" (*lock), "=&r" (regx) | ||
80 | : "m" (*lock) : "memory"); | ||
81 | } | ||
82 | |||
83 | static inline void __raw_write_lock(raw_rwlock_t *lock) | ||
84 | { | ||
85 | long regx; | ||
86 | |||
87 | __asm__ __volatile__( | ||
88 | "1: ldl_l %1,%0\n" | ||
89 | " bne %1,6f\n" | ||
90 | " lda %1,1\n" | ||
91 | " stl_c %1,%0\n" | ||
92 | " beq %1,6f\n" | ||
93 | " mb\n" | ||
94 | ".subsection 2\n" | ||
95 | "6: ldl %1,%0\n" | ||
96 | " bne %1,6b\n" | ||
97 | " br 1b\n" | ||
98 | ".previous" | ||
99 | : "=m" (*lock), "=&r" (regx) | ||
100 | : "m" (*lock) : "memory"); | ||
101 | } | ||
102 | |||
103 | static inline int __raw_read_trylock(raw_rwlock_t * lock) | ||
104 | { | ||
105 | long regx; | ||
106 | int success; | ||
107 | |||
108 | __asm__ __volatile__( | ||
109 | "1: ldl_l %1,%0\n" | ||
110 | " lda %2,0\n" | ||
111 | " blbs %1,2f\n" | ||
112 | " subl %1,2,%2\n" | ||
113 | " stl_c %2,%0\n" | ||
114 | " beq %2,6f\n" | ||
115 | "2: mb\n" | ||
116 | ".subsection 2\n" | ||
117 | "6: br 1b\n" | ||
118 | ".previous" | ||
119 | : "=m" (*lock), "=&r" (regx), "=&r" (success) | ||
120 | : "m" (*lock) : "memory"); | ||
121 | |||
122 | return success; | ||
123 | } | ||
124 | |||
125 | static inline int __raw_write_trylock(raw_rwlock_t * lock) | ||
126 | { | ||
127 | long regx; | ||
128 | int success; | ||
129 | |||
130 | __asm__ __volatile__( | ||
131 | "1: ldl_l %1,%0\n" | ||
132 | " lda %2,0\n" | ||
133 | " bne %1,2f\n" | ||
134 | " lda %2,1\n" | ||
135 | " stl_c %2,%0\n" | ||
136 | " beq %2,6f\n" | ||
137 | "2: mb\n" | ||
138 | ".subsection 2\n" | ||
139 | "6: br 1b\n" | ||
140 | ".previous" | ||
141 | : "=m" (*lock), "=&r" (regx), "=&r" (success) | ||
142 | : "m" (*lock) : "memory"); | ||
143 | |||
144 | return success; | ||
145 | } | ||
146 | |||
147 | static inline void __raw_read_unlock(raw_rwlock_t * lock) | ||
148 | { | ||
149 | long regx; | ||
150 | __asm__ __volatile__( | ||
151 | " mb\n" | ||
152 | "1: ldl_l %1,%0\n" | ||
153 | " addl %1,2,%1\n" | ||
154 | " stl_c %1,%0\n" | ||
155 | " beq %1,6f\n" | ||
156 | ".subsection 2\n" | ||
157 | "6: br 1b\n" | ||
158 | ".previous" | ||
159 | : "=m" (*lock), "=&r" (regx) | ||
160 | : "m" (*lock) : "memory"); | ||
161 | } | ||
162 | |||
163 | static inline void __raw_write_unlock(raw_rwlock_t * lock) | ||
164 | { | ||
165 | mb(); | ||
166 | lock->lock = 0; | ||
167 | } | ||
168 | |||
169 | #define _raw_spin_relax(lock) cpu_relax() | ||
170 | #define _raw_read_relax(lock) cpu_relax() | ||
171 | #define _raw_write_relax(lock) cpu_relax() | ||
172 | |||
173 | #endif /* _ALPHA_SPINLOCK_H */ | ||
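Aside: the assembly above is the classic Alpha load-locked/store-conditional pattern: spin on a plain load in a cold subsection while the lock is held, and issue mb only after the store-conditional succeeds. As a rough analogue in portable C (a sketch using C11 atomics, not the kernel's raw_spinlock API), the acquire/release shape is:

    #include <stdatomic.h>

    typedef struct { atomic_uint lock; } toy_spinlock_t;

    static void toy_spin_lock(toy_spinlock_t *l)
    {
            /* Spin until we observe 0 and successfully swap in 1; the
               acquire ordering plays the role of the trailing mb. */
            while (atomic_exchange_explicit(&l->lock, 1, memory_order_acquire))
                    while (atomic_load_explicit(&l->lock, memory_order_relaxed))
                            ;       /* plain spin, like the ldl loop at 2: */
    }

    static void toy_spin_unlock(toy_spinlock_t *l)
    {
            /* Mirrors mb(); lock->lock = 0 in __raw_spin_unlock. */
            atomic_store_explicit(&l->lock, 0, memory_order_release);
    }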
diff --git a/arch/alpha/include/asm/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h new file mode 100644 index 000000000000..8141eb5ebf0d --- /dev/null +++ b/arch/alpha/include/asm/spinlock_types.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef _ALPHA_SPINLOCK_TYPES_H | ||
2 | #define _ALPHA_SPINLOCK_TYPES_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | typedef struct { | ||
9 | volatile unsigned int lock; | ||
10 | } raw_spinlock_t; | ||
11 | |||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 0 } | ||
13 | |||
14 | typedef struct { | ||
15 | volatile unsigned int lock; | ||
16 | } raw_rwlock_t; | ||
17 | |||
18 | #define __RAW_RW_LOCK_UNLOCKED { 0 } | ||
19 | |||
20 | #endif | ||
diff --git a/arch/alpha/include/asm/stat.h b/arch/alpha/include/asm/stat.h new file mode 100644 index 000000000000..07ad3e6b3f3e --- /dev/null +++ b/arch/alpha/include/asm/stat.h | |||
@@ -0,0 +1,48 @@ | |||
1 | #ifndef _ALPHA_STAT_H | ||
2 | #define _ALPHA_STAT_H | ||
3 | |||
4 | struct stat { | ||
5 | unsigned int st_dev; | ||
6 | unsigned int st_ino; | ||
7 | unsigned int st_mode; | ||
8 | unsigned int st_nlink; | ||
9 | unsigned int st_uid; | ||
10 | unsigned int st_gid; | ||
11 | unsigned int st_rdev; | ||
12 | long st_size; | ||
13 | unsigned long st_atime; | ||
14 | unsigned long st_mtime; | ||
15 | unsigned long st_ctime; | ||
16 | unsigned int st_blksize; | ||
17 | unsigned int st_blocks; | ||
18 | unsigned int st_flags; | ||
19 | unsigned int st_gen; | ||
20 | }; | ||
21 | |||
22 | /* The stat64 structure increases the size of dev_t and blkcnt_t, adds | ||
23 | nanosecond-resolution times, and leaves padding for expansion. */ | ||
24 | |||
25 | struct stat64 { | ||
26 | unsigned long st_dev; | ||
27 | unsigned long st_ino; | ||
28 | unsigned long st_rdev; | ||
29 | long st_size; | ||
30 | unsigned long st_blocks; | ||
31 | |||
32 | unsigned int st_mode; | ||
33 | unsigned int st_uid; | ||
34 | unsigned int st_gid; | ||
35 | unsigned int st_blksize; | ||
36 | unsigned int st_nlink; | ||
37 | unsigned int __pad0; | ||
38 | |||
39 | unsigned long st_atime; | ||
40 | unsigned long st_atime_nsec; | ||
41 | unsigned long st_mtime; | ||
42 | unsigned long st_mtime_nsec; | ||
43 | unsigned long st_ctime; | ||
44 | unsigned long st_ctime_nsec; | ||
45 | long __unused[3]; | ||
46 | }; | ||
47 | |||
48 | #endif | ||
diff --git a/arch/alpha/include/asm/statfs.h b/arch/alpha/include/asm/statfs.h new file mode 100644 index 000000000000..ad15830baefe --- /dev/null +++ b/arch/alpha/include/asm/statfs.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef _ALPHA_STATFS_H | ||
2 | #define _ALPHA_STATFS_H | ||
3 | |||
4 | #include <asm-generic/statfs.h> | ||
5 | |||
6 | #endif | ||
diff --git a/arch/alpha/include/asm/string.h b/arch/alpha/include/asm/string.h new file mode 100644 index 000000000000..b02b8a282940 --- /dev/null +++ b/arch/alpha/include/asm/string.h | |||
@@ -0,0 +1,66 @@ | |||
1 | #ifndef __ALPHA_STRING_H__ | ||
2 | #define __ALPHA_STRING_H__ | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | |||
6 | /* | ||
7 | * GCC of any recent vintage doesn't do stupid things with bcopy. | ||
8 | * EGCS 1.1 knows all about expanding memcpy inline, others don't. | ||
9 | * | ||
10 | * Similarly for a memset with data = 0. | ||
11 | */ | ||
12 | |||
13 | #define __HAVE_ARCH_MEMCPY | ||
14 | extern void * memcpy(void *, const void *, size_t); | ||
15 | #define __HAVE_ARCH_MEMMOVE | ||
16 | extern void * memmove(void *, const void *, size_t); | ||
17 | |||
18 | /* For backward compatibility with modules. Unused otherwise. */ | ||
19 | extern void * __memcpy(void *, const void *, size_t); | ||
20 | |||
21 | #define memcpy __builtin_memcpy | ||
22 | |||
23 | #define __HAVE_ARCH_MEMSET | ||
24 | extern void * __constant_c_memset(void *, unsigned long, size_t); | ||
25 | extern void * __memset(void *, int, size_t); | ||
26 | extern void * memset(void *, int, size_t); | ||
27 | |||
28 | #define memset(s, c, n) \ | ||
29 | (__builtin_constant_p(c) \ | ||
30 | ? (__builtin_constant_p(n) && (c) == 0 \ | ||
31 | ? __builtin_memset((s),0,(n)) \ | ||
32 | : __constant_c_memset((s),0x0101010101010101UL*(unsigned char)(c),(n))) \ | ||
33 | : __memset((s),(c),(n))) | ||
34 | |||
35 | #define __HAVE_ARCH_STRCPY | ||
36 | extern char * strcpy(char *,const char *); | ||
37 | #define __HAVE_ARCH_STRNCPY | ||
38 | extern char * strncpy(char *, const char *, size_t); | ||
39 | #define __HAVE_ARCH_STRCAT | ||
40 | extern char * strcat(char *, const char *); | ||
41 | #define __HAVE_ARCH_STRNCAT | ||
42 | extern char * strncat(char *, const char *, size_t); | ||
43 | #define __HAVE_ARCH_STRCHR | ||
44 | extern char * strchr(const char *,int); | ||
45 | #define __HAVE_ARCH_STRRCHR | ||
46 | extern char * strrchr(const char *,int); | ||
47 | #define __HAVE_ARCH_STRLEN | ||
48 | extern size_t strlen(const char *); | ||
49 | #define __HAVE_ARCH_MEMCHR | ||
50 | extern void * memchr(const void *, int, size_t); | ||
51 | |||
52 | /* The following routine is like memset except that it writes 16-bit | ||
53 | aligned values. The DEST and COUNT parameters must be even for | ||
54 | correct operation. */ | ||
55 | |||
56 | #define __HAVE_ARCH_MEMSETW | ||
57 | extern void * __memsetw(void *dest, unsigned short, size_t count); | ||
58 | |||
59 | #define memsetw(s, c, n) \ | ||
60 | (__builtin_constant_p(c) \ | ||
61 | ? __constant_c_memset((s),0x0001000100010001UL*(unsigned short)(c),(n)) \ | ||
62 | : __memsetw((s),(c),(n))) | ||
63 | |||
64 | #endif /* __KERNEL__ */ | ||
65 | |||
66 | #endif /* __ALPHA_STRING_H__ */ | ||
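Aside: the memset macro above relies on multiplying the constant byte by 0x0101010101010101UL to replicate it across a 64-bit word before handing it to __constant_c_memset. A small standalone demonstration of that replication trick (ordinary C, not the kernel helper; it assumes a 64-bit unsigned long, as on Alpha):

    #include <stdio.h>

    int main(void)
    {
            unsigned char c = 0xAB;
            /* Each of the eight 0x01 bytes picks up a copy of c. */
            unsigned long word = 0x0101010101010101UL * c;

            printf("%#lx\n", word);         /* 0xabababababababab */
            return 0;
    }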
diff --git a/arch/alpha/include/asm/suspend.h b/arch/alpha/include/asm/suspend.h new file mode 100644 index 000000000000..c7042d575851 --- /dev/null +++ b/arch/alpha/include/asm/suspend.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef __ALPHA_SUSPEND_H | ||
2 | #define __ALPHA_SUSPEND_H | ||
3 | |||
4 | /* Dummy include. */ | ||
5 | |||
6 | #endif /* __ALPHA_SUSPEND_H */ | ||
diff --git a/arch/alpha/include/asm/sysinfo.h b/arch/alpha/include/asm/sysinfo.h new file mode 100644 index 000000000000..086aba284df2 --- /dev/null +++ b/arch/alpha/include/asm/sysinfo.h | |||
@@ -0,0 +1,39 @@ | |||
1 | /* | ||
2 | * include/asm-alpha/sysinfo.h | ||
3 | */ | ||
4 | |||
5 | #ifndef __ASM_ALPHA_SYSINFO_H | ||
6 | #define __ASM_ALPHA_SYSINFO_H | ||
7 | |||
8 | /* This defines the subset of the OSF/1 getsysinfo/setsysinfo calls | ||
9 | that we support. */ | ||
10 | |||
11 | #define GSI_UACPROC 8 | ||
12 | #define GSI_IEEE_FP_CONTROL 45 | ||
13 | #define GSI_IEEE_STATE_AT_SIGNAL 46 | ||
14 | #define GSI_PROC_TYPE 60 | ||
15 | #define GSI_GET_HWRPB 101 | ||
16 | |||
17 | #define SSI_NVPAIRS 1 | ||
18 | #define SSI_IEEE_FP_CONTROL 14 | ||
19 | #define SSI_IEEE_STATE_AT_SIGNAL 15 | ||
20 | #define SSI_IEEE_IGNORE_STATE_AT_SIGNAL 16 | ||
21 | #define SSI_IEEE_RAISE_EXCEPTION 1001 /* linux specific */ | ||
22 | |||
23 | #define SSIN_UACPROC 6 | ||
24 | |||
25 | #define UAC_BITMASK 7 | ||
26 | #define UAC_NOPRINT 1 | ||
27 | #define UAC_NOFIX 2 | ||
28 | #define UAC_SIGBUS 4 | ||
29 | |||
30 | |||
31 | #ifdef __KERNEL__ | ||
32 | |||
33 | /* This is the shift that is applied to the UAC bits as stored in the | ||
34 | per-thread flags. See thread_info.h. */ | ||
35 | #define UAC_SHIFT 6 | ||
36 | |||
37 | #endif | ||
38 | |||
39 | #endif /* __ASM_ALPHA_SYSINFO_H */ | ||
diff --git a/arch/alpha/include/asm/system.h b/arch/alpha/include/asm/system.h new file mode 100644 index 000000000000..afe20fa58c99 --- /dev/null +++ b/arch/alpha/include/asm/system.h | |||
@@ -0,0 +1,829 @@ | |||
1 | #ifndef __ALPHA_SYSTEM_H | ||
2 | #define __ALPHA_SYSTEM_H | ||
3 | |||
4 | #include <asm/pal.h> | ||
5 | #include <asm/page.h> | ||
6 | #include <asm/barrier.h> | ||
7 | |||
8 | /* | ||
9 | * System defines. Note that this is included both from .c and .S | ||
10 | * files, so it contains only defines, not any C code. | ||
11 | */ | ||
12 | |||
13 | /* | ||
14 | * We leave one page for the initial stack page, and one page for | ||
15 | * the initial process structure. Also, the console eats 3 MB for | ||
16 | * the initial bootloader (one of which we can reclaim later). | ||
17 | */ | ||
18 | #define BOOT_PCB 0x20000000 | ||
19 | #define BOOT_ADDR 0x20000000 | ||
20 | /* Remove when official MILO sources have ELF support: */ | ||
21 | #define BOOT_SIZE (16*1024) | ||
22 | |||
23 | #ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS | ||
24 | #define KERNEL_START_PHYS 0x300000 /* Old bootloaders hardcoded this. */ | ||
25 | #else | ||
26 | #define KERNEL_START_PHYS 0x1000000 /* required: Wildfire/Titan/Marvel */ | ||
27 | #endif | ||
28 | |||
29 | #define KERNEL_START (PAGE_OFFSET+KERNEL_START_PHYS) | ||
30 | #define SWAPPER_PGD KERNEL_START | ||
31 | #define INIT_STACK (PAGE_OFFSET+KERNEL_START_PHYS+0x02000) | ||
32 | #define EMPTY_PGT (PAGE_OFFSET+KERNEL_START_PHYS+0x04000) | ||
33 | #define EMPTY_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x08000) | ||
34 | #define ZERO_PGE (PAGE_OFFSET+KERNEL_START_PHYS+0x0A000) | ||
35 | |||
36 | #define START_ADDR (PAGE_OFFSET+KERNEL_START_PHYS+0x10000) | ||
37 | |||
38 | /* | ||
39 | * This is setup by the secondary bootstrap loader. Because | ||
40 | * the zero page is zeroed out as soon as the vm system is | ||
41 | * initialized, we need to copy things out into a more permanent | ||
42 | * place. | ||
43 | */ | ||
44 | #define PARAM ZERO_PGE | ||
45 | #define COMMAND_LINE ((char*)(PARAM + 0x0000)) | ||
46 | #define INITRD_START (*(unsigned long *) (PARAM+0x100)) | ||
47 | #define INITRD_SIZE (*(unsigned long *) (PARAM+0x108)) | ||
48 | |||
49 | #ifndef __ASSEMBLY__ | ||
50 | #include <linux/kernel.h> | ||
51 | #define AT_VECTOR_SIZE_ARCH 4 /* entries in ARCH_DLINFO */ | ||
52 | |||
53 | /* | ||
54 | * This is the logout header that should be common to all platforms | ||
55 | * (assuming they are running OSF/1 PALcode, I guess). | ||
56 | */ | ||
57 | struct el_common { | ||
58 | unsigned int size; /* size in bytes of logout area */ | ||
59 | unsigned int sbz1 : 30; /* should be zero */ | ||
60 | unsigned int err2 : 1; /* second error */ | ||
61 | unsigned int retry : 1; /* retry flag */ | ||
62 | unsigned int proc_offset; /* processor-specific offset */ | ||
63 | unsigned int sys_offset; /* system-specific offset */ | ||
64 | unsigned int code; /* machine check code */ | ||
65 | unsigned int frame_rev; /* frame revision */ | ||
66 | }; | ||
67 | |||
68 | /* Machine Check Frame for uncorrectable errors (Large format) | ||
69 | * --- This is used to log uncorrectable errors such as | ||
70 | * double bit ECC errors. | ||
71 | * --- These errors are detected by both the processor and the system. | ||
72 | */ | ||
73 | struct el_common_EV5_uncorrectable_mcheck { | ||
74 | unsigned long shadow[8]; /* Shadow reg. 8-14, 25 */ | ||
75 | unsigned long paltemp[24]; /* PAL TEMP REGS. */ | ||
76 | unsigned long exc_addr; /* Address of excepting instruction*/ | ||
77 | unsigned long exc_sum; /* Summary of arithmetic traps. */ | ||
78 | unsigned long exc_mask; /* Exception mask (from exc_sum). */ | ||
79 | unsigned long pal_base; /* Base address for PALcode. */ | ||
80 | unsigned long isr; /* Interrupt Status Reg. */ | ||
81 | unsigned long icsr; /* CURRENT SETUP OF EV5 IBOX */ | ||
82 | unsigned long ic_perr_stat; /* I-CACHE Reg. <11> set Data parity | ||
83 | <12> set TAG parity*/ | ||
84 | unsigned long dc_perr_stat; /* D-CACHE error Reg. Bits set to 1: | ||
85 | <2> Data error in bank 0 | ||
86 | <3> Data error in bank 1 | ||
87 | <4> Tag error in bank 0 | ||
88 | <5> Tag error in bank 1 */ | ||
89 | unsigned long va; /* Effective VA of fault or miss. */ | ||
90 | unsigned long mm_stat; /* Holds the reason for D-stream | ||
91 | fault or D-cache parity errors */ | ||
92 | unsigned long sc_addr; /* Address that was being accessed | ||
93 | when EV5 detected Secondary cache | ||
94 | failure. */ | ||
95 | unsigned long sc_stat; /* Helps determine if the error was | ||
96 | TAG/Data parity(Secondary Cache)*/ | ||
97 | unsigned long bc_tag_addr; /* Contents of EV5 BC_TAG_ADDR */ | ||
98 | unsigned long ei_addr; /* Physical address of any transfer | ||
99 | that is logged in EV5 EI_STAT */ | ||
100 | unsigned long fill_syndrome; /* For correcting ECC errors. */ | ||
101 | unsigned long ei_stat; /* Helps identify reason of any | ||
102 | processor uncorrectable error | ||
103 | at its external interface. */ | ||
104 | unsigned long ld_lock; /* Contents of EV5 LD_LOCK register*/ | ||
105 | }; | ||
106 | |||
107 | struct el_common_EV6_mcheck { | ||
108 | unsigned int FrameSize; /* Bytes, including this field */ | ||
109 | unsigned int FrameFlags; /* <31> = Retry, <30> = Second Error */ | ||
110 | unsigned int CpuOffset; /* Offset to CPU-specific info */ | ||
111 | unsigned int SystemOffset; /* Offset to system-specific info */ | ||
112 | unsigned int MCHK_Code; | ||
113 | unsigned int MCHK_Frame_Rev; | ||
114 | unsigned long I_STAT; /* EV6 Internal Processor Registers */ | ||
115 | unsigned long DC_STAT; /* (See the 21264 Spec) */ | ||
116 | unsigned long C_ADDR; | ||
117 | unsigned long DC1_SYNDROME; | ||
118 | unsigned long DC0_SYNDROME; | ||
119 | unsigned long C_STAT; | ||
120 | unsigned long C_STS; | ||
121 | unsigned long MM_STAT; | ||
122 | unsigned long EXC_ADDR; | ||
123 | unsigned long IER_CM; | ||
124 | unsigned long ISUM; | ||
125 | unsigned long RESERVED0; | ||
126 | unsigned long PAL_BASE; | ||
127 | unsigned long I_CTL; | ||
128 | unsigned long PCTX; | ||
129 | }; | ||
130 | |||
131 | extern void halt(void) __attribute__((noreturn)); | ||
132 | #define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt)) | ||
133 | |||
134 | #define switch_to(P,N,L) \ | ||
135 | do { \ | ||
136 | (L) = alpha_switch_to(virt_to_phys(&task_thread_info(N)->pcb), (P)); \ | ||
137 | check_mmu_context(); \ | ||
138 | } while (0) | ||
139 | |||
140 | struct task_struct; | ||
141 | extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*); | ||
142 | |||
143 | #define imb() \ | ||
144 | __asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory") | ||
145 | |||
146 | #define draina() \ | ||
147 | __asm__ __volatile__ ("call_pal %0 #draina" : : "i" (PAL_draina) : "memory") | ||
148 | |||
149 | enum implver_enum { | ||
150 | IMPLVER_EV4, | ||
151 | IMPLVER_EV5, | ||
152 | IMPLVER_EV6 | ||
153 | }; | ||
154 | |||
155 | #ifdef CONFIG_ALPHA_GENERIC | ||
156 | #define implver() \ | ||
157 | ({ unsigned long __implver; \ | ||
158 | __asm__ ("implver %0" : "=r"(__implver)); \ | ||
159 | (enum implver_enum) __implver; }) | ||
160 | #else | ||
161 | /* Try to eliminate some dead code. */ | ||
162 | #ifdef CONFIG_ALPHA_EV4 | ||
163 | #define implver() IMPLVER_EV4 | ||
164 | #endif | ||
165 | #ifdef CONFIG_ALPHA_EV5 | ||
166 | #define implver() IMPLVER_EV5 | ||
167 | #endif | ||
168 | #if defined(CONFIG_ALPHA_EV6) | ||
169 | #define implver() IMPLVER_EV6 | ||
170 | #endif | ||
171 | #endif | ||
172 | |||
173 | enum amask_enum { | ||
174 | AMASK_BWX = (1UL << 0), | ||
175 | AMASK_FIX = (1UL << 1), | ||
176 | AMASK_CIX = (1UL << 2), | ||
177 | AMASK_MAX = (1UL << 8), | ||
178 | AMASK_PRECISE_TRAP = (1UL << 9), | ||
179 | }; | ||
180 | |||
181 | #define amask(mask) \ | ||
182 | ({ unsigned long __amask, __input = (mask); \ | ||
183 | __asm__ ("amask %1,%0" : "=r"(__amask) : "rI"(__input)); \ | ||
184 | __amask; }) | ||
185 | |||
186 | #define __CALL_PAL_R0(NAME, TYPE) \ | ||
187 | extern inline TYPE NAME(void) \ | ||
188 | { \ | ||
189 | register TYPE __r0 __asm__("$0"); \ | ||
190 | __asm__ __volatile__( \ | ||
191 | "call_pal %1 # " #NAME \ | ||
192 | :"=r" (__r0) \ | ||
193 | :"i" (PAL_ ## NAME) \ | ||
194 | :"$1", "$16", "$22", "$23", "$24", "$25"); \ | ||
195 | return __r0; \ | ||
196 | } | ||
197 | |||
198 | #define __CALL_PAL_W1(NAME, TYPE0) \ | ||
199 | extern inline void NAME(TYPE0 arg0) \ | ||
200 | { \ | ||
201 | register TYPE0 __r16 __asm__("$16") = arg0; \ | ||
202 | __asm__ __volatile__( \ | ||
203 | "call_pal %1 # "#NAME \ | ||
204 | : "=r"(__r16) \ | ||
205 | : "i"(PAL_ ## NAME), "0"(__r16) \ | ||
206 | : "$1", "$22", "$23", "$24", "$25"); \ | ||
207 | } | ||
208 | |||
209 | #define __CALL_PAL_W2(NAME, TYPE0, TYPE1) \ | ||
210 | extern inline void NAME(TYPE0 arg0, TYPE1 arg1) \ | ||
211 | { \ | ||
212 | register TYPE0 __r16 __asm__("$16") = arg0; \ | ||
213 | register TYPE1 __r17 __asm__("$17") = arg1; \ | ||
214 | __asm__ __volatile__( \ | ||
215 | "call_pal %2 # "#NAME \ | ||
216 | : "=r"(__r16), "=r"(__r17) \ | ||
217 | : "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17) \ | ||
218 | : "$1", "$22", "$23", "$24", "$25"); \ | ||
219 | } | ||
220 | |||
221 | #define __CALL_PAL_RW1(NAME, RTYPE, TYPE0) \ | ||
222 | extern inline RTYPE NAME(TYPE0 arg0) \ | ||
223 | { \ | ||
224 | register RTYPE __r0 __asm__("$0"); \ | ||
225 | register TYPE0 __r16 __asm__("$16") = arg0; \ | ||
226 | __asm__ __volatile__( \ | ||
227 | "call_pal %2 # "#NAME \ | ||
228 | : "=r"(__r16), "=r"(__r0) \ | ||
229 | : "i"(PAL_ ## NAME), "0"(__r16) \ | ||
230 | : "$1", "$22", "$23", "$24", "$25"); \ | ||
231 | return __r0; \ | ||
232 | } | ||
233 | |||
234 | #define __CALL_PAL_RW2(NAME, RTYPE, TYPE0, TYPE1) \ | ||
235 | extern inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1) \ | ||
236 | { \ | ||
237 | register RTYPE __r0 __asm__("$0"); \ | ||
238 | register TYPE0 __r16 __asm__("$16") = arg0; \ | ||
239 | register TYPE1 __r17 __asm__("$17") = arg1; \ | ||
240 | __asm__ __volatile__( \ | ||
241 | "call_pal %3 # "#NAME \ | ||
242 | : "=r"(__r16), "=r"(__r17), "=r"(__r0) \ | ||
243 | : "i"(PAL_ ## NAME), "0"(__r16), "1"(__r17) \ | ||
244 | : "$1", "$22", "$23", "$24", "$25"); \ | ||
245 | return __r0; \ | ||
246 | } | ||
247 | |||
248 | __CALL_PAL_W1(cflush, unsigned long); | ||
249 | __CALL_PAL_R0(rdmces, unsigned long); | ||
250 | __CALL_PAL_R0(rdps, unsigned long); | ||
251 | __CALL_PAL_R0(rdusp, unsigned long); | ||
252 | __CALL_PAL_RW1(swpipl, unsigned long, unsigned long); | ||
253 | __CALL_PAL_R0(whami, unsigned long); | ||
254 | __CALL_PAL_W2(wrent, void*, unsigned long); | ||
255 | __CALL_PAL_W1(wripir, unsigned long); | ||
256 | __CALL_PAL_W1(wrkgp, unsigned long); | ||
257 | __CALL_PAL_W1(wrmces, unsigned long); | ||
258 | __CALL_PAL_RW2(wrperfmon, unsigned long, unsigned long, unsigned long); | ||
259 | __CALL_PAL_W1(wrusp, unsigned long); | ||
260 | __CALL_PAL_W1(wrvptptr, unsigned long); | ||
261 | |||
262 | #define IPL_MIN 0 | ||
263 | #define IPL_SW0 1 | ||
264 | #define IPL_SW1 2 | ||
265 | #define IPL_DEV0 3 | ||
266 | #define IPL_DEV1 4 | ||
267 | #define IPL_TIMER 5 | ||
268 | #define IPL_PERF 6 | ||
269 | #define IPL_POWERFAIL 6 | ||
270 | #define IPL_MCHECK 7 | ||
271 | #define IPL_MAX 7 | ||
272 | |||
273 | #ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK | ||
274 | #undef IPL_MIN | ||
275 | #define IPL_MIN __min_ipl | ||
276 | extern int __min_ipl; | ||
277 | #endif | ||
278 | |||
279 | #define getipl() (rdps() & 7) | ||
280 | #define setipl(ipl) ((void) swpipl(ipl)) | ||
281 | |||
282 | #define local_irq_disable() do { setipl(IPL_MAX); barrier(); } while(0) | ||
283 | #define local_irq_enable() do { barrier(); setipl(IPL_MIN); } while(0) | ||
284 | #define local_save_flags(flags) ((flags) = rdps()) | ||
285 | #define local_irq_save(flags) do { (flags) = swpipl(IPL_MAX); barrier(); } while(0) | ||
286 | #define local_irq_restore(flags) do { barrier(); setipl(flags); barrier(); } while(0) | ||
287 | |||
288 | #define irqs_disabled() (getipl() == IPL_MAX) | ||
289 | |||
290 | /* | ||
291 | * TB routines.. | ||
292 | */ | ||
293 | #define __tbi(nr,arg,arg1...) \ | ||
294 | ({ \ | ||
295 | register unsigned long __r16 __asm__("$16") = (nr); \ | ||
296 | register unsigned long __r17 __asm__("$17"); arg; \ | ||
297 | __asm__ __volatile__( \ | ||
298 | "call_pal %3 #__tbi" \ | ||
299 | :"=r" (__r16),"=r" (__r17) \ | ||
300 | :"0" (__r16),"i" (PAL_tbi) ,##arg1 \ | ||
301 | :"$0", "$1", "$22", "$23", "$24", "$25"); \ | ||
302 | }) | ||
303 | |||
304 | #define tbi(x,y) __tbi(x,__r17=(y),"1" (__r17)) | ||
305 | #define tbisi(x) __tbi(1,__r17=(x),"1" (__r17)) | ||
306 | #define tbisd(x) __tbi(2,__r17=(x),"1" (__r17)) | ||
307 | #define tbis(x) __tbi(3,__r17=(x),"1" (__r17)) | ||
308 | #define tbiap() __tbi(-1, /* no second argument */) | ||
309 | #define tbia() __tbi(-2, /* no second argument */) | ||
310 | |||
311 | /* | ||
312 | * Atomic exchange. | ||
313 | * Since it can be used to implement critical sections | ||
314 | * it must clobber "memory" (also for interrupts in UP). | ||
315 | */ | ||
316 | |||
317 | static inline unsigned long | ||
318 | __xchg_u8(volatile char *m, unsigned long val) | ||
319 | { | ||
320 | unsigned long ret, tmp, addr64; | ||
321 | |||
322 | __asm__ __volatile__( | ||
323 | " andnot %4,7,%3\n" | ||
324 | " insbl %1,%4,%1\n" | ||
325 | "1: ldq_l %2,0(%3)\n" | ||
326 | " extbl %2,%4,%0\n" | ||
327 | " mskbl %2,%4,%2\n" | ||
328 | " or %1,%2,%2\n" | ||
329 | " stq_c %2,0(%3)\n" | ||
330 | " beq %2,2f\n" | ||
331 | #ifdef CONFIG_SMP | ||
332 | " mb\n" | ||
333 | #endif | ||
334 | ".subsection 2\n" | ||
335 | "2: br 1b\n" | ||
336 | ".previous" | ||
337 | : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) | ||
338 | : "r" ((long)m), "1" (val) : "memory"); | ||
339 | |||
340 | return ret; | ||
341 | } | ||
342 | |||
343 | static inline unsigned long | ||
344 | __xchg_u16(volatile short *m, unsigned long val) | ||
345 | { | ||
346 | unsigned long ret, tmp, addr64; | ||
347 | |||
348 | __asm__ __volatile__( | ||
349 | " andnot %4,7,%3\n" | ||
350 | " inswl %1,%4,%1\n" | ||
351 | "1: ldq_l %2,0(%3)\n" | ||
352 | " extwl %2,%4,%0\n" | ||
353 | " mskwl %2,%4,%2\n" | ||
354 | " or %1,%2,%2\n" | ||
355 | " stq_c %2,0(%3)\n" | ||
356 | " beq %2,2f\n" | ||
357 | #ifdef CONFIG_SMP | ||
358 | " mb\n" | ||
359 | #endif | ||
360 | ".subsection 2\n" | ||
361 | "2: br 1b\n" | ||
362 | ".previous" | ||
363 | : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) | ||
364 | : "r" ((long)m), "1" (val) : "memory"); | ||
365 | |||
366 | return ret; | ||
367 | } | ||
368 | |||
369 | static inline unsigned long | ||
370 | __xchg_u32(volatile int *m, unsigned long val) | ||
371 | { | ||
372 | unsigned long dummy; | ||
373 | |||
374 | __asm__ __volatile__( | ||
375 | "1: ldl_l %0,%4\n" | ||
376 | " bis $31,%3,%1\n" | ||
377 | " stl_c %1,%2\n" | ||
378 | " beq %1,2f\n" | ||
379 | #ifdef CONFIG_SMP | ||
380 | " mb\n" | ||
381 | #endif | ||
382 | ".subsection 2\n" | ||
383 | "2: br 1b\n" | ||
384 | ".previous" | ||
385 | : "=&r" (val), "=&r" (dummy), "=m" (*m) | ||
386 | : "rI" (val), "m" (*m) : "memory"); | ||
387 | |||
388 | return val; | ||
389 | } | ||
390 | |||
391 | static inline unsigned long | ||
392 | __xchg_u64(volatile long *m, unsigned long val) | ||
393 | { | ||
394 | unsigned long dummy; | ||
395 | |||
396 | __asm__ __volatile__( | ||
397 | "1: ldq_l %0,%4\n" | ||
398 | " bis $31,%3,%1\n" | ||
399 | " stq_c %1,%2\n" | ||
400 | " beq %1,2f\n" | ||
401 | #ifdef CONFIG_SMP | ||
402 | " mb\n" | ||
403 | #endif | ||
404 | ".subsection 2\n" | ||
405 | "2: br 1b\n" | ||
406 | ".previous" | ||
407 | : "=&r" (val), "=&r" (dummy), "=m" (*m) | ||
408 | : "rI" (val), "m" (*m) : "memory"); | ||
409 | |||
410 | return val; | ||
411 | } | ||
412 | |||
413 | /* This function doesn't exist, so you'll get a linker error | ||
414 | if something tries to do an invalid xchg(). */ | ||
415 | extern void __xchg_called_with_bad_pointer(void); | ||
416 | |||
417 | #define __xchg(ptr, x, size) \ | ||
418 | ({ \ | ||
419 | unsigned long __xchg__res; \ | ||
420 | volatile void *__xchg__ptr = (ptr); \ | ||
421 | switch (size) { \ | ||
422 | case 1: __xchg__res = __xchg_u8(__xchg__ptr, x); break; \ | ||
423 | case 2: __xchg__res = __xchg_u16(__xchg__ptr, x); break; \ | ||
424 | case 4: __xchg__res = __xchg_u32(__xchg__ptr, x); break; \ | ||
425 | case 8: __xchg__res = __xchg_u64(__xchg__ptr, x); break; \ | ||
426 | default: __xchg_called_with_bad_pointer(); __xchg__res = x; \ | ||
427 | } \ | ||
428 | __xchg__res; \ | ||
429 | }) | ||
430 | |||
431 | #define xchg(ptr,x) \ | ||
432 | ({ \ | ||
433 | __typeof__(*(ptr)) _x_ = (x); \ | ||
434 | (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \ | ||
435 | }) | ||
436 | |||
437 | static inline unsigned long | ||
438 | __xchg_u8_local(volatile char *m, unsigned long val) | ||
439 | { | ||
440 | unsigned long ret, tmp, addr64; | ||
441 | |||
442 | __asm__ __volatile__( | ||
443 | " andnot %4,7,%3\n" | ||
444 | " insbl %1,%4,%1\n" | ||
445 | "1: ldq_l %2,0(%3)\n" | ||
446 | " extbl %2,%4,%0\n" | ||
447 | " mskbl %2,%4,%2\n" | ||
448 | " or %1,%2,%2\n" | ||
449 | " stq_c %2,0(%3)\n" | ||
450 | " beq %2,2f\n" | ||
451 | ".subsection 2\n" | ||
452 | "2: br 1b\n" | ||
453 | ".previous" | ||
454 | : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) | ||
455 | : "r" ((long)m), "1" (val) : "memory"); | ||
456 | |||
457 | return ret; | ||
458 | } | ||
459 | |||
460 | static inline unsigned long | ||
461 | __xchg_u16_local(volatile short *m, unsigned long val) | ||
462 | { | ||
463 | unsigned long ret, tmp, addr64; | ||
464 | |||
465 | __asm__ __volatile__( | ||
466 | " andnot %4,7,%3\n" | ||
467 | " inswl %1,%4,%1\n" | ||
468 | "1: ldq_l %2,0(%3)\n" | ||
469 | " extwl %2,%4,%0\n" | ||
470 | " mskwl %2,%4,%2\n" | ||
471 | " or %1,%2,%2\n" | ||
472 | " stq_c %2,0(%3)\n" | ||
473 | " beq %2,2f\n" | ||
474 | ".subsection 2\n" | ||
475 | "2: br 1b\n" | ||
476 | ".previous" | ||
477 | : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) | ||
478 | : "r" ((long)m), "1" (val) : "memory"); | ||
479 | |||
480 | return ret; | ||
481 | } | ||
482 | |||
483 | static inline unsigned long | ||
484 | __xchg_u32_local(volatile int *m, unsigned long val) | ||
485 | { | ||
486 | unsigned long dummy; | ||
487 | |||
488 | __asm__ __volatile__( | ||
489 | "1: ldl_l %0,%4\n" | ||
490 | " bis $31,%3,%1\n" | ||
491 | " stl_c %1,%2\n" | ||
492 | " beq %1,2f\n" | ||
493 | ".subsection 2\n" | ||
494 | "2: br 1b\n" | ||
495 | ".previous" | ||
496 | : "=&r" (val), "=&r" (dummy), "=m" (*m) | ||
497 | : "rI" (val), "m" (*m) : "memory"); | ||
498 | |||
499 | return val; | ||
500 | } | ||
501 | |||
502 | static inline unsigned long | ||
503 | __xchg_u64_local(volatile long *m, unsigned long val) | ||
504 | { | ||
505 | unsigned long dummy; | ||
506 | |||
507 | __asm__ __volatile__( | ||
508 | "1: ldq_l %0,%4\n" | ||
509 | " bis $31,%3,%1\n" | ||
510 | " stq_c %1,%2\n" | ||
511 | " beq %1,2f\n" | ||
512 | ".subsection 2\n" | ||
513 | "2: br 1b\n" | ||
514 | ".previous" | ||
515 | : "=&r" (val), "=&r" (dummy), "=m" (*m) | ||
516 | : "rI" (val), "m" (*m) : "memory"); | ||
517 | |||
518 | return val; | ||
519 | } | ||
520 | |||
521 | #define __xchg_local(ptr, x, size) \ | ||
522 | ({ \ | ||
523 | unsigned long __xchg__res; \ | ||
524 | volatile void *__xchg__ptr = (ptr); \ | ||
525 | switch (size) { \ | ||
526 | case 1: __xchg__res = __xchg_u8_local(__xchg__ptr, x); break; \ | ||
527 | case 2: __xchg__res = __xchg_u16_local(__xchg__ptr, x); break; \ | ||
528 | case 4: __xchg__res = __xchg_u32_local(__xchg__ptr, x); break; \ | ||
529 | case 8: __xchg__res = __xchg_u64_local(__xchg__ptr, x); break; \ | ||
530 | default: __xchg_called_with_bad_pointer(); __xchg__res = x; \ | ||
531 | } \ | ||
532 | __xchg__res; \ | ||
533 | }) | ||
534 | |||
535 | #define xchg_local(ptr,x) \ | ||
536 | ({ \ | ||
537 | __typeof__(*(ptr)) _x_ = (x); \ | ||
538 | (__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_, \ | ||
539 | sizeof(*(ptr))); \ | ||
540 | }) | ||
541 | |||
542 | /* | ||
543 | * Atomic compare and exchange. Compare OLD with MEM, if identical, | ||
544 | * store NEW in MEM. Return the initial value in MEM. Success is | ||
545 | * indicated by comparing RETURN with OLD. | ||
546 | * | ||
547 | * The memory barrier should be placed in SMP only when we actually | ||
548 | * make the change. If we don't change anything (so if the returned | ||
549 | * prev is equal to old) then we aren't acquiring anything new and | ||
550 | * we don't need any memory barrier as far as I can tell. | ||
551 | */ | ||
552 | |||
553 | #define __HAVE_ARCH_CMPXCHG 1 | ||
554 | |||
555 | static inline unsigned long | ||
556 | __cmpxchg_u8(volatile char *m, long old, long new) | ||
557 | { | ||
558 | unsigned long prev, tmp, cmp, addr64; | ||
559 | |||
560 | __asm__ __volatile__( | ||
561 | " andnot %5,7,%4\n" | ||
562 | " insbl %1,%5,%1\n" | ||
563 | "1: ldq_l %2,0(%4)\n" | ||
564 | " extbl %2,%5,%0\n" | ||
565 | " cmpeq %0,%6,%3\n" | ||
566 | " beq %3,2f\n" | ||
567 | " mskbl %2,%5,%2\n" | ||
568 | " or %1,%2,%2\n" | ||
569 | " stq_c %2,0(%4)\n" | ||
570 | " beq %2,3f\n" | ||
571 | #ifdef CONFIG_SMP | ||
572 | " mb\n" | ||
573 | #endif | ||
574 | "2:\n" | ||
575 | ".subsection 2\n" | ||
576 | "3: br 1b\n" | ||
577 | ".previous" | ||
578 | : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) | ||
579 | : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); | ||
580 | |||
581 | return prev; | ||
582 | } | ||
583 | |||
584 | static inline unsigned long | ||
585 | __cmpxchg_u16(volatile short *m, long old, long new) | ||
586 | { | ||
587 | unsigned long prev, tmp, cmp, addr64; | ||
588 | |||
589 | __asm__ __volatile__( | ||
590 | " andnot %5,7,%4\n" | ||
591 | " inswl %1,%5,%1\n" | ||
592 | "1: ldq_l %2,0(%4)\n" | ||
593 | " extwl %2,%5,%0\n" | ||
594 | " cmpeq %0,%6,%3\n" | ||
595 | " beq %3,2f\n" | ||
596 | " mskwl %2,%5,%2\n" | ||
597 | " or %1,%2,%2\n" | ||
598 | " stq_c %2,0(%4)\n" | ||
599 | " beq %2,3f\n" | ||
600 | #ifdef CONFIG_SMP | ||
601 | " mb\n" | ||
602 | #endif | ||
603 | "2:\n" | ||
604 | ".subsection 2\n" | ||
605 | "3: br 1b\n" | ||
606 | ".previous" | ||
607 | : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) | ||
608 | : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); | ||
609 | |||
610 | return prev; | ||
611 | } | ||
612 | |||
613 | static inline unsigned long | ||
614 | __cmpxchg_u32(volatile int *m, int old, int new) | ||
615 | { | ||
616 | unsigned long prev, cmp; | ||
617 | |||
618 | __asm__ __volatile__( | ||
619 | "1: ldl_l %0,%5\n" | ||
620 | " cmpeq %0,%3,%1\n" | ||
621 | " beq %1,2f\n" | ||
622 | " mov %4,%1\n" | ||
623 | " stl_c %1,%2\n" | ||
624 | " beq %1,3f\n" | ||
625 | #ifdef CONFIG_SMP | ||
626 | " mb\n" | ||
627 | #endif | ||
628 | "2:\n" | ||
629 | ".subsection 2\n" | ||
630 | "3: br 1b\n" | ||
631 | ".previous" | ||
632 | : "=&r"(prev), "=&r"(cmp), "=m"(*m) | ||
633 | : "r"((long) old), "r"(new), "m"(*m) : "memory"); | ||
634 | |||
635 | return prev; | ||
636 | } | ||
637 | |||
638 | static inline unsigned long | ||
639 | __cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new) | ||
640 | { | ||
641 | unsigned long prev, cmp; | ||
642 | |||
643 | __asm__ __volatile__( | ||
644 | "1: ldq_l %0,%5\n" | ||
645 | " cmpeq %0,%3,%1\n" | ||
646 | " beq %1,2f\n" | ||
647 | " mov %4,%1\n" | ||
648 | " stq_c %1,%2\n" | ||
649 | " beq %1,3f\n" | ||
650 | #ifdef CONFIG_SMP | ||
651 | " mb\n" | ||
652 | #endif | ||
653 | "2:\n" | ||
654 | ".subsection 2\n" | ||
655 | "3: br 1b\n" | ||
656 | ".previous" | ||
657 | : "=&r"(prev), "=&r"(cmp), "=m"(*m) | ||
658 | : "r"((long) old), "r"(new), "m"(*m) : "memory"); | ||
659 | |||
660 | return prev; | ||
661 | } | ||
662 | |||
663 | /* This function doesn't exist, so you'll get a linker error | ||
664 | if something tries to do an invalid cmpxchg(). */ | ||
665 | extern void __cmpxchg_called_with_bad_pointer(void); | ||
666 | |||
667 | static __always_inline unsigned long | ||
668 | __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) | ||
669 | { | ||
670 | switch (size) { | ||
671 | case 1: | ||
672 | return __cmpxchg_u8(ptr, old, new); | ||
673 | case 2: | ||
674 | return __cmpxchg_u16(ptr, old, new); | ||
675 | case 4: | ||
676 | return __cmpxchg_u32(ptr, old, new); | ||
677 | case 8: | ||
678 | return __cmpxchg_u64(ptr, old, new); | ||
679 | } | ||
680 | __cmpxchg_called_with_bad_pointer(); | ||
681 | return old; | ||
682 | } | ||
683 | |||
684 | #define cmpxchg(ptr, o, n) \ | ||
685 | ({ \ | ||
686 | __typeof__(*(ptr)) _o_ = (o); \ | ||
687 | __typeof__(*(ptr)) _n_ = (n); \ | ||
688 | (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ | ||
689 | (unsigned long)_n_, sizeof(*(ptr))); \ | ||
690 | }) | ||
691 | #define cmpxchg64(ptr, o, n) \ | ||
692 | ({ \ | ||
693 | BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ | ||
694 | cmpxchg((ptr), (o), (n)); \ | ||
695 | }) | ||
696 | |||
697 | static inline unsigned long | ||
698 | __cmpxchg_u8_local(volatile char *m, long old, long new) | ||
699 | { | ||
700 | unsigned long prev, tmp, cmp, addr64; | ||
701 | |||
702 | __asm__ __volatile__( | ||
703 | " andnot %5,7,%4\n" | ||
704 | " insbl %1,%5,%1\n" | ||
705 | "1: ldq_l %2,0(%4)\n" | ||
706 | " extbl %2,%5,%0\n" | ||
707 | " cmpeq %0,%6,%3\n" | ||
708 | " beq %3,2f\n" | ||
709 | " mskbl %2,%5,%2\n" | ||
710 | " or %1,%2,%2\n" | ||
711 | " stq_c %2,0(%4)\n" | ||
712 | " beq %2,3f\n" | ||
713 | "2:\n" | ||
714 | ".subsection 2\n" | ||
715 | "3: br 1b\n" | ||
716 | ".previous" | ||
717 | : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) | ||
718 | : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); | ||
719 | |||
720 | return prev; | ||
721 | } | ||
722 | |||
723 | static inline unsigned long | ||
724 | __cmpxchg_u16_local(volatile short *m, long old, long new) | ||
725 | { | ||
726 | unsigned long prev, tmp, cmp, addr64; | ||
727 | |||
728 | __asm__ __volatile__( | ||
729 | " andnot %5,7,%4\n" | ||
730 | " inswl %1,%5,%1\n" | ||
731 | "1: ldq_l %2,0(%4)\n" | ||
732 | " extwl %2,%5,%0\n" | ||
733 | " cmpeq %0,%6,%3\n" | ||
734 | " beq %3,2f\n" | ||
735 | " mskwl %2,%5,%2\n" | ||
736 | " or %1,%2,%2\n" | ||
737 | " stq_c %2,0(%4)\n" | ||
738 | " beq %2,3f\n" | ||
739 | "2:\n" | ||
740 | ".subsection 2\n" | ||
741 | "3: br 1b\n" | ||
742 | ".previous" | ||
743 | : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) | ||
744 | : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); | ||
745 | |||
746 | return prev; | ||
747 | } | ||
748 | |||
749 | static inline unsigned long | ||
750 | __cmpxchg_u32_local(volatile int *m, int old, int new) | ||
751 | { | ||
752 | unsigned long prev, cmp; | ||
753 | |||
754 | __asm__ __volatile__( | ||
755 | "1: ldl_l %0,%5\n" | ||
756 | " cmpeq %0,%3,%1\n" | ||
757 | " beq %1,2f\n" | ||
758 | " mov %4,%1\n" | ||
759 | " stl_c %1,%2\n" | ||
760 | " beq %1,3f\n" | ||
761 | "2:\n" | ||
762 | ".subsection 2\n" | ||
763 | "3: br 1b\n" | ||
764 | ".previous" | ||
765 | : "=&r"(prev), "=&r"(cmp), "=m"(*m) | ||
766 | : "r"((long) old), "r"(new), "m"(*m) : "memory"); | ||
767 | |||
768 | return prev; | ||
769 | } | ||
770 | |||
771 | static inline unsigned long | ||
772 | __cmpxchg_u64_local(volatile long *m, unsigned long old, unsigned long new) | ||
773 | { | ||
774 | unsigned long prev, cmp; | ||
775 | |||
776 | __asm__ __volatile__( | ||
777 | "1: ldq_l %0,%5\n" | ||
778 | " cmpeq %0,%3,%1\n" | ||
779 | " beq %1,2f\n" | ||
780 | " mov %4,%1\n" | ||
781 | " stq_c %1,%2\n" | ||
782 | " beq %1,3f\n" | ||
783 | "2:\n" | ||
784 | ".subsection 2\n" | ||
785 | "3: br 1b\n" | ||
786 | ".previous" | ||
787 | : "=&r"(prev), "=&r"(cmp), "=m"(*m) | ||
788 | : "r"((long) old), "r"(new), "m"(*m) : "memory"); | ||
789 | |||
790 | return prev; | ||
791 | } | ||
792 | |||
793 | static __always_inline unsigned long | ||
794 | __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new, | ||
795 | int size) | ||
796 | { | ||
797 | switch (size) { | ||
798 | case 1: | ||
799 | return __cmpxchg_u8_local(ptr, old, new); | ||
800 | case 2: | ||
801 | return __cmpxchg_u16_local(ptr, old, new); | ||
802 | case 4: | ||
803 | return __cmpxchg_u32_local(ptr, old, new); | ||
804 | case 8: | ||
805 | return __cmpxchg_u64_local(ptr, old, new); | ||
806 | } | ||
807 | __cmpxchg_called_with_bad_pointer(); | ||
808 | return old; | ||
809 | } | ||
810 | |||
811 | #define cmpxchg_local(ptr, o, n) \ | ||
812 | ({ \ | ||
813 | __typeof__(*(ptr)) _o_ = (o); \ | ||
814 | __typeof__(*(ptr)) _n_ = (n); \ | ||
815 | (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \ | ||
816 | (unsigned long)_n_, sizeof(*(ptr))); \ | ||
817 | }) | ||
818 | #define cmpxchg64_local(ptr, o, n) \ | ||
819 | ({ \ | ||
820 | BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ | ||
821 | cmpxchg_local((ptr), (o), (n)); \ | ||
822 | }) | ||
823 | |||
824 | |||
825 | #endif /* __ASSEMBLY__ */ | ||
826 | |||
827 | #define arch_align_stack(x) (x) | ||
828 | |||
829 | #endif | ||
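Aside: the cmpxchg comment partway through this file spells out the contract: the old contents of memory are returned, and the new value is stored only when they match the expected one. A hedged userspace analogue of the usual retry loop, written with GCC's __sync_val_compare_and_swap builtin rather than the kernel macro:

    #include <stdio.h>

    /* Atomically add delta to *counter, cmpxchg-style: snapshot, compute,
       and retry whenever another thread changed the value in between. */
    static long atomic_add_return(volatile long *counter, long delta)
    {
            long old, new_val;

            do {
                    old = *counter;
                    new_val = old + delta;
            } while (__sync_val_compare_and_swap(counter, old, new_val) != old);

            return new_val;
    }

    int main(void)
    {
            volatile long counter = 0;

            printf("%ld\n", atomic_add_return(&counter, 5));        /* 5 */
            return 0;
    }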
diff --git a/arch/alpha/include/asm/termbits.h b/arch/alpha/include/asm/termbits.h new file mode 100644 index 000000000000..ad854a4a3af6 --- /dev/null +++ b/arch/alpha/include/asm/termbits.h | |||
@@ -0,0 +1,200 @@ | |||
1 | #ifndef _ALPHA_TERMBITS_H | ||
2 | #define _ALPHA_TERMBITS_H | ||
3 | |||
4 | #include <linux/posix_types.h> | ||
5 | |||
6 | typedef unsigned char cc_t; | ||
7 | typedef unsigned int speed_t; | ||
8 | typedef unsigned int tcflag_t; | ||
9 | |||
10 | /* | ||
11 | * termios type and macro definitions. Be careful about adding stuff | ||
12 | * to this file since it's used in GNU libc and there are strict rules | ||
13 | * concerning namespace pollution. | ||
14 | */ | ||
15 | |||
16 | #define NCCS 19 | ||
17 | struct termios { | ||
18 | tcflag_t c_iflag; /* input mode flags */ | ||
19 | tcflag_t c_oflag; /* output mode flags */ | ||
20 | tcflag_t c_cflag; /* control mode flags */ | ||
21 | tcflag_t c_lflag; /* local mode flags */ | ||
22 | cc_t c_cc[NCCS]; /* control characters */ | ||
23 | cc_t c_line; /* line discipline (== c_cc[19]) */ | ||
24 | speed_t c_ispeed; /* input speed */ | ||
25 | speed_t c_ospeed; /* output speed */ | ||
26 | }; | ||
27 | |||
28 | /* Alpha has matching termios and ktermios */ | ||
29 | |||
30 | struct ktermios { | ||
31 | tcflag_t c_iflag; /* input mode flags */ | ||
32 | tcflag_t c_oflag; /* output mode flags */ | ||
33 | tcflag_t c_cflag; /* control mode flags */ | ||
34 | tcflag_t c_lflag; /* local mode flags */ | ||
35 | cc_t c_cc[NCCS]; /* control characters */ | ||
36 | cc_t c_line; /* line discipline (== c_cc[19]) */ | ||
37 | speed_t c_ispeed; /* input speed */ | ||
38 | speed_t c_ospeed; /* output speed */ | ||
39 | }; | ||
40 | |||
41 | /* c_cc characters */ | ||
42 | #define VEOF 0 | ||
43 | #define VEOL 1 | ||
44 | #define VEOL2 2 | ||
45 | #define VERASE 3 | ||
46 | #define VWERASE 4 | ||
47 | #define VKILL 5 | ||
48 | #define VREPRINT 6 | ||
49 | #define VSWTC 7 | ||
50 | #define VINTR 8 | ||
51 | #define VQUIT 9 | ||
52 | #define VSUSP 10 | ||
53 | #define VSTART 12 | ||
54 | #define VSTOP 13 | ||
55 | #define VLNEXT 14 | ||
56 | #define VDISCARD 15 | ||
57 | #define VMIN 16 | ||
58 | #define VTIME 17 | ||
59 | |||
60 | /* c_iflag bits */ | ||
61 | #define IGNBRK 0000001 | ||
62 | #define BRKINT 0000002 | ||
63 | #define IGNPAR 0000004 | ||
64 | #define PARMRK 0000010 | ||
65 | #define INPCK 0000020 | ||
66 | #define ISTRIP 0000040 | ||
67 | #define INLCR 0000100 | ||
68 | #define IGNCR 0000200 | ||
69 | #define ICRNL 0000400 | ||
70 | #define IXON 0001000 | ||
71 | #define IXOFF 0002000 | ||
72 | #define IXANY 0004000 | ||
73 | #define IUCLC 0010000 | ||
74 | #define IMAXBEL 0020000 | ||
75 | #define IUTF8 0040000 | ||
76 | |||
77 | /* c_oflag bits */ | ||
78 | #define OPOST 0000001 | ||
79 | #define ONLCR 0000002 | ||
80 | #define OLCUC 0000004 | ||
81 | |||
82 | #define OCRNL 0000010 | ||
83 | #define ONOCR 0000020 | ||
84 | #define ONLRET 0000040 | ||
85 | |||
86 | #define OFILL 00000100 | ||
87 | #define OFDEL 00000200 | ||
88 | #define NLDLY 00001400 | ||
89 | #define NL0 00000000 | ||
90 | #define NL1 00000400 | ||
91 | #define NL2 00001000 | ||
92 | #define NL3 00001400 | ||
93 | #define TABDLY 00006000 | ||
94 | #define TAB0 00000000 | ||
95 | #define TAB1 00002000 | ||
96 | #define TAB2 00004000 | ||
97 | #define TAB3 00006000 | ||
98 | #define CRDLY 00030000 | ||
99 | #define CR0 00000000 | ||
100 | #define CR1 00010000 | ||
101 | #define CR2 00020000 | ||
102 | #define CR3 00030000 | ||
103 | #define FFDLY 00040000 | ||
104 | #define FF0 00000000 | ||
105 | #define FF1 00040000 | ||
106 | #define BSDLY 00100000 | ||
107 | #define BS0 00000000 | ||
108 | #define BS1 00100000 | ||
109 | #define VTDLY 00200000 | ||
110 | #define VT0 00000000 | ||
111 | #define VT1 00200000 | ||
112 | #define XTABS 01000000 /* Hmm.. Linux/i386 considers this part of TABDLY.. */ | ||
113 | |||
114 | /* c_cflag bit meaning */ | ||
115 | #define CBAUD 0000037 | ||
116 | #define B0 0000000 /* hang up */ | ||
117 | #define B50 0000001 | ||
118 | #define B75 0000002 | ||
119 | #define B110 0000003 | ||
120 | #define B134 0000004 | ||
121 | #define B150 0000005 | ||
122 | #define B200 0000006 | ||
123 | #define B300 0000007 | ||
124 | #define B600 0000010 | ||
125 | #define B1200 0000011 | ||
126 | #define B1800 0000012 | ||
127 | #define B2400 0000013 | ||
128 | #define B4800 0000014 | ||
129 | #define B9600 0000015 | ||
130 | #define B19200 0000016 | ||
131 | #define B38400 0000017 | ||
132 | #define EXTA B19200 | ||
133 | #define EXTB B38400 | ||
134 | #define CBAUDEX 0000000 | ||
135 | #define B57600 00020 | ||
136 | #define B115200 00021 | ||
137 | #define B230400 00022 | ||
138 | #define B460800 00023 | ||
139 | #define B500000 00024 | ||
140 | #define B576000 00025 | ||
141 | #define B921600 00026 | ||
142 | #define B1000000 00027 | ||
143 | #define B1152000 00030 | ||
144 | #define B1500000 00031 | ||
145 | #define B2000000 00032 | ||
146 | #define B2500000 00033 | ||
147 | #define B3000000 00034 | ||
148 | #define B3500000 00035 | ||
149 | #define B4000000 00036 | ||
150 | |||
151 | #define CSIZE 00001400 | ||
152 | #define CS5 00000000 | ||
153 | #define CS6 00000400 | ||
154 | #define CS7 00001000 | ||
155 | #define CS8 00001400 | ||
156 | |||
157 | #define CSTOPB 00002000 | ||
158 | #define CREAD 00004000 | ||
159 | #define PARENB 00010000 | ||
160 | #define PARODD 00020000 | ||
161 | #define HUPCL 00040000 | ||
162 | |||
163 | #define CLOCAL 00100000 | ||
164 | #define CMSPAR 010000000000 /* mark or space (stick) parity */ | ||
165 | #define CRTSCTS 020000000000 /* flow control */ | ||
166 | |||
167 | /* c_lflag bits */ | ||
168 | #define ISIG 0x00000080 | ||
169 | #define ICANON 0x00000100 | ||
170 | #define XCASE 0x00004000 | ||
171 | #define ECHO 0x00000008 | ||
172 | #define ECHOE 0x00000002 | ||
173 | #define ECHOK 0x00000004 | ||
174 | #define ECHONL 0x00000010 | ||
175 | #define NOFLSH 0x80000000 | ||
176 | #define TOSTOP 0x00400000 | ||
177 | #define ECHOCTL 0x00000040 | ||
178 | #define ECHOPRT 0x00000020 | ||
179 | #define ECHOKE 0x00000001 | ||
180 | #define FLUSHO 0x00800000 | ||
181 | #define PENDIN 0x20000000 | ||
182 | #define IEXTEN 0x00000400 | ||
183 | |||
184 | /* Values for the ACTION argument to `tcflow'. */ | ||
185 | #define TCOOFF 0 | ||
186 | #define TCOON 1 | ||
187 | #define TCIOFF 2 | ||
188 | #define TCION 3 | ||
189 | |||
190 | /* Values for the QUEUE_SELECTOR argument to `tcflush'. */ | ||
191 | #define TCIFLUSH 0 | ||
192 | #define TCOFLUSH 1 | ||
193 | #define TCIOFLUSH 2 | ||
194 | |||
195 | /* Values for the OPTIONAL_ACTIONS argument to `tcsetattr'. */ | ||
196 | #define TCSANOW 0 | ||
197 | #define TCSADRAIN 1 | ||
198 | #define TCSAFLUSH 2 | ||
199 | |||
200 | #endif /* _ALPHA_TERMBITS_H */ | ||
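The definitions above are the raw bit layout that the standard termios API manipulates: the baud selector sits in the low c_cflag bits (CBAUD), with CSIZE/CSTOPB/PARENB/PARODD following. A small userspace sketch using only the portable helpers, assuming a serial file descriptor fd (illustration only):

#include <termios.h>

/* Configure fd for 9600 baud, 8 data bits, no parity, 1 stop bit. */
static int set_9600_8n1(int fd)
{
	struct termios t;

	if (tcgetattr(fd, &t) < 0)
		return -1;
	cfsetispeed(&t, B9600);
	cfsetospeed(&t, B9600);
	t.c_cflag &= ~(CSIZE | PARENB | CSTOPB);	/* clear size, parity, stop-bit fields */
	t.c_cflag |= CS8 | CREAD | CLOCAL;		/* 8 data bits, enable receiver */
	return tcsetattr(fd, TCSANOW, &t);
}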
diff --git a/arch/alpha/include/asm/termios.h b/arch/alpha/include/asm/termios.h new file mode 100644 index 000000000000..fa13716a11c3 --- /dev/null +++ b/arch/alpha/include/asm/termios.h | |||
@@ -0,0 +1,146 @@ | |||
1 | #ifndef _ALPHA_TERMIOS_H | ||
2 | #define _ALPHA_TERMIOS_H | ||
3 | |||
4 | #include <asm/ioctls.h> | ||
5 | #include <asm/termbits.h> | ||
6 | |||
7 | struct sgttyb { | ||
8 | char sg_ispeed; | ||
9 | char sg_ospeed; | ||
10 | char sg_erase; | ||
11 | char sg_kill; | ||
12 | short sg_flags; | ||
13 | }; | ||
14 | |||
15 | struct tchars { | ||
16 | char t_intrc; | ||
17 | char t_quitc; | ||
18 | char t_startc; | ||
19 | char t_stopc; | ||
20 | char t_eofc; | ||
21 | char t_brkc; | ||
22 | }; | ||
23 | |||
24 | struct ltchars { | ||
25 | char t_suspc; | ||
26 | char t_dsuspc; | ||
27 | char t_rprntc; | ||
28 | char t_flushc; | ||
29 | char t_werasc; | ||
30 | char t_lnextc; | ||
31 | }; | ||
32 | |||
33 | struct winsize { | ||
34 | unsigned short ws_row; | ||
35 | unsigned short ws_col; | ||
36 | unsigned short ws_xpixel; | ||
37 | unsigned short ws_ypixel; | ||
38 | }; | ||
39 | |||
40 | #define NCC 8 | ||
41 | struct termio { | ||
42 | unsigned short c_iflag; /* input mode flags */ | ||
43 | unsigned short c_oflag; /* output mode flags */ | ||
44 | unsigned short c_cflag; /* control mode flags */ | ||
45 | unsigned short c_lflag; /* local mode flags */ | ||
46 | unsigned char c_line; /* line discipline */ | ||
47 | unsigned char c_cc[NCC]; /* control characters */ | ||
48 | }; | ||
49 | |||
50 | /* | ||
51 | * c_cc characters in the termio structure. Oh, how I love being | ||
52 | * backwardly compatible. Notice that characters 4 and 5 are | ||
53 | * interpreted differently depending on whether ICANON is set in | ||
54 | * c_lflag. If it's set, they are used as _VEOF and _VEOL, otherwise | ||
55 | * as _VMIN and _VTIME. This is for compatibility with OSF/1 (which | ||
56 | * is compatible with sysV)... | ||
57 | */ | ||
58 | #define _VINTR 0 | ||
59 | #define _VQUIT 1 | ||
60 | #define _VERASE 2 | ||
61 | #define _VKILL 3 | ||
62 | #define _VEOF 4 | ||
63 | #define _VMIN 4 | ||
64 | #define _VEOL 5 | ||
65 | #define _VTIME 5 | ||
66 | #define _VEOL2 6 | ||
67 | #define _VSWTC 7 | ||
68 | |||
69 | #ifdef __KERNEL__ | ||
70 | /* eof=^D eol=\0 eol2=\0 erase=del | ||
71 | werase=^W kill=^U reprint=^R sxtc=\0 | ||
72 | intr=^C quit=^\ susp=^Z <OSF/1 VDSUSP> | ||
73 | start=^Q stop=^S lnext=^V discard=^U | ||
74 | vmin=\1 vtime=\0 | ||
75 | */ | ||
76 | #define INIT_C_CC "\004\000\000\177\027\025\022\000\003\034\032\000\021\023\026\025\001\000" | ||
77 | |||
78 | /* | ||
79 | * Translate a "termio" structure into a "termios". Ugh. | ||
80 | */ | ||
81 | |||
82 | #define user_termio_to_kernel_termios(a_termios, u_termio) \ | ||
83 | ({ \ | ||
84 | struct ktermios *k_termios = (a_termios); \ | ||
85 | struct termio k_termio; \ | ||
86 | int canon, ret; \ | ||
87 | \ | ||
88 | ret = copy_from_user(&k_termio, u_termio, sizeof(k_termio)); \ | ||
89 | if (!ret) { \ | ||
90 | /* Overwrite only the low bits. */ \ | ||
91 | *(unsigned short *)&k_termios->c_iflag = k_termio.c_iflag; \ | ||
92 | *(unsigned short *)&k_termios->c_oflag = k_termio.c_oflag; \ | ||
93 | *(unsigned short *)&k_termios->c_cflag = k_termio.c_cflag; \ | ||
94 | *(unsigned short *)&k_termios->c_lflag = k_termio.c_lflag; \ | ||
95 | canon = k_termio.c_lflag & ICANON; \ | ||
96 | \ | ||
97 | k_termios->c_cc[VINTR] = k_termio.c_cc[_VINTR]; \ | ||
98 | k_termios->c_cc[VQUIT] = k_termio.c_cc[_VQUIT]; \ | ||
99 | k_termios->c_cc[VERASE] = k_termio.c_cc[_VERASE]; \ | ||
100 | k_termios->c_cc[VKILL] = k_termio.c_cc[_VKILL]; \ | ||
101 | k_termios->c_cc[VEOL2] = k_termio.c_cc[_VEOL2]; \ | ||
102 | k_termios->c_cc[VSWTC] = k_termio.c_cc[_VSWTC]; \ | ||
103 | k_termios->c_cc[canon ? VEOF : VMIN] = k_termio.c_cc[_VEOF]; \ | ||
104 | k_termios->c_cc[canon ? VEOL : VTIME] = k_termio.c_cc[_VEOL]; \ | ||
105 | } \ | ||
106 | ret; \ | ||
107 | }) | ||
108 | |||
109 | /* | ||
110 | * Translate a "termios" structure into a "termio". Ugh. | ||
111 | * | ||
112 | * Note the "fun" _VMIN overloading. | ||
113 | */ | ||
114 | #define kernel_termios_to_user_termio(u_termio, a_termios) \ | ||
115 | ({ \ | ||
116 | struct ktermios *k_termios = (a_termios); \ | ||
117 | struct termio k_termio; \ | ||
118 | int canon; \ | ||
119 | \ | ||
120 | k_termio.c_iflag = k_termios->c_iflag; \ | ||
121 | k_termio.c_oflag = k_termios->c_oflag; \ | ||
122 | k_termio.c_cflag = k_termios->c_cflag; \ | ||
123 | canon = (k_termio.c_lflag = k_termios->c_lflag) & ICANON; \ | ||
124 | \ | ||
125 | k_termio.c_line = k_termios->c_line; \ | ||
126 | k_termio.c_cc[_VINTR] = k_termios->c_cc[VINTR]; \ | ||
127 | k_termio.c_cc[_VQUIT] = k_termios->c_cc[VQUIT]; \ | ||
128 | k_termio.c_cc[_VERASE] = k_termios->c_cc[VERASE]; \ | ||
129 | k_termio.c_cc[_VKILL] = k_termios->c_cc[VKILL]; \ | ||
130 | k_termio.c_cc[_VEOF] = k_termios->c_cc[canon ? VEOF : VMIN]; \ | ||
131 | k_termio.c_cc[_VEOL] = k_termios->c_cc[canon ? VEOL : VTIME]; \ | ||
132 | k_termio.c_cc[_VEOL2] = k_termios->c_cc[VEOL2]; \ | ||
133 | k_termio.c_cc[_VSWTC] = k_termios->c_cc[VSWTC]; \ | ||
134 | \ | ||
135 | copy_to_user(u_termio, &k_termio, sizeof(k_termio)); \ | ||
136 | }) | ||
137 | |||
138 | #define user_termios_to_kernel_termios(k, u) \ | ||
139 | copy_from_user(k, u, sizeof(struct termios)) | ||
140 | |||
141 | #define kernel_termios_to_user_termios(u, k) \ | ||
142 | copy_to_user(u, k, sizeof(struct termios)) | ||
143 | |||
144 | #endif /* __KERNEL__ */ | ||
145 | |||
146 | #endif /* _ALPHA_TERMIOS_H */ | ||
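The conversion macros above implement the slot overloading noted in the comment: when ICANON is clear, the legacy termio bytes that normally carry EOF/EOL carry MIN/TIME instead. A tiny sketch of filling a termio for raw mode under that convention (hypothetical values, illustration only):

#include <asm/termios.h>

static void fill_raw_termio(struct termio *tio)
{
	tio->c_lflag &= ~ICANON;	/* raw mode: slots 4/5 act as MIN/TIME */
	tio->c_cc[_VMIN] = 1;		/* same byte as _VEOF */
	tio->c_cc[_VTIME] = 0;		/* same byte as _VEOL */
}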
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h new file mode 100644 index 000000000000..15fda4344424 --- /dev/null +++ b/arch/alpha/include/asm/thread_info.h | |||
@@ -0,0 +1,114 @@ | |||
1 | #ifndef _ALPHA_THREAD_INFO_H | ||
2 | #define _ALPHA_THREAD_INFO_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | |||
6 | #ifndef __ASSEMBLY__ | ||
7 | #include <asm/processor.h> | ||
8 | #include <asm/types.h> | ||
9 | #include <asm/hwrpb.h> | ||
10 | #endif | ||
11 | |||
12 | #ifndef __ASSEMBLY__ | ||
13 | struct thread_info { | ||
14 | struct pcb_struct pcb; /* palcode state */ | ||
15 | |||
16 | struct task_struct *task; /* main task structure */ | ||
17 | unsigned int flags; /* low level flags */ | ||
18 | unsigned int ieee_state; /* see fpu.h */ | ||
19 | |||
20 | struct exec_domain *exec_domain; /* execution domain */ | ||
21 | mm_segment_t addr_limit; /* thread address space */ | ||
22 | unsigned cpu; /* current CPU */ | ||
23 | int preempt_count; /* 0 => preemptable, <0 => BUG */ | ||
24 | |||
25 | int bpt_nsaved; | ||
26 | unsigned long bpt_addr[2]; /* breakpoint handling */ | ||
27 | unsigned int bpt_insn[2]; | ||
28 | |||
29 | struct restart_block restart_block; | ||
30 | }; | ||
31 | |||
32 | /* | ||
33 | * Macros/functions for gaining access to the thread information structure. | ||
34 | */ | ||
35 | #define INIT_THREAD_INFO(tsk) \ | ||
36 | { \ | ||
37 | .task = &tsk, \ | ||
38 | .exec_domain = &default_exec_domain, \ | ||
39 | .addr_limit = KERNEL_DS, \ | ||
40 | .restart_block = { \ | ||
41 | .fn = do_no_restart_syscall, \ | ||
42 | }, \ | ||
43 | } | ||
44 | |||
45 | #define init_thread_info (init_thread_union.thread_info) | ||
46 | #define init_stack (init_thread_union.stack) | ||
47 | |||
48 | /* How to get the thread information struct from C. */ | ||
49 | register struct thread_info *__current_thread_info __asm__("$8"); | ||
50 | #define current_thread_info() __current_thread_info | ||
51 | |||
52 | /* Thread information allocation. */ | ||
53 | #define THREAD_SIZE_ORDER 1 | ||
54 | #define THREAD_SIZE (2*PAGE_SIZE) | ||
55 | |||
56 | #endif /* __ASSEMBLY__ */ | ||
57 | |||
58 | #define PREEMPT_ACTIVE 0x40000000 | ||
59 | |||
60 | /* | ||
61 | * Thread information flags: | ||
62 | * - these are process state flags and used from assembly | ||
63 | * - pending work-to-be-done flags come first to fit in an immediate operand. | ||
64 | * | ||
65 | * TIF_SYSCALL_TRACE is known to be 0 via blbs. | ||
66 | */ | ||
67 | #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ | ||
68 | #define TIF_SIGPENDING 1 /* signal pending */ | ||
69 | #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ | ||
70 | #define TIF_POLLING_NRFLAG 3 /* poll_idle is polling NEED_RESCHED */ | ||
71 | #define TIF_DIE_IF_KERNEL 4 /* dik recursion lock */ | ||
72 | #define TIF_UAC_NOPRINT 5 /* see sysinfo.h */ | ||
73 | #define TIF_UAC_NOFIX 6 | ||
74 | #define TIF_UAC_SIGBUS 7 | ||
75 | #define TIF_MEMDIE 8 | ||
76 | #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal */ | ||
77 | |||
78 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) | ||
79 | #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) | ||
80 | #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) | ||
81 | #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) | ||
82 | #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) | ||
83 | |||
84 | /* Work to do on interrupt/exception return. */ | ||
85 | #define _TIF_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED) | ||
86 | |||
87 | /* Work to do on any return to userspace. */ | ||
88 | #define _TIF_ALLWORK_MASK (_TIF_WORK_MASK \ | ||
89 | | _TIF_SYSCALL_TRACE) | ||
90 | |||
91 | #define ALPHA_UAC_SHIFT 6 | ||
92 | #define ALPHA_UAC_MASK (1 << TIF_UAC_NOPRINT | 1 << TIF_UAC_NOFIX | \ | ||
93 | 1 << TIF_UAC_SIGBUS) | ||
94 | |||
95 | #define SET_UNALIGN_CTL(task,value) ({ \ | ||
96 | task_thread_info(task)->flags = ((task_thread_info(task)->flags & \ | ||
97 | ~ALPHA_UAC_MASK) \ | ||
98 | | (((value) << ALPHA_UAC_SHIFT) & (1<<TIF_UAC_NOPRINT))\ | ||
99 | | (((value) << (ALPHA_UAC_SHIFT + 1)) & (1<<TIF_UAC_SIGBUS)) \ | ||
100 | | (((value) << (ALPHA_UAC_SHIFT - 1)) & (1<<TIF_UAC_NOFIX)));\ | ||
101 | 0; }) | ||
102 | |||
103 | #define GET_UNALIGN_CTL(task,value) ({ \ | ||
104 | put_user((task_thread_info(task)->flags & (1 << TIF_UAC_NOPRINT))\ | ||
105 | >> ALPHA_UAC_SHIFT \ | ||
106 | | (task_thread_info(task)->flags & (1 << TIF_UAC_SIGBUS))\ | ||
107 | >> (ALPHA_UAC_SHIFT + 1) \ | ||
108 | | (task_thread_info(task)->flags & (1 << TIF_UAC_NOFIX))\ | ||
109 | >> (ALPHA_UAC_SHIFT - 1), \ | ||
110 | (int __user *)(value)); \ | ||
111 | }) | ||
112 | |||
113 | #endif /* __KERNEL__ */ | ||
114 | #endif /* _ALPHA_THREAD_INFO_H */ | ||
diff --git a/arch/alpha/include/asm/timex.h b/arch/alpha/include/asm/timex.h new file mode 100644 index 000000000000..afa0c45e3e98 --- /dev/null +++ b/arch/alpha/include/asm/timex.h | |||
@@ -0,0 +1,31 @@ | |||
1 | /* | ||
2 | * linux/include/asm-alpha/timex.h | ||
3 | * | ||
4 | * ALPHA architecture timex specifications | ||
5 | */ | ||
6 | #ifndef _ASMALPHA_TIMEX_H | ||
7 | #define _ASMALPHA_TIMEX_H | ||
8 | |||
9 | /* With only one or two oddballs, we use the RTC as the ticker, selecting | ||
10 | the 32.768kHz reference clock, which nicely divides down to our HZ. */ | ||
11 | #define CLOCK_TICK_RATE 32768 | ||
12 | |||
13 | /* | ||
14 | * Standard way to access the cycle counter. | ||
15 | * Currently only used on SMP for scheduling. | ||
16 | * | ||
17 | * Only the low 32 bits are available as a continuously counting entity. | ||
18 | * But this only means we'll force a reschedule every 8 seconds or so, | ||
19 | * which isn't an evil thing. | ||
20 | */ | ||
21 | |||
22 | typedef unsigned int cycles_t; | ||
23 | |||
24 | static inline cycles_t get_cycles (void) | ||
25 | { | ||
26 | cycles_t ret; | ||
27 | __asm__ __volatile__ ("rpcc %0" : "=r"(ret)); | ||
28 | return ret; | ||
29 | } | ||
30 | |||
31 | #endif | ||
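Since get_cycles() above exposes only the low 32 bits of the process cycle counter, differences are meaningful only for intervals well under 2^32 cycles. A sketch of timing a short region under that assumption (illustration only; fn is a hypothetical callee):

#include <asm/timex.h>

static cycles_t time_region(void (*fn)(void))
{
	cycles_t start = get_cycles();

	fn();
	/* unsigned 32-bit subtraction tolerates a single counter wrap */
	return get_cycles() - start;
}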
diff --git a/arch/alpha/include/asm/tlb.h b/arch/alpha/include/asm/tlb.h new file mode 100644 index 000000000000..c13636575fba --- /dev/null +++ b/arch/alpha/include/asm/tlb.h | |||
@@ -0,0 +1,15 @@ | |||
1 | #ifndef _ALPHA_TLB_H | ||
2 | #define _ALPHA_TLB_H | ||
3 | |||
4 | #define tlb_start_vma(tlb, vma) do { } while (0) | ||
5 | #define tlb_end_vma(tlb, vma) do { } while (0) | ||
6 | #define __tlb_remove_tlb_entry(tlb, pte, addr) do { } while (0) | ||
7 | |||
8 | #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) | ||
9 | |||
10 | #include <asm-generic/tlb.h> | ||
11 | |||
12 | #define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte) | ||
13 | #define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd) | ||
14 | |||
15 | #endif | ||
diff --git a/arch/alpha/include/asm/tlbflush.h b/arch/alpha/include/asm/tlbflush.h new file mode 100644 index 000000000000..9d87aaa08c0d --- /dev/null +++ b/arch/alpha/include/asm/tlbflush.h | |||
@@ -0,0 +1,151 @@ | |||
1 | #ifndef _ALPHA_TLBFLUSH_H | ||
2 | #define _ALPHA_TLBFLUSH_H | ||
3 | |||
4 | #include <linux/mm.h> | ||
5 | #include <asm/compiler.h> | ||
6 | #include <asm/pgalloc.h> | ||
7 | |||
8 | #ifndef __EXTERN_INLINE | ||
9 | #define __EXTERN_INLINE extern inline | ||
10 | #define __MMU_EXTERN_INLINE | ||
11 | #endif | ||
12 | |||
13 | extern void __load_new_mm_context(struct mm_struct *); | ||
14 | |||
15 | |||
16 | /* Use a few helper functions to hide the ugly broken ASN | ||
17 | numbers on early Alphas (ev4 and ev45). */ | ||
18 | |||
19 | __EXTERN_INLINE void | ||
20 | ev4_flush_tlb_current(struct mm_struct *mm) | ||
21 | { | ||
22 | __load_new_mm_context(mm); | ||
23 | tbiap(); | ||
24 | } | ||
25 | |||
26 | __EXTERN_INLINE void | ||
27 | ev5_flush_tlb_current(struct mm_struct *mm) | ||
28 | { | ||
29 | __load_new_mm_context(mm); | ||
30 | } | ||
31 | |||
32 | /* Flush just one page in the current TLB set. We need to be very | ||
33 | careful about the icache here, there is no way to invalidate a | ||
34 | specific icache page. */ | ||
35 | |||
36 | __EXTERN_INLINE void | ||
37 | ev4_flush_tlb_current_page(struct mm_struct * mm, | ||
38 | struct vm_area_struct *vma, | ||
39 | unsigned long addr) | ||
40 | { | ||
41 | int tbi_flag = 2; | ||
42 | if (vma->vm_flags & VM_EXEC) { | ||
43 | __load_new_mm_context(mm); | ||
44 | tbi_flag = 3; | ||
45 | } | ||
46 | tbi(tbi_flag, addr); | ||
47 | } | ||
48 | |||
49 | __EXTERN_INLINE void | ||
50 | ev5_flush_tlb_current_page(struct mm_struct * mm, | ||
51 | struct vm_area_struct *vma, | ||
52 | unsigned long addr) | ||
53 | { | ||
54 | if (vma->vm_flags & VM_EXEC) | ||
55 | __load_new_mm_context(mm); | ||
56 | else | ||
57 | tbi(2, addr); | ||
58 | } | ||
59 | |||
60 | |||
61 | #ifdef CONFIG_ALPHA_GENERIC | ||
62 | # define flush_tlb_current alpha_mv.mv_flush_tlb_current | ||
63 | # define flush_tlb_current_page alpha_mv.mv_flush_tlb_current_page | ||
64 | #else | ||
65 | # ifdef CONFIG_ALPHA_EV4 | ||
66 | # define flush_tlb_current ev4_flush_tlb_current | ||
67 | # define flush_tlb_current_page ev4_flush_tlb_current_page | ||
68 | # else | ||
69 | # define flush_tlb_current ev5_flush_tlb_current | ||
70 | # define flush_tlb_current_page ev5_flush_tlb_current_page | ||
71 | # endif | ||
72 | #endif | ||
73 | |||
74 | #ifdef __MMU_EXTERN_INLINE | ||
75 | #undef __EXTERN_INLINE | ||
76 | #undef __MMU_EXTERN_INLINE | ||
77 | #endif | ||
78 | |||
79 | /* Flush current user mapping. */ | ||
80 | static inline void | ||
81 | flush_tlb(void) | ||
82 | { | ||
83 | flush_tlb_current(current->active_mm); | ||
84 | } | ||
85 | |||
86 | /* Flush someone else's user mapping. */ | ||
87 | static inline void | ||
88 | flush_tlb_other(struct mm_struct *mm) | ||
89 | { | ||
90 | unsigned long *mmc = &mm->context[smp_processor_id()]; | ||
91 | /* Check it's not zero first to avoid cacheline ping pong | ||
92 | when possible. */ | ||
93 | if (*mmc) *mmc = 0; | ||
94 | } | ||
95 | |||
96 | #ifndef CONFIG_SMP | ||
97 | /* Flush everything (kernel mapping may also have changed | ||
98 | due to vmalloc/vfree). */ | ||
99 | static inline void flush_tlb_all(void) | ||
100 | { | ||
101 | tbia(); | ||
102 | } | ||
103 | |||
104 | /* Flush a specified user mapping. */ | ||
105 | static inline void | ||
106 | flush_tlb_mm(struct mm_struct *mm) | ||
107 | { | ||
108 | if (mm == current->active_mm) | ||
109 | flush_tlb_current(mm); | ||
110 | else | ||
111 | flush_tlb_other(mm); | ||
112 | } | ||
113 | |||
114 | /* Page-granular tlb flush. */ | ||
115 | static inline void | ||
116 | flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) | ||
117 | { | ||
118 | struct mm_struct *mm = vma->vm_mm; | ||
119 | |||
120 | if (mm == current->active_mm) | ||
121 | flush_tlb_current_page(mm, vma, addr); | ||
122 | else | ||
123 | flush_tlb_other(mm); | ||
124 | } | ||
125 | |||
126 | /* Flush a specified range of a user mapping. On the Alpha we flush | ||
127 | the whole user tlb. */ | ||
128 | static inline void | ||
129 | flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | ||
130 | unsigned long end) | ||
131 | { | ||
132 | flush_tlb_mm(vma->vm_mm); | ||
133 | } | ||
134 | |||
135 | #else /* CONFIG_SMP */ | ||
136 | |||
137 | extern void flush_tlb_all(void); | ||
138 | extern void flush_tlb_mm(struct mm_struct *); | ||
139 | extern void flush_tlb_page(struct vm_area_struct *, unsigned long); | ||
140 | extern void flush_tlb_range(struct vm_area_struct *, unsigned long, | ||
141 | unsigned long); | ||
142 | |||
143 | #endif /* CONFIG_SMP */ | ||
144 | |||
145 | static inline void flush_tlb_kernel_range(unsigned long start, | ||
146 | unsigned long end) | ||
147 | { | ||
148 | flush_tlb_all(); | ||
149 | } | ||
150 | |||
151 | #endif /* _ALPHA_TLBFLUSH_H */ | ||
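flush_tlb_page() above picks the ev4/ev5 helper appropriate for the machine, reloading the mm context for executable mappings as the comment explains. A hedged sketch of the caller-side pattern, assuming Alpha's two-argument set_pte() (names and context are illustrative, not part of this diff):

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

static void install_and_flush(struct vm_area_struct *vma, unsigned long addr,
			      pte_t *ptep, pte_t newpte)
{
	set_pte(ptep, newpte);		/* assumed PTE setter; illustration only */
	flush_tlb_page(vma, addr);	/* flush just this page's translation */
}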
diff --git a/arch/alpha/include/asm/topology.h b/arch/alpha/include/asm/topology.h new file mode 100644 index 000000000000..149532e162c4 --- /dev/null +++ b/arch/alpha/include/asm/topology.h | |||
@@ -0,0 +1,47 @@ | |||
1 | #ifndef _ASM_ALPHA_TOPOLOGY_H | ||
2 | #define _ASM_ALPHA_TOPOLOGY_H | ||
3 | |||
4 | #include <linux/smp.h> | ||
5 | #include <linux/threads.h> | ||
6 | #include <asm/machvec.h> | ||
7 | |||
8 | #ifdef CONFIG_NUMA | ||
9 | static inline int cpu_to_node(int cpu) | ||
10 | { | ||
11 | int node; | ||
12 | |||
13 | if (!alpha_mv.cpuid_to_nid) | ||
14 | return 0; | ||
15 | |||
16 | node = alpha_mv.cpuid_to_nid(cpu); | ||
17 | |||
18 | #ifdef DEBUG_NUMA | ||
19 | BUG_ON(node < 0); | ||
20 | #endif | ||
21 | |||
22 | return node; | ||
23 | } | ||
24 | |||
25 | static inline cpumask_t node_to_cpumask(int node) | ||
26 | { | ||
27 | cpumask_t node_cpu_mask = CPU_MASK_NONE; | ||
28 | int cpu; | ||
29 | |||
30 | for_each_online_cpu(cpu) { | ||
31 | if (cpu_to_node(cpu) == node) | ||
32 | cpu_set(cpu, node_cpu_mask); | ||
33 | } | ||
34 | |||
35 | #ifdef DEBUG_NUMA | ||
36 | printk("node %d: cpu_mask: %016lx\n", node, node_cpu_mask); | ||
37 | #endif | ||
38 | |||
39 | return node_cpu_mask; | ||
40 | } | ||
41 | |||
42 | #define pcibus_to_cpumask(bus) (cpu_online_map) | ||
43 | |||
44 | #endif /* CONFIG_NUMA */ | ||
45 | # include <asm-generic/topology.h> | ||
46 | |||
47 | #endif /* _ASM_ALPHA_TOPOLOGY_H */ | ||
diff --git a/arch/alpha/include/asm/types.h b/arch/alpha/include/asm/types.h new file mode 100644 index 000000000000..c1541353ccef --- /dev/null +++ b/arch/alpha/include/asm/types.h | |||
@@ -0,0 +1,33 @@ | |||
1 | #ifndef _ALPHA_TYPES_H | ||
2 | #define _ALPHA_TYPES_H | ||
3 | |||
4 | /* | ||
5 | * This file is never included by application software unless | ||
6 | * explicitly requested (e.g., via linux/types.h) in which case the | ||
7 | * application is Linux specific so (user-) name space pollution is | ||
8 | * not a major issue. However, for interoperability, libraries still | ||
9 | * need to be careful to avoid name clashes. | ||
10 | */ | ||
11 | #include <asm-generic/int-l64.h> | ||
12 | |||
13 | #ifndef __ASSEMBLY__ | ||
14 | |||
15 | typedef unsigned int umode_t; | ||
16 | |||
17 | #endif /* __ASSEMBLY__ */ | ||
18 | |||
19 | /* | ||
20 | * These aren't exported outside the kernel to avoid name space clashes | ||
21 | */ | ||
22 | #ifdef __KERNEL__ | ||
23 | |||
24 | #define BITS_PER_LONG 64 | ||
25 | |||
26 | #ifndef __ASSEMBLY__ | ||
27 | |||
28 | typedef u64 dma_addr_t; | ||
29 | typedef u64 dma64_addr_t; | ||
30 | |||
31 | #endif /* __ASSEMBLY__ */ | ||
32 | #endif /* __KERNEL__ */ | ||
33 | #endif /* _ALPHA_TYPES_H */ | ||
diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h new file mode 100644 index 000000000000..22de3b434a22 --- /dev/null +++ b/arch/alpha/include/asm/uaccess.h | |||
@@ -0,0 +1,511 @@ | |||
1 | #ifndef __ALPHA_UACCESS_H | ||
2 | #define __ALPHA_UACCESS_H | ||
3 | |||
4 | #include <linux/errno.h> | ||
5 | #include <linux/sched.h> | ||
6 | |||
7 | |||
8 | /* | ||
9 | * The fs value determines whether argument validity checking should be | ||
10 | * performed or not. If get_fs() == USER_DS, checking is performed, with | ||
11 | * get_fs() == KERNEL_DS, checking is bypassed. | ||
12 | * | ||
13 | * Or at least it did once upon a time. Nowadays it is a mask that | ||
14 | * defines which bits of the address space are off limits. This is a | ||
15 | * wee bit faster than the above. | ||
16 | * | ||
17 | * For historical reasons, these macros are grossly misnamed. | ||
18 | */ | ||
19 | |||
20 | #define KERNEL_DS ((mm_segment_t) { 0UL }) | ||
21 | #define USER_DS ((mm_segment_t) { -0x40000000000UL }) | ||
22 | |||
23 | #define VERIFY_READ 0 | ||
24 | #define VERIFY_WRITE 1 | ||
25 | |||
26 | #define get_fs() (current_thread_info()->addr_limit) | ||
27 | #define get_ds() (KERNEL_DS) | ||
28 | #define set_fs(x) (current_thread_info()->addr_limit = (x)) | ||
29 | |||
30 | #define segment_eq(a,b) ((a).seg == (b).seg) | ||
31 | |||
32 | /* | ||
33 | * Is an address valid? This does a straightforward calculation rather | ||
34 | * than tests. | ||
35 | * | ||
36 | * Address valid if: | ||
37 | * - "addr" doesn't have any high-bits set | ||
38 | * - AND "size" doesn't have any high-bits set | ||
39 | * - AND "addr+size" doesn't have any high-bits set | ||
40 | * - OR we are in kernel mode. | ||
41 | */ | ||
42 | #define __access_ok(addr,size,segment) \ | ||
43 | (((segment).seg & (addr | size | (addr+size))) == 0) | ||
44 | |||
45 | #define access_ok(type,addr,size) \ | ||
46 | ({ \ | ||
47 | __chk_user_ptr(addr); \ | ||
48 | __access_ok(((unsigned long)(addr)),(size),get_fs()); \ | ||
49 | }) | ||
50 | |||
51 | /* | ||
52 | * These are the main single-value transfer routines. They automatically | ||
53 | * use the right size if we just have the right pointer type. | ||
54 | * | ||
55 | * As the alpha uses the same address space for kernel and user | ||
56 | * data, we can just do these as direct assignments. (Of course, the | ||
57 | * exception handling means that it's no longer "just"...) | ||
58 | * | ||
59 | * Careful to not | ||
60 | * (a) re-use the arguments for side effects (sizeof/typeof is ok) | ||
61 | * (b) require any knowledge of processes at this stage | ||
62 | */ | ||
63 | #define put_user(x,ptr) \ | ||
64 | __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)),get_fs()) | ||
65 | #define get_user(x,ptr) \ | ||
66 | __get_user_check((x),(ptr),sizeof(*(ptr)),get_fs()) | ||
67 | |||
68 | /* | ||
69 | * The "__xxx" versions do not do address space checking, useful when | ||
70 | * doing multiple accesses to the same area (the programmer has to do the | ||
71 | * checks by hand with "access_ok()") | ||
72 | */ | ||
73 | #define __put_user(x,ptr) \ | ||
74 | __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) | ||
75 | #define __get_user(x,ptr) \ | ||
76 | __get_user_nocheck((x),(ptr),sizeof(*(ptr))) | ||
77 | |||
78 | /* | ||
79 | * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to | ||
80 | * encode the bits we need for resolving the exception. See the | ||
81 | * more extensive comments with fixup_inline_exception below for | ||
82 | * more information. | ||
83 | */ | ||
84 | |||
85 | extern void __get_user_unknown(void); | ||
86 | |||
87 | #define __get_user_nocheck(x,ptr,size) \ | ||
88 | ({ \ | ||
89 | long __gu_err = 0; \ | ||
90 | unsigned long __gu_val; \ | ||
91 | __chk_user_ptr(ptr); \ | ||
92 | switch (size) { \ | ||
93 | case 1: __get_user_8(ptr); break; \ | ||
94 | case 2: __get_user_16(ptr); break; \ | ||
95 | case 4: __get_user_32(ptr); break; \ | ||
96 | case 8: __get_user_64(ptr); break; \ | ||
97 | default: __get_user_unknown(); break; \ | ||
98 | } \ | ||
99 | (x) = (__typeof__(*(ptr))) __gu_val; \ | ||
100 | __gu_err; \ | ||
101 | }) | ||
102 | |||
103 | #define __get_user_check(x,ptr,size,segment) \ | ||
104 | ({ \ | ||
105 | long __gu_err = -EFAULT; \ | ||
106 | unsigned long __gu_val = 0; \ | ||
107 | const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ | ||
108 | if (__access_ok((unsigned long)__gu_addr,size,segment)) { \ | ||
109 | __gu_err = 0; \ | ||
110 | switch (size) { \ | ||
111 | case 1: __get_user_8(__gu_addr); break; \ | ||
112 | case 2: __get_user_16(__gu_addr); break; \ | ||
113 | case 4: __get_user_32(__gu_addr); break; \ | ||
114 | case 8: __get_user_64(__gu_addr); break; \ | ||
115 | default: __get_user_unknown(); break; \ | ||
116 | } \ | ||
117 | } \ | ||
118 | (x) = (__typeof__(*(ptr))) __gu_val; \ | ||
119 | __gu_err; \ | ||
120 | }) | ||
121 | |||
122 | struct __large_struct { unsigned long buf[100]; }; | ||
123 | #define __m(x) (*(struct __large_struct __user *)(x)) | ||
124 | |||
125 | #define __get_user_64(addr) \ | ||
126 | __asm__("1: ldq %0,%2\n" \ | ||
127 | "2:\n" \ | ||
128 | ".section __ex_table,\"a\"\n" \ | ||
129 | " .long 1b - .\n" \ | ||
130 | " lda %0, 2b-1b(%1)\n" \ | ||
131 | ".previous" \ | ||
132 | : "=r"(__gu_val), "=r"(__gu_err) \ | ||
133 | : "m"(__m(addr)), "1"(__gu_err)) | ||
134 | |||
135 | #define __get_user_32(addr) \ | ||
136 | __asm__("1: ldl %0,%2\n" \ | ||
137 | "2:\n" \ | ||
138 | ".section __ex_table,\"a\"\n" \ | ||
139 | " .long 1b - .\n" \ | ||
140 | " lda %0, 2b-1b(%1)\n" \ | ||
141 | ".previous" \ | ||
142 | : "=r"(__gu_val), "=r"(__gu_err) \ | ||
143 | : "m"(__m(addr)), "1"(__gu_err)) | ||
144 | |||
145 | #ifdef __alpha_bwx__ | ||
146 | /* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */ | ||
147 | |||
148 | #define __get_user_16(addr) \ | ||
149 | __asm__("1: ldwu %0,%2\n" \ | ||
150 | "2:\n" \ | ||
151 | ".section __ex_table,\"a\"\n" \ | ||
152 | " .long 1b - .\n" \ | ||
153 | " lda %0, 2b-1b(%1)\n" \ | ||
154 | ".previous" \ | ||
155 | : "=r"(__gu_val), "=r"(__gu_err) \ | ||
156 | : "m"(__m(addr)), "1"(__gu_err)) | ||
157 | |||
158 | #define __get_user_8(addr) \ | ||
159 | __asm__("1: ldbu %0,%2\n" \ | ||
160 | "2:\n" \ | ||
161 | ".section __ex_table,\"a\"\n" \ | ||
162 | " .long 1b - .\n" \ | ||
163 | " lda %0, 2b-1b(%1)\n" \ | ||
164 | ".previous" \ | ||
165 | : "=r"(__gu_val), "=r"(__gu_err) \ | ||
166 | : "m"(__m(addr)), "1"(__gu_err)) | ||
167 | #else | ||
168 | /* Unfortunately, we can't get an unaligned access trap for the sub-word | ||
169 | load, so we have to do a general unaligned operation. */ | ||
170 | |||
171 | #define __get_user_16(addr) \ | ||
172 | { \ | ||
173 | long __gu_tmp; \ | ||
174 | __asm__("1: ldq_u %0,0(%3)\n" \ | ||
175 | "2: ldq_u %1,1(%3)\n" \ | ||
176 | " extwl %0,%3,%0\n" \ | ||
177 | " extwh %1,%3,%1\n" \ | ||
178 | " or %0,%1,%0\n" \ | ||
179 | "3:\n" \ | ||
180 | ".section __ex_table,\"a\"\n" \ | ||
181 | " .long 1b - .\n" \ | ||
182 | " lda %0, 3b-1b(%2)\n" \ | ||
183 | " .long 2b - .\n" \ | ||
184 | " lda %0, 3b-2b(%2)\n" \ | ||
185 | ".previous" \ | ||
186 | : "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err) \ | ||
187 | : "r"(addr), "2"(__gu_err)); \ | ||
188 | } | ||
189 | |||
190 | #define __get_user_8(addr) \ | ||
191 | __asm__("1: ldq_u %0,0(%2)\n" \ | ||
192 | " extbl %0,%2,%0\n" \ | ||
193 | "2:\n" \ | ||
194 | ".section __ex_table,\"a\"\n" \ | ||
195 | " .long 1b - .\n" \ | ||
196 | " lda %0, 2b-1b(%1)\n" \ | ||
197 | ".previous" \ | ||
198 | : "=&r"(__gu_val), "=r"(__gu_err) \ | ||
199 | : "r"(addr), "1"(__gu_err)) | ||
200 | #endif | ||
201 | |||
202 | extern void __put_user_unknown(void); | ||
203 | |||
204 | #define __put_user_nocheck(x,ptr,size) \ | ||
205 | ({ \ | ||
206 | long __pu_err = 0; \ | ||
207 | __chk_user_ptr(ptr); \ | ||
208 | switch (size) { \ | ||
209 | case 1: __put_user_8(x,ptr); break; \ | ||
210 | case 2: __put_user_16(x,ptr); break; \ | ||
211 | case 4: __put_user_32(x,ptr); break; \ | ||
212 | case 8: __put_user_64(x,ptr); break; \ | ||
213 | default: __put_user_unknown(); break; \ | ||
214 | } \ | ||
215 | __pu_err; \ | ||
216 | }) | ||
217 | |||
218 | #define __put_user_check(x,ptr,size,segment) \ | ||
219 | ({ \ | ||
220 | long __pu_err = -EFAULT; \ | ||
221 | __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ | ||
222 | if (__access_ok((unsigned long)__pu_addr,size,segment)) { \ | ||
223 | __pu_err = 0; \ | ||
224 | switch (size) { \ | ||
225 | case 1: __put_user_8(x,__pu_addr); break; \ | ||
226 | case 2: __put_user_16(x,__pu_addr); break; \ | ||
227 | case 4: __put_user_32(x,__pu_addr); break; \ | ||
228 | case 8: __put_user_64(x,__pu_addr); break; \ | ||
229 | default: __put_user_unknown(); break; \ | ||
230 | } \ | ||
231 | } \ | ||
232 | __pu_err; \ | ||
233 | }) | ||
234 | |||
235 | /* | ||
236 | * The "__put_user_xx()" macros tell gcc they read from memory | ||
237 | * instead of writing: this is because they do not write to | ||
238 | * any memory gcc knows about, so there are no aliasing issues | ||
239 | */ | ||
240 | #define __put_user_64(x,addr) \ | ||
241 | __asm__ __volatile__("1: stq %r2,%1\n" \ | ||
242 | "2:\n" \ | ||
243 | ".section __ex_table,\"a\"\n" \ | ||
244 | " .long 1b - .\n" \ | ||
245 | " lda $31,2b-1b(%0)\n" \ | ||
246 | ".previous" \ | ||
247 | : "=r"(__pu_err) \ | ||
248 | : "m" (__m(addr)), "rJ" (x), "0"(__pu_err)) | ||
249 | |||
250 | #define __put_user_32(x,addr) \ | ||
251 | __asm__ __volatile__("1: stl %r2,%1\n" \ | ||
252 | "2:\n" \ | ||
253 | ".section __ex_table,\"a\"\n" \ | ||
254 | " .long 1b - .\n" \ | ||
255 | " lda $31,2b-1b(%0)\n" \ | ||
256 | ".previous" \ | ||
257 | : "=r"(__pu_err) \ | ||
258 | : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) | ||
259 | |||
260 | #ifdef __alpha_bwx__ | ||
261 | /* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */ | ||
262 | |||
263 | #define __put_user_16(x,addr) \ | ||
264 | __asm__ __volatile__("1: stw %r2,%1\n" \ | ||
265 | "2:\n" \ | ||
266 | ".section __ex_table,\"a\"\n" \ | ||
267 | " .long 1b - .\n" \ | ||
268 | " lda $31,2b-1b(%0)\n" \ | ||
269 | ".previous" \ | ||
270 | : "=r"(__pu_err) \ | ||
271 | : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) | ||
272 | |||
273 | #define __put_user_8(x,addr) \ | ||
274 | __asm__ __volatile__("1: stb %r2,%1\n" \ | ||
275 | "2:\n" \ | ||
276 | ".section __ex_table,\"a\"\n" \ | ||
277 | " .long 1b - .\n" \ | ||
278 | " lda $31,2b-1b(%0)\n" \ | ||
279 | ".previous" \ | ||
280 | : "=r"(__pu_err) \ | ||
281 | : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) | ||
282 | #else | ||
283 | /* Unfortunately, we can't get an unaligned access trap for the sub-word | ||
284 | write, so we have to do a general unaligned operation. */ | ||
285 | |||
286 | #define __put_user_16(x,addr) \ | ||
287 | { \ | ||
288 | long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4; \ | ||
289 | __asm__ __volatile__( \ | ||
290 | "1: ldq_u %2,1(%5)\n" \ | ||
291 | "2: ldq_u %1,0(%5)\n" \ | ||
292 | " inswh %6,%5,%4\n" \ | ||
293 | " inswl %6,%5,%3\n" \ | ||
294 | " mskwh %2,%5,%2\n" \ | ||
295 | " mskwl %1,%5,%1\n" \ | ||
296 | " or %2,%4,%2\n" \ | ||
297 | " or %1,%3,%1\n" \ | ||
298 | "3: stq_u %2,1(%5)\n" \ | ||
299 | "4: stq_u %1,0(%5)\n" \ | ||
300 | "5:\n" \ | ||
301 | ".section __ex_table,\"a\"\n" \ | ||
302 | " .long 1b - .\n" \ | ||
303 | " lda $31, 5b-1b(%0)\n" \ | ||
304 | " .long 2b - .\n" \ | ||
305 | " lda $31, 5b-2b(%0)\n" \ | ||
306 | " .long 3b - .\n" \ | ||
307 | " lda $31, 5b-3b(%0)\n" \ | ||
308 | " .long 4b - .\n" \ | ||
309 | " lda $31, 5b-4b(%0)\n" \ | ||
310 | ".previous" \ | ||
311 | : "=r"(__pu_err), "=&r"(__pu_tmp1), \ | ||
312 | "=&r"(__pu_tmp2), "=&r"(__pu_tmp3), \ | ||
313 | "=&r"(__pu_tmp4) \ | ||
314 | : "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \ | ||
315 | } | ||
316 | |||
317 | #define __put_user_8(x,addr) \ | ||
318 | { \ | ||
319 | long __pu_tmp1, __pu_tmp2; \ | ||
320 | __asm__ __volatile__( \ | ||
321 | "1: ldq_u %1,0(%4)\n" \ | ||
322 | " insbl %3,%4,%2\n" \ | ||
323 | " mskbl %1,%4,%1\n" \ | ||
324 | " or %1,%2,%1\n" \ | ||
325 | "2: stq_u %1,0(%4)\n" \ | ||
326 | "3:\n" \ | ||
327 | ".section __ex_table,\"a\"\n" \ | ||
328 | " .long 1b - .\n" \ | ||
329 | " lda $31, 3b-1b(%0)\n" \ | ||
330 | " .long 2b - .\n" \ | ||
331 | " lda $31, 3b-2b(%0)\n" \ | ||
332 | ".previous" \ | ||
333 | : "=r"(__pu_err), \ | ||
334 | "=&r"(__pu_tmp1), "=&r"(__pu_tmp2) \ | ||
335 | : "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \ | ||
336 | } | ||
337 | #endif | ||
338 | |||
339 | |||
340 | /* | ||
341 | * Complex access routines | ||
342 | */ | ||
343 | |||
344 | /* This little bit of silliness is to get the GP loaded for a function | ||
345 | that ordinarily wouldn't. Otherwise we could have it done by the macro | ||
346 | directly, which can be optimized by the linker. */ | ||
347 | #ifdef MODULE | ||
348 | #define __module_address(sym) "r"(sym), | ||
349 | #define __module_call(ra, arg, sym) "jsr $" #ra ",(%" #arg ")," #sym | ||
350 | #else | ||
351 | #define __module_address(sym) | ||
352 | #define __module_call(ra, arg, sym) "bsr $" #ra "," #sym " !samegp" | ||
353 | #endif | ||
354 | |||
355 | extern void __copy_user(void); | ||
356 | |||
357 | extern inline long | ||
358 | __copy_tofrom_user_nocheck(void *to, const void *from, long len) | ||
359 | { | ||
360 | register void * __cu_to __asm__("$6") = to; | ||
361 | register const void * __cu_from __asm__("$7") = from; | ||
362 | register long __cu_len __asm__("$0") = len; | ||
363 | |||
364 | __asm__ __volatile__( | ||
365 | __module_call(28, 3, __copy_user) | ||
366 | : "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to) | ||
367 | : __module_address(__copy_user) | ||
368 | "0" (__cu_len), "1" (__cu_from), "2" (__cu_to) | ||
369 | : "$1","$2","$3","$4","$5","$28","memory"); | ||
370 | |||
371 | return __cu_len; | ||
372 | } | ||
373 | |||
374 | extern inline long | ||
375 | __copy_tofrom_user(void *to, const void *from, long len, const void __user *validate) | ||
376 | { | ||
377 | if (__access_ok((unsigned long)validate, len, get_fs())) | ||
378 | len = __copy_tofrom_user_nocheck(to, from, len); | ||
379 | return len; | ||
380 | } | ||
381 | |||
382 | #define __copy_to_user(to,from,n) \ | ||
383 | ({ \ | ||
384 | __chk_user_ptr(to); \ | ||
385 | __copy_tofrom_user_nocheck((__force void *)(to),(from),(n)); \ | ||
386 | }) | ||
387 | #define __copy_from_user(to,from,n) \ | ||
388 | ({ \ | ||
389 | __chk_user_ptr(from); \ | ||
390 | __copy_tofrom_user_nocheck((to),(__force void *)(from),(n)); \ | ||
391 | }) | ||
392 | |||
393 | #define __copy_to_user_inatomic __copy_to_user | ||
394 | #define __copy_from_user_inatomic __copy_from_user | ||
395 | |||
396 | |||
397 | extern inline long | ||
398 | copy_to_user(void __user *to, const void *from, long n) | ||
399 | { | ||
400 | return __copy_tofrom_user((__force void *)to, from, n, to); | ||
401 | } | ||
402 | |||
403 | extern inline long | ||
404 | copy_from_user(void *to, const void __user *from, long n) | ||
405 | { | ||
406 | return __copy_tofrom_user(to, (__force void *)from, n, from); | ||
407 | } | ||
408 | |||
409 | extern void __do_clear_user(void); | ||
410 | |||
411 | extern inline long | ||
412 | __clear_user(void __user *to, long len) | ||
413 | { | ||
414 | register void __user * __cl_to __asm__("$6") = to; | ||
415 | register long __cl_len __asm__("$0") = len; | ||
416 | __asm__ __volatile__( | ||
417 | __module_call(28, 2, __do_clear_user) | ||
418 | : "=r"(__cl_len), "=r"(__cl_to) | ||
419 | : __module_address(__do_clear_user) | ||
420 | "0"(__cl_len), "1"(__cl_to) | ||
421 | : "$1","$2","$3","$4","$5","$28","memory"); | ||
422 | return __cl_len; | ||
423 | } | ||
424 | |||
425 | extern inline long | ||
426 | clear_user(void __user *to, long len) | ||
427 | { | ||
428 | if (__access_ok((unsigned long)to, len, get_fs())) | ||
429 | len = __clear_user(to, len); | ||
430 | return len; | ||
431 | } | ||
432 | |||
433 | #undef __module_address | ||
434 | #undef __module_call | ||
435 | |||
436 | /* Returns: -EFAULT if exception before terminator, N if the entire | ||
437 | buffer filled, else strlen. */ | ||
438 | |||
439 | extern long __strncpy_from_user(char *__to, const char __user *__from, long __to_len); | ||
440 | |||
441 | extern inline long | ||
442 | strncpy_from_user(char *to, const char __user *from, long n) | ||
443 | { | ||
444 | long ret = -EFAULT; | ||
445 | if (__access_ok((unsigned long)from, 0, get_fs())) | ||
446 | ret = __strncpy_from_user(to, from, n); | ||
447 | return ret; | ||
448 | } | ||
449 | |||
450 | /* Returns: 0 if bad, string length+1 (memory size) of string if ok */ | ||
451 | extern long __strlen_user(const char __user *); | ||
452 | |||
453 | extern inline long strlen_user(const char __user *str) | ||
454 | { | ||
455 | return access_ok(VERIFY_READ,str,0) ? __strlen_user(str) : 0; | ||
456 | } | ||
457 | |||
458 | /* Returns: 0 if exception before NUL or reaching the supplied limit (N), | ||
459 | * a value greater than N if the limit would be exceeded, else strlen. */ | ||
460 | extern long __strnlen_user(const char __user *, long); | ||
461 | |||
462 | extern inline long strnlen_user(const char __user *str, long n) | ||
463 | { | ||
464 | return access_ok(VERIFY_READ,str,0) ? __strnlen_user(str, n) : 0; | ||
465 | } | ||
466 | |||
467 | /* | ||
468 | * About the exception table: | ||
469 | * | ||
470 | * - insn is a 32-bit pc-relative offset from the faulting insn. | ||
471 | * - nextinsn is a 16-bit offset off of the faulting instruction | ||
472 | * (not off of the *next* instruction as branches are). | ||
473 | * - errreg is the register in which to place -EFAULT. | ||
474 | * - valreg is the final target register for the load sequence | ||
475 | * and will be zeroed. | ||
476 | * | ||
477 | * Either errreg or valreg may be $31, in which case nothing happens. | ||
478 | * | ||
479 | * The exception fixup information "just so happens" to be arranged | ||
480 | * as in a MEM format instruction. This lets us emit our three | ||
481 | * values like so: | ||
482 | * | ||
483 | * lda valreg, nextinsn(errreg) | ||
484 | * | ||
485 | */ | ||
486 | |||
487 | struct exception_table_entry | ||
488 | { | ||
489 | signed int insn; | ||
490 | union exception_fixup { | ||
491 | unsigned unit; | ||
492 | struct { | ||
493 | signed int nextinsn : 16; | ||
494 | unsigned int errreg : 5; | ||
495 | unsigned int valreg : 5; | ||
496 | } bits; | ||
497 | } fixup; | ||
498 | }; | ||
499 | |||
500 | /* Returns the new pc */ | ||
501 | #define fixup_exception(map_reg, fixup, pc) \ | ||
502 | ({ \ | ||
503 | if ((fixup)->fixup.bits.valreg != 31) \ | ||
504 | map_reg((fixup)->fixup.bits.valreg) = 0; \ | ||
505 | if ((fixup)->fixup.bits.errreg != 31) \ | ||
506 | map_reg((fixup)->fixup.bits.errreg) = -EFAULT; \ | ||
507 | (pc) + (fixup)->fixup.bits.nextinsn; \ | ||
508 | }) | ||
509 | |||
510 | |||
511 | #endif /* __ALPHA_UACCESS_H */ | ||
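The exception-table machinery documented above is what lets get_user()/put_user() and the bulk copy routines return -EFAULT instead of faulting fatally on a bad user pointer. A hedged sketch of typical driver-side use (hypothetical structure and names, error handling reduced to the essentials):

#include <asm/uaccess.h>

struct my_req {			/* hypothetical request layout */
	unsigned long addr;
	unsigned long len;
};

static long handle_request(void __user *arg, int __user *flagp)
{
	struct my_req req;
	int flags;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;	/* bulk copy, length checked against addr_limit */
	if (get_user(flags, flagp))
		return -EFAULT;	/* single value, size picked from the pointer type */
	/* ... act on req and flags ... */
	return 0;
}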
diff --git a/arch/alpha/include/asm/ucontext.h b/arch/alpha/include/asm/ucontext.h new file mode 100644 index 000000000000..47578ab42152 --- /dev/null +++ b/arch/alpha/include/asm/ucontext.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef _ASMAXP_UCONTEXT_H | ||
2 | #define _ASMAXP_UCONTEXT_H | ||
3 | |||
4 | struct ucontext { | ||
5 | unsigned long uc_flags; | ||
6 | struct ucontext *uc_link; | ||
7 | old_sigset_t uc_osf_sigmask; | ||
8 | stack_t uc_stack; | ||
9 | struct sigcontext uc_mcontext; | ||
10 | sigset_t uc_sigmask; /* mask last for extensibility */ | ||
11 | }; | ||
12 | |||
13 | #endif /* !_ASMAXP_UCONTEXT_H */ | ||
diff --git a/arch/alpha/include/asm/unaligned.h b/arch/alpha/include/asm/unaligned.h new file mode 100644 index 000000000000..3787c60aed3f --- /dev/null +++ b/arch/alpha/include/asm/unaligned.h | |||
@@ -0,0 +1,11 @@ | |||
1 | #ifndef _ASM_ALPHA_UNALIGNED_H | ||
2 | #define _ASM_ALPHA_UNALIGNED_H | ||
3 | |||
4 | #include <linux/unaligned/le_struct.h> | ||
5 | #include <linux/unaligned/be_byteshift.h> | ||
6 | #include <linux/unaligned/generic.h> | ||
7 | |||
8 | #define get_unaligned __get_unaligned_le | ||
9 | #define put_unaligned __put_unaligned_le | ||
10 | |||
11 | #endif /* _ASM_ALPHA_UNALIGNED_H */ | ||
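With get_unaligned()/put_unaligned() mapped to the little-endian struct-based helpers above, misaligned accesses are emitted by the compiler as safe sequences and never reach Alpha's unaligned-access fixup path. A sketch of reading a possibly misaligned 32-bit field out of a byte buffer (illustration only):

#include <linux/types.h>
#include <asm/unaligned.h>

static u32 read_le32_field(const u8 *buf, unsigned int offset)
{
	/* safe at any alignment; the packed-struct helpers selected above let
	   the compiler generate an access it knows may be unaligned */
	return get_unaligned((const u32 *)(buf + offset));
}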
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h new file mode 100644 index 000000000000..5b5c17485942 --- /dev/null +++ b/arch/alpha/include/asm/unistd.h | |||
@@ -0,0 +1,464 @@ | |||
1 | #ifndef _ALPHA_UNISTD_H | ||
2 | #define _ALPHA_UNISTD_H | ||
3 | |||
4 | #define __NR_osf_syscall 0 /* not implemented */ | ||
5 | #define __NR_exit 1 | ||
6 | #define __NR_fork 2 | ||
7 | #define __NR_read 3 | ||
8 | #define __NR_write 4 | ||
9 | #define __NR_osf_old_open 5 /* not implemented */ | ||
10 | #define __NR_close 6 | ||
11 | #define __NR_osf_wait4 7 | ||
12 | #define __NR_osf_old_creat 8 /* not implemented */ | ||
13 | #define __NR_link 9 | ||
14 | #define __NR_unlink 10 | ||
15 | #define __NR_osf_execve 11 /* not implemented */ | ||
16 | #define __NR_chdir 12 | ||
17 | #define __NR_fchdir 13 | ||
18 | #define __NR_mknod 14 | ||
19 | #define __NR_chmod 15 | ||
20 | #define __NR_chown 16 | ||
21 | #define __NR_brk 17 | ||
22 | #define __NR_osf_getfsstat 18 /* not implemented */ | ||
23 | #define __NR_lseek 19 | ||
24 | #define __NR_getxpid 20 | ||
25 | #define __NR_osf_mount 21 | ||
26 | #define __NR_umount 22 | ||
27 | #define __NR_setuid 23 | ||
28 | #define __NR_getxuid 24 | ||
29 | #define __NR_exec_with_loader 25 /* not implemented */ | ||
30 | #define __NR_ptrace 26 | ||
31 | #define __NR_osf_nrecvmsg 27 /* not implemented */ | ||
32 | #define __NR_osf_nsendmsg 28 /* not implemented */ | ||
33 | #define __NR_osf_nrecvfrom 29 /* not implemented */ | ||
34 | #define __NR_osf_naccept 30 /* not implemented */ | ||
35 | #define __NR_osf_ngetpeername 31 /* not implemented */ | ||
36 | #define __NR_osf_ngetsockname 32 /* not implemented */ | ||
37 | #define __NR_access 33 | ||
38 | #define __NR_osf_chflags 34 /* not implemented */ | ||
39 | #define __NR_osf_fchflags 35 /* not implemented */ | ||
40 | #define __NR_sync 36 | ||
41 | #define __NR_kill 37 | ||
42 | #define __NR_osf_old_stat 38 /* not implemented */ | ||
43 | #define __NR_setpgid 39 | ||
44 | #define __NR_osf_old_lstat 40 /* not implemented */ | ||
45 | #define __NR_dup 41 | ||
46 | #define __NR_pipe 42 | ||
47 | #define __NR_osf_set_program_attributes 43 | ||
48 | #define __NR_osf_profil 44 /* not implemented */ | ||
49 | #define __NR_open 45 | ||
50 | #define __NR_osf_old_sigaction 46 /* not implemented */ | ||
51 | #define __NR_getxgid 47 | ||
52 | #define __NR_osf_sigprocmask 48 | ||
53 | #define __NR_osf_getlogin 49 /* not implemented */ | ||
54 | #define __NR_osf_setlogin 50 /* not implemented */ | ||
55 | #define __NR_acct 51 | ||
56 | #define __NR_sigpending 52 | ||
57 | |||
58 | #define __NR_ioctl 54 | ||
59 | #define __NR_osf_reboot 55 /* not implemented */ | ||
60 | #define __NR_osf_revoke 56 /* not implemented */ | ||
61 | #define __NR_symlink 57 | ||
62 | #define __NR_readlink 58 | ||
63 | #define __NR_execve 59 | ||
64 | #define __NR_umask 60 | ||
65 | #define __NR_chroot 61 | ||
66 | #define __NR_osf_old_fstat 62 /* not implemented */ | ||
67 | #define __NR_getpgrp 63 | ||
68 | #define __NR_getpagesize 64 | ||
69 | #define __NR_osf_mremap 65 /* not implemented */ | ||
70 | #define __NR_vfork 66 | ||
71 | #define __NR_stat 67 | ||
72 | #define __NR_lstat 68 | ||
73 | #define __NR_osf_sbrk 69 /* not implemented */ | ||
74 | #define __NR_osf_sstk 70 /* not implemented */ | ||
75 | #define __NR_mmap 71 /* OSF/1 mmap is superset of Linux */ | ||
76 | #define __NR_osf_old_vadvise 72 /* not implemented */ | ||
77 | #define __NR_munmap 73 | ||
78 | #define __NR_mprotect 74 | ||
79 | #define __NR_madvise 75 | ||
80 | #define __NR_vhangup 76 | ||
81 | #define __NR_osf_kmodcall 77 /* not implemented */ | ||
82 | #define __NR_osf_mincore 78 /* not implemented */ | ||
83 | #define __NR_getgroups 79 | ||
84 | #define __NR_setgroups 80 | ||
85 | #define __NR_osf_old_getpgrp 81 /* not implemented */ | ||
86 | #define __NR_setpgrp 82 /* BSD alias for setpgid */ | ||
87 | #define __NR_osf_setitimer 83 | ||
88 | #define __NR_osf_old_wait 84 /* not implemented */ | ||
89 | #define __NR_osf_table 85 /* not implemented */ | ||
90 | #define __NR_osf_getitimer 86 | ||
91 | #define __NR_gethostname 87 | ||
92 | #define __NR_sethostname 88 | ||
93 | #define __NR_getdtablesize 89 | ||
94 | #define __NR_dup2 90 | ||
95 | #define __NR_fstat 91 | ||
96 | #define __NR_fcntl 92 | ||
97 | #define __NR_osf_select 93 | ||
98 | #define __NR_poll 94 | ||
99 | #define __NR_fsync 95 | ||
100 | #define __NR_setpriority 96 | ||
101 | #define __NR_socket 97 | ||
102 | #define __NR_connect 98 | ||
103 | #define __NR_accept 99 | ||
104 | #define __NR_getpriority 100 | ||
105 | #define __NR_send 101 | ||
106 | #define __NR_recv 102 | ||
107 | #define __NR_sigreturn 103 | ||
108 | #define __NR_bind 104 | ||
109 | #define __NR_setsockopt 105 | ||
110 | #define __NR_listen 106 | ||
111 | #define __NR_osf_plock 107 /* not implemented */ | ||
112 | #define __NR_osf_old_sigvec 108 /* not implemented */ | ||
113 | #define __NR_osf_old_sigblock 109 /* not implemented */ | ||
114 | #define __NR_osf_old_sigsetmask 110 /* not implemented */ | ||
115 | #define __NR_sigsuspend 111 | ||
116 | #define __NR_osf_sigstack 112 | ||
117 | #define __NR_recvmsg 113 | ||
118 | #define __NR_sendmsg 114 | ||
119 | #define __NR_osf_old_vtrace 115 /* not implemented */ | ||
120 | #define __NR_osf_gettimeofday 116 | ||
121 | #define __NR_osf_getrusage 117 | ||
122 | #define __NR_getsockopt 118 | ||
123 | |||
124 | #define __NR_readv 120 | ||
125 | #define __NR_writev 121 | ||
126 | #define __NR_osf_settimeofday 122 | ||
127 | #define __NR_fchown 123 | ||
128 | #define __NR_fchmod 124 | ||
129 | #define __NR_recvfrom 125 | ||
130 | #define __NR_setreuid 126 | ||
131 | #define __NR_setregid 127 | ||
132 | #define __NR_rename 128 | ||
133 | #define __NR_truncate 129 | ||
134 | #define __NR_ftruncate 130 | ||
135 | #define __NR_flock 131 | ||
136 | #define __NR_setgid 132 | ||
137 | #define __NR_sendto 133 | ||
138 | #define __NR_shutdown 134 | ||
139 | #define __NR_socketpair 135 | ||
140 | #define __NR_mkdir 136 | ||
141 | #define __NR_rmdir 137 | ||
142 | #define __NR_osf_utimes 138 | ||
143 | #define __NR_osf_old_sigreturn 139 /* not implemented */ | ||
144 | #define __NR_osf_adjtime 140 /* not implemented */ | ||
145 | #define __NR_getpeername 141 | ||
146 | #define __NR_osf_gethostid 142 /* not implemented */ | ||
147 | #define __NR_osf_sethostid 143 /* not implemented */ | ||
148 | #define __NR_getrlimit 144 | ||
149 | #define __NR_setrlimit 145 | ||
150 | #define __NR_osf_old_killpg 146 /* not implemented */ | ||
151 | #define __NR_setsid 147 | ||
152 | #define __NR_quotactl 148 | ||
153 | #define __NR_osf_oldquota 149 /* not implemented */ | ||
154 | #define __NR_getsockname 150 | ||
155 | |||
156 | #define __NR_osf_pid_block 153 /* not implemented */ | ||
157 | #define __NR_osf_pid_unblock 154 /* not implemented */ | ||
158 | |||
159 | #define __NR_sigaction 156 | ||
160 | #define __NR_osf_sigwaitprim 157 /* not implemented */ | ||
161 | #define __NR_osf_nfssvc 158 /* not implemented */ | ||
162 | #define __NR_osf_getdirentries 159 | ||
163 | #define __NR_osf_statfs 160 | ||
164 | #define __NR_osf_fstatfs 161 | ||
165 | |||
166 | #define __NR_osf_asynch_daemon 163 /* not implemented */ | ||
167 | #define __NR_osf_getfh 164 /* not implemented */ | ||
168 | #define __NR_osf_getdomainname 165 | ||
169 | #define __NR_setdomainname 166 | ||
170 | |||
171 | #define __NR_osf_exportfs 169 /* not implemented */ | ||
172 | |||
173 | #define __NR_osf_alt_plock 181 /* not implemented */ | ||
174 | |||
175 | #define __NR_osf_getmnt 184 /* not implemented */ | ||
176 | |||
177 | #define __NR_osf_alt_sigpending 187 /* not implemented */ | ||
178 | #define __NR_osf_alt_setsid 188 /* not implemented */ | ||
179 | |||
180 | #define __NR_osf_swapon 199 | ||
181 | #define __NR_msgctl 200 | ||
182 | #define __NR_msgget 201 | ||
183 | #define __NR_msgrcv 202 | ||
184 | #define __NR_msgsnd 203 | ||
185 | #define __NR_semctl 204 | ||
186 | #define __NR_semget 205 | ||
187 | #define __NR_semop 206 | ||
188 | #define __NR_osf_utsname 207 | ||
189 | #define __NR_lchown 208 | ||
190 | #define __NR_osf_shmat 209 | ||
191 | #define __NR_shmctl 210 | ||
192 | #define __NR_shmdt 211 | ||
193 | #define __NR_shmget 212 | ||
194 | #define __NR_osf_mvalid 213 /* not implemented */ | ||
195 | #define __NR_osf_getaddressconf 214 /* not implemented */ | ||
196 | #define __NR_osf_msleep 215 /* not implemented */ | ||
197 | #define __NR_osf_mwakeup 216 /* not implemented */ | ||
198 | #define __NR_msync 217 | ||
199 | #define __NR_osf_signal 218 /* not implemented */ | ||
200 | #define __NR_osf_utc_gettime 219 /* not implemented */ | ||
201 | #define __NR_osf_utc_adjtime 220 /* not implemented */ | ||
202 | |||
203 | #define __NR_osf_security 222 /* not implemented */ | ||
204 | #define __NR_osf_kloadcall 223 /* not implemented */ | ||
205 | |||
206 | #define __NR_getpgid 233 | ||
207 | #define __NR_getsid 234 | ||
208 | #define __NR_sigaltstack 235 | ||
209 | #define __NR_osf_waitid 236 /* not implemented */ | ||
210 | #define __NR_osf_priocntlset 237 /* not implemented */ | ||
211 | #define __NR_osf_sigsendset 238 /* not implemented */ | ||
212 | #define __NR_osf_set_speculative 239 /* not implemented */ | ||
213 | #define __NR_osf_msfs_syscall 240 /* not implemented */ | ||
214 | #define __NR_osf_sysinfo 241 | ||
215 | #define __NR_osf_uadmin 242 /* not implemented */ | ||
216 | #define __NR_osf_fuser 243 /* not implemented */ | ||
217 | #define __NR_osf_proplist_syscall 244 | ||
218 | #define __NR_osf_ntp_adjtime 245 /* not implemented */ | ||
219 | #define __NR_osf_ntp_gettime 246 /* not implemented */ | ||
220 | #define __NR_osf_pathconf 247 /* not implemented */ | ||
221 | #define __NR_osf_fpathconf 248 /* not implemented */ | ||
222 | |||
223 | #define __NR_osf_uswitch 250 /* not implemented */ | ||
224 | #define __NR_osf_usleep_thread 251 | ||
225 | #define __NR_osf_audcntl 252 /* not implemented */ | ||
226 | #define __NR_osf_audgen 253 /* not implemented */ | ||
227 | #define __NR_sysfs 254 | ||
228 | #define __NR_osf_subsys_info 255 /* not implemented */ | ||
229 | #define __NR_osf_getsysinfo 256 | ||
230 | #define __NR_osf_setsysinfo 257 | ||
231 | #define __NR_osf_afs_syscall 258 /* not implemented */ | ||
232 | #define __NR_osf_swapctl 259 /* not implemented */ | ||
233 | #define __NR_osf_memcntl 260 /* not implemented */ | ||
234 | #define __NR_osf_fdatasync 261 /* not implemented */ | ||
235 | |||
236 | /* | ||
237 | * Ignore legacy syscalls that we don't use. | ||
238 | */ | ||
239 | #define __IGNORE_alarm | ||
240 | #define __IGNORE_creat | ||
241 | #define __IGNORE_getegid | ||
242 | #define __IGNORE_geteuid | ||
243 | #define __IGNORE_getgid | ||
244 | #define __IGNORE_getpid | ||
245 | #define __IGNORE_getppid | ||
246 | #define __IGNORE_getuid | ||
247 | #define __IGNORE_pause | ||
248 | #define __IGNORE_time | ||
249 | #define __IGNORE_utime | ||
250 | |||
251 | /* | ||
252 | * Linux-specific system calls begin at 300 | ||
253 | */ | ||
254 | #define __NR_bdflush 300 | ||
255 | #define __NR_sethae 301 | ||
256 | #define __NR_mount 302 | ||
257 | #define __NR_old_adjtimex 303 | ||
258 | #define __NR_swapoff 304 | ||
259 | #define __NR_getdents 305 | ||
260 | #define __NR_create_module 306 | ||
261 | #define __NR_init_module 307 | ||
262 | #define __NR_delete_module 308 | ||
263 | #define __NR_get_kernel_syms 309 | ||
264 | #define __NR_syslog 310 | ||
265 | #define __NR_reboot 311 | ||
266 | #define __NR_clone 312 | ||
267 | #define __NR_uselib 313 | ||
268 | #define __NR_mlock 314 | ||
269 | #define __NR_munlock 315 | ||
270 | #define __NR_mlockall 316 | ||
271 | #define __NR_munlockall 317 | ||
272 | #define __NR_sysinfo 318 | ||
273 | #define __NR__sysctl 319 | ||
274 | /* 320 was sys_idle. */ | ||
275 | #define __NR_oldumount 321 | ||
276 | #define __NR_swapon 322 | ||
277 | #define __NR_times 323 | ||
278 | #define __NR_personality 324 | ||
279 | #define __NR_setfsuid 325 | ||
280 | #define __NR_setfsgid 326 | ||
281 | #define __NR_ustat 327 | ||
282 | #define __NR_statfs 328 | ||
283 | #define __NR_fstatfs 329 | ||
284 | #define __NR_sched_setparam 330 | ||
285 | #define __NR_sched_getparam 331 | ||
286 | #define __NR_sched_setscheduler 332 | ||
287 | #define __NR_sched_getscheduler 333 | ||
288 | #define __NR_sched_yield 334 | ||
289 | #define __NR_sched_get_priority_max 335 | ||
290 | #define __NR_sched_get_priority_min 336 | ||
291 | #define __NR_sched_rr_get_interval 337 | ||
292 | #define __NR_afs_syscall 338 | ||
293 | #define __NR_uname 339 | ||
294 | #define __NR_nanosleep 340 | ||
295 | #define __NR_mremap 341 | ||
296 | #define __NR_nfsservctl 342 | ||
297 | #define __NR_setresuid 343 | ||
298 | #define __NR_getresuid 344 | ||
299 | #define __NR_pciconfig_read 345 | ||
300 | #define __NR_pciconfig_write 346 | ||
301 | #define __NR_query_module 347 | ||
302 | #define __NR_prctl 348 | ||
303 | #define __NR_pread64 349 | ||
304 | #define __NR_pwrite64 350 | ||
305 | #define __NR_rt_sigreturn 351 | ||
306 | #define __NR_rt_sigaction 352 | ||
307 | #define __NR_rt_sigprocmask 353 | ||
308 | #define __NR_rt_sigpending 354 | ||
309 | #define __NR_rt_sigtimedwait 355 | ||
310 | #define __NR_rt_sigqueueinfo 356 | ||
311 | #define __NR_rt_sigsuspend 357 | ||
312 | #define __NR_select 358 | ||
313 | #define __NR_gettimeofday 359 | ||
314 | #define __NR_settimeofday 360 | ||
315 | #define __NR_getitimer 361 | ||
316 | #define __NR_setitimer 362 | ||
317 | #define __NR_utimes 363 | ||
318 | #define __NR_getrusage 364 | ||
319 | #define __NR_wait4 365 | ||
320 | #define __NR_adjtimex 366 | ||
321 | #define __NR_getcwd 367 | ||
322 | #define __NR_capget 368 | ||
323 | #define __NR_capset 369 | ||
324 | #define __NR_sendfile 370 | ||
325 | #define __NR_setresgid 371 | ||
326 | #define __NR_getresgid 372 | ||
327 | #define __NR_dipc 373 | ||
328 | #define __NR_pivot_root 374 | ||
329 | #define __NR_mincore 375 | ||
330 | #define __NR_pciconfig_iobase 376 | ||
331 | #define __NR_getdents64 377 | ||
332 | #define __NR_gettid 378 | ||
333 | #define __NR_readahead 379 | ||
334 | /* 380 is unused */ | ||
335 | #define __NR_tkill 381 | ||
336 | #define __NR_setxattr 382 | ||
337 | #define __NR_lsetxattr 383 | ||
338 | #define __NR_fsetxattr 384 | ||
339 | #define __NR_getxattr 385 | ||
340 | #define __NR_lgetxattr 386 | ||
341 | #define __NR_fgetxattr 387 | ||
342 | #define __NR_listxattr 388 | ||
343 | #define __NR_llistxattr 389 | ||
344 | #define __NR_flistxattr 390 | ||
345 | #define __NR_removexattr 391 | ||
346 | #define __NR_lremovexattr 392 | ||
347 | #define __NR_fremovexattr 393 | ||
348 | #define __NR_futex 394 | ||
349 | #define __NR_sched_setaffinity 395 | ||
350 | #define __NR_sched_getaffinity 396 | ||
351 | #define __NR_tuxcall 397 | ||
352 | #define __NR_io_setup 398 | ||
353 | #define __NR_io_destroy 399 | ||
354 | #define __NR_io_getevents 400 | ||
355 | #define __NR_io_submit 401 | ||
356 | #define __NR_io_cancel 402 | ||
357 | #define __NR_exit_group 405 | ||
358 | #define __NR_lookup_dcookie 406 | ||
359 | #define __NR_epoll_create 407 | ||
360 | #define __NR_epoll_ctl 408 | ||
361 | #define __NR_epoll_wait 409 | ||
362 | /* Feb 2007: These three sys_epoll defines shouldn't be here but culling | ||
363 | * them would break userspace apps ... we'll kill them off in 2010 :) */ | ||
364 | #define __NR_sys_epoll_create __NR_epoll_create | ||
365 | #define __NR_sys_epoll_ctl __NR_epoll_ctl | ||
366 | #define __NR_sys_epoll_wait __NR_epoll_wait | ||
367 | #define __NR_remap_file_pages 410 | ||
368 | #define __NR_set_tid_address 411 | ||
369 | #define __NR_restart_syscall 412 | ||
370 | #define __NR_fadvise64 413 | ||
371 | #define __NR_timer_create 414 | ||
372 | #define __NR_timer_settime 415 | ||
373 | #define __NR_timer_gettime 416 | ||
374 | #define __NR_timer_getoverrun 417 | ||
375 | #define __NR_timer_delete 418 | ||
376 | #define __NR_clock_settime 419 | ||
377 | #define __NR_clock_gettime 420 | ||
378 | #define __NR_clock_getres 421 | ||
379 | #define __NR_clock_nanosleep 422 | ||
380 | #define __NR_semtimedop 423 | ||
381 | #define __NR_tgkill 424 | ||
382 | #define __NR_stat64 425 | ||
383 | #define __NR_lstat64 426 | ||
384 | #define __NR_fstat64 427 | ||
385 | #define __NR_vserver 428 | ||
386 | #define __NR_mbind 429 | ||
387 | #define __NR_get_mempolicy 430 | ||
388 | #define __NR_set_mempolicy 431 | ||
389 | #define __NR_mq_open 432 | ||
390 | #define __NR_mq_unlink 433 | ||
391 | #define __NR_mq_timedsend 434 | ||
392 | #define __NR_mq_timedreceive 435 | ||
393 | #define __NR_mq_notify 436 | ||
394 | #define __NR_mq_getsetattr 437 | ||
395 | #define __NR_waitid 438 | ||
396 | #define __NR_add_key 439 | ||
397 | #define __NR_request_key 440 | ||
398 | #define __NR_keyctl 441 | ||
399 | #define __NR_ioprio_set 442 | ||
400 | #define __NR_ioprio_get 443 | ||
401 | #define __NR_inotify_init 444 | ||
402 | #define __NR_inotify_add_watch 445 | ||
403 | #define __NR_inotify_rm_watch 446 | ||
404 | #define __NR_fdatasync 447 | ||
405 | #define __NR_kexec_load 448 | ||
406 | #define __NR_migrate_pages 449 | ||
407 | #define __NR_openat 450 | ||
408 | #define __NR_mkdirat 451 | ||
409 | #define __NR_mknodat 452 | ||
410 | #define __NR_fchownat 453 | ||
411 | #define __NR_futimesat 454 | ||
412 | #define __NR_fstatat64 455 | ||
413 | #define __NR_unlinkat 456 | ||
414 | #define __NR_renameat 457 | ||
415 | #define __NR_linkat 458 | ||
416 | #define __NR_symlinkat 459 | ||
417 | #define __NR_readlinkat 460 | ||
418 | #define __NR_fchmodat 461 | ||
419 | #define __NR_faccessat 462 | ||
420 | #define __NR_pselect6 463 | ||
421 | #define __NR_ppoll 464 | ||
422 | #define __NR_unshare 465 | ||
423 | #define __NR_set_robust_list 466 | ||
424 | #define __NR_get_robust_list 467 | ||
425 | #define __NR_splice 468 | ||
426 | #define __NR_sync_file_range 469 | ||
427 | #define __NR_tee 470 | ||
428 | #define __NR_vmsplice 471 | ||
429 | #define __NR_move_pages 472 | ||
430 | #define __NR_getcpu 473 | ||
431 | #define __NR_epoll_pwait 474 | ||
432 | #define __NR_utimensat 475 | ||
433 | #define __NR_signalfd 476 | ||
434 | #define __NR_timerfd 477 | ||
435 | #define __NR_eventfd 478 | ||
436 | |||
437 | #ifdef __KERNEL__ | ||
438 | |||
439 | #define NR_SYSCALLS 479 | ||
440 | |||
441 | #define __ARCH_WANT_IPC_PARSE_VERSION | ||
442 | #define __ARCH_WANT_OLD_READDIR | ||
443 | #define __ARCH_WANT_STAT64 | ||
444 | #define __ARCH_WANT_SYS_GETHOSTNAME | ||
445 | #define __ARCH_WANT_SYS_FADVISE64 | ||
446 | #define __ARCH_WANT_SYS_GETPGRP | ||
447 | #define __ARCH_WANT_SYS_OLD_GETRLIMIT | ||
448 | #define __ARCH_WANT_SYS_OLDUMOUNT | ||
449 | #define __ARCH_WANT_SYS_SIGPENDING | ||
450 | |||
451 | /* "Conditional" syscalls. What we want is | ||
452 | |||
453 | __attribute__((weak,alias("sys_ni_syscall"))) | ||
454 | |||
455 | but that raises the problem of what type to give the symbol. If we use | ||
456 | a prototype, it'll conflict with the definition given in this file and | ||
457 | others. If we use __typeof, we discover that not all symbols actually | ||
458 | have declarations. If we use no prototype, then we get warnings from | ||
459 | -Wstrict-prototypes. Ho hum. */ | ||
460 | |||
461 | #define cond_syscall(x) asm(".weak\t" #x "\n" #x " = sys_ni_syscall") | ||
462 | |||
463 | #endif /* __KERNEL__ */ | ||
464 | #endif /* _ALPHA_UNISTD_H */ | ||
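The cond_syscall() macro at the end of unistd.h is easier to see in isolation than inside the kernel build. Below is a minimal stand-alone sketch of the same assembler-level weak-alias trick, using a hypothetical sys_demo_call and a user-space stand-in for sys_ni_syscall; it illustrates why the alias needs no C prototype, and it is not the kernel's own machinery.

/* weak_alias_demo.c - illustrative sketch only, not kernel code.
 * Build with: gcc -Wall weak_alias_demo.c -o weak_alias_demo
 */
#include <stdio.h>

/* Stand-in for the kernel's sys_ni_syscall() "not implemented" stub. */
long sys_ni_syscall(void)
{
	return -38;				/* -ENOSYS */
}

/* Same trick as cond_syscall() above: emit a weak symbol that is an
 * assembler-level alias of the stub, without giving it any C type. */
#define cond_syscall(x) asm(".weak\t" #x "\n" #x " = sys_ni_syscall")

cond_syscall(sys_demo_call);			/* hypothetical syscall name */

long sys_demo_call(void);			/* prototype for the caller only */

int main(void)
{
	/* Prints -38 unless a strong sys_demo_call() is linked in, in which
	 * case the weak alias is overridden and that definition runs. */
	printf("sys_demo_call() returned %ld\n", sys_demo_call());
	return 0;
}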
diff --git a/arch/alpha/include/asm/user.h b/arch/alpha/include/asm/user.h new file mode 100644 index 000000000000..a4eb6a4ca8d1 --- /dev/null +++ b/arch/alpha/include/asm/user.h | |||
@@ -0,0 +1,53 @@ | |||
1 | #ifndef _ALPHA_USER_H | ||
2 | #define _ALPHA_USER_H | ||
3 | |||
4 | #include <linux/sched.h> | ||
5 | #include <linux/ptrace.h> | ||
6 | |||
7 | #include <asm/page.h> | ||
8 | #include <asm/reg.h> | ||
9 | |||
10 | /* | ||
11 | * Core file format: The core file is written in such a way that gdb | ||
12 | * can understand it and provide useful information to the user (under | ||
13 | * linux we use the `trad-core' bfd, NOT the osf-core). The file contents | ||
14 | * are as follows: | ||
15 | * | ||
16 | * upage: 1 page consisting of a user struct that tells gdb | ||
17 | * what is present in the file. Directly after this is a | ||
18 | * copy of the task_struct, which is currently not used by gdb, | ||
19 | * but it may come in handy at some point. All of the registers | ||
20 | * are stored as part of the upage. The upage should always be | ||
21 | * only one page long. | ||
22 | * data: The data segment follows next. We use current->end_text to | ||
23 | * current->brk to pick up all of the user variables, plus any memory | ||
24 | * that may have been sbrk'ed. No attempt is made to determine if a | ||
25 | * page is demand-zero or if a page is totally unused, we just cover | ||
26 | * the entire range. All of the addresses are rounded in such a way | ||
27 | * that an integral number of pages is written. | ||
28 | * stack: We need the stack information in order to get a meaningful | ||
29 | * backtrace. We need to write the data from usp to | ||
30 | * current->start_stack, so we round each of these in order to be able | ||
31 | * to write an integer number of pages. | ||
32 | */ | ||
33 | struct user { | ||
34 | unsigned long regs[EF_SIZE/8+32]; /* integer and fp regs */ | ||
35 | size_t u_tsize; /* text size (pages) */ | ||
36 | size_t u_dsize; /* data size (pages) */ | ||
37 | size_t u_ssize; /* stack size (pages) */ | ||
38 | unsigned long start_code; /* text starting address */ | ||
39 | unsigned long start_data; /* data starting address */ | ||
40 | unsigned long start_stack; /* stack starting address */ | ||
41 | long int signal; /* signal causing core dump */ | ||
42 | unsigned long u_ar0; /* help gdb find registers */ | ||
43 | unsigned long magic; /* identifies a core file */ | ||
44 | char u_comm[32]; /* user command name */ | ||
45 | }; | ||
46 | |||
47 | #define NBPG PAGE_SIZE | ||
48 | #define UPAGES 1 | ||
49 | #define HOST_TEXT_START_ADDR (u.start_code) | ||
50 | #define HOST_DATA_START_ADDR (u.start_data) | ||
51 | #define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) | ||
52 | |||
53 | #endif /* _ALPHA_USER_H */ | ||
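The comment block in user.h pins down the on-disk order of the dump: one upage, then the data segment, then the stack, each rounded to whole pages. The short sketch below redoes that arithmetic in user space with made-up page counts (hypothetical values, Alpha's 8 KB page size assumed); it only illustrates the layout described above and is not the kernel's dump code.

/* core_layout_demo.c - illustrative arithmetic only, not kernel code. */
#include <stdio.h>

#define NBPG	8192UL		/* Alpha page size */
#define UPAGES	1UL		/* one page holding struct user + registers */

int main(void)
{
	unsigned long u_tsize = 12;	/* text pages (hypothetical) */
	unsigned long u_dsize = 40;	/* data/brk pages (hypothetical) */
	unsigned long u_ssize = 4;	/* stack pages (hypothetical) */

	/* Per the layout above, the file holds the upage, the data segment
	 * and the stack; the text size is recorded in struct user for gdb,
	 * but the text pages themselves are not part of the dump. */
	unsigned long core_bytes = (UPAGES + u_dsize + u_ssize) * NBPG;

	printf("text recorded: %lu pages\n", u_tsize);
	printf("core file size: %lu bytes (%lu pages)\n",
	       core_bytes, core_bytes / NBPG);
	return 0;
}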
diff --git a/arch/alpha/include/asm/vga.h b/arch/alpha/include/asm/vga.h new file mode 100644 index 000000000000..c00106bac521 --- /dev/null +++ b/arch/alpha/include/asm/vga.h | |||
@@ -0,0 +1,82 @@ | |||
1 | /* | ||
2 | * Access to VGA videoram | ||
3 | * | ||
4 | * (c) 1998 Martin Mares <mj@ucw.cz> | ||
5 | */ | ||
6 | |||
7 | #ifndef _LINUX_ASM_VGA_H_ | ||
8 | #define _LINUX_ASM_VGA_H_ | ||
9 | |||
10 | #include <asm/io.h> | ||
11 | |||
12 | #define VT_BUF_HAVE_RW | ||
13 | #define VT_BUF_HAVE_MEMSETW | ||
14 | #define VT_BUF_HAVE_MEMCPYW | ||
15 | |||
16 | static inline void scr_writew(u16 val, volatile u16 *addr) | ||
17 | { | ||
18 | if (__is_ioaddr(addr)) | ||
19 | __raw_writew(val, (volatile u16 __iomem *) addr); | ||
20 | else | ||
21 | *addr = val; | ||
22 | } | ||
23 | |||
24 | static inline u16 scr_readw(volatile const u16 *addr) | ||
25 | { | ||
26 | if (__is_ioaddr(addr)) | ||
27 | return __raw_readw((volatile const u16 __iomem *) addr); | ||
28 | else | ||
29 | return *addr; | ||
30 | } | ||
31 | |||
32 | static inline void scr_memsetw(u16 *s, u16 c, unsigned int count) | ||
33 | { | ||
34 | if (__is_ioaddr(s)) | ||
35 | memsetw_io((u16 __iomem *) s, c, count); | ||
36 | else | ||
37 | memsetw(s, c, count); | ||
38 | } | ||
39 | |||
40 | /* Do not trust that the usage will be correct; analyze the arguments. */ | ||
41 | extern void scr_memcpyw(u16 *d, const u16 *s, unsigned int count); | ||
42 | |||
43 | /* ??? These are currently only used for downloading character sets. As | ||
44 | such, they don't need memory barriers. Is this all they are intended | ||
45 | to be used for? */ | ||
46 | #define vga_readb(a) readb((u8 __iomem *)(a)) | ||
47 | #define vga_writeb(v,a) writeb(v, (u8 __iomem *)(a)) | ||
48 | |||
49 | #ifdef CONFIG_VGA_HOSE | ||
50 | #include <linux/ioport.h> | ||
51 | #include <linux/pci.h> | ||
52 | |||
53 | extern struct pci_controller *pci_vga_hose; | ||
54 | |||
55 | # define __is_port_vga(a) \ | ||
56 | (((a) >= 0x3b0) && ((a) < 0x3e0) && \ | ||
57 | ((a) != 0x3b3) && ((a) != 0x3d3)) | ||
58 | |||
59 | # define __is_mem_vga(a) \ | ||
60 | (((a) >= 0xa0000) && ((a) <= 0xc0000)) | ||
61 | |||
62 | # define FIXUP_IOADDR_VGA(a) do { \ | ||
63 | if (pci_vga_hose && __is_port_vga(a)) \ | ||
64 | (a) += pci_vga_hose->io_space->start; \ | ||
65 | } while(0) | ||
66 | |||
67 | # define FIXUP_MEMADDR_VGA(a) do { \ | ||
68 | if (pci_vga_hose && __is_mem_vga(a)) \ | ||
69 | (a) += pci_vga_hose->mem_space->start; \ | ||
70 | } while(0) | ||
71 | |||
72 | #else /* CONFIG_VGA_HOSE */ | ||
73 | # define pci_vga_hose 0 | ||
74 | # define __is_port_vga(a) 0 | ||
75 | # define __is_mem_vga(a) 0 | ||
76 | # define FIXUP_IOADDR_VGA(a) | ||
77 | # define FIXUP_MEMADDR_VGA(a) | ||
78 | #endif /* CONFIG_VGA_HOSE */ | ||
79 | |||
80 | #define VGA_MAP_MEM(x,s) ((unsigned long) ioremap(x, s)) | ||
81 | |||
82 | #endif | ||
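scr_writew() and scr_readw() above route each access either through the raw MMIO helpers or through a plain memory access, decided per pointer by __is_ioaddr(). The sketch below is a user-space model of that dispatch with faked stand-ins for __is_ioaddr() and __raw_writew() (the threshold and names are invented); it shows the shape of the helper, not the real Alpha I/O path.

/* scr_write_demo.c - simplified model of the scr_writew() dispatch;
 * the bus-address test and the MMIO write are faked for user space.
 */
#include <stdint.h>
#include <stdio.h>

/* Invented threshold: anything at or above it is treated as a bus address. */
#define FAKE_IO_BASE	0xf0000000UL

static int fake_is_ioaddr(const volatile void *addr)
{
	return (uintptr_t)addr >= FAKE_IO_BASE;
}

static void fake_raw_writew(uint16_t val, volatile uint16_t *addr)
{
	printf("MMIO write 0x%04x -> %p\n", val, (void *)addr);
}

/* Same shape as the real scr_writew(): one helper, two very different
 * destinations, chosen per pointer rather than per build. */
static void demo_scr_writew(uint16_t val, volatile uint16_t *addr)
{
	if (fake_is_ioaddr(addr))
		fake_raw_writew(val, addr);
	else
		*addr = val;
}

int main(void)
{
	uint16_t shadow[80];			/* in-RAM console buffer */

	demo_scr_writew(0x0741, &shadow[0]);	/* plain store */
	demo_scr_writew(0x0741, (volatile uint16_t *)FAKE_IO_BASE); /* "MMIO" */
	printf("shadow[0] = 0x%04x\n", shadow[0]);
	return 0;
}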
diff --git a/arch/alpha/include/asm/xor.h b/arch/alpha/include/asm/xor.h new file mode 100644 index 000000000000..5ee1c2bc0499 --- /dev/null +++ b/arch/alpha/include/asm/xor.h | |||
@@ -0,0 +1,855 @@ | |||
1 | /* | ||
2 | * include/asm-alpha/xor.h | ||
3 | * | ||
4 | * Optimized RAID-5 checksumming functions for alpha EV5 and EV6 | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2, or (at your option) | ||
9 | * any later version. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * (for example /usr/src/linux/COPYING); if not, write to the Free | ||
13 | * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
14 | */ | ||
15 | |||
16 | extern void xor_alpha_2(unsigned long, unsigned long *, unsigned long *); | ||
17 | extern void xor_alpha_3(unsigned long, unsigned long *, unsigned long *, | ||
18 | unsigned long *); | ||
19 | extern void xor_alpha_4(unsigned long, unsigned long *, unsigned long *, | ||
20 | unsigned long *, unsigned long *); | ||
21 | extern void xor_alpha_5(unsigned long, unsigned long *, unsigned long *, | ||
22 | unsigned long *, unsigned long *, unsigned long *); | ||
23 | |||
24 | extern void xor_alpha_prefetch_2(unsigned long, unsigned long *, | ||
25 | unsigned long *); | ||
26 | extern void xor_alpha_prefetch_3(unsigned long, unsigned long *, | ||
27 | unsigned long *, unsigned long *); | ||
28 | extern void xor_alpha_prefetch_4(unsigned long, unsigned long *, | ||
29 | unsigned long *, unsigned long *, | ||
30 | unsigned long *); | ||
31 | extern void xor_alpha_prefetch_5(unsigned long, unsigned long *, | ||
32 | unsigned long *, unsigned long *, | ||
33 | unsigned long *, unsigned long *); | ||
34 | |||
35 | asm(" \n\ | ||
36 | .text \n\ | ||
37 | .align 3 \n\ | ||
38 | .ent xor_alpha_2 \n\ | ||
39 | xor_alpha_2: \n\ | ||
40 | .prologue 0 \n\ | ||
41 | srl $16, 6, $16 \n\ | ||
42 | .align 4 \n\ | ||
43 | 2: \n\ | ||
44 | ldq $0,0($17) \n\ | ||
45 | ldq $1,0($18) \n\ | ||
46 | ldq $2,8($17) \n\ | ||
47 | ldq $3,8($18) \n\ | ||
48 | \n\ | ||
49 | ldq $4,16($17) \n\ | ||
50 | ldq $5,16($18) \n\ | ||
51 | ldq $6,24($17) \n\ | ||
52 | ldq $7,24($18) \n\ | ||
53 | \n\ | ||
54 | ldq $19,32($17) \n\ | ||
55 | ldq $20,32($18) \n\ | ||
56 | ldq $21,40($17) \n\ | ||
57 | ldq $22,40($18) \n\ | ||
58 | \n\ | ||
59 | ldq $23,48($17) \n\ | ||
60 | ldq $24,48($18) \n\ | ||
61 | ldq $25,56($17) \n\ | ||
62 | xor $0,$1,$0 # 7 cycles from $1 load \n\ | ||
63 | \n\ | ||
64 | ldq $27,56($18) \n\ | ||
65 | xor $2,$3,$2 \n\ | ||
66 | stq $0,0($17) \n\ | ||
67 | xor $4,$5,$4 \n\ | ||
68 | \n\ | ||
69 | stq $2,8($17) \n\ | ||
70 | xor $6,$7,$6 \n\ | ||
71 | stq $4,16($17) \n\ | ||
72 | xor $19,$20,$19 \n\ | ||
73 | \n\ | ||
74 | stq $6,24($17) \n\ | ||
75 | xor $21,$22,$21 \n\ | ||
76 | stq $19,32($17) \n\ | ||
77 | xor $23,$24,$23 \n\ | ||
78 | \n\ | ||
79 | stq $21,40($17) \n\ | ||
80 | xor $25,$27,$25 \n\ | ||
81 | stq $23,48($17) \n\ | ||
82 | subq $16,1,$16 \n\ | ||
83 | \n\ | ||
84 | stq $25,56($17) \n\ | ||
85 | addq $17,64,$17 \n\ | ||
86 | addq $18,64,$18 \n\ | ||
87 | bgt $16,2b \n\ | ||
88 | \n\ | ||
89 | ret \n\ | ||
90 | .end xor_alpha_2 \n\ | ||
91 | \n\ | ||
92 | .align 3 \n\ | ||
93 | .ent xor_alpha_3 \n\ | ||
94 | xor_alpha_3: \n\ | ||
95 | .prologue 0 \n\ | ||
96 | srl $16, 6, $16 \n\ | ||
97 | .align 4 \n\ | ||
98 | 3: \n\ | ||
99 | ldq $0,0($17) \n\ | ||
100 | ldq $1,0($18) \n\ | ||
101 | ldq $2,0($19) \n\ | ||
102 | ldq $3,8($17) \n\ | ||
103 | \n\ | ||
104 | ldq $4,8($18) \n\ | ||
105 | ldq $6,16($17) \n\ | ||
106 | ldq $7,16($18) \n\ | ||
107 | ldq $21,24($17) \n\ | ||
108 | \n\ | ||
109 | ldq $22,24($18) \n\ | ||
110 | ldq $24,32($17) \n\ | ||
111 | ldq $25,32($18) \n\ | ||
112 | ldq $5,8($19) \n\ | ||
113 | \n\ | ||
114 | ldq $20,16($19) \n\ | ||
115 | ldq $23,24($19) \n\ | ||
116 | ldq $27,32($19) \n\ | ||
117 | nop \n\ | ||
118 | \n\ | ||
119 | xor $0,$1,$1 # 8 cycles from $0 load \n\ | ||
120 | xor $3,$4,$4 # 6 cycles from $4 load \n\ | ||
121 | xor $6,$7,$7 # 6 cycles from $7 load \n\ | ||
122 | xor $21,$22,$22 # 5 cycles from $22 load \n\ | ||
123 | \n\ | ||
124 | xor $1,$2,$2 # 9 cycles from $2 load \n\ | ||
125 | xor $24,$25,$25 # 5 cycles from $25 load \n\ | ||
126 | stq $2,0($17) \n\ | ||
127 | xor $4,$5,$5 # 6 cycles from $5 load \n\ | ||
128 | \n\ | ||
129 | stq $5,8($17) \n\ | ||
130 | xor $7,$20,$20 # 7 cycles from $20 load \n\ | ||
131 | stq $20,16($17) \n\ | ||
132 | xor $22,$23,$23 # 7 cycles from $23 load \n\ | ||
133 | \n\ | ||
134 | stq $23,24($17) \n\ | ||
135 | xor $25,$27,$27 # 7 cycles from $27 load \n\ | ||
136 | stq $27,32($17) \n\ | ||
137 | nop \n\ | ||
138 | \n\ | ||
139 | ldq $0,40($17) \n\ | ||
140 | ldq $1,40($18) \n\ | ||
141 | ldq $3,48($17) \n\ | ||
142 | ldq $4,48($18) \n\ | ||
143 | \n\ | ||
144 | ldq $6,56($17) \n\ | ||
145 | ldq $7,56($18) \n\ | ||
146 | ldq $2,40($19) \n\ | ||
147 | ldq $5,48($19) \n\ | ||
148 | \n\ | ||
149 | ldq $20,56($19) \n\ | ||
150 | xor $0,$1,$1 # 4 cycles from $1 load \n\ | ||
151 | xor $3,$4,$4 # 5 cycles from $4 load \n\ | ||
152 | xor $6,$7,$7 # 5 cycles from $7 load \n\ | ||
153 | \n\ | ||
154 | xor $1,$2,$2 # 4 cycles from $2 load \n\ | ||
155 | xor $4,$5,$5 # 5 cycles from $5 load \n\ | ||
156 | stq $2,40($17) \n\ | ||
157 | xor $7,$20,$20 # 4 cycles from $20 load \n\ | ||
158 | \n\ | ||
159 | stq $5,48($17) \n\ | ||
160 | subq $16,1,$16 \n\ | ||
161 | stq $20,56($17) \n\ | ||
162 | addq $19,64,$19 \n\ | ||
163 | \n\ | ||
164 | addq $18,64,$18 \n\ | ||
165 | addq $17,64,$17 \n\ | ||
166 | bgt $16,3b \n\ | ||
167 | ret \n\ | ||
168 | .end xor_alpha_3 \n\ | ||
169 | \n\ | ||
170 | .align 3 \n\ | ||
171 | .ent xor_alpha_4 \n\ | ||
172 | xor_alpha_4: \n\ | ||
173 | .prologue 0 \n\ | ||
174 | srl $16, 6, $16 \n\ | ||
175 | .align 4 \n\ | ||
176 | 4: \n\ | ||
177 | ldq $0,0($17) \n\ | ||
178 | ldq $1,0($18) \n\ | ||
179 | ldq $2,0($19) \n\ | ||
180 | ldq $3,0($20) \n\ | ||
181 | \n\ | ||
182 | ldq $4,8($17) \n\ | ||
183 | ldq $5,8($18) \n\ | ||
184 | ldq $6,8($19) \n\ | ||
185 | ldq $7,8($20) \n\ | ||
186 | \n\ | ||
187 | ldq $21,16($17) \n\ | ||
188 | ldq $22,16($18) \n\ | ||
189 | ldq $23,16($19) \n\ | ||
190 | ldq $24,16($20) \n\ | ||
191 | \n\ | ||
192 | ldq $25,24($17) \n\ | ||
193 | xor $0,$1,$1 # 6 cycles from $1 load \n\ | ||
194 | ldq $27,24($18) \n\ | ||
195 | xor $2,$3,$3 # 6 cycles from $3 load \n\ | ||
196 | \n\ | ||
197 | ldq $0,24($19) \n\ | ||
198 | xor $1,$3,$3 \n\ | ||
199 | ldq $1,24($20) \n\ | ||
200 | xor $4,$5,$5 # 7 cycles from $5 load \n\ | ||
201 | \n\ | ||
202 | stq $3,0($17) \n\ | ||
203 | xor $6,$7,$7 \n\ | ||
204 | xor $21,$22,$22 # 7 cycles from $22 load \n\ | ||
205 | xor $5,$7,$7 \n\ | ||
206 | \n\ | ||
207 | stq $7,8($17) \n\ | ||
208 | xor $23,$24,$24 # 7 cycles from $24 load \n\ | ||
209 | ldq $2,32($17) \n\ | ||
210 | xor $22,$24,$24 \n\ | ||
211 | \n\ | ||
212 | ldq $3,32($18) \n\ | ||
213 | ldq $4,32($19) \n\ | ||
214 | ldq $5,32($20) \n\ | ||
215 | xor $25,$27,$27 # 8 cycles from $27 load \n\ | ||
216 | \n\ | ||
217 | ldq $6,40($17) \n\ | ||
218 | ldq $7,40($18) \n\ | ||
219 | ldq $21,40($19) \n\ | ||
220 | ldq $22,40($20) \n\ | ||
221 | \n\ | ||
222 | stq $24,16($17) \n\ | ||
223 | xor $0,$1,$1 # 9 cycles from $1 load \n\ | ||
224 | xor $2,$3,$3 # 5 cycles from $3 load \n\ | ||
225 | xor $27,$1,$1 \n\ | ||
226 | \n\ | ||
227 | stq $1,24($17) \n\ | ||
228 | xor $4,$5,$5 # 5 cycles from $5 load \n\ | ||
229 | ldq $23,48($17) \n\ | ||
230 | ldq $24,48($18) \n\ | ||
231 | \n\ | ||
232 | ldq $25,48($19) \n\ | ||
233 | xor $3,$5,$5 \n\ | ||
234 | ldq $27,48($20) \n\ | ||
235 | ldq $0,56($17) \n\ | ||
236 | \n\ | ||
237 | ldq $1,56($18) \n\ | ||
238 | ldq $2,56($19) \n\ | ||
239 | xor $6,$7,$7 # 8 cycles from $6 load \n\ | ||
240 | ldq $3,56($20) \n\ | ||
241 | \n\ | ||
242 | stq $5,32($17) \n\ | ||
243 | xor $21,$22,$22 # 8 cycles from $22 load \n\ | ||
244 | xor $7,$22,$22 \n\ | ||
245 | xor $23,$24,$24 # 5 cycles from $24 load \n\ | ||
246 | \n\ | ||
247 | stq $22,40($17) \n\ | ||
248 | xor $25,$27,$27 # 5 cycles from $27 load \n\ | ||
249 | xor $24,$27,$27 \n\ | ||
250 | xor $0,$1,$1 # 5 cycles from $1 load \n\ | ||
251 | \n\ | ||
252 | stq $27,48($17) \n\ | ||
253 | xor $2,$3,$3 # 4 cycles from $3 load \n\ | ||
254 | xor $1,$3,$3 \n\ | ||
255 | subq $16,1,$16 \n\ | ||
256 | \n\ | ||
257 | stq $3,56($17) \n\ | ||
258 | addq $20,64,$20 \n\ | ||
259 | addq $19,64,$19 \n\ | ||
260 | addq $18,64,$18 \n\ | ||
261 | \n\ | ||
262 | addq $17,64,$17 \n\ | ||
263 | bgt $16,4b \n\ | ||
264 | ret \n\ | ||
265 | .end xor_alpha_4 \n\ | ||
266 | \n\ | ||
267 | .align 3 \n\ | ||
268 | .ent xor_alpha_5 \n\ | ||
269 | xor_alpha_5: \n\ | ||
270 | .prologue 0 \n\ | ||
271 | srl $16, 6, $16 \n\ | ||
272 | .align 4 \n\ | ||
273 | 5: \n\ | ||
274 | ldq $0,0($17) \n\ | ||
275 | ldq $1,0($18) \n\ | ||
276 | ldq $2,0($19) \n\ | ||
277 | ldq $3,0($20) \n\ | ||
278 | \n\ | ||
279 | ldq $4,0($21) \n\ | ||
280 | ldq $5,8($17) \n\ | ||
281 | ldq $6,8($18) \n\ | ||
282 | ldq $7,8($19) \n\ | ||
283 | \n\ | ||
284 | ldq $22,8($20) \n\ | ||
285 | ldq $23,8($21) \n\ | ||
286 | ldq $24,16($17) \n\ | ||
287 | ldq $25,16($18) \n\ | ||
288 | \n\ | ||
289 | ldq $27,16($19) \n\ | ||
290 | xor $0,$1,$1 # 6 cycles from $1 load \n\ | ||
291 | ldq $28,16($20) \n\ | ||
292 | xor $2,$3,$3 # 6 cycles from $3 load \n\ | ||
293 | \n\ | ||
294 | ldq $0,16($21) \n\ | ||
295 | xor $1,$3,$3 \n\ | ||
296 | ldq $1,24($17) \n\ | ||
297 | xor $3,$4,$4 # 7 cycles from $4 load \n\ | ||
298 | \n\ | ||
299 | stq $4,0($17) \n\ | ||
300 | xor $5,$6,$6 # 7 cycles from $6 load \n\ | ||
301 | xor $7,$22,$22 # 7 cycles from $22 load \n\ | ||
302 | xor $6,$23,$23 # 7 cycles from $23 load \n\ | ||
303 | \n\ | ||
304 | ldq $2,24($18) \n\ | ||
305 | xor $22,$23,$23 \n\ | ||
306 | ldq $3,24($19) \n\ | ||
307 | xor $24,$25,$25 # 8 cycles from $25 load \n\ | ||
308 | \n\ | ||
309 | stq $23,8($17) \n\ | ||
310 | xor $25,$27,$27 # 8 cycles from $27 load \n\ | ||
311 | ldq $4,24($20) \n\ | ||
312 | xor $28,$0,$0 # 7 cycles from $0 load \n\ | ||
313 | \n\ | ||
314 | ldq $5,24($21) \n\ | ||
315 | xor $27,$0,$0 \n\ | ||
316 | ldq $6,32($17) \n\ | ||
317 | ldq $7,32($18) \n\ | ||
318 | \n\ | ||
319 | stq $0,16($17) \n\ | ||
320 | xor $1,$2,$2 # 6 cycles from $2 load \n\ | ||
321 | ldq $22,32($19) \n\ | ||
322 | xor $3,$4,$4 # 4 cycles from $4 load \n\ | ||
323 | \n\ | ||
324 | ldq $23,32($20) \n\ | ||
325 | xor $2,$4,$4 \n\ | ||
326 | ldq $24,32($21) \n\ | ||
327 | ldq $25,40($17) \n\ | ||
328 | \n\ | ||
329 | ldq $27,40($18) \n\ | ||
330 | ldq $28,40($19) \n\ | ||
331 | ldq $0,40($20) \n\ | ||
332 | xor $4,$5,$5 # 7 cycles from $5 load \n\ | ||
333 | \n\ | ||
334 | stq $5,24($17) \n\ | ||
335 | xor $6,$7,$7 # 7 cycles from $7 load \n\ | ||
336 | ldq $1,40($21) \n\ | ||
337 | ldq $2,48($17) \n\ | ||
338 | \n\ | ||
339 | ldq $3,48($18) \n\ | ||
340 | xor $7,$22,$22 # 7 cycles from $22 load \n\ | ||
341 | ldq $4,48($19) \n\ | ||
342 | xor $23,$24,$24 # 6 cycles from $24 load \n\ | ||
343 | \n\ | ||
344 | ldq $5,48($20) \n\ | ||
345 | xor $22,$24,$24 \n\ | ||
346 | ldq $6,48($21) \n\ | ||
347 | xor $25,$27,$27 # 7 cycles from $27 load \n\ | ||
348 | \n\ | ||
349 | stq $24,32($17) \n\ | ||
350 | xor $27,$28,$28 # 8 cycles from $28 load \n\ | ||
351 | ldq $7,56($17) \n\ | ||
352 | xor $0,$1,$1 # 6 cycles from $1 load \n\ | ||
353 | \n\ | ||
354 | ldq $22,56($18) \n\ | ||
355 | ldq $23,56($19) \n\ | ||
356 | ldq $24,56($20) \n\ | ||
357 | ldq $25,56($21) \n\ | ||
358 | \n\ | ||
359 | xor $28,$1,$1 \n\ | ||
360 | xor $2,$3,$3 # 9 cycles from $3 load \n\ | ||
361 | xor $3,$4,$4 # 9 cycles from $4 load \n\ | ||
362 | xor $5,$6,$6 # 8 cycles from $6 load \n\ | ||
363 | \n\ | ||
364 | stq $1,40($17) \n\ | ||
365 | xor $4,$6,$6 \n\ | ||
366 | xor $7,$22,$22 # 7 cycles from $22 load \n\ | ||
367 | xor $23,$24,$24 # 6 cycles from $24 load \n\ | ||
368 | \n\ | ||
369 | stq $6,48($17) \n\ | ||
370 | xor $22,$24,$24 \n\ | ||
371 | subq $16,1,$16 \n\ | ||
372 | xor $24,$25,$25 # 8 cycles from $25 load \n\ | ||
373 | \n\ | ||
374 | stq $25,56($17) \n\ | ||
375 | addq $21,64,$21 \n\ | ||
376 | addq $20,64,$20 \n\ | ||
377 | addq $19,64,$19 \n\ | ||
378 | \n\ | ||
379 | addq $18,64,$18 \n\ | ||
380 | addq $17,64,$17 \n\ | ||
381 | bgt $16,5b \n\ | ||
382 | ret \n\ | ||
383 | .end xor_alpha_5 \n\ | ||
384 | \n\ | ||
385 | .align 3 \n\ | ||
386 | .ent xor_alpha_prefetch_2 \n\ | ||
387 | xor_alpha_prefetch_2: \n\ | ||
388 | .prologue 0 \n\ | ||
389 | srl $16, 6, $16 \n\ | ||
390 | \n\ | ||
391 | ldq $31, 0($17) \n\ | ||
392 | ldq $31, 0($18) \n\ | ||
393 | \n\ | ||
394 | ldq $31, 64($17) \n\ | ||
395 | ldq $31, 64($18) \n\ | ||
396 | \n\ | ||
397 | ldq $31, 128($17) \n\ | ||
398 | ldq $31, 128($18) \n\ | ||
399 | \n\ | ||
400 | ldq $31, 192($17) \n\ | ||
401 | ldq $31, 192($18) \n\ | ||
402 | .align 4 \n\ | ||
403 | 2: \n\ | ||
404 | ldq $0,0($17) \n\ | ||
405 | ldq $1,0($18) \n\ | ||
406 | ldq $2,8($17) \n\ | ||
407 | ldq $3,8($18) \n\ | ||
408 | \n\ | ||
409 | ldq $4,16($17) \n\ | ||
410 | ldq $5,16($18) \n\ | ||
411 | ldq $6,24($17) \n\ | ||
412 | ldq $7,24($18) \n\ | ||
413 | \n\ | ||
414 | ldq $19,32($17) \n\ | ||
415 | ldq $20,32($18) \n\ | ||
416 | ldq $21,40($17) \n\ | ||
417 | ldq $22,40($18) \n\ | ||
418 | \n\ | ||
419 | ldq $23,48($17) \n\ | ||
420 | ldq $24,48($18) \n\ | ||
421 | ldq $25,56($17) \n\ | ||
422 | ldq $27,56($18) \n\ | ||
423 | \n\ | ||
424 | ldq $31,256($17) \n\ | ||
425 | xor $0,$1,$0 # 8 cycles from $1 load \n\ | ||
426 | ldq $31,256($18) \n\ | ||
427 | xor $2,$3,$2 \n\ | ||
428 | \n\ | ||
429 | stq $0,0($17) \n\ | ||
430 | xor $4,$5,$4 \n\ | ||
431 | stq $2,8($17) \n\ | ||
432 | xor $6,$7,$6 \n\ | ||
433 | \n\ | ||
434 | stq $4,16($17) \n\ | ||
435 | xor $19,$20,$19 \n\ | ||
436 | stq $6,24($17) \n\ | ||
437 | xor $21,$22,$21 \n\ | ||
438 | \n\ | ||
439 | stq $19,32($17) \n\ | ||
440 | xor $23,$24,$23 \n\ | ||
441 | stq $21,40($17) \n\ | ||
442 | xor $25,$27,$25 \n\ | ||
443 | \n\ | ||
444 | stq $23,48($17) \n\ | ||
445 | subq $16,1,$16 \n\ | ||
446 | stq $25,56($17) \n\ | ||
447 | addq $17,64,$17 \n\ | ||
448 | \n\ | ||
449 | addq $18,64,$18 \n\ | ||
450 | bgt $16,2b \n\ | ||
451 | ret \n\ | ||
452 | .end xor_alpha_prefetch_2 \n\ | ||
453 | \n\ | ||
454 | .align 3 \n\ | ||
455 | .ent xor_alpha_prefetch_3 \n\ | ||
456 | xor_alpha_prefetch_3: \n\ | ||
457 | .prologue 0 \n\ | ||
458 | srl $16, 6, $16 \n\ | ||
459 | \n\ | ||
460 | ldq $31, 0($17) \n\ | ||
461 | ldq $31, 0($18) \n\ | ||
462 | ldq $31, 0($19) \n\ | ||
463 | \n\ | ||
464 | ldq $31, 64($17) \n\ | ||
465 | ldq $31, 64($18) \n\ | ||
466 | ldq $31, 64($19) \n\ | ||
467 | \n\ | ||
468 | ldq $31, 128($17) \n\ | ||
469 | ldq $31, 128($18) \n\ | ||
470 | ldq $31, 128($19) \n\ | ||
471 | \n\ | ||
472 | ldq $31, 192($17) \n\ | ||
473 | ldq $31, 192($18) \n\ | ||
474 | ldq $31, 192($19) \n\ | ||
475 | .align 4 \n\ | ||
476 | 3: \n\ | ||
477 | ldq $0,0($17) \n\ | ||
478 | ldq $1,0($18) \n\ | ||
479 | ldq $2,0($19) \n\ | ||
480 | ldq $3,8($17) \n\ | ||
481 | \n\ | ||
482 | ldq $4,8($18) \n\ | ||
483 | ldq $6,16($17) \n\ | ||
484 | ldq $7,16($18) \n\ | ||
485 | ldq $21,24($17) \n\ | ||
486 | \n\ | ||
487 | ldq $22,24($18) \n\ | ||
488 | ldq $24,32($17) \n\ | ||
489 | ldq $25,32($18) \n\ | ||
490 | ldq $5,8($19) \n\ | ||
491 | \n\ | ||
492 | ldq $20,16($19) \n\ | ||
493 | ldq $23,24($19) \n\ | ||
494 | ldq $27,32($19) \n\ | ||
495 | nop \n\ | ||
496 | \n\ | ||
497 | xor $0,$1,$1 # 8 cycles from $0 load \n\ | ||
498 | xor $3,$4,$4 # 7 cycles from $4 load \n\ | ||
499 | xor $6,$7,$7 # 6 cycles from $7 load \n\ | ||
500 | xor $21,$22,$22 # 5 cycles from $22 load \n\ | ||
501 | \n\ | ||
502 | xor $1,$2,$2 # 9 cycles from $2 load \n\ | ||
503 | xor $24,$25,$25 # 5 cycles from $25 load \n\ | ||
504 | stq $2,0($17) \n\ | ||
505 | xor $4,$5,$5 # 6 cycles from $5 load \n\ | ||
506 | \n\ | ||
507 | stq $5,8($17) \n\ | ||
508 | xor $7,$20,$20 # 7 cycles from $20 load \n\ | ||
509 | stq $20,16($17) \n\ | ||
510 | xor $22,$23,$23 # 7 cycles from $23 load \n\ | ||
511 | \n\ | ||
512 | stq $23,24($17) \n\ | ||
513 | xor $25,$27,$27 # 7 cycles from $27 load \n\ | ||
514 | stq $27,32($17) \n\ | ||
515 | nop \n\ | ||
516 | \n\ | ||
517 | ldq $0,40($17) \n\ | ||
518 | ldq $1,40($18) \n\ | ||
519 | ldq $3,48($17) \n\ | ||
520 | ldq $4,48($18) \n\ | ||
521 | \n\ | ||
522 | ldq $6,56($17) \n\ | ||
523 | ldq $7,56($18) \n\ | ||
524 | ldq $2,40($19) \n\ | ||
525 | ldq $5,48($19) \n\ | ||
526 | \n\ | ||
527 | ldq $20,56($19) \n\ | ||
528 | ldq $31,256($17) \n\ | ||
529 | ldq $31,256($18) \n\ | ||
530 | ldq $31,256($19) \n\ | ||
531 | \n\ | ||
532 | xor $0,$1,$1 # 6 cycles from $1 load \n\ | ||
533 | xor $3,$4,$4 # 5 cycles from $4 load \n\ | ||
534 | xor $6,$7,$7 # 5 cycles from $7 load \n\ | ||
535 | xor $1,$2,$2 # 4 cycles from $2 load \n\ | ||
536 | \n\ | ||
537 | xor $4,$5,$5 # 5 cycles from $5 load \n\ | ||
538 | xor $7,$20,$20 # 4 cycles from $20 load \n\ | ||
539 | stq $2,40($17) \n\ | ||
540 | subq $16,1,$16 \n\ | ||
541 | \n\ | ||
542 | stq $5,48($17) \n\ | ||
543 | addq $19,64,$19 \n\ | ||
544 | stq $20,56($17) \n\ | ||
545 | addq $18,64,$18 \n\ | ||
546 | \n\ | ||
547 | addq $17,64,$17 \n\ | ||
548 | bgt $16,3b \n\ | ||
549 | ret \n\ | ||
550 | .end xor_alpha_prefetch_3 \n\ | ||
551 | \n\ | ||
552 | .align 3 \n\ | ||
553 | .ent xor_alpha_prefetch_4 \n\ | ||
554 | xor_alpha_prefetch_4: \n\ | ||
555 | .prologue 0 \n\ | ||
556 | srl $16, 6, $16 \n\ | ||
557 | \n\ | ||
558 | ldq $31, 0($17) \n\ | ||
559 | ldq $31, 0($18) \n\ | ||
560 | ldq $31, 0($19) \n\ | ||
561 | ldq $31, 0($20) \n\ | ||
562 | \n\ | ||
563 | ldq $31, 64($17) \n\ | ||
564 | ldq $31, 64($18) \n\ | ||
565 | ldq $31, 64($19) \n\ | ||
566 | ldq $31, 64($20) \n\ | ||
567 | \n\ | ||
568 | ldq $31, 128($17) \n\ | ||
569 | ldq $31, 128($18) \n\ | ||
570 | ldq $31, 128($19) \n\ | ||
571 | ldq $31, 128($20) \n\ | ||
572 | \n\ | ||
573 | ldq $31, 192($17) \n\ | ||
574 | ldq $31, 192($18) \n\ | ||
575 | ldq $31, 192($19) \n\ | ||
576 | ldq $31, 192($20) \n\ | ||
577 | .align 4 \n\ | ||
578 | 4: \n\ | ||
579 | ldq $0,0($17) \n\ | ||
580 | ldq $1,0($18) \n\ | ||
581 | ldq $2,0($19) \n\ | ||
582 | ldq $3,0($20) \n\ | ||
583 | \n\ | ||
584 | ldq $4,8($17) \n\ | ||
585 | ldq $5,8($18) \n\ | ||
586 | ldq $6,8($19) \n\ | ||
587 | ldq $7,8($20) \n\ | ||
588 | \n\ | ||
589 | ldq $21,16($17) \n\ | ||
590 | ldq $22,16($18) \n\ | ||
591 | ldq $23,16($19) \n\ | ||
592 | ldq $24,16($20) \n\ | ||
593 | \n\ | ||
594 | ldq $25,24($17) \n\ | ||
595 | xor $0,$1,$1 # 6 cycles from $1 load \n\ | ||
596 | ldq $27,24($18) \n\ | ||
597 | xor $2,$3,$3 # 6 cycles from $3 load \n\ | ||
598 | \n\ | ||
599 | ldq $0,24($19) \n\ | ||
600 | xor $1,$3,$3 \n\ | ||
601 | ldq $1,24($20) \n\ | ||
602 | xor $4,$5,$5 # 7 cycles from $5 load \n\ | ||
603 | \n\ | ||
604 | stq $3,0($17) \n\ | ||
605 | xor $6,$7,$7 \n\ | ||
606 | xor $21,$22,$22 # 7 cycles from $22 load \n\ | ||
607 | xor $5,$7,$7 \n\ | ||
608 | \n\ | ||
609 | stq $7,8($17) \n\ | ||
610 | xor $23,$24,$24 # 7 cycles from $24 load \n\ | ||
611 | ldq $2,32($17) \n\ | ||
612 | xor $22,$24,$24 \n\ | ||
613 | \n\ | ||
614 | ldq $3,32($18) \n\ | ||
615 | ldq $4,32($19) \n\ | ||
616 | ldq $5,32($20) \n\ | ||
617 | xor $25,$27,$27 # 8 cycles from $27 load \n\ | ||
618 | \n\ | ||
619 | ldq $6,40($17) \n\ | ||
620 | ldq $7,40($18) \n\ | ||
621 | ldq $21,40($19) \n\ | ||
622 | ldq $22,40($20) \n\ | ||
623 | \n\ | ||
624 | stq $24,16($17) \n\ | ||
625 | xor $0,$1,$1 # 9 cycles from $1 load \n\ | ||
626 | xor $2,$3,$3 # 5 cycles from $3 load \n\ | ||
627 | xor $27,$1,$1 \n\ | ||
628 | \n\ | ||
629 | stq $1,24($17) \n\ | ||
630 | xor $4,$5,$5 # 5 cycles from $5 load \n\ | ||
631 | ldq $23,48($17) \n\ | ||
632 | xor $3,$5,$5 \n\ | ||
633 | \n\ | ||
634 | ldq $24,48($18) \n\ | ||
635 | ldq $25,48($19) \n\ | ||
636 | ldq $27,48($20) \n\ | ||
637 | ldq $0,56($17) \n\ | ||
638 | \n\ | ||
639 | ldq $1,56($18) \n\ | ||
640 | ldq $2,56($19) \n\ | ||
641 | ldq $3,56($20) \n\ | ||
642 | xor $6,$7,$7 # 8 cycles from $6 load \n\ | ||
643 | \n\ | ||
644 | ldq $31,256($17) \n\ | ||
645 | xor $21,$22,$22 # 8 cycles from $22 load \n\ | ||
646 | ldq $31,256($18) \n\ | ||
647 | xor $7,$22,$22 \n\ | ||
648 | \n\ | ||
649 | ldq $31,256($19) \n\ | ||
650 | xor $23,$24,$24 # 6 cycles from $24 load \n\ | ||
651 | ldq $31,256($20) \n\ | ||
652 | xor $25,$27,$27 # 6 cycles from $27 load \n\ | ||
653 | \n\ | ||
654 | stq $5,32($17) \n\ | ||
655 | xor $24,$27,$27 \n\ | ||
656 | xor $0,$1,$1 # 7 cycles from $1 load \n\ | ||
657 | xor $2,$3,$3 # 6 cycles from $3 load \n\ | ||
658 | \n\ | ||
659 | stq $22,40($17) \n\ | ||
660 | xor $1,$3,$3 \n\ | ||
661 | stq $27,48($17) \n\ | ||
662 | subq $16,1,$16 \n\ | ||
663 | \n\ | ||
664 | stq $3,56($17) \n\ | ||
665 | addq $20,64,$20 \n\ | ||
666 | addq $19,64,$19 \n\ | ||
667 | addq $18,64,$18 \n\ | ||
668 | \n\ | ||
669 | addq $17,64,$17 \n\ | ||
670 | bgt $16,4b \n\ | ||
671 | ret \n\ | ||
672 | .end xor_alpha_prefetch_4 \n\ | ||
673 | \n\ | ||
674 | .align 3 \n\ | ||
675 | .ent xor_alpha_prefetch_5 \n\ | ||
676 | xor_alpha_prefetch_5: \n\ | ||
677 | .prologue 0 \n\ | ||
678 | srl $16, 6, $16 \n\ | ||
679 | \n\ | ||
680 | ldq $31, 0($17) \n\ | ||
681 | ldq $31, 0($18) \n\ | ||
682 | ldq $31, 0($19) \n\ | ||
683 | ldq $31, 0($20) \n\ | ||
684 | ldq $31, 0($21) \n\ | ||
685 | \n\ | ||
686 | ldq $31, 64($17) \n\ | ||
687 | ldq $31, 64($18) \n\ | ||
688 | ldq $31, 64($19) \n\ | ||
689 | ldq $31, 64($20) \n\ | ||
690 | ldq $31, 64($21) \n\ | ||
691 | \n\ | ||
692 | ldq $31, 128($17) \n\ | ||
693 | ldq $31, 128($18) \n\ | ||
694 | ldq $31, 128($19) \n\ | ||
695 | ldq $31, 128($20) \n\ | ||
696 | ldq $31, 128($21) \n\ | ||
697 | \n\ | ||
698 | ldq $31, 192($17) \n\ | ||
699 | ldq $31, 192($18) \n\ | ||
700 | ldq $31, 192($19) \n\ | ||
701 | ldq $31, 192($20) \n\ | ||
702 | ldq $31, 192($21) \n\ | ||
703 | .align 4 \n\ | ||
704 | 5: \n\ | ||
705 | ldq $0,0($17) \n\ | ||
706 | ldq $1,0($18) \n\ | ||
707 | ldq $2,0($19) \n\ | ||
708 | ldq $3,0($20) \n\ | ||
709 | \n\ | ||
710 | ldq $4,0($21) \n\ | ||
711 | ldq $5,8($17) \n\ | ||
712 | ldq $6,8($18) \n\ | ||
713 | ldq $7,8($19) \n\ | ||
714 | \n\ | ||
715 | ldq $22,8($20) \n\ | ||
716 | ldq $23,8($21) \n\ | ||
717 | ldq $24,16($17) \n\ | ||
718 | ldq $25,16($18) \n\ | ||
719 | \n\ | ||
720 | ldq $27,16($19) \n\ | ||
721 | xor $0,$1,$1 # 6 cycles from $1 load \n\ | ||
722 | ldq $28,16($20) \n\ | ||
723 | xor $2,$3,$3 # 6 cycles from $3 load \n\ | ||
724 | \n\ | ||
725 | ldq $0,16($21) \n\ | ||
726 | xor $1,$3,$3 \n\ | ||
727 | ldq $1,24($17) \n\ | ||
728 | xor $3,$4,$4 # 7 cycles from $4 load \n\ | ||
729 | \n\ | ||
730 | stq $4,0($17) \n\ | ||
731 | xor $5,$6,$6 # 7 cycles from $6 load \n\ | ||
732 | xor $7,$22,$22 # 7 cycles from $22 load \n\ | ||
733 | xor $6,$23,$23 # 7 cycles from $23 load \n\ | ||
734 | \n\ | ||
735 | ldq $2,24($18) \n\ | ||
736 | xor $22,$23,$23 \n\ | ||
737 | ldq $3,24($19) \n\ | ||
738 | xor $24,$25,$25 # 8 cycles from $25 load \n\ | ||
739 | \n\ | ||
740 | stq $23,8($17) \n\ | ||
741 | xor $25,$27,$27 # 8 cycles from $27 load \n\ | ||
742 | ldq $4,24($20) \n\ | ||
743 | xor $28,$0,$0 # 7 cycles from $0 load \n\ | ||
744 | \n\ | ||
745 | ldq $5,24($21) \n\ | ||
746 | xor $27,$0,$0 \n\ | ||
747 | ldq $6,32($17) \n\ | ||
748 | ldq $7,32($18) \n\ | ||
749 | \n\ | ||
750 | stq $0,16($17) \n\ | ||
751 | xor $1,$2,$2 # 6 cycles from $2 load \n\ | ||
752 | ldq $22,32($19) \n\ | ||
753 | xor $3,$4,$4 # 4 cycles from $4 load \n\ | ||
754 | \n\ | ||
755 | ldq $23,32($20) \n\ | ||
756 | xor $2,$4,$4 \n\ | ||
757 | ldq $24,32($21) \n\ | ||
758 | ldq $25,40($17) \n\ | ||
759 | \n\ | ||
760 | ldq $27,40($18) \n\ | ||
761 | ldq $28,40($19) \n\ | ||
762 | ldq $0,40($20) \n\ | ||
763 | xor $4,$5,$5 # 7 cycles from $5 load \n\ | ||
764 | \n\ | ||
765 | stq $5,24($17) \n\ | ||
766 | xor $6,$7,$7 # 7 cycles from $7 load \n\ | ||
767 | ldq $1,40($21) \n\ | ||
768 | ldq $2,48($17) \n\ | ||
769 | \n\ | ||
770 | ldq $3,48($18) \n\ | ||
771 | xor $7,$22,$22 # 7 cycles from $22 load \n\ | ||
772 | ldq $4,48($19) \n\ | ||
773 | xor $23,$24,$24 # 6 cycles from $24 load \n\ | ||
774 | \n\ | ||
775 | ldq $5,48($20) \n\ | ||
776 | xor $22,$24,$24 \n\ | ||
777 | ldq $6,48($21) \n\ | ||
778 | xor $25,$27,$27 # 7 cycles from $27 load \n\ | ||
779 | \n\ | ||
780 | stq $24,32($17) \n\ | ||
781 | xor $27,$28,$28 # 8 cycles from $28 load \n\ | ||
782 | ldq $7,56($17) \n\ | ||
783 | xor $0,$1,$1 # 6 cycles from $1 load \n\ | ||
784 | \n\ | ||
785 | ldq $22,56($18) \n\ | ||
786 | ldq $23,56($19) \n\ | ||
787 | ldq $24,56($20) \n\ | ||
788 | ldq $25,56($21) \n\ | ||
789 | \n\ | ||
790 | ldq $31,256($17) \n\ | ||
791 | xor $28,$1,$1 \n\ | ||
792 | ldq $31,256($18) \n\ | ||
793 | xor $2,$3,$3 # 9 cycles from $3 load \n\ | ||
794 | \n\ | ||
795 | ldq $31,256($19) \n\ | ||
796 | xor $3,$4,$4 # 9 cycles from $4 load \n\ | ||
797 | ldq $31,256($20) \n\ | ||
798 | xor $5,$6,$6 # 8 cycles from $6 load \n\ | ||
799 | \n\ | ||
800 | stq $1,40($17) \n\ | ||
801 | xor $4,$6,$6 \n\ | ||
802 | xor $7,$22,$22 # 7 cycles from $22 load \n\ | ||
803 | xor $23,$24,$24 # 6 cycles from $24 load \n\ | ||
804 | \n\ | ||
805 | stq $6,48($17) \n\ | ||
806 | xor $22,$24,$24 \n\ | ||
807 | ldq $31,256($21) \n\ | ||
808 | xor $24,$25,$25 # 8 cycles from $25 load \n\ | ||
809 | \n\ | ||
810 | stq $25,56($17) \n\ | ||
811 | subq $16,1,$16 \n\ | ||
812 | addq $21,64,$21 \n\ | ||
813 | addq $20,64,$20 \n\ | ||
814 | \n\ | ||
815 | addq $19,64,$19 \n\ | ||
816 | addq $18,64,$18 \n\ | ||
817 | addq $17,64,$17 \n\ | ||
818 | bgt $16,5b \n\ | ||
819 | \n\ | ||
820 | ret \n\ | ||
821 | .end xor_alpha_prefetch_5 \n\ | ||
822 | "); | ||
823 | |||
824 | static struct xor_block_template xor_block_alpha = { | ||
825 | .name = "alpha", | ||
826 | .do_2 = xor_alpha_2, | ||
827 | .do_3 = xor_alpha_3, | ||
828 | .do_4 = xor_alpha_4, | ||
829 | .do_5 = xor_alpha_5, | ||
830 | }; | ||
831 | |||
832 | static struct xor_block_template xor_block_alpha_prefetch = { | ||
833 | .name = "alpha prefetch", | ||
834 | .do_2 = xor_alpha_prefetch_2, | ||
835 | .do_3 = xor_alpha_prefetch_3, | ||
836 | .do_4 = xor_alpha_prefetch_4, | ||
837 | .do_5 = xor_alpha_prefetch_5, | ||
838 | }; | ||
839 | |||
840 | /* For grins, also test the generic routines. */ | ||
841 | #include <asm-generic/xor.h> | ||
842 | |||
843 | #undef XOR_TRY_TEMPLATES | ||
844 | #define XOR_TRY_TEMPLATES \ | ||
845 | do { \ | ||
846 | xor_speed(&xor_block_8regs); \ | ||
847 | xor_speed(&xor_block_32regs); \ | ||
848 | xor_speed(&xor_block_alpha); \ | ||
849 | xor_speed(&xor_block_alpha_prefetch); \ | ||
850 | } while (0) | ||
851 | |||
852 | /* Force the use of alpha_prefetch if EV6, as it is significantly | ||
853 | faster in the cold cache case. */ | ||
854 | #define XOR_SELECT_TEMPLATE(FASTEST) \ | ||
855 | (implver() == IMPLVER_EV6 ? &xor_block_alpha_prefetch : FASTEST) | ||
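The two template structures and the XOR_TRY_TEMPLATES/XOR_SELECT_TEMPLATE hooks are how the kernel's XOR calibration code chooses an implementation: each candidate is timed and the fastest is kept, except that EV6 is pushed onto the prefetching variant regardless. The stand-alone sketch below (hypothetical names, plain C instead of the tuned assembly, assuming a 64-bit unsigned long as on Alpha) mimics the do_2() calling convention, a byte count plus buffers with the result XORed into the first, and the "override the benchmark winner" selection; it illustrates the mechanism, not the kernel's actual calibration.

/* xor_template_demo.c - illustrative model only, not the kernel's RAID code. */
#include <stdio.h>
#include <string.h>

struct demo_xor_template {
	const char *name;
	void (*do_2)(unsigned long bytes, unsigned long *p1, unsigned long *p2);
};

/* Plain C version of what xor_alpha_2 does 64 bytes per loop iteration:
 * XOR the second buffer into the first, one cache line at a time. */
static void demo_xor_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
	unsigned long lines = bytes / 64;	/* same effect as "srl $16, 6, $16" */
	int i;

	while (lines--) {
		for (i = 0; i < 8; i++)		/* eight quadwords per 64-byte line */
			p1[i] ^= p2[i];
		p1 += 8;
		p2 += 8;
	}
}

static struct demo_xor_template demo_plain    = { "plain",    demo_xor_2 };
static struct demo_xor_template demo_prefetch = { "prefetch", demo_xor_2 };

int main(void)
{
	unsigned long a[16], b[16];
	int cpu_prefers_prefetch = 1;		/* stands in for implver() == IMPLVER_EV6 */
	struct demo_xor_template *fastest = &demo_plain;	/* pretend benchmark winner */
	struct demo_xor_template *chosen;

	/* XOR_SELECT_TEMPLATE in miniature: the measured winner is ignored
	 * when the CPU is known to do better with the prefetching variant. */
	chosen = cpu_prefers_prefetch ? &demo_prefetch : fastest;

	memset(a, 0xaa, sizeof(a));
	memset(b, 0xff, sizeof(b));
	chosen->do_2(sizeof(a), a, b);

	printf("template '%s': a[0] = 0x%lx\n", chosen->name, a[0]);
	return 0;
}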