Diffstat (limited to 'arch')
378 files changed, 13792 insertions, 4680 deletions
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 0c79b9d95f74..f7c96635d3b4 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -280,6 +280,10 @@ config ISA
 	  (MCA) or VESA. ISA is an older system, now being displaced by PCI;
 	  newer boards don't support it. If you have ISA, say Y, otherwise N.
 
+config ISA_DMA_API
+	bool
+	default y
+
 config PCI
 	bool
 	depends on !ALPHA_JENSEN
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index b5d0fd2bb10a..64e450dddb49 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -457,22 +457,6 @@ osf_getdomainname(char __user *name, int namelen)
 	return 0;
 }
 
-asmlinkage long
-osf_shmat(int shmid, void __user *shmaddr, int shmflg)
-{
-	unsigned long raddr;
-	long err;
-
-	err = do_shmat(shmid, shmaddr, shmflg, &raddr);
-
-	/*
-	 * This works because all user-level addresses are
-	 * non-negative longs!
-	 */
-	return err ? err : (long)raddr;
-}
-
-
 /*
  * The following stuff should move into a header file should it ever
  * be labeled "officially supported." Right now, there is just enough
diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c
index d00583161574..bbd37536d14e 100644
--- a/arch/alpha/kernel/ptrace.c
+++ b/arch/alpha/kernel/ptrace.c
@@ -14,6 +14,7 @@
 #include <linux/user.h>
 #include <linux/slab.h>
 #include <linux/security.h>
+#include <linux/signal.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -335,7 +336,7 @@ do_sys_ptrace(long request, long pid, long addr, long data,
 	/* continue and stop at next (return from) syscall */
 	case PTRACE_CONT:    /* restart after signal. */
 		ret = -EIO;
-		if ((unsigned long) data > _NSIG)
+		if (!valid_signal(data))
 			break;
 		if (request == PTRACE_SYSCALL)
 			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
@@ -365,7 +366,7 @@ do_sys_ptrace(long request, long pid, long addr, long data,
 
 	case PTRACE_SINGLESTEP:  /* execute single instruction. */
 		ret = -EIO;
-		if ((unsigned long) data > _NSIG)
+		if (!valid_signal(data))
 			break;
 		/* Mark single stepping. */
 		child->thread_info->bpt_nsaved = -1;
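Both ptrace hunks above (like the ARM ones later in this diff) replace the
open-coded "(unsigned long) data > _NSIG" test with valid_signal().  A minimal
sketch of that helper, assuming the include/linux/signal.h definition it refers
to, is simply the same bounds check on the signal number:

    /* sketch of the helper used above; see include/linux/signal.h */
    static inline int valid_signal(unsigned long sig)
    {
            return sig <= _NSIG ? 1 : 0;
    }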
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index 3864b33562ee..052120882876 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -227,7 +227,7 @@ sys_call_table:
 	.quad sys_semop
 	.quad osf_utsname
 	.quad sys_lchown
-	.quad osf_shmat
+	.quad sys_shmat
 	.quad sys_shmctl			/* 210 */
 	.quad sys_shmdt
 	.quad sys_shmget
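The osf_shmat() wrapper deleted from osf_sys.c above did nothing Alpha-specific:
it called do_shmat() and returned the attach address as the (non-negative)
syscall result.  The syscall table now points at a generic sys_shmat() in the
ipc code with the same behaviour; a rough sketch of such a wrapper, modelled on
the removed code rather than quoting the ipc/shm.c source verbatim:

    /* illustrative only; follows the removed osf_shmat() */
    asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg)
    {
            unsigned long raddr;
            long err;

            err = do_shmat(shmid, shmaddr, shmflg, &raddr);
            if (err)
                    return err;
            /* works because user addresses are non-negative longs */
            return (long)raddr;
    }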
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 4055115ae0e2..bf397a9f8ac2 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -85,6 +85,7 @@ choice
 config ARCH_CLPS7500
 	bool "Cirrus-CL-PS7500FE"
 	select TIMER_ACORN
+	select ISA
 
 config ARCH_CLPS711X
 	bool "CLPS711x/EP721x-based"
@@ -96,6 +97,7 @@ config ARCH_CO285
 
 config ARCH_EBSA110
 	bool "EBSA-110"
+	select ISA
 	help
 	  This is an evaluation board for the StrongARM processor available
 	  from Digital. It has limited hardware on-board, including an onboard
@@ -120,13 +122,16 @@ config ARCH_INTEGRATOR
 
 config ARCH_IOP3XX
 	bool "IOP3xx-based"
+	select PCI
 
 config ARCH_IXP4XX
 	bool "IXP4xx-based"
 	select DMABOUNCE
+	select PCI
 
 config ARCH_IXP2000
 	bool "IXP2400/2800-based"
+	select PCI
 
 config ARCH_L7200
 	bool "LinkUp-L7200"
@@ -155,6 +160,8 @@ config ARCH_RPC
 
 config ARCH_SA1100
 	bool "SA1100-based"
+	select ISA
+	select DISCONTIGMEM
 
 config ARCH_S3C2410
 	bool "Samsung S3C2410"
@@ -165,6 +172,9 @@ config ARCH_S3C2410
 
 config ARCH_SHARK
 	bool "Shark"
+	select ISA
+	select ISA_DMA
+	select PCI
 
 config ARCH_LH7A40X
 	bool "Sharp LH7A40X"
@@ -252,8 +262,6 @@ config ARM_AMBA
 
 config ISA
 	bool
-	depends on FOOTBRIDGE_HOST || ARCH_SHARK || ARCH_CLPS7500 || ARCH_EBSA110 || ARCH_CDB89712 || ARCH_EDB7211 || ARCH_SA1100 || ARCH_MX1ADS
-	default y
 	help
 	  Find out whether you have ISA slots on your motherboard. ISA is the
 	  name of a bus system, i.e. the way the CPU talks to the other stuff
@@ -263,12 +271,13 @@ config ISA
 
 config ISA_DMA
 	bool
-	depends on FOOTBRIDGE_HOST || ARCH_SHARK
+
+config ISA_DMA_API
+	bool
 	default y
 
 config PCI
 	bool "PCI support" if ARCH_INTEGRATOR_AP
-	default y if ARCH_SHARK || FOOTBRIDGE_HOST || ARCH_IOP3XX || ARCH_IXP4XX || ARCH_IXP2000
 	help
 	  Find out whether you have a PCI motherboard. PCI is the name of a
 	  bus system, i.e. the way the CPU talks to the other stuff inside
@@ -296,7 +305,7 @@ menu "Kernel Features"
 
 config SMP
 	bool "Symmetric Multi-Processing (EXPERIMENTAL)"
-	depends on EXPERIMENTAL && n
+	depends on EXPERIMENTAL #&& n
 	help
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
@@ -336,8 +345,7 @@ config PREEMPT
 
 config DISCONTIGMEM
 	bool
-	depends on ARCH_EDB7211 || ARCH_SA1100 || (ARCH_LH7A40X && !LH7A40X_CONTIGMEM)
-	default y
+	default (ARCH_LH7A40X && !LH7A40X_CONTIGMEM)
 	help
 	  Say Y to support efficient handling of discontiguous physical memory,
 	  for architectures which are either NUMA (Non-Uniform Memory Access)
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index c0e7aff3dec2..7c7f475e213e 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -18,48 +18,30 @@
  * Please select one of the following when turning on debugging.
  */
 #ifdef DEBUG
-#if defined(CONFIG_DEBUG_DC21285_PORT)
-		.macro	loadsp, rb
-		mov	\rb, #0x42000000
-		.endm
-		.macro	writeb, rb
-		str	\rb, [r3, #0x160]
-		.endm
-#elif defined(CONFIG_DEBUG_ICEDCC)
+
+#include <asm/arch/debug-macro.S>
+
+#if defined(CONFIG_DEBUG_ICEDCC)
 		.macro	loadsp, rb
 		.endm
-		.macro	writeb, rb
-		mcr	p14, 0, \rb, c0, c1, 0
-		.endm
-#elif defined(CONFIG_FOOTBRIDGE)
-		.macro	loadsp, rb
-		mov	\rb, #0x7c000000
+		.macro	writeb, ch, rb
+		mcr	p14, 0, \ch, c0, c1, 0
 		.endm
-		.macro	writeb, rb
-		strb	\rb, [r3, #0x3f8]
+#else
+		.macro	writeb, ch, rb
+		senduart \ch, \rb
 		.endm
-#elif defined(CONFIG_ARCH_RPC)
+
+#if defined(CONFIG_FOOTBRIDGE) || \
+    defined(CONFIG_ARCH_RPC) || \
+    defined(CONFIG_ARCH_INTEGRATOR) || \
+    defined(CONFIG_ARCH_PXA) || \
+    defined(CONFIG_ARCH_IXP4XX) || \
+    defined(CONFIG_ARCH_IXP2000) || \
+    defined(CONFIG_ARCH_LH7A40X) || \
+    defined(CONFIG_ARCH_OMAP)
 		.macro	loadsp, rb
-		mov	\rb, #0x03000000
-		orr	\rb, \rb, #0x00010000
-		.endm
-		.macro	writeb, rb
-		strb	\rb, [r3, #0x3f8 << 2]
-		.endm
-#elif defined(CONFIG_ARCH_INTEGRATOR)
-		.macro	loadsp, rb
-		mov	\rb, #0x16000000
-		.endm
-		.macro	writeb, rb
-		strb	\rb, [r3, #0]
-		.endm
-#elif defined(CONFIG_ARCH_PXA) /* Xscale-type */
-		.macro	loadsp, rb
-		mov	\rb, #0x40000000
-		orr	\rb, \rb, #0x00100000
-		.endm
-		.macro	writeb, rb
-		strb	\rb, [r3, #0]
+		addruart \rb
 		.endm
 #elif defined(CONFIG_ARCH_SA1100)
 		.macro	loadsp, rb
@@ -70,65 +52,22 @@
 		add	\rb, \rb, #0x00010000	@ Ser1
 # endif
 		.endm
-		.macro	writeb, rb
-		str	\rb, [r3, #0x14]	@ UTDR
-		.endm
-#elif defined(CONFIG_ARCH_IXP4XX)
-		.macro	loadsp, rb
-		mov	\rb, #0xc8000000
-		.endm
-		.macro	writeb, rb
-		str	\rb, [r3, #0]
-#elif defined(CONFIG_ARCH_IXP2000)
-		.macro	loadsp, rb
-		mov	\rb, #0xc0000000
-		orr	\rb, \rb, #0x00030000
-		.endm
-		.macro	writeb, rb
-		str	\rb, [r3, #0]
-		.endm
-#elif defined(CONFIG_ARCH_LH7A40X)
-		.macro	loadsp, rb
-		ldr	\rb, =0x80000700	@ UART2 UARTBASE
-		.endm
-		.macro	writeb, rb
-		strb	\rb, [r3, #0]
-		.endm
-#elif defined(CONFIG_ARCH_OMAP)
-		.macro	loadsp, rb
-		mov	\rb, #0xff000000	@ physical base address
-		add	\rb, \rb, #0x00fb0000
-#if defined(CONFIG_OMAP_LL_DEBUG_UART2) || defined(CONFIG_OMAP_LL_DEBUG_UART3)
-		add	\rb, \rb, #0x00000800
-#endif
-#ifdef CONFIG_OMAP_LL_DEBUG_UART3
-		add	\rb, \rb, #0x00009000
-#endif
-		.endm
-		.macro	writeb, rb
-		strb	\rb, [r3]
-		.endm
 #elif defined(CONFIG_ARCH_IOP331)
 		.macro	loadsp, rb
 		mov	\rb, #0xff000000
 		orr	\rb, \rb, #0x00ff0000
 		orr	\rb, \rb, #0x0000f700	@ location of the UART
 		.endm
-		.macro	writeb, rb
-		str	\rb, [r3, #0]
-		.endm
 #elif defined(CONFIG_ARCH_S3C2410)
 		.macro	loadsp, rb
 		mov	\rb, #0x50000000
 		add	\rb, \rb, #0x4000 * CONFIG_S3C2410_LOWLEVEL_UART_PORT
 		.endm
-		.macro	writeb, rb
-		strb	\rb, [r3, #0x20]
-		.endm
 #else
 #error no serial architecture defined
 #endif
 #endif
+#endif
 
 		.macro	kputc,val
 		mov	r0, \val
@@ -734,7 +673,7 @@ puts:		loadsp	r3
 1:		ldrb	r2, [r0], #1
 		teq	r2, #0
 		moveq	pc, lr
-2:		writeb	r2
+2:		writeb	r2, r3
 		mov	r1, #0x00020000
 3:		subs	r1, r1, #1
 		bne	3b
diff --git a/arch/arm/common/rtctime.c b/arch/arm/common/rtctime.c
index c397e71f938d..72b03f201eb9 100644
--- a/arch/arm/common/rtctime.c
+++ b/arch/arm/common/rtctime.c
@@ -141,10 +141,10 @@ void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now, struct rtc
 	next->tm_sec = alrm->tm_sec;
 }
 
-static inline void rtc_read_time(struct rtc_ops *ops, struct rtc_time *tm)
+static inline int rtc_read_time(struct rtc_ops *ops, struct rtc_time *tm)
 {
 	memset(tm, 0, sizeof(struct rtc_time));
-	ops->read_time(tm);
+	return ops->read_time(tm);
 }
 
 static inline int rtc_set_time(struct rtc_ops *ops, struct rtc_time *tm)
@@ -163,8 +163,7 @@ static inline int rtc_read_alarm(struct rtc_ops *ops, struct rtc_wkalrm *alrm)
 	int ret = -EINVAL;
 	if (ops->read_alarm) {
 		memset(alrm, 0, sizeof(struct rtc_wkalrm));
-		ops->read_alarm(alrm);
-		ret = 0;
+		ret = ops->read_alarm(alrm);
 	}
 	return ret;
 }
@@ -283,7 +282,9 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
 		break;
 
 	case RTC_RD_TIME:
-		rtc_read_time(ops, &tm);
+		ret = rtc_read_time(ops, &tm);
+		if (ret)
+			break;
 		ret = copy_to_user(uarg, &tm, sizeof(tm));
 		if (ret)
 			ret = -EFAULT;
@@ -424,15 +425,15 @@ static int rtc_read_proc(char *page, char **start, off_t off, int count, int *eo
 	struct rtc_time tm;
 	char *p = page;
 
-	rtc_read_time(ops, &tm);
-
-	p += sprintf(p,
-		"rtc_time\t: %02d:%02d:%02d\n"
-		"rtc_date\t: %04d-%02d-%02d\n"
-		"rtc_epoch\t: %04lu\n",
-		tm.tm_hour, tm.tm_min, tm.tm_sec,
-		tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
-		rtc_epoch);
+	if (rtc_read_time(ops, &tm) == 0) {
+		p += sprintf(p,
+			"rtc_time\t: %02d:%02d:%02d\n"
+			"rtc_date\t: %04d-%02d-%02d\n"
+			"rtc_epoch\t: %04lu\n",
+			tm.tm_hour, tm.tm_min, tm.tm_sec,
+			tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+			rtc_epoch);
+	}
 
 	if (rtc_read_alarm(ops, &alrm) == 0) {
 		p += sprintf(p, "alrm_time\t: ");
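With rtc_read_time() now returning the driver's status instead of void, the
rtc_ops read_time hook is expected to report failures.  A hypothetical driver
hook, only to illustrate the contract the ioctl and /proc callers above now
rely on (both hardware helpers below are made up):

    static int sample_rtc_read_time(struct rtc_time *tm)
    {
            if (!sample_rtc_running())      /* hypothetical "clock halted" check */
                    return -EIO;            /* RTC_RD_TIME now fails cleanly */
            sample_rtc_decode(tm);          /* hypothetical register decode */
            return 0;
    }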
diff --git a/arch/arm/configs/ixdp2800_defconfig b/arch/arm/configs/ixdp2800_defconfig
index d36f99192962..7be3521f91fc 100644
--- a/arch/arm/configs/ixdp2800_defconfig
+++ b/arch/arm/configs/ixdp2800_defconfig
@@ -133,7 +133,7 @@ CONFIG_ALIGNMENT_TRAP=y
 #
 CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="console=ttyS0,9600 root=/dev/nfs ip=bootp mem=64M@0x0 pci=firmware"
+CONFIG_CMDLINE="console=ttyS0,9600 root=/dev/nfs ip=bootp mem=64M@0x0"
 # CONFIG_XIP_KERNEL is not set
 
 #
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 07a56ff61494..4a2af55e134b 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -31,8 +31,3 @@ head-y := head.o
 obj-$(CONFIG_DEBUG_LL)	+= debug.o
 
 extra-y := $(head-y) init_task.o vmlinux.lds
-
-# Spell out some dependencies that aren't automatically figured out
-$(obj)/entry-armv.o:	$(obj)/entry-header.S include/asm-arm/constants.h
-$(obj)/entry-common.o:	$(obj)/entry-header.S include/asm-arm/constants.h \
-			$(obj)/calls.S
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 99d43259ff89..c1ff4d1f1bfd 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -64,6 +64,26 @@ int main(void)
   DEFINE(TI_VFPSTATE, offsetof(struct thread_info, vfpstate));
   DEFINE(TI_IWMMXT_STATE, (offsetof(struct thread_info, fpstate)+4)&~7);
   BLANK();
+  DEFINE(S_R0, offsetof(struct pt_regs, ARM_r0));
+  DEFINE(S_R1, offsetof(struct pt_regs, ARM_r1));
+  DEFINE(S_R2, offsetof(struct pt_regs, ARM_r2));
+  DEFINE(S_R3, offsetof(struct pt_regs, ARM_r3));
+  DEFINE(S_R4, offsetof(struct pt_regs, ARM_r4));
+  DEFINE(S_R5, offsetof(struct pt_regs, ARM_r5));
+  DEFINE(S_R6, offsetof(struct pt_regs, ARM_r6));
+  DEFINE(S_R7, offsetof(struct pt_regs, ARM_r7));
+  DEFINE(S_R8, offsetof(struct pt_regs, ARM_r8));
+  DEFINE(S_R9, offsetof(struct pt_regs, ARM_r9));
+  DEFINE(S_R10, offsetof(struct pt_regs, ARM_r10));
+  DEFINE(S_FP, offsetof(struct pt_regs, ARM_fp));
+  DEFINE(S_IP, offsetof(struct pt_regs, ARM_ip));
+  DEFINE(S_SP, offsetof(struct pt_regs, ARM_sp));
+  DEFINE(S_LR, offsetof(struct pt_regs, ARM_lr));
+  DEFINE(S_PC, offsetof(struct pt_regs, ARM_pc));
+  DEFINE(S_PSR, offsetof(struct pt_regs, ARM_cpsr));
+  DEFINE(S_OLD_R0, offsetof(struct pt_regs, ARM_ORIG_r0));
+  DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
+  BLANK();
 #if __LINUX_ARM_ARCH__ >= 6
   DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id));
   BLANK();
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index bb27c317d94b..4eb36155dc93 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -14,12 +14,12 @@
  * it to save wrong values... Be aware!
  */
 #include <linux/config.h>
-#include <linux/init.h>
 
-#include <asm/thread_info.h>
 #include <asm/glue.h>
-#include <asm/ptrace.h>
 #include <asm/vfpmacros.h>
+#include <asm/hardware.h>		/* should be moved into entry-macro.S */
+#include <asm/arch/irqs.h>		/* should be moved into entry-macro.S */
+#include <asm/arch/entry-macro.S>
 
 #include "entry-header.S"
 
@@ -118,7 +118,7 @@ __dabt_svc:
 	@
 	@ IRQs off again before pulling preserved data off the stack
 	@
-	disable_irq r0
+	disable_irq
 
 	@
 	@ restore SPSR and restart the instruction
@@ -198,7 +198,7 @@ __und_svc:
 	@
 	@ IRQs off again before pulling preserved data off the stack
 	@
-1:	disable_irq r0
+1:	disable_irq
 
 	@
 	@ restore SPSR and restart the instruction
@@ -232,7 +232,7 @@ __pabt_svc:
 	@
 	@ IRQs off again before pulling preserved data off the stack
 	@
-	disable_irq r0
+	disable_irq
 
 	@
 	@ restore SPSR and restart the instruction
@@ -269,6 +269,12 @@ __pabt_svc:
 	add	r5, sp, #S_PC
 	ldmia	r7, {r2 - r4}			@ Get USR pc, cpsr
 
+#if __LINUX_ARM_ARCH__ < 6
+	@ make sure our user space atomic helper is aborted
+	cmp	r2, #VIRT_OFFSET
+	bichs	r3, r3, #PSR_Z_BIT
+#endif
+
 	@
 	@ We are now ready to fill in the remaining blanks on the stack:
 	@
@@ -316,7 +322,7 @@ __dabt_usr:
 	@
 	@ IRQs on, then call the main handler
 	@
-	enable_irq r2
+	enable_irq
 	mov	r2, sp
 	adr	lr, ret_from_exception
 	b	do_DataAbort
@@ -418,7 +424,7 @@ call_fpe:
 	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
 	bcs	iwmmxt_task_enable
 #endif
-	enable_irq r7
+	enable_irq
 	add	pc, pc, r8, lsr #6
 	mov	r0, r0
 
@@ -472,7 +478,7 @@ fpundefinstr:
 __pabt_usr:
 	usr_entry abt
 
-	enable_irq r0				@ Enable interrupts
+	enable_irq				@ Enable interrupts
 	mov	r0, r2				@ address (pc)
 	mov	r1, sp				@ regs
 	bl	do_PrefetchAbort		@ call abort handler
@@ -499,8 +505,12 @@ ENTRY(__switch_to)
 	mra	r4, r5, acc0
 	stmia	ip, {r4, r5}
 #endif
+#if defined(CONFIG_HAS_TLS_REG)
+	mcr	p15, 0, r3, c13, c0, 3		@ set TLS register
+#elif !defined(CONFIG_TLS_REG_EMUL)
 	mov	r4, #0xffff0fff
-	str	r3, [r4, #-3]			@ Set TLS ptr
+	str	r3, [r4, #-15]			@ TLS val at 0xffff0ff0
+#endif
 	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
 #ifdef CONFIG_VFP
 	@ Always disable VFP so we can lazily save/restore the old
@@ -519,11 +529,209 @@ ENTRY(__switch_to)
 	ldmib	r2, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously
 
 	__INIT
+
+/*
+ * User helpers.
+ *
+ * These are segment of kernel provided user code reachable from user space
+ * at a fixed address in kernel memory.  This is used to provide user space
+ * with some operations which require kernel help because of unimplemented
+ * native feature and/or instructions in many ARM CPUs. The idea is for
+ * this code to be executed directly in user mode for best efficiency but
+ * which is too intimate with the kernel counter part to be left to user
+ * libraries.  In fact this code might even differ from one CPU to another
+ * depending on the available instruction set and restrictions like on
+ * SMP systems.  In other words, the kernel reserves the right to change
+ * this code as needed without warning. Only the entry points and their
+ * results are guaranteed to be stable.
+ *
+ * Each segment is 32-byte aligned and will be moved to the top of the high
+ * vector page.  New segments (if ever needed) must be added in front of
+ * existing ones.  This mechanism should be used only for things that are
+ * really small and justified, and not be abused freely.
+ *
+ * User space is expected to implement those things inline when optimizing
+ * for a processor that has the necessary native support, but only if such
+ * resulting binaries are already to be incompatible with earlier ARM
+ * processors due to the use of unsupported instructions other than what
+ * is provided here.  In other words don't make binaries unable to run on
+ * earlier processors just for the sake of not using these kernel helpers
+ * if your compiled code is not going to use the new instructions for other
+ * purpose.
+ */
+
+	.align	5
+	.globl	__kuser_helper_start
+__kuser_helper_start:
+
+/*
+ * Reference prototype:
+ *
+ *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
+ *
+ * Input:
+ *
+ *	r0 = oldval
+ *	r1 = newval
+ *	r2 = ptr
+ *	lr = return address
+ *
+ * Output:
+ *
+ *	r0 = returned value (zero or non-zero)
+ *	C flag = set if r0 == 0, clear if r0 != 0
+ *
+ * Clobbered:
+ *
+ *	r3, ip, flags
+ *
+ * Definition and user space usage example:
+ *
+ *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
+ *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
+ *
+ * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
+ * Return zero if *ptr was changed or non-zero if no exchange happened.
+ * The C flag is also set if *ptr was changed to allow for assembly
+ * optimization in the calling code.
+ *
+ * For example, a user space atomic_add implementation could look like this:
+ *
+ * #define atomic_add(ptr, val) \
+ *	({ register unsigned int *__ptr asm("r2") = (ptr); \
+ *	   register unsigned int __result asm("r1"); \
+ *	   asm volatile ( \
+ *	       "1: @ atomic_add\n\t" \
+ *	       "ldr	r0, [r2]\n\t" \
+ *	       "mov	r3, #0xffff0fff\n\t" \
+ *	       "add	lr, pc, #4\n\t" \
+ *	       "add	r1, r0, %2\n\t" \
+ *	       "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
+ *	       "bcc	1b" \
+ *	       : "=&r" (__result) \
+ *	       : "r" (__ptr), "rIL" (val) \
+ *	       : "r0","r3","ip","lr","cc","memory" ); \
+ *	   __result; })
+ */
+
+__kuser_cmpxchg:				@ 0xffff0fc0
+
+#if __LINUX_ARM_ARCH__ < 6
+
+#ifdef CONFIG_SMP  /* sanity check */
+#error "CONFIG_SMP on a machine supporting pre-ARMv6 processors?"
+#endif
+
+	/*
+	 * Theory of operation:
+	 *
+	 * We set the Z flag before loading oldval. If ever an exception
+	 * occurs we can not be sure the loaded value will still be the same
+	 * when the exception returns, therefore the user exception handler
+	 * will clear the Z flag whenever the interrupted user code was
+	 * actually from the kernel address space (see the usr_entry macro).
+	 *
+	 * The post-increment on the str is used to prevent a race with an
+	 * exception happening just after the str instruction which would
+	 * clear the Z flag although the exchange was done.
+	 */
+	teq	ip, ip			@ set Z flag
+	ldr	ip, [r2]		@ load current val
+	add	r3, r2, #1		@ prepare store ptr
+	teqeq	ip, r0			@ compare with oldval if still allowed
+	streq	r1, [r3, #-1]!		@ store newval if still allowed
+	subs	r0, r2, r3		@ if r2 == r3 the str occured
+	mov	pc, lr
+
+#else
+
+	ldrex	r3, [r2]
+	subs	r3, r3, r0
+	strexeq	r3, r1, [r2]
+	rsbs	r0, r3, #0
+	mov	pc, lr
+
+#endif
+
+	.align	5
+
+/*
+ * Reference prototype:
+ *
+ *	int __kernel_get_tls(void)
+ *
+ * Input:
+ *
+ *	lr = return address
+ *
+ * Output:
+ *
+ *	r0 = TLS value
+ *
+ * Clobbered:
+ *
+ *	the Z flag might be lost
+ *
+ * Definition and user space usage example:
+ *
+ *	typedef int (__kernel_get_tls_t)(void);
+ *	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
+ *
+ * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
+ *
+ * This could be used as follows:
+ *
+ * #define __kernel_get_tls() \
+ *	({ register unsigned int __val asm("r0"); \
+ *	   asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
+ *		: "=r" (__val) : : "lr","cc" ); \
+ *	   __val; })
+ */
+
+__kuser_get_tls:				@ 0xffff0fe0
+
+#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL)
+
+	ldr	r0, [pc, #(16 - 8)]	@ TLS stored at 0xffff0ff0
+	mov	pc, lr
+
+#else
+
+	mrc	p15, 0, r0, c13, c0, 3	@ read TLS register
+	mov	pc, lr
+
+#endif
+
+	.rep	5
+	.word	0			@ pad up to __kuser_helper_version
+	.endr
+
+/*
+ * Reference declaration:
+ *
+ *	extern unsigned int __kernel_helper_version;
+ *
+ * Definition and user space usage example:
+ *
+ *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
+ *
+ * User space may read this to determine the curent number of helpers
+ * available.
+ */
+
+__kuser_helper_version:				@ 0xffff0ffc
+	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)
+
+	.globl	__kuser_helper_end
+__kuser_helper_end:
+
+
 /*
  * Vector stubs.
  *
- * This code is copied to 0x200 or 0xffff0200 so we can use branches in the
- * vectors, rather than ldr's.
+ * This code is copied to 0xffff0200 so we can use branches in the
+ * vectors, rather than ldr's.  Note that this code must not
+ * exceed 0x300 bytes.
  *
  * Common stub entry macro:
  * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
@@ -544,7 +752,7 @@ vector_\name:
 	@
 	mrs	r13, cpsr
 	bic	r13, r13, #MODE_MASK
-	orr	r13, r13, #MODE_SVC
+	orr	r13, r13, #SVC_MODE
 	msr	spsr_cxsf, r13			@ switch to SVC_32 mode
 
 	and	lr, lr, #15
@@ -552,6 +760,7 @@ vector_\name:
 	movs	pc, lr				@ Changes mode and branches
 	.endm
 
+	.globl	__stubs_start
 __stubs_start:
 /*
  * Interrupt dispatcher
@@ -686,37 +895,24 @@ vector_addrexcptn:
 .LCsabt:
 	.word	__temp_abt
 
+	.globl	__stubs_end
 __stubs_end:
 
-	.equ	__real_stubs_start, .LCvectors + 0x200
+	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start
 
-.LCvectors:
+	.globl	__vectors_start
+__vectors_start:
 	swi	SYS_ERROR0
-	b	__real_stubs_start + (vector_und - __stubs_start)
-	ldr	pc, __real_stubs_start + (.LCvswi - __stubs_start)
-	b	__real_stubs_start + (vector_pabt - __stubs_start)
-	b	__real_stubs_start + (vector_dabt - __stubs_start)
-	b	__real_stubs_start + (vector_addrexcptn - __stubs_start)
-	b	__real_stubs_start + (vector_irq - __stubs_start)
-	b	__real_stubs_start + (vector_fiq - __stubs_start)
+	b	vector_und + stubs_offset
+	ldr	pc, .LCvswi + stubs_offset
+	b	vector_pabt + stubs_offset
+	b	vector_dabt + stubs_offset
+	b	vector_addrexcptn + stubs_offset
+	b	vector_irq + stubs_offset
+	b	vector_fiq + stubs_offset
 
-ENTRY(__trap_init)
-	stmfd	sp!, {r4 - r6, lr}
-
-	mov	r0, #0xff000000
-	orr	r0, r0, #0x00ff0000		@ high vectors position
-	adr	r1, .LCvectors			@ set up the vectors
-	ldmia	r1, {r1, r2, r3, r4, r5, r6, ip, lr}
-	stmia	r0, {r1, r2, r3, r4, r5, r6, ip, lr}
-
-	add	r2, r0, #0x200
-	adr	r0, __stubs_start		@ copy stubs to 0x200
-	adr	r1, __stubs_end
-1:	ldr	r3, [r0], #4
-	str	r3, [r2], #4
-	cmp	r0, r1
-	blt	1b
-	LOADREGS(fd, sp!, {r4 - r6, pc})
+	.globl	__vectors_end
+__vectors_end:
 
 	.data
 
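The helper documentation added above already carries its own usage examples in
the comments (the atomic_add and __kernel_get_tls snippets).  As a hedged
sketch of how a user-space program might tie the pieces together, using only
the addresses and typedefs quoted in those comments; the idea that cmpxchg
needs a helper version of at least 2 is an assumption based on the helpers
being counted in 32-byte slots from the top of the vector page:

    typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
    #define __kernel_cmpxchg        (*(__kernel_cmpxchg_t *)0xffff0fc0)
    #define __kernel_helper_version (*(unsigned int *)0xffff0ffc)

    /* atomically set bit 0 of *flag; returns the previous value */
    static int atomic_set_flag(int *flag)
    {
            int old;

            if (__kernel_helper_version < 2)   /* assumed minimum for cmpxchg */
                    return -1;
            do {
                    old = *flag;
            } while (__kernel_cmpxchg(old, old | 1, flag) != 0);
            return old;
    }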
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 53a7e0dea44d..3f8d0e3aefab 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -9,19 +9,10 @@
  */
 #include <linux/config.h>
 
-#include <asm/thread_info.h>
-#include <asm/ptrace.h>
 #include <asm/unistd.h>
 
 #include "entry-header.S"
 
-/*
- * We rely on the fact that R0 is at the bottom of the stack (due to
- * slow/fast restore user regs).
- */
-#if S_R0 != 0
-#error "Please fix"
-#endif
 
 	.align	5
 /*
@@ -30,11 +21,19 @@
  * stack.
  */
ret_fast_syscall:
-	disable_irq r1				@ disable interrupts
+	disable_irq				@ disable interrupts
 	ldr	r1, [tsk, #TI_FLAGS]
 	tst	r1, #_TIF_WORK_MASK
 	bne	fast_work_pending
-	fast_restore_user_regs
+
+	@ fast_restore_user_regs
+	ldr	r1, [sp, #S_OFF + S_PSR]	@ get calling cpsr
+	ldr	lr, [sp, #S_OFF + S_PC]!	@ get pc
+	msr	spsr_cxsf, r1			@ save in spsr_svc
+	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
+	mov	r0, r0
+	add	sp, sp, #S_FRAME_SIZE - S_PC
+	movs	pc, lr				@ return & move spsr_svc into cpsr
 
 /*
  * Ok, we need to do extra processing, enter the slow path.
@@ -49,7 +48,7 @@ work_pending:
 	mov	r0, sp				@ 'regs'
 	mov	r2, why				@ 'syscall'
 	bl	do_notify_resume
-	disable_irq r1				@ disable interrupts
+	disable_irq				@ disable interrupts
 	b	no_work_pending
 
 work_resched:
@@ -59,12 +58,19 @@ work_resched:
  */
 ENTRY(ret_to_user)
ret_slow_syscall:
-	disable_irq r1				@ disable interrupts
+	disable_irq				@ disable interrupts
 	ldr	r1, [tsk, #TI_FLAGS]
 	tst	r1, #_TIF_WORK_MASK
 	bne	work_pending
no_work_pending:
-	slow_restore_user_regs
+	@ slow_restore_user_regs
+	ldr	r1, [sp, #S_PSR]		@ get calling cpsr
+	ldr	lr, [sp, #S_PC]!		@ get pc
+	msr	spsr_cxsf, r1			@ save in spsr_svc
+	ldmdb	sp, {r0 - lr}^			@ get calling r1 - lr
+	mov	r0, r0
+	add	sp, sp, #S_FRAME_SIZE - S_PC
+	movs	pc, lr				@ return & move spsr_svc into cpsr
 
 /*
  * This is how we return from a fork.
@@ -116,9 +122,26 @@ ENTRY(ret_from_fork)
 
 	.align	5
 ENTRY(vector_swi)
-	save_user_regs
+	sub	sp, sp, #S_FRAME_SIZE
+	stmia	sp, {r0 - r12}			@ Calling r0 - r12
+	add	r8, sp, #S_PC
+	stmdb	r8, {sp, lr}^			@ Calling sp, lr
+	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
+	str	lr, [sp, #S_PC]			@ Save calling PC
+	str	r8, [sp, #S_PSR]		@ Save CPSR
+	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
 	zero_fp
-	get_scno
+
+	/*
+	 * Get the system call number.
+	 */
+#ifdef CONFIG_ARM_THUMB
+	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
+	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
+	ldreq	scno, [lr, #-4]
+#else
+	ldr	scno, [lr, #-4]			@ get SWI instruction
+#endif
 	arm710_bug_check scno, ip
 
@@ -126,14 +149,14 @@ ENTRY(vector_swi)
 	ldr	ip, [ip]
 	mcr	p15, 0, ip, c1, c0		@ update control register
 #endif
-	enable_irq ip
+	enable_irq
 
 	str	r4, [sp, #-S_OFF]!		@ push fifth arg
 
 	get_thread_info tsk
 	ldr	ip, [tsk, #TI_FLAGS]		@ check for syscall tracing
 	bic	scno, scno, #0xff000000		@ mask off SWI op-code
-	eor	scno, scno, #OS_NUMBER << 20	@ check OS number
+	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
 	adr	tbl, sys_call_table		@ load syscall table pointer
 	tst	ip, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
 	bne	__sys_trace
@@ -144,8 +167,8 @@ ENTRY(vector_swi)
 
 	add	r1, sp, #S_OFF
 2:	mov	why, #0				@ no longer a real syscall
-	cmp	scno, #ARMSWI_OFFSET
-	eor	r0, scno, #OS_NUMBER << 20	@ put OS number back
+	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
+	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
 	bcs	arm_syscall
 	b	sys_ni_syscall			@ not private func
 
@@ -190,7 +213,7 @@ ENTRY(sys_call_table)
 @ r5 = syscall table
 	.type	sys_syscall, #function
sys_syscall:
-	eor	scno, r0, #OS_NUMBER << 20
+	eor	scno, r0, #__NR_SYSCALL_BASE
 	cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
 	cmpne	scno, #NR_syscalls		@ check range
 	stmloia	sp, {r5, r6}			@ shuffle args
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 4039d8c120b5..a3d40a0e2b04 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -1,24 +1,11 @@
-#include <linux/config.h> /* for CONFIG_ARCH_xxxx */
+#include <linux/config.h>
+#include <linux/init.h>
 #include <linux/linkage.h>
 
 #include <asm/assembler.h>
 #include <asm/constants.h>
 #include <asm/errno.h>
-#include <asm/hardware.h>
-#include <asm/arch/irqs.h>
-#include <asm/arch/entry-macro.S>
-
-#ifndef MODE_SVC
-#define MODE_SVC 0x13
-#endif
-
-	.macro	zero_fp
-#ifdef CONFIG_FRAME_POINTER
-	mov	fp, #0
-#endif
-	.endm
-
-	.text
+#include <asm/thread_info.h>
 
 @ Bad Abort numbers
 @ -----------------
@@ -29,113 +16,44 @@
 #define BAD_IRQ		3
 #define BAD_UNDEFINSTR	4
 
-#define PT_TRACESYS	0x00000002
-
-@ OS version number used in SWIs
-@  RISC OS is 0
-@  RISC iX is 8
 @
-#define OS_NUMBER	9
-#define ARMSWI_OFFSET	0x000f0000
-
+@ Most of the stack format comes from struct pt_regs, but with
+@ the addition of 8 bytes for storing syscall args 5 and 6.
 @
-@ Stack format (ensured by USER_* and SVC_*)
-@
-#define S_FRAME_SIZE	72
-#define S_OLD_R0	68
-#define S_PSR		64
-
-#define S_PC		60
-#define S_LR		56
-#define S_SP		52
-#define S_IP		48
-#define S_FP		44
-#define S_R10		40
-#define S_R9		36
-#define S_R8		32
-#define S_R7		28
-#define S_R6		24
-#define S_R5		20
-#define S_R4		16
-#define S_R3		12
-#define S_R2		8
-#define S_R1		4
-#define S_R0		0
 #define S_OFF		8
 
-	.macro	set_cpsr_c, reg, mode
-	msr	cpsr_c, \mode
+/*
+ * The SWI code relies on the fact that R0 is at the bottom of the stack
+ * (due to slow/fast restore user regs).
+ */
+#if S_R0 != 0
+#error "Please fix"
+#endif
+
+	.macro	zero_fp
+#ifdef CONFIG_FRAME_POINTER
+	mov	fp, #0
+#endif
 	.endm
 
 #if __LINUX_ARM_ARCH__ >= 6
-	.macro	disable_irq, temp
+	.macro	disable_irq
 	cpsid	i
 	.endm
 
-	.macro	enable_irq, temp
+	.macro	enable_irq
 	cpsie	i
 	.endm
 #else
-	.macro	disable_irq, temp
-	set_cpsr_c \temp, #PSR_I_BIT | MODE_SVC
+	.macro	disable_irq
+	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
 	.endm
 
-	.macro	enable_irq, temp
-	set_cpsr_c \temp, #MODE_SVC
+	.macro	enable_irq
+	msr	cpsr_c, #SVC_MODE
 	.endm
 #endif
 
-	.macro	save_user_regs
-	sub	sp, sp, #S_FRAME_SIZE
-	stmia	sp, {r0 - r12}			@ Calling r0 - r12
-	add	r8, sp, #S_PC
-	stmdb	r8, {sp, lr}^			@ Calling sp, lr
-	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
-	str	lr, [sp, #S_PC]			@ Save calling PC
-	str	r8, [sp, #S_PSR]		@ Save CPSR
-	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
-	.endm
-
-	.macro	restore_user_regs
-	ldr	r1, [sp, #S_PSR]		@ Get calling cpsr
-	disable_irq ip				@ disable IRQs
-	ldr	lr, [sp, #S_PC]!		@ Get PC
-	msr	spsr_cxsf, r1			@ save in spsr_svc
-	ldmdb	sp, {r0 - lr}^			@ Get calling r0 - lr
-	mov	r0, r0
-	add	sp, sp, #S_FRAME_SIZE - S_PC
-	movs	pc, lr				@ return & move spsr_svc into cpsr
-	.endm
-
-/*
- * Must be called with IRQs already disabled.
- */
-	.macro	fast_restore_user_regs
-	ldr	r1, [sp, #S_OFF + S_PSR]	@ get calling cpsr
-	ldr	lr, [sp, #S_OFF + S_PC]!	@ get pc
-	msr	spsr_cxsf, r1			@ save in spsr_svc
-	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
-	mov	r0, r0
-	add	sp, sp, #S_FRAME_SIZE - S_PC
-	movs	pc, lr				@ return & move spsr_svc into cpsr
-	.endm
-
-/*
- * Must be called with IRQs already disabled.
- */
-	.macro	slow_restore_user_regs
-	ldr	r1, [sp, #S_PSR]		@ get calling cpsr
-	ldr	lr, [sp, #S_PC]!		@ get pc
-	msr	spsr_cxsf, r1			@ save in spsr_svc
-	ldmdb	sp, {r0 - lr}^			@ get calling r1 - lr
-	mov	r0, r0
-	add	sp, sp, #S_FRAME_SIZE - S_PC
-	movs	pc, lr				@ return & move spsr_svc into cpsr
-	.endm
-
-	.macro	mask_pc, rd, rm
-	.endm
-
 	.macro	get_thread_info, rd
 	mov	\rd, sp, lsr #13
 	mov	\rd, \rd, lsl #13
@@ -165,18 +83,3 @@ scno	.req	r7		@ syscall number
 tbl	.req	r8		@ syscall table pointer
 why	.req	r8		@ Linux syscall (!= 0)
 tsk	.req	r9		@ current thread_info
-
-/*
- * Get the system call number.
- */
-	.macro	get_scno
-#ifdef CONFIG_ARM_THUMB
-	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
-	addne	scno, r7, #OS_NUMBER << 20	@ put OS number in
-	ldreq	scno, [lr, #-4]
-
-#else
-	mask_pc	lr, lr
-	ldr	scno, [lr, #-4]			@ get SWI instruction
-#endif
-	.endm
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 171b3e811c71..4733877296d4 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -19,6 +19,7 @@
 #include <asm/procinfo.h>
 #include <asm/ptrace.h>
 #include <asm/constants.h>
+#include <asm/thread_info.h>
 #include <asm/system.h>
 
 #define PROCINFO_MMUFLAGS	8
@@ -131,7 +132,7 @@ __switch_data:
 	.long	processor_id			@ r4
 	.long	__machine_arch_type		@ r5
 	.long	cr_alignment			@ r6
-	.long	init_thread_union+8192		@ sp
+	.long	init_thread_union + THREAD_START_SP @ sp
 
 /*
  * The following fragment of code is executed with the MMU on, and uses
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 26eacd3e5def..8f146a4b4752 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -256,8 +256,6 @@ static unsigned long *thread_info_head;
 static unsigned int nr_thread_info;
 
 #define EXTRA_TASK_STRUCT	4
-#define ll_alloc_task_struct()	((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
-#define ll_free_task_struct(p)	free_pages((unsigned long)(p),1)
 
 struct thread_info *alloc_thread_info(struct task_struct *task)
 {
@@ -274,17 +272,16 @@ struct thread_info *alloc_thread_info(struct task_struct *task)
 	}
 
 	if (!thread)
-		thread = ll_alloc_task_struct();
+		thread = (struct thread_info *)
+			   __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
 
-#ifdef CONFIG_MAGIC_SYSRQ
+#ifdef CONFIG_DEBUG_STACK_USAGE
 	/*
 	 * The stack must be cleared if you want SYSRQ-T to
 	 * give sensible stack usage information
 	 */
-	if (thread) {
-		char *p = (char *)thread;
-		memzero(p+KERNEL_STACK_SIZE, KERNEL_STACK_SIZE);
-	}
+	if (thread)
+		memzero(thread, THREAD_SIZE);
 #endif
 	return thread;
 }
@@ -297,7 +294,7 @@ void free_thread_info(struct thread_info *thread)
 		thread_info_head = p;
 		nr_thread_info += 1;
 	} else
-		ll_free_task_struct(thread);
+		free_pages((unsigned long)thread, THREAD_SIZE_ORDER);
 }
 
 /*
@@ -350,7 +347,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long stack_start,
 	struct thread_info *thread = p->thread_info;
 	struct pt_regs *childregs;
 
-	childregs = ((struct pt_regs *)((unsigned long)thread + THREAD_SIZE - 8)) - 1;
+	childregs = ((struct pt_regs *)((unsigned long)thread + THREAD_START_SP)) - 1;
 	*childregs = *regs;
 	childregs->ARM_r0 = 0;
 	childregs->ARM_sp = stack_start;
@@ -447,15 +444,17 @@ EXPORT_SYMBOL(kernel_thread);
 unsigned long get_wchan(struct task_struct *p)
 {
 	unsigned long fp, lr;
-	unsigned long stack_page;
+	unsigned long stack_start, stack_end;
 	int count = 0;
 	if (!p || p == current || p->state == TASK_RUNNING)
 		return 0;
 
-	stack_page = 4096 + (unsigned long)p->thread_info;
+	stack_start = (unsigned long)(p->thread_info + 1);
+	stack_end = ((unsigned long)p->thread_info) + THREAD_SIZE;
+
 	fp = thread_saved_fp(p);
 	do {
-		if (fp < stack_page || fp > 4092+stack_page)
+		if (fp < stack_start || fp > stack_end)
 			return 0;
 		lr = pc_pointer (((unsigned long *)fp)[-1]);
 		if (!in_sched_functions(lr))
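Several hunks in this file (and in head.S, sys_arm.c and traps.c) replace
hard-coded 4096/8192 stack arithmetic with THREAD_SIZE, THREAD_SIZE_ORDER and
THREAD_START_SP.  A minimal sketch of how those constants relate, assuming the
include/asm-arm/thread_info.h definitions this series switches to; the exact
values are an assumption, kept at the existing 8K stacks:

    #define THREAD_SIZE_ORDER   1                       /* two pages per stack */
    #define THREAD_SIZE         8192
    #define THREAD_START_SP     (THREAD_SIZE - 8)       /* initial sp offset */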
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index efd7a341614b..cd99b83f14c2 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -19,6 +19,7 @@
 #include <linux/user.h>
 #include <linux/security.h>
 #include <linux/init.h>
+#include <linux/signal.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -693,7 +694,7 @@ static int do_ptrace(int request, struct task_struct *child, long addr, long dat
 	case PTRACE_SYSCALL:
 	case PTRACE_CONT:
 		ret = -EIO;
-		if ((unsigned long) data > _NSIG)
+		if (!valid_signal(data))
 			break;
 		if (request == PTRACE_SYSCALL)
 			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
@@ -728,7 +729,7 @@ static int do_ptrace(int request, struct task_struct *child, long addr, long dat
 	 */
 	case PTRACE_SINGLESTEP:
 		ret = -EIO;
-		if ((unsigned long) data > _NSIG)
+		if (!valid_signal(data))
 			break;
 		child->ptrace |= PT_SINGLESTEP;
 		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c
index 7ba6342cf93d..f897ce2ccf0d 100644
--- a/arch/arm/kernel/sys_arm.c
+++ b/arch/arm/kernel/sys_arm.c
@@ -227,18 +227,6 @@ asmlinkage int sys_ipc(uint call, int first, int second, int third,
 	}
 }
 
-asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg,
-			  unsigned long __user *addr)
-{
-	unsigned long ret;
-	long err;
-
-	err = do_shmat(shmid, shmaddr, shmflg, &ret);
-	if (err == 0)
-		err = put_user(ret, addr);
-	return err;
-}
-
 /* Fork a new task - this creates a new program thread.
  * This is called indirectly via a small wrapper
  */
@@ -314,7 +302,7 @@ long execve(const char *filename, char **argv, char **envp)
 		  "b	ret_to_user"
 		:
 		: "r" (current_thread_info()),
-		  "Ir" (THREAD_SIZE - 8 - sizeof(regs)),
+		  "Ir" (THREAD_START_SP - sizeof(regs)),
 		  "r" (&regs),
 		  "Ir" (sizeof(regs))
 		: "r0", "r1", "r2", "r3", "ip", "memory");
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 6e31718f6008..14df16b983f4 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c | |||
@@ -218,7 +218,8 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err) | |||
218 | tsk->comm, tsk->pid, tsk->thread_info + 1); | 218 | tsk->comm, tsk->pid, tsk->thread_info + 1); |
219 | 219 | ||
220 | if (!user_mode(regs) || in_interrupt()) { | 220 | if (!user_mode(regs) || in_interrupt()) { |
221 | dump_mem("Stack: ", regs->ARM_sp, 8192+(unsigned long)tsk->thread_info); | 221 | dump_mem("Stack: ", regs->ARM_sp, |
222 | THREAD_SIZE + (unsigned long)tsk->thread_info); | ||
222 | dump_backtrace(regs, tsk); | 223 | dump_backtrace(regs, tsk); |
223 | dump_instr(regs); | 224 | dump_instr(regs); |
224 | } | 225 | } |
@@ -450,13 +451,17 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) | |||
450 | 451 | ||
451 | case NR(set_tls): | 452 | case NR(set_tls): |
452 | thread->tp_value = regs->ARM_r0; | 453 | thread->tp_value = regs->ARM_r0; |
454 | #if defined(CONFIG_HAS_TLS_REG) | ||
455 | asm ("mcr p15, 0, %0, c13, c0, 3" : : "r" (regs->ARM_r0) ); | ||
456 | #elif !defined(CONFIG_TLS_REG_EMUL) | ||
453 | /* | 457 | /* |
454 | * Our user accessible TLS ptr is located at 0xffff0ffc. | 458 | * User space must never try to access this directly. |
455 | * On SMP read access to this address must raise a fault | 459 | * Expect your app to break eventually if you do so. |
456 | * and be emulated from the data abort handler. | 460 | * The user helper at 0xffff0fe0 must be used instead. |
457 | * m | 461 | * (see entry-armv.S for details) |
458 | */ | 462 | */ |
459 | *((unsigned long *)0xffff0ffc) = thread->tp_value; | 463 | *((unsigned int *)0xffff0ff0) = regs->ARM_r0; |
464 | #endif | ||
460 | return 0; | 465 | return 0; |
461 | 466 | ||
462 | default: | 467 | default: |
@@ -493,6 +498,44 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) | |||
493 | return 0; | 498 | return 0; |
494 | } | 499 | } |
495 | 500 | ||
501 | #ifdef CONFIG_TLS_REG_EMUL | ||
502 | |||
503 | /* | ||
504 | * We might be running on an ARMv6+ processor which should have the TLS | ||
505 | * register but for some reason we can't use it, or maybe an SMP system | ||
506 | * using a pre-ARMv6 processor (there are apparently a few prototypes like | ||
507 | * that in existence) and therefore access to that register must be | ||
508 | * emulated. | ||
509 | */ | ||
510 | |||
511 | static int get_tp_trap(struct pt_regs *regs, unsigned int instr) | ||
512 | { | ||
513 | int reg = (instr >> 12) & 15; | ||
514 | if (reg == 15) | ||
515 | return 1; | ||
516 | regs->uregs[reg] = current_thread_info()->tp_value; | ||
517 | regs->ARM_pc += 4; | ||
518 | return 0; | ||
519 | } | ||
520 | |||
521 | static struct undef_hook arm_mrc_hook = { | ||
522 | .instr_mask = 0x0fff0fff, | ||
523 | .instr_val = 0x0e1d0f70, | ||
524 | .cpsr_mask = PSR_T_BIT, | ||
525 | .cpsr_val = 0, | ||
526 | .fn = get_tp_trap, | ||
527 | }; | ||
528 | |||
529 | static int __init arm_mrc_hook_init(void) | ||
530 | { | ||
531 | register_undef_hook(&arm_mrc_hook); | ||
532 | return 0; | ||
533 | } | ||
534 | |||
535 | late_initcall(arm_mrc_hook_init); | ||
536 | |||
537 | #endif | ||
538 | |||
496 | void __bad_xchg(volatile void *ptr, int size) | 539 | void __bad_xchg(volatile void *ptr, int size) |
497 | { | 540 | { |
498 | printk("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n", | 541 | printk("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n", |
@@ -578,9 +621,19 @@ EXPORT_SYMBOL(abort); | |||
578 | 621 | ||
579 | void __init trap_init(void) | 622 | void __init trap_init(void) |
580 | { | 623 | { |
581 | extern void __trap_init(void); | 624 | extern char __stubs_start[], __stubs_end[]; |
625 | extern char __vectors_start[], __vectors_end[]; | ||
626 | extern char __kuser_helper_start[], __kuser_helper_end[]; | ||
627 | int kuser_sz = __kuser_helper_end - __kuser_helper_start; | ||
582 | 628 | ||
583 | __trap_init(); | 629 | /* |
630 | * Copy the vectors, stubs and kuser helpers (in entry-armv.S) | ||
631 | * into the vector page, mapped at 0xffff0000, and ensure these | ||
632 | * are visible to the instruction stream. | ||
633 | */ | ||
634 | memcpy((void *)0xffff0000, __vectors_start, __vectors_end - __vectors_start); | ||
635 | memcpy((void *)0xffff0200, __stubs_start, __stubs_end - __stubs_start); | ||
636 | memcpy((void *)0xffff1000 - kuser_sz, __kuser_helper_start, kuser_sz); | ||
584 | flush_icache_range(0xffff0000, 0xffff0000 + PAGE_SIZE); | 637 | flush_icache_range(0xffff0000, 0xffff0000 + PAGE_SIZE); |
585 | modify_domain(DOMAIN_USER, DOMAIN_CLIENT); | 638 | modify_domain(DOMAIN_USER, DOMAIN_CLIENT); |
586 | } | 639 | } |
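With the set_tls change above, user space is expected to go through the helper that trap_init() now copies into the vector page rather than poke at the thread pointer location directly. A hedged user-space sketch, assuming the helper at 0xffff0fe0 simply returns the value stored via NR(set_tls) (see entry-armv.S for the authoritative interface):

	typedef void *(*kuser_get_tls_t)(void);

	static void *read_thread_pointer(void)
	{
		/* call the kernel-provided helper in the vector page */
		kuser_get_tls_t get_tls = (kuser_get_tls_t)0xffff0fe0;
		return get_tls();
	}
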
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index a39c6a42d68a..ad2d66c93a5c 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S | |||
@@ -5,6 +5,7 @@ | |||
5 | 5 | ||
6 | #include <asm-generic/vmlinux.lds.h> | 6 | #include <asm-generic/vmlinux.lds.h> |
7 | #include <linux/config.h> | 7 | #include <linux/config.h> |
8 | #include <asm/thread_info.h> | ||
8 | 9 | ||
9 | OUTPUT_ARCH(arm) | 10 | OUTPUT_ARCH(arm) |
10 | ENTRY(stext) | 11 | ENTRY(stext) |
@@ -103,7 +104,7 @@ SECTIONS | |||
103 | __data_loc = ALIGN(4); /* location in binary */ | 104 | __data_loc = ALIGN(4); /* location in binary */ |
104 | . = DATAADDR; | 105 | . = DATAADDR; |
105 | #else | 106 | #else |
106 | . = ALIGN(8192); | 107 | . = ALIGN(THREAD_SIZE); |
107 | __data_loc = .; | 108 | __data_loc = .; |
108 | #endif | 109 | #endif |
109 | 110 | ||
diff --git a/arch/arm/mach-clps711x/Kconfig b/arch/arm/mach-clps711x/Kconfig index f6e676322ca9..45c930ccd064 100644 --- a/arch/arm/mach-clps711x/Kconfig +++ b/arch/arm/mach-clps711x/Kconfig | |||
@@ -10,6 +10,7 @@ config ARCH_AUTCPU12 | |||
10 | 10 | ||
11 | config ARCH_CDB89712 | 11 | config ARCH_CDB89712 |
12 | bool "CDB89712" | 12 | bool "CDB89712" |
13 | select ISA | ||
13 | help | 14 | help |
14 | This is an evaluation board from Cirrus for the CS89712 processor. | 15 | This is an evaluation board from Cirrus for the CS89712 processor. |
15 | The board includes 2 serial ports, Ethernet, IRDA, and expansion | 16 | The board includes 2 serial ports, Ethernet, IRDA, and expansion |
@@ -26,6 +27,8 @@ config ARCH_CLEP7312 | |||
26 | 27 | ||
27 | config ARCH_EDB7211 | 28 | config ARCH_EDB7211 |
28 | bool "EDB7211" | 29 | bool "EDB7211" |
30 | select ISA | ||
31 | select DISCONTIGMEM | ||
29 | help | 32 | help |
30 | Say Y here if you intend to run this kernel on a Cirrus Logic EDB-7211 | 33 | Say Y here if you intend to run this kernel on a Cirrus Logic EDB-7211 |
31 | evaluation board. | 34 | evaluation board. |
diff --git a/arch/arm/mach-footbridge/Kconfig b/arch/arm/mach-footbridge/Kconfig index 1090c680b6dd..324d9edeec38 100644 --- a/arch/arm/mach-footbridge/Kconfig +++ b/arch/arm/mach-footbridge/Kconfig | |||
@@ -5,6 +5,9 @@ menu "Footbridge Implementations" | |||
5 | config ARCH_CATS | 5 | config ARCH_CATS |
6 | bool "CATS" | 6 | bool "CATS" |
7 | select FOOTBRIDGE_HOST | 7 | select FOOTBRIDGE_HOST |
8 | select ISA | ||
9 | select ISA_DMA | ||
10 | select PCI | ||
8 | help | 11 | help |
9 | Say Y here if you intend to run this kernel on the CATS. | 12 | Say Y here if you intend to run this kernel on the CATS. |
10 | 13 | ||
@@ -13,6 +16,9 @@ config ARCH_CATS | |||
13 | config ARCH_PERSONAL_SERVER | 16 | config ARCH_PERSONAL_SERVER |
14 | bool "Compaq Personal Server" | 17 | bool "Compaq Personal Server" |
15 | select FOOTBRIDGE_HOST | 18 | select FOOTBRIDGE_HOST |
19 | select ISA | ||
20 | select ISA_DMA | ||
21 | select PCI | ||
16 | ---help--- | 22 | ---help--- |
17 | Say Y here if you intend to run this kernel on the Compaq | 23 | Say Y here if you intend to run this kernel on the Compaq |
18 | Personal Server. | 24 | Personal Server. |
@@ -42,6 +48,9 @@ config ARCH_EBSA285_HOST | |||
42 | bool "EBSA285 (host mode)" | 48 | bool "EBSA285 (host mode)" |
43 | select ARCH_EBSA285 | 49 | select ARCH_EBSA285 |
44 | select FOOTBRIDGE_HOST | 50 | select FOOTBRIDGE_HOST |
51 | select ISA | ||
52 | select ISA_DMA | ||
53 | select PCI | ||
45 | help | 54 | help |
46 | Say Y here if you intend to run this kernel on the EBSA285 card | 55 | Say Y here if you intend to run this kernel on the EBSA285 card |
47 | in host ("central function") mode. | 56 | in host ("central function") mode. |
@@ -51,6 +60,9 @@ config ARCH_EBSA285_HOST | |||
51 | config ARCH_NETWINDER | 60 | config ARCH_NETWINDER |
52 | bool "NetWinder" | 61 | bool "NetWinder" |
53 | select FOOTBRIDGE_HOST | 62 | select FOOTBRIDGE_HOST |
63 | select ISA | ||
64 | select ISA_DMA | ||
65 | select PCI | ||
54 | help | 66 | help |
55 | Say Y here if you intend to run this kernel on the Rebel.COM | 67 | Say Y here if you intend to run this kernel on the Rebel.COM |
56 | NetWinder. Information about this machine can be found at: | 68 | NetWinder. Information about this machine can be found at: |
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig index ec85813ee5dc..cddd194ac6eb 100644 --- a/arch/arm/mach-imx/Kconfig +++ b/arch/arm/mach-imx/Kconfig | |||
@@ -4,6 +4,7 @@ menu "IMX Implementations" | |||
4 | config ARCH_MX1ADS | 4 | config ARCH_MX1ADS |
5 | bool "mx1ads" | 5 | bool "mx1ads" |
6 | depends on ARCH_IMX | 6 | depends on ARCH_IMX |
7 | select ISA | ||
7 | help | 8 | help |
8 | Say Y here if you are using the Motorola MX1ADS board | 9 | Say Y here if you are using the Motorola MX1ADS board |
9 | 10 | ||
diff --git a/arch/arm/mach-imx/generic.c b/arch/arm/mach-imx/generic.c index 54377d0f578c..41e5849ae8da 100644 --- a/arch/arm/mach-imx/generic.c +++ b/arch/arm/mach-imx/generic.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/init.h> | 26 | #include <linux/init.h> |
27 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <asm/arch/imxfb.h> | ||
29 | #include <asm/hardware.h> | 30 | #include <asm/hardware.h> |
30 | 31 | ||
31 | #include <asm/mach/map.h> | 32 | #include <asm/mach/map.h> |
@@ -228,6 +229,14 @@ static struct platform_device imx_uart2_device = { | |||
228 | .resource = imx_uart2_resources, | 229 | .resource = imx_uart2_resources, |
229 | }; | 230 | }; |
230 | 231 | ||
232 | static struct imxfb_mach_info imx_fb_info; | ||
233 | |||
234 | void __init set_imx_fb_info(struct imxfb_mach_info *hard_imx_fb_info) | ||
235 | { | ||
236 | memcpy(&imx_fb_info,hard_imx_fb_info,sizeof(struct imxfb_mach_info)); | ||
237 | } | ||
238 | EXPORT_SYMBOL(set_imx_fb_info); | ||
239 | |||
231 | static struct resource imxfb_resources[] = { | 240 | static struct resource imxfb_resources[] = { |
232 | [0] = { | 241 | [0] = { |
233 | .start = 0x00205000, | 242 | .start = 0x00205000, |
@@ -241,9 +250,16 @@ static struct resource imxfb_resources[] = { | |||
241 | }, | 250 | }, |
242 | }; | 251 | }; |
243 | 252 | ||
253 | static u64 fb_dma_mask = ~(u64)0; | ||
254 | |||
244 | static struct platform_device imxfb_device = { | 255 | static struct platform_device imxfb_device = { |
245 | .name = "imx-fb", | 256 | .name = "imx-fb", |
246 | .id = 0, | 257 | .id = 0, |
258 | .dev = { | ||
259 | .platform_data = &imx_fb_info, | ||
260 | .dma_mask = &fb_dma_mask, | ||
261 | .coherent_dma_mask = 0xffffffff, | ||
262 | }, | ||
247 | .num_resources = ARRAY_SIZE(imxfb_resources), | 263 | .num_resources = ARRAY_SIZE(imxfb_resources), |
248 | .resource = imxfb_resources, | 264 | .resource = imxfb_resources, |
249 | }; | 265 | }; |
diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c index 86c50c3889b7..bd17b5154311 100644 --- a/arch/arm/mach-integrator/core.c +++ b/arch/arm/mach-integrator/core.c | |||
@@ -216,7 +216,9 @@ integrator_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) | |||
216 | 216 | ||
217 | write_seqlock(&xtime_lock); | 217 | write_seqlock(&xtime_lock); |
218 | 218 | ||
219 | // ...clear the interrupt | 219 | /* |
220 | * clear the interrupt | ||
221 | */ | ||
220 | timer1->TimerClear = 1; | 222 | timer1->TimerClear = 1; |
221 | 223 | ||
222 | timer_tick(regs); | 224 | timer_tick(regs); |
@@ -264,7 +266,7 @@ void __init integrator_time_init(unsigned long reload, unsigned int ctrl) | |||
264 | timer1->TimerValue = timer_reload; | 266 | timer1->TimerValue = timer_reload; |
265 | timer1->TimerControl = timer_ctrl; | 267 | timer1->TimerControl = timer_ctrl; |
266 | 268 | ||
267 | /* | 269 | /* |
268 | * Make irqs happen for the system timer | 270 | * Make irqs happen for the system timer |
269 | */ | 271 | */ |
270 | setup_irq(IRQ_TIMERINT1, &integrator_timer_irq); | 272 | setup_irq(IRQ_TIMERINT1, &integrator_timer_irq); |
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c index 68e15c36e336..3b948e8c2751 100644 --- a/arch/arm/mach-integrator/integrator_cp.c +++ b/arch/arm/mach-integrator/integrator_cp.c | |||
@@ -420,7 +420,22 @@ static struct clcd_panel vga = { | |||
420 | */ | 420 | */ |
421 | static void cp_clcd_enable(struct clcd_fb *fb) | 421 | static void cp_clcd_enable(struct clcd_fb *fb) |
422 | { | 422 | { |
423 | cm_control(CM_CTRL_LCDMUXSEL_MASK, CM_CTRL_LCDMUXSEL_VGA); | 423 | u32 val; |
424 | |||
425 | if (fb->fb.var.bits_per_pixel <= 8) | ||
426 | val = CM_CTRL_LCDMUXSEL_VGA_8421BPP; | ||
427 | else if (fb->fb.var.bits_per_pixel <= 16) | ||
428 | val = CM_CTRL_LCDMUXSEL_VGA_16BPP; | ||
429 | else | ||
430 | val = 0; /* no idea for this, don't trust the docs */ | ||
431 | |||
432 | cm_control(CM_CTRL_LCDMUXSEL_MASK| | ||
433 | CM_CTRL_LCDEN0| | ||
434 | CM_CTRL_LCDEN1| | ||
435 | CM_CTRL_STATIC1| | ||
436 | CM_CTRL_STATIC2| | ||
437 | CM_CTRL_STATIC| | ||
438 | CM_CTRL_n24BITEN, val); | ||
424 | } | 439 | } |
425 | 440 | ||
426 | static unsigned long framesize = SZ_1M; | 441 | static unsigned long framesize = SZ_1M; |
diff --git a/arch/arm/mach-integrator/leds.c b/arch/arm/mach-integrator/leds.c index 9d182b77b312..d2c0ab21150c 100644 --- a/arch/arm/mach-integrator/leds.c +++ b/arch/arm/mach-integrator/leds.c | |||
@@ -37,7 +37,7 @@ static void integrator_leds_event(led_event_t ledevt) | |||
37 | unsigned long flags; | 37 | unsigned long flags; |
38 | const unsigned int dbg_base = IO_ADDRESS(INTEGRATOR_DBG_BASE); | 38 | const unsigned int dbg_base = IO_ADDRESS(INTEGRATOR_DBG_BASE); |
39 | unsigned int update_alpha_leds; | 39 | unsigned int update_alpha_leds; |
40 | 40 | ||
41 | // yup, change the LEDs | 41 | // yup, change the LEDs |
42 | local_irq_save(flags); | 42 | local_irq_save(flags); |
43 | update_alpha_leds = 0; | 43 | update_alpha_leds = 0; |
diff --git a/arch/arm/mach-integrator/time.c b/arch/arm/mach-integrator/time.c index 20729de2af28..1a844ca139e0 100644 --- a/arch/arm/mach-integrator/time.c +++ b/arch/arm/mach-integrator/time.c | |||
@@ -40,25 +40,32 @@ static int integrator_set_rtc(void) | |||
40 | return 1; | 40 | return 1; |
41 | } | 41 | } |
42 | 42 | ||
43 | static void rtc_read_alarm(struct rtc_wkalrm *alrm) | 43 | static int rtc_read_alarm(struct rtc_wkalrm *alrm) |
44 | { | 44 | { |
45 | rtc_time_to_tm(readl(rtc_base + RTC_MR), &alrm->time); | 45 | rtc_time_to_tm(readl(rtc_base + RTC_MR), &alrm->time); |
46 | return 0; | ||
46 | } | 47 | } |
47 | 48 | ||
48 | static int rtc_set_alarm(struct rtc_wkalrm *alrm) | 49 | static inline int rtc_set_alarm(struct rtc_wkalrm *alrm) |
49 | { | 50 | { |
50 | unsigned long time; | 51 | unsigned long time; |
51 | int ret; | 52 | int ret; |
52 | 53 | ||
53 | ret = rtc_tm_to_time(&alrm->time, &time); | 54 | /* |
55 | * At the moment, we can only deal with non-wildcarded alarm times. | ||
56 | */ | ||
57 | ret = rtc_valid_tm(&alrm->time); | ||
58 | if (ret == 0) | ||
59 | ret = rtc_tm_to_time(&alrm->time, &time); | ||
54 | if (ret == 0) | 60 | if (ret == 0) |
55 | writel(time, rtc_base + RTC_MR); | 61 | writel(time, rtc_base + RTC_MR); |
56 | return ret; | 62 | return ret; |
57 | } | 63 | } |
58 | 64 | ||
59 | static void rtc_read_time(struct rtc_time *tm) | 65 | static int rtc_read_time(struct rtc_time *tm) |
60 | { | 66 | { |
61 | rtc_time_to_tm(readl(rtc_base + RTC_DR), tm); | 67 | rtc_time_to_tm(readl(rtc_base + RTC_DR), tm); |
68 | return 0; | ||
62 | } | 69 | } |
63 | 70 | ||
64 | /* | 71 | /* |
@@ -69,7 +76,7 @@ static void rtc_read_time(struct rtc_time *tm) | |||
69 | * edge of the 1Hz clock, we must write the time one second | 76 | * edge of the 1Hz clock, we must write the time one second |
70 | * in advance. | 77 | * in advance. |
71 | */ | 78 | */ |
72 | static int rtc_set_time(struct rtc_time *tm) | 79 | static inline int rtc_set_time(struct rtc_time *tm) |
73 | { | 80 | { |
74 | unsigned long time; | 81 | unsigned long time; |
75 | int ret; | 82 | int ret; |
diff --git a/arch/arm/mach-ixp2000/ixdp2800.c b/arch/arm/mach-ixp2000/ixdp2800.c index c4683aaff84a..aec13c7108a9 100644 --- a/arch/arm/mach-ixp2000/ixdp2800.c +++ b/arch/arm/mach-ixp2000/ixdp2800.c | |||
@@ -65,19 +65,102 @@ static struct sys_timer ixdp2800_timer = { | |||
65 | /************************************************************************* | 65 | /************************************************************************* |
66 | * IXDP2800 PCI | 66 | * IXDP2800 PCI |
67 | *************************************************************************/ | 67 | *************************************************************************/ |
68 | static void __init ixdp2800_slave_disable_pci_master(void) | ||
69 | { | ||
70 | *IXP2000_PCI_CMDSTAT &= ~(PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY); | ||
71 | } | ||
72 | |||
73 | static void __init ixdp2800_master_wait_for_slave(void) | ||
74 | { | ||
75 | volatile u32 *addr; | ||
76 | |||
77 | printk(KERN_INFO "IXDP2800: waiting for slave NPU to configure " | ||
78 | "its BAR sizes\n"); | ||
79 | |||
80 | addr = ixp2000_pci_config_addr(0, IXDP2X00_SLAVE_NPU_DEVFN, | ||
81 | PCI_BASE_ADDRESS_1); | ||
82 | do { | ||
83 | *addr = 0xffffffff; | ||
84 | cpu_relax(); | ||
85 | } while (*addr != 0xfe000008); | ||
86 | |||
87 | addr = ixp2000_pci_config_addr(0, IXDP2X00_SLAVE_NPU_DEVFN, | ||
88 | PCI_BASE_ADDRESS_2); | ||
89 | do { | ||
90 | *addr = 0xffffffff; | ||
91 | cpu_relax(); | ||
92 | } while (*addr != 0xc0000008); | ||
93 | |||
94 | /* | ||
95 | * Configure the slave's SDRAM BAR by hand. | ||
96 | */ | ||
97 | *addr = 0x40000008; | ||
98 | } | ||
99 | |||
100 | static void __init ixdp2800_slave_wait_for_master_enable(void) | ||
101 | { | ||
102 | printk(KERN_INFO "IXDP2800: waiting for master NPU to enable us\n"); | ||
103 | |||
104 | while ((*IXP2000_PCI_CMDSTAT & PCI_COMMAND_MASTER) == 0) | ||
105 | cpu_relax(); | ||
106 | } | ||
107 | |||
68 | void __init ixdp2800_pci_preinit(void) | 108 | void __init ixdp2800_pci_preinit(void) |
69 | { | 109 | { |
70 | printk("ixdp2x00_pci_preinit called\n"); | 110 | printk("ixdp2x00_pci_preinit called\n"); |
71 | 111 | ||
72 | *IXP2000_PCI_ADDR_EXT = 0x0000e000; | 112 | *IXP2000_PCI_ADDR_EXT = 0x0001e000; |
113 | |||
114 | if (!ixdp2x00_master_npu()) | ||
115 | ixdp2800_slave_disable_pci_master(); | ||
73 | 116 | ||
74 | *IXP2000_PCI_DRAM_BASE_ADDR_MASK = (0x40000000 - 1) & ~0xfffff; | ||
75 | *IXP2000_PCI_SRAM_BASE_ADDR_MASK = (0x2000000 - 1) & ~0x3ffff; | 117 | *IXP2000_PCI_SRAM_BASE_ADDR_MASK = (0x2000000 - 1) & ~0x3ffff; |
118 | *IXP2000_PCI_DRAM_BASE_ADDR_MASK = (0x40000000 - 1) & ~0xfffff; | ||
76 | 119 | ||
77 | ixp2000_pci_preinit(); | 120 | ixp2000_pci_preinit(); |
121 | |||
122 | if (ixdp2x00_master_npu()) { | ||
123 | /* | ||
124 | * Wait until the slave set its SRAM/SDRAM BAR sizes | ||
125 | * correctly before we proceed to scan and enumerate | ||
126 | * the bus. | ||
127 | */ | ||
128 | ixdp2800_master_wait_for_slave(); | ||
129 | |||
130 | /* | ||
131 | * We configure the SDRAM BARs by hand because they | ||
132 | * are 1G and fall outside of the regular allocated | ||
133 | * PCI address space. | ||
134 | */ | ||
135 | *IXP2000_PCI_SDRAM_BAR = 0x00000008; | ||
136 | } else { | ||
137 | /* | ||
138 | * Wait for the master to complete scanning the bus | ||
139 | * and assigning resources before we proceed to scan | ||
140 | * the bus ourselves. Set pci=firmware to honor the | ||
141 | * master's resource assignment. | ||
142 | */ | ||
143 | ixdp2800_slave_wait_for_master_enable(); | ||
144 | pcibios_setup("firmware"); | ||
145 | } | ||
78 | } | 146 | } |
79 | 147 | ||
80 | int ixdp2800_pci_setup(int nr, struct pci_sys_data *sys) | 148 | /* |
149 | * We assign the SDRAM BARs for the two IXP2800 CPUs by hand, outside | ||
150 | * of the regular PCI window, because there's only 512M of outbound PCI | ||
151 | * memory window on each IXP, while we need 1G for each of the BARs. | ||
152 | */ | ||
153 | static void __devinit ixp2800_pci_fixup(struct pci_dev *dev) | ||
154 | { | ||
155 | if (machine_is_ixdp2800()) { | ||
156 | dev->resource[2].start = 0; | ||
157 | dev->resource[2].end = 0; | ||
158 | dev->resource[2].flags = 0; | ||
159 | } | ||
160 | } | ||
161 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IXP2800, ixp2800_pci_fixup); | ||
162 | |||
163 | static int __init ixdp2800_pci_setup(int nr, struct pci_sys_data *sys) | ||
81 | { | 164 | { |
82 | sys->mem_offset = 0x00000000; | 165 | sys->mem_offset = 0x00000000; |
83 | 166 | ||
@@ -129,22 +212,47 @@ static int __init ixdp2800_pci_map_irq(struct pci_dev *dev, u8 slot, u8 pin) | |||
129 | } else return IRQ_IXP2000_PCIB; /* Slave NIC interrupt */ | 212 | } else return IRQ_IXP2000_PCIB; /* Slave NIC interrupt */ |
130 | } | 213 | } |
131 | 214 | ||
132 | static void ixdp2800_pci_postinit(void) | 215 | static void __init ixdp2800_master_enable_slave(void) |
133 | { | 216 | { |
134 | struct pci_dev *dev; | 217 | volatile u32 *addr; |
135 | 218 | ||
136 | if (ixdp2x00_master_npu()) { | 219 | printk(KERN_INFO "IXDP2800: enabling slave NPU\n"); |
137 | dev = pci_find_slot(1, IXDP2800_SLAVE_ENET_DEVFN); | 220 | |
138 | pci_remove_bus_device(dev); | 221 | addr = (volatile u32 *)ixp2000_pci_config_addr(0, |
139 | } else { | 222 | IXDP2X00_SLAVE_NPU_DEVFN, |
140 | dev = pci_find_slot(1, IXDP2800_MASTER_ENET_DEVFN); | 223 | PCI_COMMAND); |
141 | pci_remove_bus_device(dev); | 224 | |
225 | *addr |= PCI_COMMAND_MASTER; | ||
226 | } | ||
142 | 227 | ||
228 | static void __init ixdp2800_master_wait_for_slave_bus_scan(void) | ||
229 | { | ||
230 | volatile u32 *addr; | ||
231 | |||
232 | printk(KERN_INFO "IXDP2800: waiting for slave to finish bus scan\n"); | ||
233 | |||
234 | addr = (volatile u32 *)ixp2000_pci_config_addr(0, | ||
235 | IXDP2X00_SLAVE_NPU_DEVFN, | ||
236 | PCI_COMMAND); | ||
237 | while ((*addr & PCI_COMMAND_MEMORY) == 0) | ||
238 | cpu_relax(); | ||
239 | } | ||
240 | |||
241 | static void __init ixdp2800_slave_signal_bus_scan_completion(void) | ||
242 | { | ||
243 | printk(KERN_INFO "IXDP2800: bus scan done, signaling master\n"); | ||
244 | *IXP2000_PCI_CMDSTAT |= PCI_COMMAND_MEMORY; | ||
245 | } | ||
246 | |||
247 | static void __init ixdp2800_pci_postinit(void) | ||
248 | { | ||
249 | if (!ixdp2x00_master_npu()) { | ||
143 | ixdp2x00_slave_pci_postinit(); | 250 | ixdp2x00_slave_pci_postinit(); |
251 | ixdp2800_slave_signal_bus_scan_completion(); | ||
144 | } | 252 | } |
145 | } | 253 | } |
146 | 254 | ||
147 | struct hw_pci ixdp2800_pci __initdata = { | 255 | struct hw_pci ixdp2800_pci __initdata = { |
148 | .nr_controllers = 1, | 256 | .nr_controllers = 1, |
149 | .setup = ixdp2800_pci_setup, | 257 | .setup = ixdp2800_pci_setup, |
150 | .preinit = ixdp2800_pci_preinit, | 258 | .preinit = ixdp2800_pci_preinit, |
@@ -155,8 +263,21 @@ struct hw_pci ixdp2800_pci __initdata = { | |||
155 | 263 | ||
156 | int __init ixdp2800_pci_init(void) | 264 | int __init ixdp2800_pci_init(void) |
157 | { | 265 | { |
158 | if (machine_is_ixdp2800()) | 266 | if (machine_is_ixdp2800()) { |
267 | struct pci_dev *dev; | ||
268 | |||
159 | pci_common_init(&ixdp2800_pci); | 269 | pci_common_init(&ixdp2800_pci); |
270 | if (ixdp2x00_master_npu()) { | ||
271 | dev = pci_find_slot(1, IXDP2800_SLAVE_ENET_DEVFN); | ||
272 | pci_remove_bus_device(dev); | ||
273 | |||
274 | ixdp2800_master_enable_slave(); | ||
275 | ixdp2800_master_wait_for_slave_bus_scan(); | ||
276 | } else { | ||
277 | dev = pci_find_slot(1, IXDP2800_MASTER_ENET_DEVFN); | ||
278 | pci_remove_bus_device(dev); | ||
279 | } | ||
280 | } | ||
160 | 281 | ||
161 | return 0; | 282 | return 0; |
162 | } | 283 | } |
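The IXDP2800 changes above serialize PCI initialization between the two NPUs. A condensed view of the ordering they implement, using only the helpers introduced by this patch (error handling omitted):

	/* master NPU */
	ixdp2800_master_wait_for_slave();		/* slave sized its BARs */
	/* ... pci_common_init() scans the bus and assigns resources ... */
	ixdp2800_master_enable_slave();			/* allow slave to master the bus */
	ixdp2800_master_wait_for_slave_bus_scan();	/* slave sets MEMORY when done */

	/* slave NPU */
	ixdp2800_slave_disable_pci_master();		/* keep quiet during master scan */
	ixdp2800_slave_wait_for_master_enable();	/* wait for MASTER bit from master */
	/* ... scan with pci=firmware so the master's assignments are kept ... */
	ixdp2800_slave_signal_bus_scan_completion();	/* sets MEMORY to signal master */
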
diff --git a/arch/arm/mach-ixp2000/pci.c b/arch/arm/mach-ixp2000/pci.c index 831f8ffb6b61..5ff2f2718c58 100644 --- a/arch/arm/mach-ixp2000/pci.c +++ b/arch/arm/mach-ixp2000/pci.c | |||
@@ -37,7 +37,7 @@ static int pci_master_aborts = 0; | |||
37 | 37 | ||
38 | static int clear_master_aborts(void); | 38 | static int clear_master_aborts(void); |
39 | 39 | ||
40 | static u32 * | 40 | u32 * |
41 | ixp2000_pci_config_addr(unsigned int bus_nr, unsigned int devfn, int where) | 41 | ixp2000_pci_config_addr(unsigned int bus_nr, unsigned int devfn, int where) |
42 | { | 42 | { |
43 | u32 *paddress; | 43 | u32 *paddress; |
@@ -208,15 +208,15 @@ ixp2000_pci_preinit(void) | |||
208 | * use our own resource space. | 208 | * use our own resource space. |
209 | */ | 209 | */ |
210 | static struct resource ixp2000_pci_mem_space = { | 210 | static struct resource ixp2000_pci_mem_space = { |
211 | .start = 0x00000000, | 211 | .start = 0xe0000000, |
212 | .end = 0xffffffff, | 212 | .end = 0xffffffff, |
213 | .flags = IORESOURCE_MEM, | 213 | .flags = IORESOURCE_MEM, |
214 | .name = "PCI Mem Space" | 214 | .name = "PCI Mem Space" |
215 | }; | 215 | }; |
216 | 216 | ||
217 | static struct resource ixp2000_pci_io_space = { | 217 | static struct resource ixp2000_pci_io_space = { |
218 | .start = 0x00000000, | 218 | .start = 0x00010000, |
219 | .end = 0xffffffff, | 219 | .end = 0x0001ffff, |
220 | .flags = IORESOURCE_IO, | 220 | .flags = IORESOURCE_IO, |
221 | .name = "PCI I/O Space" | 221 | .name = "PCI I/O Space" |
222 | }; | 222 | }; |
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c index 94bcdb933e41..aa92e3708838 100644 --- a/arch/arm/mach-ixp4xx/common-pci.c +++ b/arch/arm/mach-ixp4xx/common-pci.c | |||
@@ -502,15 +502,6 @@ pci_set_dma_mask(struct pci_dev *dev, u64 mask) | |||
502 | } | 502 | } |
503 | 503 | ||
504 | int | 504 | int |
505 | pci_dac_set_dma_mask(struct pci_dev *dev, u64 mask) | ||
506 | { | ||
507 | if (mask >= SZ_64M - 1 ) | ||
508 | return 0; | ||
509 | |||
510 | return -EIO; | ||
511 | } | ||
512 | |||
513 | int | ||
514 | pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) | 505 | pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) |
515 | { | 506 | { |
516 | if (mask >= SZ_64M - 1 ) | 507 | if (mask >= SZ_64M - 1 ) |
@@ -520,7 +511,6 @@ pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) | |||
520 | } | 511 | } |
521 | 512 | ||
522 | EXPORT_SYMBOL(pci_set_dma_mask); | 513 | EXPORT_SYMBOL(pci_set_dma_mask); |
523 | EXPORT_SYMBOL(pci_dac_set_dma_mask); | ||
524 | EXPORT_SYMBOL(pci_set_consistent_dma_mask); | 514 | EXPORT_SYMBOL(pci_set_consistent_dma_mask); |
525 | EXPORT_SYMBOL(ixp4xx_pci_read); | 515 | EXPORT_SYMBOL(ixp4xx_pci_read); |
526 | EXPORT_SYMBOL(ixp4xx_pci_write); | 516 | EXPORT_SYMBOL(ixp4xx_pci_write); |
diff --git a/arch/arm/mach-pxa/generic.c b/arch/arm/mach-pxa/generic.c index b1575b8dc1cd..a45aaa115a76 100644 --- a/arch/arm/mach-pxa/generic.c +++ b/arch/arm/mach-pxa/generic.c | |||
@@ -220,6 +220,30 @@ static struct platform_device stuart_device = { | |||
220 | .id = 2, | 220 | .id = 2, |
221 | }; | 221 | }; |
222 | 222 | ||
223 | static struct resource i2c_resources[] = { | ||
224 | { | ||
225 | .start = 0x40301680, | ||
226 | .end = 0x403016a3, | ||
227 | .flags = IORESOURCE_MEM, | ||
228 | }, { | ||
229 | .start = IRQ_I2C, | ||
230 | .end = IRQ_I2C, | ||
231 | .flags = IORESOURCE_IRQ, | ||
232 | }, | ||
233 | }; | ||
234 | |||
235 | static struct platform_device i2c_device = { | ||
236 | .name = "pxa2xx-i2c", | ||
237 | .id = 0, | ||
238 | .resource = i2c_resources, | ||
239 | .num_resources = ARRAY_SIZE(i2c_resources), | ||
240 | }; | ||
241 | |||
242 | void __init pxa_set_i2c_info(struct i2c_pxa_platform_data *info) | ||
243 | { | ||
244 | i2c_device.dev.platform_data = info; | ||
245 | } | ||
246 | |||
223 | static struct platform_device *devices[] __initdata = { | 247 | static struct platform_device *devices[] __initdata = { |
224 | &pxamci_device, | 248 | &pxamci_device, |
225 | &udc_device, | 249 | &udc_device, |
@@ -227,6 +251,7 @@ static struct platform_device *devices[] __initdata = { | |||
227 | &ffuart_device, | 251 | &ffuart_device, |
228 | &btuart_device, | 252 | &btuart_device, |
229 | &stuart_device, | 253 | &stuart_device, |
254 | &i2c_device, | ||
230 | }; | 255 | }; |
231 | 256 | ||
232 | static int __init pxa_init(void) | 257 | static int __init pxa_init(void) |
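The new pxa_set_i2c_info() hook above mirrors the framebuffer case: board code supplies the platform data that the "pxa2xx-i2c" device will carry. A hypothetical board-file sketch (the fields of struct i2c_pxa_platform_data are an assumption, not part of this patch):

	static struct i2c_pxa_platform_data mainstone_i2c_info = {
		/* bus speed / slave mode settings would go here */
	};

	static void __init mainstone_init_i2c(void)
	{
		pxa_set_i2c_info(&mainstone_i2c_info);	/* becomes dev.platform_data */
	}
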
diff --git a/arch/arm/mach-pxa/sleep.S b/arch/arm/mach-pxa/sleep.S index 16cad2c2497c..5786ccad938c 100644 --- a/arch/arm/mach-pxa/sleep.S +++ b/arch/arm/mach-pxa/sleep.S | |||
@@ -18,6 +18,11 @@ | |||
18 | 18 | ||
19 | #include <asm/arch/pxa-regs.h> | 19 | #include <asm/arch/pxa-regs.h> |
20 | 20 | ||
21 | #ifdef CONFIG_PXA27x // workaround for Errata 50 | ||
22 | #define MDREFR_KDIV 0x200a4000 // all banks | ||
23 | #define CCCR_SLEEP 0x00000107 // L=7 2N=2 A=0 PPDIS=0 CPDIS=0 | ||
24 | #endif | ||
25 | |||
21 | .text | 26 | .text |
22 | 27 | ||
23 | /* | 28 | /* |
@@ -28,7 +33,9 @@ | |||
28 | 33 | ||
29 | ENTRY(pxa_cpu_suspend) | 34 | ENTRY(pxa_cpu_suspend) |
30 | 35 | ||
36 | #ifndef CONFIG_IWMMXT | ||
31 | mra r2, r3, acc0 | 37 | mra r2, r3, acc0 |
38 | #endif | ||
32 | stmfd sp!, {r2 - r12, lr} @ save registers on stack | 39 | stmfd sp!, {r2 - r12, lr} @ save registers on stack |
33 | 40 | ||
34 | @ get coprocessor registers | 41 | @ get coprocessor registers |
@@ -61,14 +68,23 @@ ENTRY(pxa_cpu_suspend) | |||
61 | @ prepare value for sleep mode | 68 | @ prepare value for sleep mode |
62 | mov r1, #3 @ sleep mode | 69 | mov r1, #3 @ sleep mode |
63 | 70 | ||
64 | @ prepare to put SDRAM into self-refresh manually | 71 | @ prepare pointer to physical address 0 (virtual mapping in generic.c) |
72 | mov r2, #UNCACHED_PHYS_0 | ||
73 | |||
74 | @ prepare SDRAM refresh settings | ||
65 | ldr r4, =MDREFR | 75 | ldr r4, =MDREFR |
66 | ldr r5, [r4] | 76 | ldr r5, [r4] |
77 | |||
78 | @ enable SDRAM self-refresh mode | ||
67 | orr r5, r5, #MDREFR_SLFRSH | 79 | orr r5, r5, #MDREFR_SLFRSH |
68 | 80 | ||
69 | @ prepare pointer to physical address 0 (virtual mapping in generic.c) | 81 | #ifdef CONFIG_PXA27x |
70 | mov r2, #UNCACHED_PHYS_0 | 82 | @ set SDCLKx divide-by-2 bits (this is part of a workaround for Errata 50) |
83 | ldr r6, =MDREFR_KDIV | ||
84 | orr r5, r5, r6 | ||
85 | #endif | ||
71 | 86 | ||
87 | #ifdef CONFIG_PXA25x | ||
72 | @ Intel PXA255 Specification Update notes problems | 88 | @ Intel PXA255 Specification Update notes problems |
73 | @ about suspending with PXBus operating above 133MHz | 89 | @ about suspending with PXBus operating above 133MHz |
74 | @ (see Errata 31, GPIO output signals, ... unpredictable in sleep | 90 | @ (see Errata 31, GPIO output signals, ... unpredictable in sleep |
@@ -100,6 +116,18 @@ ENTRY(pxa_cpu_suspend) | |||
100 | mov r0, #0 | 116 | mov r0, #0 |
101 | mcr p14, 0, r0, c6, c0, 0 | 117 | mcr p14, 0, r0, c6, c0, 0 |
102 | orr r0, r0, #2 @ initiate change bit | 118 | orr r0, r0, #2 @ initiate change bit |
119 | #endif | ||
120 | #ifdef CONFIG_PXA27x | ||
121 | @ Intel PXA270 Specification Update notes problems sleeping | ||
122 | @ with core operating above 91 MHz | ||
123 | @ (see Errata 50, ...processor does not exit from sleep...) | ||
124 | |||
125 | ldr r6, =CCCR | ||
126 | ldr r8, [r6] @ keep original value for resume | ||
127 | |||
128 | ldr r7, =CCCR_SLEEP @ prepare CCCR sleep value | ||
129 | mov r0, #0x2 @ prepare value for CLKCFG | ||
130 | #endif | ||
103 | 131 | ||
104 | @ align execution to a cache line | 132 | @ align execution to a cache line |
105 | b 1f | 133 | b 1f |
@@ -111,6 +139,7 @@ ENTRY(pxa_cpu_suspend) | |||
111 | @ All needed values are now in registers. | 139 | @ All needed values are now in registers. |
112 | @ These last instructions should be in cache | 140 | @ These last instructions should be in cache |
113 | 141 | ||
142 | #if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x) | ||
114 | @ initiate the frequency change... | 143 | @ initiate the frequency change... |
115 | str r7, [r6] | 144 | str r7, [r6] |
116 | mcr p14, 0, r0, c6, c0, 0 | 145 | mcr p14, 0, r0, c6, c0, 0 |
@@ -118,14 +147,27 @@ ENTRY(pxa_cpu_suspend) | |||
118 | @ restore the original cpu speed value for resume | 147 | @ restore the original cpu speed value for resume |
119 | str r8, [r6] | 148 | str r8, [r6] |
120 | 149 | ||
121 | @ put SDRAM into self-refresh | 150 | @ need 6 13-MHz cycles before changing PWRMODE |
122 | str r5, [r4] | 151 | @ just set frequency to 91-MHz... 6*91/13 = 42 |
152 | |||
153 | mov r0, #42 | ||
154 | 10: subs r0, r0, #1 | ||
155 | bne 10b | ||
156 | #endif | ||
157 | |||
158 | @ Do not reorder... | ||
159 | @ Intel PXA270 Specification Update notes problems performing | ||
160 | @ external accesses after SDRAM is put in self-refresh mode | ||
161 | @ (see Errata 39 ...hangs when entering self-refresh mode) | ||
123 | 162 | ||
124 | @ force address lines low by reading at physical address 0 | 163 | @ force address lines low by reading at physical address 0 |
125 | ldr r3, [r2] | 164 | ldr r3, [r2] |
126 | 165 | ||
166 | @ put SDRAM into self-refresh | ||
167 | str r5, [r4] | ||
168 | |||
127 | @ enter sleep mode | 169 | @ enter sleep mode |
128 | mcr p14, 0, r1, c7, c0, 0 | 170 | mcr p14, 0, r1, c7, c0, 0 @ PWRMODE |
129 | 171 | ||
130 | 20: b 20b @ loop waiting for sleep | 172 | 20: b 20b @ loop waiting for sleep |
131 | 173 | ||
@@ -188,7 +230,9 @@ resume_after_mmu: | |||
188 | bl cpu_xscale_proc_init | 230 | bl cpu_xscale_proc_init |
189 | #endif | 231 | #endif |
190 | ldmfd sp!, {r2, r3} | 232 | ldmfd sp!, {r2, r3} |
233 | #ifndef CONFIG_IWMMXT | ||
191 | mar acc0, r2, r3 | 234 | mar acc0, r2, r3 |
235 | #endif | ||
192 | ldmfd sp!, {r4 - r12, pc} @ return to caller | 236 | ldmfd sp!, {r4 - r12, pc} @ return to caller |
193 | 237 | ||
194 | 238 | ||
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 5b670c9ac5ef..c4fc6be629de 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig | |||
@@ -409,3 +409,24 @@ config CPU_BPREDICT_DISABLE | |||
409 | depends on CPU_ARM1020 | 409 | depends on CPU_ARM1020 |
410 | help | 410 | help |
411 | Say Y here to disable branch prediction. If unsure, say N. | 411 | Say Y here to disable branch prediction. If unsure, say N. |
412 | |||
413 | config TLS_REG_EMUL | ||
414 | bool | ||
415 | default y if (SMP || CPU_32v6) && (CPU_32v5 || CPU_32v4 || CPU_32v3) | ||
416 | help | ||
417 | We might be running on an ARMv6+ processor which should have the TLS | ||
418 | register but for some reason we can't use it, or maybe an SMP system | ||
419 | using a pre-ARMv6 processor (there are apparently a few prototypes | ||
420 | like that in existence) and therefore access to that register must | ||
421 | be emulated. | ||
422 | |||
423 | config HAS_TLS_REG | ||
424 | bool | ||
425 | depends on CPU_32v6 | ||
426 | default y if !TLS_REG_EMUL | ||
427 | help | ||
428 | This selects support for the CP15 thread register. | ||
429 | It is defined to be available on ARMv6 or later. If a particular | ||
430 | ARMv6 or later CPU doesn't support it then it must include "select | ||
431 | TLS_REG_EMUL" along with its other characteristics. | ||
432 | |||
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S index 38b2cbb89beb..8f76f3df7b4c 100644 --- a/arch/arm/mm/abort-ev6.S +++ b/arch/arm/mm/abort-ev6.S | |||
@@ -1,5 +1,6 @@ | |||
1 | #include <linux/linkage.h> | 1 | #include <linux/linkage.h> |
2 | #include <asm/assembler.h> | 2 | #include <asm/assembler.h> |
3 | #include "abort-macro.S" | ||
3 | /* | 4 | /* |
4 | * Function: v6_early_abort | 5 | * Function: v6_early_abort |
5 | * | 6 | * |
@@ -13,11 +14,26 @@ | |||
13 | * : sp = pointer to registers | 14 | * : sp = pointer to registers |
14 | * | 15 | * |
15 | * Purpose : obtain information about current aborted instruction. | 16 | * Purpose : obtain information about current aborted instruction. |
17 | * Note: we read user space. This means we might cause a data | ||
18 | * abort here if the I-TLB and D-TLB aren't seeing the same | ||
19 | * picture. Unfortunately, this does happen. We live with it. | ||
16 | */ | 20 | */ |
17 | .align 5 | 21 | .align 5 |
18 | ENTRY(v6_early_abort) | 22 | ENTRY(v6_early_abort) |
19 | mrc p15, 0, r1, c5, c0, 0 @ get FSR | 23 | mrc p15, 0, r1, c5, c0, 0 @ get FSR |
20 | mrc p15, 0, r0, c6, c0, 0 @ get FAR | 24 | mrc p15, 0, r0, c6, c0, 0 @ get FAR |
25 | /* | ||
26 | * Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR. | ||
27 | * The test below covers all the write situations, including Java bytecodes | ||
28 | */ | ||
29 | bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR | ||
30 | tst r3, #PSR_J_BIT @ Java? | ||
31 | movne pc, lr | ||
32 | do_thumb_abort | ||
33 | ldreq r3, [r2] @ read aborted ARM instruction | ||
34 | do_ldrd_abort | ||
35 | tst r3, #1 << 20 @ L = 0 -> write | ||
36 | orreq r1, r1, #1 << 11 @ yes. | ||
21 | mov pc, lr | 37 | mov pc, lr |
22 | 38 | ||
23 | 39 | ||
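The extra instructions added to v6_early_abort above work around the ARM1136 SWP issue by re-deriving the read/write bit from the aborted instruction instead of trusting DFSR bit 11. Roughly the same logic in C, for the plain-ARM (non-Thumb, non-LDRD) path only:

	static unsigned long v6_fixup_fsr(unsigned long fsr, unsigned long instr)
	{
		fsr &= ~((1 << 11) | (1 << 10));	/* clear untrusted bits 11 and 10 */
		if (!(instr & (1 << 20)))		/* L == 0 means the access was a write */
			fsr |= 1 << 11;
		return fsr;
	}
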
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c index f5a87db8b498..585dfb8e20b9 100644 --- a/arch/arm/mm/mm-armv.c +++ b/arch/arm/mm/mm-armv.c | |||
@@ -411,9 +411,10 @@ static void __init build_mem_type_table(void) | |||
411 | mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4; | 411 | mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4; |
412 | mem_types[MT_ROM].prot_sect &= ~PMD_BIT4; | 412 | mem_types[MT_ROM].prot_sect &= ~PMD_BIT4; |
413 | /* | 413 | /* |
414 | * Mark cache clean areas read only from SVC mode | 414 | * Mark cache clean areas and XIP ROM read only |
415 | * and no access from userspace. | 415 | * from SVC mode and no access from userspace. |
416 | */ | 416 | */ |
417 | mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; | ||
417 | mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; | 418 | mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; |
418 | mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; | 419 | mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; |
419 | } | 420 | } |
diff --git a/arch/arm26/Kconfig b/arch/arm26/Kconfig index 3955de5af4c0..6caed90661fc 100644 --- a/arch/arm26/Kconfig +++ b/arch/arm26/Kconfig | |||
@@ -89,6 +89,10 @@ config PAGESIZE_16 | |||
89 | machine with 4MB of memory. | 89 | machine with 4MB of memory. |
90 | endmenu | 90 | endmenu |
91 | 91 | ||
92 | config ISA_DMA_API | ||
93 | bool | ||
94 | default y | ||
95 | |||
92 | menu "General setup" | 96 | menu "General setup" |
93 | 97 | ||
94 | # Compressed boot loader in ROM. Yes, we really want to ask about | 98 | # Compressed boot loader in ROM. Yes, we really want to ask about |
diff --git a/arch/arm26/kernel/ptrace.c b/arch/arm26/kernel/ptrace.c index 2a137146a77c..8a52124de0e1 100644 --- a/arch/arm26/kernel/ptrace.c +++ b/arch/arm26/kernel/ptrace.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/ptrace.h> | 18 | #include <linux/ptrace.h> |
19 | #include <linux/user.h> | 19 | #include <linux/user.h> |
20 | #include <linux/security.h> | 20 | #include <linux/security.h> |
21 | #include <linux/signal.h> | ||
21 | 22 | ||
22 | #include <asm/uaccess.h> | 23 | #include <asm/uaccess.h> |
23 | #include <asm/pgtable.h> | 24 | #include <asm/pgtable.h> |
@@ -591,7 +592,7 @@ static int do_ptrace(int request, struct task_struct *child, long addr, long dat | |||
591 | case PTRACE_SYSCALL: | 592 | case PTRACE_SYSCALL: |
592 | case PTRACE_CONT: | 593 | case PTRACE_CONT: |
593 | ret = -EIO; | 594 | ret = -EIO; |
594 | if ((unsigned long) data > _NSIG) | 595 | if (!valid_signal(data)) |
595 | break; | 596 | break; |
596 | if (request == PTRACE_SYSCALL) | 597 | if (request == PTRACE_SYSCALL) |
597 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 598 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
@@ -626,7 +627,7 @@ static int do_ptrace(int request, struct task_struct *child, long addr, long dat | |||
626 | */ | 627 | */ |
627 | case PTRACE_SINGLESTEP: | 628 | case PTRACE_SINGLESTEP: |
628 | ret = -EIO; | 629 | ret = -EIO; |
629 | if ((unsigned long) data > _NSIG) | 630 | if (!valid_signal(data)) |
630 | break; | 631 | break; |
631 | child->ptrace |= PT_SINGLESTEP; | 632 | child->ptrace |= PT_SINGLESTEP; |
632 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 633 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
diff --git a/arch/arm26/mm/small_page.c b/arch/arm26/mm/small_page.c index 77be86cca789..30447106c25f 100644 --- a/arch/arm26/mm/small_page.c +++ b/arch/arm26/mm/small_page.c | |||
@@ -92,8 +92,7 @@ static unsigned long __get_small_page(int priority, struct order *order) | |||
92 | page = list_entry(order->queue.next, struct page, lru); | 92 | page = list_entry(order->queue.next, struct page, lru); |
93 | again: | 93 | again: |
94 | #ifdef PEDANTIC | 94 | #ifdef PEDANTIC |
95 | if (USED_MAP(page) & ~order->all_used) | 95 | BUG_ON(USED_MAP(page) & ~order->all_used); |
96 | PAGE_BUG(page); | ||
97 | #endif | 96 | #endif |
98 | offset = ffz(USED_MAP(page)); | 97 | offset = ffz(USED_MAP(page)); |
99 | SET_USED(page, offset); | 98 | SET_USED(page, offset); |
@@ -141,8 +140,7 @@ static void __free_small_page(unsigned long spage, struct order *order) | |||
141 | goto non_small; | 140 | goto non_small; |
142 | 141 | ||
143 | #ifdef PEDANTIC | 142 | #ifdef PEDANTIC |
144 | if (USED_MAP(page) & ~order->all_used) | 143 | BUG_ON(USED_MAP(page) & ~order->all_used); |
145 | PAGE_BUG(page); | ||
146 | #endif | 144 | #endif |
147 | 145 | ||
148 | spage = spage >> order->shift; | 146 | spage = spage >> order->shift; |
diff --git a/arch/cris/arch-v10/kernel/ptrace.c b/arch/cris/arch-v10/kernel/ptrace.c index da15db8ae482..581ecabaae53 100644 --- a/arch/cris/arch-v10/kernel/ptrace.c +++ b/arch/cris/arch-v10/kernel/ptrace.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/errno.h> | 10 | #include <linux/errno.h> |
11 | #include <linux/ptrace.h> | 11 | #include <linux/ptrace.h> |
12 | #include <linux/user.h> | 12 | #include <linux/user.h> |
13 | #include <linux/signal.h> | ||
13 | 14 | ||
14 | #include <asm/uaccess.h> | 15 | #include <asm/uaccess.h> |
15 | #include <asm/page.h> | 16 | #include <asm/page.h> |
@@ -184,7 +185,7 @@ sys_ptrace(long request, long pid, long addr, long data) | |||
184 | case PTRACE_CONT: | 185 | case PTRACE_CONT: |
185 | ret = -EIO; | 186 | ret = -EIO; |
186 | 187 | ||
187 | if ((unsigned long) data > _NSIG) | 188 | if (!valid_signal(data)) |
188 | break; | 189 | break; |
189 | 190 | ||
190 | if (request == PTRACE_SYSCALL) { | 191 | if (request == PTRACE_SYSCALL) { |
@@ -219,7 +220,7 @@ sys_ptrace(long request, long pid, long addr, long data) | |||
219 | case PTRACE_SINGLESTEP: | 220 | case PTRACE_SINGLESTEP: |
220 | ret = -EIO; | 221 | ret = -EIO; |
221 | 222 | ||
222 | if ((unsigned long) data > _NSIG) | 223 | if (!valid_signal(data)) |
223 | break; | 224 | break; |
224 | 225 | ||
225 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 226 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
diff --git a/arch/frv/kernel/ptrace.c b/arch/frv/kernel/ptrace.c index 2a0efb739adc..cbe03cba9f02 100644 --- a/arch/frv/kernel/ptrace.c +++ b/arch/frv/kernel/ptrace.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/user.h> | 20 | #include <linux/user.h> |
21 | #include <linux/config.h> | 21 | #include <linux/config.h> |
22 | #include <linux/security.h> | 22 | #include <linux/security.h> |
23 | #include <linux/signal.h> | ||
23 | 24 | ||
24 | #include <asm/uaccess.h> | 25 | #include <asm/uaccess.h> |
25 | #include <asm/page.h> | 26 | #include <asm/page.h> |
@@ -239,7 +240,7 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) | |||
239 | case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ | 240 | case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ |
240 | case PTRACE_CONT: /* restart after signal. */ | 241 | case PTRACE_CONT: /* restart after signal. */ |
241 | ret = -EIO; | 242 | ret = -EIO; |
242 | if ((unsigned long) data > _NSIG) | 243 | if (!valid_signal(data)) |
243 | break; | 244 | break; |
244 | if (request == PTRACE_SYSCALL) | 245 | if (request == PTRACE_SYSCALL) |
245 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 246 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
@@ -267,7 +268,7 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) | |||
267 | 268 | ||
268 | case PTRACE_SINGLESTEP: /* set the trap flag. */ | 269 | case PTRACE_SINGLESTEP: /* set the trap flag. */ |
269 | ret = -EIO; | 270 | ret = -EIO; |
270 | if ((unsigned long) data > _NSIG) | 271 | if (!valid_signal(data)) |
271 | break; | 272 | break; |
272 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 273 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
273 | ptrace_enable(child); | 274 | ptrace_enable(child); |
diff --git a/arch/h8300/kernel/ptrace.c b/arch/h8300/kernel/ptrace.c index 5f19d774a288..05c15e869777 100644 --- a/arch/h8300/kernel/ptrace.c +++ b/arch/h8300/kernel/ptrace.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/ptrace.h> | 24 | #include <linux/ptrace.h> |
25 | #include <linux/user.h> | 25 | #include <linux/user.h> |
26 | #include <linux/config.h> | 26 | #include <linux/config.h> |
27 | #include <linux/signal.h> | ||
27 | 28 | ||
28 | #include <asm/uaccess.h> | 29 | #include <asm/uaccess.h> |
29 | #include <asm/page.h> | 30 | #include <asm/page.h> |
@@ -171,7 +172,7 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) | |||
171 | case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ | 172 | case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ |
172 | case PTRACE_CONT: { /* restart after signal. */ | 173 | case PTRACE_CONT: { /* restart after signal. */ |
173 | ret = -EIO; | 174 | ret = -EIO; |
174 | if ((unsigned long) data >= _NSIG) | 175 | if (!valid_signal(data)) |
175 | break ; | 176 | break ; |
176 | if (request == PTRACE_SYSCALL) | 177 | if (request == PTRACE_SYSCALL) |
177 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 178 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
@@ -202,7 +203,7 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) | |||
202 | 203 | ||
203 | case PTRACE_SINGLESTEP: { /* set the trap flag. */ | 204 | case PTRACE_SINGLESTEP: { /* set the trap flag. */ |
204 | ret = -EIO; | 205 | ret = -EIO; |
205 | if ((unsigned long) data > _NSIG) | 206 | if (!valid_signal(data)) |
206 | break; | 207 | break; |
207 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 208 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
208 | child->exit_code = data; | 209 | child->exit_code = data; |
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig index 17a0cbce6f30..e382f32d435e 100644 --- a/arch/i386/Kconfig +++ b/arch/i386/Kconfig | |||
@@ -183,7 +183,7 @@ config M386 | |||
183 | - "Winchip-C6" for original IDT Winchip. | 183 | - "Winchip-C6" for original IDT Winchip. |
184 | - "Winchip-2" for IDT Winchip 2. | 184 | - "Winchip-2" for IDT Winchip 2. |
185 | - "Winchip-2A" for IDT Winchips with 3dNow! capabilities. | 185 | - "Winchip-2A" for IDT Winchips with 3dNow! capabilities. |
186 | - "MediaGX/Geode" for Cyrix MediaGX aka Geode. | 186 | - "GeodeGX1" for Geode GX1 (Cyrix MediaGX). |
187 | - "CyrixIII/VIA C3" for VIA Cyrix III or VIA C3. | 187 | - "CyrixIII/VIA C3" for VIA Cyrix III or VIA C3. |
188 | - "VIA C3-2 for VIA C3-2 "Nehemiah" (model 9 and above). | 188 | - "VIA C3-2 for VIA C3-2 "Nehemiah" (model 9 and above). |
189 | 189 | ||
@@ -311,12 +311,10 @@ config MWINCHIP3D | |||
311 | stores for this CPU, which can increase performance of some | 311 | stores for this CPU, which can increase performance of some |
312 | operations. | 312 | operations. |
313 | 313 | ||
314 | config MGEODE | 314 | config MGEODEGX1 |
315 | bool "MediaGX/Geode" | 315 | bool "GeodeGX1" |
316 | help | 316 | help |
317 | Select this for a Cyrix MediaGX aka Geode chip. Linux and GCC | 317 | Select this for a Geode GX1 (Cyrix MediaGX) chip. |
318 | treat this chip as a 586TSC with some extended instructions | ||
319 | and alignment reqirements. | ||
320 | 318 | ||
321 | config MCYRIXIII | 319 | config MCYRIXIII |
322 | bool "CyrixIII/VIA-C3" | 320 | bool "CyrixIII/VIA-C3" |
@@ -368,7 +366,7 @@ config X86_L1_CACHE_SHIFT | |||
368 | int | 366 | int |
369 | default "7" if MPENTIUM4 || X86_GENERIC | 367 | default "7" if MPENTIUM4 || X86_GENERIC |
370 | default "4" if X86_ELAN || M486 || M386 | 368 | default "4" if X86_ELAN || M486 || M386 |
371 | default "5" if MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE | 369 | default "5" if MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODEGX1 |
372 | default "6" if MK7 || MK8 || MPENTIUMM | 370 | default "6" if MK7 || MK8 || MPENTIUMM |
373 | 371 | ||
374 | config RWSEM_GENERIC_SPINLOCK | 372 | config RWSEM_GENERIC_SPINLOCK |
@@ -387,7 +385,7 @@ config GENERIC_CALIBRATE_DELAY | |||
387 | 385 | ||
388 | config X86_PPRO_FENCE | 386 | config X86_PPRO_FENCE |
389 | bool | 387 | bool |
390 | depends on M686 || M586MMX || M586TSC || M586 || M486 || M386 || MGEODE | 388 | depends on M686 || M586MMX || M586TSC || M586 || M486 || M386 || MGEODEGX1 |
391 | default y | 389 | default y |
392 | 390 | ||
393 | config X86_F00F_BUG | 391 | config X86_F00F_BUG |
@@ -417,7 +415,7 @@ config X86_POPAD_OK | |||
417 | 415 | ||
418 | config X86_ALIGNMENT_16 | 416 | config X86_ALIGNMENT_16 |
419 | bool | 417 | bool |
420 | depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODE | 418 | depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1 |
421 | default y | 419 | default y |
422 | 420 | ||
423 | config X86_GOOD_APIC | 421 | config X86_GOOD_APIC |
@@ -442,7 +440,7 @@ config X86_USE_3DNOW | |||
442 | 440 | ||
443 | config X86_OOSTORE | 441 | config X86_OOSTORE |
444 | bool | 442 | bool |
445 | depends on (MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MGEODE) && MTRR | 443 | depends on (MWINCHIP3D || MWINCHIP2 || MWINCHIPC6) && MTRR |
446 | default y | 444 | default y |
447 | 445 | ||
448 | config HPET_TIMER | 446 | config HPET_TIMER |
@@ -578,7 +576,7 @@ config X86_VISWS_APIC | |||
578 | 576 | ||
579 | config X86_TSC | 577 | config X86_TSC |
580 | bool | 578 | bool |
581 | depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MGEODE) && !X86_NUMAQ | 579 | depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MGEODEGX1) && !X86_NUMAQ |
582 | default y | 580 | default y |
583 | 581 | ||
584 | config X86_MCE | 582 | config X86_MCE |
@@ -653,6 +651,24 @@ config I8K | |||
653 | Say Y if you intend to run this kernel on a Dell Inspiron 8000. | 651 | Say Y if you intend to run this kernel on a Dell Inspiron 8000. |
654 | Say N otherwise. | 652 | Say N otherwise. |
655 | 653 | ||
654 | config X86_REBOOTFIXUPS | ||
655 | bool "Enable X86 board specific fixups for reboot" | ||
656 | depends on X86 | ||
657 | default n | ||
658 | ---help--- | ||
659 | This enables chipset and/or board specific fixups to be done | ||
660 | in order to get reboot to work correctly. This is only needed on | ||
661 | some combinations of hardware and BIOS. The symptom, for which | ||
662 | this config is intended, is when reboot ends with a stalled/hung | ||
663 | system. | ||
664 | |||
665 | Currently, the only fixup is for the Geode GX1/CS5530A/TROM2.1 | ||
666 | combination. | ||
667 | |||
668 | Say Y if you want to enable the fixup. Currently, it's safe to | ||
669 | enable this option even if you don't need it. | ||
670 | Say N otherwise. | ||
671 | |||
656 | config MICROCODE | 672 | config MICROCODE |
657 | tristate "/dev/cpu/microcode - Intel IA32 CPU microcode support" | 673 | tristate "/dev/cpu/microcode - Intel IA32 CPU microcode support" |
658 | ---help--- | 674 | ---help--- |
@@ -1155,6 +1171,10 @@ source "drivers/pci/pcie/Kconfig" | |||
1155 | 1171 | ||
1156 | source "drivers/pci/Kconfig" | 1172 | source "drivers/pci/Kconfig" |
1157 | 1173 | ||
1174 | config ISA_DMA_API | ||
1175 | bool | ||
1176 | default y | ||
1177 | |||
1158 | config ISA | 1178 | config ISA |
1159 | bool "ISA support" | 1179 | bool "ISA support" |
1160 | depends on !(X86_VOYAGER || X86_VISWS) | 1180 | depends on !(X86_VOYAGER || X86_VISWS) |
diff --git a/arch/i386/Makefile b/arch/i386/Makefile index 314c7146e9bf..1c36ca332a96 100644 --- a/arch/i386/Makefile +++ b/arch/i386/Makefile | |||
@@ -14,7 +14,7 @@ | |||
14 | # 19990713 Artur Skawina <skawina@geocities.com> | 14 | # 19990713 Artur Skawina <skawina@geocities.com> |
15 | # Added '-march' and '-mpreferred-stack-boundary' support | 15 | # Added '-march' and '-mpreferred-stack-boundary' support |
16 | # | 16 | # |
17 | # Kianusch Sayah Karadji <kianusch@sk-tech.net> | 17 | # 20050320 Kianusch Sayah Karadji <kianusch@sk-tech.net> |
18 | # Added support for GEODE CPU | 18 | # Added support for GEODE CPU |
19 | 19 | ||
20 | LDFLAGS := -m elf_i386 | 20 | LDFLAGS := -m elf_i386 |
@@ -54,8 +54,8 @@ cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686) | |||
54 | # AMD Elan support | 54 | # AMD Elan support |
55 | cflags-$(CONFIG_X86_ELAN) += -march=i486 | 55 | cflags-$(CONFIG_X86_ELAN) += -march=i486 |
56 | 56 | ||
57 | # MediaGX aka Geode support | 57 | # Geode GX1 support |
58 | cflags-$(CONFIG_MGEODE) += $(call cc-option,-march=pentium-mmx,-march=i586) | 58 | cflags-$(CONFIG_MGEODEGX1) += $(call cc-option,-march=pentium-mmx,-march=i486) |
59 | 59 | ||
60 | # -mregparm=3 works ok on gcc-3.0 and later | 60 | # -mregparm=3 works ok on gcc-3.0 and later |
61 | # | 61 | # |
@@ -123,7 +123,7 @@ AFLAGS += $(mflags-y) | |||
123 | boot := arch/i386/boot | 123 | boot := arch/i386/boot |
124 | 124 | ||
125 | .PHONY: zImage bzImage compressed zlilo bzlilo \ | 125 | .PHONY: zImage bzImage compressed zlilo bzlilo \ |
126 | zdisk bzdisk fdimage fdimage144 fdimage288 install | 126 | zdisk bzdisk fdimage fdimage144 fdimage288 install kernel_install |
127 | 127 | ||
128 | all: bzImage | 128 | all: bzImage |
129 | 129 | ||
@@ -145,8 +145,9 @@ zdisk bzdisk: vmlinux | |||
145 | fdimage fdimage144 fdimage288: vmlinux | 145 | fdimage fdimage144 fdimage288: vmlinux |
146 | $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) $@ | 146 | $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) $@ |
147 | 147 | ||
148 | install: | 148 | install: vmlinux |
149 | $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) $@ | 149 | install kernel_install: |
150 | $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install | ||
150 | 151 | ||
151 | prepare: include/asm-$(ARCH)/asm_offsets.h | 152 | prepare: include/asm-$(ARCH)/asm_offsets.h |
152 | CLEAN_FILES += include/asm-$(ARCH)/asm_offsets.h | 153 | CLEAN_FILES += include/asm-$(ARCH)/asm_offsets.h |
diff --git a/arch/i386/boot/bootsect.S b/arch/i386/boot/bootsect.S index ba9fe14db6a9..011b7a4993d4 100644 --- a/arch/i386/boot/bootsect.S +++ b/arch/i386/boot/bootsect.S | |||
@@ -83,7 +83,7 @@ bugger_off_msg: | |||
83 | .ascii "\n" | 83 | .ascii "\n" |
84 | .ascii "Remove disk and press any key to reboot . . .\r\n" | 84 | .ascii "Remove disk and press any key to reboot . . .\r\n" |
85 | .byte 0 | 85 | .byte 0 |
86 | 86 | ||
87 | 87 | ||
88 | # Kernel attributes; used by setup | 88 | # Kernel attributes; used by setup |
89 | 89 | ||
diff --git a/arch/i386/boot/compressed/misc.c b/arch/i386/boot/compressed/misc.c index fa67045234a3..cedc55cc47de 100644 --- a/arch/i386/boot/compressed/misc.c +++ b/arch/i386/boot/compressed/misc.c | |||
@@ -12,7 +12,6 @@ | |||
12 | #include <linux/linkage.h> | 12 | #include <linux/linkage.h> |
13 | #include <linux/vmalloc.h> | 13 | #include <linux/vmalloc.h> |
14 | #include <linux/tty.h> | 14 | #include <linux/tty.h> |
15 | #include <video/edid.h> | ||
16 | #include <asm/io.h> | 15 | #include <asm/io.h> |
17 | 16 | ||
18 | /* | 17 | /* |
diff --git a/arch/i386/boot/setup.S b/arch/i386/boot/setup.S index a934ab32bf8e..caa1fde6904e 100644 --- a/arch/i386/boot/setup.S +++ b/arch/i386/boot/setup.S | |||
@@ -164,7 +164,7 @@ ramdisk_max: .long (-__PAGE_OFFSET-(512 << 20)-1) & 0x7fffffff | |||
164 | trampoline: call start_of_setup | 164 | trampoline: call start_of_setup |
165 | .align 16 | 165 | .align 16 |
166 | # The offset at this point is 0x240 | 166 | # The offset at this point is 0x240 |
167 | .space (0x7ff-0x240+1) # E820 & EDD space (ending at 0x7ff) | 167 | .space (0xeff-0x240+1) # E820 & EDD space (ending at 0xeff) |
168 | # End of setup header ##################################################### | 168 | # End of setup header ##################################################### |
169 | 169 | ||
170 | start_of_setup: | 170 | start_of_setup: |
@@ -333,9 +333,9 @@ jmpe820: | |||
333 | # sizeof(e820rec). | 333 | # sizeof(e820rec). |
334 | # | 334 | # |
335 | good820: | 335 | good820: |
336 | movb (E820NR), %al # up to 32 entries | 336 | movb (E820NR), %al # up to 128 entries |
337 | cmpb $E820MAX, %al | 337 | cmpb $E820MAX, %al |
338 | jnl bail820 | 338 | jae bail820 |
339 | 339 | ||
340 | incb (E820NR) | 340 | incb (E820NR) |
341 | movw %di, %ax | 341 | movw %di, %ax |
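Note on the two setup.S changes above, which go together: the comment now says "up to 128 entries", and the bound check must become an unsigned compare because 128 (0x80) is negative as a signed byte, so the old jnl would have bailed out on the very first entry. A back-of-the-envelope check of the enlarged scratch area, as an illustrative sketch (record size from the BIOS E820 format; exact buffer offsets are not shown in this hunk):

	/*
	 * Illustrative arithmetic only.  Each BIOS E820 record is 20 bytes
	 * (u64 base, u64 length, u32 type).
	 */
	#define E820_RECORD_SIZE	20
	#define E820MAX			128	/* raised from 32 in this series */

	static const unsigned int e820_bytes = E820MAX * E820_RECORD_SIZE;	/* 2560 = 0xa00 */
	static const unsigned int old_space  = 0x7ff - 0x240 + 1;		/* 0x5c0 = 1472  */
	static const unsigned int new_space  = 0xeff - 0x240 + 1;		/* 0xcc0 = 3264  */
	/* 0xa00 no longer fits in 0x5c0, but fits in 0xcc0 alongside the EDD data. */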
diff --git a/arch/i386/boot/video.S b/arch/i386/boot/video.S index 925d3f5a3824..0587477c99f2 100644 --- a/arch/i386/boot/video.S +++ b/arch/i386/boot/video.S | |||
@@ -1924,36 +1924,36 @@ skip10: movb %ah, %al | |||
1924 | ret | 1924 | ret |
1925 | 1925 | ||
1926 | store_edid: | 1926 | store_edid: |
1927 | pushw %es # just save all registers | 1927 | pushw %es # just save all registers |
1928 | pushw %ax | 1928 | pushw %ax |
1929 | pushw %bx | 1929 | pushw %bx |
1930 | pushw %cx | 1930 | pushw %cx |
1931 | pushw %dx | 1931 | pushw %dx |
1932 | pushw %di | 1932 | pushw %di |
1933 | 1933 | ||
1934 | pushw %fs | 1934 | pushw %fs |
1935 | popw %es | 1935 | popw %es |
1936 | 1936 | ||
1937 | movl $0x13131313, %eax # memset block with 0x13 | 1937 | movl $0x13131313, %eax # memset block with 0x13 |
1938 | movw $32, %cx | 1938 | movw $32, %cx |
1939 | movw $0x140, %di | 1939 | movw $0x140, %di |
1940 | cld | 1940 | cld |
1941 | rep | 1941 | rep |
1942 | stosl | 1942 | stosl |
1943 | 1943 | ||
1944 | movw $0x4f15, %ax # do VBE/DDC | 1944 | movw $0x4f15, %ax # do VBE/DDC |
1945 | movw $0x01, %bx | 1945 | movw $0x01, %bx |
1946 | movw $0x00, %cx | 1946 | movw $0x00, %cx |
1947 | movw $0x01, %dx | 1947 | movw $0x01, %dx |
1948 | movw $0x140, %di | 1948 | movw $0x140, %di |
1949 | int $0x10 | 1949 | int $0x10 |
1950 | 1950 | ||
1951 | popw %di # restore all registers | 1951 | popw %di # restore all registers |
1952 | popw %dx | 1952 | popw %dx |
1953 | popw %cx | 1953 | popw %cx |
1954 | popw %bx | 1954 | popw %bx |
1955 | popw %ax | 1955 | popw %ax |
1956 | popw %es | 1956 | popw %es |
1957 | ret | 1957 | ret |
1958 | 1958 | ||
1959 | # VIDEO_SELECT-only variables | 1959 | # VIDEO_SELECT-only variables |
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile index aacdae6f372d..0fbcfe00dd8d 100644 --- a/arch/i386/kernel/Makefile +++ b/arch/i386/kernel/Makefile | |||
@@ -23,6 +23,7 @@ obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o | |||
23 | obj-$(CONFIG_X86_MPPARSE) += mpparse.o | 23 | obj-$(CONFIG_X86_MPPARSE) += mpparse.o |
24 | obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o | 24 | obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o |
25 | obj-$(CONFIG_X86_IO_APIC) += io_apic.o | 25 | obj-$(CONFIG_X86_IO_APIC) += io_apic.o |
26 | obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups.o | ||
26 | obj-$(CONFIG_X86_NUMAQ) += numaq.o | 27 | obj-$(CONFIG_X86_NUMAQ) += numaq.o |
27 | obj-$(CONFIG_X86_SUMMIT_NUMA) += summit.o | 28 | obj-$(CONFIG_X86_SUMMIT_NUMA) += summit.o |
28 | obj-$(CONFIG_KPROBES) += kprobes.o | 29 | obj-$(CONFIG_KPROBES) += kprobes.o |
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c index 53eb5cfd5b63..848bb97af7ca 100644 --- a/arch/i386/kernel/acpi/boot.c +++ b/arch/i386/kernel/acpi/boot.c | |||
@@ -650,7 +650,7 @@ acpi_find_rsdp (void) | |||
650 | */ | 650 | */ |
651 | rsdp_phys = acpi_scan_rsdp (0, 0x400); | 651 | rsdp_phys = acpi_scan_rsdp (0, 0x400); |
652 | if (!rsdp_phys) | 652 | if (!rsdp_phys) |
653 | rsdp_phys = acpi_scan_rsdp (0xE0000, 0xFFFFF); | 653 | rsdp_phys = acpi_scan_rsdp (0xE0000, 0x20000); |
654 | 654 | ||
655 | return rsdp_phys; | 655 | return rsdp_phys; |
656 | } | 656 | } |
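The point of this one-liner is that acpi_scan_rsdp() takes a (start, length) pair, so scanning the 0xE0000-0xFFFFF BIOS window needs a length of 0x20000 bytes; the old call passed the end address and scanned far past the window. For reference, a scanner of this kind conventionally looks like the sketch below (function name and mapping helper are illustrative, not the kernel's exact code):

	#include <linux/string.h>	/* memcmp */
	#include <asm/io.h>		/* phys_to_virt stands in for the real mapping */

	/* Illustrative sketch of an RSDP scan over a physical window. */
	static unsigned long example_scan_rsdp(unsigned long start, unsigned long length)
	{
		unsigned long offset;

		/* the RSDP signature is only valid on 16-byte boundaries */
		for (offset = 0; offset < length; offset += 16) {
			if (memcmp(phys_to_virt(start + offset), "RSD PTR ", 8) == 0)
				return start + offset;
		}
		return 0;
	}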
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c index e3879f7625c2..d509836b70c3 100644 --- a/arch/i386/kernel/apic.c +++ b/arch/i386/kernel/apic.c | |||
@@ -1265,8 +1265,6 @@ int __init APIC_init_uniprocessor (void) | |||
1265 | 1265 | ||
1266 | setup_local_APIC(); | 1266 | setup_local_APIC(); |
1267 | 1267 | ||
1268 | if (nmi_watchdog == NMI_LOCAL_APIC) | ||
1269 | check_nmi_watchdog(); | ||
1270 | #ifdef CONFIG_X86_IO_APIC | 1268 | #ifdef CONFIG_X86_IO_APIC |
1271 | if (smp_found_config) | 1269 | if (smp_found_config) |
1272 | if (!skip_ioapic_setup && nr_ioapics) | 1270 | if (!skip_ioapic_setup && nr_ioapics) |
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c index 8d182e875cd7..16dbc4151be4 100644 --- a/arch/i386/kernel/cpu/amd.c +++ b/arch/i386/kernel/cpu/amd.c | |||
@@ -24,7 +24,7 @@ __asm__(".align 4\nvide: ret"); | |||
24 | 24 | ||
25 | static void __init init_amd(struct cpuinfo_x86 *c) | 25 | static void __init init_amd(struct cpuinfo_x86 *c) |
26 | { | 26 | { |
27 | #ifdef CONFIG_SMP | 27 | #ifdef CONFIG_X86_SMP |
28 | int cpu = c == &boot_cpu_data ? 0 : c - cpu_data; | 28 | int cpu = c == &boot_cpu_data ? 0 : c - cpu_data; |
29 | #endif | 29 | #endif |
30 | u32 l, h; | 30 | u32 l, h; |
@@ -198,7 +198,7 @@ static void __init init_amd(struct cpuinfo_x86 *c) | |||
198 | c->x86_num_cores = 1; | 198 | c->x86_num_cores = 1; |
199 | } | 199 | } |
200 | 200 | ||
201 | #ifdef CONFIG_SMP | 201 | #ifdef CONFIG_X86_SMP |
202 | /* | 202 | /* |
203 | * On a AMD dual core setup the lower bits of the APIC id | 203 | * On a AMD dual core setup the lower bits of the APIC id |
204 | * distingush the cores. Assumes number of cores is a power | 204 | * distingush the cores. Assumes number of cores is a power |
diff --git a/arch/i386/kernel/cpu/mtrr/cyrix.c b/arch/i386/kernel/cpu/mtrr/cyrix.c index 933b0dd62f48..9027a987006b 100644 --- a/arch/i386/kernel/cpu/mtrr/cyrix.c +++ b/arch/i386/kernel/cpu/mtrr/cyrix.c | |||
@@ -218,12 +218,12 @@ typedef struct { | |||
218 | mtrr_type type; | 218 | mtrr_type type; |
219 | } arr_state_t; | 219 | } arr_state_t; |
220 | 220 | ||
221 | static arr_state_t arr_state[8] __initdata = { | 221 | static arr_state_t arr_state[8] __devinitdata = { |
222 | {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, | 222 | {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, |
223 | {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL} | 223 | {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL} |
224 | }; | 224 | }; |
225 | 225 | ||
226 | static unsigned char ccr_state[7] __initdata = { 0, 0, 0, 0, 0, 0, 0 }; | 226 | static unsigned char ccr_state[7] __devinitdata = { 0, 0, 0, 0, 0, 0, 0 }; |
227 | 227 | ||
228 | static void cyrix_set_all(void) | 228 | static void cyrix_set_all(void) |
229 | { | 229 | { |
diff --git a/arch/i386/kernel/cpu/mtrr/generic.c b/arch/i386/kernel/cpu/mtrr/generic.c index 9f7a7ea6388d..f468a979e9aa 100644 --- a/arch/i386/kernel/cpu/mtrr/generic.c +++ b/arch/i386/kernel/cpu/mtrr/generic.c | |||
@@ -124,8 +124,8 @@ int generic_get_free_region(unsigned long base, unsigned long size) | |||
124 | return -ENOSPC; | 124 | return -ENOSPC; |
125 | } | 125 | } |
126 | 126 | ||
127 | void generic_get_mtrr(unsigned int reg, unsigned long *base, | 127 | static void generic_get_mtrr(unsigned int reg, unsigned long *base, |
128 | unsigned int *size, mtrr_type * type) | 128 | unsigned int *size, mtrr_type * type) |
129 | { | 129 | { |
130 | unsigned int mask_lo, mask_hi, base_lo, base_hi; | 130 | unsigned int mask_lo, mask_hi, base_lo, base_hi; |
131 | 131 | ||
diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c index 54999e4c55fd..e1c2042b9b7e 100644 --- a/arch/i386/kernel/cpu/mtrr/main.c +++ b/arch/i386/kernel/cpu/mtrr/main.c | |||
@@ -72,17 +72,21 @@ void set_mtrr_ops(struct mtrr_ops * ops) | |||
72 | static int have_wrcomb(void) | 72 | static int have_wrcomb(void) |
73 | { | 73 | { |
74 | struct pci_dev *dev; | 74 | struct pci_dev *dev; |
75 | u8 rev; | ||
75 | 76 | ||
76 | if ((dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) { | 77 | if ((dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) { |
77 | /* ServerWorks LE chipsets have problems with write-combining | 78 | /* ServerWorks LE chipsets < rev 6 have problems with write-combining |
78 | Don't allow it and leave room for other chipsets to be tagged */ | 79 | Don't allow it and leave room for other chipsets to be tagged */ |
79 | if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS && | 80 | if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS && |
80 | dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) { | 81 | dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) { |
81 | printk(KERN_INFO "mtrr: Serverworks LE detected. Write-combining disabled.\n"); | 82 | pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev); |
82 | pci_dev_put(dev); | 83 | if (rev <= 5) { |
83 | return 0; | 84 | printk(KERN_INFO "mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n"); |
85 | pci_dev_put(dev); | ||
86 | return 0; | ||
87 | } | ||
84 | } | 88 | } |
85 | /* Intel 450NX errata # 23. Non ascending cachline evictions to | 89 | /* Intel 450NX errata # 23. Non ascending cacheline evictions to |
86 | write combining memory may resulting in data corruption */ | 90 | write combining memory may resulting in data corruption */ |
87 | if (dev->vendor == PCI_VENDOR_ID_INTEL && | 91 | if (dev->vendor == PCI_VENDOR_ID_INTEL && |
88 | dev->device == PCI_DEVICE_ID_INTEL_82451NX) { | 92 | dev->device == PCI_DEVICE_ID_INTEL_82451NX) { |
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c index 4f28eba7fb8a..7323c19f354e 100644 --- a/arch/i386/kernel/cpu/proc.c +++ b/arch/i386/kernel/cpu/proc.c | |||
@@ -25,7 +25,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
25 | "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe", | 25 | "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe", |
26 | 26 | ||
27 | /* AMD-defined */ | 27 | /* AMD-defined */ |
28 | "pni", NULL, NULL, NULL, NULL, NULL, NULL, NULL, | 28 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, |
29 | NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL, | 29 | NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL, |
30 | NULL, NULL, NULL, "mp", "nx", NULL, "mmxext", NULL, | 30 | NULL, NULL, NULL, "mp", "nx", NULL, "mmxext", NULL, |
31 | NULL, "fxsr_opt", NULL, NULL, NULL, "lm", "3dnowext", "3dnow", | 31 | NULL, "fxsr_opt", NULL, NULL, NULL, "lm", "3dnowext", "3dnow", |
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S index 3c73dc865ead..a991d4e5edd2 100644 --- a/arch/i386/kernel/entry.S +++ b/arch/i386/kernel/entry.S | |||
@@ -260,11 +260,9 @@ restore_nocheck: | |||
260 | .section .fixup,"ax" | 260 | .section .fixup,"ax" |
261 | iret_exc: | 261 | iret_exc: |
262 | sti | 262 | sti |
263 | movl $__USER_DS, %edx | 263 | pushl $0 # no error code |
264 | movl %edx, %ds | 264 | pushl $do_iret_error |
265 | movl %edx, %es | 265 | jmp error_code |
266 | movl $11,%eax | ||
267 | call do_exit | ||
268 | .previous | 266 | .previous |
269 | .section __ex_table,"a" | 267 | .section __ex_table,"a" |
270 | .align 4 | 268 | .align 4 |
@@ -516,8 +514,6 @@ debug_stack_correct: | |||
516 | xorl %edx,%edx # error code 0 | 514 | xorl %edx,%edx # error code 0 |
517 | movl %esp,%eax # pt_regs pointer | 515 | movl %esp,%eax # pt_regs pointer |
518 | call do_debug | 516 | call do_debug |
519 | testl %eax,%eax | ||
520 | jnz restore_all | ||
521 | jmp ret_from_exception | 517 | jmp ret_from_exception |
522 | 518 | ||
523 | /* | 519 | /* |
@@ -598,8 +594,6 @@ ENTRY(int3) | |||
598 | xorl %edx,%edx # zero error code | 594 | xorl %edx,%edx # zero error code |
599 | movl %esp,%eax # pt_regs pointer | 595 | movl %esp,%eax # pt_regs pointer |
600 | call do_int3 | 596 | call do_int3 |
601 | testl %eax,%eax | ||
602 | jnz restore_all | ||
603 | jmp ret_from_exception | 597 | jmp ret_from_exception |
604 | 598 | ||
605 | ENTRY(overflow) | 599 | ENTRY(overflow) |
@@ -658,296 +652,6 @@ ENTRY(spurious_interrupt_bug) | |||
658 | pushl $do_spurious_interrupt_bug | 652 | pushl $do_spurious_interrupt_bug |
659 | jmp error_code | 653 | jmp error_code |
660 | 654 | ||
661 | .data | 655 | #include "syscall_table.S" |
662 | ENTRY(sys_call_table) | ||
663 | .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */ | ||
664 | .long sys_exit | ||
665 | .long sys_fork | ||
666 | .long sys_read | ||
667 | .long sys_write | ||
668 | .long sys_open /* 5 */ | ||
669 | .long sys_close | ||
670 | .long sys_waitpid | ||
671 | .long sys_creat | ||
672 | .long sys_link | ||
673 | .long sys_unlink /* 10 */ | ||
674 | .long sys_execve | ||
675 | .long sys_chdir | ||
676 | .long sys_time | ||
677 | .long sys_mknod | ||
678 | .long sys_chmod /* 15 */ | ||
679 | .long sys_lchown16 | ||
680 | .long sys_ni_syscall /* old break syscall holder */ | ||
681 | .long sys_stat | ||
682 | .long sys_lseek | ||
683 | .long sys_getpid /* 20 */ | ||
684 | .long sys_mount | ||
685 | .long sys_oldumount | ||
686 | .long sys_setuid16 | ||
687 | .long sys_getuid16 | ||
688 | .long sys_stime /* 25 */ | ||
689 | .long sys_ptrace | ||
690 | .long sys_alarm | ||
691 | .long sys_fstat | ||
692 | .long sys_pause | ||
693 | .long sys_utime /* 30 */ | ||
694 | .long sys_ni_syscall /* old stty syscall holder */ | ||
695 | .long sys_ni_syscall /* old gtty syscall holder */ | ||
696 | .long sys_access | ||
697 | .long sys_nice | ||
698 | .long sys_ni_syscall /* 35 - old ftime syscall holder */ | ||
699 | .long sys_sync | ||
700 | .long sys_kill | ||
701 | .long sys_rename | ||
702 | .long sys_mkdir | ||
703 | .long sys_rmdir /* 40 */ | ||
704 | .long sys_dup | ||
705 | .long sys_pipe | ||
706 | .long sys_times | ||
707 | .long sys_ni_syscall /* old prof syscall holder */ | ||
708 | .long sys_brk /* 45 */ | ||
709 | .long sys_setgid16 | ||
710 | .long sys_getgid16 | ||
711 | .long sys_signal | ||
712 | .long sys_geteuid16 | ||
713 | .long sys_getegid16 /* 50 */ | ||
714 | .long sys_acct | ||
715 | .long sys_umount /* recycled never used phys() */ | ||
716 | .long sys_ni_syscall /* old lock syscall holder */ | ||
717 | .long sys_ioctl | ||
718 | .long sys_fcntl /* 55 */ | ||
719 | .long sys_ni_syscall /* old mpx syscall holder */ | ||
720 | .long sys_setpgid | ||
721 | .long sys_ni_syscall /* old ulimit syscall holder */ | ||
722 | .long sys_olduname | ||
723 | .long sys_umask /* 60 */ | ||
724 | .long sys_chroot | ||
725 | .long sys_ustat | ||
726 | .long sys_dup2 | ||
727 | .long sys_getppid | ||
728 | .long sys_getpgrp /* 65 */ | ||
729 | .long sys_setsid | ||
730 | .long sys_sigaction | ||
731 | .long sys_sgetmask | ||
732 | .long sys_ssetmask | ||
733 | .long sys_setreuid16 /* 70 */ | ||
734 | .long sys_setregid16 | ||
735 | .long sys_sigsuspend | ||
736 | .long sys_sigpending | ||
737 | .long sys_sethostname | ||
738 | .long sys_setrlimit /* 75 */ | ||
739 | .long sys_old_getrlimit | ||
740 | .long sys_getrusage | ||
741 | .long sys_gettimeofday | ||
742 | .long sys_settimeofday | ||
743 | .long sys_getgroups16 /* 80 */ | ||
744 | .long sys_setgroups16 | ||
745 | .long old_select | ||
746 | .long sys_symlink | ||
747 | .long sys_lstat | ||
748 | .long sys_readlink /* 85 */ | ||
749 | .long sys_uselib | ||
750 | .long sys_swapon | ||
751 | .long sys_reboot | ||
752 | .long old_readdir | ||
753 | .long old_mmap /* 90 */ | ||
754 | .long sys_munmap | ||
755 | .long sys_truncate | ||
756 | .long sys_ftruncate | ||
757 | .long sys_fchmod | ||
758 | .long sys_fchown16 /* 95 */ | ||
759 | .long sys_getpriority | ||
760 | .long sys_setpriority | ||
761 | .long sys_ni_syscall /* old profil syscall holder */ | ||
762 | .long sys_statfs | ||
763 | .long sys_fstatfs /* 100 */ | ||
764 | .long sys_ioperm | ||
765 | .long sys_socketcall | ||
766 | .long sys_syslog | ||
767 | .long sys_setitimer | ||
768 | .long sys_getitimer /* 105 */ | ||
769 | .long sys_newstat | ||
770 | .long sys_newlstat | ||
771 | .long sys_newfstat | ||
772 | .long sys_uname | ||
773 | .long sys_iopl /* 110 */ | ||
774 | .long sys_vhangup | ||
775 | .long sys_ni_syscall /* old "idle" system call */ | ||
776 | .long sys_vm86old | ||
777 | .long sys_wait4 | ||
778 | .long sys_swapoff /* 115 */ | ||
779 | .long sys_sysinfo | ||
780 | .long sys_ipc | ||
781 | .long sys_fsync | ||
782 | .long sys_sigreturn | ||
783 | .long sys_clone /* 120 */ | ||
784 | .long sys_setdomainname | ||
785 | .long sys_newuname | ||
786 | .long sys_modify_ldt | ||
787 | .long sys_adjtimex | ||
788 | .long sys_mprotect /* 125 */ | ||
789 | .long sys_sigprocmask | ||
790 | .long sys_ni_syscall /* old "create_module" */ | ||
791 | .long sys_init_module | ||
792 | .long sys_delete_module | ||
793 | .long sys_ni_syscall /* 130: old "get_kernel_syms" */ | ||
794 | .long sys_quotactl | ||
795 | .long sys_getpgid | ||
796 | .long sys_fchdir | ||
797 | .long sys_bdflush | ||
798 | .long sys_sysfs /* 135 */ | ||
799 | .long sys_personality | ||
800 | .long sys_ni_syscall /* reserved for afs_syscall */ | ||
801 | .long sys_setfsuid16 | ||
802 | .long sys_setfsgid16 | ||
803 | .long sys_llseek /* 140 */ | ||
804 | .long sys_getdents | ||
805 | .long sys_select | ||
806 | .long sys_flock | ||
807 | .long sys_msync | ||
808 | .long sys_readv /* 145 */ | ||
809 | .long sys_writev | ||
810 | .long sys_getsid | ||
811 | .long sys_fdatasync | ||
812 | .long sys_sysctl | ||
813 | .long sys_mlock /* 150 */ | ||
814 | .long sys_munlock | ||
815 | .long sys_mlockall | ||
816 | .long sys_munlockall | ||
817 | .long sys_sched_setparam | ||
818 | .long sys_sched_getparam /* 155 */ | ||
819 | .long sys_sched_setscheduler | ||
820 | .long sys_sched_getscheduler | ||
821 | .long sys_sched_yield | ||
822 | .long sys_sched_get_priority_max | ||
823 | .long sys_sched_get_priority_min /* 160 */ | ||
824 | .long sys_sched_rr_get_interval | ||
825 | .long sys_nanosleep | ||
826 | .long sys_mremap | ||
827 | .long sys_setresuid16 | ||
828 | .long sys_getresuid16 /* 165 */ | ||
829 | .long sys_vm86 | ||
830 | .long sys_ni_syscall /* Old sys_query_module */ | ||
831 | .long sys_poll | ||
832 | .long sys_nfsservctl | ||
833 | .long sys_setresgid16 /* 170 */ | ||
834 | .long sys_getresgid16 | ||
835 | .long sys_prctl | ||
836 | .long sys_rt_sigreturn | ||
837 | .long sys_rt_sigaction | ||
838 | .long sys_rt_sigprocmask /* 175 */ | ||
839 | .long sys_rt_sigpending | ||
840 | .long sys_rt_sigtimedwait | ||
841 | .long sys_rt_sigqueueinfo | ||
842 | .long sys_rt_sigsuspend | ||
843 | .long sys_pread64 /* 180 */ | ||
844 | .long sys_pwrite64 | ||
845 | .long sys_chown16 | ||
846 | .long sys_getcwd | ||
847 | .long sys_capget | ||
848 | .long sys_capset /* 185 */ | ||
849 | .long sys_sigaltstack | ||
850 | .long sys_sendfile | ||
851 | .long sys_ni_syscall /* reserved for streams1 */ | ||
852 | .long sys_ni_syscall /* reserved for streams2 */ | ||
853 | .long sys_vfork /* 190 */ | ||
854 | .long sys_getrlimit | ||
855 | .long sys_mmap2 | ||
856 | .long sys_truncate64 | ||
857 | .long sys_ftruncate64 | ||
858 | .long sys_stat64 /* 195 */ | ||
859 | .long sys_lstat64 | ||
860 | .long sys_fstat64 | ||
861 | .long sys_lchown | ||
862 | .long sys_getuid | ||
863 | .long sys_getgid /* 200 */ | ||
864 | .long sys_geteuid | ||
865 | .long sys_getegid | ||
866 | .long sys_setreuid | ||
867 | .long sys_setregid | ||
868 | .long sys_getgroups /* 205 */ | ||
869 | .long sys_setgroups | ||
870 | .long sys_fchown | ||
871 | .long sys_setresuid | ||
872 | .long sys_getresuid | ||
873 | .long sys_setresgid /* 210 */ | ||
874 | .long sys_getresgid | ||
875 | .long sys_chown | ||
876 | .long sys_setuid | ||
877 | .long sys_setgid | ||
878 | .long sys_setfsuid /* 215 */ | ||
879 | .long sys_setfsgid | ||
880 | .long sys_pivot_root | ||
881 | .long sys_mincore | ||
882 | .long sys_madvise | ||
883 | .long sys_getdents64 /* 220 */ | ||
884 | .long sys_fcntl64 | ||
885 | .long sys_ni_syscall /* reserved for TUX */ | ||
886 | .long sys_ni_syscall | ||
887 | .long sys_gettid | ||
888 | .long sys_readahead /* 225 */ | ||
889 | .long sys_setxattr | ||
890 | .long sys_lsetxattr | ||
891 | .long sys_fsetxattr | ||
892 | .long sys_getxattr | ||
893 | .long sys_lgetxattr /* 230 */ | ||
894 | .long sys_fgetxattr | ||
895 | .long sys_listxattr | ||
896 | .long sys_llistxattr | ||
897 | .long sys_flistxattr | ||
898 | .long sys_removexattr /* 235 */ | ||
899 | .long sys_lremovexattr | ||
900 | .long sys_fremovexattr | ||
901 | .long sys_tkill | ||
902 | .long sys_sendfile64 | ||
903 | .long sys_futex /* 240 */ | ||
904 | .long sys_sched_setaffinity | ||
905 | .long sys_sched_getaffinity | ||
906 | .long sys_set_thread_area | ||
907 | .long sys_get_thread_area | ||
908 | .long sys_io_setup /* 245 */ | ||
909 | .long sys_io_destroy | ||
910 | .long sys_io_getevents | ||
911 | .long sys_io_submit | ||
912 | .long sys_io_cancel | ||
913 | .long sys_fadvise64 /* 250 */ | ||
914 | .long sys_ni_syscall | ||
915 | .long sys_exit_group | ||
916 | .long sys_lookup_dcookie | ||
917 | .long sys_epoll_create | ||
918 | .long sys_epoll_ctl /* 255 */ | ||
919 | .long sys_epoll_wait | ||
920 | .long sys_remap_file_pages | ||
921 | .long sys_set_tid_address | ||
922 | .long sys_timer_create | ||
923 | .long sys_timer_settime /* 260 */ | ||
924 | .long sys_timer_gettime | ||
925 | .long sys_timer_getoverrun | ||
926 | .long sys_timer_delete | ||
927 | .long sys_clock_settime | ||
928 | .long sys_clock_gettime /* 265 */ | ||
929 | .long sys_clock_getres | ||
930 | .long sys_clock_nanosleep | ||
931 | .long sys_statfs64 | ||
932 | .long sys_fstatfs64 | ||
933 | .long sys_tgkill /* 270 */ | ||
934 | .long sys_utimes | ||
935 | .long sys_fadvise64_64 | ||
936 | .long sys_ni_syscall /* sys_vserver */ | ||
937 | .long sys_mbind | ||
938 | .long sys_get_mempolicy | ||
939 | .long sys_set_mempolicy | ||
940 | .long sys_mq_open | ||
941 | .long sys_mq_unlink | ||
942 | .long sys_mq_timedsend | ||
943 | .long sys_mq_timedreceive /* 280 */ | ||
944 | .long sys_mq_notify | ||
945 | .long sys_mq_getsetattr | ||
946 | .long sys_ni_syscall /* reserved for kexec */ | ||
947 | .long sys_waitid | ||
948 | .long sys_ni_syscall /* 285 */ /* available */ | ||
949 | .long sys_add_key | ||
950 | .long sys_request_key | ||
951 | .long sys_keyctl | ||
952 | 656 | ||
953 | syscall_table_size=(.-sys_call_table) | 657 | syscall_table_size=(.-sys_call_table) |
diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S index d273fd746192..e966fc8c44c4 100644 --- a/arch/i386/kernel/head.S +++ b/arch/i386/kernel/head.S | |||
@@ -380,6 +380,7 @@ rp_sidt: | |||
380 | ALIGN | 380 | ALIGN |
381 | ignore_int: | 381 | ignore_int: |
382 | cld | 382 | cld |
383 | #ifdef CONFIG_PRINTK | ||
383 | pushl %eax | 384 | pushl %eax |
384 | pushl %ecx | 385 | pushl %ecx |
385 | pushl %edx | 386 | pushl %edx |
@@ -400,6 +401,7 @@ ignore_int: | |||
400 | popl %edx | 401 | popl %edx |
401 | popl %ecx | 402 | popl %ecx |
402 | popl %eax | 403 | popl %eax |
404 | #endif | ||
403 | iret | 405 | iret |
404 | 406 | ||
405 | /* | 407 | /* |
diff --git a/arch/i386/kernel/i386_ksyms.c b/arch/i386/kernel/i386_ksyms.c index 14ec354bec92..903190a4b3ff 100644 --- a/arch/i386/kernel/i386_ksyms.c +++ b/arch/i386/kernel/i386_ksyms.c | |||
@@ -169,10 +169,6 @@ EXPORT_SYMBOL(rtc_lock); | |||
169 | EXPORT_SYMBOL_GPL(set_nmi_callback); | 169 | EXPORT_SYMBOL_GPL(set_nmi_callback); |
170 | EXPORT_SYMBOL_GPL(unset_nmi_callback); | 170 | EXPORT_SYMBOL_GPL(unset_nmi_callback); |
171 | 171 | ||
172 | #undef memcmp | ||
173 | extern int memcmp(const void *,const void *,__kernel_size_t); | ||
174 | EXPORT_SYMBOL(memcmp); | ||
175 | |||
176 | EXPORT_SYMBOL(register_die_notifier); | 172 | EXPORT_SYMBOL(register_die_notifier); |
177 | #ifdef CONFIG_HAVE_DEC_LOCK | 173 | #ifdef CONFIG_HAVE_DEC_LOCK |
178 | EXPORT_SYMBOL(_atomic_dec_and_lock); | 174 | EXPORT_SYMBOL(_atomic_dec_and_lock); |
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c index 5e0d55be5435..7a324e8b86f9 100644 --- a/arch/i386/kernel/io_apic.c +++ b/arch/i386/kernel/io_apic.c | |||
@@ -2175,7 +2175,6 @@ static inline void check_timer(void) | |||
2175 | disable_8259A_irq(0); | 2175 | disable_8259A_irq(0); |
2176 | setup_nmi(); | 2176 | setup_nmi(); |
2177 | enable_8259A_irq(0); | 2177 | enable_8259A_irq(0); |
2178 | check_nmi_watchdog(); | ||
2179 | } | 2178 | } |
2180 | return; | 2179 | return; |
2181 | } | 2180 | } |
@@ -2198,7 +2197,6 @@ static inline void check_timer(void) | |||
2198 | add_pin_to_irq(0, 0, pin2); | 2197 | add_pin_to_irq(0, 0, pin2); |
2199 | if (nmi_watchdog == NMI_IO_APIC) { | 2198 | if (nmi_watchdog == NMI_IO_APIC) { |
2200 | setup_nmi(); | 2199 | setup_nmi(); |
2201 | check_nmi_watchdog(); | ||
2202 | } | 2200 | } |
2203 | return; | 2201 | return; |
2204 | } | 2202 | } |
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c index 671681659243..59ff9b455069 100644 --- a/arch/i386/kernel/kprobes.c +++ b/arch/i386/kernel/kprobes.c | |||
@@ -217,6 +217,13 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs) | |||
217 | *tos &= ~(TF_MASK | IF_MASK); | 217 | *tos &= ~(TF_MASK | IF_MASK); |
218 | *tos |= kprobe_old_eflags; | 218 | *tos |= kprobe_old_eflags; |
219 | break; | 219 | break; |
220 | case 0xc3: /* ret/lret */ | ||
221 | case 0xcb: | ||
222 | case 0xc2: | ||
223 | case 0xca: | ||
224 | regs->eflags &= ~TF_MASK; | ||
225 | /* eip is already adjusted, no more changes required*/ | ||
226 | return; | ||
220 | case 0xe8: /* call relative - Fix return addr */ | 227 | case 0xe8: /* call relative - Fix return addr */ |
221 | *tos = orig_eip + (*tos - copy_eip); | 228 | *tos = orig_eip + (*tos - copy_eip); |
222 | break; | 229 | break; |
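The new cases cover ret/lret: by the time resume_execution() runs, the stepped instruction has already popped a correct return address, so only the trap flag needs clearing. The existing call-relative case, by contrast, still has to rebase the address the stepped copy pushed; a worked example of that fix-up, with invented addresses:

	/*
	 * Worked example (addresses made up).  The probed "call" lives at
	 * orig_eip = 0xc0123450 and was single-stepped from a copy at
	 * copy_eip = 0xc1ffe000.  The stepped call pushed
	 *
	 *	*tos = copy_eip + 5 = 0xc1ffe005
	 *
	 * so the fix-up in the 0xe8 case,
	 *
	 *	*tos = orig_eip + (*tos - copy_eip) = 0xc0123450 + 5 = 0xc0123455
	 *
	 * makes the return land just past the original instruction instead of
	 * just past the copy.
	 */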
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c index 2f89d000f954..2c0ee9c2d020 100644 --- a/arch/i386/kernel/nmi.c +++ b/arch/i386/kernel/nmi.c | |||
@@ -102,20 +102,21 @@ int nmi_active; | |||
102 | (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \ | 102 | (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \ |
103 | P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE) | 103 | P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE) |
104 | 104 | ||
105 | int __init check_nmi_watchdog (void) | 105 | static int __init check_nmi_watchdog(void) |
106 | { | 106 | { |
107 | unsigned int prev_nmi_count[NR_CPUS]; | 107 | unsigned int prev_nmi_count[NR_CPUS]; |
108 | int cpu; | 108 | int cpu; |
109 | 109 | ||
110 | printk(KERN_INFO "testing NMI watchdog ... "); | 110 | if (nmi_watchdog == NMI_NONE) |
111 | return 0; | ||
112 | |||
113 | printk(KERN_INFO "Testing NMI watchdog ... "); | ||
111 | 114 | ||
112 | for (cpu = 0; cpu < NR_CPUS; cpu++) | 115 | for (cpu = 0; cpu < NR_CPUS; cpu++) |
113 | prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count; | 116 | prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count; |
114 | local_irq_enable(); | 117 | local_irq_enable(); |
115 | mdelay((10*1000)/nmi_hz); // wait 10 ticks | 118 | mdelay((10*1000)/nmi_hz); // wait 10 ticks |
116 | 119 | ||
117 | /* FIXME: Only boot CPU is online at this stage. Check CPUs | ||
118 | as they come up. */ | ||
119 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | 120 | for (cpu = 0; cpu < NR_CPUS; cpu++) { |
120 | #ifdef CONFIG_SMP | 121 | #ifdef CONFIG_SMP |
121 | /* Check cpu_callin_map here because that is set | 122 | /* Check cpu_callin_map here because that is set |
@@ -139,6 +140,8 @@ int __init check_nmi_watchdog (void) | |||
139 | 140 | ||
140 | return 0; | 141 | return 0; |
141 | } | 142 | } |
143 | /* This needs to happen later in boot so counters are working */ | ||
144 | late_initcall(check_nmi_watchdog); | ||
142 | 145 | ||
143 | static int __init setup_nmi_watchdog(char *str) | 146 | static int __init setup_nmi_watchdog(char *str) |
144 | { | 147 | { |
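With this hunk the watchdog self-test stops being called from the APIC/SMP bring-up paths (those explicit calls are dropped in the apic.c, io_apic.c and smpboot.c hunks of this patch) and instead runs as a late_initcall, once secondary CPUs are online and the NMI counters are actually ticking. The registration pattern, as a minimal sketch:

	#include <linux/init.h>

	/*
	 * Minimal late_initcall sketch (simplified): the function must return
	 * int and runs once, after all earlier initcall levels, late in boot.
	 */
	static int __init check_something_late(void)
	{
		/* by now all boot-time CPUs are up and per-CPU counters are live */
		return 0;
	}
	late_initcall(check_something_late);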
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c index b2203e21acb3..96e3ea6b17c7 100644 --- a/arch/i386/kernel/process.c +++ b/arch/i386/kernel/process.c | |||
@@ -400,11 +400,6 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp, | |||
400 | int err; | 400 | int err; |
401 | 401 | ||
402 | childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1; | 402 | childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1; |
403 | *childregs = *regs; | ||
404 | childregs->eax = 0; | ||
405 | childregs->esp = esp; | ||
406 | |||
407 | p->thread.esp = (unsigned long) childregs; | ||
408 | /* | 403 | /* |
409 | * The below -8 is to reserve 8 bytes on top of the ring0 stack. | 404 | * The below -8 is to reserve 8 bytes on top of the ring0 stack. |
410 | * This is necessary to guarantee that the entire "struct pt_regs" | 405 | * This is necessary to guarantee that the entire "struct pt_regs" |
@@ -415,7 +410,13 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp, | |||
415 | * "struct pt_regs" is possible, but they may contain the | 410 | * "struct pt_regs" is possible, but they may contain the |
416 | * completely wrong values. | 411 | * completely wrong values. |
417 | */ | 412 | */ |
418 | p->thread.esp0 = (unsigned long) (childregs+1) - 8; | 413 | childregs = (struct pt_regs *) ((unsigned long) childregs - 8); |
414 | *childregs = *regs; | ||
415 | childregs->eax = 0; | ||
416 | childregs->esp = esp; | ||
417 | |||
418 | p->thread.esp = (unsigned long) childregs; | ||
419 | p->thread.esp0 = (unsigned long) (childregs+1); | ||
419 | 420 | ||
420 | p->thread.eip = (unsigned long) ret_from_fork; | 421 | p->thread.eip = (unsigned long) ret_from_fork; |
421 | 422 | ||
@@ -611,8 +612,8 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas | |||
611 | * Save away %fs and %gs. No need to save %es and %ds, as | 612 | * Save away %fs and %gs. No need to save %es and %ds, as |
612 | * those are always kernel segments while inside the kernel. | 613 | * those are always kernel segments while inside the kernel. |
613 | */ | 614 | */ |
614 | asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs)); | 615 | asm volatile("mov %%fs,%0":"=m" (prev->fs)); |
615 | asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs)); | 616 | asm volatile("mov %%gs,%0":"=m" (prev->gs)); |
616 | 617 | ||
617 | /* | 618 | /* |
618 | * Restore %fs and %gs if needed. | 619 | * Restore %fs and %gs if needed. |
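The copy_thread() change keeps the 8 reserved bytes at the top of the ring-0 stack but moves the child's saved register frame below them, so esp0 ends up immediately past that frame. A sketch of the resulting layout, drawn from the hunk purely for illustration:

	/*
	 * Child kernel stack after this change (stack top at the right):
	 *
	 *   ... free ... | struct pt_regs (copy of parent's regs) | 8 spare bytes |
	 *                ^ childregs                               ^ p->thread.esp0
	 *
	 * Previously the pt_regs copy sat flush against the stack top while esp0
	 * still pointed 8 bytes below it; now the saved frame and esp0 agree.
	 */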
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c index b2f17640ceff..e34f651fa13c 100644 --- a/arch/i386/kernel/ptrace.c +++ b/arch/i386/kernel/ptrace.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/security.h> | 16 | #include <linux/security.h> |
17 | #include <linux/audit.h> | 17 | #include <linux/audit.h> |
18 | #include <linux/seccomp.h> | 18 | #include <linux/seccomp.h> |
19 | #include <linux/signal.h> | ||
19 | 20 | ||
20 | #include <asm/uaccess.h> | 21 | #include <asm/uaccess.h> |
21 | #include <asm/pgtable.h> | 22 | #include <asm/pgtable.h> |
@@ -511,7 +512,7 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) | |||
511 | case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ | 512 | case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ |
512 | case PTRACE_CONT: /* restart after signal. */ | 513 | case PTRACE_CONT: /* restart after signal. */ |
513 | ret = -EIO; | 514 | ret = -EIO; |
514 | if ((unsigned long) data > _NSIG) | 515 | if (!valid_signal(data)) |
515 | break; | 516 | break; |
516 | if (request == PTRACE_SYSCALL) { | 517 | if (request == PTRACE_SYSCALL) { |
517 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 518 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
@@ -543,7 +544,7 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) | |||
543 | 544 | ||
544 | case PTRACE_SINGLESTEP: /* set the trap flag. */ | 545 | case PTRACE_SINGLESTEP: /* set the trap flag. */ |
545 | ret = -EIO; | 546 | ret = -EIO; |
546 | if ((unsigned long) data > _NSIG) | 547 | if (!valid_signal(data)) |
547 | break; | 548 | break; |
548 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 549 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
549 | set_singlestep(child); | 550 | set_singlestep(child); |
@@ -682,24 +683,18 @@ void do_syscall_trace(struct pt_regs *regs, int entryexit) | |||
682 | /* do the secure computing check first */ | 683 | /* do the secure computing check first */ |
683 | secure_computing(regs->orig_eax); | 684 | secure_computing(regs->orig_eax); |
684 | 685 | ||
685 | if (unlikely(current->audit_context)) { | 686 | if (unlikely(current->audit_context) && entryexit) |
686 | if (!entryexit) | 687 | audit_syscall_exit(current, AUDITSC_RESULT(regs->eax), regs->eax); |
687 | audit_syscall_entry(current, regs->orig_eax, | ||
688 | regs->ebx, regs->ecx, | ||
689 | regs->edx, regs->esi); | ||
690 | else | ||
691 | audit_syscall_exit(current, regs->eax); | ||
692 | } | ||
693 | 688 | ||
694 | if (!(current->ptrace & PT_PTRACED)) | 689 | if (!(current->ptrace & PT_PTRACED)) |
695 | return; | 690 | goto out; |
696 | 691 | ||
697 | /* Fake a debug trap */ | 692 | /* Fake a debug trap */ |
698 | if (test_thread_flag(TIF_SINGLESTEP)) | 693 | if (test_thread_flag(TIF_SINGLESTEP)) |
699 | send_sigtrap(current, regs, 0); | 694 | send_sigtrap(current, regs, 0); |
700 | 695 | ||
701 | if (!test_thread_flag(TIF_SYSCALL_TRACE)) | 696 | if (!test_thread_flag(TIF_SYSCALL_TRACE)) |
702 | return; | 697 | goto out; |
703 | 698 | ||
704 | /* the 0x80 provides a way for the tracing parent to distinguish | 699 | /* the 0x80 provides a way for the tracing parent to distinguish |
705 | between a syscall stop and SIGTRAP delivery */ | 700 | between a syscall stop and SIGTRAP delivery */ |
@@ -714,4 +709,9 @@ void do_syscall_trace(struct pt_regs *regs, int entryexit) | |||
714 | send_sig(current->exit_code, current, 1); | 709 | send_sig(current->exit_code, current, 1); |
715 | current->exit_code = 0; | 710 | current->exit_code = 0; |
716 | } | 711 | } |
712 | out: | ||
713 | if (unlikely(current->audit_context) && !entryexit) | ||
714 | audit_syscall_entry(current, AUDIT_ARCH_I386, regs->orig_eax, | ||
715 | regs->ebx, regs->ecx, regs->edx, regs->esi); | ||
716 | |||
717 | } | 717 | } |
diff --git a/arch/i386/kernel/reboot.c b/arch/i386/kernel/reboot.c index 3d7e994563df..6dc27eb70ee7 100644 --- a/arch/i386/kernel/reboot.c +++ b/arch/i386/kernel/reboot.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <asm/uaccess.h> | 13 | #include <asm/uaccess.h> |
14 | #include <asm/apic.h> | 14 | #include <asm/apic.h> |
15 | #include "mach_reboot.h" | 15 | #include "mach_reboot.h" |
16 | #include <linux/reboot_fixups.h> | ||
16 | 17 | ||
17 | /* | 18 | /* |
18 | * Power off function, if any | 19 | * Power off function, if any |
@@ -348,6 +349,7 @@ void machine_restart(char * __unused) | |||
348 | /* rebooting needs to touch the page at absolute addr 0 */ | 349 | /* rebooting needs to touch the page at absolute addr 0 */ |
349 | *((unsigned short *)__va(0x472)) = reboot_mode; | 350 | *((unsigned short *)__va(0x472)) = reboot_mode; |
350 | for (;;) { | 351 | for (;;) { |
352 | mach_reboot_fixups(); /* for board specific fixups */ | ||
351 | mach_reboot(); | 353 | mach_reboot(); |
352 | /* That didn't work - force a triple fault.. */ | 354 | /* That didn't work - force a triple fault.. */ |
353 | __asm__ __volatile__("lidt %0": :"m" (no_idt)); | 355 | __asm__ __volatile__("lidt %0": :"m" (no_idt)); |
diff --git a/arch/i386/kernel/reboot_fixups.c b/arch/i386/kernel/reboot_fixups.c new file mode 100644 index 000000000000..1b183b378c2c --- /dev/null +++ b/arch/i386/kernel/reboot_fixups.c | |||
@@ -0,0 +1,56 @@ | |||
1 | /* | ||
2 | * linux/arch/i386/kernel/reboot_fixups.c | ||
3 | * | ||
4 | * This is a good place to put board specific reboot fixups. | ||
5 | * | ||
6 | * List of supported fixups: | ||
7 | * geode-gx1/cs5530a - Jaya Kumar <jayalk@intworks.biz> | ||
8 | * | ||
9 | */ | ||
10 | |||
11 | #include <asm/delay.h> | ||
12 | #include <linux/pci.h> | ||
13 | |||
14 | static void cs5530a_warm_reset(struct pci_dev *dev) | ||
15 | { | ||
16 | /* writing 1 to the reset control register, 0x44 causes the | ||
17 | cs5530a to perform a system warm reset */ | ||
18 | pci_write_config_byte(dev, 0x44, 0x1); | ||
19 | udelay(50); /* shouldn't get here but be safe and spin-a-while */ | ||
20 | return; | ||
21 | } | ||
22 | |||
23 | struct device_fixup { | ||
24 | unsigned int vendor; | ||
25 | unsigned int device; | ||
26 | void (*reboot_fixup)(struct pci_dev *); | ||
27 | }; | ||
28 | |||
29 | static struct device_fixup fixups_table[] = { | ||
30 | { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, cs5530a_warm_reset }, | ||
31 | }; | ||
32 | |||
33 | /* | ||
34 | * we see if any fixup is available for our current hardware. if there | ||
35 | * is a fixup, we call it and we expect to never return from it. if we | ||
36 | * do return, we keep looking and then eventually fall back to the | ||
37 | * standard mach_reboot on return. | ||
38 | */ | ||
39 | void mach_reboot_fixups(void) | ||
40 | { | ||
41 | struct device_fixup *cur; | ||
42 | struct pci_dev *dev; | ||
43 | int i; | ||
44 | |||
45 | for (i=0; i < (sizeof(fixups_table)/sizeof(fixups_table[0])); i++) { | ||
46 | cur = &(fixups_table[i]); | ||
47 | dev = pci_get_device(cur->vendor, cur->device, 0); | ||
48 | if (!dev) | ||
49 | continue; | ||
50 | |||
51 | cur->reboot_fixup(dev); | ||
52 | } | ||
53 | |||
54 | printk(KERN_WARNING "No reboot fixup found for your hardware\n"); | ||
55 | } | ||
56 | |||
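Because the mechanism is table-driven, supporting another board means adding one handler and one entry to the file above. A hypothetical example; the device ID, config register and handler name below are invented for illustration and are not part of this patch:

	/* Hypothetical extension of arch/i386/kernel/reboot_fixups.c. */
	static void example_board_warm_reset(struct pci_dev *dev)
	{
		/* register 0x40 and value 0x1 are placeholders */
		pci_write_config_byte(dev, 0x40, 0x1);
		udelay(50);
	}

	static struct device_fixup fixups_table[] = {
		{ PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, cs5530a_warm_reset },
		{ PCI_VENDOR_ID_CYRIX, 0x0002 /* placeholder ID */, example_board_warm_reset },
	};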
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c index fd36d2f65f88..cbea7ac582e5 100644 --- a/arch/i386/kernel/smpboot.c +++ b/arch/i386/kernel/smpboot.c | |||
@@ -1089,9 +1089,6 @@ static void __init smp_boot_cpus(unsigned int max_cpus) | |||
1089 | } | 1089 | } |
1090 | } | 1090 | } |
1091 | 1091 | ||
1092 | if (nmi_watchdog == NMI_LOCAL_APIC) | ||
1093 | check_nmi_watchdog(); | ||
1094 | |||
1095 | smpboot_setup_io_apic(); | 1092 | smpboot_setup_io_apic(); |
1096 | 1093 | ||
1097 | setup_boot_APIC_clock(); | 1094 | setup_boot_APIC_clock(); |
diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S new file mode 100644 index 000000000000..6cd1ed311f02 --- /dev/null +++ b/arch/i386/kernel/syscall_table.S | |||
@@ -0,0 +1,291 @@ | |||
1 | .data | ||
2 | ENTRY(sys_call_table) | ||
3 | .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */ | ||
4 | .long sys_exit | ||
5 | .long sys_fork | ||
6 | .long sys_read | ||
7 | .long sys_write | ||
8 | .long sys_open /* 5 */ | ||
9 | .long sys_close | ||
10 | .long sys_waitpid | ||
11 | .long sys_creat | ||
12 | .long sys_link | ||
13 | .long sys_unlink /* 10 */ | ||
14 | .long sys_execve | ||
15 | .long sys_chdir | ||
16 | .long sys_time | ||
17 | .long sys_mknod | ||
18 | .long sys_chmod /* 15 */ | ||
19 | .long sys_lchown16 | ||
20 | .long sys_ni_syscall /* old break syscall holder */ | ||
21 | .long sys_stat | ||
22 | .long sys_lseek | ||
23 | .long sys_getpid /* 20 */ | ||
24 | .long sys_mount | ||
25 | .long sys_oldumount | ||
26 | .long sys_setuid16 | ||
27 | .long sys_getuid16 | ||
28 | .long sys_stime /* 25 */ | ||
29 | .long sys_ptrace | ||
30 | .long sys_alarm | ||
31 | .long sys_fstat | ||
32 | .long sys_pause | ||
33 | .long sys_utime /* 30 */ | ||
34 | .long sys_ni_syscall /* old stty syscall holder */ | ||
35 | .long sys_ni_syscall /* old gtty syscall holder */ | ||
36 | .long sys_access | ||
37 | .long sys_nice | ||
38 | .long sys_ni_syscall /* 35 - old ftime syscall holder */ | ||
39 | .long sys_sync | ||
40 | .long sys_kill | ||
41 | .long sys_rename | ||
42 | .long sys_mkdir | ||
43 | .long sys_rmdir /* 40 */ | ||
44 | .long sys_dup | ||
45 | .long sys_pipe | ||
46 | .long sys_times | ||
47 | .long sys_ni_syscall /* old prof syscall holder */ | ||
48 | .long sys_brk /* 45 */ | ||
49 | .long sys_setgid16 | ||
50 | .long sys_getgid16 | ||
51 | .long sys_signal | ||
52 | .long sys_geteuid16 | ||
53 | .long sys_getegid16 /* 50 */ | ||
54 | .long sys_acct | ||
55 | .long sys_umount /* recycled never used phys() */ | ||
56 | .long sys_ni_syscall /* old lock syscall holder */ | ||
57 | .long sys_ioctl | ||
58 | .long sys_fcntl /* 55 */ | ||
59 | .long sys_ni_syscall /* old mpx syscall holder */ | ||
60 | .long sys_setpgid | ||
61 | .long sys_ni_syscall /* old ulimit syscall holder */ | ||
62 | .long sys_olduname | ||
63 | .long sys_umask /* 60 */ | ||
64 | .long sys_chroot | ||
65 | .long sys_ustat | ||
66 | .long sys_dup2 | ||
67 | .long sys_getppid | ||
68 | .long sys_getpgrp /* 65 */ | ||
69 | .long sys_setsid | ||
70 | .long sys_sigaction | ||
71 | .long sys_sgetmask | ||
72 | .long sys_ssetmask | ||
73 | .long sys_setreuid16 /* 70 */ | ||
74 | .long sys_setregid16 | ||
75 | .long sys_sigsuspend | ||
76 | .long sys_sigpending | ||
77 | .long sys_sethostname | ||
78 | .long sys_setrlimit /* 75 */ | ||
79 | .long sys_old_getrlimit | ||
80 | .long sys_getrusage | ||
81 | .long sys_gettimeofday | ||
82 | .long sys_settimeofday | ||
83 | .long sys_getgroups16 /* 80 */ | ||
84 | .long sys_setgroups16 | ||
85 | .long old_select | ||
86 | .long sys_symlink | ||
87 | .long sys_lstat | ||
88 | .long sys_readlink /* 85 */ | ||
89 | .long sys_uselib | ||
90 | .long sys_swapon | ||
91 | .long sys_reboot | ||
92 | .long old_readdir | ||
93 | .long old_mmap /* 90 */ | ||
94 | .long sys_munmap | ||
95 | .long sys_truncate | ||
96 | .long sys_ftruncate | ||
97 | .long sys_fchmod | ||
98 | .long sys_fchown16 /* 95 */ | ||
99 | .long sys_getpriority | ||
100 | .long sys_setpriority | ||
101 | .long sys_ni_syscall /* old profil syscall holder */ | ||
102 | .long sys_statfs | ||
103 | .long sys_fstatfs /* 100 */ | ||
104 | .long sys_ioperm | ||
105 | .long sys_socketcall | ||
106 | .long sys_syslog | ||
107 | .long sys_setitimer | ||
108 | .long sys_getitimer /* 105 */ | ||
109 | .long sys_newstat | ||
110 | .long sys_newlstat | ||
111 | .long sys_newfstat | ||
112 | .long sys_uname | ||
113 | .long sys_iopl /* 110 */ | ||
114 | .long sys_vhangup | ||
115 | .long sys_ni_syscall /* old "idle" system call */ | ||
116 | .long sys_vm86old | ||
117 | .long sys_wait4 | ||
118 | .long sys_swapoff /* 115 */ | ||
119 | .long sys_sysinfo | ||
120 | .long sys_ipc | ||
121 | .long sys_fsync | ||
122 | .long sys_sigreturn | ||
123 | .long sys_clone /* 120 */ | ||
124 | .long sys_setdomainname | ||
125 | .long sys_newuname | ||
126 | .long sys_modify_ldt | ||
127 | .long sys_adjtimex | ||
128 | .long sys_mprotect /* 125 */ | ||
129 | .long sys_sigprocmask | ||
130 | .long sys_ni_syscall /* old "create_module" */ | ||
131 | .long sys_init_module | ||
132 | .long sys_delete_module | ||
133 | .long sys_ni_syscall /* 130: old "get_kernel_syms" */ | ||
134 | .long sys_quotactl | ||
135 | .long sys_getpgid | ||
136 | .long sys_fchdir | ||
137 | .long sys_bdflush | ||
138 | .long sys_sysfs /* 135 */ | ||
139 | .long sys_personality | ||
140 | .long sys_ni_syscall /* reserved for afs_syscall */ | ||
141 | .long sys_setfsuid16 | ||
142 | .long sys_setfsgid16 | ||
143 | .long sys_llseek /* 140 */ | ||
144 | .long sys_getdents | ||
145 | .long sys_select | ||
146 | .long sys_flock | ||
147 | .long sys_msync | ||
148 | .long sys_readv /* 145 */ | ||
149 | .long sys_writev | ||
150 | .long sys_getsid | ||
151 | .long sys_fdatasync | ||
152 | .long sys_sysctl | ||
153 | .long sys_mlock /* 150 */ | ||
154 | .long sys_munlock | ||
155 | .long sys_mlockall | ||
156 | .long sys_munlockall | ||
157 | .long sys_sched_setparam | ||
158 | .long sys_sched_getparam /* 155 */ | ||
159 | .long sys_sched_setscheduler | ||
160 | .long sys_sched_getscheduler | ||
161 | .long sys_sched_yield | ||
162 | .long sys_sched_get_priority_max | ||
163 | .long sys_sched_get_priority_min /* 160 */ | ||
164 | .long sys_sched_rr_get_interval | ||
165 | .long sys_nanosleep | ||
166 | .long sys_mremap | ||
167 | .long sys_setresuid16 | ||
168 | .long sys_getresuid16 /* 165 */ | ||
169 | .long sys_vm86 | ||
170 | .long sys_ni_syscall /* Old sys_query_module */ | ||
171 | .long sys_poll | ||
172 | .long sys_nfsservctl | ||
173 | .long sys_setresgid16 /* 170 */ | ||
174 | .long sys_getresgid16 | ||
175 | .long sys_prctl | ||
176 | .long sys_rt_sigreturn | ||
177 | .long sys_rt_sigaction | ||
178 | .long sys_rt_sigprocmask /* 175 */ | ||
179 | .long sys_rt_sigpending | ||
180 | .long sys_rt_sigtimedwait | ||
181 | .long sys_rt_sigqueueinfo | ||
182 | .long sys_rt_sigsuspend | ||
183 | .long sys_pread64 /* 180 */ | ||
184 | .long sys_pwrite64 | ||
185 | .long sys_chown16 | ||
186 | .long sys_getcwd | ||
187 | .long sys_capget | ||
188 | .long sys_capset /* 185 */ | ||
189 | .long sys_sigaltstack | ||
190 | .long sys_sendfile | ||
191 | .long sys_ni_syscall /* reserved for streams1 */ | ||
192 | .long sys_ni_syscall /* reserved for streams2 */ | ||
193 | .long sys_vfork /* 190 */ | ||
194 | .long sys_getrlimit | ||
195 | .long sys_mmap2 | ||
196 | .long sys_truncate64 | ||
197 | .long sys_ftruncate64 | ||
198 | .long sys_stat64 /* 195 */ | ||
199 | .long sys_lstat64 | ||
200 | .long sys_fstat64 | ||
201 | .long sys_lchown | ||
202 | .long sys_getuid | ||
203 | .long sys_getgid /* 200 */ | ||
204 | .long sys_geteuid | ||
205 | .long sys_getegid | ||
206 | .long sys_setreuid | ||
207 | .long sys_setregid | ||
208 | .long sys_getgroups /* 205 */ | ||
209 | .long sys_setgroups | ||
210 | .long sys_fchown | ||
211 | .long sys_setresuid | ||
212 | .long sys_getresuid | ||
213 | .long sys_setresgid /* 210 */ | ||
214 | .long sys_getresgid | ||
215 | .long sys_chown | ||
216 | .long sys_setuid | ||
217 | .long sys_setgid | ||
218 | .long sys_setfsuid /* 215 */ | ||
219 | .long sys_setfsgid | ||
220 | .long sys_pivot_root | ||
221 | .long sys_mincore | ||
222 | .long sys_madvise | ||
223 | .long sys_getdents64 /* 220 */ | ||
224 | .long sys_fcntl64 | ||
225 | .long sys_ni_syscall /* reserved for TUX */ | ||
226 | .long sys_ni_syscall | ||
227 | .long sys_gettid | ||
228 | .long sys_readahead /* 225 */ | ||
229 | .long sys_setxattr | ||
230 | .long sys_lsetxattr | ||
231 | .long sys_fsetxattr | ||
232 | .long sys_getxattr | ||
233 | .long sys_lgetxattr /* 230 */ | ||
234 | .long sys_fgetxattr | ||
235 | .long sys_listxattr | ||
236 | .long sys_llistxattr | ||
237 | .long sys_flistxattr | ||
238 | .long sys_removexattr /* 235 */ | ||
239 | .long sys_lremovexattr | ||
240 | .long sys_fremovexattr | ||
241 | .long sys_tkill | ||
242 | .long sys_sendfile64 | ||
243 | .long sys_futex /* 240 */ | ||
244 | .long sys_sched_setaffinity | ||
245 | .long sys_sched_getaffinity | ||
246 | .long sys_set_thread_area | ||
247 | .long sys_get_thread_area | ||
248 | .long sys_io_setup /* 245 */ | ||
249 | .long sys_io_destroy | ||
250 | .long sys_io_getevents | ||
251 | .long sys_io_submit | ||
252 | .long sys_io_cancel | ||
253 | .long sys_fadvise64 /* 250 */ | ||
254 | .long sys_ni_syscall | ||
255 | .long sys_exit_group | ||
256 | .long sys_lookup_dcookie | ||
257 | .long sys_epoll_create | ||
258 | .long sys_epoll_ctl /* 255 */ | ||
259 | .long sys_epoll_wait | ||
260 | .long sys_remap_file_pages | ||
261 | .long sys_set_tid_address | ||
262 | .long sys_timer_create | ||
263 | .long sys_timer_settime /* 260 */ | ||
264 | .long sys_timer_gettime | ||
265 | .long sys_timer_getoverrun | ||
266 | .long sys_timer_delete | ||
267 | .long sys_clock_settime | ||
268 | .long sys_clock_gettime /* 265 */ | ||
269 | .long sys_clock_getres | ||
270 | .long sys_clock_nanosleep | ||
271 | .long sys_statfs64 | ||
272 | .long sys_fstatfs64 | ||
273 | .long sys_tgkill /* 270 */ | ||
274 | .long sys_utimes | ||
275 | .long sys_fadvise64_64 | ||
276 | .long sys_ni_syscall /* sys_vserver */ | ||
277 | .long sys_mbind | ||
278 | .long sys_get_mempolicy | ||
279 | .long sys_set_mempolicy | ||
280 | .long sys_mq_open | ||
281 | .long sys_mq_unlink | ||
282 | .long sys_mq_timedsend | ||
283 | .long sys_mq_timedreceive /* 280 */ | ||
284 | .long sys_mq_notify | ||
285 | .long sys_mq_getsetattr | ||
286 | .long sys_ni_syscall /* reserved for kexec */ | ||
287 | .long sys_waitid | ||
288 | .long sys_ni_syscall /* 285 */ /* available */ | ||
289 | .long sys_add_key | ||
290 | .long sys_request_key | ||
291 | .long sys_keyctl | ||
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c index 4d75b373f90e..a0dcb7c87c30 100644 --- a/arch/i386/kernel/time.c +++ b/arch/i386/kernel/time.c | |||
@@ -441,7 +441,7 @@ static void __init hpet_time_init(void) | |||
441 | set_normalized_timespec(&wall_to_monotonic, | 441 | set_normalized_timespec(&wall_to_monotonic, |
442 | -xtime.tv_sec, -xtime.tv_nsec); | 442 | -xtime.tv_sec, -xtime.tv_nsec); |
443 | 443 | ||
444 | if (hpet_enable() >= 0) { | 444 | if ((hpet_enable() >= 0) && hpet_use_timer) { |
445 | printk("Using HPET for base-timer\n"); | 445 | printk("Using HPET for base-timer\n"); |
446 | } | 446 | } |
447 | 447 | ||
diff --git a/arch/i386/kernel/time_hpet.c b/arch/i386/kernel/time_hpet.c index 244a31b04be7..10a0cbb88e75 100644 --- a/arch/i386/kernel/time_hpet.c +++ b/arch/i386/kernel/time_hpet.c | |||
@@ -26,6 +26,7 @@ | |||
26 | static unsigned long hpet_period; /* fsecs / HPET clock */ | 26 | static unsigned long hpet_period; /* fsecs / HPET clock */ |
27 | unsigned long hpet_tick; /* hpet clks count per tick */ | 27 | unsigned long hpet_tick; /* hpet clks count per tick */ |
28 | unsigned long hpet_address; /* hpet memory map physical address */ | 28 | unsigned long hpet_address; /* hpet memory map physical address */ |
29 | int hpet_use_timer; | ||
29 | 30 | ||
30 | static int use_hpet; /* can be used for runtime check of hpet */ | 31 | static int use_hpet; /* can be used for runtime check of hpet */ |
31 | static int boot_hpet_disable; /* boottime override for HPET timer */ | 32 | static int boot_hpet_disable; /* boottime override for HPET timer */ |
@@ -73,27 +74,30 @@ static int hpet_timer_stop_set_go(unsigned long tick) | |||
73 | hpet_writel(0, HPET_COUNTER); | 74 | hpet_writel(0, HPET_COUNTER); |
74 | hpet_writel(0, HPET_COUNTER + 4); | 75 | hpet_writel(0, HPET_COUNTER + 4); |
75 | 76 | ||
76 | /* | 77 | if (hpet_use_timer) { |
77 | * Set up timer 0, as periodic with first interrupt to happen at | 78 | /* |
78 | * hpet_tick, and period also hpet_tick. | 79 | * Set up timer 0, as periodic with first interrupt to happen at |
79 | */ | 80 | * hpet_tick, and period also hpet_tick. |
80 | cfg = hpet_readl(HPET_T0_CFG); | 81 | */ |
81 | cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | | 82 | cfg = hpet_readl(HPET_T0_CFG); |
82 | HPET_TN_SETVAL | HPET_TN_32BIT; | 83 | cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | |
83 | hpet_writel(cfg, HPET_T0_CFG); | 84 | HPET_TN_SETVAL | HPET_TN_32BIT; |
84 | 85 | hpet_writel(cfg, HPET_T0_CFG); | |
85 | /* | ||
86 | * The first write after writing TN_SETVAL to the config register sets | ||
87 | * the counter value, the second write sets the threshold. | ||
88 | */ | ||
89 | hpet_writel(tick, HPET_T0_CMP); | ||
90 | hpet_writel(tick, HPET_T0_CMP); | ||
91 | 86 | ||
87 | /* | ||
88 | * The first write after writing TN_SETVAL to the config register sets | ||
89 | * the counter value, the second write sets the threshold. | ||
90 | */ | ||
91 | hpet_writel(tick, HPET_T0_CMP); | ||
92 | hpet_writel(tick, HPET_T0_CMP); | ||
93 | } | ||
92 | /* | 94 | /* |
93 | * Go! | 95 | * Go! |
94 | */ | 96 | */ |
95 | cfg = hpet_readl(HPET_CFG); | 97 | cfg = hpet_readl(HPET_CFG); |
96 | cfg |= HPET_CFG_ENABLE | HPET_CFG_LEGACY; | 98 | if (hpet_use_timer) |
99 | cfg |= HPET_CFG_LEGACY; | ||
100 | cfg |= HPET_CFG_ENABLE; | ||
97 | hpet_writel(cfg, HPET_CFG); | 101 | hpet_writel(cfg, HPET_CFG); |
98 | 102 | ||
99 | return 0; | 103 | return 0; |
@@ -128,12 +132,11 @@ int __init hpet_enable(void) | |||
128 | * However, we can do with one timer otherwise using the | 132 | * However, we can do with one timer otherwise using the |
129 | * the single HPET timer for system time. | 133 | * the single HPET timer for system time. |
130 | */ | 134 | */ |
131 | if ( | ||
132 | #ifdef CONFIG_HPET_EMULATE_RTC | 135 | #ifdef CONFIG_HPET_EMULATE_RTC |
133 | !(id & HPET_ID_NUMBER) || | 136 | if (!(id & HPET_ID_NUMBER)) |
134 | #endif | ||
135 | !(id & HPET_ID_LEGSUP)) | ||
136 | return -1; | 137 | return -1; |
138 | #endif | ||
139 | |||
137 | 140 | ||
138 | hpet_period = hpet_readl(HPET_PERIOD); | 141 | hpet_period = hpet_readl(HPET_PERIOD); |
139 | if ((hpet_period < HPET_MIN_PERIOD) || (hpet_period > HPET_MAX_PERIOD)) | 142 | if ((hpet_period < HPET_MIN_PERIOD) || (hpet_period > HPET_MAX_PERIOD)) |
@@ -152,6 +155,8 @@ int __init hpet_enable(void) | |||
152 | if (hpet_tick_rem > (hpet_period >> 1)) | 155 | if (hpet_tick_rem > (hpet_period >> 1)) |
153 | hpet_tick++; /* rounding the result */ | 156 | hpet_tick++; /* rounding the result */ |
154 | 157 | ||
158 | hpet_use_timer = id & HPET_ID_LEGSUP; | ||
159 | |||
155 | if (hpet_timer_stop_set_go(hpet_tick)) | 160 | if (hpet_timer_stop_set_go(hpet_tick)) |
156 | return -1; | 161 | return -1; |
157 | 162 | ||
@@ -202,7 +207,8 @@ int __init hpet_enable(void) | |||
202 | #endif | 207 | #endif |
203 | 208 | ||
204 | #ifdef CONFIG_X86_LOCAL_APIC | 209 | #ifdef CONFIG_X86_LOCAL_APIC |
205 | wait_timer_tick = wait_hpet_tick; | 210 | if (hpet_use_timer) |
211 | wait_timer_tick = wait_hpet_tick; | ||
206 | #endif | 212 | #endif |
207 | return 0; | 213 | return 0; |
208 | } | 214 | } |
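After this hunk the legacy-replacement route (HPET timer 0 driving IRQ0) is taken only when the hardware advertises it: hpet_use_timer caches the HPET_ID_LEGSUP bit and gates both the timer-0 programming and HPET_CFG_LEGACY, while the main counter is enabled either way. For scale, the hpet_tick value programmed into HPET_T0_CMP works out as in the sketch below; the HZ value and clock rate are assumed here, not taken from the patch:

	/*
	 * hpet_period is femtoseconds per HPET clock, so clocks per kernel tick:
	 *
	 *	hpet_tick = (fs per kernel tick) / hpet_period
	 *
	 * Assuming HZ = 1000 (1 ms = 1e12 fs) and a 14.31818 MHz HPET
	 * (hpet_period ~= 69841279 fs):
	 *
	 *	hpet_tick ~= 1000000000000 / 69841279 ~= 14318
	 */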
diff --git a/arch/i386/kernel/timers/timer_hpet.c b/arch/i386/kernel/timers/timer_hpet.c index 713134e71844..f778f471a09a 100644 --- a/arch/i386/kernel/timers/timer_hpet.c +++ b/arch/i386/kernel/timers/timer_hpet.c | |||
@@ -79,7 +79,7 @@ static unsigned long get_offset_hpet(void) | |||
79 | 79 | ||
80 | eax = hpet_readl(HPET_COUNTER); | 80 | eax = hpet_readl(HPET_COUNTER); |
81 | eax -= hpet_last; /* hpet delta */ | 81 | eax -= hpet_last; /* hpet delta */ |
82 | 82 | eax = min(hpet_tick, eax); | |
83 | /* | 83 | /* |
84 | * Time offset = (hpet delta) * ( usecs per HPET clock ) | 84 | * Time offset = (hpet delta) * ( usecs per HPET clock ) |
85 | * = (hpet delta) * ( usecs per tick / HPET clocks per tick) | 85 | * = (hpet delta) * ( usecs per tick / HPET clocks per tick) |
@@ -105,9 +105,12 @@ static void mark_offset_hpet(void) | |||
105 | last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low; | 105 | last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low; |
106 | rdtsc(last_tsc_low, last_tsc_high); | 106 | rdtsc(last_tsc_low, last_tsc_high); |
107 | 107 | ||
108 | offset = hpet_readl(HPET_T0_CMP) - hpet_tick; | 108 | if (hpet_use_timer) |
109 | if (unlikely(((offset - hpet_last) > hpet_tick) && (hpet_last != 0))) { | 109 | offset = hpet_readl(HPET_T0_CMP) - hpet_tick; |
110 | int lost_ticks = (offset - hpet_last) / hpet_tick; | 110 | else |
111 | offset = hpet_readl(HPET_COUNTER); | ||
112 | if (unlikely(((offset - hpet_last) >= (2*hpet_tick)) && (hpet_last != 0))) { | ||
113 | int lost_ticks = ((offset - hpet_last) / hpet_tick) - 1; | ||
111 | jiffies_64 += lost_ticks; | 114 | jiffies_64 += lost_ticks; |
112 | } | 115 | } |
113 | hpet_last = offset; | 116 | hpet_last = offset; |
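Without the legacy timer the free-running counter is sampled directly, and the lost-tick test changes shape: a delta of at least two ticks means at least one whole tick was missed, and one is subtracted from the quotient because the tick currently being handled is accounted by the normal path. A worked example with invented numbers:

	/*
	 * Worked example: hpet_tick = 14318, hpet_last = 100000,
	 * offset = 145000, so delta = 45000 (about 3.14 ticks).
	 *
	 *	delta >= 2 * hpet_tick (28636)     -> slow path taken
	 *	lost_ticks = delta / hpet_tick - 1 = 3 - 1 = 2
	 *
	 * Two ticks are folded into jiffies_64 here; the third is the one the
	 * regular timer interrupt is about to account for.
	 */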
diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c index a685994e5c8e..7926d967be00 100644 --- a/arch/i386/kernel/timers/timer_tsc.c +++ b/arch/i386/kernel/timers/timer_tsc.c | |||
@@ -477,7 +477,7 @@ static int __init init_tsc(char* override) | |||
477 | if (cpu_has_tsc) { | 477 | if (cpu_has_tsc) { |
478 | unsigned long tsc_quotient; | 478 | unsigned long tsc_quotient; |
479 | #ifdef CONFIG_HPET_TIMER | 479 | #ifdef CONFIG_HPET_TIMER |
480 | if (is_hpet_enabled()){ | 480 | if (is_hpet_enabled() && hpet_use_timer) { |
481 | unsigned long result, remain; | 481 | unsigned long result, remain; |
482 | printk("Using TSC for gettimeofday\n"); | 482 | printk("Using TSC for gettimeofday\n"); |
483 | tsc_quotient = calibrate_tsc_hpet(NULL); | 483 | tsc_quotient = calibrate_tsc_hpet(NULL); |
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c index 6c0e383915b6..00c63419c06f 100644 --- a/arch/i386/kernel/traps.c +++ b/arch/i386/kernel/traps.c | |||
@@ -451,6 +451,7 @@ DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) | |||
451 | DO_ERROR(11, SIGBUS, "segment not present", segment_not_present) | 451 | DO_ERROR(11, SIGBUS, "segment not present", segment_not_present) |
452 | DO_ERROR(12, SIGBUS, "stack segment", stack_segment) | 452 | DO_ERROR(12, SIGBUS, "stack segment", stack_segment) |
453 | DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0) | 453 | DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0) |
454 | DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0) | ||
454 | 455 | ||
455 | fastcall void do_general_protection(struct pt_regs * regs, long error_code) | 456 | fastcall void do_general_protection(struct pt_regs * regs, long error_code) |
456 | { | 457 | { |
@@ -642,16 +643,15 @@ void unset_nmi_callback(void) | |||
642 | } | 643 | } |
643 | 644 | ||
644 | #ifdef CONFIG_KPROBES | 645 | #ifdef CONFIG_KPROBES |
645 | fastcall int do_int3(struct pt_regs *regs, long error_code) | 646 | fastcall void do_int3(struct pt_regs *regs, long error_code) |
646 | { | 647 | { |
647 | if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) | 648 | if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) |
648 | == NOTIFY_STOP) | 649 | == NOTIFY_STOP) |
649 | return 1; | 650 | return; |
650 | /* This is an interrupt gate, because kprobes wants interrupts | 651 | /* This is an interrupt gate, because kprobes wants interrupts |
651 | disabled. Normal trap handlers don't. */ | 652 | disabled. Normal trap handlers don't. */ |
652 | restore_interrupts(regs); | 653 | restore_interrupts(regs); |
653 | do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL); | 654 | do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL); |
654 | return 0; | ||
655 | } | 655 | } |
656 | #endif | 656 | #endif |
657 | 657 | ||
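Editor's note: the do_int3() change drops the int return value: the breakpoint is either consumed by the die-notifier chain (NOTIFY_STOP, typically a kprobe) or handed on to the ordinary SIGTRAP path, so a return code had nothing left to tell the caller. A simplified stand-alone model of that consume-or-fall-through flow; kprobe_handler() and the constants merely stand in for the kernel's notifier API.

#include <stdio.h>

enum { NOTIFY_DONE, NOTIFY_STOP };

/* Stand-in for a registered kprobes handler: claims breakpoints it owns. */
static int kprobe_handler(unsigned long addr)
{
	return (addr == 0x1000) ? NOTIFY_STOP : NOTIFY_DONE;
}

/* Model of the reworked do_int3(): nothing to return, the trap is either
 * swallowed by the notifier or turned into normal SIGTRAP handling. */
static void do_int3_model(unsigned long addr)
{
	if (kprobe_handler(addr) == NOTIFY_STOP)
		return;					/* kprobes consumed the breakpoint */
	printf("deliver SIGTRAP for %#lx\n", addr);	/* regular debug-trap path */
}

int main(void)
{
	do_int3_model(0x1000);	/* owned by the probe: silently consumed */
	do_int3_model(0x2000);	/* ordinary int3: SIGTRAP path */
	return 0;
}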
diff --git a/arch/i386/kernel/vm86.c b/arch/i386/kernel/vm86.c index 2f3d52dacff7..ec0f68ce6886 100644 --- a/arch/i386/kernel/vm86.c +++ b/arch/i386/kernel/vm86.c | |||
@@ -222,7 +222,7 @@ asmlinkage int sys_vm86(struct pt_regs regs) | |||
222 | goto out; | 222 | goto out; |
223 | case VM86_PLUS_INSTALL_CHECK: | 223 | case VM86_PLUS_INSTALL_CHECK: |
224 | /* NOTE: on old vm86 stuff this will return the error | 224 | /* NOTE: on old vm86 stuff this will return the error |
225 | from verify_area(), because the subfunction is | 225 | from access_ok(), because the subfunction is |
226 | interpreted as (invalid) address to vm86_struct. | 226 | interpreted as (invalid) address to vm86_struct. |
227 | So the installation check works. | 227 | So the installation check works. |
228 | */ | 228 | */ |
@@ -294,8 +294,8 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk | |||
294 | */ | 294 | */ |
295 | info->regs32->eax = 0; | 295 | info->regs32->eax = 0; |
296 | tsk->thread.saved_esp0 = tsk->thread.esp0; | 296 | tsk->thread.saved_esp0 = tsk->thread.esp0; |
297 | asm volatile("movl %%fs,%0":"=m" (tsk->thread.saved_fs)); | 297 | asm volatile("mov %%fs,%0":"=m" (tsk->thread.saved_fs)); |
298 | asm volatile("movl %%gs,%0":"=m" (tsk->thread.saved_gs)); | 298 | asm volatile("mov %%gs,%0":"=m" (tsk->thread.saved_gs)); |
299 | 299 | ||
300 | tss = &per_cpu(init_tss, get_cpu()); | 300 | tss = &per_cpu(init_tss, get_cpu()); |
301 | tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0; | 301 | tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0; |
@@ -717,12 +717,12 @@ static irqreturn_t irq_handler(int intno, void *dev_id, struct pt_regs * regs) | |||
717 | irqbits |= irq_bit; | 717 | irqbits |= irq_bit; |
718 | if (vm86_irqs[intno].sig) | 718 | if (vm86_irqs[intno].sig) |
719 | send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1); | 719 | send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1); |
720 | spin_unlock_irqrestore(&irqbits_lock, flags); | ||
721 | /* | 720 | /* |
722 | * IRQ will be re-enabled when user asks for the irq (whether | 721 | * IRQ will be re-enabled when user asks for the irq (whether |
723 | * polling or as a result of the signal) | 722 | * polling or as a result of the signal) |
724 | */ | 723 | */ |
725 | disable_irq(intno); | 724 | disable_irq_nosync(intno); |
725 | spin_unlock_irqrestore(&irqbits_lock, flags); | ||
726 | return IRQ_HANDLED; | 726 | return IRQ_HANDLED; |
727 | 727 | ||
728 | out: | 728 | out: |
@@ -754,17 +754,20 @@ static inline int get_and_reset_irq(int irqnumber) | |||
754 | { | 754 | { |
755 | int bit; | 755 | int bit; |
756 | unsigned long flags; | 756 | unsigned long flags; |
757 | int ret = 0; | ||
757 | 758 | ||
758 | if (invalid_vm86_irq(irqnumber)) return 0; | 759 | if (invalid_vm86_irq(irqnumber)) return 0; |
759 | if (vm86_irqs[irqnumber].tsk != current) return 0; | 760 | if (vm86_irqs[irqnumber].tsk != current) return 0; |
760 | spin_lock_irqsave(&irqbits_lock, flags); | 761 | spin_lock_irqsave(&irqbits_lock, flags); |
761 | bit = irqbits & (1 << irqnumber); | 762 | bit = irqbits & (1 << irqnumber); |
762 | irqbits &= ~bit; | 763 | irqbits &= ~bit; |
764 | if (bit) { | ||
765 | enable_irq(irqnumber); | ||
766 | ret = 1; | ||
767 | } | ||
768 | |||
763 | spin_unlock_irqrestore(&irqbits_lock, flags); | 769 | spin_unlock_irqrestore(&irqbits_lock, flags); |
764 | if (!bit) | 770 | return ret; |
765 | return 0; | ||
766 | enable_irq(irqnumber); | ||
767 | return 1; | ||
768 | } | 771 | } |
769 | 772 | ||
770 | 773 | ||
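Editor's note: the two vm86 hunks work together. The interrupt handler now uses disable_irq_nosync(), since plain disable_irq() waits for the running handler to finish, which in this context is the caller itself, and it does so before dropping irqbits_lock; get_and_reset_irq() in turn tests, clears and re-enables under that same lock so a new interrupt cannot interleave with the bookkeeping. A user-space model of the reworked get_and_reset_irq(), with a pthread mutex standing in for the spinlock and a stub for enable_irq():

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t irqbits_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long irqbits;		/* pending-IRQ bitmap, as in vm86.c */

static void enable_irq_line(int irq)	{ printf("enable_irq(%d)\n", irq); }

/* The pending bit is tested, cleared, and the line re-enabled all under
 * one lock, so nothing can slip in between the clear and the enable. */
static int get_and_reset_irq(int irq)
{
	int ret = 0;

	pthread_mutex_lock(&irqbits_lock);
	if (irqbits & (1UL << irq)) {
		irqbits &= ~(1UL << irq);
		enable_irq_line(irq);
		ret = 1;
	}
	pthread_mutex_unlock(&irqbits_lock);
	return ret;
}

int main(void)
{
	irqbits = 1UL << 5;			/* pretend IRQ 5 fired earlier */
	printf("pending: %d\n", get_and_reset_irq(5));	/* 1, and re-enables the line */
	printf("pending: %d\n", get_and_reset_irq(5));	/* 0, nothing to do */
	return 0;
}

Build with -pthread; the mutex is only a stand-in for the spin_lock_irqsave() pair in the real code.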
diff --git a/arch/i386/oprofile/nmi_timer_int.c b/arch/i386/oprofile/nmi_timer_int.c index b2e462abf337..c58d0c14f274 100644 --- a/arch/i386/oprofile/nmi_timer_int.c +++ b/arch/i386/oprofile/nmi_timer_int.c | |||
@@ -36,7 +36,7 @@ static void timer_stop(void) | |||
36 | { | 36 | { |
37 | enable_timer_nmi_watchdog(); | 37 | enable_timer_nmi_watchdog(); |
38 | unset_nmi_callback(); | 38 | unset_nmi_callback(); |
39 | synchronize_kernel(); | 39 | synchronize_sched(); /* Allow already-started NMIs to complete. */ |
40 | } | 40 | } |
41 | 41 | ||
42 | 42 | ||
diff --git a/arch/i386/pci/irq.c b/arch/i386/pci/irq.c index e07589d04f64..d6598da4b67b 100644 --- a/arch/i386/pci/irq.c +++ b/arch/i386/pci/irq.c | |||
@@ -495,6 +495,8 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route | |||
495 | case PCI_DEVICE_ID_INTEL_ICH6_1: | 495 | case PCI_DEVICE_ID_INTEL_ICH6_1: |
496 | case PCI_DEVICE_ID_INTEL_ICH7_0: | 496 | case PCI_DEVICE_ID_INTEL_ICH7_0: |
497 | case PCI_DEVICE_ID_INTEL_ICH7_1: | 497 | case PCI_DEVICE_ID_INTEL_ICH7_1: |
498 | case PCI_DEVICE_ID_INTEL_ICH7_30: | ||
499 | case PCI_DEVICE_ID_INTEL_ICH7_31: | ||
498 | case PCI_DEVICE_ID_INTEL_ESB2_0: | 500 | case PCI_DEVICE_ID_INTEL_ESB2_0: |
499 | r->name = "PIIX/ICH"; | 501 | r->name = "PIIX/ICH"; |
500 | r->get = pirq_piix_get; | 502 | r->get = pirq_piix_get; |
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 33fcb205fcb7..3ad2c4af099c 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig | |||
@@ -46,6 +46,10 @@ config GENERIC_IOMAP | |||
46 | bool | 46 | bool |
47 | default y | 47 | default y |
48 | 48 | ||
49 | config SCHED_NO_NO_OMIT_FRAME_POINTER | ||
50 | bool | ||
51 | default y | ||
52 | |||
49 | choice | 53 | choice |
50 | prompt "System type" | 54 | prompt "System type" |
51 | default IA64_GENERIC | 55 | default IA64_GENERIC |
@@ -217,6 +221,16 @@ config IA64_SGI_SN_SIM | |||
217 | If you are compiling a kernel that will run under SGI's IA-64 | 221 | If you are compiling a kernel that will run under SGI's IA-64 |
218 | simulator (Medusa) then say Y, otherwise say N. | 222 | simulator (Medusa) then say Y, otherwise say N. |
219 | 223 | ||
224 | config IA64_SGI_SN_XP | ||
225 | tristate "Support communication between SGI SSIs" | ||
226 | depends on MSPEC | ||
227 | help | ||
228 | An SGI machine can be divided into multiple Single System | ||
229 | Images which act independently of each other and have | ||
230 | hardware based memory protection from the others. Enabling | ||
231 | this feature will allow for direct communication between SSIs | ||
232 | based on a network adapter and DMA messaging. | ||
233 | |||
220 | config FORCE_MAX_ZONEORDER | 234 | config FORCE_MAX_ZONEORDER |
221 | int | 235 | int |
222 | default "18" | 236 | default "18" |
@@ -261,6 +275,15 @@ config HOTPLUG_CPU | |||
261 | can be controlled through /sys/devices/system/cpu/cpu#. | 275 | can be controlled through /sys/devices/system/cpu/cpu#. |
262 | Say N if you want to disable CPU hotplug. | 276 | Say N if you want to disable CPU hotplug. |
263 | 277 | ||
278 | config SCHED_SMT | ||
279 | bool "SMT scheduler support" | ||
280 | depends on SMP | ||
281 | default n | ||
282 | help | ||
283 | Improves the CPU scheduler's decision making when dealing with | ||
284 | Intel IA64 chips with MultiThreading at a cost of slightly increased | ||
285 | overhead in some places. If unsure say N here. | ||
286 | |||
264 | config PREEMPT | 287 | config PREEMPT |
265 | bool "Preemptible Kernel" | 288 | bool "Preemptible Kernel" |
266 | help | 289 | help |
@@ -329,7 +352,7 @@ menu "Power management and ACPI" | |||
329 | 352 | ||
330 | config PM | 353 | config PM |
331 | bool "Power Management support" | 354 | bool "Power Management support" |
332 | depends on IA64_GENERIC || IA64_DIG || IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB | 355 | depends on !IA64_HP_SIM |
333 | default y | 356 | default y |
334 | help | 357 | help |
335 | "Power Management" means that parts of your computer are shut | 358 | "Power Management" means that parts of your computer are shut |
diff --git a/arch/ia64/configs/sn2_defconfig b/arch/ia64/configs/sn2_defconfig index bfeb952fe8e2..6ff7107fee4d 100644 --- a/arch/ia64/configs/sn2_defconfig +++ b/arch/ia64/configs/sn2_defconfig | |||
@@ -574,6 +574,8 @@ CONFIG_SERIAL_NONSTANDARD=y | |||
574 | # CONFIG_N_HDLC is not set | 574 | # CONFIG_N_HDLC is not set |
575 | # CONFIG_STALDRV is not set | 575 | # CONFIG_STALDRV is not set |
576 | CONFIG_SGI_SNSC=y | 576 | CONFIG_SGI_SNSC=y |
577 | CONFIG_SGI_TIOCX=y | ||
578 | CONFIG_SGI_MBCS=m | ||
577 | 579 | ||
578 | # | 580 | # |
579 | # Serial drivers | 581 | # Serial drivers |
diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig index 99830e8fc9ba..9086b789f6ac 100644 --- a/arch/ia64/configs/tiger_defconfig +++ b/arch/ia64/configs/tiger_defconfig | |||
@@ -1,7 +1,7 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.11-rc2 | 3 | # Linux kernel version: 2.6.12-rc3 |
4 | # Sat Jan 22 11:17:02 2005 | 4 | # Tue May 3 15:55:04 2005 |
5 | # | 5 | # |
6 | 6 | ||
7 | # | 7 | # |
@@ -10,6 +10,7 @@ | |||
10 | CONFIG_EXPERIMENTAL=y | 10 | CONFIG_EXPERIMENTAL=y |
11 | CONFIG_CLEAN_COMPILE=y | 11 | CONFIG_CLEAN_COMPILE=y |
12 | CONFIG_LOCK_KERNEL=y | 12 | CONFIG_LOCK_KERNEL=y |
13 | CONFIG_INIT_ENV_ARG_LIMIT=32 | ||
13 | 14 | ||
14 | # | 15 | # |
15 | # General setup | 16 | # General setup |
@@ -21,24 +22,27 @@ CONFIG_POSIX_MQUEUE=y | |||
21 | # CONFIG_BSD_PROCESS_ACCT is not set | 22 | # CONFIG_BSD_PROCESS_ACCT is not set |
22 | CONFIG_SYSCTL=y | 23 | CONFIG_SYSCTL=y |
23 | # CONFIG_AUDIT is not set | 24 | # CONFIG_AUDIT is not set |
24 | CONFIG_LOG_BUF_SHIFT=20 | ||
25 | CONFIG_HOTPLUG=y | 25 | CONFIG_HOTPLUG=y |
26 | CONFIG_KOBJECT_UEVENT=y | 26 | CONFIG_KOBJECT_UEVENT=y |
27 | CONFIG_IKCONFIG=y | 27 | CONFIG_IKCONFIG=y |
28 | CONFIG_IKCONFIG_PROC=y | 28 | CONFIG_IKCONFIG_PROC=y |
29 | # CONFIG_CPUSETS is not set | ||
29 | # CONFIG_EMBEDDED is not set | 30 | # CONFIG_EMBEDDED is not set |
30 | CONFIG_KALLSYMS=y | 31 | CONFIG_KALLSYMS=y |
31 | CONFIG_KALLSYMS_ALL=y | 32 | CONFIG_KALLSYMS_ALL=y |
32 | # CONFIG_KALLSYMS_EXTRA_PASS is not set | 33 | # CONFIG_KALLSYMS_EXTRA_PASS is not set |
34 | CONFIG_PRINTK=y | ||
35 | CONFIG_BUG=y | ||
36 | CONFIG_BASE_FULL=y | ||
33 | CONFIG_FUTEX=y | 37 | CONFIG_FUTEX=y |
34 | CONFIG_EPOLL=y | 38 | CONFIG_EPOLL=y |
35 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | ||
36 | CONFIG_SHMEM=y | 39 | CONFIG_SHMEM=y |
37 | CONFIG_CC_ALIGN_FUNCTIONS=0 | 40 | CONFIG_CC_ALIGN_FUNCTIONS=0 |
38 | CONFIG_CC_ALIGN_LABELS=0 | 41 | CONFIG_CC_ALIGN_LABELS=0 |
39 | CONFIG_CC_ALIGN_LOOPS=0 | 42 | CONFIG_CC_ALIGN_LOOPS=0 |
40 | CONFIG_CC_ALIGN_JUMPS=0 | 43 | CONFIG_CC_ALIGN_JUMPS=0 |
41 | # CONFIG_TINY_SHMEM is not set | 44 | # CONFIG_TINY_SHMEM is not set |
45 | CONFIG_BASE_SMALL=0 | ||
42 | 46 | ||
43 | # | 47 | # |
44 | # Loadable module support | 48 | # Loadable module support |
@@ -85,6 +89,7 @@ CONFIG_FORCE_MAX_ZONEORDER=18 | |||
85 | CONFIG_SMP=y | 89 | CONFIG_SMP=y |
86 | CONFIG_NR_CPUS=4 | 90 | CONFIG_NR_CPUS=4 |
87 | CONFIG_HOTPLUG_CPU=y | 91 | CONFIG_HOTPLUG_CPU=y |
92 | # CONFIG_SCHED_SMT is not set | ||
88 | # CONFIG_PREEMPT is not set | 93 | # CONFIG_PREEMPT is not set |
89 | CONFIG_HAVE_DEC_LOCK=y | 94 | CONFIG_HAVE_DEC_LOCK=y |
90 | CONFIG_IA32_SUPPORT=y | 95 | CONFIG_IA32_SUPPORT=y |
@@ -135,6 +140,7 @@ CONFIG_PCI_DOMAINS=y | |||
135 | # CONFIG_PCI_MSI is not set | 140 | # CONFIG_PCI_MSI is not set |
136 | CONFIG_PCI_LEGACY_PROC=y | 141 | CONFIG_PCI_LEGACY_PROC=y |
137 | CONFIG_PCI_NAMES=y | 142 | CONFIG_PCI_NAMES=y |
143 | # CONFIG_PCI_DEBUG is not set | ||
138 | 144 | ||
139 | # | 145 | # |
140 | # PCI Hotplug Support | 146 | # PCI Hotplug Support |
@@ -152,10 +158,6 @@ CONFIG_HOTPLUG_PCI_ACPI=m | |||
152 | # CONFIG_PCCARD is not set | 158 | # CONFIG_PCCARD is not set |
153 | 159 | ||
154 | # | 160 | # |
155 | # PC-card bridges | ||
156 | # | ||
157 | |||
158 | # | ||
159 | # Device Drivers | 161 | # Device Drivers |
160 | # | 162 | # |
161 | 163 | ||
@@ -195,9 +197,10 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m | |||
195 | CONFIG_BLK_DEV_NBD=m | 197 | CONFIG_BLK_DEV_NBD=m |
196 | # CONFIG_BLK_DEV_SX8 is not set | 198 | # CONFIG_BLK_DEV_SX8 is not set |
197 | # CONFIG_BLK_DEV_UB is not set | 199 | # CONFIG_BLK_DEV_UB is not set |
198 | CONFIG_BLK_DEV_RAM=m | 200 | CONFIG_BLK_DEV_RAM=y |
199 | CONFIG_BLK_DEV_RAM_COUNT=16 | 201 | CONFIG_BLK_DEV_RAM_COUNT=16 |
200 | CONFIG_BLK_DEV_RAM_SIZE=4096 | 202 | CONFIG_BLK_DEV_RAM_SIZE=4096 |
203 | CONFIG_BLK_DEV_INITRD=y | ||
201 | CONFIG_INITRAMFS_SOURCE="" | 204 | CONFIG_INITRAMFS_SOURCE="" |
202 | # CONFIG_CDROM_PKTCDVD is not set | 205 | # CONFIG_CDROM_PKTCDVD is not set |
203 | 206 | ||
@@ -313,7 +316,6 @@ CONFIG_SCSI_FC_ATTRS=y | |||
313 | # CONFIG_SCSI_BUSLOGIC is not set | 316 | # CONFIG_SCSI_BUSLOGIC is not set |
314 | # CONFIG_SCSI_DMX3191D is not set | 317 | # CONFIG_SCSI_DMX3191D is not set |
315 | # CONFIG_SCSI_EATA is not set | 318 | # CONFIG_SCSI_EATA is not set |
316 | # CONFIG_SCSI_EATA_PIO is not set | ||
317 | # CONFIG_SCSI_FUTURE_DOMAIN is not set | 319 | # CONFIG_SCSI_FUTURE_DOMAIN is not set |
318 | # CONFIG_SCSI_GDTH is not set | 320 | # CONFIG_SCSI_GDTH is not set |
319 | # CONFIG_SCSI_IPS is not set | 321 | # CONFIG_SCSI_IPS is not set |
@@ -325,7 +327,6 @@ CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16 | |||
325 | CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64 | 327 | CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64 |
326 | # CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set | 328 | # CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set |
327 | # CONFIG_SCSI_IPR is not set | 329 | # CONFIG_SCSI_IPR is not set |
328 | # CONFIG_SCSI_QLOGIC_ISP is not set | ||
329 | CONFIG_SCSI_QLOGIC_FC=y | 330 | CONFIG_SCSI_QLOGIC_FC=y |
330 | # CONFIG_SCSI_QLOGIC_FC_FIRMWARE is not set | 331 | # CONFIG_SCSI_QLOGIC_FC_FIRMWARE is not set |
331 | CONFIG_SCSI_QLOGIC_1280=y | 332 | CONFIG_SCSI_QLOGIC_1280=y |
@@ -336,6 +337,7 @@ CONFIG_SCSI_QLA22XX=m | |||
336 | CONFIG_SCSI_QLA2300=m | 337 | CONFIG_SCSI_QLA2300=m |
337 | CONFIG_SCSI_QLA2322=m | 338 | CONFIG_SCSI_QLA2322=m |
338 | # CONFIG_SCSI_QLA6312 is not set | 339 | # CONFIG_SCSI_QLA6312 is not set |
340 | # CONFIG_SCSI_LPFC is not set | ||
339 | # CONFIG_SCSI_DC395x is not set | 341 | # CONFIG_SCSI_DC395x is not set |
340 | # CONFIG_SCSI_DC390T is not set | 342 | # CONFIG_SCSI_DC390T is not set |
341 | # CONFIG_SCSI_DEBUG is not set | 343 | # CONFIG_SCSI_DEBUG is not set |
@@ -358,6 +360,7 @@ CONFIG_DM_CRYPT=m | |||
358 | CONFIG_DM_SNAPSHOT=m | 360 | CONFIG_DM_SNAPSHOT=m |
359 | CONFIG_DM_MIRROR=m | 361 | CONFIG_DM_MIRROR=m |
360 | CONFIG_DM_ZERO=m | 362 | CONFIG_DM_ZERO=m |
363 | # CONFIG_DM_MULTIPATH is not set | ||
361 | 364 | ||
362 | # | 365 | # |
363 | # Fusion MPT device support | 366 | # Fusion MPT device support |
@@ -386,7 +389,6 @@ CONFIG_NET=y | |||
386 | # | 389 | # |
387 | CONFIG_PACKET=y | 390 | CONFIG_PACKET=y |
388 | # CONFIG_PACKET_MMAP is not set | 391 | # CONFIG_PACKET_MMAP is not set |
389 | CONFIG_NETLINK_DEV=y | ||
390 | CONFIG_UNIX=y | 392 | CONFIG_UNIX=y |
391 | # CONFIG_NET_KEY is not set | 393 | # CONFIG_NET_KEY is not set |
392 | CONFIG_INET=y | 394 | CONFIG_INET=y |
@@ -446,7 +448,6 @@ CONFIG_DUMMY=m | |||
446 | # CONFIG_BONDING is not set | 448 | # CONFIG_BONDING is not set |
447 | # CONFIG_EQUALIZER is not set | 449 | # CONFIG_EQUALIZER is not set |
448 | # CONFIG_TUN is not set | 450 | # CONFIG_TUN is not set |
449 | # CONFIG_ETHERTAP is not set | ||
450 | 451 | ||
451 | # | 452 | # |
452 | # ARCnet devices | 453 | # ARCnet devices |
@@ -484,7 +485,6 @@ CONFIG_NET_PCI=y | |||
484 | # CONFIG_DGRS is not set | 485 | # CONFIG_DGRS is not set |
485 | CONFIG_EEPRO100=m | 486 | CONFIG_EEPRO100=m |
486 | CONFIG_E100=m | 487 | CONFIG_E100=m |
487 | # CONFIG_E100_NAPI is not set | ||
488 | # CONFIG_FEALNX is not set | 488 | # CONFIG_FEALNX is not set |
489 | # CONFIG_NATSEMI is not set | 489 | # CONFIG_NATSEMI is not set |
490 | # CONFIG_NE2K_PCI is not set | 490 | # CONFIG_NE2K_PCI is not set |
@@ -566,25 +566,6 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 | |||
566 | # CONFIG_INPUT_EVBUG is not set | 566 | # CONFIG_INPUT_EVBUG is not set |
567 | 567 | ||
568 | # | 568 | # |
569 | # Input I/O drivers | ||
570 | # | ||
571 | CONFIG_GAMEPORT=m | ||
572 | CONFIG_SOUND_GAMEPORT=m | ||
573 | # CONFIG_GAMEPORT_NS558 is not set | ||
574 | # CONFIG_GAMEPORT_L4 is not set | ||
575 | # CONFIG_GAMEPORT_EMU10K1 is not set | ||
576 | # CONFIG_GAMEPORT_VORTEX is not set | ||
577 | # CONFIG_GAMEPORT_FM801 is not set | ||
578 | # CONFIG_GAMEPORT_CS461X is not set | ||
579 | CONFIG_SERIO=y | ||
580 | CONFIG_SERIO_I8042=y | ||
581 | # CONFIG_SERIO_SERPORT is not set | ||
582 | # CONFIG_SERIO_CT82C710 is not set | ||
583 | # CONFIG_SERIO_PCIPS2 is not set | ||
584 | CONFIG_SERIO_LIBPS2=y | ||
585 | # CONFIG_SERIO_RAW is not set | ||
586 | |||
587 | # | ||
588 | # Input Device Drivers | 569 | # Input Device Drivers |
589 | # | 570 | # |
590 | CONFIG_INPUT_KEYBOARD=y | 571 | CONFIG_INPUT_KEYBOARD=y |
@@ -602,6 +583,24 @@ CONFIG_MOUSE_PS2=y | |||
602 | # CONFIG_INPUT_MISC is not set | 583 | # CONFIG_INPUT_MISC is not set |
603 | 584 | ||
604 | # | 585 | # |
586 | # Hardware I/O ports | ||
587 | # | ||
588 | CONFIG_SERIO=y | ||
589 | CONFIG_SERIO_I8042=y | ||
590 | # CONFIG_SERIO_SERPORT is not set | ||
591 | # CONFIG_SERIO_PCIPS2 is not set | ||
592 | CONFIG_SERIO_LIBPS2=y | ||
593 | # CONFIG_SERIO_RAW is not set | ||
594 | CONFIG_GAMEPORT=m | ||
595 | # CONFIG_GAMEPORT_NS558 is not set | ||
596 | # CONFIG_GAMEPORT_L4 is not set | ||
597 | # CONFIG_GAMEPORT_EMU10K1 is not set | ||
598 | # CONFIG_GAMEPORT_VORTEX is not set | ||
599 | # CONFIG_GAMEPORT_FM801 is not set | ||
600 | # CONFIG_GAMEPORT_CS461X is not set | ||
601 | CONFIG_SOUND_GAMEPORT=m | ||
602 | |||
603 | # | ||
605 | # Character devices | 604 | # Character devices |
606 | # | 605 | # |
607 | CONFIG_VT=y | 606 | CONFIG_VT=y |
@@ -615,6 +614,8 @@ CONFIG_SERIAL_NONSTANDARD=y | |||
615 | # CONFIG_SYNCLINK is not set | 614 | # CONFIG_SYNCLINK is not set |
616 | # CONFIG_SYNCLINKMP is not set | 615 | # CONFIG_SYNCLINKMP is not set |
617 | # CONFIG_N_HDLC is not set | 616 | # CONFIG_N_HDLC is not set |
617 | # CONFIG_SPECIALIX is not set | ||
618 | # CONFIG_SX is not set | ||
618 | # CONFIG_STALDRV is not set | 619 | # CONFIG_STALDRV is not set |
619 | 620 | ||
620 | # | 621 | # |
@@ -635,6 +636,7 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y | |||
635 | # | 636 | # |
636 | CONFIG_SERIAL_CORE=y | 637 | CONFIG_SERIAL_CORE=y |
637 | CONFIG_SERIAL_CORE_CONSOLE=y | 638 | CONFIG_SERIAL_CORE_CONSOLE=y |
639 | # CONFIG_SERIAL_JSM is not set | ||
638 | CONFIG_UNIX98_PTYS=y | 640 | CONFIG_UNIX98_PTYS=y |
639 | CONFIG_LEGACY_PTYS=y | 641 | CONFIG_LEGACY_PTYS=y |
640 | CONFIG_LEGACY_PTY_COUNT=256 | 642 | CONFIG_LEGACY_PTY_COUNT=256 |
@@ -670,6 +672,12 @@ CONFIG_HPET=y | |||
670 | # CONFIG_HPET_RTC_IRQ is not set | 672 | # CONFIG_HPET_RTC_IRQ is not set |
671 | CONFIG_HPET_MMAP=y | 673 | CONFIG_HPET_MMAP=y |
672 | CONFIG_MAX_RAW_DEVS=256 | 674 | CONFIG_MAX_RAW_DEVS=256 |
675 | # CONFIG_HANGCHECK_TIMER is not set | ||
676 | |||
677 | # | ||
678 | # TPM devices | ||
679 | # | ||
680 | # CONFIG_TCG_TPM is not set | ||
673 | 681 | ||
674 | # | 682 | # |
675 | # I2C support | 683 | # I2C support |
@@ -705,7 +713,6 @@ CONFIG_MAX_RAW_DEVS=256 | |||
705 | # | 713 | # |
706 | CONFIG_VGA_CONSOLE=y | 714 | CONFIG_VGA_CONSOLE=y |
707 | CONFIG_DUMMY_CONSOLE=y | 715 | CONFIG_DUMMY_CONSOLE=y |
708 | # CONFIG_BACKLIGHT_LCD_SUPPORT is not set | ||
709 | 716 | ||
710 | # | 717 | # |
711 | # Sound | 718 | # Sound |
@@ -715,6 +722,8 @@ CONFIG_DUMMY_CONSOLE=y | |||
715 | # | 722 | # |
716 | # USB support | 723 | # USB support |
717 | # | 724 | # |
725 | CONFIG_USB_ARCH_HAS_HCD=y | ||
726 | CONFIG_USB_ARCH_HAS_OHCI=y | ||
718 | CONFIG_USB=y | 727 | CONFIG_USB=y |
719 | # CONFIG_USB_DEBUG is not set | 728 | # CONFIG_USB_DEBUG is not set |
720 | 729 | ||
@@ -726,8 +735,6 @@ CONFIG_USB_DEVICEFS=y | |||
726 | # CONFIG_USB_DYNAMIC_MINORS is not set | 735 | # CONFIG_USB_DYNAMIC_MINORS is not set |
727 | # CONFIG_USB_SUSPEND is not set | 736 | # CONFIG_USB_SUSPEND is not set |
728 | # CONFIG_USB_OTG is not set | 737 | # CONFIG_USB_OTG is not set |
729 | CONFIG_USB_ARCH_HAS_HCD=y | ||
730 | CONFIG_USB_ARCH_HAS_OHCI=y | ||
731 | 738 | ||
732 | # | 739 | # |
733 | # USB Host Controller Drivers | 740 | # USB Host Controller Drivers |
@@ -736,6 +743,8 @@ CONFIG_USB_EHCI_HCD=m | |||
736 | # CONFIG_USB_EHCI_SPLIT_ISO is not set | 743 | # CONFIG_USB_EHCI_SPLIT_ISO is not set |
737 | # CONFIG_USB_EHCI_ROOT_HUB_TT is not set | 744 | # CONFIG_USB_EHCI_ROOT_HUB_TT is not set |
738 | CONFIG_USB_OHCI_HCD=m | 745 | CONFIG_USB_OHCI_HCD=m |
746 | # CONFIG_USB_OHCI_BIG_ENDIAN is not set | ||
747 | CONFIG_USB_OHCI_LITTLE_ENDIAN=y | ||
739 | CONFIG_USB_UHCI_HCD=y | 748 | CONFIG_USB_UHCI_HCD=y |
740 | # CONFIG_USB_SL811_HCD is not set | 749 | # CONFIG_USB_SL811_HCD is not set |
741 | 750 | ||
@@ -751,12 +760,11 @@ CONFIG_USB_UHCI_HCD=y | |||
751 | # | 760 | # |
752 | CONFIG_USB_STORAGE=m | 761 | CONFIG_USB_STORAGE=m |
753 | # CONFIG_USB_STORAGE_DEBUG is not set | 762 | # CONFIG_USB_STORAGE_DEBUG is not set |
754 | # CONFIG_USB_STORAGE_RW_DETECT is not set | ||
755 | # CONFIG_USB_STORAGE_DATAFAB is not set | 763 | # CONFIG_USB_STORAGE_DATAFAB is not set |
756 | # CONFIG_USB_STORAGE_FREECOM is not set | 764 | # CONFIG_USB_STORAGE_FREECOM is not set |
757 | # CONFIG_USB_STORAGE_ISD200 is not set | 765 | # CONFIG_USB_STORAGE_ISD200 is not set |
758 | # CONFIG_USB_STORAGE_DPCM is not set | 766 | # CONFIG_USB_STORAGE_DPCM is not set |
759 | # CONFIG_USB_STORAGE_HP8200e is not set | 767 | # CONFIG_USB_STORAGE_USBAT is not set |
760 | # CONFIG_USB_STORAGE_SDDR09 is not set | 768 | # CONFIG_USB_STORAGE_SDDR09 is not set |
761 | # CONFIG_USB_STORAGE_SDDR55 is not set | 769 | # CONFIG_USB_STORAGE_SDDR55 is not set |
762 | # CONFIG_USB_STORAGE_JUMPSHOT is not set | 770 | # CONFIG_USB_STORAGE_JUMPSHOT is not set |
@@ -800,6 +808,7 @@ CONFIG_USB_HIDINPUT=y | |||
800 | # CONFIG_USB_PEGASUS is not set | 808 | # CONFIG_USB_PEGASUS is not set |
801 | # CONFIG_USB_RTL8150 is not set | 809 | # CONFIG_USB_RTL8150 is not set |
802 | # CONFIG_USB_USBNET is not set | 810 | # CONFIG_USB_USBNET is not set |
811 | # CONFIG_USB_MON is not set | ||
803 | 812 | ||
804 | # | 813 | # |
805 | # USB port drivers | 814 | # USB port drivers |
@@ -824,6 +833,7 @@ CONFIG_USB_HIDINPUT=y | |||
824 | # CONFIG_USB_PHIDGETKIT is not set | 833 | # CONFIG_USB_PHIDGETKIT is not set |
825 | # CONFIG_USB_PHIDGETSERVO is not set | 834 | # CONFIG_USB_PHIDGETSERVO is not set |
826 | # CONFIG_USB_IDMOUSE is not set | 835 | # CONFIG_USB_IDMOUSE is not set |
836 | # CONFIG_USB_SISUSBVGA is not set | ||
827 | # CONFIG_USB_TEST is not set | 837 | # CONFIG_USB_TEST is not set |
828 | 838 | ||
829 | # | 839 | # |
@@ -867,7 +877,12 @@ CONFIG_REISERFS_FS_POSIX_ACL=y | |||
867 | CONFIG_REISERFS_FS_SECURITY=y | 877 | CONFIG_REISERFS_FS_SECURITY=y |
868 | # CONFIG_JFS_FS is not set | 878 | # CONFIG_JFS_FS is not set |
869 | CONFIG_FS_POSIX_ACL=y | 879 | CONFIG_FS_POSIX_ACL=y |
880 | |||
881 | # | ||
882 | # XFS support | ||
883 | # | ||
870 | CONFIG_XFS_FS=y | 884 | CONFIG_XFS_FS=y |
885 | CONFIG_XFS_EXPORT=y | ||
871 | # CONFIG_XFS_RT is not set | 886 | # CONFIG_XFS_RT is not set |
872 | # CONFIG_XFS_QUOTA is not set | 887 | # CONFIG_XFS_QUOTA is not set |
873 | # CONFIG_XFS_SECURITY is not set | 888 | # CONFIG_XFS_SECURITY is not set |
@@ -945,7 +960,7 @@ CONFIG_NFSD_V4=y | |||
945 | CONFIG_NFSD_TCP=y | 960 | CONFIG_NFSD_TCP=y |
946 | CONFIG_LOCKD=m | 961 | CONFIG_LOCKD=m |
947 | CONFIG_LOCKD_V4=y | 962 | CONFIG_LOCKD_V4=y |
948 | CONFIG_EXPORTFS=m | 963 | CONFIG_EXPORTFS=y |
949 | CONFIG_SUNRPC=m | 964 | CONFIG_SUNRPC=m |
950 | CONFIG_SUNRPC_GSS=m | 965 | CONFIG_SUNRPC_GSS=m |
951 | CONFIG_RPCSEC_GSS_KRB5=m | 966 | CONFIG_RPCSEC_GSS_KRB5=m |
@@ -1042,8 +1057,10 @@ CONFIG_GENERIC_IRQ_PROBE=y | |||
1042 | # | 1057 | # |
1043 | # Kernel hacking | 1058 | # Kernel hacking |
1044 | # | 1059 | # |
1060 | # CONFIG_PRINTK_TIME is not set | ||
1045 | CONFIG_DEBUG_KERNEL=y | 1061 | CONFIG_DEBUG_KERNEL=y |
1046 | CONFIG_MAGIC_SYSRQ=y | 1062 | CONFIG_MAGIC_SYSRQ=y |
1063 | CONFIG_LOG_BUF_SHIFT=20 | ||
1047 | # CONFIG_SCHEDSTATS is not set | 1064 | # CONFIG_SCHEDSTATS is not set |
1048 | # CONFIG_DEBUG_SLAB is not set | 1065 | # CONFIG_DEBUG_SLAB is not set |
1049 | # CONFIG_DEBUG_SPINLOCK is not set | 1066 | # CONFIG_DEBUG_SPINLOCK is not set |
@@ -1077,6 +1094,7 @@ CONFIG_CRYPTO_MD5=m | |||
1077 | # CONFIG_CRYPTO_SHA256 is not set | 1094 | # CONFIG_CRYPTO_SHA256 is not set |
1078 | # CONFIG_CRYPTO_SHA512 is not set | 1095 | # CONFIG_CRYPTO_SHA512 is not set |
1079 | # CONFIG_CRYPTO_WP512 is not set | 1096 | # CONFIG_CRYPTO_WP512 is not set |
1097 | # CONFIG_CRYPTO_TGR192 is not set | ||
1080 | CONFIG_CRYPTO_DES=m | 1098 | CONFIG_CRYPTO_DES=m |
1081 | # CONFIG_CRYPTO_BLOWFISH is not set | 1099 | # CONFIG_CRYPTO_BLOWFISH is not set |
1082 | # CONFIG_CRYPTO_TWOFISH is not set | 1100 | # CONFIG_CRYPTO_TWOFISH is not set |
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index 017c9ab5fc1b..b8db6e3e5e81 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c | |||
@@ -1,9 +1,9 @@ | |||
1 | /* | 1 | /* |
2 | ** IA64 System Bus Adapter (SBA) I/O MMU manager | 2 | ** IA64 System Bus Adapter (SBA) I/O MMU manager |
3 | ** | 3 | ** |
4 | ** (c) Copyright 2002-2004 Alex Williamson | 4 | ** (c) Copyright 2002-2005 Alex Williamson |
5 | ** (c) Copyright 2002-2003 Grant Grundler | 5 | ** (c) Copyright 2002-2003 Grant Grundler |
6 | ** (c) Copyright 2002-2004 Hewlett-Packard Company | 6 | ** (c) Copyright 2002-2005 Hewlett-Packard Company |
7 | ** | 7 | ** |
8 | ** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code) | 8 | ** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code) |
9 | ** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code) | 9 | ** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code) |
@@ -459,21 +459,32 @@ get_iovp_order (unsigned long size) | |||
459 | * sba_search_bitmap - find free space in IO PDIR resource bitmap | 459 | * sba_search_bitmap - find free space in IO PDIR resource bitmap |
460 | * @ioc: IO MMU structure which owns the pdir we are interested in. | 460 | * @ioc: IO MMU structure which owns the pdir we are interested in. |
461 | * @bits_wanted: number of entries we need. | 461 | * @bits_wanted: number of entries we need. |
462 | * @use_hint: use res_hint to indicate where to start looking | ||
462 | * | 463 | * |
463 | * Find consecutive free bits in resource bitmap. | 464 | * Find consecutive free bits in resource bitmap. |
464 | * Each bit represents one entry in the IO Pdir. | 465 | * Each bit represents one entry in the IO Pdir. |
465 | * Cool perf optimization: search for log2(size) bits at a time. | 466 | * Cool perf optimization: search for log2(size) bits at a time. |
466 | */ | 467 | */ |
467 | static SBA_INLINE unsigned long | 468 | static SBA_INLINE unsigned long |
468 | sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted) | 469 | sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint) |
469 | { | 470 | { |
470 | unsigned long *res_ptr = ioc->res_hint; | 471 | unsigned long *res_ptr; |
471 | unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]); | 472 | unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]); |
472 | unsigned long pide = ~0UL; | 473 | unsigned long flags, pide = ~0UL; |
473 | 474 | ||
474 | ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0); | 475 | ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0); |
475 | ASSERT(res_ptr < res_end); | 476 | ASSERT(res_ptr < res_end); |
476 | 477 | ||
478 | spin_lock_irqsave(&ioc->res_lock, flags); | ||
479 | |||
480 | /* Allow caller to force a search through the entire resource space */ | ||
481 | if (likely(use_hint)) { | ||
482 | res_ptr = ioc->res_hint; | ||
483 | } else { | ||
484 | res_ptr = (ulong *)ioc->res_map; | ||
485 | ioc->res_bitshift = 0; | ||
486 | } | ||
487 | |||
477 | /* | 488 | /* |
478 | * N.B. REO/Grande defect AR2305 can cause TLB fetch timeouts | 489 | * N.B. REO/Grande defect AR2305 can cause TLB fetch timeouts |
479 | * if a TLB entry is purged while in use. sba_mark_invalid() | 490 | * if a TLB entry is purged while in use. sba_mark_invalid() |
@@ -570,10 +581,12 @@ not_found: | |||
570 | prefetch(ioc->res_map); | 581 | prefetch(ioc->res_map); |
571 | ioc->res_hint = (unsigned long *) ioc->res_map; | 582 | ioc->res_hint = (unsigned long *) ioc->res_map; |
572 | ioc->res_bitshift = 0; | 583 | ioc->res_bitshift = 0; |
584 | spin_unlock_irqrestore(&ioc->res_lock, flags); | ||
573 | return (pide); | 585 | return (pide); |
574 | 586 | ||
575 | found_it: | 587 | found_it: |
576 | ioc->res_hint = res_ptr; | 588 | ioc->res_hint = res_ptr; |
589 | spin_unlock_irqrestore(&ioc->res_lock, flags); | ||
577 | return (pide); | 590 | return (pide); |
578 | } | 591 | } |
579 | 592 | ||
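Editor's note: sba_search_bitmap() now takes a use_hint argument and does its own res_lock locking, so callers can ask either for a scan starting at the rotating hint or for a scan of the whole bitmap from bit 0. A stand-alone model of the hint-based search over a small resource map; it uses a plain linear bit scan rather than the log2-sized stride of the real code, and omits the locking the real function now takes itself.

#include <stdio.h>
#include <stdint.h>

#define MAP_BITS 64U

static uint64_t res_map;	/* one bit per IO PDIR entry, 1 = in use */
static unsigned res_hint;	/* rotating start position for the next search */

/* Find 'want' consecutive free bits, starting at the hint or at bit 0.
 * Returns the first bit of the range, or MAP_BITS when nothing fits. */
static unsigned search_bitmap(unsigned want, int use_hint)
{
	unsigned run = 0;

	for (unsigned i = use_hint ? res_hint : 0; i < MAP_BITS; i++) {
		if (res_map & ((uint64_t)1 << i)) {
			run = 0;
			continue;
		}
		if (++run == want) {
			unsigned first = i - want + 1;
			for (unsigned j = first; j <= i; j++)
				res_map |= (uint64_t)1 << j;	/* mark allocated */
			res_hint = i + 1;
			return first;
		}
	}
	return MAP_BITS;
}

int main(void)
{
	res_map  = 0x0fffffff;	/* bits 0..27 already allocated */
	res_hint = 40;

	printf("hinted search: first bit %u\n", search_bitmap(4, 1));	/* starts at bit 40 */
	printf("full search:   first bit %u\n", search_bitmap(4, 0));	/* rescans from bit 0 */
	return 0;
}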
@@ -594,36 +607,36 @@ sba_alloc_range(struct ioc *ioc, size_t size) | |||
594 | unsigned long itc_start; | 607 | unsigned long itc_start; |
595 | #endif | 608 | #endif |
596 | unsigned long pide; | 609 | unsigned long pide; |
597 | unsigned long flags; | ||
598 | 610 | ||
599 | ASSERT(pages_needed); | 611 | ASSERT(pages_needed); |
600 | ASSERT(0 == (size & ~iovp_mask)); | 612 | ASSERT(0 == (size & ~iovp_mask)); |
601 | 613 | ||
602 | spin_lock_irqsave(&ioc->res_lock, flags); | ||
603 | |||
604 | #ifdef PDIR_SEARCH_TIMING | 614 | #ifdef PDIR_SEARCH_TIMING |
605 | itc_start = ia64_get_itc(); | 615 | itc_start = ia64_get_itc(); |
606 | #endif | 616 | #endif |
607 | /* | 617 | /* |
608 | ** "seek and ye shall find"...praying never hurts either... | 618 | ** "seek and ye shall find"...praying never hurts either... |
609 | */ | 619 | */ |
610 | pide = sba_search_bitmap(ioc, pages_needed); | 620 | pide = sba_search_bitmap(ioc, pages_needed, 1); |
611 | if (unlikely(pide >= (ioc->res_size << 3))) { | 621 | if (unlikely(pide >= (ioc->res_size << 3))) { |
612 | pide = sba_search_bitmap(ioc, pages_needed); | 622 | pide = sba_search_bitmap(ioc, pages_needed, 0); |
613 | if (unlikely(pide >= (ioc->res_size << 3))) { | 623 | if (unlikely(pide >= (ioc->res_size << 3))) { |
614 | #if DELAYED_RESOURCE_CNT > 0 | 624 | #if DELAYED_RESOURCE_CNT > 0 |
625 | unsigned long flags; | ||
626 | |||
615 | /* | 627 | /* |
616 | ** With delayed resource freeing, we can give this one more shot. We're | 628 | ** With delayed resource freeing, we can give this one more shot. We're |
617 | ** getting close to being in trouble here, so do what we can to make this | 629 | ** getting close to being in trouble here, so do what we can to make this |
618 | ** one count. | 630 | ** one count. |
619 | */ | 631 | */ |
620 | spin_lock(&ioc->saved_lock); | 632 | spin_lock_irqsave(&ioc->saved_lock, flags); |
621 | if (ioc->saved_cnt > 0) { | 633 | if (ioc->saved_cnt > 0) { |
622 | struct sba_dma_pair *d; | 634 | struct sba_dma_pair *d; |
623 | int cnt = ioc->saved_cnt; | 635 | int cnt = ioc->saved_cnt; |
624 | 636 | ||
625 | d = &(ioc->saved[ioc->saved_cnt]); | 637 | d = &(ioc->saved[ioc->saved_cnt - 1]); |
626 | 638 | ||
639 | spin_lock(&ioc->res_lock); | ||
627 | while (cnt--) { | 640 | while (cnt--) { |
628 | sba_mark_invalid(ioc, d->iova, d->size); | 641 | sba_mark_invalid(ioc, d->iova, d->size); |
629 | sba_free_range(ioc, d->iova, d->size); | 642 | sba_free_range(ioc, d->iova, d->size); |
@@ -631,10 +644,11 @@ sba_alloc_range(struct ioc *ioc, size_t size) | |||
631 | } | 644 | } |
632 | ioc->saved_cnt = 0; | 645 | ioc->saved_cnt = 0; |
633 | READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */ | 646 | READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */ |
647 | spin_unlock(&ioc->res_lock); | ||
634 | } | 648 | } |
635 | spin_unlock(&ioc->saved_lock); | 649 | spin_unlock_irqrestore(&ioc->saved_lock, flags); |
636 | 650 | ||
637 | pide = sba_search_bitmap(ioc, pages_needed); | 651 | pide = sba_search_bitmap(ioc, pages_needed, 0); |
638 | if (unlikely(pide >= (ioc->res_size << 3))) | 652 | if (unlikely(pide >= (ioc->res_size << 3))) |
639 | panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n", | 653 | panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n", |
640 | ioc->ioc_hpa); | 654 | ioc->ioc_hpa); |
@@ -664,8 +678,6 @@ sba_alloc_range(struct ioc *ioc, size_t size) | |||
664 | (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map), | 678 | (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map), |
665 | ioc->res_bitshift ); | 679 | ioc->res_bitshift ); |
666 | 680 | ||
667 | spin_unlock_irqrestore(&ioc->res_lock, flags); | ||
668 | |||
669 | return (pide); | 681 | return (pide); |
670 | } | 682 | } |
671 | 683 | ||
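Editor's note: with the search function doing its own locking, sba_alloc_range() becomes a plain retry cascade: hinted search, then a full scan, then (when delayed frees are enabled) flush the saved entries back into the bitmap and scan once more, and only then give up. A sketch of that control flow with the helpers reduced to stubs so the ordering is visible; the names parallel the functions above but this is not the driver code.

#include <stdio.h>

#define NO_SPACE (~0u)

static unsigned attempts;

/* Stub search: pretend the bitmap is full until the delayed frees have
 * been reclaimed, then hand out entry 128. */
static unsigned search_bitmap(unsigned pages, int use_hint)
{
	(void)pages; (void)use_hint;
	return (++attempts < 3) ? NO_SPACE : 128;
}

static void flush_delayed_frees(void)
{
	puts("reclaiming delayed frees");
}

/* Allocation policy of the reworked sba_alloc_range():
 * 1) hinted search, 2) full scan, 3) reclaim delayed frees and scan
 * again, 4) only then give up (the real code panics at that point). */
static unsigned alloc_range(unsigned pages)
{
	unsigned pide = search_bitmap(pages, 1);

	if (pide == NO_SPACE)
		pide = search_bitmap(pages, 0);
	if (pide == NO_SPACE) {
		flush_delayed_frees();
		pide = search_bitmap(pages, 0);
	}
	if (pide == NO_SPACE)
		fprintf(stderr, "IO MMU out of mapping resources\n");
	return pide;
}

int main(void)
{
	printf("allocated pide %u\n", alloc_range(4));
	return 0;
}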
@@ -950,6 +962,30 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir) | |||
950 | return SBA_IOVA(ioc, iovp, offset); | 962 | return SBA_IOVA(ioc, iovp, offset); |
951 | } | 963 | } |
952 | 964 | ||
965 | #ifdef ENABLE_MARK_CLEAN | ||
966 | static SBA_INLINE void | ||
967 | sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size) | ||
968 | { | ||
969 | u32 iovp = (u32) SBA_IOVP(ioc,iova); | ||
970 | int off = PDIR_INDEX(iovp); | ||
971 | void *addr; | ||
972 | |||
973 | if (size <= iovp_size) { | ||
974 | addr = phys_to_virt(ioc->pdir_base[off] & | ||
975 | ~0xE000000000000FFFULL); | ||
976 | mark_clean(addr, size); | ||
977 | } else { | ||
978 | do { | ||
979 | addr = phys_to_virt(ioc->pdir_base[off] & | ||
980 | ~0xE000000000000FFFULL); | ||
981 | mark_clean(addr, min(size, iovp_size)); | ||
982 | off++; | ||
983 | size -= iovp_size; | ||
984 | } while (size > 0); | ||
985 | } | ||
986 | } | ||
987 | #endif | ||
988 | |||
953 | /** | 989 | /** |
954 | * sba_unmap_single - unmap one IOVA and free resources | 990 | * sba_unmap_single - unmap one IOVA and free resources |
955 | * @dev: instance of PCI owned by the driver that's asking. | 991 | * @dev: instance of PCI owned by the driver that's asking. |
@@ -995,6 +1031,10 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir) | |||
995 | size += offset; | 1031 | size += offset; |
996 | size = ROUNDUP(size, iovp_size); | 1032 | size = ROUNDUP(size, iovp_size); |
997 | 1033 | ||
1034 | #ifdef ENABLE_MARK_CLEAN | ||
1035 | if (dir == DMA_FROM_DEVICE) | ||
1036 | sba_mark_clean(ioc, iova, size); | ||
1037 | #endif | ||
998 | 1038 | ||
999 | #if DELAYED_RESOURCE_CNT > 0 | 1039 | #if DELAYED_RESOURCE_CNT > 0 |
1000 | spin_lock_irqsave(&ioc->saved_lock, flags); | 1040 | spin_lock_irqsave(&ioc->saved_lock, flags); |
@@ -1021,30 +1061,6 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir) | |||
1021 | READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */ | 1061 | READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */ |
1022 | spin_unlock_irqrestore(&ioc->res_lock, flags); | 1062 | spin_unlock_irqrestore(&ioc->res_lock, flags); |
1023 | #endif /* DELAYED_RESOURCE_CNT == 0 */ | 1063 | #endif /* DELAYED_RESOURCE_CNT == 0 */ |
1024 | #ifdef ENABLE_MARK_CLEAN | ||
1025 | if (dir == DMA_FROM_DEVICE) { | ||
1026 | u32 iovp = (u32) SBA_IOVP(ioc,iova); | ||
1027 | int off = PDIR_INDEX(iovp); | ||
1028 | void *addr; | ||
1029 | |||
1030 | if (size <= iovp_size) { | ||
1031 | addr = phys_to_virt(ioc->pdir_base[off] & | ||
1032 | ~0xE000000000000FFFULL); | ||
1033 | mark_clean(addr, size); | ||
1034 | } else { | ||
1035 | size_t byte_cnt = size; | ||
1036 | |||
1037 | do { | ||
1038 | addr = phys_to_virt(ioc->pdir_base[off] & | ||
1039 | ~0xE000000000000FFFULL); | ||
1040 | mark_clean(addr, min(byte_cnt, iovp_size)); | ||
1041 | off++; | ||
1042 | byte_cnt -= iovp_size; | ||
1043 | |||
1044 | } while (byte_cnt > 0); | ||
1045 | } | ||
1046 | } | ||
1047 | #endif | ||
1048 | } | 1064 | } |
1049 | 1065 | ||
1050 | 1066 | ||
@@ -1928,43 +1944,17 @@ sba_connect_bus(struct pci_bus *bus) | |||
1928 | static void __init | 1944 | static void __init |
1929 | sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle) | 1945 | sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle) |
1930 | { | 1946 | { |
1931 | struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; | ||
1932 | union acpi_object *obj; | ||
1933 | acpi_handle phandle; | ||
1934 | unsigned int node; | 1947 | unsigned int node; |
1948 | int pxm; | ||
1935 | 1949 | ||
1936 | ioc->node = MAX_NUMNODES; | 1950 | ioc->node = MAX_NUMNODES; |
1937 | 1951 | ||
1938 | /* | 1952 | pxm = acpi_get_pxm(handle); |
1939 | * Check for a _PXM on this node first. We don't typically see | ||
1940 | * one here, so we'll end up getting it from the parent. | ||
1941 | */ | ||
1942 | if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PXM", NULL, &buffer))) { | ||
1943 | if (ACPI_FAILURE(acpi_get_parent(handle, &phandle))) | ||
1944 | return; | ||
1945 | |||
1946 | /* Reset the acpi buffer */ | ||
1947 | buffer.length = ACPI_ALLOCATE_BUFFER; | ||
1948 | buffer.pointer = NULL; | ||
1949 | 1953 | ||
1950 | if (ACPI_FAILURE(acpi_evaluate_object(phandle, "_PXM", NULL, | 1954 | if (pxm < 0) |
1951 | &buffer))) | ||
1952 | return; | ||
1953 | } | ||
1954 | |||
1955 | if (!buffer.length || !buffer.pointer) | ||
1956 | return; | 1955 | return; |
1957 | 1956 | ||
1958 | obj = buffer.pointer; | 1957 | node = pxm_to_nid_map[pxm]; |
1959 | |||
1960 | if (obj->type != ACPI_TYPE_INTEGER || | ||
1961 | obj->integer.value >= MAX_PXM_DOMAINS) { | ||
1962 | acpi_os_free(buffer.pointer); | ||
1963 | return; | ||
1964 | } | ||
1965 | |||
1966 | node = pxm_to_nid_map[obj->integer.value]; | ||
1967 | acpi_os_free(buffer.pointer); | ||
1968 | 1958 | ||
1969 | if (node >= MAX_NUMNODES || !node_online(node)) | 1959 | if (node >= MAX_NUMNODES || !node_online(node)) |
1970 | return; | 1960 | return; |
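Editor's note: instead of evaluating _PXM by hand (falling back to the parent handle, managing the acpi_buffer and type-checking the returned object), sba_map_ioc_to_node() now asks acpi_get_pxm() for the proximity domain and only keeps the pxm_to_nid_map[] translation and the node-online check. A reduced stand-alone model of that lookup step; the table contents are invented and fake_acpi_get_pxm() is a stub, not the ACPI helper.

#include <stdio.h>

#define MAX_PXM_DOMAINS 4
#define NO_NODE        -1

/* Invented proximity-domain -> node table; the real one is built from SRAT. */
static const int pxm_to_nid_map[MAX_PXM_DOMAINS] = { 0, 1, -1, 2 };

/* Stub standing in for acpi_get_pxm(handle): returns the _PXM value found
 * on the device or one of its ancestors, or a negative value if none. */
static int fake_acpi_get_pxm(int handle)
{
	return (handle == 42) ? 3 : -1;
}

static int ioc_node(int handle)
{
	int pxm = fake_acpi_get_pxm(handle);

	if (pxm < 0 || pxm >= MAX_PXM_DOMAINS)
		return NO_NODE;			/* no _PXM anywhere above us */
	return pxm_to_nid_map[pxm];
}

int main(void)
{
	printf("node for handle 42: %d\n", ioc_node(42));	/* pxm 3 -> node 2 */
	printf("node for handle  7: %d\n", ioc_node(7));	/* no _PXM -> -1 */
	return 0;
}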
diff --git a/arch/ia64/ia32/ia32_signal.c b/arch/ia64/ia32/ia32_signal.c index 19b02adce68c..ebb89be2aa2d 100644 --- a/arch/ia64/ia32/ia32_signal.c +++ b/arch/ia64/ia32/ia32_signal.c | |||
@@ -460,10 +460,9 @@ __ia32_rt_sigsuspend (compat_sigset_t *sset, unsigned int sigsetsize, struct sig | |||
460 | sigset_t oldset, set; | 460 | sigset_t oldset, set; |
461 | 461 | ||
462 | scr->scratch_unat = 0; /* avoid leaking kernel bits to user level */ | 462 | scr->scratch_unat = 0; /* avoid leaking kernel bits to user level */ |
463 | memset(&set, 0, sizeof(&set)); | 463 | memset(&set, 0, sizeof(set)); |
464 | 464 | ||
465 | if (memcpy(&set.sig, &sset->sig, sigsetsize)) | 465 | memcpy(&set.sig, &sset->sig, sigsetsize); |
466 | return -EFAULT; | ||
467 | 466 | ||
468 | sigdelsetmask(&set, ~_BLOCKABLE); | 467 | sigdelsetmask(&set, ~_BLOCKABLE); |
469 | 468 | ||
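Editor's note: the ia32_signal.c hunk fixes two classic slips in one place. sizeof(&set) is the size of a pointer, not of the sigset, so most of set was left uninitialised; and memcpy() returns its destination pointer rather than an error code, so the old "if (memcpy(...)) return -EFAULT;" test was always true. A tiny demonstration of both, with a modelled sigset type:

#include <stdio.h>
#include <string.h>

struct sigset_model { unsigned long sig[2]; };

int main(void)
{
	struct sigset_model set;
	const unsigned long src[2] = { 0xdeadbeefUL, 0x12345678UL };

	/* Slip 1: sizeof(&set) is the size of a pointer, not of the struct. */
	printf("sizeof(set)=%zu  sizeof(&set)=%zu\n", sizeof(set), sizeof(&set));

	/* Slip 2: memcpy() returns its destination pointer, never an error,
	 * so using it as a failure test fires on every single call. */
	printf("memcpy returned %s\n",
	       memcpy(set.sig, src, sizeof(src)) ? "non-NULL (always)" : "NULL");
	return 0;
}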
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index a8e99c56a768..72dfd9e7de0f 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c | |||
@@ -779,7 +779,7 @@ acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret) | |||
779 | union acpi_object *obj; | 779 | union acpi_object *obj; |
780 | struct acpi_table_iosapic *iosapic; | 780 | struct acpi_table_iosapic *iosapic; |
781 | unsigned int gsi_base; | 781 | unsigned int gsi_base; |
782 | int node; | 782 | int pxm, node; |
783 | 783 | ||
784 | /* Only care about objects w/ a method that returns the MADT */ | 784 | /* Only care about objects w/ a method that returns the MADT */ |
785 | if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) | 785 | if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) |
@@ -805,29 +805,16 @@ acpi_map_iosapic (acpi_handle handle, u32 depth, void *context, void **ret) | |||
805 | gsi_base = iosapic->global_irq_base; | 805 | gsi_base = iosapic->global_irq_base; |
806 | 806 | ||
807 | acpi_os_free(buffer.pointer); | 807 | acpi_os_free(buffer.pointer); |
808 | buffer.length = ACPI_ALLOCATE_BUFFER; | ||
809 | buffer.pointer = NULL; | ||
810 | 808 | ||
811 | /* | 809 | /* |
812 | * OK, it's an IOSAPIC MADT entry, look for a _PXM method to tell | 810 | * OK, it's an IOSAPIC MADT entry, look for a _PXM value to tell |
813 | * us which node to associate this with. | 811 | * us which node to associate this with. |
814 | */ | 812 | */ |
815 | if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PXM", NULL, &buffer))) | 813 | pxm = acpi_get_pxm(handle); |
816 | return AE_OK; | 814 | if (pxm < 0) |
817 | |||
818 | if (!buffer.length || !buffer.pointer) | ||
819 | return AE_OK; | ||
820 | |||
821 | obj = buffer.pointer; | ||
822 | |||
823 | if (obj->type != ACPI_TYPE_INTEGER || | ||
824 | obj->integer.value >= MAX_PXM_DOMAINS) { | ||
825 | acpi_os_free(buffer.pointer); | ||
826 | return AE_OK; | 815 | return AE_OK; |
827 | } | ||
828 | 816 | ||
829 | node = pxm_to_nid_map[obj->integer.value]; | 817 | node = pxm_to_nid_map[pxm]; |
830 | acpi_os_free(buffer.pointer); | ||
831 | 818 | ||
832 | if (node >= MAX_NUMNODES || !node_online(node) || | 819 | if (node >= MAX_NUMNODES || !node_online(node) || |
833 | cpus_empty(node_to_cpumask(node))) | 820 | cpus_empty(node_to_cpumask(node))) |
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index 0272c010a3ba..81c45d447394 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S | |||
@@ -728,12 +728,8 @@ ENTRY(ia64_leave_syscall) | |||
728 | mov f8=f0 // clear f8 | 728 | mov f8=f0 // clear f8 |
729 | ;; | 729 | ;; |
730 | ld8 r30=[r2],16 // M0|1 load cr.ifs | 730 | ld8 r30=[r2],16 // M0|1 load cr.ifs |
731 | mov.m ar.ssd=r0 // M2 clear ar.ssd | ||
732 | cmp.eq p9,p0=r0,r0 // set p9 to indicate that we should restore cr.ifs | ||
733 | ;; | ||
734 | ld8 r25=[r3],16 // M0|1 load ar.unat | 731 | ld8 r25=[r3],16 // M0|1 load ar.unat |
735 | mov.m ar.csd=r0 // M2 clear ar.csd | 732 | cmp.eq p9,p0=r0,r0 // set p9 to indicate that we should restore cr.ifs |
736 | mov r22=r0 // clear r22 | ||
737 | ;; | 733 | ;; |
738 | ld8 r26=[r2],PT(B0)-PT(AR_PFS) // M0|1 load ar.pfs | 734 | ld8 r26=[r2],PT(B0)-PT(AR_PFS) // M0|1 load ar.pfs |
739 | (pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled | 735 | (pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled |
@@ -756,11 +752,15 @@ ENTRY(ia64_leave_syscall) | |||
756 | mov f7=f0 // clear f7 | 752 | mov f7=f0 // clear f7 |
757 | ;; | 753 | ;; |
758 | ld8.fill r12=[r2] // restore r12 (sp) | 754 | ld8.fill r12=[r2] // restore r12 (sp) |
755 | mov.m ar.ssd=r0 // M2 clear ar.ssd | ||
756 | mov r22=r0 // clear r22 | ||
757 | |||
759 | ld8.fill r15=[r3] // restore r15 | 758 | ld8.fill r15=[r3] // restore r15 |
759 | (pUStk) st1 [r14]=r17 | ||
760 | addl r3=THIS_CPU(ia64_phys_stacked_size_p8),r0 | 760 | addl r3=THIS_CPU(ia64_phys_stacked_size_p8),r0 |
761 | ;; | 761 | ;; |
762 | (pUStk) ld4 r3=[r3] // r3 = cpu_data->phys_stacked_size_p8 | 762 | (pUStk) ld4 r17=[r3] // r17 = cpu_data->phys_stacked_size_p8 |
763 | (pUStk) st1 [r14]=r17 | 763 | mov.m ar.csd=r0 // M2 clear ar.csd |
764 | mov b6=r18 // I0 restore b6 | 764 | mov b6=r18 // I0 restore b6 |
765 | ;; | 765 | ;; |
766 | mov r14=r0 // clear r14 | 766 | mov r14=r0 // clear r14 |
@@ -782,7 +782,7 @@ GLOBAL_ENTRY(ia64_ret_from_ia32_execve) | |||
782 | st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit | 782 | st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit |
783 | .mem.offset 8,0 | 783 | .mem.offset 8,0 |
784 | st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit | 784 | st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit |
785 | END(ia64_ret_from_ia32_execve_syscall) | 785 | END(ia64_ret_from_ia32_execve) |
786 | // fall through | 786 | // fall through |
787 | #endif /* CONFIG_IA32_SUPPORT */ | 787 | #endif /* CONFIG_IA32_SUPPORT */ |
788 | GLOBAL_ENTRY(ia64_leave_kernel) | 788 | GLOBAL_ENTRY(ia64_leave_kernel) |
@@ -1417,7 +1417,7 @@ sys_call_table: | |||
1417 | data8 sys_msgrcv | 1417 | data8 sys_msgrcv |
1418 | data8 sys_msgctl | 1418 | data8 sys_msgctl |
1419 | data8 sys_shmget | 1419 | data8 sys_shmget |
1420 | data8 ia64_shmat | 1420 | data8 sys_shmat |
1421 | data8 sys_shmdt // 1115 | 1421 | data8 sys_shmdt // 1115 |
1422 | data8 sys_shmctl | 1422 | data8 sys_shmctl |
1423 | data8 sys_syslog | 1423 | data8 sys_syslog |
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S index 0d8650f7fce7..4f3cdef75797 100644 --- a/arch/ia64/kernel/fsys.S +++ b/arch/ia64/kernel/fsys.S | |||
@@ -611,8 +611,10 @@ GLOBAL_ENTRY(fsys_bubble_down) | |||
611 | movl r2=ia64_ret_from_syscall | 611 | movl r2=ia64_ret_from_syscall |
612 | ;; | 612 | ;; |
613 | mov rp=r2 // set the real return addr | 613 | mov rp=r2 // set the real return addr |
614 | tbit.z p8,p0=r3,TIF_SYSCALL_TRACE | 614 | and r3=_TIF_SYSCALL_TRACEAUDIT,r3 |
615 | ;; | 615 | ;; |
616 | cmp.eq p8,p0=r3,r0 | ||
617 | |||
616 | (p10) br.cond.spnt.many ia64_ret_from_syscall // p10==true means out registers are more than 8 | 618 | (p10) br.cond.spnt.many ia64_ret_from_syscall // p10==true means out registers are more than 8 |
617 | (p8) br.call.sptk.many b6=b6 // ignore this return addr | 619 | (p8) br.call.sptk.many b6=b6 // ignore this return addr |
618 | br.cond.sptk ia64_trace_syscall | 620 | br.cond.sptk ia64_trace_syscall |
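Editor's note: the fsys_bubble_down change stops testing the single TIF_SYSCALL_TRACE bit with tbit and instead masks the flag word with _TIF_SYSCALL_TRACEAUDIT, so either tracing or auditing diverts the fast path into ia64_trace_syscall. The same test expressed in C, with invented bit positions; only the mask-versus-single-bit logic is the point.

#include <stdio.h>

/* Invented bit positions, used purely for illustration. */
#define TIF_SYSCALL_TRACE	0
#define TIF_SYSCALL_AUDIT	1
#define _TIF_SYSCALL_TRACE	(1u << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT	(1u << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEAUDIT	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)

/* Old test: only the trace bit.  New test: any bit in the combined mask. */
static int old_needs_slow_path(unsigned flags)
{
	return (flags & _TIF_SYSCALL_TRACE) != 0;
}

static int new_needs_slow_path(unsigned flags)
{
	return (flags & _TIF_SYSCALL_TRACEAUDIT) != 0;
}

int main(void)
{
	unsigned audited_only = _TIF_SYSCALL_AUDIT;

	/* The old test misses an audit-only task; the new one catches it. */
	printf("old: %d  new: %d\n",
	       old_needs_slow_path(audited_only),
	       new_needs_slow_path(audited_only));
	return 0;
}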
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S index 105c7fec8c6d..8d3a9291b47f 100644 --- a/arch/ia64/kernel/head.S +++ b/arch/ia64/kernel/head.S | |||
@@ -15,6 +15,8 @@ | |||
15 | * Copyright (C) 1999 Don Dugger <Don.Dugger@intel.com> | 15 | * Copyright (C) 1999 Don Dugger <Don.Dugger@intel.com> |
16 | * Copyright (C) 2002 Fenghua Yu <fenghua.yu@intel.com> | 16 | * Copyright (C) 2002 Fenghua Yu <fenghua.yu@intel.com> |
17 | * -Optimize __ia64_save_fpu() and __ia64_load_fpu() for Itanium 2. | 17 | * -Optimize __ia64_save_fpu() and __ia64_load_fpu() for Itanium 2. |
18 | * Copyright (C) 2004 Ashok Raj <ashok.raj@intel.com> | ||
19 | * Support for CPU Hotplug | ||
18 | */ | 20 | */ |
19 | 21 | ||
20 | #include <linux/config.h> | 22 | #include <linux/config.h> |
@@ -29,6 +31,139 @@ | |||
29 | #include <asm/processor.h> | 31 | #include <asm/processor.h> |
30 | #include <asm/ptrace.h> | 32 | #include <asm/ptrace.h> |
31 | #include <asm/system.h> | 33 | #include <asm/system.h> |
34 | #include <asm/mca_asm.h> | ||
35 | |||
36 | #ifdef CONFIG_HOTPLUG_CPU | ||
37 | #define SAL_PSR_BITS_TO_SET \ | ||
38 | (IA64_PSR_AC | IA64_PSR_BN | IA64_PSR_MFH | IA64_PSR_MFL) | ||
39 | |||
40 | #define SAVE_FROM_REG(src, ptr, dest) \ | ||
41 | mov dest=src;; \ | ||
42 | st8 [ptr]=dest,0x08 | ||
43 | |||
44 | #define RESTORE_REG(reg, ptr, _tmp) \ | ||
45 | ld8 _tmp=[ptr],0x08;; \ | ||
46 | mov reg=_tmp | ||
47 | |||
48 | #define SAVE_BREAK_REGS(ptr, _idx, _breg, _dest)\ | ||
49 | mov ar.lc=IA64_NUM_DBG_REGS-1;; \ | ||
50 | mov _idx=0;; \ | ||
51 | 1: \ | ||
52 | SAVE_FROM_REG(_breg[_idx], ptr, _dest);; \ | ||
53 | add _idx=1,_idx;; \ | ||
54 | br.cloop.sptk.many 1b | ||
55 | |||
56 | #define RESTORE_BREAK_REGS(ptr, _idx, _breg, _tmp, _lbl)\ | ||
57 | mov ar.lc=IA64_NUM_DBG_REGS-1;; \ | ||
58 | mov _idx=0;; \ | ||
59 | _lbl: RESTORE_REG(_breg[_idx], ptr, _tmp);; \ | ||
60 | add _idx=1, _idx;; \ | ||
61 | br.cloop.sptk.many _lbl | ||
62 | |||
63 | #define SAVE_ONE_RR(num, _reg, _tmp) \ | ||
64 | movl _tmp=(num<<61);; \ | ||
65 | mov _reg=rr[_tmp] | ||
66 | |||
67 | #define SAVE_REGION_REGS(_tmp, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7) \ | ||
68 | SAVE_ONE_RR(0,_r0, _tmp);; \ | ||
69 | SAVE_ONE_RR(1,_r1, _tmp);; \ | ||
70 | SAVE_ONE_RR(2,_r2, _tmp);; \ | ||
71 | SAVE_ONE_RR(3,_r3, _tmp);; \ | ||
72 | SAVE_ONE_RR(4,_r4, _tmp);; \ | ||
73 | SAVE_ONE_RR(5,_r5, _tmp);; \ | ||
74 | SAVE_ONE_RR(6,_r6, _tmp);; \ | ||
75 | SAVE_ONE_RR(7,_r7, _tmp);; | ||
76 | |||
77 | #define STORE_REGION_REGS(ptr, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7) \ | ||
78 | st8 [ptr]=_r0, 8;; \ | ||
79 | st8 [ptr]=_r1, 8;; \ | ||
80 | st8 [ptr]=_r2, 8;; \ | ||
81 | st8 [ptr]=_r3, 8;; \ | ||
82 | st8 [ptr]=_r4, 8;; \ | ||
83 | st8 [ptr]=_r5, 8;; \ | ||
84 | st8 [ptr]=_r6, 8;; \ | ||
85 | st8 [ptr]=_r7, 8;; | ||
86 | |||
87 | #define RESTORE_REGION_REGS(ptr, _idx1, _idx2, _tmp) \ | ||
88 | mov ar.lc=0x08-1;; \ | ||
89 | movl _idx1=0x00;; \ | ||
90 | RestRR: \ | ||
91 | dep.z _idx2=_idx1,61,3;; \ | ||
92 | ld8 _tmp=[ptr],8;; \ | ||
93 | mov rr[_idx2]=_tmp;; \ | ||
94 | srlz.d;; \ | ||
95 | add _idx1=1,_idx1;; \ | ||
96 | br.cloop.sptk.few RestRR | ||
97 | |||
98 | #define SET_AREA_FOR_BOOTING_CPU(reg1, reg2) \ | ||
99 | movl reg1=sal_state_for_booting_cpu;; \ | ||
100 | ld8 reg2=[reg1];; | ||
101 | |||
102 | /* | ||
103 | * Adjust region registers saved before starting to save | ||
104 | * break regs and rest of the states that need to be preserved. | ||
105 | */ | ||
106 | #define SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(_reg1,_reg2,_pred) \ | ||
107 | SAVE_FROM_REG(b0,_reg1,_reg2);; \ | ||
108 | SAVE_FROM_REG(b1,_reg1,_reg2);; \ | ||
109 | SAVE_FROM_REG(b2,_reg1,_reg2);; \ | ||
110 | SAVE_FROM_REG(b3,_reg1,_reg2);; \ | ||
111 | SAVE_FROM_REG(b4,_reg1,_reg2);; \ | ||
112 | SAVE_FROM_REG(b5,_reg1,_reg2);; \ | ||
113 | st8 [_reg1]=r1,0x08;; \ | ||
114 | st8 [_reg1]=r12,0x08;; \ | ||
115 | st8 [_reg1]=r13,0x08;; \ | ||
116 | SAVE_FROM_REG(ar.fpsr,_reg1,_reg2);; \ | ||
117 | SAVE_FROM_REG(ar.pfs,_reg1,_reg2);; \ | ||
118 | SAVE_FROM_REG(ar.rnat,_reg1,_reg2);; \ | ||
119 | SAVE_FROM_REG(ar.unat,_reg1,_reg2);; \ | ||
120 | SAVE_FROM_REG(ar.bspstore,_reg1,_reg2);; \ | ||
121 | SAVE_FROM_REG(cr.dcr,_reg1,_reg2);; \ | ||
122 | SAVE_FROM_REG(cr.iva,_reg1,_reg2);; \ | ||
123 | SAVE_FROM_REG(cr.pta,_reg1,_reg2);; \ | ||
124 | SAVE_FROM_REG(cr.itv,_reg1,_reg2);; \ | ||
125 | SAVE_FROM_REG(cr.pmv,_reg1,_reg2);; \ | ||
126 | SAVE_FROM_REG(cr.cmcv,_reg1,_reg2);; \ | ||
127 | SAVE_FROM_REG(cr.lrr0,_reg1,_reg2);; \ | ||
128 | SAVE_FROM_REG(cr.lrr1,_reg1,_reg2);; \ | ||
129 | st8 [_reg1]=r4,0x08;; \ | ||
130 | st8 [_reg1]=r5,0x08;; \ | ||
131 | st8 [_reg1]=r6,0x08;; \ | ||
132 | st8 [_reg1]=r7,0x08;; \ | ||
133 | st8 [_reg1]=_pred,0x08;; \ | ||
134 | SAVE_FROM_REG(ar.lc, _reg1, _reg2);; \ | ||
135 | stf.spill.nta [_reg1]=f2,16;; \ | ||
136 | stf.spill.nta [_reg1]=f3,16;; \ | ||
137 | stf.spill.nta [_reg1]=f4,16;; \ | ||
138 | stf.spill.nta [_reg1]=f5,16;; \ | ||
139 | stf.spill.nta [_reg1]=f16,16;; \ | ||
140 | stf.spill.nta [_reg1]=f17,16;; \ | ||
141 | stf.spill.nta [_reg1]=f18,16;; \ | ||
142 | stf.spill.nta [_reg1]=f19,16;; \ | ||
143 | stf.spill.nta [_reg1]=f20,16;; \ | ||
144 | stf.spill.nta [_reg1]=f21,16;; \ | ||
145 | stf.spill.nta [_reg1]=f22,16;; \ | ||
146 | stf.spill.nta [_reg1]=f23,16;; \ | ||
147 | stf.spill.nta [_reg1]=f24,16;; \ | ||
148 | stf.spill.nta [_reg1]=f25,16;; \ | ||
149 | stf.spill.nta [_reg1]=f26,16;; \ | ||
150 | stf.spill.nta [_reg1]=f27,16;; \ | ||
151 | stf.spill.nta [_reg1]=f28,16;; \ | ||
152 | stf.spill.nta [_reg1]=f29,16;; \ | ||
153 | stf.spill.nta [_reg1]=f30,16;; \ | ||
154 | stf.spill.nta [_reg1]=f31,16;; | ||
155 | |||
156 | #else | ||
157 | #define SET_AREA_FOR_BOOTING_CPU(a1, a2) | ||
158 | #define SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(a1,a2, a3) | ||
159 | #define SAVE_REGION_REGS(_tmp, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7) | ||
160 | #define STORE_REGION_REGS(ptr, _r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7) | ||
161 | #endif | ||
162 | |||
163 | #define SET_ONE_RR(num, pgsize, _tmp1, _tmp2, vhpt) \ | ||
164 | movl _tmp1=(num << 61);; \ | ||
165 | mov _tmp2=((ia64_rid(IA64_REGION_ID_KERNEL, (num<<61)) << 8) | (pgsize << 2) | vhpt);; \ | ||
166 | mov rr[_tmp1]=_tmp2 | ||
32 | 167 | ||
33 | .section __special_page_section,"ax" | 168 | .section __special_page_section,"ax" |
34 | 169 | ||
@@ -64,6 +199,12 @@ start_ap: | |||
64 | srlz.i | 199 | srlz.i |
65 | ;; | 200 | ;; |
66 | /* | 201 | /* |
202 | * Save the region registers, predicate before they get clobbered | ||
203 | */ | ||
204 | SAVE_REGION_REGS(r2, r8,r9,r10,r11,r12,r13,r14,r15); | ||
205 | mov r25=pr;; | ||
206 | |||
207 | /* | ||
67 | * Initialize kernel region registers: | 208 | * Initialize kernel region registers: |
68 | * rr[0]: VHPT enabled, page size = PAGE_SHIFT | 209 | * rr[0]: VHPT enabled, page size = PAGE_SHIFT |
69 | * rr[1]: VHPT enabled, page size = PAGE_SHIFT | 210 | * rr[1]: VHPT enabled, page size = PAGE_SHIFT |
@@ -76,32 +217,14 @@ start_ap: | |||
76 | * We initialize all of them to prevent inadvertently assuming | 217 | * We initialize all of them to prevent inadvertently assuming |
77 | * something about the state of address translation early in boot. | 218 | * something about the state of address translation early in boot. |
78 | */ | 219 | */ |
79 | mov r6=((ia64_rid(IA64_REGION_ID_KERNEL, (0<<61)) << 8) | (PAGE_SHIFT << 2) | 1) | 220 | SET_ONE_RR(0, PAGE_SHIFT, r2, r16, 1);; |
80 | movl r7=(0<<61) | 221 | SET_ONE_RR(1, PAGE_SHIFT, r2, r16, 1);; |
81 | mov r8=((ia64_rid(IA64_REGION_ID_KERNEL, (1<<61)) << 8) | (PAGE_SHIFT << 2) | 1) | 222 | SET_ONE_RR(2, PAGE_SHIFT, r2, r16, 1);; |
82 | movl r9=(1<<61) | 223 | SET_ONE_RR(3, PAGE_SHIFT, r2, r16, 1);; |
83 | mov r10=((ia64_rid(IA64_REGION_ID_KERNEL, (2<<61)) << 8) | (PAGE_SHIFT << 2) | 1) | 224 | SET_ONE_RR(4, PAGE_SHIFT, r2, r16, 1);; |
84 | movl r11=(2<<61) | 225 | SET_ONE_RR(5, PAGE_SHIFT, r2, r16, 1);; |
85 | mov r12=((ia64_rid(IA64_REGION_ID_KERNEL, (3<<61)) << 8) | (PAGE_SHIFT << 2) | 1) | 226 | SET_ONE_RR(6, IA64_GRANULE_SHIFT, r2, r16, 0);; |
86 | movl r13=(3<<61) | 227 | SET_ONE_RR(7, IA64_GRANULE_SHIFT, r2, r16, 0);; |
87 | mov r14=((ia64_rid(IA64_REGION_ID_KERNEL, (4<<61)) << 8) | (PAGE_SHIFT << 2) | 1) | ||
88 | movl r15=(4<<61) | ||
89 | mov r16=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1) | ||
90 | movl r17=(5<<61) | ||
91 | mov r18=((ia64_rid(IA64_REGION_ID_KERNEL, (6<<61)) << 8) | (IA64_GRANULE_SHIFT << 2)) | ||
92 | movl r19=(6<<61) | ||
93 | mov r20=((ia64_rid(IA64_REGION_ID_KERNEL, (7<<61)) << 8) | (IA64_GRANULE_SHIFT << 2)) | ||
94 | movl r21=(7<<61) | ||
95 | ;; | ||
96 | mov rr[r7]=r6 | ||
97 | mov rr[r9]=r8 | ||
98 | mov rr[r11]=r10 | ||
99 | mov rr[r13]=r12 | ||
100 | mov rr[r15]=r14 | ||
101 | mov rr[r17]=r16 | ||
102 | mov rr[r19]=r18 | ||
103 | mov rr[r21]=r20 | ||
104 | ;; | ||
105 | /* | 228 | /* |
106 | * Now pin mappings into the TLB for kernel text and data | 229 | * Now pin mappings into the TLB for kernel text and data |
107 | */ | 230 | */ |
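Editor's note: the start_ap rewrite replaces eight hand-unrolled "mov rr[...]" sequences with the SET_ONE_RR macro; each value written to a region register packs the region id, the page size and the VHPT-enable bit exactly as the old open-coded constants did. A small C illustration of that packing, assuming only the field layout implied by the macro's expression (VHPT enable in bit 0, page size in bits 2..7, region id from bit 8 up); the values are invented.

#include <stdio.h>
#include <stdint.h>

/* Pack a region-register value the way SET_ONE_RR does:
 *   rr = (rid << 8) | (page_shift << 2) | vhpt_enable
 * The field positions come from the macro above, not from a spec quote. */
static uint64_t make_rr(uint64_t rid, unsigned page_shift, int vhpt)
{
	return (rid << 8) | ((uint64_t)page_shift << 2) | (vhpt ? 1 : 0);
}

int main(void)
{
	/* e.g. a 16KB page size (shift 14) with the VHPT walker enabled */
	printf("rr value: %#llx\n",
	       (unsigned long long)make_rr(0x123, 14, 1));
	return 0;
}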
@@ -142,6 +265,12 @@ start_ap: | |||
142 | ;; | 265 | ;; |
143 | 1: // now we are in virtual mode | 266 | 1: // now we are in virtual mode |
144 | 267 | ||
268 | SET_AREA_FOR_BOOTING_CPU(r2, r16); | ||
269 | |||
270 | STORE_REGION_REGS(r16, r8,r9,r10,r11,r12,r13,r14,r15); | ||
271 | SAL_TO_OS_BOOT_HANDOFF_STATE_SAVE(r16,r17,r25) | ||
272 | ;; | ||
273 | |||
145 | // set IVT entry point---can't access I/O ports without it | 274 | // set IVT entry point---can't access I/O ports without it |
146 | movl r3=ia64_ivt | 275 | movl r3=ia64_ivt |
147 | ;; | 276 | ;; |
@@ -211,12 +340,13 @@ start_ap: | |||
211 | mov IA64_KR(CURRENT_STACK)=r16 | 340 | mov IA64_KR(CURRENT_STACK)=r16 |
212 | mov r13=r2 | 341 | mov r13=r2 |
213 | /* | 342 | /* |
214 | * Reserve space at the top of the stack for "struct pt_regs". Kernel threads | 343 | * Reserve space at the top of the stack for "struct pt_regs". Kernel |
215 | * don't store interesting values in that structure, but the space still needs | 344 | * threads don't store interesting values in that structure, but the space |
216 | * to be there because time-critical stuff such as the context switching can | 345 | * still needs to be there because time-critical stuff such as the context |
217 | * be implemented more efficiently (for example, __switch_to() | 346 | * switching can be implemented more efficiently (for example, __switch_to() |
218 | * always sets the psr.dfh bit of the task it is switching to). | 347 | * always sets the psr.dfh bit of the task it is switching to). |
219 | */ | 348 | */ |
349 | |||
220 | addl r12=IA64_STK_OFFSET-IA64_PT_REGS_SIZE-16,r2 | 350 | addl r12=IA64_STK_OFFSET-IA64_PT_REGS_SIZE-16,r2 |
221 | addl r2=IA64_RBS_OFFSET,r2 // initialize the RSE | 351 | addl r2=IA64_RBS_OFFSET,r2 // initialize the RSE |
222 | mov ar.rsc=0 // place RSE in enforced lazy mode | 352 | mov ar.rsc=0 // place RSE in enforced lazy mode |
@@ -993,4 +1123,98 @@ END(ia64_spinlock_contention) | |||
993 | 1123 | ||
994 | #endif | 1124 | #endif |
995 | 1125 | ||
1126 | #ifdef CONFIG_HOTPLUG_CPU | ||
1127 | GLOBAL_ENTRY(ia64_jump_to_sal) | ||
1128 | alloc r16=ar.pfs,1,0,0,0;; | ||
1129 | rsm psr.i | psr.ic | ||
1130 | { | ||
1131 | flushrs | ||
1132 | srlz.i | ||
1133 | } | ||
1134 | tpa r25=in0 | ||
1135 | movl r18=tlb_purge_done;; | ||
1136 | DATA_VA_TO_PA(r18);; | ||
1137 | mov b1=r18 // Return location | ||
1138 | movl r18=ia64_do_tlb_purge;; | ||
1139 | DATA_VA_TO_PA(r18);; | ||
1140 | mov b2=r18 // doing tlb_flush work | ||
1141 | mov ar.rsc=0 // Put RSE in enforced lazy, LE mode | ||
1142 | movl r17=1f;; | ||
1143 | DATA_VA_TO_PA(r17);; | ||
1144 | mov cr.iip=r17 | ||
1145 | movl r16=SAL_PSR_BITS_TO_SET;; | ||
1146 | mov cr.ipsr=r16 | ||
1147 | mov cr.ifs=r0;; | ||
1148 | rfi;; | ||
1149 | 1: | ||
1150 | /* | ||
1151 | * Invalidate all TLB data/inst | ||
1152 | */ | ||
1153 | br.sptk.many b2;; // jump to tlb purge code | ||
1154 | |||
1155 | tlb_purge_done: | ||
1156 | RESTORE_REGION_REGS(r25, r17,r18,r19);; | ||
1157 | RESTORE_REG(b0, r25, r17);; | ||
1158 | RESTORE_REG(b1, r25, r17);; | ||
1159 | RESTORE_REG(b2, r25, r17);; | ||
1160 | RESTORE_REG(b3, r25, r17);; | ||
1161 | RESTORE_REG(b4, r25, r17);; | ||
1162 | RESTORE_REG(b5, r25, r17);; | ||
1163 | ld8 r1=[r25],0x08;; | ||
1164 | ld8 r12=[r25],0x08;; | ||
1165 | ld8 r13=[r25],0x08;; | ||
1166 | RESTORE_REG(ar.fpsr, r25, r17);; | ||
1167 | RESTORE_REG(ar.pfs, r25, r17);; | ||
1168 | RESTORE_REG(ar.rnat, r25, r17);; | ||
1169 | RESTORE_REG(ar.unat, r25, r17);; | ||
1170 | RESTORE_REG(ar.bspstore, r25, r17);; | ||
1171 | RESTORE_REG(cr.dcr, r25, r17);; | ||
1172 | RESTORE_REG(cr.iva, r25, r17);; | ||
1173 | RESTORE_REG(cr.pta, r25, r17);; | ||
1174 | RESTORE_REG(cr.itv, r25, r17);; | ||
1175 | RESTORE_REG(cr.pmv, r25, r17);; | ||
1176 | RESTORE_REG(cr.cmcv, r25, r17);; | ||
1177 | RESTORE_REG(cr.lrr0, r25, r17);; | ||
1178 | RESTORE_REG(cr.lrr1, r25, r17);; | ||
1179 | ld8 r4=[r25],0x08;; | ||
1180 | ld8 r5=[r25],0x08;; | ||
1181 | ld8 r6=[r25],0x08;; | ||
1182 | ld8 r7=[r25],0x08;; | ||
1183 | ld8 r17=[r25],0x08;; | ||
1184 | mov pr=r17,-1;; | ||
1185 | RESTORE_REG(ar.lc, r25, r17);; | ||
1186 | /* | ||
1187 | * Now Restore floating point regs | ||
1188 | */ | ||
1189 | ldf.fill.nta f2=[r25],16;; | ||
1190 | ldf.fill.nta f3=[r25],16;; | ||
1191 | ldf.fill.nta f4=[r25],16;; | ||
1192 | ldf.fill.nta f5=[r25],16;; | ||
1193 | ldf.fill.nta f16=[r25],16;; | ||
1194 | ldf.fill.nta f17=[r25],16;; | ||
1195 | ldf.fill.nta f18=[r25],16;; | ||
1196 | ldf.fill.nta f19=[r25],16;; | ||
1197 | ldf.fill.nta f20=[r25],16;; | ||
1198 | ldf.fill.nta f21=[r25],16;; | ||
1199 | ldf.fill.nta f22=[r25],16;; | ||
1200 | ldf.fill.nta f23=[r25],16;; | ||
1201 | ldf.fill.nta f24=[r25],16;; | ||
1202 | ldf.fill.nta f25=[r25],16;; | ||
1203 | ldf.fill.nta f26=[r25],16;; | ||
1204 | ldf.fill.nta f27=[r25],16;; | ||
1205 | ldf.fill.nta f28=[r25],16;; | ||
1206 | ldf.fill.nta f29=[r25],16;; | ||
1207 | ldf.fill.nta f30=[r25],16;; | ||
1208 | ldf.fill.nta f31=[r25],16;; | ||
1209 | |||
1210 | /* | ||
1211 | * Now that we have done all the register restores | ||
1212 | * we are now ready for the big DIVE to SAL Land | ||
1213 | */ | ||
1214 | ssm psr.ic;; | ||
1215 | srlz.d;; | ||
1216 | br.ret.sptk.many b0;; | ||
1217 | END(ia64_jump_to_sal) | ||
1218 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
1219 | |||
996 | #endif /* CONFIG_SMP */ | 1220 | #endif /* CONFIG_SMP */ |
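The head.S hunk above collapses the eight hand-coded region-register loads into the SAVE_REGION_REGS/SET_ONE_RR macros, which all encode the same value layout: the kernel region id in bits 8 and up, the preferred page size in bits 2-7, and the VHPT-walker enable in bit 0. The userspace sketch below only models that encoding; IA64_REGION_ID_KERNEL, PAGE_SHIFT and IA64_GRANULE_SHIFT are assumed values for illustration and are not taken from this patch.

/*
 * Standalone model of the value built by SET_ONE_RR() -- not kernel code.
 * Assumes an LP64 "unsigned long"; ia64_rid() mirrors the kernel helper
 * (context << 3 | region number).
 */
#include <stdio.h>

#define IA64_REGION_ID_KERNEL 0UL	/* assumed value, for illustration only */

static unsigned long ia64_rid(unsigned long context, unsigned long region_addr)
{
	return (context << 3) | (region_addr >> 61);
}

/* Mirrors: (ia64_rid(...) << 8) | (pgsize << 2) | vhpt */
static unsigned long rr_value(unsigned long region, unsigned long pgsize, int vhpt)
{
	return (ia64_rid(IA64_REGION_ID_KERNEL, region << 61) << 8)
	       | (pgsize << 2) | (vhpt ? 1 : 0);
}

int main(void)
{
	unsigned long page_shift = 14;		/* assumed PAGE_SHIFT */
	unsigned long granule_shift = 16;	/* assumed IA64_GRANULE_SHIFT */
	int num;

	for (num = 0; num <= 5; num++)		/* rr0-rr5: VHPT on, PAGE_SHIFT */
		printf("rr[%d] = 0x%lx\n", num, rr_value(num, page_shift, 1));
	for (num = 6; num <= 7; num++)		/* rr6-rr7: VHPT off, granule size */
		printf("rr[%d] = 0x%lx\n", num, rr_value(num, granule_shift, 0));
	return 0;
}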
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index c15be5c38f56..88b014381df5 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c | |||
@@ -79,6 +79,7 @@ | |||
79 | #include <linux/smp.h> | 79 | #include <linux/smp.h> |
80 | #include <linux/smp_lock.h> | 80 | #include <linux/smp_lock.h> |
81 | #include <linux/string.h> | 81 | #include <linux/string.h> |
82 | #include <linux/bootmem.h> | ||
82 | 83 | ||
83 | #include <asm/delay.h> | 84 | #include <asm/delay.h> |
84 | #include <asm/hw_irq.h> | 85 | #include <asm/hw_irq.h> |
@@ -98,19 +99,30 @@ | |||
98 | #define DBG(fmt...) | 99 | #define DBG(fmt...) |
99 | #endif | 100 | #endif |
100 | 101 | ||
102 | #define NR_PREALLOCATE_RTE_ENTRIES (PAGE_SIZE / sizeof(struct iosapic_rte_info)) | ||
103 | #define RTE_PREALLOCATED (1) | ||
104 | |||
101 | static DEFINE_SPINLOCK(iosapic_lock); | 105 | static DEFINE_SPINLOCK(iosapic_lock); |
102 | 106 | ||
103 | /* These tables map IA-64 vectors to the IOSAPIC pin that generates this vector. */ | 107 | /* These tables map IA-64 vectors to the IOSAPIC pin that generates this vector. */ |
104 | 108 | ||
105 | static struct iosapic_intr_info { | 109 | struct iosapic_rte_info { |
110 | struct list_head rte_list; /* node in list of RTEs sharing the same vector */ | ||
106 | char __iomem *addr; /* base address of IOSAPIC */ | 111 | char __iomem *addr; /* base address of IOSAPIC */ |
107 | u32 low32; /* current value of low word of Redirection table entry */ | ||
108 | unsigned int gsi_base; /* first GSI assigned to this IOSAPIC */ | 112 | unsigned int gsi_base; /* first GSI assigned to this IOSAPIC */ |
109 | char rte_index; /* IOSAPIC RTE index (-1 => not an IOSAPIC interrupt) */ | 113 | char rte_index; /* IOSAPIC RTE index */ |
114 | int refcnt; /* reference counter */ | ||
115 | unsigned int flags; /* flags */ | ||
116 | } ____cacheline_aligned; | ||
117 | |||
118 | static struct iosapic_intr_info { | ||
119 | struct list_head rtes; /* RTEs using this vector (empty => not an IOSAPIC interrupt) */ | ||
120 | int count; /* # of RTEs that share this vector */ | ||
121 | u32 low32; /* current value of low word of Redirection table entry */ | ||
122 | unsigned int dest; /* destination CPU physical ID */ | ||
110 | unsigned char dmode : 3; /* delivery mode (see iosapic.h) */ | 123 | unsigned char dmode : 3; /* delivery mode (see iosapic.h) */ |
111 | unsigned char polarity: 1; /* interrupt polarity (see iosapic.h) */ | 124 | unsigned char polarity: 1; /* interrupt polarity (see iosapic.h) */ |
112 | unsigned char trigger : 1; /* trigger mode (see iosapic.h) */ | 125 | unsigned char trigger : 1; /* trigger mode (see iosapic.h) */ |
113 | int refcnt; /* reference counter */ | ||
114 | } iosapic_intr_info[IA64_NUM_VECTORS]; | 126 | } iosapic_intr_info[IA64_NUM_VECTORS]; |
115 | 127 | ||
116 | static struct iosapic { | 128 | static struct iosapic { |
@@ -126,6 +138,8 @@ static int num_iosapic; | |||
126 | 138 | ||
127 | static unsigned char pcat_compat __initdata; /* 8259 compatibility flag */ | 139 | static unsigned char pcat_compat __initdata; /* 8259 compatibility flag */ |
128 | 140 | ||
141 | static int iosapic_kmalloc_ok; | ||
142 | static LIST_HEAD(free_rte_list); | ||
129 | 143 | ||
130 | /* | 144 | /* |
131 | * Find an IOSAPIC associated with a GSI | 145 | * Find an IOSAPIC associated with a GSI |
@@ -147,10 +161,12 @@ static inline int | |||
147 | _gsi_to_vector (unsigned int gsi) | 161 | _gsi_to_vector (unsigned int gsi) |
148 | { | 162 | { |
149 | struct iosapic_intr_info *info; | 163 | struct iosapic_intr_info *info; |
164 | struct iosapic_rte_info *rte; | ||
150 | 165 | ||
151 | for (info = iosapic_intr_info; info < iosapic_intr_info + IA64_NUM_VECTORS; ++info) | 166 | for (info = iosapic_intr_info; info < iosapic_intr_info + IA64_NUM_VECTORS; ++info) |
152 | if (info->gsi_base + info->rte_index == gsi) | 167 | list_for_each_entry(rte, &info->rtes, rte_list) |
153 | return info - iosapic_intr_info; | 168 | if (rte->gsi_base + rte->rte_index == gsi) |
169 | return info - iosapic_intr_info; | ||
154 | return -1; | 170 | return -1; |
155 | } | 171 | } |
156 | 172 | ||
@@ -167,33 +183,52 @@ gsi_to_vector (unsigned int gsi) | |||
167 | int | 183 | int |
168 | gsi_to_irq (unsigned int gsi) | 184 | gsi_to_irq (unsigned int gsi) |
169 | { | 185 | { |
186 | unsigned long flags; | ||
187 | int irq; | ||
170 | /* | 188 | /* |
171 | * XXX fix me: this assumes an identity mapping between IA-64 vector and Linux irq | 189 | * XXX fix me: this assumes an identity mapping between IA-64 vector and Linux irq |
172 | * numbers... | 190 | * numbers... |
173 | */ | 191 | */ |
174 | return _gsi_to_vector(gsi); | 192 | spin_lock_irqsave(&iosapic_lock, flags); |
193 | { | ||
194 | irq = _gsi_to_vector(gsi); | ||
195 | } | ||
196 | spin_unlock_irqrestore(&iosapic_lock, flags); | ||
197 | |||
198 | return irq; | ||
199 | } | ||
200 | |||
201 | static struct iosapic_rte_info *gsi_vector_to_rte(unsigned int gsi, unsigned int vec) | ||
202 | { | ||
203 | struct iosapic_rte_info *rte; | ||
204 | |||
205 | list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) | ||
206 | if (rte->gsi_base + rte->rte_index == gsi) | ||
207 | return rte; | ||
208 | return NULL; | ||
175 | } | 209 | } |
176 | 210 | ||
177 | static void | 211 | static void |
178 | set_rte (unsigned int vector, unsigned int dest, int mask) | 212 | set_rte (unsigned int gsi, unsigned int vector, unsigned int dest, int mask) |
179 | { | 213 | { |
180 | unsigned long pol, trigger, dmode; | 214 | unsigned long pol, trigger, dmode; |
181 | u32 low32, high32; | 215 | u32 low32, high32; |
182 | char __iomem *addr; | 216 | char __iomem *addr; |
183 | int rte_index; | 217 | int rte_index; |
184 | char redir; | 218 | char redir; |
219 | struct iosapic_rte_info *rte; | ||
185 | 220 | ||
186 | DBG(KERN_DEBUG"IOSAPIC: routing vector %d to 0x%x\n", vector, dest); | 221 | DBG(KERN_DEBUG"IOSAPIC: routing vector %d to 0x%x\n", vector, dest); |
187 | 222 | ||
188 | rte_index = iosapic_intr_info[vector].rte_index; | 223 | rte = gsi_vector_to_rte(gsi, vector); |
189 | if (rte_index < 0) | 224 | if (!rte) |
190 | return; /* not an IOSAPIC interrupt */ | 225 | return; /* not an IOSAPIC interrupt */ |
191 | 226 | ||
192 | addr = iosapic_intr_info[vector].addr; | 227 | rte_index = rte->rte_index; |
228 | addr = rte->addr; | ||
193 | pol = iosapic_intr_info[vector].polarity; | 229 | pol = iosapic_intr_info[vector].polarity; |
194 | trigger = iosapic_intr_info[vector].trigger; | 230 | trigger = iosapic_intr_info[vector].trigger; |
195 | dmode = iosapic_intr_info[vector].dmode; | 231 | dmode = iosapic_intr_info[vector].dmode; |
196 | vector &= (~IA64_IRQ_REDIRECTED); | ||
197 | 232 | ||
198 | redir = (dmode == IOSAPIC_LOWEST_PRIORITY) ? 1 : 0; | 233 | redir = (dmode == IOSAPIC_LOWEST_PRIORITY) ? 1 : 0; |
199 | 234 | ||
@@ -221,6 +256,7 @@ set_rte (unsigned int vector, unsigned int dest, int mask) | |||
221 | iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index), high32); | 256 | iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index), high32); |
222 | iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32); | 257 | iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32); |
223 | iosapic_intr_info[vector].low32 = low32; | 258 | iosapic_intr_info[vector].low32 = low32; |
259 | iosapic_intr_info[vector].dest = dest; | ||
224 | } | 260 | } |
225 | 261 | ||
226 | static void | 262 | static void |
@@ -237,18 +273,20 @@ mask_irq (unsigned int irq) | |||
237 | u32 low32; | 273 | u32 low32; |
238 | int rte_index; | 274 | int rte_index; |
239 | ia64_vector vec = irq_to_vector(irq); | 275 | ia64_vector vec = irq_to_vector(irq); |
276 | struct iosapic_rte_info *rte; | ||
240 | 277 | ||
241 | addr = iosapic_intr_info[vec].addr; | 278 | if (list_empty(&iosapic_intr_info[vec].rtes)) |
242 | rte_index = iosapic_intr_info[vec].rte_index; | ||
243 | |||
244 | if (rte_index < 0) | ||
245 | return; /* not an IOSAPIC interrupt! */ | 279 | return; /* not an IOSAPIC interrupt! */ |
246 | 280 | ||
247 | spin_lock_irqsave(&iosapic_lock, flags); | 281 | spin_lock_irqsave(&iosapic_lock, flags); |
248 | { | 282 | { |
249 | /* set only the mask bit */ | 283 | /* set only the mask bit */ |
250 | low32 = iosapic_intr_info[vec].low32 |= IOSAPIC_MASK; | 284 | low32 = iosapic_intr_info[vec].low32 |= IOSAPIC_MASK; |
251 | iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32); | 285 | list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) { |
286 | addr = rte->addr; | ||
287 | rte_index = rte->rte_index; | ||
288 | iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32); | ||
289 | } | ||
252 | } | 290 | } |
253 | spin_unlock_irqrestore(&iosapic_lock, flags); | 291 | spin_unlock_irqrestore(&iosapic_lock, flags); |
254 | } | 292 | } |
@@ -261,16 +299,19 @@ unmask_irq (unsigned int irq) | |||
261 | u32 low32; | 299 | u32 low32; |
262 | int rte_index; | 300 | int rte_index; |
263 | ia64_vector vec = irq_to_vector(irq); | 301 | ia64_vector vec = irq_to_vector(irq); |
302 | struct iosapic_rte_info *rte; | ||
264 | 303 | ||
265 | addr = iosapic_intr_info[vec].addr; | 304 | if (list_empty(&iosapic_intr_info[vec].rtes)) |
266 | rte_index = iosapic_intr_info[vec].rte_index; | ||
267 | if (rte_index < 0) | ||
268 | return; /* not an IOSAPIC interrupt! */ | 305 | return; /* not an IOSAPIC interrupt! */ |
269 | 306 | ||
270 | spin_lock_irqsave(&iosapic_lock, flags); | 307 | spin_lock_irqsave(&iosapic_lock, flags); |
271 | { | 308 | { |
272 | low32 = iosapic_intr_info[vec].low32 &= ~IOSAPIC_MASK; | 309 | low32 = iosapic_intr_info[vec].low32 &= ~IOSAPIC_MASK; |
273 | iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32); | 310 | list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) { |
311 | addr = rte->addr; | ||
312 | rte_index = rte->rte_index; | ||
313 | iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32); | ||
314 | } | ||
274 | } | 315 | } |
275 | spin_unlock_irqrestore(&iosapic_lock, flags); | 316 | spin_unlock_irqrestore(&iosapic_lock, flags); |
276 | } | 317 | } |
@@ -286,6 +327,7 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask) | |||
286 | char __iomem *addr; | 327 | char __iomem *addr; |
287 | int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0; | 328 | int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0; |
288 | ia64_vector vec; | 329 | ia64_vector vec; |
330 | struct iosapic_rte_info *rte; | ||
289 | 331 | ||
290 | irq &= (~IA64_IRQ_REDIRECTED); | 332 | irq &= (~IA64_IRQ_REDIRECTED); |
291 | vec = irq_to_vector(irq); | 333 | vec = irq_to_vector(irq); |
@@ -295,10 +337,7 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask) | |||
295 | 337 | ||
296 | dest = cpu_physical_id(first_cpu(mask)); | 338 | dest = cpu_physical_id(first_cpu(mask)); |
297 | 339 | ||
298 | rte_index = iosapic_intr_info[vec].rte_index; | 340 | if (list_empty(&iosapic_intr_info[vec].rtes)) |
299 | addr = iosapic_intr_info[vec].addr; | ||
300 | |||
301 | if (rte_index < 0) | ||
302 | return; /* not an IOSAPIC interrupt */ | 341 | return; /* not an IOSAPIC interrupt */ |
303 | 342 | ||
304 | set_irq_affinity_info(irq, dest, redir); | 343 | set_irq_affinity_info(irq, dest, redir); |
@@ -318,8 +357,13 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask) | |||
318 | low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT); | 357 | low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT); |
319 | 358 | ||
320 | iosapic_intr_info[vec].low32 = low32; | 359 | iosapic_intr_info[vec].low32 = low32; |
321 | iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index), high32); | 360 | iosapic_intr_info[vec].dest = dest; |
322 | iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32); | 361 | list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) { |
362 | addr = rte->addr; | ||
363 | rte_index = rte->rte_index; | ||
364 | iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index), high32); | ||
365 | iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32); | ||
366 | } | ||
323 | } | 367 | } |
324 | spin_unlock_irqrestore(&iosapic_lock, flags); | 368 | spin_unlock_irqrestore(&iosapic_lock, flags); |
325 | #endif | 369 | #endif |
@@ -340,9 +384,11 @@ static void | |||
340 | iosapic_end_level_irq (unsigned int irq) | 384 | iosapic_end_level_irq (unsigned int irq) |
341 | { | 385 | { |
342 | ia64_vector vec = irq_to_vector(irq); | 386 | ia64_vector vec = irq_to_vector(irq); |
387 | struct iosapic_rte_info *rte; | ||
343 | 388 | ||
344 | move_irq(irq); | 389 | move_irq(irq); |
345 | iosapic_eoi(iosapic_intr_info[vec].addr, vec); | 390 | list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) |
391 | iosapic_eoi(rte->addr, vec); | ||
346 | } | 392 | } |
347 | 393 | ||
348 | #define iosapic_shutdown_level_irq mask_irq | 394 | #define iosapic_shutdown_level_irq mask_irq |
@@ -422,6 +468,34 @@ iosapic_version (char __iomem *addr) | |||
422 | return iosapic_read(addr, IOSAPIC_VERSION); | 468 | return iosapic_read(addr, IOSAPIC_VERSION); |
423 | } | 469 | } |
424 | 470 | ||
471 | static int iosapic_find_sharable_vector (unsigned long trigger, unsigned long pol) | ||
472 | { | ||
473 | int i, vector = -1, min_count = -1; | ||
474 | struct iosapic_intr_info *info; | ||
475 | |||
476 | /* | ||
477 | * shared vectors for edge-triggered interrupts are not | ||
478 | * supported yet | ||
479 | */ | ||
480 | if (trigger == IOSAPIC_EDGE) | ||
481 | return -1; | ||
482 | |||
483 | for (i = IA64_FIRST_DEVICE_VECTOR; i <= IA64_LAST_DEVICE_VECTOR; i++) { | ||
484 | info = &iosapic_intr_info[i]; | ||
485 | if (info->trigger == trigger && info->polarity == pol && | ||
486 | (info->dmode == IOSAPIC_FIXED || info->dmode == IOSAPIC_LOWEST_PRIORITY)) { | ||
487 | if (min_count == -1 || info->count < min_count) { | ||
488 | vector = i; | ||
489 | min_count = info->count; | ||
490 | } | ||
491 | } | ||
492 | } | ||
493 | if (vector < 0) | ||
494 | panic("%s: out of interrupt vectors!\n", __FUNCTION__); | ||
495 | |||
496 | return vector; | ||
497 | } | ||
498 | |||
425 | /* | 499 | /* |
426 | * if the given vector is already owned by other, | 500 | * if the given vector is already owned by other, |
427 | * assign a new vector for the other and make the vector available | 501 | * assign a new vector for the other and make the vector available |
@@ -431,19 +505,63 @@ iosapic_reassign_vector (int vector) | |||
431 | { | 505 | { |
432 | int new_vector; | 506 | int new_vector; |
433 | 507 | ||
434 | if (iosapic_intr_info[vector].rte_index >= 0 || iosapic_intr_info[vector].addr | 508 | if (!list_empty(&iosapic_intr_info[vector].rtes)) { |
435 | || iosapic_intr_info[vector].gsi_base || iosapic_intr_info[vector].dmode | ||
436 | || iosapic_intr_info[vector].polarity || iosapic_intr_info[vector].trigger) | ||
437 | { | ||
438 | new_vector = assign_irq_vector(AUTO_ASSIGN); | 509 | new_vector = assign_irq_vector(AUTO_ASSIGN); |
439 | printk(KERN_INFO "Reassigning vector %d to %d\n", vector, new_vector); | 510 | printk(KERN_INFO "Reassigning vector %d to %d\n", vector, new_vector); |
440 | memcpy(&iosapic_intr_info[new_vector], &iosapic_intr_info[vector], | 511 | memcpy(&iosapic_intr_info[new_vector], &iosapic_intr_info[vector], |
441 | sizeof(struct iosapic_intr_info)); | 512 | sizeof(struct iosapic_intr_info)); |
513 | INIT_LIST_HEAD(&iosapic_intr_info[new_vector].rtes); | ||
514 | list_move(iosapic_intr_info[vector].rtes.next, &iosapic_intr_info[new_vector].rtes); | ||
442 | memset(&iosapic_intr_info[vector], 0, sizeof(struct iosapic_intr_info)); | 515 | memset(&iosapic_intr_info[vector], 0, sizeof(struct iosapic_intr_info)); |
443 | iosapic_intr_info[vector].rte_index = -1; | 516 | iosapic_intr_info[vector].low32 = IOSAPIC_MASK; |
517 | INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes); | ||
444 | } | 518 | } |
445 | } | 519 | } |
446 | 520 | ||
521 | static struct iosapic_rte_info *iosapic_alloc_rte (void) | ||
522 | { | ||
523 | int i; | ||
524 | struct iosapic_rte_info *rte; | ||
525 | int preallocated = 0; | ||
526 | |||
527 | if (!iosapic_kmalloc_ok && list_empty(&free_rte_list)) { | ||
528 | rte = alloc_bootmem(sizeof(struct iosapic_rte_info) * NR_PREALLOCATE_RTE_ENTRIES); | ||
529 | if (!rte) | ||
530 | return NULL; | ||
531 | for (i = 0; i < NR_PREALLOCATE_RTE_ENTRIES; i++, rte++) | ||
532 | list_add(&rte->rte_list, &free_rte_list); | ||
533 | } | ||
534 | |||
535 | if (!list_empty(&free_rte_list)) { | ||
536 | rte = list_entry(free_rte_list.next, struct iosapic_rte_info, rte_list); | ||
537 | list_del(&rte->rte_list); | ||
538 | preallocated++; | ||
539 | } else { | ||
540 | rte = kmalloc(sizeof(struct iosapic_rte_info), GFP_ATOMIC); | ||
541 | if (!rte) | ||
542 | return NULL; | ||
543 | } | ||
544 | |||
545 | memset(rte, 0, sizeof(struct iosapic_rte_info)); | ||
546 | if (preallocated) | ||
547 | rte->flags |= RTE_PREALLOCATED; | ||
548 | |||
549 | return rte; | ||
550 | } | ||
551 | |||
552 | static void iosapic_free_rte (struct iosapic_rte_info *rte) | ||
553 | { | ||
554 | if (rte->flags & RTE_PREALLOCATED) | ||
555 | list_add_tail(&rte->rte_list, &free_rte_list); | ||
556 | else | ||
557 | kfree(rte); | ||
558 | } | ||
559 | |||
560 | static inline int vector_is_shared (int vector) | ||
561 | { | ||
562 | return (iosapic_intr_info[vector].count > 1); | ||
563 | } | ||
564 | |||
447 | static void | 565 | static void |
448 | register_intr (unsigned int gsi, int vector, unsigned char delivery, | 566 | register_intr (unsigned int gsi, int vector, unsigned char delivery, |
449 | unsigned long polarity, unsigned long trigger) | 567 | unsigned long polarity, unsigned long trigger) |
@@ -454,6 +572,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery, | |||
454 | int index; | 572 | int index; |
455 | unsigned long gsi_base; | 573 | unsigned long gsi_base; |
456 | void __iomem *iosapic_address; | 574 | void __iomem *iosapic_address; |
575 | struct iosapic_rte_info *rte; | ||
457 | 576 | ||
458 | index = find_iosapic(gsi); | 577 | index = find_iosapic(gsi); |
459 | if (index < 0) { | 578 | if (index < 0) { |
@@ -464,14 +583,33 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery, | |||
464 | iosapic_address = iosapic_lists[index].addr; | 583 | iosapic_address = iosapic_lists[index].addr; |
465 | gsi_base = iosapic_lists[index].gsi_base; | 584 | gsi_base = iosapic_lists[index].gsi_base; |
466 | 585 | ||
467 | rte_index = gsi - gsi_base; | 586 | rte = gsi_vector_to_rte(gsi, vector); |
468 | iosapic_intr_info[vector].rte_index = rte_index; | 587 | if (!rte) { |
588 | rte = iosapic_alloc_rte(); | ||
589 | if (!rte) { | ||
590 | printk(KERN_WARNING "%s: cannot allocate memory\n", __FUNCTION__); | ||
591 | return; | ||
592 | } | ||
593 | |||
594 | rte_index = gsi - gsi_base; | ||
595 | rte->rte_index = rte_index; | ||
596 | rte->addr = iosapic_address; | ||
597 | rte->gsi_base = gsi_base; | ||
598 | rte->refcnt++; | ||
599 | list_add_tail(&rte->rte_list, &iosapic_intr_info[vector].rtes); | ||
600 | iosapic_intr_info[vector].count++; | ||
601 | } | ||
602 | else if (vector_is_shared(vector)) { | ||
603 | struct iosapic_intr_info *info = &iosapic_intr_info[vector]; | ||
604 | if (info->trigger != trigger || info->polarity != polarity) { | ||
605 | printk (KERN_WARNING "%s: cannot override the interrupt\n", __FUNCTION__); | ||
606 | return; | ||
607 | } | ||
608 | } | ||
609 | |||
469 | iosapic_intr_info[vector].polarity = polarity; | 610 | iosapic_intr_info[vector].polarity = polarity; |
470 | iosapic_intr_info[vector].dmode = delivery; | 611 | iosapic_intr_info[vector].dmode = delivery; |
471 | iosapic_intr_info[vector].addr = iosapic_address; | ||
472 | iosapic_intr_info[vector].gsi_base = gsi_base; | ||
473 | iosapic_intr_info[vector].trigger = trigger; | 612 | iosapic_intr_info[vector].trigger = trigger; |
474 | iosapic_intr_info[vector].refcnt++; | ||
475 | 613 | ||
476 | if (trigger == IOSAPIC_EDGE) | 614 | if (trigger == IOSAPIC_EDGE) |
477 | irq_type = &irq_type_iosapic_edge; | 615 | irq_type = &irq_type_iosapic_edge; |
@@ -494,6 +632,13 @@ get_target_cpu (unsigned int gsi, int vector) | |||
494 | static int cpu = -1; | 632 | static int cpu = -1; |
495 | 633 | ||
496 | /* | 634 | /* |
635 | * In case of vector shared by multiple RTEs, all RTEs that | ||
636 | * share the vector need to use the same destination CPU. | ||
637 | */ | ||
638 | if (!list_empty(&iosapic_intr_info[vector].rtes)) | ||
639 | return iosapic_intr_info[vector].dest; | ||
640 | |||
641 | /* | ||
497 | * If the platform supports redirection via XTP, let it | 642 | * If the platform supports redirection via XTP, let it |
498 | * distribute interrupts. | 643 | * distribute interrupts. |
499 | */ | 644 | */ |
@@ -565,10 +710,12 @@ int | |||
565 | iosapic_register_intr (unsigned int gsi, | 710 | iosapic_register_intr (unsigned int gsi, |
566 | unsigned long polarity, unsigned long trigger) | 711 | unsigned long polarity, unsigned long trigger) |
567 | { | 712 | { |
568 | int vector; | 713 | int vector, mask = 1; |
569 | unsigned int dest; | 714 | unsigned int dest; |
570 | unsigned long flags; | 715 | unsigned long flags; |
571 | 716 | struct iosapic_rte_info *rte; | |
717 | u32 low32; | ||
718 | again: | ||
572 | /* | 719 | /* |
573 | * If this GSI has already been registered (i.e., it's a | 720 | * If this GSI has already been registered (i.e., it's a |
574 | * shared interrupt, or we lost a race to register it), | 721 | * shared interrupt, or we lost a race to register it), |
@@ -578,19 +725,45 @@ iosapic_register_intr (unsigned int gsi, | |||
578 | { | 725 | { |
579 | vector = gsi_to_vector(gsi); | 726 | vector = gsi_to_vector(gsi); |
580 | if (vector > 0) { | 727 | if (vector > 0) { |
581 | iosapic_intr_info[vector].refcnt++; | 728 | rte = gsi_vector_to_rte(gsi, vector); |
729 | rte->refcnt++; | ||
582 | spin_unlock_irqrestore(&iosapic_lock, flags); | 730 | spin_unlock_irqrestore(&iosapic_lock, flags); |
583 | return vector; | 731 | return vector; |
584 | } | 732 | } |
733 | } | ||
734 | spin_unlock_irqrestore(&iosapic_lock, flags); | ||
735 | |||
736 | /* If vector is running out, we try to find a sharable vector */ | ||
737 | vector = assign_irq_vector_nopanic(AUTO_ASSIGN); | ||
738 | if (vector < 0) | ||
739 | vector = iosapic_find_sharable_vector(trigger, polarity); | ||
740 | |||
741 | spin_lock_irqsave(&irq_descp(vector)->lock, flags); | ||
742 | spin_lock(&iosapic_lock); | ||
743 | { | ||
744 | if (gsi_to_vector(gsi) > 0) { | ||
745 | if (list_empty(&iosapic_intr_info[vector].rtes)) | ||
746 | free_irq_vector(vector); | ||
747 | spin_unlock(&iosapic_lock); | ||
748 | spin_unlock_irqrestore(&irq_descp(vector)->lock, flags); | ||
749 | goto again; | ||
750 | } | ||
585 | 751 | ||
586 | vector = assign_irq_vector(AUTO_ASSIGN); | ||
587 | dest = get_target_cpu(gsi, vector); | 752 | dest = get_target_cpu(gsi, vector); |
588 | register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY, | 753 | register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY, |
589 | polarity, trigger); | 754 | polarity, trigger); |
590 | 755 | ||
591 | set_rte(vector, dest, 1); | 756 | /* |
757 | * If the vector is shared and already unmasked for | ||
758 | * other interrupt sources, don't mask it. | ||
759 | */ | ||
760 | low32 = iosapic_intr_info[vector].low32; | ||
761 | if (vector_is_shared(vector) && !(low32 & IOSAPIC_MASK)) | ||
762 | mask = 0; | ||
763 | set_rte(gsi, vector, dest, mask); | ||
592 | } | 764 | } |
593 | spin_unlock_irqrestore(&iosapic_lock, flags); | 765 | spin_unlock(&iosapic_lock); |
766 | spin_unlock_irqrestore(&irq_descp(vector)->lock, flags); | ||
594 | 767 | ||
595 | printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n", | 768 | printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n", |
596 | gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"), | 769 | gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"), |
@@ -607,8 +780,10 @@ iosapic_unregister_intr (unsigned int gsi) | |||
607 | unsigned long flags; | 780 | unsigned long flags; |
608 | int irq, vector; | 781 | int irq, vector; |
609 | irq_desc_t *idesc; | 782 | irq_desc_t *idesc; |
610 | int rte_index; | 783 | u32 low32; |
611 | unsigned long trigger, polarity; | 784 | unsigned long trigger, polarity; |
785 | unsigned int dest; | ||
786 | struct iosapic_rte_info *rte; | ||
612 | 787 | ||
613 | /* | 788 | /* |
614 | * If the irq associated with the gsi is not found, | 789 | * If the irq associated with the gsi is not found, |
@@ -627,54 +802,56 @@ iosapic_unregister_intr (unsigned int gsi) | |||
627 | spin_lock_irqsave(&idesc->lock, flags); | 802 | spin_lock_irqsave(&idesc->lock, flags); |
628 | spin_lock(&iosapic_lock); | 803 | spin_lock(&iosapic_lock); |
629 | { | 804 | { |
630 | rte_index = iosapic_intr_info[vector].rte_index; | 805 | if ((rte = gsi_vector_to_rte(gsi, vector)) == NULL) { |
631 | if (rte_index < 0) { | ||
632 | spin_unlock(&iosapic_lock); | ||
633 | spin_unlock_irqrestore(&idesc->lock, flags); | ||
634 | printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n", gsi); | 806 | printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n", gsi); |
635 | WARN_ON(1); | 807 | WARN_ON(1); |
636 | return; | 808 | goto out; |
637 | } | 809 | } |
638 | 810 | ||
639 | if (--iosapic_intr_info[vector].refcnt > 0) { | 811 | if (--rte->refcnt > 0) |
640 | spin_unlock(&iosapic_lock); | 812 | goto out; |
641 | spin_unlock_irqrestore(&idesc->lock, flags); | ||
642 | return; | ||
643 | } | ||
644 | 813 | ||
645 | /* | 814 | /* Mask the interrupt */ |
646 | * If interrupt handlers still exist on the irq | 815 | low32 = iosapic_intr_info[vector].low32 | IOSAPIC_MASK; |
647 | * associated with the gsi, don't unregister the | 816 | iosapic_write(rte->addr, IOSAPIC_RTE_LOW(rte->rte_index), low32); |
648 | * interrupt. | ||
649 | */ | ||
650 | if (idesc->action) { | ||
651 | iosapic_intr_info[vector].refcnt++; | ||
652 | spin_unlock(&iosapic_lock); | ||
653 | spin_unlock_irqrestore(&idesc->lock, flags); | ||
654 | printk(KERN_WARNING "Cannot unregister GSI. IRQ %u is still in use.\n", irq); | ||
655 | return; | ||
656 | } | ||
657 | 817 | ||
658 | /* Clear the interrupt controller descriptor. */ | 818 | /* Remove the rte entry from the list */ |
659 | idesc->handler = &no_irq_type; | 819 | list_del(&rte->rte_list); |
820 | iosapic_intr_info[vector].count--; | ||
821 | iosapic_free_rte(rte); | ||
660 | 822 | ||
661 | trigger = iosapic_intr_info[vector].trigger; | 823 | trigger = iosapic_intr_info[vector].trigger; |
662 | polarity = iosapic_intr_info[vector].polarity; | 824 | polarity = iosapic_intr_info[vector].polarity; |
825 | dest = iosapic_intr_info[vector].dest; | ||
826 | printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d unregistered\n", | ||
827 | gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"), | ||
828 | (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), | ||
829 | cpu_logical_id(dest), dest, vector); | ||
830 | |||
831 | if (list_empty(&iosapic_intr_info[vector].rtes)) { | ||
832 | /* Sanity check */ | ||
833 | BUG_ON(iosapic_intr_info[vector].count); | ||
834 | |||
835 | /* Clear the interrupt controller descriptor */ | ||
836 | idesc->handler = &no_irq_type; | ||
837 | |||
838 | /* Clear the interrupt information */ | ||
839 | memset(&iosapic_intr_info[vector], 0, sizeof(struct iosapic_intr_info)); | ||
840 | iosapic_intr_info[vector].low32 |= IOSAPIC_MASK; | ||
841 | INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes); | ||
842 | |||
843 | if (idesc->action) { | ||
844 | printk(KERN_ERR "interrupt handlers still exist on IRQ %u\n", irq); | ||
845 | WARN_ON(1); | ||
846 | } | ||
663 | 847 | ||
664 | /* Clear the interrupt information. */ | 848 | /* Free the interrupt vector */ |
665 | memset(&iosapic_intr_info[vector], 0, sizeof(struct iosapic_intr_info)); | 849 | free_irq_vector(vector); |
666 | iosapic_intr_info[vector].rte_index = -1; /* mark as unused */ | 850 | } |
667 | } | 851 | } |
852 | out: | ||
668 | spin_unlock(&iosapic_lock); | 853 | spin_unlock(&iosapic_lock); |
669 | spin_unlock_irqrestore(&idesc->lock, flags); | 854 | spin_unlock_irqrestore(&idesc->lock, flags); |
670 | |||
671 | /* Free the interrupt vector */ | ||
672 | free_irq_vector(vector); | ||
673 | |||
674 | printk(KERN_INFO "GSI %u (%s, %s) -> vector %d unregisterd.\n", | ||
675 | gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"), | ||
676 | (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), | ||
677 | vector); | ||
678 | } | 855 | } |
679 | #endif /* CONFIG_ACPI_DEALLOCATE_IRQ */ | 856 | #endif /* CONFIG_ACPI_DEALLOCATE_IRQ */ |
680 | 857 | ||
@@ -724,7 +901,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi, | |||
724 | (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), | 901 | (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), |
725 | cpu_logical_id(dest), dest, vector); | 902 | cpu_logical_id(dest), dest, vector); |
726 | 903 | ||
727 | set_rte(vector, dest, mask); | 904 | set_rte(gsi, vector, dest, mask); |
728 | return vector; | 905 | return vector; |
729 | } | 906 | } |
730 | 907 | ||
@@ -750,7 +927,7 @@ iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi, | |||
750 | polarity == IOSAPIC_POL_HIGH ? "high" : "low", | 927 | polarity == IOSAPIC_POL_HIGH ? "high" : "low", |
751 | cpu_logical_id(dest), dest, vector); | 928 | cpu_logical_id(dest), dest, vector); |
752 | 929 | ||
753 | set_rte(vector, dest, 1); | 930 | set_rte(gsi, vector, dest, 1); |
754 | } | 931 | } |
755 | 932 | ||
756 | void __init | 933 | void __init |
@@ -758,8 +935,10 @@ iosapic_system_init (int system_pcat_compat) | |||
758 | { | 935 | { |
759 | int vector; | 936 | int vector; |
760 | 937 | ||
761 | for (vector = 0; vector < IA64_NUM_VECTORS; ++vector) | 938 | for (vector = 0; vector < IA64_NUM_VECTORS; ++vector) { |
762 | iosapic_intr_info[vector].rte_index = -1; /* mark as unused */ | 939 | iosapic_intr_info[vector].low32 = IOSAPIC_MASK; |
940 | INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes); /* mark as unused */ | ||
941 | } | ||
763 | 942 | ||
764 | pcat_compat = system_pcat_compat; | 943 | pcat_compat = system_pcat_compat; |
765 | if (pcat_compat) { | 944 | if (pcat_compat) { |
@@ -825,3 +1004,10 @@ map_iosapic_to_node(unsigned int gsi_base, int node) | |||
825 | return; | 1004 | return; |
826 | } | 1005 | } |
827 | #endif | 1006 | #endif |
1007 | |||
1008 | static int __init iosapic_enable_kmalloc (void) | ||
1009 | { | ||
1010 | iosapic_kmalloc_ok = 1; | ||
1011 | return 0; | ||
1012 | } | ||
1013 | core_initcall (iosapic_enable_kmalloc); | ||
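The iosapic.c changes above turn the per-vector bookkeeping from a single RTE into a list of struct iosapic_rte_info, so several GSIs can share one vector; lookup, mask/unmask and affinity paths now walk that list. The sketch below is a simplified userspace model of that list handling, using malloc and a hand-rolled singly linked list instead of the kernel's list_head and the bootmem/kmalloc split; names and sizes are illustrative only.

/* Userspace sketch of the shared-vector RTE lists -- not the kernel types. */
#include <stdio.h>
#include <stdlib.h>

#define NUM_VECTORS 16

struct rte_info {
	struct rte_info *next;
	unsigned int gsi_base;		/* first GSI of the owning IOSAPIC */
	unsigned int rte_index;		/* RTE index within that IOSAPIC */
};

struct intr_info {
	struct rte_info *rtes;		/* RTEs sharing this vector (NULL => unused) */
	int count;
} intr_info[NUM_VECTORS];

static void register_rte(int vector, unsigned int gsi, unsigned int gsi_base)
{
	struct rte_info *rte = calloc(1, sizeof(*rte));

	rte->gsi_base = gsi_base;
	rte->rte_index = gsi - gsi_base;
	rte->next = intr_info[vector].rtes;	/* prepend to the vector's list */
	intr_info[vector].rtes = rte;
	intr_info[vector].count++;
}

static int gsi_to_vector(unsigned int gsi)
{
	int vector;
	struct rte_info *rte;

	for (vector = 0; vector < NUM_VECTORS; vector++)
		for (rte = intr_info[vector].rtes; rte; rte = rte->next)
			if (rte->gsi_base + rte->rte_index == gsi)
				return vector;
	return -1;
}

int main(void)
{
	register_rte(3, 20, 16);	/* GSI 20 on an IOSAPIC based at GSI 16 */
	register_rte(3, 52, 48);	/* GSI 52 shares vector 3 */

	printf("GSI 20 -> vector %d\n", gsi_to_vector(20));
	printf("GSI 52 -> vector %d (shared by %d RTEs)\n",
	       gsi_to_vector(52), intr_info[3].count);
	return 0;
}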
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index 5ba06ebe355b..4fe60c7a2e90 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c | |||
@@ -63,20 +63,30 @@ EXPORT_SYMBOL(isa_irq_to_vector_map); | |||
63 | static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_NUM_DEVICE_VECTORS)]; | 63 | static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_NUM_DEVICE_VECTORS)]; |
64 | 64 | ||
65 | int | 65 | int |
66 | assign_irq_vector (int irq) | 66 | assign_irq_vector_nopanic (int irq) |
67 | { | 67 | { |
68 | int pos, vector; | 68 | int pos, vector; |
69 | again: | 69 | again: |
70 | pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS); | 70 | pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS); |
71 | vector = IA64_FIRST_DEVICE_VECTOR + pos; | 71 | vector = IA64_FIRST_DEVICE_VECTOR + pos; |
72 | if (vector > IA64_LAST_DEVICE_VECTOR) | 72 | if (vector > IA64_LAST_DEVICE_VECTOR) |
73 | /* XXX could look for sharable vectors instead of panic'ing... */ | 73 | return -1; |
74 | panic("assign_irq_vector: out of interrupt vectors!"); | ||
75 | if (test_and_set_bit(pos, ia64_vector_mask)) | 74 | if (test_and_set_bit(pos, ia64_vector_mask)) |
76 | goto again; | 75 | goto again; |
77 | return vector; | 76 | return vector; |
78 | } | 77 | } |
79 | 78 | ||
79 | int | ||
80 | assign_irq_vector (int irq) | ||
81 | { | ||
82 | int vector = assign_irq_vector_nopanic(irq); | ||
83 | |||
84 | if (vector < 0) | ||
85 | panic("assign_irq_vector: out of interrupt vectors!"); | ||
86 | |||
87 | return vector; | ||
88 | } | ||
89 | |||
80 | void | 90 | void |
81 | free_irq_vector (int vector) | 91 | free_irq_vector (int vector) |
82 | { | 92 | { |
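assign_irq_vector() above is split so that the new assign_irq_vector_nopanic() reports vector exhaustion with -1, letting iosapic_register_intr() fall back to iosapic_find_sharable_vector() instead of panicking. A minimal sketch of that split, with a toy bitmap and without the kernel's atomic test_and_set_bit retry loop:

/* Illustrative only; base vector and bitmap width are assumptions. */
#include <stdio.h>
#include <stdlib.h>

#define FIRST_DEVICE_VECTOR	0x30	/* assumed base */
#define NUM_DEVICE_VECTORS	8	/* tiny on purpose, to show exhaustion */

static unsigned long vector_mask;	/* one bit per device vector */

static int assign_irq_vector_nopanic(void)
{
	int pos;

	for (pos = 0; pos < NUM_DEVICE_VECTORS; pos++) {
		if (!(vector_mask & (1UL << pos))) {
			vector_mask |= 1UL << pos;
			return FIRST_DEVICE_VECTOR + pos;
		}
	}
	return -1;			/* caller may fall back to a shared vector */
}

static int assign_irq_vector(void)
{
	int vector = assign_irq_vector_nopanic();

	if (vector < 0) {
		fprintf(stderr, "assign_irq_vector: out of interrupt vectors!\n");
		exit(1);		/* stands in for panic() */
	}
	return vector;
}

int main(void)
{
	int i;

	for (i = 0; i < NUM_DEVICE_VECTORS; i++)
		printf("allocated vector 0x%x\n", assign_irq_vector());
	printf("next nopanic attempt -> %d\n", assign_irq_vector_nopanic());
	return 0;
}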
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S index cf3f8014f9ad..ef3fd7265b67 100644 --- a/arch/ia64/kernel/mca_asm.S +++ b/arch/ia64/kernel/mca_asm.S | |||
@@ -110,46 +110,19 @@ | |||
110 | .global ia64_os_mca_dispatch_end | 110 | .global ia64_os_mca_dispatch_end |
111 | .global ia64_sal_to_os_handoff_state | 111 | .global ia64_sal_to_os_handoff_state |
112 | .global ia64_os_to_sal_handoff_state | 112 | .global ia64_os_to_sal_handoff_state |
113 | .global ia64_do_tlb_purge | ||
113 | 114 | ||
114 | .text | 115 | .text |
115 | .align 16 | 116 | .align 16 |
116 | 117 | ||
117 | ia64_os_mca_dispatch: | 118 | /* |
118 | 119 | * Just the TLB purge part is moved to a separate function | |
119 | // Serialize all MCA processing | 120 | * so we can re-use the code for cpu hotplug code as well |
120 | mov r3=1;; | 121 | * Caller should now setup b1, so we can branch once the |
121 | LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);; | 122 | * tlb flush is complete. |
122 | ia64_os_mca_spin: | 123 | */ |
123 | xchg8 r4=[r2],r3;; | ||
124 | cmp.ne p6,p0=r4,r0 | ||
125 | (p6) br ia64_os_mca_spin | ||
126 | |||
127 | // Save the SAL to OS MCA handoff state as defined | ||
128 | // by SAL SPEC 3.0 | ||
129 | // NOTE : The order in which the state gets saved | ||
130 | // is dependent on the way the C-structure | ||
131 | // for ia64_mca_sal_to_os_state_t has been | ||
132 | // defined in include/asm/mca.h | ||
133 | SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2) | ||
134 | ;; | ||
135 | |||
136 | // LOG PROCESSOR STATE INFO FROM HERE ON.. | ||
137 | begin_os_mca_dump: | ||
138 | br ia64_os_mca_proc_state_dump;; | ||
139 | |||
140 | ia64_os_mca_done_dump: | ||
141 | |||
142 | LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56) | ||
143 | ;; | ||
144 | ld8 r18=[r16] // Get processor state parameter on existing PALE_CHECK. | ||
145 | ;; | ||
146 | tbit.nz p6,p7=r18,60 | ||
147 | (p7) br.spnt done_tlb_purge_and_reload | ||
148 | |||
149 | // The following code purges TC and TR entries. Then reload all TC entries. | ||
150 | // Purge percpu data TC entries. | ||
151 | begin_tlb_purge_and_reload: | ||
152 | 124 | ||
125 | ia64_do_tlb_purge: | ||
153 | #define O(member) IA64_CPUINFO_##member##_OFFSET | 126 | #define O(member) IA64_CPUINFO_##member##_OFFSET |
154 | 127 | ||
155 | GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2 | 128 | GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2 |
@@ -230,6 +203,51 @@ begin_tlb_purge_and_reload: | |||
230 | ;; | 203 | ;; |
231 | srlz.i | 204 | srlz.i |
232 | ;; | 205 | ;; |
206 | // Now branch away to caller. | ||
207 | br.sptk.many b1 | ||
208 | ;; | ||
209 | |||
210 | ia64_os_mca_dispatch: | ||
211 | |||
212 | // Serialize all MCA processing | ||
213 | mov r3=1;; | ||
214 | LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);; | ||
215 | ia64_os_mca_spin: | ||
216 | xchg8 r4=[r2],r3;; | ||
217 | cmp.ne p6,p0=r4,r0 | ||
218 | (p6) br ia64_os_mca_spin | ||
219 | |||
220 | // Save the SAL to OS MCA handoff state as defined | ||
221 | // by SAL SPEC 3.0 | ||
222 | // NOTE : The order in which the state gets saved | ||
223 | // is dependent on the way the C-structure | ||
224 | // for ia64_mca_sal_to_os_state_t has been | ||
225 | // defined in include/asm/mca.h | ||
226 | SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2) | ||
227 | ;; | ||
228 | |||
229 | // LOG PROCESSOR STATE INFO FROM HERE ON.. | ||
230 | begin_os_mca_dump: | ||
231 | br ia64_os_mca_proc_state_dump;; | ||
232 | |||
233 | ia64_os_mca_done_dump: | ||
234 | |||
235 | LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56) | ||
236 | ;; | ||
237 | ld8 r18=[r16] // Get processor state parameter on existing PALE_CHECK. | ||
238 | ;; | ||
239 | tbit.nz p6,p7=r18,60 | ||
240 | (p7) br.spnt done_tlb_purge_and_reload | ||
241 | |||
242 | // The following code purges TC and TR entries. Then reload all TC entries. | ||
243 | // Purge percpu data TC entries. | ||
244 | begin_tlb_purge_and_reload: | ||
245 | movl r18=ia64_reload_tr;; | ||
246 | LOAD_PHYSICAL(p0,r18,ia64_reload_tr);; | ||
247 | mov b1=r18;; | ||
248 | br.sptk.many ia64_do_tlb_purge;; | ||
249 | |||
250 | ia64_reload_tr: | ||
233 | // Finally reload the TR registers. | 251 | // Finally reload the TR registers. |
234 | // 1. Reload DTR/ITR registers for kernel. | 252 | // 1. Reload DTR/ITR registers for kernel. |
235 | mov r18=KERNEL_TR_PAGE_SHIFT<<2 | 253 | mov r18=KERNEL_TR_PAGE_SHIFT<<2 |
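The mca_asm.S rework above factors the TLB purge out of ia64_os_mca_dispatch into ia64_do_tlb_purge and has each caller preload a return address into b1 before branching, so the hotplug path in head.S (ia64_jump_to_sal) can reuse the same purge code. As a rough C analogy only (not kernel code), b1 behaves like a caller-installed continuation:

#include <stdio.h>

static void (*purge_done)(void);	/* plays the role of branch register b1 */

static void do_tlb_purge(void)
{
	printf("purging TC/TR entries\n");
	purge_done();			/* br.sptk.many b1 */
}

static void mca_reload_tr(void)
{
	printf("MCA path: reloading translation registers\n");
}

static void hotplug_return_to_sal(void)
{
	printf("hotplug path: restoring state and jumping to SAL\n");
}

int main(void)
{
	purge_done = mca_reload_tr;		/* mov b1=ia64_reload_tr */
	do_tlb_purge();

	purge_done = hotplug_return_to_sal;	/* ia64_jump_to_sal sets b1=tlb_purge_done */
	do_tlb_purge();
	return 0;
}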
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c index ab478172c349..abc0113a821d 100644 --- a/arch/ia64/kernel/mca_drv.c +++ b/arch/ia64/kernel/mca_drv.c | |||
@@ -132,8 +132,7 @@ mca_handler_bh(unsigned long paddr) | |||
132 | spin_unlock(&mca_bh_lock); | 132 | spin_unlock(&mca_bh_lock); |
133 | 133 | ||
134 | /* This process is about to be killed itself */ | 134 | /* This process is about to be killed itself */ |
135 | force_sig(SIGKILL, current); | 135 | do_exit(SIGKILL); |
136 | schedule(); | ||
137 | } | 136 | } |
138 | 137 | ||
139 | /** | 138 | /** |
@@ -439,6 +438,7 @@ recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_chec | |||
439 | psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr; | 438 | psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr; |
440 | psr2->cpl = 0; | 439 | psr2->cpl = 0; |
441 | psr2->ri = 0; | 440 | psr2->ri = 0; |
441 | psr2->i = 0; | ||
442 | 442 | ||
443 | return 1; | 443 | return 1; |
444 | } | 444 | } |
diff --git a/arch/ia64/kernel/mca_drv_asm.S b/arch/ia64/kernel/mca_drv_asm.S index bcfa05acc561..2d7e0217638d 100644 --- a/arch/ia64/kernel/mca_drv_asm.S +++ b/arch/ia64/kernel/mca_drv_asm.S | |||
@@ -10,6 +10,7 @@ | |||
10 | 10 | ||
11 | #include <asm/asmmacro.h> | 11 | #include <asm/asmmacro.h> |
12 | #include <asm/processor.h> | 12 | #include <asm/processor.h> |
13 | #include <asm/ptrace.h> | ||
13 | 14 | ||
14 | GLOBAL_ENTRY(mca_handler_bhhook) | 15 | GLOBAL_ENTRY(mca_handler_bhhook) |
15 | invala // clear RSE ? | 16 | invala // clear RSE ? |
@@ -20,12 +21,21 @@ GLOBAL_ENTRY(mca_handler_bhhook) | |||
20 | ;; | 21 | ;; |
21 | alloc r16=ar.pfs,0,2,1,0 // make a new frame | 22 | alloc r16=ar.pfs,0,2,1,0 // make a new frame |
22 | ;; | 23 | ;; |
24 | mov ar.rsc=0 | ||
25 | ;; | ||
23 | mov r13=IA64_KR(CURRENT) // current task pointer | 26 | mov r13=IA64_KR(CURRENT) // current task pointer |
24 | ;; | 27 | ;; |
25 | adds r12=IA64_TASK_THREAD_KSP_OFFSET,r13 | 28 | mov r2=r13 |
29 | ;; | ||
30 | addl r22=IA64_RBS_OFFSET,r2 | ||
31 | ;; | ||
32 | mov ar.bspstore=r22 | ||
26 | ;; | 33 | ;; |
27 | ld8 r12=[r12] // stack pointer | 34 | addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2 |
28 | ;; | 35 | ;; |
36 | adds r2=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13 | ||
37 | ;; | ||
38 | st1 [r2]=r0 // clear current->thread.on_ustack flag | ||
29 | mov loc0=r16 | 39 | mov loc0=r16 |
30 | movl loc1=mca_handler_bh // recovery C function | 40 | movl loc1=mca_handler_bh // recovery C function |
31 | ;; | 41 | ;; |
@@ -34,7 +44,9 @@ GLOBAL_ENTRY(mca_handler_bhhook) | |||
34 | ;; | 44 | ;; |
35 | mov loc1=rp | 45 | mov loc1=rp |
36 | ;; | 46 | ;; |
37 | br.call.sptk.many rp=b6 // not return ... | 47 | ssm psr.i |
48 | ;; | ||
49 | br.call.sptk.many rp=b6 // does not return ... | ||
38 | ;; | 50 | ;; |
39 | mov ar.pfs=loc0 | 51 | mov ar.pfs=loc0 |
40 | mov rp=loc1 | 52 | mov rp=loc1 |
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 71147be3279c..71c101601e3e 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c | |||
@@ -480,14 +480,6 @@ typedef struct { | |||
480 | #define PFM_CMD_ARG_MANY -1 /* cannot be zero */ | 480 | #define PFM_CMD_ARG_MANY -1 /* cannot be zero */ |
481 | 481 | ||
482 | typedef struct { | 482 | typedef struct { |
483 | int debug; /* turn on/off debugging via syslog */ | ||
484 | int debug_ovfl; /* turn on/off debug printk in overflow handler */ | ||
485 | int fastctxsw; /* turn on/off fast (unsecure) ctxsw */ | ||
486 | int expert_mode; /* turn on/off value checking */ | ||
487 | int debug_pfm_read; | ||
488 | } pfm_sysctl_t; | ||
489 | |||
490 | typedef struct { | ||
491 | unsigned long pfm_spurious_ovfl_intr_count; /* keep track of spurious ovfl interrupts */ | 483 | unsigned long pfm_spurious_ovfl_intr_count; /* keep track of spurious ovfl interrupts */ |
492 | unsigned long pfm_replay_ovfl_intr_count; /* keep track of replayed ovfl interrupts */ | 484 | unsigned long pfm_replay_ovfl_intr_count; /* keep track of replayed ovfl interrupts */ |
493 | unsigned long pfm_ovfl_intr_count; /* keep track of ovfl interrupts */ | 485 | unsigned long pfm_ovfl_intr_count; /* keep track of ovfl interrupts */ |
@@ -514,8 +506,8 @@ static LIST_HEAD(pfm_buffer_fmt_list); | |||
514 | static pmu_config_t *pmu_conf; | 506 | static pmu_config_t *pmu_conf; |
515 | 507 | ||
516 | /* sysctl() controls */ | 508 | /* sysctl() controls */ |
517 | static pfm_sysctl_t pfm_sysctl; | 509 | pfm_sysctl_t pfm_sysctl; |
518 | int pfm_debug_var; | 510 | EXPORT_SYMBOL(pfm_sysctl); |
519 | 511 | ||
520 | static ctl_table pfm_ctl_table[]={ | 512 | static ctl_table pfm_ctl_table[]={ |
521 | {1, "debug", &pfm_sysctl.debug, sizeof(int), 0666, NULL, &proc_dointvec, NULL,}, | 513 | {1, "debug", &pfm_sysctl.debug, sizeof(int), 0666, NULL, &proc_dointvec, NULL,}, |
@@ -1273,6 +1265,8 @@ out: | |||
1273 | } | 1265 | } |
1274 | EXPORT_SYMBOL(pfm_unregister_buffer_fmt); | 1266 | EXPORT_SYMBOL(pfm_unregister_buffer_fmt); |
1275 | 1267 | ||
1268 | extern void update_pal_halt_status(int); | ||
1269 | |||
1276 | static int | 1270 | static int |
1277 | pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu) | 1271 | pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu) |
1278 | { | 1272 | { |
@@ -1319,6 +1313,11 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu) | |||
1319 | is_syswide, | 1313 | is_syswide, |
1320 | cpu)); | 1314 | cpu)); |
1321 | 1315 | ||
1316 | /* | ||
1317 | * disable default_idle() to go to PAL_HALT | ||
1318 | */ | ||
1319 | update_pal_halt_status(0); | ||
1320 | |||
1322 | UNLOCK_PFS(flags); | 1321 | UNLOCK_PFS(flags); |
1323 | 1322 | ||
1324 | return 0; | 1323 | return 0; |
@@ -1374,6 +1373,12 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu) | |||
1374 | is_syswide, | 1373 | is_syswide, |
1375 | cpu)); | 1374 | cpu)); |
1376 | 1375 | ||
1376 | /* | ||
1377 | * if possible, enable default_idle() to go into PAL_HALT | ||
1378 | */ | ||
1379 | if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0) | ||
1380 | update_pal_halt_status(1); | ||
1381 | |||
1377 | UNLOCK_PFS(flags); | 1382 | UNLOCK_PFS(flags); |
1378 | 1383 | ||
1379 | return 0; | 1384 | return 0; |
@@ -1576,7 +1581,7 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos) | |||
1576 | goto abort_locked; | 1581 | goto abort_locked; |
1577 | } | 1582 | } |
1578 | 1583 | ||
1579 | DPRINT(("[%d] fd=%d type=%d\n", current->pid, msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type)); | 1584 | DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type)); |
1580 | 1585 | ||
1581 | ret = -EFAULT; | 1586 | ret = -EFAULT; |
1582 | if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t); | 1587 | if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t); |
@@ -3695,8 +3700,6 @@ pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |||
3695 | 3700 | ||
3696 | pfm_sysctl.debug = m == 0 ? 0 : 1; | 3701 | pfm_sysctl.debug = m == 0 ? 0 : 1; |
3697 | 3702 | ||
3698 | pfm_debug_var = pfm_sysctl.debug; | ||
3699 | |||
3700 | printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off"); | 3703 | printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off"); |
3701 | 3704 | ||
3702 | if (m == 0) { | 3705 | if (m == 0) { |
@@ -4212,7 +4215,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) | |||
4212 | DPRINT(("cannot load to [%d], invalid ctx_state=%d\n", | 4215 | DPRINT(("cannot load to [%d], invalid ctx_state=%d\n", |
4213 | req->load_pid, | 4216 | req->load_pid, |
4214 | ctx->ctx_state)); | 4217 | ctx->ctx_state)); |
4215 | return -EINVAL; | 4218 | return -EBUSY; |
4216 | } | 4219 | } |
4217 | 4220 | ||
4218 | DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg)); | 4221 | DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg)); |
@@ -4714,16 +4717,26 @@ recheck: | |||
4714 | if (task == current || ctx->ctx_fl_system) return 0; | 4717 | if (task == current || ctx->ctx_fl_system) return 0; |
4715 | 4718 | ||
4716 | /* | 4719 | /* |
4717 | * if context is UNLOADED we are safe to go | 4720 | * we are monitoring another thread |
4718 | */ | 4721 | */ |
4719 | if (state == PFM_CTX_UNLOADED) return 0; | 4722 | switch(state) { |
4720 | 4723 | case PFM_CTX_UNLOADED: | |
4721 | /* | 4724 | /* |
4722 | * no command can operate on a zombie context | 4725 | * if context is UNLOADED we are safe to go |
4723 | */ | 4726 | */ |
4724 | if (state == PFM_CTX_ZOMBIE) { | 4727 | return 0; |
4725 | DPRINT(("cmd %d state zombie cannot operate on context\n", cmd)); | 4728 | case PFM_CTX_ZOMBIE: |
4726 | return -EINVAL; | 4729 | /* |
4730 | * no command can operate on a zombie context | ||
4731 | */ | ||
4732 | DPRINT(("cmd %d state zombie cannot operate on context\n", cmd)); | ||
4733 | return -EINVAL; | ||
4734 | case PFM_CTX_MASKED: | ||
4735 | /* | ||
4736 | * PMU state has been saved to software even though | ||
4737 | * the thread may still be running. | ||
4738 | */ | ||
4739 | if (cmd != PFM_UNLOAD_CONTEXT) return 0; | ||
4727 | } | 4740 | } |
4728 | 4741 | ||
4729 | /* | 4742 | /* |
@@ -4996,13 +5009,21 @@ pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs) | |||
4996 | } | 5009 | } |
4997 | 5010 | ||
4998 | static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds); | 5011 | static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds); |
4999 | 5012 | /* | |
5013 | * pfm_handle_work() can be called with interrupts enabled | ||
5014 | * (TIF_NEED_RESCHED) or disabled. The down_interruptible | ||
5015 | * call may sleep, therefore we must re-enable interrupts | ||
5016 | * to avoid deadlocks. It is safe to do so because this function | ||
5017 | * is called ONLY when returning to user level (PUStk=1), in which case | ||
5018 | * there is no risk of kernel stack overflow due to deep | ||
5019 | * interrupt nesting. | ||
5020 | */ | ||
5000 | void | 5021 | void |
5001 | pfm_handle_work(void) | 5022 | pfm_handle_work(void) |
5002 | { | 5023 | { |
5003 | pfm_context_t *ctx; | 5024 | pfm_context_t *ctx; |
5004 | struct pt_regs *regs; | 5025 | struct pt_regs *regs; |
5005 | unsigned long flags; | 5026 | unsigned long flags, dummy_flags; |
5006 | unsigned long ovfl_regs; | 5027 | unsigned long ovfl_regs; |
5007 | unsigned int reason; | 5028 | unsigned int reason; |
5008 | int ret; | 5029 | int ret; |
@@ -5039,18 +5060,15 @@ pfm_handle_work(void) | |||
5039 | //if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking; | 5060 | //if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking; |
5040 | if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking; | 5061 | if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking; |
5041 | 5062 | ||
5063 | /* | ||
5064 | * restore interrupt mask to what it was on entry. | ||
5065 | * Could be enabled/disabled. | ||
5066 | */ | ||
5042 | UNPROTECT_CTX(ctx, flags); | 5067 | UNPROTECT_CTX(ctx, flags); |
5043 | 5068 | ||
5044 | /* | 5069 | /* |
5045 | * pfm_handle_work() is currently called with interrupts disabled. | 5070 | * force interrupt enable because of down_interruptible() |
5046 | * The down_interruptible call may sleep, therefore we | 5071 | */ |
5047 | * must re-enable interrupts to avoid deadlocks. It is | ||
5048 | * safe to do so because this function is called ONLY | ||
5049 | * when returning to user level (PUStk=1), in which case | ||
5050 | * there is no risk of kernel stack overflow due to deep | ||
5051 | * interrupt nesting. | ||
5052 | */ | ||
5053 | BUG_ON(flags & IA64_PSR_I); | ||
5054 | local_irq_enable(); | 5072 | local_irq_enable(); |
5055 | 5073 | ||
5056 | DPRINT(("before block sleeping\n")); | 5074 | DPRINT(("before block sleeping\n")); |
@@ -5064,12 +5082,12 @@ pfm_handle_work(void) | |||
5064 | DPRINT(("after block sleeping ret=%d\n", ret)); | 5082 | DPRINT(("after block sleeping ret=%d\n", ret)); |
5065 | 5083 | ||
5066 | /* | 5084 | /* |
5067 | * disable interrupts to restore state we had upon entering | 5085 | * lock context and mask interrupts again |
5068 | * this function | 5086 | * We save flags into a dummy because we may have |
5087 | * altered the interrupt mask compared to entry in this | ||
5088 | * function. | ||
5069 | */ | 5089 | */ |
5070 | local_irq_disable(); | 5090 | PROTECT_CTX(ctx, dummy_flags); |
5071 | |||
5072 | PROTECT_CTX(ctx, flags); | ||
5073 | 5091 | ||
5074 | /* | 5092 | /* |
5075 | * we need to read the ovfl_regs only after wake-up | 5093 | * we need to read the ovfl_regs only after wake-up |
@@ -5095,7 +5113,9 @@ skip_blocking: | |||
5095 | ctx->ctx_ovfl_regs[0] = 0UL; | 5113 | ctx->ctx_ovfl_regs[0] = 0UL; |
5096 | 5114 | ||
5097 | nothing_to_do: | 5115 | nothing_to_do: |
5098 | 5116 | /* | |
5117 | * restore flags as they were upon entry | ||
5118 | */ | ||
5099 | UNPROTECT_CTX(ctx, flags); | 5119 | UNPROTECT_CTX(ctx, flags); |
5100 | } | 5120 | } |
5101 | 5121 | ||
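Besides the pfm_handle_work() locking changes, the perfmon.c hunks above tie session reservation to the new update_pal_halt_status() hook defined in process.c further down, so default_idle() only enters PAL_HALT while no perfmon session is active. A condensed sketch of that interaction, with the session counters reduced to plain globals:

/* Simplified model of the PAL_HALT gating; not the kernel data structures. */
#include <stdio.h>

static int pal_halt = 1;		/* cleared by the "nohalt" boot option */
static int can_do_pal_halt = 1;		/* consulted by default_idle() */

static int pfs_task_sessions;
static int pfs_sys_sessions;

static void update_pal_halt_status(int status)
{
	can_do_pal_halt = pal_halt && status;
}

static void pfm_reserve_session(int is_syswide)
{
	if (is_syswide)
		pfs_sys_sessions++;
	else
		pfs_task_sessions++;
	update_pal_halt_status(0);		/* idle must not enter PAL_HALT */
}

static void pfm_unreserve_session(int is_syswide)
{
	if (is_syswide)
		pfs_sys_sessions--;
	else
		pfs_task_sessions--;
	if (pfs_task_sessions == 0 && pfs_sys_sessions == 0)
		update_pal_halt_status(1);	/* idle may use PAL_HALT again */
}

int main(void)
{
	printf("idle uses PAL_HALT: %d\n", can_do_pal_halt);
	pfm_reserve_session(0);
	printf("after reserve:      %d\n", can_do_pal_halt);
	pfm_unreserve_session(0);
	printf("after unreserve:    %d\n", can_do_pal_halt);
	return 0;
}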
diff --git a/arch/ia64/kernel/perfmon_default_smpl.c b/arch/ia64/kernel/perfmon_default_smpl.c index 965d29004555..344941db0a9e 100644 --- a/arch/ia64/kernel/perfmon_default_smpl.c +++ b/arch/ia64/kernel/perfmon_default_smpl.c | |||
@@ -20,24 +20,17 @@ MODULE_AUTHOR("Stephane Eranian <eranian@hpl.hp.com>"); | |||
20 | MODULE_DESCRIPTION("perfmon default sampling format"); | 20 | MODULE_DESCRIPTION("perfmon default sampling format"); |
21 | MODULE_LICENSE("GPL"); | 21 | MODULE_LICENSE("GPL"); |
22 | 22 | ||
23 | MODULE_PARM(debug, "i"); | ||
24 | MODULE_PARM_DESC(debug, "debug"); | ||
25 | |||
26 | MODULE_PARM(debug_ovfl, "i"); | ||
27 | MODULE_PARM_DESC(debug_ovfl, "debug ovfl"); | ||
28 | |||
29 | |||
30 | #define DEFAULT_DEBUG 1 | 23 | #define DEFAULT_DEBUG 1 |
31 | 24 | ||
32 | #ifdef DEFAULT_DEBUG | 25 | #ifdef DEFAULT_DEBUG |
33 | #define DPRINT(a) \ | 26 | #define DPRINT(a) \ |
34 | do { \ | 27 | do { \ |
35 | if (unlikely(debug >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \ | 28 | if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \ |
36 | } while (0) | 29 | } while (0) |
37 | 30 | ||
38 | #define DPRINT_ovfl(a) \ | 31 | #define DPRINT_ovfl(a) \ |
39 | do { \ | 32 | do { \ |
40 | if (unlikely(debug_ovfl >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \ | 33 | if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \ |
41 | } while (0) | 34 | } while (0) |
42 | 35 | ||
43 | #else | 36 | #else |
@@ -45,8 +38,6 @@ MODULE_PARM_DESC(debug_ovfl, "debug ovfl"); | |||
45 | #define DPRINT_ovfl(a) | 38 | #define DPRINT_ovfl(a) |
46 | #endif | 39 | #endif |
47 | 40 | ||
48 | static int debug, debug_ovfl; | ||
49 | |||
50 | static int | 41 | static int |
51 | default_validate(struct task_struct *task, unsigned int flags, int cpu, void *data) | 42 | default_validate(struct task_struct *task, unsigned int flags, int cpu, void *data) |
52 | { | 43 | { |
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 91293388dd29..ebb71f3d6d19 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c | |||
@@ -3,6 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 1998-2003 Hewlett-Packard Co | 4 | * Copyright (C) 1998-2003 Hewlett-Packard Co |
5 | * David Mosberger-Tang <davidm@hpl.hp.com> | 5 | * David Mosberger-Tang <davidm@hpl.hp.com> |
6 | * 04/11/17 Ashok Raj <ashok.raj@intel.com> Added CPU Hotplug Support | ||
6 | */ | 7 | */ |
7 | #define __KERNEL_SYSCALLS__ /* see <asm/unistd.h> */ | 8 | #define __KERNEL_SYSCALLS__ /* see <asm/unistd.h> */ |
8 | #include <linux/config.h> | 9 | #include <linux/config.h> |
@@ -49,7 +50,7 @@ | |||
49 | #include "sigframe.h" | 50 | #include "sigframe.h" |
50 | 51 | ||
51 | void (*ia64_mark_idle)(int); | 52 | void (*ia64_mark_idle)(int); |
52 | static cpumask_t cpu_idle_map; | 53 | static DEFINE_PER_CPU(unsigned int, cpu_idle_state); |
53 | 54 | ||
54 | unsigned long boot_option_idle_override = 0; | 55 | unsigned long boot_option_idle_override = 0; |
55 | EXPORT_SYMBOL(boot_option_idle_override); | 56 | EXPORT_SYMBOL(boot_option_idle_override); |
@@ -172,7 +173,9 @@ do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall | |||
172 | ia64_do_signal(oldset, scr, in_syscall); | 173 | ia64_do_signal(oldset, scr, in_syscall); |
173 | } | 174 | } |
174 | 175 | ||
175 | static int pal_halt = 1; | 176 | static int pal_halt = 1; |
177 | static int can_do_pal_halt = 1; | ||
178 | |||
176 | static int __init nohalt_setup(char * str) | 179 | static int __init nohalt_setup(char * str) |
177 | { | 180 | { |
178 | pal_halt = 0; | 181 | pal_halt = 0; |
@@ -180,16 +183,20 @@ static int __init nohalt_setup(char * str) | |||
180 | } | 183 | } |
181 | __setup("nohalt", nohalt_setup); | 184 | __setup("nohalt", nohalt_setup); |
182 | 185 | ||
186 | void | ||
187 | update_pal_halt_status(int status) | ||
188 | { | ||
189 | can_do_pal_halt = pal_halt && status; | ||
190 | } | ||
191 | |||
183 | /* | 192 | /* |
184 | * We use this if we don't have any better idle routine.. | 193 | * We use this if we don't have any better idle routine.. |
185 | */ | 194 | */ |
186 | void | 195 | void |
187 | default_idle (void) | 196 | default_idle (void) |
188 | { | 197 | { |
189 | unsigned long pmu_active = ia64_getreg(_IA64_REG_PSR) & (IA64_PSR_PP | IA64_PSR_UP); | ||
190 | |||
191 | while (!need_resched()) | 198 | while (!need_resched()) |
192 | if (pal_halt && !pmu_active) | 199 | if (can_do_pal_halt) |
193 | safe_halt(); | 200 | safe_halt(); |
194 | else | 201 | else |
195 | cpu_relax(); | 202 | cpu_relax(); |
@@ -200,27 +207,20 @@ default_idle (void) | |||
200 | static inline void play_dead(void) | 207 | static inline void play_dead(void) |
201 | { | 208 | { |
202 | extern void ia64_cpu_local_tick (void); | 209 | extern void ia64_cpu_local_tick (void); |
210 | unsigned int this_cpu = smp_processor_id(); | ||
211 | |||
203 | /* Ack it */ | 212 | /* Ack it */ |
204 | __get_cpu_var(cpu_state) = CPU_DEAD; | 213 | __get_cpu_var(cpu_state) = CPU_DEAD; |
205 | 214 | ||
206 | /* We shouldn't have to disable interrupts while dead, but | ||
207 | * some interrupts just don't seem to go away, and this makes | ||
208 | * it "work" for testing purposes. */ | ||
209 | max_xtp(); | 215 | max_xtp(); |
210 | local_irq_disable(); | 216 | local_irq_disable(); |
211 | /* Death loop */ | 217 | idle_task_exit(); |
212 | while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE) | 218 | ia64_jump_to_sal(&sal_boot_rendez_state[this_cpu]); |
213 | cpu_relax(); | ||
214 | |||
215 | /* | 219 | /* |
216 | * Enable timer interrupts from now on | 220 | * The above is a point of no-return, the processor is |
217 | * Not required if we put processor in SAL_BOOT_RENDEZ mode. | 221 | * expected to be in SAL loop now. |
218 | */ | 222 | */ |
219 | local_flush_tlb_all(); | 223 | BUG(); |
220 | cpu_set(smp_processor_id(), cpu_online_map); | ||
221 | wmb(); | ||
222 | ia64_cpu_local_tick (); | ||
223 | local_irq_enable(); | ||
224 | } | 224 | } |
225 | #else | 225 | #else |
226 | static inline void play_dead(void) | 226 | static inline void play_dead(void) |
@@ -229,20 +229,31 @@ static inline void play_dead(void) | |||
229 | } | 229 | } |
230 | #endif /* CONFIG_HOTPLUG_CPU */ | 230 | #endif /* CONFIG_HOTPLUG_CPU */ |
231 | 231 | ||
232 | |||
233 | void cpu_idle_wait(void) | 232 | void cpu_idle_wait(void) |
234 | { | 233 | { |
235 | int cpu; | 234 | unsigned int cpu, this_cpu = get_cpu(); |
236 | cpumask_t map; | 235 | cpumask_t map; |
236 | |||
237 | set_cpus_allowed(current, cpumask_of_cpu(this_cpu)); | ||
238 | put_cpu(); | ||
237 | 239 | ||
238 | for_each_online_cpu(cpu) | 240 | cpus_clear(map); |
239 | cpu_set(cpu, cpu_idle_map); | 241 | for_each_online_cpu(cpu) { |
242 | per_cpu(cpu_idle_state, cpu) = 1; | ||
243 | cpu_set(cpu, map); | ||
244 | } | ||
240 | 245 | ||
241 | wmb(); | 246 | __get_cpu_var(cpu_idle_state) = 0; |
242 | do { | 247 | |
243 | ssleep(1); | 248 | wmb(); |
244 | cpus_and(map, cpu_idle_map, cpu_online_map); | 249 | do { |
245 | } while (!cpus_empty(map)); | 250 | ssleep(1); |
251 | for_each_online_cpu(cpu) { | ||
252 | if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu)) | ||
253 | cpu_clear(cpu, map); | ||
254 | } | ||
255 | cpus_and(map, map, cpu_online_map); | ||
256 | } while (!cpus_empty(map)); | ||
246 | } | 257 | } |
247 | EXPORT_SYMBOL_GPL(cpu_idle_wait); | 258 | EXPORT_SYMBOL_GPL(cpu_idle_wait); |
248 | 259 | ||
@@ -250,7 +261,6 @@ void __attribute__((noreturn)) | |||
250 | cpu_idle (void) | 261 | cpu_idle (void) |
251 | { | 262 | { |
252 | void (*mark_idle)(int) = ia64_mark_idle; | 263 | void (*mark_idle)(int) = ia64_mark_idle; |
253 | int cpu = smp_processor_id(); | ||
254 | 264 | ||
255 | /* endless idle loop with no priority at all */ | 265 | /* endless idle loop with no priority at all */ |
256 | while (1) { | 266 | while (1) { |
@@ -261,12 +271,13 @@ cpu_idle (void) | |||
261 | while (!need_resched()) { | 271 | while (!need_resched()) { |
262 | void (*idle)(void); | 272 | void (*idle)(void); |
263 | 273 | ||
274 | if (__get_cpu_var(cpu_idle_state)) | ||
275 | __get_cpu_var(cpu_idle_state) = 0; | ||
276 | |||
277 | rmb(); | ||
264 | if (mark_idle) | 278 | if (mark_idle) |
265 | (*mark_idle)(1); | 279 | (*mark_idle)(1); |
266 | 280 | ||
267 | if (cpu_isset(cpu, cpu_idle_map)) | ||
268 | cpu_clear(cpu, cpu_idle_map); | ||
269 | rmb(); | ||
270 | idle = pm_idle; | 281 | idle = pm_idle; |
271 | if (!idle) | 282 | if (!idle) |
272 | idle = default_idle; | 283 | idle = default_idle; |
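The rewritten cpu_idle_wait() replaces the shared cpu_idle_map cpumask with a per-CPU cpu_idle_state flag: the caller pins itself to one CPU, sets the flag for every online CPU, then polls (sleeping a second per scan) until each idle loop has cleared its own flag, which is the sign that every CPU has gone around its loop once and re-read pm_idle. Below is a runnable user-space model of that handshake, offered as a sketch only -- pthreads and C11 atomics stand in for CPUs and per-CPU data, and the names mirror the kernel ones but are otherwise invented:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <unistd.h>

	#define NCPUS 4

	static atomic_int cpu_idle_state[NCPUS];	/* per-"CPU" handshake flag */
	static atomic_int stop;

	static void *idle_loop(void *arg)		/* models one CPU's cpu_idle() */
	{
		int cpu = (int)(long)arg;

		while (!atomic_load(&stop)) {
			if (atomic_load(&cpu_idle_state[cpu]))
				atomic_store(&cpu_idle_state[cpu], 0);	/* ack the waiter */
			/* ... the real loop would re-read pm_idle and call it here ... */
		}
		return NULL;
	}

	static void cpu_idle_wait(void)			/* models the new kernel routine */
	{
		int cpu, pending;

		for (cpu = 0; cpu < NCPUS; cpu++)
			atomic_store(&cpu_idle_state[cpu], 1);
		do {
			usleep(1000);			/* the kernel uses ssleep(1) */
			pending = 0;
			for (cpu = 0; cpu < NCPUS; cpu++)
				pending |= atomic_load(&cpu_idle_state[cpu]);
		} while (pending);
	}

	int main(void)
	{
		pthread_t t[NCPUS];
		long i;

		for (i = 0; i < NCPUS; i++)
			pthread_create(&t[i], NULL, idle_loop, (void *)i);
		cpu_idle_wait();
		puts("every idle loop has completed at least one pass");
		atomic_store(&stop, 1);
		for (i = 0; i < NCPUS; i++)
			pthread_join(t[i], NULL);
		return 0;
	}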
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index 55789fcd7210..907464ee7273 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/user.h> | 17 | #include <linux/user.h> |
18 | #include <linux/security.h> | 18 | #include <linux/security.h> |
19 | #include <linux/audit.h> | 19 | #include <linux/audit.h> |
20 | #include <linux/signal.h> | ||
20 | 21 | ||
21 | #include <asm/pgtable.h> | 22 | #include <asm/pgtable.h> |
22 | #include <asm/processor.h> | 23 | #include <asm/processor.h> |
@@ -1481,7 +1482,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data) | |||
1481 | case PTRACE_CONT: | 1482 | case PTRACE_CONT: |
1482 | /* restart after signal. */ | 1483 | /* restart after signal. */ |
1483 | ret = -EIO; | 1484 | ret = -EIO; |
1484 | if (data > _NSIG) | 1485 | if (!valid_signal(data)) |
1485 | goto out_tsk; | 1486 | goto out_tsk; |
1486 | if (request == PTRACE_SYSCALL) | 1487 | if (request == PTRACE_SYSCALL) |
1487 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 1488 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
@@ -1520,7 +1521,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data) | |||
1520 | /* let child execute for one instruction */ | 1521 | /* let child execute for one instruction */ |
1521 | case PTRACE_SINGLEBLOCK: | 1522 | case PTRACE_SINGLEBLOCK: |
1522 | ret = -EIO; | 1523 | ret = -EIO; |
1523 | if (data > _NSIG) | 1524 | if (!valid_signal(data)) |
1524 | goto out_tsk; | 1525 | goto out_tsk; |
1525 | 1526 | ||
1526 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 1527 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
@@ -1595,20 +1596,25 @@ syscall_trace_enter (long arg0, long arg1, long arg2, long arg3, | |||
1595 | long arg4, long arg5, long arg6, long arg7, | 1596 | long arg4, long arg5, long arg6, long arg7, |
1596 | struct pt_regs regs) | 1597 | struct pt_regs regs) |
1597 | { | 1598 | { |
1598 | long syscall; | 1599 | if (test_thread_flag(TIF_SYSCALL_TRACE) |
1600 | && (current->ptrace & PT_PTRACED)) | ||
1601 | syscall_trace(); | ||
1599 | 1602 | ||
1600 | if (unlikely(current->audit_context)) { | 1603 | if (unlikely(current->audit_context)) { |
1601 | if (IS_IA32_PROCESS(®s)) | 1604 | long syscall; |
1605 | int arch; | ||
1606 | |||
1607 | if (IS_IA32_PROCESS(®s)) { | ||
1602 | syscall = regs.r1; | 1608 | syscall = regs.r1; |
1603 | else | 1609 | arch = AUDIT_ARCH_I386; |
1610 | } else { | ||
1604 | syscall = regs.r15; | 1611 | syscall = regs.r15; |
1612 | arch = AUDIT_ARCH_IA64; | ||
1613 | } | ||
1605 | 1614 | ||
1606 | audit_syscall_entry(current, syscall, arg0, arg1, arg2, arg3); | 1615 | audit_syscall_entry(current, arch, syscall, arg0, arg1, arg2, arg3); |
1607 | } | 1616 | } |
1608 | 1617 | ||
1609 | if (test_thread_flag(TIF_SYSCALL_TRACE) | ||
1610 | && (current->ptrace & PT_PTRACED)) | ||
1611 | syscall_trace(); | ||
1612 | } | 1618 | } |
1613 | 1619 | ||
1614 | /* "asmlinkage" so the input arguments are preserved... */ | 1620 | /* "asmlinkage" so the input arguments are preserved... */ |
@@ -1619,7 +1625,7 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3, | |||
1619 | struct pt_regs regs) | 1625 | struct pt_regs regs) |
1620 | { | 1626 | { |
1621 | if (unlikely(current->audit_context)) | 1627 | if (unlikely(current->audit_context)) |
1622 | audit_syscall_exit(current, regs.r8); | 1628 | audit_syscall_exit(current, AUDITSC_RESULT(regs.r10), regs.r8); |
1623 | 1629 | ||
1624 | if (test_thread_flag(TIF_SYSCALL_TRACE) | 1630 | if (test_thread_flag(TIF_SYSCALL_TRACE) |
1625 | && (current->ptrace & PT_PTRACED)) | 1631 | && (current->ptrace & PT_PTRACED)) |
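Both the PTRACE_CONT/PTRACE_SYSCALL and PTRACE_SINGLEBLOCK paths above now go through valid_signal() from <linux/signal.h> instead of open-coding the bound check, and the audit hooks gain the personality (AUDIT_ARCH_I386 vs AUDIT_ARCH_IA64) on entry and a success/failure indication on exit. For reference, the helper is essentially the inclusive bound test below; the exact body is reproduced from memory and should be treated as an assumption, but the point of the change is that the comparison now lives in one place instead of being repeated with slightly different casts per call site:

	/* <linux/signal.h>, approximately: */
	static inline int valid_signal(unsigned long sig)
	{
		return sig <= _NSIG ? 1 : 0;
	}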
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index f05650c801d2..b7e6b4cb374b 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c | |||
@@ -4,10 +4,15 @@ | |||
4 | * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co | 4 | * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co |
5 | * David Mosberger-Tang <davidm@hpl.hp.com> | 5 | * David Mosberger-Tang <davidm@hpl.hp.com> |
6 | * Stephane Eranian <eranian@hpl.hp.com> | 6 | * Stephane Eranian <eranian@hpl.hp.com> |
7 | * Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com> | 7 | * Copyright (C) 2000, 2004 Intel Corp |
8 | * Rohit Seth <rohit.seth@intel.com> | ||
9 | * Suresh Siddha <suresh.b.siddha@intel.com> | ||
10 | * Gordon Jin <gordon.jin@intel.com> | ||
8 | * Copyright (C) 1999 VA Linux Systems | 11 | * Copyright (C) 1999 VA Linux Systems |
9 | * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> | 12 | * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> |
10 | * | 13 | * |
14 | * 12/26/04 S.Siddha, G.Jin, R.Seth | ||
15 | * Add multi-threading and multi-core detection | ||
11 | * 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo(). | 16 | * 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo(). |
12 | * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map | 17 | * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map |
13 | * 03/31/00 R.Seth cpu_initialized and current->processor fixes | 18 | * 03/31/00 R.Seth cpu_initialized and current->processor fixes |
@@ -296,6 +301,34 @@ mark_bsp_online (void) | |||
296 | #endif | 301 | #endif |
297 | } | 302 | } |
298 | 303 | ||
304 | #ifdef CONFIG_SMP | ||
305 | static void | ||
306 | check_for_logical_procs (void) | ||
307 | { | ||
308 | pal_logical_to_physical_t info; | ||
309 | s64 status; | ||
310 | |||
311 | status = ia64_pal_logical_to_phys(0, &info); | ||
312 | if (status == -1) { | ||
313 | printk(KERN_INFO "No logical to physical processor mapping " | ||
314 | "available\n"); | ||
315 | return; | ||
316 | } | ||
317 | if (status) { | ||
318 | printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n", | ||
319 | status); | ||
320 | return; | ||
321 | } | ||
322 | /* | ||
323 | * Total number of siblings that BSP has. Though not all of them | ||
324 | * may have booted successfully. The correct number of siblings | ||
325 | * booted is in info.overview_num_log. | ||
326 | */ | ||
327 | smp_num_siblings = info.overview_tpc; | ||
328 | smp_num_cpucores = info.overview_cpp; | ||
329 | } | ||
330 | #endif | ||
331 | |||
299 | void __init | 332 | void __init |
300 | setup_arch (char **cmdline_p) | 333 | setup_arch (char **cmdline_p) |
301 | { | 334 | { |
@@ -356,6 +389,19 @@ setup_arch (char **cmdline_p) | |||
356 | 389 | ||
357 | #ifdef CONFIG_SMP | 390 | #ifdef CONFIG_SMP |
358 | cpu_physical_id(0) = hard_smp_processor_id(); | 391 | cpu_physical_id(0) = hard_smp_processor_id(); |
392 | |||
393 | cpu_set(0, cpu_sibling_map[0]); | ||
394 | cpu_set(0, cpu_core_map[0]); | ||
395 | |||
396 | check_for_logical_procs(); | ||
397 | if (smp_num_cpucores > 1) | ||
398 | printk(KERN_INFO | ||
399 | "cpu package is Multi-Core capable: number of cores=%d\n", | ||
400 | smp_num_cpucores); | ||
401 | if (smp_num_siblings > 1) | ||
402 | printk(KERN_INFO | ||
403 | "cpu package is Multi-Threading capable: number of siblings=%d\n", | ||
404 | smp_num_siblings); | ||
359 | #endif | 405 | #endif |
360 | 406 | ||
361 | cpu_init(); /* initialize the bootstrap CPU */ | 407 | cpu_init(); /* initialize the bootstrap CPU */ |
@@ -459,12 +505,23 @@ show_cpuinfo (struct seq_file *m, void *v) | |||
459 | "cpu regs : %u\n" | 505 | "cpu regs : %u\n" |
460 | "cpu MHz : %lu.%06lu\n" | 506 | "cpu MHz : %lu.%06lu\n" |
461 | "itc MHz : %lu.%06lu\n" | 507 | "itc MHz : %lu.%06lu\n" |
462 | "BogoMIPS : %lu.%02lu\n\n", | 508 | "BogoMIPS : %lu.%02lu\n", |
463 | cpunum, c->vendor, family, c->model, c->revision, c->archrev, | 509 | cpunum, c->vendor, family, c->model, c->revision, c->archrev, |
464 | features, c->ppn, c->number, | 510 | features, c->ppn, c->number, |
465 | c->proc_freq / 1000000, c->proc_freq % 1000000, | 511 | c->proc_freq / 1000000, c->proc_freq % 1000000, |
466 | c->itc_freq / 1000000, c->itc_freq % 1000000, | 512 | c->itc_freq / 1000000, c->itc_freq % 1000000, |
467 | lpj*HZ/500000, (lpj*HZ/5000) % 100); | 513 | lpj*HZ/500000, (lpj*HZ/5000) % 100); |
514 | #ifdef CONFIG_SMP | ||
515 | seq_printf(m, "siblings : %u\n", c->num_log); | ||
516 | if (c->threads_per_core > 1 || c->cores_per_socket > 1) | ||
517 | seq_printf(m, | ||
518 | "physical id: %u\n" | ||
519 | "core id : %u\n" | ||
520 | "thread id : %u\n", | ||
521 | c->socket_id, c->core_id, c->thread_id); | ||
522 | #endif | ||
523 | seq_printf(m,"\n"); | ||
524 | |||
468 | return 0; | 525 | return 0; |
469 | } | 526 | } |
470 | 527 | ||
@@ -533,6 +590,14 @@ identify_cpu (struct cpuinfo_ia64 *c) | |||
533 | memcpy(c->vendor, cpuid.field.vendor, 16); | 590 | memcpy(c->vendor, cpuid.field.vendor, 16); |
534 | #ifdef CONFIG_SMP | 591 | #ifdef CONFIG_SMP |
535 | c->cpu = smp_processor_id(); | 592 | c->cpu = smp_processor_id(); |
593 | |||
594 | /* below default values will be overwritten by identify_siblings() | ||
595 | * for Multi-Threading/Multi-Core capable cpu's | ||
596 | */ | ||
597 | c->threads_per_core = c->cores_per_socket = c->num_log = 1; | ||
598 | c->socket_id = -1; | ||
599 | |||
600 | identify_siblings(c); | ||
536 | #endif | 601 | #endif |
537 | c->ppn = cpuid.field.ppn; | 602 | c->ppn = cpuid.field.ppn; |
538 | c->number = cpuid.field.number; | 603 | c->number = cpuid.field.number; |
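With check_for_logical_procs() filling smp_num_siblings and smp_num_cpucores from PAL_LOGICAL_TO_PHYSICAL, and identify_cpu() seeding per-CPU defaults before identify_siblings(), show_cpuinfo() now exposes package topology. An illustrative tail of one /proc/cpuinfo entry for a hypothetical package with 2 cores and 2 threads per core -- the field names and layout come from the seq_printf() format strings above, the values are invented:

	BogoMIPS : 3358.72
	siblings : 4
	physical id: 0
	core id : 1
	thread id : 0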
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index 6891d86937d9..499b7e5317cf 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c | |||
@@ -224,7 +224,8 @@ ia64_rt_sigreturn (struct sigscratch *scr) | |||
224 | * could be corrupted. | 224 | * could be corrupted. |
225 | */ | 225 | */ |
226 | retval = (long) &ia64_leave_kernel; | 226 | retval = (long) &ia64_leave_kernel; |
227 | if (test_thread_flag(TIF_SYSCALL_TRACE)) | 227 | if (test_thread_flag(TIF_SYSCALL_TRACE) |
228 | || test_thread_flag(TIF_SYSCALL_AUDIT)) | ||
228 | /* | 229 | /* |
229 | * strace expects to be notified after sigreturn returns even though the | 230 | * strace expects to be notified after sigreturn returns even though the |
230 | * context to which we return may not be in the middle of a syscall. | 231 | * context to which we return may not be in the middle of a syscall. |
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 5318f0cbfc26..0d5ee57c9865 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c | |||
@@ -1,14 +1,25 @@ | |||
1 | /* | 1 | /* |
2 | * SMP boot-related support | 2 | * SMP boot-related support |
3 | * | 3 | * |
4 | * Copyright (C) 1998-2003 Hewlett-Packard Co | 4 | * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co |
5 | * David Mosberger-Tang <davidm@hpl.hp.com> | 5 | * David Mosberger-Tang <davidm@hpl.hp.com> |
6 | * Copyright (C) 2001, 2004-2005 Intel Corp | ||
7 | * Rohit Seth <rohit.seth@intel.com> | ||
8 | * Suresh Siddha <suresh.b.siddha@intel.com> | ||
9 | * Gordon Jin <gordon.jin@intel.com> | ||
10 | * Ashok Raj <ashok.raj@intel.com> | ||
6 | * | 11 | * |
7 | * 01/05/16 Rohit Seth <rohit.seth@intel.com> Moved SMP booting functions from smp.c to here. | 12 | * 01/05/16 Rohit Seth <rohit.seth@intel.com> Moved SMP booting functions from smp.c to here. |
8 | * 01/04/27 David Mosberger <davidm@hpl.hp.com> Added ITC synching code. | 13 | * 01/04/27 David Mosberger <davidm@hpl.hp.com> Added ITC synching code. |
9 | * 02/07/31 David Mosberger <davidm@hpl.hp.com> Switch over to hotplug-CPU boot-sequence. | 14 | * 02/07/31 David Mosberger <davidm@hpl.hp.com> Switch over to hotplug-CPU boot-sequence. |
10 | * smp_boot_cpus()/smp_commence() is replaced by | 15 | * smp_boot_cpus()/smp_commence() is replaced by |
11 | * smp_prepare_cpus()/__cpu_up()/smp_cpus_done(). | 16 | * smp_prepare_cpus()/__cpu_up()/smp_cpus_done(). |
17 | * 04/06/21 Ashok Raj <ashok.raj@intel.com> Added CPU Hotplug Support | ||
18 | * 04/12/26 Jin Gordon <gordon.jin@intel.com> | ||
19 | * 04/12/26 Rohit Seth <rohit.seth@intel.com> | ||
20 | * Add multi-threading and multi-core detection | ||
21 | * 05/01/30 Suresh Siddha <suresh.b.siddha@intel.com> | ||
22 | * Setup cpu_sibling_map and cpu_core_map | ||
12 | */ | 23 | */ |
13 | #include <linux/config.h> | 24 | #include <linux/config.h> |
14 | 25 | ||
@@ -58,6 +69,37 @@ | |||
58 | #define Dprintk(x...) | 69 | #define Dprintk(x...) |
59 | #endif | 70 | #endif |
60 | 71 | ||
72 | #ifdef CONFIG_HOTPLUG_CPU | ||
73 | /* | ||
74 | * Store all idle threads, this can be reused instead of creating | ||
75 | * a new thread. Also avoids complicated thread destroy functionality | ||
76 | * for idle threads. | ||
77 | */ | ||
78 | struct task_struct *idle_thread_array[NR_CPUS]; | ||
79 | |||
80 | /* | ||
81 | * Global array allocated for NR_CPUS at boot time | ||
82 | */ | ||
83 | struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS]; | ||
84 | |||
85 | /* | ||
86 | * start_ap in head.S uses this to store current booting cpu | ||
87 | * info. | ||
88 | */ | ||
89 | struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0]; | ||
90 | |||
91 | #define set_brendez_area(x) (sal_state_for_booting_cpu = &sal_boot_rendez_state[(x)]); | ||
92 | |||
93 | #define get_idle_for_cpu(x) (idle_thread_array[(x)]) | ||
94 | #define set_idle_for_cpu(x,p) (idle_thread_array[(x)] = (p)) | ||
95 | |||
96 | #else | ||
97 | |||
98 | #define get_idle_for_cpu(x) (NULL) | ||
99 | #define set_idle_for_cpu(x,p) | ||
100 | #define set_brendez_area(x) | ||
101 | #endif | ||
102 | |||
61 | 103 | ||
62 | /* | 104 | /* |
63 | * ITC synchronization related stuff: | 105 | * ITC synchronization related stuff: |
@@ -90,6 +132,11 @@ EXPORT_SYMBOL(cpu_online_map); | |||
90 | cpumask_t cpu_possible_map; | 132 | cpumask_t cpu_possible_map; |
91 | EXPORT_SYMBOL(cpu_possible_map); | 133 | EXPORT_SYMBOL(cpu_possible_map); |
92 | 134 | ||
135 | cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned; | ||
136 | cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned; | ||
137 | int smp_num_siblings = 1; | ||
138 | int smp_num_cpucores = 1; | ||
139 | |||
93 | /* which logical CPU number maps to which CPU (physical APIC ID) */ | 140 | /* which logical CPU number maps to which CPU (physical APIC ID) */ |
94 | volatile int ia64_cpu_to_sapicid[NR_CPUS]; | 141 | volatile int ia64_cpu_to_sapicid[NR_CPUS]; |
95 | EXPORT_SYMBOL(ia64_cpu_to_sapicid); | 142 | EXPORT_SYMBOL(ia64_cpu_to_sapicid); |
@@ -124,7 +171,8 @@ sync_master (void *arg) | |||
124 | local_irq_save(flags); | 171 | local_irq_save(flags); |
125 | { | 172 | { |
126 | for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) { | 173 | for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) { |
127 | while (!go[MASTER]); | 174 | while (!go[MASTER]) |
175 | cpu_relax(); | ||
128 | go[MASTER] = 0; | 176 | go[MASTER] = 0; |
129 | go[SLAVE] = ia64_get_itc(); | 177 | go[SLAVE] = ia64_get_itc(); |
130 | } | 178 | } |
@@ -147,7 +195,8 @@ get_delta (long *rt, long *master) | |||
147 | for (i = 0; i < NUM_ITERS; ++i) { | 195 | for (i = 0; i < NUM_ITERS; ++i) { |
148 | t0 = ia64_get_itc(); | 196 | t0 = ia64_get_itc(); |
149 | go[MASTER] = 1; | 197 | go[MASTER] = 1; |
150 | while (!(tm = go[SLAVE])); | 198 | while (!(tm = go[SLAVE])) |
199 | cpu_relax(); | ||
151 | go[SLAVE] = 0; | 200 | go[SLAVE] = 0; |
152 | t1 = ia64_get_itc(); | 201 | t1 = ia64_get_itc(); |
153 | 202 | ||
@@ -226,7 +275,8 @@ ia64_sync_itc (unsigned int master) | |||
226 | return; | 275 | return; |
227 | } | 276 | } |
228 | 277 | ||
229 | while (go[MASTER]); /* wait for master to be ready */ | 278 | while (go[MASTER]) |
279 | cpu_relax(); /* wait for master to be ready */ | ||
230 | 280 | ||
231 | spin_lock_irqsave(&itc_sync_lock, flags); | 281 | spin_lock_irqsave(&itc_sync_lock, flags); |
232 | { | 282 | { |
@@ -345,7 +395,6 @@ start_secondary (void *unused) | |||
345 | { | 395 | { |
346 | /* Early console may use I/O ports */ | 396 | /* Early console may use I/O ports */ |
347 | ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase)); | 397 | ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase)); |
348 | |||
349 | Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id()); | 398 | Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id()); |
350 | efi_map_pal_code(); | 399 | efi_map_pal_code(); |
351 | cpu_init(); | 400 | cpu_init(); |
@@ -384,6 +433,13 @@ do_boot_cpu (int sapicid, int cpu) | |||
384 | .done = COMPLETION_INITIALIZER(c_idle.done), | 433 | .done = COMPLETION_INITIALIZER(c_idle.done), |
385 | }; | 434 | }; |
386 | DECLARE_WORK(work, do_fork_idle, &c_idle); | 435 | DECLARE_WORK(work, do_fork_idle, &c_idle); |
436 | |||
437 | c_idle.idle = get_idle_for_cpu(cpu); | ||
438 | if (c_idle.idle) { | ||
439 | init_idle(c_idle.idle, cpu); | ||
440 | goto do_rest; | ||
441 | } | ||
442 | |||
387 | /* | 443 | /* |
388 | * We can't use kernel_thread since we must avoid to reschedule the child. | 444 | * We can't use kernel_thread since we must avoid to reschedule the child. |
389 | */ | 445 | */ |
@@ -396,10 +452,15 @@ do_boot_cpu (int sapicid, int cpu) | |||
396 | 452 | ||
397 | if (IS_ERR(c_idle.idle)) | 453 | if (IS_ERR(c_idle.idle)) |
398 | panic("failed fork for CPU %d", cpu); | 454 | panic("failed fork for CPU %d", cpu); |
455 | |||
456 | set_idle_for_cpu(cpu, c_idle.idle); | ||
457 | |||
458 | do_rest: | ||
399 | task_for_booting_cpu = c_idle.idle; | 459 | task_for_booting_cpu = c_idle.idle; |
400 | 460 | ||
401 | Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid); | 461 | Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid); |
402 | 462 | ||
463 | set_brendez_area(cpu); | ||
403 | platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0); | 464 | platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0); |
404 | 465 | ||
405 | /* | 466 | /* |
@@ -552,19 +613,70 @@ void __devinit smp_prepare_boot_cpu(void) | |||
552 | cpu_set(smp_processor_id(), cpu_callin_map); | 613 | cpu_set(smp_processor_id(), cpu_callin_map); |
553 | } | 614 | } |
554 | 615 | ||
616 | /* | ||
617 | * mt_info[] is a temporary store for all info returned by | ||
618 | * PAL_LOGICAL_TO_PHYSICAL, to be copied into cpuinfo_ia64 when the | ||
619 | * specific cpu comes. | ||
620 | */ | ||
621 | static struct { | ||
622 | __u32 socket_id; | ||
623 | __u16 core_id; | ||
624 | __u16 thread_id; | ||
625 | __u16 proc_fixed_addr; | ||
626 | __u8 valid; | ||
627 | }mt_info[NR_CPUS] __devinit; | ||
628 | |||
555 | #ifdef CONFIG_HOTPLUG_CPU | 629 | #ifdef CONFIG_HOTPLUG_CPU |
556 | extern void fixup_irqs(void); | 630 | static inline void |
557 | /* must be called with cpucontrol mutex held */ | 631 | remove_from_mtinfo(int cpu) |
558 | static int __devinit cpu_enable(unsigned int cpu) | ||
559 | { | 632 | { |
560 | per_cpu(cpu_state,cpu) = CPU_UP_PREPARE; | 633 | int i; |
561 | wmb(); | ||
562 | 634 | ||
563 | while (!cpu_online(cpu)) | 635 | for_each_cpu(i) |
564 | cpu_relax(); | 636 | if (mt_info[i].valid && mt_info[i].socket_id == |
565 | return 0; | 637 | cpu_data(cpu)->socket_id) |
638 | mt_info[i].valid = 0; | ||
639 | } | ||
640 | |||
641 | static inline void | ||
642 | clear_cpu_sibling_map(int cpu) | ||
643 | { | ||
644 | int i; | ||
645 | |||
646 | for_each_cpu_mask(i, cpu_sibling_map[cpu]) | ||
647 | cpu_clear(cpu, cpu_sibling_map[i]); | ||
648 | for_each_cpu_mask(i, cpu_core_map[cpu]) | ||
649 | cpu_clear(cpu, cpu_core_map[i]); | ||
650 | |||
651 | cpu_sibling_map[cpu] = cpu_core_map[cpu] = CPU_MASK_NONE; | ||
652 | } | ||
653 | |||
654 | static void | ||
655 | remove_siblinginfo(int cpu) | ||
656 | { | ||
657 | int last = 0; | ||
658 | |||
659 | if (cpu_data(cpu)->threads_per_core == 1 && | ||
660 | cpu_data(cpu)->cores_per_socket == 1) { | ||
661 | cpu_clear(cpu, cpu_core_map[cpu]); | ||
662 | cpu_clear(cpu, cpu_sibling_map[cpu]); | ||
663 | return; | ||
664 | } | ||
665 | |||
666 | last = (cpus_weight(cpu_core_map[cpu]) == 1 ? 1 : 0); | ||
667 | |||
668 | /* remove it from all sibling map's */ | ||
669 | clear_cpu_sibling_map(cpu); | ||
670 | |||
671 | /* if this cpu is the last in the core group, remove all its info | ||
672 | * from mt_info structure | ||
673 | */ | ||
674 | if (last) | ||
675 | remove_from_mtinfo(cpu); | ||
566 | } | 676 | } |
567 | 677 | ||
678 | extern void fixup_irqs(void); | ||
679 | /* must be called with cpucontrol mutex held */ | ||
568 | int __cpu_disable(void) | 680 | int __cpu_disable(void) |
569 | { | 681 | { |
570 | int cpu = smp_processor_id(); | 682 | int cpu = smp_processor_id(); |
@@ -575,9 +687,10 @@ int __cpu_disable(void) | |||
575 | if (cpu == 0) | 687 | if (cpu == 0) |
576 | return -EBUSY; | 688 | return -EBUSY; |
577 | 689 | ||
690 | remove_siblinginfo(cpu); | ||
578 | fixup_irqs(); | 691 | fixup_irqs(); |
579 | local_flush_tlb_all(); | 692 | local_flush_tlb_all(); |
580 | printk ("Disabled cpu %u\n", smp_processor_id()); | 693 | cpu_clear(cpu, cpu_callin_map); |
581 | return 0; | 694 | return 0; |
582 | } | 695 | } |
583 | 696 | ||
@@ -589,12 +702,7 @@ void __cpu_die(unsigned int cpu) | |||
589 | /* They ack this in play_dead by setting CPU_DEAD */ | 702 | /* They ack this in play_dead by setting CPU_DEAD */ |
590 | if (per_cpu(cpu_state, cpu) == CPU_DEAD) | 703 | if (per_cpu(cpu_state, cpu) == CPU_DEAD) |
591 | { | 704 | { |
592 | /* | 705 | printk ("CPU %d is now offline\n", cpu); |
593 | * TBD: Enable this when physical removal | ||
594 | * or when we put the processor is put in | ||
595 | * SAL_BOOT_RENDEZ mode | ||
596 | * cpu_clear(cpu, cpu_callin_map); | ||
597 | */ | ||
598 | return; | 706 | return; |
599 | } | 707 | } |
600 | msleep(100); | 708 | msleep(100); |
@@ -602,11 +710,6 @@ void __cpu_die(unsigned int cpu) | |||
602 | printk(KERN_ERR "CPU %u didn't die...\n", cpu); | 710 | printk(KERN_ERR "CPU %u didn't die...\n", cpu); |
603 | } | 711 | } |
604 | #else /* !CONFIG_HOTPLUG_CPU */ | 712 | #else /* !CONFIG_HOTPLUG_CPU */ |
605 | static int __devinit cpu_enable(unsigned int cpu) | ||
606 | { | ||
607 | return 0; | ||
608 | } | ||
609 | |||
610 | int __cpu_disable(void) | 713 | int __cpu_disable(void) |
611 | { | 714 | { |
612 | return -ENOSYS; | 715 | return -ENOSYS; |
@@ -637,6 +740,23 @@ smp_cpus_done (unsigned int dummy) | |||
637 | (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100); | 740 | (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100); |
638 | } | 741 | } |
639 | 742 | ||
743 | static inline void __devinit | ||
744 | set_cpu_sibling_map(int cpu) | ||
745 | { | ||
746 | int i; | ||
747 | |||
748 | for_each_online_cpu(i) { | ||
749 | if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) { | ||
750 | cpu_set(i, cpu_core_map[cpu]); | ||
751 | cpu_set(cpu, cpu_core_map[i]); | ||
752 | if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) { | ||
753 | cpu_set(i, cpu_sibling_map[cpu]); | ||
754 | cpu_set(cpu, cpu_sibling_map[i]); | ||
755 | } | ||
756 | } | ||
757 | } | ||
758 | } | ||
759 | |||
640 | int __devinit | 760 | int __devinit |
641 | __cpu_up (unsigned int cpu) | 761 | __cpu_up (unsigned int cpu) |
642 | { | 762 | { |
@@ -648,21 +768,26 @@ __cpu_up (unsigned int cpu) | |||
648 | return -EINVAL; | 768 | return -EINVAL; |
649 | 769 | ||
650 | /* | 770 | /* |
651 | * Already booted.. just enable and get outa idle lool | 771 | * Already booted cpu? not valid anymore since we dont |
772 | * do idle loop tightspin anymore. | ||
652 | */ | 773 | */ |
653 | if (cpu_isset(cpu, cpu_callin_map)) | 774 | if (cpu_isset(cpu, cpu_callin_map)) |
654 | { | 775 | return -EINVAL; |
655 | cpu_enable(cpu); | 776 | |
656 | local_irq_enable(); | ||
657 | while (!cpu_isset(cpu, cpu_online_map)) | ||
658 | mb(); | ||
659 | return 0; | ||
660 | } | ||
661 | /* Processor goes to start_secondary(), sets online flag */ | 777 | /* Processor goes to start_secondary(), sets online flag */ |
662 | ret = do_boot_cpu(sapicid, cpu); | 778 | ret = do_boot_cpu(sapicid, cpu); |
663 | if (ret < 0) | 779 | if (ret < 0) |
664 | return ret; | 780 | return ret; |
665 | 781 | ||
782 | if (cpu_data(cpu)->threads_per_core == 1 && | ||
783 | cpu_data(cpu)->cores_per_socket == 1) { | ||
784 | cpu_set(cpu, cpu_sibling_map[cpu]); | ||
785 | cpu_set(cpu, cpu_core_map[cpu]); | ||
786 | return 0; | ||
787 | } | ||
788 | |||
789 | set_cpu_sibling_map(cpu); | ||
790 | |||
666 | return 0; | 791 | return 0; |
667 | } | 792 | } |
668 | 793 | ||
@@ -690,3 +815,106 @@ init_smp_config(void) | |||
690 | ia64_sal_strerror(sal_ret)); | 815 | ia64_sal_strerror(sal_ret)); |
691 | } | 816 | } |
692 | 817 | ||
818 | static inline int __devinit | ||
819 | check_for_mtinfo_index(void) | ||
820 | { | ||
821 | int i; | ||
822 | |||
823 | for_each_cpu(i) | ||
824 | if (!mt_info[i].valid) | ||
825 | return i; | ||
826 | |||
827 | return -1; | ||
828 | } | ||
829 | |||
830 | /* | ||
831 | * Search the mt_info to find out if this socket's cid/tid information is | ||
832 | * cached or not. If the socket exists, fill in the core_id and thread_id | ||
833 | * in cpuinfo | ||
834 | */ | ||
835 | static int __devinit | ||
836 | check_for_new_socket(__u16 logical_address, struct cpuinfo_ia64 *c) | ||
837 | { | ||
838 | int i; | ||
839 | __u32 sid = c->socket_id; | ||
840 | |||
841 | for_each_cpu(i) { | ||
842 | if (mt_info[i].valid && mt_info[i].proc_fixed_addr == logical_address | ||
843 | && mt_info[i].socket_id == sid) { | ||
844 | c->core_id = mt_info[i].core_id; | ||
845 | c->thread_id = mt_info[i].thread_id; | ||
846 | return 1; /* not a new socket */ | ||
847 | } | ||
848 | } | ||
849 | return 0; | ||
850 | } | ||
851 | |||
852 | /* | ||
853 | * identify_siblings(cpu) gets called from identify_cpu. This populates the | ||
854 | * information related to logical execution units in per_cpu_data structure. | ||
855 | */ | ||
856 | void __devinit | ||
857 | identify_siblings(struct cpuinfo_ia64 *c) | ||
858 | { | ||
859 | s64 status; | ||
860 | u16 pltid; | ||
861 | u64 proc_fixed_addr; | ||
862 | int count, i; | ||
863 | pal_logical_to_physical_t info; | ||
864 | |||
865 | if (smp_num_cpucores == 1 && smp_num_siblings == 1) | ||
866 | return; | ||
867 | |||
868 | if ((status = ia64_pal_logical_to_phys(0, &info)) != PAL_STATUS_SUCCESS) { | ||
869 | printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n", | ||
870 | status); | ||
871 | return; | ||
872 | } | ||
873 | if ((status = ia64_sal_physical_id_info(&pltid)) != PAL_STATUS_SUCCESS) { | ||
874 | printk(KERN_ERR "ia64_sal_pltid failed with %ld\n", status); | ||
875 | return; | ||
876 | } | ||
877 | if ((status = ia64_pal_fixed_addr(&proc_fixed_addr)) != PAL_STATUS_SUCCESS) { | ||
878 | printk(KERN_ERR "ia64_pal_fixed_addr failed with %ld\n", status); | ||
879 | return; | ||
880 | } | ||
881 | |||
882 | c->socket_id = (pltid << 8) | info.overview_ppid; | ||
883 | c->cores_per_socket = info.overview_cpp; | ||
884 | c->threads_per_core = info.overview_tpc; | ||
885 | count = c->num_log = info.overview_num_log; | ||
886 | |||
887 | /* If the thread and core id information is already cached, then | ||
888 | * we will simply update cpu_info and return. Otherwise, we will | ||
889 | * do the PAL calls and cache core and thread id's of all the siblings. | ||
890 | */ | ||
891 | if (check_for_new_socket(proc_fixed_addr, c)) | ||
892 | return; | ||
893 | |||
894 | for (i = 0; i < count; i++) { | ||
895 | int index; | ||
896 | |||
897 | if (i && (status = ia64_pal_logical_to_phys(i, &info)) | ||
898 | != PAL_STATUS_SUCCESS) { | ||
899 | printk(KERN_ERR "ia64_pal_logical_to_phys failed" | ||
900 | " with %ld\n", status); | ||
901 | return; | ||
902 | } | ||
903 | if (info.log2_la == proc_fixed_addr) { | ||
904 | c->core_id = info.log1_cid; | ||
905 | c->thread_id = info.log1_tid; | ||
906 | } | ||
907 | |||
908 | index = check_for_mtinfo_index(); | ||
909 | /* We will not do the mt_info caching optimization in this case. | ||
910 | */ | ||
911 | if (index < 0) | ||
912 | continue; | ||
913 | |||
914 | mt_info[index].valid = 1; | ||
915 | mt_info[index].socket_id = c->socket_id; | ||
916 | mt_info[index].core_id = info.log1_cid; | ||
917 | mt_info[index].thread_id = info.log1_tid; | ||
918 | mt_info[index].proc_fixed_addr = info.log2_la; | ||
919 | } | ||
920 | } | ||
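The invariants behind the new maps are worth stating once: cpu_core_map[cpu] ends up holding every online CPU that reports the same socket_id, cpu_sibling_map[cpu] holds the subset that also shares core_id, and a CPU on a single-thread/single-core package is simply made its own only sibling in __cpu_up(). mt_info[] is only a cache of the PAL answers, so later siblings of an already-seen socket skip the per-CPU PAL calls, and remove_siblinginfo() invalidates the cache when the last CPU of a core group goes offline. A hedged sketch of how a consumer could read the maps -- the helper names are hypothetical; only the two arrays and the existing cpus_weight() cpumask accessor come from the code above:

	/* hypothetical consumers of the new topology maps */
	static inline int threads_on_core(int cpu)
	{
		return cpus_weight(cpu_sibling_map[cpu]);	/* includes cpu itself */
	}

	static inline int cpus_in_package(int cpu)
	{
		return cpus_weight(cpu_core_map[cpu]);
	}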
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c index 3ac216e1c8bb..a8cf6d8a509c 100644 --- a/arch/ia64/kernel/sys_ia64.c +++ b/arch/ia64/kernel/sys_ia64.c | |||
@@ -93,20 +93,6 @@ sys_getpagesize (void) | |||
93 | } | 93 | } |
94 | 94 | ||
95 | asmlinkage unsigned long | 95 | asmlinkage unsigned long |
96 | ia64_shmat (int shmid, void __user *shmaddr, int shmflg) | ||
97 | { | ||
98 | unsigned long raddr; | ||
99 | int retval; | ||
100 | |||
101 | retval = do_shmat(shmid, shmaddr, shmflg, &raddr); | ||
102 | if (retval < 0) | ||
103 | return retval; | ||
104 | |||
105 | force_successful_syscall_return(); | ||
106 | return raddr; | ||
107 | } | ||
108 | |||
109 | asmlinkage unsigned long | ||
110 | ia64_brk (unsigned long brk) | 96 | ia64_brk (unsigned long brk) |
111 | { | 97 | { |
112 | unsigned long rlim, retval, newbrk, oldbrk; | 98 | unsigned long rlim, retval, newbrk, oldbrk; |
diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c index d494ff647cac..2776a074c6f1 100644 --- a/arch/ia64/kernel/unwind.c +++ b/arch/ia64/kernel/unwind.c | |||
@@ -1943,23 +1943,30 @@ EXPORT_SYMBOL(unw_unwind); | |||
1943 | int | 1943 | int |
1944 | unw_unwind_to_user (struct unw_frame_info *info) | 1944 | unw_unwind_to_user (struct unw_frame_info *info) |
1945 | { | 1945 | { |
1946 | unsigned long ip, sp; | 1946 | unsigned long ip, sp, pr = 0; |
1947 | 1947 | ||
1948 | while (unw_unwind(info) >= 0) { | 1948 | while (unw_unwind(info) >= 0) { |
1949 | if (unw_get_rp(info, &ip) < 0) { | ||
1950 | unw_get_ip(info, &ip); | ||
1951 | UNW_DPRINT(0, "unwind.%s: failed to read return pointer (ip=0x%lx)\n", | ||
1952 | __FUNCTION__, ip); | ||
1953 | return -1; | ||
1954 | } | ||
1955 | unw_get_sp(info, &sp); | 1949 | unw_get_sp(info, &sp); |
1956 | if (sp >= (unsigned long)info->task + IA64_STK_OFFSET) | 1950 | if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp) |
1951 | < IA64_PT_REGS_SIZE) { | ||
1952 | UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n", | ||
1953 | __FUNCTION__); | ||
1957 | break; | 1954 | break; |
1958 | if (ip < FIXADDR_USER_END) | 1955 | } |
1956 | if (unw_is_intr_frame(info) && | ||
1957 | (pr & (1UL << PRED_USER_STACK))) | ||
1959 | return 0; | 1958 | return 0; |
1959 | if (unw_get_pr (info, &pr) < 0) { | ||
1960 | unw_get_rp(info, &ip); | ||
1961 | UNW_DPRINT(0, "unwind.%s: failed to read " | ||
1962 | "predicate register (ip=0x%lx)\n", | ||
1963 | __FUNCTION__, ip); | ||
1964 | return -1; | ||
1965 | } | ||
1960 | } | 1966 | } |
1961 | unw_get_ip(info, &ip); | 1967 | unw_get_ip(info, &ip); |
1962 | UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n", __FUNCTION__, ip); | 1968 | UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n", |
1969 | __FUNCTION__, ip); | ||
1963 | return -1; | 1970 | return -1; |
1964 | } | 1971 | } |
1965 | EXPORT_SYMBOL(unw_unwind_to_user); | 1972 | EXPORT_SYMBOL(unw_unwind_to_user); |
diff --git a/arch/ia64/lib/flush.S b/arch/ia64/lib/flush.S index 29c802b19669..a1af9146cfdb 100644 --- a/arch/ia64/lib/flush.S +++ b/arch/ia64/lib/flush.S | |||
@@ -1,8 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * Cache flushing routines. | 2 | * Cache flushing routines. |
3 | * | 3 | * |
4 | * Copyright (C) 1999-2001 Hewlett-Packard Co | 4 | * Copyright (C) 1999-2001, 2005 Hewlett-Packard Co |
5 | * Copyright (C) 1999-2001 David Mosberger-Tang <davidm@hpl.hp.com> | 5 | * David Mosberger-Tang <davidm@hpl.hp.com> |
6 | */ | 6 | */ |
7 | #include <asm/asmmacro.h> | 7 | #include <asm/asmmacro.h> |
8 | #include <asm/page.h> | 8 | #include <asm/page.h> |
@@ -26,7 +26,7 @@ GLOBAL_ENTRY(flush_icache_range) | |||
26 | 26 | ||
27 | mov ar.lc=r8 | 27 | mov ar.lc=r8 |
28 | ;; | 28 | ;; |
29 | .Loop: fc in0 // issuable on M0 only | 29 | .Loop: fc.i in0 // issuable on M2 only |
30 | add in0=32,in0 | 30 | add in0=32,in0 |
31 | br.cloop.sptk.few .Loop | 31 | br.cloop.sptk.few .Loop |
32 | ;; | 32 | ;; |
diff --git a/arch/ia64/lib/memcpy_mck.S b/arch/ia64/lib/memcpy_mck.S index 6f26ef7cc236..6f308e62c137 100644 --- a/arch/ia64/lib/memcpy_mck.S +++ b/arch/ia64/lib/memcpy_mck.S | |||
@@ -75,6 +75,7 @@ GLOBAL_ENTRY(memcpy) | |||
75 | mov f6=f0 | 75 | mov f6=f0 |
76 | br.cond.sptk .common_code | 76 | br.cond.sptk .common_code |
77 | ;; | 77 | ;; |
78 | END(memcpy) | ||
78 | GLOBAL_ENTRY(__copy_user) | 79 | GLOBAL_ENTRY(__copy_user) |
79 | .prologue | 80 | .prologue |
80 | // check dest alignment | 81 | // check dest alignment |
@@ -300,7 +301,7 @@ EK(.ex_handler, (p[D]) st8 [dst1] = t15, 4*8) | |||
300 | add src_pre_mem=0,src0 // prefetch src pointer | 301 | add src_pre_mem=0,src0 // prefetch src pointer |
301 | add dst_pre_mem=0,dst0 // prefetch dest pointer | 302 | add dst_pre_mem=0,dst0 // prefetch dest pointer |
302 | and src0=-8,src0 // 1st src pointer | 303 | and src0=-8,src0 // 1st src pointer |
303 | (p7) mov ar.lc = r21 | 304 | (p7) mov ar.lc = cnt |
304 | (p8) mov ar.lc = r0 | 305 | (p8) mov ar.lc = r0 |
305 | ;; | 306 | ;; |
306 | TEXT_ALIGN(32) | 307 | TEXT_ALIGN(32) |
@@ -524,7 +525,6 @@ EK(.ex_handler, (p17) st8 [dst1]=r39,8); \ | |||
524 | #undef B | 525 | #undef B |
525 | #undef C | 526 | #undef C |
526 | #undef D | 527 | #undef D |
527 | END(memcpy) | ||
528 | 528 | ||
529 | /* | 529 | /* |
530 | * Due to lack of local tag support in gcc 2.x assembler, it is not clear which | 530 | * Due to lack of local tag support in gcc 2.x assembler, it is not clear which |
diff --git a/arch/ia64/lib/memset.S b/arch/ia64/lib/memset.S index bd8cf907fe22..f26c16aefb1c 100644 --- a/arch/ia64/lib/memset.S +++ b/arch/ia64/lib/memset.S | |||
@@ -57,10 +57,10 @@ GLOBAL_ENTRY(memset) | |||
57 | { .mmi | 57 | { .mmi |
58 | .prologue | 58 | .prologue |
59 | alloc tmp = ar.pfs, 3, 0, 0, 0 | 59 | alloc tmp = ar.pfs, 3, 0, 0, 0 |
60 | .body | ||
61 | lfetch.nt1 [dest] // | 60 | lfetch.nt1 [dest] // |
62 | .save ar.lc, save_lc | 61 | .save ar.lc, save_lc |
63 | mov.i save_lc = ar.lc | 62 | mov.i save_lc = ar.lc |
63 | .body | ||
64 | } { .mmi | 64 | } { .mmi |
65 | mov ret0 = dest // return value | 65 | mov ret0 = dest // return value |
66 | cmp.ne p_nz, p_zr = value, r0 // use stf.spill if value is zero | 66 | cmp.ne p_nz, p_zr = value, r0 // use stf.spill if value is zero |
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c index 6daf15ac8940..91a055f5731f 100644 --- a/arch/ia64/mm/contig.c +++ b/arch/ia64/mm/contig.c | |||
@@ -61,7 +61,8 @@ show_mem (void) | |||
61 | printk("%d reserved pages\n", reserved); | 61 | printk("%d reserved pages\n", reserved); |
62 | printk("%d pages shared\n", shared); | 62 | printk("%d pages shared\n", shared); |
63 | printk("%d pages swap cached\n", cached); | 63 | printk("%d pages swap cached\n", cached); |
64 | printk("%ld pages in page table cache\n", pgtable_cache_size); | 64 | printk("%ld pages in page table cache\n", |
65 | pgtable_quicklist_total_size()); | ||
65 | } | 66 | } |
66 | 67 | ||
67 | /* physical address where the bootmem map is located */ | 68 | /* physical address where the bootmem map is located */ |
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index 3456a9b6971e..c00710929390 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c | |||
@@ -582,7 +582,8 @@ void show_mem(void) | |||
582 | printk("%d reserved pages\n", total_reserved); | 582 | printk("%d reserved pages\n", total_reserved); |
583 | printk("%d pages shared\n", total_shared); | 583 | printk("%d pages shared\n", total_shared); |
584 | printk("%d pages swap cached\n", total_cached); | 584 | printk("%d pages swap cached\n", total_cached); |
585 | printk("Total of %ld pages in page table cache\n", pgtable_cache_size); | 585 | printk("Total of %ld pages in page table cache\n", |
586 | pgtable_quicklist_total_size()); | ||
586 | printk("%d free buffer pages\n", nr_free_buffer_pages()); | 587 | printk("%d free buffer pages\n", nr_free_buffer_pages()); |
587 | } | 588 | } |
588 | 589 | ||
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index da859125aaef..4174ec999dde 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c | |||
@@ -209,10 +209,13 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re | |||
209 | } | 209 | } |
210 | 210 | ||
211 | no_context: | 211 | no_context: |
212 | if (isr & IA64_ISR_SP) { | 212 | if ((isr & IA64_ISR_SP) |
213 | || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) | ||
214 | { | ||
213 | /* | 215 | /* |
214 | * This fault was due to a speculative load set the "ed" bit in the psr to | 216 | * This fault was due to a speculative load or lfetch.fault, set the "ed" |
215 | * ensure forward progress (target register will get a NaT). | 217 | * bit in the psr to ensure forward progress. (Target register will get a |
218 | * NaT for ld.s, lfetch will be canceled.) | ||
216 | */ | 219 | */ |
217 | ia64_psr(regs)->ed = 1; | 220 | ia64_psr(regs)->ed = 1; |
218 | return; | 221 | return; |
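The widened no_context test is easier to read as a small decision table; the "exception deferral" reading of psr.ed is standard IA-64 terminology, everything else restates the comment in the hunk:

	/* effect of psr.ed = 1 on the retried instruction (summary of the
	 * comment above; no new behaviour is implied):
	 *   speculative load (ld.s)  -> completes, target register gets a NaT
	 *   lfetch.fault             -> cancelled, no fault and no prefetch
	 * either way the faulting context makes forward progress without a
	 * signal being delivered.
	 */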
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 65cf839573ea..547785e3cba2 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c | |||
@@ -39,6 +39,9 @@ | |||
39 | 39 | ||
40 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | 40 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); |
41 | 41 | ||
42 | DEFINE_PER_CPU(unsigned long *, __pgtable_quicklist); | ||
43 | DEFINE_PER_CPU(long, __pgtable_quicklist_size); | ||
44 | |||
42 | extern void ia64_tlb_init (void); | 45 | extern void ia64_tlb_init (void); |
43 | 46 | ||
44 | unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL; | 47 | unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL; |
@@ -50,27 +53,53 @@ struct page *vmem_map; | |||
50 | EXPORT_SYMBOL(vmem_map); | 53 | EXPORT_SYMBOL(vmem_map); |
51 | #endif | 54 | #endif |
52 | 55 | ||
53 | static int pgt_cache_water[2] = { 25, 50 }; | 56 | struct page *zero_page_memmap_ptr; /* map entry for zero page */ |
54 | |||
55 | struct page *zero_page_memmap_ptr; /* map entry for zero page */ | ||
56 | EXPORT_SYMBOL(zero_page_memmap_ptr); | 57 | EXPORT_SYMBOL(zero_page_memmap_ptr); |
57 | 58 | ||
59 | #define MIN_PGT_PAGES 25UL | ||
60 | #define MAX_PGT_FREES_PER_PASS 16L | ||
61 | #define PGT_FRACTION_OF_NODE_MEM 16 | ||
62 | |||
63 | static inline long | ||
64 | max_pgt_pages(void) | ||
65 | { | ||
66 | u64 node_free_pages, max_pgt_pages; | ||
67 | |||
68 | #ifndef CONFIG_NUMA | ||
69 | node_free_pages = nr_free_pages(); | ||
70 | #else | ||
71 | node_free_pages = nr_free_pages_pgdat(NODE_DATA(numa_node_id())); | ||
72 | #endif | ||
73 | max_pgt_pages = node_free_pages / PGT_FRACTION_OF_NODE_MEM; | ||
74 | max_pgt_pages = max(max_pgt_pages, MIN_PGT_PAGES); | ||
75 | return max_pgt_pages; | ||
76 | } | ||
77 | |||
78 | static inline long | ||
79 | min_pages_to_free(void) | ||
80 | { | ||
81 | long pages_to_free; | ||
82 | |||
83 | pages_to_free = pgtable_quicklist_size - max_pgt_pages(); | ||
84 | pages_to_free = min(pages_to_free, MAX_PGT_FREES_PER_PASS); | ||
85 | return pages_to_free; | ||
86 | } | ||
87 | |||
58 | void | 88 | void |
59 | check_pgt_cache (void) | 89 | check_pgt_cache(void) |
60 | { | 90 | { |
61 | int low, high; | 91 | long pages_to_free; |
62 | 92 | ||
63 | low = pgt_cache_water[0]; | 93 | if (unlikely(pgtable_quicklist_size <= MIN_PGT_PAGES)) |
64 | high = pgt_cache_water[1]; | 94 | return; |
65 | 95 | ||
66 | preempt_disable(); | 96 | preempt_disable(); |
67 | if (pgtable_cache_size > (u64) high) { | 97 | while (unlikely((pages_to_free = min_pages_to_free()) > 0)) { |
68 | do { | 98 | while (pages_to_free--) { |
69 | if (pgd_quicklist) | 99 | free_page((unsigned long)pgtable_quicklist_alloc()); |
70 | free_page((unsigned long)pgd_alloc_one_fast(NULL)); | 100 | } |
71 | if (pmd_quicklist) | 101 | preempt_enable(); |
72 | free_page((unsigned long)pmd_alloc_one_fast(NULL, 0)); | 102 | preempt_disable(); |
73 | } while (pgtable_cache_size > (u64) low); | ||
74 | } | 103 | } |
75 | preempt_enable(); | 104 | preempt_enable(); |
76 | } | 105 | } |
@@ -523,11 +552,14 @@ void | |||
523 | mem_init (void) | 552 | mem_init (void) |
524 | { | 553 | { |
525 | long reserved_pages, codesize, datasize, initsize; | 554 | long reserved_pages, codesize, datasize, initsize; |
526 | unsigned long num_pgt_pages; | ||
527 | pg_data_t *pgdat; | 555 | pg_data_t *pgdat; |
528 | int i; | 556 | int i; |
529 | static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel; | 557 | static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel; |
530 | 558 | ||
559 | BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE); | ||
560 | BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE); | ||
561 | BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE); | ||
562 | |||
531 | #ifdef CONFIG_PCI | 563 | #ifdef CONFIG_PCI |
532 | /* | 564 | /* |
533 | * This needs to be called _after_ the command line has been parsed but _before_ | 565 | * This needs to be called _after_ the command line has been parsed but _before_ |
@@ -564,18 +596,6 @@ mem_init (void) | |||
564 | num_physpages << (PAGE_SHIFT - 10), codesize >> 10, | 596 | num_physpages << (PAGE_SHIFT - 10), codesize >> 10, |
565 | reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10); | 597 | reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10); |
566 | 598 | ||
567 | /* | ||
568 | * Allow for enough (cached) page table pages so that we can map the entire memory | ||
569 | * at least once. Each task also needs a couple of page tables pages, so add in a | ||
570 | * fudge factor for that (don't use "threads-max" here; that would be wrong!). | ||
571 | * Don't allow the cache to be more than 10% of total memory, though. | ||
572 | */ | ||
573 | # define NUM_TASKS 500 /* typical number of tasks */ | ||
574 | num_pgt_pages = nr_free_pages() / PTRS_PER_PGD + NUM_TASKS; | ||
575 | if (num_pgt_pages > nr_free_pages() / 10) | ||
576 | num_pgt_pages = nr_free_pages() / 10; | ||
577 | if (num_pgt_pages > (u64) pgt_cache_water[1]) | ||
578 | pgt_cache_water[1] = num_pgt_pages; | ||
579 | 599 | ||
580 | /* | 600 | /* |
581 | * For fsyscall entrpoints with no light-weight handler, use the ordinary | 601 | * For fsyscall entrpoints with no light-weight handler, use the ordinary |
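The fixed water marks of the old pgt_cache_water[] scheme (and the boot-time bump that used to live in mem_init()) are replaced by a target recomputed from the node's free memory on every check_pgt_cache() call. A worked example with the constants above makes the trimming policy concrete; the free-page count is invented:

	node_free_pages        = 1,000,000
	max_pgt_pages          = 1,000,000 / PGT_FRACTION_OF_NODE_MEM(16) = 62,500
	pgtable_quicklist_size = 62,540  ->  40 pages over target
	pass 1: free min(40, MAX_PGT_FREES_PER_PASS=16) = 16 pages
	pass 2: free 16 pages
	pass 3: free the remaining 8 pages

	Preemption is re-enabled between passes, so no single pass frees more
	than 16 page-table pages with preemption disabled, and nothing is
	trimmed at all while the quicklist holds MIN_PGT_PAGES (25) or fewer.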
diff --git a/arch/ia64/sn/include/pci/pcibr_provider.h b/arch/ia64/sn/include/pci/pcibr_provider.h index b1f05ffec70b..1cd291d8badd 100644 --- a/arch/ia64/sn/include/pci/pcibr_provider.h +++ b/arch/ia64/sn/include/pci/pcibr_provider.h | |||
@@ -123,9 +123,11 @@ pcibr_lock(struct pcibus_info *pcibus_info) | |||
123 | } | 123 | } |
124 | #define pcibr_unlock(pcibus_info, flag) spin_unlock_irqrestore(&pcibus_info->pbi_lock, flag) | 124 | #define pcibr_unlock(pcibus_info, flag) spin_unlock_irqrestore(&pcibus_info->pbi_lock, flag) |
125 | 125 | ||
126 | extern int pcibr_init_provider(void); | ||
126 | extern void *pcibr_bus_fixup(struct pcibus_bussoft *); | 127 | extern void *pcibr_bus_fixup(struct pcibus_bussoft *); |
127 | extern uint64_t pcibr_dma_map(struct pcidev_info *, unsigned long, size_t, unsigned int); | 128 | extern dma_addr_t pcibr_dma_map(struct pci_dev *, unsigned long, size_t); |
128 | extern void pcibr_dma_unmap(struct pcidev_info *, dma_addr_t, int); | 129 | extern dma_addr_t pcibr_dma_map_consistent(struct pci_dev *, unsigned long, size_t); |
130 | extern void pcibr_dma_unmap(struct pci_dev *, dma_addr_t, int); | ||
129 | 131 | ||
130 | /* | 132 | /* |
131 | * prototypes for the bridge asic register access routines in pcibr_reg.c | 133 | * prototypes for the bridge asic register access routines in pcibr_reg.c |
diff --git a/arch/ia64/sn/include/pci/pcibus_provider_defs.h b/arch/ia64/sn/include/pci/pcibus_provider_defs.h deleted file mode 100644 index 07065615bbea..000000000000 --- a/arch/ia64/sn/include/pci/pcibus_provider_defs.h +++ /dev/null | |||
@@ -1,43 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | #ifndef _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H | ||
9 | #define _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H | ||
10 | |||
11 | /* | ||
12 | * SN pci asic types. Do not ever renumber these or reuse values. The | ||
13 | * values must agree with what prom thinks they are. | ||
14 | */ | ||
15 | |||
16 | #define PCIIO_ASIC_TYPE_UNKNOWN 0 | ||
17 | #define PCIIO_ASIC_TYPE_PPB 1 | ||
18 | #define PCIIO_ASIC_TYPE_PIC 2 | ||
19 | #define PCIIO_ASIC_TYPE_TIOCP 3 | ||
20 | |||
21 | /* | ||
22 | * Common pciio bus provider data. There should be one of these as the | ||
23 | * first field in any pciio based provider soft structure (e.g. pcibr_soft | ||
24 | * tioca_soft, etc). | ||
25 | */ | ||
26 | |||
27 | struct pcibus_bussoft { | ||
28 | uint32_t bs_asic_type; /* chipset type */ | ||
29 | uint32_t bs_xid; /* xwidget id */ | ||
30 | uint64_t bs_persist_busnum; /* Persistent Bus Number */ | ||
31 | uint64_t bs_legacy_io; /* legacy io pio addr */ | ||
32 | uint64_t bs_legacy_mem; /* legacy mem pio addr */ | ||
33 | uint64_t bs_base; /* widget base */ | ||
34 | struct xwidget_info *bs_xwidget_info; | ||
35 | }; | ||
36 | |||
37 | /* | ||
38 | * DMA mapping flags | ||
39 | */ | ||
40 | |||
41 | #define SN_PCIDMA_CONSISTENT 0x0001 | ||
42 | |||
43 | #endif /* _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H */ | ||
diff --git a/arch/ia64/sn/include/pci/pcidev.h b/arch/ia64/sn/include/pci/pcidev.h deleted file mode 100644 index 81eb95d3bf47..000000000000 --- a/arch/ia64/sn/include/pci/pcidev.h +++ /dev/null | |||
@@ -1,54 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | #ifndef _ASM_IA64_SN_PCI_PCIDEV_H | ||
9 | #define _ASM_IA64_SN_PCI_PCIDEV_H | ||
10 | |||
11 | #include <linux/pci.h> | ||
12 | |||
13 | extern struct sn_irq_info **sn_irq; | ||
14 | |||
15 | #define SN_PCIDEV_INFO(pci_dev) \ | ||
16 | ((struct pcidev_info *)(pci_dev)->sysdata) | ||
17 | |||
18 | /* | ||
19 | * Given a pci_bus, return the sn pcibus_bussoft struct. Note that | ||
20 | * this only works for root busses, not for busses represented by PPB's. | ||
21 | */ | ||
22 | |||
23 | #define SN_PCIBUS_BUSSOFT(pci_bus) \ | ||
24 | ((struct pcibus_bussoft *)(PCI_CONTROLLER((pci_bus))->platform_data)) | ||
25 | |||
26 | /* | ||
27 | * Given a struct pci_dev, return the sn pcibus_bussoft struct. Note | ||
28 | * that this is not equivalent to SN_PCIBUS_BUSSOFT(pci_dev->bus) due | ||
29 | * due to possible PPB's in the path. | ||
30 | */ | ||
31 | |||
32 | #define SN_PCIDEV_BUSSOFT(pci_dev) \ | ||
33 | (SN_PCIDEV_INFO(pci_dev)->pdi_host_pcidev_info->pdi_pcibus_info) | ||
34 | |||
35 | #define PCIIO_BUS_NONE 255 /* bus 255 reserved */ | ||
36 | #define PCIIO_SLOT_NONE 255 | ||
37 | #define PCIIO_FUNC_NONE 255 | ||
38 | #define PCIIO_VENDOR_ID_NONE (-1) | ||
39 | |||
40 | struct pcidev_info { | ||
41 | uint64_t pdi_pio_mapped_addr[7]; /* 6 BARs PLUS 1 ROM */ | ||
42 | uint64_t pdi_slot_host_handle; /* Bus and devfn Host pci_dev */ | ||
43 | |||
44 | struct pcibus_bussoft *pdi_pcibus_info; /* Kernel common bus soft */ | ||
45 | struct pcidev_info *pdi_host_pcidev_info; /* Kernel Host pci_dev */ | ||
46 | struct pci_dev *pdi_linux_pcidev; /* Kernel pci_dev */ | ||
47 | |||
48 | struct sn_irq_info *pdi_sn_irq_info; | ||
49 | }; | ||
50 | |||
51 | extern void sn_irq_fixup(struct pci_dev *pci_dev, | ||
52 | struct sn_irq_info *sn_irq_info); | ||
53 | |||
54 | #endif /* _ASM_IA64_SN_PCI_PCIDEV_H */ | ||
diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile index 6c7f4d9e8ea0..4351c4ff9845 100644 --- a/arch/ia64/sn/kernel/Makefile +++ b/arch/ia64/sn/kernel/Makefile | |||
@@ -4,9 +4,15 @@ | |||
4 | # License. See the file "COPYING" in the main directory of this archive | 4 | # License. See the file "COPYING" in the main directory of this archive |
5 | # for more details. | 5 | # for more details. |
6 | # | 6 | # |
7 | # Copyright (C) 1999,2001-2003 Silicon Graphics, Inc. All Rights Reserved. | 7 | # Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All Rights Reserved. |
8 | # | 8 | # |
9 | 9 | ||
10 | obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \ | 10 | obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \ |
11 | huberror.o io_init.o iomv.o klconflib.o sn2/ | 11 | huberror.o io_init.o iomv.o klconflib.o sn2/ |
12 | obj-$(CONFIG_IA64_GENERIC) += machvec.o | 12 | obj-$(CONFIG_IA64_GENERIC) += machvec.o |
13 | obj-$(CONFIG_SGI_TIOCX) += tiocx.o | ||
14 | obj-$(CONFIG_IA64_SGI_SN_XP) += xp.o | ||
15 | xp-y := xp_main.o xp_nofault.o | ||
16 | obj-$(CONFIG_IA64_SGI_SN_XP) += xpc.o | ||
17 | xpc-y := xpc_main.o xpc_channel.o xpc_partition.o | ||
18 | obj-$(CONFIG_IA64_SGI_SN_XP) += xpnet.o | ||
diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c index ce0bc4085eae..647deae9bfcd 100644 --- a/arch/ia64/sn/kernel/bte.c +++ b/arch/ia64/sn/kernel/bte.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. | 6 | * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/config.h> | 9 | #include <linux/config.h> |
@@ -170,10 +170,6 @@ retry_bteop: | |||
170 | /* Initialize the notification to a known value. */ | 170 | /* Initialize the notification to a known value. */ |
171 | *bte->most_rcnt_na = BTE_WORD_BUSY; | 171 | *bte->most_rcnt_na = BTE_WORD_BUSY; |
172 | 172 | ||
173 | /* Set the status reg busy bit and transfer length */ | ||
174 | BTE_PRINTKV(("IBLS = 0x%lx\n", IBLS_BUSY | transfer_size)); | ||
175 | BTE_LNSTAT_STORE(bte, IBLS_BUSY | transfer_size); | ||
176 | |||
177 | /* Set the source and destination registers */ | 173 | /* Set the source and destination registers */ |
178 | BTE_PRINTKV(("IBSA = 0x%lx)\n", (TO_PHYS(src)))); | 174 | BTE_PRINTKV(("IBSA = 0x%lx)\n", (TO_PHYS(src)))); |
179 | BTE_SRC_STORE(bte, TO_PHYS(src)); | 175 | BTE_SRC_STORE(bte, TO_PHYS(src)); |
@@ -188,7 +184,7 @@ retry_bteop: | |||
188 | 184 | ||
189 | /* Initiate the transfer */ | 185 | /* Initiate the transfer */ |
190 | BTE_PRINTK(("IBCT = 0x%lx)\n", BTE_VALID_MODE(mode))); | 186 | BTE_PRINTK(("IBCT = 0x%lx)\n", BTE_VALID_MODE(mode))); |
191 | BTE_CTRL_STORE(bte, BTE_VALID_MODE(mode)); | 187 | BTE_START_TRANSFER(bte, transfer_size, BTE_VALID_MODE(mode)); |
192 | 188 | ||
193 | itc_end = ia64_get_itc() + (40000000 * local_cpu_data->cyc_per_usec); | 189 | itc_end = ia64_get_itc() + (40000000 * local_cpu_data->cyc_per_usec); |
194 | 190 | ||
@@ -429,10 +425,16 @@ void bte_init_node(nodepda_t * mynodepda, cnodeid_t cnode) | |||
429 | mynodepda->bte_recovery_timer.data = (unsigned long)mynodepda; | 425 | mynodepda->bte_recovery_timer.data = (unsigned long)mynodepda; |
430 | 426 | ||
431 | for (i = 0; i < BTES_PER_NODE; i++) { | 427 | for (i = 0; i < BTES_PER_NODE; i++) { |
428 | u64 *base_addr; | ||
429 | |||
432 | /* Which link status register should we use? */ | 430 | /* Which link status register should we use? */ |
433 | unsigned long link_status = (i == 0 ? IIO_IBLS0 : IIO_IBLS1); | 431 | base_addr = (u64 *) |
434 | mynodepda->bte_if[i].bte_base_addr = (u64 *) | 432 | REMOTE_HUB_ADDR(cnodeid_to_nasid(cnode), BTE_BASE_ADDR(i)); |
435 | REMOTE_HUB_ADDR(cnodeid_to_nasid(cnode), link_status); | 433 | mynodepda->bte_if[i].bte_base_addr = base_addr; |
434 | mynodepda->bte_if[i].bte_source_addr = BTE_SOURCE_ADDR(base_addr); | ||
435 | mynodepda->bte_if[i].bte_destination_addr = BTE_DEST_ADDR(base_addr); | ||
436 | mynodepda->bte_if[i].bte_control_addr = BTE_CTRL_ADDR(base_addr); | ||
437 | mynodepda->bte_if[i].bte_notify_addr = BTE_NOTIF_ADDR(base_addr); | ||
436 | 438 | ||
437 | /* | 439 | /* |
438 | * Initialize the notification and spinlock | 440 | * Initialize the notification and spinlock |
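
The bte.c hunks above stop caching only the link-status register and instead record, once in bte_init_node(), the source, destination, control and notification register addresses for each BTE interface; bte_copy() then kicks off the transfer through BTE_START_TRANSFER() rather than separate length/busy and control stores. A minimal sketch of that "resolve the register addresses once, then only store through them on the hot path" pattern, in plain C with hypothetical offsets (the real BTE_SOURCE_ADDR()/BTE_DEST_ADDR()/etc. macros are not shown in this diff):

#include <stdint.h>

/* Hypothetical register offsets from one BTE interface's base address. */
#define OFF_SRC   0x08
#define OFF_DEST  0x10
#define OFF_CTRL  0x18
#define OFF_NOTIF 0x20

struct bte_regs {
	volatile uint64_t *src;		/* source address register */
	volatile uint64_t *dest;	/* destination address register */
	volatile uint64_t *ctrl;	/* control/start register */
	volatile uint64_t *notif;	/* notification address register */
};

/* Done once per interface at init time, as bte_init_node() now does. */
static void bte_regs_init(struct bte_regs *r, uintptr_t base)
{
	r->src   = (volatile uint64_t *)(base + OFF_SRC);
	r->dest  = (volatile uint64_t *)(base + OFF_DEST);
	r->ctrl  = (volatile uint64_t *)(base + OFF_CTRL);
	r->notif = (volatile uint64_t *)(base + OFF_NOTIF);
}

/* Hot path: only stores through the cached pointers, no address math. */
static void bte_start(struct bte_regs *r, uint64_t src, uint64_t dst, uint64_t mode)
{
	*r->src  = src;
	*r->dest = dst;
	*r->ctrl = mode;	/* the real BTE_START_TRANSFER() also folds in the length */
}
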
diff --git a/arch/ia64/sn/kernel/bte_error.c b/arch/ia64/sn/kernel/bte_error.c index fd104312c6bd..fcbc748ae433 100644 --- a/arch/ia64/sn/kernel/bte_error.c +++ b/arch/ia64/sn/kernel/bte_error.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved. | 6 | * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
@@ -33,48 +33,28 @@ void bte_error_handler(unsigned long); | |||
33 | * Wait until all BTE related CRBs are completed | 33 | * Wait until all BTE related CRBs are completed |
34 | * and then reset the interfaces. | 34 | * and then reset the interfaces. |
35 | */ | 35 | */ |
36 | void bte_error_handler(unsigned long _nodepda) | 36 | void shub1_bte_error_handler(unsigned long _nodepda) |
37 | { | 37 | { |
38 | struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda; | 38 | struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda; |
39 | spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock; | ||
40 | struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer; | 39 | struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer; |
41 | nasid_t nasid; | 40 | nasid_t nasid; |
42 | int i; | 41 | int i; |
43 | int valid_crbs; | 42 | int valid_crbs; |
44 | unsigned long irq_flags; | ||
45 | volatile u64 *notify; | ||
46 | bte_result_t bh_error; | ||
47 | ii_imem_u_t imem; /* II IMEM Register */ | 43 | ii_imem_u_t imem; /* II IMEM Register */ |
48 | ii_icrb0_d_u_t icrbd; /* II CRB Register D */ | 44 | ii_icrb0_d_u_t icrbd; /* II CRB Register D */ |
49 | ii_ibcr_u_t ibcr; | 45 | ii_ibcr_u_t ibcr; |
50 | ii_icmr_u_t icmr; | 46 | ii_icmr_u_t icmr; |
51 | ii_ieclr_u_t ieclr; | 47 | ii_ieclr_u_t ieclr; |
52 | 48 | ||
53 | BTE_PRINTK(("bte_error_handler(%p) - %d\n", err_nodepda, | 49 | BTE_PRINTK(("shub1_bte_error_handler(%p) - %d\n", err_nodepda, |
54 | smp_processor_id())); | 50 | smp_processor_id())); |
55 | 51 | ||
56 | spin_lock_irqsave(recovery_lock, irq_flags); | ||
57 | |||
58 | if ((err_nodepda->bte_if[0].bh_error == BTE_SUCCESS) && | 52 | if ((err_nodepda->bte_if[0].bh_error == BTE_SUCCESS) && |
59 | (err_nodepda->bte_if[1].bh_error == BTE_SUCCESS)) { | 53 | (err_nodepda->bte_if[1].bh_error == BTE_SUCCESS)) { |
60 | BTE_PRINTK(("eh:%p:%d Nothing to do.\n", err_nodepda, | 54 | BTE_PRINTK(("eh:%p:%d Nothing to do.\n", err_nodepda, |
61 | smp_processor_id())); | 55 | smp_processor_id())); |
62 | spin_unlock_irqrestore(recovery_lock, irq_flags); | ||
63 | return; | 56 | return; |
64 | } | 57 | } |
65 | /* | ||
66 | * Lock all interfaces on this node to prevent new transfers | ||
67 | * from being queued. | ||
68 | */ | ||
69 | for (i = 0; i < BTES_PER_NODE; i++) { | ||
70 | if (err_nodepda->bte_if[i].cleanup_active) { | ||
71 | continue; | ||
72 | } | ||
73 | spin_lock(&err_nodepda->bte_if[i].spinlock); | ||
74 | BTE_PRINTK(("eh:%p:%d locked %d\n", err_nodepda, | ||
75 | smp_processor_id(), i)); | ||
76 | err_nodepda->bte_if[i].cleanup_active = 1; | ||
77 | } | ||
78 | 58 | ||
79 | /* Determine information about our hub */ | 59 | /* Determine information about our hub */ |
80 | nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode); | 60 | nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode); |
@@ -101,7 +81,6 @@ void bte_error_handler(unsigned long _nodepda) | |||
101 | mod_timer(recovery_timer, HZ * 5); | 81 | mod_timer(recovery_timer, HZ * 5); |
102 | BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda, | 82 | BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda, |
103 | smp_processor_id())); | 83 | smp_processor_id())); |
104 | spin_unlock_irqrestore(recovery_lock, irq_flags); | ||
105 | return; | 84 | return; |
106 | } | 85 | } |
107 | if (icmr.ii_icmr_fld_s.i_crb_vld != 0) { | 86 | if (icmr.ii_icmr_fld_s.i_crb_vld != 0) { |
@@ -120,8 +99,6 @@ void bte_error_handler(unsigned long _nodepda) | |||
120 | BTE_PRINTK(("eh:%p:%d Valid %d, Giving up\n", | 99 | BTE_PRINTK(("eh:%p:%d Valid %d, Giving up\n", |
121 | err_nodepda, smp_processor_id(), | 100 | err_nodepda, smp_processor_id(), |
122 | i)); | 101 | i)); |
123 | spin_unlock_irqrestore(recovery_lock, | ||
124 | irq_flags); | ||
125 | return; | 102 | return; |
126 | } | 103 | } |
127 | } | 104 | } |
@@ -146,6 +123,51 @@ void bte_error_handler(unsigned long _nodepda) | |||
146 | ibcr.ii_ibcr_fld_s.i_soft_reset = 1; | 123 | ibcr.ii_ibcr_fld_s.i_soft_reset = 1; |
147 | REMOTE_HUB_S(nasid, IIO_IBCR, ibcr.ii_ibcr_regval); | 124 | REMOTE_HUB_S(nasid, IIO_IBCR, ibcr.ii_ibcr_regval); |
148 | 125 | ||
126 | del_timer(recovery_timer); | ||
127 | } | ||
128 | |||
129 | /* | ||
130 | * Wait until all BTE related CRBs are completed | ||
131 | * and then reset the interfaces. | ||
132 | */ | ||
133 | void bte_error_handler(unsigned long _nodepda) | ||
134 | { | ||
135 | struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda; | ||
136 | spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock; | ||
137 | int i; | ||
138 | nasid_t nasid; | ||
139 | unsigned long irq_flags; | ||
140 | volatile u64 *notify; | ||
141 | bte_result_t bh_error; | ||
142 | |||
143 | BTE_PRINTK(("bte_error_handler(%p) - %d\n", err_nodepda, | ||
144 | smp_processor_id())); | ||
145 | |||
146 | spin_lock_irqsave(recovery_lock, irq_flags); | ||
147 | |||
148 | /* | ||
149 | * Lock all interfaces on this node to prevent new transfers | ||
150 | * from being queued. | ||
151 | */ | ||
152 | for (i = 0; i < BTES_PER_NODE; i++) { | ||
153 | if (err_nodepda->bte_if[i].cleanup_active) { | ||
154 | continue; | ||
155 | } | ||
156 | spin_lock(&err_nodepda->bte_if[i].spinlock); | ||
157 | BTE_PRINTK(("eh:%p:%d locked %d\n", err_nodepda, | ||
158 | smp_processor_id(), i)); | ||
159 | err_nodepda->bte_if[i].cleanup_active = 1; | ||
160 | } | ||
161 | |||
162 | if (is_shub1()) { | ||
163 | shub1_bte_error_handler(_nodepda); | ||
164 | } else { | ||
165 | nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode); | ||
166 | |||
167 | if (ia64_sn_bte_recovery(nasid)) | ||
168 | panic("bte_error_handler(): Fatal BTE Error"); | ||
169 | } | ||
170 | |||
149 | for (i = 0; i < BTES_PER_NODE; i++) { | 171 | for (i = 0; i < BTES_PER_NODE; i++) { |
150 | bh_error = err_nodepda->bte_if[i].bh_error; | 172 | bh_error = err_nodepda->bte_if[i].bh_error; |
151 | if (bh_error != BTE_SUCCESS) { | 173 | if (bh_error != BTE_SUCCESS) { |
@@ -165,8 +187,6 @@ void bte_error_handler(unsigned long _nodepda) | |||
165 | spin_unlock(&err_nodepda->bte_if[i].spinlock); | 187 | spin_unlock(&err_nodepda->bte_if[i].spinlock); |
166 | } | 188 | } |
167 | 189 | ||
168 | del_timer(recovery_timer); | ||
169 | |||
170 | spin_unlock_irqrestore(recovery_lock, irq_flags); | 190 | spin_unlock_irqrestore(recovery_lock, irq_flags); |
171 | } | 191 | } |
172 | 192 | ||
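
The bte_error.c refactor above keeps the locking and per-interface cleanup in a generic bte_error_handler() and moves the register-level recovery into shub1_bte_error_handler(); non-shub1 nodes hand recovery to SAL via ia64_sn_bte_recovery(). A hedged sketch of that dispatch shape, with illustrative names only:

enum hub_rev { HUB_REV1, HUB_REV2 };

struct bte_if_state { int cleanup_active; };

struct node_state {
	enum hub_rev rev;
	struct bte_if_state bte_if[2];
};

static void rev1_recover(struct node_state *n)
{
	/* pokes the II registers directly, as shub1_bte_error_handler() does */
}

static int firmware_recover(struct node_state *n)
{
	/* asks SAL to do the recovery, as ia64_sn_bte_recovery() does */
	return 0;	/* nonzero would be a fatal error */
}

static void generic_error_handler(struct node_state *n)
{
	int i;

	/* common part: stop new transfers on every interface */
	for (i = 0; i < 2; i++)
		n->bte_if[i].cleanup_active = 1;

	/* hardware-specific part */
	if (n->rev == HUB_REV1)
		rev1_recover(n);
	else if (firmware_recover(n))
		;	/* the real code panics on a fatal BTE error here */

	/* common completion and retry handling would follow here */
}
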
diff --git a/arch/ia64/sn/kernel/huberror.c b/arch/ia64/sn/kernel/huberror.c index 2bdf684c5066..5c39b43ba3c0 100644 --- a/arch/ia64/sn/kernel/huberror.c +++ b/arch/ia64/sn/kernel/huberror.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 1992 - 1997, 2000,2002-2004 Silicon Graphics, Inc. All rights reserved. | 6 | * Copyright (C) 1992 - 1997, 2000,2002-2005 Silicon Graphics, Inc. All rights reserved. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
@@ -38,8 +38,11 @@ static irqreturn_t hub_eint_handler(int irq, void *arg, struct pt_regs *ep) | |||
38 | if ((int)ret_stuff.v0) | 38 | if ((int)ret_stuff.v0) |
39 | panic("hubii_eint_handler(): Fatal TIO Error"); | 39 | panic("hubii_eint_handler(): Fatal TIO Error"); |
40 | 40 | ||
41 | if (!(nasid & 1)) /* Not a TIO, handle CRB errors */ | 41 | if (is_shub1()) { |
42 | (void)hubiio_crb_error_handler(hubdev_info); | 42 | if (!(nasid & 1)) /* Not a TIO, handle CRB errors */ |
43 | (void)hubiio_crb_error_handler(hubdev_info); | ||
44 | } else | ||
45 | bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid))); | ||
43 | 46 | ||
44 | return IRQ_HANDLED; | 47 | return IRQ_HANDLED; |
45 | } | 48 | } |
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c index 001880812b7c..9e07f5463f21 100644 --- a/arch/ia64/sn/kernel/io_init.c +++ b/arch/ia64/sn/kernel/io_init.c | |||
@@ -11,14 +11,15 @@ | |||
11 | #include <asm/sn/types.h> | 11 | #include <asm/sn/types.h> |
12 | #include <asm/sn/sn_sal.h> | 12 | #include <asm/sn/sn_sal.h> |
13 | #include <asm/sn/addrs.h> | 13 | #include <asm/sn/addrs.h> |
14 | #include "pci/pcibus_provider_defs.h" | 14 | #include <asm/sn/pcibus_provider_defs.h> |
15 | #include "pci/pcidev.h" | 15 | #include <asm/sn/pcidev.h> |
16 | #include "pci/pcibr_provider.h" | 16 | #include "pci/pcibr_provider.h" |
17 | #include "xtalk/xwidgetdev.h" | 17 | #include "xtalk/xwidgetdev.h" |
18 | #include <asm/sn/geo.h> | 18 | #include <asm/sn/geo.h> |
19 | #include "xtalk/hubdev.h" | 19 | #include "xtalk/hubdev.h" |
20 | #include <asm/sn/io.h> | 20 | #include <asm/sn/io.h> |
21 | #include <asm/sn/simulator.h> | 21 | #include <asm/sn/simulator.h> |
22 | #include <asm/sn/tioca_provider.h> | ||
22 | 23 | ||
23 | char master_baseio_wid; | 24 | char master_baseio_wid; |
24 | nasid_t master_nasid = INVALID_NASID; /* Partition Master */ | 25 | nasid_t master_nasid = INVALID_NASID; /* Partition Master */ |
@@ -34,6 +35,37 @@ struct brick { | |||
34 | 35 | ||
35 | int sn_ioif_inited = 0; /* SN I/O infrastructure initialized? */ | 36 | int sn_ioif_inited = 0; /* SN I/O infrastructure initialized? */ |
36 | 37 | ||
38 | struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */ | ||
39 | |||
40 | /* | ||
41 | * Hooks and struct for unsupported pci providers | ||
42 | */ | ||
43 | |||
44 | static dma_addr_t | ||
45 | sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size) | ||
46 | { | ||
47 | return 0; | ||
48 | } | ||
49 | |||
50 | static void | ||
51 | sn_default_pci_unmap(struct pci_dev *pdev, dma_addr_t addr, int direction) | ||
52 | { | ||
53 | return; | ||
54 | } | ||
55 | |||
56 | static void * | ||
57 | sn_default_pci_bus_fixup(struct pcibus_bussoft *soft) | ||
58 | { | ||
59 | return NULL; | ||
60 | } | ||
61 | |||
62 | static struct sn_pcibus_provider sn_pci_default_provider = { | ||
63 | .dma_map = sn_default_pci_map, | ||
64 | .dma_map_consistent = sn_default_pci_map, | ||
65 | .dma_unmap = sn_default_pci_unmap, | ||
66 | .bus_fixup = sn_default_pci_bus_fixup, | ||
67 | }; | ||
68 | |||
37 | /* | 69 | /* |
38 | * Retrieve the DMA Flush List given nasid. This list is needed | 70 | * Retrieve the DMA Flush List given nasid. This list is needed |
39 | * to implement the WAR - Flush DMA data on PIO Reads. | 71 | * to implement the WAR - Flush DMA data on PIO Reads. |
@@ -142,6 +174,12 @@ static void sn_fixup_ionodes(void) | |||
142 | if (status) | 174 | if (status) |
143 | continue; | 175 | continue; |
144 | 176 | ||
177 | /* Attach the error interrupt handlers */ | ||
178 | if (nasid & 1) | ||
179 | ice_error_init(hubdev); | ||
180 | else | ||
181 | hub_error_init(hubdev); | ||
182 | |||
145 | for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) | 183 | for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) |
146 | hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev; | 184 | hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev; |
147 | 185 | ||
@@ -179,10 +217,6 @@ static void sn_fixup_ionodes(void) | |||
179 | sn_flush_device_list; | 217 | sn_flush_device_list; |
180 | } | 218 | } |
181 | 219 | ||
182 | if (!(i & 1)) | ||
183 | hub_error_init(hubdev); | ||
184 | else | ||
185 | ice_error_init(hubdev); | ||
186 | } | 220 | } |
187 | 221 | ||
188 | } | 222 | } |
@@ -201,6 +235,7 @@ static void sn_pci_fixup_slot(struct pci_dev *dev) | |||
201 | struct sn_irq_info *sn_irq_info; | 235 | struct sn_irq_info *sn_irq_info; |
202 | struct pci_dev *host_pci_dev; | 236 | struct pci_dev *host_pci_dev; |
203 | int status = 0; | 237 | int status = 0; |
238 | struct pcibus_bussoft *bs; | ||
204 | 239 | ||
205 | dev->sysdata = kmalloc(sizeof(struct pcidev_info), GFP_KERNEL); | 240 | dev->sysdata = kmalloc(sizeof(struct pcidev_info), GFP_KERNEL); |
206 | if (SN_PCIDEV_INFO(dev) <= 0) | 241 | if (SN_PCIDEV_INFO(dev) <= 0) |
@@ -241,6 +276,7 @@ static void sn_pci_fixup_slot(struct pci_dev *dev) | |||
241 | } | 276 | } |
242 | 277 | ||
243 | /* set up host bus linkages */ | 278 | /* set up host bus linkages */ |
279 | bs = SN_PCIBUS_BUSSOFT(dev->bus); | ||
244 | host_pci_dev = | 280 | host_pci_dev = |
245 | pci_find_slot(SN_PCIDEV_INFO(dev)->pdi_slot_host_handle >> 32, | 281 | pci_find_slot(SN_PCIDEV_INFO(dev)->pdi_slot_host_handle >> 32, |
246 | SN_PCIDEV_INFO(dev)-> | 282 | SN_PCIDEV_INFO(dev)-> |
@@ -248,10 +284,16 @@ static void sn_pci_fixup_slot(struct pci_dev *dev) | |||
248 | SN_PCIDEV_INFO(dev)->pdi_host_pcidev_info = | 284 | SN_PCIDEV_INFO(dev)->pdi_host_pcidev_info = |
249 | SN_PCIDEV_INFO(host_pci_dev); | 285 | SN_PCIDEV_INFO(host_pci_dev); |
250 | SN_PCIDEV_INFO(dev)->pdi_linux_pcidev = dev; | 286 | SN_PCIDEV_INFO(dev)->pdi_linux_pcidev = dev; |
251 | SN_PCIDEV_INFO(dev)->pdi_pcibus_info = SN_PCIBUS_BUSSOFT(dev->bus); | 287 | SN_PCIDEV_INFO(dev)->pdi_pcibus_info = bs; |
288 | |||
289 | if (bs && bs->bs_asic_type < PCIIO_ASIC_MAX_TYPES) { | ||
290 | SN_PCIDEV_BUSPROVIDER(dev) = sn_pci_provider[bs->bs_asic_type]; | ||
291 | } else { | ||
292 | SN_PCIDEV_BUSPROVIDER(dev) = &sn_pci_default_provider; | ||
293 | } | ||
252 | 294 | ||
253 | /* Only set up IRQ stuff if this device has a host bus context */ | 295 | /* Only set up IRQ stuff if this device has a host bus context */ |
254 | if (SN_PCIDEV_BUSSOFT(dev) && sn_irq_info->irq_irq) { | 296 | if (bs && sn_irq_info->irq_irq) { |
255 | SN_PCIDEV_INFO(dev)->pdi_sn_irq_info = sn_irq_info; | 297 | SN_PCIDEV_INFO(dev)->pdi_sn_irq_info = sn_irq_info; |
256 | dev->irq = SN_PCIDEV_INFO(dev)->pdi_sn_irq_info->irq_irq; | 298 | dev->irq = SN_PCIDEV_INFO(dev)->pdi_sn_irq_info->irq_irq; |
257 | sn_irq_fixup(dev, sn_irq_info); | 299 | sn_irq_fixup(dev, sn_irq_info); |
@@ -271,6 +313,7 @@ static void sn_pci_controller_fixup(int segment, int busnum) | |||
271 | struct pcibus_bussoft *prom_bussoft_ptr; | 313 | struct pcibus_bussoft *prom_bussoft_ptr; |
272 | struct hubdev_info *hubdev_info; | 314 | struct hubdev_info *hubdev_info; |
273 | void *provider_soft; | 315 | void *provider_soft; |
316 | struct sn_pcibus_provider *provider; | ||
274 | 317 | ||
275 | status = | 318 | status = |
276 | sal_get_pcibus_info((u64) segment, (u64) busnum, | 319 | sal_get_pcibus_info((u64) segment, (u64) busnum, |
@@ -291,16 +334,22 @@ static void sn_pci_controller_fixup(int segment, int busnum) | |||
291 | /* | 334 | /* |
292 | * Per-provider fixup. Copies the contents from prom to local | 335 | * Per-provider fixup. Copies the contents from prom to local |
293 | * area and links SN_PCIBUS_BUSSOFT(). | 336 | * area and links SN_PCIBUS_BUSSOFT(). |
294 | * | ||
295 | * Note: Provider is responsible for ensuring that prom_bussoft_ptr | ||
296 | * represents an asic-type that it can handle. | ||
297 | */ | 337 | */ |
298 | 338 | ||
299 | if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB) { | 339 | if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES) { |
300 | return; /* no further fixup necessary */ | 340 | return; /* unsupported asic type */ |
341 | } | ||
342 | |||
343 | provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type]; | ||
344 | if (provider == NULL) { | ||
345 | return; /* no provider registered for this asic */ | ||
346 | } | ||
347 | |||
348 | provider_soft = NULL; | ||
349 | if (provider->bus_fixup) { | ||
350 | provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr); | ||
301 | } | 351 | } |
302 | 352 | ||
303 | provider_soft = pcibr_bus_fixup(prom_bussoft_ptr); | ||
304 | if (provider_soft == NULL) { | 353 | if (provider_soft == NULL) { |
305 | return; /* fixup failed or not applicable */ | 354 | return; /* fixup failed or not applicable */ |
306 | } | 355 | } |
@@ -339,6 +388,17 @@ static int __init sn_pci_init(void) | |||
339 | return 0; | 388 | return 0; |
340 | 389 | ||
341 | /* | 390 | /* |
391 | * prime sn_pci_provider[]. Individual provider init routines will | ||
392 | * override their respective default entries. | ||
393 | */ | ||
394 | |||
395 | for (i = 0; i < PCIIO_ASIC_MAX_TYPES; i++) | ||
396 | sn_pci_provider[i] = &sn_pci_default_provider; | ||
397 | |||
398 | pcibr_init_provider(); | ||
399 | tioca_init_provider(); | ||
400 | |||
401 | /* | ||
342 | * This is needed to avoid bounce limit checks in the blk layer | 402 | * This is needed to avoid bounce limit checks in the blk layer |
343 | */ | 403 | */ |
344 | ia64_max_iommu_merge_mask = ~PAGE_MASK; | 404 | ia64_max_iommu_merge_mask = ~PAGE_MASK; |
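
The io_init.c changes above keep a table of PCI bus providers indexed by ASIC type, primed with a default provider whose hooks do nothing, and let pcibr_init_provider()/tioca_init_provider() overwrite their slots; sn_pci_controller_fixup() then only bounds-checks the ASIC type and calls the table entry. A self-contained sketch of that provider-table-with-default pattern (names here are illustrative, not the kernel's):

#include <stddef.h>

#define MAX_ASIC_TYPES 4

struct bus_provider {
	void *(*bus_fixup)(int busnum);
};

/* Default hooks: safe no-ops so callers never need NULL checks. */
static void *default_bus_fixup(int busnum) { return NULL; }

static struct bus_provider default_provider = {
	.bus_fixup = default_bus_fixup,
};

static struct bus_provider *providers[MAX_ASIC_TYPES];

static void providers_init(void)
{
	int i;

	for (i = 0; i < MAX_ASIC_TYPES; i++)
		providers[i] = &default_provider;
	/* real providers would now install themselves into their own slot */
}

static void *fixup_bus(int asic_type, int busnum)
{
	if (asic_type < 0 || asic_type >= MAX_ASIC_TYPES)
		return NULL;	/* unsupported asic type */
	return providers[asic_type]->bus_fixup(busnum);
}
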
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c index 3be44724f6c8..0f4e8138658f 100644 --- a/arch/ia64/sn/kernel/irq.c +++ b/arch/ia64/sn/kernel/irq.c | |||
@@ -13,8 +13,8 @@ | |||
13 | #include <asm/sn/addrs.h> | 13 | #include <asm/sn/addrs.h> |
14 | #include <asm/sn/arch.h> | 14 | #include <asm/sn/arch.h> |
15 | #include "xtalk/xwidgetdev.h" | 15 | #include "xtalk/xwidgetdev.h" |
16 | #include "pci/pcibus_provider_defs.h" | 16 | #include <asm/sn/pcibus_provider_defs.h> |
17 | #include "pci/pcidev.h" | 17 | #include <asm/sn/pcidev.h> |
18 | #include "pci/pcibr_provider.h" | 18 | #include "pci/pcibr_provider.h" |
19 | #include <asm/sn/shub_mmr.h> | 19 | #include <asm/sn/shub_mmr.h> |
20 | #include <asm/sn/sn_sal.h> | 20 | #include <asm/sn/sn_sal.h> |
@@ -82,20 +82,9 @@ static void sn_ack_irq(unsigned int irq) | |||
82 | nasid = get_nasid(); | 82 | nasid = get_nasid(); |
83 | event_occurred = | 83 | event_occurred = |
84 | HUB_L((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED)); | 84 | HUB_L((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED)); |
85 | if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) { | 85 | mask = event_occurred & SH_ALL_INT_MASK; |
86 | mask |= (1 << SH_EVENT_OCCURRED_UART_INT_SHFT); | ||
87 | } | ||
88 | if (event_occurred & SH_EVENT_OCCURRED_IPI_INT_MASK) { | ||
89 | mask |= (1 << SH_EVENT_OCCURRED_IPI_INT_SHFT); | ||
90 | } | ||
91 | if (event_occurred & SH_EVENT_OCCURRED_II_INT0_MASK) { | ||
92 | mask |= (1 << SH_EVENT_OCCURRED_II_INT0_SHFT); | ||
93 | } | ||
94 | if (event_occurred & SH_EVENT_OCCURRED_II_INT1_MASK) { | ||
95 | mask |= (1 << SH_EVENT_OCCURRED_II_INT1_SHFT); | ||
96 | } | ||
97 | HUB_S((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS), | 86 | HUB_S((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS), |
98 | mask); | 87 | mask); |
99 | __set_bit(irq, (volatile void *)pda->sn_in_service_ivecs); | 88 | __set_bit(irq, (volatile void *)pda->sn_in_service_ivecs); |
100 | 89 | ||
101 | move_irq(irq); | 90 | move_irq(irq); |
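
The sn_ack_irq() hunk above replaces four separate "test bit, OR the shifted bit into the mask" steps with a single AND against SH_ALL_INT_MASK before writing the alias register. That is only an identity if SH_ALL_INT_MASK is exactly the OR of the UART, IPI, II_INT0 and II_INT1 bits, which is what the patch relies on. A tiny check with made-up bit positions standing in for the SH_EVENT_OCCURRED_* fields:

#include <assert.h>
#include <stdint.h>

#define UART_BIT (1ULL << 20)
#define IPI_BIT  (1ULL << 28)
#define II0_BIT  (1ULL << 29)
#define II1_BIT  (1ULL << 30)
#define ALL_INT_MASK (UART_BIT | IPI_BIT | II0_BIT | II1_BIT)

int main(void)
{
	uint64_t occurred = UART_BIT | II1_BIT | (1ULL << 5);	/* sample value */
	uint64_t old_mask = 0;

	/* old style: one test-and-set per interrupt source */
	if (occurred & UART_BIT) old_mask |= UART_BIT;
	if (occurred & IPI_BIT)  old_mask |= IPI_BIT;
	if (occurred & II0_BIT)  old_mask |= II0_BIT;
	if (occurred & II1_BIT)  old_mask |= II1_BIT;

	/* new style: a single AND with the combined mask */
	assert(old_mask == (occurred & ALL_INT_MASK));
	return 0;
}
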
diff --git a/arch/ia64/sn/kernel/mca.c b/arch/ia64/sn/kernel/mca.c index 857774bb2c9a..6546db6abdba 100644 --- a/arch/ia64/sn/kernel/mca.c +++ b/arch/ia64/sn/kernel/mca.c | |||
@@ -37,6 +37,11 @@ static u64 *sn_oemdata_size, sn_oemdata_bufsize; | |||
37 | * This function is the callback routine that SAL calls to log error | 37 | * This function is the callback routine that SAL calls to log error |
38 | * info for platform errors. buf is appended to sn_oemdata, resizing as | 38 | * info for platform errors. buf is appended to sn_oemdata, resizing as |
39 | * required. | 39 | * required. |
40 | * Note: this is a SAL to OS callback, running under the same rules as the SAL | ||
41 | * code. SAL calls are run with preempt disabled so this routine must not | ||
42 | * sleep. vmalloc can sleep so print_hook cannot resize the output buffer | ||
43 | * itself, instead it must set the required size and return to let the caller | ||
44 | * itself; instead it must set the required size and return to let the caller | ||
40 | */ | 45 | */ |
41 | static int print_hook(const char *fmt, ...) | 46 | static int print_hook(const char *fmt, ...) |
42 | { | 47 | { |
@@ -47,18 +52,8 @@ static int print_hook(const char *fmt, ...) | |||
47 | vsnprintf(buf, sizeof(buf), fmt, args); | 52 | vsnprintf(buf, sizeof(buf), fmt, args); |
48 | va_end(args); | 53 | va_end(args); |
49 | len = strlen(buf); | 54 | len = strlen(buf); |
50 | while (*sn_oemdata_size + len + 1 > sn_oemdata_bufsize) { | 55 | if (*sn_oemdata_size + len <= sn_oemdata_bufsize) |
51 | u8 *newbuf = vmalloc(sn_oemdata_bufsize += 1000); | 56 | memcpy(*sn_oemdata + *sn_oemdata_size, buf, len); |
52 | if (!newbuf) { | ||
53 | printk(KERN_ERR "%s: unable to extend sn_oemdata\n", | ||
54 | __FUNCTION__); | ||
55 | return 0; | ||
56 | } | ||
57 | memcpy(newbuf, *sn_oemdata, *sn_oemdata_size); | ||
58 | vfree(*sn_oemdata); | ||
59 | *sn_oemdata = newbuf; | ||
60 | } | ||
61 | memcpy(*sn_oemdata + *sn_oemdata_size, buf, len + 1); | ||
62 | *sn_oemdata_size += len; | 57 | *sn_oemdata_size += len; |
63 | return 0; | 58 | return 0; |
64 | } | 59 | } |
@@ -98,7 +93,20 @@ sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata, | |||
98 | sn_oemdata = oemdata; | 93 | sn_oemdata = oemdata; |
99 | sn_oemdata_size = oemdata_size; | 94 | sn_oemdata_size = oemdata_size; |
100 | sn_oemdata_bufsize = 0; | 95 | sn_oemdata_bufsize = 0; |
101 | ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header); | 96 | *sn_oemdata_size = PAGE_SIZE; /* first guess at how much data will be generated */ |
97 | while (*sn_oemdata_size > sn_oemdata_bufsize) { | ||
98 | u8 *newbuf = vmalloc(*sn_oemdata_size); | ||
99 | if (!newbuf) { | ||
100 | printk(KERN_ERR "%s: unable to extend sn_oemdata\n", | ||
101 | __FUNCTION__); | ||
102 | return 1; | ||
103 | } | ||
104 | vfree(*sn_oemdata); | ||
105 | *sn_oemdata = newbuf; | ||
106 | sn_oemdata_bufsize = *sn_oemdata_size; | ||
107 | *sn_oemdata_size = 0; | ||
108 | ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header); | ||
109 | } | ||
102 | up(&sn_oemdata_mutex); | 110 | up(&sn_oemdata_mutex); |
103 | return 0; | 111 | return 0; |
104 | } | 112 | } |
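
The mca.c change above inverts the buffer-growth responsibility: print_hook() runs in SAL callback context and must not sleep, so it no longer calls vmalloc(); it copies only when the data fits and always advances *sn_oemdata_size, while sn_platform_plat_specific_err_print() allocates, redrives the SAL call, and repeats until the reported size fits the buffer. A user-space sketch of that grow-and-redrive pattern, with made-up names and record contents:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static size_t needed;	/* running total reported back by the hook */

/* The "hook": never allocates; copies only when there is room. */
static void hook(char *buf, size_t bufsize, const char *msg)
{
	size_t len = strlen(msg);

	if (needed + len <= bufsize)
		memcpy(buf + needed, msg, len);
	needed += len;	/* count the bytes even when they did not fit */
}

/* Stand-in for the redriven SAL call: produces the same output each time. */
static void redrive(char *buf, size_t bufsize)
{
	needed = 0;
	hook(buf, bufsize, "platform error record one\n");
	hook(buf, bufsize, "platform error record two\n");
}

int main(void)
{
	size_t bufsize = 16;	/* deliberately small first guess */
	char *buf = malloc(bufsize);

	if (!buf)
		return 1;
	redrive(buf, bufsize);
	while (needed > bufsize) {	/* grow and redrive until it all fits */
		free(buf);
		bufsize = needed;
		buf = malloc(bufsize);
		if (!buf)
			return 1;
		redrive(buf, bufsize);
	}
	fwrite(buf, 1, needed, stdout);
	free(buf);
	return 0;
}
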
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c index f0306b516afb..4fb44984afe6 100644 --- a/arch/ia64/sn/kernel/setup.c +++ b/arch/ia64/sn/kernel/setup.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 1999,2001-2004 Silicon Graphics, Inc. All rights reserved. | 6 | * Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All rights reserved. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/config.h> | 9 | #include <linux/config.h> |
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/sched.h> | 29 | #include <linux/sched.h> |
30 | #include <linux/root_dev.h> | 30 | #include <linux/root_dev.h> |
31 | #include <linux/nodemask.h> | 31 | #include <linux/nodemask.h> |
32 | #include <linux/pm.h> | ||
32 | 33 | ||
33 | #include <asm/io.h> | 34 | #include <asm/io.h> |
34 | #include <asm/sal.h> | 35 | #include <asm/sal.h> |
@@ -72,6 +73,12 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second); | |||
72 | DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); | 73 | DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); |
73 | EXPORT_PER_CPU_SYMBOL(__sn_hub_info); | 74 | EXPORT_PER_CPU_SYMBOL(__sn_hub_info); |
74 | 75 | ||
76 | DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_NUMNODES]); | ||
77 | EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid); | ||
78 | |||
79 | DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda); | ||
80 | EXPORT_PER_CPU_SYMBOL(__sn_nodepda); | ||
81 | |||
75 | partid_t sn_partid = -1; | 82 | partid_t sn_partid = -1; |
76 | EXPORT_SYMBOL(sn_partid); | 83 | EXPORT_SYMBOL(sn_partid); |
77 | char sn_system_serial_number_string[128]; | 84 | char sn_system_serial_number_string[128]; |
@@ -353,6 +360,14 @@ void __init sn_setup(char **cmdline_p) | |||
353 | screen_info = sn_screen_info; | 360 | screen_info = sn_screen_info; |
354 | 361 | ||
355 | sn_timer_init(); | 362 | sn_timer_init(); |
363 | |||
364 | /* | ||
365 | * set pm_power_off to a SAL call to allow | ||
366 | * sn machines to power off. The SAL call can be replaced | ||
367 | * by an ACPI interface call when ACPI is fully implemented | ||
368 | * for sn. | ||
369 | */ | ||
370 | pm_power_off = ia64_sn_power_down; | ||
356 | } | 371 | } |
357 | 372 | ||
358 | /** | 373 | /** |
@@ -364,11 +379,11 @@ static void __init sn_init_pdas(char **cmdline_p) | |||
364 | { | 379 | { |
365 | cnodeid_t cnode; | 380 | cnodeid_t cnode; |
366 | 381 | ||
367 | memset(pda->cnodeid_to_nasid_table, -1, | 382 | memset(sn_cnodeid_to_nasid, -1, |
368 | sizeof(pda->cnodeid_to_nasid_table)); | 383 | sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid))); |
369 | for_each_online_node(cnode) | 384 | for_each_online_node(cnode) |
370 | pda->cnodeid_to_nasid_table[cnode] = | 385 | sn_cnodeid_to_nasid[cnode] = |
371 | pxm_to_nasid(nid_to_pxm_map[cnode]); | 386 | pxm_to_nasid(nid_to_pxm_map[cnode]); |
372 | 387 | ||
373 | numionodes = num_online_nodes(); | 388 | numionodes = num_online_nodes(); |
374 | scan_for_ionodes(); | 389 | scan_for_ionodes(); |
@@ -468,7 +483,8 @@ void __init sn_cpu_init(void) | |||
468 | 483 | ||
469 | cnode = nasid_to_cnodeid(nasid); | 484 | cnode = nasid_to_cnodeid(nasid); |
470 | 485 | ||
471 | pda->p_nodepda = nodepdaindr[cnode]; | 486 | sn_nodepda = nodepdaindr[cnode]; |
487 | |||
472 | pda->led_address = | 488 | pda->led_address = |
473 | (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT)); | 489 | (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT)); |
474 | pda->led_state = LED_ALWAYS_SET; | 490 | pda->led_state = LED_ALWAYS_SET; |
@@ -477,15 +493,18 @@ void __init sn_cpu_init(void) | |||
477 | pda->idle_flag = 0; | 493 | pda->idle_flag = 0; |
478 | 494 | ||
479 | if (cpuid != 0) { | 495 | if (cpuid != 0) { |
480 | memcpy(pda->cnodeid_to_nasid_table, | 496 | /* copy cpu 0's sn_cnodeid_to_nasid table to this cpu's */ |
481 | pdacpu(0)->cnodeid_to_nasid_table, | 497 | memcpy(sn_cnodeid_to_nasid, |
482 | sizeof(pda->cnodeid_to_nasid_table)); | 498 | (&per_cpu(__sn_cnodeid_to_nasid, 0)), |
499 | sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid))); | ||
483 | } | 500 | } |
484 | 501 | ||
485 | /* | 502 | /* |
486 | * Check for WARs. | 503 | * Check for WARs. |
487 | * Only needs to be done once, on BSP. | 504 | * Only needs to be done once, on BSP. |
488 | * Has to be done after loop above, because it uses pda.cnodeid_to_nasid_table[i]. | 505 | * Has to be done after loop above, because it uses this cpu's |
506 | * sn_cnodeid_to_nasid table which was just initialized if this | ||
507 | * isn't cpu 0. | ||
489 | * Has to be done before assignment below. | 508 | * Has to be done before assignment below. |
490 | */ | 509 | */ |
491 | if (!wars_have_been_checked) { | 510 | if (!wars_have_been_checked) { |
@@ -571,8 +590,7 @@ static void __init scan_for_ionodes(void) | |||
571 | brd = find_lboard_any(brd, KLTYPE_SNIA); | 590 | brd = find_lboard_any(brd, KLTYPE_SNIA); |
572 | 591 | ||
573 | while (brd) { | 592 | while (brd) { |
574 | pda->cnodeid_to_nasid_table[numionodes] = | 593 | sn_cnodeid_to_nasid[numionodes] = brd->brd_nasid; |
575 | brd->brd_nasid; | ||
576 | physical_node_map[brd->brd_nasid] = numionodes; | 594 | physical_node_map[brd->brd_nasid] = numionodes; |
577 | root_lboard[numionodes] = brd; | 595 | root_lboard[numionodes] = brd; |
578 | numionodes++; | 596 | numionodes++; |
@@ -593,8 +611,7 @@ static void __init scan_for_ionodes(void) | |||
593 | root_lboard[nasid_to_cnodeid(nasid)], | 611 | root_lboard[nasid_to_cnodeid(nasid)], |
594 | KLTYPE_TIO); | 612 | KLTYPE_TIO); |
595 | while (brd) { | 613 | while (brd) { |
596 | pda->cnodeid_to_nasid_table[numionodes] = | 614 | sn_cnodeid_to_nasid[numionodes] = brd->brd_nasid; |
597 | brd->brd_nasid; | ||
598 | physical_node_map[brd->brd_nasid] = numionodes; | 615 | physical_node_map[brd->brd_nasid] = numionodes; |
599 | root_lboard[numionodes] = brd; | 616 | root_lboard[numionodes] = brd; |
600 | numionodes++; | 617 | numionodes++; |
@@ -605,7 +622,6 @@ static void __init scan_for_ionodes(void) | |||
605 | brd = find_lboard_any(brd, KLTYPE_TIO); | 622 | brd = find_lboard_any(brd, KLTYPE_TIO); |
606 | } | 623 | } |
607 | } | 624 | } |
608 | |||
609 | } | 625 | } |
610 | 626 | ||
611 | int | 627 | int |
@@ -614,7 +630,8 @@ nasid_slice_to_cpuid(int nasid, int slice) | |||
614 | long cpu; | 630 | long cpu; |
615 | 631 | ||
616 | for (cpu=0; cpu < NR_CPUS; cpu++) | 632 | for (cpu=0; cpu < NR_CPUS; cpu++) |
617 | if (nodepda->phys_cpuid[cpu].nasid == nasid && nodepda->phys_cpuid[cpu].slice == slice) | 633 | if (cpuid_to_nasid(cpu) == nasid && |
634 | cpuid_to_slice(cpu) == slice) | ||
618 | return cpu; | 635 | return cpu; |
619 | 636 | ||
620 | return -1; | 637 | return -1; |
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c index 197356460ee1..833e700fdac9 100644 --- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c +++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/vmalloc.h> | 28 | #include <linux/vmalloc.h> |
29 | #include <linux/seq_file.h> | 29 | #include <linux/seq_file.h> |
30 | #include <linux/miscdevice.h> | 30 | #include <linux/miscdevice.h> |
31 | #include <linux/utsname.h> | ||
31 | #include <linux/cpumask.h> | 32 | #include <linux/cpumask.h> |
32 | #include <linux/smp_lock.h> | 33 | #include <linux/smp_lock.h> |
33 | #include <linux/nodemask.h> | 34 | #include <linux/nodemask.h> |
@@ -43,6 +44,7 @@ | |||
43 | #include <asm/sn/module.h> | 44 | #include <asm/sn/module.h> |
44 | #include <asm/sn/geo.h> | 45 | #include <asm/sn/geo.h> |
45 | #include <asm/sn/sn2/sn_hwperf.h> | 46 | #include <asm/sn/sn2/sn_hwperf.h> |
47 | #include <asm/sn/addrs.h> | ||
46 | 48 | ||
47 | static void *sn_hwperf_salheap = NULL; | 49 | static void *sn_hwperf_salheap = NULL; |
48 | static int sn_hwperf_obj_cnt = 0; | 50 | static int sn_hwperf_obj_cnt = 0; |
@@ -81,26 +83,45 @@ out: | |||
81 | return e; | 83 | return e; |
82 | } | 84 | } |
83 | 85 | ||
86 | static int sn_hwperf_location_to_bpos(char *location, | ||
87 | int *rack, int *bay, int *slot, int *slab) | ||
88 | { | ||
89 | char type; | ||
90 | |||
91 | /* first scan for an old style geoid string */ | ||
92 | if (sscanf(location, "%03d%c%02d#%d", | ||
93 | rack, &type, bay, slab) == 4) | ||
94 | *slot = 0; | ||
95 | else /* scan for a new bladed geoid string */ | ||
96 | if (sscanf(location, "%03d%c%02d^%02d#%d", | ||
97 | rack, &type, bay, slot, slab) != 5) | ||
98 | return -1; | ||
99 | /* success */ | ||
100 | return 0; | ||
101 | } | ||
102 | |||
84 | static int sn_hwperf_geoid_to_cnode(char *location) | 103 | static int sn_hwperf_geoid_to_cnode(char *location) |
85 | { | 104 | { |
86 | int cnode; | 105 | int cnode; |
87 | geoid_t geoid; | 106 | geoid_t geoid; |
88 | moduleid_t module_id; | 107 | moduleid_t module_id; |
89 | char type; | 108 | int rack, bay, slot, slab; |
90 | int rack, slot, slab; | 109 | int this_rack, this_bay, this_slot, this_slab; |
91 | int this_rack, this_slot, this_slab; | ||
92 | 110 | ||
93 | if (sscanf(location, "%03d%c%02d#%d", &rack, &type, &slot, &slab) != 4) | 111 | if (sn_hwperf_location_to_bpos(location, &rack, &bay, &slot, &slab)) |
94 | return -1; | 112 | return -1; |
95 | 113 | ||
96 | for (cnode = 0; cnode < numionodes; cnode++) { | 114 | for (cnode = 0; cnode < numionodes; cnode++) { |
97 | geoid = cnodeid_get_geoid(cnode); | 115 | geoid = cnodeid_get_geoid(cnode); |
98 | module_id = geo_module(geoid); | 116 | module_id = geo_module(geoid); |
99 | this_rack = MODULE_GET_RACK(module_id); | 117 | this_rack = MODULE_GET_RACK(module_id); |
100 | this_slot = MODULE_GET_BPOS(module_id); | 118 | this_bay = MODULE_GET_BPOS(module_id); |
119 | this_slot = geo_slot(geoid); | ||
101 | this_slab = geo_slab(geoid); | 120 | this_slab = geo_slab(geoid); |
102 | if (rack == this_rack && slot == this_slot && slab == this_slab) | 121 | if (rack == this_rack && bay == this_bay && |
122 | slot == this_slot && slab == this_slab) { | ||
103 | break; | 123 | break; |
124 | } | ||
104 | } | 125 | } |
105 | 126 | ||
106 | return cnode < numionodes ? cnode : -1; | 127 | return cnode < numionodes ? cnode : -1; |
@@ -153,11 +174,36 @@ static const char *sn_hwperf_get_slabname(struct sn_hwperf_object_info *obj, | |||
153 | return slabname; | 174 | return slabname; |
154 | } | 175 | } |
155 | 176 | ||
177 | static void print_pci_topology(struct seq_file *s, | ||
178 | struct sn_hwperf_object_info *obj, int *ordinal, | ||
179 | u64 rack, u64 bay, u64 slot, u64 slab) | ||
180 | { | ||
181 | char *p1; | ||
182 | char *p2; | ||
183 | char *pg; | ||
184 | |||
185 | if (!(pg = (char *)get_zeroed_page(GFP_KERNEL))) | ||
186 | return; /* ignore */ | ||
187 | if (ia64_sn_ioif_get_pci_topology(rack, bay, slot, slab, | ||
188 | __pa(pg), PAGE_SIZE) == SN_HWPERF_OP_OK) { | ||
189 | for (p1=pg; *p1 && p1 < pg + PAGE_SIZE;) { | ||
190 | if (!(p2 = strchr(p1, '\n'))) | ||
191 | break; | ||
192 | *p2 = '\0'; | ||
193 | seq_printf(s, "pcibus %d %s-%s\n", | ||
194 | *ordinal, obj->location, p1); | ||
195 | (*ordinal)++; | ||
196 | p1 = p2 + 1; | ||
197 | } | ||
198 | } | ||
199 | free_page((unsigned long)pg); | ||
200 | } | ||
201 | |||
156 | static int sn_topology_show(struct seq_file *s, void *d) | 202 | static int sn_topology_show(struct seq_file *s, void *d) |
157 | { | 203 | { |
158 | int sz; | 204 | int sz; |
159 | int pt; | 205 | int pt; |
160 | int e; | 206 | int e = 0; |
161 | int i; | 207 | int i; |
162 | int j; | 208 | int j; |
163 | const char *slabname; | 209 | const char *slabname; |
@@ -169,11 +215,44 @@ static int sn_topology_show(struct seq_file *s, void *d) | |||
169 | struct sn_hwperf_object_info *p; | 215 | struct sn_hwperf_object_info *p; |
170 | struct sn_hwperf_object_info *obj = d; /* this object */ | 216 | struct sn_hwperf_object_info *obj = d; /* this object */ |
171 | struct sn_hwperf_object_info *objs = s->private; /* all objects */ | 217 | struct sn_hwperf_object_info *objs = s->private; /* all objects */ |
218 | int rack, bay, slot, slab; | ||
219 | u8 shubtype; | ||
220 | u8 system_size; | ||
221 | u8 sharing_size; | ||
222 | u8 partid; | ||
223 | u8 coher; | ||
224 | u8 nasid_shift; | ||
225 | u8 region_size; | ||
226 | u16 nasid_mask; | ||
227 | int nasid_msb; | ||
228 | int pci_bus_ordinal = 0; | ||
172 | 229 | ||
173 | if (obj == objs) { | 230 | if (obj == objs) { |
174 | seq_printf(s, "# sn_topology version 1\n"); | 231 | seq_printf(s, "# sn_topology version 2\n"); |
175 | seq_printf(s, "# objtype ordinal location partition" | 232 | seq_printf(s, "# objtype ordinal location partition" |
176 | " [attribute value [, ...]]\n"); | 233 | " [attribute value [, ...]]\n"); |
234 | |||
235 | if (ia64_sn_get_sn_info(0, | ||
236 | &shubtype, &nasid_mask, &nasid_shift, &system_size, | ||
237 | &sharing_size, &partid, &coher, ®ion_size)) | ||
238 | BUG(); | ||
239 | for (nasid_msb=63; nasid_msb > 0; nasid_msb--) { | ||
240 | if (((u64)nasid_mask << nasid_shift) & (1ULL << nasid_msb)) | ||
241 | break; | ||
242 | } | ||
243 | seq_printf(s, "partition %u %s local " | ||
244 | "shubtype %s, " | ||
245 | "nasid_mask 0x%016lx, " | ||
246 | "nasid_bits %d:%d, " | ||
247 | "system_size %d, " | ||
248 | "sharing_size %d, " | ||
249 | "coherency_domain %d, " | ||
250 | "region_size %d\n", | ||
251 | |||
252 | partid, system_utsname.nodename, | ||
253 | shubtype ? "shub2" : "shub1", | ||
254 | (u64)nasid_mask << nasid_shift, nasid_msb, nasid_shift, | ||
255 | system_size, sharing_size, coher, region_size); | ||
177 | } | 256 | } |
178 | 257 | ||
179 | if (SN_HWPERF_FOREIGN(obj)) { | 258 | if (SN_HWPERF_FOREIGN(obj)) { |
@@ -181,7 +260,7 @@ static int sn_topology_show(struct seq_file *s, void *d) | |||
181 | return 0; | 260 | return 0; |
182 | } | 261 | } |
183 | 262 | ||
184 | for (i = 0; obj->name[i]; i++) { | 263 | for (i = 0; i < SN_HWPERF_MAXSTRING && obj->name[i]; i++) { |
185 | if (obj->name[i] == ' ') | 264 | if (obj->name[i] == ' ') |
186 | obj->name[i] = '_'; | 265 | obj->name[i] = '_'; |
187 | } | 266 | } |
@@ -221,6 +300,17 @@ static int sn_topology_show(struct seq_file *s, void *d) | |||
221 | seq_putc(s, '\n'); | 300 | seq_putc(s, '\n'); |
222 | } | 301 | } |
223 | } | 302 | } |
303 | |||
304 | /* | ||
305 | * PCI busses attached to this node, if any | ||
306 | */ | ||
307 | if (sn_hwperf_location_to_bpos(obj->location, | ||
308 | &rack, &bay, &slot, &slab)) { | ||
309 | /* export pci bus info */ | ||
310 | print_pci_topology(s, obj, &pci_bus_ordinal, | ||
311 | rack, bay, slot, slab); | ||
312 | |||
313 | } | ||
224 | } | 314 | } |
225 | 315 | ||
226 | if (obj->ports) { | 316 | if (obj->ports) { |
@@ -397,6 +487,9 @@ static int sn_hwperf_map_err(int hwperf_err) | |||
397 | break; | 487 | break; |
398 | 488 | ||
399 | case SN_HWPERF_OP_BUSY: | 489 | case SN_HWPERF_OP_BUSY: |
490 | e = -EBUSY; | ||
491 | break; | ||
492 | |||
400 | case SN_HWPERF_OP_RECONFIGURE: | 493 | case SN_HWPERF_OP_RECONFIGURE: |
401 | e = -EAGAIN; | 494 | e = -EAGAIN; |
402 | break; | 495 | break; |
@@ -549,6 +642,7 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg) | |||
549 | r = sn_hwperf_op_cpu(&op_info); | 642 | r = sn_hwperf_op_cpu(&op_info); |
550 | if (r) { | 643 | if (r) { |
551 | r = sn_hwperf_map_err(r); | 644 | r = sn_hwperf_map_err(r); |
645 | a.v0 = v0; | ||
552 | goto error; | 646 | goto error; |
553 | } | 647 | } |
554 | break; | 648 | break; |
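
The new sn_hwperf_location_to_bpos() above accepts both the old geoid format (rack, brick type, bay, "#" slab) and the new bladed format that inserts a "^slot" field before the slab. A small user-space check of the two sscanf patterns, using hypothetical location strings rather than real hardware names:

#include <stdio.h>

/* Mirrors the two formats parsed by sn_hwperf_location_to_bpos(). */
static int location_to_bpos(const char *loc,
			    int *rack, int *bay, int *slot, int *slab)
{
	char type;

	if (sscanf(loc, "%03d%c%02d#%d", rack, &type, bay, slab) == 4) {
		*slot = 0;	/* old style geoid has no slot field */
		return 0;
	}
	if (sscanf(loc, "%03d%c%02d^%02d#%d", rack, &type, bay, slot, slab) == 5)
		return 0;
	return -1;
}

int main(void)
{
	static const char *samples[] = { "001c14#0", "010c22^04#1", "garbage" };
	int rack, bay, slot, slab;
	int i;

	for (i = 0; i < 3; i++) {
		if (location_to_bpos(samples[i], &rack, &bay, &slot, &slab) == 0)
			printf("%s -> rack %d bay %d slot %d slab %d\n",
			       samples[i], rack, bay, slot, slab);
		else
			printf("%s -> unparseable\n", samples[i]);
	}
	return 0;
}
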
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c new file mode 100644 index 000000000000..ab9b5f35c2a7 --- /dev/null +++ b/arch/ia64/sn/kernel/tiocx.c | |||
@@ -0,0 +1,552 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (c) 2005 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | |||
9 | #include <linux/module.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/version.h> | ||
12 | #include <linux/slab.h> | ||
13 | #include <linux/spinlock.h> | ||
14 | #include <linux/proc_fs.h> | ||
15 | #include <linux/device.h> | ||
16 | #include <linux/delay.h> | ||
17 | #include <asm/uaccess.h> | ||
18 | #include <asm/sn/sn_sal.h> | ||
19 | #include <asm/sn/addrs.h> | ||
20 | #include <asm/sn/io.h> | ||
21 | #include <asm/sn/types.h> | ||
22 | #include <asm/sn/shubio.h> | ||
23 | #include <asm/sn/tiocx.h> | ||
24 | #include <asm/sn/l1.h> | ||
25 | #include <asm/sn/module.h> | ||
26 | #include "tio.h" | ||
27 | #include "xtalk/xwidgetdev.h" | ||
28 | #include "xtalk/hubdev.h" | ||
29 | |||
30 | #define CX_DEV_NONE 0 | ||
31 | #define DEVICE_NAME "tiocx" | ||
32 | #define WIDGET_ID 0 | ||
33 | #define TIOCX_DEBUG 0 | ||
34 | |||
35 | #if TIOCX_DEBUG | ||
36 | #define DBG(fmt...) printk(KERN_ALERT fmt) | ||
37 | #else | ||
38 | #define DBG(fmt...) | ||
39 | #endif | ||
40 | |||
41 | struct device_attribute dev_attr_cxdev_control; | ||
42 | |||
43 | /** | ||
44 | * tiocx_match - Try to match driver id list with device. | ||
45 | * @dev: device pointer | ||
46 | * @drv: driver pointer | ||
47 | * | ||
48 | * Returns 1 if match, 0 otherwise. | ||
49 | */ | ||
50 | static int tiocx_match(struct device *dev, struct device_driver *drv) | ||
51 | { | ||
52 | struct cx_dev *cx_dev = to_cx_dev(dev); | ||
53 | struct cx_drv *cx_drv = to_cx_driver(drv); | ||
54 | const struct cx_device_id *ids = cx_drv->id_table; | ||
55 | |||
56 | if (!ids) | ||
57 | return 0; | ||
58 | |||
59 | while (ids->part_num) { | ||
60 | if (ids->part_num == cx_dev->cx_id.part_num) | ||
61 | return 1; | ||
62 | ids++; | ||
63 | } | ||
64 | return 0; | ||
65 | |||
66 | } | ||
67 | |||
68 | static int tiocx_hotplug(struct device *dev, char **envp, int num_envp, | ||
69 | char *buffer, int buffer_size) | ||
70 | { | ||
71 | return -ENODEV; | ||
72 | } | ||
73 | |||
74 | static void tiocx_bus_release(struct device *dev) | ||
75 | { | ||
76 | kfree(to_cx_dev(dev)); | ||
77 | } | ||
78 | |||
79 | struct bus_type tiocx_bus_type = { | ||
80 | .name = "tiocx", | ||
81 | .match = tiocx_match, | ||
82 | .hotplug = tiocx_hotplug, | ||
83 | }; | ||
84 | |||
85 | /** | ||
86 | * cx_device_match - Find cx_device in the id table. | ||
87 | * @ids: id table from driver | ||
88 | * @cx_device: part/mfg id for the device | ||
89 | * | ||
90 | */ | ||
91 | static const struct cx_device_id *cx_device_match(const struct cx_device_id | ||
92 | *ids, | ||
93 | struct cx_dev *cx_device) | ||
94 | { | ||
95 | /* | ||
96 | * NOTES: We may want to check for CX_ANY_ID too. | ||
97 | * Do we want to match against nasid too? | ||
98 | * CX_DEV_NONE == 0, if the driver tries to register for | ||
99 | * part/mfg == 0 we should return no-match (NULL) here. | ||
100 | */ | ||
101 | while (ids->part_num && ids->mfg_num) { | ||
102 | if (ids->part_num == cx_device->cx_id.part_num && | ||
103 | ids->mfg_num == cx_device->cx_id.mfg_num) | ||
104 | return ids; | ||
105 | ids++; | ||
106 | } | ||
107 | |||
108 | return NULL; | ||
109 | } | ||
110 | |||
111 | /** | ||
112 | * cx_device_probe - Look for matching device. | ||
113 | * Call driver probe routine if found. | ||
114 | * @cx_driver: driver table (cx_drv struct) from driver | ||
115 | * @cx_device: part/mfg id for the device | ||
116 | */ | ||
117 | static int cx_device_probe(struct device *dev) | ||
118 | { | ||
119 | const struct cx_device_id *id; | ||
120 | struct cx_drv *cx_drv = to_cx_driver(dev->driver); | ||
121 | struct cx_dev *cx_dev = to_cx_dev(dev); | ||
122 | int error = 0; | ||
123 | |||
124 | if (!cx_dev->driver && cx_drv->probe) { | ||
125 | id = cx_device_match(cx_drv->id_table, cx_dev); | ||
126 | if (id) { | ||
127 | if ((error = cx_drv->probe(cx_dev, id)) < 0) | ||
128 | return error; | ||
129 | else | ||
130 | cx_dev->driver = cx_drv; | ||
131 | } | ||
132 | } | ||
133 | |||
134 | return error; | ||
135 | } | ||
136 | |||
137 | /** | ||
138 | * cx_driver_remove - Remove driver from device struct. | ||
139 | * @dev: device | ||
140 | */ | ||
141 | static int cx_driver_remove(struct device *dev) | ||
142 | { | ||
143 | struct cx_dev *cx_dev = to_cx_dev(dev); | ||
144 | struct cx_drv *cx_drv = cx_dev->driver; | ||
145 | if (cx_drv->remove) | ||
146 | cx_drv->remove(cx_dev); | ||
147 | cx_dev->driver = NULL; | ||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | /** | ||
152 | * cx_driver_register - Register the driver. | ||
153 | * @cx_driver: driver table (cx_drv struct) from driver | ||
154 | * | ||
155 | * Called from the driver init routine to register a driver. | ||
156 | * The cx_drv struct contains the driver name, a pointer to | ||
157 | * a table of part/mfg numbers and a pointer to the driver's | ||
158 | * probe/attach routine. | ||
159 | */ | ||
160 | int cx_driver_register(struct cx_drv *cx_driver) | ||
161 | { | ||
162 | cx_driver->driver.name = cx_driver->name; | ||
163 | cx_driver->driver.bus = &tiocx_bus_type; | ||
164 | cx_driver->driver.probe = cx_device_probe; | ||
165 | cx_driver->driver.remove = cx_driver_remove; | ||
166 | |||
167 | return driver_register(&cx_driver->driver); | ||
168 | } | ||
169 | |||
170 | /** | ||
171 | * cx_driver_unregister - Unregister the driver. | ||
172 | * @cx_driver: driver table (cx_drv struct) from driver | ||
173 | */ | ||
174 | int cx_driver_unregister(struct cx_drv *cx_driver) | ||
175 | { | ||
176 | driver_unregister(&cx_driver->driver); | ||
177 | return 0; | ||
178 | } | ||
179 | |||
180 | /** | ||
181 | * cx_device_register - Register a device. | ||
182 | * @nasid: device's nasid | ||
183 | * @part_num: device's part number | ||
184 | * @mfg_num: device's manufacturer number | ||
185 | * @hubdev: hub info associated with this device | ||
186 | * | ||
187 | */ | ||
188 | int | ||
189 | cx_device_register(nasid_t nasid, int part_num, int mfg_num, | ||
190 | struct hubdev_info *hubdev) | ||
191 | { | ||
192 | struct cx_dev *cx_dev; | ||
193 | |||
194 | cx_dev = kcalloc(1, sizeof(struct cx_dev), GFP_KERNEL); | ||
195 | DBG("cx_dev= 0x%p\n", cx_dev); | ||
196 | if (cx_dev == NULL) | ||
197 | return -ENOMEM; | ||
198 | |||
199 | cx_dev->cx_id.part_num = part_num; | ||
200 | cx_dev->cx_id.mfg_num = mfg_num; | ||
201 | cx_dev->cx_id.nasid = nasid; | ||
202 | cx_dev->hubdev = hubdev; | ||
203 | |||
204 | cx_dev->dev.parent = NULL; | ||
205 | cx_dev->dev.bus = &tiocx_bus_type; | ||
206 | cx_dev->dev.release = tiocx_bus_release; | ||
207 | snprintf(cx_dev->dev.bus_id, BUS_ID_SIZE, "%d.0x%x", | ||
208 | cx_dev->cx_id.nasid, cx_dev->cx_id.part_num); | ||
209 | device_register(&cx_dev->dev); | ||
210 | get_device(&cx_dev->dev); | ||
211 | |||
212 | device_create_file(&cx_dev->dev, &dev_attr_cxdev_control); | ||
213 | |||
214 | return 0; | ||
215 | } | ||
216 | |||
217 | /** | ||
218 | * cx_device_unregister - Unregister a device. | ||
219 | * @cx_dev: part/mfg id for the device | ||
220 | */ | ||
221 | int cx_device_unregister(struct cx_dev *cx_dev) | ||
222 | { | ||
223 | put_device(&cx_dev->dev); | ||
224 | device_unregister(&cx_dev->dev); | ||
225 | return 0; | ||
226 | } | ||
227 | |||
228 | /** | ||
229 | * cx_device_reload - Reload the device. | ||
230 | * @nasid: device's nasid | ||
231 | * @part_num: device's part number | ||
232 | * @mfg_num: device's manufacturer number | ||
233 | * | ||
234 | * Remove the device associated with 'nasid' from device list and then | ||
235 | * call device-register with the given part/mfg numbers. | ||
236 | */ | ||
237 | static int cx_device_reload(struct cx_dev *cx_dev) | ||
238 | { | ||
239 | device_remove_file(&cx_dev->dev, &dev_attr_cxdev_control); | ||
240 | cx_device_unregister(cx_dev); | ||
241 | return cx_device_register(cx_dev->cx_id.nasid, cx_dev->cx_id.part_num, | ||
242 | cx_dev->cx_id.mfg_num, cx_dev->hubdev); | ||
243 | } | ||
244 | |||
245 | static inline uint64_t tiocx_intr_alloc(nasid_t nasid, int widget, | ||
246 | u64 sn_irq_info, | ||
247 | int req_irq, nasid_t req_nasid, | ||
248 | int req_slice) | ||
249 | { | ||
250 | struct ia64_sal_retval rv; | ||
251 | rv.status = 0; | ||
252 | rv.v0 = 0; | ||
253 | |||
254 | ia64_sal_oemcall_nolock(&rv, SN_SAL_IOIF_INTERRUPT, | ||
255 | SAL_INTR_ALLOC, nasid, | ||
256 | widget, sn_irq_info, req_irq, | ||
257 | req_nasid, req_slice); | ||
258 | return rv.status; | ||
259 | } | ||
260 | |||
261 | static inline void tiocx_intr_free(nasid_t nasid, int widget, | ||
262 | struct sn_irq_info *sn_irq_info) | ||
263 | { | ||
264 | struct ia64_sal_retval rv; | ||
265 | rv.status = 0; | ||
266 | rv.v0 = 0; | ||
267 | |||
268 | ia64_sal_oemcall_nolock(&rv, SN_SAL_IOIF_INTERRUPT, | ||
269 | SAL_INTR_FREE, nasid, | ||
270 | widget, sn_irq_info->irq_irq, | ||
271 | sn_irq_info->irq_cookie, 0, 0); | ||
272 | } | ||
273 | |||
274 | struct sn_irq_info *tiocx_irq_alloc(nasid_t nasid, int widget, int irq, | ||
275 | nasid_t req_nasid, int slice) | ||
276 | { | ||
277 | struct sn_irq_info *sn_irq_info; | ||
278 | int status; | ||
279 | int sn_irq_size = sizeof(struct sn_irq_info); | ||
280 | |||
281 | if ((nasid & 1) == 0) | ||
282 | return NULL; | ||
283 | |||
284 | sn_irq_info = kmalloc(sn_irq_size, GFP_KERNEL); | ||
285 | if (sn_irq_info == NULL) | ||
286 | return NULL; | ||
287 | |||
288 | memset(sn_irq_info, 0x0, sn_irq_size); | ||
289 | |||
290 | status = tiocx_intr_alloc(nasid, widget, __pa(sn_irq_info), irq, | ||
291 | req_nasid, slice); | ||
292 | if (status) { | ||
293 | kfree(sn_irq_info); | ||
294 | return NULL; | ||
295 | } else { | ||
296 | return sn_irq_info; | ||
297 | } | ||
298 | } | ||
299 | |||
300 | void tiocx_irq_free(struct sn_irq_info *sn_irq_info) | ||
301 | { | ||
302 | uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge; | ||
303 | nasid_t nasid = NASID_GET(bridge); | ||
304 | int widget; | ||
305 | |||
306 | if (nasid & 1) { | ||
307 | widget = TIO_SWIN_WIDGETNUM(bridge); | ||
308 | tiocx_intr_free(nasid, widget, sn_irq_info); | ||
309 | kfree(sn_irq_info); | ||
310 | } | ||
311 | } | ||
312 | |||
313 | uint64_t tiocx_dma_addr(uint64_t addr) | ||
314 | { | ||
315 | return PHYS_TO_TIODMA(addr); | ||
316 | } | ||
317 | |||
318 | uint64_t tiocx_swin_base(int nasid) | ||
319 | { | ||
320 | return TIO_SWIN_BASE(nasid, TIOCX_CORELET); | ||
321 | } | ||
322 | |||
323 | EXPORT_SYMBOL(cx_driver_register); | ||
324 | EXPORT_SYMBOL(cx_driver_unregister); | ||
325 | EXPORT_SYMBOL(cx_device_register); | ||
326 | EXPORT_SYMBOL(cx_device_unregister); | ||
327 | EXPORT_SYMBOL(tiocx_irq_alloc); | ||
328 | EXPORT_SYMBOL(tiocx_irq_free); | ||
329 | EXPORT_SYMBOL(tiocx_bus_type); | ||
330 | EXPORT_SYMBOL(tiocx_dma_addr); | ||
331 | EXPORT_SYMBOL(tiocx_swin_base); | ||
332 | |||
333 | static void tio_conveyor_set(nasid_t nasid, int enable_flag) | ||
334 | { | ||
335 | uint64_t ice_frz; | ||
336 | uint64_t disable_cb = (1ull << 61); | ||
337 | |||
338 | if (!(nasid & 1)) | ||
339 | return; | ||
340 | |||
341 | ice_frz = REMOTE_HUB_L(nasid, TIO_ICE_FRZ_CFG); | ||
342 | if (enable_flag) { | ||
343 | if (!(ice_frz & disable_cb)) /* already enabled */ | ||
344 | return; | ||
345 | ice_frz &= ~disable_cb; | ||
346 | } else { | ||
347 | if (ice_frz & disable_cb) /* already disabled */ | ||
348 | return; | ||
349 | ice_frz |= disable_cb; | ||
350 | } | ||
351 | DBG(KERN_ALERT "TIO_ICE_FRZ_CFG= 0x%lx\n", ice_frz); | ||
352 | REMOTE_HUB_S(nasid, TIO_ICE_FRZ_CFG, ice_frz); | ||
353 | } | ||
354 | |||
355 | #define tio_conveyor_enable(nasid) tio_conveyor_set(nasid, 1) | ||
356 | #define tio_conveyor_disable(nasid) tio_conveyor_set(nasid, 0) | ||
357 | |||
358 | static void tio_corelet_reset(nasid_t nasid, int corelet) | ||
359 | { | ||
360 | if (!(nasid & 1)) | ||
361 | return; | ||
362 | |||
363 | REMOTE_HUB_S(nasid, TIO_ICE_PMI_TX_CFG, 1 << corelet); | ||
364 | udelay(2000); | ||
365 | REMOTE_HUB_S(nasid, TIO_ICE_PMI_TX_CFG, 0); | ||
366 | udelay(2000); | ||
367 | } | ||
368 | |||
369 | static int tiocx_btchar_get(int nasid) | ||
370 | { | ||
371 | moduleid_t module_id; | ||
372 | geoid_t geoid; | ||
373 | int cnodeid; | ||
374 | |||
375 | cnodeid = nasid_to_cnodeid(nasid); | ||
376 | geoid = cnodeid_get_geoid(cnodeid); | ||
377 | module_id = geo_module(geoid); | ||
378 | return MODULE_GET_BTCHAR(module_id); | ||
379 | } | ||
380 | |||
381 | static int is_fpga_brick(int nasid) | ||
382 | { | ||
383 | switch (tiocx_btchar_get(nasid)) { | ||
384 | case L1_BRICKTYPE_SA: | ||
385 | case L1_BRICKTYPE_ATHENA: | ||
386 | return 1; | ||
387 | } | ||
388 | return 0; | ||
389 | } | ||
390 | |||
391 | static int bitstream_loaded(nasid_t nasid) | ||
392 | { | ||
393 | uint64_t cx_credits; | ||
394 | |||
395 | cx_credits = REMOTE_HUB_L(nasid, TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3); | ||
396 | cx_credits &= TIO_ICE_PMI_TX_DYN_CREDIT_STAT_CB3_CREDIT_CNT_MASK; | ||
397 | DBG("cx_credits= 0x%lx\n", cx_credits); | ||
398 | |||
399 | return (cx_credits == 0xf) ? 1 : 0; | ||
400 | } | ||
401 | |||
402 | static int tiocx_reload(struct cx_dev *cx_dev) | ||
403 | { | ||
404 | int part_num = CX_DEV_NONE; | ||
405 | int mfg_num = CX_DEV_NONE; | ||
406 | nasid_t nasid = cx_dev->cx_id.nasid; | ||
407 | |||
408 | if (bitstream_loaded(nasid)) { | ||
409 | uint64_t cx_id; | ||
410 | |||
411 | cx_id = | ||
412 | *(volatile int32_t *)(TIO_SWIN_BASE(nasid, TIOCX_CORELET) + | ||
413 | WIDGET_ID); | ||
414 | part_num = XWIDGET_PART_NUM(cx_id); | ||
415 | mfg_num = XWIDGET_MFG_NUM(cx_id); | ||
416 | DBG("part= 0x%x, mfg= 0x%x\n", part_num, mfg_num); | ||
417 | /* just ignore it if it's a CE */ | ||
418 | if (part_num == TIO_CE_ASIC_PARTNUM) | ||
419 | return 0; | ||
420 | } | ||
421 | |||
422 | cx_dev->cx_id.part_num = part_num; | ||
423 | cx_dev->cx_id.mfg_num = mfg_num; | ||
424 | |||
425 | /* | ||
426 | * Delete old device and register the new one. It's ok if | ||
427 | * part_num/mfg_num == CX_DEV_NONE. We want to register | ||
428 | * devices in the table even if a bitstream isn't loaded. | ||
429 | * That allows us to see that a bitstream isn't loaded via | ||
430 | * TIOCX_IOCTL_DEV_LIST. | ||
431 | */ | ||
432 | return cx_device_reload(cx_dev); | ||
433 | } | ||
434 | |||
435 | static ssize_t show_cxdev_control(struct device *dev, char *buf) | ||
436 | { | ||
437 | struct cx_dev *cx_dev = to_cx_dev(dev); | ||
438 | |||
439 | return sprintf(buf, "0x%x 0x%x 0x%x %d\n", | ||
440 | cx_dev->cx_id.nasid, | ||
441 | cx_dev->cx_id.part_num, cx_dev->cx_id.mfg_num, | ||
442 | tiocx_btchar_get(cx_dev->cx_id.nasid)); | ||
443 | } | ||
444 | |||
445 | static ssize_t store_cxdev_control(struct device *dev, const char *buf, | ||
446 | size_t count) | ||
447 | { | ||
448 | int n; | ||
449 | struct cx_dev *cx_dev = to_cx_dev(dev); | ||
450 | |||
451 | if (!capable(CAP_SYS_ADMIN)) | ||
452 | return -EPERM; | ||
453 | |||
454 | if (count <= 0) | ||
455 | return 0; | ||
456 | |||
457 | n = simple_strtoul(buf, NULL, 0); | ||
458 | |||
459 | switch (n) { | ||
460 | case 1: | ||
461 | tiocx_reload(cx_dev); | ||
462 | break; | ||
463 | case 3: | ||
464 | tio_corelet_reset(cx_dev->cx_id.nasid, TIOCX_CORELET); | ||
465 | break; | ||
466 | default: | ||
467 | break; | ||
468 | } | ||
469 | |||
470 | return count; | ||
471 | } | ||
472 | |||
473 | DEVICE_ATTR(cxdev_control, 0644, show_cxdev_control, store_cxdev_control); | ||
474 | |||
475 | static int __init tiocx_init(void) | ||
476 | { | ||
477 | cnodeid_t cnodeid; | ||
478 | int found_tiocx_device = 0; | ||
479 | |||
480 | bus_register(&tiocx_bus_type); | ||
481 | |||
482 | for (cnodeid = 0; cnodeid < MAX_COMPACT_NODES; cnodeid++) { | ||
483 | nasid_t nasid; | ||
484 | |||
485 | if ((nasid = cnodeid_to_nasid(cnodeid)) < 0) | ||
486 | break; /* No more nasids .. bail out of loop */ | ||
487 | |||
488 | if ((nasid & 0x1) && is_fpga_brick(nasid)) { | ||
489 | struct hubdev_info *hubdev; | ||
490 | struct xwidget_info *widgetp; | ||
491 | |||
492 | DBG("Found TIO at nasid 0x%x\n", nasid); | ||
493 | |||
494 | hubdev = | ||
495 | (struct hubdev_info *)(NODEPDA(cnodeid)->pdinfo); | ||
496 | |||
497 | widgetp = &hubdev->hdi_xwidget_info[TIOCX_CORELET]; | ||
498 | |||
499 | /* The CE hangs off of the CX port but is not an FPGA */ | ||
500 | if (widgetp->xwi_hwid.part_num == TIO_CE_ASIC_PARTNUM) | ||
501 | continue; | ||
502 | |||
503 | tio_corelet_reset(nasid, TIOCX_CORELET); | ||
504 | tio_conveyor_enable(nasid); | ||
505 | |||
506 | if (cx_device_register | ||
507 | (nasid, widgetp->xwi_hwid.part_num, | ||
508 | widgetp->xwi_hwid.mfg_num, hubdev) < 0) | ||
509 | return -ENXIO; | ||
510 | else | ||
511 | found_tiocx_device++; | ||
512 | } | ||
513 | } | ||
514 | |||
515 | /* It's ok if we find zero devices. */ | ||
516 | DBG("found_tiocx_device= %d\n", found_tiocx_device); | ||
517 | |||
518 | return 0; | ||
519 | } | ||
520 | |||
521 | static void __exit tiocx_exit(void) | ||
522 | { | ||
523 | struct device *dev; | ||
524 | struct device *tdev; | ||
525 | |||
526 | DBG("tiocx_exit\n"); | ||
527 | |||
528 | /* | ||
529 | * Unregister devices. | ||
530 | */ | ||
531 | list_for_each_entry_safe(dev, tdev, &tiocx_bus_type.devices.list, | ||
532 | bus_list) { | ||
533 | if (dev) { | ||
534 | struct cx_dev *cx_dev = to_cx_dev(dev); | ||
535 | device_remove_file(dev, &dev_attr_cxdev_control); | ||
536 | cx_device_unregister(cx_dev); | ||
537 | } | ||
538 | } | ||
539 | |||
540 | bus_unregister(&tiocx_bus_type); | ||
541 | } | ||
542 | |||
543 | module_init(tiocx_init); | ||
544 | module_exit(tiocx_exit); | ||
545 | |||
546 | /************************************************************************ | ||
547 | * Module licensing and description | ||
548 | ************************************************************************/ | ||
549 | MODULE_LICENSE("GPL"); | ||
550 | MODULE_AUTHOR("Bruce Losure <blosure@sgi.com>"); | ||
551 | MODULE_DESCRIPTION("TIOCX module"); | ||
552 | MODULE_SUPPORTED_DEVICE(DEVICE_NAME); | ||
diff --git a/arch/ia64/sn/kernel/xp_main.c b/arch/ia64/sn/kernel/xp_main.c new file mode 100644 index 000000000000..3be52a34c80f --- /dev/null +++ b/arch/ia64/sn/kernel/xp_main.c | |||
@@ -0,0 +1,289 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved. | ||
7 | */ | ||
8 | |||
9 | |||
10 | /* | ||
11 | * Cross Partition (XP) base. | ||
12 | * | ||
13 | * XP provides a base from which its users can interact | ||
14 | * with XPC, yet not be dependent on XPC. | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | |||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <asm/sn/intr.h> | ||
23 | #include <asm/sn/sn_sal.h> | ||
24 | #include <asm/sn/xp.h> | ||
25 | |||
26 | |||
27 | /* | ||
28 | * Target of nofault PIO read. | ||
29 | */ | ||
30 | u64 xp_nofault_PIOR_target; | ||
31 | |||
32 | |||
33 | /* | ||
34 | * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level | ||
35 | * users of XPC. | ||
36 | */ | ||
37 | struct xpc_registration xpc_registrations[XPC_NCHANNELS]; | ||
38 | |||
39 | |||
40 | /* | ||
41 | * Initialize the XPC interface to indicate that XPC isn't loaded. | ||
42 | */ | ||
43 | static enum xpc_retval xpc_notloaded(void) { return xpcNotLoaded; } | ||
44 | |||
45 | struct xpc_interface xpc_interface = { | ||
46 | (void (*)(int)) xpc_notloaded, | ||
47 | (void (*)(int)) xpc_notloaded, | ||
48 | (enum xpc_retval (*)(partid_t, int, u32, void **)) xpc_notloaded, | ||
49 | (enum xpc_retval (*)(partid_t, int, void *)) xpc_notloaded, | ||
50 | (enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func, void *)) | ||
51 | xpc_notloaded, | ||
52 | (void (*)(partid_t, int, void *)) xpc_notloaded, | ||
53 | (enum xpc_retval (*)(partid_t, void *)) xpc_notloaded | ||
54 | }; | ||
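Each slot above is initialized to a cast of xpc_notloaded(), so any call made through xpc_interface before the XPC module has called xpc_set_interface() simply returns xpcNotLoaded. A rough sketch of what such a caller sees (partid, ch_number and flags are placeholders, not names from this patch):

    enum xpc_retval ret;
    void *payload;

    ret = xpc_interface.allocate(partid, ch_number, flags, &payload);
    if (ret == xpcNotLoaded) {
            /* XPC is not loaded yet; nothing was allocated */
    }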
55 | |||
56 | |||
57 | /* | ||
58 | * XPC calls this when it (the XPC module) has been loaded. | ||
59 | */ | ||
60 | void | ||
61 | xpc_set_interface(void (*connect)(int), | ||
62 | void (*disconnect)(int), | ||
63 | enum xpc_retval (*allocate)(partid_t, int, u32, void **), | ||
64 | enum xpc_retval (*send)(partid_t, int, void *), | ||
65 | enum xpc_retval (*send_notify)(partid_t, int, void *, | ||
66 | xpc_notify_func, void *), | ||
67 | void (*received)(partid_t, int, void *), | ||
68 | enum xpc_retval (*partid_to_nasids)(partid_t, void *)) | ||
69 | { | ||
70 | xpc_interface.connect = connect; | ||
71 | xpc_interface.disconnect = disconnect; | ||
72 | xpc_interface.allocate = allocate; | ||
73 | xpc_interface.send = send; | ||
74 | xpc_interface.send_notify = send_notify; | ||
75 | xpc_interface.received = received; | ||
76 | xpc_interface.partid_to_nasids = partid_to_nasids; | ||
77 | } | ||
78 | |||
79 | |||
80 | /* | ||
81 | * XPC calls this when it (the XPC module) is being unloaded. | ||
82 | */ | ||
83 | void | ||
84 | xpc_clear_interface(void) | ||
85 | { | ||
86 | xpc_interface.connect = (void (*)(int)) xpc_notloaded; | ||
87 | xpc_interface.disconnect = (void (*)(int)) xpc_notloaded; | ||
88 | xpc_interface.allocate = (enum xpc_retval (*)(partid_t, int, u32, | ||
89 | void **)) xpc_notloaded; | ||
90 | xpc_interface.send = (enum xpc_retval (*)(partid_t, int, void *)) | ||
91 | xpc_notloaded; | ||
92 | xpc_interface.send_notify = (enum xpc_retval (*)(partid_t, int, void *, | ||
93 | xpc_notify_func, void *)) xpc_notloaded; | ||
94 | xpc_interface.received = (void (*)(partid_t, int, void *)) | ||
95 | xpc_notloaded; | ||
96 | xpc_interface.partid_to_nasids = (enum xpc_retval (*)(partid_t, void *)) | ||
97 | xpc_notloaded; | ||
98 | } | ||
99 | |||
100 | |||
101 | /* | ||
102 | * Register for automatic establishment of a channel connection whenever | ||
103 | * a partition comes up. | ||
104 | * | ||
105 | * Arguments: | ||
106 | * | ||
107 | * ch_number - channel # to register for connection. | ||
108 | * func - function to call for asynchronous notification of channel | ||
109 | * state changes (i.e., connection, disconnection, error) and | ||
110 | * the arrival of incoming messages. | ||
111 | * key - pointer to optional user-defined value that gets passed back | ||
112 | * to the user on any callouts made to func. | ||
113 | * payload_size - size in bytes of the XPC message's payload area which | ||
114 | * contains a user-defined message. The user should make | ||
115 | * this large enough to hold their largest message. | ||
116 | * nentries - max #of XPC message entries a message queue can contain. | ||
117 | * The actual number, which is determined when a connection | ||
118 | * is established and may be less than requested, will be | ||
119 | * passed to the user via the xpcConnected callout. | ||
120 | * assigned_limit - max number of kthreads allowed to be processing | ||
121 | * messages (per connection) at any given instant. | ||
122 | * idle_limit - max number of kthreads allowed to be idle at any given | ||
123 | * instant. | ||
124 | */ | ||
125 | enum xpc_retval | ||
126 | xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size, | ||
127 | u16 nentries, u32 assigned_limit, u32 idle_limit) | ||
128 | { | ||
129 | struct xpc_registration *registration; | ||
130 | |||
131 | |||
132 | DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); | ||
133 | DBUG_ON(payload_size == 0 || nentries == 0); | ||
134 | DBUG_ON(func == NULL); | ||
135 | DBUG_ON(assigned_limit == 0 || idle_limit > assigned_limit); | ||
136 | |||
137 | registration = &xpc_registrations[ch_number]; | ||
138 | |||
139 | if (down_interruptible(®istration->sema) != 0) { | ||
140 | return xpcInterrupted; | ||
141 | } | ||
142 | |||
143 | /* if XPC_CHANNEL_REGISTERED(ch_number) */ | ||
144 | if (registration->func != NULL) { | ||
145 | up(®istration->sema); | ||
146 | return xpcAlreadyRegistered; | ||
147 | } | ||
148 | |||
149 | /* register the channel for connection */ | ||
150 | registration->msg_size = XPC_MSG_SIZE(payload_size); | ||
151 | registration->nentries = nentries; | ||
152 | registration->assigned_limit = assigned_limit; | ||
153 | registration->idle_limit = idle_limit; | ||
154 | registration->key = key; | ||
155 | registration->func = func; | ||
156 | |||
157 | up(®istration->sema); | ||
158 | |||
159 | xpc_interface.connect(ch_number); | ||
160 | |||
161 | return xpcSuccess; | ||
162 | } | ||
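For illustration, a kernel-level client might register a channel at module init and unregister it on exit roughly as sketched below; the channel number, payload struct, limits and callback are hypothetical, and the xpc_channel_func prototype is assumed to match the one declared in asm/sn/xp.h:

    #define MY_XPC_CHANNEL	3	/* assumed free channel < XPC_NCHANNELS */

    struct my_payload {
    	u64 seq;
    	char text[48];
    };

    static void
    my_channel_func(enum xpc_retval reason, partid_t partid, int ch_number,
    		void *data, void *key)
    {
    	/* called on connection, disconnection, errors and message arrival */
    }

    /* at init: register; XPC connects the channel as partitions come up */
    if (xpc_connect(MY_XPC_CHANNEL, my_channel_func, NULL,
    		sizeof(struct my_payload), 16 /* nentries */,
    		8 /* assigned_limit */, 2 /* idle_limit */) != xpcSuccess) {
    	/* registration failed (e.g. xpcAlreadyRegistered) */
    }

    /* at exit: unregister and wait for the channel to be torn down */
    xpc_disconnect(MY_XPC_CHANNEL);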
163 | |||
164 | |||
165 | /* | ||
166 | * Remove the registration for automatic connection of the specified channel | ||
167 | * when a partition comes up. | ||
168 | * | ||
169 | * Before returning, xpc_disconnect() will wait until all connections on the | ||
170 | * specified channel have been closed/torn down. So the caller can be assured | ||
171 | * that they will not be receiving any more callouts from XPC to their | ||
172 | * function registered via xpc_connect(). | ||
173 | * | ||
174 | * Arguments: | ||
175 | * | ||
176 | * ch_number - channel # to unregister. | ||
177 | */ | ||
178 | void | ||
179 | xpc_disconnect(int ch_number) | ||
180 | { | ||
181 | struct xpc_registration *registration; | ||
182 | |||
183 | |||
184 | DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); | ||
185 | |||
186 | registration = &xpc_registrations[ch_number]; | ||
187 | |||
188 | /* | ||
189 | * We've decided not to make this a down_interruptible(), since we | ||
190 | * figured XPC's users will just turn around and call xpc_disconnect() | ||
191 | * again anyway, so we might as well wait, if need be. | ||
192 | */ | ||
193 | down(®istration->sema); | ||
194 | |||
195 | /* if !XPC_CHANNEL_REGISTERED(ch_number) */ | ||
196 | if (registration->func == NULL) { | ||
197 | up(®istration->sema); | ||
198 | return; | ||
199 | } | ||
200 | |||
201 | /* remove the connection registration for the specified channel */ | ||
202 | registration->func = NULL; | ||
203 | registration->key = NULL; | ||
204 | registration->nentries = 0; | ||
205 | registration->msg_size = 0; | ||
206 | registration->assigned_limit = 0; | ||
207 | registration->idle_limit = 0; | ||
208 | |||
209 | xpc_interface.disconnect(ch_number); | ||
210 | |||
211 | up(®istration->sema); | ||
212 | |||
213 | return; | ||
214 | } | ||
215 | |||
216 | |||
217 | int __init | ||
218 | xp_init(void) | ||
219 | { | ||
220 | int ret, ch_number; | ||
221 | u64 func_addr = *(u64 *) xp_nofault_PIOR; | ||
222 | u64 err_func_addr = *(u64 *) xp_error_PIOR; | ||
223 | |||
224 | |||
225 | if (!ia64_platform_is("sn2")) { | ||
226 | return -ENODEV; | ||
227 | } | ||
228 | |||
229 | /* | ||
230 | * Register a nofault code region which performs a cross-partition | ||
231 | * PIO read. If the PIO read times out, the MCA handler will consume | ||
232 | * the error and return to a kernel-provided instruction to indicate | ||
233 | * an error. This PIO read exists because it is guaranteed to time out | ||
234 | * if the destination is down (AMO operations do not time out on at | ||
235 | * least some CPUs on Shubs <= v1.2, which unfortunately we have to | ||
236 | * work around). | ||
237 | */ | ||
238 | if ((ret = sn_register_nofault_code(func_addr, err_func_addr, | ||
239 | err_func_addr, 1, 1)) != 0) { | ||
240 | printk(KERN_ERR "XP: can't register nofault code, error=%d\n", | ||
241 | ret); | ||
242 | } | ||
243 | /* | ||
244 | * Setup the nofault PIO read target. (There is no special reason why | ||
245 | * SH_IPI_ACCESS was selected.) | ||
246 | */ | ||
247 | if (is_shub2()) { | ||
248 | xp_nofault_PIOR_target = SH2_IPI_ACCESS0; | ||
249 | } else { | ||
250 | xp_nofault_PIOR_target = SH1_IPI_ACCESS; | ||
251 | } | ||
252 | |||
253 | /* initialize the connection registration semaphores */ | ||
254 | for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) { | ||
255 | sema_init(&xpc_registrations[ch_number].sema, 1); /* mutex */ | ||
256 | } | ||
257 | |||
258 | return 0; | ||
259 | } | ||
260 | module_init(xp_init); | ||
261 | |||
262 | |||
263 | void __exit | ||
264 | xp_exit(void) | ||
265 | { | ||
266 | u64 func_addr = *(u64 *) xp_nofault_PIOR; | ||
267 | u64 err_func_addr = *(u64 *) xp_error_PIOR; | ||
268 | |||
269 | |||
270 | /* unregister the PIO read nofault code region */ | ||
271 | (void) sn_register_nofault_code(func_addr, err_func_addr, | ||
272 | err_func_addr, 1, 0); | ||
273 | } | ||
274 | module_exit(xp_exit); | ||
275 | |||
276 | |||
277 | MODULE_AUTHOR("Silicon Graphics, Inc."); | ||
278 | MODULE_DESCRIPTION("Cross Partition (XP) base"); | ||
279 | MODULE_LICENSE("GPL"); | ||
280 | |||
281 | EXPORT_SYMBOL(xp_nofault_PIOR); | ||
282 | EXPORT_SYMBOL(xp_nofault_PIOR_target); | ||
283 | EXPORT_SYMBOL(xpc_registrations); | ||
284 | EXPORT_SYMBOL(xpc_interface); | ||
285 | EXPORT_SYMBOL(xpc_clear_interface); | ||
286 | EXPORT_SYMBOL(xpc_set_interface); | ||
287 | EXPORT_SYMBOL(xpc_connect); | ||
288 | EXPORT_SYMBOL(xpc_disconnect); | ||
289 | |||
diff --git a/arch/ia64/sn/kernel/xp_nofault.S b/arch/ia64/sn/kernel/xp_nofault.S new file mode 100644 index 000000000000..b772543053c9 --- /dev/null +++ b/arch/ia64/sn/kernel/xp_nofault.S | |||
@@ -0,0 +1,31 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved. | ||
7 | */ | ||
8 | |||
9 | |||
10 | /* | ||
11 | * The xp_nofault_PIOR function takes a pointer to a remote PIO register | ||
12 | * and attempts to load and consume a value from it. This function | ||
13 | * will be registered as a nofault code block. In the event that the | ||
14 | * PIO read fails, the MCA handler will force the error to look | ||
15 | * corrected and vector to the xp_error_PIOR which will return an error. | ||
16 | * | ||
17 | * extern int xp_nofault_PIOR(void *remote_register); | ||
18 | */ | ||
19 | |||
20 | .global xp_nofault_PIOR | ||
21 | xp_nofault_PIOR: | ||
22 | mov r8=r0 // Stage a success return value | ||
23 | ld8.acq r9=[r32];; // PIO Read the specified register | ||
24 | adds r9=1,r9 // Add to force a consume | ||
25 | br.ret.sptk.many b0;; // Return success | ||
26 | |||
27 | .global xp_error_PIOR | ||
28 | xp_error_PIOR: | ||
29 | mov r8=1 // Return value of 1 | ||
30 | br.ret.sptk.many b0;; // Return failure | ||
31 | |||
diff --git a/arch/ia64/sn/kernel/xpc.h b/arch/ia64/sn/kernel/xpc.h new file mode 100644 index 000000000000..1a0aed8490d1 --- /dev/null +++ b/arch/ia64/sn/kernel/xpc.h | |||
@@ -0,0 +1,991 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved. | ||
7 | */ | ||
8 | |||
9 | |||
10 | /* | ||
11 | * Cross Partition Communication (XPC) structures and macros. | ||
12 | */ | ||
13 | |||
14 | #ifndef _IA64_SN_KERNEL_XPC_H | ||
15 | #define _IA64_SN_KERNEL_XPC_H | ||
16 | |||
17 | |||
18 | #include <linux/config.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/sysctl.h> | ||
21 | #include <linux/device.h> | ||
22 | #include <asm/pgtable.h> | ||
23 | #include <asm/processor.h> | ||
24 | #include <asm/sn/bte.h> | ||
25 | #include <asm/sn/clksupport.h> | ||
26 | #include <asm/sn/addrs.h> | ||
27 | #include <asm/sn/mspec.h> | ||
28 | #include <asm/sn/shub_mmr.h> | ||
29 | #include <asm/sn/xp.h> | ||
30 | |||
31 | |||
32 | /* | ||
33 | * XPC Version numbers consist of a major and minor number. XPC can always | ||
34 | * talk to versions with same major #, and never talk to versions with a | ||
35 | * different major #. | ||
36 | */ | ||
37 | #define _XPC_VERSION(_maj, _min) (((_maj) << 4) | ((_min) & 0xf)) | ||
38 | #define XPC_VERSION_MAJOR(_v) ((_v) >> 4) | ||
39 | #define XPC_VERSION_MINOR(_v) ((_v) & 0xf) | ||
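A quick worked example of the encoding above:

    /* _XPC_VERSION(3, 1) == 0x31                                  */
    /* XPC_VERSION_MAJOR(0x31) == 3, XPC_VERSION_MINOR(0x31) == 1  */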
40 | |||
41 | |||
42 | /* | ||
43 | * The next macros define word or bit representations for given | ||
44 | * C-brick nasid in either the SAL provided bit array representing | ||
45 | * nasids in the partition/machine or the AMO_t array used for | ||
46 | * inter-partition initiation communications. | ||
47 | * | ||
48 | * For SN2 machines, C-Bricks are always even-numbered NASIDs. As | ||
49 | * such, some space will be saved by insisting that nasid information | ||
50 | * passed from SAL always be packed for C-Bricks and the | ||
51 | * cross-partition interrupts use the same packing scheme. | ||
52 | */ | ||
53 | #define XPC_NASID_W_INDEX(_n) (((_n) / 64) / 2) | ||
54 | #define XPC_NASID_B_INDEX(_n) (((_n) / 2) & (64 - 1)) | ||
55 | #define XPC_NASID_IN_ARRAY(_n, _p) ((_p)[XPC_NASID_W_INDEX(_n)] & \ | ||
56 | (1UL << XPC_NASID_B_INDEX(_n))) | ||
57 | #define XPC_NASID_FROM_W_B(_w, _b) (((_w) * 64 + (_b)) * 2) | ||
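A worked example of this packing for the (even) C-brick nasid 130:

    /* XPC_NASID_W_INDEX(130) = (130/64)/2 = 1                 */
    /* XPC_NASID_B_INDEX(130) = (130/2) & 63 = 1               */
    /* XPC_NASID_FROM_W_B(1, 1) = (1*64 + 1) * 2 = 130         */
    /* XPC_NASID_IN_ARRAY(130, mask) tests bit 1 of mask[1]    */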
58 | |||
59 | #define XPC_HB_DEFAULT_INTERVAL 5 /* incr HB every x secs */ | ||
60 | #define XPC_HB_CHECK_DEFAULT_TIMEOUT 20 /* check HB every x secs */ | ||
61 | |||
62 | /* define the process name of HB checker and the CPU it is pinned to */ | ||
63 | #define XPC_HB_CHECK_THREAD_NAME "xpc_hb" | ||
64 | #define XPC_HB_CHECK_CPU 0 | ||
65 | |||
66 | /* define the process name of the discovery thread */ | ||
67 | #define XPC_DISCOVERY_THREAD_NAME "xpc_discovery" | ||
68 | |||
69 | |||
70 | #define XPC_HB_ALLOWED(_p, _v) ((_v)->heartbeating_to_mask & (1UL << (_p))) | ||
71 | #define XPC_ALLOW_HB(_p, _v) (_v)->heartbeating_to_mask |= (1UL << (_p)) | ||
72 | #define XPC_DISALLOW_HB(_p, _v) (_v)->heartbeating_to_mask &= (~(1UL << (_p))) | ||
73 | |||
74 | |||
75 | /* | ||
76 | * Reserved Page provided by SAL. | ||
77 | * | ||
78 | * SAL provides one page per partition of reserved memory. When SAL | ||
79 | * initialization is complete, SAL_signature, SAL_version, partid, | ||
80 | * part_nasids, and mach_nasids are set. | ||
81 | * | ||
82 | * Note: Until vars_pa is set, the partition XPC code has not been initialized. | ||
83 | */ | ||
84 | struct xpc_rsvd_page { | ||
85 | u64 SAL_signature; /* SAL unique signature */ | ||
86 | u64 SAL_version; /* SAL specified version */ | ||
87 | u8 partid; /* partition ID from SAL */ | ||
88 | u8 version; | ||
89 | u8 pad[6]; /* pad to u64 align */ | ||
90 | u64 vars_pa; | ||
91 | u64 part_nasids[XP_NASID_MASK_WORDS] ____cacheline_aligned; | ||
92 | u64 mach_nasids[XP_NASID_MASK_WORDS] ____cacheline_aligned; | ||
93 | }; | ||
94 | #define XPC_RP_VERSION _XPC_VERSION(1,0) /* version 1.0 of the reserved page */ | ||
95 | |||
96 | #define XPC_RSVD_PAGE_ALIGNED_SIZE \ | ||
97 | (L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page))) | ||
98 | |||
99 | |||
100 | /* | ||
101 | * Define the structures by which XPC variables can be exported to other | ||
102 | * partitions. (There are two: struct xpc_vars and struct xpc_vars_part) | ||
103 | */ | ||
104 | |||
105 | /* | ||
106 | * The following structure describes the partition generic variables | ||
107 | * needed by other partitions in order to properly initialize. | ||
108 | * | ||
109 | * struct xpc_vars version number also applies to struct xpc_vars_part. | ||
110 | * Changes to either structure and/or related functionality should be | ||
111 | * reflected by incrementing either the major or minor version numbers | ||
112 | * of struct xpc_vars. | ||
113 | */ | ||
114 | struct xpc_vars { | ||
115 | u8 version; | ||
116 | u64 heartbeat; | ||
117 | u64 heartbeating_to_mask; | ||
118 | u64 kdb_status; /* 0 = machine running */ | ||
119 | int act_nasid; | ||
120 | int act_phys_cpuid; | ||
121 | u64 vars_part_pa; | ||
122 | u64 amos_page_pa; /* paddr of page of AMOs from MSPEC driver */ | ||
123 | AMO_t *amos_page; /* vaddr of page of AMOs from MSPEC driver */ | ||
124 | AMO_t *act_amos; /* pointer to the first activation AMO */ | ||
125 | }; | ||
126 | #define XPC_V_VERSION _XPC_VERSION(3,0) /* version 3.0 of the cross vars */ | ||
127 | |||
128 | #define XPC_VARS_ALIGNED_SIZE (L1_CACHE_ALIGN(sizeof(struct xpc_vars))) | ||
129 | |||
130 | /* | ||
131 | * The following structure describes the per partition specific variables. | ||
132 | * | ||
133 | * An array of these structures, one per partition, will be defined. As a | ||
134 | * partition becomes active XPC will copy the array entry corresponding to | ||
135 | * itself from that partition. It is desirable that the size of this | ||
136 | * structure evenly divide into a cacheline, such that none of the entries | ||
137 | * in this array crosses a cacheline boundary. As it is now, each entry | ||
138 | * occupies half a cacheline. | ||
139 | */ | ||
140 | struct xpc_vars_part { | ||
141 | u64 magic; | ||
142 | |||
143 | u64 openclose_args_pa; /* physical address of open and close args */ | ||
144 | u64 GPs_pa; /* physical address of Get/Put values */ | ||
145 | |||
146 | u64 IPI_amo_pa; /* physical address of IPI AMO_t structure */ | ||
147 | int IPI_nasid; /* nasid of where to send IPIs */ | ||
148 | int IPI_phys_cpuid; /* physical CPU ID of where to send IPIs */ | ||
149 | |||
150 | u8 nchannels; /* #of defined channels supported */ | ||
151 | |||
152 | u8 reserved[23]; /* pad to a full 64 bytes */ | ||
153 | }; | ||
154 | |||
155 | /* | ||
156 | * The vars_part MAGIC numbers play a part in the first contact protocol. | ||
157 | * | ||
158 | * MAGIC1 indicates that the per partition specific variables for a remote | ||
159 | * partition have been initialized by this partition. | ||
160 | * | ||
161 | * MAGIC2 indicates that this partition has pulled the remote partition's | ||
162 | * per partition variables that pertain to this partition. | ||
163 | */ | ||
164 | #define XPC_VP_MAGIC1 0x0053524156435058L /* 'XPCVARS\0'L (little endian) */ | ||
165 | #define XPC_VP_MAGIC2 0x0073726176435058L /* 'XPCvars\0'L (little endian) */ | ||
166 | |||
167 | |||
168 | |||
169 | /* | ||
170 | * Functions registered by add_timer() or called by kernel_thread() only | ||
171 | * allow for a single 64-bit argument. The following macros can be used to | ||
172 | * pack and unpack two (32-bit, 16-bit or 8-bit) arguments into or out from | ||
173 | * the passed argument. | ||
174 | */ | ||
175 | #define XPC_PACK_ARGS(_arg1, _arg2) \ | ||
176 | ((((u64) _arg1) & 0xffffffff) | \ | ||
177 | ((((u64) _arg2) & 0xffffffff) << 32)) | ||
178 | |||
179 | #define XPC_UNPACK_ARG1(_args) (((u64) _args) & 0xffffffff) | ||
180 | #define XPC_UNPACK_ARG2(_args) ((((u64) _args) >> 32) & 0xffffffff) | ||
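A minimal sketch of the intended use, e.g. handing a (partid, channel #) pair to a timer or kernel thread through its single argument; the variable names are illustrative:

    unsigned long arg = XPC_PACK_ARGS(partid, ch_number);

    /* ... later, inside the timer or kthread function ... */
    partid_t partid = (partid_t) XPC_UNPACK_ARG1(arg);
    int ch_number = (int) XPC_UNPACK_ARG2(arg);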
181 | |||
182 | |||
183 | |||
184 | /* | ||
185 | * Define a Get/Put value pair (pointers) used with a message queue. | ||
186 | */ | ||
187 | struct xpc_gp { | ||
188 | s64 get; /* Get value */ | ||
189 | s64 put; /* Put value */ | ||
190 | }; | ||
191 | |||
192 | #define XPC_GP_SIZE \ | ||
193 | L1_CACHE_ALIGN(sizeof(struct xpc_gp) * XPC_NCHANNELS) | ||
194 | |||
195 | |||
196 | |||
197 | /* | ||
198 | * Define a structure that contains arguments associated with opening and | ||
199 | * closing a channel. | ||
200 | */ | ||
201 | struct xpc_openclose_args { | ||
202 | u16 reason; /* reason why channel is closing */ | ||
203 | u16 msg_size; /* sizeof each message entry */ | ||
204 | u16 remote_nentries; /* #of message entries in remote msg queue */ | ||
205 | u16 local_nentries; /* #of message entries in local msg queue */ | ||
206 | u64 local_msgqueue_pa; /* physical address of local message queue */ | ||
207 | }; | ||
208 | |||
209 | #define XPC_OPENCLOSE_ARGS_SIZE \ | ||
210 | L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * XPC_NCHANNELS) | ||
211 | |||
212 | |||
213 | |||
214 | /* struct xpc_msg flags */ | ||
215 | |||
216 | #define XPC_M_DONE 0x01 /* msg has been received/consumed */ | ||
217 | #define XPC_M_READY 0x02 /* msg is ready to be sent */ | ||
218 | #define XPC_M_INTERRUPT 0x04 /* send interrupt when msg consumed */ | ||
219 | |||
220 | |||
221 | #define XPC_MSG_ADDRESS(_payload) \ | ||
222 | ((struct xpc_msg *)((u8 *)(_payload) - XPC_MSG_PAYLOAD_OFFSET)) | ||
223 | |||
224 | |||
225 | |||
226 | /* | ||
227 | * Defines notify entry. | ||
228 | * | ||
229 | * This is used to notify a message's sender that their message was received | ||
230 | * and consumed by the intended recipient. | ||
231 | */ | ||
232 | struct xpc_notify { | ||
233 | struct semaphore sema; /* notify semaphore */ | ||
234 | u8 type; /* type of notification */ | ||
235 | |||
236 | /* the following two fields are only used if type == XPC_N_CALL */ | ||
237 | xpc_notify_func func; /* user's notify function */ | ||
238 | void *key; /* pointer to user's key */ | ||
239 | }; | ||
240 | |||
241 | /* struct xpc_notify type of notification */ | ||
242 | |||
243 | #define XPC_N_CALL 0x01 /* notify function provided by user */ | ||
244 | |||
245 | |||
246 | |||
247 | /* | ||
248 | * Define the structure that manages all the stuff required by a channel. In | ||
249 | * particular, they are used to manage the messages sent across the channel. | ||
250 | * | ||
251 | * This structure is private to a partition, and is NOT shared across the | ||
252 | * partition boundary. | ||
253 | * | ||
254 | * There is an array of these structures for each remote partition. It is | ||
255 | * allocated at the time a partition becomes active. The array contains one | ||
256 | * of these structures for each potential channel connection to that partition. | ||
257 | * | ||
258 | * Each of these structures manages two message queues (circular buffers). | ||
259 | * They are allocated at the time a channel connection is made. One of | ||
260 | * these message queues (local_msgqueue) holds the locally created messages | ||
261 | * that are destined for the remote partition. The other of these message | ||
262 | * queues (remote_msgqueue) is a locally cached copy of the remote partition's | ||
263 | * own local_msgqueue. | ||
264 | * | ||
265 | * The following is a description of the Get/Put pointers used to manage these | ||
266 | * two message queues. Consider the local_msgqueue to be on one partition | ||
267 | * and the remote_msgqueue to be its cached copy on another partition. A | ||
268 | * description of what each of the lettered areas contains is included. | ||
269 | * | ||
270 | * | ||
271 | * local_msgqueue remote_msgqueue | ||
272 | * | ||
273 | * |/////////| |/////////| | ||
274 | * w_remote_GP.get --> +---------+ |/////////| | ||
275 | * | F | |/////////| | ||
276 | * remote_GP.get --> +---------+ +---------+ <-- local_GP->get | ||
277 | * | | | | | ||
278 | * | | | E | | ||
279 | * | | | | | ||
280 | * | | +---------+ <-- w_local_GP.get | ||
281 | * | B | |/////////| | ||
282 | * | | |////D////| | ||
283 | * | | |/////////| | ||
284 | * | | +---------+ <-- w_remote_GP.put | ||
285 | * | | |////C////| | ||
286 | * local_GP->put --> +---------+ +---------+ <-- remote_GP.put | ||
287 | * | | |/////////| | ||
288 | * | A | |/////////| | ||
289 | * | | |/////////| | ||
290 | * w_local_GP.put --> +---------+ |/////////| | ||
291 | * |/////////| |/////////| | ||
292 | * | ||
293 | * | ||
294 | * ( remote_GP.[get|put] are cached copies of the remote | ||
295 | * partition's local_GP->[get|put], and thus their values can | ||
296 | * lag behind their counterparts on the remote partition. ) | ||
297 | * | ||
298 | * | ||
299 | * A - Messages that have been allocated, but have not yet been sent to the | ||
300 | * remote partition. | ||
301 | * | ||
302 | * B - Messages that have been sent, but have not yet been acknowledged by the | ||
303 | * remote partition as having been received. | ||
304 | * | ||
305 | * C - Area that needs to be prepared for the copying of sent messages, by | ||
306 | * the clearing of the message flags of any previously received messages. | ||
307 | * | ||
308 | * D - Area into which sent messages are to be copied from the remote | ||
309 | * partition's local_msgqueue and then delivered to their intended | ||
310 | * recipients. [ To allow for a multi-message copy, another pointer | ||
311 | * (next_msg_to_pull) has been added to keep track of the next message | ||
312 | * number needing to be copied (pulled). It chases after w_remote_GP.put. | ||
313 | * Any messages lying between w_local_GP.get and next_msg_to_pull have | ||
314 | * been copied and are ready to be delivered. ] | ||
315 | * | ||
316 | * E - Messages that have been copied and delivered, but have not yet been | ||
317 | * acknowledged by the recipient as having been received. | ||
318 | * | ||
319 | * F - Messages that have been acknowledged, but XPC has not yet notified the | ||
320 | * sender that the message was received by its intended recipient. | ||
321 | * This is also an area that needs to be prepared for the allocating of | ||
322 | * new messages, by the clearing of the message flags of the acknowledged | ||
323 | * messages. | ||
324 | */ | ||
325 | struct xpc_channel { | ||
326 | partid_t partid; /* ID of remote partition connected */ | ||
327 | spinlock_t lock; /* lock for updating this structure */ | ||
328 | u32 flags; /* general flags */ | ||
329 | |||
330 | enum xpc_retval reason; /* reason why channel is disconnect'g */ | ||
331 | int reason_line; /* line# disconnect initiated from */ | ||
332 | |||
333 | u16 number; /* channel # */ | ||
334 | |||
335 | u16 msg_size; /* sizeof each msg entry */ | ||
336 | u16 local_nentries; /* #of msg entries in local msg queue */ | ||
337 | u16 remote_nentries; /* #of msg entries in remote msg queue*/ | ||
338 | |||
339 | void *local_msgqueue_base; /* base address of kmalloc'd space */ | ||
340 | struct xpc_msg *local_msgqueue; /* local message queue */ | ||
341 | void *remote_msgqueue_base; /* base address of kmalloc'd space */ | ||
342 | struct xpc_msg *remote_msgqueue;/* cached copy of remote partition's */ | ||
343 | /* local message queue */ | ||
344 | u64 remote_msgqueue_pa; /* phys addr of remote partition's */ | ||
345 | /* local message queue */ | ||
346 | |||
347 | atomic_t references; /* #of external references to queues */ | ||
348 | |||
349 | atomic_t n_on_msg_allocate_wq; /* #on msg allocation wait queue */ | ||
350 | wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */ | ||
351 | |||
352 | /* queue of msg senders who want to be notified when msg received */ | ||
353 | |||
354 | atomic_t n_to_notify; /* #of msg senders to notify */ | ||
355 | struct xpc_notify *notify_queue;/* notify queue for messages sent */ | ||
356 | |||
357 | xpc_channel_func func; /* user's channel function */ | ||
358 | void *key; /* pointer to user's key */ | ||
359 | |||
360 | struct semaphore msg_to_pull_sema; /* next msg to pull serialization */ | ||
361 | struct semaphore teardown_sema; /* wait for teardown completion */ | ||
362 | |||
363 | struct xpc_openclose_args *local_openclose_args; /* args passed on */ | ||
364 | /* opening or closing of channel */ | ||
365 | |||
366 | /* various flavors of local and remote Get/Put values */ | ||
367 | |||
368 | struct xpc_gp *local_GP; /* local Get/Put values */ | ||
369 | struct xpc_gp remote_GP; /* remote Get/Put values */ | ||
370 | struct xpc_gp w_local_GP; /* working local Get/Put values */ | ||
371 | struct xpc_gp w_remote_GP; /* working remote Get/Put values */ | ||
372 | s64 next_msg_to_pull; /* Put value of next msg to pull */ | ||
373 | |||
374 | /* kthread management related fields */ | ||
375 | |||
376 | // >>> rethink having kthreads_assigned_limit and kthreads_idle_limit; perhaps | ||
377 | // >>> allow the assigned limit be unbounded and let the idle limit be dynamic | ||
378 | // >>> dependent on activity over the last interval of time | ||
379 | atomic_t kthreads_assigned; /* #of kthreads assigned to channel */ | ||
380 | u32 kthreads_assigned_limit; /* limit on #of kthreads assigned */ | ||
381 | atomic_t kthreads_idle; /* #of kthreads idle waiting for work */ | ||
382 | u32 kthreads_idle_limit; /* limit on #of kthreads idle */ | ||
383 | atomic_t kthreads_active; /* #of kthreads actively working */ | ||
384 | // >>> following field is temporary | ||
385 | u32 kthreads_created; /* total #of kthreads created */ | ||
386 | |||
387 | wait_queue_head_t idle_wq; /* idle kthread wait queue */ | ||
388 | |||
389 | } ____cacheline_aligned; | ||
390 | |||
391 | |||
392 | /* struct xpc_channel flags */ | ||
393 | |||
394 | #define XPC_C_WASCONNECTED 0x00000001 /* channel was connected */ | ||
395 | |||
396 | #define XPC_C_ROPENREPLY 0x00000002 /* remote open channel reply */ | ||
397 | #define XPC_C_OPENREPLY 0x00000004 /* local open channel reply */ | ||
398 | #define XPC_C_ROPENREQUEST 0x00000008 /* remote open channel request */ | ||
399 | #define XPC_C_OPENREQUEST 0x00000010 /* local open channel request */ | ||
400 | |||
401 | #define XPC_C_SETUP 0x00000020 /* channel's msgqueues are alloc'd */ | ||
402 | #define XPC_C_CONNECTCALLOUT 0x00000040 /* channel connected callout made */ | ||
403 | #define XPC_C_CONNECTED 0x00000080 /* local channel is connected */ | ||
404 | #define XPC_C_CONNECTING 0x00000100 /* channel is being connected */ | ||
405 | |||
406 | #define XPC_C_RCLOSEREPLY 0x00000200 /* remote close channel reply */ | ||
407 | #define XPC_C_CLOSEREPLY 0x00000400 /* local close channel reply */ | ||
408 | #define XPC_C_RCLOSEREQUEST 0x00000800 /* remote close channel request */ | ||
409 | #define XPC_C_CLOSEREQUEST 0x00001000 /* local close channel request */ | ||
410 | |||
411 | #define XPC_C_DISCONNECTED 0x00002000 /* channel is disconnected */ | ||
412 | #define XPC_C_DISCONNECTING 0x00004000 /* channel is being disconnected */ | ||
413 | |||
414 | |||
415 | |||
416 | /* | ||
417 | * Manages channels on a partition basis. There is one of these structures | ||
418 | * for each partition (a partition will never utilize the structure that | ||
419 | * represents itself). | ||
420 | */ | ||
421 | struct xpc_partition { | ||
422 | |||
423 | /* XPC HB infrastructure */ | ||
424 | |||
425 | u64 remote_rp_pa; /* phys addr of partition's rsvd pg */ | ||
426 | u64 remote_vars_pa; /* phys addr of partition's vars */ | ||
427 | u64 remote_vars_part_pa; /* phys addr of partition's vars part */ | ||
428 | u64 last_heartbeat; /* HB at last read */ | ||
429 | u64 remote_amos_page_pa; /* phys addr of partition's amos page */ | ||
430 | int remote_act_nasid; /* active part's act/deact nasid */ | ||
431 | int remote_act_phys_cpuid; /* active part's act/deact phys cpuid */ | ||
432 | u32 act_IRQ_rcvd; /* IRQs since activation */ | ||
433 | spinlock_t act_lock; /* protect updating of act_state */ | ||
434 | u8 act_state; /* from XPC HB viewpoint */ | ||
435 | enum xpc_retval reason; /* reason partition is deactivating */ | ||
436 | int reason_line; /* line# deactivation initiated from */ | ||
437 | int reactivate_nasid; /* nasid in partition to reactivate */ | ||
438 | |||
439 | |||
440 | /* XPC infrastructure referencing and teardown control */ | ||
441 | |||
442 | u8 setup_state; /* infrastructure setup state */ | ||
443 | wait_queue_head_t teardown_wq; /* kthread waiting to teardown infra */ | ||
444 | atomic_t references; /* #of references to infrastructure */ | ||
445 | |||
446 | |||
447 | /* | ||
448 | * NONE OF THE PRECEDING FIELDS OF THIS STRUCTURE WILL BE CLEARED WHEN | ||
449 | * XPC SETS UP THE NECESSARY INFRASTRUCTURE TO SUPPORT CROSS PARTITION | ||
450 | * COMMUNICATION. ALL OF THE FOLLOWING FIELDS WILL BE CLEARED. (THE | ||
451 | * 'nchannels' FIELD MUST BE THE FIRST OF THE FIELDS TO BE CLEARED.) | ||
452 | */ | ||
453 | |||
454 | |||
455 | u8 nchannels; /* #of defined channels supported */ | ||
456 | atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */ | ||
457 | struct xpc_channel *channels;/* array of channel structures */ | ||
458 | |||
459 | void *local_GPs_base; /* base address of kmalloc'd space */ | ||
460 | struct xpc_gp *local_GPs; /* local Get/Put values */ | ||
461 | void *remote_GPs_base; /* base address of kmalloc'd space */ | ||
462 | struct xpc_gp *remote_GPs;/* copy of remote partition's local Get/Put */ | ||
463 | /* values */ | ||
464 | u64 remote_GPs_pa; /* phys address of remote partition's local */ | ||
465 | /* Get/Put values */ | ||
466 | |||
467 | |||
468 | /* fields used to pass args when opening or closing a channel */ | ||
469 | |||
470 | void *local_openclose_args_base; /* base address of kmalloc'd space */ | ||
471 | struct xpc_openclose_args *local_openclose_args; /* local's args */ | ||
472 | void *remote_openclose_args_base; /* base address of kmalloc'd space */ | ||
473 | struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */ | ||
474 | /* args */ | ||
475 | u64 remote_openclose_args_pa; /* phys addr of remote's args */ | ||
476 | |||
477 | |||
478 | /* IPI sending, receiving and handling related fields */ | ||
479 | |||
480 | int remote_IPI_nasid; /* nasid of where to send IPIs */ | ||
481 | int remote_IPI_phys_cpuid; /* phys CPU ID of where to send IPIs */ | ||
482 | AMO_t *remote_IPI_amo_va; /* address of remote IPI AMO_t structure */ | ||
483 | |||
484 | AMO_t *local_IPI_amo_va; /* address of IPI AMO_t structure */ | ||
485 | u64 local_IPI_amo; /* IPI amo flags yet to be handled */ | ||
486 | char IPI_owner[8]; /* IPI owner's name */ | ||
487 | struct timer_list dropped_IPI_timer; /* dropped IPI timer */ | ||
488 | |||
489 | spinlock_t IPI_lock; /* IPI handler lock */ | ||
490 | |||
491 | |||
492 | /* channel manager related fields */ | ||
493 | |||
494 | atomic_t channel_mgr_requests; /* #of requests to activate chan mgr */ | ||
495 | wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */ | ||
496 | |||
497 | } ____cacheline_aligned; | ||
498 | |||
499 | |||
500 | /* struct xpc_partition act_state values (for XPC HB) */ | ||
501 | |||
502 | #define XPC_P_INACTIVE 0x00 /* partition is not active */ | ||
503 | #define XPC_P_ACTIVATION_REQ 0x01 /* created thread to activate */ | ||
504 | #define XPC_P_ACTIVATING 0x02 /* activation thread started */ | ||
505 | #define XPC_P_ACTIVE 0x03 /* xpc_partition_up() was called */ | ||
506 | #define XPC_P_DEACTIVATING 0x04 /* partition deactivation initiated */ | ||
507 | |||
508 | |||
509 | #define XPC_DEACTIVATE_PARTITION(_p, _reason) \ | ||
510 | xpc_deactivate_partition(__LINE__, (_p), (_reason)) | ||
511 | |||
512 | |||
513 | /* struct xpc_partition setup_state values */ | ||
514 | |||
515 | #define XPC_P_UNSET 0x00 /* infrastructure was never setup */ | ||
516 | #define XPC_P_SETUP 0x01 /* infrastructure is setup */ | ||
517 | #define XPC_P_WTEARDOWN 0x02 /* waiting to teardown infrastructure */ | ||
518 | #define XPC_P_TORNDOWN 0x03 /* infrastructure is torn down */ | ||
519 | |||
520 | |||
521 | /* | ||
522 | * struct xpc_partition IPI_timer #of seconds to wait before checking for | ||
523 | * dropped IPIs. These occur whenever an IPI amo write doesn't complete until | ||
524 | * after the IPI was received. | ||
525 | */ | ||
526 | #define XPC_P_DROPPED_IPI_WAIT (0.25 * HZ) | ||
527 | |||
528 | |||
529 | #define XPC_PARTID(_p) ((partid_t) ((_p) - &xpc_partitions[0])) | ||
530 | |||
531 | |||
532 | |||
533 | /* found in xp_main.c */ | ||
534 | extern struct xpc_registration xpc_registrations[]; | ||
535 | |||
536 | |||
537 | /* >>> found in xpc_main.c only */ | ||
538 | extern struct device *xpc_part; | ||
539 | extern struct device *xpc_chan; | ||
540 | extern irqreturn_t xpc_notify_IRQ_handler(int, void *, struct pt_regs *); | ||
541 | extern void xpc_dropped_IPI_check(struct xpc_partition *); | ||
542 | extern void xpc_activate_kthreads(struct xpc_channel *, int); | ||
543 | extern void xpc_create_kthreads(struct xpc_channel *, int); | ||
544 | extern void xpc_disconnect_wait(int); | ||
545 | |||
546 | |||
547 | /* found in xpc_main.c and efi-xpc.c */ | ||
548 | extern void xpc_activate_partition(struct xpc_partition *); | ||
549 | |||
550 | |||
551 | /* found in xpc_partition.c */ | ||
552 | extern int xpc_exiting; | ||
553 | extern int xpc_hb_interval; | ||
554 | extern int xpc_hb_check_interval; | ||
555 | extern struct xpc_vars *xpc_vars; | ||
556 | extern struct xpc_rsvd_page *xpc_rsvd_page; | ||
557 | extern struct xpc_vars_part *xpc_vars_part; | ||
558 | extern struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1]; | ||
559 | extern char xpc_remote_copy_buffer[]; | ||
560 | extern struct xpc_rsvd_page *xpc_rsvd_page_init(void); | ||
561 | extern void xpc_allow_IPI_ops(void); | ||
562 | extern void xpc_restrict_IPI_ops(void); | ||
563 | extern int xpc_identify_act_IRQ_sender(void); | ||
564 | extern enum xpc_retval xpc_mark_partition_active(struct xpc_partition *); | ||
565 | extern void xpc_mark_partition_inactive(struct xpc_partition *); | ||
566 | extern void xpc_discovery(void); | ||
567 | extern void xpc_check_remote_hb(void); | ||
568 | extern void xpc_deactivate_partition(const int, struct xpc_partition *, | ||
569 | enum xpc_retval); | ||
570 | extern enum xpc_retval xpc_initiate_partid_to_nasids(partid_t, void *); | ||
571 | |||
572 | |||
573 | /* found in xpc_channel.c */ | ||
574 | extern void xpc_initiate_connect(int); | ||
575 | extern void xpc_initiate_disconnect(int); | ||
576 | extern enum xpc_retval xpc_initiate_allocate(partid_t, int, u32, void **); | ||
577 | extern enum xpc_retval xpc_initiate_send(partid_t, int, void *); | ||
578 | extern enum xpc_retval xpc_initiate_send_notify(partid_t, int, void *, | ||
579 | xpc_notify_func, void *); | ||
580 | extern void xpc_initiate_received(partid_t, int, void *); | ||
581 | extern enum xpc_retval xpc_setup_infrastructure(struct xpc_partition *); | ||
582 | extern enum xpc_retval xpc_pull_remote_vars_part(struct xpc_partition *); | ||
583 | extern void xpc_process_channel_activity(struct xpc_partition *); | ||
584 | extern void xpc_connected_callout(struct xpc_channel *); | ||
585 | extern void xpc_deliver_msg(struct xpc_channel *); | ||
586 | extern void xpc_disconnect_channel(const int, struct xpc_channel *, | ||
587 | enum xpc_retval, unsigned long *); | ||
588 | extern void xpc_disconnected_callout(struct xpc_channel *); | ||
589 | extern void xpc_partition_down(struct xpc_partition *, enum xpc_retval); | ||
590 | extern void xpc_teardown_infrastructure(struct xpc_partition *); | ||
591 | |||
592 | |||
593 | |||
594 | static inline void | ||
595 | xpc_wakeup_channel_mgr(struct xpc_partition *part) | ||
596 | { | ||
597 | if (atomic_inc_return(&part->channel_mgr_requests) == 1) { | ||
598 | wake_up(&part->channel_mgr_wq); | ||
599 | } | ||
600 | } | ||
601 | |||
602 | |||
603 | |||
604 | /* | ||
605 | * These next two inlines are used to keep us from tearing down a channel's | ||
606 | * msg queues while a thread may be referencing them. | ||
607 | */ | ||
608 | static inline void | ||
609 | xpc_msgqueue_ref(struct xpc_channel *ch) | ||
610 | { | ||
611 | atomic_inc(&ch->references); | ||
612 | } | ||
613 | |||
614 | static inline void | ||
615 | xpc_msgqueue_deref(struct xpc_channel *ch) | ||
616 | { | ||
617 | s32 refs = atomic_dec_return(&ch->references); | ||
618 | |||
619 | DBUG_ON(refs < 0); | ||
620 | if (refs == 0) { | ||
621 | xpc_wakeup_channel_mgr(&xpc_partitions[ch->partid]); | ||
622 | } | ||
623 | } | ||
624 | |||
625 | |||
626 | |||
627 | #define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \ | ||
628 | xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs) | ||
629 | |||
630 | |||
631 | /* | ||
632 | * These two inlines are used to keep us from tearing down a partition's | ||
633 | * setup infrastructure while a thread may be referencing it. | ||
634 | */ | ||
635 | static inline void | ||
636 | xpc_part_deref(struct xpc_partition *part) | ||
637 | { | ||
638 | s32 refs = atomic_dec_return(&part->references); | ||
639 | |||
640 | |||
641 | DBUG_ON(refs < 0); | ||
642 | if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN) { | ||
643 | wake_up(&part->teardown_wq); | ||
644 | } | ||
645 | } | ||
646 | |||
647 | static inline int | ||
648 | xpc_part_ref(struct xpc_partition *part) | ||
649 | { | ||
650 | int setup; | ||
651 | |||
652 | |||
653 | atomic_inc(&part->references); | ||
654 | setup = (part->setup_state == XPC_P_SETUP); | ||
655 | if (!setup) { | ||
656 | xpc_part_deref(part); | ||
657 | } | ||
658 | return setup; | ||
659 | } | ||
660 | |||
661 | |||
662 | |||
663 | /* | ||
664 | * The following macro is to be used for the setting of the reason and | ||
665 | * reason_line fields in both the struct xpc_channel and struct xpc_partition | ||
666 | * structures. | ||
667 | */ | ||
668 | #define XPC_SET_REASON(_p, _reason, _line) \ | ||
669 | { \ | ||
670 | (_p)->reason = _reason; \ | ||
671 | (_p)->reason_line = _line; \ | ||
672 | } | ||
673 | |||
674 | |||
675 | |||
676 | /* | ||
677 | * The following set of macros and inlines are used for the sending and | ||
678 | * receiving of IPIs (also known as IRQs). There are two flavors of IPIs, | ||
679 | * one that is associated with partition activity (SGI_XPC_ACTIVATE) and | ||
680 | * the other that is associated with channel activity (SGI_XPC_NOTIFY). | ||
681 | */ | ||
682 | |||
683 | static inline u64 | ||
684 | xpc_IPI_receive(AMO_t *amo) | ||
685 | { | ||
686 | return FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_CLEAR); | ||
687 | } | ||
688 | |||
689 | |||
690 | static inline enum xpc_retval | ||
691 | xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector) | ||
692 | { | ||
693 | int ret = 0; | ||
694 | unsigned long irq_flags; | ||
695 | |||
696 | |||
697 | local_irq_save(irq_flags); | ||
698 | |||
699 | FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, flag); | ||
700 | sn_send_IPI_phys(nasid, phys_cpuid, vector, 0); | ||
701 | |||
702 | /* | ||
703 | * We must always use the nofault function regardless of whether we | ||
704 | * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we | ||
705 | * didn't, we'd never know that the other partition is down and would | ||
706 | * keep sending IPIs and AMOs to it until the heartbeat times out. | ||
707 | */ | ||
708 | ret = xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->variable), | ||
709 | xp_nofault_PIOR_target)); | ||
710 | |||
711 | local_irq_restore(irq_flags); | ||
712 | |||
713 | return ((ret == 0) ? xpcSuccess : xpcPioReadError); | ||
714 | } | ||
715 | |||
716 | |||
717 | /* | ||
718 | * IPIs associated with SGI_XPC_ACTIVATE IRQ. | ||
719 | */ | ||
720 | |||
721 | /* | ||
722 | * Flag the appropriate AMO variable and send an IPI to the specified node. | ||
723 | */ | ||
724 | static inline void | ||
725 | xpc_activate_IRQ_send(u64 amos_page, int from_nasid, int to_nasid, | ||
726 | int to_phys_cpuid) | ||
727 | { | ||
728 | int w_index = XPC_NASID_W_INDEX(from_nasid); | ||
729 | int b_index = XPC_NASID_B_INDEX(from_nasid); | ||
730 | AMO_t *amos = (AMO_t *) __va(amos_page + | ||
731 | (XP_MAX_PARTITIONS * sizeof(AMO_t))); | ||
732 | |||
733 | |||
734 | (void) xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid, | ||
735 | to_phys_cpuid, SGI_XPC_ACTIVATE); | ||
736 | } | ||
737 | |||
738 | static inline void | ||
739 | xpc_IPI_send_activate(struct xpc_vars *vars) | ||
740 | { | ||
741 | xpc_activate_IRQ_send(vars->amos_page_pa, cnodeid_to_nasid(0), | ||
742 | vars->act_nasid, vars->act_phys_cpuid); | ||
743 | } | ||
744 | |||
745 | static inline void | ||
746 | xpc_IPI_send_activated(struct xpc_partition *part) | ||
747 | { | ||
748 | xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0), | ||
749 | part->remote_act_nasid, part->remote_act_phys_cpuid); | ||
750 | } | ||
751 | |||
752 | static inline void | ||
753 | xpc_IPI_send_reactivate(struct xpc_partition *part) | ||
754 | { | ||
755 | xpc_activate_IRQ_send(xpc_vars->amos_page_pa, part->reactivate_nasid, | ||
756 | xpc_vars->act_nasid, xpc_vars->act_phys_cpuid); | ||
757 | } | ||
758 | |||
759 | |||
760 | /* | ||
761 | * IPIs associated with SGI_XPC_NOTIFY IRQ. | ||
762 | */ | ||
763 | |||
764 | /* | ||
765 | * Send an IPI to the remote partition that is associated with the | ||
766 | * specified channel. | ||
767 | */ | ||
768 | #define XPC_NOTIFY_IRQ_SEND(_ch, _ipi_f, _irq_f) \ | ||
769 | xpc_notify_IRQ_send(_ch, _ipi_f, #_ipi_f, _irq_f) | ||
770 | |||
771 | static inline void | ||
772 | xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string, | ||
773 | unsigned long *irq_flags) | ||
774 | { | ||
775 | struct xpc_partition *part = &xpc_partitions[ch->partid]; | ||
776 | enum xpc_retval ret; | ||
777 | |||
778 | |||
779 | if (likely(part->act_state != XPC_P_DEACTIVATING)) { | ||
780 | ret = xpc_IPI_send(part->remote_IPI_amo_va, | ||
781 | (u64) ipi_flag << (ch->number * 8), | ||
782 | part->remote_IPI_nasid, | ||
783 | part->remote_IPI_phys_cpuid, | ||
784 | SGI_XPC_NOTIFY); | ||
785 | dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n", | ||
786 | ipi_flag_string, ch->partid, ch->number, ret); | ||
787 | if (unlikely(ret != xpcSuccess)) { | ||
788 | if (irq_flags != NULL) { | ||
789 | spin_unlock_irqrestore(&ch->lock, *irq_flags); | ||
790 | } | ||
791 | XPC_DEACTIVATE_PARTITION(part, ret); | ||
792 | if (irq_flags != NULL) { | ||
793 | spin_lock_irqsave(&ch->lock, *irq_flags); | ||
794 | } | ||
795 | } | ||
796 | } | ||
797 | } | ||
798 | |||
799 | |||
800 | /* | ||
801 | * Make it look like the remote partition, which is associated with the | ||
802 | * specified channel, sent us an IPI. This faked IPI will be handled | ||
803 | * by xpc_dropped_IPI_check(). | ||
804 | */ | ||
805 | #define XPC_NOTIFY_IRQ_SEND_LOCAL(_ch, _ipi_f) \ | ||
806 | xpc_notify_IRQ_send_local(_ch, _ipi_f, #_ipi_f) | ||
807 | |||
808 | static inline void | ||
809 | xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag, | ||
810 | char *ipi_flag_string) | ||
811 | { | ||
812 | struct xpc_partition *part = &xpc_partitions[ch->partid]; | ||
813 | |||
814 | |||
815 | FETCHOP_STORE_OP(TO_AMO((u64) &part->local_IPI_amo_va->variable), | ||
816 | FETCHOP_OR, ((u64) ipi_flag << (ch->number * 8))); | ||
817 | dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n", | ||
818 | ipi_flag_string, ch->partid, ch->number); | ||
819 | } | ||
820 | |||
821 | |||
822 | /* | ||
823 | * The sending and receiving of IPIs includes the setting of an AMO variable | ||
824 | * to indicate the reason the IPI was sent. The 64-bit variable is divided | ||
825 | * up into eight bytes, ordered from right to left. Byte zero pertains to | ||
826 | * channel 0, byte one to channel 1, and so on. Each byte is described by | ||
827 | * the following IPI flags. | ||
828 | */ | ||
829 | |||
830 | #define XPC_IPI_CLOSEREQUEST 0x01 | ||
831 | #define XPC_IPI_CLOSEREPLY 0x02 | ||
832 | #define XPC_IPI_OPENREQUEST 0x04 | ||
833 | #define XPC_IPI_OPENREPLY 0x08 | ||
834 | #define XPC_IPI_MSGREQUEST 0x10 | ||
835 | |||
836 | |||
837 | /* given an AMO variable and a channel#, get its associated IPI flags */ | ||
838 | #define XPC_GET_IPI_FLAGS(_amo, _c) ((u8) (((_amo) >> ((_c) * 8)) & 0xff)) | ||
839 | |||
840 | #define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0f) | ||
841 | #define XPC_ANY_MSG_IPI_FLAGS_SET(_amo) ((_amo) & 0x1010101010101010) | ||
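Worked example of the byte layout described above: raising XPC_IPI_OPENREQUEST (0x04) on channel 2 ORs 0x04 << (2*8) = 0x040000 into the AMO variable, so:

    /* XPC_GET_IPI_FLAGS(0x040000, 2) == 0x04            */
    /* XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(0x040000) != 0    */
    /* XPC_ANY_MSG_IPI_FLAGS_SET(0x040000) == 0          */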
842 | |||
843 | |||
844 | static inline void | ||
845 | xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags) | ||
846 | { | ||
847 | struct xpc_openclose_args *args = ch->local_openclose_args; | ||
848 | |||
849 | |||
850 | args->reason = ch->reason; | ||
851 | |||
852 | XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREQUEST, irq_flags); | ||
853 | } | ||
854 | |||
855 | static inline void | ||
856 | xpc_IPI_send_closereply(struct xpc_channel *ch, unsigned long *irq_flags) | ||
857 | { | ||
858 | XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREPLY, irq_flags); | ||
859 | } | ||
860 | |||
861 | static inline void | ||
862 | xpc_IPI_send_openrequest(struct xpc_channel *ch, unsigned long *irq_flags) | ||
863 | { | ||
864 | struct xpc_openclose_args *args = ch->local_openclose_args; | ||
865 | |||
866 | |||
867 | args->msg_size = ch->msg_size; | ||
868 | args->local_nentries = ch->local_nentries; | ||
869 | |||
870 | XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREQUEST, irq_flags); | ||
871 | } | ||
872 | |||
873 | static inline void | ||
874 | xpc_IPI_send_openreply(struct xpc_channel *ch, unsigned long *irq_flags) | ||
875 | { | ||
876 | struct xpc_openclose_args *args = ch->local_openclose_args; | ||
877 | |||
878 | |||
879 | args->remote_nentries = ch->remote_nentries; | ||
880 | args->local_nentries = ch->local_nentries; | ||
881 | args->local_msgqueue_pa = __pa(ch->local_msgqueue); | ||
882 | |||
883 | XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREPLY, irq_flags); | ||
884 | } | ||
885 | |||
886 | static inline void | ||
887 | xpc_IPI_send_msgrequest(struct xpc_channel *ch) | ||
888 | { | ||
889 | XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_MSGREQUEST, NULL); | ||
890 | } | ||
891 | |||
892 | static inline void | ||
893 | xpc_IPI_send_local_msgrequest(struct xpc_channel *ch) | ||
894 | { | ||
895 | XPC_NOTIFY_IRQ_SEND_LOCAL(ch, XPC_IPI_MSGREQUEST); | ||
896 | } | ||
897 | |||
898 | |||
899 | /* | ||
900 | * Memory for XPC's AMO variables is allocated by the MSPEC driver. These | ||
901 | * pages are located in the lowest granule. The lowest granule uses 4k pages | ||
902 | * for cached references and an alternate TLB handler to never provide a | ||
903 | * cacheable mapping for the entire region. This will prevent speculative | ||
904 | * reading of cached copies of our lines from being issued which will cause | ||
905 | * a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64 | ||
906 | * (XP_MAX_PARTITIONS) AMO variables for message notification (xpc_main.c) | ||
907 | * and an additional 16 AMO variables for partition activation (xpc_hb.c). | ||
908 | */ | ||
909 | static inline AMO_t * | ||
910 | xpc_IPI_init(partid_t partid) | ||
911 | { | ||
912 | AMO_t *part_amo = xpc_vars->amos_page + partid; | ||
913 | |||
914 | |||
915 | xpc_IPI_receive(part_amo); | ||
916 | return part_amo; | ||
917 | } | ||
918 | |||
919 | |||
920 | |||
921 | static inline enum xpc_retval | ||
922 | xpc_map_bte_errors(bte_result_t error) | ||
923 | { | ||
924 | switch (error) { | ||
925 | case BTE_SUCCESS: return xpcSuccess; | ||
926 | case BTEFAIL_DIR: return xpcBteDirectoryError; | ||
927 | case BTEFAIL_POISON: return xpcBtePoisonError; | ||
928 | case BTEFAIL_WERR: return xpcBteWriteError; | ||
929 | case BTEFAIL_ACCESS: return xpcBteAccessError; | ||
930 | case BTEFAIL_PWERR: return xpcBtePWriteError; | ||
931 | case BTEFAIL_PRERR: return xpcBtePReadError; | ||
932 | case BTEFAIL_TOUT: return xpcBteTimeOutError; | ||
933 | case BTEFAIL_XTERR: return xpcBteXtalkError; | ||
934 | case BTEFAIL_NOTAVAIL: return xpcBteNotAvailable; | ||
935 | default: return xpcBteUnmappedError; | ||
936 | } | ||
937 | } | ||
938 | |||
939 | |||
940 | |||
941 | static inline void * | ||
942 | xpc_kmalloc_cacheline_aligned(size_t size, int flags, void **base) | ||
943 | { | ||
944 | /* see if kmalloc will give us cacheline-aligned memory by default */ | ||
945 | *base = kmalloc(size, flags); | ||
946 | if (*base == NULL) { | ||
947 | return NULL; | ||
948 | } | ||
949 | if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) { | ||
950 | return *base; | ||
951 | } | ||
952 | kfree(*base); | ||
953 | |||
954 | /* nope, we'll have to do it ourselves */ | ||
955 | *base = kmalloc(size + L1_CACHE_BYTES, flags); | ||
956 | if (*base == NULL) { | ||
957 | return NULL; | ||
958 | } | ||
959 | return (void *) L1_CACHE_ALIGN((u64) *base); | ||
960 | } | ||
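The intended calling pattern (followed by xpc_setup_infrastructure() below) is that the caller keeps the raw pointer returned through *base and frees that, never the aligned pointer returned by the function; a small sketch with illustrative names:

    void *gps_base;
    struct xpc_gp *gps;

    gps = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE, GFP_KERNEL, &gps_base);
    if (gps == NULL)
    	return xpcNoMemory;
    /* ... use gps ... */
    kfree(gps_base);	/* free the base address, not the aligned pointer */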
961 | |||
962 | |||
963 | /* | ||
964 | * Check to see if there is any channel activity to/from the specified | ||
965 | * partition. | ||
966 | */ | ||
967 | static inline void | ||
968 | xpc_check_for_channel_activity(struct xpc_partition *part) | ||
969 | { | ||
970 | u64 IPI_amo; | ||
971 | unsigned long irq_flags; | ||
972 | |||
973 | |||
974 | IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va); | ||
975 | if (IPI_amo == 0) { | ||
976 | return; | ||
977 | } | ||
978 | |||
979 | spin_lock_irqsave(&part->IPI_lock, irq_flags); | ||
980 | part->local_IPI_amo |= IPI_amo; | ||
981 | spin_unlock_irqrestore(&part->IPI_lock, irq_flags); | ||
982 | |||
983 | dev_dbg(xpc_chan, "received IPI from partid=%d, IPI_amo=0x%lx\n", | ||
984 | XPC_PARTID(part), IPI_amo); | ||
985 | |||
986 | xpc_wakeup_channel_mgr(part); | ||
987 | } | ||
988 | |||
989 | |||
990 | #endif /* _IA64_SN_KERNEL_XPC_H */ | ||
991 | |||
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c new file mode 100644 index 000000000000..0bf6fbcc46d2 --- /dev/null +++ b/arch/ia64/sn/kernel/xpc_channel.c | |||
@@ -0,0 +1,2297 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved. | ||
7 | */ | ||
8 | |||
9 | |||
10 | /* | ||
11 | * Cross Partition Communication (XPC) channel support. | ||
12 | * | ||
13 | * This is the part of XPC that manages the channels and | ||
14 | * sends/receives messages across them to/from other partitions. | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | |||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/cache.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <asm/sn/bte.h> | ||
26 | #include <asm/sn/sn_sal.h> | ||
27 | #include "xpc.h" | ||
28 | |||
29 | |||
30 | /* | ||
31 | * Set up the initial values for the XPartition Communication channels. | ||
32 | */ | ||
33 | static void | ||
34 | xpc_initialize_channels(struct xpc_partition *part, partid_t partid) | ||
35 | { | ||
36 | int ch_number; | ||
37 | struct xpc_channel *ch; | ||
38 | |||
39 | |||
40 | for (ch_number = 0; ch_number < part->nchannels; ch_number++) { | ||
41 | ch = &part->channels[ch_number]; | ||
42 | |||
43 | ch->partid = partid; | ||
44 | ch->number = ch_number; | ||
45 | ch->flags = XPC_C_DISCONNECTED; | ||
46 | |||
47 | ch->local_GP = &part->local_GPs[ch_number]; | ||
48 | ch->local_openclose_args = | ||
49 | &part->local_openclose_args[ch_number]; | ||
50 | |||
51 | atomic_set(&ch->kthreads_assigned, 0); | ||
52 | atomic_set(&ch->kthreads_idle, 0); | ||
53 | atomic_set(&ch->kthreads_active, 0); | ||
54 | |||
55 | atomic_set(&ch->references, 0); | ||
56 | atomic_set(&ch->n_to_notify, 0); | ||
57 | |||
58 | spin_lock_init(&ch->lock); | ||
59 | sema_init(&ch->msg_to_pull_sema, 1); /* mutex */ | ||
60 | |||
61 | atomic_set(&ch->n_on_msg_allocate_wq, 0); | ||
62 | init_waitqueue_head(&ch->msg_allocate_wq); | ||
63 | init_waitqueue_head(&ch->idle_wq); | ||
64 | } | ||
65 | } | ||
66 | |||
67 | |||
68 | /* | ||
69 | * Setup the infrastructure necessary to support XPartition Communication | ||
70 | * between the specified remote partition and the local one. | ||
71 | */ | ||
72 | enum xpc_retval | ||
73 | xpc_setup_infrastructure(struct xpc_partition *part) | ||
74 | { | ||
75 | int ret; | ||
76 | struct timer_list *timer; | ||
77 | partid_t partid = XPC_PARTID(part); | ||
78 | |||
79 | |||
80 | /* | ||
81 | * Zero out MOST of the entry for this partition. Only the fields | ||
82 | * starting with `nchannels' will be zeroed. The preceding fields must | ||
83 | * remain `viable' across partition ups and downs, since they may be | ||
84 | * referenced during this memset() operation. | ||
85 | */ | ||
86 | memset(&part->nchannels, 0, sizeof(struct xpc_partition) - | ||
87 | offsetof(struct xpc_partition, nchannels)); | ||
88 | |||
89 | /* | ||
90 | * Allocate all of the channel structures as a contiguous chunk of | ||
91 | * memory. | ||
92 | */ | ||
93 | part->channels = kmalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS, | ||
94 | GFP_KERNEL); | ||
95 | if (part->channels == NULL) { | ||
96 | dev_err(xpc_chan, "can't get memory for channels\n"); | ||
97 | return xpcNoMemory; | ||
98 | } | ||
99 | memset(part->channels, 0, sizeof(struct xpc_channel) * XPC_NCHANNELS); | ||
100 | |||
101 | part->nchannels = XPC_NCHANNELS; | ||
102 | |||
103 | |||
104 | /* allocate all the required GET/PUT values */ | ||
105 | |||
106 | part->local_GPs = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE, | ||
107 | GFP_KERNEL, &part->local_GPs_base); | ||
108 | if (part->local_GPs == NULL) { | ||
109 | kfree(part->channels); | ||
110 | part->channels = NULL; | ||
111 | dev_err(xpc_chan, "can't get memory for local get/put " | ||
112 | "values\n"); | ||
113 | return xpcNoMemory; | ||
114 | } | ||
115 | memset(part->local_GPs, 0, XPC_GP_SIZE); | ||
116 | |||
117 | part->remote_GPs = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE, | ||
118 | GFP_KERNEL, &part->remote_GPs_base); | ||
119 | if (part->remote_GPs == NULL) { | ||
120 | kfree(part->channels); | ||
121 | part->channels = NULL; | ||
122 | kfree(part->local_GPs_base); | ||
123 | part->local_GPs = NULL; | ||
124 | dev_err(xpc_chan, "can't get memory for remote get/put " | ||
125 | "values\n"); | ||
126 | return xpcNoMemory; | ||
127 | } | ||
128 | memset(part->remote_GPs, 0, XPC_GP_SIZE); | ||
129 | |||
130 | |||
131 | /* allocate all the required open and close args */ | ||
132 | |||
133 | part->local_openclose_args = xpc_kmalloc_cacheline_aligned( | ||
134 | XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL, | ||
135 | &part->local_openclose_args_base); | ||
136 | if (part->local_openclose_args == NULL) { | ||
137 | kfree(part->channels); | ||
138 | part->channels = NULL; | ||
139 | kfree(part->local_GPs_base); | ||
140 | part->local_GPs = NULL; | ||
141 | kfree(part->remote_GPs_base); | ||
142 | part->remote_GPs = NULL; | ||
143 | dev_err(xpc_chan, "can't get memory for local connect args\n"); | ||
144 | return xpcNoMemory; | ||
145 | } | ||
146 | memset(part->local_openclose_args, 0, XPC_OPENCLOSE_ARGS_SIZE); | ||
147 | |||
148 | part->remote_openclose_args = xpc_kmalloc_cacheline_aligned( | ||
149 | XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL, | ||
150 | &part->remote_openclose_args_base); | ||
151 | if (part->remote_openclose_args == NULL) { | ||
152 | kfree(part->channels); | ||
153 | part->channels = NULL; | ||
154 | kfree(part->local_GPs_base); | ||
155 | part->local_GPs = NULL; | ||
156 | kfree(part->remote_GPs_base); | ||
157 | part->remote_GPs = NULL; | ||
158 | kfree(part->local_openclose_args_base); | ||
159 | part->local_openclose_args = NULL; | ||
160 | dev_err(xpc_chan, "can't get memory for remote connect args\n"); | ||
161 | return xpcNoMemory; | ||
162 | } | ||
163 | memset(part->remote_openclose_args, 0, XPC_OPENCLOSE_ARGS_SIZE); | ||
164 | |||
165 | |||
166 | xpc_initialize_channels(part, partid); | ||
167 | |||
168 | atomic_set(&part->nchannels_active, 0); | ||
169 | |||
170 | |||
171 | 	/* local_IPI_amo was set to 0 by an earlier memset() */ | ||
172 | |||
173 | 	/* Initialize this partition's AMO_t structure */ | ||
174 | part->local_IPI_amo_va = xpc_IPI_init(partid); | ||
175 | |||
176 | spin_lock_init(&part->IPI_lock); | ||
177 | |||
178 | atomic_set(&part->channel_mgr_requests, 1); | ||
179 | init_waitqueue_head(&part->channel_mgr_wq); | ||
180 | |||
181 | sprintf(part->IPI_owner, "xpc%02d", partid); | ||
182 | ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, SA_SHIRQ, | ||
183 | part->IPI_owner, (void *) (u64) partid); | ||
184 | if (ret != 0) { | ||
185 | kfree(part->channels); | ||
186 | part->channels = NULL; | ||
187 | kfree(part->local_GPs_base); | ||
188 | part->local_GPs = NULL; | ||
189 | kfree(part->remote_GPs_base); | ||
190 | part->remote_GPs = NULL; | ||
191 | kfree(part->local_openclose_args_base); | ||
192 | part->local_openclose_args = NULL; | ||
193 | kfree(part->remote_openclose_args_base); | ||
194 | part->remote_openclose_args = NULL; | ||
195 | dev_err(xpc_chan, "can't register NOTIFY IRQ handler, " | ||
196 | "errno=%d\n", -ret); | ||
197 | return xpcLackOfResources; | ||
198 | } | ||
199 | |||
200 | /* Setup a timer to check for dropped IPIs */ | ||
201 | timer = &part->dropped_IPI_timer; | ||
202 | init_timer(timer); | ||
203 | timer->function = (void (*)(unsigned long)) xpc_dropped_IPI_check; | ||
204 | timer->data = (unsigned long) part; | ||
205 | timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT; | ||
206 | add_timer(timer); | ||
207 | |||
208 | /* | ||
209 | * With the setting of the partition setup_state to XPC_P_SETUP, we're | ||
210 | * declaring that this partition is ready to go. | ||
211 | */ | ||
212 | (volatile u8) part->setup_state = XPC_P_SETUP; | ||
213 | |||
214 | |||
215 | /* | ||
216 | * Setup the per partition specific variables required by the | ||
217 | * remote partition to establish channel connections with us. | ||
218 | * | ||
219 | * The setting of the magic # indicates that these per partition | ||
220 | * specific variables are ready to be used. | ||
221 | */ | ||
222 | xpc_vars_part[partid].GPs_pa = __pa(part->local_GPs); | ||
223 | xpc_vars_part[partid].openclose_args_pa = | ||
224 | __pa(part->local_openclose_args); | ||
225 | xpc_vars_part[partid].IPI_amo_pa = __pa(part->local_IPI_amo_va); | ||
226 | xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(smp_processor_id()); | ||
227 | xpc_vars_part[partid].IPI_phys_cpuid = | ||
228 | cpu_physical_id(smp_processor_id()); | ||
229 | xpc_vars_part[partid].nchannels = part->nchannels; | ||
230 | (volatile u64) xpc_vars_part[partid].magic = XPC_VP_MAGIC1; | ||
231 | |||
232 | return xpcSuccess; | ||
233 | } | ||
234 | |||
235 | |||
236 | /* | ||
237 | * Create a wrapper that hides the underlying mechanism for pulling a cacheline | ||
238 | * (or multiple cachelines) from a remote partition. | ||
239 | * | ||
240 | * src must be a cacheline aligned physical address on the remote partition. | ||
241 | * dst must be a cacheline aligned virtual address on this partition. | ||
242 |  * cnt must be a multiple of the cacheline size. | ||
243 | */ | ||
244 | static enum xpc_retval | ||
245 | xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst, | ||
246 | const void *src, size_t cnt) | ||
247 | { | ||
248 | bte_result_t bte_ret; | ||
249 | |||
250 | |||
251 | DBUG_ON((u64) src != L1_CACHE_ALIGN((u64) src)); | ||
252 | DBUG_ON((u64) dst != L1_CACHE_ALIGN((u64) dst)); | ||
253 | DBUG_ON(cnt != L1_CACHE_ALIGN(cnt)); | ||
254 | |||
255 | if (part->act_state == XPC_P_DEACTIVATING) { | ||
256 | return part->reason; | ||
257 | } | ||
258 | |||
259 | bte_ret = xp_bte_copy((u64) src, (u64) ia64_tpa((u64) dst), | ||
260 | (u64) cnt, (BTE_NORMAL | BTE_WACQUIRE), NULL); | ||
261 | if (bte_ret == BTE_SUCCESS) { | ||
262 | return xpcSuccess; | ||
263 | } | ||
264 | |||
265 | dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n", | ||
266 | XPC_PARTID(part), bte_ret); | ||
267 | |||
268 | return xpc_map_bte_errors(bte_ret); | ||
269 | } | ||
270 | |||
271 | |||
272 | /* | ||
273 |  * Pull the remote per partition specific variables from the specified | ||
274 | * partition. | ||
275 | */ | ||
276 | enum xpc_retval | ||
277 | xpc_pull_remote_vars_part(struct xpc_partition *part) | ||
278 | { | ||
279 | u8 buffer[L1_CACHE_BYTES * 2]; | ||
280 | struct xpc_vars_part *pulled_entry_cacheline = | ||
281 | (struct xpc_vars_part *) L1_CACHE_ALIGN((u64) buffer); | ||
282 | struct xpc_vars_part *pulled_entry; | ||
283 | u64 remote_entry_cacheline_pa, remote_entry_pa; | ||
284 | partid_t partid = XPC_PARTID(part); | ||
285 | enum xpc_retval ret; | ||
286 | |||
287 | |||
288 | /* pull the cacheline that contains the variables we're interested in */ | ||
289 | |||
290 | DBUG_ON(part->remote_vars_part_pa != | ||
291 | L1_CACHE_ALIGN(part->remote_vars_part_pa)); | ||
292 | DBUG_ON(sizeof(struct xpc_vars_part) != L1_CACHE_BYTES / 2); | ||
293 | |||
294 | remote_entry_pa = part->remote_vars_part_pa + | ||
295 | sn_partition_id * sizeof(struct xpc_vars_part); | ||
296 | |||
297 | remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1)); | ||
298 | |||
299 | pulled_entry = (struct xpc_vars_part *) ((u64) pulled_entry_cacheline + | ||
300 | (remote_entry_pa & (L1_CACHE_BYTES - 1))); | ||
301 | |||
302 | ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline, | ||
303 | (void *) remote_entry_cacheline_pa, | ||
304 | L1_CACHE_BYTES); | ||
305 | if (ret != xpcSuccess) { | ||
306 | dev_dbg(xpc_chan, "failed to pull XPC vars_part from " | ||
307 | "partition %d, ret=%d\n", partid, ret); | ||
308 | return ret; | ||
309 | } | ||
310 | |||
311 | |||
312 | /* see if they've been set up yet */ | ||
313 | |||
314 | if (pulled_entry->magic != XPC_VP_MAGIC1 && | ||
315 | pulled_entry->magic != XPC_VP_MAGIC2) { | ||
316 | |||
317 | if (pulled_entry->magic != 0) { | ||
318 | dev_dbg(xpc_chan, "partition %d's XPC vars_part for " | ||
319 | "partition %d has bad magic value (=0x%lx)\n", | ||
320 | partid, sn_partition_id, pulled_entry->magic); | ||
321 | return xpcBadMagic; | ||
322 | } | ||
323 | |||
324 | /* they've not been initialized yet */ | ||
325 | return xpcRetry; | ||
326 | } | ||
327 | |||
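 | 	/* | ||
 | 	 * Handshake note: MAGIC1 indicates a side has published its vars_part | ||
 | 	 * entry; MAGIC2 indicates it has also pulled and validated the other | ||
 | 	 * side's. Setup is complete only once both sides reach MAGIC2. | ||
 | 	 */ | ||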
328 | if (xpc_vars_part[partid].magic == XPC_VP_MAGIC1) { | ||
329 | |||
330 | /* validate the variables */ | ||
331 | |||
332 | if (pulled_entry->GPs_pa == 0 || | ||
333 | pulled_entry->openclose_args_pa == 0 || | ||
334 | pulled_entry->IPI_amo_pa == 0) { | ||
335 | |||
336 | dev_err(xpc_chan, "partition %d's XPC vars_part for " | ||
337 | "partition %d are not valid\n", partid, | ||
338 | sn_partition_id); | ||
339 | return xpcInvalidAddress; | ||
340 | } | ||
341 | |||
342 | /* the variables we imported look to be valid */ | ||
343 | |||
344 | part->remote_GPs_pa = pulled_entry->GPs_pa; | ||
345 | part->remote_openclose_args_pa = | ||
346 | pulled_entry->openclose_args_pa; | ||
347 | part->remote_IPI_amo_va = | ||
348 | (AMO_t *) __va(pulled_entry->IPI_amo_pa); | ||
349 | part->remote_IPI_nasid = pulled_entry->IPI_nasid; | ||
350 | part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid; | ||
351 | |||
352 | if (part->nchannels > pulled_entry->nchannels) { | ||
353 | part->nchannels = pulled_entry->nchannels; | ||
354 | } | ||
355 | |||
356 | /* let the other side know that we've pulled their variables */ | ||
357 | |||
358 | (volatile u64) xpc_vars_part[partid].magic = XPC_VP_MAGIC2; | ||
359 | } | ||
360 | |||
361 | if (pulled_entry->magic == XPC_VP_MAGIC1) { | ||
362 | return xpcRetry; | ||
363 | } | ||
364 | |||
365 | return xpcSuccess; | ||
366 | } | ||
367 | |||
368 | |||
369 | /* | ||
370 | * Get the IPI flags and pull the openclose args and/or remote GPs as needed. | ||
371 | */ | ||
372 | static u64 | ||
373 | xpc_get_IPI_flags(struct xpc_partition *part) | ||
374 | { | ||
375 | unsigned long irq_flags; | ||
376 | u64 IPI_amo; | ||
377 | enum xpc_retval ret; | ||
378 | |||
379 | |||
380 | /* | ||
381 | * See if there are any IPI flags to be handled. | ||
382 | */ | ||
383 | |||
384 | spin_lock_irqsave(&part->IPI_lock, irq_flags); | ||
385 | if ((IPI_amo = part->local_IPI_amo) != 0) { | ||
386 | part->local_IPI_amo = 0; | ||
387 | } | ||
388 | spin_unlock_irqrestore(&part->IPI_lock, irq_flags); | ||
389 | |||
390 | |||
391 | if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) { | ||
392 | ret = xpc_pull_remote_cachelines(part, | ||
393 | part->remote_openclose_args, | ||
394 | (void *) part->remote_openclose_args_pa, | ||
395 | XPC_OPENCLOSE_ARGS_SIZE); | ||
396 | if (ret != xpcSuccess) { | ||
397 | XPC_DEACTIVATE_PARTITION(part, ret); | ||
398 | |||
399 | dev_dbg(xpc_chan, "failed to pull openclose args from " | ||
400 | "partition %d, ret=%d\n", XPC_PARTID(part), | ||
401 | ret); | ||
402 | |||
403 | /* don't bother processing IPIs anymore */ | ||
404 | IPI_amo = 0; | ||
405 | } | ||
406 | } | ||
407 | |||
408 | if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) { | ||
409 | ret = xpc_pull_remote_cachelines(part, part->remote_GPs, | ||
410 | (void *) part->remote_GPs_pa, | ||
411 | XPC_GP_SIZE); | ||
412 | if (ret != xpcSuccess) { | ||
413 | XPC_DEACTIVATE_PARTITION(part, ret); | ||
414 | |||
415 | dev_dbg(xpc_chan, "failed to pull GPs from partition " | ||
416 | "%d, ret=%d\n", XPC_PARTID(part), ret); | ||
417 | |||
418 | /* don't bother processing IPIs anymore */ | ||
419 | IPI_amo = 0; | ||
420 | } | ||
421 | } | ||
422 | |||
423 | return IPI_amo; | ||
424 | } | ||
425 | |||
426 | |||
427 | /* | ||
428 | * Allocate the local message queue and the notify queue. | ||
429 | */ | ||
430 | static enum xpc_retval | ||
431 | xpc_allocate_local_msgqueue(struct xpc_channel *ch) | ||
432 | { | ||
433 | unsigned long irq_flags; | ||
434 | int nentries; | ||
435 | size_t nbytes; | ||
436 | |||
437 | |||
438 | // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between | ||
439 | // >>> iterations of the for-loop, bail if set? | ||
440 | |||
441 | 	// >>> should we impose a minimum # of entries? like 4 or 8? | ||
442 | for (nentries = ch->local_nentries; nentries > 0; nentries--) { | ||
443 | |||
444 | nbytes = nentries * ch->msg_size; | ||
445 | ch->local_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes, | ||
446 | (GFP_KERNEL | GFP_DMA), | ||
447 | &ch->local_msgqueue_base); | ||
448 | if (ch->local_msgqueue == NULL) { | ||
449 | continue; | ||
450 | } | ||
451 | memset(ch->local_msgqueue, 0, nbytes); | ||
452 | |||
453 | nbytes = nentries * sizeof(struct xpc_notify); | ||
454 | ch->notify_queue = kmalloc(nbytes, (GFP_KERNEL | GFP_DMA)); | ||
455 | if (ch->notify_queue == NULL) { | ||
456 | kfree(ch->local_msgqueue_base); | ||
457 | ch->local_msgqueue = NULL; | ||
458 | continue; | ||
459 | } | ||
460 | memset(ch->notify_queue, 0, nbytes); | ||
461 | |||
462 | spin_lock_irqsave(&ch->lock, irq_flags); | ||
463 | if (nentries < ch->local_nentries) { | ||
464 | dev_dbg(xpc_chan, "nentries=%d local_nentries=%d, " | ||
465 | "partid=%d, channel=%d\n", nentries, | ||
466 | ch->local_nentries, ch->partid, ch->number); | ||
467 | |||
468 | ch->local_nentries = nentries; | ||
469 | } | ||
470 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
471 | return xpcSuccess; | ||
472 | } | ||
473 | |||
474 | dev_dbg(xpc_chan, "can't get memory for local message queue and notify " | ||
475 | "queue, partid=%d, channel=%d\n", ch->partid, ch->number); | ||
476 | return xpcNoMemory; | ||
477 | } | ||
478 | |||
479 | |||
480 | /* | ||
481 | * Allocate the cached remote message queue. | ||
482 | */ | ||
483 | static enum xpc_retval | ||
484 | xpc_allocate_remote_msgqueue(struct xpc_channel *ch) | ||
485 | { | ||
486 | unsigned long irq_flags; | ||
487 | int nentries; | ||
488 | size_t nbytes; | ||
489 | |||
490 | |||
491 | DBUG_ON(ch->remote_nentries <= 0); | ||
492 | |||
493 | // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between | ||
494 | // >>> iterations of the for-loop, bail if set? | ||
495 | |||
496 | 	// >>> should we impose a minimum # of entries? like 4 or 8? | ||
497 | for (nentries = ch->remote_nentries; nentries > 0; nentries--) { | ||
498 | |||
499 | nbytes = nentries * ch->msg_size; | ||
500 | ch->remote_msgqueue = xpc_kmalloc_cacheline_aligned(nbytes, | ||
501 | (GFP_KERNEL | GFP_DMA), | ||
502 | &ch->remote_msgqueue_base); | ||
503 | if (ch->remote_msgqueue == NULL) { | ||
504 | continue; | ||
505 | } | ||
506 | memset(ch->remote_msgqueue, 0, nbytes); | ||
507 | |||
508 | spin_lock_irqsave(&ch->lock, irq_flags); | ||
509 | if (nentries < ch->remote_nentries) { | ||
510 | dev_dbg(xpc_chan, "nentries=%d remote_nentries=%d, " | ||
511 | "partid=%d, channel=%d\n", nentries, | ||
512 | ch->remote_nentries, ch->partid, ch->number); | ||
513 | |||
514 | ch->remote_nentries = nentries; | ||
515 | } | ||
516 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
517 | return xpcSuccess; | ||
518 | } | ||
519 | |||
520 | dev_dbg(xpc_chan, "can't get memory for cached remote message queue, " | ||
521 | "partid=%d, channel=%d\n", ch->partid, ch->number); | ||
522 | return xpcNoMemory; | ||
523 | } | ||
524 | |||
525 | |||
526 | /* | ||
527 | * Allocate message queues and other stuff associated with a channel. | ||
528 | * | ||
529 | * Note: Assumes all of the channel sizes are filled in. | ||
530 | */ | ||
531 | static enum xpc_retval | ||
532 | xpc_allocate_msgqueues(struct xpc_channel *ch) | ||
533 | { | ||
534 | unsigned long irq_flags; | ||
535 | int i; | ||
536 | enum xpc_retval ret; | ||
537 | |||
538 | |||
539 | DBUG_ON(ch->flags & XPC_C_SETUP); | ||
540 | |||
541 | if ((ret = xpc_allocate_local_msgqueue(ch)) != xpcSuccess) { | ||
542 | return ret; | ||
543 | } | ||
544 | |||
545 | if ((ret = xpc_allocate_remote_msgqueue(ch)) != xpcSuccess) { | ||
546 | kfree(ch->local_msgqueue_base); | ||
547 | ch->local_msgqueue = NULL; | ||
548 | kfree(ch->notify_queue); | ||
549 | ch->notify_queue = NULL; | ||
550 | return ret; | ||
551 | } | ||
552 | |||
553 | for (i = 0; i < ch->local_nentries; i++) { | ||
554 | /* use a semaphore as an event wait queue */ | ||
555 | sema_init(&ch->notify_queue[i].sema, 0); | ||
556 | } | ||
557 | |||
558 | sema_init(&ch->teardown_sema, 0); /* event wait */ | ||
559 | |||
560 | spin_lock_irqsave(&ch->lock, irq_flags); | ||
561 | ch->flags |= XPC_C_SETUP; | ||
562 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
563 | |||
564 | return xpcSuccess; | ||
565 | } | ||
566 | |||
567 | |||
568 | /* | ||
569 | * Process a connect message from a remote partition. | ||
570 | * | ||
571 | * Note: xpc_process_connect() is expecting to be called with the | ||
572 | * spin_lock_irqsave held and will leave it locked upon return. | ||
573 | */ | ||
574 | static void | ||
575 | xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags) | ||
576 | { | ||
577 | enum xpc_retval ret; | ||
578 | |||
579 | |||
580 | DBUG_ON(!spin_is_locked(&ch->lock)); | ||
581 | |||
582 | if (!(ch->flags & XPC_C_OPENREQUEST) || | ||
583 | !(ch->flags & XPC_C_ROPENREQUEST)) { | ||
584 | /* nothing more to do for now */ | ||
585 | return; | ||
586 | } | ||
587 | DBUG_ON(!(ch->flags & XPC_C_CONNECTING)); | ||
588 | |||
589 | if (!(ch->flags & XPC_C_SETUP)) { | ||
590 | spin_unlock_irqrestore(&ch->lock, *irq_flags); | ||
591 | ret = xpc_allocate_msgqueues(ch); | ||
592 | spin_lock_irqsave(&ch->lock, *irq_flags); | ||
593 | |||
594 | if (ret != xpcSuccess) { | ||
595 | XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags); | ||
596 | } | ||
597 | if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) { | ||
598 | return; | ||
599 | } | ||
600 | |||
601 | DBUG_ON(!(ch->flags & XPC_C_SETUP)); | ||
602 | DBUG_ON(ch->local_msgqueue == NULL); | ||
603 | DBUG_ON(ch->remote_msgqueue == NULL); | ||
604 | } | ||
605 | |||
606 | if (!(ch->flags & XPC_C_OPENREPLY)) { | ||
607 | ch->flags |= XPC_C_OPENREPLY; | ||
608 | xpc_IPI_send_openreply(ch, irq_flags); | ||
609 | } | ||
610 | |||
611 | if (!(ch->flags & XPC_C_ROPENREPLY)) { | ||
612 | return; | ||
613 | } | ||
614 | |||
615 | DBUG_ON(ch->remote_msgqueue_pa == 0); | ||
616 | |||
617 | ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */ | ||
618 | |||
619 | dev_info(xpc_chan, "channel %d to partition %d connected\n", | ||
620 | ch->number, ch->partid); | ||
621 | |||
622 | spin_unlock_irqrestore(&ch->lock, *irq_flags); | ||
623 | xpc_create_kthreads(ch, 1); | ||
624 | spin_lock_irqsave(&ch->lock, *irq_flags); | ||
625 | } | ||
626 | |||
627 | |||
628 | /* | ||
629 | * Free up message queues and other stuff that were allocated for the specified | ||
630 | * channel. | ||
631 | * | ||
632 | * Note: ch->reason and ch->reason_line are left set for debugging purposes, | ||
633 | * they're cleared when XPC_C_DISCONNECTED is cleared. | ||
634 | */ | ||
635 | static void | ||
636 | xpc_free_msgqueues(struct xpc_channel *ch) | ||
637 | { | ||
638 | DBUG_ON(!spin_is_locked(&ch->lock)); | ||
639 | DBUG_ON(atomic_read(&ch->n_to_notify) != 0); | ||
640 | |||
641 | ch->remote_msgqueue_pa = 0; | ||
642 | ch->func = NULL; | ||
643 | ch->key = NULL; | ||
644 | ch->msg_size = 0; | ||
645 | ch->local_nentries = 0; | ||
646 | ch->remote_nentries = 0; | ||
647 | ch->kthreads_assigned_limit = 0; | ||
648 | ch->kthreads_idle_limit = 0; | ||
649 | |||
650 | ch->local_GP->get = 0; | ||
651 | ch->local_GP->put = 0; | ||
652 | ch->remote_GP.get = 0; | ||
653 | ch->remote_GP.put = 0; | ||
654 | ch->w_local_GP.get = 0; | ||
655 | ch->w_local_GP.put = 0; | ||
656 | ch->w_remote_GP.get = 0; | ||
657 | ch->w_remote_GP.put = 0; | ||
658 | ch->next_msg_to_pull = 0; | ||
659 | |||
660 | if (ch->flags & XPC_C_SETUP) { | ||
661 | ch->flags &= ~XPC_C_SETUP; | ||
662 | |||
663 | dev_dbg(xpc_chan, "ch->flags=0x%x, partid=%d, channel=%d\n", | ||
664 | ch->flags, ch->partid, ch->number); | ||
665 | |||
666 | kfree(ch->local_msgqueue_base); | ||
667 | ch->local_msgqueue = NULL; | ||
668 | kfree(ch->remote_msgqueue_base); | ||
669 | ch->remote_msgqueue = NULL; | ||
670 | kfree(ch->notify_queue); | ||
671 | ch->notify_queue = NULL; | ||
672 | |||
673 | /* in case someone is waiting for the teardown to complete */ | ||
674 | up(&ch->teardown_sema); | ||
675 | } | ||
676 | } | ||
677 | |||
678 | |||
679 | /* | ||
680 | * spin_lock_irqsave() is expected to be held on entry. | ||
681 | */ | ||
682 | static void | ||
683 | xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) | ||
684 | { | ||
685 | struct xpc_partition *part = &xpc_partitions[ch->partid]; | ||
686 | u32 ch_flags = ch->flags; | ||
687 | |||
688 | |||
689 | DBUG_ON(!spin_is_locked(&ch->lock)); | ||
690 | |||
691 | if (!(ch->flags & XPC_C_DISCONNECTING)) { | ||
692 | return; | ||
693 | } | ||
694 | |||
695 | DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); | ||
696 | |||
697 | /* make sure all activity has settled down first */ | ||
698 | |||
699 | if (atomic_read(&ch->references) > 0) { | ||
700 | return; | ||
701 | } | ||
702 | DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0); | ||
703 | |||
704 | /* it's now safe to free the channel's message queues */ | ||
705 | |||
706 | xpc_free_msgqueues(ch); | ||
707 | DBUG_ON(ch->flags & XPC_C_SETUP); | ||
708 | |||
709 | if (part->act_state != XPC_P_DEACTIVATING) { | ||
710 | |||
711 | /* as long as the other side is up do the full protocol */ | ||
712 | |||
713 | if (!(ch->flags & XPC_C_RCLOSEREQUEST)) { | ||
714 | return; | ||
715 | } | ||
716 | |||
717 | if (!(ch->flags & XPC_C_CLOSEREPLY)) { | ||
718 | ch->flags |= XPC_C_CLOSEREPLY; | ||
719 | xpc_IPI_send_closereply(ch, irq_flags); | ||
720 | } | ||
721 | |||
722 | if (!(ch->flags & XPC_C_RCLOSEREPLY)) { | ||
723 | return; | ||
724 | } | ||
725 | } | ||
726 | |||
727 | /* both sides are disconnected now */ | ||
728 | |||
729 | ch->flags = XPC_C_DISCONNECTED; /* clear all flags, but this one */ | ||
730 | |||
731 | atomic_dec(&part->nchannels_active); | ||
732 | |||
733 | if (ch_flags & XPC_C_WASCONNECTED) { | ||
734 | dev_info(xpc_chan, "channel %d to partition %d disconnected, " | ||
735 | "reason=%d\n", ch->number, ch->partid, ch->reason); | ||
736 | } | ||
737 | } | ||
738 | |||
739 | |||
740 | /* | ||
741 | * Process a change in the channel's remote connection state. | ||
742 | */ | ||
743 | static void | ||
744 | xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number, | ||
745 | u8 IPI_flags) | ||
746 | { | ||
747 | unsigned long irq_flags; | ||
748 | struct xpc_openclose_args *args = | ||
749 | &part->remote_openclose_args[ch_number]; | ||
750 | struct xpc_channel *ch = &part->channels[ch_number]; | ||
751 | enum xpc_retval reason; | ||
752 | |||
753 | |||
754 | |||
755 | spin_lock_irqsave(&ch->lock, irq_flags); | ||
756 | |||
757 | |||
758 | if (IPI_flags & XPC_IPI_CLOSEREQUEST) { | ||
759 | |||
760 | dev_dbg(xpc_chan, "XPC_IPI_CLOSEREQUEST (reason=%d) received " | ||
761 | "from partid=%d, channel=%d\n", args->reason, | ||
762 | ch->partid, ch->number); | ||
763 | |||
764 | /* | ||
765 | * If RCLOSEREQUEST is set, we're probably waiting for | ||
766 | * RCLOSEREPLY. We should find it and a ROPENREQUEST packed | ||
767 | 		 * with this RCLOSEREQUEST in the IPI_flags. | ||
768 | */ | ||
769 | |||
770 | if (ch->flags & XPC_C_RCLOSEREQUEST) { | ||
771 | DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING)); | ||
772 | DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); | ||
773 | DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY)); | ||
774 | DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY); | ||
775 | |||
776 | DBUG_ON(!(IPI_flags & XPC_IPI_CLOSEREPLY)); | ||
777 | IPI_flags &= ~XPC_IPI_CLOSEREPLY; | ||
778 | ch->flags |= XPC_C_RCLOSEREPLY; | ||
779 | |||
780 | /* both sides have finished disconnecting */ | ||
781 | xpc_process_disconnect(ch, &irq_flags); | ||
782 | } | ||
783 | |||
784 | if (ch->flags & XPC_C_DISCONNECTED) { | ||
785 | // >>> explain this section | ||
786 | |||
787 | if (!(IPI_flags & XPC_IPI_OPENREQUEST)) { | ||
788 | DBUG_ON(part->act_state != | ||
789 | XPC_P_DEACTIVATING); | ||
790 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
791 | return; | ||
792 | } | ||
793 | |||
794 | XPC_SET_REASON(ch, 0, 0); | ||
795 | ch->flags &= ~XPC_C_DISCONNECTED; | ||
796 | |||
797 | atomic_inc(&part->nchannels_active); | ||
798 | ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST); | ||
799 | } | ||
800 | |||
801 | IPI_flags &= ~(XPC_IPI_OPENREQUEST | XPC_IPI_OPENREPLY); | ||
802 | |||
803 | /* | ||
804 | * The meaningful CLOSEREQUEST connection state fields are: | ||
805 | * reason = reason connection is to be closed | ||
806 | */ | ||
807 | |||
808 | ch->flags |= XPC_C_RCLOSEREQUEST; | ||
809 | |||
810 | if (!(ch->flags & XPC_C_DISCONNECTING)) { | ||
811 | reason = args->reason; | ||
812 | if (reason <= xpcSuccess || reason > xpcUnknownReason) { | ||
813 | reason = xpcUnknownReason; | ||
814 | } else if (reason == xpcUnregistering) { | ||
815 | reason = xpcOtherUnregistering; | ||
816 | } | ||
817 | |||
818 | XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags); | ||
819 | } else { | ||
820 | xpc_process_disconnect(ch, &irq_flags); | ||
821 | } | ||
822 | } | ||
823 | |||
824 | |||
825 | if (IPI_flags & XPC_IPI_CLOSEREPLY) { | ||
826 | |||
827 | dev_dbg(xpc_chan, "XPC_IPI_CLOSEREPLY received from partid=%d," | ||
828 | " channel=%d\n", ch->partid, ch->number); | ||
829 | |||
830 | if (ch->flags & XPC_C_DISCONNECTED) { | ||
831 | DBUG_ON(part->act_state != XPC_P_DEACTIVATING); | ||
832 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
833 | return; | ||
834 | } | ||
835 | |||
836 | DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); | ||
837 | DBUG_ON(!(ch->flags & XPC_C_RCLOSEREQUEST)); | ||
838 | |||
839 | ch->flags |= XPC_C_RCLOSEREPLY; | ||
840 | |||
841 | if (ch->flags & XPC_C_CLOSEREPLY) { | ||
842 | /* both sides have finished disconnecting */ | ||
843 | xpc_process_disconnect(ch, &irq_flags); | ||
844 | } | ||
845 | } | ||
846 | |||
847 | |||
848 | if (IPI_flags & XPC_IPI_OPENREQUEST) { | ||
849 | |||
850 | dev_dbg(xpc_chan, "XPC_IPI_OPENREQUEST (msg_size=%d, " | ||
851 | "local_nentries=%d) received from partid=%d, " | ||
852 | "channel=%d\n", args->msg_size, args->local_nentries, | ||
853 | ch->partid, ch->number); | ||
854 | |||
855 | if ((ch->flags & XPC_C_DISCONNECTING) || | ||
856 | part->act_state == XPC_P_DEACTIVATING) { | ||
857 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
858 | return; | ||
859 | } | ||
860 | DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED | | ||
861 | XPC_C_OPENREQUEST))); | ||
862 | DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY | | ||
863 | XPC_C_OPENREPLY | XPC_C_CONNECTED)); | ||
864 | |||
865 | /* | ||
866 | * The meaningful OPENREQUEST connection state fields are: | ||
867 | * msg_size = size of channel's messages in bytes | ||
868 | * local_nentries = remote partition's local_nentries | ||
869 | */ | ||
870 | DBUG_ON(args->msg_size == 0); | ||
871 | DBUG_ON(args->local_nentries == 0); | ||
872 | |||
873 | ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING); | ||
874 | ch->remote_nentries = args->local_nentries; | ||
875 | |||
876 | |||
877 | if (ch->flags & XPC_C_OPENREQUEST) { | ||
878 | if (args->msg_size != ch->msg_size) { | ||
879 | XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes, | ||
880 | &irq_flags); | ||
881 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
882 | return; | ||
883 | } | ||
884 | } else { | ||
885 | ch->msg_size = args->msg_size; | ||
886 | |||
887 | XPC_SET_REASON(ch, 0, 0); | ||
888 | ch->flags &= ~XPC_C_DISCONNECTED; | ||
889 | |||
890 | atomic_inc(&part->nchannels_active); | ||
891 | } | ||
892 | |||
893 | xpc_process_connect(ch, &irq_flags); | ||
894 | } | ||
895 | |||
896 | |||
897 | if (IPI_flags & XPC_IPI_OPENREPLY) { | ||
898 | |||
899 | dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY (local_msgqueue_pa=0x%lx, " | ||
900 | "local_nentries=%d, remote_nentries=%d) received from " | ||
901 | "partid=%d, channel=%d\n", args->local_msgqueue_pa, | ||
902 | args->local_nentries, args->remote_nentries, | ||
903 | ch->partid, ch->number); | ||
904 | |||
905 | if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) { | ||
906 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
907 | return; | ||
908 | } | ||
909 | DBUG_ON(!(ch->flags & XPC_C_OPENREQUEST)); | ||
910 | DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST)); | ||
911 | DBUG_ON(ch->flags & XPC_C_CONNECTED); | ||
912 | |||
913 | /* | ||
914 | * The meaningful OPENREPLY connection state fields are: | ||
915 | * local_msgqueue_pa = physical address of remote | ||
916 | * partition's local_msgqueue | ||
917 | * local_nentries = remote partition's local_nentries | ||
918 | * remote_nentries = remote partition's remote_nentries | ||
919 | */ | ||
920 | DBUG_ON(args->local_msgqueue_pa == 0); | ||
921 | DBUG_ON(args->local_nentries == 0); | ||
922 | DBUG_ON(args->remote_nentries == 0); | ||
923 | |||
924 | ch->flags |= XPC_C_ROPENREPLY; | ||
925 | ch->remote_msgqueue_pa = args->local_msgqueue_pa; | ||
926 | |||
927 | if (args->local_nentries < ch->remote_nentries) { | ||
928 | dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY: new " | ||
929 | "remote_nentries=%d, old remote_nentries=%d, " | ||
930 | "partid=%d, channel=%d\n", | ||
931 | args->local_nentries, ch->remote_nentries, | ||
932 | ch->partid, ch->number); | ||
933 | |||
934 | ch->remote_nentries = args->local_nentries; | ||
935 | } | ||
936 | if (args->remote_nentries < ch->local_nentries) { | ||
937 | dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY: new " | ||
938 | "local_nentries=%d, old local_nentries=%d, " | ||
939 | "partid=%d, channel=%d\n", | ||
940 | args->remote_nentries, ch->local_nentries, | ||
941 | ch->partid, ch->number); | ||
942 | |||
943 | ch->local_nentries = args->remote_nentries; | ||
944 | } | ||
945 | |||
946 | xpc_process_connect(ch, &irq_flags); | ||
947 | } | ||
948 | |||
949 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
950 | } | ||
951 | |||
952 | |||
953 | /* | ||
954 | * Attempt to establish a channel connection to a remote partition. | ||
955 | */ | ||
956 | static enum xpc_retval | ||
957 | xpc_connect_channel(struct xpc_channel *ch) | ||
958 | { | ||
959 | unsigned long irq_flags; | ||
960 | struct xpc_registration *registration = &xpc_registrations[ch->number]; | ||
961 | |||
962 | |||
963 | if (down_interruptible(®istration->sema) != 0) { | ||
964 | return xpcInterrupted; | ||
965 | } | ||
966 | |||
967 | if (!XPC_CHANNEL_REGISTERED(ch->number)) { | ||
968 | up(®istration->sema); | ||
969 | return xpcUnregistered; | ||
970 | } | ||
971 | |||
972 | spin_lock_irqsave(&ch->lock, irq_flags); | ||
973 | |||
974 | DBUG_ON(ch->flags & XPC_C_CONNECTED); | ||
975 | DBUG_ON(ch->flags & XPC_C_OPENREQUEST); | ||
976 | |||
977 | if (ch->flags & XPC_C_DISCONNECTING) { | ||
978 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
979 | up(®istration->sema); | ||
980 | return ch->reason; | ||
981 | } | ||
982 | |||
983 | |||
984 | /* add info from the channel connect registration to the channel */ | ||
985 | |||
986 | ch->kthreads_assigned_limit = registration->assigned_limit; | ||
987 | ch->kthreads_idle_limit = registration->idle_limit; | ||
988 | DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0); | ||
989 | DBUG_ON(atomic_read(&ch->kthreads_idle) != 0); | ||
990 | DBUG_ON(atomic_read(&ch->kthreads_active) != 0); | ||
991 | |||
992 | ch->func = registration->func; | ||
993 | DBUG_ON(registration->func == NULL); | ||
994 | ch->key = registration->key; | ||
995 | |||
996 | ch->local_nentries = registration->nentries; | ||
997 | |||
998 | if (ch->flags & XPC_C_ROPENREQUEST) { | ||
999 | if (registration->msg_size != ch->msg_size) { | ||
1000 | /* the local and remote sides aren't the same */ | ||
1001 | |||
1002 | /* | ||
1003 | * Because XPC_DISCONNECT_CHANNEL() can block we're | ||
1004 | * forced to up the registration sema before we unlock | ||
1005 | * the channel lock. But that's okay here because we're | ||
1006 | * done with the part that required the registration | ||
1007 | * sema. XPC_DISCONNECT_CHANNEL() requires that the | ||
1008 | * channel lock be locked and will unlock and relock | ||
1009 | * the channel lock as needed. | ||
1010 | */ | ||
1011 | up(®istration->sema); | ||
1012 | XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes, | ||
1013 | &irq_flags); | ||
1014 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
1015 | return xpcUnequalMsgSizes; | ||
1016 | } | ||
1017 | } else { | ||
1018 | ch->msg_size = registration->msg_size; | ||
1019 | |||
1020 | XPC_SET_REASON(ch, 0, 0); | ||
1021 | ch->flags &= ~XPC_C_DISCONNECTED; | ||
1022 | |||
1023 | atomic_inc(&xpc_partitions[ch->partid].nchannels_active); | ||
1024 | } | ||
1025 | |||
1026 | up(®istration->sema); | ||
1027 | |||
1028 | |||
1029 | /* initiate the connection */ | ||
1030 | |||
1031 | ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING); | ||
1032 | xpc_IPI_send_openrequest(ch, &irq_flags); | ||
1033 | |||
1034 | xpc_process_connect(ch, &irq_flags); | ||
1035 | |||
1036 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
1037 | |||
1038 | return xpcSuccess; | ||
1039 | } | ||
1040 | |||
1041 | |||
1042 | /* | ||
1043 | * Notify those who wanted to be notified upon delivery of their message. | ||
1044 | */ | ||
1045 | static void | ||
1046 | xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put) | ||
1047 | { | ||
1048 | struct xpc_notify *notify; | ||
1049 | u8 notify_type; | ||
1050 | s64 get = ch->w_remote_GP.get - 1; | ||
1051 | |||
1052 | |||
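 | 	/* | ||
 | 	 * Walk the notify_queue entries for messages in the range | ||
 | 	 * [w_remote_GP.get, put), stopping early once no one is left to notify. | ||
 | 	 */ | ||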
1053 | while (++get < put && atomic_read(&ch->n_to_notify) > 0) { | ||
1054 | |||
1055 | notify = &ch->notify_queue[get % ch->local_nentries]; | ||
1056 | |||
1057 | /* | ||
1058 | * See if the notify entry indicates it was associated with | ||
1059 | 		 * a message whose sender wants to be notified. It is possible | ||
1060 | * that it is, but someone else is doing or has done the | ||
1061 | * notification. | ||
1062 | */ | ||
1063 | notify_type = notify->type; | ||
1064 | if (notify_type == 0 || | ||
1065 | cmpxchg(¬ify->type, notify_type, 0) != | ||
1066 | notify_type) { | ||
1067 | continue; | ||
1068 | } | ||
1069 | |||
1070 | DBUG_ON(notify_type != XPC_N_CALL); | ||
1071 | |||
1072 | atomic_dec(&ch->n_to_notify); | ||
1073 | |||
1074 | if (notify->func != NULL) { | ||
1075 | dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, " | ||
1076 | "msg_number=%ld, partid=%d, channel=%d\n", | ||
1077 | (void *) notify, get, ch->partid, ch->number); | ||
1078 | |||
1079 | notify->func(reason, ch->partid, ch->number, | ||
1080 | notify->key); | ||
1081 | |||
1082 | dev_dbg(xpc_chan, "notify->func() returned, " | ||
1083 | "notify=0x%p, msg_number=%ld, partid=%d, " | ||
1084 | "channel=%d\n", (void *) notify, get, | ||
1085 | ch->partid, ch->number); | ||
1086 | } | ||
1087 | } | ||
1088 | } | ||
1089 | |||
1090 | |||
1091 | /* | ||
1092 | * Clear some of the msg flags in the local message queue. | ||
1093 | */ | ||
1094 | static inline void | ||
1095 | xpc_clear_local_msgqueue_flags(struct xpc_channel *ch) | ||
1096 | { | ||
1097 | struct xpc_msg *msg; | ||
1098 | s64 get; | ||
1099 | |||
1100 | |||
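 | 	/* | ||
 | 	 * Clear the flags of messages the remote side has now received | ||
 | 	 * (indices w_remote_GP.get through remote_GP.get - 1) so those local | ||
 | 	 * message queue slots can be reallocated. | ||
 | 	 */ | ||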
1101 | get = ch->w_remote_GP.get; | ||
1102 | do { | ||
1103 | msg = (struct xpc_msg *) ((u64) ch->local_msgqueue + | ||
1104 | (get % ch->local_nentries) * ch->msg_size); | ||
1105 | msg->flags = 0; | ||
1106 | } while (++get < (volatile s64) ch->remote_GP.get); | ||
1107 | } | ||
1108 | |||
1109 | |||
1110 | /* | ||
1111 | * Clear some of the msg flags in the remote message queue. | ||
1112 | */ | ||
1113 | static inline void | ||
1114 | xpc_clear_remote_msgqueue_flags(struct xpc_channel *ch) | ||
1115 | { | ||
1116 | struct xpc_msg *msg; | ||
1117 | s64 put; | ||
1118 | |||
1119 | |||
1120 | put = ch->w_remote_GP.put; | ||
1121 | do { | ||
1122 | msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + | ||
1123 | (put % ch->remote_nentries) * ch->msg_size); | ||
1124 | msg->flags = 0; | ||
1125 | } while (++put < (volatile s64) ch->remote_GP.put); | ||
1126 | } | ||
1127 | |||
1128 | |||
1129 | static void | ||
1130 | xpc_process_msg_IPI(struct xpc_partition *part, int ch_number) | ||
1131 | { | ||
1132 | struct xpc_channel *ch = &part->channels[ch_number]; | ||
1133 | int nmsgs_sent; | ||
1134 | |||
1135 | |||
1136 | ch->remote_GP = part->remote_GPs[ch_number]; | ||
1137 | |||
1138 | |||
1139 | /* See what, if anything, has changed for each connected channel */ | ||
1140 | |||
1141 | xpc_msgqueue_ref(ch); | ||
1142 | |||
1143 | if (ch->w_remote_GP.get == ch->remote_GP.get && | ||
1144 | ch->w_remote_GP.put == ch->remote_GP.put) { | ||
1145 | /* nothing changed since GPs were last pulled */ | ||
1146 | xpc_msgqueue_deref(ch); | ||
1147 | return; | ||
1148 | } | ||
1149 | |||
1150 | if (!(ch->flags & XPC_C_CONNECTED)){ | ||
1151 | xpc_msgqueue_deref(ch); | ||
1152 | return; | ||
1153 | } | ||
1154 | |||
1155 | |||
1156 | /* | ||
1157 | * First check to see if messages recently sent by us have been | ||
1158 | * received by the other side. (The remote GET value will have | ||
1159 | * changed since we last looked at it.) | ||
1160 | */ | ||
1161 | |||
1162 | if (ch->w_remote_GP.get != ch->remote_GP.get) { | ||
1163 | |||
1164 | /* | ||
1165 | * We need to notify any senders that want to be notified | ||
1166 | * that their sent messages have been received by their | ||
1167 | * intended recipients. We need to do this before updating | ||
1168 | * w_remote_GP.get so that we don't allocate the same message | ||
1169 | * queue entries prematurely (see xpc_allocate_msg()). | ||
1170 | */ | ||
1171 | if (atomic_read(&ch->n_to_notify) > 0) { | ||
1172 | /* | ||
1173 | * Notify senders that messages sent have been | ||
1174 | * received and delivered by the other side. | ||
1175 | */ | ||
1176 | xpc_notify_senders(ch, xpcMsgDelivered, | ||
1177 | ch->remote_GP.get); | ||
1178 | } | ||
1179 | |||
1180 | /* | ||
1181 | * Clear msg->flags in previously sent messages, so that | ||
1182 | * they're ready for xpc_allocate_msg(). | ||
1183 | */ | ||
1184 | xpc_clear_local_msgqueue_flags(ch); | ||
1185 | |||
1186 | (volatile s64) ch->w_remote_GP.get = ch->remote_GP.get; | ||
1187 | |||
1188 | dev_dbg(xpc_chan, "w_remote_GP.get changed to %ld, partid=%d, " | ||
1189 | "channel=%d\n", ch->w_remote_GP.get, ch->partid, | ||
1190 | ch->number); | ||
1191 | |||
1192 | /* | ||
1193 | * If anyone was waiting for message queue entries to become | ||
1194 | * available, wake them up. | ||
1195 | */ | ||
1196 | if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) { | ||
1197 | wake_up(&ch->msg_allocate_wq); | ||
1198 | } | ||
1199 | } | ||
1200 | |||
1201 | |||
1202 | /* | ||
1203 | * Now check for newly sent messages by the other side. (The remote | ||
1204 | * PUT value will have changed since we last looked at it.) | ||
1205 | */ | ||
1206 | |||
1207 | if (ch->w_remote_GP.put != ch->remote_GP.put) { | ||
1208 | /* | ||
1209 | * Clear msg->flags in previously received messages, so that | ||
1210 | * they're ready for xpc_get_deliverable_msg(). | ||
1211 | */ | ||
1212 | xpc_clear_remote_msgqueue_flags(ch); | ||
1213 | |||
1214 | (volatile s64) ch->w_remote_GP.put = ch->remote_GP.put; | ||
1215 | |||
1216 | dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, " | ||
1217 | "channel=%d\n", ch->w_remote_GP.put, ch->partid, | ||
1218 | ch->number); | ||
1219 | |||
1220 | nmsgs_sent = ch->w_remote_GP.put - ch->w_local_GP.get; | ||
1221 | if (nmsgs_sent > 0) { | ||
1222 | dev_dbg(xpc_chan, "msgs waiting to be copied and " | ||
1223 | "delivered=%d, partid=%d, channel=%d\n", | ||
1224 | nmsgs_sent, ch->partid, ch->number); | ||
1225 | |||
1226 | if (ch->flags & XPC_C_CONNECTCALLOUT) { | ||
1227 | xpc_activate_kthreads(ch, nmsgs_sent); | ||
1228 | } | ||
1229 | } | ||
1230 | } | ||
1231 | |||
1232 | xpc_msgqueue_deref(ch); | ||
1233 | } | ||
1234 | |||
1235 | |||
1236 | void | ||
1237 | xpc_process_channel_activity(struct xpc_partition *part) | ||
1238 | { | ||
1239 | unsigned long irq_flags; | ||
1240 | u64 IPI_amo, IPI_flags; | ||
1241 | struct xpc_channel *ch; | ||
1242 | int ch_number; | ||
1243 | |||
1244 | |||
1245 | IPI_amo = xpc_get_IPI_flags(part); | ||
1246 | |||
1247 | /* | ||
1248 | * Initiate channel connections for registered channels. | ||
1249 | * | ||
1250 | * For each connected channel that has pending messages activate idle | ||
1251 | * kthreads and/or create new kthreads as needed. | ||
1252 | */ | ||
1253 | |||
1254 | for (ch_number = 0; ch_number < part->nchannels; ch_number++) { | ||
1255 | ch = &part->channels[ch_number]; | ||
1256 | |||
1257 | |||
1258 | /* | ||
1259 | * Process any open or close related IPI flags, and then deal | ||
1260 | * with connecting or disconnecting the channel as required. | ||
1261 | */ | ||
1262 | |||
1263 | IPI_flags = XPC_GET_IPI_FLAGS(IPI_amo, ch_number); | ||
1264 | |||
1265 | if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags)) { | ||
1266 | xpc_process_openclose_IPI(part, ch_number, IPI_flags); | ||
1267 | } | ||
1268 | |||
1269 | |||
1270 | if (ch->flags & XPC_C_DISCONNECTING) { | ||
1271 | spin_lock_irqsave(&ch->lock, irq_flags); | ||
1272 | xpc_process_disconnect(ch, &irq_flags); | ||
1273 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
1274 | continue; | ||
1275 | } | ||
1276 | |||
1277 | if (part->act_state == XPC_P_DEACTIVATING) { | ||
1278 | continue; | ||
1279 | } | ||
1280 | |||
1281 | if (!(ch->flags & XPC_C_CONNECTED)) { | ||
1282 | if (!(ch->flags & XPC_C_OPENREQUEST)) { | ||
1283 | DBUG_ON(ch->flags & XPC_C_SETUP); | ||
1284 | (void) xpc_connect_channel(ch); | ||
1285 | } else { | ||
1286 | spin_lock_irqsave(&ch->lock, irq_flags); | ||
1287 | xpc_process_connect(ch, &irq_flags); | ||
1288 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
1289 | } | ||
1290 | continue; | ||
1291 | } | ||
1292 | |||
1293 | |||
1294 | /* | ||
1295 | * Process any message related IPI flags, this may involve the | ||
1296 | * activation of kthreads to deliver any pending messages sent | ||
1297 | * from the other partition. | ||
1298 | */ | ||
1299 | |||
1300 | if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags)) { | ||
1301 | xpc_process_msg_IPI(part, ch_number); | ||
1302 | } | ||
1303 | } | ||
1304 | } | ||
1305 | |||
1306 | |||
1307 | /* | ||
1308 | * XPC's heartbeat code calls this function to inform XPC that a partition has | ||
1309 | * gone down. XPC responds by tearing down the XPartition Communication | ||
1310 | * infrastructure used for the just downed partition. | ||
1311 | * | ||
1312 | * XPC's heartbeat code will never call this function and xpc_partition_up() | ||
1313 | * at the same time. Nor will it ever make multiple calls to either function | ||
1314 | * at the same time. | ||
1315 | */ | ||
1316 | void | ||
1317 | xpc_partition_down(struct xpc_partition *part, enum xpc_retval reason) | ||
1318 | { | ||
1319 | unsigned long irq_flags; | ||
1320 | int ch_number; | ||
1321 | struct xpc_channel *ch; | ||
1322 | |||
1323 | |||
1324 | dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n", | ||
1325 | XPC_PARTID(part), reason); | ||
1326 | |||
1327 | if (!xpc_part_ref(part)) { | ||
1328 | /* infrastructure for this partition isn't currently set up */ | ||
1329 | return; | ||
1330 | } | ||
1331 | |||
1332 | |||
1333 | /* disconnect all channels associated with the downed partition */ | ||
1334 | |||
1335 | for (ch_number = 0; ch_number < part->nchannels; ch_number++) { | ||
1336 | ch = &part->channels[ch_number]; | ||
1337 | |||
1338 | |||
1339 | xpc_msgqueue_ref(ch); | ||
1340 | spin_lock_irqsave(&ch->lock, irq_flags); | ||
1341 | |||
1342 | XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags); | ||
1343 | |||
1344 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
1345 | xpc_msgqueue_deref(ch); | ||
1346 | } | ||
1347 | |||
1348 | xpc_wakeup_channel_mgr(part); | ||
1349 | |||
1350 | xpc_part_deref(part); | ||
1351 | } | ||
1352 | |||
1353 | |||
1354 | /* | ||
1355 | * Teardown the infrastructure necessary to support XPartition Communication | ||
1356 | * between the specified remote partition and the local one. | ||
1357 | */ | ||
1358 | void | ||
1359 | xpc_teardown_infrastructure(struct xpc_partition *part) | ||
1360 | { | ||
1361 | partid_t partid = XPC_PARTID(part); | ||
1362 | |||
1363 | |||
1364 | /* | ||
1365 | * We start off by making this partition inaccessible to local | ||
1366 | * processes by marking it as no longer setup. Then we make it | ||
1367 | * inaccessible to remote processes by clearing the XPC per partition | ||
1368 | * specific variable's magic # (which indicates that these variables | ||
1369 | * are no longer valid) and by ignoring all XPC notify IPIs sent to | ||
1370 | * this partition. | ||
1371 | */ | ||
1372 | |||
1373 | DBUG_ON(atomic_read(&part->nchannels_active) != 0); | ||
1374 | DBUG_ON(part->setup_state != XPC_P_SETUP); | ||
1375 | part->setup_state = XPC_P_WTEARDOWN; | ||
1376 | |||
1377 | xpc_vars_part[partid].magic = 0; | ||
1378 | |||
1379 | |||
1380 | free_irq(SGI_XPC_NOTIFY, (void *) (u64) partid); | ||
1381 | |||
1382 | |||
1383 | /* | ||
1384 | 	 * Before proceeding with the teardown we have to wait until all | ||
1385 | * existing references cease. | ||
1386 | */ | ||
1387 | wait_event(part->teardown_wq, (atomic_read(&part->references) == 0)); | ||
1388 | |||
1389 | |||
1390 | /* now we can begin tearing down the infrastructure */ | ||
1391 | |||
1392 | part->setup_state = XPC_P_TORNDOWN; | ||
1393 | |||
1394 | /* in case we've still got outstanding timers registered... */ | ||
1395 | del_timer_sync(&part->dropped_IPI_timer); | ||
1396 | |||
1397 | kfree(part->remote_openclose_args_base); | ||
1398 | part->remote_openclose_args = NULL; | ||
1399 | kfree(part->local_openclose_args_base); | ||
1400 | part->local_openclose_args = NULL; | ||
1401 | kfree(part->remote_GPs_base); | ||
1402 | part->remote_GPs = NULL; | ||
1403 | kfree(part->local_GPs_base); | ||
1404 | part->local_GPs = NULL; | ||
1405 | kfree(part->channels); | ||
1406 | part->channels = NULL; | ||
1407 | part->local_IPI_amo_va = NULL; | ||
1408 | } | ||
1409 | |||
1410 | |||
1411 | /* | ||
1412 | * Called by XP at the time of channel connection registration to cause | ||
1413 | * XPC to establish connections to all currently active partitions. | ||
1414 | */ | ||
1415 | void | ||
1416 | xpc_initiate_connect(int ch_number) | ||
1417 | { | ||
1418 | partid_t partid; | ||
1419 | struct xpc_partition *part; | ||
1420 | struct xpc_channel *ch; | ||
1421 | |||
1422 | |||
1423 | DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); | ||
1424 | |||
1425 | for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { | ||
1426 | part = &xpc_partitions[partid]; | ||
1427 | |||
1428 | if (xpc_part_ref(part)) { | ||
1429 | ch = &part->channels[ch_number]; | ||
1430 | |||
1431 | if (!(ch->flags & XPC_C_DISCONNECTING)) { | ||
1432 | DBUG_ON(ch->flags & XPC_C_OPENREQUEST); | ||
1433 | DBUG_ON(ch->flags & XPC_C_CONNECTED); | ||
1434 | DBUG_ON(ch->flags & XPC_C_SETUP); | ||
1435 | |||
1436 | /* | ||
1437 | * Initiate the establishment of a connection | ||
1438 | * on the newly registered channel to the | ||
1439 | * remote partition. | ||
1440 | */ | ||
1441 | xpc_wakeup_channel_mgr(part); | ||
1442 | } | ||
1443 | |||
1444 | xpc_part_deref(part); | ||
1445 | } | ||
1446 | } | ||
1447 | } | ||
1448 | |||
1449 | |||
1450 | void | ||
1451 | xpc_connected_callout(struct xpc_channel *ch) | ||
1452 | { | ||
1453 | unsigned long irq_flags; | ||
1454 | |||
1455 | |||
1456 | /* let the registerer know that a connection has been established */ | ||
1457 | |||
1458 | if (ch->func != NULL) { | ||
1459 | dev_dbg(xpc_chan, "ch->func() called, reason=xpcConnected, " | ||
1460 | "partid=%d, channel=%d\n", ch->partid, ch->number); | ||
1461 | |||
1462 | ch->func(xpcConnected, ch->partid, ch->number, | ||
1463 | (void *) (u64) ch->local_nentries, ch->key); | ||
1464 | |||
1465 | dev_dbg(xpc_chan, "ch->func() returned, reason=xpcConnected, " | ||
1466 | "partid=%d, channel=%d\n", ch->partid, ch->number); | ||
1467 | } | ||
1468 | |||
1469 | spin_lock_irqsave(&ch->lock, irq_flags); | ||
1470 | ch->flags |= XPC_C_CONNECTCALLOUT; | ||
1471 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
1472 | } | ||
1473 | |||
1474 | |||
1475 | /* | ||
1476 | * Called by XP at the time of channel connection unregistration to cause | ||
1477 | * XPC to teardown all current connections for the specified channel. | ||
1478 | * | ||
1479 | * Before returning xpc_initiate_disconnect() will wait until all connections | ||
1480 |  * on the specified channel have been closed/torn down. So the caller can be | ||
1481 | * assured that they will not be receiving any more callouts from XPC to the | ||
1482 | * function they registered via xpc_connect(). | ||
1483 | * | ||
1484 | * Arguments: | ||
1485 | * | ||
1486 | * ch_number - channel # to unregister. | ||
1487 | */ | ||
1488 | void | ||
1489 | xpc_initiate_disconnect(int ch_number) | ||
1490 | { | ||
1491 | unsigned long irq_flags; | ||
1492 | partid_t partid; | ||
1493 | struct xpc_partition *part; | ||
1494 | struct xpc_channel *ch; | ||
1495 | |||
1496 | |||
1497 | DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); | ||
1498 | |||
1499 | /* initiate the channel disconnect for every active partition */ | ||
1500 | for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { | ||
1501 | part = &xpc_partitions[partid]; | ||
1502 | |||
1503 | if (xpc_part_ref(part)) { | ||
1504 | ch = &part->channels[ch_number]; | ||
1505 | xpc_msgqueue_ref(ch); | ||
1506 | |||
1507 | spin_lock_irqsave(&ch->lock, irq_flags); | ||
1508 | |||
1509 | XPC_DISCONNECT_CHANNEL(ch, xpcUnregistering, | ||
1510 | &irq_flags); | ||
1511 | |||
1512 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
1513 | |||
1514 | xpc_msgqueue_deref(ch); | ||
1515 | xpc_part_deref(part); | ||
1516 | } | ||
1517 | } | ||
1518 | |||
1519 | xpc_disconnect_wait(ch_number); | ||
1520 | } | ||
1521 | |||
1522 | |||
1523 | /* | ||
1524 |  * Disconnect a channel, and reflect the disconnect back to all who may be waiting. | ||
1525 | * | ||
1526 | * >>> An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by | ||
1527 | * >>> xpc_free_msgqueues(). | ||
1528 | * | ||
1529 | * THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN. | ||
1530 | */ | ||
1531 | void | ||
1532 | xpc_disconnect_channel(const int line, struct xpc_channel *ch, | ||
1533 | enum xpc_retval reason, unsigned long *irq_flags) | ||
1534 | { | ||
1535 | u32 flags; | ||
1536 | |||
1537 | |||
1538 | DBUG_ON(!spin_is_locked(&ch->lock)); | ||
1539 | |||
1540 | if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) { | ||
1541 | return; | ||
1542 | } | ||
1543 | DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED))); | ||
1544 | |||
1545 | dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n", | ||
1546 | reason, line, ch->partid, ch->number); | ||
1547 | |||
1548 | XPC_SET_REASON(ch, reason, line); | ||
1549 | |||
1550 | flags = ch->flags; | ||
1551 | /* some of these may not have been set */ | ||
1552 | ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY | | ||
1553 | XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY | | ||
1554 | XPC_C_CONNECTING | XPC_C_CONNECTED); | ||
1555 | |||
1556 | ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING); | ||
1557 | xpc_IPI_send_closerequest(ch, irq_flags); | ||
1558 | |||
1559 | if (flags & XPC_C_CONNECTED) { | ||
1560 | ch->flags |= XPC_C_WASCONNECTED; | ||
1561 | } | ||
1562 | |||
1563 | if (atomic_read(&ch->kthreads_idle) > 0) { | ||
1564 | /* wake all idle kthreads so they can exit */ | ||
1565 | wake_up_all(&ch->idle_wq); | ||
1566 | } | ||
1567 | |||
1568 | spin_unlock_irqrestore(&ch->lock, *irq_flags); | ||
1569 | |||
1570 | |||
1571 | /* wake those waiting to allocate an entry from the local msg queue */ | ||
1572 | |||
1573 | if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) { | ||
1574 | wake_up(&ch->msg_allocate_wq); | ||
1575 | } | ||
1576 | |||
1577 | /* wake those waiting for notify completion */ | ||
1578 | |||
1579 | if (atomic_read(&ch->n_to_notify) > 0) { | ||
1580 | xpc_notify_senders(ch, reason, ch->w_local_GP.put); | ||
1581 | } | ||
1582 | |||
1583 | spin_lock_irqsave(&ch->lock, *irq_flags); | ||
1584 | } | ||
1585 | |||
1586 | |||
1587 | void | ||
1588 | xpc_disconnected_callout(struct xpc_channel *ch) | ||
1589 | { | ||
1590 | /* | ||
1591 | * Let the channel's registerer know that the channel is now | ||
1592 | * disconnected. We don't want to do this if the registerer was never | ||
1593 | * informed of a connection being made, unless the disconnect was for | ||
1594 | * abnormal reasons. | ||
1595 | */ | ||
1596 | |||
1597 | if (ch->func != NULL) { | ||
1598 | dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, " | ||
1599 | "channel=%d\n", ch->reason, ch->partid, ch->number); | ||
1600 | |||
1601 | ch->func(ch->reason, ch->partid, ch->number, NULL, ch->key); | ||
1602 | |||
1603 | dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, " | ||
1604 | "channel=%d\n", ch->reason, ch->partid, ch->number); | ||
1605 | } | ||
1606 | } | ||
1607 | |||
1608 | |||
1609 | /* | ||
1610 | * Wait for a message entry to become available for the specified channel, | ||
1611 | * but don't wait any longer than 1 jiffy. | ||
1612 | */ | ||
1613 | static enum xpc_retval | ||
1614 | xpc_allocate_msg_wait(struct xpc_channel *ch) | ||
1615 | { | ||
1616 | enum xpc_retval ret; | ||
1617 | |||
1618 | |||
1619 | if (ch->flags & XPC_C_DISCONNECTING) { | ||
1620 | DBUG_ON(ch->reason == xpcInterrupted); // >>> Is this true? | ||
1621 | return ch->reason; | ||
1622 | } | ||
1623 | |||
1624 | atomic_inc(&ch->n_on_msg_allocate_wq); | ||
1625 | ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1); | ||
1626 | atomic_dec(&ch->n_on_msg_allocate_wq); | ||
1627 | |||
1628 | if (ch->flags & XPC_C_DISCONNECTING) { | ||
1629 | ret = ch->reason; | ||
1630 | DBUG_ON(ch->reason == xpcInterrupted); // >>> Is this true? | ||
1631 | } else if (ret == 0) { | ||
1632 | ret = xpcTimeout; | ||
1633 | } else { | ||
1634 | ret = xpcInterrupted; | ||
1635 | } | ||
1636 | |||
1637 | return ret; | ||
1638 | } | ||
1639 | |||
1640 | |||
1641 | /* | ||
1642 | * Allocate an entry for a message from the message queue associated with the | ||
1643 | * specified channel. | ||
1644 | */ | ||
1645 | static enum xpc_retval | ||
1646 | xpc_allocate_msg(struct xpc_channel *ch, u32 flags, | ||
1647 | struct xpc_msg **address_of_msg) | ||
1648 | { | ||
1649 | struct xpc_msg *msg; | ||
1650 | enum xpc_retval ret; | ||
1651 | s64 put; | ||
1652 | |||
1653 | |||
1654 | /* this reference will be dropped in xpc_send_msg() */ | ||
1655 | xpc_msgqueue_ref(ch); | ||
1656 | |||
1657 | if (ch->flags & XPC_C_DISCONNECTING) { | ||
1658 | xpc_msgqueue_deref(ch); | ||
1659 | return ch->reason; | ||
1660 | } | ||
1661 | if (!(ch->flags & XPC_C_CONNECTED)) { | ||
1662 | xpc_msgqueue_deref(ch); | ||
1663 | return xpcNotConnected; | ||
1664 | } | ||
1665 | |||
1666 | |||
1667 | /* | ||
1668 | * Get the next available message entry from the local message queue. | ||
1669 | * If none are available, we'll make sure that we grab the latest | ||
1670 | * GP values. | ||
1671 | */ | ||
1672 | ret = xpcTimeout; | ||
1673 | |||
1674 | while (1) { | ||
1675 | |||
1676 | put = (volatile s64) ch->w_local_GP.put; | ||
1677 | if (put - (volatile s64) ch->w_remote_GP.get < | ||
1678 | ch->local_nentries) { | ||
1679 | |||
1680 | /* There are available message entries. We need to try | ||
1681 | * to secure one for ourselves. We'll do this by trying | ||
1682 | * to increment w_local_GP.put as long as someone else | ||
1683 | * doesn't beat us to it. If they do, we'll have to | ||
1684 | * try again. | ||
1685 | */ | ||
1686 | if (cmpxchg(&ch->w_local_GP.put, put, put + 1) == | ||
1687 | put) { | ||
1688 | /* we got the entry referenced by put */ | ||
1689 | break; | ||
1690 | } | ||
1691 | continue; /* try again */ | ||
1692 | } | ||
1693 | |||
1694 | |||
1695 | /* | ||
1696 | * There aren't any available msg entries at this time. | ||
1697 | * | ||
1698 | * In waiting for a message entry to become available, | ||
1699 | * we set a timeout in case the other side is not | ||
1700 | * sending completion IPIs. This lets us fake an IPI | ||
1701 | * that will cause the IPI handler to fetch the latest | ||
1702 | * GP values as if an IPI was sent by the other side. | ||
1703 | */ | ||
1704 | if (ret == xpcTimeout) { | ||
1705 | xpc_IPI_send_local_msgrequest(ch); | ||
1706 | } | ||
1707 | |||
1708 | if (flags & XPC_NOWAIT) { | ||
1709 | xpc_msgqueue_deref(ch); | ||
1710 | return xpcNoWait; | ||
1711 | } | ||
1712 | |||
1713 | ret = xpc_allocate_msg_wait(ch); | ||
1714 | if (ret != xpcInterrupted && ret != xpcTimeout) { | ||
1715 | xpc_msgqueue_deref(ch); | ||
1716 | return ret; | ||
1717 | } | ||
1718 | } | ||
1719 | |||
1720 | |||
1721 | /* get the message's address and initialize it */ | ||
1722 | msg = (struct xpc_msg *) ((u64) ch->local_msgqueue + | ||
1723 | (put % ch->local_nentries) * ch->msg_size); | ||
1724 | |||
1725 | |||
1726 | DBUG_ON(msg->flags != 0); | ||
1727 | msg->number = put; | ||
1728 | |||
1729 | dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, " | ||
1730 | "msg_number=%ld, partid=%d, channel=%d\n", put + 1, | ||
1731 | (void *) msg, msg->number, ch->partid, ch->number); | ||
1732 | |||
1733 | *address_of_msg = msg; | ||
1734 | |||
1735 | return xpcSuccess; | ||
1736 | } | ||
1737 | |||
1738 | |||
1739 | /* | ||
1740 | * Allocate an entry for a message from the message queue associated with the | ||
1741 | * specified channel. NOTE that this routine can sleep waiting for a message | ||
1742 | * entry to become available. To not sleep, pass in the XPC_NOWAIT flag. | ||
1743 | * | ||
1744 | * Arguments: | ||
1745 | * | ||
1746 | * partid - ID of partition to which the channel is connected. | ||
1747 | * ch_number - channel #. | ||
1748 | * flags - see xpc.h for valid flags. | ||
1749 | * payload - address of the allocated payload area pointer (filled in on | ||
1750 | * return) in which the user-defined message is constructed. | ||
1751 | */ | ||
1752 | enum xpc_retval | ||
1753 | xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload) | ||
1754 | { | ||
1755 | struct xpc_partition *part = &xpc_partitions[partid]; | ||
1756 | enum xpc_retval ret = xpcUnknownReason; | ||
1757 | 	struct xpc_msg *msg = NULL;	/* stays NULL if no entry is allocated */ | ||
1758 | |||
1759 | |||
1760 | DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); | ||
1761 | DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); | ||
1762 | |||
1763 | *payload = NULL; | ||
1764 | |||
1765 | if (xpc_part_ref(part)) { | ||
1766 | ret = xpc_allocate_msg(&part->channels[ch_number], flags, &msg); | ||
1767 | xpc_part_deref(part); | ||
1768 | |||
1769 | if (msg != NULL) { | ||
1770 | *payload = &msg->payload; | ||
1771 | } | ||
1772 | } | ||
1773 | |||
1774 | return ret; | ||
1775 | } | ||
1776 | |||
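/*
 * A minimal usage sketch (hypothetical caller, not part of this module):
 * allocate a message entry and construct the user-defined payload in it.
 * struct my_payload, MY_CH_NUMBER and my_alloc_msg() are illustrative
 * assumptions only.  XPC_NOWAIT is the flag referenced above; a flags
 * value of 0 instead lets xpc_allocate_msg_wait() block in 1-jiffy
 * slices until an entry frees up or the channel starts disconnecting.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include "xpc.h"	/* assumed: declares the xpc_initiate_*() interfaces */

#define MY_CH_NUMBER	0	/* hypothetical channel # registered via xpc_connect() */

struct my_payload {		/* hypothetical user-defined message layout */
	u32 opcode;
	u64 cookie;
};

static enum xpc_retval
my_alloc_msg(partid_t partid, struct my_payload **p)
{
	void *payload;
	enum xpc_retval ret;

	ret = xpc_initiate_allocate(partid, MY_CH_NUMBER, XPC_NOWAIT, &payload);
	if (ret != xpcSuccess) {
		/* e.g. xpcNoWait, xpcNotConnected or a disconnect reason */
		return ret;
	}

	*p = (struct my_payload *) payload;
	(*p)->opcode = 1;	/* construct the user-defined message */
	(*p)->cookie = 0;
	return xpcSuccess;
}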
1777 | |||
1778 | /* | ||
1779 | * Now we actually send the messages that are ready to be sent by advancing | ||
1780 | * the local message queue's Put value and then send an IPI to the recipient | ||
1781 | * partition. | ||
1782 | */ | ||
1783 | static void | ||
1784 | xpc_send_msgs(struct xpc_channel *ch, s64 initial_put) | ||
1785 | { | ||
1786 | struct xpc_msg *msg; | ||
1787 | s64 put = initial_put + 1; | ||
1788 | int send_IPI = 0; | ||
1789 | |||
1790 | |||
1791 | while (1) { | ||
1792 | |||
1793 | while (1) { | ||
1794 | if (put == (volatile s64) ch->w_local_GP.put) { | ||
1795 | break; | ||
1796 | } | ||
1797 | |||
1798 | msg = (struct xpc_msg *) ((u64) ch->local_msgqueue + | ||
1799 | (put % ch->local_nentries) * ch->msg_size); | ||
1800 | |||
1801 | if (!(msg->flags & XPC_M_READY)) { | ||
1802 | break; | ||
1803 | } | ||
1804 | |||
1805 | put++; | ||
1806 | } | ||
1807 | |||
1808 | if (put == initial_put) { | ||
1809 | /* nothing's changed */ | ||
1810 | break; | ||
1811 | } | ||
1812 | |||
1813 | if (cmpxchg_rel(&ch->local_GP->put, initial_put, put) != | ||
1814 | initial_put) { | ||
1815 | /* someone else beat us to it */ | ||
1816 | DBUG_ON((volatile s64) ch->local_GP->put < initial_put); | ||
1817 | break; | ||
1818 | } | ||
1819 | |||
1820 | /* we just set the new value of local_GP->put */ | ||
1821 | |||
1822 | dev_dbg(xpc_chan, "local_GP->put changed to %ld, partid=%d, " | ||
1823 | "channel=%d\n", put, ch->partid, ch->number); | ||
1824 | |||
1825 | send_IPI = 1; | ||
1826 | |||
1827 | /* | ||
1828 | * We need to ensure that the message referenced by | ||
1829 | * local_GP->put is not XPC_M_READY or that local_GP->put | ||
1830 | * equals w_local_GP.put, so we'll go have a look. | ||
1831 | */ | ||
1832 | initial_put = put; | ||
1833 | } | ||
1834 | |||
1835 | if (send_IPI) { | ||
1836 | xpc_IPI_send_msgrequest(ch); | ||
1837 | } | ||
1838 | } | ||
1839 | |||
1840 | |||
1841 | /* | ||
1842 | * Common code that does the actual sending of the message by advancing the | ||
1843 | * local message queue's Put value and sends an IPI to the partition the | ||
1844 | * message is being sent to. | ||
1845 | */ | ||
1846 | static enum xpc_retval | ||
1847 | xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type, | ||
1848 | xpc_notify_func func, void *key) | ||
1849 | { | ||
1850 | enum xpc_retval ret = xpcSuccess; | ||
1851 | struct xpc_notify *notify = NULL; // >>> to keep the compiler happy!! | ||
1852 | s64 put, msg_number = msg->number; | ||
1853 | |||
1854 | |||
1855 | DBUG_ON(notify_type == XPC_N_CALL && func == NULL); | ||
1856 | DBUG_ON((((u64) msg - (u64) ch->local_msgqueue) / ch->msg_size) != | ||
1857 | msg_number % ch->local_nentries); | ||
1858 | DBUG_ON(msg->flags & XPC_M_READY); | ||
1859 | |||
1860 | if (ch->flags & XPC_C_DISCONNECTING) { | ||
1861 | /* drop the reference grabbed in xpc_allocate_msg() */ | ||
1862 | xpc_msgqueue_deref(ch); | ||
1863 | return ch->reason; | ||
1864 | } | ||
1865 | |||
1866 | if (notify_type != 0) { | ||
1867 | /* | ||
1868 | * Tell the remote side to send an ACK interrupt when the | ||
1869 | * message has been delivered. | ||
1870 | */ | ||
1871 | msg->flags |= XPC_M_INTERRUPT; | ||
1872 | |||
1873 | atomic_inc(&ch->n_to_notify); | ||
1874 | |||
1875 | notify = &ch->notify_queue[msg_number % ch->local_nentries]; | ||
1876 | notify->func = func; | ||
1877 | notify->key = key; | ||
1878 | 		*(volatile u8 *) &notify->type = notify_type; | ||
1879 | |||
1880 | // >>> is a mb() needed here? | ||
1881 | |||
1882 | if (ch->flags & XPC_C_DISCONNECTING) { | ||
1883 | /* | ||
1884 | * An error occurred between our last error check and | ||
1885 | * this one. We will try to clear the type field from | ||
1886 | * the notify entry. If we succeed then | ||
1887 | * xpc_disconnect_channel() didn't already process | ||
1888 | * the notify entry. | ||
1889 | */ | ||
1890 | if (cmpxchg(¬ify->type, notify_type, 0) == | ||
1891 | notify_type) { | ||
1892 | atomic_dec(&ch->n_to_notify); | ||
1893 | ret = ch->reason; | ||
1894 | } | ||
1895 | |||
1896 | /* drop the reference grabbed in xpc_allocate_msg() */ | ||
1897 | xpc_msgqueue_deref(ch); | ||
1898 | return ret; | ||
1899 | } | ||
1900 | } | ||
1901 | |||
1902 | msg->flags |= XPC_M_READY; | ||
1903 | |||
1904 | /* | ||
1905 | * The preceding store of msg->flags must occur before the following | ||
1906 | * load of ch->local_GP->put. | ||
1907 | */ | ||
1908 | mb(); | ||
1909 | |||
1910 | /* see if the message is next in line to be sent, if so send it */ | ||
1911 | |||
1912 | put = ch->local_GP->put; | ||
1913 | if (put == msg_number) { | ||
1914 | xpc_send_msgs(ch, put); | ||
1915 | } | ||
1916 | |||
1917 | /* drop the reference grabbed in xpc_allocate_msg() */ | ||
1918 | xpc_msgqueue_deref(ch); | ||
1919 | return ret; | ||
1920 | } | ||
1921 | |||
1922 | |||
1923 | /* | ||
1924 | * Send a message previously allocated using xpc_initiate_allocate() on the | ||
1925 | * specified channel connected to the specified partition. | ||
1926 | * | ||
1927 | * This routine will not wait for the message to be received, nor will | ||
1928 | * notification be given when it does happen. Once this routine has returned | ||
1929 | * the message entry allocated via xpc_initiate_allocate() is no longer | ||
1930 | * accessible to the caller. | ||
1931 | * | ||
1932 | * This routine, although called by users, does not call xpc_part_ref() to | ||
1933 | * ensure that the partition infrastructure is in place. It relies on the | ||
1934 | * fact that we called xpc_msgqueue_ref() in xpc_allocate_msg(). | ||
1935 | * | ||
1936 | * Arguments: | ||
1937 | * | ||
1938 | * partid - ID of partition to which the channel is connected. | ||
1939 | * ch_number - channel # to send message on. | ||
1940 | * payload - pointer to the payload area allocated via | ||
1941 | * xpc_initiate_allocate(). | ||
1942 | */ | ||
1943 | enum xpc_retval | ||
1944 | xpc_initiate_send(partid_t partid, int ch_number, void *payload) | ||
1945 | { | ||
1946 | struct xpc_partition *part = &xpc_partitions[partid]; | ||
1947 | struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); | ||
1948 | enum xpc_retval ret; | ||
1949 | |||
1950 | |||
1951 | dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg, | ||
1952 | partid, ch_number); | ||
1953 | |||
1954 | DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); | ||
1955 | DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); | ||
1956 | DBUG_ON(msg == NULL); | ||
1957 | |||
1958 | ret = xpc_send_msg(&part->channels[ch_number], msg, 0, NULL, NULL); | ||
1959 | |||
1960 | return ret; | ||
1961 | } | ||
1962 | |||
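/*
 * Continuing the hypothetical sketch above: hand the entry obtained from
 * xpc_initiate_allocate() to xpc_initiate_send().  Per the rules above,
 * the payload pointer must not be referenced again once this call has
 * returned, whether or not the send succeeded.
 */

static enum xpc_retval
my_send_msg(partid_t partid, struct my_payload *p)
{
	enum xpc_retval ret;

	ret = xpc_initiate_send(partid, MY_CH_NUMBER, p);
	if (ret != xpcSuccess) {
		printk(KERN_DEBUG "xpc_initiate_send() failed, ret=%d\n", ret);
	}
	/* do not touch *p past this point */
	return ret;
}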
1963 | |||
1964 | /* | ||
1965 | * Send a message previously allocated using xpc_initiate_allocate on the | ||
1966 | * specified channel connected to the specified partition. | ||
1967 | * | ||
1968 | * This routine will not wait for the message to be sent. Once this routine | ||
1969 | * has returned the message entry allocated via xpc_initiate_allocate() is no | ||
1970 | * longer accessible to the caller. | ||
1971 | * | ||
1972 | * Once the remote end of the channel has received the message, the function | ||
1973 | * passed as an argument to xpc_initiate_send_notify() will be called. This | ||
1974 | * allows the sender to free up or re-use any buffers referenced by the | ||
1975 | * message, but does NOT mean the message has been processed at the remote | ||
1976 | * end by a receiver. | ||
1977 | * | ||
1978 | * If this routine returns an error, the caller's function will NOT be called. | ||
1979 | * | ||
1980 | * This routine, although called by users, does not call xpc_part_ref() to | ||
1981 | * ensure that the partition infrastructure is in place. It relies on the | ||
1982 | * fact that we called xpc_msgqueue_ref() in xpc_allocate_msg(). | ||
1983 | * | ||
1984 | * Arguments: | ||
1985 | * | ||
1986 | * partid - ID of partition to which the channel is connected. | ||
1987 | * ch_number - channel # to send message on. | ||
1988 | * payload - pointer to the payload area allocated via | ||
1989 | * xpc_initiate_allocate(). | ||
1990 | * func - function to call with asynchronous notification of message | ||
1991 | * receipt. THIS FUNCTION MUST BE NON-BLOCKING. | ||
1992 | * key - user-defined key to be passed to the function when it's called. | ||
1993 | */ | ||
1994 | enum xpc_retval | ||
1995 | xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload, | ||
1996 | xpc_notify_func func, void *key) | ||
1997 | { | ||
1998 | struct xpc_partition *part = &xpc_partitions[partid]; | ||
1999 | struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); | ||
2000 | enum xpc_retval ret; | ||
2001 | |||
2002 | |||
2003 | dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *) msg, | ||
2004 | partid, ch_number); | ||
2005 | |||
2006 | DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); | ||
2007 | DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); | ||
2008 | DBUG_ON(msg == NULL); | ||
2009 | DBUG_ON(func == NULL); | ||
2010 | |||
2011 | ret = xpc_send_msg(&part->channels[ch_number], msg, XPC_N_CALL, | ||
2012 | func, key); | ||
2013 | return ret; | ||
2014 | } | ||
2015 | |||
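/*
 * A notified send, continuing the hypothetical sketch above.  The callback
 * must be non-blocking, as required above; its signature here is an
 * assumption chosen to match the (reason, partid, channel #, key)
 * arguments that xpc_notify_func callouts receive.
 */

static void
my_msg_delivered(enum xpc_retval reason, partid_t partid, int ch_number,
			void *key)
{
	/*
	 * Must not block.  Being called only means the buffers referenced
	 * by the message (tracked via `key`) may be reused; it does NOT
	 * mean the receiver has processed the message.
	 */
	printk(KERN_DEBUG "notify callout: reason=%d, partid=%d, channel=%d\n",
		reason, (int) partid, ch_number);
}

static enum xpc_retval
my_send_msg_notify(partid_t partid, struct my_payload *p)
{
	/* `p` doubles as the user-defined key handed back to the callback */
	return xpc_initiate_send_notify(partid, MY_CH_NUMBER, p,
					my_msg_delivered, p);
}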
2016 | |||
2017 | static struct xpc_msg * | ||
2018 | xpc_pull_remote_msg(struct xpc_channel *ch, s64 get) | ||
2019 | { | ||
2020 | struct xpc_partition *part = &xpc_partitions[ch->partid]; | ||
2021 | struct xpc_msg *remote_msg, *msg; | ||
2022 | u32 msg_index, nmsgs; | ||
2023 | u64 msg_offset; | ||
2024 | enum xpc_retval ret; | ||
2025 | |||
2026 | |||
2027 | if (down_interruptible(&ch->msg_to_pull_sema) != 0) { | ||
2028 | /* we were interrupted by a signal */ | ||
2029 | return NULL; | ||
2030 | } | ||
2031 | |||
2032 | while (get >= ch->next_msg_to_pull) { | ||
2033 | |||
2034 | /* pull as many messages as are ready and able to be pulled */ | ||
2035 | |||
2036 | msg_index = ch->next_msg_to_pull % ch->remote_nentries; | ||
2037 | |||
2038 | DBUG_ON(ch->next_msg_to_pull >= | ||
2039 | (volatile s64) ch->w_remote_GP.put); | ||
2040 | nmsgs = (volatile s64) ch->w_remote_GP.put - | ||
2041 | ch->next_msg_to_pull; | ||
2042 | if (msg_index + nmsgs > ch->remote_nentries) { | ||
2043 | /* ignore the ones that wrap the msg queue for now */ | ||
2044 | nmsgs = ch->remote_nentries - msg_index; | ||
2045 | } | ||
2046 | |||
2047 | msg_offset = msg_index * ch->msg_size; | ||
2048 | msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + | ||
2049 | msg_offset); | ||
2050 | remote_msg = (struct xpc_msg *) (ch->remote_msgqueue_pa + | ||
2051 | msg_offset); | ||
2052 | |||
2053 | if ((ret = xpc_pull_remote_cachelines(part, msg, remote_msg, | ||
2054 | nmsgs * ch->msg_size)) != xpcSuccess) { | ||
2055 | |||
2056 | dev_dbg(xpc_chan, "failed to pull %d msgs starting with" | ||
2057 | " msg %ld from partition %d, channel=%d, " | ||
2058 | "ret=%d\n", nmsgs, ch->next_msg_to_pull, | ||
2059 | ch->partid, ch->number, ret); | ||
2060 | |||
2061 | XPC_DEACTIVATE_PARTITION(part, ret); | ||
2062 | |||
2063 | up(&ch->msg_to_pull_sema); | ||
2064 | return NULL; | ||
2065 | } | ||
2066 | |||
2067 | mb(); /* >>> this may not be needed, we're not sure */ | ||
2068 | |||
2069 | ch->next_msg_to_pull += nmsgs; | ||
2070 | } | ||
2071 | |||
2072 | up(&ch->msg_to_pull_sema); | ||
2073 | |||
2074 | /* return the message we were looking for */ | ||
2075 | msg_offset = (get % ch->remote_nentries) * ch->msg_size; | ||
2076 | msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + msg_offset); | ||
2077 | |||
2078 | return msg; | ||
2079 | } | ||
2080 | |||
2081 | |||
2082 | /* | ||
2083 | * Get a message to be delivered. | ||
2084 | */ | ||
2085 | static struct xpc_msg * | ||
2086 | xpc_get_deliverable_msg(struct xpc_channel *ch) | ||
2087 | { | ||
2088 | struct xpc_msg *msg = NULL; | ||
2089 | s64 get; | ||
2090 | |||
2091 | |||
2092 | do { | ||
2093 | if ((volatile u32) ch->flags & XPC_C_DISCONNECTING) { | ||
2094 | break; | ||
2095 | } | ||
2096 | |||
2097 | get = (volatile s64) ch->w_local_GP.get; | ||
2098 | if (get == (volatile s64) ch->w_remote_GP.put) { | ||
2099 | break; | ||
2100 | } | ||
2101 | |||
2102 | /* There are messages waiting to be pulled and delivered. | ||
2103 | * We need to try to secure one for ourselves. We'll do this | ||
2104 | * by trying to increment w_local_GP.get and hope that no one | ||
2105 | * else beats us to it. If they do, we'll simply have | ||
2106 | * to try again for the next one. | ||
2107 | */ | ||
2108 | |||
2109 | if (cmpxchg(&ch->w_local_GP.get, get, get + 1) == get) { | ||
2110 | /* we got the entry referenced by get */ | ||
2111 | |||
2112 | dev_dbg(xpc_chan, "w_local_GP.get changed to %ld, " | ||
2113 | "partid=%d, channel=%d\n", get + 1, | ||
2114 | ch->partid, ch->number); | ||
2115 | |||
2116 | /* pull the message from the remote partition */ | ||
2117 | |||
2118 | msg = xpc_pull_remote_msg(ch, get); | ||
2119 | |||
2120 | DBUG_ON(msg != NULL && msg->number != get); | ||
2121 | DBUG_ON(msg != NULL && (msg->flags & XPC_M_DONE)); | ||
2122 | DBUG_ON(msg != NULL && !(msg->flags & XPC_M_READY)); | ||
2123 | |||
2124 | break; | ||
2125 | } | ||
2126 | |||
2127 | } while (1); | ||
2128 | |||
2129 | return msg; | ||
2130 | } | ||
2131 | |||
2132 | |||
2133 | /* | ||
2134 | * Deliver a message to its intended recipient. | ||
2135 | */ | ||
2136 | void | ||
2137 | xpc_deliver_msg(struct xpc_channel *ch) | ||
2138 | { | ||
2139 | struct xpc_msg *msg; | ||
2140 | |||
2141 | |||
2142 | if ((msg = xpc_get_deliverable_msg(ch)) != NULL) { | ||
2143 | |||
2144 | /* | ||
2145 | * This ref is taken to protect the payload itself from being | ||
2146 | * freed before the user is finished with it, which the user | ||
2147 | * indicates by calling xpc_initiate_received(). | ||
2148 | */ | ||
2149 | xpc_msgqueue_ref(ch); | ||
2150 | |||
2151 | atomic_inc(&ch->kthreads_active); | ||
2152 | |||
2153 | if (ch->func != NULL) { | ||
2154 | dev_dbg(xpc_chan, "ch->func() called, msg=0x%p, " | ||
2155 | "msg_number=%ld, partid=%d, channel=%d\n", | ||
2156 | (void *) msg, msg->number, ch->partid, | ||
2157 | ch->number); | ||
2158 | |||
2159 | /* deliver the message to its intended recipient */ | ||
2160 | ch->func(xpcMsgReceived, ch->partid, ch->number, | ||
2161 | &msg->payload, ch->key); | ||
2162 | |||
2163 | dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, " | ||
2164 | "msg_number=%ld, partid=%d, channel=%d\n", | ||
2165 | (void *) msg, msg->number, ch->partid, | ||
2166 | ch->number); | ||
2167 | } | ||
2168 | |||
2169 | atomic_dec(&ch->kthreads_active); | ||
2170 | } | ||
2171 | } | ||
2172 | |||
2173 | |||
2174 | /* | ||
2175 | * Now we actually acknowledge the messages that have been delivered and ack'd | ||
2176 | * by advancing the cached remote message queue's Get value and if requested | ||
2177 | * send an IPI to the message sender's partition. | ||
2178 | */ | ||
2179 | static void | ||
2180 | xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags) | ||
2181 | { | ||
2182 | struct xpc_msg *msg; | ||
2183 | s64 get = initial_get + 1; | ||
2184 | int send_IPI = 0; | ||
2185 | |||
2186 | |||
2187 | while (1) { | ||
2188 | |||
2189 | while (1) { | ||
2190 | if (get == (volatile s64) ch->w_local_GP.get) { | ||
2191 | break; | ||
2192 | } | ||
2193 | |||
2194 | msg = (struct xpc_msg *) ((u64) ch->remote_msgqueue + | ||
2195 | (get % ch->remote_nentries) * ch->msg_size); | ||
2196 | |||
2197 | if (!(msg->flags & XPC_M_DONE)) { | ||
2198 | break; | ||
2199 | } | ||
2200 | |||
2201 | msg_flags |= msg->flags; | ||
2202 | get++; | ||
2203 | } | ||
2204 | |||
2205 | if (get == initial_get) { | ||
2206 | /* nothing's changed */ | ||
2207 | break; | ||
2208 | } | ||
2209 | |||
2210 | if (cmpxchg_rel(&ch->local_GP->get, initial_get, get) != | ||
2211 | initial_get) { | ||
2212 | /* someone else beat us to it */ | ||
2213 | DBUG_ON((volatile s64) ch->local_GP->get <= | ||
2214 | initial_get); | ||
2215 | break; | ||
2216 | } | ||
2217 | |||
2218 | /* we just set the new value of local_GP->get */ | ||
2219 | |||
2220 | dev_dbg(xpc_chan, "local_GP->get changed to %ld, partid=%d, " | ||
2221 | "channel=%d\n", get, ch->partid, ch->number); | ||
2222 | |||
2223 | send_IPI = (msg_flags & XPC_M_INTERRUPT); | ||
2224 | |||
2225 | /* | ||
2226 | * We need to ensure that the message referenced by | ||
2227 | * local_GP->get is not XPC_M_DONE or that local_GP->get | ||
2228 | * equals w_local_GP.get, so we'll go have a look. | ||
2229 | */ | ||
2230 | initial_get = get; | ||
2231 | } | ||
2232 | |||
2233 | if (send_IPI) { | ||
2234 | xpc_IPI_send_msgrequest(ch); | ||
2235 | } | ||
2236 | } | ||
2237 | |||
2238 | |||
2239 | /* | ||
2240 | * Acknowledge receipt of a delivered message. | ||
2241 | * | ||
2242 | * If a message has XPC_M_INTERRUPT set, send an interrupt to the partition | ||
2243 | * that sent the message. | ||
2244 | * | ||
2245 | * This function, although called by users, does not call xpc_part_ref() to | ||
2246 | * ensure that the partition infrastructure is in place. It relies on the | ||
2247 | * fact that we called xpc_msgqueue_ref() in xpc_deliver_msg(). | ||
2248 | * | ||
2249 | * Arguments: | ||
2250 | * | ||
2251 | * partid - ID of partition to which the channel is connected. | ||
2252 | * ch_number - channel # message received on. | ||
2253 | * payload - pointer to the payload area allocated via | ||
2254 | * xpc_initiate_allocate(). | ||
2255 | */ | ||
2256 | void | ||
2257 | xpc_initiate_received(partid_t partid, int ch_number, void *payload) | ||
2258 | { | ||
2259 | struct xpc_partition *part = &xpc_partitions[partid]; | ||
2260 | struct xpc_channel *ch; | ||
2261 | struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); | ||
2262 | s64 get, msg_number = msg->number; | ||
2263 | |||
2264 | |||
2265 | DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); | ||
2266 | DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); | ||
2267 | |||
2268 | ch = &part->channels[ch_number]; | ||
2269 | |||
2270 | dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n", | ||
2271 | (void *) msg, msg_number, ch->partid, ch->number); | ||
2272 | |||
2273 | DBUG_ON((((u64) msg - (u64) ch->remote_msgqueue) / ch->msg_size) != | ||
2274 | msg_number % ch->remote_nentries); | ||
2275 | DBUG_ON(msg->flags & XPC_M_DONE); | ||
2276 | |||
2277 | msg->flags |= XPC_M_DONE; | ||
2278 | |||
2279 | /* | ||
2280 | * The preceding store of msg->flags must occur before the following | ||
2281 | * load of ch->local_GP->get. | ||
2282 | */ | ||
2283 | mb(); | ||
2284 | |||
2285 | /* | ||
2286 | * See if this message is next in line to be acknowledged as having | ||
2287 | * been delivered. | ||
2288 | */ | ||
2289 | get = ch->local_GP->get; | ||
2290 | if (get == msg_number) { | ||
2291 | xpc_acknowledge_msgs(ch, get, msg->flags); | ||
2292 | } | ||
2293 | |||
2294 | /* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg() */ | ||
2295 | xpc_msgqueue_deref(ch); | ||
2296 | } | ||
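/*
 * Receive-side sketch (hypothetical user of this interface): a channel
 * function of the kind registered via xpc_connect(), with its argument
 * order mirroring the ch->func() callouts made in xpc_deliver_msg() and
 * xpc_disconnected_callout() above.  struct my_payload is the illustrative
 * type from the earlier sketches.
 */

static void
my_channel_func(enum xpc_retval reason, partid_t partid, int ch_number,
			void *data, void *key)
{
	struct my_payload *p;

	if (reason != xpcMsgReceived) {
		/* a connect/disconnect callout; there is no payload here */
		return;
	}

	p = (struct my_payload *) data;
	/* ... consume p->opcode and p->cookie here ... */

	/*
	 * Tell XPC we're done with the payload area so the message can be
	 * acknowledged (and the sender interrupted if it requested notify).
	 */
	xpc_initiate_received(partid, ch_number, p);
}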
2297 | |||
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c new file mode 100644 index 000000000000..177ddb748ebe --- /dev/null +++ b/arch/ia64/sn/kernel/xpc_main.c | |||
@@ -0,0 +1,1064 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved. | ||
7 | */ | ||
8 | |||
9 | |||
10 | /* | ||
11 | * Cross Partition Communication (XPC) support - standard version. | ||
12 | * | ||
13 | * XPC provides a message passing capability that crosses partition | ||
14 | * boundaries. This module is made up of two parts: | ||
15 | * | ||
16 | * partition This part detects the presence/absence of other | ||
17 | * partitions. It provides a heartbeat and monitors | ||
18 | * the heartbeats of other partitions. | ||
19 | * | ||
20 | * channel This part manages the channels and sends/receives | ||
21 | * messages across them to/from other partitions. | ||
22 | * | ||
23 | * There are a couple of additional functions residing in XP, which | ||
24 | * provide an interface to XPC for its users. | ||
25 | * | ||
26 | * | ||
27 | * Caveats: | ||
28 | * | ||
29 | * . We currently have no way to determine which nasid an IPI came | ||
30 | * from. Thus, xpc_IPI_send() does a remote AMO write followed by | ||
31 | * an IPI. The AMO indicates where data is to be pulled from, so | ||
32 | * after the IPI arrives, the remote partition checks the AMO word. | ||
33 | * The IPI can actually arrive before the AMO however, so other code | ||
34 | * must periodically check for this case. Also, remote AMO operations | ||
35 | * do not reliably time out. Thus we do a remote PIO read solely to | ||
36 | * know whether the remote partition is down and whether we should | ||
37 | * stop sending IPIs to it. This remote PIO read operation is set up | ||
38 | * in a special nofault region so SAL knows to ignore (and clean up) | ||
39 | * any errors due to the remote AMO write, PIO read, and/or PIO | ||
40 | * write operations. | ||
41 | * | ||
42 | * If/when new hardware solves this IPI problem, we should abandon | ||
43 | * the current approach. | ||
44 | * | ||
45 | */ | ||
46 | |||
47 | |||
48 | #include <linux/kernel.h> | ||
49 | #include <linux/module.h> | ||
50 | #include <linux/init.h> | ||
51 | #include <linux/sched.h> | ||
52 | #include <linux/syscalls.h> | ||
53 | #include <linux/cache.h> | ||
54 | #include <linux/interrupt.h> | ||
55 | #include <linux/slab.h> | ||
56 | #include <asm/sn/intr.h> | ||
57 | #include <asm/sn/sn_sal.h> | ||
58 | #include <asm/uaccess.h> | ||
59 | #include "xpc.h" | ||
60 | |||
61 | |||
62 | /* define two XPC debug device structures to be used with dev_dbg() et al */ | ||
63 | |||
64 | struct device_driver xpc_dbg_name = { | ||
65 | .name = "xpc" | ||
66 | }; | ||
67 | |||
68 | struct device xpc_part_dbg_subname = { | ||
69 | .bus_id = {0}, /* set to "part" at xpc_init() time */ | ||
70 | .driver = &xpc_dbg_name | ||
71 | }; | ||
72 | |||
73 | struct device xpc_chan_dbg_subname = { | ||
74 | .bus_id = {0}, /* set to "chan" at xpc_init() time */ | ||
75 | .driver = &xpc_dbg_name | ||
76 | }; | ||
77 | |||
78 | struct device *xpc_part = &xpc_part_dbg_subname; | ||
79 | struct device *xpc_chan = &xpc_chan_dbg_subname; | ||
80 | |||
81 | |||
82 | /* systune related variables for /proc/sys directories */ | ||
83 | |||
84 | static int xpc_hb_min = 1; | ||
85 | static int xpc_hb_max = 10; | ||
86 | |||
87 | static int xpc_hb_check_min = 10; | ||
88 | static int xpc_hb_check_max = 120; | ||
89 | |||
90 | static ctl_table xpc_sys_xpc_hb_dir[] = { | ||
91 | { | ||
92 | 1, | ||
93 | "hb_interval", | ||
94 | &xpc_hb_interval, | ||
95 | sizeof(int), | ||
96 | 0644, | ||
97 | NULL, | ||
98 | &proc_dointvec_minmax, | ||
99 | &sysctl_intvec, | ||
100 | NULL, | ||
101 | &xpc_hb_min, &xpc_hb_max | ||
102 | }, | ||
103 | { | ||
104 | 2, | ||
105 | "hb_check_interval", | ||
106 | &xpc_hb_check_interval, | ||
107 | sizeof(int), | ||
108 | 0644, | ||
109 | NULL, | ||
110 | &proc_dointvec_minmax, | ||
111 | &sysctl_intvec, | ||
112 | NULL, | ||
113 | &xpc_hb_check_min, &xpc_hb_check_max | ||
114 | }, | ||
115 | {0} | ||
116 | }; | ||
117 | static ctl_table xpc_sys_xpc_dir[] = { | ||
118 | { | ||
119 | 1, | ||
120 | "hb", | ||
121 | NULL, | ||
122 | 0, | ||
123 | 0555, | ||
124 | xpc_sys_xpc_hb_dir | ||
125 | }, | ||
126 | {0} | ||
127 | }; | ||
128 | static ctl_table xpc_sys_dir[] = { | ||
129 | { | ||
130 | 1, | ||
131 | "xpc", | ||
132 | NULL, | ||
133 | 0, | ||
134 | 0555, | ||
135 | xpc_sys_xpc_dir | ||
136 | }, | ||
137 | {0} | ||
138 | }; | ||
139 | static struct ctl_table_header *xpc_sysctl; | ||
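/*
 * Once registered in xpc_init(), the tables above surface two tunables as
 * /proc/sys/xpc/hb/hb_interval (clamped to 1..10) and
 * /proc/sys/xpc/hb/hb_check_interval (clamped to 10..120), both mode 0644.
 * The values are in seconds; xpc_hb_beater() and xpc_hb_checker() below
 * multiply them by HZ.
 */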
140 | |||
141 | |||
142 | /* #of IRQs received */ | ||
143 | static atomic_t xpc_act_IRQ_rcvd; | ||
144 | |||
145 | /* IRQ handler notifies this wait queue on receipt of an IRQ */ | ||
146 | static DECLARE_WAIT_QUEUE_HEAD(xpc_act_IRQ_wq); | ||
147 | |||
148 | static unsigned long xpc_hb_check_timeout; | ||
149 | |||
150 | /* xpc_hb_checker thread exited notification */ | ||
151 | static DECLARE_MUTEX_LOCKED(xpc_hb_checker_exited); | ||
152 | |||
153 | /* xpc_discovery thread exited notification */ | ||
154 | static DECLARE_MUTEX_LOCKED(xpc_discovery_exited); | ||
155 | |||
156 | |||
157 | static struct timer_list xpc_hb_timer; | ||
158 | |||
159 | |||
160 | static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *); | ||
161 | |||
162 | |||
163 | /* | ||
164 | * Notify the heartbeat check thread that an IRQ has been received. | ||
165 | */ | ||
166 | static irqreturn_t | ||
167 | xpc_act_IRQ_handler(int irq, void *dev_id, struct pt_regs *regs) | ||
168 | { | ||
169 | atomic_inc(&xpc_act_IRQ_rcvd); | ||
170 | wake_up_interruptible(&xpc_act_IRQ_wq); | ||
171 | return IRQ_HANDLED; | ||
172 | } | ||
173 | |||
174 | |||
175 | /* | ||
176 | * Timer to produce the heartbeat.  The timer structure's function is | ||
177 | * already set when this is initially called. A tunable is used to | ||
178 | * specify when the next timeout should occur. | ||
179 | */ | ||
180 | static void | ||
181 | xpc_hb_beater(unsigned long dummy) | ||
182 | { | ||
183 | xpc_vars->heartbeat++; | ||
184 | |||
185 | if (jiffies >= xpc_hb_check_timeout) { | ||
186 | wake_up_interruptible(&xpc_act_IRQ_wq); | ||
187 | } | ||
188 | |||
189 | xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ); | ||
190 | add_timer(&xpc_hb_timer); | ||
191 | } | ||
192 | |||
193 | |||
194 | /* | ||
195 | * This thread is responsible for nearly all of the partition | ||
196 | * activation/deactivation. | ||
197 | */ | ||
198 | static int | ||
199 | xpc_hb_checker(void *ignore) | ||
200 | { | ||
201 | int last_IRQ_count = 0; | ||
202 | int new_IRQ_count; | ||
203 | int force_IRQ=0; | ||
204 | |||
205 | |||
206 | /* this thread was marked active by xpc_hb_init() */ | ||
207 | |||
208 | daemonize(XPC_HB_CHECK_THREAD_NAME); | ||
209 | |||
210 | set_cpus_allowed(current, cpumask_of_cpu(XPC_HB_CHECK_CPU)); | ||
211 | |||
212 | xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ); | ||
213 | |||
214 | while (!(volatile int) xpc_exiting) { | ||
215 | |||
216 | /* wait for IRQ or timeout */ | ||
217 | (void) wait_event_interruptible(xpc_act_IRQ_wq, | ||
218 | (last_IRQ_count < atomic_read(&xpc_act_IRQ_rcvd) || | ||
219 | jiffies >= xpc_hb_check_timeout || | ||
220 | (volatile int) xpc_exiting)); | ||
221 | |||
222 | dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have " | ||
223 | "been received\n", | ||
224 | (int) (xpc_hb_check_timeout - jiffies), | ||
225 | atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count); | ||
226 | |||
227 | |||
228 | /* checking of remote heartbeats is skewed by IRQ handling */ | ||
229 | if (jiffies >= xpc_hb_check_timeout) { | ||
230 | dev_dbg(xpc_part, "checking remote heartbeats\n"); | ||
231 | xpc_check_remote_hb(); | ||
232 | |||
233 | /* | ||
234 | * We need to periodically recheck to ensure no | ||
235 | * IPI/AMO pairs have been missed. That check | ||
236 | * must always reset xpc_hb_check_timeout. | ||
237 | */ | ||
238 | force_IRQ = 1; | ||
239 | } | ||
240 | |||
241 | |||
242 | new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd); | ||
243 | if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) { | ||
244 | force_IRQ = 0; | ||
245 | |||
246 | dev_dbg(xpc_part, "found an IRQ to process; will be " | ||
247 | "resetting xpc_hb_check_timeout\n"); | ||
248 | |||
249 | last_IRQ_count += xpc_identify_act_IRQ_sender(); | ||
250 | if (last_IRQ_count < new_IRQ_count) { | ||
251 | /* retry once to help avoid missing AMO */ | ||
252 | (void) xpc_identify_act_IRQ_sender(); | ||
253 | } | ||
254 | last_IRQ_count = new_IRQ_count; | ||
255 | |||
256 | xpc_hb_check_timeout = jiffies + | ||
257 | (xpc_hb_check_interval * HZ); | ||
258 | } | ||
259 | } | ||
260 | |||
261 | dev_dbg(xpc_part, "heartbeat checker is exiting\n"); | ||
262 | |||
263 | |||
264 | /* mark this thread as inactive */ | ||
265 | up(&xpc_hb_checker_exited); | ||
266 | return 0; | ||
267 | } | ||
268 | |||
269 | |||
270 | /* | ||
271 | * This thread will attempt to discover other partitions to activate | ||
272 | * based on info provided by SAL. This new thread is short lived and | ||
273 | * will exit once discovery is complete. | ||
274 | */ | ||
275 | static int | ||
276 | xpc_initiate_discovery(void *ignore) | ||
277 | { | ||
278 | daemonize(XPC_DISCOVERY_THREAD_NAME); | ||
279 | |||
280 | xpc_discovery(); | ||
281 | |||
282 | dev_dbg(xpc_part, "discovery thread is exiting\n"); | ||
283 | |||
284 | /* mark this thread as inactive */ | ||
285 | up(&xpc_discovery_exited); | ||
286 | return 0; | ||
287 | } | ||
288 | |||
289 | |||
290 | /* | ||
291 | * Establish first contact with the remote partition. This involves pulling | ||
292 | * the XPC per partition variables from the remote partition and waiting for | ||
293 | * the remote partition to pull ours. | ||
294 | */ | ||
295 | static enum xpc_retval | ||
296 | xpc_make_first_contact(struct xpc_partition *part) | ||
297 | { | ||
298 | enum xpc_retval ret; | ||
299 | |||
300 | |||
301 | while ((ret = xpc_pull_remote_vars_part(part)) != xpcSuccess) { | ||
302 | if (ret != xpcRetry) { | ||
303 | XPC_DEACTIVATE_PARTITION(part, ret); | ||
304 | return ret; | ||
305 | } | ||
306 | |||
307 | dev_dbg(xpc_chan, "waiting to make first contact with " | ||
308 | "partition %d\n", XPC_PARTID(part)); | ||
309 | |||
310 | /* wait a 1/4 of a second or so */ | ||
311 | set_current_state(TASK_INTERRUPTIBLE); | ||
312 | 		(void) schedule_timeout(HZ / 4); | ||
313 | |||
314 | if (part->act_state == XPC_P_DEACTIVATING) { | ||
315 | return part->reason; | ||
316 | } | ||
317 | } | ||
318 | |||
319 | return xpc_mark_partition_active(part); | ||
320 | } | ||
321 | |||
322 | |||
323 | /* | ||
324 | * The first kthread assigned to a newly activated partition is the one | ||
325 | * created by XPC HB with which it calls xpc_partition_up(). XPC hangs on to | ||
326 | * that kthread until the partition is brought down, at which time that kthread | ||
327 | * returns back to XPC HB. (The return of that kthread will signify to XPC HB | ||
328 | * that XPC has dismantled all communication infrastructure for the associated | ||
329 | * partition.) This kthread becomes the channel manager for that partition. | ||
330 | * | ||
331 | * Each active partition has a channel manager, who, besides connecting and | ||
332 | * disconnecting channels, will ensure that each of the partition's connected | ||
333 | * channels has the required number of assigned kthreads to get the work done. | ||
334 | */ | ||
335 | static void | ||
336 | xpc_channel_mgr(struct xpc_partition *part) | ||
337 | { | ||
338 | while (part->act_state != XPC_P_DEACTIVATING || | ||
339 | atomic_read(&part->nchannels_active) > 0) { | ||
340 | |||
341 | xpc_process_channel_activity(part); | ||
342 | |||
343 | |||
344 | /* | ||
345 | * Wait until we've been requested to activate kthreads or | ||
346 | * all of the channel's message queues have been torn down or | ||
347 | * a signal is pending. | ||
348 | * | ||
349 | * channel_mgr_requests is set to 1 after being awakened. This | ||
350 | * is done to prevent the channel mgr from making one pass | ||
351 | * through the loop for each request, since it will | ||
352 | * be servicing all the requests in one pass. The reason it's | ||
353 | * set to 1 instead of 0 is so that other kthreads will know | ||
354 | * that the channel mgr is running and won't bother trying to | ||
355 | * wake it up. | ||
356 | */ | ||
357 | atomic_dec(&part->channel_mgr_requests); | ||
358 | (void) wait_event_interruptible(part->channel_mgr_wq, | ||
359 | (atomic_read(&part->channel_mgr_requests) > 0 || | ||
360 | (volatile u64) part->local_IPI_amo != 0 || | ||
361 | ((volatile u8) part->act_state == | ||
362 | XPC_P_DEACTIVATING && | ||
363 | atomic_read(&part->nchannels_active) == 0))); | ||
364 | atomic_set(&part->channel_mgr_requests, 1); | ||
365 | |||
366 | // >>> Does it need to wakeup periodically as well? In case we | ||
367 | // >>> miscalculated the #of kthreads to wakeup or create? | ||
368 | } | ||
369 | } | ||
370 | |||
371 | |||
372 | /* | ||
373 | * When XPC HB determines that a partition has come up, it will create a new | ||
374 | * kthread and that kthread will call this function to attempt to set up the | ||
375 | * basic infrastructure used for Cross Partition Communication with the newly | ||
376 | * upped partition. | ||
377 | * | ||
378 | * The kthread that was created by XPC HB and which set up the XPC | ||
379 | * infrastructure will remain assigned to the partition until the partition | ||
380 | * goes down, at which time the kthread will tear down the XPC infrastructure | ||
381 | * and then exit. | ||
382 | * | ||
383 | * XPC HB will put the remote partition's XPC per partition specific variables | ||
384 | * physical address into xpc_partitions[partid].remote_vars_part_pa prior to | ||
385 | * calling xpc_partition_up(). | ||
386 | */ | ||
387 | static void | ||
388 | xpc_partition_up(struct xpc_partition *part) | ||
389 | { | ||
390 | DBUG_ON(part->channels != NULL); | ||
391 | |||
392 | dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part)); | ||
393 | |||
394 | if (xpc_setup_infrastructure(part) != xpcSuccess) { | ||
395 | return; | ||
396 | } | ||
397 | |||
398 | /* | ||
399 | * The kthread that XPC HB called us with will become the | ||
400 | * channel manager for this partition. It will not return | ||
401 | * back to XPC HB until the partition's XPC infrastructure | ||
402 | * has been dismantled. | ||
403 | */ | ||
404 | |||
405 | (void) xpc_part_ref(part); /* this will always succeed */ | ||
406 | |||
407 | if (xpc_make_first_contact(part) == xpcSuccess) { | ||
408 | xpc_channel_mgr(part); | ||
409 | } | ||
410 | |||
411 | xpc_part_deref(part); | ||
412 | |||
413 | xpc_teardown_infrastructure(part); | ||
414 | } | ||
415 | |||
416 | |||
417 | static int | ||
418 | xpc_activating(void *__partid) | ||
419 | { | ||
420 | partid_t partid = (u64) __partid; | ||
421 | struct xpc_partition *part = &xpc_partitions[partid]; | ||
422 | unsigned long irq_flags; | ||
423 | 	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO - 1 }; | ||
424 | int ret; | ||
425 | |||
426 | |||
427 | DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); | ||
428 | |||
429 | spin_lock_irqsave(&part->act_lock, irq_flags); | ||
430 | |||
431 | if (part->act_state == XPC_P_DEACTIVATING) { | ||
432 | part->act_state = XPC_P_INACTIVE; | ||
433 | spin_unlock_irqrestore(&part->act_lock, irq_flags); | ||
434 | part->remote_rp_pa = 0; | ||
435 | return 0; | ||
436 | } | ||
437 | |||
438 | /* indicate the thread is activating */ | ||
439 | DBUG_ON(part->act_state != XPC_P_ACTIVATION_REQ); | ||
440 | part->act_state = XPC_P_ACTIVATING; | ||
441 | |||
442 | XPC_SET_REASON(part, 0, 0); | ||
443 | spin_unlock_irqrestore(&part->act_lock, irq_flags); | ||
444 | |||
445 | dev_dbg(xpc_part, "bringing partition %d up\n", partid); | ||
446 | |||
447 | daemonize("xpc%02d", partid); | ||
448 | |||
449 | /* | ||
450 | * This thread needs to run at a realtime priority to prevent a | ||
451 | * significant performance degradation. | ||
452 | */ | ||
453 | ret = sched_setscheduler(current, SCHED_FIFO, ¶m); | ||
454 | if (ret != 0) { | ||
455 | dev_warn(xpc_part, "unable to set pid %d to a realtime " | ||
456 | "priority, ret=%d\n", current->pid, ret); | ||
457 | } | ||
458 | |||
459 | /* allow this thread and its children to run on any CPU */ | ||
460 | set_cpus_allowed(current, CPU_MASK_ALL); | ||
461 | |||
462 | /* | ||
463 | * Register the remote partition's AMOs with SAL so it can handle | ||
464 | * and cleanup errors within that address range should the remote | ||
465 | * partition go down. We don't unregister this range because it is | ||
466 | * difficult to tell when outstanding writes to the remote partition | ||
467 | * are finished and thus when it is safe to unregister. This should | ||
468 | * not result in wasted space in the SAL xp_addr_region table because | ||
469 | * we should get the same page for remote_amos_page_pa after module | ||
470 | * reloads and system reboots. | ||
471 | */ | ||
472 | if (sn_register_xp_addr_region(part->remote_amos_page_pa, | ||
473 | PAGE_SIZE, 1) < 0) { | ||
474 | dev_warn(xpc_part, "xpc_partition_up(%d) failed to register " | ||
475 | "xp_addr region\n", partid); | ||
476 | |||
477 | spin_lock_irqsave(&part->act_lock, irq_flags); | ||
478 | part->act_state = XPC_P_INACTIVE; | ||
479 | XPC_SET_REASON(part, xpcPhysAddrRegFailed, __LINE__); | ||
480 | spin_unlock_irqrestore(&part->act_lock, irq_flags); | ||
481 | part->remote_rp_pa = 0; | ||
482 | return 0; | ||
483 | } | ||
484 | |||
485 | XPC_ALLOW_HB(partid, xpc_vars); | ||
486 | xpc_IPI_send_activated(part); | ||
487 | |||
488 | |||
489 | /* | ||
490 | * xpc_partition_up() holds this thread and marks this partition as | ||
491 | * XPC_P_ACTIVE by calling xpc_hb_mark_active(). | ||
492 | */ | ||
493 | (void) xpc_partition_up(part); | ||
494 | |||
495 | xpc_mark_partition_inactive(part); | ||
496 | |||
497 | if (part->reason == xpcReactivating) { | ||
498 | /* interrupting ourselves results in activating partition */ | ||
499 | xpc_IPI_send_reactivate(part); | ||
500 | } | ||
501 | |||
502 | return 0; | ||
503 | } | ||
504 | |||
505 | |||
506 | void | ||
507 | xpc_activate_partition(struct xpc_partition *part) | ||
508 | { | ||
509 | partid_t partid = XPC_PARTID(part); | ||
510 | unsigned long irq_flags; | ||
511 | pid_t pid; | ||
512 | |||
513 | |||
514 | spin_lock_irqsave(&part->act_lock, irq_flags); | ||
515 | |||
516 | pid = kernel_thread(xpc_activating, (void *) ((u64) partid), 0); | ||
517 | |||
518 | DBUG_ON(part->act_state != XPC_P_INACTIVE); | ||
519 | |||
520 | if (pid > 0) { | ||
521 | part->act_state = XPC_P_ACTIVATION_REQ; | ||
522 | XPC_SET_REASON(part, xpcCloneKThread, __LINE__); | ||
523 | } else { | ||
524 | XPC_SET_REASON(part, xpcCloneKThreadFailed, __LINE__); | ||
525 | } | ||
526 | |||
527 | spin_unlock_irqrestore(&part->act_lock, irq_flags); | ||
528 | } | ||
529 | |||
530 | |||
531 | /* | ||
532 | * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified | ||
533 | * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more | ||
534 | * than one partition, we use an AMO_t structure per partition to indicate | ||
535 | * whether a partition has sent an IPI or not. >>> If it has, then wake up the | ||
536 | * associated kthread to handle it. | ||
537 | * | ||
538 | * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC | ||
539 | * running on other partitions. | ||
540 | * | ||
541 | * Noteworthy Arguments: | ||
542 | * | ||
543 | * irq - Interrupt ReQuest number. NOT USED. | ||
544 | * | ||
545 | * dev_id - partid of IPI's potential sender. | ||
546 | * | ||
547 | * regs - processor's context before the processor entered | ||
548 | * interrupt code. NOT USED. | ||
549 | */ | ||
550 | irqreturn_t | ||
551 | xpc_notify_IRQ_handler(int irq, void *dev_id, struct pt_regs *regs) | ||
552 | { | ||
553 | partid_t partid = (partid_t) (u64) dev_id; | ||
554 | struct xpc_partition *part = &xpc_partitions[partid]; | ||
555 | |||
556 | |||
557 | DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); | ||
558 | |||
559 | if (xpc_part_ref(part)) { | ||
560 | xpc_check_for_channel_activity(part); | ||
561 | |||
562 | xpc_part_deref(part); | ||
563 | } | ||
564 | return IRQ_HANDLED; | ||
565 | } | ||
566 | |||
567 | |||
568 | /* | ||
569 | * Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor | ||
570 | * because the write to their associated IPI amo completed after the IRQ/IPI | ||
571 | * was received. | ||
572 | */ | ||
573 | void | ||
574 | xpc_dropped_IPI_check(struct xpc_partition *part) | ||
575 | { | ||
576 | if (xpc_part_ref(part)) { | ||
577 | xpc_check_for_channel_activity(part); | ||
578 | |||
579 | part->dropped_IPI_timer.expires = jiffies + | ||
580 | XPC_P_DROPPED_IPI_WAIT; | ||
581 | add_timer(&part->dropped_IPI_timer); | ||
582 | xpc_part_deref(part); | ||
583 | } | ||
584 | } | ||
585 | |||
586 | |||
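/*
 * Make sure `needed` kthreads are available to deliver messages on the
 * channel: wake up to `needed` idle kthreads first, then create any
 * remainder, bounded by ch->kthreads_assigned_limit.
 */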
587 | void | ||
588 | xpc_activate_kthreads(struct xpc_channel *ch, int needed) | ||
589 | { | ||
590 | int idle = atomic_read(&ch->kthreads_idle); | ||
591 | int assigned = atomic_read(&ch->kthreads_assigned); | ||
592 | int wakeup; | ||
593 | |||
594 | |||
595 | DBUG_ON(needed <= 0); | ||
596 | |||
597 | if (idle > 0) { | ||
598 | wakeup = (needed > idle) ? idle : needed; | ||
599 | needed -= wakeup; | ||
600 | |||
601 | dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, " | ||
602 | "channel=%d\n", wakeup, ch->partid, ch->number); | ||
603 | |||
604 | /* only wakeup the requested number of kthreads */ | ||
605 | wake_up_nr(&ch->idle_wq, wakeup); | ||
606 | } | ||
607 | |||
608 | if (needed <= 0) { | ||
609 | return; | ||
610 | } | ||
611 | |||
612 | if (needed + assigned > ch->kthreads_assigned_limit) { | ||
613 | needed = ch->kthreads_assigned_limit - assigned; | ||
614 | // >>>should never be less than 0 | ||
615 | if (needed <= 0) { | ||
616 | return; | ||
617 | } | ||
618 | } | ||
619 | |||
620 | dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n", | ||
621 | needed, ch->partid, ch->number); | ||
622 | |||
623 | xpc_create_kthreads(ch, needed); | ||
624 | } | ||
625 | |||
626 | |||
627 | /* | ||
628 | * This function is where XPC's kthreads wait for messages to deliver. | ||
629 | */ | ||
630 | static void | ||
631 | xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch) | ||
632 | { | ||
633 | do { | ||
634 | /* deliver messages to their intended recipients */ | ||
635 | |||
636 | while ((volatile s64) ch->w_local_GP.get < | ||
637 | (volatile s64) ch->w_remote_GP.put && | ||
638 | !((volatile u32) ch->flags & | ||
639 | XPC_C_DISCONNECTING)) { | ||
640 | xpc_deliver_msg(ch); | ||
641 | } | ||
642 | |||
643 | if (atomic_inc_return(&ch->kthreads_idle) > | ||
644 | ch->kthreads_idle_limit) { | ||
645 | /* too many idle kthreads on this channel */ | ||
646 | atomic_dec(&ch->kthreads_idle); | ||
647 | break; | ||
648 | } | ||
649 | |||
650 | dev_dbg(xpc_chan, "idle kthread calling " | ||
651 | "wait_event_interruptible_exclusive()\n"); | ||
652 | |||
653 | (void) wait_event_interruptible_exclusive(ch->idle_wq, | ||
654 | ((volatile s64) ch->w_local_GP.get < | ||
655 | (volatile s64) ch->w_remote_GP.put || | ||
656 | ((volatile u32) ch->flags & | ||
657 | XPC_C_DISCONNECTING))); | ||
658 | |||
659 | atomic_dec(&ch->kthreads_idle); | ||
660 | |||
661 | } while (!((volatile u32) ch->flags & XPC_C_DISCONNECTING)); | ||
662 | } | ||
663 | |||
664 | |||
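/*
 * Entry point for each message-delivery kthread created by
 * xpc_create_kthreads().  The first kthread assigned to a connected
 * channel makes the connect callout; all of them then deliver messages
 * via xpc_kthread_waitmsgs() and drop the channel/partition references
 * taken on their behalf before exiting.
 */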
665 | static int | ||
666 | xpc_daemonize_kthread(void *args) | ||
667 | { | ||
668 | partid_t partid = XPC_UNPACK_ARG1(args); | ||
669 | u16 ch_number = XPC_UNPACK_ARG2(args); | ||
670 | struct xpc_partition *part = &xpc_partitions[partid]; | ||
671 | struct xpc_channel *ch; | ||
672 | int n_needed; | ||
673 | |||
674 | |||
675 | daemonize("xpc%02dc%d", partid, ch_number); | ||
676 | |||
677 | dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n", | ||
678 | partid, ch_number); | ||
679 | |||
680 | ch = &part->channels[ch_number]; | ||
681 | |||
682 | if (!(ch->flags & XPC_C_DISCONNECTING)) { | ||
683 | DBUG_ON(!(ch->flags & XPC_C_CONNECTED)); | ||
684 | |||
685 | /* let registerer know that connection has been established */ | ||
686 | |||
687 | if (atomic_read(&ch->kthreads_assigned) == 1) { | ||
688 | xpc_connected_callout(ch); | ||
689 | |||
690 | /* | ||
691 | * It is possible that while the callout was being | ||
692 | * made that the remote partition sent some messages. | ||
693 | * If that is the case, we may need to activate | ||
694 | * additional kthreads to help deliver them. We only | ||
695 | * need one less than total #of messages to deliver. | ||
696 | */ | ||
697 | n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1; | ||
698 | if (n_needed > 0 && | ||
699 | !(ch->flags & XPC_C_DISCONNECTING)) { | ||
700 | xpc_activate_kthreads(ch, n_needed); | ||
701 | } | ||
702 | } | ||
703 | |||
704 | xpc_kthread_waitmsgs(part, ch); | ||
705 | } | ||
706 | |||
707 | if (atomic_dec_return(&ch->kthreads_assigned) == 0 && | ||
708 | ((ch->flags & XPC_C_CONNECTCALLOUT) || | ||
709 | (ch->reason != xpcUnregistering && | ||
710 | ch->reason != xpcOtherUnregistering))) { | ||
711 | xpc_disconnected_callout(ch); | ||
712 | } | ||
713 | |||
714 | |||
715 | xpc_msgqueue_deref(ch); | ||
716 | |||
717 | dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n", | ||
718 | partid, ch_number); | ||
719 | |||
720 | xpc_part_deref(part); | ||
721 | return 0; | ||
722 | } | ||
723 | |||
724 | |||
725 | /* | ||
726 | * For each partition that XPC has established communications with, there is | ||
727 | * a minimum of one kernel thread assigned to perform any operation that | ||
728 | * may potentially sleep or block (basically the callouts to the asynchronous | ||
729 | * functions registered via xpc_connect()). | ||
730 | * | ||
731 | * Additional kthreads are created and destroyed by XPC as the workload | ||
732 | * demands. | ||
733 | * | ||
734 | * A kthread is assigned to one of the active channels that exists for a given | ||
735 | * partition. | ||
736 | */ | ||
737 | void | ||
738 | xpc_create_kthreads(struct xpc_channel *ch, int needed) | ||
739 | { | ||
740 | unsigned long irq_flags; | ||
741 | pid_t pid; | ||
742 | u64 args = XPC_PACK_ARGS(ch->partid, ch->number); | ||
743 | |||
744 | |||
745 | while (needed-- > 0) { | ||
746 | pid = kernel_thread(xpc_daemonize_kthread, (void *) args, 0); | ||
747 | if (pid < 0) { | ||
748 | /* the fork failed */ | ||
749 | |||
750 | if (atomic_read(&ch->kthreads_assigned) < | ||
751 | ch->kthreads_idle_limit) { | ||
752 | /* | ||
753 | * Flag this as an error only if we have an | ||
754 | * insufficient #of kthreads for the channel | ||
755 | * to function. | ||
756 | * | ||
757 | * No xpc_msgqueue_ref() is needed here since | ||
758 | * the channel mgr is doing this. | ||
759 | */ | ||
760 | spin_lock_irqsave(&ch->lock, irq_flags); | ||
761 | XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources, | ||
762 | &irq_flags); | ||
763 | spin_unlock_irqrestore(&ch->lock, irq_flags); | ||
764 | } | ||
765 | break; | ||
766 | } | ||
767 | |||
768 | /* | ||
769 | * The following is done on behalf of the newly created | ||
770 | * kthread. That kthread is responsible for doing the | ||
771 | * counterpart to the following before it exits. | ||
772 | */ | ||
773 | (void) xpc_part_ref(&xpc_partitions[ch->partid]); | ||
774 | xpc_msgqueue_ref(ch); | ||
775 | atomic_inc(&ch->kthreads_assigned); | ||
776 | ch->kthreads_created++; // >>> temporary debug only!!! | ||
777 | } | ||
778 | } | ||
779 | |||
780 | |||
781 | void | ||
782 | xpc_disconnect_wait(int ch_number) | ||
783 | { | ||
784 | partid_t partid; | ||
785 | struct xpc_partition *part; | ||
786 | struct xpc_channel *ch; | ||
787 | |||
788 | |||
789 | /* now wait for all callouts to the caller's function to cease */ | ||
790 | for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { | ||
791 | part = &xpc_partitions[partid]; | ||
792 | |||
793 | if (xpc_part_ref(part)) { | ||
794 | ch = &part->channels[ch_number]; | ||
795 | |||
796 | // >>> how do we keep from falling into the window between our check and going | ||
797 | // >>> down and coming back up where sema is re-inited? | ||
798 | if (ch->flags & XPC_C_SETUP) { | ||
799 | (void) down(&ch->teardown_sema); | ||
800 | } | ||
801 | |||
802 | xpc_part_deref(part); | ||
803 | } | ||
804 | } | ||
805 | } | ||
806 | |||
807 | |||
808 | static void | ||
809 | xpc_do_exit(void) | ||
810 | { | ||
811 | partid_t partid; | ||
812 | int active_part_count; | ||
813 | struct xpc_partition *part; | ||
814 | |||
815 | |||
816 | /* now it's time to eliminate our heartbeat */ | ||
817 | del_timer_sync(&xpc_hb_timer); | ||
818 | xpc_vars->heartbeating_to_mask = 0; | ||
819 | |||
820 | /* indicate to others that our reserved page is uninitialized */ | ||
821 | xpc_rsvd_page->vars_pa = 0; | ||
822 | |||
823 | /* | ||
824 | * Ignore all incoming interrupts. Without interrupts the heartbeat | ||
825 | * checker won't activate any new partitions that may come up. | ||
826 | */ | ||
827 | free_irq(SGI_XPC_ACTIVATE, NULL); | ||
828 | |||
829 | /* | ||
830 | * Cause the heartbeat checker and the discovery threads to exit. | ||
831 | * We don't want them attempting to activate new partitions as we | ||
832 | * try to deactivate the existing ones. | ||
833 | */ | ||
834 | xpc_exiting = 1; | ||
835 | wake_up_interruptible(&xpc_act_IRQ_wq); | ||
836 | |||
837 | /* wait for the heartbeat checker thread to mark itself inactive */ | ||
838 | down(&xpc_hb_checker_exited); | ||
839 | |||
840 | /* wait for the discovery thread to mark itself inactive */ | ||
841 | down(&xpc_discovery_exited); | ||
842 | |||
843 | |||
844 | set_current_state(TASK_INTERRUPTIBLE); | ||
845 | 	schedule_timeout(HZ * 3 / 10); | ||
846 | set_current_state(TASK_RUNNING); | ||
847 | |||
848 | |||
849 | /* wait for all partitions to become inactive */ | ||
850 | |||
851 | do { | ||
852 | active_part_count = 0; | ||
853 | |||
854 | for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { | ||
855 | part = &xpc_partitions[partid]; | ||
856 | if (part->act_state != XPC_P_INACTIVE) { | ||
857 | active_part_count++; | ||
858 | |||
859 | XPC_DEACTIVATE_PARTITION(part, xpcUnloading); | ||
860 | } | ||
861 | } | ||
862 | |||
863 | if (active_part_count) { | ||
864 | set_current_state(TASK_INTERRUPTIBLE); | ||
865 | 			schedule_timeout(HZ * 3 / 10); | ||
866 | set_current_state(TASK_RUNNING); | ||
867 | } | ||
868 | |||
869 | } while (active_part_count > 0); | ||
870 | |||
871 | |||
872 | /* close down protections for IPI operations */ | ||
873 | xpc_restrict_IPI_ops(); | ||
874 | |||
875 | |||
876 | /* clear the interface to XPC's functions */ | ||
877 | xpc_clear_interface(); | ||
878 | |||
879 | if (xpc_sysctl) { | ||
880 | unregister_sysctl_table(xpc_sysctl); | ||
881 | } | ||
882 | } | ||
883 | |||
884 | |||
885 | int __init | ||
886 | xpc_init(void) | ||
887 | { | ||
888 | int ret; | ||
889 | partid_t partid; | ||
890 | struct xpc_partition *part; | ||
891 | pid_t pid; | ||
892 | |||
893 | |||
894 | /* | ||
895 | * xpc_remote_copy_buffer is used as a temporary buffer for bte_copy'ng | ||
896 | * both a partition's reserved page and its XPC variables. Its size was | ||
897 | * based on the size of a reserved page. So we need to ensure that the | ||
898 | * XPC variables will fit as well. | ||
899 | */ | ||
900 | if (XPC_VARS_ALIGNED_SIZE > XPC_RSVD_PAGE_ALIGNED_SIZE) { | ||
901 | dev_err(xpc_part, "xpc_remote_copy_buffer is not big enough\n"); | ||
902 | return -EPERM; | ||
903 | } | ||
904 | DBUG_ON((u64) xpc_remote_copy_buffer != | ||
905 | L1_CACHE_ALIGN((u64) xpc_remote_copy_buffer)); | ||
906 | |||
907 | snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part"); | ||
908 | snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan"); | ||
909 | |||
910 | xpc_sysctl = register_sysctl_table(xpc_sys_dir, 1); | ||
911 | |||
912 | /* | ||
913 | * The first few fields of each entry of xpc_partitions[] need to | ||
914 | * be initialized now so that calls to xpc_connect() and | ||
915 | * xpc_disconnect() can be made prior to the activation of any remote | ||
916 | * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE | ||
917 | * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING | ||
918 | * PARTITION HAS BEEN ACTIVATED. | ||
919 | */ | ||
920 | for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { | ||
921 | part = &xpc_partitions[partid]; | ||
922 | |||
923 | DBUG_ON((u64) part != L1_CACHE_ALIGN((u64) part)); | ||
924 | |||
925 | part->act_IRQ_rcvd = 0; | ||
926 | spin_lock_init(&part->act_lock); | ||
927 | part->act_state = XPC_P_INACTIVE; | ||
928 | XPC_SET_REASON(part, 0, 0); | ||
929 | part->setup_state = XPC_P_UNSET; | ||
930 | init_waitqueue_head(&part->teardown_wq); | ||
931 | atomic_set(&part->references, 0); | ||
932 | } | ||
933 | |||
934 | /* | ||
935 | * Open up protections for IPI operations (and AMO operations on | ||
936 | * Shub 1.1 systems). | ||
937 | */ | ||
938 | xpc_allow_IPI_ops(); | ||
939 | |||
940 | /* | ||
941 | * Interrupts being processed will increment this atomic variable and | ||
942 | * awaken the heartbeat thread which will process the interrupts. | ||
943 | */ | ||
944 | atomic_set(&xpc_act_IRQ_rcvd, 0); | ||
945 | |||
946 | /* | ||
947 | * This is safe to do before the xpc_hb_checker thread has started | ||
948 | * because the handler releases a wait queue. If an interrupt is | ||
949 | * received before the thread is waiting, it will not go to sleep, | ||
950 | * but rather immediately process the interrupt. | ||
951 | */ | ||
952 | ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0, | ||
953 | "xpc hb", NULL); | ||
954 | if (ret != 0) { | ||
955 | dev_err(xpc_part, "can't register ACTIVATE IRQ handler, " | ||
956 | "errno=%d\n", -ret); | ||
957 | |||
958 | xpc_restrict_IPI_ops(); | ||
959 | |||
960 | if (xpc_sysctl) { | ||
961 | unregister_sysctl_table(xpc_sysctl); | ||
962 | } | ||
963 | return -EBUSY; | ||
964 | } | ||
965 | |||
966 | /* | ||
967 | * Fill the partition reserved page with the information needed by | ||
968 | * other partitions to discover we are alive and establish initial | ||
969 | * communications. | ||
970 | */ | ||
971 | xpc_rsvd_page = xpc_rsvd_page_init(); | ||
972 | if (xpc_rsvd_page == NULL) { | ||
973 | dev_err(xpc_part, "could not setup our reserved page\n"); | ||
974 | |||
975 | free_irq(SGI_XPC_ACTIVATE, NULL); | ||
976 | xpc_restrict_IPI_ops(); | ||
977 | |||
978 | if (xpc_sysctl) { | ||
979 | unregister_sysctl_table(xpc_sysctl); | ||
980 | } | ||
981 | return -EBUSY; | ||
982 | } | ||
983 | |||
984 | |||
985 | /* | ||
986 | * Set the beating to other partitions into motion. This is | ||
987 | * the last requirement for other partitions' discovery to | ||
988 | * initiate communications with us. | ||
989 | */ | ||
990 | init_timer(&xpc_hb_timer); | ||
991 | xpc_hb_timer.function = xpc_hb_beater; | ||
992 | xpc_hb_beater(0); | ||
993 | |||
994 | |||
995 | /* | ||
996 | * The real work-horse behind xpc. This processes incoming | ||
997 | * interrupts and monitors remote heartbeats. | ||
998 | */ | ||
999 | pid = kernel_thread(xpc_hb_checker, NULL, 0); | ||
1000 | if (pid < 0) { | ||
1001 | dev_err(xpc_part, "failed while forking hb check thread\n"); | ||
1002 | |||
1003 | /* indicate to others that our reserved page is uninitialized */ | ||
1004 | xpc_rsvd_page->vars_pa = 0; | ||
1005 | |||
1006 | del_timer_sync(&xpc_hb_timer); | ||
1007 | free_irq(SGI_XPC_ACTIVATE, NULL); | ||
1008 | xpc_restrict_IPI_ops(); | ||
1009 | |||
1010 | if (xpc_sysctl) { | ||
1011 | unregister_sysctl_table(xpc_sysctl); | ||
1012 | } | ||
1013 | return -EBUSY; | ||
1014 | } | ||
1015 | |||
1016 | |||
1017 | /* | ||
1018 | * Start up a thread that will attempt to discover other partitions to | ||
1019 | * activate based on info provided by SAL. This new thread is short | ||
1020 | * lived and will exit once discovery is complete. | ||
1021 | */ | ||
1022 | pid = kernel_thread(xpc_initiate_discovery, NULL, 0); | ||
1023 | if (pid < 0) { | ||
1024 | dev_err(xpc_part, "failed while forking discovery thread\n"); | ||
1025 | |||
1026 | /* mark this new thread as a non-starter */ | ||
1027 | up(&xpc_discovery_exited); | ||
1028 | |||
1029 | xpc_do_exit(); | ||
1030 | return -EBUSY; | ||
1031 | } | ||
1032 | |||
1033 | |||
1034 | /* set the interface to point at XPC's functions */ | ||
1035 | xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect, | ||
1036 | xpc_initiate_allocate, xpc_initiate_send, | ||
1037 | xpc_initiate_send_notify, xpc_initiate_received, | ||
1038 | xpc_initiate_partid_to_nasids); | ||
1039 | |||
1040 | return 0; | ||
1041 | } | ||
1042 | module_init(xpc_init); | ||
1043 | |||
1044 | |||
1045 | void __exit | ||
1046 | xpc_exit(void) | ||
1047 | { | ||
1048 | xpc_do_exit(); | ||
1049 | } | ||
1050 | module_exit(xpc_exit); | ||
1051 | |||
1052 | |||
1053 | MODULE_AUTHOR("Silicon Graphics, Inc."); | ||
1054 | MODULE_DESCRIPTION("Cross Partition Communication (XPC) support"); | ||
1055 | MODULE_LICENSE("GPL"); | ||
1056 | |||
1057 | module_param(xpc_hb_interval, int, 0); | ||
1058 | MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between " | ||
1059 | "heartbeat increments."); | ||
1060 | |||
1061 | module_param(xpc_hb_check_interval, int, 0); | ||
1062 | MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between " | ||
1063 | "heartbeat checks."); | ||
1064 | |||
diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/arch/ia64/sn/kernel/xpc_partition.c new file mode 100644 index 000000000000..2c3c4a8af553 --- /dev/null +++ b/arch/ia64/sn/kernel/xpc_partition.c | |||
@@ -0,0 +1,984 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved. | ||
7 | */ | ||
8 | |||
9 | |||
10 | /* | ||
11 | * Cross Partition Communication (XPC) partition support. | ||
12 | * | ||
13 | * This is the part of XPC that detects the presence/absence of | ||
14 | * other partitions. It provides a heartbeat and monitors the | ||
15 | * heartbeats of other partitions. | ||
16 | * | ||
17 | */ | ||
18 | |||
19 | |||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/sysctl.h> | ||
22 | #include <linux/cache.h> | ||
23 | #include <linux/mmzone.h> | ||
24 | #include <linux/nodemask.h> | ||
25 | #include <asm/sn/bte.h> | ||
26 | #include <asm/sn/intr.h> | ||
27 | #include <asm/sn/sn_sal.h> | ||
28 | #include <asm/sn/nodepda.h> | ||
29 | #include <asm/sn/addrs.h> | ||
30 | #include "xpc.h" | ||
31 | |||
32 | |||
33 | /* XPC is exiting flag */ | ||
34 | int xpc_exiting; | ||
35 | |||
36 | |||
37 | /* SH_IPI_ACCESS shub register value on startup */ | ||
38 | static u64 xpc_sh1_IPI_access; | ||
39 | static u64 xpc_sh2_IPI_access0; | ||
40 | static u64 xpc_sh2_IPI_access1; | ||
41 | static u64 xpc_sh2_IPI_access2; | ||
42 | static u64 xpc_sh2_IPI_access3; | ||
43 | |||
44 | |||
45 | /* original protection values for each node */ | ||
46 | u64 xpc_prot_vec[MAX_COMPACT_NODES]; | ||
47 | |||
48 | |||
49 | /* this partition's reserved page */ | ||
50 | struct xpc_rsvd_page *xpc_rsvd_page; | ||
51 | |||
52 | /* this partition's XPC variables (within the reserved page) */ | ||
53 | struct xpc_vars *xpc_vars; | ||
54 | struct xpc_vars_part *xpc_vars_part; | ||
55 | |||
56 | |||
57 | /* | ||
58 | * For performance reasons, each entry of xpc_partitions[] is cacheline | ||
59 | * aligned. And xpc_partitions[] is padded with an additional entry at the | ||
60 | * end so that the last legitimate entry doesn't share its cacheline with | ||
61 | * another variable. | ||
62 | */ | ||
63 | struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1]; | ||
64 | |||
65 | |||
66 | /* | ||
67 | * Generic buffer used to store a local copy of the remote partition's | ||
68 | * reserved page or XPC variables. | ||
69 | * | ||
70 | * xpc_discovery runs only once and is a separate thread that is | ||
71 | * very likely going to be processing in parallel with receiving | ||
72 | * interrupts. | ||
73 | */ | ||
74 | char ____cacheline_aligned | ||
75 | xpc_remote_copy_buffer[XPC_RSVD_PAGE_ALIGNED_SIZE]; | ||
76 | |||
77 | |||
78 | /* systune related variables */ | ||
79 | int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL; | ||
80 | int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_TIMEOUT; | ||
81 | |||
82 | |||
83 | /* | ||
84 | * Given a nasid, get the physical address of the partition's reserved page | ||
85 | * for that nasid. This function returns 0 on any error. | ||
86 | */ | ||
87 | static u64 | ||
88 | xpc_get_rsvd_page_pa(int nasid, u64 buf, u64 buf_size) | ||
89 | { | ||
90 | bte_result_t bte_res; | ||
91 | s64 status; | ||
92 | u64 cookie = 0; | ||
93 | u64 rp_pa = nasid; /* seed with nasid */ | ||
94 | u64 len = 0; | ||
95 | |||
96 | |||
97 | while (1) { | ||
98 | |||
99 | status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa, | ||
100 | &len); | ||
101 | |||
102 | dev_dbg(xpc_part, "SAL returned with status=%li, cookie=" | ||
103 | "0x%016lx, address=0x%016lx, len=0x%016lx\n", | ||
104 | status, cookie, rp_pa, len); | ||
105 | |||
106 | if (status != SALRET_MORE_PASSES) { | ||
107 | break; | ||
108 | } | ||
109 | |||
110 | if (len > buf_size) { | ||
111 | dev_err(xpc_part, "len (=0x%016lx) > buf_size\n", len); | ||
112 | status = SALRET_ERROR; | ||
113 | break; | ||
114 | } | ||
115 | |||
116 | bte_res = xp_bte_copy(rp_pa, ia64_tpa(buf), buf_size, | ||
117 | (BTE_NOTIFY | BTE_WACQUIRE), NULL); | ||
118 | if (bte_res != BTE_SUCCESS) { | ||
119 | dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res); | ||
120 | status = SALRET_ERROR; | ||
121 | break; | ||
122 | } | ||
123 | } | ||
124 | |||
125 | if (status != SALRET_OK) { | ||
126 | rp_pa = 0; | ||
127 | } | ||
128 | dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa); | ||
129 | return rp_pa; | ||
130 | } | ||
131 | |||
132 | |||
133 | /* | ||
134 | * Fill the partition reserved page with the information needed by | ||
135 | * other partitions to discover we are alive and establish initial | ||
136 | * communications. | ||
137 | */ | ||
138 | struct xpc_rsvd_page * | ||
139 | xpc_rsvd_page_init(void) | ||
140 | { | ||
141 | struct xpc_rsvd_page *rp; | ||
142 | AMO_t *amos_page; | ||
143 | u64 rp_pa, next_cl, nasid_array = 0; | ||
144 | int i, ret; | ||
145 | |||
146 | |||
147 | /* get the local reserved page's address */ | ||
148 | |||
149 | rp_pa = xpc_get_rsvd_page_pa(cnodeid_to_nasid(0), | ||
150 | (u64) xpc_remote_copy_buffer, | ||
151 | XPC_RSVD_PAGE_ALIGNED_SIZE); | ||
152 | if (rp_pa == 0) { | ||
153 | dev_err(xpc_part, "SAL failed to locate the reserved page\n"); | ||
154 | return NULL; | ||
155 | } | ||
156 | rp = (struct xpc_rsvd_page *) __va(rp_pa); | ||
157 | |||
158 | if (rp->partid != sn_partition_id) { | ||
159 | dev_err(xpc_part, "the reserved page's partid of %d should be " | ||
160 | "%d\n", rp->partid, sn_partition_id); | ||
161 | return NULL; | ||
162 | } | ||
163 | |||
164 | rp->version = XPC_RP_VERSION; | ||
165 | |||
166 | /* | ||
167 | * Place the XPC variables on the cache line following the | ||
168 | * reserved page structure. | ||
169 | */ | ||
170 | next_cl = (u64) rp + XPC_RSVD_PAGE_ALIGNED_SIZE; | ||
171 | xpc_vars = (struct xpc_vars *) next_cl; | ||
172 | |||
173 | /* | ||
174 | * Before clearing xpc_vars, see if a page of AMOs had been previously | ||
175 | * allocated. If not we'll need to allocate one and set permissions | ||
176 | * so that cross-partition AMOs are allowed. | ||
177 | * | ||
178 | * The allocated AMO page needs MCA reporting to remain disabled after | ||
179 | * XPC has unloaded. To make this work, we keep a copy of the pointer | ||
180 | * to this page (i.e., amos_page) in the struct xpc_vars structure, | ||
181 | * which is pointed to by the reserved page, and re-use that saved copy | ||
182 | * on subsequent loads of XPC. This AMO page is never freed, and its | ||
183 | * memory protections are never restricted. | ||
184 | */ | ||
185 | if ((amos_page = xpc_vars->amos_page) == NULL) { | ||
186 | amos_page = (AMO_t *) mspec_kalloc_page(0); | ||
187 | if (amos_page == NULL) { | ||
188 | dev_err(xpc_part, "can't allocate page of AMOs\n"); | ||
189 | return NULL; | ||
190 | } | ||
191 | |||
192 | /* | ||
193 | * Open up AMO-R/W to cpu. This is done for Shub 1.1 systems | ||
194 | * when xpc_allow_IPI_ops() is called via xpc_hb_init(). | ||
195 | */ | ||
196 | if (!enable_shub_wars_1_1()) { | ||
197 | ret = sn_change_memprotect(ia64_tpa((u64) amos_page), | ||
198 | PAGE_SIZE, SN_MEMPROT_ACCESS_CLASS_1, | ||
199 | &nasid_array); | ||
200 | if (ret != 0) { | ||
201 | dev_err(xpc_part, "can't change memory " | ||
202 | "protections\n"); | ||
203 | mspec_kfree_page((unsigned long) amos_page); | ||
204 | return NULL; | ||
205 | } | ||
206 | } | ||
207 | } else if (!IS_AMO_ADDRESS((u64) amos_page)) { | ||
208 | /* | ||
209 | * EFI's XPBOOT can also set amos_page in the reserved page, | ||
210 | * but it happens to leave it as an uncached physical address | ||
211 | * and we need it to be an uncached virtual address, so we'll have to | ||
212 | * convert it. | ||
213 | */ | ||
214 | if (!IS_AMO_PHYS_ADDRESS((u64) amos_page)) { | ||
215 | dev_err(xpc_part, "previously used amos_page address " | ||
216 | "is bad = 0x%p\n", (void *) amos_page); | ||
217 | return NULL; | ||
218 | } | ||
219 | amos_page = (AMO_t *) TO_AMO((u64) amos_page); | ||
220 | } | ||
221 | |||
222 | memset(xpc_vars, 0, sizeof(struct xpc_vars)); | ||
223 | |||
224 | /* | ||
225 | * Place the XPC per partition specific variables on the cache line | ||
226 | * following the XPC variables structure. | ||
227 | */ | ||
228 | next_cl += XPC_VARS_ALIGNED_SIZE; | ||
229 | memset((u64 *) next_cl, 0, sizeof(struct xpc_vars_part) * | ||
230 | XP_MAX_PARTITIONS); | ||
231 | xpc_vars_part = (struct xpc_vars_part *) next_cl; | ||
232 | xpc_vars->vars_part_pa = __pa(next_cl); | ||
233 | |||
234 | xpc_vars->version = XPC_V_VERSION; | ||
235 | xpc_vars->act_nasid = cpuid_to_nasid(0); | ||
236 | xpc_vars->act_phys_cpuid = cpu_physical_id(0); | ||
237 | xpc_vars->amos_page = amos_page; /* save for next load of XPC */ | ||
238 | |||
239 | |||
240 | /* | ||
241 | * Initialize the activation related AMO variables. | ||
242 | */ | ||
243 | xpc_vars->act_amos = xpc_IPI_init(XP_MAX_PARTITIONS); | ||
244 | for (i = 1; i < XP_NASID_MASK_WORDS; i++) { | ||
245 | xpc_IPI_init(i + XP_MAX_PARTITIONS); | ||
246 | } | ||
247 | /* export AMO page's physical address to other partitions */ | ||
248 | xpc_vars->amos_page_pa = ia64_tpa((u64) xpc_vars->amos_page); | ||
249 | |||
250 | /* | ||
251 | * This signifies to the remote partition that our reserved | ||
252 | * page is initialized. | ||
253 | */ | ||
254 | (volatile u64) rp->vars_pa = __pa(xpc_vars); | ||
255 | |||
256 | return rp; | ||
257 | } | ||
258 | |||
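The layout built above reduces to simple pointer arithmetic: the XPC variables start on the cacheline boundary following the reserved page, and the per-partition variables follow the XPC variables. A minimal standalone sketch of that arithmetic, using placeholder sizes rather than the real XPC_RSVD_PAGE_ALIGNED_SIZE and XPC_VARS_ALIGNED_SIZE values:

	#include <stdint.h>
	#include <stdio.h>

	/* Placeholder sizes; the real ones come from xpc.h and are
	 * multiples of the L1 cacheline size. */
	#define RSVD_PAGE_ALIGNED_SIZE	128
	#define VARS_ALIGNED_SIZE	128

	int main(void)
	{
		uint64_t rp = 0x100000;				/* reserved page */
		uint64_t vars = rp + RSVD_PAGE_ALIGNED_SIZE;	/* xpc_vars */
		uint64_t vars_part = vars + VARS_ALIGNED_SIZE;	/* xpc_vars_part[] */

		printf("rp=0x%llx vars=0x%llx vars_part=0x%llx\n",
		       (unsigned long long) rp, (unsigned long long) vars,
		       (unsigned long long) vars_part);
		return 0;
	}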
259 | |||
260 | /* | ||
261 | * Change protections to allow IPI operations (and AMO operations on | ||
262 | * Shub 1.1 systems). | ||
263 | */ | ||
264 | void | ||
265 | xpc_allow_IPI_ops(void) | ||
266 | { | ||
267 | int node; | ||
268 | int nasid; | ||
269 | |||
270 | |||
271 | // >>> Change SH_IPI_ACCESS code to use SAL call once it is available. | ||
272 | |||
273 | if (is_shub2()) { | ||
274 | xpc_sh2_IPI_access0 = | ||
275 | (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS0)); | ||
276 | xpc_sh2_IPI_access1 = | ||
277 | (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS1)); | ||
278 | xpc_sh2_IPI_access2 = | ||
279 | (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS2)); | ||
280 | xpc_sh2_IPI_access3 = | ||
281 | (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS3)); | ||
282 | |||
283 | for_each_online_node(node) { | ||
284 | nasid = cnodeid_to_nasid(node); | ||
285 | HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0), | ||
286 | -1UL); | ||
287 | HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1), | ||
288 | -1UL); | ||
289 | HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2), | ||
290 | -1UL); | ||
291 | HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3), | ||
292 | -1UL); | ||
293 | } | ||
294 | |||
295 | } else { | ||
296 | xpc_sh1_IPI_access = | ||
297 | (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_IPI_ACCESS)); | ||
298 | |||
299 | for_each_online_node(node) { | ||
300 | nasid = cnodeid_to_nasid(node); | ||
301 | HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS), | ||
302 | -1UL); | ||
303 | |||
304 | /* | ||
305 | * Since the BIST collides with memory operations on | ||
306 | * SHUB 1.1 sn_change_memprotect() cannot be used. | ||
307 | */ | ||
308 | if (enable_shub_wars_1_1()) { | ||
309 | /* open up everything */ | ||
310 | xpc_prot_vec[node] = (u64) HUB_L((u64 *) | ||
311 | GLOBAL_MMR_ADDR(nasid, | ||
312 | SH1_MD_DQLP_MMR_DIR_PRIVEC0)); | ||
313 | HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, | ||
314 | SH1_MD_DQLP_MMR_DIR_PRIVEC0), | ||
315 | -1UL); | ||
316 | HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, | ||
317 | SH1_MD_DQRP_MMR_DIR_PRIVEC0), | ||
318 | -1UL); | ||
319 | } | ||
320 | } | ||
321 | } | ||
322 | } | ||
323 | |||
324 | |||
325 | /* | ||
326 | * Restrict protections to disallow IPI operations (and AMO operations on | ||
327 | * Shub 1.1 systems). | ||
328 | */ | ||
329 | void | ||
330 | xpc_restrict_IPI_ops(void) | ||
331 | { | ||
332 | int node; | ||
333 | int nasid; | ||
334 | |||
335 | |||
336 | // >>> Change SH_IPI_ACCESS code to use SAL call once it is available. | ||
337 | |||
338 | if (is_shub2()) { | ||
339 | |||
340 | for_each_online_node(node) { | ||
341 | nasid = cnodeid_to_nasid(node); | ||
342 | HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0), | ||
343 | xpc_sh2_IPI_access0); | ||
344 | HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1), | ||
345 | xpc_sh2_IPI_access1); | ||
346 | HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2), | ||
347 | xpc_sh2_IPI_access2); | ||
348 | HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3), | ||
349 | xpc_sh2_IPI_access3); | ||
350 | } | ||
351 | |||
352 | } else { | ||
353 | |||
354 | for_each_online_node(node) { | ||
355 | nasid = cnodeid_to_nasid(node); | ||
356 | HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS), | ||
357 | xpc_sh1_IPI_access); | ||
358 | |||
359 | if (enable_shub_wars_1_1()) { | ||
360 | HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, | ||
361 | SH1_MD_DQLP_MMR_DIR_PRIVEC0), | ||
362 | xpc_prot_vec[node]); | ||
363 | HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, | ||
364 | SH1_MD_DQRP_MMR_DIR_PRIVEC0), | ||
365 | xpc_prot_vec[node]); | ||
366 | } | ||
367 | } | ||
368 | } | ||
369 | } | ||
370 | |||
371 | |||
372 | /* | ||
373 | * At periodic intervals, scan through all active partitions and ensure | ||
374 | * their heartbeat is still active. If not, the partition is deactivated. | ||
375 | */ | ||
376 | void | ||
377 | xpc_check_remote_hb(void) | ||
378 | { | ||
379 | struct xpc_vars *remote_vars; | ||
380 | struct xpc_partition *part; | ||
381 | partid_t partid; | ||
382 | bte_result_t bres; | ||
383 | |||
384 | |||
385 | remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer; | ||
386 | |||
387 | for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { | ||
388 | if (partid == sn_partition_id) { | ||
389 | continue; | ||
390 | } | ||
391 | |||
392 | part = &xpc_partitions[partid]; | ||
393 | |||
394 | if (part->act_state == XPC_P_INACTIVE || | ||
395 | part->act_state == XPC_P_DEACTIVATING) { | ||
396 | continue; | ||
397 | } | ||
398 | |||
399 | /* pull the remote_hb cache line */ | ||
400 | bres = xp_bte_copy(part->remote_vars_pa, | ||
401 | ia64_tpa((u64) remote_vars), | ||
402 | XPC_VARS_ALIGNED_SIZE, | ||
403 | (BTE_NOTIFY | BTE_WACQUIRE), NULL); | ||
404 | if (bres != BTE_SUCCESS) { | ||
405 | XPC_DEACTIVATE_PARTITION(part, | ||
406 | xpc_map_bte_errors(bres)); | ||
407 | continue; | ||
408 | } | ||
409 | |||
410 | dev_dbg(xpc_part, "partid = %d, heartbeat = %ld, last_heartbeat" | ||
411 | " = %ld, kdb_status = %ld, HB_mask = 0x%lx\n", partid, | ||
412 | remote_vars->heartbeat, part->last_heartbeat, | ||
413 | remote_vars->kdb_status, | ||
414 | remote_vars->heartbeating_to_mask); | ||
415 | |||
416 | if (((remote_vars->heartbeat == part->last_heartbeat) && | ||
417 | (remote_vars->kdb_status == 0)) || | ||
418 | !XPC_HB_ALLOWED(sn_partition_id, remote_vars)) { | ||
419 | |||
420 | XPC_DEACTIVATE_PARTITION(part, xpcNoHeartbeat); | ||
421 | continue; | ||
422 | } | ||
423 | |||
424 | part->last_heartbeat = remote_vars->heartbeat; | ||
425 | } | ||
426 | } | ||
427 | |||
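The deactivation test above reduces to a small predicate: a remote partition is written off when its heartbeat counter has not advanced (and it is not sitting in the kernel debugger), or when it has stopped heartbeating to us. A standalone restatement, with XPC_HB_ALLOWED() folded into a plain hb_allowed flag (an assumption about how the macro is used here, not its definition):

	/* Nonzero when the remote partition should be deactivated. */
	static int remote_hb_looks_dead(unsigned long heartbeat,
					unsigned long last_heartbeat,
					unsigned long kdb_status,
					int hb_allowed)
	{
		return (heartbeat == last_heartbeat && kdb_status == 0) ||
		       !hb_allowed;
	}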
428 | |||
429 | /* | ||
430 | * Get a copy of the remote partition's rsvd page. | ||
431 | * | ||
432 | * remote_rp points to a buffer that is cacheline aligned for BTE copies and | ||
433 | * assumed to be of size XPC_RSVD_PAGE_ALIGNED_SIZE. | ||
434 | */ | ||
435 | static enum xpc_retval | ||
436 | xpc_get_remote_rp(int nasid, u64 *discovered_nasids, | ||
437 | struct xpc_rsvd_page *remote_rp, u64 *remote_rsvd_page_pa) | ||
438 | { | ||
439 | int bres, i; | ||
440 | |||
441 | |||
442 | /* get the reserved page's physical address */ | ||
443 | |||
444 | *remote_rsvd_page_pa = xpc_get_rsvd_page_pa(nasid, (u64) remote_rp, | ||
445 | XPC_RSVD_PAGE_ALIGNED_SIZE); | ||
446 | if (*remote_rsvd_page_pa == 0) { | ||
447 | return xpcNoRsvdPageAddr; | ||
448 | } | ||
449 | |||
450 | |||
451 | /* pull over the reserved page structure */ | ||
452 | |||
453 | bres = xp_bte_copy(*remote_rsvd_page_pa, ia64_tpa((u64) remote_rp), | ||
454 | XPC_RSVD_PAGE_ALIGNED_SIZE, | ||
455 | (BTE_NOTIFY | BTE_WACQUIRE), NULL); | ||
456 | if (bres != BTE_SUCCESS) { | ||
457 | return xpc_map_bte_errors(bres); | ||
458 | } | ||
459 | |||
460 | |||
461 | if (discovered_nasids != NULL) { | ||
462 | for (i = 0; i < XP_NASID_MASK_WORDS; i++) { | ||
463 | discovered_nasids[i] |= remote_rp->part_nasids[i]; | ||
464 | } | ||
465 | } | ||
466 | |||
467 | |||
468 | /* check that the partid is for another partition */ | ||
469 | |||
470 | if (remote_rp->partid < 1 || | ||
471 | remote_rp->partid > (XP_MAX_PARTITIONS - 1)) { | ||
472 | return xpcInvalidPartid; | ||
473 | } | ||
474 | |||
475 | if (remote_rp->partid == sn_partition_id) { | ||
476 | return xpcLocalPartid; | ||
477 | } | ||
478 | |||
479 | |||
480 | if (XPC_VERSION_MAJOR(remote_rp->version) != | ||
481 | XPC_VERSION_MAJOR(XPC_RP_VERSION)) { | ||
482 | return xpcBadVersion; | ||
483 | } | ||
484 | |||
485 | return xpcSuccess; | ||
486 | } | ||
487 | |||
488 | |||
489 | /* | ||
490 | * Get a copy of the remote partition's XPC variables. | ||
491 | * | ||
492 | * remote_vars points to a buffer that is cacheline aligned for BTE copies and | ||
493 | * assumed to be of size XPC_VARS_ALIGNED_SIZE. | ||
494 | */ | ||
495 | static enum xpc_retval | ||
496 | xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars) | ||
497 | { | ||
498 | int bres; | ||
499 | |||
500 | |||
501 | if (remote_vars_pa == 0) { | ||
502 | return xpcVarsNotSet; | ||
503 | } | ||
504 | |||
505 | |||
506 | /* pull over the cross partition variables */ | ||
507 | |||
508 | bres = xp_bte_copy(remote_vars_pa, ia64_tpa((u64) remote_vars), | ||
509 | XPC_VARS_ALIGNED_SIZE, | ||
510 | (BTE_NOTIFY | BTE_WACQUIRE), NULL); | ||
511 | if (bres != BTE_SUCCESS) { | ||
512 | return xpc_map_bte_errors(bres); | ||
513 | } | ||
514 | |||
515 | if (XPC_VERSION_MAJOR(remote_vars->version) != | ||
516 | XPC_VERSION_MAJOR(XPC_V_VERSION)) { | ||
517 | return xpcBadVersion; | ||
518 | } | ||
519 | |||
520 | return xpcSuccess; | ||
521 | } | ||
522 | |||
523 | |||
524 | /* | ||
525 | * Prior code has determined the nasid which generated an IPI. Inspect | ||
526 | * that nasid to determine if its partition needs to be activated or | ||
527 | * deactivated. | ||
528 | * | ||
529 | * A partition is considered "awaiting activation" if our partition | ||
530 | * flags indicate it is not active and it has a heartbeat. A | ||
531 | * partition is considered "awaiting deactivation" if our partition | ||
532 | * flags indicate it is active but it has no heartbeat or it is not | ||
533 | * sending its heartbeat to us. | ||
534 | * | ||
535 | * To determine the heartbeat, the remote nasid must have a properly | ||
536 | * initialized reserved page. | ||
537 | */ | ||
538 | static void | ||
539 | xpc_identify_act_IRQ_req(int nasid) | ||
540 | { | ||
541 | struct xpc_rsvd_page *remote_rp; | ||
542 | struct xpc_vars *remote_vars; | ||
543 | u64 remote_rsvd_page_pa; | ||
544 | u64 remote_vars_pa; | ||
545 | partid_t partid; | ||
546 | struct xpc_partition *part; | ||
547 | enum xpc_retval ret; | ||
548 | |||
549 | |||
550 | /* pull over the reserved page structure */ | ||
551 | |||
552 | remote_rp = (struct xpc_rsvd_page *) xpc_remote_copy_buffer; | ||
553 | |||
554 | ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rsvd_page_pa); | ||
555 | if (ret != xpcSuccess) { | ||
556 | dev_warn(xpc_part, "unable to get reserved page from nasid %d, " | ||
557 | "which sent interrupt, reason=%d\n", nasid, ret); | ||
558 | return; | ||
559 | } | ||
560 | |||
561 | remote_vars_pa = remote_rp->vars_pa; | ||
562 | partid = remote_rp->partid; | ||
563 | part = &xpc_partitions[partid]; | ||
564 | |||
565 | |||
566 | /* pull over the cross partition variables */ | ||
567 | |||
568 | remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer; | ||
569 | |||
570 | ret = xpc_get_remote_vars(remote_vars_pa, remote_vars); | ||
571 | if (ret != xpcSuccess) { | ||
572 | |||
573 | dev_warn(xpc_part, "unable to get XPC variables from nasid %d, " | ||
574 | "which sent interrupt, reason=%d\n", nasid, ret); | ||
575 | |||
576 | XPC_DEACTIVATE_PARTITION(part, ret); | ||
577 | return; | ||
578 | } | ||
579 | |||
580 | |||
581 | part->act_IRQ_rcvd++; | ||
582 | |||
583 | dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = " | ||
584 | "%ld:0x%lx\n", (int) nasid, (int) partid, part->act_IRQ_rcvd, | ||
585 | remote_vars->heartbeat, remote_vars->heartbeating_to_mask); | ||
586 | |||
587 | |||
588 | if (part->act_state == XPC_P_INACTIVE) { | ||
589 | |||
590 | part->remote_rp_pa = remote_rsvd_page_pa; | ||
591 | dev_dbg(xpc_part, " remote_rp_pa = 0x%016lx\n", | ||
592 | part->remote_rp_pa); | ||
593 | |||
594 | part->remote_vars_pa = remote_vars_pa; | ||
595 | dev_dbg(xpc_part, " remote_vars_pa = 0x%016lx\n", | ||
596 | part->remote_vars_pa); | ||
597 | |||
598 | part->last_heartbeat = remote_vars->heartbeat; | ||
599 | dev_dbg(xpc_part, " last_heartbeat = 0x%016lx\n", | ||
600 | part->last_heartbeat); | ||
601 | |||
602 | part->remote_vars_part_pa = remote_vars->vars_part_pa; | ||
603 | dev_dbg(xpc_part, " remote_vars_part_pa = 0x%016lx\n", | ||
604 | part->remote_vars_part_pa); | ||
605 | |||
606 | part->remote_act_nasid = remote_vars->act_nasid; | ||
607 | dev_dbg(xpc_part, " remote_act_nasid = 0x%x\n", | ||
608 | part->remote_act_nasid); | ||
609 | |||
610 | part->remote_act_phys_cpuid = remote_vars->act_phys_cpuid; | ||
611 | dev_dbg(xpc_part, " remote_act_phys_cpuid = 0x%x\n", | ||
612 | part->remote_act_phys_cpuid); | ||
613 | |||
614 | part->remote_amos_page_pa = remote_vars->amos_page_pa; | ||
615 | dev_dbg(xpc_part, " remote_amos_page_pa = 0x%lx\n", | ||
616 | part->remote_amos_page_pa); | ||
617 | |||
618 | xpc_activate_partition(part); | ||
619 | |||
620 | } else if (part->remote_amos_page_pa != remote_vars->amos_page_pa || | ||
621 | !XPC_HB_ALLOWED(sn_partition_id, remote_vars)) { | ||
622 | |||
623 | part->reactivate_nasid = nasid; | ||
624 | XPC_DEACTIVATE_PARTITION(part, xpcReactivating); | ||
625 | } | ||
626 | } | ||
627 | |||
628 | |||
629 | /* | ||
630 | * Loop through the activation AMO variables and process any bits | ||
631 | * which are set. Each bit indicates a nasid sending a partition | ||
632 | * activation or deactivation request. | ||
633 | * | ||
634 | * Return #of IRQs detected. | ||
635 | */ | ||
636 | int | ||
637 | xpc_identify_act_IRQ_sender(void) | ||
638 | { | ||
639 | int word, bit; | ||
640 | u64 nasid_mask; | ||
641 | u64 nasid; /* remote nasid */ | ||
642 | int n_IRQs_detected = 0; | ||
643 | AMO_t *act_amos; | ||
644 | struct xpc_rsvd_page *rp = (struct xpc_rsvd_page *) xpc_rsvd_page; | ||
645 | |||
646 | |||
647 | act_amos = xpc_vars->act_amos; | ||
648 | |||
649 | |||
650 | /* scan through act AMO variable looking for non-zero entries */ | ||
651 | for (word = 0; word < XP_NASID_MASK_WORDS; word++) { | ||
652 | |||
653 | nasid_mask = xpc_IPI_receive(&act_amos[word]); | ||
654 | if (nasid_mask == 0) { | ||
655 | /* no IRQs from nasids in this variable */ | ||
656 | continue; | ||
657 | } | ||
658 | |||
659 | dev_dbg(xpc_part, "AMO[%d] gave back 0x%lx\n", word, | ||
660 | nasid_mask); | ||
661 | |||
662 | |||
663 | /* | ||
664 | * If this nasid has been added to the machine since | ||
665 | * our partition was reset, this will retain the | ||
666 | * remote nasid in our reserved page's machine mask. | ||
667 | * This is used in the event of module reload. | ||
668 | */ | ||
669 | rp->mach_nasids[word] |= nasid_mask; | ||
670 | |||
671 | |||
672 | /* locate the nasid(s) which sent interrupts */ | ||
673 | |||
674 | for (bit = 0; bit < (8 * sizeof(u64)); bit++) { | ||
675 | if (nasid_mask & (1UL << bit)) { | ||
676 | n_IRQs_detected++; | ||
677 | nasid = XPC_NASID_FROM_W_B(word, bit); | ||
678 | dev_dbg(xpc_part, "interrupt from nasid %ld\n", | ||
679 | nasid); | ||
680 | xpc_identify_act_IRQ_req(nasid); | ||
681 | } | ||
682 | } | ||
683 | } | ||
684 | return n_IRQs_detected; | ||
685 | } | ||
686 | |||
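The scan above is a plain bit walk over each 64-bit AMO word. A stripped-down version, with the word-and-bit to nasid translation left to a caller-supplied callback since XPC_NASID_FROM_W_B() is not reproduced here:

	#include <stdint.h>

	/* Invoke handle(word, bit) for every set bit in one AMO word and
	 * return the number of bits found (i.e. IRQ requests detected). */
	static int scan_act_mask(int word, uint64_t mask,
				 void (*handle)(int word, int bit))
	{
		int bit, n = 0;

		for (bit = 0; bit < 64; bit++) {
			if (mask & (1ULL << bit)) {
				n++;
				handle(word, bit);
			}
		}
		return n;
	}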
687 | |||
688 | /* | ||
689 | * Mark specified partition as active. | ||
690 | */ | ||
691 | enum xpc_retval | ||
692 | xpc_mark_partition_active(struct xpc_partition *part) | ||
693 | { | ||
694 | unsigned long irq_flags; | ||
695 | enum xpc_retval ret; | ||
696 | |||
697 | |||
698 | dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part)); | ||
699 | |||
700 | spin_lock_irqsave(&part->act_lock, irq_flags); | ||
701 | if (part->act_state == XPC_P_ACTIVATING) { | ||
702 | part->act_state = XPC_P_ACTIVE; | ||
703 | ret = xpcSuccess; | ||
704 | } else { | ||
705 | DBUG_ON(part->reason == xpcSuccess); | ||
706 | ret = part->reason; | ||
707 | } | ||
708 | spin_unlock_irqrestore(&part->act_lock, irq_flags); | ||
709 | |||
710 | return ret; | ||
711 | } | ||
712 | |||
713 | |||
714 | /* | ||
715 | * Notify XPC that the partition is down. | ||
716 | */ | ||
717 | void | ||
718 | xpc_deactivate_partition(const int line, struct xpc_partition *part, | ||
719 | enum xpc_retval reason) | ||
720 | { | ||
721 | unsigned long irq_flags; | ||
722 | partid_t partid = XPC_PARTID(part); | ||
723 | |||
724 | |||
725 | spin_lock_irqsave(&part->act_lock, irq_flags); | ||
726 | |||
727 | if (part->act_state == XPC_P_INACTIVE) { | ||
728 | XPC_SET_REASON(part, reason, line); | ||
729 | spin_unlock_irqrestore(&part->act_lock, irq_flags); | ||
730 | if (reason == xpcReactivating) { | ||
731 | /* we interrupt ourselves to reactivate partition */ | ||
732 | xpc_IPI_send_reactivate(part); | ||
733 | } | ||
734 | return; | ||
735 | } | ||
736 | if (part->act_state == XPC_P_DEACTIVATING) { | ||
737 | if ((part->reason == xpcUnloading && reason != xpcUnloading) || | ||
738 | reason == xpcReactivating) { | ||
739 | XPC_SET_REASON(part, reason, line); | ||
740 | } | ||
741 | spin_unlock_irqrestore(&part->act_lock, irq_flags); | ||
742 | return; | ||
743 | } | ||
744 | |||
745 | part->act_state = XPC_P_DEACTIVATING; | ||
746 | XPC_SET_REASON(part, reason, line); | ||
747 | |||
748 | spin_unlock_irqrestore(&part->act_lock, irq_flags); | ||
749 | |||
750 | XPC_DISALLOW_HB(partid, xpc_vars); | ||
751 | |||
752 | dev_dbg(xpc_part, "bringing partition %d down, reason = %d\n", partid, | ||
753 | reason); | ||
754 | |||
755 | xpc_partition_down(part, reason); | ||
756 | } | ||
757 | |||
758 | |||
759 | /* | ||
760 | * Mark specified partition as active. | ||
761 | */ | ||
762 | void | ||
763 | xpc_mark_partition_inactive(struct xpc_partition *part) | ||
764 | { | ||
765 | unsigned long irq_flags; | ||
766 | |||
767 | |||
768 | dev_dbg(xpc_part, "setting partition %d to INACTIVE\n", | ||
769 | XPC_PARTID(part)); | ||
770 | |||
771 | spin_lock_irqsave(&part->act_lock, irq_flags); | ||
772 | part->act_state = XPC_P_INACTIVE; | ||
773 | spin_unlock_irqrestore(&part->act_lock, irq_flags); | ||
774 | part->remote_rp_pa = 0; | ||
775 | } | ||
776 | |||
777 | |||
778 | /* | ||
779 | * SAL has provided a partition and machine mask. The partition mask | ||
780 | * contains a bit for each even nasid in our partition. The machine | ||
781 | * mask contains a bit for each even nasid in the entire machine. | ||
782 | * | ||
783 | * Using those two bit arrays, we can determine which nasids are | ||
784 | * known in the machine. Each should also have a reserved page | ||
785 | * initialized if it is available for partitioning. | ||
786 | */ | ||
787 | void | ||
788 | xpc_discovery(void) | ||
789 | { | ||
790 | void *remote_rp_base; | ||
791 | struct xpc_rsvd_page *remote_rp; | ||
792 | struct xpc_vars *remote_vars; | ||
793 | u64 remote_rsvd_page_pa; | ||
794 | u64 remote_vars_pa; | ||
795 | int region; | ||
796 | int max_regions; | ||
797 | int nasid; | ||
798 | struct xpc_rsvd_page *rp; | ||
799 | partid_t partid; | ||
800 | struct xpc_partition *part; | ||
801 | u64 *discovered_nasids; | ||
802 | enum xpc_retval ret; | ||
803 | |||
804 | |||
805 | remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RSVD_PAGE_ALIGNED_SIZE, | ||
806 | GFP_KERNEL, &remote_rp_base); | ||
807 | if (remote_rp == NULL) { | ||
808 | return; | ||
809 | } | ||
810 | remote_vars = (struct xpc_vars *) remote_rp; | ||
811 | |||
812 | |||
813 | discovered_nasids = kmalloc(sizeof(u64) * XP_NASID_MASK_WORDS, | ||
814 | GFP_KERNEL); | ||
815 | if (discovered_nasids == NULL) { | ||
816 | kfree(remote_rp_base); | ||
817 | return; | ||
818 | } | ||
819 | memset(discovered_nasids, 0, sizeof(u64) * XP_NASID_MASK_WORDS); | ||
820 | |||
821 | rp = (struct xpc_rsvd_page *) xpc_rsvd_page; | ||
822 | |||
823 | /* | ||
824 | * The term 'region' in this context refers to the minimum number of | ||
825 | * nodes that can comprise an access protection grouping. The access | ||
826 | * protection is in regard to memory, IOI and IPI. | ||
827 | */ | ||
828 | //>>> move the next two #defines into either include/asm-ia64/sn/arch.h or | ||
829 | //>>> include/asm-ia64/sn/addrs.h | ||
830 | #define SH1_MAX_REGIONS 64 | ||
831 | #define SH2_MAX_REGIONS 256 | ||
832 | max_regions = is_shub2() ? SH2_MAX_REGIONS : SH1_MAX_REGIONS; | ||
833 | |||
834 | for (region = 0; region < max_regions; region++) { | ||
835 | |||
836 | if ((volatile int) xpc_exiting) { | ||
837 | break; | ||
838 | } | ||
839 | |||
840 | dev_dbg(xpc_part, "searching region %d\n", region); | ||
841 | |||
842 | for (nasid = (region * sn_region_size * 2); | ||
843 | nasid < ((region + 1) * sn_region_size * 2); | ||
844 | nasid += 2) { | ||
845 | |||
846 | if ((volatile int) xpc_exiting) { | ||
847 | break; | ||
848 | } | ||
849 | |||
850 | dev_dbg(xpc_part, "checking nasid %d\n", nasid); | ||
851 | |||
852 | |||
853 | if (XPC_NASID_IN_ARRAY(nasid, rp->part_nasids)) { | ||
854 | dev_dbg(xpc_part, "PROM indicates Nasid %d is " | ||
855 | "part of the local partition; skipping " | ||
856 | "region\n", nasid); | ||
857 | break; | ||
858 | } | ||
859 | |||
860 | if (!(XPC_NASID_IN_ARRAY(nasid, rp->mach_nasids))) { | ||
861 | dev_dbg(xpc_part, "PROM indicates Nasid %d was " | ||
862 | "not on Numa-Link network at reset\n", | ||
863 | nasid); | ||
864 | continue; | ||
865 | } | ||
866 | |||
867 | if (XPC_NASID_IN_ARRAY(nasid, discovered_nasids)) { | ||
868 | dev_dbg(xpc_part, "Nasid %d is part of a " | ||
869 | "partition which was previously " | ||
870 | "discovered\n", nasid); | ||
871 | continue; | ||
872 | } | ||
873 | |||
874 | |||
875 | /* pull over the reserved page structure */ | ||
876 | |||
877 | ret = xpc_get_remote_rp(nasid, discovered_nasids, | ||
878 | remote_rp, &remote_rsvd_page_pa); | ||
879 | if (ret != xpcSuccess) { | ||
880 | dev_dbg(xpc_part, "unable to get reserved page " | ||
881 | "from nasid %d, reason=%d\n", nasid, | ||
882 | ret); | ||
883 | |||
884 | if (ret == xpcLocalPartid) { | ||
885 | break; | ||
886 | } | ||
887 | continue; | ||
888 | } | ||
889 | |||
890 | remote_vars_pa = remote_rp->vars_pa; | ||
891 | |||
892 | partid = remote_rp->partid; | ||
893 | part = &xpc_partitions[partid]; | ||
894 | |||
895 | |||
896 | /* pull over the cross partition variables */ | ||
897 | |||
898 | ret = xpc_get_remote_vars(remote_vars_pa, remote_vars); | ||
899 | if (ret != xpcSuccess) { | ||
900 | dev_dbg(xpc_part, "unable to get XPC variables " | ||
901 | "from nasid %d, reason=%d\n", nasid, | ||
902 | ret); | ||
903 | |||
904 | XPC_DEACTIVATE_PARTITION(part, ret); | ||
905 | continue; | ||
906 | } | ||
907 | |||
908 | if (part->act_state != XPC_P_INACTIVE) { | ||
909 | dev_dbg(xpc_part, "partition %d on nasid %d is " | ||
910 | "already activating\n", partid, nasid); | ||
911 | break; | ||
912 | } | ||
913 | |||
914 | /* | ||
915 | * Register the remote partition's AMOs with SAL so it | ||
916 | * can handle and cleanup errors within that address | ||
917 | * range should the remote partition go down. We don't | ||
918 | * unregister this range because it is difficult to | ||
919 | * tell when outstanding writes to the remote partition | ||
920 | * are finished and thus when it is safe to | ||
921 | * unregister. This should not result in wasted space | ||
922 | * in the SAL xp_addr_region table because we should | ||
923 | * get the same page for remote_act_amos_pa after | ||
924 | * module reloads and system reboots. | ||
925 | */ | ||
926 | if (sn_register_xp_addr_region( | ||
927 | remote_vars->amos_page_pa, | ||
928 | PAGE_SIZE, 1) < 0) { | ||
929 | dev_dbg(xpc_part, "partition %d failed to " | ||
930 | "register xp_addr region 0x%016lx\n", | ||
931 | partid, remote_vars->amos_page_pa); | ||
932 | |||
933 | XPC_SET_REASON(part, xpcPhysAddrRegFailed, | ||
934 | __LINE__); | ||
935 | break; | ||
936 | } | ||
937 | |||
938 | /* | ||
939 | * The remote nasid is valid and available. | ||
940 | * Send an interrupt to that nasid to notify | ||
941 | * it that we are ready to begin activation. | ||
942 | */ | ||
943 | dev_dbg(xpc_part, "sending an interrupt to AMO 0x%lx, " | ||
944 | "nasid %d, phys_cpuid 0x%x\n", | ||
945 | remote_vars->amos_page_pa, | ||
946 | remote_vars->act_nasid, | ||
947 | remote_vars->act_phys_cpuid); | ||
948 | |||
949 | xpc_IPI_send_activate(remote_vars); | ||
950 | } | ||
951 | } | ||
952 | |||
953 | kfree(discovered_nasids); | ||
954 | kfree(remote_rp_base); | ||
955 | } | ||
956 | |||
957 | |||
958 | /* | ||
959 | * Given a partid, get the nasids owned by that partition from the | ||
960 | * remote partition's reserved page. | ||
961 | */ | ||
962 | enum xpc_retval | ||
963 | xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask) | ||
964 | { | ||
965 | struct xpc_partition *part; | ||
966 | u64 part_nasid_pa; | ||
967 | int bte_res; | ||
968 | |||
969 | |||
970 | part = &xpc_partitions[partid]; | ||
971 | if (part->remote_rp_pa == 0) { | ||
972 | return xpcPartitionDown; | ||
973 | } | ||
974 | |||
975 | part_nasid_pa = part->remote_rp_pa + | ||
976 | (u64) &((struct xpc_rsvd_page *) 0)->part_nasids; | ||
977 | |||
978 | bte_res = xp_bte_copy(part_nasid_pa, ia64_tpa((u64) nasid_mask), | ||
979 | L1_CACHE_ALIGN(XP_NASID_MASK_BYTES), | ||
980 | (BTE_NOTIFY | BTE_WACQUIRE), NULL); | ||
981 | |||
982 | return xpc_map_bte_errors(bte_res); | ||
983 | } | ||
984 | |||
diff --git a/arch/ia64/sn/kernel/xpnet.c b/arch/ia64/sn/kernel/xpnet.c new file mode 100644 index 000000000000..78c13d676fa6 --- /dev/null +++ b/arch/ia64/sn/kernel/xpnet.c | |||
@@ -0,0 +1,715 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1999,2001-2005 Silicon Graphics, Inc. All rights reserved. | ||
7 | */ | ||
8 | |||
9 | |||
10 | /* | ||
11 | * Cross Partition Network Interface (XPNET) support | ||
12 | * | ||
13 | * XPNET provides a virtual network layered on top of the Cross | ||
14 | * Partition communication layer. | ||
15 | * | ||
16 | * XPNET provides direct point-to-point and broadcast-like support | ||
17 | * for an ethernet-like device. The ethernet broadcast medium is | ||
18 | * replaced with a point-to-point message structure which passes | ||
19 | * pointers to a DMA-capable block that a remote partition should | ||
20 | * retrieve and pass to the upper level networking layer. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | |||
25 | #include <linux/config.h> | ||
26 | #include <linux/module.h> | ||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/pci.h> | ||
29 | #include <linux/init.h> | ||
30 | #include <linux/ioport.h> | ||
31 | #include <linux/netdevice.h> | ||
32 | #include <linux/etherdevice.h> | ||
33 | #include <linux/delay.h> | ||
34 | #include <linux/ethtool.h> | ||
35 | #include <linux/mii.h> | ||
36 | #include <linux/smp.h> | ||
37 | #include <linux/string.h> | ||
38 | #include <asm/sn/bte.h> | ||
39 | #include <asm/sn/io.h> | ||
40 | #include <asm/sn/sn_sal.h> | ||
41 | #include <asm/types.h> | ||
42 | #include <asm/atomic.h> | ||
43 | #include <asm/sn/xp.h> | ||
44 | |||
45 | |||
46 | /* | ||
47 | * The message payload transferred by XPC. | ||
48 | * | ||
49 | * buf_pa is the physical address where the DMA should pull from. | ||
50 | * | ||
51 | * NOTE: for performance reasons, buf_pa should _ALWAYS_ begin on a | ||
52 | * cacheline boundary. To accomplish this, we record the number of | ||
53 | * bytes from the beginning of the first cacheline to the first useful | ||
54 | * byte of the skb (leadin_ignore) and the number of bytes from the | ||
55 | * last useful byte of the skb to the end of the last cacheline | ||
56 | * (tailout_ignore). | ||
57 | * | ||
58 | * size is the number of bytes to transfer which includes the skb->len | ||
59 | * (useful bytes of the sender's skb) plus the leadin and tailout | ||
60 | */ | ||
61 | struct xpnet_message { | ||
62 | u16 version; /* Version for this message */ | ||
63 | u16 embedded_bytes; /* #of bytes embedded in XPC message */ | ||
64 | u32 magic; /* Special number indicating this is xpnet */ | ||
65 | u64 buf_pa; /* phys address of buffer to retrieve */ | ||
66 | u32 size; /* #of bytes in buffer */ | ||
67 | u8 leadin_ignore; /* #of bytes to ignore at the beginning */ | ||
68 | u8 tailout_ignore; /* #of bytes to ignore at the end */ | ||
69 | unsigned char data; /* body of small packets */ | ||
70 | }; | ||
71 | |||
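Rounding the useful bytes of an skb out to whole cachelines, as described above, amounts to the following arithmetic (a standalone sketch assuming a 128-byte cacheline):

	#include <stdint.h>

	#define CACHELINE	128	/* assumed L1_CACHE_BYTES */

	/* Bracket the useful data [data, data + len) with whole cachelines
	 * and record how much padding was added at each end. */
	static void xpnet_bracket(uint64_t data, uint64_t len, uint64_t *start,
				  uint64_t *size, uint8_t *leadin,
				  uint8_t *tailout)
	{
		uint64_t end;

		*start = data & ~(uint64_t)(CACHELINE - 1);
		end = (data + len + CACHELINE - 1) & ~(uint64_t)(CACHELINE - 1);
		*size = end - *start;		/* bytes the BTE will move */
		*leadin = data - *start;	/* leadin_ignore */
		*tailout = end - (data + len);	/* tailout_ignore */
	}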
72 | /* | ||
73 | * Determine the size of our message, the cacheline aligned size, | ||
74 | * and then the number of messages we will request from XPC. | ||
75 | * | ||
76 | * XPC expects each message to exist in an individual cacheline. | ||
77 | */ | ||
78 | #define XPNET_MSG_SIZE (L1_CACHE_BYTES - XPC_MSG_PAYLOAD_OFFSET) | ||
79 | #define XPNET_MSG_DATA_MAX \ | ||
80 | (XPNET_MSG_SIZE - (u64)(&((struct xpnet_message *)0)->data)) | ||
81 | #define XPNET_MSG_ALIGNED_SIZE (L1_CACHE_ALIGN(XPNET_MSG_SIZE)) | ||
82 | #define XPNET_MSG_NENTRIES (PAGE_SIZE / XPNET_MSG_ALIGNED_SIZE) | ||
83 | |||
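With concrete numbers the macro arithmetic above is easier to follow. The sketch below recomputes it for an assumed 128-byte cacheline, an assumed 16-byte XPC payload offset and an assumed 16KB page; all three are placeholders, not the values from the SN2 headers:

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	#define CACHELINE	128	/* assumed L1_CACHE_BYTES */
	#define PAYLOAD_OFFSET	16	/* assumed XPC_MSG_PAYLOAD_OFFSET */
	#define PAGESIZE	16384	/* assumed PAGE_SIZE */
	#define ALIGN_CL(x)	(((x) + CACHELINE - 1) & ~(size_t)(CACHELINE - 1))

	struct xpnet_message {
		uint16_t version;
		uint16_t embedded_bytes;
		uint32_t magic;
		uint64_t buf_pa;
		uint32_t size;
		uint8_t leadin_ignore;
		uint8_t tailout_ignore;
		unsigned char data;
	};

	int main(void)
	{
		size_t msg_size = CACHELINE - PAYLOAD_OFFSET;
		size_t data_max = msg_size - offsetof(struct xpnet_message, data);
		size_t aligned = ALIGN_CL(msg_size);

		printf("msg=%zu data_max=%zu aligned=%zu nentries=%zu\n",
		       msg_size, data_max, aligned, (size_t)(PAGESIZE / aligned));
		return 0;
	}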
84 | |||
85 | #define XPNET_MAX_KTHREADS (XPNET_MSG_NENTRIES + 1) | ||
86 | #define XPNET_MAX_IDLE_KTHREADS (XPNET_MSG_NENTRIES + 1) | ||
87 | |||
88 | /* | ||
89 | * Version number of XPNET implementation. XPNET can always talk to versions | ||
90 | * with the same major #, and never talk to versions with a different major #. | ||
91 | */ | ||
92 | #define _XPNET_VERSION(_major, _minor) (((_major) << 4) | (_minor)) | ||
93 | #define XPNET_VERSION_MAJOR(_v) ((_v) >> 4) | ||
94 | #define XPNET_VERSION_MINOR(_v) ((_v) & 0xf) | ||
95 | |||
96 | #define XPNET_VERSION _XPNET_VERSION(1,0) /* version 1.0 */ | ||
97 | #define XPNET_VERSION_EMBED _XPNET_VERSION(1,1) /* version 1.1 */ | ||
98 | #define XPNET_MAGIC 0x88786984 /* "XNET" */ | ||
99 | |||
100 | #define XPNET_VALID_MSG(_m) \ | ||
101 | ((XPNET_VERSION_MAJOR(_m->version) == XPNET_VERSION_MAJOR(XPNET_VERSION)) \ | ||
102 | && (msg->magic == XPNET_MAGIC)) | ||
103 | |||
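In other words, compatibility is decided by the high nibble alone; a minimal standalone check in that style (restating the macros above, not adding new ones to the driver):

	#include <stdint.h>

	#define VER(maj, min)	(((maj) << 4) | (min))	/* pack major/minor */
	#define VER_MAJOR(v)	((v) >> 4)

	/* Two XPNET endpoints interoperate only when the major nibbles match. */
	static int versions_compatible(uint8_t a, uint8_t b)
	{
		return VER_MAJOR(a) == VER_MAJOR(b);
	}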
104 | #define XPNET_DEVICE_NAME "xp0" | ||
105 | |||
106 | |||
107 | /* | ||
108 | * When messages are queued with xpc_send_notify, a kmalloc'd buffer | ||
109 | * of the following type is passed as a notification cookie. When the | ||
110 | * notification function is called, we use the cookie to decide | ||
111 | * whether all outstanding message sends have completed. The skb can | ||
112 | * then be released. | ||
113 | */ | ||
114 | struct xpnet_pending_msg { | ||
115 | struct list_head free_list; | ||
116 | struct sk_buff *skb; | ||
117 | atomic_t use_count; | ||
118 | }; | ||
119 | |||
120 | /* driver specific structure pointed to by the device structure */ | ||
121 | struct xpnet_dev_private { | ||
122 | struct net_device_stats stats; | ||
123 | }; | ||
124 | |||
125 | struct net_device *xpnet_device; | ||
126 | |||
127 | /* | ||
128 | * When we are notified of other partitions activating, we add them to | ||
129 | * our bitmask of partitions to which we broadcast. | ||
130 | */ | ||
131 | static u64 xpnet_broadcast_partitions; | ||
132 | /* protect above */ | ||
133 | static spinlock_t xpnet_broadcast_lock = SPIN_LOCK_UNLOCKED; | ||
134 | |||
135 | /* | ||
136 | * Since the Block Transfer Engine (BTE) is being used for the transfer | ||
137 | * and it relies upon cache-line size transfers, we need to reserve at | ||
138 | * least one cache-line for head and tail alignment. The BTE is | ||
139 | * limited to 8MB transfers. | ||
140 | * | ||
141 | * Testing has shown that changing MTU to greater than 64KB has no effect | ||
142 | * on TCP as the two sides negotiate a Max Segment Size that is limited | ||
143 | * to 64K. Other protocols may use packets greater than this, but for | ||
144 | * now, the default is 64KB. | ||
145 | */ | ||
146 | #define XPNET_MAX_MTU (0x800000UL - L1_CACHE_BYTES) | ||
147 | /* 32KB has been determined to be the ideal */ | ||
148 | #define XPNET_DEF_MTU (0x8000UL) | ||
149 | |||
150 | |||
151 | /* | ||
152 | * The partition id is encapsulated in the MAC address. The following | ||
153 | * define locates the octet the partid is in. | ||
154 | */ | ||
155 | #define XPNET_PARTID_OCTET 1 | ||
156 | #define XPNET_LICENSE_OCTET 2 | ||
157 | |||
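Concretely, the transmit path (xpnet_dev_hard_start_xmit() below) reads the partid straight out of that octet: 0xff means broadcast, 0 is invalid, anything else selects a single connected partition. A standalone sketch of that decode:

	#include <stdint.h>

	#define PARTID_OCTET	1	/* XPNET_PARTID_OCTET */

	/* Map the partid octet of a destination MAC address onto a bitmask
	 * of partitions, given the mask of currently connected partitions. */
	static uint64_t dest_partition_mask(const uint8_t *dest_mac,
					    uint64_t connected)
	{
		uint8_t octet = dest_mac[PARTID_OCTET];

		if (octet == 0xff)
			return connected;		/* broadcast */
		if (octet == 0 || octet >= 64)
			return 0;			/* not a valid partid */
		return connected & (1ULL << (octet - 1));
	}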
158 | |||
159 | /* | ||
160 | * Define the XPNET debug device structure that is to be used with dev_dbg(), | ||
161 | * dev_err(), dev_warn(), and dev_info(). | ||
162 | */ | ||
163 | struct device_driver xpnet_dbg_name = { | ||
164 | .name = "xpnet" | ||
165 | }; | ||
166 | |||
167 | struct device xpnet_dbg_subname = { | ||
168 | .bus_id = {0}, /* set to "" */ | ||
169 | .driver = &xpnet_dbg_name | ||
170 | }; | ||
171 | |||
172 | struct device *xpnet = &xpnet_dbg_subname; | ||
173 | |||
174 | /* | ||
175 | * Packet was received by XPC and forwarded to us. | ||
176 | */ | ||
177 | static void | ||
178 | xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg) | ||
179 | { | ||
180 | struct sk_buff *skb; | ||
181 | bte_result_t bret; | ||
182 | struct xpnet_dev_private *priv = | ||
183 | (struct xpnet_dev_private *) xpnet_device->priv; | ||
184 | |||
185 | |||
186 | if (!XPNET_VALID_MSG(msg)) { | ||
187 | /* | ||
188 | * Packet with a different XPC version. Ignore. | ||
189 | */ | ||
190 | xpc_received(partid, channel, (void *) msg); | ||
191 | |||
192 | priv->stats.rx_errors++; | ||
193 | |||
194 | return; | ||
195 | } | ||
196 | dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size, | ||
197 | msg->leadin_ignore, msg->tailout_ignore); | ||
198 | |||
199 | |||
200 | /* reserve an extra cache line */ | ||
201 | skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES); | ||
202 | if (!skb) { | ||
203 | dev_err(xpnet, "failed on dev_alloc_skb(%d)\n", | ||
204 | msg->size + L1_CACHE_BYTES); | ||
205 | |||
206 | xpc_received(partid, channel, (void *) msg); | ||
207 | |||
208 | priv->stats.rx_errors++; | ||
209 | |||
210 | return; | ||
211 | } | ||
212 | |||
213 | /* | ||
214 | * The allocated skb has some reserved space. | ||
215 | * In order to use bte_copy, we need to get the | ||
216 | * skb->data pointer moved forward. | ||
217 | */ | ||
218 | skb_reserve(skb, (L1_CACHE_BYTES - ((u64)skb->data & | ||
219 | (L1_CACHE_BYTES - 1)) + | ||
220 | msg->leadin_ignore)); | ||
221 | |||
222 | /* | ||
223 | * Update the tail pointer to indicate data actually | ||
224 | * transferred. | ||
225 | */ | ||
226 | skb_put(skb, (msg->size - msg->leadin_ignore - msg->tailout_ignore)); | ||
227 | |||
228 | /* | ||
229 | * Move the data over from the other side. | ||
230 | */ | ||
231 | if ((XPNET_VERSION_MINOR(msg->version) == 1) && | ||
232 | (msg->embedded_bytes != 0)) { | ||
233 | dev_dbg(xpnet, "copying embedded message. memcpy(0x%p, 0x%p, " | ||
234 | "%lu)\n", skb->data, &msg->data, | ||
235 | (size_t) msg->embedded_bytes); | ||
236 | |||
237 | memcpy(skb->data, &msg->data, (size_t) msg->embedded_bytes); | ||
238 | } else { | ||
239 | dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t" | ||
240 | "bte_copy(0x%p, 0x%p, %hu)\n", (void *)msg->buf_pa, | ||
241 | (void *)__pa((u64)skb->data & ~(L1_CACHE_BYTES - 1)), | ||
242 | msg->size); | ||
243 | |||
244 | bret = bte_copy(msg->buf_pa, | ||
245 | __pa((u64)skb->data & ~(L1_CACHE_BYTES - 1)), | ||
246 | msg->size, (BTE_NOTIFY | BTE_WACQUIRE), NULL); | ||
247 | |||
248 | if (bret != BTE_SUCCESS) { | ||
249 | // >>> Need better way of cleaning skb. Currently skb | ||
250 | // >>> appears in_use and we can't just call | ||
251 | // >>> dev_kfree_skb. | ||
252 | dev_err(xpnet, "bte_copy(0x%p, 0x%p, 0x%hx) returned " | ||
253 | "error=0x%x\n", (void *)msg->buf_pa, | ||
254 | (void *)__pa((u64)skb->data & | ||
255 | ~(L1_CACHE_BYTES - 1)), | ||
256 | msg->size, bret); | ||
257 | |||
258 | xpc_received(partid, channel, (void *) msg); | ||
259 | |||
260 | priv->stats.rx_errors++; | ||
261 | |||
262 | return; | ||
263 | } | ||
264 | } | ||
265 | |||
266 | dev_dbg(xpnet, "<skb->head=0x%p skb->data=0x%p skb->tail=0x%p " | ||
267 | "skb->end=0x%p skb->len=%d\n", (void *) skb->head, | ||
268 | (void *) skb->data, (void *) skb->tail, (void *) skb->end, | ||
269 | skb->len); | ||
270 | |||
271 | skb->dev = xpnet_device; | ||
272 | skb->protocol = eth_type_trans(skb, xpnet_device); | ||
273 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
274 | |||
275 | dev_dbg(xpnet, "passing skb to network layer; \n\tskb->head=0x%p " | ||
276 | "skb->data=0x%p skb->tail=0x%p skb->end=0x%p skb->len=%d\n", | ||
277 | (void *) skb->head, (void *) skb->data, (void *) skb->tail, | ||
278 | (void *) skb->end, skb->len); | ||
279 | |||
280 | |||
281 | xpnet_device->last_rx = jiffies; | ||
282 | priv->stats.rx_packets++; | ||
283 | priv->stats.rx_bytes += skb->len + ETH_HLEN; | ||
284 | |||
285 | netif_rx_ni(skb); | ||
286 | xpc_received(partid, channel, (void *) msg); | ||
287 | } | ||
288 | |||
289 | |||
290 | /* | ||
291 | * This is the handler which XPC calls during any sort of change in | ||
292 | * state or message reception on a connection. | ||
293 | */ | ||
294 | static void | ||
295 | xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel, | ||
296 | void *data, void *key) | ||
297 | { | ||
298 | long bp; | ||
299 | |||
300 | |||
301 | DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); | ||
302 | DBUG_ON(channel != XPC_NET_CHANNEL); | ||
303 | |||
304 | switch(reason) { | ||
305 | case xpcMsgReceived: /* message received */ | ||
306 | DBUG_ON(data == NULL); | ||
307 | |||
308 | xpnet_receive(partid, channel, (struct xpnet_message *) data); | ||
309 | break; | ||
310 | |||
311 | case xpcConnected: /* connection completed to a partition */ | ||
312 | spin_lock_bh(&xpnet_broadcast_lock); | ||
313 | xpnet_broadcast_partitions |= 1UL << (partid -1 ); | ||
314 | bp = xpnet_broadcast_partitions; | ||
315 | spin_unlock_bh(&xpnet_broadcast_lock); | ||
316 | |||
317 | netif_carrier_on(xpnet_device); | ||
318 | |||
319 | dev_dbg(xpnet, "%s connection created to partition %d; " | ||
320 | "xpnet_broadcast_partitions=0x%lx\n", | ||
321 | xpnet_device->name, partid, bp); | ||
322 | break; | ||
323 | |||
324 | default: | ||
325 | spin_lock_bh(&xpnet_broadcast_lock); | ||
326 | xpnet_broadcast_partitions &= ~(1UL << (partid -1 )); | ||
327 | bp = xpnet_broadcast_partitions; | ||
328 | spin_unlock_bh(&xpnet_broadcast_lock); | ||
329 | |||
330 | if (bp == 0) { | ||
331 | netif_carrier_off(xpnet_device); | ||
332 | } | ||
333 | |||
334 | dev_dbg(xpnet, "%s disconnected from partition %d; " | ||
335 | "xpnet_broadcast_partitions=0x%lx\n", | ||
336 | xpnet_device->name, partid, bp); | ||
337 | break; | ||
338 | |||
339 | } | ||
340 | } | ||
341 | |||
342 | |||
343 | static int | ||
344 | xpnet_dev_open(struct net_device *dev) | ||
345 | { | ||
346 | enum xpc_retval ret; | ||
347 | |||
348 | |||
349 | dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %d, " | ||
350 | "%d)\n", XPC_NET_CHANNEL, xpnet_connection_activity, | ||
351 | XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, XPNET_MAX_KTHREADS, | ||
352 | XPNET_MAX_IDLE_KTHREADS); | ||
353 | |||
354 | ret = xpc_connect(XPC_NET_CHANNEL, xpnet_connection_activity, NULL, | ||
355 | XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, | ||
356 | XPNET_MAX_KTHREADS, XPNET_MAX_IDLE_KTHREADS); | ||
357 | if (ret != xpcSuccess) { | ||
358 | dev_err(xpnet, "ifconfig up of %s failed on XPC connect, " | ||
359 | "ret=%d\n", dev->name, ret); | ||
360 | |||
361 | return -ENOMEM; | ||
362 | } | ||
363 | |||
364 | dev_dbg(xpnet, "ifconfig up of %s; XPC connected\n", dev->name); | ||
365 | |||
366 | return 0; | ||
367 | } | ||
368 | |||
369 | |||
370 | static int | ||
371 | xpnet_dev_stop(struct net_device *dev) | ||
372 | { | ||
373 | xpc_disconnect(XPC_NET_CHANNEL); | ||
374 | |||
375 | dev_dbg(xpnet, "ifconfig down of %s; XPC disconnected\n", dev->name); | ||
376 | |||
377 | return 0; | ||
378 | } | ||
379 | |||
380 | |||
381 | static int | ||
382 | xpnet_dev_change_mtu(struct net_device *dev, int new_mtu) | ||
383 | { | ||
384 | /* 68 comes from min TCP+IP+MAC header */ | ||
385 | if ((new_mtu < 68) || (new_mtu > XPNET_MAX_MTU)) { | ||
386 | dev_err(xpnet, "ifconfig %s mtu %d failed; value must be " | ||
387 | "between 68 and %ld\n", dev->name, new_mtu, | ||
388 | XPNET_MAX_MTU); | ||
389 | return -EINVAL; | ||
390 | } | ||
391 | |||
392 | dev->mtu = new_mtu; | ||
393 | dev_dbg(xpnet, "ifconfig %s mtu set to %d\n", dev->name, new_mtu); | ||
394 | return 0; | ||
395 | } | ||
396 | |||
397 | |||
398 | /* | ||
399 | * Required for the net_device structure. | ||
400 | */ | ||
401 | static int | ||
402 | xpnet_dev_set_config(struct net_device *dev, struct ifmap *new_map) | ||
403 | { | ||
404 | return 0; | ||
405 | } | ||
406 | |||
407 | |||
408 | /* | ||
409 | * Return statistics to the caller. | ||
410 | */ | ||
411 | static struct net_device_stats * | ||
412 | xpnet_dev_get_stats(struct net_device *dev) | ||
413 | { | ||
414 | struct xpnet_dev_private *priv; | ||
415 | |||
416 | |||
417 | priv = (struct xpnet_dev_private *) dev->priv; | ||
418 | |||
419 | return &priv->stats; | ||
420 | } | ||
421 | |||
422 | |||
423 | /* | ||
424 | * Notification that the other end has received the message and | ||
425 | * DMA'd the skb information. At this point, they are done with | ||
426 | * our side. When all recipients are done processing, we | ||
427 | * release the skb and then release our pending message structure. | ||
428 | */ | ||
429 | static void | ||
430 | xpnet_send_completed(enum xpc_retval reason, partid_t partid, int channel, | ||
431 | void *__qm) | ||
432 | { | ||
433 | struct xpnet_pending_msg *queued_msg = | ||
434 | (struct xpnet_pending_msg *) __qm; | ||
435 | |||
436 | |||
437 | DBUG_ON(queued_msg == NULL); | ||
438 | |||
439 | dev_dbg(xpnet, "message to %d notified with reason %d\n", | ||
440 | partid, reason); | ||
441 | |||
442 | if (atomic_dec_return(&queued_msg->use_count) == 0) { | ||
443 | dev_dbg(xpnet, "all acks for skb->head=0x%p\n", | ||
444 | (void *) queued_msg->skb->head); | ||
445 | |||
446 | dev_kfree_skb_any(queued_msg->skb); | ||
447 | kfree(queued_msg); | ||
448 | } | ||
449 | } | ||
450 | |||
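The lifetime handling is the usual reference-count pattern: the transmit path holds an initial reference, xpnet_send_completed() drops one per completed send, and whichever decrement brings the count to zero frees the skb and the cookie. Restated with C11 atomics as stand-ins for the kernel's atomic_t and free routines:

	#include <stdatomic.h>
	#include <stdlib.h>

	struct pending {
		atomic_int use_count;
		void *skb;		/* stand-in for struct sk_buff * */
	};

	/* Drop one reference; the last dropper frees everything. */
	static void pending_put(struct pending *p)
	{
		if (atomic_fetch_sub(&p->use_count, 1) == 1) {
			free(p->skb);	/* dev_kfree_skb_any() in the driver */
			free(p);	/* kfree() in the driver */
		}
	}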
451 | |||
452 | /* | ||
453 | * Network layer has formatted a packet (skb) and is ready to place it | ||
454 | * "on the wire". Prepare and send an xpnet_message to all partitions | ||
455 | * which have connected with us and are targets of this packet. | ||
456 | * | ||
457 | * MAC-NOTE: For the XPNET driver, the MAC address contains the | ||
458 | * destination partition_id. If the destination partition id word | ||
459 | * is 0xff, this packet is to broadcast to all partitions. | ||
460 | */ | ||
461 | static int | ||
462 | xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
463 | { | ||
464 | struct xpnet_pending_msg *queued_msg; | ||
465 | enum xpc_retval ret; | ||
466 | struct xpnet_message *msg; | ||
467 | u64 start_addr, end_addr; | ||
468 | long dp; | ||
469 | u8 second_mac_octet; | ||
470 | partid_t dest_partid; | ||
471 | struct xpnet_dev_private *priv; | ||
472 | u16 embedded_bytes; | ||
473 | |||
474 | |||
475 | priv = (struct xpnet_dev_private *) dev->priv; | ||
476 | |||
477 | |||
478 | dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p " | ||
479 | "skb->end=0x%p skb->len=%d\n", (void *) skb->head, | ||
480 | (void *) skb->data, (void *) skb->tail, (void *) skb->end, | ||
481 | skb->len); | ||
482 | |||
483 | |||
484 | /* | ||
485 | * The xpnet_pending_msg tracks how many outstanding | ||
486 | * xpc_send_notifies are relying on this skb. When none | ||
487 | * remain, release the skb. | ||
488 | */ | ||
489 | queued_msg = kmalloc(sizeof(struct xpnet_pending_msg), GFP_ATOMIC); | ||
490 | if (queued_msg == NULL) { | ||
491 | dev_warn(xpnet, "failed to kmalloc %ld bytes; dropping " | ||
492 | "packet\n", sizeof(struct xpnet_pending_msg)); | ||
493 | |||
494 | priv->stats.tx_errors++; | ||
495 | |||
496 | return -ENOMEM; | ||
497 | } | ||
498 | |||
499 | |||
500 | /* get the beginning of the first cacheline and end of last */ | ||
501 | start_addr = ((u64) skb->data & ~(L1_CACHE_BYTES - 1)); | ||
502 | end_addr = L1_CACHE_ALIGN((u64) skb->tail); | ||
503 | |||
504 | /* calculate how many bytes to embed in the XPC message */ | ||
505 | embedded_bytes = 0; | ||
506 | if (unlikely(skb->len <= XPNET_MSG_DATA_MAX)) { | ||
507 | /* skb->data does fit so embed */ | ||
508 | embedded_bytes = skb->len; | ||
509 | } | ||
510 | |||
511 | |||
512 | /* | ||
513 | * Since the send occurs asynchronously, we set the count to one | ||
514 | * and begin sending. Any sends that happen to complete before | ||
515 | * we are done sending will not free the skb. We will be left | ||
516 | * with that task during exit. This also handles the case of | ||
517 | * a packet destined for a partition which is no longer up. | ||
518 | */ | ||
519 | atomic_set(&queued_msg->use_count, 1); | ||
520 | queued_msg->skb = skb; | ||
521 | |||
522 | |||
523 | second_mac_octet = skb->data[XPNET_PARTID_OCTET]; | ||
524 | if (second_mac_octet == 0xff) { | ||
525 | /* we are being asked to broadcast to all partitions */ | ||
526 | dp = xpnet_broadcast_partitions; | ||
527 | } else if (second_mac_octet != 0) { | ||
528 | dp = xpnet_broadcast_partitions & | ||
529 | (1UL << (second_mac_octet - 1)); | ||
530 | } else { | ||
531 | /* 0 is an invalid partid. Ignore */ | ||
532 | dp = 0; | ||
533 | } | ||
534 | dev_dbg(xpnet, "destination Partitions mask (dp) = 0x%lx\n", dp); | ||
535 | |||
536 | /* | ||
537 | * If we wanted to allow promiscuous mode to work like an | ||
538 | * unswitched network, this would be a good point to OR in a | ||
539 | * mask of partitions which should be receiving all packets. | ||
540 | */ | ||
541 | |||
542 | /* | ||
543 | * Main send loop. | ||
544 | */ | ||
545 | for (dest_partid = 1; dp && dest_partid < XP_MAX_PARTITIONS; | ||
546 | dest_partid++) { | ||
547 | |||
548 | |||
549 | if (!(dp & (1UL << (dest_partid - 1)))) { | ||
550 | /* not destined for this partition */ | ||
551 | continue; | ||
552 | } | ||
553 | |||
554 | /* remove this partition from the destinations mask */ | ||
555 | dp &= ~(1UL << (dest_partid - 1)); | ||
556 | |||
557 | |||
558 | /* found a partition to send to */ | ||
559 | |||
560 | ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL, | ||
561 | XPC_NOWAIT, (void **)&msg); | ||
562 | if (unlikely(ret != xpcSuccess)) { | ||
563 | continue; | ||
564 | } | ||
565 | |||
566 | msg->embedded_bytes = embedded_bytes; | ||
567 | if (unlikely(embedded_bytes != 0)) { | ||
568 | msg->version = XPNET_VERSION_EMBED; | ||
569 | dev_dbg(xpnet, "calling memcpy(0x%p, 0x%p, 0x%lx)\n", | ||
570 | &msg->data, skb->data, (size_t) embedded_bytes); | ||
571 | memcpy(&msg->data, skb->data, (size_t) embedded_bytes); | ||
572 | } else { | ||
573 | msg->version = XPNET_VERSION; | ||
574 | } | ||
575 | msg->magic = XPNET_MAGIC; | ||
576 | msg->size = end_addr - start_addr; | ||
577 | msg->leadin_ignore = (u64) skb->data - start_addr; | ||
578 | msg->tailout_ignore = end_addr - (u64) skb->tail; | ||
579 | msg->buf_pa = __pa(start_addr); | ||
580 | |||
581 | dev_dbg(xpnet, "sending XPC message to %d:%d\nmsg->buf_pa=" | ||
582 | "0x%lx, msg->size=%u, msg->leadin_ignore=%u, " | ||
583 | "msg->tailout_ignore=%u\n", dest_partid, | ||
584 | XPC_NET_CHANNEL, msg->buf_pa, msg->size, | ||
585 | msg->leadin_ignore, msg->tailout_ignore); | ||
586 | |||
587 | |||
588 | atomic_inc(&queued_msg->use_count); | ||
589 | |||
590 | ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, msg, | ||
591 | xpnet_send_completed, queued_msg); | ||
592 | if (unlikely(ret != xpcSuccess)) { | ||
593 | atomic_dec(&queued_msg->use_count); | ||
594 | continue; | ||
595 | } | ||
596 | |||
597 | } | ||
598 | |||
599 | if (atomic_dec_return(&queued_msg->use_count) == 0) { | ||
600 | dev_dbg(xpnet, "no partitions to receive packet destined for " | ||
601 | "%d\n", dest_partid); | ||
602 | |||
603 | |||
604 | dev_kfree_skb(skb); | ||
605 | kfree(queued_msg); | ||
606 | } | ||
607 | |||
608 | priv->stats.tx_packets++; | ||
609 | priv->stats.tx_bytes += skb->len; | ||
610 | |||
611 | return 0; | ||
612 | } | ||
613 | |||
614 | |||
615 | /* | ||
616 | * Deal with transmit timeouts coming from the network layer. | ||
617 | */ | ||
618 | static void | ||
619 | xpnet_dev_tx_timeout (struct net_device *dev) | ||
620 | { | ||
621 | struct xpnet_dev_private *priv; | ||
622 | |||
623 | |||
624 | priv = (struct xpnet_dev_private *) dev->priv; | ||
625 | |||
626 | priv->stats.tx_errors++; | ||
627 | return; | ||
628 | } | ||
629 | |||
630 | |||
631 | static int __init | ||
632 | xpnet_init(void) | ||
633 | { | ||
634 | int i; | ||
635 | u32 license_num; | ||
636 | int result = -ENOMEM; | ||
637 | |||
638 | |||
639 | dev_info(xpnet, "registering network device %s\n", XPNET_DEVICE_NAME); | ||
640 | |||
641 | /* | ||
642 | * use ether_setup() to init the majority of our device | ||
643 | * structure and then override the necessary pieces. | ||
644 | */ | ||
645 | xpnet_device = alloc_netdev(sizeof(struct xpnet_dev_private), | ||
646 | XPNET_DEVICE_NAME, ether_setup); | ||
647 | if (xpnet_device == NULL) { | ||
648 | return -ENOMEM; | ||
649 | } | ||
650 | |||
651 | netif_carrier_off(xpnet_device); | ||
652 | |||
653 | xpnet_device->mtu = XPNET_DEF_MTU; | ||
654 | xpnet_device->change_mtu = xpnet_dev_change_mtu; | ||
655 | xpnet_device->open = xpnet_dev_open; | ||
656 | xpnet_device->get_stats = xpnet_dev_get_stats; | ||
657 | xpnet_device->stop = xpnet_dev_stop; | ||
658 | xpnet_device->hard_start_xmit = xpnet_dev_hard_start_xmit; | ||
659 | xpnet_device->tx_timeout = xpnet_dev_tx_timeout; | ||
660 | xpnet_device->set_config = xpnet_dev_set_config; | ||
661 | |||
662 | /* | ||
663 | * Multicast assumes the LSB of the first octet is set for multicast | ||
664 | * MAC addresses. We chose the first octet of the MAC to be unlikely | ||
665 | * to collide with any vendor's officially issued MAC. | ||
666 | */ | ||
667 | xpnet_device->dev_addr[0] = 0xfe; | ||
668 | xpnet_device->dev_addr[XPNET_PARTID_OCTET] = sn_partition_id; | ||
669 | license_num = sn_partition_serial_number_val(); | ||
670 | for (i = 3; i >= 0; i--) { | ||
671 | xpnet_device->dev_addr[XPNET_LICENSE_OCTET + i] = | ||
672 | license_num & 0xff; | ||
673 | license_num = license_num >> 8; | ||
674 | } | ||
675 | |||
676 | /* | ||
677 | * ether_setup() sets this to a multicast device. We are | ||
678 | * really not supporting multicast at this time. | ||
679 | */ | ||
680 | xpnet_device->flags &= ~IFF_MULTICAST; | ||
681 | |||
682 | /* | ||
683 | * No need to checksum as it is a DMA transfer. The BTE will | ||
684 | * report an error if the data is not retrievable and the | ||
685 | * packet will be dropped. | ||
686 | */ | ||
687 | xpnet_device->features = NETIF_F_NO_CSUM; | ||
688 | |||
689 | result = register_netdev(xpnet_device); | ||
690 | if (result != 0) { | ||
691 | free_netdev(xpnet_device); | ||
692 | } | ||
693 | |||
694 | return result; | ||
695 | } | ||
696 | module_init(xpnet_init); | ||
697 | |||
698 | |||
699 | static void __exit | ||
700 | xpnet_exit(void) | ||
701 | { | ||
702 | dev_info(xpnet, "unregistering network device %s\n", | ||
703 | xpnet_device[0].name); | ||
704 | |||
705 | unregister_netdev(xpnet_device); | ||
706 | |||
707 | free_netdev(xpnet_device); | ||
708 | } | ||
709 | module_exit(xpnet_exit); | ||
710 | |||
711 | |||
712 | MODULE_AUTHOR("Silicon Graphics, Inc."); | ||
713 | MODULE_DESCRIPTION("Cross Partition Network adapter (XPNET)"); | ||
714 | MODULE_LICENSE("GPL"); | ||
715 | |||
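
For readers following the xpnet transmit path above: the destination partition is carried in the MAC address itself, so the mask of target partitions can be derived without any routing state. The standalone sketch below (not part of the patch) loosely mirrors the logic of xpnet_dev_hard_start_xmit(); XPNET_PARTID_OCTET is defined earlier in xpnet.c and is assumed here to be 1 (the second octet), and the 64-partition limit comes from the 64-bit broadcast mask the driver uses.

/*
 * Standalone sketch of the XPNET destination-mask derivation; loosely
 * mirrors xpnet_dev_hard_start_xmit() above.  Assumes XPNET_PARTID_OCTET
 * is 1 and at most 64 partitions, so the mask fits in a 64-bit word.
 */
#include <stdint.h>
#include <stdio.h>

#define XPNET_PARTID_OCTET	1	/* second octet of the MAC address (assumed) */
#define XP_MAX_PARTITIONS	64

static uint64_t xpnet_broadcast_partitions;	/* partitions currently connected */

static uint64_t xpnet_dest_mask(const uint8_t *dest_mac)
{
	uint8_t octet = dest_mac[XPNET_PARTID_OCTET];

	if (octet == 0xff)			/* broadcast to every connected partition */
		return xpnet_broadcast_partitions;
	if (octet == 0 || octet > XP_MAX_PARTITIONS)
		return 0;			/* partid 0 is invalid; reject out-of-range too */
	return xpnet_broadcast_partitions & (1ULL << (octet - 1));
}

int main(void)
{
	uint8_t mac[6] = { 0xfe, 0x03, 0x12, 0x34, 0x56, 0x78 };

	xpnet_broadcast_partitions = (1ULL << 2) | (1ULL << 4);	/* partitions 3 and 5 up */
	printf("mask for partid 3 = 0x%llx\n",
	       (unsigned long long)xpnet_dest_mask(mac));
	return 0;
}

The per-partition loop in the driver then walks this mask, clearing one bit per xpc_send_notify() it queues.
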
diff --git a/arch/ia64/sn/pci/Makefile b/arch/ia64/sn/pci/Makefile index b5dca0097a8e..2f915bce25f9 100644 --- a/arch/ia64/sn/pci/Makefile +++ b/arch/ia64/sn/pci/Makefile | |||
@@ -7,4 +7,4 @@ | |||
7 | # | 7 | # |
8 | # Makefile for the sn pci general routines. | 8 | # Makefile for the sn pci general routines. |
9 | 9 | ||
10 | obj-y := pci_dma.o pcibr/ | 10 | obj-y := pci_dma.o tioca_provider.o pcibr/ |
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c index f680824f819d..5da9bdbde7cb 100644 --- a/arch/ia64/sn/pci/pci_dma.c +++ b/arch/ia64/sn/pci/pci_dma.c | |||
@@ -12,9 +12,8 @@ | |||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <asm/dma.h> | 13 | #include <asm/dma.h> |
14 | #include <asm/sn/sn_sal.h> | 14 | #include <asm/sn/sn_sal.h> |
15 | #include "pci/pcibus_provider_defs.h" | 15 | #include <asm/sn/pcibus_provider_defs.h> |
16 | #include "pci/pcidev.h" | 16 | #include <asm/sn/pcidev.h> |
17 | #include "pci/pcibr_provider.h" | ||
18 | 17 | ||
19 | #define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset) | 18 | #define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset) |
20 | #define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG)) | 19 | #define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG)) |
@@ -79,7 +78,8 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size, | |||
79 | { | 78 | { |
80 | void *cpuaddr; | 79 | void *cpuaddr; |
81 | unsigned long phys_addr; | 80 | unsigned long phys_addr; |
82 | struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev)); | 81 | struct pci_dev *pdev = to_pci_dev(dev); |
82 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | ||
83 | 83 | ||
84 | BUG_ON(dev->bus != &pci_bus_type); | 84 | BUG_ON(dev->bus != &pci_bus_type); |
85 | 85 | ||
@@ -102,8 +102,7 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size, | |||
102 | * resources. | 102 | * resources. |
103 | */ | 103 | */ |
104 | 104 | ||
105 | *dma_handle = pcibr_dma_map(pcidev_info, phys_addr, size, | 105 | *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size); |
106 | SN_PCIDMA_CONSISTENT); | ||
107 | if (!*dma_handle) { | 106 | if (!*dma_handle) { |
108 | printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__); | 107 | printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__); |
109 | free_pages((unsigned long)cpuaddr, get_order(size)); | 108 | free_pages((unsigned long)cpuaddr, get_order(size)); |
@@ -127,11 +126,12 @@ EXPORT_SYMBOL(sn_dma_alloc_coherent); | |||
127 | void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, | 126 | void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, |
128 | dma_addr_t dma_handle) | 127 | dma_addr_t dma_handle) |
129 | { | 128 | { |
130 | struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev)); | 129 | struct pci_dev *pdev = to_pci_dev(dev); |
130 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | ||
131 | 131 | ||
132 | BUG_ON(dev->bus != &pci_bus_type); | 132 | BUG_ON(dev->bus != &pci_bus_type); |
133 | 133 | ||
134 | pcibr_dma_unmap(pcidev_info, dma_handle, 0); | 134 | provider->dma_unmap(pdev, dma_handle, 0); |
135 | free_pages((unsigned long)cpu_addr, get_order(size)); | 135 | free_pages((unsigned long)cpu_addr, get_order(size)); |
136 | } | 136 | } |
137 | EXPORT_SYMBOL(sn_dma_free_coherent); | 137 | EXPORT_SYMBOL(sn_dma_free_coherent); |
@@ -159,12 +159,13 @@ dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size, | |||
159 | { | 159 | { |
160 | dma_addr_t dma_addr; | 160 | dma_addr_t dma_addr; |
161 | unsigned long phys_addr; | 161 | unsigned long phys_addr; |
162 | struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev)); | 162 | struct pci_dev *pdev = to_pci_dev(dev); |
163 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | ||
163 | 164 | ||
164 | BUG_ON(dev->bus != &pci_bus_type); | 165 | BUG_ON(dev->bus != &pci_bus_type); |
165 | 166 | ||
166 | phys_addr = __pa(cpu_addr); | 167 | phys_addr = __pa(cpu_addr); |
167 | dma_addr = pcibr_dma_map(pcidev_info, phys_addr, size, 0); | 168 | dma_addr = provider->dma_map(pdev, phys_addr, size); |
168 | if (!dma_addr) { | 169 | if (!dma_addr) { |
169 | printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__); | 170 | printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__); |
170 | return 0; | 171 | return 0; |
@@ -187,10 +188,12 @@ EXPORT_SYMBOL(sn_dma_map_single); | |||
187 | void sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | 188 | void sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, |
188 | int direction) | 189 | int direction) |
189 | { | 190 | { |
190 | struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev)); | 191 | struct pci_dev *pdev = to_pci_dev(dev); |
192 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | ||
191 | 193 | ||
192 | BUG_ON(dev->bus != &pci_bus_type); | 194 | BUG_ON(dev->bus != &pci_bus_type); |
193 | pcibr_dma_unmap(pcidev_info, dma_addr, direction); | 195 | |
196 | provider->dma_unmap(pdev, dma_addr, direction); | ||
194 | } | 197 | } |
195 | EXPORT_SYMBOL(sn_dma_unmap_single); | 198 | EXPORT_SYMBOL(sn_dma_unmap_single); |
196 | 199 | ||
@@ -207,12 +210,13 @@ void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sg, | |||
207 | int nhwentries, int direction) | 210 | int nhwentries, int direction) |
208 | { | 211 | { |
209 | int i; | 212 | int i; |
210 | struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev)); | 213 | struct pci_dev *pdev = to_pci_dev(dev); |
214 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | ||
211 | 215 | ||
212 | BUG_ON(dev->bus != &pci_bus_type); | 216 | BUG_ON(dev->bus != &pci_bus_type); |
213 | 217 | ||
214 | for (i = 0; i < nhwentries; i++, sg++) { | 218 | for (i = 0; i < nhwentries; i++, sg++) { |
215 | pcibr_dma_unmap(pcidev_info, sg->dma_address, direction); | 219 | provider->dma_unmap(pdev, sg->dma_address, direction); |
216 | sg->dma_address = (dma_addr_t) NULL; | 220 | sg->dma_address = (dma_addr_t) NULL; |
217 | sg->dma_length = 0; | 221 | sg->dma_length = 0; |
218 | } | 222 | } |
@@ -233,7 +237,8 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | |||
233 | { | 237 | { |
234 | unsigned long phys_addr; | 238 | unsigned long phys_addr; |
235 | struct scatterlist *saved_sg = sg; | 239 | struct scatterlist *saved_sg = sg; |
236 | struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev)); | 240 | struct pci_dev *pdev = to_pci_dev(dev); |
241 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | ||
237 | int i; | 242 | int i; |
238 | 243 | ||
239 | BUG_ON(dev->bus != &pci_bus_type); | 244 | BUG_ON(dev->bus != &pci_bus_type); |
@@ -243,8 +248,8 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | |||
243 | */ | 248 | */ |
244 | for (i = 0; i < nhwentries; i++, sg++) { | 249 | for (i = 0; i < nhwentries; i++, sg++) { |
245 | phys_addr = SG_ENT_PHYS_ADDRESS(sg); | 250 | phys_addr = SG_ENT_PHYS_ADDRESS(sg); |
246 | sg->dma_address = pcibr_dma_map(pcidev_info, phys_addr, | 251 | sg->dma_address = provider->dma_map(pdev, |
247 | sg->length, 0); | 252 | phys_addr, sg->length); |
248 | 253 | ||
249 | if (!sg->dma_address) { | 254 | if (!sg->dma_address) { |
250 | printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__); | 255 | printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__); |
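
The pci_dma.c changes above replace direct calls into the PIC bridge code (pcibr_dma_map/pcibr_dma_unmap) with an indirection through the bus provider attached to the device's bridge ASIC. The struct itself lives in <asm/sn/pcibus_provider_defs.h>, which is not shown in this diff; the sketch below is an assumed outline of that interface and of the dispatch pattern, built from the four operations the provider tables later in this patch populate (dma_map, dma_map_consistent, dma_unmap, bus_fixup). Field order and any additional members are guesses.

/*
 * Assumed outline of the SN PCI bus provider interface used above.
 * The real definitions are in <asm/sn/pcibus_provider_defs.h>; this is
 * a standalone sketch of the dispatch pattern, not the actual header.
 */
#include <stddef.h>

struct pci_dev;				/* opaque here */
struct pcibus_bussoft;
typedef unsigned long dma_addr_t;	/* kernel type, redeclared for the sketch */

struct sn_pcibus_provider {
	dma_addr_t	(*dma_map)(struct pci_dev *, unsigned long, size_t);
	dma_addr_t	(*dma_map_consistent)(struct pci_dev *, unsigned long, size_t);
	void		(*dma_unmap)(struct pci_dev *, dma_addr_t, int);
	void *		(*bus_fixup)(struct pcibus_bussoft *);
};

/* one slot per bridge ASIC type (PIC, TIOCP, TIOCA, ...) */
extern struct sn_pcibus_provider *sn_pci_provider[];

/* Generic DMA code looks up the provider for the device's bus and dispatches. */
static dma_addr_t sketch_map_single(struct pci_dev *pdev, int asic_type,
				    unsigned long phys_addr, size_t size)
{
	/* the real code uses SN_PCIDEV_BUSPROVIDER(pdev) for this lookup */
	struct sn_pcibus_provider *provider = sn_pci_provider[asic_type];

	return provider->dma_map(pdev, phys_addr, size);
}

This keeps sn_dma_map_single() and friends bridge-agnostic: PIC/TIOCP register their table in pcibr_init_provider() and TIO CA registers its own in tioca_init_provider(), both shown later in this patch.
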
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_ate.c b/arch/ia64/sn/pci/pcibr/pcibr_ate.c index 9d6854666f9b..0e47bce85f2d 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_ate.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_ate.c | |||
@@ -8,8 +8,8 @@ | |||
8 | 8 | ||
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
10 | #include <asm/sn/sn_sal.h> | 10 | #include <asm/sn/sn_sal.h> |
11 | #include "pci/pcibus_provider_defs.h" | 11 | #include <asm/sn/pcibus_provider_defs.h> |
12 | #include "pci/pcidev.h" | 12 | #include <asm/sn/pcidev.h> |
13 | #include "pci/pcibr_provider.h" | 13 | #include "pci/pcibr_provider.h" |
14 | 14 | ||
15 | int pcibr_invalidate_ate = 0; /* by default don't invalidate ATE on free */ | 15 | int pcibr_invalidate_ate = 0; /* by default don't invalidate ATE on free */ |
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c index b1d66ac065c8..64af2b2c1787 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c | |||
@@ -12,8 +12,8 @@ | |||
12 | #include <asm/sn/geo.h> | 12 | #include <asm/sn/geo.h> |
13 | #include "xtalk/xwidgetdev.h" | 13 | #include "xtalk/xwidgetdev.h" |
14 | #include "xtalk/hubdev.h" | 14 | #include "xtalk/hubdev.h" |
15 | #include "pci/pcibus_provider_defs.h" | 15 | #include <asm/sn/pcibus_provider_defs.h> |
16 | #include "pci/pcidev.h" | 16 | #include <asm/sn/pcidev.h> |
17 | #include "pci/tiocp.h" | 17 | #include "pci/tiocp.h" |
18 | #include "pci/pic.h" | 18 | #include "pci/pic.h" |
19 | #include "pci/pcibr_provider.h" | 19 | #include "pci/pcibr_provider.h" |
@@ -40,7 +40,7 @@ extern int sn_ioif_inited; | |||
40 | * we do not have to allocate entries in the PMU. | 40 | * we do not have to allocate entries in the PMU. |
41 | */ | 41 | */ |
42 | 42 | ||
43 | static uint64_t | 43 | static dma_addr_t |
44 | pcibr_dmamap_ate32(struct pcidev_info *info, | 44 | pcibr_dmamap_ate32(struct pcidev_info *info, |
45 | uint64_t paddr, size_t req_size, uint64_t flags) | 45 | uint64_t paddr, size_t req_size, uint64_t flags) |
46 | { | 46 | { |
@@ -109,7 +109,7 @@ pcibr_dmamap_ate32(struct pcidev_info *info, | |||
109 | return pci_addr; | 109 | return pci_addr; |
110 | } | 110 | } |
111 | 111 | ||
112 | static uint64_t | 112 | static dma_addr_t |
113 | pcibr_dmatrans_direct64(struct pcidev_info * info, uint64_t paddr, | 113 | pcibr_dmatrans_direct64(struct pcidev_info * info, uint64_t paddr, |
114 | uint64_t dma_attributes) | 114 | uint64_t dma_attributes) |
115 | { | 115 | { |
@@ -141,7 +141,7 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, uint64_t paddr, | |||
141 | 141 | ||
142 | } | 142 | } |
143 | 143 | ||
144 | static uint64_t | 144 | static dma_addr_t |
145 | pcibr_dmatrans_direct32(struct pcidev_info * info, | 145 | pcibr_dmatrans_direct32(struct pcidev_info * info, |
146 | uint64_t paddr, size_t req_size, uint64_t flags) | 146 | uint64_t paddr, size_t req_size, uint64_t flags) |
147 | { | 147 | { |
@@ -180,11 +180,11 @@ pcibr_dmatrans_direct32(struct pcidev_info * info, | |||
180 | * DMA mappings for Direct 64 and 32 do not have any DMA maps. | 180 | * DMA mappings for Direct 64 and 32 do not have any DMA maps. |
181 | */ | 181 | */ |
182 | void | 182 | void |
183 | pcibr_dma_unmap(struct pcidev_info *pcidev_info, dma_addr_t dma_handle, | 183 | pcibr_dma_unmap(struct pci_dev *hwdev, dma_addr_t dma_handle, int direction) |
184 | int direction) | ||
185 | { | 184 | { |
186 | struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info-> | 185 | struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev); |
187 | pdi_pcibus_info; | 186 | struct pcibus_info *pcibus_info = |
187 | (struct pcibus_info *)pcidev_info->pdi_pcibus_info; | ||
188 | 188 | ||
189 | if (IS_PCI32_MAPPED(dma_handle)) { | 189 | if (IS_PCI32_MAPPED(dma_handle)) { |
190 | int ate_index; | 190 | int ate_index; |
@@ -301,7 +301,7 @@ void sn_dma_flush(uint64_t addr) | |||
301 | spin_lock_irqsave(&((struct sn_flush_device_list *)p)-> | 301 | spin_lock_irqsave(&((struct sn_flush_device_list *)p)-> |
302 | sfdl_flush_lock, flags); | 302 | sfdl_flush_lock, flags); |
303 | 303 | ||
304 | p->sfdl_flush_value = 0; | 304 | *p->sfdl_flush_addr = 0; |
305 | 305 | ||
306 | /* force an interrupt. */ | 306 | /* force an interrupt. */ |
307 | *(volatile uint32_t *)(p->sfdl_force_int_addr) = 1; | 307 | *(volatile uint32_t *)(p->sfdl_force_int_addr) = 1; |
@@ -316,64 +316,63 @@ void sn_dma_flush(uint64_t addr) | |||
316 | } | 316 | } |
317 | 317 | ||
318 | /* | 318 | /* |
319 | * Wrapper DMA interface. Called from pci_dma.c routines. | 319 | * DMA interfaces. Called from pci_dma.c routines. |
320 | */ | 320 | */ |
321 | 321 | ||
322 | uint64_t | 322 | dma_addr_t |
323 | pcibr_dma_map(struct pcidev_info * pcidev_info, unsigned long phys_addr, | 323 | pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size) |
324 | size_t size, unsigned int flags) | ||
325 | { | 324 | { |
326 | dma_addr_t dma_handle; | 325 | dma_addr_t dma_handle; |
327 | struct pci_dev *pcidev = pcidev_info->pdi_linux_pcidev; | 326 | struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev); |
328 | |||
329 | if (flags & SN_PCIDMA_CONSISTENT) { | ||
330 | /* sn_pci_alloc_consistent interfaces */ | ||
331 | if (pcidev->dev.coherent_dma_mask == ~0UL) { | ||
332 | dma_handle = | ||
333 | pcibr_dmatrans_direct64(pcidev_info, phys_addr, | ||
334 | PCI64_ATTR_BAR); | ||
335 | } else { | ||
336 | dma_handle = | ||
337 | (dma_addr_t) pcibr_dmamap_ate32(pcidev_info, | ||
338 | phys_addr, size, | ||
339 | PCI32_ATE_BAR); | ||
340 | } | ||
341 | } else { | ||
342 | /* map_sg/map_single interfaces */ | ||
343 | 327 | ||
344 | /* SN cannot support DMA addresses smaller than 32 bits. */ | 328 | /* SN cannot support DMA addresses smaller than 32 bits. */ |
345 | if (pcidev->dma_mask < 0x7fffffff) { | 329 | if (hwdev->dma_mask < 0x7fffffff) { |
346 | return 0; | 330 | return 0; |
347 | } | 331 | } |
348 | 332 | ||
349 | if (pcidev->dma_mask == ~0UL) { | 333 | if (hwdev->dma_mask == ~0UL) { |
334 | /* | ||
335 | * Handle the most common case: 64 bit cards. This | ||
336 | * call should always succeed. | ||
337 | */ | ||
338 | |||
339 | dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr, | ||
340 | PCI64_ATTR_PREF); | ||
341 | } else { | ||
342 | /* Handle 32-63 bit cards via direct mapping */ | ||
343 | dma_handle = pcibr_dmatrans_direct32(pcidev_info, phys_addr, | ||
344 | size, 0); | ||
345 | if (!dma_handle) { | ||
350 | /* | 346 | /* |
351 | * Handle the most common case: 64 bit cards. This | 347 | * It is a 32 bit card and we cannot do direct mapping, |
352 | * call should always succeed. | 348 | * so we use an ATE. |
353 | */ | 349 | */ |
354 | 350 | ||
355 | dma_handle = | 351 | dma_handle = pcibr_dmamap_ate32(pcidev_info, phys_addr, |
356 | pcibr_dmatrans_direct64(pcidev_info, phys_addr, | 352 | size, PCI32_ATE_PREF); |
357 | PCI64_ATTR_PREF); | ||
358 | } else { | ||
359 | /* Handle 32-63 bit cards via direct mapping */ | ||
360 | dma_handle = | ||
361 | pcibr_dmatrans_direct32(pcidev_info, phys_addr, | ||
362 | size, 0); | ||
363 | if (!dma_handle) { | ||
364 | /* | ||
365 | * It is a 32 bit card and we cannot do direct mapping, | ||
366 | * so we use an ATE. | ||
367 | */ | ||
368 | |||
369 | dma_handle = | ||
370 | pcibr_dmamap_ate32(pcidev_info, phys_addr, | ||
371 | size, PCI32_ATE_PREF); | ||
372 | } | ||
373 | } | 353 | } |
374 | } | 354 | } |
375 | 355 | ||
376 | return dma_handle; | 356 | return dma_handle; |
377 | } | 357 | } |
378 | 358 | ||
359 | dma_addr_t | ||
360 | pcibr_dma_map_consistent(struct pci_dev * hwdev, unsigned long phys_addr, | ||
361 | size_t size) | ||
362 | { | ||
363 | dma_addr_t dma_handle; | ||
364 | struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev); | ||
365 | |||
366 | if (hwdev->dev.coherent_dma_mask == ~0UL) { | ||
367 | dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr, | ||
368 | PCI64_ATTR_BAR); | ||
369 | } else { | ||
370 | dma_handle = (dma_addr_t) pcibr_dmamap_ate32(pcidev_info, | ||
371 | phys_addr, size, | ||
372 | PCI32_ATE_BAR); | ||
373 | } | ||
374 | |||
375 | return dma_handle; | ||
376 | } | ||
377 | |||
379 | EXPORT_SYMBOL(sn_dma_flush); | 378 | EXPORT_SYMBOL(sn_dma_flush); |
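
The rewrite of pcibr_dma_map() above drops the SN_PCIDMA_CONSISTENT flag and splits the old wrapper into a streaming entry point (pcibr_dma_map) and a consistent one (pcibr_dma_map_consistent); which internal mode is used (64-bit direct, 32-bit direct, or a 32-bit ATE mapping) now depends only on the masks a driver has set. The fragment below illustrates that relationship and is not code from this patch; it uses the standard pci_set_dma_mask()/pci_set_consistent_dma_mask() calls of this kernel generation and raw all-ones masks to match the ~0UL comparisons above.

/*
 * Illustration: a driver's DMA masks steer the pcibr paths above.
 * A full 64-bit mask selects the direct-mapped (no ATE) paths; anything
 * smaller falls back to 32-bit direct mapping or, failing that, an ATE.
 */
#include <linux/pci.h>
#include <linux/errno.h>

static int sketch_setup_dma(struct pci_dev *pdev)
{
	/* streaming mappings: pcibr_dma_map() -> pcibr_dmatrans_direct64() */
	if (pci_set_dma_mask(pdev, ~0UL))
		return -EIO;

	/* consistent mappings: pcibr_dma_map_consistent() -> direct64 (BAR attribute) */
	if (pci_set_consistent_dma_mask(pdev, ~0UL))
		return -EIO;

	return 0;
}

With a 32-bit mask instead, the streaming path tries pcibr_dmatrans_direct32() first and only allocates an ATE when direct translation fails, exactly as the restructured code above shows.
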
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c index 92bd278cf7ff..3893999d23d8 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c | |||
@@ -13,8 +13,8 @@ | |||
13 | #include "xtalk/xwidgetdev.h" | 13 | #include "xtalk/xwidgetdev.h" |
14 | #include <asm/sn/geo.h> | 14 | #include <asm/sn/geo.h> |
15 | #include "xtalk/hubdev.h" | 15 | #include "xtalk/hubdev.h" |
16 | #include "pci/pcibus_provider_defs.h" | 16 | #include <asm/sn/pcibus_provider_defs.h> |
17 | #include "pci/pcidev.h" | 17 | #include <asm/sn/pcidev.h> |
18 | #include "pci/pcibr_provider.h" | 18 | #include "pci/pcibr_provider.h" |
19 | #include <asm/sn/addrs.h> | 19 | #include <asm/sn/addrs.h> |
20 | 20 | ||
@@ -168,3 +168,23 @@ void pcibr_change_devices_irq(struct sn_irq_info *sn_irq_info) | |||
168 | pcibr_force_interrupt(sn_irq_info); | 168 | pcibr_force_interrupt(sn_irq_info); |
169 | } | 169 | } |
170 | } | 170 | } |
171 | |||
172 | /* | ||
173 | * Provider entries for PIC/CP | ||
174 | */ | ||
175 | |||
176 | struct sn_pcibus_provider pcibr_provider = { | ||
177 | .dma_map = pcibr_dma_map, | ||
178 | .dma_map_consistent = pcibr_dma_map_consistent, | ||
179 | .dma_unmap = pcibr_dma_unmap, | ||
180 | .bus_fixup = pcibr_bus_fixup, | ||
181 | }; | ||
182 | |||
183 | int | ||
184 | pcibr_init_provider(void) | ||
185 | { | ||
186 | sn_pci_provider[PCIIO_ASIC_TYPE_PIC] = &pcibr_provider; | ||
187 | sn_pci_provider[PCIIO_ASIC_TYPE_TIOCP] = &pcibr_provider; | ||
188 | |||
189 | return 0; | ||
190 | } | ||
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_reg.c b/arch/ia64/sn/pci/pcibr/pcibr_reg.c index 74a74a7d2a13..865c11c3b50a 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_reg.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_reg.c | |||
@@ -8,8 +8,8 @@ | |||
8 | 8 | ||
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
10 | #include <linux/interrupt.h> | 10 | #include <linux/interrupt.h> |
11 | #include "pci/pcibus_provider_defs.h" | 11 | #include <asm/sn/pcibus_provider_defs.h> |
12 | #include "pci/pcidev.h" | 12 | #include <asm/sn/pcidev.h> |
13 | #include "pci/tiocp.h" | 13 | #include "pci/tiocp.h" |
14 | #include "pci/pic.h" | 14 | #include "pci/pic.h" |
15 | #include "pci/pcibr_provider.h" | 15 | #include "pci/pcibr_provider.h" |
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c new file mode 100644 index 000000000000..8dae9eb45456 --- /dev/null +++ b/arch/ia64/sn/pci/tioca_provider.c | |||
@@ -0,0 +1,668 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2003-2005 Silicon Graphics, Inc. All Rights Reserved. | ||
7 | */ | ||
8 | |||
9 | #include <linux/types.h> | ||
10 | #include <linux/interrupt.h> | ||
11 | #include <linux/pci.h> | ||
12 | #include <asm/sn/sn_sal.h> | ||
13 | #include <asm/sn/addrs.h> | ||
14 | #include <asm/sn/pcidev.h> | ||
15 | #include <asm/sn/pcibus_provider_defs.h> | ||
16 | #include <asm/sn/tioca_provider.h> | ||
17 | |||
18 | uint32_t tioca_gart_found; | ||
19 | EXPORT_SYMBOL(tioca_gart_found); /* used by agp-sgi */ | ||
20 | |||
21 | LIST_HEAD(tioca_list); | ||
22 | EXPORT_SYMBOL(tioca_list); /* used by agp-sgi */ | ||
23 | |||
24 | static int tioca_gart_init(struct tioca_kernel *); | ||
25 | |||
26 | /** | ||
27 | * tioca_gart_init - Initialize SGI TIOCA GART | ||
28 | * @tioca_kern: ptr to the kernel struct identifying the CA to initialize | ||
29 | * | ||
30 | * If the indicated tioca has devices present, initialize its associated | ||
31 | * GART MMRs and kernel memory. | ||
32 | */ | ||
33 | static int | ||
34 | tioca_gart_init(struct tioca_kernel *tioca_kern) | ||
35 | { | ||
36 | uint64_t ap_reg; | ||
37 | uint64_t offset; | ||
38 | struct page *tmp; | ||
39 | struct tioca_common *tioca_common; | ||
40 | volatile struct tioca *ca_base; | ||
41 | |||
42 | tioca_common = tioca_kern->ca_common; | ||
43 | ca_base = (struct tioca *)tioca_common->ca_common.bs_base; | ||
44 | |||
45 | if (list_empty(tioca_kern->ca_devices)) | ||
46 | return 0; | ||
47 | |||
48 | ap_reg = 0; | ||
49 | |||
50 | /* | ||
51 | * Validate aperture size | ||
52 | */ | ||
53 | |||
54 | switch (CA_APERATURE_SIZE >> 20) { | ||
55 | case 4: | ||
56 | ap_reg |= (0x3ff << CA_GART_AP_SIZE_SHFT); /* 4MB */ | ||
57 | break; | ||
58 | case 8: | ||
59 | ap_reg |= (0x3fe << CA_GART_AP_SIZE_SHFT); /* 8MB */ | ||
60 | break; | ||
61 | case 16: | ||
62 | ap_reg |= (0x3fc << CA_GART_AP_SIZE_SHFT); /* 16MB */ | ||
63 | break; | ||
64 | case 32: | ||
65 | ap_reg |= (0x3f8 << CA_GART_AP_SIZE_SHFT); /* 32 MB */ | ||
66 | break; | ||
67 | case 64: | ||
68 | ap_reg |= (0x3f0 << CA_GART_AP_SIZE_SHFT); /* 64 MB */ | ||
69 | break; | ||
70 | case 128: | ||
71 | ap_reg |= (0x3e0 << CA_GART_AP_SIZE_SHFT); /* 128 MB */ | ||
72 | break; | ||
73 | case 256: | ||
74 | ap_reg |= (0x3c0 << CA_GART_AP_SIZE_SHFT); /* 256 MB */ | ||
75 | break; | ||
76 | case 512: | ||
77 | ap_reg |= (0x380 << CA_GART_AP_SIZE_SHFT); /* 512 MB */ | ||
78 | break; | ||
79 | case 1024: | ||
80 | ap_reg |= (0x300 << CA_GART_AP_SIZE_SHFT); /* 1GB */ | ||
81 | break; | ||
82 | case 2048: | ||
83 | ap_reg |= (0x200 << CA_GART_AP_SIZE_SHFT); /* 2GB */ | ||
84 | break; | ||
85 | case 4096: | ||
86 | ap_reg |= (0x000 << CA_GART_AP_SIZE_SHFT); /* 4 GB */ | ||
87 | break; | ||
88 | default: | ||
89 | printk(KERN_ERR "%s: Invalid CA_APERATURE_SIZE " | ||
90 | "0x%lx\n", __FUNCTION__, (ulong) CA_APERATURE_SIZE); | ||
91 | return -1; | ||
92 | } | ||
93 | |||
94 | /* | ||
95 | * Set up other aperture parameters | ||
96 | */ | ||
97 | |||
98 | if (PAGE_SIZE >= 16384) { | ||
99 | tioca_kern->ca_ap_pagesize = 16384; | ||
100 | ap_reg |= CA_GART_PAGE_SIZE; | ||
101 | } else { | ||
102 | tioca_kern->ca_ap_pagesize = 4096; | ||
103 | } | ||
104 | |||
105 | tioca_kern->ca_ap_size = CA_APERATURE_SIZE; | ||
106 | tioca_kern->ca_ap_bus_base = CA_APERATURE_BASE; | ||
107 | tioca_kern->ca_gart_entries = | ||
108 | tioca_kern->ca_ap_size / tioca_kern->ca_ap_pagesize; | ||
109 | |||
110 | ap_reg |= (CA_GART_AP_ENB_AGP | CA_GART_AP_ENB_PCI); | ||
111 | ap_reg |= tioca_kern->ca_ap_bus_base; | ||
112 | |||
113 | /* | ||
114 | * Allocate and set up the GART | ||
115 | */ | ||
116 | |||
117 | tioca_kern->ca_gart_size = tioca_kern->ca_gart_entries * sizeof(u64); | ||
118 | tmp = | ||
119 | alloc_pages_node(tioca_kern->ca_closest_node, | ||
120 | GFP_KERNEL | __GFP_ZERO, | ||
121 | get_order(tioca_kern->ca_gart_size)); | ||
122 | |||
123 | if (!tmp) { | ||
124 | printk(KERN_ERR "%s: Could not allocate " | ||
125 | "%lu bytes (order %d) for GART\n", | ||
126 | __FUNCTION__, | ||
127 | tioca_kern->ca_gart_size, | ||
128 | get_order(tioca_kern->ca_gart_size)); | ||
129 | return -ENOMEM; | ||
130 | } | ||
131 | |||
132 | tioca_kern->ca_gart = page_address(tmp); | ||
133 | tioca_kern->ca_gart_coretalk_addr = | ||
134 | PHYS_TO_TIODMA(virt_to_phys(tioca_kern->ca_gart)); | ||
135 | |||
136 | /* | ||
137 | * Compute PCI/AGP convenience fields | ||
138 | */ | ||
139 | |||
140 | offset = CA_PCI32_MAPPED_BASE - CA_APERATURE_BASE; | ||
141 | tioca_kern->ca_pciap_base = CA_PCI32_MAPPED_BASE; | ||
142 | tioca_kern->ca_pciap_size = CA_PCI32_MAPPED_SIZE; | ||
143 | tioca_kern->ca_pcigart_start = offset / tioca_kern->ca_ap_pagesize; | ||
144 | tioca_kern->ca_pcigart_base = | ||
145 | tioca_kern->ca_gart_coretalk_addr + offset; | ||
146 | tioca_kern->ca_pcigart = | ||
147 | &tioca_kern->ca_gart[tioca_kern->ca_pcigart_start]; | ||
148 | tioca_kern->ca_pcigart_entries = | ||
149 | tioca_kern->ca_pciap_size / tioca_kern->ca_ap_pagesize; | ||
150 | tioca_kern->ca_pcigart_pagemap = | ||
151 | kcalloc(1, tioca_kern->ca_pcigart_entries / 8, GFP_KERNEL); | ||
152 | if (!tioca_kern->ca_pcigart_pagemap) { | ||
153 | free_pages((unsigned long)tioca_kern->ca_gart, | ||
154 | get_order(tioca_kern->ca_gart_size)); | ||
155 | return -1; | ||
156 | } | ||
157 | |||
158 | offset = CA_AGP_MAPPED_BASE - CA_APERATURE_BASE; | ||
159 | tioca_kern->ca_gfxap_base = CA_AGP_MAPPED_BASE; | ||
160 | tioca_kern->ca_gfxap_size = CA_AGP_MAPPED_SIZE; | ||
161 | tioca_kern->ca_gfxgart_start = offset / tioca_kern->ca_ap_pagesize; | ||
162 | tioca_kern->ca_gfxgart_base = | ||
163 | tioca_kern->ca_gart_coretalk_addr + offset; | ||
164 | tioca_kern->ca_gfxgart = | ||
165 | &tioca_kern->ca_gart[tioca_kern->ca_gfxgart_start]; | ||
166 | tioca_kern->ca_gfxgart_entries = | ||
167 | tioca_kern->ca_gfxap_size / tioca_kern->ca_ap_pagesize; | ||
168 | |||
169 | /* | ||
170 | * various control settings: | ||
171 | * use agp op-combining | ||
172 | * use GET semantics to fetch memory | ||
173 | * participate in coherency domain | ||
174 | * DISABLE GART PREFETCHING due to hw bug tracked in SGI PV930029 | ||
175 | */ | ||
176 | |||
177 | ca_base->ca_control1 |= CA_AGPDMA_OP_ENB_COMBDELAY; /* PV895469 ? */ | ||
178 | ca_base->ca_control2 &= ~(CA_GART_MEM_PARAM); | ||
179 | ca_base->ca_control2 |= (0x2ull << CA_GART_MEM_PARAM_SHFT); | ||
180 | tioca_kern->ca_gart_iscoherent = 1; | ||
181 | ca_base->ca_control2 &= | ||
182 | ~(CA_GART_WR_PREFETCH_ENB | CA_GART_RD_PREFETCH_ENB); | ||
183 | |||
184 | /* | ||
185 | * Unmask GART fetch error interrupts. Clear residual errors first. | ||
186 | */ | ||
187 | |||
188 | ca_base->ca_int_status_alias = CA_GART_FETCH_ERR; | ||
189 | ca_base->ca_mult_error_alias = CA_GART_FETCH_ERR; | ||
190 | ca_base->ca_int_mask &= ~CA_GART_FETCH_ERR; | ||
191 | |||
192 | /* | ||
193 | * Program the aperture and GART registers in TIOCA | ||
194 | */ | ||
195 | |||
196 | ca_base->ca_gart_aperature = ap_reg; | ||
197 | ca_base->ca_gart_ptr_table = tioca_kern->ca_gart_coretalk_addr | 1; | ||
198 | |||
199 | return 0; | ||
200 | } | ||
201 | |||
202 | /** | ||
203 | * tioca_fastwrite_enable - enable AGP FW for a tioca and its functions | ||
204 | * @tioca_kernel: structure representing the CA | ||
205 | * | ||
206 | * Given a CA, scan all attached functions making sure they all support | ||
207 | * FastWrite. If so, enable FastWrite for all functions and the CA itself. | ||
208 | */ | ||
209 | |||
210 | void | ||
211 | tioca_fastwrite_enable(struct tioca_kernel *tioca_kern) | ||
212 | { | ||
213 | int cap_ptr; | ||
214 | uint64_t ca_control1; | ||
215 | uint32_t reg; | ||
216 | struct tioca *tioca_base; | ||
217 | struct pci_dev *pdev; | ||
218 | struct tioca_common *common; | ||
219 | |||
220 | common = tioca_kern->ca_common; | ||
221 | |||
222 | /* | ||
223 | * Scan all vga controllers on this bus making sure they all | ||
224 | * support FW. If not, return. | ||
225 | */ | ||
226 | |||
227 | list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) { | ||
228 | if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8)) | ||
229 | continue; | ||
230 | |||
231 | cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); | ||
232 | if (!cap_ptr) | ||
233 | return; /* no AGP CAP means no FW */ | ||
234 | |||
235 | pci_read_config_dword(pdev, cap_ptr + PCI_AGP_STATUS, ®); | ||
236 | if (!(reg & PCI_AGP_STATUS_FW)) | ||
237 | return; /* function doesn't support FW */ | ||
238 | } | ||
239 | |||
240 | /* | ||
241 | * Set fw for all vga fn's | ||
242 | */ | ||
243 | |||
244 | list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) { | ||
245 | if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8)) | ||
246 | continue; | ||
247 | |||
248 | cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); | ||
249 | pci_read_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, ®); | ||
250 | reg |= PCI_AGP_COMMAND_FW; | ||
251 | pci_write_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, reg); | ||
252 | } | ||
253 | |||
254 | /* | ||
255 | * Set ca's fw to match | ||
256 | */ | ||
257 | |||
258 | tioca_base = (struct tioca *)common->ca_common.bs_base; | ||
259 | ca_control1 = tioca_base->ca_control1; | ||
260 | ca_control1 |= CA_AGP_FW_ENABLE; | ||
261 | tioca_base->ca_control1 = ca_control1; | ||
262 | } | ||
263 | |||
264 | EXPORT_SYMBOL(tioca_fastwrite_enable); /* used by agp-sgi */ | ||
265 | |||
266 | /** | ||
267 | * tioca_dma_d64 - create a DMA mapping using 64-bit direct mode | ||
268 | * @paddr: system physical address | ||
269 | * | ||
270 | * Map @paddr into 64-bit CA bus space. No device context is necessary. | ||
271 | * Bits 53:0 come from the coretalk address. We just need to mask in the | ||
272 | * following optional bits of the 64-bit pci address: | ||
273 | * | ||
274 | * 63:60 - Coretalk Packet Type - 0x1 for Mem Get/Put (coherent) | ||
275 | * 0x2 for PIO (non-coherent) | ||
276 | * We will always use 0x1 | ||
277 | * 55:55 - Swap bytes Currently unused | ||
278 | */ | ||
279 | static uint64_t | ||
280 | tioca_dma_d64(unsigned long paddr) | ||
281 | { | ||
282 | dma_addr_t bus_addr; | ||
283 | |||
284 | bus_addr = PHYS_TO_TIODMA(paddr); | ||
285 | |||
286 | BUG_ON(!bus_addr); | ||
287 | BUG_ON(bus_addr >> 54); | ||
288 | |||
289 | /* Set upper nibble to Cache Coherent Memory op */ | ||
290 | bus_addr |= (1UL << 60); | ||
291 | |||
292 | return bus_addr; | ||
293 | } | ||
294 | |||
295 | /** | ||
296 | * tioca_dma_d48 - create a DMA mapping using 48-bit direct mode | ||
297 | * @pdev: linux pci_dev representing the function | ||
298 | * @paddr: system physical address | ||
299 | * | ||
300 | * Map @paddr into 64-bit bus space of the CA associated with @pdev. | ||
301 | * | ||
302 | * The CA agp 48 bit direct address falls out as follows: | ||
303 | * | ||
304 | * When direct mapping AGP addresses, the 48 bit AGP address is | ||
305 | * constructed as follows: | ||
306 | * | ||
307 | * [47:40] - Low 8 bits of the page Node ID extracted from coretalk | ||
308 | * address [47:40]. The upper 8 node bits are fixed | ||
309 | * and come from the xxx register bits [5:0] | ||
310 | * [39:38] - Chiplet ID extracted from coretalk address [39:38] | ||
311 | * [37:00] - node offset extracted from coretalk address [37:00] | ||
312 | * | ||
313 | * Since the node id in general will be non-zero, and the chiplet id | ||
314 | * will always be non-zero, it follows that the device must support | ||
315 | * a dma mask of at least 0xffffffffff (40 bits) to target node 0 | ||
316 | * and in general should be 0xffffffffffff (48 bits) to target nodes | ||
317 | * up to 255. Nodes above 255 need the support of the xxx register, | ||
318 | * and so a given CA can only directly target nodes in the range | ||
319 | * xxx - xxx+255. | ||
320 | */ | ||
321 | static uint64_t | ||
322 | tioca_dma_d48(struct pci_dev *pdev, uint64_t paddr) | ||
323 | { | ||
324 | struct tioca_common *tioca_common; | ||
325 | struct tioca *ca_base; | ||
326 | uint64_t ct_addr; | ||
327 | dma_addr_t bus_addr; | ||
328 | uint32_t node_upper; | ||
329 | uint64_t agp_dma_extn; | ||
330 | struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev); | ||
331 | |||
332 | tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info; | ||
333 | ca_base = (struct tioca *)tioca_common->ca_common.bs_base; | ||
334 | |||
335 | ct_addr = PHYS_TO_TIODMA(paddr); | ||
336 | if (!ct_addr) | ||
337 | return 0; | ||
338 | |||
339 | bus_addr = (dma_addr_t) (ct_addr & 0xffffffffffff); | ||
340 | node_upper = ct_addr >> 48; | ||
341 | |||
342 | if (node_upper > 64) { | ||
343 | printk(KERN_ERR "%s: coretalk addr 0x%p node id out " | ||
344 | "of range\n", __FUNCTION__, (void *)ct_addr); | ||
345 | return 0; | ||
346 | } | ||
347 | |||
348 | agp_dma_extn = ca_base->ca_agp_dma_addr_extn; | ||
349 | if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) { | ||
350 | printk(KERN_ERR "%s: coretalk upper node (%u) " | ||
351 | "mismatch with ca_agp_dma_addr_extn (%lu)\n", | ||
352 | __FUNCTION__, | ||
353 | node_upper, (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)); | ||
354 | return 0; | ||
355 | } | ||
356 | |||
357 | return bus_addr; | ||
358 | } | ||
359 | |||
360 | /** | ||
361 | * tioca_dma_mapped - create a DMA mapping using a CA GART | ||
362 | * @pdev: linux pci_dev representing the function | ||
363 | * @paddr: host physical address to map | ||
364 | * @req_size: len (bytes) to map | ||
365 | * | ||
366 | * Map @paddr into CA address space using the GART mechanism. The mapped | ||
367 | * dma_addr_t is guaranteed to be contiguous in CA bus space. | ||
368 | */ | ||
369 | static dma_addr_t | ||
370 | tioca_dma_mapped(struct pci_dev *pdev, uint64_t paddr, size_t req_size) | ||
371 | { | ||
372 | int i, ps, ps_shift, entry, entries, mapsize, last_entry; | ||
373 | uint64_t xio_addr, end_xio_addr; | ||
374 | struct tioca_common *tioca_common; | ||
375 | struct tioca_kernel *tioca_kern; | ||
376 | dma_addr_t bus_addr = 0; | ||
377 | struct tioca_dmamap *ca_dmamap; | ||
378 | void *map; | ||
379 | unsigned long flags; | ||
380 | struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev); | ||
381 | |||
382 | tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info; | ||
383 | tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private; | ||
384 | |||
385 | xio_addr = PHYS_TO_TIODMA(paddr); | ||
386 | if (!xio_addr) | ||
387 | return 0; | ||
388 | |||
389 | spin_lock_irqsave(&tioca_kern->ca_lock, flags); | ||
390 | |||
391 | /* | ||
392 | * allocate a map struct | ||
393 | */ | ||
394 | |||
395 | ca_dmamap = kcalloc(1, sizeof(struct tioca_dmamap), GFP_ATOMIC); | ||
396 | if (!ca_dmamap) | ||
397 | goto map_return; | ||
398 | |||
399 | /* | ||
400 | * Locate free entries that can hold req_size. Account for | ||
401 | * unaligned start/length when allocating. | ||
402 | */ | ||
403 | |||
404 | ps = tioca_kern->ca_ap_pagesize; /* will be power of 2 */ | ||
405 | ps_shift = ffs(ps) - 1; | ||
406 | end_xio_addr = xio_addr + req_size - 1; | ||
407 | |||
408 | entries = (end_xio_addr >> ps_shift) - (xio_addr >> ps_shift) + 1; | ||
409 | |||
410 | map = tioca_kern->ca_pcigart_pagemap; | ||
411 | mapsize = tioca_kern->ca_pcigart_entries; | ||
412 | |||
413 | entry = find_first_zero_bit(map, mapsize); | ||
414 | while (entry < mapsize) { | ||
415 | last_entry = find_next_bit(map, mapsize, entry); | ||
416 | |||
417 | if (last_entry - entry >= entries) | ||
418 | break; | ||
419 | |||
420 | entry = find_next_zero_bit(map, mapsize, last_entry); | ||
421 | } | ||
422 | |||
423 | if (entry > mapsize) | ||
424 | goto map_return; | ||
425 | |||
426 | for (i = 0; i < entries; i++) | ||
427 | set_bit(entry + i, map); | ||
428 | |||
429 | bus_addr = tioca_kern->ca_pciap_base + (entry * ps); | ||
430 | |||
431 | ca_dmamap->cad_dma_addr = bus_addr; | ||
432 | ca_dmamap->cad_gart_size = entries; | ||
433 | ca_dmamap->cad_gart_entry = entry; | ||
434 | list_add(&ca_dmamap->cad_list, &tioca_kern->ca_dmamaps); | ||
435 | |||
436 | if (xio_addr % ps) { | ||
437 | tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr); | ||
438 | bus_addr += xio_addr & (ps - 1); | ||
439 | xio_addr &= ~(ps - 1); | ||
440 | xio_addr += ps; | ||
441 | entry++; | ||
442 | } | ||
443 | |||
444 | while (xio_addr < end_xio_addr) { | ||
445 | tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr); | ||
446 | xio_addr += ps; | ||
447 | entry++; | ||
448 | } | ||
449 | |||
450 | tioca_tlbflush(tioca_kern); | ||
451 | |||
452 | map_return: | ||
453 | spin_unlock_irqrestore(&tioca_kern->ca_lock, flags); | ||
454 | return bus_addr; | ||
455 | } | ||
456 | |||
457 | /** | ||
458 | * tioca_dma_unmap - release CA mapping resources | ||
459 | * @pdev: linux pci_dev representing the function | ||
460 | * @bus_addr: bus address returned by an earlier tioca_dma_map | ||
461 | * @dir: mapping direction (unused) | ||
462 | * | ||
463 | * Locate mapping resources associated with @bus_addr and release them. | ||
464 | * For mappings created using the direct modes (64 or 48) there are no | ||
465 | * resources to release. | ||
466 | */ | ||
467 | void | ||
468 | tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir) | ||
469 | { | ||
470 | int i, entry; | ||
471 | struct tioca_common *tioca_common; | ||
472 | struct tioca_kernel *tioca_kern; | ||
473 | struct tioca_dmamap *map; | ||
474 | struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev); | ||
475 | unsigned long flags; | ||
476 | |||
477 | tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info; | ||
478 | tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private; | ||
479 | |||
480 | /* return straight away if this isn't a mapped address */ | ||
481 | |||
482 | if (bus_addr < tioca_kern->ca_pciap_base || | ||
483 | bus_addr >= (tioca_kern->ca_pciap_base + tioca_kern->ca_pciap_size)) | ||
484 | return; | ||
485 | |||
486 | spin_lock_irqsave(&tioca_kern->ca_lock, flags); | ||
487 | |||
488 | list_for_each_entry(map, &tioca_kern->ca_dmamaps, cad_list) | ||
489 | if (map->cad_dma_addr == bus_addr) | ||
490 | break; | ||
491 | |||
492 | BUG_ON(map == NULL); | ||
493 | |||
494 | entry = map->cad_gart_entry; | ||
495 | |||
496 | for (i = 0; i < map->cad_gart_size; i++, entry++) { | ||
497 | clear_bit(entry, tioca_kern->ca_pcigart_pagemap); | ||
498 | tioca_kern->ca_pcigart[entry] = 0; | ||
499 | } | ||
500 | tioca_tlbflush(tioca_kern); | ||
501 | |||
502 | list_del(&map->cad_list); | ||
503 | spin_unlock_irqrestore(&tioca_kern->ca_lock, flags); | ||
504 | kfree(map); | ||
505 | } | ||
506 | |||
507 | /** | ||
508 | * tioca_dma_map - map pages for PCI DMA | ||
509 | * @pdev: linux pci_dev representing the function | ||
510 | * @paddr: host physical address to map | ||
511 | * @byte_count: bytes to map | ||
512 | * | ||
513 | * This is the main wrapper for mapping host physical pages to CA PCI space. | ||
514 | * The mapping mode used is based on the device's dma_mask. As a last resort | ||
515 | * use the GART mapped mode. | ||
516 | */ | ||
517 | uint64_t | ||
518 | tioca_dma_map(struct pci_dev *pdev, uint64_t paddr, size_t byte_count) | ||
519 | { | ||
520 | uint64_t mapaddr; | ||
521 | |||
522 | /* | ||
523 | * If the card is 64 or 48 bit addressable, use a direct mapping. 32 | ||
524 | * bit direct is so restrictive w.r.t. where the memory resides that | ||
525 | * we don't use it even though CA has some support. | ||
526 | */ | ||
527 | |||
528 | if (pdev->dma_mask == ~0UL) | ||
529 | mapaddr = tioca_dma_d64(paddr); | ||
530 | else if (pdev->dma_mask == 0xffffffffffffUL) | ||
531 | mapaddr = tioca_dma_d48(pdev, paddr); | ||
532 | else | ||
533 | mapaddr = 0; | ||
534 | |||
535 | /* Last resort ... use PCI portion of CA GART */ | ||
536 | |||
537 | if (mapaddr == 0) | ||
538 | mapaddr = tioca_dma_mapped(pdev, paddr, byte_count); | ||
539 | |||
540 | return mapaddr; | ||
541 | } | ||
542 | |||
543 | /** | ||
544 | * tioca_error_intr_handler - SGI TIO CA error interrupt handler | ||
545 | * @irq: unused | ||
546 | * @arg: pointer to tioca_common struct for the given CA | ||
547 | * @pt: unused | ||
548 | * | ||
549 | * Handle a CA error interrupt. Simply a wrapper around a SAL call which | ||
550 | * defers processing to the SGI prom. | ||
551 | */ | ||
552 | static irqreturn_t | ||
553 | tioca_error_intr_handler(int irq, void *arg, struct pt_regs *pt) | ||
554 | { | ||
555 | struct tioca_common *soft = arg; | ||
556 | struct ia64_sal_retval ret_stuff; | ||
557 | uint64_t segment; | ||
558 | uint64_t busnum; | ||
559 | ret_stuff.status = 0; | ||
560 | ret_stuff.v0 = 0; | ||
561 | |||
562 | segment = 0; | ||
563 | busnum = soft->ca_common.bs_persist_busnum; | ||
564 | |||
565 | SAL_CALL_NOLOCK(ret_stuff, | ||
566 | (u64) SN_SAL_IOIF_ERROR_INTERRUPT, | ||
567 | segment, busnum, 0, 0, 0, 0, 0); | ||
568 | |||
569 | return IRQ_HANDLED; | ||
570 | } | ||
571 | |||
572 | /** | ||
573 | * tioca_bus_fixup - perform final PCI fixup for a TIO CA bus | ||
574 | * @prom_bussoft: Common prom/kernel struct representing the bus | ||
575 | * | ||
576 | * Replicates the tioca_common pointed to by @prom_bussoft in kernel | ||
577 | * space. Allocates and initializes a kernel-only area for a given CA, | ||
578 | * and sets up an irq for handling CA error interrupts. | ||
579 | * | ||
580 | * On successful setup, returns the kernel version of tioca_common back to | ||
581 | * the caller. | ||
582 | */ | ||
583 | void * | ||
584 | tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft) | ||
585 | { | ||
586 | struct tioca_common *tioca_common; | ||
587 | struct tioca_kernel *tioca_kern; | ||
588 | struct pci_bus *bus; | ||
589 | |||
590 | /* sanity check prom rev */ | ||
591 | |||
592 | if (sn_sal_rev_major() < 4 || | ||
593 | (sn_sal_rev_major() == 4 && sn_sal_rev_minor() < 6)) { | ||
594 | printk | ||
595 | (KERN_ERR "%s: SGI prom rev 4.06 or greater required " | ||
596 | "for tioca support\n", __FUNCTION__); | ||
597 | return NULL; | ||
598 | } | ||
599 | |||
600 | /* | ||
601 | * Allocate kernel bus soft and copy from prom. | ||
602 | */ | ||
603 | |||
604 | tioca_common = kcalloc(1, sizeof(struct tioca_common), GFP_KERNEL); | ||
605 | if (!tioca_common) | ||
606 | return NULL; | ||
607 | |||
608 | memcpy(tioca_common, prom_bussoft, sizeof(struct tioca_common)); | ||
609 | tioca_common->ca_common.bs_base |= __IA64_UNCACHED_OFFSET; | ||
610 | |||
611 | /* init kernel-private area */ | ||
612 | |||
613 | tioca_kern = kcalloc(1, sizeof(struct tioca_kernel), GFP_KERNEL); | ||
614 | if (!tioca_kern) { | ||
615 | kfree(tioca_common); | ||
616 | return NULL; | ||
617 | } | ||
618 | |||
619 | tioca_kern->ca_common = tioca_common; | ||
620 | spin_lock_init(&tioca_kern->ca_lock); | ||
621 | INIT_LIST_HEAD(&tioca_kern->ca_dmamaps); | ||
622 | tioca_kern->ca_closest_node = | ||
623 | nasid_to_cnodeid(tioca_common->ca_closest_nasid); | ||
624 | tioca_common->ca_kernel_private = (uint64_t) tioca_kern; | ||
625 | |||
626 | bus = pci_find_bus(0, tioca_common->ca_common.bs_persist_busnum); | ||
627 | BUG_ON(!bus); | ||
628 | tioca_kern->ca_devices = &bus->devices; | ||
629 | |||
630 | /* init GART */ | ||
631 | |||
632 | if (tioca_gart_init(tioca_kern) < 0) { | ||
633 | kfree(tioca_kern); | ||
634 | kfree(tioca_common); | ||
635 | return NULL; | ||
636 | } | ||
637 | |||
638 | tioca_gart_found++; | ||
639 | list_add(&tioca_kern->ca_list, &tioca_list); | ||
640 | |||
641 | if (request_irq(SGI_TIOCA_ERROR, | ||
642 | tioca_error_intr_handler, | ||
643 | SA_SHIRQ, "TIOCA error", (void *)tioca_common)) | ||
644 | printk(KERN_WARNING | ||
645 | "%s: Unable to get irq %d. " | ||
646 | "Error interrupts won't be routed for TIOCA bus %d\n", | ||
647 | __FUNCTION__, SGI_TIOCA_ERROR, | ||
648 | (int)tioca_common->ca_common.bs_persist_busnum); | ||
649 | |||
650 | return tioca_common; | ||
651 | } | ||
652 | |||
653 | static struct sn_pcibus_provider tioca_pci_interfaces = { | ||
654 | .dma_map = tioca_dma_map, | ||
655 | .dma_map_consistent = tioca_dma_map, | ||
656 | .dma_unmap = tioca_dma_unmap, | ||
657 | .bus_fixup = tioca_bus_fixup, | ||
658 | }; | ||
659 | |||
660 | /** | ||
661 | * tioca_init_provider - init SN PCI provider ops for TIO CA | ||
662 | */ | ||
663 | int | ||
664 | tioca_init_provider(void) | ||
665 | { | ||
666 | sn_pci_provider[PCIIO_ASIC_TYPE_TIOCA] = &tioca_pci_interfaces; | ||
667 | return 0; | ||
668 | } | ||
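
A note on the aperture-size switch in tioca_gart_init() above: each CA_GART_AP_SIZE field value in the table is 0x400 minus the aperture size expressed in 4 MB units (4 MB -> 0x3ff, 8 MB -> 0x3fe, ..., 4 GB -> 0x000). The helper below restates that as a closed form; it is an observation about the table, not a replacement for the switch, which additionally rejects sizes that are not one of the listed powers of two.

/*
 * Equivalent closed form for the CA_GART_AP_SIZE values used in
 * tioca_gart_init() above: 0x400 minus the aperture size in 4 MB units.
 * E.g. 4 MB -> 0x3ff, 64 MB -> 0x3f0, 4096 MB -> 0x000.  The real code
 * still shifts the result by CA_GART_AP_SIZE_SHFT before use.
 */
static unsigned long ca_ap_size_field(unsigned long ap_size_mb)
{
	return (0x400UL - (ap_size_mb >> 2)) & 0x3ffUL;
}
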
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c index 8b40f362dd6f..124f7c1b775e 100644 --- a/arch/m32r/kernel/ptrace.c +++ b/arch/m32r/kernel/ptrace.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/ptrace.h> | 24 | #include <linux/ptrace.h> |
25 | #include <linux/user.h> | 25 | #include <linux/user.h> |
26 | #include <linux/string.h> | 26 | #include <linux/string.h> |
27 | #include <linux/signal.h> | ||
27 | 28 | ||
28 | #include <asm/cacheflush.h> | 29 | #include <asm/cacheflush.h> |
29 | #include <asm/io.h> | 30 | #include <asm/io.h> |
@@ -665,7 +666,7 @@ do_ptrace(long request, struct task_struct *child, long addr, long data) | |||
665 | case PTRACE_SYSCALL: | 666 | case PTRACE_SYSCALL: |
666 | case PTRACE_CONT: | 667 | case PTRACE_CONT: |
667 | ret = -EIO; | 668 | ret = -EIO; |
668 | if ((unsigned long) data > _NSIG) | 669 | if (!valid_signal(data)) |
669 | break; | 670 | break; |
670 | if (request == PTRACE_SYSCALL) | 671 | if (request == PTRACE_SYSCALL) |
671 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 672 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
@@ -700,7 +701,7 @@ do_ptrace(long request, struct task_struct *child, long addr, long data) | |||
700 | unsigned long pc, insn; | 701 | unsigned long pc, insn; |
701 | 702 | ||
702 | ret = -EIO; | 703 | ret = -EIO; |
703 | if ((unsigned long) data > _NSIG) | 704 | if (!valid_signal(data)) |
704 | break; | 705 | break; |
705 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 706 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
706 | if ((child->ptrace & PT_DTRACE) == 0) { | 707 | if ((child->ptrace & PT_DTRACE) == 0) { |
diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c index 0beb53333ba3..f4e1e5eb8e12 100644 --- a/arch/m68k/kernel/ptrace.c +++ b/arch/m68k/kernel/ptrace.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/ptrace.h> | 19 | #include <linux/ptrace.h> |
20 | #include <linux/user.h> | 20 | #include <linux/user.h> |
21 | #include <linux/config.h> | 21 | #include <linux/config.h> |
22 | #include <linux/signal.h> | ||
22 | 23 | ||
23 | #include <asm/uaccess.h> | 24 | #include <asm/uaccess.h> |
24 | #include <asm/page.h> | 25 | #include <asm/page.h> |
@@ -251,7 +252,7 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) | |||
251 | long tmp; | 252 | long tmp; |
252 | 253 | ||
253 | ret = -EIO; | 254 | ret = -EIO; |
254 | if ((unsigned long) data > _NSIG) | 255 | if (!valid_signal(data)) |
255 | break; | 256 | break; |
256 | if (request == PTRACE_SYSCALL) { | 257 | if (request == PTRACE_SYSCALL) { |
257 | child->thread.work.syscall_trace = ~0; | 258 | child->thread.work.syscall_trace = ~0; |
@@ -292,7 +293,7 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) | |||
292 | long tmp; | 293 | long tmp; |
293 | 294 | ||
294 | ret = -EIO; | 295 | ret = -EIO; |
295 | if ((unsigned long) data > _NSIG) | 296 | if (!valid_signal(data)) |
296 | break; | 297 | break; |
297 | child->thread.work.syscall_trace = 0; | 298 | child->thread.work.syscall_trace = 0; |
298 | tmp = get_reg(child, PT_SR) | (TRACE_BITS << 16); | 299 | tmp = get_reg(child, PT_SR) | (TRACE_BITS << 16); |
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig index fc4615b6d3a9..e729bd280623 100644 --- a/arch/m68knommu/Kconfig +++ b/arch/m68knommu/Kconfig | |||
@@ -534,6 +534,11 @@ endchoice | |||
534 | 534 | ||
535 | endmenu | 535 | endmenu |
536 | 536 | ||
537 | config ISA_DMA_API | ||
538 | bool | ||
539 | depends on !M5272 | ||
540 | default y | ||
541 | |||
537 | menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)" | 542 | menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)" |
538 | 543 | ||
539 | config PCI | 544 | config PCI |
diff --git a/arch/m68knommu/kernel/ptrace.c b/arch/m68knommu/kernel/ptrace.c index 15cf79080b15..9724e1cd82e5 100644 --- a/arch/m68knommu/kernel/ptrace.c +++ b/arch/m68knommu/kernel/ptrace.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/ptrace.h> | 19 | #include <linux/ptrace.h> |
20 | #include <linux/user.h> | 20 | #include <linux/user.h> |
21 | #include <linux/config.h> | 21 | #include <linux/config.h> |
22 | #include <linux/signal.h> | ||
22 | 23 | ||
23 | #include <asm/uaccess.h> | 24 | #include <asm/uaccess.h> |
24 | #include <asm/page.h> | 25 | #include <asm/page.h> |
@@ -240,7 +241,7 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) | |||
240 | long tmp; | 241 | long tmp; |
241 | 242 | ||
242 | ret = -EIO; | 243 | ret = -EIO; |
243 | if ((unsigned long) data > _NSIG) | 244 | if (!valid_signal(data)) |
244 | break; | 245 | break; |
245 | if (request == PTRACE_SYSCALL) | 246 | if (request == PTRACE_SYSCALL) |
246 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 247 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
@@ -278,7 +279,7 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) | |||
278 | long tmp; | 279 | long tmp; |
279 | 280 | ||
280 | ret = -EIO; | 281 | ret = -EIO; |
281 | if ((unsigned long) data > _NSIG) | 282 | if (!valid_signal(data)) |
282 | break; | 283 | break; |
283 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 284 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
284 | tmp = get_reg(child, PT_SR) | (TRACE_BITS << 16); | 285 | tmp = get_reg(child, PT_SR) | (TRACE_BITS << 16); |
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 5e666aad8815..ab9944693f1f 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig | |||
@@ -1656,3 +1656,7 @@ config GENERIC_HARDIRQS | |||
1656 | config GENERIC_IRQ_PROBE | 1656 | config GENERIC_IRQ_PROBE |
1657 | bool | 1657 | bool |
1658 | default y | 1658 | default y |
1659 | |||
1660 | config ISA_DMA_API | ||
1661 | bool | ||
1662 | default y | ||
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 92f2c39afe27..92e70ca3bff9 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/smp_lock.h> | 26 | #include <linux/smp_lock.h> |
27 | #include <linux/user.h> | 27 | #include <linux/user.h> |
28 | #include <linux/security.h> | 28 | #include <linux/security.h> |
29 | #include <linux/signal.h> | ||
29 | 30 | ||
30 | #include <asm/cpu.h> | 31 | #include <asm/cpu.h> |
31 | #include <asm/fpu.h> | 32 | #include <asm/fpu.h> |
@@ -257,7 +258,7 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) | |||
257 | case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ | 258 | case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ |
258 | case PTRACE_CONT: { /* restart after signal. */ | 259 | case PTRACE_CONT: { /* restart after signal. */ |
259 | ret = -EIO; | 260 | ret = -EIO; |
260 | if ((unsigned long) data > _NSIG) | 261 | if (!valid_signal(data)) |
261 | break; | 262 | break; |
262 | if (request == PTRACE_SYSCALL) { | 263 | if (request == PTRACE_SYSCALL) { |
263 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 264 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
@@ -300,25 +301,38 @@ out: | |||
300 | return ret; | 301 | return ret; |
301 | } | 302 | } |
302 | 303 | ||
304 | static inline int audit_arch(void) | ||
305 | { | ||
306 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
307 | #ifdef CONFIG_MIPS64 | ||
308 | if (!(current->thread.mflags & MF_32BIT_REGS)) | ||
309 | return AUDIT_ARCH_MIPSEL64; | ||
310 | #endif /* MIPS64 */ | ||
311 | return AUDIT_ARCH_MIPSEL; | ||
312 | |||
313 | #else /* big endian... */ | ||
314 | #ifdef CONFIG_MIPS64 | ||
315 | if (!(current->thread.mflags & MF_32BIT_REGS)) | ||
316 | return AUDIT_ARCH_MIPS64; | ||
317 | #endif /* MIPS64 */ | ||
318 | return AUDIT_ARCH_MIPS; | ||
319 | |||
320 | #endif /* endian */ | ||
321 | } | ||
322 | |||
303 | /* | 323 | /* |
304 | * Notification of system call entry/exit | 324 | * Notification of system call entry/exit |
305 | * - triggered by current->work.syscall_trace | 325 | * - triggered by current->work.syscall_trace |
306 | */ | 326 | */ |
307 | asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit) | 327 | asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit) |
308 | { | 328 | { |
309 | if (unlikely(current->audit_context)) { | 329 | if (unlikely(current->audit_context) && entryexit) |
310 | if (!entryexit) | 330 | audit_syscall_exit(current, AUDITSC_RESULT(regs->regs[2]), regs->regs[2]); |
311 | audit_syscall_entry(current, regs->regs[2], | ||
312 | regs->regs[4], regs->regs[5], | ||
313 | regs->regs[6], regs->regs[7]); | ||
314 | else | ||
315 | audit_syscall_exit(current, regs->regs[2]); | ||
316 | } | ||
317 | 331 | ||
318 | if (!test_thread_flag(TIF_SYSCALL_TRACE)) | 332 | if (!test_thread_flag(TIF_SYSCALL_TRACE)) |
319 | return; | 333 | goto out; |
320 | if (!(current->ptrace & PT_PTRACED)) | 334 | if (!(current->ptrace & PT_PTRACED)) |
321 | return; | 335 | goto out; |
322 | 336 | ||
323 | /* The 0x80 provides a way for the tracing parent to distinguish | 337 | /* The 0x80 provides a way for the tracing parent to distinguish |
324 | between a syscall stop and SIGTRAP delivery */ | 338 | between a syscall stop and SIGTRAP delivery */ |
@@ -334,4 +348,9 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit) | |||
334 | send_sig(current->exit_code, current, 1); | 348 | send_sig(current->exit_code, current, 1); |
335 | current->exit_code = 0; | 349 | current->exit_code = 0; |
336 | } | 350 | } |
351 | out: | ||
352 | if (unlikely(current->audit_context) && !entryexit) | ||
353 | audit_syscall_entry(current, audit_arch(), regs->regs[2], | ||
354 | regs->regs[4], regs->regs[5], | ||
355 | regs->regs[6], regs->regs[7]); | ||
337 | } | 356 | } |
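Note: the do_syscall_trace() rework above does two things: it passes the machine type (from the new audit_arch() helper) to audit_syscall_entry(), and it moves the entry-side audit call after the ptrace stop while keeping the exit-side call before it. The point of the reordering is that a tracer attached with PTRACE_SYSCALL may rewrite the syscall number or arguments while the child is stopped; auditing after that stop records what will actually run. A condensed sketch of the resulting flow, where ptrace_stop_for_tracer() is a placeholder for the SIGTRAP|0x80 notification the real function keeps inline:

    /* condensed flow sketch, not the verbatim function; on MIPS the syscall
     * number/result sits in regs->regs[2] and the arguments in regs->regs[4..7] */
    void syscall_trace_flow(struct pt_regs *regs, int entryexit)
    {
            /* exit: the result is final, so audit it before the tracer runs */
            if (unlikely(current->audit_context) && entryexit)
                    audit_syscall_exit(current, AUDITSC_RESULT(regs->regs[2]),
                                       regs->regs[2]);

            if (test_thread_flag(TIF_SYSCALL_TRACE) &&
                (current->ptrace & PT_PTRACED))
                    ptrace_stop_for_tracer();   /* tracer may rewrite the registers here */

            /* entry: audit whatever the tracer left behind, arch tag included */
            if (unlikely(current->audit_context) && !entryexit)
                    audit_syscall_entry(current, audit_arch(), regs->regs[2],
                                        regs->regs[4], regs->regs[5],
                                        regs->regs[6], regs->regs[7]);
    }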
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c index 611dee919d50..eee207969c21 100644 --- a/arch/mips/kernel/ptrace32.c +++ b/arch/mips/kernel/ptrace32.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/smp_lock.h> | 24 | #include <linux/smp_lock.h> |
25 | #include <linux/user.h> | 25 | #include <linux/user.h> |
26 | #include <linux/security.h> | 26 | #include <linux/security.h> |
27 | #include <linux/signal.h> | ||
27 | 28 | ||
28 | #include <asm/cpu.h> | 29 | #include <asm/cpu.h> |
29 | #include <asm/fpu.h> | 30 | #include <asm/fpu.h> |
@@ -241,7 +242,7 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data) | |||
241 | case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ | 242 | case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ |
242 | case PTRACE_CONT: { /* restart after signal. */ | 243 | case PTRACE_CONT: { /* restart after signal. */ |
243 | ret = -EIO; | 244 | ret = -EIO; |
244 | if ((unsigned int) data > _NSIG) | 245 | if (!valid_signal(data)) |
245 | break; | 246 | break; |
246 | if (request == PTRACE_SYSCALL) { | 247 | if (request == PTRACE_SYSCALL) { |
247 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 248 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 598bfe7426a2..ae2a1312d4ef 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c | |||
@@ -374,22 +374,6 @@ asmlinkage int sys_ipc (uint call, int first, int second, | |||
374 | } | 374 | } |
375 | 375 | ||
376 | /* | 376 | /* |
377 | * Native ABI that is O32 or N64 version | ||
378 | */ | ||
379 | asmlinkage long sys_shmat(int shmid, char __user *shmaddr, | ||
380 | int shmflg, unsigned long *addr) | ||
381 | { | ||
382 | unsigned long raddr; | ||
383 | int err; | ||
384 | |||
385 | err = do_shmat(shmid, shmaddr, shmflg, &raddr); | ||
386 | if (err) | ||
387 | return err; | ||
388 | |||
389 | return put_user(raddr, addr); | ||
390 | } | ||
391 | |||
392 | /* | ||
393 | * No implemented yet ... | 377 | * No implemented yet ... |
394 | */ | 378 | */ |
395 | asmlinkage int sys_cachectl(char *addr, int nbytes, int op) | 379 | asmlinkage int sys_cachectl(char *addr, int nbytes, int op) |
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 5b5cd00d98ca..e7e7c56fc212 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig | |||
@@ -45,6 +45,10 @@ config GENERIC_IRQ_PROBE | |||
45 | config PM | 45 | config PM |
46 | bool | 46 | bool |
47 | 47 | ||
48 | config ISA_DMA_API | ||
49 | bool | ||
50 | default y | ||
51 | |||
48 | source "init/Kconfig" | 52 | source "init/Kconfig" |
49 | 53 | ||
50 | 54 | ||
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 2937a9236384..c07db9dff7cd 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/personality.h> | 17 | #include <linux/personality.h> |
18 | #include <linux/security.h> | 18 | #include <linux/security.h> |
19 | #include <linux/compat.h> | 19 | #include <linux/compat.h> |
20 | #include <linux/signal.h> | ||
20 | 21 | ||
21 | #include <asm/uaccess.h> | 22 | #include <asm/uaccess.h> |
22 | #include <asm/pgtable.h> | 23 | #include <asm/pgtable.h> |
@@ -285,7 +286,7 @@ long sys_ptrace(long request, pid_t pid, long addr, long data) | |||
285 | ret = -EIO; | 286 | ret = -EIO; |
286 | DBG("sys_ptrace(%s)\n", | 287 | DBG("sys_ptrace(%s)\n", |
287 | request == PTRACE_SYSCALL ? "SYSCALL" : "CONT"); | 288 | request == PTRACE_SYSCALL ? "SYSCALL" : "CONT"); |
288 | if ((unsigned long) data > _NSIG) | 289 | if (!valid_signal(data)) |
289 | goto out_tsk; | 290 | goto out_tsk; |
290 | child->ptrace &= ~(PT_SINGLESTEP|PT_BLOCKSTEP); | 291 | child->ptrace &= ~(PT_SINGLESTEP|PT_BLOCKSTEP); |
291 | if (request == PTRACE_SYSCALL) { | 292 | if (request == PTRACE_SYSCALL) { |
@@ -311,7 +312,7 @@ long sys_ptrace(long request, pid_t pid, long addr, long data) | |||
311 | case PTRACE_SINGLEBLOCK: | 312 | case PTRACE_SINGLEBLOCK: |
312 | DBG("sys_ptrace(SINGLEBLOCK)\n"); | 313 | DBG("sys_ptrace(SINGLEBLOCK)\n"); |
313 | ret = -EIO; | 314 | ret = -EIO; |
314 | if ((unsigned long) data > _NSIG) | 315 | if (!valid_signal(data)) |
315 | goto out_tsk; | 316 | goto out_tsk; |
316 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 317 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
317 | child->ptrace &= ~PT_SINGLESTEP; | 318 | child->ptrace &= ~PT_SINGLESTEP; |
@@ -328,7 +329,7 @@ long sys_ptrace(long request, pid_t pid, long addr, long data) | |||
328 | case PTRACE_SINGLESTEP: | 329 | case PTRACE_SINGLESTEP: |
329 | DBG("sys_ptrace(SINGLESTEP)\n"); | 330 | DBG("sys_ptrace(SINGLESTEP)\n"); |
330 | ret = -EIO; | 331 | ret = -EIO; |
331 | if ((unsigned long) data > _NSIG) | 332 | if (!valid_signal(data)) |
332 | goto out_tsk; | 333 | goto out_tsk; |
333 | 334 | ||
334 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 335 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index 7958cd8c8bf8..d15a1d53e101 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c | |||
@@ -161,17 +161,6 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len, | |||
161 | } | 161 | } |
162 | } | 162 | } |
163 | 163 | ||
164 | long sys_shmat_wrapper(int shmid, char __user *shmaddr, int shmflag) | ||
165 | { | ||
166 | unsigned long raddr; | ||
167 | int r; | ||
168 | |||
169 | r = do_shmat(shmid, shmaddr, shmflag, &raddr); | ||
170 | if (r < 0) | ||
171 | return r; | ||
172 | return raddr; | ||
173 | } | ||
174 | |||
175 | /* Fucking broken ABI */ | 164 | /* Fucking broken ABI */ |
176 | 165 | ||
177 | #ifdef CONFIG_64BIT | 166 | #ifdef CONFIG_64BIT |
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 779b537100ec..dcfa4d3d0e7d 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S | |||
@@ -297,7 +297,7 @@ | |||
297 | ENTRY_DIFF(msgrcv) | 297 | ENTRY_DIFF(msgrcv) |
298 | ENTRY_SAME(msgget) /* 190 */ | 298 | ENTRY_SAME(msgget) /* 190 */ |
299 | ENTRY_SAME(msgctl) | 299 | ENTRY_SAME(msgctl) |
300 | ENTRY_SAME(shmat_wrapper) | 300 | ENTRY_SAME(shmat) |
301 | ENTRY_SAME(shmdt) | 301 | ENTRY_SAME(shmdt) |
302 | ENTRY_SAME(shmget) | 302 | ENTRY_SAME(shmget) |
303 | ENTRY_SAME(shmctl) /* 195 */ | 303 | ENTRY_SAME(shmctl) /* 195 */ |
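Note: the shmat wrappers deleted from MIPS and parisc above were near-identical: call do_shmat() and hand the attach address back on success. Dropping them only works because a shared implementation exists in the generic ipc code; the sketch below shows the shape of that common wrapper, under the assumption that it returns the address directly the way the parisc version did rather than writing it through a user pointer like the old MIPS N64 variant:

    /* sketch of the shared wrapper the syscall tables now point at */
    asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg)
    {
            unsigned long raddr;
            long err;

            err = do_shmat(shmid, shmaddr, shmflg, &raddr);
            if (err)
                    return err;
            /* user-space addresses fit in a non-negative long, so the
             * return value is unambiguous */
            return (long)raddr;
    }

This is what lets the parisc syscall_table.S entry switch from ENTRY_SAME(shmat_wrapper) to ENTRY_SAME(shmat) without an ABI change.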
diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig index 74aa1e92a395..600f23d7fd33 100644 --- a/arch/ppc/Kconfig +++ b/arch/ppc/Kconfig | |||
@@ -43,6 +43,10 @@ config GENERIC_NVRAM | |||
43 | bool | 43 | bool |
44 | default y | 44 | default y |
45 | 45 | ||
46 | config SCHED_NO_NO_OMIT_FRAME_POINTER | ||
47 | bool | ||
48 | default y | ||
49 | |||
46 | source "init/Kconfig" | 50 | source "init/Kconfig" |
47 | 51 | ||
48 | menu "Processor" | 52 | menu "Processor" |
@@ -53,6 +57,7 @@ choice | |||
53 | 57 | ||
54 | config 6xx | 58 | config 6xx |
55 | bool "6xx/7xx/74xx/52xx/82xx/83xx" | 59 | bool "6xx/7xx/74xx/52xx/82xx/83xx" |
60 | select PPC_FPU | ||
56 | help | 61 | help |
57 | There are four types of PowerPC chips supported. The more common | 62 | There are four types of PowerPC chips supported. The more common |
58 | types (601, 603, 604, 740, 750, 7400), the Motorola embedded | 63 | types (601, 603, 604, 740, 750, 7400), the Motorola embedded |
@@ -72,9 +77,11 @@ config 44x | |||
72 | bool "44x" | 77 | bool "44x" |
73 | 78 | ||
74 | config POWER3 | 79 | config POWER3 |
80 | select PPC_FPU | ||
75 | bool "POWER3" | 81 | bool "POWER3" |
76 | 82 | ||
77 | config POWER4 | 83 | config POWER4 |
84 | select PPC_FPU | ||
78 | bool "POWER4 and 970 (G5)" | 85 | bool "POWER4 and 970 (G5)" |
79 | 86 | ||
80 | config 8xx | 87 | config 8xx |
@@ -86,6 +93,9 @@ config E500 | |||
86 | 93 | ||
87 | endchoice | 94 | endchoice |
88 | 95 | ||
96 | config PPC_FPU | ||
97 | bool | ||
98 | |||
89 | config BOOKE | 99 | config BOOKE |
90 | bool | 100 | bool |
91 | depends on E500 | 101 | depends on E500 |
@@ -1075,6 +1085,10 @@ source kernel/power/Kconfig | |||
1075 | 1085 | ||
1076 | endmenu | 1086 | endmenu |
1077 | 1087 | ||
1088 | config ISA_DMA_API | ||
1089 | bool | ||
1090 | default y | ||
1091 | |||
1078 | menu "Bus options" | 1092 | menu "Bus options" |
1079 | 1093 | ||
1080 | config ISA | 1094 | config ISA |

diff --git a/arch/ppc/Makefile b/arch/ppc/Makefile index 73cbdda5b597..0432a25b4735 100644 --- a/arch/ppc/Makefile +++ b/arch/ppc/Makefile | |||
@@ -53,6 +53,7 @@ head-$(CONFIG_FSL_BOOKE) := arch/ppc/kernel/head_fsl_booke.o | |||
53 | 53 | ||
54 | head-$(CONFIG_6xx) += arch/ppc/kernel/idle_6xx.o | 54 | head-$(CONFIG_6xx) += arch/ppc/kernel/idle_6xx.o |
55 | head-$(CONFIG_POWER4) += arch/ppc/kernel/idle_power4.o | 55 | head-$(CONFIG_POWER4) += arch/ppc/kernel/idle_power4.o |
56 | head-$(CONFIG_PPC_FPU) += arch/ppc/kernel/fpu.o | ||
56 | 57 | ||
57 | core-y += arch/ppc/kernel/ arch/ppc/platforms/ \ | 58 | core-y += arch/ppc/kernel/ arch/ppc/platforms/ \ |
58 | arch/ppc/mm/ arch/ppc/lib/ arch/ppc/syslib/ | 59 | arch/ppc/mm/ arch/ppc/lib/ arch/ppc/syslib/ |
diff --git a/arch/ppc/boot/images/Makefile b/arch/ppc/boot/images/Makefile index 774de8e23871..f850fb0fb511 100644 --- a/arch/ppc/boot/images/Makefile +++ b/arch/ppc/boot/images/Makefile | |||
@@ -20,8 +20,9 @@ quiet_cmd_uimage = UIMAGE $@ | |||
20 | 20 | ||
21 | targets += uImage | 21 | targets += uImage |
22 | $(obj)/uImage: $(obj)/vmlinux.gz | 22 | $(obj)/uImage: $(obj)/vmlinux.gz |
23 | $(Q)rm -f $@ | ||
23 | $(call if_changed,uimage) | 24 | $(call if_changed,uimage) |
24 | @echo ' Image $@ is ready' | 25 | @echo ' Image: $@' $(if $(wildcard $@),'is ready','not made') |
25 | 26 | ||
26 | # Files generated that shall be removed upon make clean | 27 | # Files generated that shall be removed upon make clean |
27 | clean-files := sImage vmapus vmlinux* miboot* zImage* uImage | 28 | clean-files := sImage vmapus vmlinux* miboot* zImage* uImage |
diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile index 86bc878cb3ee..b284451802c9 100644 --- a/arch/ppc/kernel/Makefile +++ b/arch/ppc/kernel/Makefile | |||
@@ -9,6 +9,7 @@ extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o | |||
9 | extra-$(CONFIG_8xx) := head_8xx.o | 9 | extra-$(CONFIG_8xx) := head_8xx.o |
10 | extra-$(CONFIG_6xx) += idle_6xx.o | 10 | extra-$(CONFIG_6xx) += idle_6xx.o |
11 | extra-$(CONFIG_POWER4) += idle_power4.o | 11 | extra-$(CONFIG_POWER4) += idle_power4.o |
12 | extra-$(CONFIG_PPC_FPU) += fpu.o | ||
12 | extra-y += vmlinux.lds | 13 | extra-y += vmlinux.lds |
13 | 14 | ||
14 | obj-y := entry.o traps.o irq.o idle.o time.o misc.o \ | 15 | obj-y := entry.o traps.o irq.o idle.o time.o misc.o \ |
diff --git a/arch/ppc/kernel/align.c b/arch/ppc/kernel/align.c index 79c929475037..ff81da9598d8 100644 --- a/arch/ppc/kernel/align.c +++ b/arch/ppc/kernel/align.c | |||
@@ -290,6 +290,10 @@ fix_alignment(struct pt_regs *regs) | |||
290 | /* lwm, stmw */ | 290 | /* lwm, stmw */ |
291 | nb = (32 - reg) * 4; | 291 | nb = (32 - reg) * 4; |
292 | } | 292 | } |
293 | |||
294 | if (!access_ok((flags & ST? VERIFY_WRITE: VERIFY_READ), addr, nb+nb0)) | ||
295 | return -EFAULT; /* bad address */ | ||
296 | |||
293 | rptr = (unsigned char *) ®s->gpr[reg]; | 297 | rptr = (unsigned char *) ®s->gpr[reg]; |
294 | if (flags & LD) { | 298 | if (flags & LD) { |
295 | for (i = 0; i < nb; ++i) | 299 | for (i = 0; i < nb; ++i) |
@@ -368,16 +372,24 @@ fix_alignment(struct pt_regs *regs) | |||
368 | 372 | ||
369 | /* Single-precision FP load and store require conversions... */ | 373 | /* Single-precision FP load and store require conversions... */ |
370 | case LD+F+S: | 374 | case LD+F+S: |
375 | #ifdef CONFIG_PPC_FPU | ||
371 | preempt_disable(); | 376 | preempt_disable(); |
372 | enable_kernel_fp(); | 377 | enable_kernel_fp(); |
373 | cvt_fd(&data.f, &data.d, ¤t->thread.fpscr); | 378 | cvt_fd(&data.f, &data.d, ¤t->thread.fpscr); |
374 | preempt_enable(); | 379 | preempt_enable(); |
380 | #else | ||
381 | return 0; | ||
382 | #endif | ||
375 | break; | 383 | break; |
376 | case ST+F+S: | 384 | case ST+F+S: |
385 | #ifdef CONFIG_PPC_FPU | ||
377 | preempt_disable(); | 386 | preempt_disable(); |
378 | enable_kernel_fp(); | 387 | enable_kernel_fp(); |
379 | cvt_df(&data.d, &data.f, ¤t->thread.fpscr); | 388 | cvt_df(&data.d, &data.f, ¤t->thread.fpscr); |
380 | preempt_enable(); | 389 | preempt_enable(); |
390 | #else | ||
391 | return 0; | ||
392 | #endif | ||
381 | break; | 393 | break; |
382 | } | 394 | } |
383 | 395 | ||
diff --git a/arch/ppc/kernel/cpu_setup_6xx.S b/arch/ppc/kernel/cpu_setup_6xx.S index 74f781b486a3..468721d9ebd2 100644 --- a/arch/ppc/kernel/cpu_setup_6xx.S +++ b/arch/ppc/kernel/cpu_setup_6xx.S | |||
@@ -30,12 +30,14 @@ _GLOBAL(__setup_cpu_604) | |||
30 | blr | 30 | blr |
31 | _GLOBAL(__setup_cpu_750) | 31 | _GLOBAL(__setup_cpu_750) |
32 | mflr r4 | 32 | mflr r4 |
33 | bl __init_fpu_registers | ||
33 | bl setup_common_caches | 34 | bl setup_common_caches |
34 | bl setup_750_7400_hid0 | 35 | bl setup_750_7400_hid0 |
35 | mtlr r4 | 36 | mtlr r4 |
36 | blr | 37 | blr |
37 | _GLOBAL(__setup_cpu_750cx) | 38 | _GLOBAL(__setup_cpu_750cx) |
38 | mflr r4 | 39 | mflr r4 |
40 | bl __init_fpu_registers | ||
39 | bl setup_common_caches | 41 | bl setup_common_caches |
40 | bl setup_750_7400_hid0 | 42 | bl setup_750_7400_hid0 |
41 | bl setup_750cx | 43 | bl setup_750cx |
@@ -43,6 +45,7 @@ _GLOBAL(__setup_cpu_750cx) | |||
43 | blr | 45 | blr |
44 | _GLOBAL(__setup_cpu_750fx) | 46 | _GLOBAL(__setup_cpu_750fx) |
45 | mflr r4 | 47 | mflr r4 |
48 | bl __init_fpu_registers | ||
46 | bl setup_common_caches | 49 | bl setup_common_caches |
47 | bl setup_750_7400_hid0 | 50 | bl setup_750_7400_hid0 |
48 | bl setup_750fx | 51 | bl setup_750fx |
@@ -50,6 +53,7 @@ _GLOBAL(__setup_cpu_750fx) | |||
50 | blr | 53 | blr |
51 | _GLOBAL(__setup_cpu_7400) | 54 | _GLOBAL(__setup_cpu_7400) |
52 | mflr r4 | 55 | mflr r4 |
56 | bl __init_fpu_registers | ||
53 | bl setup_7400_workarounds | 57 | bl setup_7400_workarounds |
54 | bl setup_common_caches | 58 | bl setup_common_caches |
55 | bl setup_750_7400_hid0 | 59 | bl setup_750_7400_hid0 |
@@ -57,6 +61,7 @@ _GLOBAL(__setup_cpu_7400) | |||
57 | blr | 61 | blr |
58 | _GLOBAL(__setup_cpu_7410) | 62 | _GLOBAL(__setup_cpu_7410) |
59 | mflr r4 | 63 | mflr r4 |
64 | bl __init_fpu_registers | ||
60 | bl setup_7410_workarounds | 65 | bl setup_7410_workarounds |
61 | bl setup_common_caches | 66 | bl setup_common_caches |
62 | bl setup_750_7400_hid0 | 67 | bl setup_750_7400_hid0 |
@@ -80,7 +85,7 @@ setup_common_caches: | |||
80 | bne 1f /* don't invalidate the D-cache */ | 85 | bne 1f /* don't invalidate the D-cache */ |
81 | ori r8,r8,HID0_DCI /* unless it wasn't enabled */ | 86 | ori r8,r8,HID0_DCI /* unless it wasn't enabled */ |
82 | 1: sync | 87 | 1: sync |
83 | mtspr SPRN_HID0,r8 /* enable and invalidate caches */ | 88 | mtspr SPRN_HID0,r8 /* enable and invalidate caches */ |
84 | sync | 89 | sync |
85 | mtspr SPRN_HID0,r11 /* enable caches */ | 90 | mtspr SPRN_HID0,r11 /* enable caches */ |
86 | sync | 91 | sync |
@@ -152,9 +157,13 @@ setup_7410_workarounds: | |||
152 | setup_750_7400_hid0: | 157 | setup_750_7400_hid0: |
153 | mfspr r11,SPRN_HID0 | 158 | mfspr r11,SPRN_HID0 |
154 | ori r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC | 159 | ori r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC |
160 | oris r11,r11,HID0_DPM@h | ||
155 | BEGIN_FTR_SECTION | 161 | BEGIN_FTR_SECTION |
156 | oris r11,r11,HID0_DPM@h /* enable dynamic power mgmt */ | 162 | xori r11,r11,HID0_BTIC |
157 | END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM) | 163 | END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC) |
164 | BEGIN_FTR_SECTION | ||
165 | xoris r11,r11,HID0_DPM@h /* disable dynamic power mgmt */ | ||
166 | END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM) | ||
158 | li r3,HID0_SPD | 167 | li r3,HID0_SPD |
159 | andc r11,r11,r3 /* clear SPD: enable speculative */ | 168 | andc r11,r11,r3 /* clear SPD: enable speculative */ |
160 | li r3,0 | 169 | li r3,0 |
@@ -218,13 +227,15 @@ setup_745x_specifics: | |||
218 | 227 | ||
219 | /* All of the bits we have to set..... | 228 | /* All of the bits we have to set..... |
220 | */ | 229 | */ |
221 | ori r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE | HID0_LRSTK | HID0_BTIC | 230 | ori r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE |
231 | ori r11,r11,HID0_LRSTK | HID0_BTIC | ||
232 | oris r11,r11,HID0_DPM@h | ||
222 | BEGIN_FTR_SECTION | 233 | BEGIN_FTR_SECTION |
223 | xori r11,r11,HID0_BTIC | 234 | xori r11,r11,HID0_BTIC |
224 | END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC) | 235 | END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC) |
225 | BEGIN_FTR_SECTION | 236 | BEGIN_FTR_SECTION |
226 | oris r11,r11,HID0_DPM@h /* enable dynamic power mgmt */ | 237 | xoris r11,r11,HID0_DPM@h /* disable dynamic power mgmt */ |
227 | END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM) | 238 | END_FTR_SECTION_IFSET(CPU_FTR_NO_DPM) |
228 | 239 | ||
229 | /* All of the bits we have to clear.... | 240 | /* All of the bits we have to clear.... |
230 | */ | 241 | */ |
@@ -248,6 +259,25 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM) | |||
248 | isync | 259 | isync |
249 | blr | 260 | blr |
250 | 261 | ||
262 | /* | ||
263 | * Initialize the FPU registers. This is needed to work around an erratum | ||
264 | * in some 750 cpus where using a not yet initialized FPU register after | ||
265 | * power on reset may hang the CPU | ||
266 | */ | ||
267 | _GLOBAL(__init_fpu_registers) | ||
268 | mfmsr r10 | ||
269 | ori r11,r10,MSR_FP | ||
270 | mtmsr r11 | ||
271 | isync | ||
272 | addis r9,r3,empty_zero_page@ha | ||
273 | addi r9,r9,empty_zero_page@l | ||
274 | REST_32FPRS(0,r9) | ||
275 | sync | ||
276 | mtmsr r10 | ||
277 | isync | ||
278 | blr | ||
279 | |||
280 | |||
251 | /* Definitions for the table use to save CPU states */ | 281 | /* Definitions for the table use to save CPU states */ |
252 | #define CS_HID0 0 | 282 | #define CS_HID0 0 |
253 | #define CS_HID1 4 | 283 | #define CS_HID1 4 |
diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S index 035217d6c0f1..5f075dbc4ee7 100644 --- a/arch/ppc/kernel/entry.S +++ b/arch/ppc/kernel/entry.S | |||
@@ -563,6 +563,65 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |||
563 | addi r1,r1,INT_FRAME_SIZE | 563 | addi r1,r1,INT_FRAME_SIZE |
564 | blr | 564 | blr |
565 | 565 | ||
566 | .globl fast_exception_return | ||
567 | fast_exception_return: | ||
568 | #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) | ||
569 | andi. r10,r9,MSR_RI /* check for recoverable interrupt */ | ||
570 | beq 1f /* if not, we've got problems */ | ||
571 | #endif | ||
572 | |||
573 | 2: REST_4GPRS(3, r11) | ||
574 | lwz r10,_CCR(r11) | ||
575 | REST_GPR(1, r11) | ||
576 | mtcr r10 | ||
577 | lwz r10,_LINK(r11) | ||
578 | mtlr r10 | ||
579 | REST_GPR(10, r11) | ||
580 | mtspr SPRN_SRR1,r9 | ||
581 | mtspr SPRN_SRR0,r12 | ||
582 | REST_GPR(9, r11) | ||
583 | REST_GPR(12, r11) | ||
584 | lwz r11,GPR11(r11) | ||
585 | SYNC | ||
586 | RFI | ||
587 | |||
588 | #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) | ||
589 | /* check if the exception happened in a restartable section */ | ||
590 | 1: lis r3,exc_exit_restart_end@ha | ||
591 | addi r3,r3,exc_exit_restart_end@l | ||
592 | cmplw r12,r3 | ||
593 | bge 3f | ||
594 | lis r4,exc_exit_restart@ha | ||
595 | addi r4,r4,exc_exit_restart@l | ||
596 | cmplw r12,r4 | ||
597 | blt 3f | ||
598 | lis r3,fee_restarts@ha | ||
599 | tophys(r3,r3) | ||
600 | lwz r5,fee_restarts@l(r3) | ||
601 | addi r5,r5,1 | ||
602 | stw r5,fee_restarts@l(r3) | ||
603 | mr r12,r4 /* restart at exc_exit_restart */ | ||
604 | b 2b | ||
605 | |||
606 | .comm fee_restarts,4 | ||
607 | |||
608 | /* aargh, a nonrecoverable interrupt, panic */ | ||
609 | /* aargh, we don't know which trap this is */ | ||
610 | /* but the 601 doesn't implement the RI bit, so assume it's OK */ | ||
611 | 3: | ||
612 | BEGIN_FTR_SECTION | ||
613 | b 2b | ||
614 | END_FTR_SECTION_IFSET(CPU_FTR_601) | ||
615 | li r10,-1 | ||
616 | stw r10,TRAP(r11) | ||
617 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
618 | lis r10,MSR_KERNEL@h | ||
619 | ori r10,r10,MSR_KERNEL@l | ||
620 | bl transfer_to_handler_full | ||
621 | .long nonrecoverable_exception | ||
622 | .long ret_from_except | ||
623 | #endif | ||
624 | |||
566 | .globl sigreturn_exit | 625 | .globl sigreturn_exit |
567 | sigreturn_exit: | 626 | sigreturn_exit: |
568 | subi r1,r3,STACK_FRAME_OVERHEAD | 627 | subi r1,r3,STACK_FRAME_OVERHEAD |
diff --git a/arch/ppc/kernel/fpu.S b/arch/ppc/kernel/fpu.S new file mode 100644 index 000000000000..6189b26f640f --- /dev/null +++ b/arch/ppc/kernel/fpu.S | |||
@@ -0,0 +1,133 @@ | |||
1 | /* | ||
2 | * FPU support code, moved here from head.S so that it can be used | ||
3 | * by chips which use other head-whatever.S files. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version | ||
8 | * 2 of the License, or (at your option) any later version. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <linux/config.h> | ||
13 | #include <asm/processor.h> | ||
14 | #include <asm/page.h> | ||
15 | #include <asm/mmu.h> | ||
16 | #include <asm/pgtable.h> | ||
17 | #include <asm/cputable.h> | ||
18 | #include <asm/cache.h> | ||
19 | #include <asm/thread_info.h> | ||
20 | #include <asm/ppc_asm.h> | ||
21 | #include <asm/offsets.h> | ||
22 | |||
23 | /* | ||
24 | * This task wants to use the FPU now. | ||
25 | * On UP, disable FP for the task which had the FPU previously, | ||
26 | * and save its floating-point registers in its thread_struct. | ||
27 | * Load up this task's FP registers from its thread_struct, | ||
28 | * enable the FPU for the current task and return to the task. | ||
29 | */ | ||
30 | .globl load_up_fpu | ||
31 | load_up_fpu: | ||
32 | mfmsr r5 | ||
33 | ori r5,r5,MSR_FP | ||
34 | #ifdef CONFIG_PPC64BRIDGE | ||
35 | clrldi r5,r5,1 /* turn off 64-bit mode */ | ||
36 | #endif /* CONFIG_PPC64BRIDGE */ | ||
37 | SYNC | ||
38 | MTMSRD(r5) /* enable use of fpu now */ | ||
39 | isync | ||
40 | /* | ||
41 | * For SMP, we don't do lazy FPU switching because it just gets too | ||
42 | * horrendously complex, especially when a task switches from one CPU | ||
43 | * to another. Instead we call giveup_fpu in switch_to. | ||
44 | */ | ||
45 | #ifndef CONFIG_SMP | ||
46 | tophys(r6,0) /* get __pa constant */ | ||
47 | addis r3,r6,last_task_used_math@ha | ||
48 | lwz r4,last_task_used_math@l(r3) | ||
49 | cmpwi 0,r4,0 | ||
50 | beq 1f | ||
51 | add r4,r4,r6 | ||
52 | addi r4,r4,THREAD /* want last_task_used_math->thread */ | ||
53 | SAVE_32FPRS(0, r4) | ||
54 | mffs fr0 | ||
55 | stfd fr0,THREAD_FPSCR-4(r4) | ||
56 | lwz r5,PT_REGS(r4) | ||
57 | add r5,r5,r6 | ||
58 | lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
59 | li r10,MSR_FP|MSR_FE0|MSR_FE1 | ||
60 | andc r4,r4,r10 /* disable FP for previous task */ | ||
61 | stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
62 | 1: | ||
63 | #endif /* CONFIG_SMP */ | ||
64 | /* enable use of FP after return */ | ||
65 | mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */ | ||
66 | lwz r4,THREAD_FPEXC_MODE(r5) | ||
67 | ori r9,r9,MSR_FP /* enable FP for current */ | ||
68 | or r9,r9,r4 | ||
69 | lfd fr0,THREAD_FPSCR-4(r5) | ||
70 | mtfsf 0xff,fr0 | ||
71 | REST_32FPRS(0, r5) | ||
72 | #ifndef CONFIG_SMP | ||
73 | subi r4,r5,THREAD | ||
74 | sub r4,r4,r6 | ||
75 | stw r4,last_task_used_math@l(r3) | ||
76 | #endif /* CONFIG_SMP */ | ||
77 | /* restore registers and return */ | ||
78 | /* we haven't used ctr or xer or lr */ | ||
79 | b fast_exception_return | ||
80 | |||
81 | /* | ||
82 | * FP unavailable trap from kernel - print a message, but let | ||
83 | * the task use FP in the kernel until it returns to user mode. | ||
84 | */ | ||
85 | .globl KernelFP | ||
86 | KernelFP: | ||
87 | lwz r3,_MSR(r1) | ||
88 | ori r3,r3,MSR_FP | ||
89 | stw r3,_MSR(r1) /* enable use of FP after return */ | ||
90 | lis r3,86f@h | ||
91 | ori r3,r3,86f@l | ||
92 | mr r4,r2 /* current */ | ||
93 | lwz r5,_NIP(r1) | ||
94 | bl printk | ||
95 | b ret_from_except | ||
96 | 86: .string "floating point used in kernel (task=%p, pc=%x)\n" | ||
97 | .align 4,0 | ||
98 | |||
99 | /* | ||
100 | * giveup_fpu(tsk) | ||
101 | * Disable FP for the task given as the argument, | ||
102 | * and save the floating-point registers in its thread_struct. | ||
103 | * Enables the FPU for use in the kernel on return. | ||
104 | */ | ||
105 | .globl giveup_fpu | ||
106 | giveup_fpu: | ||
107 | mfmsr r5 | ||
108 | ori r5,r5,MSR_FP | ||
109 | SYNC_601 | ||
110 | ISYNC_601 | ||
111 | MTMSRD(r5) /* enable use of fpu now */ | ||
112 | SYNC_601 | ||
113 | isync | ||
114 | cmpwi 0,r3,0 | ||
115 | beqlr- /* if no previous owner, done */ | ||
116 | addi r3,r3,THREAD /* want THREAD of task */ | ||
117 | lwz r5,PT_REGS(r3) | ||
118 | cmpwi 0,r5,0 | ||
119 | SAVE_32FPRS(0, r3) | ||
120 | mffs fr0 | ||
121 | stfd fr0,THREAD_FPSCR-4(r3) | ||
122 | beq 1f | ||
123 | lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
124 | li r3,MSR_FP|MSR_FE0|MSR_FE1 | ||
125 | andc r4,r4,r3 /* disable FP for previous task */ | ||
126 | stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
127 | 1: | ||
128 | #ifndef CONFIG_SMP | ||
129 | li r5,0 | ||
130 | lis r4,last_task_used_math@ha | ||
131 | stw r5,last_task_used_math@l(r4) | ||
132 | #endif /* CONFIG_SMP */ | ||
133 | blr | ||
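Note: fpu.S is a straight move of the FP-unavailable and giveup_fpu() code out of head.S so that the 4xx and Book-E head files can share it once CONFIG_PPC_FPU is selected. The policy it implements is the usual uniprocessor lazy switch: the FPU is owned by whichever task last faulted on it, and the previous owner's registers are saved only at that point. A C-level restatement of that policy, purely illustrative (the real state lives in thread_struct and the real transitions are the assembly above; fpu_owner and restore_fp_regs() are invented names for this sketch):

    static struct task_struct *fpu_owner;   /* plays the role of last_task_used_math */

    void handle_fp_unavailable(struct task_struct *curr)
    {
            if (fpu_owner && fpu_owner != curr)
                    giveup_fpu(fpu_owner);  /* save old owner's FPRs, clear MSR_FP in its regs */
            restore_fp_regs(curr);          /* invented helper: reload curr's FPRs and FPSCR */
            fpu_owner = curr;               /* curr keeps the FPU until someone else faults */
    }

On SMP this lazy scheme is skipped entirely and giveup_fpu() is called from switch_to(), as the comment in the file notes.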
diff --git a/arch/ppc/kernel/head.S b/arch/ppc/kernel/head.S index 1a89a71e0acc..a931d773715f 100644 --- a/arch/ppc/kernel/head.S +++ b/arch/ppc/kernel/head.S | |||
@@ -775,133 +775,6 @@ InstructionSegment: | |||
775 | EXC_XFER_STD(0x480, UnknownException) | 775 | EXC_XFER_STD(0x480, UnknownException) |
776 | #endif /* CONFIG_PPC64BRIDGE */ | 776 | #endif /* CONFIG_PPC64BRIDGE */ |
777 | 777 | ||
778 | /* | ||
779 | * This task wants to use the FPU now. | ||
780 | * On UP, disable FP for the task which had the FPU previously, | ||
781 | * and save its floating-point registers in its thread_struct. | ||
782 | * Load up this task's FP registers from its thread_struct, | ||
783 | * enable the FPU for the current task and return to the task. | ||
784 | */ | ||
785 | load_up_fpu: | ||
786 | mfmsr r5 | ||
787 | ori r5,r5,MSR_FP | ||
788 | #ifdef CONFIG_PPC64BRIDGE | ||
789 | clrldi r5,r5,1 /* turn off 64-bit mode */ | ||
790 | #endif /* CONFIG_PPC64BRIDGE */ | ||
791 | SYNC | ||
792 | MTMSRD(r5) /* enable use of fpu now */ | ||
793 | isync | ||
794 | /* | ||
795 | * For SMP, we don't do lazy FPU switching because it just gets too | ||
796 | * horrendously complex, especially when a task switches from one CPU | ||
797 | * to another. Instead we call giveup_fpu in switch_to. | ||
798 | */ | ||
799 | #ifndef CONFIG_SMP | ||
800 | tophys(r6,0) /* get __pa constant */ | ||
801 | addis r3,r6,last_task_used_math@ha | ||
802 | lwz r4,last_task_used_math@l(r3) | ||
803 | cmpwi 0,r4,0 | ||
804 | beq 1f | ||
805 | add r4,r4,r6 | ||
806 | addi r4,r4,THREAD /* want last_task_used_math->thread */ | ||
807 | SAVE_32FPRS(0, r4) | ||
808 | mffs fr0 | ||
809 | stfd fr0,THREAD_FPSCR-4(r4) | ||
810 | lwz r5,PT_REGS(r4) | ||
811 | add r5,r5,r6 | ||
812 | lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
813 | li r10,MSR_FP|MSR_FE0|MSR_FE1 | ||
814 | andc r4,r4,r10 /* disable FP for previous task */ | ||
815 | stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
816 | 1: | ||
817 | #endif /* CONFIG_SMP */ | ||
818 | /* enable use of FP after return */ | ||
819 | mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */ | ||
820 | lwz r4,THREAD_FPEXC_MODE(r5) | ||
821 | ori r9,r9,MSR_FP /* enable FP for current */ | ||
822 | or r9,r9,r4 | ||
823 | lfd fr0,THREAD_FPSCR-4(r5) | ||
824 | mtfsf 0xff,fr0 | ||
825 | REST_32FPRS(0, r5) | ||
826 | #ifndef CONFIG_SMP | ||
827 | subi r4,r5,THREAD | ||
828 | sub r4,r4,r6 | ||
829 | stw r4,last_task_used_math@l(r3) | ||
830 | #endif /* CONFIG_SMP */ | ||
831 | /* restore registers and return */ | ||
832 | /* we haven't used ctr or xer or lr */ | ||
833 | /* fall through to fast_exception_return */ | ||
834 | |||
835 | .globl fast_exception_return | ||
836 | fast_exception_return: | ||
837 | andi. r10,r9,MSR_RI /* check for recoverable interrupt */ | ||
838 | beq 1f /* if not, we've got problems */ | ||
839 | 2: REST_4GPRS(3, r11) | ||
840 | lwz r10,_CCR(r11) | ||
841 | REST_GPR(1, r11) | ||
842 | mtcr r10 | ||
843 | lwz r10,_LINK(r11) | ||
844 | mtlr r10 | ||
845 | REST_GPR(10, r11) | ||
846 | mtspr SPRN_SRR1,r9 | ||
847 | mtspr SPRN_SRR0,r12 | ||
848 | REST_GPR(9, r11) | ||
849 | REST_GPR(12, r11) | ||
850 | lwz r11,GPR11(r11) | ||
851 | SYNC | ||
852 | RFI | ||
853 | |||
854 | /* check if the exception happened in a restartable section */ | ||
855 | 1: lis r3,exc_exit_restart_end@ha | ||
856 | addi r3,r3,exc_exit_restart_end@l | ||
857 | cmplw r12,r3 | ||
858 | bge 3f | ||
859 | lis r4,exc_exit_restart@ha | ||
860 | addi r4,r4,exc_exit_restart@l | ||
861 | cmplw r12,r4 | ||
862 | blt 3f | ||
863 | lis r3,fee_restarts@ha | ||
864 | tophys(r3,r3) | ||
865 | lwz r5,fee_restarts@l(r3) | ||
866 | addi r5,r5,1 | ||
867 | stw r5,fee_restarts@l(r3) | ||
868 | mr r12,r4 /* restart at exc_exit_restart */ | ||
869 | b 2b | ||
870 | |||
871 | .comm fee_restarts,4 | ||
872 | |||
873 | /* aargh, a nonrecoverable interrupt, panic */ | ||
874 | /* aargh, we don't know which trap this is */ | ||
875 | /* but the 601 doesn't implement the RI bit, so assume it's OK */ | ||
876 | 3: | ||
877 | BEGIN_FTR_SECTION | ||
878 | b 2b | ||
879 | END_FTR_SECTION_IFSET(CPU_FTR_601) | ||
880 | li r10,-1 | ||
881 | stw r10,TRAP(r11) | ||
882 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
883 | li r10,MSR_KERNEL | ||
884 | bl transfer_to_handler_full | ||
885 | .long nonrecoverable_exception | ||
886 | .long ret_from_except | ||
887 | |||
888 | /* | ||
889 | * FP unavailable trap from kernel - print a message, but let | ||
890 | * the task use FP in the kernel until it returns to user mode. | ||
891 | */ | ||
892 | KernelFP: | ||
893 | lwz r3,_MSR(r1) | ||
894 | ori r3,r3,MSR_FP | ||
895 | stw r3,_MSR(r1) /* enable use of FP after return */ | ||
896 | lis r3,86f@h | ||
897 | ori r3,r3,86f@l | ||
898 | mr r4,r2 /* current */ | ||
899 | lwz r5,_NIP(r1) | ||
900 | bl printk | ||
901 | b ret_from_except | ||
902 | 86: .string "floating point used in kernel (task=%p, pc=%x)\n" | ||
903 | .align 4,0 | ||
904 | |||
905 | #ifdef CONFIG_ALTIVEC | 778 | #ifdef CONFIG_ALTIVEC |
906 | /* Note that the AltiVec support is closely modeled after the FP | 779 | /* Note that the AltiVec support is closely modeled after the FP |
907 | * support. Changes to one are likely to be applicable to the | 780 | * support. Changes to one are likely to be applicable to the |
@@ -1016,42 +889,6 @@ giveup_altivec: | |||
1016 | #endif /* CONFIG_ALTIVEC */ | 889 | #endif /* CONFIG_ALTIVEC */ |
1017 | 890 | ||
1018 | /* | 891 | /* |
1019 | * giveup_fpu(tsk) | ||
1020 | * Disable FP for the task given as the argument, | ||
1021 | * and save the floating-point registers in its thread_struct. | ||
1022 | * Enables the FPU for use in the kernel on return. | ||
1023 | */ | ||
1024 | .globl giveup_fpu | ||
1025 | giveup_fpu: | ||
1026 | mfmsr r5 | ||
1027 | ori r5,r5,MSR_FP | ||
1028 | SYNC_601 | ||
1029 | ISYNC_601 | ||
1030 | MTMSRD(r5) /* enable use of fpu now */ | ||
1031 | SYNC_601 | ||
1032 | isync | ||
1033 | cmpwi 0,r3,0 | ||
1034 | beqlr- /* if no previous owner, done */ | ||
1035 | addi r3,r3,THREAD /* want THREAD of task */ | ||
1036 | lwz r5,PT_REGS(r3) | ||
1037 | cmpwi 0,r5,0 | ||
1038 | SAVE_32FPRS(0, r3) | ||
1039 | mffs fr0 | ||
1040 | stfd fr0,THREAD_FPSCR-4(r3) | ||
1041 | beq 1f | ||
1042 | lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
1043 | li r3,MSR_FP|MSR_FE0|MSR_FE1 | ||
1044 | andc r4,r4,r3 /* disable FP for previous task */ | ||
1045 | stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
1046 | 1: | ||
1047 | #ifndef CONFIG_SMP | ||
1048 | li r5,0 | ||
1049 | lis r4,last_task_used_math@ha | ||
1050 | stw r5,last_task_used_math@l(r4) | ||
1051 | #endif /* CONFIG_SMP */ | ||
1052 | blr | ||
1053 | |||
1054 | /* | ||
1055 | * This code is jumped to from the startup code to copy | 892 | * This code is jumped to from the startup code to copy |
1056 | * the kernel image to physical address 0. | 893 | * the kernel image to physical address 0. |
1057 | */ | 894 | */ |
diff --git a/arch/ppc/kernel/head_44x.S b/arch/ppc/kernel/head_44x.S index 9ed8165a3d6c..9b6a8e513657 100644 --- a/arch/ppc/kernel/head_44x.S +++ b/arch/ppc/kernel/head_44x.S | |||
@@ -426,7 +426,11 @@ interrupt_base: | |||
426 | PROGRAM_EXCEPTION | 426 | PROGRAM_EXCEPTION |
427 | 427 | ||
428 | /* Floating Point Unavailable Interrupt */ | 428 | /* Floating Point Unavailable Interrupt */ |
429 | #ifdef CONFIG_PPC_FPU | ||
430 | FP_UNAVAILABLE_EXCEPTION | ||
431 | #else | ||
429 | EXCEPTION(0x2010, FloatingPointUnavailable, UnknownException, EXC_XFER_EE) | 432 | EXCEPTION(0x2010, FloatingPointUnavailable, UnknownException, EXC_XFER_EE) |
433 | #endif | ||
430 | 434 | ||
431 | /* System Call Interrupt */ | 435 | /* System Call Interrupt */ |
432 | START_EXCEPTION(SystemCall) | 436 | START_EXCEPTION(SystemCall) |
@@ -686,8 +690,10 @@ _GLOBAL(giveup_altivec) | |||
686 | * | 690 | * |
687 | * The 44x core does not have an FPU. | 691 | * The 44x core does not have an FPU. |
688 | */ | 692 | */ |
693 | #ifndef CONFIG_PPC_FPU | ||
689 | _GLOBAL(giveup_fpu) | 694 | _GLOBAL(giveup_fpu) |
690 | blr | 695 | blr |
696 | #endif | ||
691 | 697 | ||
692 | /* | 698 | /* |
693 | * extern void abort(void) | 699 | * extern void abort(void) |
diff --git a/arch/ppc/kernel/head_booke.h b/arch/ppc/kernel/head_booke.h index 884dac916bce..f213d12eec08 100644 --- a/arch/ppc/kernel/head_booke.h +++ b/arch/ppc/kernel/head_booke.h | |||
@@ -337,4 +337,11 @@ label: | |||
337 | addi r3,r1,STACK_FRAME_OVERHEAD; \ | 337 | addi r3,r1,STACK_FRAME_OVERHEAD; \ |
338 | EXC_XFER_LITE(0x0900, timer_interrupt) | 338 | EXC_XFER_LITE(0x0900, timer_interrupt) |
339 | 339 | ||
340 | #define FP_UNAVAILABLE_EXCEPTION \ | ||
341 | START_EXCEPTION(FloatingPointUnavailable) \ | ||
342 | NORMAL_EXCEPTION_PROLOG; \ | ||
343 | bne load_up_fpu; /* if from user, just load it up */ \ | ||
344 | addi r3,r1,STACK_FRAME_OVERHEAD; \ | ||
345 | EXC_XFER_EE_LITE(0x800, KernelFP) | ||
346 | |||
340 | #endif /* __HEAD_BOOKE_H__ */ | 347 | #endif /* __HEAD_BOOKE_H__ */ |
diff --git a/arch/ppc/kernel/head_fsl_booke.S b/arch/ppc/kernel/head_fsl_booke.S index d64bf61d2b1f..f22ddce36135 100644 --- a/arch/ppc/kernel/head_fsl_booke.S +++ b/arch/ppc/kernel/head_fsl_booke.S | |||
@@ -504,7 +504,11 @@ interrupt_base: | |||
504 | PROGRAM_EXCEPTION | 504 | PROGRAM_EXCEPTION |
505 | 505 | ||
506 | /* Floating Point Unavailable Interrupt */ | 506 | /* Floating Point Unavailable Interrupt */ |
507 | #ifdef CONFIG_PPC_FPU | ||
508 | FP_UNAVAILABLE_EXCEPTION | ||
509 | #else | ||
507 | EXCEPTION(0x0800, FloatingPointUnavailable, UnknownException, EXC_XFER_EE) | 510 | EXCEPTION(0x0800, FloatingPointUnavailable, UnknownException, EXC_XFER_EE) |
511 | #endif | ||
508 | 512 | ||
509 | /* System Call Interrupt */ | 513 | /* System Call Interrupt */ |
510 | START_EXCEPTION(SystemCall) | 514 | START_EXCEPTION(SystemCall) |
@@ -916,10 +920,12 @@ _GLOBAL(giveup_spe) | |||
916 | /* | 920 | /* |
917 | * extern void giveup_fpu(struct task_struct *prev) | 921 | * extern void giveup_fpu(struct task_struct *prev) |
918 | * | 922 | * |
919 | * The e500 core does not have an FPU. | 923 | * Not all FSL Book-E cores have an FPU |
920 | */ | 924 | */ |
925 | #ifndef CONFIG_PPC_FPU | ||
921 | _GLOBAL(giveup_fpu) | 926 | _GLOBAL(giveup_fpu) |
922 | blr | 927 | blr |
928 | #endif | ||
923 | 929 | ||
924 | /* | 930 | /* |
925 | * extern void abort(void) | 931 | * extern void abort(void) |
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S index 73f7c23b0dd4..e4f1615ec13f 100644 --- a/arch/ppc/kernel/misc.S +++ b/arch/ppc/kernel/misc.S | |||
@@ -1096,17 +1096,7 @@ _GLOBAL(_get_SP) | |||
1096 | * and exceptions as if the cpu had performed the load or store. | 1096 | * and exceptions as if the cpu had performed the load or store. |
1097 | */ | 1097 | */ |
1098 | 1098 | ||
1099 | #if defined(CONFIG_4xx) || defined(CONFIG_E500) | 1099 | #ifdef CONFIG_PPC_FPU |
1100 | _GLOBAL(cvt_fd) | ||
1101 | lfs 0,0(r3) | ||
1102 | stfd 0,0(r4) | ||
1103 | blr | ||
1104 | |||
1105 | _GLOBAL(cvt_df) | ||
1106 | lfd 0,0(r3) | ||
1107 | stfs 0,0(r4) | ||
1108 | blr | ||
1109 | #else | ||
1110 | _GLOBAL(cvt_fd) | 1100 | _GLOBAL(cvt_fd) |
1111 | lfd 0,-4(r5) /* load up fpscr value */ | 1101 | lfd 0,-4(r5) /* load up fpscr value */ |
1112 | mtfsf 0xff,0 | 1102 | mtfsf 0xff,0 |
diff --git a/arch/ppc/kernel/pci.c b/arch/ppc/kernel/pci.c index 98f94b60204c..47a15306823a 100644 --- a/arch/ppc/kernel/pci.c +++ b/arch/ppc/kernel/pci.c | |||
@@ -1432,7 +1432,7 @@ pci_bus_to_hose(int bus) | |||
1432 | return NULL; | 1432 | return NULL; |
1433 | } | 1433 | } |
1434 | 1434 | ||
1435 | void* | 1435 | void __iomem * |
1436 | pci_bus_io_base(unsigned int bus) | 1436 | pci_bus_io_base(unsigned int bus) |
1437 | { | 1437 | { |
1438 | struct pci_controller *hose; | 1438 | struct pci_controller *hose; |
diff --git a/arch/ppc/kernel/ptrace.c b/arch/ppc/kernel/ptrace.c index 426b6f7d9de3..59d59a8dc249 100644 --- a/arch/ppc/kernel/ptrace.c +++ b/arch/ppc/kernel/ptrace.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/ptrace.h> | 26 | #include <linux/ptrace.h> |
27 | #include <linux/user.h> | 27 | #include <linux/user.h> |
28 | #include <linux/security.h> | 28 | #include <linux/security.h> |
29 | #include <linux/signal.h> | ||
29 | 30 | ||
30 | #include <asm/uaccess.h> | 31 | #include <asm/uaccess.h> |
31 | #include <asm/page.h> | 32 | #include <asm/page.h> |
@@ -356,7 +357,7 @@ int sys_ptrace(long request, long pid, long addr, long data) | |||
356 | case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ | 357 | case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ |
357 | case PTRACE_CONT: { /* restart after signal. */ | 358 | case PTRACE_CONT: { /* restart after signal. */ |
358 | ret = -EIO; | 359 | ret = -EIO; |
359 | if ((unsigned long) data > _NSIG) | 360 | if (!valid_signal(data)) |
360 | break; | 361 | break; |
361 | if (request == PTRACE_SYSCALL) { | 362 | if (request == PTRACE_SYSCALL) { |
362 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 363 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
@@ -389,7 +390,7 @@ int sys_ptrace(long request, long pid, long addr, long data) | |||
389 | 390 | ||
390 | case PTRACE_SINGLESTEP: { /* set the trap flag. */ | 391 | case PTRACE_SINGLESTEP: { /* set the trap flag. */ |
391 | ret = -EIO; | 392 | ret = -EIO; |
392 | if ((unsigned long) data > _NSIG) | 393 | if (!valid_signal(data)) |
393 | break; | 394 | break; |
394 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 395 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
395 | set_single_step(child); | 396 | set_single_step(child); |
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c index e97ce635b99e..5dfb42f1a152 100644 --- a/arch/ppc/kernel/setup.c +++ b/arch/ppc/kernel/setup.c | |||
@@ -221,27 +221,26 @@ int show_cpuinfo(struct seq_file *m, void *v) | |||
221 | return err; | 221 | return err; |
222 | } | 222 | } |
223 | 223 | ||
224 | switch (PVR_VER(pvr)) { | 224 | /* If we are a Freescale core, do a simple check so |
225 | case 0x0020: /* 403 family */ | 225 | * we don't have to keep adding cases in the future */ |
226 | maj = PVR_MAJ(pvr) + 1; | 226 | if ((PVR_VER(pvr) & 0x8000) == 0x8000) { |
227 | min = PVR_MIN(pvr); | ||
228 | break; | ||
229 | case 0x1008: /* 740P/750P ?? */ | ||
230 | maj = ((pvr >> 8) & 0xFF) - 1; | ||
231 | min = pvr & 0xFF; | ||
232 | break; | ||
233 | case 0x8083: /* e300 */ | ||
234 | maj = PVR_MAJ(pvr); | ||
235 | min = PVR_MIN(pvr); | ||
236 | break; | ||
237 | case 0x8020: /* e500 */ | ||
238 | maj = PVR_MAJ(pvr); | 227 | maj = PVR_MAJ(pvr); |
239 | min = PVR_MIN(pvr); | 228 | min = PVR_MIN(pvr); |
240 | break; | 229 | } else { |
241 | default: | 230 | switch (PVR_VER(pvr)) { |
242 | maj = (pvr >> 8) & 0xFF; | 231 | case 0x0020: /* 403 family */ |
243 | min = pvr & 0xFF; | 232 | maj = PVR_MAJ(pvr) + 1; |
244 | break; | 233 | min = PVR_MIN(pvr); |
234 | break; | ||
235 | case 0x1008: /* 740P/750P ?? */ | ||
236 | maj = ((pvr >> 8) & 0xFF) - 1; | ||
237 | min = pvr & 0xFF; | ||
238 | break; | ||
239 | default: | ||
240 | maj = (pvr >> 8) & 0xFF; | ||
241 | min = pvr & 0xFF; | ||
242 | break; | ||
243 | } | ||
245 | } | 244 | } |
246 | 245 | ||
247 | seq_printf(m, "revision\t: %hd.%hd (pvr %04x %04x)\n", | 246 | seq_printf(m, "revision\t: %hd.%hd (pvr %04x %04x)\n", |
diff --git a/arch/ppc/kernel/signal.c b/arch/ppc/kernel/signal.c index 645eae19805c..7c8437da09d5 100644 --- a/arch/ppc/kernel/signal.c +++ b/arch/ppc/kernel/signal.c | |||
@@ -511,7 +511,7 @@ int sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, | |||
511 | } | 511 | } |
512 | 512 | ||
513 | int sys_debug_setcontext(struct ucontext __user *ctx, | 513 | int sys_debug_setcontext(struct ucontext __user *ctx, |
514 | int ndbg, struct sig_dbg_op *dbg, | 514 | int ndbg, struct sig_dbg_op __user *dbg, |
515 | int r6, int r7, int r8, | 515 | int r6, int r7, int r8, |
516 | struct pt_regs *regs) | 516 | struct pt_regs *regs) |
517 | { | 517 | { |
@@ -632,7 +632,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, | |||
632 | if (__put_user((unsigned long) ka->sa.sa_handler, &sc->handler) | 632 | if (__put_user((unsigned long) ka->sa.sa_handler, &sc->handler) |
633 | || __put_user(oldset->sig[0], &sc->oldmask) | 633 | || __put_user(oldset->sig[0], &sc->oldmask) |
634 | || __put_user(oldset->sig[1], &sc->_unused[3]) | 634 | || __put_user(oldset->sig[1], &sc->_unused[3]) |
635 | || __put_user((struct pt_regs *)frame, &sc->regs) | 635 | || __put_user((struct pt_regs __user *)frame, &sc->regs) |
636 | || __put_user(sig, &sc->signal)) | 636 | || __put_user(sig, &sc->signal)) |
637 | goto badframe; | 637 | goto badframe; |
638 | 638 | ||
diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c index 002322a1f3ce..f8e7e324a173 100644 --- a/arch/ppc/kernel/traps.c +++ b/arch/ppc/kernel/traps.c | |||
@@ -176,7 +176,7 @@ static inline int check_io_access(struct pt_regs *regs) | |||
176 | #else | 176 | #else |
177 | #define get_mc_reason(regs) (mfspr(SPRN_MCSR)) | 177 | #define get_mc_reason(regs) (mfspr(SPRN_MCSR)) |
178 | #endif | 178 | #endif |
179 | #define REASON_FP 0 | 179 | #define REASON_FP ESR_FP |
180 | #define REASON_ILLEGAL ESR_PIL | 180 | #define REASON_ILLEGAL ESR_PIL |
181 | #define REASON_PRIVILEGED ESR_PPR | 181 | #define REASON_PRIVILEGED ESR_PPR |
182 | #define REASON_TRAP ESR_PTR | 182 | #define REASON_TRAP ESR_PTR |
@@ -403,7 +403,7 @@ static int emulate_string_inst(struct pt_regs *regs, u32 instword) | |||
403 | u8 rA = (instword >> 16) & 0x1f; | 403 | u8 rA = (instword >> 16) & 0x1f; |
404 | u8 NB_RB = (instword >> 11) & 0x1f; | 404 | u8 NB_RB = (instword >> 11) & 0x1f; |
405 | u32 num_bytes; | 405 | u32 num_bytes; |
406 | u32 EA; | 406 | unsigned long EA; |
407 | int pos = 0; | 407 | int pos = 0; |
408 | 408 | ||
409 | /* Early out if we are an invalid form of lswx */ | 409 | /* Early out if we are an invalid form of lswx */ |
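Note: redefining REASON_FP from 0 to ESR_FP matters because the Book-E program-check handler tests `reason & REASON_FP` to decide whether the trap was a floating-point exception; with the old value of 0 that test could never be true, so FP traps fell through to the illegal-instruction path. A hedged sketch of the dispatch this enables (every helper name below other than the REASON_* macros is a placeholder, not a real kernel function):

    static void program_check_dispatch(struct pt_regs *regs, unsigned long reason)
    {
            if (reason & REASON_FP) {       /* reachable on Book-E only now that ESR_FP != 0 */
                    deliver_fp_signal(regs);        /* placeholder: SIGFPE with a decoded si_code */
                    return;
            }
            if (reason & REASON_TRAP) {
                    handle_trap_instruction(regs);  /* placeholder */
                    return;
            }
            deliver_illegal_instruction(regs);      /* placeholder */
    }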
diff --git a/arch/ppc/kernel/vmlinux.lds.S b/arch/ppc/kernel/vmlinux.lds.S index 0c0e714b84de..9353584fb710 100644 --- a/arch/ppc/kernel/vmlinux.lds.S +++ b/arch/ppc/kernel/vmlinux.lds.S | |||
@@ -145,6 +145,7 @@ SECTIONS | |||
145 | __init_end = .; | 145 | __init_end = .; |
146 | 146 | ||
147 | . = ALIGN(4096); | 147 | . = ALIGN(4096); |
148 | _sextratext = .; | ||
148 | __pmac_begin = .; | 149 | __pmac_begin = .; |
149 | .pmac.text : { *(.pmac.text) } | 150 | .pmac.text : { *(.pmac.text) } |
150 | .pmac.data : { *(.pmac.data) } | 151 | .pmac.data : { *(.pmac.data) } |
@@ -171,6 +172,7 @@ SECTIONS | |||
171 | .openfirmware.data : { *(.openfirmware.data) } | 172 | .openfirmware.data : { *(.openfirmware.data) } |
172 | . = ALIGN(4096); | 173 | . = ALIGN(4096); |
173 | __openfirmware_end = .; | 174 | __openfirmware_end = .; |
175 | _eextratext = .; | ||
174 | 176 | ||
175 | __bss_start = .; | 177 | __bss_start = .; |
176 | .bss : | 178 | .bss : |
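Note: _sextratext and _eextratext bracket the pmac/chrp/openfirmware text and data that follow __init_end, giving the rest of the kernel one named range for that platform-specific material. Linker-script symbols like these are reachable from C as extern arrays; the snippet below only demonstrates that access pattern, and whether any code actually frees or reports this range is an assumption, not something this hunk shows:

    extern char _sextratext[], _eextratext[];   /* provided by vmlinux.lds.S */

    static unsigned long extratext_bytes(void)
    {
            return (unsigned long)(_eextratext - _sextratext);
    }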
diff --git a/arch/ppc/platforms/4xx/ebony.c b/arch/ppc/platforms/4xx/ebony.c index f63bca83e757..cd11734ef7c5 100644 --- a/arch/ppc/platforms/4xx/ebony.c +++ b/arch/ppc/platforms/4xx/ebony.c | |||
@@ -149,7 +149,7 @@ ebony_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin) | |||
149 | static void __init | 149 | static void __init |
150 | ebony_setup_pcix(void) | 150 | ebony_setup_pcix(void) |
151 | { | 151 | { |
152 | void *pcix_reg_base; | 152 | void __iomem *pcix_reg_base; |
153 | 153 | ||
154 | pcix_reg_base = ioremap64(PCIX0_REG_BASE, PCIX_REG_SIZE); | 154 | pcix_reg_base = ioremap64(PCIX0_REG_BASE, PCIX_REG_SIZE); |
155 | 155 | ||
@@ -210,9 +210,8 @@ ebony_setup_hose(void) | |||
210 | hose->io_space.end = EBONY_PCI_UPPER_IO; | 210 | hose->io_space.end = EBONY_PCI_UPPER_IO; |
211 | hose->mem_space.start = EBONY_PCI_LOWER_MEM; | 211 | hose->mem_space.start = EBONY_PCI_LOWER_MEM; |
212 | hose->mem_space.end = EBONY_PCI_UPPER_MEM; | 212 | hose->mem_space.end = EBONY_PCI_UPPER_MEM; |
213 | isa_io_base = | 213 | hose->io_base_virt = ioremap64(EBONY_PCI_IO_BASE, EBONY_PCI_IO_SIZE); |
214 | (unsigned long)ioremap64(EBONY_PCI_IO_BASE, EBONY_PCI_IO_SIZE); | 214 | isa_io_base = (unsigned long)hose->io_base_virt; |
215 | hose->io_base_virt = (void *)isa_io_base; | ||
216 | 215 | ||
217 | setup_indirect_pci(hose, | 216 | setup_indirect_pci(hose, |
218 | EBONY_PCI_CFGA_PLB32, | 217 | EBONY_PCI_CFGA_PLB32, |
diff --git a/arch/ppc/platforms/4xx/luan.c b/arch/ppc/platforms/4xx/luan.c index 1df2339f1f6c..95359f748e7b 100644 --- a/arch/ppc/platforms/4xx/luan.c +++ b/arch/ppc/platforms/4xx/luan.c | |||
@@ -223,9 +223,8 @@ luan_setup_hose(struct pci_controller *hose, | |||
223 | hose->io_space.end = LUAN_PCIX_UPPER_IO; | 223 | hose->io_space.end = LUAN_PCIX_UPPER_IO; |
224 | hose->mem_space.start = lower_mem; | 224 | hose->mem_space.start = lower_mem; |
225 | hose->mem_space.end = upper_mem; | 225 | hose->mem_space.end = upper_mem; |
226 | isa_io_base = | 226 | hose->io_base_virt = ioremap64(pcix_io_base, PCIX_IO_SIZE); |
227 | (unsigned long)ioremap64(pcix_io_base, PCIX_IO_SIZE); | 227 | isa_io_base = (unsigned long) hose->io_base_virt; |
228 | hose->io_base_virt = (void *)isa_io_base; | ||
229 | 228 | ||
230 | setup_indirect_pci(hose, cfga, cfgd); | 229 | setup_indirect_pci(hose, cfga, cfgd); |
231 | hose->set_cfg_type = 1; | 230 | hose->set_cfg_type = 1; |
diff --git a/arch/ppc/platforms/4xx/ocotea.c b/arch/ppc/platforms/4xx/ocotea.c index 28de707434f1..5f82a6bc7046 100644 --- a/arch/ppc/platforms/4xx/ocotea.c +++ b/arch/ppc/platforms/4xx/ocotea.c | |||
@@ -227,9 +227,8 @@ ocotea_setup_hose(void) | |||
227 | hose->io_space.end = OCOTEA_PCI_UPPER_IO; | 227 | hose->io_space.end = OCOTEA_PCI_UPPER_IO; |
228 | hose->mem_space.start = OCOTEA_PCI_LOWER_MEM; | 228 | hose->mem_space.start = OCOTEA_PCI_LOWER_MEM; |
229 | hose->mem_space.end = OCOTEA_PCI_UPPER_MEM; | 229 | hose->mem_space.end = OCOTEA_PCI_UPPER_MEM; |
230 | isa_io_base = | 230 | hose->io_base_virt = ioremap64(OCOTEA_PCI_IO_BASE, OCOTEA_PCI_IO_SIZE); |
231 | (unsigned long)ioremap64(OCOTEA_PCI_IO_BASE, OCOTEA_PCI_IO_SIZE); | 231 | isa_io_base = (unsigned long) hose->io_base_virt; |
232 | hose->io_base_virt = (void *)isa_io_base; | ||
233 | 232 | ||
234 | setup_indirect_pci(hose, | 233 | setup_indirect_pci(hose, |
235 | OCOTEA_PCI_CFGA_PLB32, | 234 | OCOTEA_PCI_CFGA_PLB32, |
diff --git a/arch/ppc/platforms/chrp_pci.c b/arch/ppc/platforms/chrp_pci.c index 5bb6492ecf8c..7d0ee308f662 100644 --- a/arch/ppc/platforms/chrp_pci.c +++ b/arch/ppc/platforms/chrp_pci.c | |||
@@ -129,7 +129,7 @@ static struct pci_ops rtas_pci_ops = | |||
129 | rtas_write_config | 129 | rtas_write_config |
130 | }; | 130 | }; |
131 | 131 | ||
132 | volatile struct Hydra *Hydra = NULL; | 132 | volatile struct Hydra __iomem *Hydra = NULL; |
133 | 133 | ||
134 | int __init | 134 | int __init |
135 | hydra_init(void) | 135 | hydra_init(void) |
@@ -175,13 +175,14 @@ chrp_pcibios_fixup(void) | |||
175 | static void __init | 175 | static void __init |
176 | setup_python(struct pci_controller *hose, struct device_node *dev) | 176 | setup_python(struct pci_controller *hose, struct device_node *dev) |
177 | { | 177 | { |
178 | u32 *reg, val; | 178 | u32 __iomem *reg; |
179 | u32 val; | ||
179 | unsigned long addr = dev->addrs[0].address; | 180 | unsigned long addr = dev->addrs[0].address; |
180 | 181 | ||
181 | setup_indirect_pci(hose, addr + 0xf8000, addr + 0xf8010); | 182 | setup_indirect_pci(hose, addr + 0xf8000, addr + 0xf8010); |
182 | 183 | ||
183 | /* Clear the magic go-slow bit */ | 184 | /* Clear the magic go-slow bit */ |
184 | reg = (u32 *) ioremap(dev->addrs[0].address + 0xf6000, 0x40); | 185 | reg = ioremap(dev->addrs[0].address + 0xf6000, 0x40); |
185 | val = in_be32(®[12]); | 186 | val = in_be32(®[12]); |
186 | if (val & PRG_CL_RESET_VALID) { | 187 | if (val & PRG_CL_RESET_VALID) { |
187 | out_be32(®[12], val & ~PRG_CL_RESET_VALID); | 188 | out_be32(®[12], val & ~PRG_CL_RESET_VALID); |
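Note: several hunks in this diff (pci_bus_io_base(), the Ebony/Luan/Ocotea hose setup, Hydra, setup_python(), chrp_find_openpic()) are pure sparse annotations: the cookie returned by ioremap()/ioremap64() is kept in an __iomem pointer and only touched through accessors such as in_be32()/out_be32(). A small self-contained example of the pattern being converged on, modelled on setup_python() above (phys_base is a placeholder address):

    u32 __iomem *reg;
    u32 val;

    reg = ioremap(phys_base + 0xf6000, 0x40);   /* phys_base: placeholder */
    val = in_be32(&reg[12]);                    /* read through the accessor, not a plain deref */
    if (val & PRG_CL_RESET_VALID)
            out_be32(&reg[12], val & ~PRG_CL_RESET_VALID);
    iounmap(reg);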
diff --git a/arch/ppc/platforms/chrp_setup.c b/arch/ppc/platforms/chrp_setup.c index f23c4f320760..57f29ab29bda 100644 --- a/arch/ppc/platforms/chrp_setup.c +++ b/arch/ppc/platforms/chrp_setup.c | |||
@@ -356,7 +356,7 @@ static void __init chrp_find_openpic(void) | |||
356 | struct device_node *np; | 356 | struct device_node *np; |
357 | int len, i; | 357 | int len, i; |
358 | unsigned int *iranges; | 358 | unsigned int *iranges; |
359 | void *isu; | 359 | void __iomem *isu; |
360 | 360 | ||
361 | np = find_type_devices("open-pic"); | 361 | np = find_type_devices("open-pic"); |
362 | if (np == NULL || np->n_addrs == 0) | 362 | if (np == NULL || np->n_addrs == 0) |
diff --git a/arch/ppc/platforms/pmac_cache.S b/arch/ppc/platforms/pmac_cache.S index da34a9bc9299..fb977de6b704 100644 --- a/arch/ppc/platforms/pmac_cache.S +++ b/arch/ppc/platforms/pmac_cache.S | |||
@@ -64,27 +64,39 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |||
64 | mtspr SPRN_HID0,r4 /* Disable DPM */ | 64 | mtspr SPRN_HID0,r4 /* Disable DPM */ |
65 | sync | 65 | sync |
66 | 66 | ||
67 | /* disp-flush L1 */ | 67 | /* Disp-flush L1. We have a weird problem here that I never |
68 | li r4,0x4000 | 68 | * totally figured out. On 750FX, using the ROM for the flush |
69 | mtctr r4 | 69 | * results in a non-working flush. We use that workaround for |
70 | * now until I finally understand what's going on. --BenH | ||
71 | */ | ||
72 | |||
73 | /* ROM base by default */ | ||
70 | lis r4,0xfff0 | 74 | lis r4,0xfff0 |
71 | 1: lwzx r0,r0,r4 | 75 | mfpvr r3 |
76 | srwi r3,r3,16 | ||
77 | cmplwi cr0,r3,0x7000 | ||
78 | bne+ 1f | ||
79 | /* RAM base on 750FX */ | ||
80 | li r4,0 | ||
81 | 1: li r4,0x4000 | ||
82 | mtctr r4 | ||
83 | 1: lwz r0,0(r4) | ||
72 | addi r4,r4,32 | 84 | addi r4,r4,32 |
73 | bdnz 1b | 85 | bdnz 1b |
74 | sync | 86 | sync |
75 | isync | 87 | isync |
76 | 88 | ||
77 | /* disable / invalidate / enable L1 data */ | 89 | /* Disable / invalidate / enable L1 data */ |
78 | mfspr r3,SPRN_HID0 | 90 | mfspr r3,SPRN_HID0 |
79 | rlwinm r0,r0,0,~HID0_DCE | 91 | rlwinm r3,r3,0,~(HID0_DCE | HID0_ICE) |
80 | mtspr SPRN_HID0,r3 | 92 | mtspr SPRN_HID0,r3 |
81 | sync | 93 | sync |
82 | isync | 94 | isync |
83 | ori r3,r3,HID0_DCE|HID0_DCI | 95 | ori r3,r3,(HID0_DCE|HID0_DCI|HID0_ICE|HID0_ICFI) |
84 | sync | 96 | sync |
85 | isync | 97 | isync |
86 | mtspr SPRN_HID0,r3 | 98 | mtspr SPRN_HID0,r3 |
87 | xori r3,r3,HID0_DCI | 99 | xori r3,r3,(HID0_DCI|HID0_ICFI) |
88 | mtspr SPRN_HID0,r3 | 100 | mtspr SPRN_HID0,r3 |
89 | sync | 101 | sync |
90 | 102 | ||
@@ -110,11 +122,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |||
110 | lis r4,2 | 122 | lis r4,2 |
111 | mtctr r4 | 123 | mtctr r4 |
112 | lis r4,0xfff0 | 124 | lis r4,0xfff0 |
113 | 1: lwzx r0,r0,r4 | 125 | 1: lwz r0,0(r4) |
126 | addi r4,r4,32 | ||
127 | bdnz 1b | ||
128 | sync | ||
129 | isync | ||
130 | lis r4,2 | ||
131 | mtctr r4 | ||
132 | lis r4,0xfff0 | ||
133 | 1: dcbf 0,r4 | ||
114 | addi r4,r4,32 | 134 | addi r4,r4,32 |
115 | bdnz 1b | 135 | bdnz 1b |
116 | sync | 136 | sync |
117 | isync | 137 | isync |
138 | |||
118 | /* now disable L2 */ | 139 | /* now disable L2 */ |
119 | rlwinm r5,r5,0,~L2CR_L2E | 140 | rlwinm r5,r5,0,~L2CR_L2E |
120 | b 2f | 141 | b 2f |
@@ -135,6 +156,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |||
135 | mtspr SPRN_L2CR,r4 | 156 | mtspr SPRN_L2CR,r4 |
136 | sync | 157 | sync |
137 | isync | 158 | isync |
159 | |||
160 | /* Wait for the invalidation to complete */ | ||
161 | 1: mfspr r3,SPRN_L2CR | ||
162 | rlwinm. r0,r3,0,31,31 | ||
163 | bne 1b | ||
164 | |||
165 | /* Clear L2I */ | ||
138 | xoris r4,r4,L2CR_L2I@h | 166 | xoris r4,r4,L2CR_L2I@h |
139 | sync | 167 | sync |
140 | mtspr SPRN_L2CR,r4 | 168 | mtspr SPRN_L2CR,r4 |
@@ -142,14 +170,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |||
142 | 170 | ||
143 | /* now disable the L1 data cache */ | 171 | /* now disable the L1 data cache */ |
144 | mfspr r0,SPRN_HID0 | 172 | mfspr r0,SPRN_HID0 |
145 | rlwinm r0,r0,0,~HID0_DCE | 173 | rlwinm r0,r0,0,~(HID0_DCE|HID0_ICE) |
146 | mtspr SPRN_HID0,r0 | 174 | mtspr SPRN_HID0,r0 |
147 | sync | 175 | sync |
148 | isync | 176 | isync |
149 | 177 | ||
150 | /* Restore HID0[DPM] to whatever it was before */ | 178 | /* Restore HID0[DPM] to whatever it was before */ |
151 | sync | 179 | sync |
152 | mtspr SPRN_HID0,r8 | 180 | mfspr r0,SPRN_HID0 |
181 | rlwimi r0,r8,0,11,11 /* Turn back HID0[DPM] */ | ||
182 | mtspr SPRN_HID0,r0 | ||
153 | sync | 183 | sync |
154 | 184 | ||
155 | /* restore DR and EE */ | 185 | /* restore DR and EE */ |
@@ -201,7 +231,7 @@ flush_disable_745x: | |||
201 | mtctr r4 | 231 | mtctr r4 |
202 | li r4,0 | 232 | li r4,0 |
203 | 1: | 233 | 1: |
204 | lwzx r0,r0,r4 | 234 | lwz r0,0(r4) |
205 | addi r4,r4,32 /* Go to start of next cache line */ | 235 | addi r4,r4,32 /* Go to start of next cache line */ |
206 | bdnz 1b | 236 | bdnz 1b |
207 | isync | 237 | isync |
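
The reworked flush above does three things: it flushes from the ROM base except on 750FX (where it flushes from RAM instead), it now invalidates the instruction cache together with the data cache, and it adds a loop that waits for the L2 global invalidate to finish before L2I is cleared. A minimal C sketch of that last handshake, assuming mfspr()/mtspr() style accessors and the usual 750-family L2CR bit values (treat the numbers as assumptions, not the exact reg.h definitions):

    /* Sketch only: bit values and accessors are assumed, not quoted from reg.h. */
    #define SPRN_L2CR   1017         /* L2 cache control register */
    #define L2CR_L2E    0x80000000u  /* L2 enable */
    #define L2CR_L2I    0x00200000u  /* L2 global invalidate */
    #define L2CR_L2IP   0x00000001u  /* L2 invalidate in progress (bit 31) */

    static void l2cr_global_invalidate(void)
    {
            unsigned int l2cr = mfspr(SPRN_L2CR) & ~L2CR_L2E;  /* L2 must be disabled */

            mtspr(SPRN_L2CR, l2cr | L2CR_L2I);   /* start the global invalidate */
            while (mfspr(SPRN_L2CR) & L2CR_L2IP)
                    ;                            /* wait for the invalidation to complete */
            mtspr(SPRN_L2CR, l2cr & ~L2CR_L2I);  /* clear L2I again */
    }
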
diff --git a/arch/ppc/platforms/pmac_feature.c b/arch/ppc/platforms/pmac_feature.c index 46cbf36722db..867336ad5d36 100644 --- a/arch/ppc/platforms/pmac_feature.c +++ b/arch/ppc/platforms/pmac_feature.c | |||
@@ -1590,6 +1590,114 @@ intrepid_shutdown(struct macio_chip* macio, int sleep_mode) | |||
1590 | mdelay(10); | 1590 | mdelay(10); |
1591 | } | 1591 | } |
1592 | 1592 | ||
1593 | |||
1594 | void __pmac pmac_tweak_clock_spreading(int enable) | ||
1595 | { | ||
1596 | struct macio_chip* macio = &macio_chips[0]; | ||
1597 | |||
1598 | /* Hack for doing clock spreading on some machines (PowerBooks and | ||
1599 | * iBooks). This implements the "platform-do-clockspreading" OF | ||
1600 | * property as decoded manually on various models. For safety, we also | ||
1601 | * check the product ID in the device-tree in the cases where we whack the i2c | ||
1602 | * chip, to make reasonably sure we won't set wrong values in there. | ||
1603 | * | ||
1604 | * Of course, ultimately, we have to implement a real parser for | ||
1605 | * the platform-do-* stuff... | ||
1606 | */ | ||
1607 | |||
1608 | if (macio->type == macio_intrepid) { | ||
1609 | if (enable) | ||
1610 | UN_OUT(UNI_N_CLOCK_SPREADING, 2); | ||
1611 | else | ||
1612 | UN_OUT(UNI_N_CLOCK_SPREADING, 0); | ||
1613 | mdelay(40); | ||
1614 | } | ||
1615 | |||
1616 | while (machine_is_compatible("PowerBook5,2") || | ||
1617 | machine_is_compatible("PowerBook5,3") || | ||
1618 | machine_is_compatible("PowerBook6,2") || | ||
1619 | machine_is_compatible("PowerBook6,3")) { | ||
1620 | struct device_node *ui2c = of_find_node_by_type(NULL, "i2c"); | ||
1621 | struct device_node *dt = of_find_node_by_name(NULL, "device-tree"); | ||
1622 | u8 buffer[9]; | ||
1623 | u32 *productID; | ||
1624 | int i, rc, changed = 0; | ||
1625 | |||
1626 | if (dt == NULL) | ||
1627 | break; | ||
1628 | productID = (u32 *)get_property(dt, "pid#", NULL); | ||
1629 | if (productID == NULL) | ||
1630 | break; | ||
1631 | while(ui2c) { | ||
1632 | struct device_node *p = of_get_parent(ui2c); | ||
1633 | if (p && !strcmp(p->name, "uni-n")) | ||
1634 | break; | ||
1635 | ui2c = of_find_node_by_type(ui2c, "i2c"); | ||
1636 | } | ||
1637 | if (ui2c == NULL) | ||
1638 | break; | ||
1639 | DBG("Trying to bump clock speed for PID: %08x...\n", *productID); | ||
1640 | rc = pmac_low_i2c_open(ui2c, 1); | ||
1641 | if (rc != 0) | ||
1642 | break; | ||
1643 | pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_combined); | ||
1644 | rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_read, 0x80, buffer, 9); | ||
1645 | DBG("read result: %d,", rc); | ||
1646 | if (rc != 0) { | ||
1647 | pmac_low_i2c_close(ui2c); | ||
1648 | break; | ||
1649 | } | ||
1650 | for (i=0; i<9; i++) | ||
1651 | DBG(" %02x", buffer[i]); | ||
1652 | DBG("\n"); | ||
1653 | |||
1654 | switch(*productID) { | ||
1655 | case 0x1182: /* AlBook 12" rev 2 */ | ||
1656 | case 0x1183: /* iBook G4 12" */ | ||
1657 | buffer[0] = (buffer[0] & 0x8f) | 0x70; | ||
1658 | buffer[2] = (buffer[2] & 0x7f) | 0x00; | ||
1659 | buffer[5] = (buffer[5] & 0x80) | 0x31; | ||
1660 | buffer[6] = (buffer[6] & 0x40) | 0xb0; | ||
1661 | buffer[7] = (buffer[7] & 0x00) | (enable ? 0xc0 : 0xba); | ||
1662 | buffer[8] = (buffer[8] & 0x00) | 0x30; | ||
1663 | changed = 1; | ||
1664 | break; | ||
1665 | case 0x3142: /* AlBook 15" (ATI M10) */ | ||
1666 | case 0x3143: /* AlBook 17" (ATI M10) */ | ||
1667 | buffer[0] = (buffer[0] & 0xaf) | 0x50; | ||
1668 | buffer[2] = (buffer[2] & 0x7f) | 0x00; | ||
1669 | buffer[5] = (buffer[5] & 0x80) | 0x31; | ||
1670 | buffer[6] = (buffer[6] & 0x40) | 0xb0; | ||
1671 | buffer[7] = (buffer[7] & 0x00) | (enable ? 0xd0 : 0xc0); | ||
1672 | buffer[8] = (buffer[8] & 0x00) | 0x30; | ||
1673 | changed = 1; | ||
1674 | break; | ||
1675 | default: | ||
1676 | DBG("i2c-hwclock: Machine model not handled\n"); | ||
1677 | break; | ||
1678 | } | ||
1679 | if (!changed) { | ||
1680 | pmac_low_i2c_close(ui2c); | ||
1681 | break; | ||
1682 | } | ||
1683 | pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_stdsub); | ||
1684 | rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_write, 0x80, buffer, 9); | ||
1685 | DBG("write result: %d,", rc); | ||
1686 | pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_combined); | ||
1687 | rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_read, 0x80, buffer, 9); | ||
1688 | DBG("read result: %d,", rc); | ||
1689 | if (rc != 0) { | ||
1690 | pmac_low_i2c_close(ui2c); | ||
1691 | break; | ||
1692 | } | ||
1693 | for (i=0; i<9; i++) | ||
1694 | DBG(" %02x", buffer[i]); | ||
1695 | pmac_low_i2c_close(ui2c); | ||
1696 | break; | ||
1697 | } | ||
1698 | } | ||
1699 | |||
1700 | |||
1593 | static int __pmac | 1701 | static int __pmac |
1594 | core99_sleep(void) | 1702 | core99_sleep(void) |
1595 | { | 1703 | { |
@@ -1601,12 +1709,6 @@ core99_sleep(void) | |||
1601 | macio->type != macio_intrepid) | 1709 | macio->type != macio_intrepid) |
1602 | return -ENODEV; | 1710 | return -ENODEV; |
1603 | 1711 | ||
1604 | /* The device-tree contains that in the hwclock node */ | ||
1605 | if (macio->type == macio_intrepid) { | ||
1606 | UN_OUT(UNI_N_CLOCK_SPREADING, 0); | ||
1607 | mdelay(40); | ||
1608 | } | ||
1609 | |||
1610 | /* We power off the wireless slot in case it was not done | 1712 | /* We power off the wireless slot in case it was not done |
1611 | * by the driver. We don't power it on automatically however | 1713 | * by the driver. We don't power it on automatically however |
1612 | */ | 1714 | */ |
@@ -1749,12 +1851,6 @@ core99_wake_up(void) | |||
1749 | UN_OUT(UNI_N_CLOCK_CNTL, save_unin_clock_ctl); | 1851 | UN_OUT(UNI_N_CLOCK_CNTL, save_unin_clock_ctl); |
1750 | udelay(100); | 1852 | udelay(100); |
1751 | 1853 | ||
1752 | /* Restore clock spreading */ | ||
1753 | if (macio->type == macio_intrepid) { | ||
1754 | UN_OUT(UNI_N_CLOCK_SPREADING, 2); | ||
1755 | mdelay(40); | ||
1756 | } | ||
1757 | |||
1758 | return 0; | 1854 | return 0; |
1759 | } | 1855 | } |
1760 | 1856 | ||
@@ -2149,7 +2245,7 @@ static struct pmac_mb_def pmac_mb_defs[] __pmacdata = { | |||
2149 | }, | 2245 | }, |
2150 | { "PowerBook1,1", "PowerBook 101 (Lombard)", | 2246 | { "PowerBook1,1", "PowerBook 101 (Lombard)", |
2151 | PMAC_TYPE_101_PBOOK, paddington_features, | 2247 | PMAC_TYPE_101_PBOOK, paddington_features, |
2152 | PMAC_MB_MAY_SLEEP | PMAC_MB_MOBILE | 2248 | PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE |
2153 | }, | 2249 | }, |
2154 | { "PowerBook2,1", "iBook (first generation)", | 2250 | { "PowerBook2,1", "iBook (first generation)", |
2155 | PMAC_TYPE_ORIG_IBOOK, core99_features, | 2251 | PMAC_TYPE_ORIG_IBOOK, core99_features, |
@@ -2718,97 +2814,11 @@ set_initial_features(void) | |||
2718 | MACIO_BIC(HEATHROW_FCR, HRW_SOUND_POWER_N); | 2814 | MACIO_BIC(HEATHROW_FCR, HRW_SOUND_POWER_N); |
2719 | } | 2815 | } |
2720 | 2816 | ||
2721 | /* Hack for bumping clock speed on the new PowerBooks and the | 2817 | /* Some machine models need the clock chip to be properly setup for |
2722 | * iBook G4. This implements the "platform-do-clockspreading" OF | 2818 | * clock spreading now. This should be a platform function but we |
2723 | * property. For safety, we also check the product ID in the | 2819 | * don't do these at the moment |
2724 | * device-tree to make reasonably sure we won't set wrong values | ||
2725 | * in the clock chip. | ||
2726 | * | ||
2727 | * Of course, ultimately, we have to implement a real parser for | ||
2728 | * the platform-do-* stuff... | ||
2729 | */ | 2820 | */ |
2730 | while (machine_is_compatible("PowerBook5,2") || | 2821 | pmac_tweak_clock_spreading(1); |
2731 | machine_is_compatible("PowerBook5,3") || | ||
2732 | machine_is_compatible("PowerBook6,2") || | ||
2733 | machine_is_compatible("PowerBook6,3")) { | ||
2734 | struct device_node *ui2c = of_find_node_by_type(NULL, "i2c"); | ||
2735 | struct device_node *dt = of_find_node_by_name(NULL, "device-tree"); | ||
2736 | u8 buffer[9]; | ||
2737 | u32 *productID; | ||
2738 | int i, rc, changed = 0; | ||
2739 | |||
2740 | if (dt == NULL) | ||
2741 | break; | ||
2742 | productID = (u32 *)get_property(dt, "pid#", NULL); | ||
2743 | if (productID == NULL) | ||
2744 | break; | ||
2745 | while(ui2c) { | ||
2746 | struct device_node *p = of_get_parent(ui2c); | ||
2747 | if (p && !strcmp(p->name, "uni-n")) | ||
2748 | break; | ||
2749 | ui2c = of_find_node_by_type(ui2c, "i2c"); | ||
2750 | } | ||
2751 | if (ui2c == NULL) | ||
2752 | break; | ||
2753 | DBG("Trying to bump clock speed for PID: %08x...\n", *productID); | ||
2754 | rc = pmac_low_i2c_open(ui2c, 1); | ||
2755 | if (rc != 0) | ||
2756 | break; | ||
2757 | pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_combined); | ||
2758 | rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_read, 0x80, buffer, 9); | ||
2759 | DBG("read result: %d,", rc); | ||
2760 | if (rc != 0) { | ||
2761 | pmac_low_i2c_close(ui2c); | ||
2762 | break; | ||
2763 | } | ||
2764 | for (i=0; i<9; i++) | ||
2765 | DBG(" %02x", buffer[i]); | ||
2766 | DBG("\n"); | ||
2767 | |||
2768 | switch(*productID) { | ||
2769 | case 0x1182: /* AlBook 12" rev 2 */ | ||
2770 | case 0x1183: /* iBook G4 12" */ | ||
2771 | buffer[0] = (buffer[0] & 0x8f) | 0x70; | ||
2772 | buffer[2] = (buffer[2] & 0x7f) | 0x00; | ||
2773 | buffer[5] = (buffer[5] & 0x80) | 0x31; | ||
2774 | buffer[6] = (buffer[6] & 0x40) | 0xb0; | ||
2775 | buffer[7] = (buffer[7] & 0x00) | 0xc0; | ||
2776 | buffer[8] = (buffer[8] & 0x00) | 0x30; | ||
2777 | changed = 1; | ||
2778 | break; | ||
2779 | case 0x3142: /* AlBook 15" (ATI M10) */ | ||
2780 | case 0x3143: /* AlBook 17" (ATI M10) */ | ||
2781 | buffer[0] = (buffer[0] & 0xaf) | 0x50; | ||
2782 | buffer[2] = (buffer[2] & 0x7f) | 0x00; | ||
2783 | buffer[5] = (buffer[5] & 0x80) | 0x31; | ||
2784 | buffer[6] = (buffer[6] & 0x40) | 0xb0; | ||
2785 | buffer[7] = (buffer[7] & 0x00) | 0xd0; | ||
2786 | buffer[8] = (buffer[8] & 0x00) | 0x30; | ||
2787 | changed = 1; | ||
2788 | break; | ||
2789 | default: | ||
2790 | DBG("i2c-hwclock: Machine model not handled\n"); | ||
2791 | break; | ||
2792 | } | ||
2793 | if (!changed) { | ||
2794 | pmac_low_i2c_close(ui2c); | ||
2795 | break; | ||
2796 | } | ||
2797 | pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_stdsub); | ||
2798 | rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_write, 0x80, buffer, 9); | ||
2799 | DBG("write result: %d,", rc); | ||
2800 | pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_combined); | ||
2801 | rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_read, 0x80, buffer, 9); | ||
2802 | DBG("read result: %d,", rc); | ||
2803 | if (rc != 0) { | ||
2804 | pmac_low_i2c_close(ui2c); | ||
2805 | break; | ||
2806 | } | ||
2807 | for (i=0; i<9; i++) | ||
2808 | DBG(" %02x", buffer[i]); | ||
2809 | pmac_low_i2c_close(ui2c); | ||
2810 | break; | ||
2811 | } | ||
2812 | 2822 | ||
2813 | #endif /* CONFIG_POWER4 */ | 2823 | #endif /* CONFIG_POWER4 */ |
2814 | 2824 | ||
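
The net effect of the pmac_feature.c change is that the i2c clock-spreading sequence now lives in one exported helper, pmac_tweak_clock_spreading(), instead of being open-coded in set_initial_features() and the sleep/wake paths. A sketch of how a power-management caller could use it; the example functions are hypothetical, only pmac_tweak_clock_spreading() comes from the patch:

    /* Illustrative only: the suspend/resume plumbing around the calls is elided. */
    extern void pmac_tweak_clock_spreading(int enable);

    static int example_suspend(void)
    {
            pmac_tweak_clock_spreading(0);  /* disable spreading before sleeping */
            /* ... power down ASICs, enter sleep ... */
            return 0;
    }

    static int example_resume(void)
    {
            /* ... restore clocks and ASIC state ... */
            pmac_tweak_clock_spreading(1);  /* re-enable spreading after wake */
            return 0;
    }
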
diff --git a/arch/ppc/platforms/pmac_low_i2c.c b/arch/ppc/platforms/pmac_low_i2c.c index d07579f2b8b9..08583fce1692 100644 --- a/arch/ppc/platforms/pmac_low_i2c.c +++ b/arch/ppc/platforms/pmac_low_i2c.c | |||
@@ -54,7 +54,7 @@ struct low_i2c_host | |||
54 | int mode; /* Current mode */ | 54 | int mode; /* Current mode */ |
55 | int channel; /* Current channel */ | 55 | int channel; /* Current channel */ |
56 | int num_channels; /* Number of channels */ | 56 | int num_channels; /* Number of channels */ |
57 | unsigned long base; /* For keywest-i2c, base address */ | 57 | void __iomem * base; /* For keywest-i2c, base address */ |
58 | int bsteps; /* And register stepping */ | 58 | int bsteps; /* And register stepping */ |
59 | int speed; /* And speed */ | 59 | int speed; /* And speed */ |
60 | }; | 60 | }; |
@@ -154,14 +154,12 @@ static const char *__kw_state_names[] = { | |||
154 | 154 | ||
155 | static inline u8 __kw_read_reg(struct low_i2c_host *host, reg_t reg) | 155 | static inline u8 __kw_read_reg(struct low_i2c_host *host, reg_t reg) |
156 | { | 156 | { |
157 | return in_8(((volatile u8 *)host->base) | 157 | return in_8(host->base + (((unsigned)reg) << host->bsteps)); |
158 | + (((unsigned)reg) << host->bsteps)); | ||
159 | } | 158 | } |
160 | 159 | ||
161 | static inline void __kw_write_reg(struct low_i2c_host *host, reg_t reg, u8 val) | 160 | static inline void __kw_write_reg(struct low_i2c_host *host, reg_t reg, u8 val) |
162 | { | 161 | { |
163 | out_8(((volatile u8 *)host->base) | 162 | out_8(host->base + (((unsigned)reg) << host->bsteps), val); |
164 | + (((unsigned)reg) << host->bsteps), val); | ||
165 | (void)__kw_read_reg(host, reg_subaddr); | 163 | (void)__kw_read_reg(host, reg_subaddr); |
166 | } | 164 | } |
167 | 165 | ||
@@ -370,7 +368,7 @@ static void keywest_low_i2c_add(struct device_node *np) | |||
370 | break; | 368 | break; |
371 | } | 369 | } |
372 | host->mode = pmac_low_i2c_mode_std; | 370 | host->mode = pmac_low_i2c_mode_std; |
373 | host->base = (unsigned long)ioremap(np->addrs[0].address + aoffset, | 371 | host->base = ioremap(np->addrs[0].address + aoffset, |
374 | np->addrs[0].size); | 372 | np->addrs[0].size); |
375 | host->func = keywest_low_i2c_func; | 373 | host->func = keywest_low_i2c_func; |
376 | } | 374 | } |
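
The __iomem conversion here follows the standard rule: the cookie returned by ioremap() is stored as void __iomem * and is only ever touched through accessors such as in_8()/out_8(), never through a cast to a plain pointer. A minimal sketch of the pattern, with a made-up host structure standing in for the keywest one:

    #include <linux/types.h>
    #include <asm/io.h>

    struct example_host {
            void __iomem *base;   /* from ioremap(), never cast to a CPU pointer */
            int bsteps;           /* register stride, as in the keywest host */
    };

    static u8 example_read_reg(struct example_host *host, unsigned int reg)
    {
            return in_8(host->base + (reg << host->bsteps));
    }

    static void example_write_reg(struct example_host *host, unsigned int reg, u8 val)
    {
            out_8(host->base + (reg << host->bsteps), val);
    }
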
diff --git a/arch/ppc/platforms/pmac_sleep.S b/arch/ppc/platforms/pmac_sleep.S index 3139b6766ad3..f459ade1bd63 100644 --- a/arch/ppc/platforms/pmac_sleep.S +++ b/arch/ppc/platforms/pmac_sleep.S | |||
@@ -267,6 +267,10 @@ grackle_wake_up: | |||
267 | /* Restore various CPU config stuffs */ | 267 | /* Restore various CPU config stuffs */ |
268 | bl __restore_cpu_setup | 268 | bl __restore_cpu_setup |
269 | 269 | ||
270 | /* Make sure all FPRs have been initialized */ | ||
271 | bl reloc_offset | ||
272 | bl __init_fpu_registers | ||
273 | |||
270 | /* Invalidate & enable L1 cache, we don't care about | 274 | /* Invalidate & enable L1 cache, we don't care about |
271 | * whatever the ROM may have tried to write to memory | 275 | * whatever the ROM may have tried to write to memory |
272 | */ | 276 | */ |
diff --git a/arch/ppc/platforms/pmac_smp.c b/arch/ppc/platforms/pmac_smp.c index 731841f9a5b8..8e049dab4e63 100644 --- a/arch/ppc/platforms/pmac_smp.c +++ b/arch/ppc/platforms/pmac_smp.c | |||
@@ -91,11 +91,11 @@ extern void __secondary_start_psurge3(void); /* Temporary horrible hack */ | |||
91 | #define PSURGE_QUAD_BIC(r, v) (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) & ~(v))) | 91 | #define PSURGE_QUAD_BIC(r, v) (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) & ~(v))) |
92 | 92 | ||
93 | /* virtual addresses for the above */ | 93 | /* virtual addresses for the above */ |
94 | static volatile u8 *hhead_base; | 94 | static volatile u8 __iomem *hhead_base; |
95 | static volatile u8 *quad_base; | 95 | static volatile u8 __iomem *quad_base; |
96 | static volatile u32 *psurge_pri_intr; | 96 | static volatile u32 __iomem *psurge_pri_intr; |
97 | static volatile u8 *psurge_sec_intr; | 97 | static volatile u8 __iomem *psurge_sec_intr; |
98 | static volatile u32 *psurge_start; | 98 | static volatile u32 __iomem *psurge_start; |
99 | 99 | ||
100 | /* values for psurge_type */ | 100 | /* values for psurge_type */ |
101 | #define PSURGE_NONE -1 | 101 | #define PSURGE_NONE -1 |
@@ -322,10 +322,10 @@ static int __init smp_psurge_probe(void) | |||
322 | /* All released cards using this HW design have 4 CPUs */ | 322 | /* All released cards using this HW design have 4 CPUs */ |
323 | ncpus = 4; | 323 | ncpus = 4; |
324 | } else { | 324 | } else { |
325 | iounmap((void *) quad_base); | 325 | iounmap(quad_base); |
326 | if ((in_8(hhead_base + HHEAD_CONFIG) & 0x02) == 0) { | 326 | if ((in_8(hhead_base + HHEAD_CONFIG) & 0x02) == 0) { |
327 | /* not a dual-cpu card */ | 327 | /* not a dual-cpu card */ |
328 | iounmap((void *) hhead_base); | 328 | iounmap(hhead_base); |
329 | psurge_type = PSURGE_NONE; | 329 | psurge_type = PSURGE_NONE; |
330 | return 1; | 330 | return 1; |
331 | } | 331 | } |
diff --git a/arch/ppc/platforms/pmac_time.c b/arch/ppc/platforms/pmac_time.c index 09636546f44e..de60ccc7db9f 100644 --- a/arch/ppc/platforms/pmac_time.c +++ b/arch/ppc/platforms/pmac_time.c | |||
@@ -165,7 +165,7 @@ int __init | |||
165 | via_calibrate_decr(void) | 165 | via_calibrate_decr(void) |
166 | { | 166 | { |
167 | struct device_node *vias; | 167 | struct device_node *vias; |
168 | volatile unsigned char *via; | 168 | volatile unsigned char __iomem *via; |
169 | int count = VIA_TIMER_FREQ_6 / 100; | 169 | int count = VIA_TIMER_FREQ_6 / 100; |
170 | unsigned int dstart, dend; | 170 | unsigned int dstart, dend; |
171 | 171 | ||
@@ -176,8 +176,7 @@ via_calibrate_decr(void) | |||
176 | vias = find_devices("via"); | 176 | vias = find_devices("via"); |
177 | if (vias == 0 || vias->n_addrs == 0) | 177 | if (vias == 0 || vias->n_addrs == 0) |
178 | return 0; | 178 | return 0; |
179 | via = (volatile unsigned char *) | 179 | via = ioremap(vias->addrs[0].address, vias->addrs[0].size); |
180 | ioremap(vias->addrs[0].address, vias->addrs[0].size); | ||
181 | 180 | ||
182 | /* set timer 1 for continuous interrupts */ | 181 | /* set timer 1 for continuous interrupts */ |
183 | out_8(&via[ACR], (via[ACR] & ~T1MODE) | T1MODE_CONT); | 182 | out_8(&via[ACR], (via[ACR] & ~T1MODE) | T1MODE_CONT); |
@@ -202,7 +201,7 @@ via_calibrate_decr(void) | |||
202 | printk(KERN_INFO "via_calibrate_decr: ticks per jiffy = %u (%u ticks)\n", | 201 | printk(KERN_INFO "via_calibrate_decr: ticks per jiffy = %u (%u ticks)\n", |
203 | tb_ticks_per_jiffy, dstart - dend); | 202 | tb_ticks_per_jiffy, dstart - dend); |
204 | 203 | ||
205 | iounmap((void*)via); | 204 | iounmap(via); |
206 | 205 | ||
207 | return 1; | 206 | return 1; |
208 | } | 207 | } |
diff --git a/arch/ppc/platforms/radstone_ppc7d.c b/arch/ppc/platforms/radstone_ppc7d.c index 2a99b43737a8..c30607a972d8 100644 --- a/arch/ppc/platforms/radstone_ppc7d.c +++ b/arch/ppc/platforms/radstone_ppc7d.c | |||
@@ -68,6 +68,7 @@ | |||
68 | #define PPC7D_RST_PIN 17 /* GPP17 */ | 68 | #define PPC7D_RST_PIN 17 /* GPP17 */ |
69 | 69 | ||
70 | extern u32 mv64360_irq_base; | 70 | extern u32 mv64360_irq_base; |
71 | extern spinlock_t rtc_lock; | ||
71 | 72 | ||
72 | static struct mv64x60_handle bh; | 73 | static struct mv64x60_handle bh; |
73 | static int ppc7d_has_alma; | 74 | static int ppc7d_has_alma; |
@@ -75,6 +76,11 @@ static int ppc7d_has_alma; | |||
75 | extern void gen550_progress(char *, unsigned short); | 76 | extern void gen550_progress(char *, unsigned short); |
76 | extern void gen550_init(int, struct uart_port *); | 77 | extern void gen550_init(int, struct uart_port *); |
77 | 78 | ||
79 | /* FIXME - move to h file */ | ||
80 | extern int ds1337_do_command(int id, int cmd, void *arg); | ||
81 | #define DS1337_GET_DATE 0 | ||
82 | #define DS1337_SET_DATE 1 | ||
83 | |||
78 | /* residual data */ | 84 | /* residual data */ |
79 | unsigned char __res[sizeof(bd_t)]; | 85 | unsigned char __res[sizeof(bd_t)]; |
80 | 86 | ||
@@ -253,6 +259,8 @@ static int ppc7d_show_cpuinfo(struct seq_file *m) | |||
253 | u8 val1, val2; | 259 | u8 val1, val2; |
254 | static int flash_sizes[4] = { 64, 32, 0, 16 }; | 260 | static int flash_sizes[4] = { 64, 32, 0, 16 }; |
255 | static int flash_banks[4] = { 4, 3, 2, 1 }; | 261 | static int flash_banks[4] = { 4, 3, 2, 1 }; |
262 | static int sdram_bank_sizes[4] = { 128, 256, 512, 1 }; | ||
263 | int sdram_num_banks = 2; | ||
256 | static char *pci_modes[] = { "PCI33", "PCI66", | 264 | static char *pci_modes[] = { "PCI33", "PCI66", |
257 | "Unknown", "Unknown", | 265 | "Unknown", "Unknown", |
258 | "PCIX33", "PCIX66", | 266 | "PCIX33", "PCIX66", |
@@ -279,13 +287,17 @@ static int ppc7d_show_cpuinfo(struct seq_file *m) | |||
279 | (val1 == PPC7D_CPLD_MB_TYPE_PLL_100) ? 100 : | 287 | (val1 == PPC7D_CPLD_MB_TYPE_PLL_100) ? 100 : |
280 | (val1 == PPC7D_CPLD_MB_TYPE_PLL_64) ? 64 : 0); | 288 | (val1 == PPC7D_CPLD_MB_TYPE_PLL_64) ? 64 : 0); |
281 | 289 | ||
290 | val = inb(PPC7D_CPLD_MEM_CONFIG); | ||
291 | if (val & PPC7D_CPLD_SDRAM_BANK_NUM_MASK) sdram_num_banks--; | ||
292 | |||
282 | val = inb(PPC7D_CPLD_MEM_CONFIG_EXTEND); | 293 | val = inb(PPC7D_CPLD_MEM_CONFIG_EXTEND); |
283 | val1 = val & PPC7D_CPLD_SDRAM_BANK_SIZE_MASK; | 294 | val1 = (val & PPC7D_CPLD_SDRAM_BANK_SIZE_MASK) >> 6; |
284 | seq_printf(m, "SDRAM\t\t: %d%c", | 295 | seq_printf(m, "SDRAM\t\t: %d banks of %d%c, total %d%c", |
285 | (val1 == PPC7D_CPLD_SDRAM_BANK_SIZE_128M) ? 128 : | 296 | sdram_num_banks, |
286 | (val1 == PPC7D_CPLD_SDRAM_BANK_SIZE_256M) ? 256 : | 297 | sdram_bank_sizes[val1], |
287 | (val1 == PPC7D_CPLD_SDRAM_BANK_SIZE_512M) ? 512 : 1, | 298 | (sdram_bank_sizes[val1] < 128) ? 'G' : 'M', |
288 | (val1 == PPC7D_CPLD_SDRAM_BANK_SIZE_1G) ? 'G' : 'M'); | 299 | sdram_num_banks * sdram_bank_sizes[val1], |
300 | (sdram_bank_sizes[val1] < 128) ? 'G' : 'M'); | ||
289 | if (val2 & PPC7D_CPLD_MB_TYPE_ECC_FITTED_MASK) { | 301 | if (val2 & PPC7D_CPLD_MB_TYPE_ECC_FITTED_MASK) { |
290 | seq_printf(m, " [ECC %sabled]", | 302 | seq_printf(m, " [ECC %sabled]", |
291 | (val2 & PPC7D_CPLD_MB_TYPE_ECC_ENABLE_MASK) ? "en" : | 303 | (val2 & PPC7D_CPLD_MB_TYPE_ECC_ENABLE_MASK) ? "en" : |
@@ -1236,6 +1248,38 @@ static void __init ppc7d_setup_arch(void) | |||
1236 | printk(KERN_INFO "Radstone Technology PPC7D\n"); | 1248 | printk(KERN_INFO "Radstone Technology PPC7D\n"); |
1237 | if (ppc_md.progress) | 1249 | if (ppc_md.progress) |
1238 | ppc_md.progress("ppc7d_setup_arch: exit", 0); | 1250 | ppc_md.progress("ppc7d_setup_arch: exit", 0); |
1251 | |||
1252 | } | ||
1253 | |||
1254 | /* Real Time Clock support. | ||
1255 | * PPC7D has a DS1337 accessed by I2C. | ||
1256 | */ | ||
1257 | static ulong ppc7d_get_rtc_time(void) | ||
1258 | { | ||
1259 | struct rtc_time tm; | ||
1260 | int result; | ||
1261 | |||
1262 | spin_lock(&rtc_lock); | ||
1263 | result = ds1337_do_command(0, DS1337_GET_DATE, &tm); | ||
1264 | spin_unlock(&rtc_lock); | ||
1265 | |||
1266 | if (result == 0) | ||
1267 | result = mktime(tm.tm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); | ||
1268 | |||
1269 | return result; | ||
1270 | } | ||
1271 | |||
1272 | static int ppc7d_set_rtc_time(unsigned long nowtime) | ||
1273 | { | ||
1274 | struct rtc_time tm; | ||
1275 | int result; | ||
1276 | |||
1277 | spin_lock(&rtc_lock); | ||
1278 | to_tm(nowtime, &tm); | ||
1279 | result = ds1337_do_command(0, DS1337_SET_DATE, &tm); | ||
1280 | spin_unlock(&rtc_lock); | ||
1281 | |||
1282 | return result; | ||
1239 | } | 1283 | } |
1240 | 1284 | ||
1241 | /* This kernel command line parameter can be used to have the target | 1285 | /* This kernel command line parameter can be used to have the target |
@@ -1293,6 +1337,10 @@ static void ppc7d_init2(void) | |||
1293 | data8 |= 0x07; | 1337 | data8 |= 0x07; |
1294 | outb(data8, PPC7D_CPLD_LEDS); | 1338 | outb(data8, PPC7D_CPLD_LEDS); |
1295 | 1339 | ||
1340 | /* Hook up RTC. We couldn't do this earlier because we need the I2C subsystem up first */ | ||
1341 | ppc_md.set_rtc_time = ppc7d_set_rtc_time; | ||
1342 | ppc_md.get_rtc_time = ppc7d_get_rtc_time; | ||
1343 | |||
1296 | pr_debug("%s: exit\n", __FUNCTION__); | 1344 | pr_debug("%s: exit\n", __FUNCTION__); |
1297 | } | 1345 | } |
1298 | 1346 | ||
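
Two things are added for the PPC7D above: the /proc/cpuinfo SDRAM line now reports bank count and per-bank size decoded from the CPLD registers, and the DS1337 RTC is hooked into ppc_md once the I2C subsystem is available. The SDRAM decode is worth spelling out, since a table value below 128 means the unit is gigabytes (the 1GB-per-bank case). A standalone sketch that mirrors it, taking the register values as parameters instead of reading them with inb():

    /* Sketch mirroring the decode in ppc7d_show_cpuinfo(); the masks match the
     * PPC7D_CPLD_SDRAM_* definitions added in radstone_ppc7d.h. */
    static const int sdram_bank_sizes[4] = { 128, 256, 512, 1 };  /* 1 means 1G */

    static void decode_sdram(unsigned char mem_cfg, unsigned char mem_cfg_ext,
                             int *banks, int *bank_size, char *unit)
    {
            int idx = (mem_cfg_ext & 0xc0) >> 6;   /* SDRAM_BANK_SIZE field */

            *banks = (mem_cfg & 0x02) ? 1 : 2;     /* BANK_NUM bit set means one bank */
            *bank_size = sdram_bank_sizes[idx];
            *unit = (*bank_size < 128) ? 'G' : 'M';
    }
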
diff --git a/arch/ppc/platforms/radstone_ppc7d.h b/arch/ppc/platforms/radstone_ppc7d.h index 4546fff2b0c3..938375510be4 100644 --- a/arch/ppc/platforms/radstone_ppc7d.h +++ b/arch/ppc/platforms/radstone_ppc7d.h | |||
@@ -240,6 +240,7 @@ | |||
240 | #define PPC7D_CPLD_FLASH_CNTL 0x086E | 240 | #define PPC7D_CPLD_FLASH_CNTL 0x086E |
241 | 241 | ||
242 | /* MEMORY_CONFIG_EXTEND */ | 242 | /* MEMORY_CONFIG_EXTEND */ |
243 | #define PPC7D_CPLD_SDRAM_BANK_NUM_MASK 0x02 | ||
243 | #define PPC7D_CPLD_SDRAM_BANK_SIZE_MASK 0xc0 | 244 | #define PPC7D_CPLD_SDRAM_BANK_SIZE_MASK 0xc0 |
244 | #define PPC7D_CPLD_SDRAM_BANK_SIZE_128M 0 | 245 | #define PPC7D_CPLD_SDRAM_BANK_SIZE_128M 0 |
245 | #define PPC7D_CPLD_SDRAM_BANK_SIZE_256M 0x40 | 246 | #define PPC7D_CPLD_SDRAM_BANK_SIZE_256M 0x40 |
diff --git a/arch/ppc/syslib/cpm2_pic.c b/arch/ppc/syslib/cpm2_pic.c index 954b07fc1df3..c867be6981cb 100644 --- a/arch/ppc/syslib/cpm2_pic.c +++ b/arch/ppc/syslib/cpm2_pic.c | |||
@@ -107,6 +107,11 @@ static void cpm2_end_irq(unsigned int irq_nr) | |||
107 | simr = &(cpm2_immr->im_intctl.ic_simrh); | 107 | simr = &(cpm2_immr->im_intctl.ic_simrh); |
108 | ppc_cached_irq_mask[word] |= 1 << bit; | 108 | ppc_cached_irq_mask[word] |= 1 << bit; |
109 | simr[word] = ppc_cached_irq_mask[word]; | 109 | simr[word] = ppc_cached_irq_mask[word]; |
110 | /* | ||
111 | * Work around large numbers of spurious IRQs on PowerPC 82xx | ||
112 | * systems. | ||
113 | */ | ||
114 | mb(); | ||
110 | } | 115 | } |
111 | } | 116 | } |
112 | 117 | ||
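
The added mb() forces the store that re-enables the interrupt source in the SIMR out to the CPM before cpm2_end_irq() returns, which is what the spurious-IRQ workaround in the comment relies on. The idiom, reduced to a sketch with a hypothetical register pointer:

    /* Sketch of the unmask-then-fence idiom used in cpm2_end_irq(). */
    static void example_end_irq(volatile u32 __iomem *simr, u32 mask_word)
    {
            out_be32(simr, mask_word);  /* re-enable the interrupt source */
            mb();                       /* make sure the write reached the controller */
    }
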
diff --git a/arch/ppc/syslib/m8260_pci.c b/arch/ppc/syslib/m8260_pci.c index bd564fb35ab6..057cc3f8ff37 100644 --- a/arch/ppc/syslib/m8260_pci.c +++ b/arch/ppc/syslib/m8260_pci.c | |||
@@ -171,10 +171,9 @@ void __init m8260_find_bridges(void) | |||
171 | m8260_setup_pci(hose); | 171 | m8260_setup_pci(hose); |
172 | hose->pci_mem_offset = MPC826x_PCI_MEM_OFFSET; | 172 | hose->pci_mem_offset = MPC826x_PCI_MEM_OFFSET; |
173 | 173 | ||
174 | isa_io_base = | 174 | hose->io_base_virt = ioremap(MPC826x_PCI_IO_BASE, |
175 | (unsigned long) ioremap(MPC826x_PCI_IO_BASE, | ||
176 | MPC826x_PCI_IO_SIZE); | 175 | MPC826x_PCI_IO_SIZE); |
177 | hose->io_base_virt = (void *) isa_io_base; | 176 | isa_io_base = (unsigned long) hose->io_base_virt; |
178 | 177 | ||
179 | /* setup resources */ | 178 | /* setup resources */ |
180 | pci_init_resource(&hose->mem_resources[0], | 179 | pci_init_resource(&hose->mem_resources[0], |
diff --git a/arch/ppc/syslib/mpc52xx_pci.c b/arch/ppc/syslib/mpc52xx_pci.c index c723efd954a6..59cf3e8bd1a0 100644 --- a/arch/ppc/syslib/mpc52xx_pci.c +++ b/arch/ppc/syslib/mpc52xx_pci.c | |||
@@ -205,13 +205,11 @@ mpc52xx_find_bridges(void) | |||
205 | 205 | ||
206 | hose->pci_mem_offset = MPC52xx_PCI_MEM_OFFSET; | 206 | hose->pci_mem_offset = MPC52xx_PCI_MEM_OFFSET; |
207 | 207 | ||
208 | isa_io_base = | 208 | hose->io_base_virt = ioremap(MPC52xx_PCI_IO_BASE, MPC52xx_PCI_IO_SIZE); |
209 | (unsigned long) ioremap(MPC52xx_PCI_IO_BASE, | 209 | isa_io_base = (unsigned long) hose->io_base_virt; |
210 | MPC52xx_PCI_IO_SIZE); | ||
211 | hose->io_base_virt = (void *) isa_io_base; | ||
212 | 210 | ||
213 | hose->cfg_addr = &pci_regs->car; | 211 | hose->cfg_addr = &pci_regs->car; |
214 | hose->cfg_data = (void __iomem *) isa_io_base; | 212 | hose->cfg_data = hose->io_base_virt; |
215 | 213 | ||
216 | /* Setup resources */ | 214 | /* Setup resources */ |
217 | pci_init_resource(&hose->mem_resources[0], | 215 | pci_init_resource(&hose->mem_resources[0], |
diff --git a/arch/ppc/syslib/ppc85xx_setup.c b/arch/ppc/syslib/ppc85xx_setup.c index 81f1968c3269..152c3ef1312a 100644 --- a/arch/ppc/syslib/ppc85xx_setup.c +++ b/arch/ppc/syslib/ppc85xx_setup.c | |||
@@ -280,16 +280,14 @@ mpc85xx_setup_hose(void) | |||
280 | hose_a->io_space.end = MPC85XX_PCI1_UPPER_IO; | 280 | hose_a->io_space.end = MPC85XX_PCI1_UPPER_IO; |
281 | hose_a->io_base_phys = MPC85XX_PCI1_IO_BASE; | 281 | hose_a->io_base_phys = MPC85XX_PCI1_IO_BASE; |
282 | #ifdef CONFIG_85xx_PCI2 | 282 | #ifdef CONFIG_85xx_PCI2 |
283 | isa_io_base = | 283 | hose_a->io_base_virt = ioremap(MPC85XX_PCI1_IO_BASE, |
284 | (unsigned long) ioremap(MPC85XX_PCI1_IO_BASE, | ||
285 | MPC85XX_PCI1_IO_SIZE + | 284 | MPC85XX_PCI1_IO_SIZE + |
286 | MPC85XX_PCI2_IO_SIZE); | 285 | MPC85XX_PCI2_IO_SIZE); |
287 | #else | 286 | #else |
288 | isa_io_base = | 287 | hose_a->io_base_virt = ioremap(MPC85XX_PCI1_IO_BASE, |
289 | (unsigned long) ioremap(MPC85XX_PCI1_IO_BASE, | ||
290 | MPC85XX_PCI1_IO_SIZE); | 288 | MPC85XX_PCI1_IO_SIZE); |
291 | #endif | 289 | #endif |
292 | hose_a->io_base_virt = (void *) isa_io_base; | 290 | isa_io_base = (unsigned long)hose_a->io_base_virt; |
293 | 291 | ||
294 | /* setup resources */ | 292 | /* setup resources */ |
295 | pci_init_resource(&hose_a->mem_resources[0], | 293 | pci_init_resource(&hose_a->mem_resources[0], |
@@ -329,8 +327,8 @@ mpc85xx_setup_hose(void) | |||
329 | hose_b->io_space.start = MPC85XX_PCI2_LOWER_IO; | 327 | hose_b->io_space.start = MPC85XX_PCI2_LOWER_IO; |
330 | hose_b->io_space.end = MPC85XX_PCI2_UPPER_IO; | 328 | hose_b->io_space.end = MPC85XX_PCI2_UPPER_IO; |
331 | hose_b->io_base_phys = MPC85XX_PCI2_IO_BASE; | 329 | hose_b->io_base_phys = MPC85XX_PCI2_IO_BASE; |
332 | hose_b->io_base_virt = (void *) isa_io_base + MPC85XX_PCI1_IO_SIZE; | 330 | hose_b->io_base_virt = hose_a->io_base_virt + MPC85XX_PCI1_IO_SIZE; |
333 | 331 | ||
334 | /* setup resources */ | 332 | /* setup resources */ |
335 | pci_init_resource(&hose_b->mem_resources[0], | 333 | pci_init_resource(&hose_b->mem_resources[0], |
336 | MPC85XX_PCI2_LOWER_MEM, | 334 | MPC85XX_PCI2_LOWER_MEM, |
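
The m8260, mpc52xx and mpc85xx bridge fixes are all the same cleanup: ioremap() the PCI I/O window straight into hose->io_base_virt and only then derive isa_io_base (and, on 85xx, the second hose's base) from it, instead of laundering the pointer through an unsigned long. A condensed sketch of the two-hose case; the EX_* constants are placeholders, not the real MPC85xx addresses:

    #include <asm/io.h>
    #include <asm/pci-bridge.h>

    #define EX_PCI1_IO_BASE  0xe2000000  /* placeholder address */
    #define EX_PCI1_IO_SIZE  0x00800000  /* placeholder sizes */
    #define EX_PCI2_IO_SIZE  0x00800000

    static void example_map_pci_io(struct pci_controller *hose_a,
                                   struct pci_controller *hose_b)
    {
            hose_a->io_base_virt = ioremap(EX_PCI1_IO_BASE,
                                           EX_PCI1_IO_SIZE + EX_PCI2_IO_SIZE);
            isa_io_base = (unsigned long)hose_a->io_base_virt;

            /* the second hose reuses the mapping, offset past the first window */
            hose_b->io_base_virt = hose_a->io_base_virt + EX_PCI1_IO_SIZE;
    }
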
diff --git a/arch/ppc64/Kconfig b/arch/ppc64/Kconfig index ef1f05e437c4..5cb343883e4d 100644 --- a/arch/ppc64/Kconfig +++ b/arch/ppc64/Kconfig | |||
@@ -40,6 +40,10 @@ config COMPAT | |||
40 | bool | 40 | bool |
41 | default y | 41 | default y |
42 | 42 | ||
43 | config SCHED_NO_NO_OMIT_FRAME_POINTER | ||
44 | bool | ||
45 | default y | ||
46 | |||
43 | # We optimistically allocate largepages from the VM, so make the limit | 47 | # We optimistically allocate largepages from the VM, so make the limit |
44 | # large enough (16MB). This badly named config option is actually | 48 | # large enough (16MB). This badly named config option is actually |
45 | # max order + 1 | 49 | # max order + 1 |
@@ -258,6 +262,7 @@ config PPC_RTAS | |||
258 | config RTAS_PROC | 262 | config RTAS_PROC |
259 | bool "Proc interface to RTAS" | 263 | bool "Proc interface to RTAS" |
260 | depends on PPC_RTAS | 264 | depends on PPC_RTAS |
265 | default y | ||
261 | 266 | ||
262 | config RTAS_FLASH | 267 | config RTAS_FLASH |
263 | tristate "Firmware flash interface" | 268 | tristate "Firmware flash interface" |
@@ -293,6 +298,9 @@ config SECCOMP | |||
293 | 298 | ||
294 | endmenu | 299 | endmenu |
295 | 300 | ||
301 | config ISA_DMA_API | ||
302 | bool | ||
303 | default y | ||
296 | 304 | ||
297 | menu "General setup" | 305 | menu "General setup" |
298 | 306 | ||
diff --git a/arch/ppc64/Kconfig.debug b/arch/ppc64/Kconfig.debug index e341a129da80..46b1ce58da3b 100644 --- a/arch/ppc64/Kconfig.debug +++ b/arch/ppc64/Kconfig.debug | |||
@@ -5,6 +5,9 @@ source "lib/Kconfig.debug" | |||
5 | config DEBUG_STACKOVERFLOW | 5 | config DEBUG_STACKOVERFLOW |
6 | bool "Check for stack overflows" | 6 | bool "Check for stack overflows" |
7 | depends on DEBUG_KERNEL | 7 | depends on DEBUG_KERNEL |
8 | help | ||
9 | This option will cause messages to be printed if free stack space | ||
10 | drops below a certain limit. | ||
8 | 11 | ||
9 | config KPROBES | 12 | config KPROBES |
10 | bool "Kprobes" | 13 | bool "Kprobes" |
diff --git a/arch/ppc64/Makefile b/arch/ppc64/Makefile index d33e20bcc52f..691f3008e698 100644 --- a/arch/ppc64/Makefile +++ b/arch/ppc64/Makefile | |||
@@ -56,13 +56,20 @@ LDFLAGS_vmlinux := -Bstatic -e $(KERNELLOAD) -Ttext $(KERNELLOAD) | |||
56 | CFLAGS += -msoft-float -pipe -mminimal-toc -mtraceback=none \ | 56 | CFLAGS += -msoft-float -pipe -mminimal-toc -mtraceback=none \ |
57 | -mcall-aixdesc | 57 | -mcall-aixdesc |
58 | 58 | ||
59 | GCC_VERSION := $(call cc-version) | ||
60 | GCC_BROKEN_VEC := $(shell if [ $(GCC_VERSION) -lt 0400 ] ; then echo "y"; fi ;) | ||
61 | |||
59 | ifeq ($(CONFIG_POWER4_ONLY),y) | 62 | ifeq ($(CONFIG_POWER4_ONLY),y) |
60 | ifeq ($(CONFIG_ALTIVEC),y) | 63 | ifeq ($(CONFIG_ALTIVEC),y) |
64 | ifeq ($(GCC_BROKEN_VEC),y) | ||
61 | CFLAGS += $(call cc-option,-mcpu=970) | 65 | CFLAGS += $(call cc-option,-mcpu=970) |
62 | else | 66 | else |
63 | CFLAGS += $(call cc-option,-mcpu=power4) | 67 | CFLAGS += $(call cc-option,-mcpu=power4) |
64 | endif | 68 | endif |
65 | else | 69 | else |
70 | CFLAGS += $(call cc-option,-mcpu=power4) | ||
71 | endif | ||
72 | else | ||
66 | CFLAGS += $(call cc-option,-mtune=power4) | 73 | CFLAGS += $(call cc-option,-mtune=power4) |
67 | endif | 74 | endif |
68 | 75 | ||
diff --git a/arch/ppc64/boot/addnote.c b/arch/ppc64/boot/addnote.c index 66ff8103bf4d..719663a694bb 100644 --- a/arch/ppc64/boot/addnote.c +++ b/arch/ppc64/boot/addnote.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <unistd.h> | 19 | #include <unistd.h> |
20 | #include <string.h> | 20 | #include <string.h> |
21 | 21 | ||
22 | /* CHRP note section */ | ||
22 | char arch[] = "PowerPC"; | 23 | char arch[] = "PowerPC"; |
23 | 24 | ||
24 | #define N_DESCR 6 | 25 | #define N_DESCR 6 |
@@ -31,6 +32,29 @@ unsigned int descr[N_DESCR] = { | |||
31 | 0x4000, /* load-base */ | 32 | 0x4000, /* load-base */ |
32 | }; | 33 | }; |
33 | 34 | ||
35 | /* RPA note section */ | ||
36 | char rpaname[] = "IBM,RPA-Client-Config"; | ||
37 | |||
38 | /* | ||
39 | * Note: setting ignore_my_client_config *should* mean that OF ignores | ||
40 | * all the other fields, but there is a firmware bug which means that | ||
41 | * it looks at the splpar field at least. So these values need to be | ||
42 | * reasonable. | ||
43 | */ | ||
44 | #define N_RPA_DESCR 8 | ||
45 | unsigned int rpanote[N_RPA_DESCR] = { | ||
46 | 0, /* lparaffinity */ | ||
47 | 64, /* min_rmo_size */ | ||
48 | 0, /* min_rmo_percent */ | ||
49 | 40, /* max_pft_size */ | ||
50 | 1, /* splpar */ | ||
51 | -1, /* min_load */ | ||
52 | 0, /* new_mem_def */ | ||
53 | 1, /* ignore_my_client_config */ | ||
54 | }; | ||
55 | |||
56 | #define ROUNDUP(len) (((len) + 3) & ~3) | ||
57 | |||
34 | unsigned char buf[512]; | 58 | unsigned char buf[512]; |
35 | 59 | ||
36 | #define GET_16BE(off) ((buf[off] << 8) + (buf[(off)+1])) | 60 | #define GET_16BE(off) ((buf[off] << 8) + (buf[(off)+1])) |
@@ -69,7 +93,7 @@ main(int ac, char **av) | |||
69 | { | 93 | { |
70 | int fd, n, i; | 94 | int fd, n, i; |
71 | int ph, ps, np; | 95 | int ph, ps, np; |
72 | int nnote, ns; | 96 | int nnote, nnote2, ns; |
73 | 97 | ||
74 | if (ac != 2) { | 98 | if (ac != 2) { |
75 | fprintf(stderr, "Usage: %s elf-file\n", av[0]); | 99 | fprintf(stderr, "Usage: %s elf-file\n", av[0]); |
@@ -81,7 +105,8 @@ main(int ac, char **av) | |||
81 | exit(1); | 105 | exit(1); |
82 | } | 106 | } |
83 | 107 | ||
84 | nnote = strlen(arch) + 1 + (N_DESCR + 3) * 4; | 108 | nnote = 12 + ROUNDUP(strlen(arch) + 1) + sizeof(descr); |
109 | nnote2 = 12 + ROUNDUP(strlen(rpaname) + 1) + sizeof(rpanote); | ||
85 | 110 | ||
86 | n = read(fd, buf, sizeof(buf)); | 111 | n = read(fd, buf, sizeof(buf)); |
87 | if (n < 0) { | 112 | if (n < 0) { |
@@ -104,7 +129,7 @@ main(int ac, char **av) | |||
104 | np = GET_16BE(E_PHNUM); | 129 | np = GET_16BE(E_PHNUM); |
105 | if (ph < E_HSIZE || ps < PH_HSIZE || np < 1) | 130 | if (ph < E_HSIZE || ps < PH_HSIZE || np < 1) |
106 | goto notelf; | 131 | goto notelf; |
107 | if (ph + (np + 1) * ps + nnote > n) | 132 | if (ph + (np + 2) * ps + nnote + nnote2 > n) |
108 | goto nospace; | 133 | goto nospace; |
109 | 134 | ||
110 | for (i = 0; i < np; ++i) { | 135 | for (i = 0; i < np; ++i) { |
@@ -117,12 +142,12 @@ main(int ac, char **av) | |||
117 | } | 142 | } |
118 | 143 | ||
119 | /* XXX check that the area we want to use is all zeroes */ | 144 | /* XXX check that the area we want to use is all zeroes */ |
120 | for (i = 0; i < ps + nnote; ++i) | 145 | for (i = 0; i < 2 * ps + nnote + nnote2; ++i) |
121 | if (buf[ph + i] != 0) | 146 | if (buf[ph + i] != 0) |
122 | goto nospace; | 147 | goto nospace; |
123 | 148 | ||
124 | /* fill in the program header entry */ | 149 | /* fill in the program header entry */ |
125 | ns = ph + ps; | 150 | ns = ph + 2 * ps; |
126 | PUT_32BE(ph + PH_TYPE, PT_NOTE); | 151 | PUT_32BE(ph + PH_TYPE, PT_NOTE); |
127 | PUT_32BE(ph + PH_OFFSET, ns); | 152 | PUT_32BE(ph + PH_OFFSET, ns); |
128 | PUT_32BE(ph + PH_FILESZ, nnote); | 153 | PUT_32BE(ph + PH_FILESZ, nnote); |
@@ -134,11 +159,26 @@ main(int ac, char **av) | |||
134 | PUT_32BE(ns + 8, 0x1275); | 159 | PUT_32BE(ns + 8, 0x1275); |
135 | strcpy(&buf[ns + 12], arch); | 160 | strcpy(&buf[ns + 12], arch); |
136 | ns += 12 + strlen(arch) + 1; | 161 | ns += 12 + strlen(arch) + 1; |
137 | for (i = 0; i < N_DESCR; ++i) | 162 | for (i = 0; i < N_DESCR; ++i, ns += 4) |
138 | PUT_32BE(ns + i * 4, descr[i]); | 163 | PUT_32BE(ns, descr[i]); |
164 | |||
165 | /* fill in the second program header entry and the RPA note area */ | ||
166 | ph += ps; | ||
167 | PUT_32BE(ph + PH_TYPE, PT_NOTE); | ||
168 | PUT_32BE(ph + PH_OFFSET, ns); | ||
169 | PUT_32BE(ph + PH_FILESZ, nnote2); | ||
170 | |||
171 | /* fill in the note area we point to */ | ||
172 | PUT_32BE(ns, strlen(rpaname) + 1); | ||
173 | PUT_32BE(ns + 4, sizeof(rpanote)); | ||
174 | PUT_32BE(ns + 8, 0x12759999); | ||
175 | strcpy(&buf[ns + 12], rpaname); | ||
176 | ns += 12 + ROUNDUP(strlen(rpaname) + 1); | ||
177 | for (i = 0; i < N_RPA_DESCR; ++i, ns += 4) | ||
178 | PUT_32BE(ns, rpanote[i]); | ||
139 | 179 | ||
140 | /* Update the number of program headers */ | 180 | /* Update the number of program headers */ |
141 | PUT_16BE(E_PHNUM, np + 1); | 181 | PUT_16BE(E_PHNUM, np + 2); |
142 | 182 | ||
143 | /* write back */ | 183 | /* write back */ |
144 | lseek(fd, (long) 0, SEEK_SET); | 184 | lseek(fd, (long) 0, SEEK_SET); |
@@ -155,11 +195,11 @@ main(int ac, char **av) | |||
155 | exit(0); | 195 | exit(0); |
156 | 196 | ||
157 | notelf: | 197 | notelf: |
158 | fprintf(stderr, "%s does not appear to be an ELF file\n", av[0]); | 198 | fprintf(stderr, "%s does not appear to be an ELF file\n", av[1]); |
159 | exit(1); | 199 | exit(1); |
160 | 200 | ||
161 | nospace: | 201 | nospace: |
162 | fprintf(stderr, "sorry, I can't find space in %s to put the note\n", | 202 | fprintf(stderr, "sorry, I can't find space in %s to put the note\n", |
163 | av[0]); | 203 | av[1]); |
164 | exit(1); | 204 | exit(1); |
165 | } | 205 | } |
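
addnote now emits two PT_NOTE program headers, and the size of each note follows the standard ELF note layout: a 12-byte header (name size, descriptor size, type), the NUL-terminated name padded to a 4-byte boundary, then the descriptor words, which is exactly what 12 + ROUNDUP(strlen(name) + 1) + sizeof(desc) computes. A quick worked check for the RPA note, assuming the 8-word descriptor declared above:

    #include <stdio.h>
    #include <string.h>

    #define ROUNDUP(len) (((len) + 3) & ~3)

    int main(void)
    {
            const char rpaname[] = "IBM,RPA-Client-Config";  /* 21 chars + NUL */
            int nnote2 = 12 + ROUNDUP(strlen(rpaname) + 1)
                            + 8 * (int)sizeof(unsigned int); /* 8 descriptor words */

            /* 12 + ROUNDUP(22) + 32 = 12 + 24 + 32 = 68 bytes */
            printf("RPA note size: %d\n", nnote2);
            return 0;
    }
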
diff --git a/arch/ppc64/boot/main.c b/arch/ppc64/boot/main.c index b0fa86ad8b1b..da12ea2ca464 100644 --- a/arch/ppc64/boot/main.c +++ b/arch/ppc64/boot/main.c | |||
@@ -14,7 +14,6 @@ | |||
14 | #include <linux/string.h> | 14 | #include <linux/string.h> |
15 | #include <asm/processor.h> | 15 | #include <asm/processor.h> |
16 | #include <asm/page.h> | 16 | #include <asm/page.h> |
17 | #include <asm/bootinfo.h> | ||
18 | 17 | ||
19 | extern void *finddevice(const char *); | 18 | extern void *finddevice(const char *); |
20 | extern int getprop(void *, const char *, void *, int); | 19 | extern int getprop(void *, const char *, void *, int); |
diff --git a/arch/ppc64/boot/start.c b/arch/ppc64/boot/start.c deleted file mode 100644 index ea247e79b55e..000000000000 --- a/arch/ppc64/boot/start.c +++ /dev/null | |||
@@ -1,654 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) Paul Mackerras 1997. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | #include <stdarg.h> | ||
10 | #include <linux/types.h> | ||
11 | #include <linux/string.h> | ||
12 | #include <linux/ctype.h> | ||
13 | |||
14 | #include <asm/div64.h> | ||
15 | |||
16 | int (*prom)(void *); | ||
17 | |||
18 | void *chosen_handle; | ||
19 | void *stdin; | ||
20 | void *stdout; | ||
21 | void *stderr; | ||
22 | |||
23 | void exit(void); | ||
24 | void *finddevice(const char *name); | ||
25 | int getprop(void *phandle, const char *name, void *buf, int buflen); | ||
26 | void chrpboot(int a1, int a2, void *prom); /* in main.c */ | ||
27 | |||
28 | void printk(char *fmt, ...); | ||
29 | |||
30 | void | ||
31 | start(int a1, int a2, void *promptr) | ||
32 | { | ||
33 | prom = (int (*)(void *)) promptr; | ||
34 | chosen_handle = finddevice("/chosen"); | ||
35 | if (chosen_handle == (void *) -1) | ||
36 | exit(); | ||
37 | if (getprop(chosen_handle, "stdout", &stdout, sizeof(stdout)) != 4) | ||
38 | exit(); | ||
39 | stderr = stdout; | ||
40 | if (getprop(chosen_handle, "stdin", &stdin, sizeof(stdin)) != 4) | ||
41 | exit(); | ||
42 | |||
43 | chrpboot(a1, a2, promptr); | ||
44 | for (;;) | ||
45 | exit(); | ||
46 | } | ||
47 | |||
48 | int | ||
49 | write(void *handle, void *ptr, int nb) | ||
50 | { | ||
51 | struct prom_args { | ||
52 | char *service; | ||
53 | int nargs; | ||
54 | int nret; | ||
55 | void *ihandle; | ||
56 | void *addr; | ||
57 | int len; | ||
58 | int actual; | ||
59 | } args; | ||
60 | |||
61 | args.service = "write"; | ||
62 | args.nargs = 3; | ||
63 | args.nret = 1; | ||
64 | args.ihandle = handle; | ||
65 | args.addr = ptr; | ||
66 | args.len = nb; | ||
67 | args.actual = -1; | ||
68 | (*prom)(&args); | ||
69 | return args.actual; | ||
70 | } | ||
71 | |||
72 | int | ||
73 | read(void *handle, void *ptr, int nb) | ||
74 | { | ||
75 | struct prom_args { | ||
76 | char *service; | ||
77 | int nargs; | ||
78 | int nret; | ||
79 | void *ihandle; | ||
80 | void *addr; | ||
81 | int len; | ||
82 | int actual; | ||
83 | } args; | ||
84 | |||
85 | args.service = "read"; | ||
86 | args.nargs = 3; | ||
87 | args.nret = 1; | ||
88 | args.ihandle = handle; | ||
89 | args.addr = ptr; | ||
90 | args.len = nb; | ||
91 | args.actual = -1; | ||
92 | (*prom)(&args); | ||
93 | return args.actual; | ||
94 | } | ||
95 | |||
96 | void | ||
97 | exit() | ||
98 | { | ||
99 | struct prom_args { | ||
100 | char *service; | ||
101 | } args; | ||
102 | |||
103 | for (;;) { | ||
104 | args.service = "exit"; | ||
105 | (*prom)(&args); | ||
106 | } | ||
107 | } | ||
108 | |||
109 | void | ||
110 | pause(void) | ||
111 | { | ||
112 | struct prom_args { | ||
113 | char *service; | ||
114 | } args; | ||
115 | |||
116 | args.service = "enter"; | ||
117 | (*prom)(&args); | ||
118 | } | ||
119 | |||
120 | void * | ||
121 | finddevice(const char *name) | ||
122 | { | ||
123 | struct prom_args { | ||
124 | char *service; | ||
125 | int nargs; | ||
126 | int nret; | ||
127 | const char *devspec; | ||
128 | void *phandle; | ||
129 | } args; | ||
130 | |||
131 | args.service = "finddevice"; | ||
132 | args.nargs = 1; | ||
133 | args.nret = 1; | ||
134 | args.devspec = name; | ||
135 | args.phandle = (void *) -1; | ||
136 | (*prom)(&args); | ||
137 | return args.phandle; | ||
138 | } | ||
139 | |||
140 | void * | ||
141 | claim(unsigned long virt, unsigned long size, unsigned long align) | ||
142 | { | ||
143 | struct prom_args { | ||
144 | char *service; | ||
145 | int nargs; | ||
146 | int nret; | ||
147 | unsigned int virt; | ||
148 | unsigned int size; | ||
149 | unsigned int align; | ||
150 | void *ret; | ||
151 | } args; | ||
152 | |||
153 | args.service = "claim"; | ||
154 | args.nargs = 3; | ||
155 | args.nret = 1; | ||
156 | args.virt = virt; | ||
157 | args.size = size; | ||
158 | args.align = align; | ||
159 | (*prom)(&args); | ||
160 | return args.ret; | ||
161 | } | ||
162 | |||
163 | int | ||
164 | getprop(void *phandle, const char *name, void *buf, int buflen) | ||
165 | { | ||
166 | struct prom_args { | ||
167 | char *service; | ||
168 | int nargs; | ||
169 | int nret; | ||
170 | void *phandle; | ||
171 | const char *name; | ||
172 | void *buf; | ||
173 | int buflen; | ||
174 | int size; | ||
175 | } args; | ||
176 | |||
177 | args.service = "getprop"; | ||
178 | args.nargs = 4; | ||
179 | args.nret = 1; | ||
180 | args.phandle = phandle; | ||
181 | args.name = name; | ||
182 | args.buf = buf; | ||
183 | args.buflen = buflen; | ||
184 | args.size = -1; | ||
185 | (*prom)(&args); | ||
186 | return args.size; | ||
187 | } | ||
188 | |||
189 | int | ||
190 | putc(int c, void *f) | ||
191 | { | ||
192 | char ch = c; | ||
193 | |||
194 | if (c == '\n') | ||
195 | putc('\r', f); | ||
196 | return write(f, &ch, 1) == 1? c: -1; | ||
197 | } | ||
198 | |||
199 | int | ||
200 | putchar(int c) | ||
201 | { | ||
202 | return putc(c, stdout); | ||
203 | } | ||
204 | |||
205 | int | ||
206 | fputs(char *str, void *f) | ||
207 | { | ||
208 | int n = strlen(str); | ||
209 | |||
210 | return write(f, str, n) == n? 0: -1; | ||
211 | } | ||
212 | |||
213 | int | ||
214 | readchar(void) | ||
215 | { | ||
216 | char ch; | ||
217 | |||
218 | for (;;) { | ||
219 | switch (read(stdin, &ch, 1)) { | ||
220 | case 1: | ||
221 | return ch; | ||
222 | case -1: | ||
223 | printk("read(stdin) returned -1\r\n"); | ||
224 | return -1; | ||
225 | } | ||
226 | } | ||
227 | } | ||
228 | |||
229 | static char line[256]; | ||
230 | static char *lineptr; | ||
231 | static int lineleft; | ||
232 | |||
233 | int | ||
234 | getchar(void) | ||
235 | { | ||
236 | int c; | ||
237 | |||
238 | if (lineleft == 0) { | ||
239 | lineptr = line; | ||
240 | for (;;) { | ||
241 | c = readchar(); | ||
242 | if (c == -1 || c == 4) | ||
243 | break; | ||
244 | if (c == '\r' || c == '\n') { | ||
245 | *lineptr++ = '\n'; | ||
246 | putchar('\n'); | ||
247 | break; | ||
248 | } | ||
249 | switch (c) { | ||
250 | case 0177: | ||
251 | case '\b': | ||
252 | if (lineptr > line) { | ||
253 | putchar('\b'); | ||
254 | putchar(' '); | ||
255 | putchar('\b'); | ||
256 | --lineptr; | ||
257 | } | ||
258 | break; | ||
259 | case 'U' & 0x1F: | ||
260 | while (lineptr > line) { | ||
261 | putchar('\b'); | ||
262 | putchar(' '); | ||
263 | putchar('\b'); | ||
264 | --lineptr; | ||
265 | } | ||
266 | break; | ||
267 | default: | ||
268 | if (lineptr >= &line[sizeof(line) - 1]) | ||
269 | putchar('\a'); | ||
270 | else { | ||
271 | putchar(c); | ||
272 | *lineptr++ = c; | ||
273 | } | ||
274 | } | ||
275 | } | ||
276 | lineleft = lineptr - line; | ||
277 | lineptr = line; | ||
278 | } | ||
279 | if (lineleft == 0) | ||
280 | return -1; | ||
281 | --lineleft; | ||
282 | return *lineptr++; | ||
283 | } | ||
284 | |||
285 | |||
286 | |||
287 | /* String functions lifted from lib/vsprintf.c and lib/ctype.c */ | ||
288 | unsigned char _ctype[] = { | ||
289 | _C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */ | ||
290 | _C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */ | ||
291 | _C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */ | ||
292 | _C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */ | ||
293 | _S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */ | ||
294 | _P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */ | ||
295 | _D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */ | ||
296 | _D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */ | ||
297 | _P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */ | ||
298 | _U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */ | ||
299 | _U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */ | ||
300 | _U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */ | ||
301 | _P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */ | ||
302 | _L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */ | ||
303 | _L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */ | ||
304 | _L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */ | ||
305 | 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */ | ||
306 | 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */ | ||
307 | _S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */ | ||
308 | _P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */ | ||
309 | _U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */ | ||
310 | _U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */ | ||
311 | _L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */ | ||
312 | _L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */ | ||
313 | |||
314 | size_t strnlen(const char * s, size_t count) | ||
315 | { | ||
316 | const char *sc; | ||
317 | |||
318 | for (sc = s; count-- && *sc != '\0'; ++sc) | ||
319 | /* nothing */; | ||
320 | return sc - s; | ||
321 | } | ||
322 | |||
323 | unsigned long simple_strtoul(const char *cp,char **endp,unsigned int base) | ||
324 | { | ||
325 | unsigned long result = 0,value; | ||
326 | |||
327 | if (!base) { | ||
328 | base = 10; | ||
329 | if (*cp == '0') { | ||
330 | base = 8; | ||
331 | cp++; | ||
332 | if ((*cp == 'x') && isxdigit(cp[1])) { | ||
333 | cp++; | ||
334 | base = 16; | ||
335 | } | ||
336 | } | ||
337 | } | ||
338 | while (isxdigit(*cp) && | ||
339 | (value = isdigit(*cp) ? *cp-'0' : toupper(*cp)-'A'+10) < base) { | ||
340 | result = result*base + value; | ||
341 | cp++; | ||
342 | } | ||
343 | if (endp) | ||
344 | *endp = (char *)cp; | ||
345 | return result; | ||
346 | } | ||
347 | |||
348 | long simple_strtol(const char *cp,char **endp,unsigned int base) | ||
349 | { | ||
350 | if(*cp=='-') | ||
351 | return -simple_strtoul(cp+1,endp,base); | ||
352 | return simple_strtoul(cp,endp,base); | ||
353 | } | ||
354 | |||
355 | static int skip_atoi(const char **s) | ||
356 | { | ||
357 | int i=0; | ||
358 | |||
359 | while (isdigit(**s)) | ||
360 | i = i*10 + *((*s)++) - '0'; | ||
361 | return i; | ||
362 | } | ||
363 | |||
364 | #define ZEROPAD 1 /* pad with zero */ | ||
365 | #define SIGN 2 /* unsigned/signed long */ | ||
366 | #define PLUS 4 /* show plus */ | ||
367 | #define SPACE 8 /* space if plus */ | ||
368 | #define LEFT 16 /* left justified */ | ||
369 | #define SPECIAL 32 /* 0x */ | ||
370 | #define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ | ||
371 | |||
372 | static char * number(char * str, long long num, int base, int size, int precision, int type) | ||
373 | { | ||
374 | char c,sign,tmp[66]; | ||
375 | const char *digits="0123456789abcdefghijklmnopqrstuvwxyz"; | ||
376 | int i; | ||
377 | |||
378 | if (type & LARGE) | ||
379 | digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; | ||
380 | if (type & LEFT) | ||
381 | type &= ~ZEROPAD; | ||
382 | if (base < 2 || base > 36) | ||
383 | return 0; | ||
384 | c = (type & ZEROPAD) ? '0' : ' '; | ||
385 | sign = 0; | ||
386 | if (type & SIGN) { | ||
387 | if (num < 0) { | ||
388 | sign = '-'; | ||
389 | num = -num; | ||
390 | size--; | ||
391 | } else if (type & PLUS) { | ||
392 | sign = '+'; | ||
393 | size--; | ||
394 | } else if (type & SPACE) { | ||
395 | sign = ' '; | ||
396 | size--; | ||
397 | } | ||
398 | } | ||
399 | if (type & SPECIAL) { | ||
400 | if (base == 16) | ||
401 | size -= 2; | ||
402 | else if (base == 8) | ||
403 | size--; | ||
404 | } | ||
405 | i = 0; | ||
406 | if (num == 0) | ||
407 | tmp[i++]='0'; | ||
408 | else while (num != 0) | ||
409 | tmp[i++] = digits[do_div(num,base)]; | ||
410 | if (i > precision) | ||
411 | precision = i; | ||
412 | size -= precision; | ||
413 | if (!(type&(ZEROPAD+LEFT))) | ||
414 | while(size-->0) | ||
415 | *str++ = ' '; | ||
416 | if (sign) | ||
417 | *str++ = sign; | ||
418 | if (type & SPECIAL) { | ||
419 | if (base==8) | ||
420 | *str++ = '0'; | ||
421 | else if (base==16) { | ||
422 | *str++ = '0'; | ||
423 | *str++ = digits[33]; | ||
424 | } | ||
425 | } | ||
426 | if (!(type & LEFT)) | ||
427 | while (size-- > 0) | ||
428 | *str++ = c; | ||
429 | while (i < precision--) | ||
430 | *str++ = '0'; | ||
431 | while (i-- > 0) | ||
432 | *str++ = tmp[i]; | ||
433 | while (size-- > 0) | ||
434 | *str++ = ' '; | ||
435 | return str; | ||
436 | } | ||
437 | |||
438 | /* Forward decl. needed for IP address printing stuff... */ | ||
439 | int sprintf(char * buf, const char *fmt, ...); | ||
440 | |||
441 | int vsprintf(char *buf, const char *fmt, va_list args) | ||
442 | { | ||
443 | int len; | ||
444 | unsigned long long num; | ||
445 | int i, base; | ||
446 | char * str; | ||
447 | const char *s; | ||
448 | |||
449 | int flags; /* flags to number() */ | ||
450 | |||
451 | int field_width; /* width of output field */ | ||
452 | int precision; /* min. # of digits for integers; max | ||
453 | number of chars for from string */ | ||
454 | int qualifier; /* 'h', 'l', or 'L' for integer fields */ | ||
455 | /* 'z' support added 23/7/1999 S.H. */ | ||
456 | /* 'z' changed to 'Z' --davidm 1/25/99 */ | ||
457 | |||
458 | |||
459 | for (str=buf ; *fmt ; ++fmt) { | ||
460 | if (*fmt != '%') { | ||
461 | *str++ = *fmt; | ||
462 | continue; | ||
463 | } | ||
464 | |||
465 | /* process flags */ | ||
466 | flags = 0; | ||
467 | repeat: | ||
468 | ++fmt; /* this also skips first '%' */ | ||
469 | switch (*fmt) { | ||
470 | case '-': flags |= LEFT; goto repeat; | ||
471 | case '+': flags |= PLUS; goto repeat; | ||
472 | case ' ': flags |= SPACE; goto repeat; | ||
473 | case '#': flags |= SPECIAL; goto repeat; | ||
474 | case '0': flags |= ZEROPAD; goto repeat; | ||
475 | } | ||
476 | |||
477 | /* get field width */ | ||
478 | field_width = -1; | ||
479 | if (isdigit(*fmt)) | ||
480 | field_width = skip_atoi(&fmt); | ||
481 | else if (*fmt == '*') { | ||
482 | ++fmt; | ||
483 | /* it's the next argument */ | ||
484 | field_width = va_arg(args, int); | ||
485 | if (field_width < 0) { | ||
486 | field_width = -field_width; | ||
487 | flags |= LEFT; | ||
488 | } | ||
489 | } | ||
490 | |||
491 | /* get the precision */ | ||
492 | precision = -1; | ||
493 | if (*fmt == '.') { | ||
494 | ++fmt; | ||
495 | if (isdigit(*fmt)) | ||
496 | precision = skip_atoi(&fmt); | ||
497 | else if (*fmt == '*') { | ||
498 | ++fmt; | ||
499 | /* it's the next argument */ | ||
500 | precision = va_arg(args, int); | ||
501 | } | ||
502 | if (precision < 0) | ||
503 | precision = 0; | ||
504 | } | ||
505 | |||
506 | /* get the conversion qualifier */ | ||
507 | qualifier = -1; | ||
508 | if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') { | ||
509 | qualifier = *fmt; | ||
510 | ++fmt; | ||
511 | } | ||
512 | |||
513 | /* default base */ | ||
514 | base = 10; | ||
515 | |||
516 | switch (*fmt) { | ||
517 | case 'c': | ||
518 | if (!(flags & LEFT)) | ||
519 | while (--field_width > 0) | ||
520 | *str++ = ' '; | ||
521 | *str++ = (unsigned char) va_arg(args, int); | ||
522 | while (--field_width > 0) | ||
523 | *str++ = ' '; | ||
524 | continue; | ||
525 | |||
526 | case 's': | ||
527 | s = va_arg(args, char *); | ||
528 | if (!s) | ||
529 | s = "<NULL>"; | ||
530 | |||
531 | len = strnlen(s, precision); | ||
532 | |||
533 | if (!(flags & LEFT)) | ||
534 | while (len < field_width--) | ||
535 | *str++ = ' '; | ||
536 | for (i = 0; i < len; ++i) | ||
537 | *str++ = *s++; | ||
538 | while (len < field_width--) | ||
539 | *str++ = ' '; | ||
540 | continue; | ||
541 | |||
542 | case 'p': | ||
543 | if (field_width == -1) { | ||
544 | field_width = 2*sizeof(void *); | ||
545 | flags |= ZEROPAD; | ||
546 | } | ||
547 | str = number(str, | ||
548 | (unsigned long) va_arg(args, void *), 16, | ||
549 | field_width, precision, flags); | ||
550 | continue; | ||
551 | |||
552 | |||
553 | case 'n': | ||
554 | if (qualifier == 'l') { | ||
555 | long * ip = va_arg(args, long *); | ||
556 | *ip = (str - buf); | ||
557 | } else if (qualifier == 'Z') { | ||
558 | size_t * ip = va_arg(args, size_t *); | ||
559 | *ip = (str - buf); | ||
560 | } else { | ||
561 | int * ip = va_arg(args, int *); | ||
562 | *ip = (str - buf); | ||
563 | } | ||
564 | continue; | ||
565 | |||
566 | case '%': | ||
567 | *str++ = '%'; | ||
568 | continue; | ||
569 | |||
570 | /* integer number formats - set up the flags and "break" */ | ||
571 | case 'o': | ||
572 | base = 8; | ||
573 | break; | ||
574 | |||
575 | case 'X': | ||
576 | flags |= LARGE; | ||
577 | case 'x': | ||
578 | base = 16; | ||
579 | break; | ||
580 | |||
581 | case 'd': | ||
582 | case 'i': | ||
583 | flags |= SIGN; | ||
584 | case 'u': | ||
585 | break; | ||
586 | |||
587 | default: | ||
588 | *str++ = '%'; | ||
589 | if (*fmt) | ||
590 | *str++ = *fmt; | ||
591 | else | ||
592 | --fmt; | ||
593 | continue; | ||
594 | } | ||
595 | if (qualifier == 'L') | ||
596 | num = va_arg(args, long long); | ||
597 | else if (qualifier == 'l') { | ||
598 | num = va_arg(args, unsigned long); | ||
599 | if (flags & SIGN) | ||
600 | num = (signed long) num; | ||
601 | } else if (qualifier == 'Z') { | ||
602 | num = va_arg(args, size_t); | ||
603 | } else if (qualifier == 'h') { | ||
604 | num = (unsigned short) va_arg(args, int); | ||
605 | if (flags & SIGN) | ||
606 | num = (signed short) num; | ||
607 | } else { | ||
608 | num = va_arg(args, unsigned int); | ||
609 | if (flags & SIGN) | ||
610 | num = (signed int) num; | ||
611 | } | ||
612 | str = number(str, num, base, field_width, precision, flags); | ||
613 | } | ||
614 | *str = '\0'; | ||
615 | return str-buf; | ||
616 | } | ||
617 | |||
618 | int sprintf(char * buf, const char *fmt, ...) | ||
619 | { | ||
620 | va_list args; | ||
621 | int i; | ||
622 | |||
623 | va_start(args, fmt); | ||
624 | i=vsprintf(buf,fmt,args); | ||
625 | va_end(args); | ||
626 | return i; | ||
627 | } | ||
628 | |||
629 | static char sprint_buf[1024]; | ||
630 | |||
631 | void | ||
632 | printk(char *fmt, ...) | ||
633 | { | ||
634 | va_list args; | ||
635 | int n; | ||
636 | |||
637 | va_start(args, fmt); | ||
638 | n = vsprintf(sprint_buf, fmt, args); | ||
639 | va_end(args); | ||
640 | write(stdout, sprint_buf, n); | ||
641 | } | ||
642 | |||
643 | int | ||
644 | printf(char *fmt, ...) | ||
645 | { | ||
646 | va_list args; | ||
647 | int n; | ||
648 | |||
649 | va_start(args, fmt); | ||
650 | n = vsprintf(sprint_buf, fmt, args); | ||
651 | va_end(args); | ||
652 | write(stdout, sprint_buf, n); | ||
653 | return n; | ||
654 | } | ||
diff --git a/arch/ppc64/kernel/HvLpEvent.c b/arch/ppc64/kernel/HvLpEvent.c index 9802beefa217..f8f19637f73f 100644 --- a/arch/ppc64/kernel/HvLpEvent.c +++ b/arch/ppc64/kernel/HvLpEvent.c | |||
@@ -45,7 +45,7 @@ int HvLpEvent_unregisterHandler( HvLpEvent_Type eventType ) | |||
45 | /* We now sleep until all other CPUs have scheduled. This ensures that | 45 | /* We now sleep until all other CPUs have scheduled. This ensures that |
46 | * the deletion is seen by all other CPUs, and that the deleted handler | 46 | * the deletion is seen by all other CPUs, and that the deleted handler |
47 | * isn't still running on another CPU when we return. */ | 47 | * isn't still running on another CPU when we return. */ |
48 | synchronize_kernel(); | 48 | synchronize_rcu(); |
49 | } | 49 | } |
50 | } | 50 | } |
51 | return rc; | 51 | return rc; |
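synchronize_kernel() was renamed to synchronize_rcu() around this time; the unregister pattern it supports is unchanged: unpublish the handler pointer first, then wait until every CPU has passed through a quiescent state, so the old handler cannot still be executing when the function returns. A minimal sketch of that pattern with illustrative names (not the HvLpEvent code itself):

    #include <linux/rcupdate.h>

    /* hypothetical handler table, updated under some external mutex */
    static void (*example_handlers[16])(void *event);

    static void example_unregister_handler(int type)
    {
        /* unpublish: readers that dereference the slot under
         * rcu_read_lock() from now on see NULL and skip the call */
        rcu_assign_pointer(example_handlers[type], NULL);

        /* wait out all pre-existing readers; afterwards the old
         * handler is guaranteed not to be running on any CPU */
        synchronize_rcu();
    }
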
diff --git a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S index 90b41f48d21c..b944717c1dbd 100644 --- a/arch/ppc64/kernel/misc.S +++ b/arch/ppc64/kernel/misc.S | |||
@@ -32,7 +32,7 @@ | |||
32 | .text | 32 | .text |
33 | 33 | ||
34 | /* | 34 | /* |
35 | * Returns (address we're running at) - (address we were linked at) | 35 | * Returns (address we were linked at) - (address we are running at) |
36 | * for use before the text and data are mapped to KERNELBASE. | 36 | * for use before the text and data are mapped to KERNELBASE. |
37 | */ | 37 | */ |
38 | 38 | ||
diff --git a/arch/ppc64/kernel/nvram.c b/arch/ppc64/kernel/nvram.c index b9069c2d1933..4e71781a4414 100644 --- a/arch/ppc64/kernel/nvram.c +++ b/arch/ppc64/kernel/nvram.c | |||
@@ -339,9 +339,9 @@ static int nvram_remove_os_partition(void) | |||
339 | static int nvram_create_os_partition(void) | 339 | static int nvram_create_os_partition(void) |
340 | { | 340 | { |
341 | struct list_head * p; | 341 | struct list_head * p; |
342 | struct nvram_partition * part; | 342 | struct nvram_partition *part = NULL; |
343 | struct nvram_partition * new_part = NULL; | 343 | struct nvram_partition *new_part = NULL; |
344 | struct nvram_partition * free_part = NULL; | 344 | struct nvram_partition *free_part = NULL; |
345 | int seq_init[2] = { 0, 0 }; | 345 | int seq_init[2] = { 0, 0 }; |
346 | loff_t tmp_index; | 346 | loff_t tmp_index; |
347 | long size = 0; | 347 | long size = 0; |
@@ -364,13 +364,11 @@ static int nvram_create_os_partition(void) | |||
364 | free_part = part; | 364 | free_part = part; |
365 | } | 365 | } |
366 | } | 366 | } |
367 | if (!size) { | 367 | if (!size) |
368 | return -ENOSPC; | 368 | return -ENOSPC; |
369 | } | ||
370 | 369 | ||
371 | /* Create our OS partition */ | 370 | /* Create our OS partition */ |
372 | new_part = (struct nvram_partition *) | 371 | new_part = kmalloc(sizeof(*new_part), GFP_KERNEL); |
373 | kmalloc(sizeof(struct nvram_partition), GFP_KERNEL); | ||
374 | if (!new_part) { | 372 | if (!new_part) { |
375 | printk(KERN_ERR "nvram_create_os_partition: kmalloc failed\n"); | 373 | printk(KERN_ERR "nvram_create_os_partition: kmalloc failed\n"); |
376 | return -ENOMEM; | 374 | return -ENOMEM; |
@@ -379,7 +377,7 @@ static int nvram_create_os_partition(void) | |||
379 | new_part->index = free_part->index; | 377 | new_part->index = free_part->index; |
380 | new_part->header.signature = NVRAM_SIG_OS; | 378 | new_part->header.signature = NVRAM_SIG_OS; |
381 | new_part->header.length = size; | 379 | new_part->header.length = size; |
382 | sprintf(new_part->header.name, "ppc64,linux"); | 380 | strcpy(new_part->header.name, "ppc64,linux"); |
383 | new_part->header.checksum = nvram_checksum(&new_part->header); | 381 | new_part->header.checksum = nvram_checksum(&new_part->header); |
384 | 382 | ||
385 | rc = nvram_write_header(new_part); | 383 | rc = nvram_write_header(new_part); |
@@ -394,7 +392,8 @@ static int nvram_create_os_partition(void) | |||
394 | tmp_index = new_part->index + NVRAM_HEADER_LEN; | 392 | tmp_index = new_part->index + NVRAM_HEADER_LEN; |
395 | rc = ppc_md.nvram_write((char *)&seq_init, sizeof(seq_init), &tmp_index); | 393 | rc = ppc_md.nvram_write((char *)&seq_init, sizeof(seq_init), &tmp_index); |
396 | if (rc <= 0) { | 394 | if (rc <= 0) { |
397 | printk(KERN_ERR "nvram_create_os_partition: nvram_write failed (%d)\n", rc); | 395 | printk(KERN_ERR "nvram_create_os_partition: nvram_write " |
396 | "failed (%d)\n", rc); | ||
398 | return rc; | 397 | return rc; |
399 | } | 398 | } |
400 | 399 | ||
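Two of the nvram.c changes are stock kernel cleanups rather than behavioural fixes: kmalloc() returns void *, so the cast is unnecessary, and sizing the allocation from the pointer (sizeof(*new_part)) stays correct if the type is ever renamed; likewise sprintf() with a constant format and no arguments is just a slower strcpy(). The preferred idiom, shown on a hypothetical structure (not the nvram code):

    #include <linux/slab.h>
    #include <linux/string.h>

    struct example_part {
        char name[12];
        int  length;
    };

    static struct example_part *example_alloc_part(int length)
    {
        struct example_part *p;

        /* size follows the pointer's type; no cast needed from void * */
        p = kmalloc(sizeof(*p), GFP_KERNEL);
        if (!p)
            return NULL;

        strcpy(p->name, "ppc64,linux");  /* constant string: strcpy, not sprintf */
        p->length = length;
        return p;
    }
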
diff --git a/arch/ppc64/kernel/pSeries_hvCall.S b/arch/ppc64/kernel/pSeries_hvCall.S index 0715d3038019..176e8da76466 100644 --- a/arch/ppc64/kernel/pSeries_hvCall.S +++ b/arch/ppc64/kernel/pSeries_hvCall.S | |||
@@ -28,6 +28,8 @@ | |||
28 | unsigned long *out3); R10 | 28 | unsigned long *out3); R10 |
29 | */ | 29 | */ |
30 | _GLOBAL(plpar_hcall) | 30 | _GLOBAL(plpar_hcall) |
31 | HMT_MEDIUM | ||
32 | |||
31 | mfcr r0 | 33 | mfcr r0 |
32 | 34 | ||
33 | std r8,STK_PARM(r8)(r1) /* Save out ptrs */ | 35 | std r8,STK_PARM(r8)(r1) /* Save out ptrs */ |
@@ -53,6 +55,8 @@ _GLOBAL(plpar_hcall) | |||
53 | 55 | ||
54 | /* Simple interface with no output values (other than status) */ | 56 | /* Simple interface with no output values (other than status) */ |
55 | _GLOBAL(plpar_hcall_norets) | 57 | _GLOBAL(plpar_hcall_norets) |
58 | HMT_MEDIUM | ||
59 | |||
56 | mfcr r0 | 60 | mfcr r0 |
57 | stw r0,8(r1) | 61 | stw r0,8(r1) |
58 | 62 | ||
@@ -75,6 +79,8 @@ _GLOBAL(plpar_hcall_norets) | |||
75 | unsigned long *out1); 120(R1) | 79 | unsigned long *out1); 120(R1) |
76 | */ | 80 | */ |
77 | _GLOBAL(plpar_hcall_8arg_2ret) | 81 | _GLOBAL(plpar_hcall_8arg_2ret) |
82 | HMT_MEDIUM | ||
83 | |||
78 | mfcr r0 | 84 | mfcr r0 |
79 | ld r11,STK_PARM(r11)(r1) /* put arg8 in R11 */ | 85 | ld r11,STK_PARM(r11)(r1) /* put arg8 in R11 */ |
80 | stw r0,8(r1) | 86 | stw r0,8(r1) |
@@ -99,6 +105,8 @@ _GLOBAL(plpar_hcall_8arg_2ret) | |||
99 | unsigned long *out4); 112(R1) | 105 | unsigned long *out4); 112(R1) |
100 | */ | 106 | */ |
101 | _GLOBAL(plpar_hcall_4out) | 107 | _GLOBAL(plpar_hcall_4out) |
108 | HMT_MEDIUM | ||
109 | |||
102 | mfcr r0 | 110 | mfcr r0 |
103 | stw r0,8(r1) | 111 | stw r0,8(r1) |
104 | 112 | ||
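HMT_MEDIUM restores medium hardware-thread priority on entry to these hypervisor-call paths; on SMT parts the priority is encoded in otherwise no-op "or rN,rN,rN" instruction forms. A paraphrased sketch of the C-level equivalents (encodings assumed from the ppc64 headers; the names here are made up):

    /* assumed encodings: or 1,1,1 = low, or 2,2,2 = medium, or 3,3,3 = high */
    #define EXAMPLE_HMT_LOW()     asm volatile("or 1,1,1")
    #define EXAMPLE_HMT_MEDIUM()  asm volatile("or 2,2,2")
    #define EXAMPLE_HMT_HIGH()    asm volatile("or 3,3,3")

    /* typical use: drop priority while spinning, restore it before real work */
    static void example_spin_wait(volatile int *flag)
    {
        EXAMPLE_HMT_LOW();
        while (!*flag)
            ;
        EXAMPLE_HMT_MEDIUM();
    }
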
diff --git a/arch/ppc64/kernel/pSeries_smp.c b/arch/ppc64/kernel/pSeries_smp.c index c60d8cb2b84d..fbad349ec58c 100644 --- a/arch/ppc64/kernel/pSeries_smp.c +++ b/arch/ppc64/kernel/pSeries_smp.c | |||
@@ -326,13 +326,6 @@ static void __devinit smp_xics_setup_cpu(int cpu) | |||
326 | 326 | ||
327 | cpu_clear(cpu, of_spin_map); | 327 | cpu_clear(cpu, of_spin_map); |
328 | 328 | ||
329 | /* | ||
330 | * Put the calling processor into the GIQ. This is really only | ||
331 | * necessary from a secondary thread as the OF start-cpu interface | ||
332 | * performs this function for us on primary threads. | ||
333 | */ | ||
334 | rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE, | ||
335 | (1UL << interrupt_server_size) - 1 - default_distrib_server, 1); | ||
336 | } | 329 | } |
337 | 330 | ||
338 | static DEFINE_SPINLOCK(timebase_lock); | 331 | static DEFINE_SPINLOCK(timebase_lock); |
diff --git a/arch/ppc64/kernel/pci.c b/arch/ppc64/kernel/pci.c index be3cc387c1ec..d786d4b6af0b 100644 --- a/arch/ppc64/kernel/pci.c +++ b/arch/ppc64/kernel/pci.c | |||
@@ -438,7 +438,7 @@ pgprot_t pci_phys_mem_access_prot(struct file *file, | |||
438 | int i; | 438 | int i; |
439 | 439 | ||
440 | if (page_is_ram(offset >> PAGE_SHIFT)) | 440 | if (page_is_ram(offset >> PAGE_SHIFT)) |
441 | return prot; | 441 | return __pgprot(prot); |
442 | 442 | ||
443 | prot |= _PAGE_NO_CACHE | _PAGE_GUARDED; | 443 | prot |= _PAGE_NO_CACHE | _PAGE_GUARDED; |
444 | 444 | ||
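The pci.c change is a type-correctness fix: pci_phys_mem_access_prot() returns pgprot_t, but prot is handled as a plain unsigned long inside the function (it is OR-ed with page flags two lines later), so the early return has to wrap it with __pgprot(). With strict MM type checking the wrapper is a one-member struct, roughly like this simplified model (not the actual ppc64 definitions):

    /* simplified model of the strict-typechecking pgprot_t */
    typedef struct { unsigned long pgprot; } example_pgprot_t;

    #define EXAMPLE_PGPROT_VAL(x)  ((x).pgprot)
    #define EXAMPLE_PGPROT(x)      ((example_pgprot_t) { (x) })

    example_pgprot_t example_access_prot(example_pgprot_t vma_prot, int cacheable)
    {
        unsigned long prot = EXAMPLE_PGPROT_VAL(vma_prot);

        if (cacheable)
            return EXAMPLE_PGPROT(prot);   /* must wrap the scalar back up */

        prot |= 0x400UL;                   /* e.g. a no-cache bit (made up) */
        return EXAMPLE_PGPROT(prot);
    }
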
diff --git a/arch/ppc64/kernel/prom.c b/arch/ppc64/kernel/prom.c index 45a4ad08fbc2..eb6538b58008 100644 --- a/arch/ppc64/kernel/prom.c +++ b/arch/ppc64/kernel/prom.c | |||
@@ -321,6 +321,10 @@ static int __devinit finish_node_interrupts(struct device_node *np, | |||
321 | char *name = get_property(ic->parent, "name", NULL); | 321 | char *name = get_property(ic->parent, "name", NULL); |
322 | if (name && !strcmp(name, "u3")) | 322 | if (name && !strcmp(name, "u3")) |
323 | np->intrs[intrcount].line += 128; | 323 | np->intrs[intrcount].line += 128; |
324 | else if (!(name && !strcmp(name, "mac-io"))) | ||
325 | /* ignore other cascaded controllers, such as | ||
326 | the k2-sata-root */ | ||
327 | break; | ||
324 | } | 328 | } |
325 | np->intrs[intrcount].sense = 1; | 329 | np->intrs[intrcount].sense = 1; |
326 | if (n > 1) | 330 | if (n > 1) |
@@ -830,7 +834,7 @@ void __init unflatten_device_tree(void) | |||
830 | { | 834 | { |
831 | unsigned long start, mem, size; | 835 | unsigned long start, mem, size; |
832 | struct device_node **allnextp = &allnodes; | 836 | struct device_node **allnextp = &allnodes; |
833 | char *p; | 837 | char *p = NULL; |
834 | int l = 0; | 838 | int l = 0; |
835 | 839 | ||
836 | DBG(" -> unflatten_device_tree()\n"); | 840 | DBG(" -> unflatten_device_tree()\n"); |
diff --git a/arch/ppc64/kernel/prom_init.c b/arch/ppc64/kernel/prom_init.c index 8dffa9ae2623..35ec42de962e 100644 --- a/arch/ppc64/kernel/prom_init.c +++ b/arch/ppc64/kernel/prom_init.c | |||
@@ -493,6 +493,113 @@ static void __init early_cmdline_parse(void) | |||
493 | } | 493 | } |
494 | 494 | ||
495 | /* | 495 | /* |
496 | * To tell the firmware what our capabilities are, we have to pass | ||
497 | * it a fake 32-bit ELF header containing a couple of PT_NOTE sections | ||
498 | * that contain structures that contain the actual values. | ||
499 | */ | ||
500 | static struct fake_elf { | ||
501 | Elf32_Ehdr elfhdr; | ||
502 | Elf32_Phdr phdr[2]; | ||
503 | struct chrpnote { | ||
504 | u32 namesz; | ||
505 | u32 descsz; | ||
506 | u32 type; | ||
507 | char name[8]; /* "PowerPC" */ | ||
508 | struct chrpdesc { | ||
509 | u32 real_mode; | ||
510 | u32 real_base; | ||
511 | u32 real_size; | ||
512 | u32 virt_base; | ||
513 | u32 virt_size; | ||
514 | u32 load_base; | ||
515 | } chrpdesc; | ||
516 | } chrpnote; | ||
517 | struct rpanote { | ||
518 | u32 namesz; | ||
519 | u32 descsz; | ||
520 | u32 type; | ||
521 | char name[24]; /* "IBM,RPA-Client-Config" */ | ||
522 | struct rpadesc { | ||
523 | u32 lpar_affinity; | ||
524 | u32 min_rmo_size; | ||
525 | u32 min_rmo_percent; | ||
526 | u32 max_pft_size; | ||
527 | u32 splpar; | ||
528 | u32 min_load; | ||
529 | u32 new_mem_def; | ||
530 | u32 ignore_me; | ||
531 | } rpadesc; | ||
532 | } rpanote; | ||
533 | } fake_elf = { | ||
534 | .elfhdr = { | ||
535 | .e_ident = { 0x7f, 'E', 'L', 'F', | ||
536 | ELFCLASS32, ELFDATA2MSB, EV_CURRENT }, | ||
537 | .e_type = ET_EXEC, /* yeah right */ | ||
538 | .e_machine = EM_PPC, | ||
539 | .e_version = EV_CURRENT, | ||
540 | .e_phoff = offsetof(struct fake_elf, phdr), | ||
541 | .e_phentsize = sizeof(Elf32_Phdr), | ||
542 | .e_phnum = 2 | ||
543 | }, | ||
544 | .phdr = { | ||
545 | [0] = { | ||
546 | .p_type = PT_NOTE, | ||
547 | .p_offset = offsetof(struct fake_elf, chrpnote), | ||
548 | .p_filesz = sizeof(struct chrpnote) | ||
549 | }, [1] = { | ||
550 | .p_type = PT_NOTE, | ||
551 | .p_offset = offsetof(struct fake_elf, rpanote), | ||
552 | .p_filesz = sizeof(struct rpanote) | ||
553 | } | ||
554 | }, | ||
555 | .chrpnote = { | ||
556 | .namesz = sizeof("PowerPC"), | ||
557 | .descsz = sizeof(struct chrpdesc), | ||
558 | .type = 0x1275, | ||
559 | .name = "PowerPC", | ||
560 | .chrpdesc = { | ||
561 | .real_mode = ~0U, /* ~0 means "don't care" */ | ||
562 | .real_base = ~0U, | ||
563 | .real_size = ~0U, | ||
564 | .virt_base = ~0U, | ||
565 | .virt_size = ~0U, | ||
566 | .load_base = ~0U | ||
567 | }, | ||
568 | }, | ||
569 | .rpanote = { | ||
570 | .namesz = sizeof("IBM,RPA-Client-Config"), | ||
571 | .descsz = sizeof(struct rpadesc), | ||
572 | .type = 0x12759999, | ||
573 | .name = "IBM,RPA-Client-Config", | ||
574 | .rpadesc = { | ||
575 | .lpar_affinity = 0, | ||
576 | .min_rmo_size = 64, /* in megabytes */ | ||
577 | .min_rmo_percent = 0, | ||
578 | .max_pft_size = 48, /* 2^48 bytes max PFT size */ | ||
579 | .splpar = 1, | ||
580 | .min_load = ~0U, | ||
581 | .new_mem_def = 0 | ||
582 | } | ||
583 | } | ||
584 | }; | ||
585 | |||
586 | static void __init prom_send_capabilities(void) | ||
587 | { | ||
588 | unsigned long offset = reloc_offset(); | ||
589 | ihandle elfloader; | ||
590 | int ret; | ||
591 | |||
592 | elfloader = call_prom("open", 1, 1, ADDR("/packages/elf-loader")); | ||
593 | if (elfloader == 0) { | ||
594 | prom_printf("couldn't open /packages/elf-loader\n"); | ||
595 | return; | ||
596 | } | ||
597 | ret = call_prom("call-method", 3, 1, ADDR("process-elf-header"), | ||
598 | elfloader, ADDR(&fake_elf)); | ||
599 | call_prom("close", 1, 0, elfloader); | ||
600 | } | ||
601 | |||
602 | /* | ||
496 | * Memory allocation strategy... our layout is normally: | 603 | * Memory allocation strategy... our layout is normally: |
497 | * | 604 | * |
498 | * at 14Mb or more we vmlinux, then a gap and initrd. In some rare cases, initrd | 605 | * at 14Mb or more we vmlinux, then a gap and initrd. In some rare cases, initrd |
@@ -1448,6 +1555,12 @@ static void __init scan_dt_build_strings(phandle node, unsigned long *mem_start, | |||
1448 | } | 1555 | } |
1449 | } | 1556 | } |
1450 | 1557 | ||
1558 | /* | ||
1559 | * The Open Firmware 1275 specification states properties must be 31 bytes or | ||
1560 | * less, however not all firmwares obey this. Make it 64 bytes to be safe. | ||
1561 | */ | ||
1562 | #define MAX_PROPERTY_NAME 64 | ||
1563 | |||
1451 | static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start, | 1564 | static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start, |
1452 | unsigned long *mem_end) | 1565 | unsigned long *mem_end) |
1453 | { | 1566 | { |
@@ -1457,7 +1570,7 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start, | |||
1457 | unsigned long soff; | 1570 | unsigned long soff; |
1458 | unsigned char *valp; | 1571 | unsigned char *valp; |
1459 | unsigned long offset = reloc_offset(); | 1572 | unsigned long offset = reloc_offset(); |
1460 | char pname[32]; | 1573 | char pname[MAX_PROPERTY_NAME]; |
1461 | char *path; | 1574 | char *path; |
1462 | 1575 | ||
1463 | path = RELOC(prom_scratch); | 1576 | path = RELOC(prom_scratch); |
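The fake 32-bit ELF image above exists only so the firmware's process-elf-header client-interface method will walk its two PT_NOTE segments and pick up the CHRP and RPA capability descriptors. An ELF note record is simply a namesz/descsz/type triple followed by the 4-byte-padded name and then the descriptor, which is why chrpnote and rpanote begin with the same three u32 fields. A stand-alone sketch of that record layout and how a consumer walks it (generic note parsing, not the firmware code):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    struct note_hdr {
        uint32_t namesz;   /* strlen(name) + 1 */
        uint32_t descsz;   /* bytes of descriptor following the padded name */
        uint32_t type;     /* e.g. 0x1275 for the CHRP note above */
    };

    #define ALIGN4(x) (((x) + 3u) & ~3u)

    static void walk_notes(const unsigned char *p, size_t len)
    {
        while (len >= sizeof(struct note_hdr)) {
            struct note_hdr h;
            size_t rec;

            memcpy(&h, p, sizeof(h));
            rec = sizeof(h) + ALIGN4(h.namesz) + ALIGN4(h.descsz);
            if (rec > len)
                break;
            printf("note '%s' type=0x%x descsz=%u\n",
                   (const char *)p + sizeof(h), h.type, h.descsz);
            p += rec;
            len -= rec;
        }
    }

    int main(void)
    {
        /* build one synthetic note: name "PowerPC", type 0x1275, 4-byte desc */
        unsigned char buf[32];
        struct note_hdr h = { sizeof("PowerPC"), 4, 0x1275 };
        uint32_t desc = ~0u;   /* "don't care", as in chrpdesc above */

        memcpy(buf, &h, sizeof(h));
        memcpy(buf + sizeof(h), "PowerPC", sizeof("PowerPC"));
        memcpy(buf + sizeof(h) + ALIGN4(sizeof("PowerPC")), &desc, sizeof(desc));
        walk_notes(buf, sizeof(h) + ALIGN4(sizeof("PowerPC")) + sizeof(desc));
        return 0;
    }
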
diff --git a/arch/ppc64/kernel/ptrace.c b/arch/ppc64/kernel/ptrace.c index 354a287c67eb..9f8c6087ae56 100644 --- a/arch/ppc64/kernel/ptrace.c +++ b/arch/ppc64/kernel/ptrace.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/security.h> | 28 | #include <linux/security.h> |
29 | #include <linux/audit.h> | 29 | #include <linux/audit.h> |
30 | #include <linux/seccomp.h> | 30 | #include <linux/seccomp.h> |
31 | #include <linux/signal.h> | ||
31 | 32 | ||
32 | #include <asm/uaccess.h> | 33 | #include <asm/uaccess.h> |
33 | #include <asm/page.h> | 34 | #include <asm/page.h> |
@@ -162,7 +163,7 @@ int sys_ptrace(long request, long pid, long addr, long data) | |||
162 | case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ | 163 | case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ |
163 | case PTRACE_CONT: { /* restart after signal. */ | 164 | case PTRACE_CONT: { /* restart after signal. */ |
164 | ret = -EIO; | 165 | ret = -EIO; |
165 | if ((unsigned long) data > _NSIG) | 166 | if (!valid_signal(data)) |
166 | break; | 167 | break; |
167 | if (request == PTRACE_SYSCALL) | 168 | if (request == PTRACE_SYSCALL) |
168 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 169 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
@@ -194,7 +195,7 @@ int sys_ptrace(long request, long pid, long addr, long data) | |||
194 | 195 | ||
195 | case PTRACE_SINGLESTEP: { /* set the trap flag. */ | 196 | case PTRACE_SINGLESTEP: { /* set the trap flag. */ |
196 | ret = -EIO; | 197 | ret = -EIO; |
197 | if ((unsigned long) data > _NSIG) | 198 | if (!valid_signal(data)) |
198 | break; | 199 | break; |
199 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 200 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
200 | set_single_step(child); | 201 | set_single_step(child); |
@@ -304,14 +305,17 @@ static void do_syscall_trace(void) | |||
304 | 305 | ||
305 | void do_syscall_trace_enter(struct pt_regs *regs) | 306 | void do_syscall_trace_enter(struct pt_regs *regs) |
306 | { | 307 | { |
308 | if (test_thread_flag(TIF_SYSCALL_TRACE) | ||
309 | && (current->ptrace & PT_PTRACED)) | ||
310 | do_syscall_trace(); | ||
311 | |||
307 | if (unlikely(current->audit_context)) | 312 | if (unlikely(current->audit_context)) |
308 | audit_syscall_entry(current, regs->gpr[0], | 313 | audit_syscall_entry(current, |
314 | test_thread_flag(TIF_32BIT)?AUDIT_ARCH_PPC:AUDIT_ARCH_PPC64, | ||
315 | regs->gpr[0], | ||
309 | regs->gpr[3], regs->gpr[4], | 316 | regs->gpr[3], regs->gpr[4], |
310 | regs->gpr[5], regs->gpr[6]); | 317 | regs->gpr[5], regs->gpr[6]); |
311 | 318 | ||
312 | if (test_thread_flag(TIF_SYSCALL_TRACE) | ||
313 | && (current->ptrace & PT_PTRACED)) | ||
314 | do_syscall_trace(); | ||
315 | } | 319 | } |
316 | 320 | ||
317 | void do_syscall_trace_leave(struct pt_regs *regs) | 321 | void do_syscall_trace_leave(struct pt_regs *regs) |
@@ -319,7 +323,9 @@ void do_syscall_trace_leave(struct pt_regs *regs) | |||
319 | secure_computing(regs->gpr[0]); | 323 | secure_computing(regs->gpr[0]); |
320 | 324 | ||
321 | if (unlikely(current->audit_context)) | 325 | if (unlikely(current->audit_context)) |
322 | audit_syscall_exit(current, regs->result); | 326 | audit_syscall_exit(current, |
327 | (regs->ccr&0x1000)?AUDITSC_FAILURE:AUDITSC_SUCCESS, | ||
328 | regs->result); | ||
323 | 329 | ||
324 | if ((test_thread_flag(TIF_SYSCALL_TRACE) | 330 | if ((test_thread_flag(TIF_SYSCALL_TRACE) |
325 | || test_thread_flag(TIF_SINGLESTEP)) | 331 | || test_thread_flag(TIF_SINGLESTEP)) |
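Besides passing the audit subsystem the syscall arch and the success/failure result explicitly, these hunks switch the signal-number check to the new valid_signal() helper, which centralizes the "data > _NSIG" comparison every ptrace implementation used to open-code; 0 remains allowed and means "resume without delivering a signal". Roughly, and as used by the callers above (paraphrased sketch, not copied from <linux/signal.h>):

    #include <linux/signal.h>
    #include <linux/errno.h>

    /* essentially what the new helper does (paraphrased) */
    static inline int example_valid_signal(unsigned long sig)
    {
        return sig <= _NSIG;
    }

    /* shape of the PTRACE_CONT/PTRACE_SYSCALL check above */
    static long example_ptrace_cont(long data)
    {
        if (!example_valid_signal(data))
            return -EIO;
        /* ... arrange for `data` to be delivered as the resume signal ... */
        return 0;
    }
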
diff --git a/arch/ppc64/kernel/ptrace32.c b/arch/ppc64/kernel/ptrace32.c index ee81b1b776cc..16436426c7e2 100644 --- a/arch/ppc64/kernel/ptrace32.c +++ b/arch/ppc64/kernel/ptrace32.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/ptrace.h> | 26 | #include <linux/ptrace.h> |
27 | #include <linux/user.h> | 27 | #include <linux/user.h> |
28 | #include <linux/security.h> | 28 | #include <linux/security.h> |
29 | #include <linux/signal.h> | ||
29 | 30 | ||
30 | #include <asm/uaccess.h> | 31 | #include <asm/uaccess.h> |
31 | #include <asm/page.h> | 32 | #include <asm/page.h> |
@@ -293,7 +294,7 @@ int sys32_ptrace(long request, long pid, unsigned long addr, unsigned long data) | |||
293 | case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ | 294 | case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ |
294 | case PTRACE_CONT: { /* restart after signal. */ | 295 | case PTRACE_CONT: { /* restart after signal. */ |
295 | ret = -EIO; | 296 | ret = -EIO; |
296 | if ((unsigned long) data > _NSIG) | 297 | if (!valid_signal(data)) |
297 | break; | 298 | break; |
298 | if (request == PTRACE_SYSCALL) | 299 | if (request == PTRACE_SYSCALL) |
299 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 300 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
@@ -325,7 +326,7 @@ int sys32_ptrace(long request, long pid, unsigned long addr, unsigned long data) | |||
325 | 326 | ||
326 | case PTRACE_SINGLESTEP: { /* set the trap flag. */ | 327 | case PTRACE_SINGLESTEP: { /* set the trap flag. */ |
327 | ret = -EIO; | 328 | ret = -EIO; |
328 | if ((unsigned long) data > _NSIG) | 329 | if (!valid_signal(data)) |
329 | break; | 330 | break; |
330 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 331 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
331 | set_single_step(child); | 332 | set_single_step(child); |
diff --git a/arch/ppc64/kernel/rtas_flash.c b/arch/ppc64/kernel/rtas_flash.c index 3213837282ca..923e2e201a70 100644 --- a/arch/ppc64/kernel/rtas_flash.c +++ b/arch/ppc64/kernel/rtas_flash.c | |||
@@ -218,7 +218,7 @@ static void get_flash_status_msg(int status, char *buf) | |||
218 | } | 218 | } |
219 | 219 | ||
220 | /* Reading the proc file will show status (not the firmware contents) */ | 220 | /* Reading the proc file will show status (not the firmware contents) */ |
221 | static ssize_t rtas_flash_read(struct file *file, char *buf, | 221 | static ssize_t rtas_flash_read(struct file *file, char __user *buf, |
222 | size_t count, loff_t *ppos) | 222 | size_t count, loff_t *ppos) |
223 | { | 223 | { |
224 | struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode); | 224 | struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode); |
@@ -256,7 +256,7 @@ static ssize_t rtas_flash_read(struct file *file, char *buf, | |||
256 | * count is. If the system is low on memory it will be just as well | 256 | * count is. If the system is low on memory it will be just as well |
257 | * that we fail.... | 257 | * that we fail.... |
258 | */ | 258 | */ |
259 | static ssize_t rtas_flash_write(struct file *file, const char *buffer, | 259 | static ssize_t rtas_flash_write(struct file *file, const char __user *buffer, |
260 | size_t count, loff_t *off) | 260 | size_t count, loff_t *off) |
261 | { | 261 | { |
262 | struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode); | 262 | struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode); |
@@ -356,7 +356,7 @@ static void manage_flash(struct rtas_manage_flash_t *args_buf) | |||
356 | args_buf->status = rc; | 356 | args_buf->status = rc; |
357 | } | 357 | } |
358 | 358 | ||
359 | static ssize_t manage_flash_read(struct file *file, char *buf, | 359 | static ssize_t manage_flash_read(struct file *file, char __user *buf, |
360 | size_t count, loff_t *ppos) | 360 | size_t count, loff_t *ppos) |
361 | { | 361 | { |
362 | struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode); | 362 | struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode); |
@@ -386,7 +386,7 @@ static ssize_t manage_flash_read(struct file *file, char *buf, | |||
386 | return msglen; | 386 | return msglen; |
387 | } | 387 | } |
388 | 388 | ||
389 | static ssize_t manage_flash_write(struct file *file, const char *buf, | 389 | static ssize_t manage_flash_write(struct file *file, const char __user *buf, |
390 | size_t count, loff_t *off) | 390 | size_t count, loff_t *off) |
391 | { | 391 | { |
392 | struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode); | 392 | struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode); |
@@ -466,7 +466,7 @@ static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf, | |||
466 | return n; | 466 | return n; |
467 | } | 467 | } |
468 | 468 | ||
469 | static ssize_t validate_flash_read(struct file *file, char *buf, | 469 | static ssize_t validate_flash_read(struct file *file, char __user *buf, |
470 | size_t count, loff_t *ppos) | 470 | size_t count, loff_t *ppos) |
471 | { | 471 | { |
472 | struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode); | 472 | struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode); |
@@ -494,7 +494,7 @@ static ssize_t validate_flash_read(struct file *file, char *buf, | |||
494 | return msglen; | 494 | return msglen; |
495 | } | 495 | } |
496 | 496 | ||
497 | static ssize_t validate_flash_write(struct file *file, const char *buf, | 497 | static ssize_t validate_flash_write(struct file *file, const char __user *buf, |
498 | size_t count, loff_t *off) | 498 | size_t count, loff_t *off) |
499 | { | 499 | { |
500 | struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode); | 500 | struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode); |
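The __user annotations added throughout rtas_flash.c (and scanlog.c below) change no generated code; they mark the pointers as userspace addresses so sparse can verify they are only touched through the uaccess helpers. A minimal read() method in that style (illustrative only, not the flash code):

    #include <linux/fs.h>
    #include <linux/errno.h>
    #include <asm/uaccess.h>   /* copy_to_user(); <linux/uaccess.h> in later trees */

    static ssize_t example_read(struct file *file, char __user *buf,
                                size_t count, loff_t *ppos)
    {
        const char msg[] = "status: ok\n";
        size_t len = sizeof(msg) - 1;

        if (count < len)
            return -EINVAL;

        /* never dereference buf directly: it is a userspace address */
        if (copy_to_user(buf, msg, len))
            return -EFAULT;

        *ppos += len;
        return len;
    }
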
diff --git a/arch/ppc64/kernel/scanlog.c b/arch/ppc64/kernel/scanlog.c index 189b81a41987..4d70736619c7 100644 --- a/arch/ppc64/kernel/scanlog.c +++ b/arch/ppc64/kernel/scanlog.c | |||
@@ -43,7 +43,7 @@ static int scanlog_debug; | |||
43 | static unsigned int ibm_scan_log_dump; /* RTAS token */ | 43 | static unsigned int ibm_scan_log_dump; /* RTAS token */ |
44 | static struct proc_dir_entry *proc_ppc64_scan_log_dump; /* The proc file */ | 44 | static struct proc_dir_entry *proc_ppc64_scan_log_dump; /* The proc file */ |
45 | 45 | ||
46 | static ssize_t scanlog_read(struct file *file, char *buf, | 46 | static ssize_t scanlog_read(struct file *file, char __user *buf, |
47 | size_t count, loff_t *ppos) | 47 | size_t count, loff_t *ppos) |
48 | { | 48 | { |
49 | struct inode * inode = file->f_dentry->d_inode; | 49 | struct inode * inode = file->f_dentry->d_inode; |
@@ -129,7 +129,7 @@ static ssize_t scanlog_read(struct file *file, char *buf, | |||
129 | /*NOTREACHED*/ | 129 | /*NOTREACHED*/ |
130 | } | 130 | } |
131 | 131 | ||
132 | static ssize_t scanlog_write(struct file * file, const char * buf, | 132 | static ssize_t scanlog_write(struct file * file, const char __user * buf, |
133 | size_t count, loff_t *ppos) | 133 | size_t count, loff_t *ppos) |
134 | { | 134 | { |
135 | char stkbuf[20]; | 135 | char stkbuf[20]; |
diff --git a/arch/ppc64/kernel/signal.c b/arch/ppc64/kernel/signal.c index a95a2b49a1d5..bf782276984c 100644 --- a/arch/ppc64/kernel/signal.c +++ b/arch/ppc64/kernel/signal.c | |||
@@ -42,11 +42,7 @@ | |||
42 | 42 | ||
43 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | 43 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) |
44 | 44 | ||
45 | #ifndef MIN | 45 | #define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs)) |
46 | #define MIN(a,b) (((a) < (b)) ? (a) : (b)) | ||
47 | #endif | ||
48 | |||
49 | #define GP_REGS_SIZE MIN(sizeof(elf_gregset_t), sizeof(struct pt_regs)) | ||
50 | #define FP_REGS_SIZE sizeof(elf_fpregset_t) | 46 | #define FP_REGS_SIZE sizeof(elf_fpregset_t) |
51 | 47 | ||
52 | #define TRAMP_TRACEBACK 3 | 48 | #define TRAMP_TRACEBACK 3 |
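Dropping the local MIN() macro in favour of the kernel's min() is more than style: min() refuses to compare operands of different types, catching sign and width mismatches at compile time, and min_t() is the explicit escape hatch when a conversion really is intended. A small sketch:

    #include <linux/kernel.h>   /* min(), min_t() */

    static size_t example_copy_len(size_t have, size_t want)
    {
        return min(have, want);            /* same type: fine */
    }

    static size_t example_copy_len_mixed(size_t have, int want)
    {
        /* min(have, want) would warn: size_t vs int; make the cast
         * explicit instead (callers must guarantee want >= 0) */
        return min_t(size_t, have, want);
    }
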
diff --git a/arch/ppc64/kernel/signal32.c b/arch/ppc64/kernel/signal32.c index b0e167db6af9..3c2fa5c284c0 100644 --- a/arch/ppc64/kernel/signal32.c +++ b/arch/ppc64/kernel/signal32.c | |||
@@ -657,7 +657,7 @@ static int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, | |||
657 | 657 | ||
658 | /* Save user registers on the stack */ | 658 | /* Save user registers on the stack */ |
659 | frame = &rt_sf->uc.uc_mcontext; | 659 | frame = &rt_sf->uc.uc_mcontext; |
660 | if (put_user(regs->gpr[1], (unsigned long __user *)newsp)) | 660 | if (put_user(regs->gpr[1], (u32 __user *)newsp)) |
661 | goto badframe; | 661 | goto badframe; |
662 | 662 | ||
663 | if (vdso32_rt_sigtramp && current->thread.vdso_base) { | 663 | if (vdso32_rt_sigtramp && current->thread.vdso_base) { |
@@ -842,7 +842,7 @@ static int handle_signal32(unsigned long sig, struct k_sigaction *ka, | |||
842 | regs->link = (unsigned long) frame->mctx.tramp; | 842 | regs->link = (unsigned long) frame->mctx.tramp; |
843 | } | 843 | } |
844 | 844 | ||
845 | if (put_user(regs->gpr[1], (unsigned long __user *)newsp)) | 845 | if (put_user(regs->gpr[1], (u32 __user *)newsp)) |
846 | goto badframe; | 846 | goto badframe; |
847 | regs->gpr[1] = (unsigned long) newsp; | 847 | regs->gpr[1] = (unsigned long) newsp; |
848 | regs->gpr[3] = sig; | 848 | regs->gpr[3] = sig; |
diff --git a/arch/ppc64/kernel/smp.c b/arch/ppc64/kernel/smp.c index 1c92da3e4525..3b906cd94037 100644 --- a/arch/ppc64/kernel/smp.c +++ b/arch/ppc64/kernel/smp.c | |||
@@ -125,7 +125,7 @@ void __devinit smp_generic_kick_cpu(int nr) | |||
125 | * the processor will continue on to secondary_start | 125 | * the processor will continue on to secondary_start |
126 | */ | 126 | */ |
127 | paca[nr].cpu_start = 1; | 127 | paca[nr].cpu_start = 1; |
128 | mb(); | 128 | smp_mb(); |
129 | } | 129 | } |
130 | 130 | ||
131 | #endif /* CONFIG_PPC_MULTIPLATFORM */ | 131 | #endif /* CONFIG_PPC_MULTIPLATFORM */ |
@@ -256,7 +256,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic, | |||
256 | } | 256 | } |
257 | 257 | ||
258 | call_data = &data; | 258 | call_data = &data; |
259 | wmb(); | 259 | smp_wmb(); |
260 | /* Send a message to all other CPUs and wait for them to respond */ | 260 | /* Send a message to all other CPUs and wait for them to respond */ |
261 | smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION); | 261 | smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION); |
262 | 262 | ||
@@ -431,7 +431,7 @@ int generic_cpu_enable(unsigned int cpu) | |||
431 | 431 | ||
432 | /* get the target out of it's holding state */ | 432 | /* get the target out of it's holding state */ |
433 | per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; | 433 | per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; |
434 | wmb(); | 434 | smp_wmb(); |
435 | 435 | ||
436 | while (!cpu_online(cpu)) | 436 | while (!cpu_online(cpu)) |
437 | cpu_relax(); | 437 | cpu_relax(); |
@@ -447,7 +447,7 @@ void generic_cpu_die(unsigned int cpu) | |||
447 | int i; | 447 | int i; |
448 | 448 | ||
449 | for (i = 0; i < 100; i++) { | 449 | for (i = 0; i < 100; i++) { |
450 | rmb(); | 450 | smp_rmb(); |
451 | if (per_cpu(cpu_state, cpu) == CPU_DEAD) | 451 | if (per_cpu(cpu_state, cpu) == CPU_DEAD) |
452 | return; | 452 | return; |
453 | msleep(100); | 453 | msleep(100); |
@@ -463,7 +463,7 @@ void generic_mach_cpu_die(void) | |||
463 | cpu = smp_processor_id(); | 463 | cpu = smp_processor_id(); |
464 | printk(KERN_DEBUG "CPU%d offline\n", cpu); | 464 | printk(KERN_DEBUG "CPU%d offline\n", cpu); |
465 | __get_cpu_var(cpu_state) = CPU_DEAD; | 465 | __get_cpu_var(cpu_state) = CPU_DEAD; |
466 | wmb(); | 466 | smp_wmb(); |
467 | while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE) | 467 | while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE) |
468 | cpu_relax(); | 468 | cpu_relax(); |
469 | 469 | ||
@@ -515,7 +515,7 @@ int __devinit __cpu_up(unsigned int cpu) | |||
515 | * be written out to main store before we release | 515 | * be written out to main store before we release |
516 | * the processor. | 516 | * the processor. |
517 | */ | 517 | */ |
518 | mb(); | 518 | smp_mb(); |
519 | 519 | ||
520 | /* wake up cpus */ | 520 | /* wake up cpus */ |
521 | DBG("smp: kicking cpu %d\n", cpu); | 521 | DBG("smp: kicking cpu %d\n", cpu); |
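mb()/wmb()/rmb() always emit sync instructions, even on uniprocessor builds where they are only needed against device accesses; the smp_*() variants compile away when CONFIG_SMP is off, which is all this CPU-to-CPU coordination requires. The pairing the hunks rely on looks like this flag-publication sketch (illustrative, not the actual smp.c code):

    #include <asm/system.h>      /* smp_wmb()/smp_rmb() in 2.6-era trees */
    #include <asm/processor.h>   /* cpu_relax() */

    static int example_data;
    static int example_ready;

    /* producer CPU */
    static void example_publish(int value)
    {
        example_data = value;
        smp_wmb();          /* data must be visible before the flag */
        example_ready = 1;
    }

    /* consumer CPU */
    static int example_consume(void)
    {
        while (!example_ready)
            cpu_relax();
        smp_rmb();          /* order the flag read before the data read */
        return example_data;
    }
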
diff --git a/arch/ppc64/kernel/time.c b/arch/ppc64/kernel/time.c index 77ded5a363b6..772a465b49f9 100644 --- a/arch/ppc64/kernel/time.c +++ b/arch/ppc64/kernel/time.c | |||
@@ -221,15 +221,15 @@ static __inline__ void timer_recalc_offset(unsigned long cur_tb) | |||
221 | temp_varp->tb_to_xs = do_gtod.varp->tb_to_xs; | 221 | temp_varp->tb_to_xs = do_gtod.varp->tb_to_xs; |
222 | temp_varp->tb_orig_stamp = new_tb_orig_stamp; | 222 | temp_varp->tb_orig_stamp = new_tb_orig_stamp; |
223 | temp_varp->stamp_xsec = new_stamp_xsec; | 223 | temp_varp->stamp_xsec = new_stamp_xsec; |
224 | mb(); | 224 | smp_mb(); |
225 | do_gtod.varp = temp_varp; | 225 | do_gtod.varp = temp_varp; |
226 | do_gtod.var_idx = temp_idx; | 226 | do_gtod.var_idx = temp_idx; |
227 | 227 | ||
228 | ++(systemcfg->tb_update_count); | 228 | ++(systemcfg->tb_update_count); |
229 | wmb(); | 229 | smp_wmb(); |
230 | systemcfg->tb_orig_stamp = new_tb_orig_stamp; | 230 | systemcfg->tb_orig_stamp = new_tb_orig_stamp; |
231 | systemcfg->stamp_xsec = new_stamp_xsec; | 231 | systemcfg->stamp_xsec = new_stamp_xsec; |
232 | wmb(); | 232 | smp_wmb(); |
233 | ++(systemcfg->tb_update_count); | 233 | ++(systemcfg->tb_update_count); |
234 | } | 234 | } |
235 | 235 | ||
@@ -648,7 +648,7 @@ void ppc_adjtimex(void) | |||
648 | temp_varp->tb_to_xs = new_tb_to_xs; | 648 | temp_varp->tb_to_xs = new_tb_to_xs; |
649 | temp_varp->stamp_xsec = new_stamp_xsec; | 649 | temp_varp->stamp_xsec = new_stamp_xsec; |
650 | temp_varp->tb_orig_stamp = do_gtod.varp->tb_orig_stamp; | 650 | temp_varp->tb_orig_stamp = do_gtod.varp->tb_orig_stamp; |
651 | mb(); | 651 | smp_mb(); |
652 | do_gtod.varp = temp_varp; | 652 | do_gtod.varp = temp_varp; |
653 | do_gtod.var_idx = temp_idx; | 653 | do_gtod.var_idx = temp_idx; |
654 | 654 | ||
@@ -662,10 +662,10 @@ void ppc_adjtimex(void) | |||
662 | * loops back and reads them again until this criteria is met. | 662 | * loops back and reads them again until this criteria is met. |
663 | */ | 663 | */ |
664 | ++(systemcfg->tb_update_count); | 664 | ++(systemcfg->tb_update_count); |
665 | wmb(); | 665 | smp_wmb(); |
666 | systemcfg->tb_to_xs = new_tb_to_xs; | 666 | systemcfg->tb_to_xs = new_tb_to_xs; |
667 | systemcfg->stamp_xsec = new_stamp_xsec; | 667 | systemcfg->stamp_xsec = new_stamp_xsec; |
668 | wmb(); | 668 | smp_wmb(); |
669 | ++(systemcfg->tb_update_count); | 669 | ++(systemcfg->tb_update_count); |
670 | 670 | ||
671 | write_sequnlock_irqrestore( &xtime_lock, flags ); | 671 | write_sequnlock_irqrestore( &xtime_lock, flags ); |
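tb_update_count is a hand-rolled sequence counter: the writer bumps it (making it odd), publishes the new timebase values, then bumps it again, so a reader that sees the same even value before and after its reads knows the snapshot is consistent. The reader side looks roughly like this sketch (abbreviated field names, not the actual gettimeofday code):

    #include <asm/system.h>   /* smp_rmb() in 2.6-era trees */

    struct example_sysconf {
        volatile unsigned long tb_update_count;
        unsigned long tb_to_xs;
        unsigned long stamp_xsec;
    };

    static void example_read_snapshot(const struct example_sysconf *sc,
                                      unsigned long *tb_to_xs,
                                      unsigned long *stamp_xsec)
    {
        unsigned long count;

        do {
            count = sc->tb_update_count;
            smp_rmb();                       /* count read before the data reads */
            *tb_to_xs   = sc->tb_to_xs;
            *stamp_xsec = sc->stamp_xsec;
            smp_rmb();                       /* data reads before the recheck */
        } while ((count & 1) || count != sc->tb_update_count);
    }
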
diff --git a/arch/ppc64/kernel/vdso32/Makefile b/arch/ppc64/kernel/vdso32/Makefile index ede2f7e477c2..0b1b0df973eb 100644 --- a/arch/ppc64/kernel/vdso32/Makefile +++ b/arch/ppc64/kernel/vdso32/Makefile | |||
@@ -1,7 +1,7 @@ | |||
1 | 1 | ||
2 | # List of files in the vdso, has to be asm only for now | 2 | # List of files in the vdso, has to be asm only for now |
3 | 3 | ||
4 | obj-vdso32 = sigtramp.o gettimeofday.o datapage.o cacheflush.o | 4 | obj-vdso32 = sigtramp.o gettimeofday.o datapage.o cacheflush.o note.o |
5 | 5 | ||
6 | # Build rules | 6 | # Build rules |
7 | 7 | ||
diff --git a/arch/ppc64/kernel/vdso32/cacheflush.S b/arch/ppc64/kernel/vdso32/cacheflush.S index c74fddb6afd4..0ed7ea721715 100644 --- a/arch/ppc64/kernel/vdso32/cacheflush.S +++ b/arch/ppc64/kernel/vdso32/cacheflush.S | |||
@@ -47,6 +47,7 @@ V_FUNCTION_BEGIN(__kernel_sync_dicache) | |||
47 | addi r6,r6,128 | 47 | addi r6,r6,128 |
48 | bdnz 1b | 48 | bdnz 1b |
49 | isync | 49 | isync |
50 | li r3,0 | ||
50 | blr | 51 | blr |
51 | .cfi_endproc | 52 | .cfi_endproc |
52 | V_FUNCTION_END(__kernel_sync_dicache) | 53 | V_FUNCTION_END(__kernel_sync_dicache) |
@@ -59,6 +60,7 @@ V_FUNCTION_BEGIN(__kernel_sync_dicache_p5) | |||
59 | .cfi_startproc | 60 | .cfi_startproc |
60 | sync | 61 | sync |
61 | isync | 62 | isync |
63 | li r3,0 | ||
62 | blr | 64 | blr |
63 | .cfi_endproc | 65 | .cfi_endproc |
64 | V_FUNCTION_END(__kernel_sync_dicache_p5) | 66 | V_FUNCTION_END(__kernel_sync_dicache_p5) |
diff --git a/arch/ppc64/kernel/vdso32/gettimeofday.S b/arch/ppc64/kernel/vdso32/gettimeofday.S index ca7f415195c4..2b48bf1fb109 100644 --- a/arch/ppc64/kernel/vdso32/gettimeofday.S +++ b/arch/ppc64/kernel/vdso32/gettimeofday.S | |||
@@ -58,6 +58,7 @@ V_FUNCTION_BEGIN(__kernel_gettimeofday) | |||
58 | stw r5,TZONE_TZ_DSTTIME(r11) | 58 | stw r5,TZONE_TZ_DSTTIME(r11) |
59 | 59 | ||
60 | 1: mtlr r12 | 60 | 1: mtlr r12 |
61 | li r3,0 | ||
61 | blr | 62 | blr |
62 | 63 | ||
63 | 2: mr r3,r10 | 64 | 2: mr r3,r10 |
diff --git a/arch/ppc64/kernel/vdso32/note.S b/arch/ppc64/kernel/vdso32/note.S new file mode 100644 index 000000000000..d4b5be4f3d5f --- /dev/null +++ b/arch/ppc64/kernel/vdso32/note.S | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text. | ||
3 | * Here we can supply some information useful to userland. | ||
4 | */ | ||
5 | |||
6 | #include <linux/uts.h> | ||
7 | #include <linux/version.h> | ||
8 | |||
9 | #define ASM_ELF_NOTE_BEGIN(name, flags, vendor, type) \ | ||
10 | .section name, flags; \ | ||
11 | .balign 4; \ | ||
12 | .long 1f - 0f; /* name length */ \ | ||
13 | .long 3f - 2f; /* data length */ \ | ||
14 | .long type; /* note type */ \ | ||
15 | 0: .asciz vendor; /* vendor name */ \ | ||
16 | 1: .balign 4; \ | ||
17 | 2: | ||
18 | |||
19 | #define ASM_ELF_NOTE_END \ | ||
20 | 3: .balign 4; /* pad out section */ \ | ||
21 | .previous | ||
22 | |||
23 | ASM_ELF_NOTE_BEGIN(".note.kernel-version", "a", UTS_SYSNAME, 0) | ||
24 | .long LINUX_VERSION_CODE | ||
25 | ASM_ELF_NOTE_END | ||
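note.S emits a single .note.kernel-version ELF note whose 4-byte descriptor is LINUX_VERSION_CODE, so anything that maps the vDSO can discover the running kernel's version without a system call. The descriptor uses the same packing as the KERNEL_VERSION() macro; decoding it is plain bit-shifting (illustrative, with a sample value):

    #include <stdio.h>

    /* same packing as the kernel's KERNEL_VERSION(a,b,c) macro */
    #define EXAMPLE_KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))

    int main(void)
    {
        unsigned int code = EXAMPLE_KERNEL_VERSION(2, 6, 11);  /* sample descriptor */

        printf("%u.%u.%u\n",
               (code >> 16) & 0xff, (code >> 8) & 0xff, code & 0xff);
        return 0;
    }
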
diff --git a/arch/ppc64/kernel/vdso32/vdso32.lds.S b/arch/ppc64/kernel/vdso32/vdso32.lds.S index cca27bd03a57..11290c902ba3 100644 --- a/arch/ppc64/kernel/vdso32/vdso32.lds.S +++ b/arch/ppc64/kernel/vdso32/vdso32.lds.S | |||
@@ -20,6 +20,8 @@ SECTIONS | |||
20 | .gnu.version_d : { *(.gnu.version_d) } | 20 | .gnu.version_d : { *(.gnu.version_d) } |
21 | .gnu.version_r : { *(.gnu.version_r) } | 21 | .gnu.version_r : { *(.gnu.version_r) } |
22 | 22 | ||
23 | .note : { *(.note.*) } :text :note | ||
24 | |||
23 | . = ALIGN (16); | 25 | . = ALIGN (16); |
24 | .text : | 26 | .text : |
25 | { | 27 | { |
@@ -87,6 +89,7 @@ SECTIONS | |||
87 | PHDRS | 89 | PHDRS |
88 | { | 90 | { |
89 | text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */ | 91 | text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */ |
92 | note PT_NOTE FLAGS(4); /* PF_R */ | ||
90 | dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ | 93 | dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ |
91 | eh_frame_hdr 0x6474e550; /* PT_GNU_EH_FRAME, but ld doesn't match the name */ | 94 | eh_frame_hdr 0x6474e550; /* PT_GNU_EH_FRAME, but ld doesn't match the name */ |
92 | } | 95 | } |
diff --git a/arch/ppc64/kernel/vdso64/Makefile b/arch/ppc64/kernel/vdso64/Makefile index bd3f70b1a384..ab39988452cc 100644 --- a/arch/ppc64/kernel/vdso64/Makefile +++ b/arch/ppc64/kernel/vdso64/Makefile | |||
@@ -1,6 +1,6 @@ | |||
1 | # List of files in the vdso, has to be asm only for now | 1 | # List of files in the vdso, has to be asm only for now |
2 | 2 | ||
3 | obj-vdso64 = sigtramp.o gettimeofday.o datapage.o cacheflush.o | 3 | obj-vdso64 = sigtramp.o gettimeofday.o datapage.o cacheflush.o note.o |
4 | 4 | ||
5 | # Build rules | 5 | # Build rules |
6 | 6 | ||
diff --git a/arch/ppc64/kernel/vdso64/cacheflush.S b/arch/ppc64/kernel/vdso64/cacheflush.S index d9696ffcf334..e0725b7b7003 100644 --- a/arch/ppc64/kernel/vdso64/cacheflush.S +++ b/arch/ppc64/kernel/vdso64/cacheflush.S | |||
@@ -47,6 +47,7 @@ V_FUNCTION_BEGIN(__kernel_sync_dicache) | |||
47 | addi r6,r6,128 | 47 | addi r6,r6,128 |
48 | bdnz 1b | 48 | bdnz 1b |
49 | isync | 49 | isync |
50 | li r3,0 | ||
50 | blr | 51 | blr |
51 | .cfi_endproc | 52 | .cfi_endproc |
52 | V_FUNCTION_END(__kernel_sync_dicache) | 53 | V_FUNCTION_END(__kernel_sync_dicache) |
@@ -59,6 +60,7 @@ V_FUNCTION_BEGIN(__kernel_sync_dicache_p5) | |||
59 | .cfi_startproc | 60 | .cfi_startproc |
60 | sync | 61 | sync |
61 | isync | 62 | isync |
63 | li r3,0 | ||
62 | blr | 64 | blr |
63 | .cfi_endproc | 65 | .cfi_endproc |
64 | V_FUNCTION_END(__kernel_sync_dicache_p5) | 66 | V_FUNCTION_END(__kernel_sync_dicache_p5) |
diff --git a/arch/ppc64/kernel/vdso64/note.S b/arch/ppc64/kernel/vdso64/note.S new file mode 100644 index 000000000000..dc2a509f7e8a --- /dev/null +++ b/arch/ppc64/kernel/vdso64/note.S | |||
@@ -0,0 +1 @@ | |||
#include "../vdso32/note.S" | |||
diff --git a/arch/ppc64/kernel/vdso64/vdso64.lds.S b/arch/ppc64/kernel/vdso64/vdso64.lds.S index 942c815c7bc7..9cb28181da80 100644 --- a/arch/ppc64/kernel/vdso64/vdso64.lds.S +++ b/arch/ppc64/kernel/vdso64/vdso64.lds.S | |||
@@ -18,12 +18,14 @@ SECTIONS | |||
18 | .gnu.version_d : { *(.gnu.version_d) } | 18 | .gnu.version_d : { *(.gnu.version_d) } |
19 | .gnu.version_r : { *(.gnu.version_r) } | 19 | .gnu.version_r : { *(.gnu.version_r) } |
20 | 20 | ||
21 | .note : { *(.note.*) } :text :note | ||
22 | |||
21 | . = ALIGN (16); | 23 | . = ALIGN (16); |
22 | .text : | 24 | .text : |
23 | { | 25 | { |
24 | *(.text .stub .text.* .gnu.linkonce.t.*) | 26 | *(.text .stub .text.* .gnu.linkonce.t.*) |
25 | *(.sfpr .glink) | 27 | *(.sfpr .glink) |
26 | } | 28 | } :text |
27 | PROVIDE (__etext = .); | 29 | PROVIDE (__etext = .); |
28 | PROVIDE (_etext = .); | 30 | PROVIDE (_etext = .); |
29 | PROVIDE (etext = .); | 31 | PROVIDE (etext = .); |
@@ -88,6 +90,7 @@ SECTIONS | |||
88 | PHDRS | 90 | PHDRS |
89 | { | 91 | { |
90 | text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */ | 92 | text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */ |
93 | note PT_NOTE FLAGS(4); /* PF_R */ | ||
91 | dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ | 94 | dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ |
92 | eh_frame_hdr 0x6474e550; /* PT_GNU_EH_FRAME, but ld doesn't match the name */ | 95 | eh_frame_hdr 0x6474e550; /* PT_GNU_EH_FRAME, but ld doesn't match the name */ |
93 | } | 96 | } |
diff --git a/arch/ppc64/kernel/xics.c b/arch/ppc64/kernel/xics.c index eedd1d3c2a10..879f39b90a33 100644 --- a/arch/ppc64/kernel/xics.c +++ b/arch/ppc64/kernel/xics.c | |||
@@ -432,6 +432,7 @@ void xics_cause_IPI(int cpu) | |||
432 | { | 432 | { |
433 | ops->qirr_info(cpu, IPI_PRIORITY); | 433 | ops->qirr_info(cpu, IPI_PRIORITY); |
434 | } | 434 | } |
435 | #endif /* CONFIG_SMP */ | ||
435 | 436 | ||
436 | void xics_setup_cpu(void) | 437 | void xics_setup_cpu(void) |
437 | { | 438 | { |
@@ -439,9 +440,17 @@ void xics_setup_cpu(void) | |||
439 | 440 | ||
440 | ops->cppr_info(cpu, 0xff); | 441 | ops->cppr_info(cpu, 0xff); |
441 | iosync(); | 442 | iosync(); |
442 | } | ||
443 | 443 | ||
444 | #endif /* CONFIG_SMP */ | 444 | /* |
445 | * Put the calling processor into the GIQ. This is really only | ||
446 | * necessary from a secondary thread as the OF start-cpu interface | ||
447 | * performs this function for us on primary threads. | ||
448 | * | ||
449 | * XXX: undo of teardown on kexec needs this too, as may hotplug | ||
450 | */ | ||
451 | rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE, | ||
452 | (1UL << interrupt_server_size) - 1 - default_distrib_server, 1); | ||
453 | } | ||
445 | 454 | ||
446 | void xics_init_IRQ(void) | 455 | void xics_init_IRQ(void) |
447 | { | 456 | { |
@@ -563,8 +572,7 @@ nextnode: | |||
563 | for (; i < NR_IRQS; ++i) | 572 | for (; i < NR_IRQS; ++i) |
564 | get_irq_desc(i)->handler = &xics_pic; | 573 | get_irq_desc(i)->handler = &xics_pic; |
565 | 574 | ||
566 | ops->cppr_info(boot_cpuid, 0xff); | 575 | xics_setup_cpu(); |
567 | iosync(); | ||
568 | 576 | ||
569 | ppc64_boot_msg(0x21, "XICS Done"); | 577 | ppc64_boot_msg(0x21, "XICS Done"); |
570 | } | 578 | } |
diff --git a/arch/ppc64/mm/hash_low.S b/arch/ppc64/mm/hash_low.S index 8c0156a37001..c23d46956dd9 100644 --- a/arch/ppc64/mm/hash_low.S +++ b/arch/ppc64/mm/hash_low.S | |||
@@ -85,7 +85,10 @@ _GLOBAL(__hash_page) | |||
85 | bne- htab_wrong_access | 85 | bne- htab_wrong_access |
86 | /* Check if PTE is busy */ | 86 | /* Check if PTE is busy */ |
87 | andi. r0,r31,_PAGE_BUSY | 87 | andi. r0,r31,_PAGE_BUSY |
88 | bne- 1b | 88 | /* If so, just bail out and refault if needed. Someone else |
89 | * is changing this PTE anyway and might hash it. | ||
90 | */ | ||
91 | bne- bail_ok | ||
89 | /* Prepare new PTE value (turn access RW into DIRTY, then | 92 | /* Prepare new PTE value (turn access RW into DIRTY, then |
90 | * add BUSY,HASHPTE and ACCESSED) | 93 | * add BUSY,HASHPTE and ACCESSED) |
91 | */ | 94 | */ |
@@ -215,6 +218,10 @@ _GLOBAL(htab_call_hpte_remove) | |||
215 | /* Try all again */ | 218 | /* Try all again */ |
216 | b htab_insert_pte | 219 | b htab_insert_pte |
217 | 220 | ||
221 | bail_ok: | ||
222 | li r3,0 | ||
223 | b bail | ||
224 | |||
218 | htab_pte_insert_ok: | 225 | htab_pte_insert_ok: |
219 | /* Insert slot number & secondary bit in PTE */ | 226 | /* Insert slot number & secondary bit in PTE */ |
220 | rldimi r30,r3,12,63-15 | 227 | rldimi r30,r3,12,63-15 |
diff --git a/arch/ppc64/mm/hash_native.c b/arch/ppc64/mm/hash_native.c index 144657e0c3d5..52b6b9305341 100644 --- a/arch/ppc64/mm/hash_native.c +++ b/arch/ppc64/mm/hash_native.c | |||
@@ -320,8 +320,7 @@ static void native_flush_hash_range(unsigned long context, | |||
320 | 320 | ||
321 | j = 0; | 321 | j = 0; |
322 | for (i = 0; i < number; i++) { | 322 | for (i = 0; i < number; i++) { |
323 | if ((batch->addr[i] >= USER_START) && | 323 | if (batch->addr[i] < KERNELBASE) |
324 | (batch->addr[i] <= USER_END)) | ||
325 | vsid = get_vsid(context, batch->addr[i]); | 324 | vsid = get_vsid(context, batch->addr[i]); |
326 | else | 325 | else |
327 | vsid = get_kernel_vsid(batch->addr[i]); | 326 | vsid = get_kernel_vsid(batch->addr[i]); |
diff --git a/arch/ppc64/mm/hash_utils.c b/arch/ppc64/mm/hash_utils.c index e48be12f518c..0a0f97008d02 100644 --- a/arch/ppc64/mm/hash_utils.c +++ b/arch/ppc64/mm/hash_utils.c | |||
@@ -298,24 +298,23 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) | |||
298 | int local = 0; | 298 | int local = 0; |
299 | cpumask_t tmp; | 299 | cpumask_t tmp; |
300 | 300 | ||
301 | if ((ea & ~REGION_MASK) > EADDR_MASK) | ||
302 | return 1; | ||
303 | |||
301 | switch (REGION_ID(ea)) { | 304 | switch (REGION_ID(ea)) { |
302 | case USER_REGION_ID: | 305 | case USER_REGION_ID: |
303 | user_region = 1; | 306 | user_region = 1; |
304 | mm = current->mm; | 307 | mm = current->mm; |
305 | if ((ea > USER_END) || (! mm)) | 308 | if (! mm) |
306 | return 1; | 309 | return 1; |
307 | 310 | ||
308 | vsid = get_vsid(mm->context.id, ea); | 311 | vsid = get_vsid(mm->context.id, ea); |
309 | break; | 312 | break; |
310 | case IO_REGION_ID: | 313 | case IO_REGION_ID: |
311 | if (ea > IMALLOC_END) | ||
312 | return 1; | ||
313 | mm = &ioremap_mm; | 314 | mm = &ioremap_mm; |
314 | vsid = get_kernel_vsid(ea); | 315 | vsid = get_kernel_vsid(ea); |
315 | break; | 316 | break; |
316 | case VMALLOC_REGION_ID: | 317 | case VMALLOC_REGION_ID: |
317 | if (ea > VMALLOC_END) | ||
318 | return 1; | ||
319 | mm = &init_mm; | 318 | mm = &init_mm; |
320 | vsid = get_kernel_vsid(ea); | 319 | vsid = get_kernel_vsid(ea); |
321 | break; | 320 | break; |
@@ -362,7 +361,7 @@ void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte, | |||
362 | unsigned long vsid, vpn, va, hash, secondary, slot; | 361 | unsigned long vsid, vpn, va, hash, secondary, slot; |
363 | unsigned long huge = pte_huge(pte); | 362 | unsigned long huge = pte_huge(pte); |
364 | 363 | ||
365 | if ((ea >= USER_START) && (ea <= USER_END)) | 364 | if (ea < KERNELBASE) |
366 | vsid = get_vsid(context, ea); | 365 | vsid = get_vsid(context, ea); |
367 | else | 366 | else |
368 | vsid = get_kernel_vsid(ea); | 367 | vsid = get_kernel_vsid(ea); |
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c index 390296efe3e0..d3bf86a5c1ad 100644 --- a/arch/ppc64/mm/hugetlbpage.c +++ b/arch/ppc64/mm/hugetlbpage.c | |||
@@ -42,7 +42,7 @@ static inline int hugepgd_index(unsigned long addr) | |||
42 | return (addr & ~REGION_MASK) >> HUGEPGDIR_SHIFT; | 42 | return (addr & ~REGION_MASK) >> HUGEPGDIR_SHIFT; |
43 | } | 43 | } |
44 | 44 | ||
45 | static pgd_t *hugepgd_offset(struct mm_struct *mm, unsigned long addr) | 45 | static pud_t *hugepgd_offset(struct mm_struct *mm, unsigned long addr) |
46 | { | 46 | { |
47 | int index; | 47 | int index; |
48 | 48 | ||
@@ -52,21 +52,21 @@ static pgd_t *hugepgd_offset(struct mm_struct *mm, unsigned long addr) | |||
52 | 52 | ||
53 | index = hugepgd_index(addr); | 53 | index = hugepgd_index(addr); |
54 | BUG_ON(index >= PTRS_PER_HUGEPGD); | 54 | BUG_ON(index >= PTRS_PER_HUGEPGD); |
55 | return mm->context.huge_pgdir + index; | 55 | return (pud_t *)(mm->context.huge_pgdir + index); |
56 | } | 56 | } |
57 | 57 | ||
58 | static inline pte_t *hugepte_offset(pgd_t *dir, unsigned long addr) | 58 | static inline pte_t *hugepte_offset(pud_t *dir, unsigned long addr) |
59 | { | 59 | { |
60 | int index; | 60 | int index; |
61 | 61 | ||
62 | if (pgd_none(*dir)) | 62 | if (pud_none(*dir)) |
63 | return NULL; | 63 | return NULL; |
64 | 64 | ||
65 | index = (addr >> HPAGE_SHIFT) % PTRS_PER_HUGEPTE; | 65 | index = (addr >> HPAGE_SHIFT) % PTRS_PER_HUGEPTE; |
66 | return (pte_t *)pgd_page(*dir) + index; | 66 | return (pte_t *)pud_page(*dir) + index; |
67 | } | 67 | } |
68 | 68 | ||
69 | static pgd_t *hugepgd_alloc(struct mm_struct *mm, unsigned long addr) | 69 | static pud_t *hugepgd_alloc(struct mm_struct *mm, unsigned long addr) |
70 | { | 70 | { |
71 | BUG_ON(! in_hugepage_area(mm->context, addr)); | 71 | BUG_ON(! in_hugepage_area(mm->context, addr)); |
72 | 72 | ||
@@ -90,10 +90,9 @@ static pgd_t *hugepgd_alloc(struct mm_struct *mm, unsigned long addr) | |||
90 | return hugepgd_offset(mm, addr); | 90 | return hugepgd_offset(mm, addr); |
91 | } | 91 | } |
92 | 92 | ||
93 | static pte_t *hugepte_alloc(struct mm_struct *mm, pgd_t *dir, | 93 | static pte_t *hugepte_alloc(struct mm_struct *mm, pud_t *dir, unsigned long addr) |
94 | unsigned long addr) | ||
95 | { | 94 | { |
96 | if (! pgd_present(*dir)) { | 95 | if (! pud_present(*dir)) { |
97 | pte_t *new; | 96 | pte_t *new; |
98 | 97 | ||
99 | spin_unlock(&mm->page_table_lock); | 98 | spin_unlock(&mm->page_table_lock); |
@@ -104,7 +103,7 @@ static pte_t *hugepte_alloc(struct mm_struct *mm, pgd_t *dir, | |||
104 | * Because we dropped the lock, we should re-check the | 103 | * Because we dropped the lock, we should re-check the |
105 | * entry, as somebody else could have populated it.. | 104 | * entry, as somebody else could have populated it.. |
106 | */ | 105 | */ |
107 | if (pgd_present(*dir)) { | 106 | if (pud_present(*dir)) { |
108 | if (new) | 107 | if (new) |
109 | kmem_cache_free(zero_cache, new); | 108 | kmem_cache_free(zero_cache, new); |
110 | } else { | 109 | } else { |
@@ -115,7 +114,7 @@ static pte_t *hugepte_alloc(struct mm_struct *mm, pgd_t *dir, | |||
115 | ptepage = virt_to_page(new); | 114 | ptepage = virt_to_page(new); |
116 | ptepage->mapping = (void *) mm; | 115 | ptepage->mapping = (void *) mm; |
117 | ptepage->index = addr & HUGEPGDIR_MASK; | 116 | ptepage->index = addr & HUGEPGDIR_MASK; |
118 | pgd_populate(mm, dir, new); | 117 | pud_populate(mm, dir, new); |
119 | } | 118 | } |
120 | } | 119 | } |
121 | 120 | ||
@@ -124,28 +123,28 @@ static pte_t *hugepte_alloc(struct mm_struct *mm, pgd_t *dir, | |||
124 | 123 | ||
125 | static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) | 124 | static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) |
126 | { | 125 | { |
127 | pgd_t *pgd; | 126 | pud_t *pud; |
128 | 127 | ||
129 | BUG_ON(! in_hugepage_area(mm->context, addr)); | 128 | BUG_ON(! in_hugepage_area(mm->context, addr)); |
130 | 129 | ||
131 | pgd = hugepgd_offset(mm, addr); | 130 | pud = hugepgd_offset(mm, addr); |
132 | if (! pgd) | 131 | if (! pud) |
133 | return NULL; | 132 | return NULL; |
134 | 133 | ||
135 | return hugepte_offset(pgd, addr); | 134 | return hugepte_offset(pud, addr); |
136 | } | 135 | } |
137 | 136 | ||
138 | static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) | 137 | static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) |
139 | { | 138 | { |
140 | pgd_t *pgd; | 139 | pud_t *pud; |
141 | 140 | ||
142 | BUG_ON(! in_hugepage_area(mm->context, addr)); | 141 | BUG_ON(! in_hugepage_area(mm->context, addr)); |
143 | 142 | ||
144 | pgd = hugepgd_alloc(mm, addr); | 143 | pud = hugepgd_alloc(mm, addr); |
145 | if (! pgd) | 144 | if (! pud) |
146 | return NULL; | 145 | return NULL; |
147 | 146 | ||
148 | return hugepte_alloc(mm, pgd, addr); | 147 | return hugepte_alloc(mm, pud, addr); |
149 | } | 148 | } |
150 | 149 | ||
151 | static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, | 150 | static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, |
@@ -709,10 +708,10 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm) | |||
709 | 708 | ||
710 | /* cleanup any hugepte pages leftover */ | 709 | /* cleanup any hugepte pages leftover */ |
711 | for (i = 0; i < PTRS_PER_HUGEPGD; i++) { | 710 | for (i = 0; i < PTRS_PER_HUGEPGD; i++) { |
712 | pgd_t *pgd = pgdir + i; | 711 | pud_t *pud = (pud_t *)(pgdir + i); |
713 | 712 | ||
714 | if (! pgd_none(*pgd)) { | 713 | if (! pud_none(*pud)) { |
715 | pte_t *pte = (pte_t *)pgd_page(*pgd); | 714 | pte_t *pte = (pte_t *)pud_page(*pud); |
716 | struct page *ptepage = virt_to_page(pte); | 715 | struct page *ptepage = virt_to_page(pte); |
717 | 716 | ||
718 | ptepage->mapping = NULL; | 717 | ptepage->mapping = NULL; |
@@ -720,7 +719,7 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm) | |||
720 | BUG_ON(memcmp(pte, empty_zero_page, PAGE_SIZE)); | 719 | BUG_ON(memcmp(pte, empty_zero_page, PAGE_SIZE)); |
721 | kmem_cache_free(zero_cache, pte); | 720 | kmem_cache_free(zero_cache, pte); |
722 | } | 721 | } |
723 | pgd_clear(pgd); | 722 | pud_clear(pud); |
724 | } | 723 | } |
725 | 724 | ||
726 | BUG_ON(memcmp(pgdir, empty_zero_page, PAGE_SIZE)); | 725 | BUG_ON(memcmp(pgdir, empty_zero_page, PAGE_SIZE)); |
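These hugetlbpage.c changes are fallout from the generic four-level page-table conversion: the entries the huge-page code manages sit one level below the pgd, so they are now typed and manipulated as puds (pud_none/pud_present/pud_populate/pud_page) rather than pgds. For comparison, the canonical full walk through all four levels (generic sketch, not the hugepage short-cut):

    #include <linux/mm.h>

    /* walk mm's page tables down to the pte for addr; NULL if a level is empty */
    static pte_t *example_walk(struct mm_struct *mm, unsigned long addr)
    {
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd))
            return NULL;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
            return NULL;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
            return NULL;

        /* caller is expected to pair this with pte_unmap() */
        return pte_offset_map(pmd, addr);
    }
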
diff --git a/arch/ppc64/mm/imalloc.c b/arch/ppc64/mm/imalloc.c index 9d92b0d9cde5..cb8727f3267a 100644 --- a/arch/ppc64/mm/imalloc.c +++ b/arch/ppc64/mm/imalloc.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <asm/pgalloc.h> | 14 | #include <asm/pgalloc.h> |
15 | #include <asm/pgtable.h> | 15 | #include <asm/pgtable.h> |
16 | #include <asm/semaphore.h> | 16 | #include <asm/semaphore.h> |
17 | #include <asm/imalloc.h> | ||
17 | 18 | ||
18 | static DECLARE_MUTEX(imlist_sem); | 19 | static DECLARE_MUTEX(imlist_sem); |
19 | struct vm_struct * imlist = NULL; | 20 | struct vm_struct * imlist = NULL; |
@@ -23,11 +24,11 @@ static int get_free_im_addr(unsigned long size, unsigned long *im_addr) | |||
23 | unsigned long addr; | 24 | unsigned long addr; |
24 | struct vm_struct **p, *tmp; | 25 | struct vm_struct **p, *tmp; |
25 | 26 | ||
26 | addr = IMALLOC_START; | 27 | addr = ioremap_bot; |
27 | for (p = &imlist; (tmp = *p) ; p = &tmp->next) { | 28 | for (p = &imlist; (tmp = *p) ; p = &tmp->next) { |
28 | if (size + addr < (unsigned long) tmp->addr) | 29 | if (size + addr < (unsigned long) tmp->addr) |
29 | break; | 30 | break; |
30 | if ((unsigned long)tmp->addr >= IMALLOC_START) | 31 | if ((unsigned long)tmp->addr >= ioremap_bot) |
31 | addr = tmp->size + (unsigned long) tmp->addr; | 32 | addr = tmp->size + (unsigned long) tmp->addr; |
32 | if (addr > IMALLOC_END-size) | 33 | if (addr > IMALLOC_END-size) |
33 | return 1; | 34 | return 1; |
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c index a7149b9fc35c..4b42aff74d73 100644 --- a/arch/ppc64/mm/init.c +++ b/arch/ppc64/mm/init.c | |||
@@ -64,6 +64,7 @@ | |||
64 | #include <asm/iommu.h> | 64 | #include <asm/iommu.h> |
65 | #include <asm/abs_addr.h> | 65 | #include <asm/abs_addr.h> |
66 | #include <asm/vdso.h> | 66 | #include <asm/vdso.h> |
67 | #include <asm/imalloc.h> | ||
67 | 68 | ||
68 | int mem_init_done; | 69 | int mem_init_done; |
69 | unsigned long ioremap_bot = IMALLOC_BASE; | 70 | unsigned long ioremap_bot = IMALLOC_BASE; |
@@ -136,14 +137,78 @@ void iounmap(volatile void __iomem *addr) | |||
136 | 137 | ||
137 | #else | 138 | #else |
138 | 139 | ||
140 | static void unmap_im_area_pte(pmd_t *pmd, unsigned long addr, | ||
141 | unsigned long end) | ||
142 | { | ||
143 | pte_t *pte; | ||
144 | |||
145 | pte = pte_offset_kernel(pmd, addr); | ||
146 | do { | ||
147 | pte_t ptent = ptep_get_and_clear(&ioremap_mm, addr, pte); | ||
148 | WARN_ON(!pte_none(ptent) && !pte_present(ptent)); | ||
149 | } while (pte++, addr += PAGE_SIZE, addr != end); | ||
150 | } | ||
151 | |||
152 | static inline void unmap_im_area_pmd(pud_t *pud, unsigned long addr, | ||
153 | unsigned long end) | ||
154 | { | ||
155 | pmd_t *pmd; | ||
156 | unsigned long next; | ||
157 | |||
158 | pmd = pmd_offset(pud, addr); | ||
159 | do { | ||
160 | next = pmd_addr_end(addr, end); | ||
161 | if (pmd_none_or_clear_bad(pmd)) | ||
162 | continue; | ||
163 | unmap_im_area_pte(pmd, addr, next); | ||
164 | } while (pmd++, addr = next, addr != end); | ||
165 | } | ||
166 | |||
167 | static inline void unmap_im_area_pud(pgd_t *pgd, unsigned long addr, | ||
168 | unsigned long end) | ||
169 | { | ||
170 | pud_t *pud; | ||
171 | unsigned long next; | ||
172 | |||
173 | pud = pud_offset(pgd, addr); | ||
174 | do { | ||
175 | next = pud_addr_end(addr, end); | ||
176 | if (pud_none_or_clear_bad(pud)) | ||
177 | continue; | ||
178 | unmap_im_area_pmd(pud, addr, next); | ||
179 | } while (pud++, addr = next, addr != end); | ||
180 | } | ||
181 | |||
182 | static void unmap_im_area(unsigned long addr, unsigned long end) | ||
183 | { | ||
184 | struct mm_struct *mm = &ioremap_mm; | ||
185 | unsigned long next; | ||
186 | pgd_t *pgd; | ||
187 | |||
188 | spin_lock(&mm->page_table_lock); | ||
189 | |||
190 | pgd = pgd_offset_i(addr); | ||
191 | flush_cache_vunmap(addr, end); | ||
192 | do { | ||
193 | next = pgd_addr_end(addr, end); | ||
194 | if (pgd_none_or_clear_bad(pgd)) | ||
195 | continue; | ||
196 | unmap_im_area_pud(pgd, addr, next); | ||
197 | } while (pgd++, addr = next, addr != end); | ||
198 | flush_tlb_kernel_range(start, end); | ||
199 | |||
200 | spin_unlock(&mm->page_table_lock); | ||
201 | } | ||
202 | |||
139 | /* | 203 | /* |
140 | * map_io_page currently only called by __ioremap | 204 | * map_io_page currently only called by __ioremap |
141 | * map_io_page adds an entry to the ioremap page table | 205 | * map_io_page adds an entry to the ioremap page table |
142 | * and adds an entry to the HPT, possibly bolting it | 206 | * and adds an entry to the HPT, possibly bolting it |
143 | */ | 207 | */ |
144 | static void map_io_page(unsigned long ea, unsigned long pa, int flags) | 208 | static int map_io_page(unsigned long ea, unsigned long pa, int flags) |
145 | { | 209 | { |
146 | pgd_t *pgdp; | 210 | pgd_t *pgdp; |
211 | pud_t *pudp; | ||
147 | pmd_t *pmdp; | 212 | pmd_t *pmdp; |
148 | pte_t *ptep; | 213 | pte_t *ptep; |
149 | unsigned long vsid; | 214 | unsigned long vsid; |
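
The replacement unmap_im_area_pte/pmd/pud() walkers above follow the generic four-level pattern: each level iterates its own table and clamps the sub-range handed to the next level with pmd_addr_end()/pud_addr_end()/pgd_addr_end(), so a walker never crosses the region covered by a single entry of the level above. A minimal stand-alone sketch of that clamping idiom (LEVEL_SIZE is a made-up stand-in for PMD_SIZE/PUD_SIZE/PGDIR_SIZE, not a kernel symbol):

    #include <stdio.h>

    #define LEVEL_SIZE  (1UL << 20)              /* stand-in for e.g. PMD_SIZE */
    #define LEVEL_MASK  (~(LEVEL_SIZE - 1))

    /* Same shape as pgd/pud/pmd_addr_end(): advance to the next table-entry
     * boundary, but never past the overall end of the range. */
    static unsigned long level_addr_end(unsigned long addr, unsigned long end)
    {
            unsigned long boundary = (addr + LEVEL_SIZE) & LEVEL_MASK;

            return (boundary - 1 < end - 1) ? boundary : end;
    }

    int main(void)
    {
            unsigned long addr = 0x1f0000UL, end = 0x310000UL, next;

            do {
                    next = level_addr_end(addr, end);
                    printf("entry covers [%#lx, %#lx)\n", addr, next);
            } while (addr = next, addr != end);

            return 0;
    }
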
@@ -151,9 +216,15 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags) | |||
151 | if (mem_init_done) { | 216 | if (mem_init_done) { |
152 | spin_lock(&ioremap_mm.page_table_lock); | 217 | spin_lock(&ioremap_mm.page_table_lock); |
153 | pgdp = pgd_offset_i(ea); | 218 | pgdp = pgd_offset_i(ea); |
154 | pmdp = pmd_alloc(&ioremap_mm, pgdp, ea); | 219 | pudp = pud_alloc(&ioremap_mm, pgdp, ea); |
220 | if (!pudp) | ||
221 | return -ENOMEM; | ||
222 | pmdp = pmd_alloc(&ioremap_mm, pudp, ea); | ||
223 | if (!pmdp) | ||
224 | return -ENOMEM; | ||
155 | ptep = pte_alloc_kernel(&ioremap_mm, pmdp, ea); | 225 | ptep = pte_alloc_kernel(&ioremap_mm, pmdp, ea); |
156 | | 226 | if (!ptep)
227 | return -ENOMEM; | ||
157 | pa = abs_to_phys(pa); | 228 | pa = abs_to_phys(pa); |
158 | set_pte_at(&ioremap_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, | 229 | set_pte_at(&ioremap_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, |
159 | __pgprot(flags))); | 230 | __pgprot(flags))); |
@@ -181,6 +252,7 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags) | |||
181 | panic("map_io_page: could not insert mapping"); | 252 | panic("map_io_page: could not insert mapping"); |
182 | } | 253 | } |
183 | } | 254 | } |
255 | return 0; | ||
184 | } | 256 | } |
185 | 257 | ||
186 | 258 | ||
@@ -194,9 +266,14 @@ static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa, | |||
194 | flags |= pgprot_val(PAGE_KERNEL); | 266 | flags |= pgprot_val(PAGE_KERNEL); |
195 | 267 | ||
196 | for (i = 0; i < size; i += PAGE_SIZE) | 268 | for (i = 0; i < size; i += PAGE_SIZE) |
197 | map_io_page(ea+i, pa+i, flags); | 269 | if (map_io_page(ea+i, pa+i, flags)) |
270 | goto failure; | ||
198 | 271 | ||
199 | return (void __iomem *) (ea + (addr & ~PAGE_MASK)); | 272 | return (void __iomem *) (ea + (addr & ~PAGE_MASK)); |
273 | failure: | ||
274 | if (mem_init_done) | ||
275 | unmap_im_area(ea, ea + size); | ||
276 | return NULL; | ||
200 | } | 277 | } |
201 | 278 | ||
202 | 279 | ||
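
With map_io_page() now fallible (pud_alloc()/pmd_alloc()/pte_alloc_kernel() can return NULL, turned into -ENOMEM), __ioremap_com() tears down the range instead of returning a half-populated mapping. A small stand-alone sketch of the same map-or-roll-back pattern, with toy map_one()/unmap_range() helpers standing in for map_io_page() and unmap_im_area():

    #include <stdio.h>

    #define PAGE_SZ 4096UL

    /* Toy stand-in for map_io_page(): pretend the third page fails. */
    static int map_one(unsigned long ea)
    {
            return (ea / PAGE_SZ) % 8 == 2 ? -1 : 0;
    }

    /* Toy stand-in for unmap_im_area(). */
    static void unmap_range(unsigned long ea, unsigned long end)
    {
            printf("rolling back [%#lx, %#lx)\n", ea, end);
    }

    static void *map_region(unsigned long ea, unsigned long size)
    {
            unsigned long i;

            for (i = 0; i < size; i += PAGE_SZ)
                    if (map_one(ea + i))
                            goto failure;
            return (void *)ea;

    failure:
            /* Unmapping the full range is safe: the walkers simply skip
             * entries that were never populated. */
            unmap_range(ea, ea + size);
            return NULL;
    }

    int main(void)
    {
            if (!map_region(0x10000UL, 4 * PAGE_SZ))
                    printf("mapping failed cleanly\n");
            return 0;
    }
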
@@ -206,10 +283,11 @@ ioremap(unsigned long addr, unsigned long size) | |||
206 | return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED); | 283 | return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED); |
207 | } | 284 | } |
208 | 285 | ||
209 | void __iomem * | 286 | void __iomem * __ioremap(unsigned long addr, unsigned long size, |
210 | __ioremap(unsigned long addr, unsigned long size, unsigned long flags) | 287 | unsigned long flags) |
211 | { | 288 | { |
212 | unsigned long pa, ea; | 289 | unsigned long pa, ea; |
290 | void __iomem *ret; | ||
213 | 291 | ||
214 | /* | 292 | /* |
215 | * Choose an address to map it to. | 293 | * Choose an address to map it to. |
@@ -232,12 +310,16 @@ __ioremap(unsigned long addr, unsigned long size, unsigned long flags) | |||
232 | if (area == NULL) | 310 | if (area == NULL) |
233 | return NULL; | 311 | return NULL; |
234 | ea = (unsigned long)(area->addr); | 312 | ea = (unsigned long)(area->addr); |
313 | ret = __ioremap_com(addr, pa, ea, size, flags); | ||
314 | if (!ret) | ||
315 | im_free(area->addr); | ||
235 | } else { | 316 | } else { |
236 | ea = ioremap_bot; | 317 | ea = ioremap_bot; |
237 | ioremap_bot += size; | 318 | ret = __ioremap_com(addr, pa, ea, size, flags); |
319 | if (ret) | ||
320 | ioremap_bot += size; | ||
238 | } | 321 | } |
239 | 322 | return ret; | |
240 | return __ioremap_com(addr, pa, ea, size, flags); | ||
241 | } | 323 | } |
242 | 324 | ||
243 | #define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK)) | 325 | #define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK)) |
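
Both __ioremap() paths are now conditional on the mapping actually succeeding: the imalloc path releases its reservation with im_free() on failure, and the early bump-pointer path only advances ioremap_bot once __ioremap_com() returns a valid address, so a failure no longer leaves an unmapped hole in the ioremap space. A toy sketch of the bump-pointer half of that logic (bump_base and try_map() are invented for the example):

    #include <stdio.h>

    static unsigned long bump_base = 0xd0000000UL;  /* invented starting point */

    /* Pretend the second mapping request fails. */
    static int try_map(unsigned long ea, unsigned long size)
    {
            static int calls;
            (void)ea; (void)size;
            return ++calls == 2 ? -1 : 0;
    }

    static unsigned long alloc_early(unsigned long size)
    {
            unsigned long ea = bump_base;

            if (try_map(ea, size))
                    return 0;          /* failure: do not advance the pointer */
            bump_base += size;         /* success: commit the reservation */
            return ea;
    }

    int main(void)
    {
            printf("first:  %#lx\n", alloc_early(0x10000));
            printf("second: %#lx\n", alloc_early(0x10000));
            printf("third:  %#lx\n", alloc_early(0x10000));
            return 0;
    }
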
@@ -246,6 +328,7 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea, | |||
246 | unsigned long size, unsigned long flags) | 328 | unsigned long size, unsigned long flags) |
247 | { | 329 | { |
248 | struct vm_struct *area; | 330 | struct vm_struct *area; |
331 | void __iomem *ret; | ||
249 | 332 | ||
250 | /* For now, require page-aligned values for pa, ea, and size */ | 333 | /* For now, require page-aligned values for pa, ea, and size */ |
251 | if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) || | 334 | if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) || |
@@ -276,7 +359,12 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea, | |||
276 | } | 359 | } |
277 | } | 360 | } |
278 | 361 | ||
279 | if (__ioremap_com(pa, pa, ea, size, flags) != (void *) ea) { | 362 | ret = __ioremap_com(pa, pa, ea, size, flags); |
363 | if (ret == NULL) { | ||
364 | printk(KERN_ERR "ioremap_explicit() allocation failure !\n"); | ||
365 | return 1; | ||
366 | } | ||
367 | if (ret != (void *) ea) { | ||
280 | printk(KERN_ERR "__ioremap_com() returned unexpected addr\n"); | 368 | printk(KERN_ERR "__ioremap_com() returned unexpected addr\n"); |
281 | return 1; | 369 | return 1; |
282 | } | 370 | } |
@@ -284,69 +372,6 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea, | |||
284 | return 0; | 372 | return 0; |
285 | } | 373 | } |
286 | 374 | ||
287 | static void unmap_im_area_pte(pmd_t *pmd, unsigned long address, | ||
288 | unsigned long size) | ||
289 | { | ||
290 | unsigned long base, end; | ||
291 | pte_t *pte; | ||
292 | |||
293 | if (pmd_none(*pmd)) | ||
294 | return; | ||
295 | if (pmd_bad(*pmd)) { | ||
296 | pmd_ERROR(*pmd); | ||
297 | pmd_clear(pmd); | ||
298 | return; | ||
299 | } | ||
300 | |||
301 | pte = pte_offset_kernel(pmd, address); | ||
302 | base = address & PMD_MASK; | ||
303 | address &= ~PMD_MASK; | ||
304 | end = address + size; | ||
305 | if (end > PMD_SIZE) | ||
306 | end = PMD_SIZE; | ||
307 | |||
308 | do { | ||
309 | pte_t page; | ||
310 | page = ptep_get_and_clear(&ioremap_mm, base + address, pte); | ||
311 | address += PAGE_SIZE; | ||
312 | pte++; | ||
313 | if (pte_none(page)) | ||
314 | continue; | ||
315 | if (pte_present(page)) | ||
316 | continue; | ||
317 | printk(KERN_CRIT "Whee.. Swapped out page in kernel page" | ||
318 | " table\n"); | ||
319 | } while (address < end); | ||
320 | } | ||
321 | |||
322 | static void unmap_im_area_pmd(pgd_t *dir, unsigned long address, | ||
323 | unsigned long size) | ||
324 | { | ||
325 | unsigned long base, end; | ||
326 | pmd_t *pmd; | ||
327 | |||
328 | if (pgd_none(*dir)) | ||
329 | return; | ||
330 | if (pgd_bad(*dir)) { | ||
331 | pgd_ERROR(*dir); | ||
332 | pgd_clear(dir); | ||
333 | return; | ||
334 | } | ||
335 | |||
336 | pmd = pmd_offset(dir, address); | ||
337 | base = address & PGDIR_MASK; | ||
338 | address &= ~PGDIR_MASK; | ||
339 | end = address + size; | ||
340 | if (end > PGDIR_SIZE) | ||
341 | end = PGDIR_SIZE; | ||
342 | |||
343 | do { | ||
344 | unmap_im_area_pte(pmd, base + address, end - address); | ||
345 | address = (address + PMD_SIZE) & PMD_MASK; | ||
346 | pmd++; | ||
347 | } while (address < end); | ||
348 | } | ||
349 | |||
350 | /* | 375 | /* |
351 | * Unmap an IO region and remove it from imalloc'd list. | 376 | * Unmap an IO region and remove it from imalloc'd list. |
352 | * Access to IO memory should be serialized by driver. | 377 | * Access to IO memory should be serialized by driver. |
@@ -356,39 +381,19 @@ static void unmap_im_area_pmd(pgd_t *dir, unsigned long address, | |||
356 | */ | 381 | */ |
357 | void iounmap(volatile void __iomem *token) | 382 | void iounmap(volatile void __iomem *token) |
358 | { | 383 | { |
359 | unsigned long address, start, end, size; | 384 | unsigned long address, size; |
360 | struct mm_struct *mm; | ||
361 | pgd_t *dir; | ||
362 | void *addr; | 385 | void *addr; |
363 | 386 | ||
364 | if (!mem_init_done) { | 387 | if (!mem_init_done) |
365 | return; | 388 | return; |
366 | } | ||
367 | 389 | ||
368 | addr = (void *) ((unsigned long __force) token & PAGE_MASK); | 390 | addr = (void *) ((unsigned long __force) token & PAGE_MASK); |
369 | 391 | ||
370 | if ((size = im_free(addr)) == 0) { | 392 | if ((size = im_free(addr)) == 0) |
371 | return; | 393 | return; |
372 | } | ||
373 | 394 | ||
374 | address = (unsigned long)addr; | 395 | address = (unsigned long)addr; |
375 | start = address; | 396 | unmap_im_area(address, address + size); |
376 | end = address + size; | ||
377 | |||
378 | mm = &ioremap_mm; | ||
379 | spin_lock(&mm->page_table_lock); | ||
380 | |||
381 | dir = pgd_offset_i(address); | ||
382 | flush_cache_vunmap(address, end); | ||
383 | do { | ||
384 | unmap_im_area_pmd(dir, address, end - address); | ||
385 | address = (address + PGDIR_SIZE) & PGDIR_MASK; | ||
386 | dir++; | ||
387 | } while (address && (address < end)); | ||
388 | flush_tlb_kernel_range(start, end); | ||
389 | |||
390 | spin_unlock(&mm->page_table_lock); | ||
391 | return; | ||
392 | } | 397 | } |
393 | 398 | ||
394 | static int iounmap_subset_regions(unsigned long addr, unsigned long size) | 399 | static int iounmap_subset_regions(unsigned long addr, unsigned long size) |
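
With the old open-coded page-table loop folded into unmap_im_area(), iounmap() reduces to: mask the token down to a page boundary, let im_free() return the size of the registered region (0 if the token was bogus), and unmap exactly that range. The usual driver-side pairing, sketched with an invented physical address and register offset:

    #include <asm/io.h>
    #include <linux/errno.h>

    /* Illustrative only: bar_phys and the 0x10 offset are placeholders. */
    static void __iomem *regs;

    static int example_setup(unsigned long bar_phys)
    {
            regs = ioremap(bar_phys, 0x1000);       /* non-cacheable, guarded */
            if (!regs)
                    return -ENOMEM;
            writel(0x1, regs + 0x10);               /* poke an invented register */
            return 0;
    }

    static void example_teardown(void)
    {
            if (regs)
                    iounmap(regs);          /* also releases the imalloc region */
    }
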
@@ -664,7 +669,7 @@ void __init paging_init(void) | |||
664 | zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT; | 669 | zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT; |
665 | zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT; | 670 | zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT; |
666 | 671 | ||
667 | free_area_init_node(0, &contig_page_data, zones_size, | 672 | free_area_init_node(0, NODE_DATA(0), zones_size, |
668 | __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size); | 673 | __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size); |
669 | } | 674 | } |
670 | #endif /* CONFIG_DISCONTIGMEM */ | 675 | #endif /* CONFIG_DISCONTIGMEM */ |
diff --git a/arch/ppc64/mm/slb.c b/arch/ppc64/mm/slb.c index 6a20773f695d..244150a0bc18 100644 --- a/arch/ppc64/mm/slb.c +++ b/arch/ppc64/mm/slb.c | |||
@@ -33,8 +33,8 @@ static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags) | |||
33 | return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags; | 33 | return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags; |
34 | } | 34 | } |
35 | 35 | ||
36 | static inline void create_slbe(unsigned long ea, unsigned long vsid, | 36 | static inline void create_slbe(unsigned long ea, unsigned long flags, |
37 | unsigned long flags, unsigned long entry) | 37 | unsigned long entry) |
38 | { | 38 | { |
39 | asm volatile("slbmte %0,%1" : | 39 | asm volatile("slbmte %0,%1" : |
40 | : "r" (mk_vsid_data(ea, flags)), | 40 | : "r" (mk_vsid_data(ea, flags)), |
@@ -145,9 +145,8 @@ void slb_initialize(void) | |||
145 | asm volatile("isync":::"memory"); | 145 | asm volatile("isync":::"memory"); |
146 | asm volatile("slbmte %0,%0"::"r" (0) : "memory"); | 146 | asm volatile("slbmte %0,%0"::"r" (0) : "memory"); |
147 | asm volatile("isync; slbia; isync":::"memory"); | 147 | asm volatile("isync; slbia; isync":::"memory"); |
148 | create_slbe(KERNELBASE, get_kernel_vsid(KERNELBASE), flags, 0); | 148 | create_slbe(KERNELBASE, flags, 0); |
149 | create_slbe(VMALLOCBASE, get_kernel_vsid(KERNELBASE), | 149 | create_slbe(VMALLOCBASE, SLB_VSID_KERNEL, 1); |
150 | SLB_VSID_KERNEL, 1); | ||
151 | /* We don't bolt the stack for the time being - we're in boot, | 150 | /* We don't bolt the stack for the time being - we're in boot, |
152 | * so the stack is in the bolted segment. By the time it goes | 151 | * so the stack is in the bolted segment. By the time it goes |
153 | * elsewhere, we'll call _switch() which will bolt in the new | 152 | * elsewhere, we'll call _switch() which will bolt in the new |
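
create_slbe() now takes only the flags and the bolt slot; mk_vsid_data() already derives the VSID from the effective address, which also means the VMALLOCBASE entry is no longer built with the KERNELBASE VSID as it was in the removed lines. A sketch of the resulting helper pair, assuming ESID_MASK and SLB_ESID_V from asm/mmu.h (mk_esid_data() is written out here for illustration and is not part of the hunk):

    static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
    {
            return (ea & ESID_MASK) | SLB_ESID_V | slot;
    }

    static inline void create_slbe(unsigned long ea, unsigned long flags,
                                   unsigned long entry)
    {
            /* RS = VSID data (VSID + protection flags), RB = ESID data (ESID,
             * valid bit, bolt slot); both are derived from the effective address. */
            asm volatile("slbmte  %0,%1"
                         : /* no outputs */
                         : "r" (mk_vsid_data(ea, flags)),
                           "r" (mk_esid_data(ea, entry))
                         : "memory");
    }
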
diff --git a/arch/ppc64/mm/stab.c b/arch/ppc64/mm/stab.c index 31491131d5e4..df4bbe14153c 100644 --- a/arch/ppc64/mm/stab.c +++ b/arch/ppc64/mm/stab.c | |||
@@ -19,6 +19,11 @@ | |||
19 | #include <asm/paca.h> | 19 | #include <asm/paca.h> |
20 | #include <asm/cputable.h> | 20 | #include <asm/cputable.h> |
21 | 21 | ||
22 | struct stab_entry { | ||
23 | unsigned long esid_data; | ||
24 | unsigned long vsid_data; | ||
25 | }; | ||
26 | |||
22 | /* Both the segment table and SLB code uses the following cache */ | 27 | /* Both the segment table and SLB code uses the following cache */ |
23 | #define NR_STAB_CACHE_ENTRIES 8 | 28 | #define NR_STAB_CACHE_ENTRIES 8 |
24 | DEFINE_PER_CPU(long, stab_cache_ptr); | 29 | DEFINE_PER_CPU(long, stab_cache_ptr); |
diff --git a/arch/ppc64/xmon/ppc-opc.c b/arch/ppc64/xmon/ppc-opc.c index 1e4e7e319970..5ee8fc32f824 100644 --- a/arch/ppc64/xmon/ppc-opc.c +++ b/arch/ppc64/xmon/ppc-opc.c | |||
@@ -20,6 +20,7 @@ | |||
20 | Software Foundation, 59 Temple Place - Suite 330, Boston, MA | 20 | Software Foundation, 59 Temple Place - Suite 330, Boston, MA |
21 | 02111-1307, USA. */ | 21 | 02111-1307, USA. */ |
22 | 22 | ||
23 | #include <linux/stddef.h> | ||
23 | #include "nonstdio.h" | 24 | #include "nonstdio.h" |
24 | #include "ppc.h" | 25 | #include "ppc.h" |
25 | 26 | ||
@@ -110,12 +111,12 @@ const struct powerpc_operand powerpc_operands[] = | |||
110 | /* The zero index is used to indicate the end of the list of | 111 | /* The zero index is used to indicate the end of the list of |
111 | operands. */ | 112 | operands. */ |
112 | #define UNUSED 0 | 113 | #define UNUSED 0 |
113 | { 0, 0, 0, 0, 0 }, | 114 | { 0, 0, NULL, NULL, 0 }, |
114 | 115 | ||
115 | /* The BA field in an XL form instruction. */ | 116 | /* The BA field in an XL form instruction. */ |
116 | #define BA UNUSED + 1 | 117 | #define BA UNUSED + 1 |
117 | #define BA_MASK (0x1f << 16) | 118 | #define BA_MASK (0x1f << 16) |
118 | { 5, 16, 0, 0, PPC_OPERAND_CR }, | 119 | { 5, 16, NULL, NULL, PPC_OPERAND_CR }, |
119 | 120 | ||
120 | /* The BA field in an XL form instruction when it must be the same | 121 | /* The BA field in an XL form instruction when it must be the same |
121 | as the BT field in the same instruction. */ | 122 | as the BT field in the same instruction. */ |
@@ -125,7 +126,7 @@ const struct powerpc_operand powerpc_operands[] = | |||
125 | /* The BB field in an XL form instruction. */ | 126 | /* The BB field in an XL form instruction. */ |
126 | #define BB BAT + 1 | 127 | #define BB BAT + 1 |
127 | #define BB_MASK (0x1f << 11) | 128 | #define BB_MASK (0x1f << 11) |
128 | { 5, 11, 0, 0, PPC_OPERAND_CR }, | 129 | { 5, 11, NULL, NULL, PPC_OPERAND_CR }, |
129 | 130 | ||
130 | /* The BB field in an XL form instruction when it must be the same | 131 | /* The BB field in an XL form instruction when it must be the same |
131 | as the BA field in the same instruction. */ | 132 | as the BA field in the same instruction. */ |
@@ -168,21 +169,21 @@ const struct powerpc_operand powerpc_operands[] = | |||
168 | 169 | ||
169 | /* The BF field in an X or XL form instruction. */ | 170 | /* The BF field in an X or XL form instruction. */ |
170 | #define BF BDPA + 1 | 171 | #define BF BDPA + 1 |
171 | { 3, 23, 0, 0, PPC_OPERAND_CR }, | 172 | { 3, 23, NULL, NULL, PPC_OPERAND_CR }, |
172 | 173 | ||
173 | /* An optional BF field. This is used for comparison instructions, | 174 | /* An optional BF field. This is used for comparison instructions, |
174 | in which an omitted BF field is taken as zero. */ | 175 | in which an omitted BF field is taken as zero. */ |
175 | #define OBF BF + 1 | 176 | #define OBF BF + 1 |
176 | { 3, 23, 0, 0, PPC_OPERAND_CR | PPC_OPERAND_OPTIONAL }, | 177 | { 3, 23, NULL, NULL, PPC_OPERAND_CR | PPC_OPERAND_OPTIONAL }, |
177 | 178 | ||
178 | /* The BFA field in an X or XL form instruction. */ | 179 | /* The BFA field in an X or XL form instruction. */ |
179 | #define BFA OBF + 1 | 180 | #define BFA OBF + 1 |
180 | { 3, 18, 0, 0, PPC_OPERAND_CR }, | 181 | { 3, 18, NULL, NULL, PPC_OPERAND_CR }, |
181 | 182 | ||
182 | /* The BI field in a B form or XL form instruction. */ | 183 | /* The BI field in a B form or XL form instruction. */ |
183 | #define BI BFA + 1 | 184 | #define BI BFA + 1 |
184 | #define BI_MASK (0x1f << 16) | 185 | #define BI_MASK (0x1f << 16) |
185 | { 5, 16, 0, 0, PPC_OPERAND_CR }, | 186 | { 5, 16, NULL, NULL, PPC_OPERAND_CR }, |
186 | 187 | ||
187 | /* The BO field in a B form instruction. Certain values are | 188 | /* The BO field in a B form instruction. Certain values are |
188 | illegal. */ | 189 | illegal. */ |
@@ -197,36 +198,36 @@ const struct powerpc_operand powerpc_operands[] = | |||
197 | 198 | ||
198 | /* The BT field in an X or XL form instruction. */ | 199 | /* The BT field in an X or XL form instruction. */ |
199 | #define BT BOE + 1 | 200 | #define BT BOE + 1 |
200 | { 5, 21, 0, 0, PPC_OPERAND_CR }, | 201 | { 5, 21, NULL, NULL, PPC_OPERAND_CR }, |
201 | 202 | ||
202 | /* The condition register number portion of the BI field in a B form | 203 | /* The condition register number portion of the BI field in a B form |
203 | or XL form instruction. This is used for the extended | 204 | or XL form instruction. This is used for the extended |
204 | conditional branch mnemonics, which set the lower two bits of the | 205 | conditional branch mnemonics, which set the lower two bits of the |
205 | BI field. This field is optional. */ | 206 | BI field. This field is optional. */ |
206 | #define CR BT + 1 | 207 | #define CR BT + 1 |
207 | { 3, 18, 0, 0, PPC_OPERAND_CR | PPC_OPERAND_OPTIONAL }, | 208 | { 3, 18, NULL, NULL, PPC_OPERAND_CR | PPC_OPERAND_OPTIONAL }, |
208 | 209 | ||
209 | /* The CRB field in an X form instruction. */ | 210 | /* The CRB field in an X form instruction. */ |
210 | #define CRB CR + 1 | 211 | #define CRB CR + 1 |
211 | { 5, 6, 0, 0, 0 }, | 212 | { 5, 6, NULL, NULL, 0 }, |
212 | 213 | ||
213 | /* The CRFD field in an X form instruction. */ | 214 | /* The CRFD field in an X form instruction. */ |
214 | #define CRFD CRB + 1 | 215 | #define CRFD CRB + 1 |
215 | { 3, 23, 0, 0, PPC_OPERAND_CR }, | 216 | { 3, 23, NULL, NULL, PPC_OPERAND_CR }, |
216 | 217 | ||
217 | /* The CRFS field in an X form instruction. */ | 218 | /* The CRFS field in an X form instruction. */ |
218 | #define CRFS CRFD + 1 | 219 | #define CRFS CRFD + 1 |
219 | { 3, 0, 0, 0, PPC_OPERAND_CR }, | 220 | { 3, 0, NULL, NULL, PPC_OPERAND_CR }, |
220 | 221 | ||
221 | /* The CT field in an X form instruction. */ | 222 | /* The CT field in an X form instruction. */ |
222 | #define CT CRFS + 1 | 223 | #define CT CRFS + 1 |
223 | { 5, 21, 0, 0, PPC_OPERAND_OPTIONAL }, | 224 | { 5, 21, NULL, NULL, PPC_OPERAND_OPTIONAL }, |
224 | 225 | ||
225 | /* The D field in a D form instruction. This is a displacement off | 226 | /* The D field in a D form instruction. This is a displacement off |
226 | a register, and implies that the next operand is a register in | 227 | a register, and implies that the next operand is a register in |
227 | parentheses. */ | 228 | parentheses. */ |
228 | #define D CT + 1 | 229 | #define D CT + 1 |
229 | { 16, 0, 0, 0, PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED }, | 230 | { 16, 0, NULL, NULL, PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED }, |
230 | 231 | ||
231 | /* The DE field in a DE form instruction. This is like D, but is 12 | 232 | /* The DE field in a DE form instruction. This is like D, but is 12 |
232 | bits only. */ | 233 | bits only. */ |
@@ -252,40 +253,40 @@ const struct powerpc_operand powerpc_operands[] = | |||
252 | 253 | ||
253 | /* The E field in a wrteei instruction. */ | 254 | /* The E field in a wrteei instruction. */ |
254 | #define E DS + 1 | 255 | #define E DS + 1 |
255 | { 1, 15, 0, 0, 0 }, | 256 | { 1, 15, NULL, NULL, 0 }, |
256 | 257 | ||
257 | /* The FL1 field in a POWER SC form instruction. */ | 258 | /* The FL1 field in a POWER SC form instruction. */ |
258 | #define FL1 E + 1 | 259 | #define FL1 E + 1 |
259 | { 4, 12, 0, 0, 0 }, | 260 | { 4, 12, NULL, NULL, 0 }, |
260 | 261 | ||
261 | /* The FL2 field in a POWER SC form instruction. */ | 262 | /* The FL2 field in a POWER SC form instruction. */ |
262 | #define FL2 FL1 + 1 | 263 | #define FL2 FL1 + 1 |
263 | { 3, 2, 0, 0, 0 }, | 264 | { 3, 2, NULL, NULL, 0 }, |
264 | 265 | ||
265 | /* The FLM field in an XFL form instruction. */ | 266 | /* The FLM field in an XFL form instruction. */ |
266 | #define FLM FL2 + 1 | 267 | #define FLM FL2 + 1 |
267 | { 8, 17, 0, 0, 0 }, | 268 | { 8, 17, NULL, NULL, 0 }, |
268 | 269 | ||
269 | /* The FRA field in an X or A form instruction. */ | 270 | /* The FRA field in an X or A form instruction. */ |
270 | #define FRA FLM + 1 | 271 | #define FRA FLM + 1 |
271 | #define FRA_MASK (0x1f << 16) | 272 | #define FRA_MASK (0x1f << 16) |
272 | { 5, 16, 0, 0, PPC_OPERAND_FPR }, | 273 | { 5, 16, NULL, NULL, PPC_OPERAND_FPR }, |
273 | 274 | ||
274 | /* The FRB field in an X or A form instruction. */ | 275 | /* The FRB field in an X or A form instruction. */ |
275 | #define FRB FRA + 1 | 276 | #define FRB FRA + 1 |
276 | #define FRB_MASK (0x1f << 11) | 277 | #define FRB_MASK (0x1f << 11) |
277 | { 5, 11, 0, 0, PPC_OPERAND_FPR }, | 278 | { 5, 11, NULL, NULL, PPC_OPERAND_FPR }, |
278 | 279 | ||
279 | /* The FRC field in an A form instruction. */ | 280 | /* The FRC field in an A form instruction. */ |
280 | #define FRC FRB + 1 | 281 | #define FRC FRB + 1 |
281 | #define FRC_MASK (0x1f << 6) | 282 | #define FRC_MASK (0x1f << 6) |
282 | { 5, 6, 0, 0, PPC_OPERAND_FPR }, | 283 | { 5, 6, NULL, NULL, PPC_OPERAND_FPR }, |
283 | 284 | ||
284 | /* The FRS field in an X form instruction or the FRT field in a D, X | 285 | /* The FRS field in an X form instruction or the FRT field in a D, X |
285 | or A form instruction. */ | 286 | or A form instruction. */ |
286 | #define FRS FRC + 1 | 287 | #define FRS FRC + 1 |
287 | #define FRT FRS | 288 | #define FRT FRS |
288 | { 5, 21, 0, 0, PPC_OPERAND_FPR }, | 289 | { 5, 21, NULL, NULL, PPC_OPERAND_FPR }, |
289 | 290 | ||
290 | /* The FXM field in an XFX instruction. */ | 291 | /* The FXM field in an XFX instruction. */ |
291 | #define FXM FRS + 1 | 292 | #define FXM FRS + 1 |
@@ -298,11 +299,11 @@ const struct powerpc_operand powerpc_operands[] = | |||
298 | 299 | ||
299 | /* The L field in a D or X form instruction. */ | 300 | /* The L field in a D or X form instruction. */ |
300 | #define L FXM4 + 1 | 301 | #define L FXM4 + 1 |
301 | { 1, 21, 0, 0, PPC_OPERAND_OPTIONAL }, | 302 | { 1, 21, NULL, NULL, PPC_OPERAND_OPTIONAL }, |
302 | 303 | ||
303 | /* The LEV field in a POWER SC form instruction. */ | 304 | /* The LEV field in a POWER SC form instruction. */ |
304 | #define LEV L + 1 | 305 | #define LEV L + 1 |
305 | { 7, 5, 0, 0, 0 }, | 306 | { 7, 5, NULL, NULL, 0 }, |
306 | 307 | ||
307 | /* The LI field in an I form instruction. The lower two bits are | 308 | /* The LI field in an I form instruction. The lower two bits are |
308 | forced to zero. */ | 309 | forced to zero. */ |
@@ -316,24 +317,24 @@ const struct powerpc_operand powerpc_operands[] = | |||
316 | 317 | ||
317 | /* The LS field in an X (sync) form instruction. */ | 318 | /* The LS field in an X (sync) form instruction. */ |
318 | #define LS LIA + 1 | 319 | #define LS LIA + 1 |
319 | { 2, 21, 0, 0, PPC_OPERAND_OPTIONAL }, | 320 | { 2, 21, NULL, NULL, PPC_OPERAND_OPTIONAL }, |
320 | 321 | ||
321 | /* The MB field in an M form instruction. */ | 322 | /* The MB field in an M form instruction. */ |
322 | #define MB LS + 1 | 323 | #define MB LS + 1 |
323 | #define MB_MASK (0x1f << 6) | 324 | #define MB_MASK (0x1f << 6) |
324 | { 5, 6, 0, 0, 0 }, | 325 | { 5, 6, NULL, NULL, 0 }, |
325 | 326 | ||
326 | /* The ME field in an M form instruction. */ | 327 | /* The ME field in an M form instruction. */ |
327 | #define ME MB + 1 | 328 | #define ME MB + 1 |
328 | #define ME_MASK (0x1f << 1) | 329 | #define ME_MASK (0x1f << 1) |
329 | { 5, 1, 0, 0, 0 }, | 330 | { 5, 1, NULL, NULL, 0 }, |
330 | 331 | ||
331 | /* The MB and ME fields in an M form instruction expressed a single | 332 | /* The MB and ME fields in an M form instruction expressed a single |
332 | operand which is a bitmask indicating which bits to select. This | 333 | operand which is a bitmask indicating which bits to select. This |
333 | is a two operand form using PPC_OPERAND_NEXT. See the | 334 | is a two operand form using PPC_OPERAND_NEXT. See the |
334 | description in opcode/ppc.h for what this means. */ | 335 | description in opcode/ppc.h for what this means. */ |
335 | #define MBE ME + 1 | 336 | #define MBE ME + 1 |
336 | { 5, 6, 0, 0, PPC_OPERAND_OPTIONAL | PPC_OPERAND_NEXT }, | 337 | { 5, 6, NULL, NULL, PPC_OPERAND_OPTIONAL | PPC_OPERAND_NEXT }, |
337 | { 32, 0, insert_mbe, extract_mbe, 0 }, | 338 | { 32, 0, insert_mbe, extract_mbe, 0 }, |
338 | 339 | ||
339 | /* The MB or ME field in an MD or MDS form instruction. The high | 340 | /* The MB or ME field in an MD or MDS form instruction. The high |
@@ -345,7 +346,7 @@ const struct powerpc_operand powerpc_operands[] = | |||
345 | 346 | ||
346 | /* The MO field in an mbar instruction. */ | 347 | /* The MO field in an mbar instruction. */ |
347 | #define MO MB6 + 1 | 348 | #define MO MB6 + 1 |
348 | { 5, 21, 0, 0, 0 }, | 349 | { 5, 21, NULL, NULL, 0 }, |
349 | 350 | ||
350 | /* The NB field in an X form instruction. The value 32 is stored as | 351 | /* The NB field in an X form instruction. The value 32 is stored as |
351 | 0. */ | 352 | 0. */ |
@@ -361,34 +362,34 @@ const struct powerpc_operand powerpc_operands[] = | |||
361 | /* The RA field in an D, DS, DQ, X, XO, M, or MDS form instruction. */ | 362 | /* The RA field in an D, DS, DQ, X, XO, M, or MDS form instruction. */ |
362 | #define RA NSI + 1 | 363 | #define RA NSI + 1 |
363 | #define RA_MASK (0x1f << 16) | 364 | #define RA_MASK (0x1f << 16) |
364 | { 5, 16, 0, 0, PPC_OPERAND_GPR }, | 365 | { 5, 16, NULL, NULL, PPC_OPERAND_GPR }, |
365 | 366 | ||
366 | /* The RA field in the DQ form lq instruction, which has special | 367 | /* The RA field in the DQ form lq instruction, which has special |
367 | value restrictions. */ | 368 | value restrictions. */ |
368 | #define RAQ RA + 1 | 369 | #define RAQ RA + 1 |
369 | { 5, 16, insert_raq, 0, PPC_OPERAND_GPR }, | 370 | { 5, 16, insert_raq, NULL, PPC_OPERAND_GPR }, |
370 | 371 | ||
371 | /* The RA field in a D or X form instruction which is an updating | 372 | /* The RA field in a D or X form instruction which is an updating |
372 | load, which means that the RA field may not be zero and may not | 373 | load, which means that the RA field may not be zero and may not |
373 | equal the RT field. */ | 374 | equal the RT field. */ |
374 | #define RAL RAQ + 1 | 375 | #define RAL RAQ + 1 |
375 | { 5, 16, insert_ral, 0, PPC_OPERAND_GPR }, | 376 | { 5, 16, insert_ral, NULL, PPC_OPERAND_GPR }, |
376 | 377 | ||
377 | /* The RA field in an lmw instruction, which has special value | 378 | /* The RA field in an lmw instruction, which has special value |
378 | restrictions. */ | 379 | restrictions. */ |
379 | #define RAM RAL + 1 | 380 | #define RAM RAL + 1 |
380 | { 5, 16, insert_ram, 0, PPC_OPERAND_GPR }, | 381 | { 5, 16, insert_ram, NULL, PPC_OPERAND_GPR }, |
381 | 382 | ||
382 | /* The RA field in a D or X form instruction which is an updating | 383 | /* The RA field in a D or X form instruction which is an updating |
383 | store or an updating floating point load, which means that the RA | 384 | store or an updating floating point load, which means that the RA |
384 | field may not be zero. */ | 385 | field may not be zero. */ |
385 | #define RAS RAM + 1 | 386 | #define RAS RAM + 1 |
386 | { 5, 16, insert_ras, 0, PPC_OPERAND_GPR }, | 387 | { 5, 16, insert_ras, NULL, PPC_OPERAND_GPR }, |
387 | 388 | ||
388 | /* The RB field in an X, XO, M, or MDS form instruction. */ | 389 | /* The RB field in an X, XO, M, or MDS form instruction. */ |
389 | #define RB RAS + 1 | 390 | #define RB RAS + 1 |
390 | #define RB_MASK (0x1f << 11) | 391 | #define RB_MASK (0x1f << 11) |
391 | { 5, 11, 0, 0, PPC_OPERAND_GPR }, | 392 | { 5, 11, NULL, NULL, PPC_OPERAND_GPR }, |
392 | 393 | ||
393 | /* The RB field in an X form instruction when it must be the same as | 394 | /* The RB field in an X form instruction when it must be the same as |
394 | the RS field in the instruction. This is used for extended | 395 | the RS field in the instruction. This is used for extended |
@@ -402,22 +403,22 @@ const struct powerpc_operand powerpc_operands[] = | |||
402 | #define RS RBS + 1 | 403 | #define RS RBS + 1 |
403 | #define RT RS | 404 | #define RT RS |
404 | #define RT_MASK (0x1f << 21) | 405 | #define RT_MASK (0x1f << 21) |
405 | { 5, 21, 0, 0, PPC_OPERAND_GPR }, | 406 | { 5, 21, NULL, NULL, PPC_OPERAND_GPR }, |
406 | 407 | ||
407 | /* The RS field of the DS form stq instruction, which has special | 408 | /* The RS field of the DS form stq instruction, which has special |
408 | value restrictions. */ | 409 | value restrictions. */ |
409 | #define RSQ RS + 1 | 410 | #define RSQ RS + 1 |
410 | { 5, 21, insert_rsq, 0, PPC_OPERAND_GPR }, | 411 | { 5, 21, insert_rsq, NULL, PPC_OPERAND_GPR }, |
411 | 412 | ||
412 | /* The RT field of the DQ form lq instruction, which has special | 413 | /* The RT field of the DQ form lq instruction, which has special |
413 | value restrictions. */ | 414 | value restrictions. */ |
414 | #define RTQ RSQ + 1 | 415 | #define RTQ RSQ + 1 |
415 | { 5, 21, insert_rtq, 0, PPC_OPERAND_GPR }, | 416 | { 5, 21, insert_rtq, NULL, PPC_OPERAND_GPR }, |
416 | 417 | ||
417 | /* The SH field in an X or M form instruction. */ | 418 | /* The SH field in an X or M form instruction. */ |
418 | #define SH RTQ + 1 | 419 | #define SH RTQ + 1 |
419 | #define SH_MASK (0x1f << 11) | 420 | #define SH_MASK (0x1f << 11) |
420 | { 5, 11, 0, 0, 0 }, | 421 | { 5, 11, NULL, NULL, 0 }, |
421 | 422 | ||
422 | /* The SH field in an MD form instruction. This is split. */ | 423 | /* The SH field in an MD form instruction. This is split. */ |
423 | #define SH6 SH + 1 | 424 | #define SH6 SH + 1 |
@@ -426,12 +427,12 @@ const struct powerpc_operand powerpc_operands[] = | |||
426 | 427 | ||
427 | /* The SI field in a D form instruction. */ | 428 | /* The SI field in a D form instruction. */ |
428 | #define SI SH6 + 1 | 429 | #define SI SH6 + 1 |
429 | { 16, 0, 0, 0, PPC_OPERAND_SIGNED }, | 430 | { 16, 0, NULL, NULL, PPC_OPERAND_SIGNED }, |
430 | 431 | ||
431 | /* The SI field in a D form instruction when we accept a wide range | 432 | /* The SI field in a D form instruction when we accept a wide range |
432 | of positive values. */ | 433 | of positive values. */ |
433 | #define SISIGNOPT SI + 1 | 434 | #define SISIGNOPT SI + 1 |
434 | { 16, 0, 0, 0, PPC_OPERAND_SIGNED | PPC_OPERAND_SIGNOPT }, | 435 | { 16, 0, NULL, NULL, PPC_OPERAND_SIGNED | PPC_OPERAND_SIGNOPT }, |
435 | 436 | ||
436 | /* The SPR field in an XFX form instruction. This is flipped--the | 437 | /* The SPR field in an XFX form instruction. This is flipped--the |
437 | lower 5 bits are stored in the upper 5 and vice- versa. */ | 438 | lower 5 bits are stored in the upper 5 and vice- versa. */ |
@@ -443,25 +444,25 @@ const struct powerpc_operand powerpc_operands[] = | |||
443 | /* The BAT index number in an XFX form m[ft]ibat[lu] instruction. */ | 444 | /* The BAT index number in an XFX form m[ft]ibat[lu] instruction. */ |
444 | #define SPRBAT SPR + 1 | 445 | #define SPRBAT SPR + 1 |
445 | #define SPRBAT_MASK (0x3 << 17) | 446 | #define SPRBAT_MASK (0x3 << 17) |
446 | { 2, 17, 0, 0, 0 }, | 447 | { 2, 17, NULL, NULL, 0 }, |
447 | 448 | ||
448 | /* The SPRG register number in an XFX form m[ft]sprg instruction. */ | 449 | /* The SPRG register number in an XFX form m[ft]sprg instruction. */ |
449 | #define SPRG SPRBAT + 1 | 450 | #define SPRG SPRBAT + 1 |
450 | #define SPRG_MASK (0x3 << 16) | 451 | #define SPRG_MASK (0x3 << 16) |
451 | { 2, 16, 0, 0, 0 }, | 452 | { 2, 16, NULL, NULL, 0 }, |
452 | 453 | ||
453 | /* The SR field in an X form instruction. */ | 454 | /* The SR field in an X form instruction. */ |
454 | #define SR SPRG + 1 | 455 | #define SR SPRG + 1 |
455 | { 4, 16, 0, 0, 0 }, | 456 | { 4, 16, NULL, NULL, 0 }, |
456 | 457 | ||
457 | /* The STRM field in an X AltiVec form instruction. */ | 458 | /* The STRM field in an X AltiVec form instruction. */ |
458 | #define STRM SR + 1 | 459 | #define STRM SR + 1 |
459 | #define STRM_MASK (0x3 << 21) | 460 | #define STRM_MASK (0x3 << 21) |
460 | { 2, 21, 0, 0, 0 }, | 461 | { 2, 21, NULL, NULL, 0 }, |
461 | 462 | ||
462 | /* The SV field in a POWER SC form instruction. */ | 463 | /* The SV field in a POWER SC form instruction. */ |
463 | #define SV STRM + 1 | 464 | #define SV STRM + 1 |
464 | { 14, 2, 0, 0, 0 }, | 465 | { 14, 2, NULL, NULL, 0 }, |
465 | 466 | ||
466 | /* The TBR field in an XFX form instruction. This is like the SPR | 467 | /* The TBR field in an XFX form instruction. This is like the SPR |
467 | field, but it is optional. */ | 468 | field, but it is optional. */ |
@@ -471,52 +472,52 @@ const struct powerpc_operand powerpc_operands[] = | |||
471 | /* The TO field in a D or X form instruction. */ | 472 | /* The TO field in a D or X form instruction. */ |
472 | #define TO TBR + 1 | 473 | #define TO TBR + 1 |
473 | #define TO_MASK (0x1f << 21) | 474 | #define TO_MASK (0x1f << 21) |
474 | { 5, 21, 0, 0, 0 }, | 475 | { 5, 21, NULL, NULL, 0 }, |
475 | 476 | ||
476 | /* The U field in an X form instruction. */ | 477 | /* The U field in an X form instruction. */ |
477 | #define U TO + 1 | 478 | #define U TO + 1 |
478 | { 4, 12, 0, 0, 0 }, | 479 | { 4, 12, NULL, NULL, 0 }, |
479 | 480 | ||
480 | /* The UI field in a D form instruction. */ | 481 | /* The UI field in a D form instruction. */ |
481 | #define UI U + 1 | 482 | #define UI U + 1 |
482 | { 16, 0, 0, 0, 0 }, | 483 | { 16, 0, NULL, NULL, 0 }, |
483 | 484 | ||
484 | /* The VA field in a VA, VX or VXR form instruction. */ | 485 | /* The VA field in a VA, VX or VXR form instruction. */ |
485 | #define VA UI + 1 | 486 | #define VA UI + 1 |
486 | #define VA_MASK (0x1f << 16) | 487 | #define VA_MASK (0x1f << 16) |
487 | { 5, 16, 0, 0, PPC_OPERAND_VR }, | 488 | { 5, 16, NULL, NULL, PPC_OPERAND_VR }, |
488 | 489 | ||
489 | /* The VB field in a VA, VX or VXR form instruction. */ | 490 | /* The VB field in a VA, VX or VXR form instruction. */ |
490 | #define VB VA + 1 | 491 | #define VB VA + 1 |
491 | #define VB_MASK (0x1f << 11) | 492 | #define VB_MASK (0x1f << 11) |
492 | { 5, 11, 0, 0, PPC_OPERAND_VR }, | 493 | { 5, 11, NULL, NULL, PPC_OPERAND_VR }, |
493 | 494 | ||
494 | /* The VC field in a VA form instruction. */ | 495 | /* The VC field in a VA form instruction. */ |
495 | #define VC VB + 1 | 496 | #define VC VB + 1 |
496 | #define VC_MASK (0x1f << 6) | 497 | #define VC_MASK (0x1f << 6) |
497 | { 5, 6, 0, 0, PPC_OPERAND_VR }, | 498 | { 5, 6, NULL, NULL, PPC_OPERAND_VR }, |
498 | 499 | ||
499 | /* The VD or VS field in a VA, VX, VXR or X form instruction. */ | 500 | /* The VD or VS field in a VA, VX, VXR or X form instruction. */ |
500 | #define VD VC + 1 | 501 | #define VD VC + 1 |
501 | #define VS VD | 502 | #define VS VD |
502 | #define VD_MASK (0x1f << 21) | 503 | #define VD_MASK (0x1f << 21) |
503 | { 5, 21, 0, 0, PPC_OPERAND_VR }, | 504 | { 5, 21, NULL, NULL, PPC_OPERAND_VR }, |
504 | 505 | ||
505 | /* The SIMM field in a VX form instruction. */ | 506 | /* The SIMM field in a VX form instruction. */ |
506 | #define SIMM VD + 1 | 507 | #define SIMM VD + 1 |
507 | { 5, 16, 0, 0, PPC_OPERAND_SIGNED}, | 508 | { 5, 16, NULL, NULL, PPC_OPERAND_SIGNED}, |
508 | 509 | ||
509 | /* The UIMM field in a VX form instruction. */ | 510 | /* The UIMM field in a VX form instruction. */ |
510 | #define UIMM SIMM + 1 | 511 | #define UIMM SIMM + 1 |
511 | { 5, 16, 0, 0, 0 }, | 512 | { 5, 16, NULL, NULL, 0 }, |
512 | 513 | ||
513 | /* The SHB field in a VA form instruction. */ | 514 | /* The SHB field in a VA form instruction. */ |
514 | #define SHB UIMM + 1 | 515 | #define SHB UIMM + 1 |
515 | { 4, 6, 0, 0, 0 }, | 516 | { 4, 6, NULL, NULL, 0 }, |
516 | 517 | ||
517 | /* The other UIMM field in a EVX form instruction. */ | 518 | /* The other UIMM field in a EVX form instruction. */ |
518 | #define EVUIMM SHB + 1 | 519 | #define EVUIMM SHB + 1 |
519 | { 5, 11, 0, 0, 0 }, | 520 | { 5, 11, NULL, NULL, 0 }, |
520 | 521 | ||
521 | /* The other UIMM field in a half word EVX form instruction. */ | 522 | /* The other UIMM field in a half word EVX form instruction. */ |
522 | #define EVUIMM_2 EVUIMM + 1 | 523 | #define EVUIMM_2 EVUIMM + 1 |
@@ -533,11 +534,11 @@ const struct powerpc_operand powerpc_operands[] = | |||
533 | /* The WS field. */ | 534 | /* The WS field. */ |
534 | #define WS EVUIMM_8 + 1 | 535 | #define WS EVUIMM_8 + 1 |
535 | #define WS_MASK (0x7 << 11) | 536 | #define WS_MASK (0x7 << 11) |
536 | { 3, 11, 0, 0, 0 }, | 537 | { 3, 11, NULL, NULL, 0 }, |
537 | 538 | ||
538 | /* The L field in an mtmsrd instruction */ | 539 | /* The L field in an mtmsrd instruction */ |
539 | #define MTMSRD_L WS + 1 | 540 | #define MTMSRD_L WS + 1 |
540 | { 1, 16, 0, 0, PPC_OPERAND_OPTIONAL }, | 541 | { 1, 16, NULL, NULL, PPC_OPERAND_OPTIONAL }, |
541 | 542 | ||
542 | }; | 543 | }; |
543 | 544 | ||
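
The long run of 0-to-NULL substitutions above only touches the insert and extract members of struct powerpc_operand, which are function pointers; spelling them NULL keeps sparse quiet and makes it obvious at a glance which operands carry custom encode/decode hooks. A stand-alone sketch of the shape of the table (field names mirror the real structure, but the snippet is illustrative):

    #include <stddef.h>
    #include <stdio.h>

    typedef unsigned long insn_t;

    struct operand {
            unsigned int bits;      /* width of the field */
            unsigned int shift;     /* position within the instruction */
            insn_t (*insert)(insn_t insn, long value, const char **errmsg);
            long (*extract)(insn_t insn, int *invalid);
            unsigned long flags;
    };

    /* Most operands are plain bitfields: no hooks, hence NULL, NULL. */
    static const struct operand operands[] = {
            { 5, 16, NULL, NULL, 0 },
            { 16, 0, NULL, NULL, 0 },
    };

    int main(void)
    {
            printf("%zu operands, first has hooks: %s\n",
                   sizeof(operands) / sizeof(operands[0]),
                   operands[0].insert ? "yes" : "no");
            return 0;
    }
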
diff --git a/arch/s390/defconfig b/arch/s390/defconfig index 1358b4201701..07fd0414a4bf 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig | |||
@@ -1,7 +1,7 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.11 | 3 | # Linux kernel version: 2.6.12-rc3 |
4 | # Wed Mar 2 16:57:55 2005 | 4 | # Fri Apr 22 15:30:58 2005 |
5 | # | 5 | # |
6 | CONFIG_MMU=y | 6 | CONFIG_MMU=y |
7 | CONFIG_RWSEM_XCHGADD_ALGORITHM=y | 7 | CONFIG_RWSEM_XCHGADD_ALGORITHM=y |
@@ -15,6 +15,7 @@ CONFIG_UID16=y | |||
15 | CONFIG_EXPERIMENTAL=y | 15 | CONFIG_EXPERIMENTAL=y |
16 | CONFIG_CLEAN_COMPILE=y | 16 | CONFIG_CLEAN_COMPILE=y |
17 | CONFIG_LOCK_KERNEL=y | 17 | CONFIG_LOCK_KERNEL=y |
18 | CONFIG_INIT_ENV_ARG_LIMIT=32 | ||
18 | 19 | ||
19 | # | 20 | # |
20 | # General setup | 21 | # General setup |
@@ -26,24 +27,25 @@ CONFIG_SYSVIPC=y | |||
26 | # CONFIG_BSD_PROCESS_ACCT is not set | 27 | # CONFIG_BSD_PROCESS_ACCT is not set |
27 | CONFIG_SYSCTL=y | 28 | CONFIG_SYSCTL=y |
28 | # CONFIG_AUDIT is not set | 29 | # CONFIG_AUDIT is not set |
29 | CONFIG_LOG_BUF_SHIFT=17 | ||
30 | CONFIG_HOTPLUG=y | 30 | CONFIG_HOTPLUG=y |
31 | CONFIG_KOBJECT_UEVENT=y | 31 | CONFIG_KOBJECT_UEVENT=y |
32 | CONFIG_IKCONFIG=y | 32 | CONFIG_IKCONFIG=y |
33 | CONFIG_IKCONFIG_PROC=y | 33 | CONFIG_IKCONFIG_PROC=y |
34 | # CONFIG_CPUSETS is not set | ||
34 | # CONFIG_EMBEDDED is not set | 35 | # CONFIG_EMBEDDED is not set |
35 | CONFIG_KALLSYMS=y | 36 | CONFIG_KALLSYMS=y |
36 | # CONFIG_KALLSYMS_ALL is not set | 37 | # CONFIG_KALLSYMS_ALL is not set |
37 | # CONFIG_KALLSYMS_EXTRA_PASS is not set | 38 | # CONFIG_KALLSYMS_EXTRA_PASS is not set |
39 | CONFIG_BASE_FULL=y | ||
38 | CONFIG_FUTEX=y | 40 | CONFIG_FUTEX=y |
39 | CONFIG_EPOLL=y | 41 | CONFIG_EPOLL=y |
40 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | ||
41 | CONFIG_SHMEM=y | 42 | CONFIG_SHMEM=y |
42 | CONFIG_CC_ALIGN_FUNCTIONS=0 | 43 | CONFIG_CC_ALIGN_FUNCTIONS=0 |
43 | CONFIG_CC_ALIGN_LABELS=0 | 44 | CONFIG_CC_ALIGN_LABELS=0 |
44 | CONFIG_CC_ALIGN_LOOPS=0 | 45 | CONFIG_CC_ALIGN_LOOPS=0 |
45 | CONFIG_CC_ALIGN_JUMPS=0 | 46 | CONFIG_CC_ALIGN_JUMPS=0 |
46 | # CONFIG_TINY_SHMEM is not set | 47 | # CONFIG_TINY_SHMEM is not set |
48 | CONFIG_BASE_SMALL=0 | ||
47 | 49 | ||
48 | # | 50 | # |
49 | # Loadable module support | 51 | # Loadable module support |
@@ -261,7 +263,6 @@ CONFIG_NET=y | |||
261 | # | 263 | # |
262 | CONFIG_PACKET=y | 264 | CONFIG_PACKET=y |
263 | # CONFIG_PACKET_MMAP is not set | 265 | # CONFIG_PACKET_MMAP is not set |
264 | # CONFIG_NETLINK_DEV is not set | ||
265 | CONFIG_UNIX=y | 266 | CONFIG_UNIX=y |
266 | CONFIG_NET_KEY=y | 267 | CONFIG_NET_KEY=y |
267 | CONFIG_INET=y | 268 | CONFIG_INET=y |
@@ -329,6 +330,7 @@ CONFIG_NET_SCH_DSMARK=m | |||
329 | CONFIG_NET_QOS=y | 330 | CONFIG_NET_QOS=y |
330 | CONFIG_NET_ESTIMATOR=y | 331 | CONFIG_NET_ESTIMATOR=y |
331 | CONFIG_NET_CLS=y | 332 | CONFIG_NET_CLS=y |
333 | # CONFIG_NET_CLS_BASIC is not set | ||
332 | CONFIG_NET_CLS_TCINDEX=m | 334 | CONFIG_NET_CLS_TCINDEX=m |
333 | CONFIG_NET_CLS_ROUTE4=m | 335 | CONFIG_NET_CLS_ROUTE4=m |
334 | CONFIG_NET_CLS_ROUTE=y | 336 | CONFIG_NET_CLS_ROUTE=y |
@@ -338,6 +340,7 @@ CONFIG_NET_CLS_U32=m | |||
338 | # CONFIG_NET_CLS_IND is not set | 340 | # CONFIG_NET_CLS_IND is not set |
339 | CONFIG_NET_CLS_RSVP=m | 341 | CONFIG_NET_CLS_RSVP=m |
340 | CONFIG_NET_CLS_RSVP6=m | 342 | CONFIG_NET_CLS_RSVP6=m |
343 | # CONFIG_NET_EMATCH is not set | ||
341 | # CONFIG_NET_CLS_ACT is not set | 344 | # CONFIG_NET_CLS_ACT is not set |
342 | CONFIG_NET_CLS_POLICE=y | 345 | CONFIG_NET_CLS_POLICE=y |
343 | 346 | ||
@@ -393,6 +396,8 @@ CONFIG_CTC=m | |||
393 | CONFIG_IUCV=m | 396 | CONFIG_IUCV=m |
394 | # CONFIG_NETIUCV is not set | 397 | # CONFIG_NETIUCV is not set |
395 | # CONFIG_SMSGIUCV is not set | 398 | # CONFIG_SMSGIUCV is not set |
399 | # CONFIG_CLAW is not set | ||
400 | # CONFIG_MPC is not set | ||
396 | CONFIG_QETH=y | 401 | CONFIG_QETH=y |
397 | 402 | ||
398 | # | 403 | # |
@@ -532,10 +537,13 @@ CONFIG_MSDOS_PARTITION=y | |||
532 | # | 537 | # |
533 | # Kernel hacking | 538 | # Kernel hacking |
534 | # | 539 | # |
540 | # CONFIG_PRINTK_TIME is not set | ||
535 | CONFIG_DEBUG_KERNEL=y | 541 | CONFIG_DEBUG_KERNEL=y |
536 | CONFIG_MAGIC_SYSRQ=y | 542 | CONFIG_MAGIC_SYSRQ=y |
543 | CONFIG_LOG_BUF_SHIFT=17 | ||
537 | # CONFIG_SCHEDSTATS is not set | 544 | # CONFIG_SCHEDSTATS is not set |
538 | # CONFIG_DEBUG_SLAB is not set | 545 | # CONFIG_DEBUG_SLAB is not set |
546 | # CONFIG_DEBUG_SPINLOCK is not set | ||
539 | # CONFIG_DEBUG_SPINLOCK_SLEEP is not set | 547 | # CONFIG_DEBUG_SPINLOCK_SLEEP is not set |
540 | # CONFIG_DEBUG_KOBJECT is not set | 548 | # CONFIG_DEBUG_KOBJECT is not set |
541 | # CONFIG_DEBUG_INFO is not set | 549 | # CONFIG_DEBUG_INFO is not set |
@@ -560,6 +568,7 @@ CONFIG_CRYPTO=y | |||
560 | # CONFIG_CRYPTO_SHA256 is not set | 568 | # CONFIG_CRYPTO_SHA256 is not set |
561 | # CONFIG_CRYPTO_SHA512 is not set | 569 | # CONFIG_CRYPTO_SHA512 is not set |
562 | # CONFIG_CRYPTO_WP512 is not set | 570 | # CONFIG_CRYPTO_WP512 is not set |
571 | # CONFIG_CRYPTO_TGR192 is not set | ||
563 | # CONFIG_CRYPTO_DES is not set | 572 | # CONFIG_CRYPTO_DES is not set |
564 | # CONFIG_CRYPTO_DES_Z990 is not set | 573 | # CONFIG_CRYPTO_DES_Z990 is not set |
565 | # CONFIG_CRYPTO_BLOWFISH is not set | 574 | # CONFIG_CRYPTO_BLOWFISH is not set |
diff --git a/arch/s390/kernel/compat_ioctl.c b/arch/s390/kernel/compat_ioctl.c index 96571ff7115d..03d03c6d3cbb 100644 --- a/arch/s390/kernel/compat_ioctl.c +++ b/arch/s390/kernel/compat_ioctl.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #define CODE | 16 | #define CODE |
17 | #include "../../../fs/compat_ioctl.c" | 17 | #include "../../../fs/compat_ioctl.c" |
18 | #include <asm/dasd.h> | 18 | #include <asm/dasd.h> |
19 | #include <asm/cmb.h> | ||
19 | #include <asm/tape390.h> | 20 | #include <asm/tape390.h> |
20 | 21 | ||
21 | static int do_ioctl32_pointer(unsigned int fd, unsigned int cmd, | 22 | static int do_ioctl32_pointer(unsigned int fd, unsigned int cmd, |
@@ -58,7 +59,11 @@ COMPATIBLE_IOCTL(BIODASDPRRD) | |||
58 | COMPATIBLE_IOCTL(BIODASDPSRD) | 59 | COMPATIBLE_IOCTL(BIODASDPSRD) |
59 | COMPATIBLE_IOCTL(BIODASDGATTR) | 60 | COMPATIBLE_IOCTL(BIODASDGATTR) |
60 | COMPATIBLE_IOCTL(BIODASDSATTR) | 61 | COMPATIBLE_IOCTL(BIODASDSATTR) |
61 | 62 | #if defined(CONFIG_DASD_CMB) || defined(CONFIG_DASD_CMB_MODULE) | |
63 | COMPATIBLE_IOCTL(BIODASDCMFENABLE) | ||
64 | COMPATIBLE_IOCTL(BIODASDCMFDISABLE) | ||
65 | COMPATIBLE_IOCTL(BIODASDREADALLCMB) | ||
66 | #endif | ||
62 | #endif | 67 | #endif |
63 | 68 | ||
64 | #if defined(CONFIG_S390_TAPE) || defined(CONFIG_S390_TAPE_MODULE) | 69 | #if defined(CONFIG_S390_TAPE) || defined(CONFIG_S390_TAPE_MODULE) |
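
The new BIODASDCMF* entries sit behind the usual guard for code that must be present when a driver is built in or as a module, since a tristate option set to m defines CONFIG_FOO_MODULE rather than CONFIG_FOO. The bare pattern, with a placeholder option name (the hunk above uses CONFIG_DASD_CMB):

    #if defined(CONFIG_EXAMPLE) || defined(CONFIG_EXAMPLE_MODULE)
    /* ...entries compiled whenever the driver can be present at runtime... */
    #endif
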
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 647233c02fc8..26889366929a 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/user.h> | 32 | #include <linux/user.h> |
33 | #include <linux/security.h> | 33 | #include <linux/security.h> |
34 | #include <linux/audit.h> | 34 | #include <linux/audit.h> |
35 | #include <linux/signal.h> | ||
35 | 36 | ||
36 | #include <asm/segment.h> | 37 | #include <asm/segment.h> |
37 | #include <asm/page.h> | 38 | #include <asm/page.h> |
@@ -609,7 +610,7 @@ do_ptrace(struct task_struct *child, long request, long addr, long data) | |||
609 | /* continue and stop at next (return from) syscall */ | 610 | /* continue and stop at next (return from) syscall */ |
610 | case PTRACE_CONT: | 611 | case PTRACE_CONT: |
611 | /* restart after signal. */ | 612 | /* restart after signal. */ |
612 | if ((unsigned long) data >= _NSIG) | 613 | if (!valid_signal(data)) |
613 | return -EIO; | 614 | return -EIO; |
614 | if (request == PTRACE_SYSCALL) | 615 | if (request == PTRACE_SYSCALL) |
615 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 616 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
@@ -637,7 +638,7 @@ do_ptrace(struct task_struct *child, long request, long addr, long data) | |||
637 | 638 | ||
638 | case PTRACE_SINGLESTEP: | 639 | case PTRACE_SINGLESTEP: |
639 | /* set the trap flag. */ | 640 | /* set the trap flag. */ |
640 | if ((unsigned long) data >= _NSIG) | 641 | if (!valid_signal(data)) |
641 | return -EIO; | 642 | return -EIO; |
642 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 643 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
643 | child->exit_code = data; | 644 | child->exit_code = data; |
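
valid_signal(), pulled in through the new <linux/signal.h> include, replaces the open-coded "(unsigned long) data >= _NSIG" tests in the PTRACE_CONT/PTRACE_SYSCALL/PTRACE_SINGLESTEP paths. A sketch of what the helper boils down to (the exact definition lives in include/linux/signal.h; _NSIG comes from asm/signal.h):

    /* 0 is allowed (no signal to deliver), anything up to _NSIG is a valid
     * signal number, everything else is rejected with -EIO by the callers. */
    static inline int valid_signal(unsigned long sig)
    {
            return sig <= _NSIG ? 1 : 0;
    }
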
@@ -711,18 +712,13 @@ out: | |||
711 | asmlinkage void | 712 | asmlinkage void |
712 | syscall_trace(struct pt_regs *regs, int entryexit) | 713 | syscall_trace(struct pt_regs *regs, int entryexit) |
713 | { | 714 | { |
714 | if (unlikely(current->audit_context)) { | 715 | if (unlikely(current->audit_context) && entryexit) |
715 | if (!entryexit) | 716 | audit_syscall_exit(current, AUDITSC_RESULT(regs->gprs[2]), regs->gprs[2]); |
716 | audit_syscall_entry(current, regs->gprs[2], | 717 | |
717 | regs->orig_gpr2, regs->gprs[3], | ||
718 | regs->gprs[4], regs->gprs[5]); | ||
719 | else | ||
720 | audit_syscall_exit(current, regs->gprs[2]); | ||
721 | } | ||
722 | if (!test_thread_flag(TIF_SYSCALL_TRACE)) | 718 | if (!test_thread_flag(TIF_SYSCALL_TRACE)) |
723 | return; | 719 | goto out; |
724 | if (!(current->ptrace & PT_PTRACED)) | 720 | if (!(current->ptrace & PT_PTRACED)) |
725 | return; | 721 | goto out; |
726 | ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) | 722 | ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) |
727 | ? 0x80 : 0)); | 723 | ? 0x80 : 0)); |
728 | 724 | ||
@@ -735,4 +731,10 @@ syscall_trace(struct pt_regs *regs, int entryexit) | |||
735 | send_sig(current->exit_code, current, 1); | 731 | send_sig(current->exit_code, current, 1); |
736 | current->exit_code = 0; | 732 | current->exit_code = 0; |
737 | } | 733 | } |
734 | out: | ||
735 | if (unlikely(current->audit_context) && !entryexit) | ||
736 | audit_syscall_entry(current, | ||
737 | test_thread_flag(TIF_31BIT)?AUDIT_ARCH_S390:AUDIT_ARCH_S390X, | ||
738 | regs->gprs[2], regs->orig_gpr2, regs->gprs[3], | ||
739 | regs->gprs[4], regs->gprs[5]); | ||
738 | } | 740 | } |
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c index 11fd6d556d8f..bee654abb6d3 100644 --- a/arch/s390/kernel/s390_ksyms.c +++ b/arch/s390/kernel/s390_ksyms.c | |||
@@ -34,7 +34,6 @@ EXPORT_SYMBOL(__clear_user_asm); | |||
34 | EXPORT_SYMBOL(__strncpy_from_user_asm); | 34 | EXPORT_SYMBOL(__strncpy_from_user_asm); |
35 | EXPORT_SYMBOL(__strnlen_user_asm); | 35 | EXPORT_SYMBOL(__strnlen_user_asm); |
36 | EXPORT_SYMBOL(diag10); | 36 | EXPORT_SYMBOL(diag10); |
37 | EXPORT_SYMBOL(default_storage_key); | ||
38 | 37 | ||
39 | /* | 38 | /* |
40 | * semaphore ops | 39 | * semaphore ops |
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index c879c40aa7a5..df83215beac3 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -44,6 +44,8 @@ | |||
44 | #include <asm/cpcmd.h> | 44 | #include <asm/cpcmd.h> |
45 | #include <asm/lowcore.h> | 45 | #include <asm/lowcore.h> |
46 | #include <asm/irq.h> | 46 | #include <asm/irq.h> |
47 | #include <asm/page.h> | ||
48 | #include <asm/ptrace.h> | ||
47 | 49 | ||
48 | /* | 50 | /* |
49 | * Machine setup.. | 51 | * Machine setup.. |
@@ -53,13 +55,14 @@ unsigned int console_devno = -1; | |||
53 | unsigned int console_irq = -1; | 55 | unsigned int console_irq = -1; |
54 | unsigned long memory_size = 0; | 56 | unsigned long memory_size = 0; |
55 | unsigned long machine_flags = 0; | 57 | unsigned long machine_flags = 0; |
56 | unsigned int default_storage_key = 0; | ||
57 | struct { | 58 | struct { |
58 | unsigned long addr, size, type; | 59 | unsigned long addr, size, type; |
59 | } memory_chunk[MEMORY_CHUNKS] = { { 0 } }; | 60 | } memory_chunk[MEMORY_CHUNKS] = { { 0 } }; |
60 | #define CHUNK_READ_WRITE 0 | 61 | #define CHUNK_READ_WRITE 0 |
61 | #define CHUNK_READ_ONLY 1 | 62 | #define CHUNK_READ_ONLY 1 |
62 | volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */ | 63 | volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */ |
64 | unsigned long __initdata zholes_size[MAX_NR_ZONES]; | ||
65 | static unsigned long __initdata memory_end; | ||
63 | 66 | ||
64 | /* | 67 | /* |
65 | * Setup options | 68 | * Setup options |
@@ -78,11 +81,15 @@ static char command_line[COMMAND_LINE_SIZE] = { 0, }; | |||
78 | 81 | ||
79 | static struct resource code_resource = { | 82 | static struct resource code_resource = { |
80 | .name = "Kernel code", | 83 | .name = "Kernel code", |
84 | .start = (unsigned long) &_text, | ||
85 | .end = (unsigned long) &_etext - 1, | ||
81 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM, | 86 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM, |
82 | }; | 87 | }; |
83 | 88 | ||
84 | static struct resource data_resource = { | 89 | static struct resource data_resource = { |
85 | .name = "Kernel data", | 90 | .name = "Kernel data", |
91 | .start = (unsigned long) &_etext, | ||
92 | .end = (unsigned long) &_edata - 1, | ||
86 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM, | 93 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM, |
87 | }; | 94 | }; |
88 | 95 | ||
@@ -310,90 +317,50 @@ void machine_power_off(void) | |||
310 | 317 | ||
311 | EXPORT_SYMBOL(machine_power_off); | 318 | EXPORT_SYMBOL(machine_power_off); |
312 | 319 | ||
313 | /* | 320 | static void __init |
314 | * Setup function called from init/main.c just after the banner | 321 | add_memory_hole(unsigned long start, unsigned long end) |
315 | * was printed. | 322 | { |
316 | */ | 323 | unsigned long dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT; |
317 | extern char _pstart, _pend, _stext; | 324 | |
325 | if (end <= dma_pfn) | ||
326 | zholes_size[ZONE_DMA] += end - start + 1; | ||
327 | else if (start > dma_pfn) | ||
328 | zholes_size[ZONE_NORMAL] += end - start + 1; | ||
329 | else { | ||
330 | zholes_size[ZONE_DMA] += dma_pfn - start + 1; | ||
331 | zholes_size[ZONE_NORMAL] += end - dma_pfn; | ||
332 | } | ||
333 | } | ||
318 | 334 | ||
319 | void __init setup_arch(char **cmdline_p) | 335 | static void __init |
336 | parse_cmdline_early(char **cmdline_p) | ||
320 | { | 337 | { |
321 | unsigned long bootmap_size; | 338 | char c = ' ', cn, *to = command_line, *from = COMMAND_LINE; |
322 | unsigned long memory_start, memory_end; | 339 | unsigned long delay = 0; |
323 | char c = ' ', cn, *to = command_line, *from = COMMAND_LINE; | ||
324 | unsigned long start_pfn, end_pfn; | ||
325 | static unsigned int smptrap=0; | ||
326 | unsigned long delay = 0; | ||
327 | struct _lowcore *lc; | ||
328 | int i; | ||
329 | 340 | ||
330 | if (smptrap) | 341 | /* Save unparsed command line copy for /proc/cmdline */ |
331 | return; | 342 | memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE); |
332 | smptrap=1; | 343 | saved_command_line[COMMAND_LINE_SIZE-1] = '\0'; |
333 | 344 | ||
334 | /* | 345 | for (;;) { |
335 | * print what head.S has found out about the machine | 346 | /* |
336 | */ | 347 | * "mem=XXX[kKmM]" sets memsize |
337 | #ifndef CONFIG_ARCH_S390X | 348 | */ |
338 | printk((MACHINE_IS_VM) ? | 349 | if (c == ' ' && strncmp(from, "mem=", 4) == 0) { |
339 | "We are running under VM (31 bit mode)\n" : | 350 | memory_end = simple_strtoul(from+4, &from, 0); |
340 | "We are running native (31 bit mode)\n"); | 351 | if ( *from == 'K' || *from == 'k' ) { |
341 | printk((MACHINE_HAS_IEEE) ? | 352 | memory_end = memory_end << 10; |
342 | "This machine has an IEEE fpu\n" : | 353 | from++; |
343 | "This machine has no IEEE fpu\n"); | 354 | } else if ( *from == 'M' || *from == 'm' ) { |
344 | #else /* CONFIG_ARCH_S390X */ | 355 | memory_end = memory_end << 20; |
345 | printk((MACHINE_IS_VM) ? | 356 | from++; |
346 | "We are running under VM (64 bit mode)\n" : | 357 | } |
347 | "We are running native (64 bit mode)\n"); | 358 | } |
348 | #endif /* CONFIG_ARCH_S390X */ | 359 | /* |
349 | 360 | * "ipldelay=XXX[sm]" sets ipl delay in seconds or minutes | |
350 | ROOT_DEV = Root_RAM0; | 361 | */ |
351 | memory_start = (unsigned long) &_end; /* fixit if use $CODELO etc*/ | 362 | if (c == ' ' && strncmp(from, "ipldelay=", 9) == 0) { |
352 | #ifndef CONFIG_ARCH_S390X | 363 | delay = simple_strtoul(from+9, &from, 0); |
353 | memory_end = memory_size & ~0x400000UL; /* align memory end to 4MB */ | ||
354 | /* | ||
355 | * We need some free virtual space to be able to do vmalloc. | ||
356 | * On a machine with 2GB memory we make sure that we have at | ||
357 | * least 128 MB free space for vmalloc. | ||
358 | */ | ||
359 | if (memory_end > 1920*1024*1024) | ||
360 | memory_end = 1920*1024*1024; | ||
361 | #else /* CONFIG_ARCH_S390X */ | ||
362 | memory_end = memory_size & ~0x200000UL; /* detected in head.s */ | ||
363 | #endif /* CONFIG_ARCH_S390X */ | ||
364 | init_mm.start_code = PAGE_OFFSET; | ||
365 | init_mm.end_code = (unsigned long) &_etext; | ||
366 | init_mm.end_data = (unsigned long) &_edata; | ||
367 | init_mm.brk = (unsigned long) &_end; | ||
368 | |||
369 | code_resource.start = (unsigned long) &_text; | ||
370 | code_resource.end = (unsigned long) &_etext - 1; | ||
371 | data_resource.start = (unsigned long) &_etext; | ||
372 | data_resource.end = (unsigned long) &_edata - 1; | ||
373 | |||
374 | /* Save unparsed command line copy for /proc/cmdline */ | ||
375 | memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE); | ||
376 | saved_command_line[COMMAND_LINE_SIZE-1] = '\0'; | ||
377 | |||
378 | for (;;) { | ||
379 | /* | ||
380 | * "mem=XXX[kKmM]" sets memsize | ||
381 | */ | ||
382 | if (c == ' ' && strncmp(from, "mem=", 4) == 0) { | ||
383 | memory_end = simple_strtoul(from+4, &from, 0); | ||
384 | if ( *from == 'K' || *from == 'k' ) { | ||
385 | memory_end = memory_end << 10; | ||
386 | from++; | ||
387 | } else if ( *from == 'M' || *from == 'm' ) { | ||
388 | memory_end = memory_end << 20; | ||
389 | from++; | ||
390 | } | ||
391 | } | ||
392 | /* | ||
393 | * "ipldelay=XXX[sm]" sets ipl delay in seconds or minutes | ||
394 | */ | ||
395 | if (c == ' ' && strncmp(from, "ipldelay=", 9) == 0) { | ||
396 | delay = simple_strtoul(from+9, &from, 0); | ||
397 | if (*from == 's' || *from == 'S') { | 364 | if (*from == 's' || *from == 'S') { |
398 | delay = delay*1000000; | 365 | delay = delay*1000000; |
399 | from++; | 366 | from++; |
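
The add_memory_hole() helper introduced earlier in this hunk splits each hole between the DMA and NORMAL zones at the MAX_DMA_ADDRESS boundary, so the zholes_size[] array handed to free_area_init_node() stays consistent with the zone sizes. A quick stand-alone check of that arithmetic with invented numbers (a DMA boundary at pfn 0x80000 and a hole straddling it):

    #include <stdio.h>

    int main(void)
    {
            /* Invented values mirroring the add_memory_hole() cases. */
            unsigned long dma_pfn = 0x80000, start = 0x7fff0, end = 0x80010;
            unsigned long dma_hole = 0, normal_hole = 0;

            if (end <= dma_pfn)
                    dma_hole += end - start + 1;
            else if (start > dma_pfn)
                    normal_hole += end - start + 1;
            else {
                    dma_hole += dma_pfn - start + 1;
                    normal_hole += end - dma_pfn;
            }
            printf("hole pages: DMA=%lu NORMAL=%lu total=%lu\n",
                   dma_hole, normal_hole, end - start + 1);
            return 0;
    }
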
@@ -403,24 +370,110 @@ void __init setup_arch(char **cmdline_p) | |||
403 | } | 370 | } |
404 | /* now wait for the requested amount of time */ | 371 | /* now wait for the requested amount of time */ |
405 | udelay(delay); | 372 | udelay(delay); |
406 | } | 373 | } |
407 | cn = *(from++); | 374 | cn = *(from++); |
408 | if (!cn) | 375 | if (!cn) |
409 | break; | 376 | break; |
410 | if (cn == '\n') | 377 | if (cn == '\n') |
411 | cn = ' '; /* replace newlines with space */ | 378 | cn = ' '; /* replace newlines with space */ |
412 | if (cn == 0x0d) | 379 | if (cn == 0x0d) |
413 | cn = ' '; /* replace 0x0d with space */ | 380 | cn = ' '; /* replace 0x0d with space */ |
414 | if (cn == ' ' && c == ' ') | 381 | if (cn == ' ' && c == ' ') |
415 | continue; /* remove additional spaces */ | 382 | continue; /* remove additional spaces */ |
416 | c = cn; | 383 | c = cn; |
417 | if (to - command_line >= COMMAND_LINE_SIZE) | 384 | if (to - command_line >= COMMAND_LINE_SIZE) |
418 | break; | 385 | break; |
419 | *(to++) = c; | 386 | *(to++) = c; |
420 | } | 387 | } |
421 | if (c == ' ' && to > command_line) to--; | 388 | if (c == ' ' && to > command_line) to--; |
422 | *to = '\0'; | 389 | *to = '\0'; |
423 | *cmdline_p = command_line; | 390 | *cmdline_p = command_line; |
391 | } | ||
392 | |||
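The mem= handling above reads a number with simple_strtoul() and then scales it by an optional K/M suffix before it becomes the new memory_end. A minimal sketch of just that suffix parsing, with an illustrative helper name that is not part of the patch:

        #include <linux/kernel.h>

        /* Parse "<number>[kKmM]" and return the size in bytes. */
        static unsigned long parse_memsize(const char *str)
        {
                char *end;
                unsigned long size = simple_strtoul(str, &end, 0);

                if (*end == 'K' || *end == 'k')
                        size <<= 10;            /* kilobytes */
                else if (*end == 'M' || *end == 'm')
                        size <<= 20;            /* megabytes */
                return size;
        }
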
393 | static void __init | ||
394 | setup_lowcore(void) | ||
395 | { | ||
396 | struct _lowcore *lc; | ||
397 | int lc_pages; | ||
398 | |||
399 | /* | ||
400 | * Setup lowcore for boot cpu | ||
401 | */ | ||
402 | lc_pages = sizeof(void *) == 8 ? 2 : 1; | ||
403 | lc = (struct _lowcore *) | ||
404 | __alloc_bootmem(lc_pages * PAGE_SIZE, lc_pages * PAGE_SIZE, 0); | ||
405 | memset(lc, 0, lc_pages * PAGE_SIZE); | ||
406 | lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; | ||
407 | lc->restart_psw.addr = | ||
408 | PSW_ADDR_AMODE | (unsigned long) restart_int_handler; | ||
409 | lc->external_new_psw.mask = PSW_KERNEL_BITS; | ||
410 | lc->external_new_psw.addr = | ||
411 | PSW_ADDR_AMODE | (unsigned long) ext_int_handler; | ||
412 | lc->svc_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT; | ||
413 | lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call; | ||
414 | lc->program_new_psw.mask = PSW_KERNEL_BITS; | ||
415 | lc->program_new_psw.addr = | ||
416 | PSW_ADDR_AMODE | (unsigned long)pgm_check_handler; | ||
417 | lc->mcck_new_psw.mask = PSW_KERNEL_BITS; | ||
418 | lc->mcck_new_psw.addr = | ||
419 | PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; | ||
420 | lc->io_new_psw.mask = PSW_KERNEL_BITS; | ||
421 | lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; | ||
422 | lc->ipl_device = S390_lowcore.ipl_device; | ||
423 | lc->jiffy_timer = -1LL; | ||
424 | lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; | ||
425 | lc->async_stack = (unsigned long) | ||
426 | __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE; | ||
427 | #ifdef CONFIG_CHECK_STACK | ||
428 | lc->panic_stack = (unsigned long) | ||
429 | __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE; | ||
430 | #endif | ||
431 | lc->current_task = (unsigned long) init_thread_union.thread_info.task; | ||
432 | lc->thread_info = (unsigned long) &init_thread_union; | ||
433 | #ifdef CONFIG_ARCH_S390X | ||
434 | if (MACHINE_HAS_DIAG44) | ||
435 | lc->diag44_opcode = 0x83000044; | ||
436 | else | ||
437 | lc->diag44_opcode = 0x07000700; | ||
438 | #endif /* CONFIG_ARCH_S390X */ | ||
439 | set_prefix((u32)(unsigned long) lc); | ||
440 | } | ||
441 | |||
442 | static void __init | ||
443 | setup_resources(void) | ||
444 | { | ||
445 | struct resource *res; | ||
446 | int i; | ||
447 | |||
448 | for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { | ||
449 | res = alloc_bootmem_low(sizeof(struct resource)); | ||
450 | res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; | ||
451 | switch (memory_chunk[i].type) { | ||
452 | case CHUNK_READ_WRITE: | ||
453 | res->name = "System RAM"; | ||
454 | break; | ||
455 | case CHUNK_READ_ONLY: | ||
456 | res->name = "System ROM"; | ||
457 | res->flags |= IORESOURCE_READONLY; | ||
458 | break; | ||
459 | default: | ||
460 | res->name = "reserved"; | ||
461 | } | ||
462 | res->start = memory_chunk[i].addr; | ||
463 | res->end = memory_chunk[i].addr + memory_chunk[i].size - 1; | ||
464 | request_resource(&iomem_resource, res); | ||
465 | request_resource(res, &code_resource); | ||
466 | request_resource(res, &data_resource); | ||
467 | } | ||
468 | } | ||
469 | |||
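setup_resources() registers each detected memory chunk under iomem_resource and then nests the kernel code and data ranges inside it, which is what makes them show up indented in /proc/iomem. A reduced sketch of that nesting pattern (addresses and names here are made up):

        #include <linux/init.h>
        #include <linux/ioport.h>

        static struct resource example_ram = {
                .name  = "System RAM",
                .start = 0x00000000,
                .end   = 0x00ffffff,
                .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
        };

        static struct resource example_code = {
                .name  = "Kernel code",
                .start = 0x00010000,
                .end   = 0x0001ffff,
        };

        static void __init register_example_resources(void)
        {
                /* parent first, then the child nested beneath it */
                request_resource(&iomem_resource, &example_ram);
                request_resource(&example_ram, &example_code);
        }
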
470 | static void __init | ||
471 | setup_memory(void) | ||
472 | { | ||
473 | unsigned long bootmap_size; | ||
474 | unsigned long start_pfn, end_pfn, init_pfn; | ||
475 | unsigned long last_rw_end; | ||
476 | int i; | ||
424 | 477 | ||
425 | /* | 478 | /* |
426 | * partially used pages are not usable - thus | 479 | * partially used pages are not usable - thus |
@@ -429,6 +482,10 @@ void __init setup_arch(char **cmdline_p) | |||
429 | start_pfn = (__pa(&_end) + PAGE_SIZE - 1) >> PAGE_SHIFT; | 482 | start_pfn = (__pa(&_end) + PAGE_SIZE - 1) >> PAGE_SHIFT; |
430 | end_pfn = max_pfn = memory_end >> PAGE_SHIFT; | 483 | end_pfn = max_pfn = memory_end >> PAGE_SHIFT; |
431 | 484 | ||
485 | /* Initialize storage key for kernel pages */ | ||
486 | for (init_pfn = 0 ; init_pfn < start_pfn; init_pfn++) | ||
487 | page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY); | ||
488 | |||
432 | /* | 489 | /* |
433 | * Initialize the boot-time allocator (with low memory only): | 490 | * Initialize the boot-time allocator (with low memory only): |
434 | */ | 491 | */ |
@@ -437,7 +494,9 @@ void __init setup_arch(char **cmdline_p) | |||
437 | /* | 494 | /* |
438 | * Register RAM areas with the bootmem allocator. | 495 | * Register RAM areas with the bootmem allocator. |
439 | */ | 496 | */ |
440 | for (i = 0; i < 16 && memory_chunk[i].size > 0; i++) { | 497 | last_rw_end = start_pfn; |
498 | |||
499 | for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { | ||
441 | unsigned long start_chunk, end_chunk; | 500 | unsigned long start_chunk, end_chunk; |
442 | 501 | ||
443 | if (memory_chunk[i].type != CHUNK_READ_WRITE) | 502 | if (memory_chunk[i].type != CHUNK_READ_WRITE) |
@@ -450,102 +509,98 @@ void __init setup_arch(char **cmdline_p) | |||
450 | start_chunk = start_pfn; | 509 | start_chunk = start_pfn; |
451 | if (end_chunk > end_pfn) | 510 | if (end_chunk > end_pfn) |
452 | end_chunk = end_pfn; | 511 | end_chunk = end_pfn; |
453 | if (start_chunk < end_chunk) | 512 | if (start_chunk < end_chunk) { |
513 | /* Initialize storage key for RAM pages */ | ||
514 | for (init_pfn = start_chunk ; init_pfn < end_chunk; | ||
515 | init_pfn++) | ||
516 | page_set_storage_key(init_pfn << PAGE_SHIFT, | ||
517 | PAGE_DEFAULT_KEY); | ||
454 | free_bootmem(start_chunk << PAGE_SHIFT, | 518 | free_bootmem(start_chunk << PAGE_SHIFT, |
455 | (end_chunk - start_chunk) << PAGE_SHIFT); | 519 | (end_chunk - start_chunk) << PAGE_SHIFT); |
520 | if (last_rw_end < start_chunk) | ||
521 | add_memory_hole(last_rw_end, start_chunk - 1); | ||
522 | last_rw_end = end_chunk; | ||
523 | } | ||
456 | } | 524 | } |
457 | 525 | ||
458 | /* | 526 | psw_set_key(PAGE_DEFAULT_KEY); |
459 | * Reserve the bootmem bitmap itself as well. We do this in two | 527 | |
460 | * steps (first step was init_bootmem()) because this catches | 528 | if (last_rw_end < end_pfn - 1) |
461 | * the (very unlikely) case of us accidentally initializing the | 529 | add_memory_hole(last_rw_end, end_pfn - 1); |
462 | * bootmem allocator with an invalid RAM area. | 530 | |
463 | */ | 531 | /* |
464 | reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size); | 532 | * Reserve the bootmem bitmap itself as well. We do this in two |
533 | * steps (first step was init_bootmem()) because this catches | ||
534 | * the (very unlikely) case of us accidentally initializing the | ||
535 | * bootmem allocator with an invalid RAM area. | ||
536 | */ | ||
537 | reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size); | ||
465 | 538 | ||
466 | #ifdef CONFIG_BLK_DEV_INITRD | 539 | #ifdef CONFIG_BLK_DEV_INITRD |
467 | if (INITRD_START) { | 540 | if (INITRD_START) { |
468 | if (INITRD_START + INITRD_SIZE <= memory_end) { | 541 | if (INITRD_START + INITRD_SIZE <= memory_end) { |
469 | reserve_bootmem(INITRD_START, INITRD_SIZE); | 542 | reserve_bootmem(INITRD_START, INITRD_SIZE); |
470 | initrd_start = INITRD_START; | 543 | initrd_start = INITRD_START; |
471 | initrd_end = initrd_start + INITRD_SIZE; | 544 | initrd_end = initrd_start + INITRD_SIZE; |
472 | } else { | 545 | } else { |
473 | printk("initrd extends beyond end of memory " | 546 | printk("initrd extends beyond end of memory " |
474 | "(0x%08lx > 0x%08lx)\ndisabling initrd\n", | 547 | "(0x%08lx > 0x%08lx)\ndisabling initrd\n", |
475 | initrd_start + INITRD_SIZE, memory_end); | 548 | initrd_start + INITRD_SIZE, memory_end); |
476 | initrd_start = initrd_end = 0; | 549 | initrd_start = initrd_end = 0; |
477 | } | 550 | } |
478 | } | 551 | } |
479 | #endif | 552 | #endif |
553 | } | ||
480 | 554 | ||
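Before any page is handed out, setup_memory() now walks the kernel's own page frames and every usable RAM chunk and assigns each frame the default storage key, so the keys start out in a known state. The loop it repeats in both places, reduced to a helper (the helper name and pfn bounds are illustrative):

        #include <linux/init.h>
        #include <asm/page.h>

        /* Give every page frame in [start, end) the default storage key. */
        static void __init init_storage_keys(unsigned long start, unsigned long end)
        {
                unsigned long pfn;

                for (pfn = start; pfn < end; pfn++)
                        page_set_storage_key(pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY);
        }
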
481 | for (i = 0; i < 16 && memory_chunk[i].size > 0; i++) { | 555 | /* |
482 | struct resource *res; | 556 | * Setup function called from init/main.c just after the banner |
483 | 557 | * was printed. | |
484 | res = alloc_bootmem_low(sizeof(struct resource)); | 558 | */ |
485 | res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; | ||
486 | |||
487 | switch (memory_chunk[i].type) { | ||
488 | case CHUNK_READ_WRITE: | ||
489 | res->name = "System RAM"; | ||
490 | break; | ||
491 | case CHUNK_READ_ONLY: | ||
492 | res->name = "System ROM"; | ||
493 | res->flags |= IORESOURCE_READONLY; | ||
494 | break; | ||
495 | default: | ||
496 | res->name = "reserved"; | ||
497 | } | ||
498 | res->start = memory_chunk[i].addr; | ||
499 | res->end = memory_chunk[i].addr + memory_chunk[i].size - 1; | ||
500 | request_resource(&iomem_resource, res); | ||
501 | request_resource(res, &code_resource); | ||
502 | request_resource(res, &data_resource); | ||
503 | } | ||
504 | 559 | ||
560 | void __init | ||
561 | setup_arch(char **cmdline_p) | ||
562 | { | ||
505 | /* | 563 | /* |
506 | * Setup lowcore for boot cpu | 564 | * print what head.S has found out about the machine |
507 | */ | 565 | */ |
508 | #ifndef CONFIG_ARCH_S390X | 566 | #ifndef CONFIG_ARCH_S390X |
509 | lc = (struct _lowcore *) __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0); | 567 | printk((MACHINE_IS_VM) ? |
510 | memset(lc, 0, PAGE_SIZE); | 568 | "We are running under VM (31 bit mode)\n" : |
569 | "We are running native (31 bit mode)\n"); | ||
570 | printk((MACHINE_HAS_IEEE) ? | ||
571 | "This machine has an IEEE fpu\n" : | ||
572 | "This machine has no IEEE fpu\n"); | ||
511 | #else /* CONFIG_ARCH_S390X */ | 573 | #else /* CONFIG_ARCH_S390X */ |
512 | lc = (struct _lowcore *) __alloc_bootmem(2*PAGE_SIZE, 2*PAGE_SIZE, 0); | 574 | printk((MACHINE_IS_VM) ? |
513 | memset(lc, 0, 2*PAGE_SIZE); | 575 | "We are running under VM (64 bit mode)\n" : |
576 | "We are running native (64 bit mode)\n"); | ||
514 | #endif /* CONFIG_ARCH_S390X */ | 577 | #endif /* CONFIG_ARCH_S390X */ |
515 | lc->restart_psw.mask = PSW_BASE_BITS; | 578 | |
516 | lc->restart_psw.addr = | 579 | ROOT_DEV = Root_RAM0; |
517 | PSW_ADDR_AMODE | (unsigned long) restart_int_handler; | 580 | #ifndef CONFIG_ARCH_S390X |
518 | lc->external_new_psw.mask = PSW_KERNEL_BITS; | 581 | memory_end = memory_size & ~0x400000UL; /* align memory end to 4MB */ |
519 | lc->external_new_psw.addr = | 582 | /* |
520 | PSW_ADDR_AMODE | (unsigned long) ext_int_handler; | 583 | * We need some free virtual space to be able to do vmalloc. |
521 | lc->svc_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT; | 584 | * On a machine with 2GB memory we make sure that we have at |
522 | lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call; | 585 | * least 128 MB free space for vmalloc. |
523 | lc->program_new_psw.mask = PSW_KERNEL_BITS; | 586 | */ |
524 | lc->program_new_psw.addr = | 587 | if (memory_end > 1920*1024*1024) |
525 | PSW_ADDR_AMODE | (unsigned long)pgm_check_handler; | 588 | memory_end = 1920*1024*1024; |
526 | lc->mcck_new_psw.mask = PSW_KERNEL_BITS; | 589 | #else /* CONFIG_ARCH_S390X */ |
527 | lc->mcck_new_psw.addr = | 590 | memory_end = memory_size & ~0x200000UL; /* detected in head.s */ |
528 | PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; | ||
529 | lc->io_new_psw.mask = PSW_KERNEL_BITS; | ||
530 | lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; | ||
531 | lc->ipl_device = S390_lowcore.ipl_device; | ||
532 | lc->jiffy_timer = -1LL; | ||
533 | lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; | ||
534 | lc->async_stack = (unsigned long) | ||
535 | __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE; | ||
536 | #ifdef CONFIG_CHECK_STACK | ||
537 | lc->panic_stack = (unsigned long) | ||
538 | __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE; | ||
539 | #endif | ||
540 | lc->current_task = (unsigned long) init_thread_union.thread_info.task; | ||
541 | lc->thread_info = (unsigned long) &init_thread_union; | ||
542 | #ifdef CONFIG_ARCH_S390X | ||
543 | if (MACHINE_HAS_DIAG44) | ||
544 | lc->diag44_opcode = 0x83000044; | ||
545 | else | ||
546 | lc->diag44_opcode = 0x07000700; | ||
547 | #endif /* CONFIG_ARCH_S390X */ | 591 | #endif /* CONFIG_ARCH_S390X */ |
548 | set_prefix((u32)(unsigned long) lc); | 592 | |
593 | init_mm.start_code = PAGE_OFFSET; | ||
594 | init_mm.end_code = (unsigned long) &_etext; | ||
595 | init_mm.end_data = (unsigned long) &_edata; | ||
596 | init_mm.brk = (unsigned long) &_end; | ||
597 | |||
598 | parse_cmdline_early(cmdline_p); | ||
599 | |||
600 | setup_memory(); | ||
601 | setup_resources(); | ||
602 | setup_lowcore(); | ||
603 | |||
549 | cpu_init(); | 604 | cpu_init(); |
550 | __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr; | 605 | __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr; |
551 | 606 | ||
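The 31-bit clamp in the relocated setup_arch() above caps memory_end at 1920 MB so that at least 128 MB of the 2 GB kernel address space remains free for vmalloc. The same arithmetic spelled out with named constants (the macro names are illustrative, the values are the ones used here):

        #define KERNEL_ADDR_SPACE  (2048UL << 20)  /* 2 GB of 31-bit address space */
        #define VMALLOC_RESERVE    ( 128UL << 20)  /* kept free for vmalloc        */
        #define MEMORY_END_MAX     (KERNEL_ADDR_SPACE - VMALLOC_RESERVE)  /* 1920 MB */

        static unsigned long clamp_memory_end(unsigned long memory_end)
        {
                return memory_end > MEMORY_END_MAX ? MEMORY_END_MAX : memory_end;
        }
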
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 061e81138dc2..8ca485676780 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
@@ -244,7 +244,7 @@ int sysctl_hz_timer = 1; | |||
244 | */ | 244 | */ |
245 | static inline void stop_hz_timer(void) | 245 | static inline void stop_hz_timer(void) |
246 | { | 246 | { |
247 | __u64 timer; | 247 | __u64 timer, todval; |
248 | 248 | ||
249 | if (sysctl_hz_timer != 0) | 249 | if (sysctl_hz_timer != 0) |
250 | return; | 250 | return; |
@@ -265,8 +265,14 @@ static inline void stop_hz_timer(void) | |||
265 | * for the next event. | 265 | * for the next event. |
266 | */ | 266 | */ |
267 | timer = (__u64) (next_timer_interrupt() - jiffies) + jiffies_64; | 267 | timer = (__u64) (next_timer_interrupt() - jiffies) + jiffies_64; |
268 | timer = jiffies_timer_cc + timer * CLK_TICKS_PER_JIFFY; | 268 | todval = -1ULL; |
269 | asm volatile ("SCKC %0" : : "m" (timer)); | 269 | /* Be careful about overflows. */ |
270 | if (timer < (-1ULL / CLK_TICKS_PER_JIFFY)) { | ||
271 | timer = jiffies_timer_cc + timer * CLK_TICKS_PER_JIFFY; | ||
272 | if (timer >= jiffies_timer_cc) | ||
273 | todval = timer; | ||
274 | } | ||
275 | asm volatile ("SCKC %0" : : "m" (todval)); | ||
270 | } | 276 | } |
271 | 277 | ||
272 | /* | 278 | /* |
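The stop_hz_timer() change guards the conversion from a jiffies delta to TOD clock ticks: if the delta is so large that multiplying by CLK_TICKS_PER_JIFFY would overflow 64 bits, or the sum with jiffies_timer_cc wraps, the clock comparator is simply left at -1ULL, i.e. no wakeup is programmed. The guard pattern on its own, in plain C with generic names:

        #include <stdint.h>

        /* Return delta * ticks_per_unit, or UINT64_MAX if it would overflow. */
        static uint64_t scale_or_saturate(uint64_t delta, uint64_t ticks_per_unit)
        {
                if (ticks_per_unit == 0 || delta >= UINT64_MAX / ticks_per_unit)
                        return UINT64_MAX;              /* would overflow: saturate */
                return delta * ticks_per_unit;
        }
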
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index bb6cf02418a2..fa0726507b3d 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c | |||
@@ -122,12 +122,17 @@ static void start_cpu_timer(void) | |||
122 | struct vtimer_queue *vt_list; | 122 | struct vtimer_queue *vt_list; |
123 | 123 | ||
124 | vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); | 124 | vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); |
125 | set_vtimer(vt_list->idle); | 125 | |
126 | 	/* CPU timer interrupt is pending, don't reprogram it */ | ||
127 | if (vt_list->idle & 1LL<<63) | ||
128 | return; | ||
129 | |||
130 | if (!list_empty(&vt_list->list)) | ||
131 | set_vtimer(vt_list->idle); | ||
126 | } | 132 | } |
127 | 133 | ||
128 | static void stop_cpu_timer(void) | 134 | static void stop_cpu_timer(void) |
129 | { | 135 | { |
130 | __u64 done; | ||
131 | struct vtimer_queue *vt_list; | 136 | struct vtimer_queue *vt_list; |
132 | 137 | ||
133 | vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); | 138 | vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); |
@@ -138,21 +143,17 @@ static void stop_cpu_timer(void) | |||
138 | goto fire; | 143 | goto fire; |
139 | } | 144 | } |
140 | 145 | ||
141 | /* store progress */ | 146 | /* store the actual expire value */ |
142 | asm volatile ("STPT %0" : "=m" (done)); | 147 | asm volatile ("STPT %0" : "=m" (vt_list->idle)); |
143 | 148 | ||
144 | /* | 149 | /* |
145 | 	 * If done is negative we do not stop the CPU timer | 150 | 	 * If the CPU timer is negative we don't reprogram |
146 | * because we will get instantly an interrupt that | 151 | * it because we will get instantly an interrupt. |
147 | * will start the CPU timer again. | ||
148 | */ | 152 | */ |
149 | if (done & 1LL<<63) | 153 | if (vt_list->idle & 1LL<<63) |
150 | return; | 154 | return; |
151 | else | ||
152 | vt_list->offset += vt_list->to_expire - done; | ||
153 | 155 | ||
154 | /* save the actual expire value */ | 156 | vt_list->offset += vt_list->to_expire - vt_list->idle; |
155 | vt_list->idle = done; | ||
156 | 157 | ||
157 | /* | 158 | /* |
158 | * We cannot halt the CPU timer, we just write a value that | 159 | * We cannot halt the CPU timer, we just write a value that |
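Both vtime.c hunks key off bit 63 of the value STPT stores: the CPU timer counts down through zero, so a set sign bit means it has already expired and an interrupt is pending, in which case reprogramming it would be pointless. A tiny helper expressing that test (purely illustrative):

        #include <linux/types.h>

        /* True if a stored CPU-timer value indicates an already expired timer. */
        static inline int cpu_timer_expired(__u64 remaining)
        {
                return (remaining & (1ULL << 63)) != 0;
        }
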
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c index d30cdb4248a9..f5a5bc09b8fa 100644 --- a/arch/s390/mm/cmm.c +++ b/arch/s390/mm/cmm.c | |||
@@ -20,6 +20,11 @@ | |||
20 | #include <asm/pgalloc.h> | 20 | #include <asm/pgalloc.h> |
21 | #include <asm/uaccess.h> | 21 | #include <asm/uaccess.h> |
22 | 22 | ||
23 | static char *sender = "VMRMSVM"; | ||
24 | module_param(sender, charp, 0); | ||
25 | MODULE_PARM_DESC(sender, | ||
26 | "Guest name that may send SMSG messages (default VMRMSVM)"); | ||
27 | |||
23 | #include "../../../drivers/s390/net/smsgiucv.h" | 28 | #include "../../../drivers/s390/net/smsgiucv.h" |
24 | 29 | ||
25 | #define CMM_NR_PAGES ((PAGE_SIZE / sizeof(unsigned long)) - 2) | 30 | #define CMM_NR_PAGES ((PAGE_SIZE / sizeof(unsigned long)) - 2) |
@@ -367,10 +372,12 @@ static struct ctl_table cmm_dir_table[] = { | |||
367 | #ifdef CONFIG_CMM_IUCV | 372 | #ifdef CONFIG_CMM_IUCV |
368 | #define SMSG_PREFIX "CMM" | 373 | #define SMSG_PREFIX "CMM" |
369 | static void | 374 | static void |
370 | cmm_smsg_target(char *msg) | 375 | cmm_smsg_target(char *from, char *msg) |
371 | { | 376 | { |
372 | long pages, seconds; | 377 | long pages, seconds; |
373 | 378 | ||
379 | if (strlen(sender) > 0 && strcmp(from, sender) != 0) | ||
380 | return; | ||
374 | if (!cmm_skip_blanks(msg + strlen(SMSG_PREFIX), &msg)) | 381 | if (!cmm_skip_blanks(msg + strlen(SMSG_PREFIX), &msg)) |
375 | return; | 382 | return; |
376 | if (strncmp(msg, "SHRINK", 6) == 0) { | 383 | if (strncmp(msg, "SHRINK", 6) == 0) { |
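With the new sender parameter, cmm_smsg_target() drops any SMSG whose originating guest does not match the configured name; an empty string turns the filter off. The acceptance test in isolation (the helper name is illustrative, from/sender are the same strings as in the patch):

        #include <linux/string.h>

        /* Accept a message if no sender is configured or the sender matches. */
        static int sender_allowed(const char *from, const char *sender)
        {
                return sender[0] == '\0' || strcmp(from, sender) == 0;
        }

When cmm is built as a module the filter would typically be set at load time via the sender= option, otherwise it stays at the default VMRMSVM.
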
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 8e723bc7f795..6ec5cd981e74 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c | |||
@@ -101,6 +101,7 @@ extern unsigned long _end; | |||
101 | extern unsigned long __init_begin; | 101 | extern unsigned long __init_begin; |
102 | extern unsigned long __init_end; | 102 | extern unsigned long __init_end; |
103 | 103 | ||
104 | extern unsigned long __initdata zholes_size[]; | ||
104 | /* | 105 | /* |
105 | * paging_init() sets up the page tables | 106 | * paging_init() sets up the page tables |
106 | */ | 107 | */ |
@@ -163,10 +164,13 @@ void __init paging_init(void) | |||
163 | local_flush_tlb(); | 164 | local_flush_tlb(); |
164 | 165 | ||
165 | { | 166 | { |
166 | unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0}; | 167 | unsigned long zones_size[MAX_NR_ZONES]; |
167 | 168 | ||
169 | memset(zones_size, 0, sizeof(zones_size)); | ||
168 | zones_size[ZONE_DMA] = max_low_pfn; | 170 | zones_size[ZONE_DMA] = max_low_pfn; |
169 | free_area_init(zones_size); | 171 | free_area_init_node(0, &contig_page_data, zones_size, |
172 | __pa(PAGE_OFFSET) >> PAGE_SHIFT, | ||
173 | zholes_size); | ||
170 | } | 174 | } |
171 | return; | 175 | return; |
172 | } | 176 | } |
@@ -184,9 +188,10 @@ void __init paging_init(void) | |||
184 | _KERN_REGION_TABLE; | 188 | _KERN_REGION_TABLE; |
185 | static const int ssm_mask = 0x04000000L; | 189 | static const int ssm_mask = 0x04000000L; |
186 | 190 | ||
187 | unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0}; | 191 | unsigned long zones_size[MAX_NR_ZONES]; |
188 | unsigned long dma_pfn, high_pfn; | 192 | unsigned long dma_pfn, high_pfn; |
189 | 193 | ||
194 | memset(zones_size, 0, sizeof(zones_size)); | ||
190 | dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT; | 195 | dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT; |
191 | high_pfn = max_low_pfn; | 196 | high_pfn = max_low_pfn; |
192 | 197 | ||
@@ -198,8 +203,8 @@ void __init paging_init(void) | |||
198 | } | 203 | } |
199 | 204 | ||
200 | /* Initialize mem_map[]. */ | 205 | /* Initialize mem_map[]. */ |
201 | free_area_init(zones_size); | 206 | free_area_init_node(0, &contig_page_data, zones_size, |
202 | 207 | __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size); | |
203 | 208 | ||
204 | /* | 209 | /* |
205 | * map whole physical memory to virtual memory (identity mapping) | 210 | * map whole physical memory to virtual memory (identity mapping) |
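The paging_init() changes replace the three-element zones_size initializer with a memset() so that every MAX_NR_ZONES slot is cleared no matter how many zones the kernel is built with, and then hand the per-zone hole sizes to free_area_init_node(). The initialization pattern by itself (the helper name is illustrative):

        #include <linux/mm.h>
        #include <linux/string.h>

        /* Clear every zone slot, then fill in the ones this machine uses. */
        static void init_zone_sizes(unsigned long *zones_size, unsigned long dma_pfn)
        {
                memset(zones_size, 0, MAX_NR_ZONES * sizeof(*zones_size));
                zones_size[ZONE_DMA] = dma_pfn;
        }
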
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 722ea1d63c94..3468d5127223 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig | |||
@@ -693,6 +693,10 @@ config RTC_9701JE | |||
693 | 693 | ||
694 | endmenu | 694 | endmenu |
695 | 695 | ||
696 | config ISA_DMA_API | ||
697 | bool | ||
698 | depends on MPC1211 | ||
699 | default y | ||
696 | 700 | ||
697 | menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)" | 701 | menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)" |
698 | 702 | ||
diff --git a/arch/sh/kernel/ptrace.c b/arch/sh/kernel/ptrace.c index 1b0dfb4d8ea4..b28919b65682 100644 --- a/arch/sh/kernel/ptrace.c +++ b/arch/sh/kernel/ptrace.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/user.h> | 20 | #include <linux/user.h> |
21 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
22 | #include <linux/security.h> | 22 | #include <linux/security.h> |
23 | #include <linux/signal.h> | ||
23 | 24 | ||
24 | #include <asm/io.h> | 25 | #include <asm/io.h> |
25 | #include <asm/uaccess.h> | 26 | #include <asm/uaccess.h> |
@@ -197,7 +198,7 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) | |||
197 | case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ | 198 | case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ |
198 | case PTRACE_CONT: { /* restart after signal. */ | 199 | case PTRACE_CONT: { /* restart after signal. */ |
199 | ret = -EIO; | 200 | ret = -EIO; |
200 | if ((unsigned long) data > _NSIG) | 201 | if (!valid_signal(data)) |
201 | break; | 202 | break; |
202 | if (request == PTRACE_SYSCALL) | 203 | if (request == PTRACE_SYSCALL) |
203 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 204 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
@@ -228,7 +229,7 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) | |||
228 | struct pt_regs *dummy = NULL; | 229 | struct pt_regs *dummy = NULL; |
229 | 230 | ||
230 | ret = -EIO; | 231 | ret = -EIO; |
231 | if ((unsigned long) data > _NSIG) | 232 | if (!valid_signal(data)) |
232 | break; | 233 | break; |
233 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 234 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
234 | if ((child->ptrace & PT_DTRACE) == 0) { | 235 | if ((child->ptrace & PT_DTRACE) == 0) { |
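This sh ptrace hunk, like the sh64, sparc and sparc64 ones further down, replaces the open-coded "(unsigned long) data > _NSIG" test with the valid_signal() helper from <linux/signal.h>, keeping the bounds check in one place. Conceptually the helper amounts to roughly this (a sketch of its intent, not the kernel's exact definition):

        /* 0 means "no signal"; anything up to _NSIG is a valid signal number. */
        static inline int valid_signal(unsigned long sig)
        {
                return sig <= _NSIG;
        }
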
diff --git a/arch/sh64/kernel/ptrace.c b/arch/sh64/kernel/ptrace.c index 800288c1562b..fd2000956dae 100644 --- a/arch/sh64/kernel/ptrace.c +++ b/arch/sh64/kernel/ptrace.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/errno.h> | 27 | #include <linux/errno.h> |
28 | #include <linux/ptrace.h> | 28 | #include <linux/ptrace.h> |
29 | #include <linux/user.h> | 29 | #include <linux/user.h> |
30 | #include <linux/signal.h> | ||
30 | 31 | ||
31 | #include <asm/io.h> | 32 | #include <asm/io.h> |
32 | #include <asm/uaccess.h> | 33 | #include <asm/uaccess.h> |
@@ -255,7 +256,7 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) | |||
255 | case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ | 256 | case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ |
256 | case PTRACE_CONT: { /* restart after signal. */ | 257 | case PTRACE_CONT: { /* restart after signal. */ |
257 | ret = -EIO; | 258 | ret = -EIO; |
258 | if ((unsigned long) data > _NSIG) | 259 | if (!valid_signal(data)) |
259 | break; | 260 | break; |
260 | if (request == PTRACE_SYSCALL) | 261 | if (request == PTRACE_SYSCALL) |
261 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 262 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
@@ -285,7 +286,7 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) | |||
285 | struct pt_regs *regs; | 286 | struct pt_regs *regs; |
286 | 287 | ||
287 | ret = -EIO; | 288 | ret = -EIO; |
288 | if ((unsigned long) data > _NSIG) | 289 | if (!valid_signal(data)) |
289 | break; | 290 | break; |
290 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 291 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
291 | if ((child->ptrace & PT_DTRACE) == 0) { | 292 | if ((child->ptrace & PT_DTRACE) == 0) { |
diff --git a/arch/sh64/kernel/sys_sh64.c b/arch/sh64/kernel/sys_sh64.c index 4546845b9caf..58ff7d522d81 100644 --- a/arch/sh64/kernel/sys_sh64.c +++ b/arch/sh64/kernel/sys_sh64.c | |||
@@ -283,18 +283,3 @@ asmlinkage int sys_uname(struct old_utsname * name) | |||
283 | up_read(&uts_sem); | 283 | up_read(&uts_sem); |
284 | return err?-EFAULT:0; | 284 | return err?-EFAULT:0; |
285 | } | 285 | } |
286 | |||
287 | /* Copy from mips version */ | ||
288 | asmlinkage long sys_shmatcall(int shmid, char __user *shmaddr, | ||
289 | int shmflg) | ||
290 | { | ||
291 | unsigned long raddr; | ||
292 | int err; | ||
293 | |||
294 | err = do_shmat(shmid, shmaddr, shmflg, &raddr); | ||
295 | if (err) | ||
296 | return err; | ||
297 | |||
298 | err = raddr; | ||
299 | return err; | ||
300 | } | ||
diff --git a/arch/sh64/kernel/syscalls.S b/arch/sh64/kernel/syscalls.S index 8ed417df3dc6..6aabc63e4518 100644 --- a/arch/sh64/kernel/syscalls.S +++ b/arch/sh64/kernel/syscalls.S | |||
@@ -268,7 +268,7 @@ sys_call_table: | |||
268 | .long sys_msgrcv | 268 | .long sys_msgrcv |
269 | .long sys_msgget | 269 | .long sys_msgget |
270 | .long sys_msgctl | 270 | .long sys_msgctl |
271 | .long sys_shmatcall | 271 | .long sys_shmat |
272 | .long sys_shmdt /* 245 */ | 272 | .long sys_shmdt /* 245 */ |
273 | .long sys_shmget | 273 | .long sys_shmget |
274 | .long sys_shmctl | 274 | .long sys_shmctl |
diff --git a/arch/sparc/kernel/process.c b/arch/sparc/kernel/process.c index 143fe2f3c1c4..2c216ffeea90 100644 --- a/arch/sparc/kernel/process.c +++ b/arch/sparc/kernel/process.c | |||
@@ -83,9 +83,6 @@ void default_idle(void) | |||
83 | */ | 83 | */ |
84 | void cpu_idle(void) | 84 | void cpu_idle(void) |
85 | { | 85 | { |
86 | if (current->pid != 0) | ||
87 | goto out; | ||
88 | |||
89 | /* endless idle loop with no priority at all */ | 86 | /* endless idle loop with no priority at all */ |
90 | for (;;) { | 87 | for (;;) { |
91 | if (ARCH_SUN4C_SUN4) { | 88 | if (ARCH_SUN4C_SUN4) { |
@@ -126,8 +123,6 @@ void cpu_idle(void) | |||
126 | schedule(); | 123 | schedule(); |
127 | check_pgt_cache(); | 124 | check_pgt_cache(); |
128 | } | 125 | } |
129 | out: | ||
130 | return; | ||
131 | } | 126 | } |
132 | 127 | ||
133 | #else | 128 | #else |
@@ -333,6 +328,17 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp) | |||
333 | printk("\n"); | 328 | printk("\n"); |
334 | } | 329 | } |
335 | 330 | ||
331 | void dump_stack(void) | ||
332 | { | ||
333 | unsigned long *ksp; | ||
334 | |||
335 | __asm__ __volatile__("mov %%fp, %0" | ||
336 | : "=r" (ksp)); | ||
337 | show_stack(current, ksp); | ||
338 | } | ||
339 | |||
340 | EXPORT_SYMBOL(dump_stack); | ||
341 | |||
336 | /* | 342 | /* |
337 | * Note: sparc64 has a pretty intricated thread_saved_pc, check it out. | 343 | * Note: sparc64 has a pretty intricated thread_saved_pc, check it out. |
338 | */ | 344 | */ |
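The new sparc dump_stack() reads the current frame pointer with inline assembly and feeds it to show_stack(); exporting the symbol lets modules print a backtrace from arbitrary code. A typical (illustrative) use from a driver:

        #include <linux/kernel.h>

        static void report_bad_state(void)
        {
                printk(KERN_WARNING "mydriver: unexpected state, dumping stack\n");
                dump_stack();           /* prints the current call chain */
        }
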
diff --git a/arch/sparc/kernel/ptrace.c b/arch/sparc/kernel/ptrace.c index c4f93bd2daf2..475c4c13462c 100644 --- a/arch/sparc/kernel/ptrace.c +++ b/arch/sparc/kernel/ptrace.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/smp.h> | 18 | #include <linux/smp.h> |
19 | #include <linux/smp_lock.h> | 19 | #include <linux/smp_lock.h> |
20 | #include <linux/security.h> | 20 | #include <linux/security.h> |
21 | #include <linux/signal.h> | ||
21 | 22 | ||
22 | #include <asm/pgtable.h> | 23 | #include <asm/pgtable.h> |
23 | #include <asm/system.h> | 24 | #include <asm/system.h> |
@@ -526,7 +527,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs) | |||
526 | addr = 1; | 527 | addr = 1; |
527 | 528 | ||
528 | case PTRACE_CONT: { /* restart after signal. */ | 529 | case PTRACE_CONT: { /* restart after signal. */ |
529 | if (data > _NSIG) { | 530 | if (!valid_signal(data)) { |
530 | pt_error_return(regs, EIO); | 531 | pt_error_return(regs, EIO); |
531 | goto out_tsk; | 532 | goto out_tsk; |
532 | } | 533 | } |
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c index f91b0e8d0dc8..1bd430d0ca06 100644 --- a/arch/sparc/kernel/sparc_ksyms.c +++ b/arch/sparc/kernel/sparc_ksyms.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/in6.h> | 20 | #include <linux/in6.h> |
21 | #include <linux/spinlock.h> | 21 | #include <linux/spinlock.h> |
22 | #include <linux/mm.h> | 22 | #include <linux/mm.h> |
23 | #include <linux/syscalls.h> | ||
23 | #ifdef CONFIG_PCI | 24 | #ifdef CONFIG_PCI |
24 | #include <linux/pci.h> | 25 | #include <linux/pci.h> |
25 | #endif | 26 | #endif |
@@ -89,6 +90,9 @@ extern void ___atomic24_sub(void); | |||
89 | extern void ___set_bit(void); | 90 | extern void ___set_bit(void); |
90 | extern void ___clear_bit(void); | 91 | extern void ___clear_bit(void); |
91 | extern void ___change_bit(void); | 92 | extern void ___change_bit(void); |
93 | extern void ___rw_read_enter(void); | ||
94 | extern void ___rw_read_exit(void); | ||
95 | extern void ___rw_write_enter(void); | ||
92 | 96 | ||
93 | /* Alias functions whose names begin with "." and export the aliases. | 97 | /* Alias functions whose names begin with "." and export the aliases. |
94 | * The module references will be fixed up by module_frob_arch_sections. | 98 | * The module references will be fixed up by module_frob_arch_sections. |
@@ -121,9 +125,9 @@ EXPORT_SYMBOL(_do_write_unlock); | |||
121 | #endif | 125 | #endif |
122 | #else | 126 | #else |
123 | // XXX find what uses (or used) these. | 127 | // XXX find what uses (or used) these. |
124 | // EXPORT_SYMBOL_PRIVATE(_rw_read_enter); | 128 | EXPORT_SYMBOL(___rw_read_enter); |
125 | // EXPORT_SYMBOL_PRIVATE(_rw_read_exit); | 129 | EXPORT_SYMBOL(___rw_read_exit); |
126 | // EXPORT_SYMBOL_PRIVATE(_rw_write_enter); | 130 | EXPORT_SYMBOL(___rw_write_enter); |
127 | #endif | 131 | #endif |
128 | /* semaphores */ | 132 | /* semaphores */ |
129 | EXPORT_SYMBOL(__up); | 133 | EXPORT_SYMBOL(__up); |
@@ -144,6 +148,9 @@ EXPORT_SYMBOL(___set_bit); | |||
144 | EXPORT_SYMBOL(___clear_bit); | 148 | EXPORT_SYMBOL(___clear_bit); |
145 | EXPORT_SYMBOL(___change_bit); | 149 | EXPORT_SYMBOL(___change_bit); |
146 | 150 | ||
151 | /* Per-CPU information table */ | ||
152 | EXPORT_PER_CPU_SYMBOL(__cpu_data); | ||
153 | |||
147 | #ifdef CONFIG_SMP | 154 | #ifdef CONFIG_SMP |
148 | /* IRQ implementation. */ | 155 | /* IRQ implementation. */ |
149 | EXPORT_SYMBOL(synchronize_irq); | 156 | EXPORT_SYMBOL(synchronize_irq); |
@@ -151,6 +158,10 @@ EXPORT_SYMBOL(synchronize_irq); | |||
151 | /* Misc SMP information */ | 158 | /* Misc SMP information */ |
152 | EXPORT_SYMBOL(__cpu_number_map); | 159 | EXPORT_SYMBOL(__cpu_number_map); |
153 | EXPORT_SYMBOL(__cpu_logical_map); | 160 | EXPORT_SYMBOL(__cpu_logical_map); |
161 | |||
162 | /* CPU online map and active count. */ | ||
163 | EXPORT_SYMBOL(cpu_online_map); | ||
164 | EXPORT_SYMBOL(phys_cpu_present_map); | ||
154 | #endif | 165 | #endif |
155 | 166 | ||
156 | EXPORT_SYMBOL(__udelay); | 167 | EXPORT_SYMBOL(__udelay); |
@@ -332,3 +343,6 @@ EXPORT_SYMBOL(do_BUG); | |||
332 | 343 | ||
333 | /* Sun Power Management Idle Handler */ | 344 | /* Sun Power Management Idle Handler */ |
334 | EXPORT_SYMBOL(pm_idle); | 345 | EXPORT_SYMBOL(pm_idle); |
346 | |||
347 | /* Binfmt_misc needs this */ | ||
348 | EXPORT_SYMBOL(sys_close); | ||
diff --git a/arch/sparc/prom/memory.c b/arch/sparc/prom/memory.c index 46aa51afec14..c20e5309f8aa 100644 --- a/arch/sparc/prom/memory.c +++ b/arch/sparc/prom/memory.c | |||
@@ -47,9 +47,9 @@ prom_sortmemlist(struct linux_mlist_v0 *thislist) | |||
47 | char *tmpaddr; | 47 | char *tmpaddr; |
48 | char *lowest; | 48 | char *lowest; |
49 | 49 | ||
50 | for(i=0; thislist[i].theres_more != 0; i++) { | 50 | for(i=0; thislist[i].theres_more; i++) { |
51 | lowest = thislist[i].start_adr; | 51 | lowest = thislist[i].start_adr; |
52 | for(mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++) | 52 | for(mitr = i+1; thislist[mitr-1].theres_more; mitr++) |
53 | if(thislist[mitr].start_adr < lowest) { | 53 | if(thislist[mitr].start_adr < lowest) { |
54 | lowest = thislist[mitr].start_adr; | 54 | lowest = thislist[mitr].start_adr; |
55 | swapi = mitr; | 55 | swapi = mitr; |
@@ -85,7 +85,7 @@ void __init prom_meminit(void) | |||
85 | prom_phys_total[iter].num_bytes = mptr->num_bytes; | 85 | prom_phys_total[iter].num_bytes = mptr->num_bytes; |
86 | prom_phys_total[iter].theres_more = &prom_phys_total[iter+1]; | 86 | prom_phys_total[iter].theres_more = &prom_phys_total[iter+1]; |
87 | } | 87 | } |
88 | prom_phys_total[iter-1].theres_more = 0x0; | 88 | prom_phys_total[iter-1].theres_more = NULL; |
89 | /* Second, the total prom taken descriptors. */ | 89 | /* Second, the total prom taken descriptors. */ |
90 | for(mptr = (*(romvec->pv_v0mem.v0_prommap)), iter=0; | 90 | for(mptr = (*(romvec->pv_v0mem.v0_prommap)), iter=0; |
91 | mptr; mptr=mptr->theres_more, iter++) { | 91 | mptr; mptr=mptr->theres_more, iter++) { |
@@ -93,7 +93,7 @@ void __init prom_meminit(void) | |||
93 | prom_prom_taken[iter].num_bytes = mptr->num_bytes; | 93 | prom_prom_taken[iter].num_bytes = mptr->num_bytes; |
94 | prom_prom_taken[iter].theres_more = &prom_prom_taken[iter+1]; | 94 | prom_prom_taken[iter].theres_more = &prom_prom_taken[iter+1]; |
95 | } | 95 | } |
96 | prom_prom_taken[iter-1].theres_more = 0x0; | 96 | prom_prom_taken[iter-1].theres_more = NULL; |
97 | /* Last, the available physical descriptors. */ | 97 | /* Last, the available physical descriptors. */ |
98 | for(mptr = (*(romvec->pv_v0mem.v0_available)), iter=0; | 98 | for(mptr = (*(romvec->pv_v0mem.v0_available)), iter=0; |
99 | mptr; mptr=mptr->theres_more, iter++) { | 99 | mptr; mptr=mptr->theres_more, iter++) { |
@@ -101,7 +101,7 @@ void __init prom_meminit(void) | |||
101 | prom_phys_avail[iter].num_bytes = mptr->num_bytes; | 101 | prom_phys_avail[iter].num_bytes = mptr->num_bytes; |
102 | prom_phys_avail[iter].theres_more = &prom_phys_avail[iter+1]; | 102 | prom_phys_avail[iter].theres_more = &prom_phys_avail[iter+1]; |
103 | } | 103 | } |
104 | prom_phys_avail[iter-1].theres_more = 0x0; | 104 | prom_phys_avail[iter-1].theres_more = NULL; |
105 | /* Sort all the lists. */ | 105 | /* Sort all the lists. */ |
106 | prom_sortmemlist(prom_phys_total); | 106 | prom_sortmemlist(prom_phys_total); |
107 | prom_sortmemlist(prom_prom_taken); | 107 | prom_sortmemlist(prom_prom_taken); |
@@ -124,7 +124,7 @@ void __init prom_meminit(void) | |||
124 | prom_phys_avail[iter].theres_more = | 124 | prom_phys_avail[iter].theres_more = |
125 | &prom_phys_avail[iter+1]; | 125 | &prom_phys_avail[iter+1]; |
126 | } | 126 | } |
127 | prom_phys_avail[iter-1].theres_more = 0x0; | 127 | prom_phys_avail[iter-1].theres_more = NULL; |
128 | 128 | ||
129 | num_regs = prom_getproperty(node, "reg", | 129 | num_regs = prom_getproperty(node, "reg", |
130 | (char *) prom_reg_memlist, | 130 | (char *) prom_reg_memlist, |
@@ -138,7 +138,7 @@ void __init prom_meminit(void) | |||
138 | prom_phys_total[iter].theres_more = | 138 | prom_phys_total[iter].theres_more = |
139 | &prom_phys_total[iter+1]; | 139 | &prom_phys_total[iter+1]; |
140 | } | 140 | } |
141 | prom_phys_total[iter-1].theres_more = 0x0; | 141 | prom_phys_total[iter-1].theres_more = NULL; |
142 | 142 | ||
143 | node = prom_getchild(prom_root_node); | 143 | node = prom_getchild(prom_root_node); |
144 | node = prom_searchsiblings(node, "virtual-memory"); | 144 | node = prom_searchsiblings(node, "virtual-memory"); |
@@ -158,7 +158,7 @@ void __init prom_meminit(void) | |||
158 | prom_prom_taken[iter].theres_more = | 158 | prom_prom_taken[iter].theres_more = |
159 | &prom_prom_taken[iter+1]; | 159 | &prom_prom_taken[iter+1]; |
160 | } | 160 | } |
161 | prom_prom_taken[iter-1].theres_more = 0x0; | 161 | prom_prom_taken[iter-1].theres_more = NULL; |
162 | 162 | ||
163 | prom_sortmemlist(prom_prom_taken); | 163 | prom_sortmemlist(prom_prom_taken); |
164 | 164 | ||
@@ -182,15 +182,15 @@ void __init prom_meminit(void) | |||
182 | case PROM_SUN4: | 182 | case PROM_SUN4: |
183 | #ifdef CONFIG_SUN4 | 183 | #ifdef CONFIG_SUN4 |
184 | /* how simple :) */ | 184 | /* how simple :) */ |
185 | prom_phys_total[0].start_adr = 0x0; | 185 | prom_phys_total[0].start_adr = NULL; |
186 | prom_phys_total[0].num_bytes = *(sun4_romvec->memorysize); | 186 | prom_phys_total[0].num_bytes = *(sun4_romvec->memorysize); |
187 | prom_phys_total[0].theres_more = 0x0; | 187 | prom_phys_total[0].theres_more = NULL; |
188 | prom_prom_taken[0].start_adr = 0x0; | 188 | prom_prom_taken[0].start_adr = NULL; |
189 | prom_prom_taken[0].num_bytes = 0x0; | 189 | prom_prom_taken[0].num_bytes = 0x0; |
190 | prom_prom_taken[0].theres_more = 0x0; | 190 | prom_prom_taken[0].theres_more = NULL; |
191 | prom_phys_avail[0].start_adr = 0x0; | 191 | prom_phys_avail[0].start_adr = NULL; |
192 | prom_phys_avail[0].num_bytes = *(sun4_romvec->memoryavail); | 192 | prom_phys_avail[0].num_bytes = *(sun4_romvec->memoryavail); |
193 | prom_phys_avail[0].theres_more = 0x0; | 193 | prom_phys_avail[0].theres_more = NULL; |
194 | #endif | 194 | #endif |
195 | break; | 195 | break; |
196 | 196 | ||
diff --git a/arch/sparc/prom/sun4prom.c b/arch/sparc/prom/sun4prom.c index 69ca735f0d4e..00390a2652aa 100644 --- a/arch/sparc/prom/sun4prom.c +++ b/arch/sparc/prom/sun4prom.c | |||
@@ -151,7 +151,7 @@ struct linux_romvec * __init sun4_prom_init(void) | |||
151 | * have more time, we can teach the penguin to say "By your | 151 | * have more time, we can teach the penguin to say "By your |
152 | * command" or "Activating turbo boost, Michael". :-) | 152 | * command" or "Activating turbo boost, Michael". :-) |
153 | */ | 153 | */ |
154 | sun4_romvec->setLEDs(0x0); | 154 | sun4_romvec->setLEDs(NULL); |
155 | 155 | ||
156 | printk("PROMLIB: Old Sun4 boot PROM monitor %s, romvec version %d\n", | 156 | printk("PROMLIB: Old Sun4 boot PROM monitor %s, romvec version %d\n", |
157 | sun4_romvec->monid, | 157 | sun4_romvec->monid, |
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig index fb1189641c74..a72fd15d5ea8 100644 --- a/arch/sparc64/Kconfig +++ b/arch/sparc64/Kconfig | |||
@@ -118,6 +118,7 @@ config VT_CONSOLE | |||
118 | 118 | ||
119 | config HW_CONSOLE | 119 | config HW_CONSOLE |
120 | bool | 120 | bool |
121 | depends on VT | ||
121 | default y | 122 | default y |
122 | 123 | ||
123 | config SMP | 124 | config SMP |
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index a38cb5036df0..4dcb8af94090 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c | |||
@@ -756,7 +756,7 @@ void handler_irq(int irq, struct pt_regs *regs) | |||
756 | clear_softint(clr_mask); | 756 | clear_softint(clr_mask); |
757 | } | 757 | } |
758 | #else | 758 | #else |
759 | int should_forward = 1; | 759 | int should_forward = 0; |
760 | 760 | ||
761 | clear_softint(1 << irq); | 761 | clear_softint(1 << irq); |
762 | #endif | 762 | #endif |
@@ -1007,10 +1007,10 @@ static int retarget_one_irq(struct irqaction *p, int goal_cpu) | |||
1007 | } | 1007 | } |
1008 | upa_writel(tid | IMAP_VALID, imap); | 1008 | upa_writel(tid | IMAP_VALID, imap); |
1009 | 1009 | ||
1010 | while (!cpu_online(goal_cpu)) { | 1010 | do { |
1011 | if (++goal_cpu >= NR_CPUS) | 1011 | if (++goal_cpu >= NR_CPUS) |
1012 | goal_cpu = 0; | 1012 | goal_cpu = 0; |
1013 | } | 1013 | } while (!cpu_online(goal_cpu)); |
1014 | 1014 | ||
1015 | return goal_cpu; | 1015 | return goal_cpu; |
1016 | } | 1016 | } |
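The retarget_one_irq() fix converts the skip-offline loop into a do/while, so the candidate CPU is always advanced at least once before cpu_online() is tested; the old while loop left goal_cpu untouched whenever the current candidate was already online. The resulting idiom, as a stand-alone helper (illustrative name):

        #include <linux/smp.h>

        /* Step to the next online CPU, wrapping around at NR_CPUS. */
        static int next_online_cpu(int cpu)
        {
                do {
                        if (++cpu >= NR_CPUS)
                                cpu = 0;
                } while (!cpu_online(cpu));
                return cpu;
        }
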
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c index 26d3ec41da1c..a0cd2b2494d6 100644 --- a/arch/sparc64/kernel/process.c +++ b/arch/sparc64/kernel/process.c | |||
@@ -62,9 +62,6 @@ void default_idle(void) | |||
62 | */ | 62 | */ |
63 | void cpu_idle(void) | 63 | void cpu_idle(void) |
64 | { | 64 | { |
65 | if (current->pid != 0) | ||
66 | return; | ||
67 | |||
68 | /* endless idle loop with no priority at all */ | 65 | /* endless idle loop with no priority at all */ |
69 | for (;;) { | 66 | for (;;) { |
70 | /* If current->work.need_resched is zero we should really | 67 | /* If current->work.need_resched is zero we should really |
@@ -80,7 +77,6 @@ void cpu_idle(void) | |||
80 | schedule(); | 77 | schedule(); |
81 | check_pgt_cache(); | 78 | check_pgt_cache(); |
82 | } | 79 | } |
83 | return; | ||
84 | } | 80 | } |
85 | 81 | ||
86 | #else | 82 | #else |
diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c index 5f080cf04b33..80a76e2ad732 100644 --- a/arch/sparc64/kernel/ptrace.c +++ b/arch/sparc64/kernel/ptrace.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/smp.h> | 19 | #include <linux/smp.h> |
20 | #include <linux/smp_lock.h> | 20 | #include <linux/smp_lock.h> |
21 | #include <linux/security.h> | 21 | #include <linux/security.h> |
22 | #include <linux/signal.h> | ||
22 | 23 | ||
23 | #include <asm/asi.h> | 24 | #include <asm/asi.h> |
24 | #include <asm/pgtable.h> | 25 | #include <asm/pgtable.h> |
@@ -559,7 +560,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs) | |||
559 | addr = 1; | 560 | addr = 1; |
560 | 561 | ||
561 | case PTRACE_CONT: { /* restart after signal. */ | 562 | case PTRACE_CONT: { /* restart after signal. */ |
562 | if (data > _NSIG) { | 563 | if (!valid_signal(data)) { |
563 | pt_error_return(regs, EIO); | 564 | pt_error_return(regs, EIO); |
564 | goto out_tsk; | 565 | goto out_tsk; |
565 | } | 566 | } |
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c index cad5a1122800..e78cc53594fa 100644 --- a/arch/sparc64/kernel/sparc64_ksyms.c +++ b/arch/sparc64/kernel/sparc64_ksyms.c | |||
@@ -278,7 +278,7 @@ EXPORT_SYMBOL(verify_compat_iovec); | |||
278 | 278 | ||
279 | EXPORT_SYMBOL(dump_thread); | 279 | EXPORT_SYMBOL(dump_thread); |
280 | EXPORT_SYMBOL(dump_fpu); | 280 | EXPORT_SYMBOL(dump_fpu); |
281 | EXPORT_SYMBOL(__pte_alloc_one_kernel); | 281 | EXPORT_SYMBOL(pte_alloc_one_kernel); |
282 | #ifndef CONFIG_SMP | 282 | #ifndef CONFIG_SMP |
283 | EXPORT_SYMBOL(pgt_quicklists); | 283 | EXPORT_SYMBOL(pgt_quicklists); |
284 | #endif | 284 | #endif |
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c index 6a717d4d2bc5..71b4e3807694 100644 --- a/arch/sparc64/kernel/time.c +++ b/arch/sparc64/kernel/time.c | |||
@@ -48,7 +48,7 @@ | |||
48 | 48 | ||
49 | DEFINE_SPINLOCK(mostek_lock); | 49 | DEFINE_SPINLOCK(mostek_lock); |
50 | DEFINE_SPINLOCK(rtc_lock); | 50 | DEFINE_SPINLOCK(rtc_lock); |
51 | unsigned long mstk48t02_regs = 0UL; | 51 | void __iomem *mstk48t02_regs = NULL; |
52 | #ifdef CONFIG_PCI | 52 | #ifdef CONFIG_PCI |
53 | unsigned long ds1287_regs = 0UL; | 53 | unsigned long ds1287_regs = 0UL; |
54 | #endif | 54 | #endif |
@@ -59,8 +59,8 @@ u64 jiffies_64 = INITIAL_JIFFIES; | |||
59 | 59 | ||
60 | EXPORT_SYMBOL(jiffies_64); | 60 | EXPORT_SYMBOL(jiffies_64); |
61 | 61 | ||
62 | static unsigned long mstk48t08_regs = 0UL; | 62 | static void __iomem *mstk48t08_regs; |
63 | static unsigned long mstk48t59_regs = 0UL; | 63 | static void __iomem *mstk48t59_regs; |
64 | 64 | ||
65 | static int set_rtc_mmss(unsigned long); | 65 | static int set_rtc_mmss(unsigned long); |
66 | 66 | ||
@@ -520,7 +520,7 @@ void timer_tick_interrupt(struct pt_regs *regs) | |||
520 | /* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */ | 520 | /* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */ |
521 | static void __init kick_start_clock(void) | 521 | static void __init kick_start_clock(void) |
522 | { | 522 | { |
523 | unsigned long regs = mstk48t02_regs; | 523 | void __iomem *regs = mstk48t02_regs; |
524 | u8 sec, tmp; | 524 | u8 sec, tmp; |
525 | int i, count; | 525 | int i, count; |
526 | 526 | ||
@@ -604,7 +604,7 @@ static void __init kick_start_clock(void) | |||
604 | /* Return nonzero if the clock chip battery is low. */ | 604 | /* Return nonzero if the clock chip battery is low. */ |
605 | static int __init has_low_battery(void) | 605 | static int __init has_low_battery(void) |
606 | { | 606 | { |
607 | unsigned long regs = mstk48t02_regs; | 607 | void __iomem *regs = mstk48t02_regs; |
608 | u8 data1, data2; | 608 | u8 data1, data2; |
609 | 609 | ||
610 | spin_lock_irq(&mostek_lock); | 610 | spin_lock_irq(&mostek_lock); |
@@ -623,7 +623,7 @@ static int __init has_low_battery(void) | |||
623 | static void __init set_system_time(void) | 623 | static void __init set_system_time(void) |
624 | { | 624 | { |
625 | unsigned int year, mon, day, hour, min, sec; | 625 | unsigned int year, mon, day, hour, min, sec; |
626 | unsigned long mregs = mstk48t02_regs; | 626 | void __iomem *mregs = mstk48t02_regs; |
627 | #ifdef CONFIG_PCI | 627 | #ifdef CONFIG_PCI |
628 | unsigned long dregs = ds1287_regs; | 628 | unsigned long dregs = ds1287_regs; |
629 | #else | 629 | #else |
@@ -843,7 +843,8 @@ void __init clock_probe(void) | |||
843 | !strcmp(model, "m5823")) { | 843 | !strcmp(model, "m5823")) { |
844 | ds1287_regs = edev->resource[0].start; | 844 | ds1287_regs = edev->resource[0].start; |
845 | } else { | 845 | } else { |
846 | mstk48t59_regs = edev->resource[0].start; | 846 | mstk48t59_regs = (void __iomem *) |
847 | edev->resource[0].start; | ||
847 | mstk48t02_regs = mstk48t59_regs + MOSTEK_48T59_48T02; | 848 | mstk48t02_regs = mstk48t59_regs + MOSTEK_48T59_48T02; |
848 | } | 849 | } |
849 | break; | 850 | break; |
@@ -865,7 +866,8 @@ try_isa_clock: | |||
865 | !strcmp(model, "m5823")) { | 866 | !strcmp(model, "m5823")) { |
866 | ds1287_regs = isadev->resource.start; | 867 | ds1287_regs = isadev->resource.start; |
867 | } else { | 868 | } else { |
868 | mstk48t59_regs = isadev->resource.start; | 869 | mstk48t59_regs = (void __iomem *) |
870 | isadev->resource.start; | ||
869 | mstk48t02_regs = mstk48t59_regs + MOSTEK_48T59_48T02; | 871 | mstk48t02_regs = mstk48t59_regs + MOSTEK_48T59_48T02; |
870 | } | 872 | } |
871 | break; | 873 | break; |
@@ -893,21 +895,24 @@ try_isa_clock: | |||
893 | } | 895 | } |
894 | 896 | ||
895 | if(model[5] == '0' && model[6] == '2') { | 897 | if(model[5] == '0' && model[6] == '2') { |
896 | mstk48t02_regs = (((u64)clk_reg[0].phys_addr) | | 898 | mstk48t02_regs = (void __iomem *) |
897 | (((u64)clk_reg[0].which_io)<<32UL)); | 899 | (((u64)clk_reg[0].phys_addr) | |
900 | (((u64)clk_reg[0].which_io)<<32UL)); | ||
898 | } else if(model[5] == '0' && model[6] == '8') { | 901 | } else if(model[5] == '0' && model[6] == '8') { |
899 | mstk48t08_regs = (((u64)clk_reg[0].phys_addr) | | 902 | mstk48t08_regs = (void __iomem *) |
900 | (((u64)clk_reg[0].which_io)<<32UL)); | 903 | (((u64)clk_reg[0].phys_addr) | |
904 | (((u64)clk_reg[0].which_io)<<32UL)); | ||
901 | mstk48t02_regs = mstk48t08_regs + MOSTEK_48T08_48T02; | 905 | mstk48t02_regs = mstk48t08_regs + MOSTEK_48T08_48T02; |
902 | } else { | 906 | } else { |
903 | mstk48t59_regs = (((u64)clk_reg[0].phys_addr) | | 907 | mstk48t59_regs = (void __iomem *) |
904 | (((u64)clk_reg[0].which_io)<<32UL)); | 908 | (((u64)clk_reg[0].phys_addr) | |
909 | (((u64)clk_reg[0].which_io)<<32UL)); | ||
905 | mstk48t02_regs = mstk48t59_regs + MOSTEK_48T59_48T02; | 910 | mstk48t02_regs = mstk48t59_regs + MOSTEK_48T59_48T02; |
906 | } | 911 | } |
907 | break; | 912 | break; |
908 | } | 913 | } |
909 | 914 | ||
910 | if (mstk48t02_regs != 0UL) { | 915 | if (mstk48t02_regs != NULL) { |
911 | /* Report a low battery voltage condition. */ | 916 | /* Report a low battery voltage condition. */ |
912 | if (has_low_battery()) | 917 | if (has_low_battery()) |
913 | prom_printf("NVRAM: Low battery voltage!\n"); | 918 | prom_printf("NVRAM: Low battery voltage!\n"); |
@@ -1087,7 +1092,7 @@ unsigned long long sched_clock(void) | |||
1087 | static int set_rtc_mmss(unsigned long nowtime) | 1092 | static int set_rtc_mmss(unsigned long nowtime) |
1088 | { | 1093 | { |
1089 | int real_seconds, real_minutes, chip_minutes; | 1094 | int real_seconds, real_minutes, chip_minutes; |
1090 | unsigned long mregs = mstk48t02_regs; | 1095 | void __iomem *mregs = mstk48t02_regs; |
1091 | #ifdef CONFIG_PCI | 1096 | #ifdef CONFIG_PCI |
1092 | unsigned long dregs = ds1287_regs; | 1097 | unsigned long dregs = ds1287_regs; |
1093 | #else | 1098 | #else |
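Throughout this time.c hunk the Mostek register base addresses change type from unsigned long to void __iomem *, so sparse can warn when such a pointer is dereferenced directly instead of going through the MMIO accessors. A minimal illustration of the convention (the register offset is made up):

        #include <linux/types.h>
        #include <asm/io.h>

        static void __iomem *clock_regs;        /* MMIO base, not a normal pointer */

        static u8 read_clock_seconds(void)
        {
                return readb(clock_regs + 0x09);        /* hypothetical offset */
        }
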
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index db6fa77b4dab..9c5222075da9 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c | |||
@@ -1114,7 +1114,7 @@ struct pgtable_cache_struct pgt_quicklists; | |||
1114 | #else | 1114 | #else |
1115 | #define DC_ALIAS_SHIFT 0 | 1115 | #define DC_ALIAS_SHIFT 0 |
1116 | #endif | 1116 | #endif |
1117 | pte_t *__pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) | 1117 | pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) |
1118 | { | 1118 | { |
1119 | struct page *page; | 1119 | struct page *page; |
1120 | unsigned long color; | 1120 | unsigned long color; |
diff --git a/arch/um/Kconfig b/arch/um/Kconfig index 9a23df182123..c5292181a664 100644 --- a/arch/um/Kconfig +++ b/arch/um/Kconfig | |||
@@ -244,6 +244,7 @@ config KERNEL_HALF_GIGS | |||
244 | 244 | ||
245 | config HIGHMEM | 245 | config HIGHMEM |
246 | bool "Highmem support" | 246 | bool "Highmem support" |
247 | depends on !64BIT | ||
247 | 248 | ||
248 | config KERNEL_STACK_ORDER | 249 | config KERNEL_STACK_ORDER |
249 | int "Kernel stack size order" | 250 | int "Kernel stack size order" |
diff --git a/arch/um/Kconfig_i386 b/arch/um/Kconfig_i386 index 203c242201b6..e41f3748d30f 100644 --- a/arch/um/Kconfig_i386 +++ b/arch/um/Kconfig_i386 | |||
@@ -1,4 +1,8 @@ | |||
1 | config 64_BIT | 1 | config UML_X86 |
2 | bool | ||
3 | default y | ||
4 | |||
5 | config 64BIT | ||
2 | bool | 6 | bool |
3 | default n | 7 | default n |
4 | 8 | ||
diff --git a/arch/um/Kconfig_x86_64 b/arch/um/Kconfig_x86_64 index 768dc6626a8d..fd8d7e8982b1 100644 --- a/arch/um/Kconfig_x86_64 +++ b/arch/um/Kconfig_x86_64 | |||
@@ -1,4 +1,8 @@ | |||
1 | config 64_BIT | 1 | config UML_X86 |
2 | bool | ||
3 | default y | ||
4 | |||
5 | config 64BIT | ||
2 | bool | 6 | bool |
3 | default y | 7 | default y |
4 | 8 | ||
diff --git a/arch/um/Makefile b/arch/um/Makefile index 97bca6b5ca95..f2a0c40a9204 100644 --- a/arch/um/Makefile +++ b/arch/um/Makefile | |||
@@ -17,7 +17,7 @@ core-y += $(ARCH_DIR)/kernel/ \ | |||
17 | 17 | ||
18 | # Have to precede the include because the included Makefiles reference them. | 18 | # Have to precede the include because the included Makefiles reference them. |
19 | SYMLINK_HEADERS := archparam.h system.h sigcontext.h processor.h ptrace.h \ | 19 | SYMLINK_HEADERS := archparam.h system.h sigcontext.h processor.h ptrace.h \ |
20 | arch-signal.h module.h vm-flags.h | 20 | module.h vm-flags.h elf.h |
21 | SYMLINK_HEADERS := $(foreach header,$(SYMLINK_HEADERS),include/asm-um/$(header)) | 21 | SYMLINK_HEADERS := $(foreach header,$(SYMLINK_HEADERS),include/asm-um/$(header)) |
22 | 22 | ||
23 | # XXX: The "os" symlink is only used by arch/um/include/os.h, which includes | 23 | # XXX: The "os" symlink is only used by arch/um/include/os.h, which includes |
@@ -44,6 +44,11 @@ ifneq ($(MAKEFILES-INCL),) | |||
44 | endif | 44 | endif |
45 | 45 | ||
46 | ARCH_INCLUDE := -I$(ARCH_DIR)/include | 46 | ARCH_INCLUDE := -I$(ARCH_DIR)/include |
47 | ifneq ($(KBUILD_SRC),) | ||
48 | ARCH_INCLUDE += -I$(ARCH_DIR)/include2 | ||
49 | ARCH_INCLUDE += -I$(srctree)/$(ARCH_DIR)/include | ||
50 | MRPROPER_DIRS += $(ARCH_DIR)/include2 | ||
51 | endif | ||
47 | SYS_DIR := $(ARCH_DIR)/include/sysdep-$(SUBARCH) | 52 | SYS_DIR := $(ARCH_DIR)/include/sysdep-$(SUBARCH) |
48 | 53 | ||
49 | include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH) | 54 | include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH) |
@@ -94,17 +99,18 @@ define archhelp | |||
94 | echo ' find in the kernel root.' | 99 | echo ' find in the kernel root.' |
95 | endef | 100 | endef |
96 | 101 | ||
102 | ifneq ($(KBUILD_SRC),) | ||
103 | $(shell mkdir -p $(ARCH_DIR) && ln -fsn $(srctree)/$(ARCH_DIR)/Kconfig_$(SUBARCH) $(ARCH_DIR)/Kconfig_arch) | ||
104 | CLEAN_FILES += $(ARCH_DIR)/Kconfig_arch | ||
105 | else | ||
97 | $(shell cd $(ARCH_DIR) && ln -sf Kconfig_$(SUBARCH) Kconfig_arch) | 106 | $(shell cd $(ARCH_DIR) && ln -sf Kconfig_$(SUBARCH) Kconfig_arch) |
107 | endif | ||
98 | 108 | ||
99 | prepare: $(ARCH_SYMLINKS) $(SYS_HEADERS) $(GEN_HEADERS) \ | 109 | prepare: $(ARCH_SYMLINKS) $(SYS_HEADERS) $(GEN_HEADERS) |
100 | $(ARCH_DIR)/kernel/vmlinux.lds.S | ||
101 | 110 | ||
102 | LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static | 111 | LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static |
103 | LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib | 112 | LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib |
104 | 113 | ||
105 | LD_SCRIPT-$(CONFIG_LD_SCRIPT_STATIC) := uml.lds.S | ||
106 | LD_SCRIPT-$(CONFIG_LD_SCRIPT_DYN) := dyn.lds.S | ||
107 | |||
108 | CPP_MODE-$(CONFIG_MODE_TT) := -DMODE_TT | 114 | CPP_MODE-$(CONFIG_MODE_TT) := -DMODE_TT |
109 | CONFIG_KERNEL_STACK_ORDER ?= 2 | 115 | CONFIG_KERNEL_STACK_ORDER ?= 2 |
110 | STACK_SIZE := $(shell echo $$[ 4096 * (1 << $(CONFIG_KERNEL_STACK_ORDER)) ] ) | 116 | STACK_SIZE := $(shell echo $$[ 4096 * (1 << $(CONFIG_KERNEL_STACK_ORDER)) ] ) |
@@ -126,7 +132,7 @@ define cmd_vmlinux__ | |||
126 | $(CC) $(CFLAGS_vmlinux) -o $@ \ | 132 | $(CC) $(CFLAGS_vmlinux) -o $@ \ |
127 | -Wl,-T,$(vmlinux-lds) $(vmlinux-init) \ | 133 | -Wl,-T,$(vmlinux-lds) $(vmlinux-init) \ |
128 | -Wl,--start-group $(vmlinux-main) -Wl,--end-group \ | 134 | -Wl,--start-group $(vmlinux-main) -Wl,--end-group \ |
129 | -L/usr/lib -lutil \ | 135 | -lutil \ |
130 | $(filter-out $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) \ | 136 | $(filter-out $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) \ |
131 | FORCE ,$^) ; rm -f linux | 137 | FORCE ,$^) ; rm -f linux |
132 | endef | 138 | endef |
@@ -145,31 +151,42 @@ archclean: | |||
145 | @find . \( -name '*.bb' -o -name '*.bbg' -o -name '*.da' \ | 151 | @find . \( -name '*.bb' -o -name '*.bbg' -o -name '*.da' \ |
146 | -o -name '*.gcov' \) -type f -print | xargs rm -f | 152 | -o -name '*.gcov' \) -type f -print | xargs rm -f |
147 | 153 | ||
148 | #We need to re-preprocess this when the symlink dest changes. | ||
149 | #So we touch it when needed. | ||
150 | $(ARCH_DIR)/kernel/vmlinux.lds.S: FORCE | ||
151 | $(Q)if [ "$(shell readlink $@)" != "$(LD_SCRIPT-y)" ]; then \ | ||
152 | echo ' SYMLINK $@'; \ | ||
153 | ln -sf $(LD_SCRIPT-y) $@; \ | ||
154 | touch $@; \ | ||
155 | fi; | ||
156 | |||
157 | $(SYMLINK_HEADERS): | 154 | $(SYMLINK_HEADERS): |
158 | @echo ' SYMLINK $@' | 155 | @echo ' SYMLINK $@' |
156 | ifneq ($(KBUILD_SRC),) | ||
157 | ln -fsn $(srctree)/include/asm-um/$(basename $(notdir $@))-$(SUBARCH)$(suffix $@) $@ | ||
158 | else | ||
159 | $(Q)cd $(TOPDIR)/$(dir $@) ; \ | 159 | $(Q)cd $(TOPDIR)/$(dir $@) ; \ |
160 | ln -sf $(basename $(notdir $@))-$(SUBARCH)$(suffix $@) $(notdir $@) | 160 | ln -sf $(basename $(notdir $@))-$(SUBARCH)$(suffix $@) $(notdir $@) |
161 | endif | ||
161 | 162 | ||
162 | include/asm-um/arch: | 163 | include/asm-um/arch: |
163 | @echo ' SYMLINK $@' | 164 | @echo ' SYMLINK $@' |
165 | ifneq ($(KBUILD_SRC),) | ||
166 | $(Q)mkdir -p include/asm-um | ||
167 | $(Q)ln -fsn $(srctree)/include/asm-$(SUBARCH) include/asm-um/arch | ||
168 | else | ||
164 | $(Q)cd $(TOPDIR)/include/asm-um && ln -sf ../asm-$(SUBARCH) arch | 169 | $(Q)cd $(TOPDIR)/include/asm-um && ln -sf ../asm-$(SUBARCH) arch |
170 | endif | ||
165 | 171 | ||
166 | $(ARCH_DIR)/include/sysdep: | 172 | $(ARCH_DIR)/include/sysdep: |
167 | @echo ' SYMLINK $@' | 173 | @echo ' SYMLINK $@' |
174 | ifneq ($(KBUILD_SRC),) | ||
175 | $(Q)mkdir -p $(ARCH_DIR)/include | ||
176 | $(Q)mkdir -p $(ARCH_DIR)/include2 | ||
177 | $(Q)ln -fsn sysdep-$(SUBARCH) $(ARCH_DIR)/include/sysdep | ||
178 | $(Q)ln -fsn $(srctree)/$(ARCH_DIR)/include/sysdep-$(SUBARCH) $(ARCH_DIR)/include2/sysdep | ||
179 | else | ||
168 | $(Q)cd $(ARCH_DIR)/include && ln -sf sysdep-$(SUBARCH) sysdep | 180 | $(Q)cd $(ARCH_DIR)/include && ln -sf sysdep-$(SUBARCH) sysdep |
181 | endif | ||
169 | 182 | ||
170 | $(ARCH_DIR)/os: | 183 | $(ARCH_DIR)/os: |
171 | @echo ' SYMLINK $@' | 184 | @echo ' SYMLINK $@' |
185 | ifneq ($(KBUILD_SRC),) | ||
186 | $(Q)ln -fsn $(srctree)/$(ARCH_DIR)/os-$(OS) $(ARCH_DIR)/os | ||
187 | else | ||
172 | $(Q)cd $(ARCH_DIR) && ln -sf os-$(OS) os | 188 | $(Q)cd $(ARCH_DIR) && ln -sf os-$(OS) os |
189 | endif | ||
173 | 190 | ||
174 | # Generated files | 191 | # Generated files |
175 | define filechk_umlconfig | 192 | define filechk_umlconfig |
@@ -179,10 +196,31 @@ endef | |||
179 | $(ARCH_DIR)/include/uml-config.h : include/linux/autoconf.h | 196 | $(ARCH_DIR)/include/uml-config.h : include/linux/autoconf.h |
180 | $(call filechk,umlconfig) | 197 | $(call filechk,umlconfig) |
181 | 198 | ||
199 | $(ARCH_DIR)/user-offsets.s: $(ARCH_DIR)/sys-$(SUBARCH)/user-offsets.c | ||
200 | $(CC) $(USER_CFLAGS) -S -o $@ $< | ||
201 | |||
202 | $(ARCH_DIR)/user-offsets.h: $(ARCH_DIR)/user-offsets.s | ||
203 | $(call filechk,gen-asm-offsets) | ||
204 | |||
205 | CLEAN_FILES += $(ARCH_DIR)/user-offsets.s $(ARCH_DIR)/user-offsets.h | ||
206 | |||
207 | $(ARCH_DIR)/kernel-offsets.s: $(ARCH_DIR)/sys-$(SUBARCH)/kernel-offsets.c \ | ||
208 | $(ARCH_SYMLINKS) \ | ||
209 | $(SYS_DIR)/sc.h \ | ||
210 | include/asm include/linux/version.h \ | ||
211 | include/config/MARKER \ | ||
212 | $(ARCH_DIR)/include/user_constants.h | ||
213 | $(CC) $(CFLAGS) $(NOSTDINC_FLAGS) $(CPPFLAGS) -S -o $@ $< | ||
214 | |||
215 | $(ARCH_DIR)/kernel-offsets.h: $(ARCH_DIR)/kernel-offsets.s | ||
216 | $(call filechk,gen-asm-offsets) | ||
217 | |||
218 | CLEAN_FILES += $(ARCH_DIR)/kernel-offsets.s $(ARCH_DIR)/kernel-offsets.h | ||
219 | |||
182 | $(ARCH_DIR)/include/task.h: $(ARCH_DIR)/util/mk_task | 220 | $(ARCH_DIR)/include/task.h: $(ARCH_DIR)/util/mk_task |
183 | $(call filechk,gen_header) | 221 | $(call filechk,gen_header) |
184 | 222 | ||
185 | $(ARCH_DIR)/include/user_constants.h: $(ARCH_DIR)/os/util/mk_user_constants | 223 | $(ARCH_DIR)/include/user_constants.h: $(ARCH_DIR)/os-$(OS)/util/mk_user_constants |
186 | $(call filechk,gen_header) | 224 | $(call filechk,gen_header) |
187 | 225 | ||
188 | $(ARCH_DIR)/include/kern_constants.h: $(ARCH_DIR)/util/mk_constants | 226 | $(ARCH_DIR)/include/kern_constants.h: $(ARCH_DIR)/util/mk_constants |
@@ -191,20 +229,20 @@ $(ARCH_DIR)/include/kern_constants.h: $(ARCH_DIR)/util/mk_constants | |||
191 | $(ARCH_DIR)/include/skas_ptregs.h: $(ARCH_DIR)/kernel/skas/util/mk_ptregs | 229 | $(ARCH_DIR)/include/skas_ptregs.h: $(ARCH_DIR)/kernel/skas/util/mk_ptregs |
192 | $(call filechk,gen_header) | 230 | $(call filechk,gen_header) |
193 | 231 | ||
194 | $(ARCH_DIR)/os/util/mk_user_constants: $(ARCH_DIR)/os/util FORCE ; | 232 | $(ARCH_DIR)/os-$(OS)/util/mk_user_constants: $(ARCH_DIR)/os-$(OS)/util FORCE ; |
195 | 233 | ||
196 | $(ARCH_DIR)/util/mk_task $(ARCH_DIR)/util/mk_constants: $(ARCH_DIR)/include/user_constants.h $(ARCH_DIR)/util \ | 234 | $(ARCH_DIR)/util/mk_task $(ARCH_DIR)/util/mk_constants: $(ARCH_DIR)/include/user_constants.h $(ARCH_DIR)/util \ |
197 | FORCE ; | 235 | FORCE ; |
198 | 236 | ||
199 | $(ARCH_DIR)/kernel/skas/util/mk_ptregs: $(ARCH_DIR)/kernel/skas/util FORCE ; | 237 | $(ARCH_DIR)/kernel/skas/util/mk_ptregs: $(ARCH_DIR)/kernel/skas/util FORCE ; |
200 | 238 | ||
201 | $(ARCH_DIR)/util: scripts_basic $(SYS_DIR)/sc.h FORCE | 239 | $(ARCH_DIR)/util: scripts_basic $(SYS_DIR)/sc.h $(ARCH_DIR)/kernel-offsets.h FORCE |
202 | $(Q)$(MAKE) $(build)=$@ | 240 | $(Q)$(MAKE) $(build)=$@ |
203 | 241 | ||
204 | $(ARCH_DIR)/kernel/skas/util: scripts_basic FORCE | 242 | $(ARCH_DIR)/kernel/skas/util: scripts_basic $(ARCH_DIR)/user-offsets.h FORCE |
205 | $(Q)$(MAKE) $(build)=$@ | 243 | $(Q)$(MAKE) $(build)=$@ |
206 | 244 | ||
207 | $(ARCH_DIR)/os/util: scripts_basic FORCE | 245 | $(ARCH_DIR)/os-$(OS)/util: scripts_basic FORCE |
208 | $(Q)$(MAKE) $(build)=$@ | 246 | $(Q)$(MAKE) $(build)=$@ |
209 | 247 | ||
210 | export SUBARCH USER_CFLAGS OS | 248 | export SUBARCH USER_CFLAGS OS |
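The new user-offsets.s / kernel-offsets.h rules above rely on the usual kbuild asm-offsets mechanism: a C file is compiled with -S, special "->NAME value" markers survive into the assembly, and the filechk gen-asm-offsets rule rewrites them into a header of plain #defines. A minimal, self-contained sketch of that mechanism follows; the struct and symbol names are made up for illustration and are not the real UML symbols.

#include <stddef.h>

struct example_regs {			/* stand-in for a real register frame */
	unsigned long ip;
	unsigned long sp;
};

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))

void foo(void)
{
	/* Each of these leaves a "->NAME value" marker in the generated .s */
	OFFSET(EXAMPLE_REGS_SP, example_regs, sp);
	DEFINE(EXAMPLE_REGS_SIZE, sizeof(struct example_regs));
}

The point of generating the header this way is that the consuming code never has to include the headers defining the structures; it only ever sees numeric #defines.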
diff --git a/arch/um/Makefile-i386 b/arch/um/Makefile-i386 index 97b223bfa78e..29e182d5a83a 100644 --- a/arch/um/Makefile-i386 +++ b/arch/um/Makefile-i386 | |||
@@ -1,4 +1,4 @@ | |||
1 | SUBARCH_CORE := arch/um/sys-i386/ | 1 | SUBARCH_CORE := arch/um/sys-i386/ arch/i386/crypto/ |
2 | 2 | ||
3 | TOP_ADDR := $(CONFIG_TOP_ADDR) | 3 | TOP_ADDR := $(CONFIG_TOP_ADDR) |
4 | 4 | ||
@@ -32,10 +32,10 @@ $(SYS_DIR)/sc.h: $(SYS_UTIL_DIR)/mk_sc | |||
32 | $(SYS_DIR)/thread.h: $(SYS_UTIL_DIR)/mk_thread | 32 | $(SYS_DIR)/thread.h: $(SYS_UTIL_DIR)/mk_thread |
33 | $(call filechk,gen_header) | 33 | $(call filechk,gen_header) |
34 | 34 | ||
35 | $(SYS_UTIL_DIR)/mk_sc: scripts_basic FORCE | 35 | $(SYS_UTIL_DIR)/mk_sc: scripts_basic $(ARCH_DIR)/user-offsets.h FORCE |
36 | $(Q)$(MAKE) $(build)=$(SYS_UTIL_DIR) $@ | 36 | $(Q)$(MAKE) $(build)=$(SYS_UTIL_DIR) $@ |
37 | 37 | ||
38 | $(SYS_UTIL_DIR)/mk_thread: scripts_basic $(ARCH_SYMLINKS) $(GEN_HEADERS) FORCE | 38 | $(SYS_UTIL_DIR)/mk_thread: scripts_basic $(ARCH_DIR)/kernel-offsets.h FORCE |
39 | $(Q)$(MAKE) $(build)=$(SYS_UTIL_DIR) $@ | 39 | $(Q)$(MAKE) $(build)=$(SYS_UTIL_DIR) $@ |
40 | 40 | ||
41 | $(SYS_UTIL_DIR): scripts_basic include/asm FORCE | 41 | $(SYS_UTIL_DIR): scripts_basic include/asm FORCE |
diff --git a/arch/um/Makefile-x86_64 b/arch/um/Makefile-x86_64 index a77971133e91..32144562c279 100644 --- a/arch/um/Makefile-x86_64 +++ b/arch/um/Makefile-x86_64 | |||
@@ -23,10 +23,10 @@ $(SYS_DIR)/sc.h: $(SYS_UTIL_DIR)/mk_sc | |||
23 | $(SYS_DIR)/thread.h: $(SYS_UTIL_DIR)/mk_thread | 23 | $(SYS_DIR)/thread.h: $(SYS_UTIL_DIR)/mk_thread |
24 | $(call filechk,gen_header) | 24 | $(call filechk,gen_header) |
25 | 25 | ||
26 | $(SYS_UTIL_DIR)/mk_sc: scripts_basic FORCE | 26 | $(SYS_UTIL_DIR)/mk_sc: scripts_basic $(ARCH_DIR)/user-offsets.h FORCE |
27 | $(Q)$(MAKE) $(build)=$(SYS_UTIL_DIR) $@ | 27 | $(Q)$(MAKE) $(build)=$(SYS_UTIL_DIR) $@ |
28 | 28 | ||
29 | $(SYS_UTIL_DIR)/mk_thread: scripts_basic $(ARCH_SYMLINKS) $(GEN_HEADERS) FORCE | 29 | $(SYS_UTIL_DIR)/mk_thread: scripts_basic $(GEN_HEADERS) $(ARCH_DIR)/kernel-offsets.h FORCE |
30 | $(Q)$(MAKE) $(build)=$(SYS_UTIL_DIR) $@ | 30 | $(Q)$(MAKE) $(build)=$(SYS_UTIL_DIR) $@ |
31 | 31 | ||
32 | CLEAN_FILES += $(SYS_HEADERS) | 32 | CLEAN_FILES += $(SYS_HEADERS) |
diff --git a/arch/um/defconfig b/arch/um/defconfig index fc3075c589d8..4067c3aa5b60 100644 --- a/arch/um/defconfig +++ b/arch/um/defconfig | |||
@@ -1,7 +1,7 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.12-rc1-bk1 | 3 | # Linux kernel version: 2.6.12-rc3-skas3-v9-pre2 |
4 | # Sun Mar 20 16:53:00 2005 | 4 | # Sun Apr 24 19:46:10 2005 |
5 | # | 5 | # |
6 | CONFIG_GENERIC_HARDIRQS=y | 6 | CONFIG_GENERIC_HARDIRQS=y |
7 | CONFIG_UML=y | 7 | CONFIG_UML=y |
@@ -15,7 +15,8 @@ CONFIG_GENERIC_CALIBRATE_DELAY=y | |||
15 | # | 15 | # |
16 | CONFIG_MODE_TT=y | 16 | CONFIG_MODE_TT=y |
17 | CONFIG_MODE_SKAS=y | 17 | CONFIG_MODE_SKAS=y |
18 | # CONFIG_64_BIT is not set | 18 | CONFIG_UML_X86=y |
19 | # CONFIG_64BIT is not set | ||
19 | CONFIG_TOP_ADDR=0xc0000000 | 20 | CONFIG_TOP_ADDR=0xc0000000 |
20 | # CONFIG_3_LEVEL_PGTABLES is not set | 21 | # CONFIG_3_LEVEL_PGTABLES is not set |
21 | CONFIG_ARCH_HAS_SC_SIGNALS=y | 22 | CONFIG_ARCH_HAS_SC_SIGNALS=y |
@@ -41,6 +42,7 @@ CONFIG_UML_REAL_TIME_CLOCK=y | |||
41 | CONFIG_EXPERIMENTAL=y | 42 | CONFIG_EXPERIMENTAL=y |
42 | CONFIG_CLEAN_COMPILE=y | 43 | CONFIG_CLEAN_COMPILE=y |
43 | CONFIG_BROKEN_ON_SMP=y | 44 | CONFIG_BROKEN_ON_SMP=y |
45 | CONFIG_INIT_ENV_ARG_LIMIT=32 | ||
44 | 46 | ||
45 | # | 47 | # |
46 | # General setup | 48 | # General setup |
@@ -158,7 +160,6 @@ CONFIG_UML_NET_SLIRP=y | |||
158 | # | 160 | # |
159 | CONFIG_PACKET=y | 161 | CONFIG_PACKET=y |
160 | CONFIG_PACKET_MMAP=y | 162 | CONFIG_PACKET_MMAP=y |
161 | # CONFIG_NETLINK_DEV is not set | ||
162 | CONFIG_UNIX=y | 163 | CONFIG_UNIX=y |
163 | # CONFIG_NET_KEY is not set | 164 | # CONFIG_NET_KEY is not set |
164 | CONFIG_INET=y | 165 | CONFIG_INET=y |
@@ -412,6 +413,5 @@ CONFIG_DEBUG_INFO=y | |||
412 | # CONFIG_DEBUG_FS is not set | 413 | # CONFIG_DEBUG_FS is not set |
413 | CONFIG_FRAME_POINTER=y | 414 | CONFIG_FRAME_POINTER=y |
414 | CONFIG_PT_PROXY=y | 415 | CONFIG_PT_PROXY=y |
415 | # CONFIG_GPROF is not set | ||
416 | # CONFIG_GCOV is not set | 416 | # CONFIG_GCOV is not set |
417 | # CONFIG_SYSCALL_DEBUG is not set | 417 | # CONFIG_SYSCALL_DEBUG is not set |
diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c index 1f77deb3fd23..0150038af795 100644 --- a/arch/um/drivers/chan_kern.c +++ b/arch/um/drivers/chan_kern.c | |||
@@ -22,7 +22,7 @@ | |||
22 | #ifdef CONFIG_NOCONFIG_CHAN | 22 | #ifdef CONFIG_NOCONFIG_CHAN |
23 | static void *not_configged_init(char *str, int device, struct chan_opts *opts) | 23 | static void *not_configged_init(char *str, int device, struct chan_opts *opts) |
24 | { | 24 | { |
25 | printk(KERN_ERR "Using a channel type which is configured out of " | 25 | printf(KERN_ERR "Using a channel type which is configured out of " |
26 | "UML\n"); | 26 | "UML\n"); |
27 | return(NULL); | 27 | return(NULL); |
28 | } | 28 | } |
@@ -30,27 +30,27 @@ static void *not_configged_init(char *str, int device, struct chan_opts *opts) | |||
30 | static int not_configged_open(int input, int output, int primary, void *data, | 30 | static int not_configged_open(int input, int output, int primary, void *data, |
31 | char **dev_out) | 31 | char **dev_out) |
32 | { | 32 | { |
33 | printk(KERN_ERR "Using a channel type which is configured out of " | 33 | printf(KERN_ERR "Using a channel type which is configured out of " |
34 | "UML\n"); | 34 | "UML\n"); |
35 | return(-ENODEV); | 35 | return(-ENODEV); |
36 | } | 36 | } |
37 | 37 | ||
38 | static void not_configged_close(int fd, void *data) | 38 | static void not_configged_close(int fd, void *data) |
39 | { | 39 | { |
40 | printk(KERN_ERR "Using a channel type which is configured out of " | 40 | printf(KERN_ERR "Using a channel type which is configured out of " |
41 | "UML\n"); | 41 | "UML\n"); |
42 | } | 42 | } |
43 | 43 | ||
44 | static int not_configged_read(int fd, char *c_out, void *data) | 44 | static int not_configged_read(int fd, char *c_out, void *data) |
45 | { | 45 | { |
46 | printk(KERN_ERR "Using a channel type which is configured out of " | 46 | printf(KERN_ERR "Using a channel type which is configured out of " |
47 | "UML\n"); | 47 | "UML\n"); |
48 | return(-EIO); | 48 | return(-EIO); |
49 | } | 49 | } |
50 | 50 | ||
51 | static int not_configged_write(int fd, const char *buf, int len, void *data) | 51 | static int not_configged_write(int fd, const char *buf, int len, void *data) |
52 | { | 52 | { |
53 | printk(KERN_ERR "Using a channel type which is configured out of " | 53 | printf(KERN_ERR "Using a channel type which is configured out of " |
54 | "UML\n"); | 54 | "UML\n"); |
55 | return(-EIO); | 55 | return(-EIO); |
56 | } | 56 | } |
@@ -58,7 +58,7 @@ static int not_configged_write(int fd, const char *buf, int len, void *data) | |||
58 | static int not_configged_console_write(int fd, const char *buf, int len, | 58 | static int not_configged_console_write(int fd, const char *buf, int len, |
59 | void *data) | 59 | void *data) |
60 | { | 60 | { |
61 | printk(KERN_ERR "Using a channel type which is configured out of " | 61 | printf(KERN_ERR "Using a channel type which is configured out of " |
62 | "UML\n"); | 62 | "UML\n"); |
63 | return(-EIO); | 63 | return(-EIO); |
64 | } | 64 | } |
@@ -66,14 +66,14 @@ static int not_configged_console_write(int fd, const char *buf, int len, | |||
66 | static int not_configged_window_size(int fd, void *data, unsigned short *rows, | 66 | static int not_configged_window_size(int fd, void *data, unsigned short *rows, |
67 | unsigned short *cols) | 67 | unsigned short *cols) |
68 | { | 68 | { |
69 | printk(KERN_ERR "Using a channel type which is configured out of " | 69 | printf(KERN_ERR "Using a channel type which is configured out of " |
70 | "UML\n"); | 70 | "UML\n"); |
71 | return(-ENODEV); | 71 | return(-ENODEV); |
72 | } | 72 | } |
73 | 73 | ||
74 | static void not_configged_free(void *data) | 74 | static void not_configged_free(void *data) |
75 | { | 75 | { |
76 | printk(KERN_ERR "Using a channel type which is configured out of " | 76 | printf(KERN_ERR "Using a channel type which is configured out of " |
77 | "UML\n"); | 77 | "UML\n"); |
78 | } | 78 | } |
79 | 79 | ||
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c index 6924f273ced9..025d3be8aca4 100644 --- a/arch/um/drivers/line.c +++ b/arch/um/drivers/line.c | |||
@@ -39,19 +39,69 @@ static void line_timer_cb(void *arg) | |||
39 | line_interrupt(line->driver->read_irq, arg, NULL); | 39 | line_interrupt(line->driver->read_irq, arg, NULL); |
40 | } | 40 | } |
41 | 41 | ||
42 | static int write_room(struct line *dev) | 42 | /* Returns the free space inside the ring buffer of this line. |
43 | * | ||
44 |  * Should be called while holding line->lock (this does not modify data). | ||
45 | */ | ||
46 | static int write_room(struct line *line) | ||
43 | { | 47 | { |
44 | int n; | 48 | int n; |
45 | 49 | ||
46 | if (dev->buffer == NULL) | 50 | if (line->buffer == NULL) |
47 | return (LINE_BUFSIZE - 1); | 51 | return LINE_BUFSIZE - 1; |
52 | |||
53 | /* This is for the case where the buffer is wrapped! */ | ||
54 | n = line->head - line->tail; | ||
48 | 55 | ||
49 | n = dev->head - dev->tail; | ||
50 | if (n <= 0) | 56 | if (n <= 0) |
51 | n = LINE_BUFSIZE + n; | 57 | n = LINE_BUFSIZE + n; /* The other case */ |
52 | return (n - 1); | 58 | return n - 1; |
59 | } | ||
60 | |||
61 | int line_write_room(struct tty_struct *tty) | ||
62 | { | ||
63 | struct line *line = tty->driver_data; | ||
64 | unsigned long flags; | ||
65 | int room; | ||
66 | |||
67 | if (tty->stopped) | ||
68 | return 0; | ||
69 | |||
70 | spin_lock_irqsave(&line->lock, flags); | ||
71 | room = write_room(line); | ||
72 | spin_unlock_irqrestore(&line->lock, flags); | ||
73 | |||
74 | /* XXX: debugging warning, to be removed */ | ||
75 | if (0 == room) | ||
76 | printk(KERN_DEBUG "%s: %s: no room left in buffer\n", | ||
77 | __FUNCTION__,tty->name); | ||
78 | return room; | ||
79 | } | ||
80 | |||
81 | int line_chars_in_buffer(struct tty_struct *tty) | ||
82 | { | ||
83 | struct line *line = tty->driver_data; | ||
84 | unsigned long flags; | ||
85 | int ret; | ||
86 | |||
87 | spin_lock_irqsave(&line->lock, flags); | ||
88 | |||
89 | /* write_room subtracts 1 for the needed NULL, so we re-add it. */ | ||
90 | ret = LINE_BUFSIZE - (write_room(line) + 1); | ||
91 | spin_unlock_irqrestore(&line->lock, flags); | ||
92 | |||
93 | return ret; | ||
53 | } | 94 | } |
54 | 95 | ||
96 | /* | ||
97 | * This copies the content of buf into the circular buffer associated with | ||
98 | * this line. | ||
99 | * The return value is the number of characters actually copied, i.e. the ones | ||
100 | * for which there was space: this function is not supposed to ever flush out | ||
101 | * the circular buffer. | ||
102 | * | ||
103 | * Must be called while holding line->lock! | ||
104 | */ | ||
55 | static int buffer_data(struct line *line, const char *buf, int len) | 105 | static int buffer_data(struct line *line, const char *buf, int len) |
56 | { | 106 | { |
57 | int end, room; | 107 | int end, room; |
@@ -70,48 +120,95 @@ static int buffer_data(struct line *line, const char *buf, int len) | |||
70 | len = (len > room) ? room : len; | 120 | len = (len > room) ? room : len; |
71 | 121 | ||
72 | end = line->buffer + LINE_BUFSIZE - line->tail; | 122 | end = line->buffer + LINE_BUFSIZE - line->tail; |
73 | if(len < end){ | 123 | |
124 | if (len < end){ | ||
74 | memcpy(line->tail, buf, len); | 125 | memcpy(line->tail, buf, len); |
75 | line->tail += len; | 126 | line->tail += len; |
76 | } | 127 | } else { |
77 | else { | 128 | /* The circular buffer is wrapping */ |
78 | memcpy(line->tail, buf, end); | 129 | memcpy(line->tail, buf, end); |
79 | buf += end; | 130 | buf += end; |
80 | memcpy(line->buffer, buf, len - end); | 131 | memcpy(line->buffer, buf, len - end); |
81 | line->tail = line->buffer + len - end; | 132 | line->tail = line->buffer + len - end; |
82 | } | 133 | } |
83 | 134 | ||
84 | return(len); | 135 | return len; |
85 | } | 136 | } |
86 | 137 | ||
138 | /* | ||
139 | * Flushes the ring buffer to the output channels. That is, write_chan is | ||
140 | * called, passing it line->head as buffer, and an appropriate count. | ||
141 | * | ||
142 | * On exit, returns 1 when the buffer is empty, | ||
143 |  * 0 when the buffer still contains data, | ||
144 | * and -errno when an error occurred. | ||
145 | * | ||
146 | * Must be called while holding line->lock!*/ | ||
87 | static int flush_buffer(struct line *line) | 147 | static int flush_buffer(struct line *line) |
88 | { | 148 | { |
89 | int n, count; | 149 | int n, count; |
90 | 150 | ||
91 | if ((line->buffer == NULL) || (line->head == line->tail)) | 151 | if ((line->buffer == NULL) || (line->head == line->tail)) |
92 | return(1); | 152 | return 1; |
93 | 153 | ||
94 | if (line->tail < line->head) { | 154 | if (line->tail < line->head) { |
155 | /* line->buffer + LINE_BUFSIZE is the end of the buffer! */ | ||
95 | count = line->buffer + LINE_BUFSIZE - line->head; | 156 | count = line->buffer + LINE_BUFSIZE - line->head; |
157 | |||
96 | n = write_chan(&line->chan_list, line->head, count, | 158 | n = write_chan(&line->chan_list, line->head, count, |
97 | line->driver->write_irq); | 159 | line->driver->write_irq); |
98 | if (n < 0) | 160 | if (n < 0) |
99 | return(n); | 161 | return n; |
100 | if (n == count) | 162 | if (n == count) { |
163 | /* We have flushed from ->head to buffer end, now we | ||
164 | * must flush only from the beginning to ->tail.*/ | ||
101 | line->head = line->buffer; | 165 | line->head = line->buffer; |
102 | else { | 166 | } else { |
103 | line->head += n; | 167 | line->head += n; |
104 | return(0); | 168 | return 0; |
105 | } | 169 | } |
106 | } | 170 | } |
107 | 171 | ||
108 | count = line->tail - line->head; | 172 | count = line->tail - line->head; |
109 | n = write_chan(&line->chan_list, line->head, count, | 173 | n = write_chan(&line->chan_list, line->head, count, |
110 | line->driver->write_irq); | 174 | line->driver->write_irq); |
111 | if(n < 0) return(n); | 175 | |
176 | if(n < 0) | ||
177 | return n; | ||
112 | 178 | ||
113 | line->head += n; | 179 | line->head += n; |
114 | return(line->head == line->tail); | 180 | return line->head == line->tail; |
181 | } | ||
182 | |||
183 | void line_flush_buffer(struct tty_struct *tty) | ||
184 | { | ||
185 | struct line *line = tty->driver_data; | ||
186 | unsigned long flags; | ||
187 | int err; | ||
188 | |||
189 | /*XXX: copied from line_write, verify if it is correct!*/ | ||
190 | if(tty->stopped) | ||
191 | return; | ||
192 | //return 0; | ||
193 | |||
194 | spin_lock_irqsave(&line->lock, flags); | ||
195 | err = flush_buffer(line); | ||
196 | /*if (err == 1) | ||
197 | err = 0;*/ | ||
198 | spin_unlock_irqrestore(&line->lock, flags); | ||
199 | //return err; | ||
200 | } | ||
201 | |||
202 | /* We map ->flush_chars and ->put_char (which come as a pair) onto | ||
203 |  * ->flush_buffer and ->write respectively. Hope it's not that bad.*/ | ||
204 | void line_flush_chars(struct tty_struct *tty) | ||
205 | { | ||
206 | line_flush_buffer(tty); | ||
207 | } | ||
208 | |||
209 | void line_put_char(struct tty_struct *tty, unsigned char ch) | ||
210 | { | ||
211 | line_write(tty, &ch, sizeof(ch)); | ||
115 | } | 212 | } |
116 | 213 | ||
117 | int line_write(struct tty_struct *tty, const unsigned char *buf, int len) | 214 | int line_write(struct tty_struct *tty, const unsigned char *buf, int len) |
@@ -120,38 +217,31 @@ int line_write(struct tty_struct *tty, const unsigned char *buf, int len) | |||
120 | unsigned long flags; | 217 | unsigned long flags; |
121 | int n, err, ret = 0; | 218 | int n, err, ret = 0; |
122 | 219 | ||
123 | if(tty->stopped) return 0; | 220 | if(tty->stopped) |
221 | return 0; | ||
124 | 222 | ||
125 | down(&line->sem); | 223 | spin_lock_irqsave(&line->lock, flags); |
126 | if(line->head != line->tail){ | 224 | if (line->head != line->tail) { |
127 | local_irq_save(flags); | ||
128 | ret = buffer_data(line, buf, len); | 225 | ret = buffer_data(line, buf, len); |
129 | err = flush_buffer(line); | 226 | err = flush_buffer(line); |
130 | local_irq_restore(flags); | 227 | if (err <= 0 && (err != -EAGAIN || !ret)) |
131 | if(err <= 0 && (err != -EAGAIN || !ret)) | ||
132 | ret = err; | 228 | ret = err; |
133 | } | 229 | } else { |
134 | else { | ||
135 | n = write_chan(&line->chan_list, buf, len, | 230 | n = write_chan(&line->chan_list, buf, len, |
136 | line->driver->write_irq); | 231 | line->driver->write_irq); |
137 | if(n < 0){ | 232 | if (n < 0) { |
138 | ret = n; | 233 | ret = n; |
139 | goto out_up; | 234 | goto out_up; |
140 | } | 235 | } |
141 | 236 | ||
142 | len -= n; | 237 | len -= n; |
143 | ret += n; | 238 | ret += n; |
144 | if(len > 0) | 239 | if (len > 0) |
145 | ret += buffer_data(line, buf + n, len); | 240 | ret += buffer_data(line, buf + n, len); |
146 | } | 241 | } |
147 | out_up: | 242 | out_up: |
148 | up(&line->sem); | 243 | spin_unlock_irqrestore(&line->lock, flags); |
149 | return(ret); | 244 | return ret; |
150 | } | ||
151 | |||
152 | void line_put_char(struct tty_struct *tty, unsigned char ch) | ||
153 | { | ||
154 | line_write(tty, &ch, sizeof(ch)); | ||
155 | } | 245 | } |
156 | 246 | ||
157 | void line_set_termios(struct tty_struct *tty, struct termios * old) | 247 | void line_set_termios(struct tty_struct *tty, struct termios * old) |
@@ -159,11 +249,6 @@ void line_set_termios(struct tty_struct *tty, struct termios * old) | |||
159 | /* nothing */ | 249 | /* nothing */ |
160 | } | 250 | } |
161 | 251 | ||
162 | int line_chars_in_buffer(struct tty_struct *tty) | ||
163 | { | ||
164 | return 0; | ||
165 | } | ||
166 | |||
167 | static struct { | 252 | static struct { |
168 | int cmd; | 253 | int cmd; |
169 | char *level; | 254 | char *level; |
@@ -250,7 +335,7 @@ int line_ioctl(struct tty_struct *tty, struct file * file, | |||
250 | ret = -ENOIOCTLCMD; | 335 | ret = -ENOIOCTLCMD; |
251 | break; | 336 | break; |
252 | } | 337 | } |
253 | return(ret); | 338 | return ret; |
254 | } | 339 | } |
255 | 340 | ||
256 | static irqreturn_t line_write_interrupt(int irq, void *data, | 341 | static irqreturn_t line_write_interrupt(int irq, void *data, |
@@ -260,18 +345,23 @@ static irqreturn_t line_write_interrupt(int irq, void *data, | |||
260 | struct line *line = tty->driver_data; | 345 | struct line *line = tty->driver_data; |
261 | int err; | 346 | int err; |
262 | 347 | ||
348 | /* Interrupts are enabled here because we registered the interrupt with | ||
349 | * SA_INTERRUPT (see line_setup_irq).*/ | ||
350 | |||
351 | spin_lock_irq(&line->lock); | ||
263 | err = flush_buffer(line); | 352 | err = flush_buffer(line); |
264 | if(err == 0) | 353 | if (err == 0) { |
265 | return(IRQ_NONE); | 354 | return IRQ_NONE; |
266 | else if(err < 0){ | 355 | } else if(err < 0) { |
267 | line->head = line->buffer; | 356 | line->head = line->buffer; |
268 | line->tail = line->buffer; | 357 | line->tail = line->buffer; |
269 | } | 358 | } |
359 | spin_unlock_irq(&line->lock); | ||
270 | 360 | ||
271 | if(tty == NULL) | 361 | if(tty == NULL) |
272 | return(IRQ_NONE); | 362 | return IRQ_NONE; |
273 | 363 | ||
274 | if(test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags) && | 364 | if (test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags) && |
275 | (tty->ldisc.write_wakeup != NULL)) | 365 | (tty->ldisc.write_wakeup != NULL)) |
276 | (tty->ldisc.write_wakeup)(tty); | 366 | (tty->ldisc.write_wakeup)(tty); |
277 | 367 | ||
@@ -281,9 +371,9 @@ static irqreturn_t line_write_interrupt(int irq, void *data, | |||
281 | * writes. | 371 | * writes. |
282 | */ | 372 | */ |
283 | 373 | ||
284 | if(waitqueue_active(&tty->write_wait)) | 374 | if (waitqueue_active(&tty->write_wait)) |
285 | wake_up_interruptible(&tty->write_wait); | 375 | wake_up_interruptible(&tty->write_wait); |
286 | return(IRQ_HANDLED); | 376 | return IRQ_HANDLED; |
287 | } | 377 | } |
288 | 378 | ||
289 | int line_setup_irq(int fd, int input, int output, struct tty_struct *tty) | 379 | int line_setup_irq(int fd, int input, int output, struct tty_struct *tty) |
@@ -292,15 +382,18 @@ int line_setup_irq(int fd, int input, int output, struct tty_struct *tty) | |||
292 | struct line_driver *driver = line->driver; | 382 | struct line_driver *driver = line->driver; |
293 | int err = 0, flags = SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM; | 383 | int err = 0, flags = SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM; |
294 | 384 | ||
295 | if(input) err = um_request_irq(driver->read_irq, fd, IRQ_READ, | 385 | if (input) |
386 | err = um_request_irq(driver->read_irq, fd, IRQ_READ, | ||
296 | line_interrupt, flags, | 387 | line_interrupt, flags, |
297 | driver->read_irq_name, tty); | 388 | driver->read_irq_name, tty); |
298 | if(err) return(err); | 389 | if (err) |
299 | if(output) err = um_request_irq(driver->write_irq, fd, IRQ_WRITE, | 390 | return err; |
391 | if (output) | ||
392 | err = um_request_irq(driver->write_irq, fd, IRQ_WRITE, | ||
300 | line_write_interrupt, flags, | 393 | line_write_interrupt, flags, |
301 | driver->write_irq_name, tty); | 394 | driver->write_irq_name, tty); |
302 | line->have_irq = 1; | 395 | line->have_irq = 1; |
303 | return(err); | 396 | return err; |
304 | } | 397 | } |
305 | 398 | ||
306 | void line_disable(struct tty_struct *tty, int current_irq) | 399 | void line_disable(struct tty_struct *tty, int current_irq) |
@@ -336,7 +429,9 @@ int line_open(struct line *lines, struct tty_struct *tty, | |||
336 | line = &lines[tty->index]; | 429 | line = &lines[tty->index]; |
337 | tty->driver_data = line; | 430 | tty->driver_data = line; |
338 | 431 | ||
339 | down(&line->sem); | 432 | /* The IRQ which takes this lock is not yet enabled and won't run |
433 |  * before the end of this function, so we don't need spin_lock_irq.*/ | ||
434 | spin_lock(&line->lock); | ||
340 | if (tty->count == 1) { | 435 | if (tty->count == 1) { |
341 | if (!line->valid) { | 436 | if (!line->valid) { |
342 | err = -ENODEV; | 437 | err = -ENODEV; |
@@ -349,6 +444,7 @@ int line_open(struct line *lines, struct tty_struct *tty, | |||
349 | err = open_chan(&line->chan_list); | 444 | err = open_chan(&line->chan_list); |
350 | if(err) goto out; | 445 | if(err) goto out; |
351 | } | 446 | } |
447 | /* Here the interrupt is registered.*/ | ||
352 | enable_chan(&line->chan_list, tty); | 448 | enable_chan(&line->chan_list, tty); |
353 | INIT_WORK(&line->task, line_timer_cb, tty); | 449 | INIT_WORK(&line->task, line_timer_cb, tty); |
354 | } | 450 | } |
@@ -362,21 +458,36 @@ int line_open(struct line *lines, struct tty_struct *tty, | |||
362 | line->count++; | 458 | line->count++; |
363 | 459 | ||
364 | out: | 460 | out: |
365 | up(&line->sem); | 461 | spin_unlock(&line->lock); |
366 | return(err); | 462 | return err; |
367 | } | 463 | } |
368 | 464 | ||
465 | static void unregister_winch(struct tty_struct *tty); | ||
466 | |||
369 | void line_close(struct tty_struct *tty, struct file * filp) | 467 | void line_close(struct tty_struct *tty, struct file * filp) |
370 | { | 468 | { |
371 | struct line *line = tty->driver_data; | 469 | struct line *line = tty->driver_data; |
372 | 470 | ||
373 | down(&line->sem); | 471 | /* XXX: I assume this should be called in process context, not with |
472 | * interrupts disabled! | ||
473 | */ | ||
474 | spin_lock_irq(&line->lock); | ||
475 | |||
476 | /* We ignore the error anyway! */ | ||
477 | flush_buffer(line); | ||
478 | |||
374 | line->count--; | 479 | line->count--; |
375 | if (tty->count == 1) { | 480 | if (tty->count == 1) { |
376 | line_disable(tty, -1); | 481 | line_disable(tty, -1); |
377 | tty->driver_data = NULL; | 482 | tty->driver_data = NULL; |
378 | } | 483 | } |
379 | up(&line->sem); | 484 | |
485 | if((line->count == 0) && line->sigio){ | ||
486 | unregister_winch(tty); | ||
487 | line->sigio = 0; | ||
488 | } | ||
489 | |||
490 | spin_unlock_irq(&line->lock); | ||
380 | } | 491 | } |
381 | 492 | ||
382 | void close_lines(struct line *lines, int nlines) | 493 | void close_lines(struct line *lines, int nlines) |
@@ -387,31 +498,41 @@ void close_lines(struct line *lines, int nlines) | |||
387 | close_chan(&lines[i].chan_list); | 498 | close_chan(&lines[i].chan_list); |
388 | } | 499 | } |
389 | 500 | ||
390 | int line_setup(struct line *lines, int num, char *init, int all_allowed) | 501 | /* Common setup code for both startup command line and mconsole initialization. |
502 |  * @lines contains the array (of size @num) to modify; | ||
503 |  * @init is the setup string; | ||
504 |  * @all_allowed is a boolean saying whether we can set up the whole @lines | ||
505 |  * array at once. For instance, it will usually be true for startup init (where | ||
506 |  * we can use con=xterm) and false for mconsole.*/ | ||
507 | |||
508 | int line_setup(struct line *lines, unsigned int num, char *init, int all_allowed) | ||
391 | { | 509 | { |
392 | int i, n; | 510 | int i, n; |
393 | char *end; | 511 | char *end; |
394 | 512 | ||
395 | if(*init == '=') n = -1; | 513 | if(*init == '=') { |
396 | else { | 514 | /* We said con=/ssl= instead of con#=, so we are configuring all |
515 | * consoles at once.*/ | ||
516 | n = -1; | ||
517 | } else { | ||
397 | n = simple_strtoul(init, &end, 0); | 518 | n = simple_strtoul(init, &end, 0); |
398 | if(*end != '='){ | 519 | if(*end != '='){ |
399 | printk(KERN_ERR "line_setup failed to parse \"%s\"\n", | 520 | printk(KERN_ERR "line_setup failed to parse \"%s\"\n", |
400 | init); | 521 | init); |
401 | return(0); | 522 | return 0; |
402 | } | 523 | } |
403 | init = end; | 524 | init = end; |
404 | } | 525 | } |
405 | init++; | 526 | init++; |
406 | if((n >= 0) && (n >= num)){ | 527 | |
528 | if (n >= (signed int) num) { | ||
407 | printk("line_setup - %d out of range ((0 ... %d) allowed)\n", | 529 | printk("line_setup - %d out of range ((0 ... %d) allowed)\n", |
408 | n, num - 1); | 530 | n, num - 1); |
409 | return(0); | 531 | return 0; |
410 | } | 532 | } else if (n >= 0){ |
411 | else if (n >= 0){ | ||
412 | if (lines[n].count > 0) { | 533 | if (lines[n].count > 0) { |
413 | printk("line_setup - device %d is open\n", n); | 534 | printk("line_setup - device %d is open\n", n); |
414 | return(0); | 535 | return 0; |
415 | } | 536 | } |
416 | if (lines[n].init_pri <= INIT_ONE){ | 537 | if (lines[n].init_pri <= INIT_ONE){ |
417 | lines[n].init_pri = INIT_ONE; | 538 | lines[n].init_pri = INIT_ONE; |
@@ -422,13 +543,11 @@ int line_setup(struct line *lines, int num, char *init, int all_allowed) | |||
422 | lines[n].valid = 1; | 543 | lines[n].valid = 1; |
423 | } | 544 | } |
424 | } | 545 | } |
425 | } | 546 | } else if(!all_allowed){ |
426 | else if(!all_allowed){ | ||
427 | printk("line_setup - can't configure all devices from " | 547 | printk("line_setup - can't configure all devices from " |
428 | "mconsole\n"); | 548 | "mconsole\n"); |
429 | return(0); | 549 | return 0; |
430 | } | 550 | } else { |
431 | else { | ||
432 | for(i = 0; i < num; i++){ | 551 | for(i = 0; i < num; i++){ |
433 | if(lines[i].init_pri <= INIT_ALL){ | 552 | if(lines[i].init_pri <= INIT_ALL){ |
434 | lines[i].init_pri = INIT_ALL; | 553 | lines[i].init_pri = INIT_ALL; |
@@ -440,21 +559,21 @@ int line_setup(struct line *lines, int num, char *init, int all_allowed) | |||
440 | } | 559 | } |
441 | } | 560 | } |
442 | } | 561 | } |
443 | return(1); | 562 | return 1; |
444 | } | 563 | } |
445 | 564 | ||
446 | int line_config(struct line *lines, int num, char *str) | 565 | int line_config(struct line *lines, unsigned int num, char *str) |
447 | { | 566 | { |
448 | char *new = uml_strdup(str); | 567 | char *new = uml_strdup(str); |
449 | 568 | ||
450 | if(new == NULL){ | 569 | if(new == NULL){ |
451 | printk("line_config - uml_strdup failed\n"); | 570 | printk("line_config - uml_strdup failed\n"); |
452 | return(-ENOMEM); | 571 | return -ENOMEM; |
453 | } | 572 | } |
454 | return(!line_setup(lines, num, new, 0)); | 573 | return !line_setup(lines, num, new, 0); |
455 | } | 574 | } |
456 | 575 | ||
457 | int line_get_config(char *name, struct line *lines, int num, char *str, | 576 | int line_get_config(char *name, struct line *lines, unsigned int num, char *str, |
458 | int size, char **error_out) | 577 | int size, char **error_out) |
459 | { | 578 | { |
460 | struct line *line; | 579 | struct line *line; |
@@ -464,47 +583,33 @@ int line_get_config(char *name, struct line *lines, int num, char *str, | |||
464 | dev = simple_strtoul(name, &end, 0); | 583 | dev = simple_strtoul(name, &end, 0); |
465 | if((*end != '\0') || (end == name)){ | 584 | if((*end != '\0') || (end == name)){ |
466 | *error_out = "line_get_config failed to parse device number"; | 585 | *error_out = "line_get_config failed to parse device number"; |
467 | return(0); | 586 | return 0; |
468 | } | 587 | } |
469 | 588 | ||
470 | if((dev < 0) || (dev >= num)){ | 589 | if((dev < 0) || (dev >= num)){ |
471 | *error_out = "device number of of range"; | 590 | *error_out = "device number out of range"; |
472 | return(0); | 591 | return 0; |
473 | } | 592 | } |
474 | 593 | ||
475 | line = &lines[dev]; | 594 | line = &lines[dev]; |
476 | 595 | ||
477 | down(&line->sem); | 596 | spin_lock(&line->lock); |
478 | if(!line->valid) | 597 | if(!line->valid) |
479 | CONFIG_CHUNK(str, size, n, "none", 1); | 598 | CONFIG_CHUNK(str, size, n, "none", 1); |
480 | else if(line->count == 0) | 599 | else if(line->count == 0) |
481 | CONFIG_CHUNK(str, size, n, line->init_str, 1); | 600 | CONFIG_CHUNK(str, size, n, line->init_str, 1); |
482 | else n = chan_config_string(&line->chan_list, str, size, error_out); | 601 | else n = chan_config_string(&line->chan_list, str, size, error_out); |
483 | up(&line->sem); | 602 | spin_unlock(&line->lock); |
484 | 603 | ||
485 | return(n); | 604 | return n; |
486 | } | 605 | } |
487 | 606 | ||
488 | int line_remove(struct line *lines, int num, char *str) | 607 | int line_remove(struct line *lines, unsigned int num, char *str) |
489 | { | 608 | { |
490 | char config[sizeof("conxxxx=none\0")]; | 609 | char config[sizeof("conxxxx=none\0")]; |
491 | 610 | ||
492 | sprintf(config, "%s=none", str); | 611 | sprintf(config, "%s=none", str); |
493 | return(!line_setup(lines, num, config, 0)); | 612 | return !line_setup(lines, num, config, 0); |
494 | } | ||
495 | |||
496 | int line_write_room(struct tty_struct *tty) | ||
497 | { | ||
498 | struct line *dev = tty->driver_data; | ||
499 | int room; | ||
500 | |||
501 | if (tty->stopped) | ||
502 | return 0; | ||
503 | room = write_room(dev); | ||
504 | if (0 == room) | ||
505 | printk(KERN_DEBUG "%s: %s: no room left in buffer\n", | ||
506 | __FUNCTION__,tty->name); | ||
507 | return room; | ||
508 | } | 613 | } |
509 | 614 | ||
510 | struct tty_driver *line_register_devfs(struct lines *set, | 615 | struct tty_driver *line_register_devfs(struct lines *set, |
@@ -553,7 +658,7 @@ void lines_init(struct line *lines, int nlines) | |||
553 | for(i = 0; i < nlines; i++){ | 658 | for(i = 0; i < nlines; i++){ |
554 | line = &lines[i]; | 659 | line = &lines[i]; |
555 | INIT_LIST_HEAD(&line->chan_list); | 660 | INIT_LIST_HEAD(&line->chan_list); |
556 | sema_init(&line->sem, 1); | 661 | spin_lock_init(&line->lock); |
557 | if(line->init_str != NULL){ | 662 | if(line->init_str != NULL){ |
558 | line->init_str = uml_strdup(line->init_str); | 663 | line->init_str = uml_strdup(line->init_str); |
559 | if(line->init_str == NULL) | 664 | if(line->init_str == NULL) |
@@ -587,7 +692,7 @@ irqreturn_t winch_interrupt(int irq, void *data, struct pt_regs *unused) | |||
587 | "errno = %d\n", -err); | 692 | "errno = %d\n", -err); |
588 | printk("fd %d is losing SIGWINCH support\n", | 693 | printk("fd %d is losing SIGWINCH support\n", |
589 | winch->tty_fd); | 694 | winch->tty_fd); |
590 | return(IRQ_HANDLED); | 695 | return IRQ_HANDLED; |
591 | } | 696 | } |
592 | goto out; | 697 | goto out; |
593 | } | 698 | } |
@@ -603,7 +708,7 @@ irqreturn_t winch_interrupt(int irq, void *data, struct pt_regs *unused) | |||
603 | out: | 708 | out: |
604 | if(winch->fd != -1) | 709 | if(winch->fd != -1) |
605 | reactivate_fd(winch->fd, WINCH_IRQ); | 710 | reactivate_fd(winch->fd, WINCH_IRQ); |
606 | return(IRQ_HANDLED); | 711 | return IRQ_HANDLED; |
607 | } | 712 | } |
608 | 713 | ||
609 | DECLARE_MUTEX(winch_handler_sem); | 714 | DECLARE_MUTEX(winch_handler_sem); |
@@ -625,7 +730,7 @@ void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty) | |||
625 | .pid = pid, | 730 | .pid = pid, |
626 | .tty = tty }); | 731 | .tty = tty }); |
627 | list_add(&winch->list, &winch_handlers); | 732 | list_add(&winch->list, &winch_handlers); |
628 | if(um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt, | 733 | if(um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt, |
629 | SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM, | 734 | SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM, |
630 | "winch", winch) < 0) | 735 | "winch", winch) < 0) |
631 | printk("register_winch_irq - failed to register IRQ\n"); | 736 | printk("register_winch_irq - failed to register IRQ\n"); |
@@ -633,6 +738,34 @@ void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty) | |||
633 | up(&winch_handler_sem); | 738 | up(&winch_handler_sem); |
634 | } | 739 | } |
635 | 740 | ||
741 | static void unregister_winch(struct tty_struct *tty) | ||
742 | { | ||
743 | struct list_head *ele; | ||
744 | struct winch *winch, *found = NULL; | ||
745 | |||
746 | down(&winch_handler_sem); | ||
747 | list_for_each(ele, &winch_handlers){ | ||
748 | winch = list_entry(ele, struct winch, list); | ||
749 | if(winch->tty == tty){ | ||
750 | found = winch; | ||
751 | break; | ||
752 | } | ||
753 | } | ||
754 | |||
755 | if(found == NULL) | ||
756 | goto out; | ||
757 | |||
758 | if(winch->pid != -1) | ||
759 | os_kill_process(winch->pid, 1); | ||
760 | |||
761 | free_irq_by_irq_and_dev(WINCH_IRQ, winch); | ||
762 | free_irq(WINCH_IRQ, winch); | ||
763 | list_del(&winch->list); | ||
764 | kfree(winch); | ||
765 | out: | ||
766 | up(&winch_handler_sem); | ||
767 | } | ||
768 | |||
636 | static void winch_cleanup(void) | 769 | static void winch_cleanup(void) |
637 | { | 770 | { |
638 | struct list_head *ele; | 771 | struct list_head *ele; |
@@ -656,26 +789,16 @@ char *add_xterm_umid(char *base) | |||
656 | int len; | 789 | int len; |
657 | 790 | ||
658 | umid = get_umid(1); | 791 | umid = get_umid(1); |
659 | if(umid == NULL) return(base); | 792 | if(umid == NULL) |
793 | return base; | ||
660 | 794 | ||
661 | len = strlen(base) + strlen(" ()") + strlen(umid) + 1; | 795 | len = strlen(base) + strlen(" ()") + strlen(umid) + 1; |
662 | title = kmalloc(len, GFP_KERNEL); | 796 | title = kmalloc(len, GFP_KERNEL); |
663 | if(title == NULL){ | 797 | if(title == NULL){ |
664 | printk("Failed to allocate buffer for xterm title\n"); | 798 | printk("Failed to allocate buffer for xterm title\n"); |
665 | return(base); | 799 | return base; |
666 | } | 800 | } |
667 | 801 | ||
668 | snprintf(title, len, "%s (%s)", base, umid); | 802 | snprintf(title, len, "%s (%s)", base, umid); |
669 | return(title); | 803 | return title; |
670 | } | 804 | } |
671 | |||
672 | /* | ||
673 | * Overrides for Emacs so that we follow Linus's tabbing style. | ||
674 | * Emacs will notice this stuff at the end of the file and automatically | ||
675 | * adjust the settings for this buffer only. This must remain at the end | ||
676 | * of the file. | ||
677 | * --------------------------------------------------------------------------- | ||
678 | * Local variables: | ||
679 | * c-file-style: "linux" | ||
680 | * End: | ||
681 | */ | ||
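For reference, the write_room()/buffer_data() arithmetic that the comments above describe can be exercised in isolation. The following is a plain userspace model of the same head/tail bookkeeping; BUFSIZE and struct ring are stand-ins for LINE_BUFSIZE and struct line, and locking is omitted because there is only one thread here.

#include <stdio.h>
#include <string.h>

#define BUFSIZE 16

struct ring {
	char buf[BUFSIZE];
	char *head;	/* flush side: oldest byte not yet written out */
	char *tail;	/* producer side: next free slot */
};

/* Free space; one slot is always kept unused so head == tail means "empty". */
static int write_room(const struct ring *r)
{
	int n = r->head - r->tail;

	if (n <= 0)
		n = BUFSIZE + n;	/* tail is at or ahead of head */
	return n - 1;
}

/* Copy as much of data as fits; returns the number of bytes taken. */
static int buffer_data(struct ring *r, const char *data, int len)
{
	int room = write_room(r);
	int end;

	if (len > room)
		len = room;
	end = r->buf + BUFSIZE - r->tail;	/* bytes until the wrap point */
	if (len < end) {
		memcpy(r->tail, data, len);
		r->tail += len;
	} else {				/* the copy wraps around */
		memcpy(r->tail, data, end);
		memcpy(r->buf, data + end, len - end);
		r->tail = r->buf + (len - end);
	}
	return len;
}

int main(void)
{
	struct ring r;

	r.head = r.buf;
	r.tail = r.buf;
	printf("room=%d\n", write_room(&r));				/* 15 */
	printf("copied=%d\n", buffer_data(&r, "0123456789abcdef", 16));	/* 15 */
	printf("room=%d\n", write_room(&r));				/* 0 */
	return 0;
}

Keeping one byte unused is what lets head == tail unambiguously mean "empty", which is why write_room() never reports more than LINE_BUFSIZE - 1.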
diff --git a/arch/um/drivers/ssl.c b/arch/um/drivers/ssl.c index c5839c3141f8..a2bac429f3d4 100644 --- a/arch/um/drivers/ssl.c +++ b/arch/um/drivers/ssl.c | |||
@@ -107,11 +107,6 @@ int ssl_open(struct tty_struct *tty, struct file *filp) | |||
107 | } | 107 | } |
108 | 108 | ||
109 | #if 0 | 109 | #if 0 |
110 | static int ssl_chars_in_buffer(struct tty_struct *tty) | ||
111 | { | ||
112 | return(0); | ||
113 | } | ||
114 | |||
115 | static void ssl_flush_buffer(struct tty_struct *tty) | 110 | static void ssl_flush_buffer(struct tty_struct *tty) |
116 | { | 111 | { |
117 | return; | 112 | return; |
@@ -149,11 +144,11 @@ static struct tty_operations ssl_ops = { | |||
149 | .put_char = line_put_char, | 144 | .put_char = line_put_char, |
150 | .write_room = line_write_room, | 145 | .write_room = line_write_room, |
151 | .chars_in_buffer = line_chars_in_buffer, | 146 | .chars_in_buffer = line_chars_in_buffer, |
147 | .flush_buffer = line_flush_buffer, | ||
148 | .flush_chars = line_flush_chars, | ||
152 | .set_termios = line_set_termios, | 149 | .set_termios = line_set_termios, |
153 | .ioctl = line_ioctl, | 150 | .ioctl = line_ioctl, |
154 | #if 0 | 151 | #if 0 |
155 | .flush_chars = ssl_flush_chars, | ||
156 | .flush_buffer = ssl_flush_buffer, | ||
157 | .throttle = ssl_throttle, | 152 | .throttle = ssl_throttle, |
158 | .unthrottle = ssl_unthrottle, | 153 | .unthrottle = ssl_unthrottle, |
159 | .stop = ssl_stop, | 154 | .stop = ssl_stop, |
@@ -171,10 +166,11 @@ static void ssl_console_write(struct console *c, const char *string, | |||
171 | unsigned len) | 166 | unsigned len) |
172 | { | 167 | { |
173 | struct line *line = &serial_lines[c->index]; | 168 | struct line *line = &serial_lines[c->index]; |
169 | unsigned long flags; | ||
174 | 170 | ||
175 | down(&line->sem); | 171 | spin_lock_irqsave(&line->lock, flags); |
176 | console_write_chan(&line->chan_list, string, len); | 172 | console_write_chan(&line->chan_list, string, len); |
177 | up(&line->sem); | 173 | spin_unlock_irqrestore(&line->lock, flags); |
178 | } | 174 | } |
179 | 175 | ||
180 | static struct tty_driver *ssl_console_device(struct console *c, int *index) | 176 | static struct tty_driver *ssl_console_device(struct console *c, int *index) |
@@ -238,14 +234,3 @@ static int ssl_chan_setup(char *str) | |||
238 | 234 | ||
239 | __setup("ssl", ssl_chan_setup); | 235 | __setup("ssl", ssl_chan_setup); |
240 | __channel_help(ssl_chan_setup, "ssl"); | 236 | __channel_help(ssl_chan_setup, "ssl"); |
241 | |||
242 | /* | ||
243 | * Overrides for Emacs so that we follow Linus's tabbing style. | ||
244 | * Emacs will notice this stuff at the end of the file and automatically | ||
245 | * adjust the settings for this buffer only. This must remain at the end | ||
246 | * of the file. | ||
247 | * --------------------------------------------------------------------------- | ||
248 | * Local variables: | ||
249 | * c-file-style: "linux" | ||
250 | * End: | ||
251 | */ | ||
diff --git a/arch/um/drivers/stdio_console.c b/arch/um/drivers/stdio_console.c index e604d7c87695..361d0be342b3 100644 --- a/arch/um/drivers/stdio_console.c +++ b/arch/um/drivers/stdio_console.c | |||
@@ -116,8 +116,11 @@ static struct tty_operations console_ops = { | |||
116 | .open = con_open, | 116 | .open = con_open, |
117 | .close = line_close, | 117 | .close = line_close, |
118 | .write = line_write, | 118 | .write = line_write, |
119 | .put_char = line_put_char, | ||
119 | .write_room = line_write_room, | 120 | .write_room = line_write_room, |
120 | .chars_in_buffer = line_chars_in_buffer, | 121 | .chars_in_buffer = line_chars_in_buffer, |
122 | .flush_buffer = line_flush_buffer, | ||
123 | .flush_chars = line_flush_chars, | ||
121 | .set_termios = line_set_termios, | 124 | .set_termios = line_set_termios, |
122 | .ioctl = line_ioctl, | 125 | .ioctl = line_ioctl, |
123 | }; | 126 | }; |
@@ -126,10 +129,11 @@ static void uml_console_write(struct console *console, const char *string, | |||
126 | unsigned len) | 129 | unsigned len) |
127 | { | 130 | { |
128 | struct line *line = &vts[console->index]; | 131 | struct line *line = &vts[console->index]; |
132 | unsigned long flags; | ||
129 | 133 | ||
130 | down(&line->sem); | 134 | spin_lock_irqsave(&line->lock, flags); |
131 | console_write_chan(&line->chan_list, string, len); | 135 | console_write_chan(&line->chan_list, string, len); |
132 | up(&line->sem); | 136 | spin_unlock_irqrestore(&line->lock, flags); |
133 | } | 137 | } |
134 | 138 | ||
135 | static struct tty_driver *uml_console_device(struct console *c, int *index) | 139 | static struct tty_driver *uml_console_device(struct console *c, int *index) |
@@ -192,14 +196,3 @@ static int console_chan_setup(char *str) | |||
192 | } | 196 | } |
193 | __setup("con", console_chan_setup); | 197 | __setup("con", console_chan_setup); |
194 | __channel_help(console_chan_setup, "con"); | 198 | __channel_help(console_chan_setup, "con"); |
195 | |||
196 | /* | ||
197 | * Overrides for Emacs so that we follow Linus's tabbing style. | ||
198 | * Emacs will notice this stuff at the end of the file and automatically | ||
199 | * adjust the settings for this buffer only. This must remain at the end | ||
200 | * of the file. | ||
201 | * --------------------------------------------------------------------------- | ||
202 | * Local variables: | ||
203 | * c-file-style: "linux" | ||
204 | * End: | ||
205 | */ | ||
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index 4d8b165bfa48..9a56ff94308d 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c | |||
@@ -156,6 +156,7 @@ static struct gendisk *fake_gendisk[MAX_DEV]; | |||
156 | static struct openflags global_openflags = OPEN_FLAGS; | 156 | static struct openflags global_openflags = OPEN_FLAGS; |
157 | 157 | ||
158 | struct cow { | 158 | struct cow { |
159 | /* This is the backing file, actually */ | ||
159 | char *file; | 160 | char *file; |
160 | int fd; | 161 | int fd; |
161 | unsigned long *bitmap; | 162 | unsigned long *bitmap; |
@@ -927,10 +928,14 @@ static int ubd_open(struct inode *inode, struct file *filp) | |||
927 | } | 928 | } |
928 | } | 929 | } |
929 | dev->count++; | 930 | dev->count++; |
930 | if((filp->f_mode & FMODE_WRITE) && !dev->openflags.w){ | 931 | set_disk_ro(disk, !dev->openflags.w); |
932 | |||
933 | /* This should no longer be needed. And anyway it did not manage to prevent | ||
934 |  * read-write remounting of filesystems.*/ | ||
935 | /*if((filp->f_mode & FMODE_WRITE) && !dev->openflags.w){ | ||
931 | if(--dev->count == 0) ubd_close(dev); | 936 | if(--dev->count == 0) ubd_close(dev); |
932 | err = -EROFS; | 937 | err = -EROFS; |
933 | } | 938 | }*/ |
934 | out: | 939 | out: |
935 | return(err); | 940 | return(err); |
936 | } | 941 | } |
@@ -1096,6 +1101,7 @@ static int prepare_request(struct request *req, struct io_thread_req *io_req) | |||
1096 | 1101 | ||
1097 | if(req->rq_status == RQ_INACTIVE) return(1); | 1102 | if(req->rq_status == RQ_INACTIVE) return(1); |
1098 | 1103 | ||
1104 | /* This should be impossible now */ | ||
1099 | if((rq_data_dir(req) == WRITE) && !dev->openflags.w){ | 1105 | if((rq_data_dir(req) == WRITE) && !dev->openflags.w){ |
1100 | printk("Write attempted on readonly ubd device %s\n", | 1106 | printk("Write attempted on readonly ubd device %s\n", |
1101 | disk->disk_name); | 1107 | disk->disk_name); |
@@ -1243,6 +1249,7 @@ static int ubd_check_remapped(int fd, unsigned long address, int is_write, | |||
1243 | 1249 | ||
1244 | /* It's a write to a ubd device */ | 1250 | /* It's a write to a ubd device */ |
1245 | 1251 | ||
1252 | /* This should be impossible now */ | ||
1246 | if(!dev->openflags.w){ | 1253 | if(!dev->openflags.w){ |
1247 | /* It's a write access on a read-only device - probably | 1254 | /* It's a write access on a read-only device - probably |
1248 | * shouldn't happen. If the kernel is trying to change | 1255 | * shouldn't happen. If the kernel is trying to change |
@@ -1605,8 +1612,7 @@ void do_io(struct io_thread_req *req) | |||
1605 | } | 1612 | } |
1606 | } while((n < len) && (n != 0)); | 1613 | } while((n < len) && (n != 0)); |
1607 | if (n < len) memset(&buf[n], 0, len - n); | 1614 | if (n < len) memset(&buf[n], 0, len - n); |
1608 | } | 1615 | } else { |
1609 | else { | ||
1610 | n = os_write_file(req->fds[bit], buf, len); | 1616 | n = os_write_file(req->fds[bit], buf, len); |
1611 | if(n != len){ | 1617 | if(n != len){ |
1612 | printk("do_io - write failed err = %d " | 1618 | printk("do_io - write failed err = %d " |
diff --git a/arch/um/drivers/xterm_kern.c b/arch/um/drivers/xterm_kern.c index 7917b9d1cec8..a4fdf3584ad2 100644 --- a/arch/um/drivers/xterm_kern.c +++ b/arch/um/drivers/xterm_kern.c | |||
@@ -7,7 +7,6 @@ | |||
7 | #include "linux/slab.h" | 7 | #include "linux/slab.h" |
8 | #include "linux/signal.h" | 8 | #include "linux/signal.h" |
9 | #include "linux/interrupt.h" | 9 | #include "linux/interrupt.h" |
10 | #include "asm/semaphore.h" | ||
11 | #include "asm/irq.h" | 10 | #include "asm/irq.h" |
12 | #include "irq_user.h" | 11 | #include "irq_user.h" |
13 | #include "irq_kern.h" | 12 | #include "irq_kern.h" |
diff --git a/arch/um/include/common-offsets.h b/arch/um/include/common-offsets.h new file mode 100644 index 000000000000..d705daa2d854 --- /dev/null +++ b/arch/um/include/common-offsets.h | |||
@@ -0,0 +1,14 @@ | |||
1 | /* for use by sys-$SUBARCH/kernel-offsets.c */ | ||
2 | |||
3 | OFFSET(TASK_REGS, task_struct, thread.regs); | ||
4 | OFFSET(TASK_PID, task_struct, pid); | ||
5 | DEFINE(UM_KERN_PAGE_SIZE, PAGE_SIZE); | ||
6 | DEFINE(UM_NSEC_PER_SEC, NSEC_PER_SEC); | ||
7 | DEFINE_STR(UM_KERN_EMERG, KERN_EMERG); | ||
8 | DEFINE_STR(UM_KERN_ALERT, KERN_ALERT); | ||
9 | DEFINE_STR(UM_KERN_CRIT, KERN_CRIT); | ||
10 | DEFINE_STR(UM_KERN_ERR, KERN_ERR); | ||
11 | DEFINE_STR(UM_KERN_WARNING, KERN_WARNING); | ||
12 | DEFINE_STR(UM_KERN_NOTICE, KERN_NOTICE); | ||
13 | DEFINE_STR(UM_KERN_INFO, KERN_INFO); | ||
14 | DEFINE_STR(UM_KERN_DEBUG, KERN_DEBUG); | ||
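The DEFINE_STR() entries exist so that the userspace side of UML, which cannot include kernel headers, can still prefix its messages with the kernel loglevel strings. A sketch of the intended use follows; the #define stands in for what the generated header would provide, and "<3>" is the conventional KERN_ERR prefix.

#include <stdio.h>
#include <stdarg.h>

#define UM_KERN_ERR "<3>"	/* would normally come from the generated header */

static void user_log_err(const char *fmt, ...)
{
	va_list args;

	fputs(UM_KERN_ERR, stderr);
	va_start(args, fmt);
	vfprintf(stderr, fmt, args);
	va_end(args);
}

int main(void)
{
	user_log_err("failed to open console, errno = %d\n", 2);
	return 0;
}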
diff --git a/arch/um/include/kern_util.h b/arch/um/include/kern_util.h index 15389c886b41..e5fec5570199 100644 --- a/arch/um/include/kern_util.h +++ b/arch/um/include/kern_util.h | |||
@@ -8,6 +8,7 @@ | |||
8 | 8 | ||
9 | #include "linux/threads.h" | 9 | #include "linux/threads.h" |
10 | #include "sysdep/ptrace.h" | 10 | #include "sysdep/ptrace.h" |
11 | #include "sysdep/faultinfo.h" | ||
11 | 12 | ||
12 | extern int ncpus; | 13 | extern int ncpus; |
13 | extern char *linux_prog; | 14 | extern char *linux_prog; |
@@ -31,8 +32,8 @@ extern int current_pid(void); | |||
31 | extern unsigned long alloc_stack(int order, int atomic); | 32 | extern unsigned long alloc_stack(int order, int atomic); |
32 | extern int do_signal(void); | 33 | extern int do_signal(void); |
33 | extern int is_stack_fault(unsigned long sp); | 34 | extern int is_stack_fault(unsigned long sp); |
34 | extern unsigned long segv(unsigned long address, unsigned long ip, | 35 | extern unsigned long segv(struct faultinfo fi, unsigned long ip, |
35 | int is_write, int is_user, void *sc); | 36 | int is_user, void *sc); |
36 | extern int handle_page_fault(unsigned long address, unsigned long ip, | 37 | extern int handle_page_fault(unsigned long address, unsigned long ip, |
37 | int is_write, int is_user, int *code_out); | 38 | int is_write, int is_user, int *code_out); |
38 | extern void syscall_ready(void); | 39 | extern void syscall_ready(void); |
@@ -82,7 +83,7 @@ extern void timer_irq(union uml_pt_regs *regs); | |||
82 | extern void unprotect_stack(unsigned long stack); | 83 | extern void unprotect_stack(unsigned long stack); |
83 | extern void do_uml_exitcalls(void); | 84 | extern void do_uml_exitcalls(void); |
84 | extern int attach_debugger(int idle_pid, int pid, int stop); | 85 | extern int attach_debugger(int idle_pid, int pid, int stop); |
85 | extern void bad_segv(unsigned long address, unsigned long ip, int is_write); | 86 | extern void bad_segv(struct faultinfo fi, unsigned long ip); |
86 | extern int config_gdb(char *str); | 87 | extern int config_gdb(char *str); |
87 | extern int remove_gdb(void); | 88 | extern int remove_gdb(void); |
88 | extern char *uml_strdup(char *string); | 89 | extern char *uml_strdup(char *string); |
diff --git a/arch/um/include/line.h b/arch/um/include/line.h index 6d81ecc17be5..4c5e92c04ccb 100644 --- a/arch/um/include/line.h +++ b/arch/um/include/line.h | |||
@@ -10,7 +10,7 @@ | |||
10 | #include "linux/workqueue.h" | 10 | #include "linux/workqueue.h" |
11 | #include "linux/tty.h" | 11 | #include "linux/tty.h" |
12 | #include "linux/interrupt.h" | 12 | #include "linux/interrupt.h" |
13 | #include "asm/semaphore.h" | 13 | #include "linux/spinlock.h" |
14 | #include "chan_user.h" | 14 | #include "chan_user.h" |
15 | #include "mconsole_kern.h" | 15 | #include "mconsole_kern.h" |
16 | 16 | ||
@@ -37,10 +37,18 @@ struct line { | |||
37 | struct list_head chan_list; | 37 | struct list_head chan_list; |
38 | int valid; | 38 | int valid; |
39 | int count; | 39 | int count; |
40 | struct semaphore sem; | 40 | /*This lock is actually, mostly, local to*/ |
41 | spinlock_t lock; | ||
42 | |||
43 | /* Yes, this is a real circular buffer. | ||
44 | * XXX: And this should become a struct kfifo! | ||
45 | * | ||
46 | * buffer points to a buffer allocated on demand, of length | ||
47 | * LINE_BUFSIZE, head to the start of the ring, tail to the end.*/ | ||
41 | char *buffer; | 48 | char *buffer; |
42 | char *head; | 49 | char *head; |
43 | char *tail; | 50 | char *tail; |
51 | |||
44 | int sigio; | 52 | int sigio; |
45 | struct work_struct task; | 53 | struct work_struct task; |
46 | struct line_driver *driver; | 54 | struct line_driver *driver; |
@@ -52,7 +60,6 @@ struct line { | |||
52 | init_pri : INIT_STATIC, \ | 60 | init_pri : INIT_STATIC, \ |
53 | chan_list : { }, \ | 61 | chan_list : { }, \ |
54 | valid : 1, \ | 62 | valid : 1, \ |
55 | sem : { }, \ | ||
56 | buffer : NULL, \ | 63 | buffer : NULL, \ |
57 | head : NULL, \ | 64 | head : NULL, \ |
58 | tail : NULL, \ | 65 | tail : NULL, \ |
@@ -69,15 +76,18 @@ struct lines { | |||
69 | extern void line_close(struct tty_struct *tty, struct file * filp); | 76 | extern void line_close(struct tty_struct *tty, struct file * filp); |
70 | extern int line_open(struct line *lines, struct tty_struct *tty, | 77 | extern int line_open(struct line *lines, struct tty_struct *tty, |
71 | struct chan_opts *opts); | 78 | struct chan_opts *opts); |
72 | extern int line_setup(struct line *lines, int num, char *init, | 79 | extern int line_setup(struct line *lines, unsigned int sizeof_lines, char *init, |
73 | int all_allowed); | 80 | int all_allowed); |
74 | extern int line_write(struct tty_struct *tty, const unsigned char *buf, int len); | 81 | extern int line_write(struct tty_struct *tty, const unsigned char *buf, int len); |
75 | extern void line_put_char(struct tty_struct *tty, unsigned char ch); | 82 | extern void line_put_char(struct tty_struct *tty, unsigned char ch); |
76 | extern void line_set_termios(struct tty_struct *tty, struct termios * old); | 83 | extern void line_set_termios(struct tty_struct *tty, struct termios * old); |
77 | extern int line_chars_in_buffer(struct tty_struct *tty); | 84 | extern int line_chars_in_buffer(struct tty_struct *tty); |
85 | extern void line_flush_buffer(struct tty_struct *tty); | ||
86 | extern void line_flush_chars(struct tty_struct *tty); | ||
78 | extern int line_write_room(struct tty_struct *tty); | 87 | extern int line_write_room(struct tty_struct *tty); |
79 | extern int line_ioctl(struct tty_struct *tty, struct file * file, | 88 | extern int line_ioctl(struct tty_struct *tty, struct file * file, |
80 | unsigned int cmd, unsigned long arg); | 89 | unsigned int cmd, unsigned long arg); |
90 | |||
81 | extern char *add_xterm_umid(char *base); | 91 | extern char *add_xterm_umid(char *base); |
82 | extern int line_setup_irq(int fd, int input, int output, struct tty_struct *tty); | 92 | extern int line_setup_irq(int fd, int input, int output, struct tty_struct *tty); |
83 | extern void line_close_chan(struct line *line); | 93 | extern void line_close_chan(struct line *line); |
@@ -89,20 +99,10 @@ extern struct tty_driver * line_register_devfs(struct lines *set, | |||
89 | int nlines); | 99 | int nlines); |
90 | extern void lines_init(struct line *lines, int nlines); | 100 | extern void lines_init(struct line *lines, int nlines); |
91 | extern void close_lines(struct line *lines, int nlines); | 101 | extern void close_lines(struct line *lines, int nlines); |
92 | extern int line_config(struct line *lines, int num, char *str); | 102 | |
93 | extern int line_remove(struct line *lines, int num, char *str); | 103 | extern int line_config(struct line *lines, unsigned int sizeof_lines, char *str); |
94 | extern int line_get_config(char *dev, struct line *lines, int num, char *str, | 104 | extern int line_remove(struct line *lines, unsigned int sizeof_lines, char *str); |
105 | extern int line_get_config(char *dev, struct line *lines, unsigned int sizeof_lines, char *str, | ||
95 | int size, char **error_out); | 106 | int size, char **error_out); |
96 | 107 | ||
97 | #endif | 108 | #endif |
98 | |||
99 | /* | ||
100 | * Overrides for Emacs so that we follow Linus's tabbing style. | ||
101 | * Emacs will notice this stuff at the end of the file and automatically | ||
102 | * adjust the settings for this buffer only. This must remain at the end | ||
103 | * of the file. | ||
104 | * --------------------------------------------------------------------------- | ||
105 | * Local variables: | ||
106 | * c-file-style: "linux" | ||
107 | * End: | ||
108 | */ | ||
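The XXX note above suggests turning the open-coded ring into a struct kfifo. A rough sketch of what that could look like, assuming the 2.6-era kfifo interface (kfifo_alloc() taking the protecting spinlock, kfifo_put()/kfifo_len() doing the locking internally); this is illustrative only, not code from the patch, and DEMO_BUFSIZE stands in for LINE_BUFSIZE.

#include <linux/kfifo.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/err.h>

#define DEMO_BUFSIZE 4096	/* stand-in for LINE_BUFSIZE */

struct line_fifo_demo {
	spinlock_t lock;
	struct kfifo *fifo;
};

static int demo_init(struct line_fifo_demo *l)
{
	spin_lock_init(&l->lock);
	l->fifo = kfifo_alloc(DEMO_BUFSIZE, GFP_KERNEL, &l->lock);
	return IS_ERR(l->fifo) ? PTR_ERR(l->fifo) : 0;
}

/* What write_room() becomes: the fifo tracks its own fill level. */
static int demo_write_room(struct line_fifo_demo *l)
{
	return DEMO_BUFSIZE - kfifo_len(l->fifo);
}

/* What buffer_data() becomes: queue as much as fits, return bytes taken. */
static int demo_buffer_data(struct line_fifo_demo *l,
			    const unsigned char *buf, int len)
{
	return kfifo_put(l->fifo, (unsigned char *) buf, len);
}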
diff --git a/arch/um/include/os.h b/arch/um/include/os.h index 07340c8cf203..d246d5a24609 100644 --- a/arch/um/include/os.h +++ b/arch/um/include/os.h | |||
@@ -160,6 +160,7 @@ extern void os_kill_process(int pid, int reap_child); | |||
160 | extern void os_kill_ptraced_process(int pid, int reap_child); | 160 | extern void os_kill_ptraced_process(int pid, int reap_child); |
161 | extern void os_usr1_process(int pid); | 161 | extern void os_usr1_process(int pid); |
162 | extern int os_getpid(void); | 162 | extern int os_getpid(void); |
163 | extern int os_getpgrp(void); | ||
163 | 164 | ||
164 | extern int os_map_memory(void *virt, int fd, unsigned long long off, | 165 | extern int os_map_memory(void *virt, int fd, unsigned long long off, |
165 | unsigned long len, int r, int w, int x); | 166 | unsigned long len, int r, int w, int x); |
diff --git a/arch/um/include/skas_ptrace.h b/arch/um/include/skas_ptrace.h index cfb5fb4f5b91..cd2327d09c8d 100644 --- a/arch/um/include/skas_ptrace.h +++ b/arch/um/include/skas_ptrace.h | |||
@@ -6,22 +6,11 @@ | |||
6 | #ifndef __SKAS_PTRACE_H | 6 | #ifndef __SKAS_PTRACE_H |
7 | #define __SKAS_PTRACE_H | 7 | #define __SKAS_PTRACE_H |
8 | 8 | ||
9 | struct ptrace_faultinfo { | ||
10 | int is_write; | ||
11 | unsigned long addr; | ||
12 | }; | ||
13 | |||
14 | struct ptrace_ldt { | ||
15 | int func; | ||
16 | void *ptr; | ||
17 | unsigned long bytecount; | ||
18 | }; | ||
19 | |||
20 | #define PTRACE_FAULTINFO 52 | 9 | #define PTRACE_FAULTINFO 52 |
21 | #define PTRACE_SIGPENDING 53 | ||
22 | #define PTRACE_LDT 54 | ||
23 | #define PTRACE_SWITCH_MM 55 | 10 | #define PTRACE_SWITCH_MM 55 |
24 | 11 | ||
12 | #include "sysdep/skas_ptrace.h" | ||
13 | |||
25 | #endif | 14 | #endif |
26 | 15 | ||
27 | /* | 16 | /* |
diff --git a/arch/um/include/sysdep-i386/checksum.h b/arch/um/include/sysdep-i386/checksum.h index 3a2a45811aa3..764ba4db4788 100644 --- a/arch/um/include/sysdep-i386/checksum.h +++ b/arch/um/include/sysdep-i386/checksum.h | |||
@@ -24,19 +24,6 @@ unsigned int csum_partial(const unsigned char * buff, int len, | |||
24 | unsigned int sum); | 24 | unsigned int sum); |
25 | 25 | ||
26 | /* | 26 | /* |
27 | * the same as csum_partial, but copies from src while it | ||
28 | * checksums, and handles user-space pointer exceptions correctly, when needed. | ||
29 | * | ||
30 | * here even more important to align src and dst on a 32-bit (or even | ||
31 | * better 64-bit) boundary | ||
32 | */ | ||
33 | |||
34 | unsigned int csum_partial_copy_to(const unsigned char *src, unsigned char *dst, | ||
35 | int len, int sum, int *err_ptr); | ||
36 | unsigned int csum_partial_copy_from(const unsigned char *src, unsigned char *dst, | ||
37 | int len, int sum, int *err_ptr); | ||
38 | |||
39 | /* | ||
40 | * Note: when you get a NULL pointer exception here this means someone | 27 | * Note: when you get a NULL pointer exception here this means someone |
41 | * passed in an incorrect kernel address to one of these functions. | 28 | * passed in an incorrect kernel address to one of these functions. |
42 | * | 29 | * |
@@ -52,11 +39,24 @@ unsigned int csum_partial_copy_nocheck(const unsigned char *src, unsigned char * | |||
52 | return(csum_partial(dst, len, sum)); | 39 | return(csum_partial(dst, len, sum)); |
53 | } | 40 | } |
54 | 41 | ||
42 | /* | ||
43 | * the same as csum_partial, but copies from src while it | ||
44 | * checksums, and handles user-space pointer exceptions correctly, when needed. | ||
45 | * | ||
46 | * here even more important to align src and dst on a 32-bit (or even | ||
47 | * better 64-bit) boundary | ||
48 | */ | ||
49 | |||
55 | static __inline__ | 50 | static __inline__ |
56 | unsigned int csum_partial_copy_from_user(const unsigned char *src, unsigned char *dst, | 51 | unsigned int csum_partial_copy_from_user(const unsigned char *src, unsigned char *dst, |
57 | int len, int sum, int *err_ptr) | 52 | int len, int sum, int *err_ptr) |
58 | { | 53 | { |
59 | return csum_partial_copy_from(src, dst, len, sum, err_ptr); | 54 | if(copy_from_user(dst, src, len)){ |
55 | *err_ptr = -EFAULT; | ||
56 | return(-1); | ||
57 | } | ||
58 | |||
59 | return csum_partial(dst, len, sum); | ||
60 | } | 60 | } |
61 | 61 | ||
62 | /* | 62 | /* |
@@ -67,7 +67,6 @@ unsigned int csum_partial_copy_from_user(const unsigned char *src, unsigned char | |||
67 | */ | 67 | */ |
68 | 68 | ||
69 | #define csum_partial_copy_fromuser csum_partial_copy_from_user | 69 | #define csum_partial_copy_fromuser csum_partial_copy_from_user |
70 | unsigned int csum_partial_copy(const unsigned char *src, unsigned char *dst, int len, int sum); | ||
71 | 70 | ||
72 | /* | 71 | /* |
73 | * This is a version of ip_compute_csum() optimized for IP headers, | 72 | * This is a version of ip_compute_csum() optimized for IP headers, |
@@ -196,8 +195,14 @@ static __inline__ unsigned int csum_and_copy_to_user(const unsigned char *src, | |||
196 | unsigned char *dst, | 195 | unsigned char *dst, |
197 | int len, int sum, int *err_ptr) | 196 | int len, int sum, int *err_ptr) |
198 | { | 197 | { |
199 | if (access_ok(VERIFY_WRITE, dst, len)) | 198 | if (access_ok(VERIFY_WRITE, dst, len)){ |
200 | return(csum_partial_copy_to(src, dst, len, sum, err_ptr)); | 199 | if(copy_to_user(dst, src, len)){ |
200 | *err_ptr = -EFAULT; | ||
201 | return(-1); | ||
202 | } | ||
203 | |||
204 | return csum_partial(src, len, sum); | ||
205 | } | ||
201 | 206 | ||
202 | if (len) | 207 | if (len) |
203 | *err_ptr = -EFAULT; | 208 | *err_ptr = -EFAULT; |
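Note on the checksum change above: the out-of-line csum_partial_copy_from()/csum_partial_copy_to() helpers are replaced by inline wrappers that first move the data across the user/kernel boundary with copy_from_user()/copy_to_user() and then checksum the kernel-side buffer with csum_partial(). A minimal sketch of that copy-then-checksum pattern follows; it is illustrative only (helper name invented here), and assumes the usual copy_from_user() and csum_partial() semantics from the uaccess and checksum headers.

/* Illustrative sketch only -- not part of the patch above. */
static inline unsigned int
sketch_csum_copy_from_user(const unsigned char __user *src, unsigned char *dst,
                           int len, unsigned int sum, int *err_ptr)
{
        if (copy_from_user(dst, src, len)) {
                *err_ptr = -EFAULT;     /* tell the caller the copy faulted */
                return -1;              /* checksum value is meaningless now */
        }
        return csum_partial(dst, len, sum);     /* sum the kernel copy */
}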
diff --git a/arch/um/include/sysdep-i386/faultinfo.h b/arch/um/include/sysdep-i386/faultinfo.h new file mode 100644 index 000000000000..db437cc373bc --- /dev/null +++ b/arch/um/include/sysdep-i386/faultinfo.h | |||
@@ -0,0 +1,29 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2004 Fujitsu Siemens Computers GmbH | ||
3 | * Author: Bodo Stroesser <bstroesser@fujitsu-siemens.com> | ||
4 | * Licensed under the GPL | ||
5 | */ | ||
6 | |||
7 | #ifndef __FAULTINFO_I386_H | ||
8 | #define __FAULTINFO_I386_H | ||
9 | |||
10 | /* this structure contains the full arch-specific faultinfo | ||
11 | * from the traps. | ||
12 | * On i386, ptrace_faultinfo unfortunately doesn't provide | ||
13 | * all the info, since trap_no is missing. | ||
14 | * All common elements are defined at the same position in | ||
15 | * both structures, thus making it easy to copy the | ||
16 | * contents without knowledge about the structure elements. | ||
17 | */ | ||
18 | struct faultinfo { | ||
19 | int error_code; /* in ptrace_faultinfo misleadingly called is_write */ | ||
20 | unsigned long cr2; /* in ptrace_faultinfo called addr */ | ||
21 | int trap_no; /* missing in ptrace_faultinfo */ | ||
22 | }; | ||
23 | |||
24 | #define FAULT_WRITE(fi) ((fi).error_code & 2) | ||
25 | #define FAULT_ADDRESS(fi) ((fi).cr2) | ||
26 | |||
27 | #define PTRACE_FULL_FAULTINFO 0 | ||
28 | |||
29 | #endif | ||
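The comment in the new header above is the key design point: struct faultinfo and the smaller struct ptrace_faultinfo deliberately keep their common members at the same offsets, so code can copy whatever the host ptrace interface delivers and zero the remainder without per-field handling. A standalone sketch of that prefix-copy idea (i386 layout assumed; the helper name is invented for illustration):

/* Illustrative sketch only -- mirrors the layout trick described above. */
#include <string.h>

struct ptrace_faultinfo {               /* what the host ptrace call fills */
        int is_write;
        unsigned long addr;
};

struct faultinfo {                      /* what UML keeps per thread */
        int error_code;                 /* same offset as is_write */
        unsigned long cr2;              /* same offset as addr */
        int trap_no;                    /* extra field, unknown to ptrace */
};

static void faultinfo_from_ptrace(struct faultinfo *fi,
                                  const struct ptrace_faultinfo *pfi)
{
        memset(fi, 0, sizeof(*fi));     /* trap_no defaults to 0 ("unknown") */
        memcpy(fi, pfi, sizeof(*pfi));  /* common prefix, identical offsets */
}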
diff --git a/arch/um/include/sysdep-i386/ptrace.h b/arch/um/include/sysdep-i386/ptrace.h index 661d495e2044..84ec7ff5cf8c 100644 --- a/arch/um/include/sysdep-i386/ptrace.h +++ b/arch/um/include/sysdep-i386/ptrace.h | |||
@@ -31,6 +31,7 @@ extern int sysemu_supported; | |||
31 | #ifdef UML_CONFIG_MODE_SKAS | 31 | #ifdef UML_CONFIG_MODE_SKAS |
32 | 32 | ||
33 | #include "skas_ptregs.h" | 33 | #include "skas_ptregs.h" |
34 | #include "sysdep/faultinfo.h" | ||
34 | 35 | ||
35 | #define REGS_IP(r) ((r)[HOST_IP]) | 36 | #define REGS_IP(r) ((r)[HOST_IP]) |
36 | #define REGS_SP(r) ((r)[HOST_SP]) | 37 | #define REGS_SP(r) ((r)[HOST_SP]) |
@@ -53,12 +54,6 @@ extern int sysemu_supported; | |||
53 | 54 | ||
54 | #define REGS_RESTART_SYSCALL(r) IP_RESTART_SYSCALL(REGS_IP(r)) | 55 | #define REGS_RESTART_SYSCALL(r) IP_RESTART_SYSCALL(REGS_IP(r)) |
55 | 56 | ||
56 | #define REGS_SEGV_IS_FIXABLE(r) SEGV_IS_FIXABLE((r)->trap_type) | ||
57 | |||
58 | #define REGS_FAULT_ADDR(r) ((r)->fault_addr) | ||
59 | |||
60 | #define REGS_FAULT_WRITE(r) FAULT_WRITE((r)->fault_type) | ||
61 | |||
62 | #endif | 57 | #endif |
63 | #ifndef PTRACE_SYSEMU_SINGLESTEP | 58 | #ifndef PTRACE_SYSEMU_SINGLESTEP |
64 | #define PTRACE_SYSEMU_SINGLESTEP 32 | 59 | #define PTRACE_SYSEMU_SINGLESTEP 32 |
@@ -71,6 +66,7 @@ union uml_pt_regs { | |||
71 | struct tt_regs { | 66 | struct tt_regs { |
72 | long syscall; | 67 | long syscall; |
73 | void *sc; | 68 | void *sc; |
69 | struct faultinfo faultinfo; | ||
74 | } tt; | 70 | } tt; |
75 | #endif | 71 | #endif |
76 | #ifdef UML_CONFIG_MODE_SKAS | 72 | #ifdef UML_CONFIG_MODE_SKAS |
@@ -78,9 +74,7 @@ union uml_pt_regs { | |||
78 | unsigned long regs[HOST_FRAME_SIZE]; | 74 | unsigned long regs[HOST_FRAME_SIZE]; |
79 | unsigned long fp[HOST_FP_SIZE]; | 75 | unsigned long fp[HOST_FP_SIZE]; |
80 | unsigned long xfp[HOST_XFP_SIZE]; | 76 | unsigned long xfp[HOST_XFP_SIZE]; |
81 | unsigned long fault_addr; | 77 | struct faultinfo faultinfo; |
82 | unsigned long fault_type; | ||
83 | unsigned long trap_type; | ||
84 | long syscall; | 78 | long syscall; |
85 | int is_user; | 79 | int is_user; |
86 | } skas; | 80 | } skas; |
@@ -217,15 +211,8 @@ struct syscall_args { | |||
217 | #define UPT_SYSCALL_NR(r) UPT_ORIG_EAX(r) | 211 | #define UPT_SYSCALL_NR(r) UPT_ORIG_EAX(r) |
218 | #define UPT_SYSCALL_RET(r) UPT_EAX(r) | 212 | #define UPT_SYSCALL_RET(r) UPT_EAX(r) |
219 | 213 | ||
220 | #define UPT_SEGV_IS_FIXABLE(r) \ | 214 | #define UPT_FAULTINFO(r) \ |
221 | CHOOSE_MODE(SC_SEGV_IS_FIXABLE(UPT_SC(r)), \ | 215 | CHOOSE_MODE((&(r)->tt.faultinfo), (&(r)->skas.faultinfo)) |
222 | REGS_SEGV_IS_FIXABLE(&r->skas)) | ||
223 | |||
224 | #define UPT_FAULT_ADDR(r) \ | ||
225 | __CHOOSE_MODE(SC_FAULT_ADDR(UPT_SC(r)), REGS_FAULT_ADDR(&r->skas)) | ||
226 | |||
227 | #define UPT_FAULT_WRITE(r) \ | ||
228 | CHOOSE_MODE(SC_FAULT_WRITE(UPT_SC(r)), REGS_FAULT_WRITE(&r->skas)) | ||
229 | 216 | ||
230 | #endif | 217 | #endif |
231 | 218 | ||
diff --git a/arch/um/include/sysdep-i386/sigcontext.h b/arch/um/include/sysdep-i386/sigcontext.h index dfee589de360..1fe729265167 100644 --- a/arch/um/include/sysdep-i386/sigcontext.h +++ b/arch/um/include/sysdep-i386/sigcontext.h | |||
@@ -13,15 +13,12 @@ | |||
13 | #define SC_RESTART_SYSCALL(sc) IP_RESTART_SYSCALL(SC_IP(sc)) | 13 | #define SC_RESTART_SYSCALL(sc) IP_RESTART_SYSCALL(SC_IP(sc)) |
14 | #define SC_SET_SYSCALL_RETURN(sc, result) SC_EAX(sc) = (result) | 14 | #define SC_SET_SYSCALL_RETURN(sc, result) SC_EAX(sc) = (result) |
15 | 15 | ||
16 | #define SC_FAULT_ADDR(sc) SC_CR2(sc) | 16 | #define GET_FAULTINFO_FROM_SC(fi,sc) \ |
17 | #define SC_FAULT_TYPE(sc) SC_ERR(sc) | 17 | { \ |
18 | 18 | (fi).cr2 = SC_CR2(sc); \ | |
19 | #define FAULT_WRITE(err) (err & 2) | 19 | (fi).error_code = SC_ERR(sc); \ |
20 | #define TO_SC_ERR(is_write) ((is_write) ? 2 : 0) | 20 | (fi).trap_no = SC_TRAPNO(sc); \ |
21 | 21 | } | |
22 | #define SC_FAULT_WRITE(sc) (FAULT_WRITE(SC_ERR(sc))) | ||
23 | |||
24 | #define SC_TRAP_TYPE(sc) SC_TRAPNO(sc) | ||
25 | 22 | ||
26 | /* ptrace expects that, at the start of a system call, %eax contains | 23 | /* ptrace expects that, at the start of a system call, %eax contains |
27 | * -ENOSYS, so this makes it so. | 24 | * -ENOSYS, so this makes it so. |
@@ -29,9 +26,7 @@ | |||
29 | #define SC_START_SYSCALL(sc) do SC_EAX(sc) = -ENOSYS; while(0) | 26 | #define SC_START_SYSCALL(sc) do SC_EAX(sc) = -ENOSYS; while(0) |
30 | 27 | ||
31 | /* This is Page Fault */ | 28 | /* This is Page Fault */ |
32 | #define SEGV_IS_FIXABLE(trap) (trap == 14) | 29 | #define SEGV_IS_FIXABLE(fi) ((fi)->trap_no == 14) |
33 | |||
34 | #define SC_SEGV_IS_FIXABLE(sc) (SEGV_IS_FIXABLE(SC_TRAPNO(sc))) | ||
35 | 30 | ||
36 | extern unsigned long *sc_sigmask(void *sc_ptr); | 31 | extern unsigned long *sc_sigmask(void *sc_ptr); |
37 | extern int sc_get_fpregs(unsigned long buf, void *sc_ptr); | 32 | extern int sc_get_fpregs(unsigned long buf, void *sc_ptr); |
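GET_FAULTINFO_FROM_SC above gathers the three page-fault facts a host sigcontext carries (cr2, error code, trap number) into one struct faultinfo, and SEGV_IS_FIXABLE now inspects that struct instead of raw sigcontext fields. A hedged usage sketch, assuming the SC_CR2/SC_ERR/SC_TRAPNO accessors behave as in the header above; handle_page_fault() and fatal() are invented names for this example only:

/* Illustrative sketch only -- how a SEGV path might consume the macros. */
static void segv_from_sigcontext(struct sigcontext *sc)
{
        struct faultinfo fi;

        GET_FAULTINFO_FROM_SC(fi, sc);          /* cr2, error_code, trap_no */

        if (!SEGV_IS_FIXABLE(&fi))              /* only trap 14 is a page fault */
                fatal("unfixable SEGV, trap %d", fi.trap_no);

        handle_page_fault(FAULT_ADDRESS(fi), FAULT_WRITE(fi));
}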
diff --git a/arch/um/include/sysdep-i386/signal.h b/arch/um/include/sysdep-i386/signal.h index b1e1f7a77499..07518b162136 100644 --- a/arch/um/include/sysdep-i386/signal.h +++ b/arch/um/include/sysdep-i386/signal.h | |||
@@ -8,6 +8,8 @@ | |||
8 | 8 | ||
9 | #include <signal.h> | 9 | #include <signal.h> |
10 | 10 | ||
11 | #define ARCH_SIGHDLR_PARAM int sig | ||
12 | |||
11 | #define ARCH_GET_SIGCONTEXT(sc, sig) \ | 13 | #define ARCH_GET_SIGCONTEXT(sc, sig) \ |
12 | do sc = (struct sigcontext *) (&sig + 1); while(0) | 14 | do sc = (struct sigcontext *) (&sig + 1); while(0) |
13 | 15 | ||
diff --git a/arch/um/include/sysdep-i386/skas_ptrace.h b/arch/um/include/sysdep-i386/skas_ptrace.h new file mode 100644 index 000000000000..e27b8a791773 --- /dev/null +++ b/arch/um/include/sysdep-i386/skas_ptrace.h | |||
@@ -0,0 +1,22 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #ifndef __SYSDEP_I386_SKAS_PTRACE_H | ||
7 | #define __SYSDEP_I386_SKAS_PTRACE_H | ||
8 | |||
9 | struct ptrace_faultinfo { | ||
10 | int is_write; | ||
11 | unsigned long addr; | ||
12 | }; | ||
13 | |||
14 | struct ptrace_ldt { | ||
15 | int func; | ||
16 | void *ptr; | ||
17 | unsigned long bytecount; | ||
18 | }; | ||
19 | |||
20 | #define PTRACE_LDT 54 | ||
21 | |||
22 | #endif | ||
diff --git a/arch/um/include/sysdep-i386/syscalls.h b/arch/um/include/sysdep-i386/syscalls.h index 5db81ec9087d..be0a3e3469eb 100644 --- a/arch/um/include/sysdep-i386/syscalls.h +++ b/arch/um/include/sysdep-i386/syscalls.h | |||
@@ -22,102 +22,3 @@ extern syscall_handler_t old_mmap_i386; | |||
22 | extern long sys_mmap2(unsigned long addr, unsigned long len, | 22 | extern long sys_mmap2(unsigned long addr, unsigned long len, |
23 | unsigned long prot, unsigned long flags, | 23 | unsigned long prot, unsigned long flags, |
24 | unsigned long fd, unsigned long pgoff); | 24 | unsigned long fd, unsigned long pgoff); |
25 | |||
26 | /* On i386 they choose a meaningless naming.*/ | ||
27 | #define __NR_kexec_load __NR_sys_kexec_load | ||
28 | |||
29 | #define ARCH_SYSCALLS \ | ||
30 | [ __NR_waitpid ] = (syscall_handler_t *) sys_waitpid, \ | ||
31 | [ __NR_break ] = (syscall_handler_t *) sys_ni_syscall, \ | ||
32 | [ __NR_oldstat ] = (syscall_handler_t *) sys_stat, \ | ||
33 | [ __NR_umount ] = (syscall_handler_t *) sys_oldumount, \ | ||
34 | [ __NR_stime ] = um_stime, \ | ||
35 | [ __NR_oldfstat ] = (syscall_handler_t *) sys_fstat, \ | ||
36 | [ __NR_stty ] = (syscall_handler_t *) sys_ni_syscall, \ | ||
37 | [ __NR_gtty ] = (syscall_handler_t *) sys_ni_syscall, \ | ||
38 | [ __NR_nice ] = (syscall_handler_t *) sys_nice, \ | ||
39 | [ __NR_ftime ] = (syscall_handler_t *) sys_ni_syscall, \ | ||
40 | [ __NR_prof ] = (syscall_handler_t *) sys_ni_syscall, \ | ||
41 | [ __NR_signal ] = (syscall_handler_t *) sys_signal, \ | ||
42 | [ __NR_lock ] = (syscall_handler_t *) sys_ni_syscall, \ | ||
43 | [ __NR_mpx ] = (syscall_handler_t *) sys_ni_syscall, \ | ||
44 | [ __NR_ulimit ] = (syscall_handler_t *) sys_ni_syscall, \ | ||
45 | [ __NR_oldolduname ] = (syscall_handler_t *) sys_olduname, \ | ||
46 | [ __NR_sigaction ] = (syscall_handler_t *) sys_sigaction, \ | ||
47 | [ __NR_sgetmask ] = (syscall_handler_t *) sys_sgetmask, \ | ||
48 | [ __NR_ssetmask ] = (syscall_handler_t *) sys_ssetmask, \ | ||
49 | [ __NR_sigsuspend ] = (syscall_handler_t *) sys_sigsuspend, \ | ||
50 | [ __NR_sigpending ] = (syscall_handler_t *) sys_sigpending, \ | ||
51 | [ __NR_oldlstat ] = (syscall_handler_t *) sys_lstat, \ | ||
52 | [ __NR_readdir ] = old_readdir, \ | ||
53 | [ __NR_profil ] = (syscall_handler_t *) sys_ni_syscall, \ | ||
54 | [ __NR_socketcall ] = (syscall_handler_t *) sys_socketcall, \ | ||
55 | [ __NR_olduname ] = (syscall_handler_t *) sys_uname, \ | ||
56 | [ __NR_iopl ] = (syscall_handler_t *) sys_ni_syscall, \ | ||
57 | [ __NR_idle ] = (syscall_handler_t *) sys_ni_syscall, \ | ||
58 | [ __NR_ipc ] = (syscall_handler_t *) sys_ipc, \ | ||
59 | [ __NR_sigreturn ] = (syscall_handler_t *) sys_sigreturn, \ | ||
60 | [ __NR_sigprocmask ] = (syscall_handler_t *) sys_sigprocmask, \ | ||
61 | [ __NR_bdflush ] = (syscall_handler_t *) sys_bdflush, \ | ||
62 | [ __NR__llseek ] = (syscall_handler_t *) sys_llseek, \ | ||
63 | [ __NR__newselect ] = (syscall_handler_t *) sys_select, \ | ||
64 | [ __NR_vm86 ] = (syscall_handler_t *) sys_ni_syscall, \ | ||
65 | [ __NR_mmap ] = (syscall_handler_t *) old_mmap_i386, \ | ||
66 | [ __NR_ugetrlimit ] = (syscall_handler_t *) sys_getrlimit, \ | ||
67 | [ __NR_mmap2 ] = (syscall_handler_t *) sys_mmap2, \ | ||
68 | [ __NR_truncate64 ] = (syscall_handler_t *) sys_truncate64, \ | ||
69 | [ __NR_ftruncate64 ] = (syscall_handler_t *) sys_ftruncate64, \ | ||
70 | [ __NR_stat64 ] = (syscall_handler_t *) sys_stat64, \ | ||
71 | [ __NR_lstat64 ] = (syscall_handler_t *) sys_lstat64, \ | ||
72 | [ __NR_fstat64 ] = (syscall_handler_t *) sys_fstat64, \ | ||
73 | [ __NR_fcntl64 ] = (syscall_handler_t *) sys_fcntl64, \ | ||
74 | [ __NR_sendfile64 ] = (syscall_handler_t *) sys_sendfile64, \ | ||
75 | [ __NR_statfs64 ] = (syscall_handler_t *) sys_statfs64, \ | ||
76 | [ __NR_fstatfs64 ] = (syscall_handler_t *) sys_fstatfs64, \ | ||
77 | [ __NR_fadvise64_64 ] = (syscall_handler_t *) sys_fadvise64_64, \ | ||
78 | [ __NR_select ] = (syscall_handler_t *) old_select, \ | ||
79 | [ __NR_vm86old ] = (syscall_handler_t *) sys_ni_syscall, \ | ||
80 | [ __NR_modify_ldt ] = (syscall_handler_t *) sys_modify_ldt, \ | ||
81 | [ __NR_lchown32 ] = (syscall_handler_t *) sys_lchown, \ | ||
82 | [ __NR_getuid32 ] = (syscall_handler_t *) sys_getuid, \ | ||
83 | [ __NR_getgid32 ] = (syscall_handler_t *) sys_getgid, \ | ||
84 | [ __NR_geteuid32 ] = (syscall_handler_t *) sys_geteuid, \ | ||
85 | [ __NR_getegid32 ] = (syscall_handler_t *) sys_getegid, \ | ||
86 | [ __NR_setreuid32 ] = (syscall_handler_t *) sys_setreuid, \ | ||
87 | [ __NR_setregid32 ] = (syscall_handler_t *) sys_setregid, \ | ||
88 | [ __NR_getgroups32 ] = (syscall_handler_t *) sys_getgroups, \ | ||
89 | [ __NR_setgroups32 ] = (syscall_handler_t *) sys_setgroups, \ | ||
90 | [ __NR_fchown32 ] = (syscall_handler_t *) sys_fchown, \ | ||
91 | [ __NR_setresuid32 ] = (syscall_handler_t *) sys_setresuid, \ | ||
92 | [ __NR_getresuid32 ] = (syscall_handler_t *) sys_getresuid, \ | ||
93 | [ __NR_setresgid32 ] = (syscall_handler_t *) sys_setresgid, \ | ||
94 | [ __NR_getresgid32 ] = (syscall_handler_t *) sys_getresgid, \ | ||
95 | [ __NR_chown32 ] = (syscall_handler_t *) sys_chown, \ | ||
96 | [ __NR_setuid32 ] = (syscall_handler_t *) sys_setuid, \ | ||
97 | [ __NR_setgid32 ] = (syscall_handler_t *) sys_setgid, \ | ||
98 | [ __NR_setfsuid32 ] = (syscall_handler_t *) sys_setfsuid, \ | ||
99 | [ __NR_setfsgid32 ] = (syscall_handler_t *) sys_setfsgid, \ | ||
100 | [ __NR_pivot_root ] = (syscall_handler_t *) sys_pivot_root, \ | ||
101 | [ __NR_mincore ] = (syscall_handler_t *) sys_mincore, \ | ||
102 | [ __NR_madvise ] = (syscall_handler_t *) sys_madvise, \ | ||
103 | [ 222 ] = (syscall_handler_t *) sys_ni_syscall, \ | ||
104 | [ 223 ] = (syscall_handler_t *) sys_ni_syscall, \ | ||
105 | [ __NR_set_thread_area ] = (syscall_handler_t *) sys_ni_syscall, \ | ||
106 | [ __NR_get_thread_area ] = (syscall_handler_t *) sys_ni_syscall, \ | ||
107 | [ 251 ] = (syscall_handler_t *) sys_ni_syscall, \ | ||
108 | [ 285 ] = (syscall_handler_t *) sys_ni_syscall, | ||
109 | |||
110 | /* 222 doesn't yet have a name in include/asm-i386/unistd.h */ | ||
111 | |||
112 | #define LAST_ARCH_SYSCALL 285 | ||
113 | |||
114 | /* | ||
115 | * Overrides for Emacs so that we follow Linus's tabbing style. | ||
116 | * Emacs will notice this stuff at the end of the file and automatically | ||
117 | * adjust the settings for this buffer only. This must remain at the end | ||
118 | * of the file. | ||
119 | * --------------------------------------------------------------------------- | ||
120 | * Local variables: | ||
121 | * c-file-style: "linux" | ||
122 | * End: | ||
123 | */ | ||
diff --git a/arch/um/include/sysdep-ia64/skas_ptrace.h b/arch/um/include/sysdep-ia64/skas_ptrace.h new file mode 100644 index 000000000000..25a38e715702 --- /dev/null +++ b/arch/um/include/sysdep-ia64/skas_ptrace.h | |||
@@ -0,0 +1,22 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #ifndef __SYSDEP_IA64_SKAS_PTRACE_H | ||
7 | #define __SYSDEP_IA64_SKAS_PTRACE_H | ||
8 | |||
9 | struct ptrace_faultinfo { | ||
10 | int is_write; | ||
11 | unsigned long addr; | ||
12 | }; | ||
13 | |||
14 | struct ptrace_ldt { | ||
15 | int func; | ||
16 | void *ptr; | ||
17 | unsigned long bytecount; | ||
18 | }; | ||
19 | |||
20 | #define PTRACE_LDT 54 | ||
21 | |||
22 | #endif | ||
diff --git a/arch/um/include/sysdep-ppc/skas_ptrace.h b/arch/um/include/sysdep-ppc/skas_ptrace.h new file mode 100644 index 000000000000..d9fbbac10de0 --- /dev/null +++ b/arch/um/include/sysdep-ppc/skas_ptrace.h | |||
@@ -0,0 +1,22 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #ifndef __SYSDEP_PPC_SKAS_PTRACE_H | ||
7 | #define __SYSDEP_PPC_SKAS_PTRACE_H | ||
8 | |||
9 | struct ptrace_faultinfo { | ||
10 | int is_write; | ||
11 | unsigned long addr; | ||
12 | }; | ||
13 | |||
14 | struct ptrace_ldt { | ||
15 | int func; | ||
16 | void *ptr; | ||
17 | unsigned long bytecount; | ||
18 | }; | ||
19 | |||
20 | #define PTRACE_LDT 54 | ||
21 | |||
22 | #endif | ||
diff --git a/arch/um/include/sysdep-x86_64/faultinfo.h b/arch/um/include/sysdep-x86_64/faultinfo.h new file mode 100644 index 000000000000..cb917b0d5660 --- /dev/null +++ b/arch/um/include/sysdep-x86_64/faultinfo.h | |||
@@ -0,0 +1,29 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2004 Fujitsu Siemens Computers GmbH | ||
3 | * Author: Bodo Stroesser <bstroesser@fujitsu-siemens.com> | ||
4 | * Licensed under the GPL | ||
5 | */ | ||
6 | |||
7 | #ifndef __FAULTINFO_X86_64_H | ||
8 | #define __FAULTINFO_X86_64_H | ||
9 | |||
10 | /* this structure contains the full arch-specific faultinfo | ||
11 | * from the traps. | ||
12 | * On i386, ptrace_faultinfo unfortunately doesn't provide | ||
13 | * all the info, since trap_no is missing. | ||
14 | * All common elements are defined at the same position in | ||
15 | * both structures, thus making it easy to copy the | ||
16 | * contents without knowledge about the structure elements. | ||
17 | */ | ||
18 | struct faultinfo { | ||
19 | int error_code; /* in ptrace_faultinfo misleadingly called is_write */ | ||
20 | unsigned long cr2; /* in ptrace_faultinfo called addr */ | ||
21 | int trap_no; /* missing in ptrace_faultinfo */ | ||
22 | }; | ||
23 | |||
24 | #define FAULT_WRITE(fi) ((fi).error_code & 2) | ||
25 | #define FAULT_ADDRESS(fi) ((fi).cr2) | ||
26 | |||
27 | #define PTRACE_FULL_FAULTINFO 1 | ||
28 | |||
29 | #endif | ||
diff --git a/arch/um/include/sysdep-x86_64/ptrace.h b/arch/um/include/sysdep-x86_64/ptrace.h index 915c82daffbd..348e8fcd513f 100644 --- a/arch/um/include/sysdep-x86_64/ptrace.h +++ b/arch/um/include/sysdep-x86_64/ptrace.h | |||
@@ -9,6 +9,7 @@ | |||
9 | 9 | ||
10 | #include "uml-config.h" | 10 | #include "uml-config.h" |
11 | #include "user_constants.h" | 11 | #include "user_constants.h" |
12 | #include "sysdep/faultinfo.h" | ||
12 | 13 | ||
13 | #define MAX_REG_OFFSET (UM_FRAME_SIZE) | 14 | #define MAX_REG_OFFSET (UM_FRAME_SIZE) |
14 | #define MAX_REG_NR ((MAX_REG_OFFSET) / sizeof(unsigned long)) | 15 | #define MAX_REG_NR ((MAX_REG_OFFSET) / sizeof(unsigned long)) |
@@ -83,6 +84,7 @@ union uml_pt_regs { | |||
83 | long syscall; | 84 | long syscall; |
84 | unsigned long orig_rax; | 85 | unsigned long orig_rax; |
85 | void *sc; | 86 | void *sc; |
87 | struct faultinfo faultinfo; | ||
86 | } tt; | 88 | } tt; |
87 | #endif | 89 | #endif |
88 | #ifdef UML_CONFIG_MODE_SKAS | 90 | #ifdef UML_CONFIG_MODE_SKAS |
@@ -90,9 +92,7 @@ union uml_pt_regs { | |||
90 | /* XXX */ | 92 | /* XXX */ |
91 | unsigned long regs[27]; | 93 | unsigned long regs[27]; |
92 | unsigned long fp[65]; | 94 | unsigned long fp[65]; |
93 | unsigned long fault_addr; | 95 | struct faultinfo faultinfo; |
94 | unsigned long fault_type; | ||
95 | unsigned long trap_type; | ||
96 | long syscall; | 96 | long syscall; |
97 | int is_user; | 97 | int is_user; |
98 | } skas; | 98 | } skas; |
@@ -241,14 +241,8 @@ struct syscall_args { | |||
241 | CHOOSE_MODE(SC_SEGV_IS_FIXABLE(UPT_SC(r)), \ | 241 | CHOOSE_MODE(SC_SEGV_IS_FIXABLE(UPT_SC(r)), \ |
242 | REGS_SEGV_IS_FIXABLE(&r->skas)) | 242 | REGS_SEGV_IS_FIXABLE(&r->skas)) |
243 | 243 | ||
244 | #define UPT_FAULT_ADDR(r) \ | 244 | #define UPT_FAULTINFO(r) \ |
245 | __CHOOSE_MODE(SC_FAULT_ADDR(UPT_SC(r)), REGS_FAULT_ADDR(&r->skas)) | 245 | CHOOSE_MODE((&(r)->tt.faultinfo), (&(r)->skas.faultinfo)) |
246 | |||
247 | #define UPT_FAULT_WRITE(r) \ | ||
248 | CHOOSE_MODE(SC_FAULT_WRITE(UPT_SC(r)), REGS_FAULT_WRITE(&r->skas)) | ||
249 | |||
250 | #define UPT_TRAP(r) __CHOOSE_MODE(SC_TRAP_TYPE(UPT_SC(r)), REGS_TRAP(&r->skas)) | ||
251 | #define UPT_ERR(r) __CHOOSE_MODE(SC_FAULT_TYPE(UPT_SC(r)), REGS_ERR(&r->skas)) | ||
252 | 246 | ||
253 | #endif | 247 | #endif |
254 | 248 | ||
diff --git a/arch/um/include/sysdep-x86_64/sigcontext.h b/arch/um/include/sysdep-x86_64/sigcontext.h index 1e38a54ff4cf..2a78260d15a0 100644 --- a/arch/um/include/sysdep-x86_64/sigcontext.h +++ b/arch/um/include/sysdep-x86_64/sigcontext.h | |||
@@ -17,11 +17,12 @@ | |||
17 | #define SC_FAULT_ADDR(sc) SC_CR2(sc) | 17 | #define SC_FAULT_ADDR(sc) SC_CR2(sc) |
18 | #define SC_FAULT_TYPE(sc) SC_ERR(sc) | 18 | #define SC_FAULT_TYPE(sc) SC_ERR(sc) |
19 | 19 | ||
20 | #define FAULT_WRITE(err) ((err) & 2) | 20 | #define GET_FAULTINFO_FROM_SC(fi,sc) \ |
21 | 21 | { \ | |
22 | #define SC_FAULT_WRITE(sc) FAULT_WRITE(SC_FAULT_TYPE(sc)) | 22 | (fi).cr2 = SC_CR2(sc); \ |
23 | 23 | (fi).error_code = SC_ERR(sc); \ | |
24 | #define SC_TRAP_TYPE(sc) SC_TRAPNO(sc) | 24 | (fi).trap_no = SC_TRAPNO(sc); \ |
25 | } | ||
25 | 26 | ||
26 | /* ptrace expects that, at the start of a system call, %eax contains | 27 | /* ptrace expects that, at the start of a system call, %eax contains |
27 | * -ENOSYS, so this makes it so. | 28 | * -ENOSYS, so this makes it so. |
@@ -29,8 +30,8 @@ | |||
29 | 30 | ||
30 | #define SC_START_SYSCALL(sc) do SC_RAX(sc) = -ENOSYS; while(0) | 31 | #define SC_START_SYSCALL(sc) do SC_RAX(sc) = -ENOSYS; while(0) |
31 | 32 | ||
32 | #define SEGV_IS_FIXABLE(trap) ((trap) == 14) | 33 | /* This is Page Fault */ |
33 | #define SC_SEGV_IS_FIXABLE(sc) SEGV_IS_FIXABLE(SC_TRAP_TYPE(sc)) | 34 | #define SEGV_IS_FIXABLE(fi) ((fi)->trap_no == 14) |
34 | 35 | ||
35 | extern unsigned long *sc_sigmask(void *sc_ptr); | 36 | extern unsigned long *sc_sigmask(void *sc_ptr); |
36 | 37 | ||
diff --git a/arch/um/include/sysdep-x86_64/signal.h b/arch/um/include/sysdep-x86_64/signal.h index e5e52756fab4..6142897af3d1 100644 --- a/arch/um/include/sysdep-x86_64/signal.h +++ b/arch/um/include/sysdep-x86_64/signal.h | |||
@@ -6,6 +6,8 @@ | |||
6 | #ifndef __X86_64_SIGNAL_H_ | 6 | #ifndef __X86_64_SIGNAL_H_ |
7 | #define __X86_64_SIGNAL_H_ | 7 | #define __X86_64_SIGNAL_H_ |
8 | 8 | ||
9 | #define ARCH_SIGHDLR_PARAM int sig | ||
10 | |||
9 | #define ARCH_GET_SIGCONTEXT(sc, sig_addr) \ | 11 | #define ARCH_GET_SIGCONTEXT(sc, sig_addr) \ |
10 | do { \ | 12 | do { \ |
11 | struct ucontext *__uc; \ | 13 | struct ucontext *__uc; \ |
diff --git a/arch/um/include/sysdep-x86_64/skas_ptrace.h b/arch/um/include/sysdep-x86_64/skas_ptrace.h new file mode 100644 index 000000000000..95db4be786e4 --- /dev/null +++ b/arch/um/include/sysdep-x86_64/skas_ptrace.h | |||
@@ -0,0 +1,22 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) | ||
3 | * Licensed under the GPL | ||
4 | */ | ||
5 | |||
6 | #ifndef __SYSDEP_X86_64_SKAS_PTRACE_H | ||
7 | #define __SYSDEP_X86_64_SKAS_PTRACE_H | ||
8 | |||
9 | struct ptrace_faultinfo { | ||
10 | int is_write; | ||
11 | unsigned long addr; | ||
12 | }; | ||
13 | |||
14 | struct ptrace_ldt { | ||
15 | int func; | ||
16 | void *ptr; | ||
17 | unsigned long bytecount; | ||
18 | }; | ||
19 | |||
20 | #define PTRACE_LDT 54 | ||
21 | |||
22 | #endif | ||
diff --git a/arch/um/include/sysdep-x86_64/syscalls.h b/arch/um/include/sysdep-x86_64/syscalls.h index b187a4157ff3..67923cca5691 100644 --- a/arch/um/include/sysdep-x86_64/syscalls.h +++ b/arch/um/include/sysdep-x86_64/syscalls.h | |||
@@ -26,66 +26,9 @@ extern syscall_handler_t *ia32_sys_call_table[]; | |||
26 | extern long old_mmap(unsigned long addr, unsigned long len, | 26 | extern long old_mmap(unsigned long addr, unsigned long len, |
27 | unsigned long prot, unsigned long flags, | 27 | unsigned long prot, unsigned long flags, |
28 | unsigned long fd, unsigned long pgoff); | 28 | unsigned long fd, unsigned long pgoff); |
29 | extern syscall_handler_t wrap_sys_shmat; | ||
30 | extern syscall_handler_t sys_modify_ldt; | 29 | extern syscall_handler_t sys_modify_ldt; |
31 | extern syscall_handler_t sys_arch_prctl; | 30 | extern syscall_handler_t sys_arch_prctl; |
32 | 31 | ||
33 | #define ARCH_SYSCALLS \ | 32 | #define NR_syscalls (__NR_syscall_max + 1) |
34 | [ __NR_mmap ] = (syscall_handler_t *) old_mmap, \ | ||
35 | [ __NR_select ] = (syscall_handler_t *) sys_select, \ | ||
36 | [ __NR_mincore ] = (syscall_handler_t *) sys_mincore, \ | ||
37 | [ __NR_madvise ] = (syscall_handler_t *) sys_madvise, \ | ||
38 | [ __NR_shmget ] = (syscall_handler_t *) sys_shmget, \ | ||
39 | [ __NR_shmat ] = (syscall_handler_t *) wrap_sys_shmat, \ | ||
40 | [ __NR_shmctl ] = (syscall_handler_t *) sys_shmctl, \ | ||
41 | [ __NR_semop ] = (syscall_handler_t *) sys_semop, \ | ||
42 | [ __NR_semget ] = (syscall_handler_t *) sys_semget, \ | ||
43 | [ __NR_semctl ] = (syscall_handler_t *) sys_semctl, \ | ||
44 | [ __NR_shmdt ] = (syscall_handler_t *) sys_shmdt, \ | ||
45 | [ __NR_msgget ] = (syscall_handler_t *) sys_msgget, \ | ||
46 | [ __NR_msgsnd ] = (syscall_handler_t *) sys_msgsnd, \ | ||
47 | [ __NR_msgrcv ] = (syscall_handler_t *) sys_msgrcv, \ | ||
48 | [ __NR_msgctl ] = (syscall_handler_t *) sys_msgctl, \ | ||
49 | [ __NR_pivot_root ] = (syscall_handler_t *) sys_pivot_root, \ | ||
50 | [ __NR_tuxcall ] = (syscall_handler_t *) sys_ni_syscall, \ | ||
51 | [ __NR_security ] = (syscall_handler_t *) sys_ni_syscall, \ | ||
52 | [ __NR_epoll_ctl_old ] = (syscall_handler_t *) sys_ni_syscall, \ | ||
53 | [ __NR_epoll_wait_old ] = (syscall_handler_t *) sys_ni_syscall, \ | ||
54 | [ __NR_modify_ldt ] = (syscall_handler_t *) sys_modify_ldt, \ | ||
55 | [ __NR_arch_prctl ] = (syscall_handler_t *) sys_arch_prctl, \ | ||
56 | [ __NR_socket ] = (syscall_handler_t *) sys_socket, \ | ||
57 | [ __NR_connect ] = (syscall_handler_t *) sys_connect, \ | ||
58 | [ __NR_accept ] = (syscall_handler_t *) sys_accept, \ | ||
59 | [ __NR_recvfrom ] = (syscall_handler_t *) sys_recvfrom, \ | ||
60 | [ __NR_recvmsg ] = (syscall_handler_t *) sys_recvmsg, \ | ||
61 | [ __NR_sendmsg ] = (syscall_handler_t *) sys_sendmsg, \ | ||
62 | [ __NR_bind ] = (syscall_handler_t *) sys_bind, \ | ||
63 | [ __NR_listen ] = (syscall_handler_t *) sys_listen, \ | ||
64 | [ __NR_getsockname ] = (syscall_handler_t *) sys_getsockname, \ | ||
65 | [ __NR_getpeername ] = (syscall_handler_t *) sys_getpeername, \ | ||
66 | [ __NR_socketpair ] = (syscall_handler_t *) sys_socketpair, \ | ||
67 | [ __NR_sendto ] = (syscall_handler_t *) sys_sendto, \ | ||
68 | [ __NR_shutdown ] = (syscall_handler_t *) sys_shutdown, \ | ||
69 | [ __NR_setsockopt ] = (syscall_handler_t *) sys_setsockopt, \ | ||
70 | [ __NR_getsockopt ] = (syscall_handler_t *) sys_getsockopt, \ | ||
71 | [ __NR_iopl ] = (syscall_handler_t *) sys_ni_syscall, \ | ||
72 | [ __NR_set_thread_area ] = (syscall_handler_t *) sys_ni_syscall, \ | ||
73 | [ __NR_get_thread_area ] = (syscall_handler_t *) sys_ni_syscall, \ | ||
74 | [ __NR_semtimedop ] = (syscall_handler_t *) sys_semtimedop, \ | ||
75 | [ 251 ] = (syscall_handler_t *) sys_ni_syscall, | ||
76 | |||
77 | #define LAST_ARCH_SYSCALL 251 | ||
78 | #define NR_syscalls 1024 | ||
79 | 33 | ||
80 | #endif | 34 | #endif |
81 | |||
82 | /* | ||
83 | * Overrides for Emacs so that we follow Linus's tabbing style. | ||
84 | * Emacs will notice this stuff at the end of the file and automatically | ||
85 | * adjust the settings for this buffer only. This must remain at the end | ||
86 | * of the file. | ||
87 | * --------------------------------------------------------------------------- | ||
88 | * Local variables: | ||
89 | * c-file-style: "linux" | ||
90 | * End: | ||
91 | */ | ||
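The large ARCH_SYSCALLS blocks deleted above built sparse syscall tables with C designated array initializers; after this patch the x86_64 table is simply sized by __NR_syscall_max. For readers unfamiliar with the idiom, a toy table showing how such sparse initializers work (handlers and numbers invented; the `[a ... b]` range form is a GCC extension the kernel relies on):

/* Toy example only -- not UML code.  Slots not named explicitly fall
 * back to the range-initialised default handler.
 */
typedef long syscall_handler_t(void);

static long toy_ni_syscall(void)  { return -38; }   /* -ENOSYS */
static long toy_getpid(void)      { return 42; }    /* invented handler */

#define TOY_NR_SYSCALLS 16

static syscall_handler_t *toy_sys_call_table[TOY_NR_SYSCALLS] = {
        [0 ... TOY_NR_SYSCALLS - 1] = toy_ni_syscall,   /* default */
        [3]                         = toy_getpid,       /* sparse override */
};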
diff --git a/arch/um/include/user_util.h b/arch/um/include/user_util.h index 103cd320386c..b8c5b8a95250 100644 --- a/arch/um/include/user_util.h +++ b/arch/um/include/user_util.h | |||
@@ -67,7 +67,6 @@ extern void *um_kmalloc(int size); | |||
67 | extern int switcheroo(int fd, int prot, void *from, void *to, int size); | 67 | extern int switcheroo(int fd, int prot, void *from, void *to, int size); |
68 | extern void setup_machinename(char *machine_out); | 68 | extern void setup_machinename(char *machine_out); |
69 | extern void setup_hostinfo(void); | 69 | extern void setup_hostinfo(void); |
70 | extern void add_arg(char *arg); | ||
71 | extern void init_new_thread_stack(void *sig_stack, void (*usr1_handler)(int)); | 70 | extern void init_new_thread_stack(void *sig_stack, void (*usr1_handler)(int)); |
72 | extern void init_new_thread_signals(int altstack); | 71 | extern void init_new_thread_signals(int altstack); |
73 | extern void do_exec(int old_pid, int new_pid); | 72 | extern void do_exec(int old_pid, int new_pid); |
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile index dc796c1bf39e..9736ca27c5f0 100644 --- a/arch/um/kernel/Makefile +++ b/arch/um/kernel/Makefile | |||
@@ -4,13 +4,13 @@ | |||
4 | # | 4 | # |
5 | 5 | ||
6 | extra-y := vmlinux.lds | 6 | extra-y := vmlinux.lds |
7 | clean-files := vmlinux.lds.S config.tmp | 7 | clean-files := |
8 | 8 | ||
9 | obj-y = checksum.o config.o exec_kern.o exitcode.o \ | 9 | obj-y = config.o exec_kern.o exitcode.o \ |
10 | helper.o init_task.o irq.o irq_user.o ksyms.o main.o mem.o mem_user.o \ | 10 | helper.o init_task.o irq.o irq_user.o ksyms.o main.o mem.o mem_user.o \ |
11 | physmem.o process.o process_kern.o ptrace.o reboot.o resource.o \ | 11 | physmem.o process.o process_kern.o ptrace.o reboot.o resource.o \ |
12 | sigio_user.o sigio_kern.o signal_kern.o signal_user.o smp.o \ | 12 | sigio_user.o sigio_kern.o signal_kern.o signal_user.o smp.o \ |
13 | syscall_kern.o sysrq.o sys_call_table.o tempfile.o time.o time_kern.o \ | 13 | syscall_kern.o sysrq.o tempfile.o time.o time_kern.o \ |
14 | tlb.o trap_kern.o trap_user.o uaccess_user.o um_arch.o umid.o \ | 14 | tlb.o trap_kern.o trap_user.o uaccess_user.o um_arch.o umid.o \ |
15 | user_util.o | 15 | user_util.o |
16 | 16 | ||
@@ -23,18 +23,14 @@ obj-$(CONFIG_SYSCALL_DEBUG) += syscall_user.o | |||
23 | obj-$(CONFIG_MODE_TT) += tt/ | 23 | obj-$(CONFIG_MODE_TT) += tt/ |
24 | obj-$(CONFIG_MODE_SKAS) += skas/ | 24 | obj-$(CONFIG_MODE_SKAS) += skas/ |
25 | 25 | ||
26 | # This needs be compiled with frame pointers regardless of how the rest of the | ||
27 | # kernel is built. | ||
28 | CFLAGS_frame.o := -fno-omit-frame-pointer | ||
29 | |||
30 | user-objs-$(CONFIG_TTY_LOG) += tty_log.o | 26 | user-objs-$(CONFIG_TTY_LOG) += tty_log.o |
31 | 27 | ||
32 | USER_OBJS := $(user-objs-y) config.o helper.o main.o process.o tempfile.o \ | 28 | USER_OBJS := $(user-objs-y) config.o helper.o main.o process.o tempfile.o \ |
33 | time.o tty_log.o umid.o user_util.o frame.o | 29 | time.o tty_log.o umid.o user_util.o |
34 | 30 | ||
35 | include arch/um/scripts/Makefile.rules | 31 | include arch/um/scripts/Makefile.rules |
36 | 32 | ||
37 | targets += config.c | 33 | targets := config.c config.tmp |
38 | 34 | ||
39 | # Be careful with the below Sed code - sed is pitfall-rich! | 35 | # Be careful with the below Sed code - sed is pitfall-rich! |
40 | # We use sed to lower build requirements, for "embedded" builders for instance. | 36 | # We use sed to lower build requirements, for "embedded" builders for instance. |
@@ -53,6 +49,7 @@ quiet_cmd_quote2 = QUOTE $@ | |||
53 | cmd_quote2 = sed -e '/CONFIG/{' \ | 49 | cmd_quote2 = sed -e '/CONFIG/{' \ |
54 | -e 's/"CONFIG"\;/""/' \ | 50 | -e 's/"CONFIG"\;/""/' \ |
55 | -e 'r $(obj)/config.tmp' \ | 51 | -e 'r $(obj)/config.tmp' \ |
56 | -e 'a""\;' \ | 52 | -e 'a \' \ |
53 | -e '""\;' \ | ||
57 | -e '}' \ | 54 | -e '}' \ |
58 | $< > $@ | 55 | $< > $@ |
diff --git a/arch/um/kernel/checksum.c b/arch/um/kernel/checksum.c index e69b2be951d1..e69de29bb2d1 100644 --- a/arch/um/kernel/checksum.c +++ b/arch/um/kernel/checksum.c | |||
@@ -1,36 +0,0 @@ | |||
1 | #include "asm/uaccess.h" | ||
2 | #include "linux/errno.h" | ||
3 | #include "linux/module.h" | ||
4 | |||
5 | unsigned int arch_csum_partial(const unsigned char *buff, int len, int sum); | ||
6 | |||
7 | unsigned int csum_partial(unsigned char *buff, int len, int sum) | ||
8 | { | ||
9 | return arch_csum_partial(buff, len, sum); | ||
10 | } | ||
11 | |||
12 | EXPORT_SYMBOL(csum_partial); | ||
13 | |||
14 | unsigned int csum_partial_copy_to(const unsigned char *src, | ||
15 | unsigned char __user *dst, int len, int sum, | ||
16 | int *err_ptr) | ||
17 | { | ||
18 | if(copy_to_user(dst, src, len)){ | ||
19 | *err_ptr = -EFAULT; | ||
20 | return(-1); | ||
21 | } | ||
22 | |||
23 | return(arch_csum_partial(src, len, sum)); | ||
24 | } | ||
25 | |||
26 | unsigned int csum_partial_copy_from(const unsigned char __user *src, | ||
27 | unsigned char *dst, int len, int sum, | ||
28 | int *err_ptr) | ||
29 | { | ||
30 | if(copy_from_user(dst, src, len)){ | ||
31 | *err_ptr = -EFAULT; | ||
32 | return(-1); | ||
33 | } | ||
34 | |||
35 | return arch_csum_partial(dst, len, sum); | ||
36 | } | ||
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c index d71e8f00810f..d44fb5282547 100644 --- a/arch/um/kernel/irq.c +++ b/arch/um/kernel/irq.c | |||
@@ -163,7 +163,6 @@ void __init init_IRQ(void) | |||
163 | irq_desc[i].handler = &SIGIO_irq_type; | 163 | irq_desc[i].handler = &SIGIO_irq_type; |
164 | enable_irq(i); | 164 | enable_irq(i); |
165 | } | 165 | } |
166 | init_irq_signals(0); | ||
167 | } | 166 | } |
168 | 167 | ||
169 | /* | 168 | /* |
diff --git a/arch/um/kernel/ksyms.c b/arch/um/kernel/ksyms.c index b41d3397d07b..78d69dc74b26 100644 --- a/arch/um/kernel/ksyms.c +++ b/arch/um/kernel/ksyms.c | |||
@@ -10,7 +10,6 @@ | |||
10 | #include "linux/spinlock.h" | 10 | #include "linux/spinlock.h" |
11 | #include "linux/highmem.h" | 11 | #include "linux/highmem.h" |
12 | #include "asm/current.h" | 12 | #include "asm/current.h" |
13 | #include "asm/delay.h" | ||
14 | #include "asm/processor.h" | 13 | #include "asm/processor.h" |
15 | #include "asm/unistd.h" | 14 | #include "asm/unistd.h" |
16 | #include "asm/pgalloc.h" | 15 | #include "asm/pgalloc.h" |
@@ -28,8 +27,6 @@ EXPORT_SYMBOL(uml_physmem); | |||
28 | EXPORT_SYMBOL(set_signals); | 27 | EXPORT_SYMBOL(set_signals); |
29 | EXPORT_SYMBOL(get_signals); | 28 | EXPORT_SYMBOL(get_signals); |
30 | EXPORT_SYMBOL(kernel_thread); | 29 | EXPORT_SYMBOL(kernel_thread); |
31 | EXPORT_SYMBOL(__const_udelay); | ||
32 | EXPORT_SYMBOL(__udelay); | ||
33 | EXPORT_SYMBOL(sys_waitpid); | 30 | EXPORT_SYMBOL(sys_waitpid); |
34 | EXPORT_SYMBOL(task_size); | 31 | EXPORT_SYMBOL(task_size); |
35 | EXPORT_SYMBOL(flush_tlb_range); | 32 | EXPORT_SYMBOL(flush_tlb_range); |
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index f76a2692adca..51f8e5a8ac6a 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c | |||
@@ -65,8 +65,6 @@ void init_new_thread_signals(int altstack) | |||
65 | SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1); | 65 | SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1); |
66 | set_handler(SIGBUS, (__sighandler_t) sig_handler, flags, | 66 | set_handler(SIGBUS, (__sighandler_t) sig_handler, flags, |
67 | SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1); | 67 | SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1); |
68 | set_handler(SIGWINCH, (__sighandler_t) sig_handler, flags, | ||
69 | SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1); | ||
70 | set_handler(SIGUSR2, (__sighandler_t) sig_handler, | 68 | set_handler(SIGUSR2, (__sighandler_t) sig_handler, |
71 | flags, SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1); | 69 | flags, SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1); |
72 | signal(SIGHUP, SIG_IGN); | 70 | signal(SIGHUP, SIG_IGN); |
diff --git a/arch/um/kernel/process_kern.c b/arch/um/kernel/process_kern.c index 1d719d5b4bb9..c1adf7ba3fd1 100644 --- a/arch/um/kernel/process_kern.c +++ b/arch/um/kernel/process_kern.c | |||
@@ -115,16 +115,6 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) | |||
115 | return(pid); | 115 | return(pid); |
116 | } | 116 | } |
117 | 117 | ||
118 | void switch_mm(struct mm_struct *prev, struct mm_struct *next, | ||
119 | struct task_struct *tsk) | ||
120 | { | ||
121 | int cpu = smp_processor_id(); | ||
122 | |||
123 | if (prev != next) | ||
124 | cpu_clear(cpu, prev->cpu_vm_mask); | ||
125 | cpu_set(cpu, next->cpu_vm_mask); | ||
126 | } | ||
127 | |||
128 | void set_current(void *t) | 118 | void set_current(void *t) |
129 | { | 119 | { |
130 | struct task_struct *task = t; | 120 | struct task_struct *task = t; |
@@ -152,7 +142,6 @@ void release_thread(struct task_struct *task) | |||
152 | 142 | ||
153 | void exit_thread(void) | 143 | void exit_thread(void) |
154 | { | 144 | { |
155 | CHOOSE_MODE(exit_thread_tt(), exit_thread_skas()); | ||
156 | unprotect_stack((unsigned long) current_thread); | 145 | unprotect_stack((unsigned long) current_thread); |
157 | } | 146 | } |
158 | 147 | ||
@@ -161,10 +150,6 @@ void *get_current(void) | |||
161 | return(current); | 150 | return(current); |
162 | } | 151 | } |
163 | 152 | ||
164 | void prepare_to_copy(struct task_struct *tsk) | ||
165 | { | ||
166 | } | ||
167 | |||
168 | int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, | 153 | int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, |
169 | unsigned long stack_top, struct task_struct * p, | 154 | unsigned long stack_top, struct task_struct * p, |
170 | struct pt_regs *regs) | 155 | struct pt_regs *regs) |
@@ -480,12 +465,21 @@ int singlestepping(void * t) | |||
480 | return 2; | 465 | return 2; |
481 | } | 466 | } |
482 | 467 | ||
468 | /* | ||
469 | * Only x86 and x86_64 have an arch_align_stack(). | ||
470 | * All other arches have "#define arch_align_stack(x) (x)" | ||
471 | * in their asm/system.h | ||
472 | * As this is included in UML from asm-um/system-generic.h, | ||
473 | * we can use it to behave as the subarch does. | ||
474 | */ | ||
475 | #ifndef arch_align_stack | ||
483 | unsigned long arch_align_stack(unsigned long sp) | 476 | unsigned long arch_align_stack(unsigned long sp) |
484 | { | 477 | { |
485 | if (randomize_va_space) | 478 | if (randomize_va_space) |
486 | sp -= get_random_int() % 8192; | 479 | sp -= get_random_int() % 8192; |
487 | return sp & ~0xf; | 480 | return sp & ~0xf; |
488 | } | 481 | } |
482 | #endif | ||
489 | 483 | ||
490 | 484 | ||
491 | /* | 485 | /* |
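The comment added above spells out the trick: non-x86 subarches already ship arch_align_stack() as a pass-through macro in their asm/system.h, so wrapping UML's randomising definition in #ifndef lets UML inherit whichever behaviour the subarch chose. A minimal sketch of the idiom (the surrounding names are invented for illustration):

/* Illustrative sketch only -- the #ifndef idiom described above. */

/* a non-x86 subarch header ships the trivial pass-through macro: */
#define arch_align_stack(x) (x)

/* the UML side compiles a real function only when no macro exists,
 * so with the macro above this whole block drops out at preprocessing:
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
        return sp & ~0xfUL;             /* x86-style 16-byte alignment */
}
#endif

/* callers are oblivious to which definition won: */
static unsigned long toy_place_stack(unsigned long top)
{
        return arch_align_stack(top);
}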
diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c index 3a99ee6d94eb..2b75d8d9ba73 100644 --- a/arch/um/kernel/ptrace.c +++ b/arch/um/kernel/ptrace.c | |||
@@ -19,15 +19,30 @@ | |||
19 | #include "skas_ptrace.h" | 19 | #include "skas_ptrace.h" |
20 | #include "sysdep/ptrace.h" | 20 | #include "sysdep/ptrace.h" |
21 | 21 | ||
22 | static inline void set_singlestepping(struct task_struct *child, int on) | ||
23 | { | ||
24 | if (on) | ||
25 | child->ptrace |= PT_DTRACE; | ||
26 | else | ||
27 | child->ptrace &= ~PT_DTRACE; | ||
28 | child->thread.singlestep_syscall = 0; | ||
29 | |||
30 | #ifdef SUBARCH_SET_SINGLESTEPPING | ||
31 | SUBARCH_SET_SINGLESTEPPING(child, on) | ||
32 | #endif | ||
33 | } | ||
34 | |||
22 | /* | 35 | /* |
23 | * Called by kernel/ptrace.c when detaching.. | 36 | * Called by kernel/ptrace.c when detaching.. |
24 | */ | 37 | */ |
25 | void ptrace_disable(struct task_struct *child) | 38 | void ptrace_disable(struct task_struct *child) |
26 | { | 39 | { |
27 | child->ptrace &= ~PT_DTRACE; | 40 | set_singlestepping(child,0); |
28 | child->thread.singlestep_syscall = 0; | ||
29 | } | 41 | } |
30 | 42 | ||
43 | extern int peek_user(struct task_struct * child, long addr, long data); | ||
44 | extern int poke_user(struct task_struct * child, long addr, long data); | ||
45 | |||
31 | long sys_ptrace(long request, long pid, long addr, long data) | 46 | long sys_ptrace(long request, long pid, long addr, long data) |
32 | { | 47 | { |
33 | struct task_struct *child; | 48 | struct task_struct *child; |
@@ -67,6 +82,10 @@ long sys_ptrace(long request, long pid, long addr, long data) | |||
67 | goto out_tsk; | 82 | goto out_tsk; |
68 | } | 83 | } |
69 | 84 | ||
85 | #ifdef SUBARCH_PTRACE_SPECIAL | ||

86 | SUBARCH_PTRACE_SPECIAL(child,request,addr,data) | ||
87 | #endif | ||
88 | |||
70 | ret = ptrace_check_attach(child, request == PTRACE_KILL); | 89 | ret = ptrace_check_attach(child, request == PTRACE_KILL); |
71 | if (ret < 0) | 90 | if (ret < 0) |
72 | goto out_tsk; | 91 | goto out_tsk; |
@@ -87,26 +106,9 @@ long sys_ptrace(long request, long pid, long addr, long data) | |||
87 | } | 106 | } |
88 | 107 | ||
89 | /* read the word at location addr in the USER area. */ | 108 | /* read the word at location addr in the USER area. */ |
90 | case PTRACE_PEEKUSR: { | 109 | case PTRACE_PEEKUSR: |
91 | unsigned long tmp; | 110 | ret = peek_user(child, addr, data); |
92 | 111 | break; | |
93 | ret = -EIO; | ||
94 | if ((addr & 3) || addr < 0) | ||
95 | break; | ||
96 | |||
97 | tmp = 0; /* Default return condition */ | ||
98 | if(addr < MAX_REG_OFFSET){ | ||
99 | tmp = getreg(child, addr); | ||
100 | } | ||
101 | else if((addr >= offsetof(struct user, u_debugreg[0])) && | ||
102 | (addr <= offsetof(struct user, u_debugreg[7]))){ | ||
103 | addr -= offsetof(struct user, u_debugreg[0]); | ||
104 | addr = addr >> 2; | ||
105 | tmp = child->thread.arch.debugregs[addr]; | ||
106 | } | ||
107 | ret = put_user(tmp, (unsigned long __user *) data); | ||
108 | break; | ||
109 | } | ||
110 | 112 | ||
111 | /* when I and D space are separate, this will have to be fixed. */ | 113 | /* when I and D space are separate, this will have to be fixed. */ |
112 | case PTRACE_POKETEXT: /* write the word at location addr. */ | 114 | case PTRACE_POKETEXT: /* write the word at location addr. */ |
@@ -119,35 +121,16 @@ long sys_ptrace(long request, long pid, long addr, long data) | |||
119 | break; | 121 | break; |
120 | 122 | ||
121 | case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ | 123 | case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ |
122 | ret = -EIO; | 124 | ret = poke_user(child, addr, data); |
123 | if ((addr & 3) || addr < 0) | 125 | break; |
124 | break; | ||
125 | |||
126 | if (addr < MAX_REG_OFFSET) { | ||
127 | ret = putreg(child, addr, data); | ||
128 | break; | ||
129 | } | ||
130 | #if 0 /* XXX x86_64 */ | ||
131 | else if((addr >= offsetof(struct user, u_debugreg[0])) && | ||
132 | (addr <= offsetof(struct user, u_debugreg[7]))){ | ||
133 | addr -= offsetof(struct user, u_debugreg[0]); | ||
134 | addr = addr >> 2; | ||
135 | if((addr == 4) || (addr == 5)) break; | ||
136 | child->thread.arch.debugregs[addr] = data; | ||
137 | ret = 0; | ||
138 | } | ||
139 | #endif | ||
140 | |||
141 | break; | ||
142 | 126 | ||
143 | case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ | 127 | case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ |
144 | case PTRACE_CONT: { /* restart after signal. */ | 128 | case PTRACE_CONT: { /* restart after signal. */ |
145 | ret = -EIO; | 129 | ret = -EIO; |
146 | if ((unsigned long) data > _NSIG) | 130 | if (!valid_signal(data)) |
147 | break; | 131 | break; |
148 | 132 | ||
149 | child->ptrace &= ~PT_DTRACE; | 133 | set_singlestepping(child, 0); |
150 | child->thread.singlestep_syscall = 0; | ||
151 | if (request == PTRACE_SYSCALL) { | 134 | if (request == PTRACE_SYSCALL) { |
152 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 135 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
153 | } | 136 | } |
@@ -170,8 +153,7 @@ long sys_ptrace(long request, long pid, long addr, long data) | |||
170 | if (child->exit_state == EXIT_ZOMBIE) /* already dead */ | 153 | if (child->exit_state == EXIT_ZOMBIE) /* already dead */ |
171 | break; | 154 | break; |
172 | 155 | ||
173 | child->ptrace &= ~PT_DTRACE; | 156 | set_singlestepping(child, 0); |
174 | child->thread.singlestep_syscall = 0; | ||
175 | child->exit_code = SIGKILL; | 157 | child->exit_code = SIGKILL; |
176 | wake_up_process(child); | 158 | wake_up_process(child); |
177 | break; | 159 | break; |
@@ -179,11 +161,10 @@ long sys_ptrace(long request, long pid, long addr, long data) | |||
179 | 161 | ||
180 | case PTRACE_SINGLESTEP: { /* set the trap flag. */ | 162 | case PTRACE_SINGLESTEP: { /* set the trap flag. */ |
181 | ret = -EIO; | 163 | ret = -EIO; |
182 | if ((unsigned long) data > _NSIG) | 164 | if (!valid_signal(data)) |
183 | break; | 165 | break; |
184 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 166 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
185 | child->ptrace |= PT_DTRACE; | 167 | set_singlestepping(child, 1); |
186 | child->thread.singlestep_syscall = 0; | ||
187 | child->exit_code = data; | 168 | child->exit_code = data; |
188 | /* give it a chance to run. */ | 169 | /* give it a chance to run. */ |
189 | wake_up_process(child); | 170 | wake_up_process(child); |
@@ -250,23 +231,19 @@ long sys_ptrace(long request, long pid, long addr, long data) | |||
250 | break; | 231 | break; |
251 | #endif | 232 | #endif |
252 | case PTRACE_FAULTINFO: { | 233 | case PTRACE_FAULTINFO: { |
253 | struct ptrace_faultinfo fault; | 234 | /* Take the info from thread->arch->faultinfo, |
254 | 235 | * but transfer max. sizeof(struct ptrace_faultinfo). | |
255 | fault = ((struct ptrace_faultinfo) | 236 | * On i386, ptrace_faultinfo is smaller! |
256 | { .is_write = child->thread.err, | 237 | */ |
257 | .addr = child->thread.cr2 }); | 238 | ret = copy_to_user((unsigned long __user *) data, |
258 | ret = copy_to_user((unsigned long __user *) data, &fault, | 239 | &child->thread.arch.faultinfo, |
259 | sizeof(fault)); | 240 | sizeof(struct ptrace_faultinfo)); |
260 | if(ret) | 241 | if(ret) |
261 | break; | 242 | break; |
262 | break; | 243 | break; |
263 | } | 244 | } |
264 | case PTRACE_SIGPENDING: | ||
265 | ret = copy_to_user((unsigned long __user *) data, | ||
266 | &child->pending.signal, | ||
267 | sizeof(child->pending.signal)); | ||
268 | break; | ||
269 | 245 | ||
246 | #ifdef PTRACE_LDT | ||
270 | case PTRACE_LDT: { | 247 | case PTRACE_LDT: { |
271 | struct ptrace_ldt ldt; | 248 | struct ptrace_ldt ldt; |
272 | 249 | ||
@@ -282,6 +259,7 @@ long sys_ptrace(long request, long pid, long addr, long data) | |||
282 | ret = -EIO; | 259 | ret = -EIO; |
283 | break; | 260 | break; |
284 | } | 261 | } |
262 | #endif | ||
285 | #ifdef CONFIG_PROC_MM | 263 | #ifdef CONFIG_PROC_MM |
286 | case PTRACE_SWITCH_MM: { | 264 | case PTRACE_SWITCH_MM: { |
287 | struct mm_struct *old = child->mm; | 265 | struct mm_struct *old = child->mm; |
@@ -337,15 +315,18 @@ void syscall_trace(union uml_pt_regs *regs, int entryexit) | |||
337 | 315 | ||
338 | if (unlikely(current->audit_context)) { | 316 | if (unlikely(current->audit_context)) { |
339 | if (!entryexit) | 317 | if (!entryexit) |
340 | audit_syscall_entry(current, | 318 | audit_syscall_entry(current, |
341 | UPT_SYSCALL_NR(®s->regs), | 319 | HOST_AUDIT_ARCH, |
342 | UPT_SYSCALL_ARG1(®s->regs), | 320 | UPT_SYSCALL_NR(regs), |
343 | UPT_SYSCALL_ARG2(®s->regs), | 321 | UPT_SYSCALL_ARG1(regs), |
344 | UPT_SYSCALL_ARG3(®s->regs), | 322 | UPT_SYSCALL_ARG2(regs), |
345 | UPT_SYSCALL_ARG4(®s->regs)); | 323 | UPT_SYSCALL_ARG3(regs), |
346 | else | 324 | UPT_SYSCALL_ARG4(regs)); |
347 | audit_syscall_exit(current, | 325 | else { |
348 | UPT_SYSCALL_RET(®s->regs)); | 326 | int res = UPT_SYSCALL_RET(regs); |
327 | audit_syscall_exit(current, AUDITSC_RESULT(res), | ||
328 | res); | ||
329 | } | ||
349 | } | 330 | } |
350 | 331 | ||
351 | /* Fake a debug trap */ | 332 | /* Fake a debug trap */ |
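In the ptrace.c hunks above, PTRACE_CONT and PTRACE_SINGLESTEP now reject bad signal numbers through valid_signal() instead of open-coding the _NSIG comparison at each call site, and the PEEKUSR/POKEUSR bodies move into the shared peek_user()/poke_user() helpers. The range check itself is tiny; a standalone sketch of what it amounts to (names suffixed _sketch to make clear this is illustrative, with the _NSIG value of 64 assumed):

/* Illustrative sketch only. */
#define NSIG_SKETCH 64UL                        /* stands in for _NSIG */

static inline int valid_signal_sketch(unsigned long sig)
{
        return sig <= NSIG_SKETCH;              /* 0 = "no signal" is allowed */
}

static long ptrace_resume_check_sketch(long data)
{
        /* same early -EIO rejection the ptrace cases above perform */
        return valid_signal_sketch(data) ? 0 : -5 /* -EIO */;
}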
diff --git a/arch/um/kernel/sigio_user.c b/arch/um/kernel/sigio_user.c index 668df13d8c9d..e89218958f38 100644 --- a/arch/um/kernel/sigio_user.c +++ b/arch/um/kernel/sigio_user.c | |||
@@ -182,6 +182,7 @@ static int write_sigio_thread(void *unused) | |||
182 | int i, n, respond_fd; | 182 | int i, n, respond_fd; |
183 | char c; | 183 | char c; |
184 | 184 | ||
185 | signal(SIGWINCH, SIG_IGN); | ||
185 | fds = ¤t_poll; | 186 | fds = ¤t_poll; |
186 | while(1){ | 187 | while(1){ |
187 | n = poll(fds->poll, fds->used, -1); | 188 | n = poll(fds->poll, fds->used, -1); |
diff --git a/arch/um/kernel/skas/include/mode_kern-skas.h b/arch/um/kernel/skas/include/mode_kern-skas.h index 94c564962378..e48490028111 100644 --- a/arch/um/kernel/skas/include/mode_kern-skas.h +++ b/arch/um/kernel/skas/include/mode_kern-skas.h | |||
@@ -18,7 +18,6 @@ extern int copy_thread_skas(int nr, unsigned long clone_flags, | |||
18 | unsigned long sp, unsigned long stack_top, | 18 | unsigned long sp, unsigned long stack_top, |
19 | struct task_struct *p, struct pt_regs *regs); | 19 | struct task_struct *p, struct pt_regs *regs); |
20 | extern void release_thread_skas(struct task_struct *task); | 20 | extern void release_thread_skas(struct task_struct *task); |
21 | extern void exit_thread_skas(void); | ||
22 | extern void initial_thread_cb_skas(void (*proc)(void *), void *arg); | 21 | extern void initial_thread_cb_skas(void (*proc)(void *), void *arg); |
23 | extern void init_idle_skas(void); | 22 | extern void init_idle_skas(void); |
24 | extern void flush_tlb_kernel_range_skas(unsigned long start, | 23 | extern void flush_tlb_kernel_range_skas(unsigned long start, |
diff --git a/arch/um/kernel/skas/include/skas.h b/arch/um/kernel/skas/include/skas.h index f0702c2c7204..96b51dba3471 100644 --- a/arch/um/kernel/skas/include/skas.h +++ b/arch/um/kernel/skas/include/skas.h | |||
@@ -27,9 +27,10 @@ extern void map(int fd, unsigned long virt, unsigned long len, int r, int w, | |||
27 | extern int unmap(int fd, void *addr, unsigned long len); | 27 | extern int unmap(int fd, void *addr, unsigned long len); |
28 | extern int protect(int fd, unsigned long addr, unsigned long len, | 28 | extern int protect(int fd, unsigned long addr, unsigned long len, |
29 | int r, int w, int x); | 29 | int r, int w, int x); |
30 | extern void user_signal(int sig, union uml_pt_regs *regs); | 30 | extern void user_signal(int sig, union uml_pt_regs *regs, int pid); |
31 | extern int new_mm(int from); | 31 | extern int new_mm(int from); |
32 | extern void start_userspace(int cpu); | 32 | extern void start_userspace(int cpu); |
33 | extern void get_skas_faultinfo(int pid, struct faultinfo * fi); | ||
33 | extern long execute_syscall_skas(void *r); | 34 | extern long execute_syscall_skas(void *r); |
34 | 35 | ||
35 | #endif | 36 | #endif |
diff --git a/arch/um/kernel/skas/include/uaccess-skas.h b/arch/um/kernel/skas/include/uaccess-skas.h index 11986c9b9ddf..cd6c280482cb 100644 --- a/arch/um/kernel/skas/include/uaccess-skas.h +++ b/arch/um/kernel/skas/include/uaccess-skas.h | |||
@@ -19,7 +19,7 @@ | |||
19 | ((unsigned long) (addr) + (size) >= (unsigned long)(addr)))) | 19 | ((unsigned long) (addr) + (size) >= (unsigned long)(addr)))) |
20 | 20 | ||
21 | static inline int verify_area_skas(int type, const void * addr, | 21 | static inline int verify_area_skas(int type, const void * addr, |
22 | unsigned long size) | 22 | unsigned long size) |
23 | { | 23 | { |
24 | return(access_ok_skas(type, addr, size) ? 0 : -EFAULT); | 24 | return(access_ok_skas(type, addr, size) ? 0 : -EFAULT); |
25 | } | 25 | } |
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c index b4ffaaa81241..773cd2b525fc 100644 --- a/arch/um/kernel/skas/process.c +++ b/arch/um/kernel/skas/process.c | |||
@@ -4,6 +4,7 @@ | |||
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include <stdlib.h> | 6 | #include <stdlib.h> |
7 | #include <string.h> | ||
7 | #include <unistd.h> | 8 | #include <unistd.h> |
8 | #include <errno.h> | 9 | #include <errno.h> |
9 | #include <signal.h> | 10 | #include <signal.h> |
@@ -27,27 +28,37 @@ | |||
27 | #include "chan_user.h" | 28 | #include "chan_user.h" |
28 | #include "signal_user.h" | 29 | #include "signal_user.h" |
29 | #include "registers.h" | 30 | #include "registers.h" |
31 | #include "process.h" | ||
30 | 32 | ||
31 | int is_skas_winch(int pid, int fd, void *data) | 33 | int is_skas_winch(int pid, int fd, void *data) |
32 | { | 34 | { |
33 | if(pid != os_getpid()) | 35 | if(pid != os_getpgrp()) |
34 | return(0); | 36 | return(0); |
35 | 37 | ||
36 | register_winch_irq(-1, fd, -1, data); | 38 | register_winch_irq(-1, fd, -1, data); |
37 | return(1); | 39 | return(1); |
38 | } | 40 | } |
39 | 41 | ||
40 | static void handle_segv(int pid) | 42 | void get_skas_faultinfo(int pid, struct faultinfo * fi) |
41 | { | 43 | { |
42 | struct ptrace_faultinfo fault; | ||
43 | int err; | 44 | int err; |
44 | 45 | ||
45 | err = ptrace(PTRACE_FAULTINFO, pid, 0, &fault); | 46 | err = ptrace(PTRACE_FAULTINFO, pid, 0, fi); |
46 | if(err) | 47 | if(err) |
47 | panic("handle_segv - PTRACE_FAULTINFO failed, errno = %d\n", | 48 | panic("get_skas_faultinfo - PTRACE_FAULTINFO failed, " |
48 | errno); | 49 | "errno = %d\n", errno); |
50 | |||
51 | /* Special handling for i386, which has different structs */ | ||
52 | if (sizeof(struct ptrace_faultinfo) < sizeof(struct faultinfo)) | ||
53 | memset((char *)fi + sizeof(struct ptrace_faultinfo), 0, | ||
54 | sizeof(struct faultinfo) - | ||
55 | sizeof(struct ptrace_faultinfo)); | ||
56 | } | ||
49 | 57 | ||
50 | segv(fault.addr, 0, FAULT_WRITE(fault.is_write), 1, NULL); | 58 | static void handle_segv(int pid, union uml_pt_regs * regs) |
59 | { | ||
60 | get_skas_faultinfo(pid, ®s->skas.faultinfo); | ||
61 | segv(regs->skas.faultinfo, 0, 1, NULL); | ||
51 | } | 62 | } |
52 | 63 | ||
53 | /*To use the same value of using_sysemu as the caller, ask it that value (in local_using_sysemu)*/ | 64 | /*To use the same value of using_sysemu as the caller, ask it that value (in local_using_sysemu)*/ |
@@ -163,7 +174,7 @@ void userspace(union uml_pt_regs *regs) | |||
163 | if(WIFSTOPPED(status)){ | 174 | if(WIFSTOPPED(status)){ |
164 | switch(WSTOPSIG(status)){ | 175 | switch(WSTOPSIG(status)){ |
165 | case SIGSEGV: | 176 | case SIGSEGV: |
166 | handle_segv(pid); | 177 | handle_segv(pid, regs); |
167 | break; | 178 | break; |
168 | case SIGTRAP + 0x80: | 179 | case SIGTRAP + 0x80: |
169 | handle_trap(pid, regs, local_using_sysemu); | 180 | handle_trap(pid, regs, local_using_sysemu); |
@@ -177,7 +188,7 @@ void userspace(union uml_pt_regs *regs) | |||
177 | case SIGBUS: | 188 | case SIGBUS: |
178 | case SIGFPE: | 189 | case SIGFPE: |
179 | case SIGWINCH: | 190 | case SIGWINCH: |
180 | user_signal(WSTOPSIG(status), regs); | 191 | user_signal(WSTOPSIG(status), regs, pid); |
181 | break; | 192 | break; |
182 | default: | 193 | default: |
183 | printk("userspace - child stopped with signal " | 194 | printk("userspace - child stopped with signal " |
@@ -190,6 +201,11 @@ void userspace(union uml_pt_regs *regs) | |||
190 | } | 201 | } |
191 | } | 202 | } |
192 | } | 203 | } |
204 | #define INIT_JMP_NEW_THREAD 0 | ||
205 | #define INIT_JMP_REMOVE_SIGSTACK 1 | ||
206 | #define INIT_JMP_CALLBACK 2 | ||
207 | #define INIT_JMP_HALT 3 | ||
208 | #define INIT_JMP_REBOOT 4 | ||
193 | 209 | ||
194 | void new_thread(void *stack, void **switch_buf_ptr, void **fork_buf_ptr, | 210 | void new_thread(void *stack, void **switch_buf_ptr, void **fork_buf_ptr, |
195 | void (*handler)(int)) | 211 | void (*handler)(int)) |
@@ -225,7 +241,7 @@ void thread_wait(void *sw, void *fb) | |||
225 | *switch_buf = &buf; | 241 | *switch_buf = &buf; |
226 | fork_buf = fb; | 242 | fork_buf = fb; |
227 | if(sigsetjmp(buf, 1) == 0) | 243 | if(sigsetjmp(buf, 1) == 0) |
228 | siglongjmp(*fork_buf, 1); | 244 | siglongjmp(*fork_buf, INIT_JMP_REMOVE_SIGSTACK); |
229 | } | 245 | } |
230 | 246 | ||
231 | void switch_threads(void *me, void *next) | 247 | void switch_threads(void *me, void *next) |
@@ -249,23 +265,31 @@ int start_idle_thread(void *stack, void *switch_buf_ptr, void **fork_buf_ptr) | |||
249 | sigjmp_buf **switch_buf = switch_buf_ptr; | 265 | sigjmp_buf **switch_buf = switch_buf_ptr; |
250 | int n; | 266 | int n; |
251 | 267 | ||
268 | set_handler(SIGWINCH, (__sighandler_t) sig_handler, | ||
269 | SA_ONSTACK | SA_RESTART, SIGUSR1, SIGIO, SIGALRM, | ||
270 | SIGVTALRM, -1); | ||
271 | |||
252 | *fork_buf_ptr = &initial_jmpbuf; | 272 | *fork_buf_ptr = &initial_jmpbuf; |
253 | n = sigsetjmp(initial_jmpbuf, 1); | 273 | n = sigsetjmp(initial_jmpbuf, 1); |
254 | if(n == 0) | 274 | switch(n){ |
255 | new_thread_proc((void *) stack, new_thread_handler); | 275 | case INIT_JMP_NEW_THREAD: |
256 | else if(n == 1) | 276 | new_thread_proc((void *) stack, new_thread_handler); |
257 | remove_sigstack(); | 277 | break; |
258 | else if(n == 2){ | 278 | case INIT_JMP_REMOVE_SIGSTACK: |
279 | remove_sigstack(); | ||
280 | break; | ||
281 | case INIT_JMP_CALLBACK: | ||
259 | (*cb_proc)(cb_arg); | 282 | (*cb_proc)(cb_arg); |
260 | siglongjmp(*cb_back, 1); | 283 | siglongjmp(*cb_back, 1); |
261 | } | 284 | break; |
262 | else if(n == 3){ | 285 | case INIT_JMP_HALT: |
263 | kmalloc_ok = 0; | 286 | kmalloc_ok = 0; |
264 | return(0); | 287 | return(0); |
265 | } | 288 | case INIT_JMP_REBOOT: |
266 | else if(n == 4){ | ||
267 | kmalloc_ok = 0; | 289 | kmalloc_ok = 0; |
268 | return(1); | 290 | return(1); |
291 | default: | ||
292 | panic("Bad sigsetjmp return in start_idle_thread - %d\n", n); | ||
269 | } | 293 | } |
270 | siglongjmp(**switch_buf, 1); | 294 | siglongjmp(**switch_buf, 1); |
271 | } | 295 | } |
@@ -290,7 +314,7 @@ void initial_thread_cb_skas(void (*proc)(void *), void *arg) | |||
290 | 314 | ||
291 | block_signals(); | 315 | block_signals(); |
292 | if(sigsetjmp(here, 1) == 0) | 316 | if(sigsetjmp(here, 1) == 0) |
293 | siglongjmp(initial_jmpbuf, 2); | 317 | siglongjmp(initial_jmpbuf, INIT_JMP_CALLBACK); |
294 | unblock_signals(); | 318 | unblock_signals(); |
295 | 319 | ||
296 | cb_proc = NULL; | 320 | cb_proc = NULL; |
@@ -301,13 +325,13 @@ void initial_thread_cb_skas(void (*proc)(void *), void *arg) | |||
301 | void halt_skas(void) | 325 | void halt_skas(void) |
302 | { | 326 | { |
303 | block_signals(); | 327 | block_signals(); |
304 | siglongjmp(initial_jmpbuf, 3); | 328 | siglongjmp(initial_jmpbuf, INIT_JMP_HALT); |
305 | } | 329 | } |
306 | 330 | ||
307 | void reboot_skas(void) | 331 | void reboot_skas(void) |
308 | { | 332 | { |
309 | block_signals(); | 333 | block_signals(); |
310 | siglongjmp(initial_jmpbuf, 4); | 334 | siglongjmp(initial_jmpbuf, INIT_JMP_REBOOT); |
311 | } | 335 | } |
312 | 336 | ||
313 | void switch_mm_skas(int mm_fd) | 337 | void switch_mm_skas(int mm_fd) |
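The INIT_JMP_* constants introduced above name the request codes that siglongjmp() hands back to the sigsetjmp() site in start_idle_thread(). Below is a stand-alone sketch of that dispatch pattern; it is illustrative only — the REQ_* names and the program around them are made up, not UML code.

#include <setjmp.h>
#include <stdio.h>

#define REQ_NEW_THREAD 0   /* direct return from sigsetjmp() */
#define REQ_CALLBACK   2
#define REQ_HALT       3

static sigjmp_buf initial_buf;

static void request(int code)
{
	siglongjmp(initial_buf, code);   /* control re-enters the switch below */
}

int main(void)
{
	switch (sigsetjmp(initial_buf, 1)) {
	case REQ_NEW_THREAD:             /* first pass through */
		printf("starting\n");
		request(REQ_HALT);
		break;                   /* not reached */
	case REQ_CALLBACK:
		printf("callback\n");
		break;
	case REQ_HALT:
		printf("halting\n");
		return 0;
	}
	return 0;
}

Naming the jump values is the whole point of the hunk: the old code compared bare 0/1/2/3/4 literals at the sigsetjmp() site.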
diff --git a/arch/um/kernel/skas/process_kern.c b/arch/um/kernel/skas/process_kern.c index 5d096ea63b97..ab5d3271da0b 100644 --- a/arch/um/kernel/skas/process_kern.c +++ b/arch/um/kernel/skas/process_kern.c | |||
@@ -83,10 +83,6 @@ void release_thread_skas(struct task_struct *task) | |||
83 | { | 83 | { |
84 | } | 84 | } |
85 | 85 | ||
86 | void exit_thread_skas(void) | ||
87 | { | ||
88 | } | ||
89 | |||
90 | void fork_handler(int sig) | 86 | void fork_handler(int sig) |
91 | { | 87 | { |
92 | change_sig(SIGUSR1, 1); | 88 | change_sig(SIGUSR1, 1); |
diff --git a/arch/um/kernel/skas/trap_user.c b/arch/um/kernel/skas/trap_user.c index 8e9b46d4702e..0dee1d95c806 100644 --- a/arch/um/kernel/skas/trap_user.c +++ b/arch/um/kernel/skas/trap_user.c | |||
@@ -5,12 +5,15 @@ | |||
5 | 5 | ||
6 | #include <signal.h> | 6 | #include <signal.h> |
7 | #include <errno.h> | 7 | #include <errno.h> |
8 | #include "sysdep/ptrace.h" | ||
9 | #include "signal_user.h" | 8 | #include "signal_user.h" |
10 | #include "user_util.h" | 9 | #include "user_util.h" |
11 | #include "kern_util.h" | 10 | #include "kern_util.h" |
12 | #include "task.h" | 11 | #include "task.h" |
13 | #include "sigcontext.h" | 12 | #include "sigcontext.h" |
13 | #include "skas.h" | ||
14 | #include "ptrace_user.h" | ||
15 | #include "sysdep/ptrace.h" | ||
16 | #include "sysdep/ptrace_user.h" | ||
14 | 17 | ||
15 | void sig_handler_common_skas(int sig, void *sc_ptr) | 18 | void sig_handler_common_skas(int sig, void *sc_ptr) |
16 | { | 19 | { |
@@ -31,9 +34,11 @@ void sig_handler_common_skas(int sig, void *sc_ptr) | |||
31 | r = &TASK_REGS(get_current())->skas; | 34 | r = &TASK_REGS(get_current())->skas; |
32 | save_user = r->is_user; | 35 | save_user = r->is_user; |
33 | r->is_user = 0; | 36 | r->is_user = 0; |
34 | r->fault_addr = SC_FAULT_ADDR(sc); | 37 | if ( sig == SIGFPE || sig == SIGSEGV || |
35 | r->fault_type = SC_FAULT_TYPE(sc); | 38 | sig == SIGBUS || sig == SIGILL || |
36 | r->trap_type = SC_TRAP_TYPE(sc); | 39 | sig == SIGTRAP ) { |
40 | GET_FAULTINFO_FROM_SC(r->faultinfo, sc); | ||
41 | } | ||
37 | 42 | ||
38 | change_sig(SIGUSR1, 1); | 43 | change_sig(SIGUSR1, 1); |
39 | info = &sig_info[sig]; | 44 | info = &sig_info[sig]; |
@@ -45,14 +50,17 @@ void sig_handler_common_skas(int sig, void *sc_ptr) | |||
45 | r->is_user = save_user; | 50 | r->is_user = save_user; |
46 | } | 51 | } |
47 | 52 | ||
48 | void user_signal(int sig, union uml_pt_regs *regs) | 53 | extern int ptrace_faultinfo; |
54 | |||
55 | void user_signal(int sig, union uml_pt_regs *regs, int pid) | ||
49 | { | 56 | { |
50 | struct signal_info *info; | 57 | struct signal_info *info; |
58 | int segv = ((sig == SIGFPE) || (sig == SIGSEGV) || (sig == SIGBUS) || | ||
59 | (sig == SIGILL) || (sig == SIGTRAP)); | ||
51 | 60 | ||
52 | regs->skas.is_user = 1; | 61 | regs->skas.is_user = 1; |
53 | regs->skas.fault_addr = 0; | 62 | if (segv) |
54 | regs->skas.fault_type = 0; | 63 | get_skas_faultinfo(pid, ®s->skas.faultinfo); |
55 | regs->skas.trap_type = 0; | ||
56 | info = &sig_info[sig]; | 64 | info = &sig_info[sig]; |
57 | (*info->handler)(sig, regs); | 65 | (*info->handler)(sig, regs); |
58 | 66 | ||
diff --git a/arch/um/kernel/skas/uaccess.c b/arch/um/kernel/skas/uaccess.c index 7575ec489b63..75195281081e 100644 --- a/arch/um/kernel/skas/uaccess.c +++ b/arch/um/kernel/skas/uaccess.c | |||
@@ -3,6 +3,7 @@ | |||
3 | * Licensed under the GPL | 3 | * Licensed under the GPL |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include "linux/compiler.h" | ||
6 | #include "linux/stddef.h" | 7 | #include "linux/stddef.h" |
7 | #include "linux/kernel.h" | 8 | #include "linux/kernel.h" |
8 | #include "linux/string.h" | 9 | #include "linux/string.h" |
@@ -28,9 +29,12 @@ static unsigned long maybe_map(unsigned long virt, int is_write) | |||
28 | if(IS_ERR(phys) || (is_write && !pte_write(pte))){ | 29 | if(IS_ERR(phys) || (is_write && !pte_write(pte))){ |
29 | err = handle_page_fault(virt, 0, is_write, 1, &dummy_code); | 30 | err = handle_page_fault(virt, 0, is_write, 1, &dummy_code); |
30 | if(err) | 31 | if(err) |
31 | return(0); | 32 | return(-1UL); |
32 | phys = um_virt_to_phys(current, virt, NULL); | 33 | phys = um_virt_to_phys(current, virt, NULL); |
33 | } | 34 | } |
35 | if(IS_ERR(phys)) | ||
36 | phys = (void *) -1; | ||
37 | |||
34 | return((unsigned long) phys); | 38 | return((unsigned long) phys); |
35 | } | 39 | } |
36 | 40 | ||
@@ -41,7 +45,7 @@ static int do_op(unsigned long addr, int len, int is_write, | |||
41 | int n; | 45 | int n; |
42 | 46 | ||
43 | addr = maybe_map(addr, is_write); | 47 | addr = maybe_map(addr, is_write); |
44 | if(addr == -1) | 48 | if(addr == -1UL) |
45 | return(-1); | 49 | return(-1); |
46 | 50 | ||
47 | page = phys_to_page(addr); | 51 | page = phys_to_page(addr); |
@@ -61,8 +65,7 @@ static void do_buffer_op(void *jmpbuf, void *arg_ptr) | |||
61 | void *arg; | 65 | void *arg; |
62 | int *res; | 66 | int *res; |
63 | 67 | ||
64 | /* Some old gccs recognize __va_copy, but not va_copy */ | 68 | va_copy(args, *(va_list *)arg_ptr); |
65 | __va_copy(args, *(va_list *)arg_ptr); | ||
66 | addr = va_arg(args, unsigned long); | 69 | addr = va_arg(args, unsigned long); |
67 | len = va_arg(args, int); | 70 | len = va_arg(args, int); |
68 | is_write = va_arg(args, int); | 71 | is_write = va_arg(args, int); |
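The __va_copy to va_copy change above moves to the standard C99 spelling. A self-contained illustration of the copy-then-consume idiom follows; the function and variable names are demo-only, not code from the patch.

#include <stdarg.h>
#include <stdio.h>

static int sum_from(va_list *args_ptr, int count)
{
	va_list args;
	int i, total = 0;

	va_copy(args, *args_ptr);	/* private copy; caller's list untouched */
	for (i = 0; i < count; i++)
		total += va_arg(args, int);
	va_end(args);
	return total;
}

static int sum(int count, ...)
{
	va_list args;
	int total;

	va_start(args, count);
	total = sum_from(&args, count);
	va_end(args);
	return total;
}

int main(void)
{
	printf("%d\n", sum(3, 1, 2, 3));	/* prints 6 */
	return 0;
}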
diff --git a/arch/um/kernel/skas/util/Makefile b/arch/um/kernel/skas/util/Makefile index 17f5909d60f7..f7b7eba83340 100644 --- a/arch/um/kernel/skas/util/Makefile +++ b/arch/um/kernel/skas/util/Makefile | |||
@@ -2,3 +2,4 @@ hostprogs-y := mk_ptregs | |||
2 | always := $(hostprogs-y) | 2 | always := $(hostprogs-y) |
3 | 3 | ||
4 | mk_ptregs-objs := mk_ptregs-$(SUBARCH).o | 4 | mk_ptregs-objs := mk_ptregs-$(SUBARCH).o |
5 | HOSTCFLAGS_mk_ptregs-$(SUBARCH).o := -I$(objtree)/arch/um | ||
diff --git a/arch/um/kernel/skas/util/mk_ptregs-i386.c b/arch/um/kernel/skas/util/mk_ptregs-i386.c index 0788dd05bcac..1f96e1eeb8a7 100644 --- a/arch/um/kernel/skas/util/mk_ptregs-i386.c +++ b/arch/um/kernel/skas/util/mk_ptregs-i386.c | |||
@@ -1,8 +1,7 @@ | |||
1 | #include <stdio.h> | 1 | #include <stdio.h> |
2 | #include <asm/ptrace.h> | 2 | #include <user-offsets.h> |
3 | #include <asm/user.h> | ||
4 | 3 | ||
5 | #define PRINT_REG(name, val) printf("#define HOST_%s %d\n", (name), (val)) | 4 | #define SHOW(name) printf("#define %s %d\n", #name, name) |
6 | 5 | ||
7 | int main(int argc, char **argv) | 6 | int main(int argc, char **argv) |
8 | { | 7 | { |
@@ -12,28 +11,27 @@ int main(int argc, char **argv) | |||
12 | printf("#ifndef __SKAS_PT_REGS_\n"); | 11 | printf("#ifndef __SKAS_PT_REGS_\n"); |
13 | printf("#define __SKAS_PT_REGS_\n"); | 12 | printf("#define __SKAS_PT_REGS_\n"); |
14 | printf("\n"); | 13 | printf("\n"); |
15 | printf("#define HOST_FRAME_SIZE %d\n", FRAME_SIZE); | 14 | SHOW(HOST_FRAME_SIZE); |
16 | printf("#define HOST_FP_SIZE %d\n", | 15 | SHOW(HOST_FP_SIZE); |
17 | sizeof(struct user_i387_struct) / sizeof(unsigned long)); | 16 | SHOW(HOST_XFP_SIZE); |
18 | printf("#define HOST_XFP_SIZE %d\n", | 17 | |
19 | sizeof(struct user_fxsr_struct) / sizeof(unsigned long)); | 18 | SHOW(HOST_IP); |
19 | SHOW(HOST_SP); | ||
20 | SHOW(HOST_EFLAGS); | ||
21 | SHOW(HOST_EAX); | ||
22 | SHOW(HOST_EBX); | ||
23 | SHOW(HOST_ECX); | ||
24 | SHOW(HOST_EDX); | ||
25 | SHOW(HOST_ESI); | ||
26 | SHOW(HOST_EDI); | ||
27 | SHOW(HOST_EBP); | ||
28 | SHOW(HOST_CS); | ||
29 | SHOW(HOST_SS); | ||
30 | SHOW(HOST_DS); | ||
31 | SHOW(HOST_FS); | ||
32 | SHOW(HOST_ES); | ||
33 | SHOW(HOST_GS); | ||
20 | 34 | ||
21 | PRINT_REG("IP", EIP); | ||
22 | PRINT_REG("SP", UESP); | ||
23 | PRINT_REG("EFLAGS", EFL); | ||
24 | PRINT_REG("EAX", EAX); | ||
25 | PRINT_REG("EBX", EBX); | ||
26 | PRINT_REG("ECX", ECX); | ||
27 | PRINT_REG("EDX", EDX); | ||
28 | PRINT_REG("ESI", ESI); | ||
29 | PRINT_REG("EDI", EDI); | ||
30 | PRINT_REG("EBP", EBP); | ||
31 | PRINT_REG("CS", CS); | ||
32 | PRINT_REG("SS", SS); | ||
33 | PRINT_REG("DS", DS); | ||
34 | PRINT_REG("FS", FS); | ||
35 | PRINT_REG("ES", ES); | ||
36 | PRINT_REG("GS", GS); | ||
37 | printf("\n"); | 35 | printf("\n"); |
38 | printf("#endif\n"); | 36 | printf("#endif\n"); |
39 | return(0); | 37 | return(0); |
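The SHOW() macro above relies on the preprocessor stringizing operator, so a constant's name and value are printed from a single token. A trivial demo with a made-up constant (not part of the patch):

#include <stdio.h>

#define HOST_DEMO 42
#define SHOW(name) printf("#define %s %d\n", #name, name)

int main(void)
{
	SHOW(HOST_DEMO);	/* emits: #define HOST_DEMO 42 */
	return 0;
}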
diff --git a/arch/um/kernel/skas/util/mk_ptregs-x86_64.c b/arch/um/kernel/skas/util/mk_ptregs-x86_64.c index 67aee92a70ef..5fccbfe35f78 100644 --- a/arch/um/kernel/skas/util/mk_ptregs-x86_64.c +++ b/arch/um/kernel/skas/util/mk_ptregs-x86_64.c | |||
@@ -5,11 +5,10 @@ | |||
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <stdio.h> | 7 | #include <stdio.h> |
8 | #define __FRAME_OFFSETS | 8 | #include <user-offsets.h> |
9 | #include <asm/ptrace.h> | ||
10 | 9 | ||
11 | #define PRINT_REG(name, val) \ | 10 | #define SHOW(name) \ |
12 | printf("#define HOST_%s (%d / sizeof(unsigned long))\n", (name), (val)) | 11 | printf("#define %s (%d / sizeof(unsigned long))\n", #name, name) |
13 | 12 | ||
14 | int main(int argc, char **argv) | 13 | int main(int argc, char **argv) |
15 | { | 14 | { |
@@ -18,36 +17,35 @@ int main(int argc, char **argv) | |||
18 | printf("\n"); | 17 | printf("\n"); |
19 | printf("#ifndef __SKAS_PT_REGS_\n"); | 18 | printf("#ifndef __SKAS_PT_REGS_\n"); |
20 | printf("#define __SKAS_PT_REGS_\n"); | 19 | printf("#define __SKAS_PT_REGS_\n"); |
21 | printf("#define HOST_FRAME_SIZE (%d / sizeof(unsigned long))\n", | 20 | SHOW(HOST_FRAME_SIZE); |
22 | FRAME_SIZE); | 21 | SHOW(HOST_RBX); |
23 | PRINT_REG("RBX", RBX); | 22 | SHOW(HOST_RCX); |
24 | PRINT_REG("RCX", RCX); | 23 | SHOW(HOST_RDI); |
25 | PRINT_REG("RDI", RDI); | 24 | SHOW(HOST_RSI); |
26 | PRINT_REG("RSI", RSI); | 25 | SHOW(HOST_RDX); |
27 | PRINT_REG("RDX", RDX); | 26 | SHOW(HOST_RBP); |
28 | PRINT_REG("RBP", RBP); | 27 | SHOW(HOST_RAX); |
29 | PRINT_REG("RAX", RAX); | 28 | SHOW(HOST_R8); |
30 | PRINT_REG("R8", R8); | 29 | SHOW(HOST_R9); |
31 | PRINT_REG("R9", R9); | 30 | SHOW(HOST_R10); |
32 | PRINT_REG("R10", R10); | 31 | SHOW(HOST_R11); |
33 | PRINT_REG("R11", R11); | 32 | SHOW(HOST_R12); |
34 | PRINT_REG("R12", R12); | 33 | SHOW(HOST_R13); |
35 | PRINT_REG("R13", R13); | 34 | SHOW(HOST_R14); |
36 | PRINT_REG("R14", R14); | 35 | SHOW(HOST_R15); |
37 | PRINT_REG("R15", R15); | 36 | SHOW(HOST_ORIG_RAX); |
38 | PRINT_REG("ORIG_RAX", ORIG_RAX); | 37 | SHOW(HOST_CS); |
39 | PRINT_REG("CS", CS); | 38 | SHOW(HOST_SS); |
40 | PRINT_REG("SS", SS); | 39 | SHOW(HOST_EFLAGS); |
41 | PRINT_REG("EFLAGS", EFLAGS); | ||
42 | #if 0 | 40 | #if 0 |
43 | PRINT_REG("FS", FS); | 41 | SHOW(HOST_FS); |
44 | PRINT_REG("GS", GS); | 42 | SHOW(HOST_GS); |
45 | PRINT_REG("DS", DS); | 43 | SHOW(HOST_DS); |
46 | PRINT_REG("ES", ES); | 44 | SHOW(HOST_ES); |
47 | #endif | 45 | #endif |
48 | 46 | ||
49 | PRINT_REG("IP", RIP); | 47 | SHOW(HOST_IP); |
50 | PRINT_REG("SP", RSP); | 48 | SHOW(HOST_SP); |
51 | printf("#define HOST_FP_SIZE 0\n"); | 49 | printf("#define HOST_FP_SIZE 0\n"); |
52 | printf("#define HOST_XFP_SIZE 0\n"); | 50 | printf("#define HOST_XFP_SIZE 0\n"); |
53 | printf("\n"); | 51 | printf("\n"); |
diff --git a/arch/um/kernel/sys_call_table.c b/arch/um/kernel/sys_call_table.c deleted file mode 100644 index 7fc06c85b29d..000000000000 --- a/arch/um/kernel/sys_call_table.c +++ /dev/null | |||
@@ -1,276 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2000 Jeff Dike (jdike@karaya.com) | ||
3 | * Copyright 2003 PathScale, Inc. | ||
4 | * Licensed under the GPL | ||
5 | */ | ||
6 | |||
7 | #include "linux/config.h" | ||
8 | #include "linux/unistd.h" | ||
9 | #include "linux/sys.h" | ||
10 | #include "linux/swap.h" | ||
11 | #include "linux/syscalls.h" | ||
12 | #include "linux/sysctl.h" | ||
13 | #include "asm/signal.h" | ||
14 | #include "sysdep/syscalls.h" | ||
15 | #include "kern_util.h" | ||
16 | |||
17 | #ifdef CONFIG_NFSD | ||
18 | #define NFSSERVCTL sys_nfsservctl | ||
19 | #else | ||
20 | #define NFSSERVCTL sys_ni_syscall | ||
21 | #endif | ||
22 | |||
23 | #define LAST_GENERIC_SYSCALL __NR_keyctl | ||
24 | |||
25 | #if LAST_GENERIC_SYSCALL > LAST_ARCH_SYSCALL | ||
26 | #define LAST_SYSCALL LAST_GENERIC_SYSCALL | ||
27 | #else | ||
28 | #define LAST_SYSCALL LAST_ARCH_SYSCALL | ||
29 | #endif | ||
30 | |||
31 | extern syscall_handler_t sys_fork; | ||
32 | extern syscall_handler_t sys_execve; | ||
33 | extern syscall_handler_t um_time; | ||
34 | extern syscall_handler_t um_stime; | ||
35 | extern syscall_handler_t sys_pipe; | ||
36 | extern syscall_handler_t sys_olduname; | ||
37 | extern syscall_handler_t sys_sigaction; | ||
38 | extern syscall_handler_t sys_sigsuspend; | ||
39 | extern syscall_handler_t old_readdir; | ||
40 | extern syscall_handler_t sys_uname; | ||
41 | extern syscall_handler_t sys_ipc; | ||
42 | extern syscall_handler_t sys_sigreturn; | ||
43 | extern syscall_handler_t sys_clone; | ||
44 | extern syscall_handler_t sys_rt_sigreturn; | ||
45 | extern syscall_handler_t sys_sigaltstack; | ||
46 | extern syscall_handler_t sys_vfork; | ||
47 | extern syscall_handler_t old_select; | ||
48 | extern syscall_handler_t sys_modify_ldt; | ||
49 | extern syscall_handler_t sys_rt_sigsuspend; | ||
50 | extern syscall_handler_t sys_mbind; | ||
51 | extern syscall_handler_t sys_get_mempolicy; | ||
52 | extern syscall_handler_t sys_set_mempolicy; | ||
53 | extern syscall_handler_t sys_sys_setaltroot; | ||
54 | |||
55 | syscall_handler_t *sys_call_table[] = { | ||
56 | [ __NR_restart_syscall ] = (syscall_handler_t *) sys_restart_syscall, | ||
57 | [ __NR_exit ] = (syscall_handler_t *) sys_exit, | ||
58 | [ __NR_fork ] = (syscall_handler_t *) sys_fork, | ||
59 | [ __NR_read ] = (syscall_handler_t *) sys_read, | ||
60 | [ __NR_write ] = (syscall_handler_t *) sys_write, | ||
61 | |||
62 | /* These three are declared differently in asm/unistd.h */ | ||
63 | [ __NR_open ] = (syscall_handler_t *) sys_open, | ||
64 | [ __NR_close ] = (syscall_handler_t *) sys_close, | ||
65 | [ __NR_creat ] = (syscall_handler_t *) sys_creat, | ||
66 | [ __NR_link ] = (syscall_handler_t *) sys_link, | ||
67 | [ __NR_unlink ] = (syscall_handler_t *) sys_unlink, | ||
68 | [ __NR_execve ] = (syscall_handler_t *) sys_execve, | ||
69 | |||
70 | /* declared differently in kern_util.h */ | ||
71 | [ __NR_chdir ] = (syscall_handler_t *) sys_chdir, | ||
72 | [ __NR_time ] = um_time, | ||
73 | [ __NR_mknod ] = (syscall_handler_t *) sys_mknod, | ||
74 | [ __NR_chmod ] = (syscall_handler_t *) sys_chmod, | ||
75 | [ __NR_lchown ] = (syscall_handler_t *) sys_lchown16, | ||
76 | [ __NR_lseek ] = (syscall_handler_t *) sys_lseek, | ||
77 | [ __NR_getpid ] = (syscall_handler_t *) sys_getpid, | ||
78 | [ __NR_mount ] = (syscall_handler_t *) sys_mount, | ||
79 | [ __NR_setuid ] = (syscall_handler_t *) sys_setuid16, | ||
80 | [ __NR_getuid ] = (syscall_handler_t *) sys_getuid16, | ||
81 | [ __NR_ptrace ] = (syscall_handler_t *) sys_ptrace, | ||
82 | [ __NR_alarm ] = (syscall_handler_t *) sys_alarm, | ||
83 | [ __NR_pause ] = (syscall_handler_t *) sys_pause, | ||
84 | [ __NR_utime ] = (syscall_handler_t *) sys_utime, | ||
85 | [ __NR_access ] = (syscall_handler_t *) sys_access, | ||
86 | [ __NR_sync ] = (syscall_handler_t *) sys_sync, | ||
87 | [ __NR_kill ] = (syscall_handler_t *) sys_kill, | ||
88 | [ __NR_rename ] = (syscall_handler_t *) sys_rename, | ||
89 | [ __NR_mkdir ] = (syscall_handler_t *) sys_mkdir, | ||
90 | [ __NR_rmdir ] = (syscall_handler_t *) sys_rmdir, | ||
91 | |||
92 | /* Declared differently in asm/unistd.h */ | ||
93 | [ __NR_dup ] = (syscall_handler_t *) sys_dup, | ||
94 | [ __NR_pipe ] = (syscall_handler_t *) sys_pipe, | ||
95 | [ __NR_times ] = (syscall_handler_t *) sys_times, | ||
96 | [ __NR_brk ] = (syscall_handler_t *) sys_brk, | ||
97 | [ __NR_setgid ] = (syscall_handler_t *) sys_setgid16, | ||
98 | [ __NR_getgid ] = (syscall_handler_t *) sys_getgid16, | ||
99 | [ __NR_geteuid ] = (syscall_handler_t *) sys_geteuid16, | ||
100 | [ __NR_getegid ] = (syscall_handler_t *) sys_getegid16, | ||
101 | [ __NR_acct ] = (syscall_handler_t *) sys_acct, | ||
102 | [ __NR_umount2 ] = (syscall_handler_t *) sys_umount, | ||
103 | [ __NR_ioctl ] = (syscall_handler_t *) sys_ioctl, | ||
104 | [ __NR_fcntl ] = (syscall_handler_t *) sys_fcntl, | ||
105 | [ __NR_setpgid ] = (syscall_handler_t *) sys_setpgid, | ||
106 | [ __NR_umask ] = (syscall_handler_t *) sys_umask, | ||
107 | [ __NR_chroot ] = (syscall_handler_t *) sys_chroot, | ||
108 | [ __NR_ustat ] = (syscall_handler_t *) sys_ustat, | ||
109 | [ __NR_dup2 ] = (syscall_handler_t *) sys_dup2, | ||
110 | [ __NR_getppid ] = (syscall_handler_t *) sys_getppid, | ||
111 | [ __NR_getpgrp ] = (syscall_handler_t *) sys_getpgrp, | ||
112 | [ __NR_setsid ] = (syscall_handler_t *) sys_setsid, | ||
113 | [ __NR_setreuid ] = (syscall_handler_t *) sys_setreuid16, | ||
114 | [ __NR_setregid ] = (syscall_handler_t *) sys_setregid16, | ||
115 | [ __NR_sethostname ] = (syscall_handler_t *) sys_sethostname, | ||
116 | [ __NR_setrlimit ] = (syscall_handler_t *) sys_setrlimit, | ||
117 | [ __NR_getrlimit ] = (syscall_handler_t *) sys_old_getrlimit, | ||
118 | [ __NR_getrusage ] = (syscall_handler_t *) sys_getrusage, | ||
119 | [ __NR_gettimeofday ] = (syscall_handler_t *) sys_gettimeofday, | ||
120 | [ __NR_settimeofday ] = (syscall_handler_t *) sys_settimeofday, | ||
121 | [ __NR_getgroups ] = (syscall_handler_t *) sys_getgroups16, | ||
122 | [ __NR_setgroups ] = (syscall_handler_t *) sys_setgroups16, | ||
123 | [ __NR_symlink ] = (syscall_handler_t *) sys_symlink, | ||
124 | [ __NR_readlink ] = (syscall_handler_t *) sys_readlink, | ||
125 | [ __NR_uselib ] = (syscall_handler_t *) sys_uselib, | ||
126 | [ __NR_swapon ] = (syscall_handler_t *) sys_swapon, | ||
127 | [ __NR_reboot ] = (syscall_handler_t *) sys_reboot, | ||
128 | [ __NR_munmap ] = (syscall_handler_t *) sys_munmap, | ||
129 | [ __NR_truncate ] = (syscall_handler_t *) sys_truncate, | ||
130 | [ __NR_ftruncate ] = (syscall_handler_t *) sys_ftruncate, | ||
131 | [ __NR_fchmod ] = (syscall_handler_t *) sys_fchmod, | ||
132 | [ __NR_fchown ] = (syscall_handler_t *) sys_fchown16, | ||
133 | [ __NR_getpriority ] = (syscall_handler_t *) sys_getpriority, | ||
134 | [ __NR_setpriority ] = (syscall_handler_t *) sys_setpriority, | ||
135 | [ __NR_statfs ] = (syscall_handler_t *) sys_statfs, | ||
136 | [ __NR_fstatfs ] = (syscall_handler_t *) sys_fstatfs, | ||
137 | [ __NR_ioperm ] = (syscall_handler_t *) sys_ni_syscall, | ||
138 | [ __NR_syslog ] = (syscall_handler_t *) sys_syslog, | ||
139 | [ __NR_setitimer ] = (syscall_handler_t *) sys_setitimer, | ||
140 | [ __NR_getitimer ] = (syscall_handler_t *) sys_getitimer, | ||
141 | [ __NR_stat ] = (syscall_handler_t *) sys_newstat, | ||
142 | [ __NR_lstat ] = (syscall_handler_t *) sys_newlstat, | ||
143 | [ __NR_fstat ] = (syscall_handler_t *) sys_newfstat, | ||
144 | [ __NR_vhangup ] = (syscall_handler_t *) sys_vhangup, | ||
145 | [ __NR_wait4 ] = (syscall_handler_t *) sys_wait4, | ||
146 | [ __NR_swapoff ] = (syscall_handler_t *) sys_swapoff, | ||
147 | [ __NR_sysinfo ] = (syscall_handler_t *) sys_sysinfo, | ||
148 | [ __NR_fsync ] = (syscall_handler_t *) sys_fsync, | ||
149 | [ __NR_clone ] = (syscall_handler_t *) sys_clone, | ||
150 | [ __NR_setdomainname ] = (syscall_handler_t *) sys_setdomainname, | ||
151 | [ __NR_uname ] = (syscall_handler_t *) sys_newuname, | ||
152 | [ __NR_adjtimex ] = (syscall_handler_t *) sys_adjtimex, | ||
153 | [ __NR_mprotect ] = (syscall_handler_t *) sys_mprotect, | ||
154 | [ __NR_create_module ] = (syscall_handler_t *) sys_ni_syscall, | ||
155 | [ __NR_init_module ] = (syscall_handler_t *) sys_init_module, | ||
156 | [ __NR_delete_module ] = (syscall_handler_t *) sys_delete_module, | ||
157 | [ __NR_get_kernel_syms ] = (syscall_handler_t *) sys_ni_syscall, | ||
158 | [ __NR_quotactl ] = (syscall_handler_t *) sys_quotactl, | ||
159 | [ __NR_getpgid ] = (syscall_handler_t *) sys_getpgid, | ||
160 | [ __NR_fchdir ] = (syscall_handler_t *) sys_fchdir, | ||
161 | [ __NR_sysfs ] = (syscall_handler_t *) sys_sysfs, | ||
162 | [ __NR_personality ] = (syscall_handler_t *) sys_personality, | ||
163 | [ __NR_afs_syscall ] = (syscall_handler_t *) sys_ni_syscall, | ||
164 | [ __NR_setfsuid ] = (syscall_handler_t *) sys_setfsuid16, | ||
165 | [ __NR_setfsgid ] = (syscall_handler_t *) sys_setfsgid16, | ||
166 | [ __NR_getdents ] = (syscall_handler_t *) sys_getdents, | ||
167 | [ __NR_flock ] = (syscall_handler_t *) sys_flock, | ||
168 | [ __NR_msync ] = (syscall_handler_t *) sys_msync, | ||
169 | [ __NR_readv ] = (syscall_handler_t *) sys_readv, | ||
170 | [ __NR_writev ] = (syscall_handler_t *) sys_writev, | ||
171 | [ __NR_getsid ] = (syscall_handler_t *) sys_getsid, | ||
172 | [ __NR_fdatasync ] = (syscall_handler_t *) sys_fdatasync, | ||
173 | [ __NR__sysctl ] = (syscall_handler_t *) sys_sysctl, | ||
174 | [ __NR_mlock ] = (syscall_handler_t *) sys_mlock, | ||
175 | [ __NR_munlock ] = (syscall_handler_t *) sys_munlock, | ||
176 | [ __NR_mlockall ] = (syscall_handler_t *) sys_mlockall, | ||
177 | [ __NR_munlockall ] = (syscall_handler_t *) sys_munlockall, | ||
178 | [ __NR_sched_setparam ] = (syscall_handler_t *) sys_sched_setparam, | ||
179 | [ __NR_sched_getparam ] = (syscall_handler_t *) sys_sched_getparam, | ||
180 | [ __NR_sched_setscheduler ] = (syscall_handler_t *) sys_sched_setscheduler, | ||
181 | [ __NR_sched_getscheduler ] = (syscall_handler_t *) sys_sched_getscheduler, | ||
182 | [ __NR_sched_yield ] = (syscall_handler_t *) yield, | ||
183 | [ __NR_sched_get_priority_max ] = (syscall_handler_t *) sys_sched_get_priority_max, | ||
184 | [ __NR_sched_get_priority_min ] = (syscall_handler_t *) sys_sched_get_priority_min, | ||
185 | [ __NR_sched_rr_get_interval ] = (syscall_handler_t *) sys_sched_rr_get_interval, | ||
186 | [ __NR_nanosleep ] = (syscall_handler_t *) sys_nanosleep, | ||
187 | [ __NR_mremap ] = (syscall_handler_t *) sys_mremap, | ||
188 | [ __NR_setresuid ] = (syscall_handler_t *) sys_setresuid16, | ||
189 | [ __NR_getresuid ] = (syscall_handler_t *) sys_getresuid16, | ||
190 | [ __NR_query_module ] = (syscall_handler_t *) sys_ni_syscall, | ||
191 | [ __NR_poll ] = (syscall_handler_t *) sys_poll, | ||
192 | [ __NR_nfsservctl ] = (syscall_handler_t *) NFSSERVCTL, | ||
193 | [ __NR_setresgid ] = (syscall_handler_t *) sys_setresgid16, | ||
194 | [ __NR_getresgid ] = (syscall_handler_t *) sys_getresgid16, | ||
195 | [ __NR_prctl ] = (syscall_handler_t *) sys_prctl, | ||
196 | [ __NR_rt_sigreturn ] = (syscall_handler_t *) sys_rt_sigreturn, | ||
197 | [ __NR_rt_sigaction ] = (syscall_handler_t *) sys_rt_sigaction, | ||
198 | [ __NR_rt_sigprocmask ] = (syscall_handler_t *) sys_rt_sigprocmask, | ||
199 | [ __NR_rt_sigpending ] = (syscall_handler_t *) sys_rt_sigpending, | ||
200 | [ __NR_rt_sigtimedwait ] = (syscall_handler_t *) sys_rt_sigtimedwait, | ||
201 | [ __NR_rt_sigqueueinfo ] = (syscall_handler_t *) sys_rt_sigqueueinfo, | ||
202 | [ __NR_rt_sigsuspend ] = (syscall_handler_t *) sys_rt_sigsuspend, | ||
203 | [ __NR_pread64 ] = (syscall_handler_t *) sys_pread64, | ||
204 | [ __NR_pwrite64 ] = (syscall_handler_t *) sys_pwrite64, | ||
205 | [ __NR_chown ] = (syscall_handler_t *) sys_chown16, | ||
206 | [ __NR_getcwd ] = (syscall_handler_t *) sys_getcwd, | ||
207 | [ __NR_capget ] = (syscall_handler_t *) sys_capget, | ||
208 | [ __NR_capset ] = (syscall_handler_t *) sys_capset, | ||
209 | [ __NR_sigaltstack ] = (syscall_handler_t *) sys_sigaltstack, | ||
210 | [ __NR_sendfile ] = (syscall_handler_t *) sys_sendfile, | ||
211 | [ __NR_getpmsg ] = (syscall_handler_t *) sys_ni_syscall, | ||
212 | [ __NR_putpmsg ] = (syscall_handler_t *) sys_ni_syscall, | ||
213 | [ __NR_vfork ] = (syscall_handler_t *) sys_vfork, | ||
214 | [ __NR_getdents64 ] = (syscall_handler_t *) sys_getdents64, | ||
215 | [ __NR_gettid ] = (syscall_handler_t *) sys_gettid, | ||
216 | [ __NR_readahead ] = (syscall_handler_t *) sys_readahead, | ||
217 | [ __NR_setxattr ] = (syscall_handler_t *) sys_setxattr, | ||
218 | [ __NR_lsetxattr ] = (syscall_handler_t *) sys_lsetxattr, | ||
219 | [ __NR_fsetxattr ] = (syscall_handler_t *) sys_fsetxattr, | ||
220 | [ __NR_getxattr ] = (syscall_handler_t *) sys_getxattr, | ||
221 | [ __NR_lgetxattr ] = (syscall_handler_t *) sys_lgetxattr, | ||
222 | [ __NR_fgetxattr ] = (syscall_handler_t *) sys_fgetxattr, | ||
223 | [ __NR_listxattr ] = (syscall_handler_t *) sys_listxattr, | ||
224 | [ __NR_llistxattr ] = (syscall_handler_t *) sys_llistxattr, | ||
225 | [ __NR_flistxattr ] = (syscall_handler_t *) sys_flistxattr, | ||
226 | [ __NR_removexattr ] = (syscall_handler_t *) sys_removexattr, | ||
227 | [ __NR_lremovexattr ] = (syscall_handler_t *) sys_lremovexattr, | ||
228 | [ __NR_fremovexattr ] = (syscall_handler_t *) sys_fremovexattr, | ||
229 | [ __NR_tkill ] = (syscall_handler_t *) sys_tkill, | ||
230 | [ __NR_futex ] = (syscall_handler_t *) sys_futex, | ||
231 | [ __NR_sched_setaffinity ] = (syscall_handler_t *) sys_sched_setaffinity, | ||
232 | [ __NR_sched_getaffinity ] = (syscall_handler_t *) sys_sched_getaffinity, | ||
233 | [ __NR_io_setup ] = (syscall_handler_t *) sys_io_setup, | ||
234 | [ __NR_io_destroy ] = (syscall_handler_t *) sys_io_destroy, | ||
235 | [ __NR_io_getevents ] = (syscall_handler_t *) sys_io_getevents, | ||
236 | [ __NR_io_submit ] = (syscall_handler_t *) sys_io_submit, | ||
237 | [ __NR_io_cancel ] = (syscall_handler_t *) sys_io_cancel, | ||
238 | [ __NR_exit_group ] = (syscall_handler_t *) sys_exit_group, | ||
239 | [ __NR_lookup_dcookie ] = (syscall_handler_t *) sys_lookup_dcookie, | ||
240 | [ __NR_epoll_create ] = (syscall_handler_t *) sys_epoll_create, | ||
241 | [ __NR_epoll_ctl ] = (syscall_handler_t *) sys_epoll_ctl, | ||
242 | [ __NR_epoll_wait ] = (syscall_handler_t *) sys_epoll_wait, | ||
243 | [ __NR_remap_file_pages ] = (syscall_handler_t *) sys_remap_file_pages, | ||
244 | [ __NR_set_tid_address ] = (syscall_handler_t *) sys_set_tid_address, | ||
245 | [ __NR_timer_create ] = (syscall_handler_t *) sys_timer_create, | ||
246 | [ __NR_timer_settime ] = (syscall_handler_t *) sys_timer_settime, | ||
247 | [ __NR_timer_gettime ] = (syscall_handler_t *) sys_timer_gettime, | ||
248 | [ __NR_timer_getoverrun ] = (syscall_handler_t *) sys_timer_getoverrun, | ||
249 | [ __NR_timer_delete ] = (syscall_handler_t *) sys_timer_delete, | ||
250 | [ __NR_clock_settime ] = (syscall_handler_t *) sys_clock_settime, | ||
251 | [ __NR_clock_gettime ] = (syscall_handler_t *) sys_clock_gettime, | ||
252 | [ __NR_clock_getres ] = (syscall_handler_t *) sys_clock_getres, | ||
253 | [ __NR_clock_nanosleep ] = (syscall_handler_t *) sys_clock_nanosleep, | ||
254 | [ __NR_tgkill ] = (syscall_handler_t *) sys_tgkill, | ||
255 | [ __NR_utimes ] = (syscall_handler_t *) sys_utimes, | ||
256 | [ __NR_fadvise64 ] = (syscall_handler_t *) sys_fadvise64, | ||
257 | [ __NR_vserver ] = (syscall_handler_t *) sys_ni_syscall, | ||
258 | [ __NR_mbind ] = (syscall_handler_t *) sys_mbind, | ||
259 | [ __NR_get_mempolicy ] = (syscall_handler_t *) sys_get_mempolicy, | ||
260 | [ __NR_set_mempolicy ] = (syscall_handler_t *) sys_set_mempolicy, | ||
261 | [ __NR_mq_open ] = (syscall_handler_t *) sys_mq_open, | ||
262 | [ __NR_mq_unlink ] = (syscall_handler_t *) sys_mq_unlink, | ||
263 | [ __NR_mq_timedsend ] = (syscall_handler_t *) sys_mq_timedsend, | ||
264 | [ __NR_mq_timedreceive ] = (syscall_handler_t *) sys_mq_timedreceive, | ||
265 | [ __NR_mq_notify ] = (syscall_handler_t *) sys_mq_notify, | ||
266 | [ __NR_mq_getsetattr ] = (syscall_handler_t *) sys_mq_getsetattr, | ||
267 | [ __NR_kexec_load ] = (syscall_handler_t *) sys_ni_syscall, | ||
268 | [ __NR_waitid ] = (syscall_handler_t *) sys_waitid, | ||
269 | [ __NR_add_key ] = (syscall_handler_t *) sys_add_key, | ||
270 | [ __NR_request_key ] = (syscall_handler_t *) sys_request_key, | ||
271 | [ __NR_keyctl ] = (syscall_handler_t *) sys_keyctl, | ||
272 | |||
273 | ARCH_SYSCALLS | ||
274 | [ LAST_SYSCALL + 1 ... NR_syscalls ] = | ||
275 | (syscall_handler_t *) sys_ni_syscall | ||
276 | }; | ||
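For context on the table just removed: it was built with GNU C designated and range array initializers, where unnamed slots default to zero and [ A ... B ] fills a span with one value. A minimal demo of that syntax (demo names only; requires GCC):

#include <stdio.h>

typedef long handler_t(void);

static long nosys(void)   { return -1; }
static long do_read(void) { return 0; }

#define NR_DEMO_CALLS 8

static handler_t *demo_table[NR_DEMO_CALLS + 1] = {
	[0] = do_read,
	[3 ... NR_DEMO_CALLS] = nosys,	/* GCC extension: range designator */
};

int main(void)
{
	/* slots 1 and 2 were never named, so they are NULL */
	printf("%p %p %p\n", (void *) demo_table[0],
	       (void *) demo_table[1], (void *) demo_table[5]);
	return 0;
}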
diff --git a/arch/um/kernel/syscall_kern.c b/arch/um/kernel/syscall_kern.c index 42731e04f50f..b7a55251e897 100644 --- a/arch/um/kernel/syscall_kern.c +++ b/arch/um/kernel/syscall_kern.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include "linux/utime.h" | 17 | #include "linux/utime.h" |
18 | #include "asm/mman.h" | 18 | #include "asm/mman.h" |
19 | #include "asm/uaccess.h" | 19 | #include "asm/uaccess.h" |
20 | #include "asm/ipc.h" | ||
21 | #include "kern_util.h" | 20 | #include "kern_util.h" |
22 | #include "user_util.h" | 21 | #include "user_util.h" |
23 | #include "sysdep/syscalls.h" | 22 | #include "sysdep/syscalls.h" |
diff --git a/arch/um/kernel/time_kern.c b/arch/um/kernel/time_kern.c index 2461cd73ca87..6516fc52afe0 100644 --- a/arch/um/kernel/time_kern.c +++ b/arch/um/kernel/time_kern.c | |||
@@ -48,8 +48,6 @@ static unsigned long long prev_usecs; | |||
48 | static long long delta; /* Deviation per interval */ | 48 | static long long delta; /* Deviation per interval */ |
49 | #endif | 49 | #endif |
50 | 50 | ||
51 | #define MILLION 1000000 | ||
52 | |||
53 | void timer_irq(union uml_pt_regs *regs) | 51 | void timer_irq(union uml_pt_regs *regs) |
54 | { | 52 | { |
55 | unsigned long long ticks = 0; | 53 | unsigned long long ticks = 0; |
@@ -136,22 +134,6 @@ long um_stime(int __user *tptr) | |||
136 | return 0; | 134 | return 0; |
137 | } | 135 | } |
138 | 136 | ||
139 | void __udelay(unsigned long usecs) | ||
140 | { | ||
141 | int i, n; | ||
142 | |||
143 | n = (loops_per_jiffy * HZ * usecs) / MILLION; | ||
144 | for(i=0;i<n;i++) ; | ||
145 | } | ||
146 | |||
147 | void __const_udelay(unsigned long usecs) | ||
148 | { | ||
149 | int i, n; | ||
150 | |||
151 | n = (loops_per_jiffy * HZ * usecs) / MILLION; | ||
152 | for(i=0;i<n;i++) ; | ||
153 | } | ||
154 | |||
155 | void timer_handler(int sig, union uml_pt_regs *regs) | 137 | void timer_handler(int sig, union uml_pt_regs *regs) |
156 | { | 138 | { |
157 | local_irq_disable(); | 139 | local_irq_disable(); |
diff --git a/arch/um/kernel/trap_kern.c b/arch/um/kernel/trap_kern.c index 47e766e6ba10..5fca2c61eb98 100644 --- a/arch/um/kernel/trap_kern.c +++ b/arch/um/kernel/trap_kern.c | |||
@@ -48,7 +48,7 @@ int handle_page_fault(unsigned long address, unsigned long ip, | |||
48 | goto good_area; | 48 | goto good_area; |
49 | else if(!(vma->vm_flags & VM_GROWSDOWN)) | 49 | else if(!(vma->vm_flags & VM_GROWSDOWN)) |
50 | goto out; | 50 | goto out; |
51 | else if(!ARCH_IS_STACKGROW(address)) | 51 | else if(is_user && !ARCH_IS_STACKGROW(address)) |
52 | goto out; | 52 | goto out; |
53 | else if(expand_stack(vma, address)) | 53 | else if(expand_stack(vma, address)) |
54 | goto out; | 54 | goto out; |
@@ -133,12 +133,19 @@ static int check_remapped_addr(unsigned long address, int is_write) | |||
133 | return(0); | 133 | return(0); |
134 | } | 134 | } |
135 | 135 | ||
136 | unsigned long segv(unsigned long address, unsigned long ip, int is_write, | 136 | /* |
137 | int is_user, void *sc) | 137 | * We give a *copy* of the faultinfo in the regs to segv. |
138 | * This must be done, since nesting SEGVs could overwrite | ||
139 | * the info in the regs. A pointer to the info then would | ||
140 | * give us bad data! | ||
141 | */ | ||
142 | unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, void *sc) | ||
138 | { | 143 | { |
139 | struct siginfo si; | 144 | struct siginfo si; |
140 | void *catcher; | 145 | void *catcher; |
141 | int err; | 146 | int err; |
147 | int is_write = FAULT_WRITE(fi); | ||
148 | unsigned long address = FAULT_ADDRESS(fi); | ||
142 | 149 | ||
143 | if(!is_user && (address >= start_vm) && (address < end_vm)){ | 150 | if(!is_user && (address >= start_vm) && (address < end_vm)){ |
144 | flush_tlb_kernel_vm(); | 151 | flush_tlb_kernel_vm(); |
@@ -159,7 +166,7 @@ unsigned long segv(unsigned long address, unsigned long ip, int is_write, | |||
159 | } | 166 | } |
160 | else if(current->thread.fault_addr != NULL) | 167 | else if(current->thread.fault_addr != NULL) |
161 | panic("fault_addr set but no fault catcher"); | 168 | panic("fault_addr set but no fault catcher"); |
162 | else if(arch_fixup(ip, sc)) | 169 | else if(!is_user && arch_fixup(ip, sc)) |
163 | return(0); | 170 | return(0); |
164 | 171 | ||
165 | if(!is_user) | 172 | if(!is_user) |
@@ -171,6 +178,7 @@ unsigned long segv(unsigned long address, unsigned long ip, int is_write, | |||
171 | si.si_errno = 0; | 178 | si.si_errno = 0; |
172 | si.si_code = BUS_ADRERR; | 179 | si.si_code = BUS_ADRERR; |
173 | si.si_addr = (void *)address; | 180 | si.si_addr = (void *)address; |
181 | current->thread.arch.faultinfo = fi; | ||
174 | force_sig_info(SIGBUS, &si, current); | 182 | force_sig_info(SIGBUS, &si, current); |
175 | } | 183 | } |
176 | else if(err == -ENOMEM){ | 184 | else if(err == -ENOMEM){ |
@@ -180,22 +188,20 @@ unsigned long segv(unsigned long address, unsigned long ip, int is_write, | |||
180 | else { | 188 | else { |
181 | si.si_signo = SIGSEGV; | 189 | si.si_signo = SIGSEGV; |
182 | si.si_addr = (void *) address; | 190 | si.si_addr = (void *) address; |
183 | current->thread.cr2 = address; | 191 | current->thread.arch.faultinfo = fi; |
184 | current->thread.err = is_write; | ||
185 | force_sig_info(SIGSEGV, &si, current); | 192 | force_sig_info(SIGSEGV, &si, current); |
186 | } | 193 | } |
187 | return(0); | 194 | return(0); |
188 | } | 195 | } |
189 | 196 | ||
190 | void bad_segv(unsigned long address, unsigned long ip, int is_write) | 197 | void bad_segv(struct faultinfo fi, unsigned long ip) |
191 | { | 198 | { |
192 | struct siginfo si; | 199 | struct siginfo si; |
193 | 200 | ||
194 | si.si_signo = SIGSEGV; | 201 | si.si_signo = SIGSEGV; |
195 | si.si_code = SEGV_ACCERR; | 202 | si.si_code = SEGV_ACCERR; |
196 | si.si_addr = (void *) address; | 203 | si.si_addr = (void *) FAULT_ADDRESS(fi); |
197 | current->thread.cr2 = address; | 204 | current->thread.arch.faultinfo = fi; |
198 | current->thread.err = is_write; | ||
199 | force_sig_info(SIGSEGV, &si, current); | 205 | force_sig_info(SIGSEGV, &si, current); |
200 | } | 206 | } |
201 | 207 | ||
@@ -204,6 +210,7 @@ void relay_signal(int sig, union uml_pt_regs *regs) | |||
204 | if(arch_handle_signal(sig, regs)) return; | 210 | if(arch_handle_signal(sig, regs)) return; |
205 | if(!UPT_IS_USER(regs)) | 211 | if(!UPT_IS_USER(regs)) |
206 | panic("Kernel mode signal %d", sig); | 212 | panic("Kernel mode signal %d", sig); |
213 | current->thread.arch.faultinfo = *UPT_FAULTINFO(regs); | ||
207 | force_sig(sig, current); | 214 | force_sig(sig, current); |
208 | } | 215 | } |
209 | 216 | ||
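The comment added above segv() explains why the fault record is now passed by value. A toy sketch of the difference (all names invented, not UML code): a copy made at call time is immune to later overwrites of the shared source, while a pointer is not.

#include <stdio.h>

struct fault_demo {
	unsigned long addr;
	int is_write;
};

/*
 * 'byval' was copied when the call was made; 'shared' may be rewritten
 * by a nested fault while the handler runs.
 */
static unsigned long report(struct fault_demo byval, struct fault_demo *shared)
{
	shared->addr = 0;	/* simulate a nested fault clobbering the source */
	return byval.addr;	/* still the address captured at call time */
}

int main(void)
{
	struct fault_demo live = { .addr = 0x1234, .is_write = 1 };

	printf("%#lx\n", report(live, &live));	/* prints 0x1234 */
	return 0;
}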
diff --git a/arch/um/kernel/trap_user.c b/arch/um/kernel/trap_user.c index 50a4042a509f..f825a6eda3f5 100644 --- a/arch/um/kernel/trap_user.c +++ b/arch/um/kernel/trap_user.c | |||
@@ -54,23 +54,22 @@ struct { | |||
54 | void segv_handler(int sig, union uml_pt_regs *regs) | 54 | void segv_handler(int sig, union uml_pt_regs *regs) |
55 | { | 55 | { |
56 | int index, max; | 56 | int index, max; |
57 | struct faultinfo * fi = UPT_FAULTINFO(regs); | ||
57 | 58 | ||
58 | if(UPT_IS_USER(regs) && !UPT_SEGV_IS_FIXABLE(regs)){ | 59 | if(UPT_IS_USER(regs) && !SEGV_IS_FIXABLE(fi)){ |
59 | bad_segv(UPT_FAULT_ADDR(regs), UPT_IP(regs), | 60 | bad_segv(*fi, UPT_IP(regs)); |
60 | UPT_FAULT_WRITE(regs)); | ||
61 | return; | 61 | return; |
62 | } | 62 | } |
63 | max = sizeof(segfault_record)/sizeof(segfault_record[0]); | 63 | max = sizeof(segfault_record)/sizeof(segfault_record[0]); |
64 | index = next_trap_index(max); | 64 | index = next_trap_index(max); |
65 | 65 | ||
66 | nsegfaults++; | 66 | nsegfaults++; |
67 | segfault_record[index].address = UPT_FAULT_ADDR(regs); | 67 | segfault_record[index].address = FAULT_ADDRESS(*fi); |
68 | segfault_record[index].pid = os_getpid(); | 68 | segfault_record[index].pid = os_getpid(); |
69 | segfault_record[index].is_write = UPT_FAULT_WRITE(regs); | 69 | segfault_record[index].is_write = FAULT_WRITE(*fi); |
70 | segfault_record[index].sp = UPT_SP(regs); | 70 | segfault_record[index].sp = UPT_SP(regs); |
71 | segfault_record[index].is_user = UPT_IS_USER(regs); | 71 | segfault_record[index].is_user = UPT_IS_USER(regs); |
72 | segv(UPT_FAULT_ADDR(regs), UPT_IP(regs), UPT_FAULT_WRITE(regs), | 72 | segv(*fi, UPT_IP(regs), UPT_IS_USER(regs), regs); |
73 | UPT_IS_USER(regs), regs); | ||
74 | } | 73 | } |
75 | 74 | ||
76 | void usr2_handler(int sig, union uml_pt_regs *regs) | 75 | void usr2_handler(int sig, union uml_pt_regs *regs) |
diff --git a/arch/um/kernel/tt/Makefile b/arch/um/kernel/tt/Makefile index 3d5177df3504..c3faea21a996 100644 --- a/arch/um/kernel/tt/Makefile +++ b/arch/um/kernel/tt/Makefile | |||
@@ -4,6 +4,7 @@ | |||
4 | # | 4 | # |
5 | 5 | ||
6 | extra-y := unmap_fin.o | 6 | extra-y := unmap_fin.o |
7 | targets := unmap.o | ||
7 | clean-files := unmap_tmp.o | 8 | clean-files := unmap_tmp.o |
8 | 9 | ||
9 | obj-y = exec_kern.o exec_user.o gdb.o ksyms.o mem.o mem_user.o process_kern.o \ | 10 | obj-y = exec_kern.o exec_user.o gdb.o ksyms.o mem.o mem_user.o process_kern.o \ |
diff --git a/arch/um/kernel/tt/include/mode_kern-tt.h b/arch/um/kernel/tt/include/mode_kern-tt.h index 28aaab3448fa..e0ca0e0b2516 100644 --- a/arch/um/kernel/tt/include/mode_kern-tt.h +++ b/arch/um/kernel/tt/include/mode_kern-tt.h | |||
@@ -19,7 +19,6 @@ extern int copy_thread_tt(int nr, unsigned long clone_flags, unsigned long sp, | |||
19 | unsigned long stack_top, struct task_struct *p, | 19 | unsigned long stack_top, struct task_struct *p, |
20 | struct pt_regs *regs); | 20 | struct pt_regs *regs); |
21 | extern void release_thread_tt(struct task_struct *task); | 21 | extern void release_thread_tt(struct task_struct *task); |
22 | extern void exit_thread_tt(void); | ||
23 | extern void initial_thread_cb_tt(void (*proc)(void *), void *arg); | 22 | extern void initial_thread_cb_tt(void (*proc)(void *), void *arg); |
24 | extern void init_idle_tt(void); | 23 | extern void init_idle_tt(void); |
25 | extern void flush_tlb_kernel_range_tt(unsigned long start, unsigned long end); | 24 | extern void flush_tlb_kernel_range_tt(unsigned long start, unsigned long end); |
diff --git a/arch/um/kernel/tt/include/uaccess-tt.h b/arch/um/kernel/tt/include/uaccess-tt.h index f0bad010cebd..3fbb5fe26f49 100644 --- a/arch/um/kernel/tt/include/uaccess-tt.h +++ b/arch/um/kernel/tt/include/uaccess-tt.h | |||
@@ -34,7 +34,7 @@ extern unsigned long uml_physmem; | |||
34 | (under_task_size(addr, size) || is_stack(addr, size)))) | 34 | (under_task_size(addr, size) || is_stack(addr, size)))) |
35 | 35 | ||
36 | static inline int verify_area_tt(int type, const void * addr, | 36 | static inline int verify_area_tt(int type, const void * addr, |
37 | unsigned long size) | 37 | unsigned long size) |
38 | { | 38 | { |
39 | return(access_ok_tt(type, addr, size) ? 0 : -EFAULT); | 39 | return(access_ok_tt(type, addr, size) ? 0 : -EFAULT); |
40 | } | 40 | } |
diff --git a/arch/um/kernel/tt/mem.c b/arch/um/kernel/tt/mem.c index 74346a04a2b2..bcb8796c3cb1 100644 --- a/arch/um/kernel/tt/mem.c +++ b/arch/um/kernel/tt/mem.c | |||
@@ -21,14 +21,8 @@ void before_mem_tt(unsigned long brk_start) | |||
21 | remap_data(UML_ROUND_DOWN(&__bss_start), UML_ROUND_UP(&_end), 1); | 21 | remap_data(UML_ROUND_DOWN(&__bss_start), UML_ROUND_UP(&_end), 1); |
22 | } | 22 | } |
23 | 23 | ||
24 | #ifdef CONFIG_HOST_2G_2G | ||
25 | #define TOP 0x80000000 | ||
26 | #else | ||
27 | #define TOP 0xc0000000 | ||
28 | #endif | ||
29 | |||
30 | #define SIZE ((CONFIG_NEST_LEVEL + CONFIG_KERNEL_HALF_GIGS) * 0x20000000) | 24 | #define SIZE ((CONFIG_NEST_LEVEL + CONFIG_KERNEL_HALF_GIGS) * 0x20000000) |
31 | #define START (TOP - SIZE) | 25 | #define START (CONFIG_TOP_ADDR - SIZE) |
32 | 26 | ||
33 | unsigned long set_task_sizes_tt(int arg, unsigned long *host_size_out, | 27 | unsigned long set_task_sizes_tt(int arg, unsigned long *host_size_out, |
34 | unsigned long *task_size_out) | 28 | unsigned long *task_size_out) |
diff --git a/arch/um/kernel/tt/process_kern.c b/arch/um/kernel/tt/process_kern.c index f19f7c18febe..df810ca8fc12 100644 --- a/arch/um/kernel/tt/process_kern.c +++ b/arch/um/kernel/tt/process_kern.c | |||
@@ -65,8 +65,7 @@ void *switch_to_tt(void *prev, void *next, void *last) | |||
65 | panic("write of switch_pipe failed, err = %d", -err); | 65 | panic("write of switch_pipe failed, err = %d", -err); |
66 | 66 | ||
67 | reading = 1; | 67 | reading = 1; |
68 | if((from->exit_state == EXIT_ZOMBIE) || | 68 | if(from->thread.mode.tt.switch_pipe[0] == -1) |
69 | (from->exit_state == EXIT_DEAD)) | ||
70 | os_kill_process(os_getpid(), 0); | 69 | os_kill_process(os_getpid(), 0); |
71 | 70 | ||
72 | err = os_read_file(from->thread.mode.tt.switch_pipe[0], &c, sizeof(c)); | 71 | err = os_read_file(from->thread.mode.tt.switch_pipe[0], &c, sizeof(c)); |
@@ -81,8 +80,7 @@ void *switch_to_tt(void *prev, void *next, void *last) | |||
81 | * in case it has not already killed itself. | 80 | * in case it has not already killed itself. |
82 | */ | 81 | */ |
83 | prev_sched = current->thread.prev_sched; | 82 | prev_sched = current->thread.prev_sched; |
84 | if((prev_sched->exit_state == EXIT_ZOMBIE) || | 83 | if(prev_sched->thread.mode.tt.switch_pipe[0] == -1) |
85 | (prev_sched->exit_state == EXIT_DEAD)) | ||
86 | os_kill_process(prev_sched->thread.mode.tt.extern_pid, 1); | 84 | os_kill_process(prev_sched->thread.mode.tt.extern_pid, 1); |
87 | 85 | ||
88 | change_sig(SIGVTALRM, vtalrm); | 86 | change_sig(SIGVTALRM, vtalrm); |
@@ -101,14 +99,18 @@ void release_thread_tt(struct task_struct *task) | |||
101 | { | 99 | { |
102 | int pid = task->thread.mode.tt.extern_pid; | 100 | int pid = task->thread.mode.tt.extern_pid; |
103 | 101 | ||
102 | /* | ||
103 | * We first have to kill the other process, before | ||
104 | * closing its switch_pipe. Else it might wake up | ||
105 | * and receive "EOF" before we could kill it. | ||
106 | */ | ||
104 | if(os_getpid() != pid) | 107 | if(os_getpid() != pid) |
105 | os_kill_process(pid, 0); | 108 | os_kill_process(pid, 0); |
106 | } | ||
107 | 109 | ||
108 | void exit_thread_tt(void) | 110 | os_close_file(task->thread.mode.tt.switch_pipe[0]); |
109 | { | 111 | os_close_file(task->thread.mode.tt.switch_pipe[1]); |
110 | os_close_file(current->thread.mode.tt.switch_pipe[0]); | 112 | /* use switch_pipe as flag: thread is released */ |
111 | os_close_file(current->thread.mode.tt.switch_pipe[1]); | 113 | task->thread.mode.tt.switch_pipe[0] = -1; |
112 | } | 114 | } |
113 | 115 | ||
114 | void suspend_new_thread(int fd) | 116 | void suspend_new_thread(int fd) |
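release_thread_tt() above now kills the peer before closing its pipe and then reuses switch_pipe[0] == -1 as a "released" flag. A generic sketch of that close-and-mark idiom follows; the struct and function names are illustrative, not from the patch.

#include <unistd.h>

struct peer {
	int switch_pipe[2];
};

static void peer_release(struct peer *p)
{
	if (p->switch_pipe[0] == -1)	/* already released */
		return;
	close(p->switch_pipe[0]);
	close(p->switch_pipe[1]);
	p->switch_pipe[0] = -1;		/* mark released; -1 is never a valid fd */
}

int main(void)
{
	struct peer p;

	if (pipe(p.switch_pipe) == 0) {
		peer_release(&p);
		peer_release(&p);	/* second call is a no-op */
	}
	return 0;
}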
diff --git a/arch/um/kernel/tt/syscall_user.c b/arch/um/kernel/tt/syscall_user.c index e4e7e9c2224c..b218316cfdb2 100644 --- a/arch/um/kernel/tt/syscall_user.c +++ b/arch/um/kernel/tt/syscall_user.c | |||
@@ -63,6 +63,10 @@ void do_syscall(void *task, int pid, int local_using_sysemu) | |||
63 | 63 | ||
64 | UPT_SYSCALL_NR(TASK_REGS(task)) = PT_SYSCALL_NR(proc_regs); | 64 | UPT_SYSCALL_NR(TASK_REGS(task)) = PT_SYSCALL_NR(proc_regs); |
65 | 65 | ||
66 | #ifdef UPT_ORIGGPR2 | ||
67 | UPT_ORIGGPR2(TASK_REGS(task)) = REGS_ORIGGPR2(proc_regs); | ||
68 | #endif | ||
69 | |||
66 | if(((unsigned long *) PT_IP(proc_regs) >= &_stext) && | 70 | if(((unsigned long *) PT_IP(proc_regs) >= &_stext) && |
67 | ((unsigned long *) PT_IP(proc_regs) <= &_etext)) | 71 | ((unsigned long *) PT_IP(proc_regs) <= &_etext)) |
68 | tracer_panic("I'm tracing myself and I can't get out"); | 72 | tracer_panic("I'm tracing myself and I can't get out"); |
diff --git a/arch/um/kernel/tt/tracer.c b/arch/um/kernel/tt/tracer.c index 7b5d937e5955..d11e7399d7a1 100644 --- a/arch/um/kernel/tt/tracer.c +++ b/arch/um/kernel/tt/tracer.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include "kern_util.h" | 26 | #include "kern_util.h" |
27 | #include "chan_user.h" | 27 | #include "chan_user.h" |
28 | #include "ptrace_user.h" | 28 | #include "ptrace_user.h" |
29 | #include "irq_user.h" | ||
29 | #include "mode.h" | 30 | #include "mode.h" |
30 | #include "tt.h" | 31 | #include "tt.h" |
31 | 32 | ||
@@ -33,7 +34,7 @@ static int tracer_winch[2]; | |||
33 | 34 | ||
34 | int is_tracer_winch(int pid, int fd, void *data) | 35 | int is_tracer_winch(int pid, int fd, void *data) |
35 | { | 36 | { |
36 | if(pid != tracing_pid) | 37 | if(pid != os_getpgrp()) |
37 | return(0); | 38 | return(0); |
38 | 39 | ||
39 | register_winch_irq(tracer_winch[0], fd, -1, data); | 40 | register_winch_irq(tracer_winch[0], fd, -1, data); |
@@ -89,8 +90,10 @@ void tracer_panic(char *format, ...) | |||
89 | 90 | ||
90 | static void tracer_segv(int sig, struct sigcontext sc) | 91 | static void tracer_segv(int sig, struct sigcontext sc) |
91 | { | 92 | { |
93 | struct faultinfo fi; | ||
94 | GET_FAULTINFO_FROM_SC(fi, &sc); | ||
92 | printf("Tracing thread segfault at address 0x%lx, ip 0x%lx\n", | 95 | printf("Tracing thread segfault at address 0x%lx, ip 0x%lx\n", |
93 | SC_FAULT_ADDR(&sc), SC_IP(&sc)); | 96 | FAULT_ADDRESS(fi), SC_IP(&sc)); |
94 | while(1) | 97 | while(1) |
95 | pause(); | 98 | pause(); |
96 | } | 99 | } |
@@ -117,6 +120,7 @@ static int signal_tramp(void *arg) | |||
117 | signal(SIGSEGV, (__sighandler_t) sig_handler); | 120 | signal(SIGSEGV, (__sighandler_t) sig_handler); |
118 | set_cmdline("(idle thread)"); | 121 | set_cmdline("(idle thread)"); |
119 | set_init_pid(os_getpid()); | 122 | set_init_pid(os_getpid()); |
123 | init_irq_signals(0); | ||
120 | proc = arg; | 124 | proc = arg; |
121 | return((*proc)(NULL)); | 125 | return((*proc)(NULL)); |
122 | } | 126 | } |
diff --git a/arch/um/kernel/tt/trap_user.c b/arch/um/kernel/tt/trap_user.c index 92a3820ca543..fc108615beaf 100644 --- a/arch/um/kernel/tt/trap_user.c +++ b/arch/um/kernel/tt/trap_user.c | |||
@@ -7,6 +7,7 @@ | |||
7 | #include <errno.h> | 7 | #include <errno.h> |
8 | #include <signal.h> | 8 | #include <signal.h> |
9 | #include "sysdep/ptrace.h" | 9 | #include "sysdep/ptrace.h" |
10 | #include "sysdep/sigcontext.h" | ||
10 | #include "signal_user.h" | 11 | #include "signal_user.h" |
11 | #include "user_util.h" | 12 | #include "user_util.h" |
12 | #include "kern_util.h" | 13 | #include "kern_util.h" |
@@ -28,6 +29,11 @@ void sig_handler_common_tt(int sig, void *sc_ptr) | |||
28 | change_sig(SIGSEGV, 1); | 29 | change_sig(SIGSEGV, 1); |
29 | 30 | ||
30 | r = &TASK_REGS(get_current())->tt; | 31 | r = &TASK_REGS(get_current())->tt; |
32 | if ( sig == SIGFPE || sig == SIGSEGV || | ||
33 | sig == SIGBUS || sig == SIGILL || | ||
34 | sig == SIGTRAP ) { | ||
35 | GET_FAULTINFO_FROM_SC(r->faultinfo, sc); | ||
36 | } | ||
31 | save_regs = *r; | 37 | save_regs = *r; |
32 | is_user = user_context(SC_SP(sc)); | 38 | is_user = user_context(SC_SP(sc)); |
33 | r->sc = sc; | 39 | r->sc = sc; |
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c index 5c49d88eed3d..4d10ec372a67 100644 --- a/arch/um/kernel/um_arch.c +++ b/arch/um/kernel/um_arch.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include "asm/ptrace.h" | 23 | #include "asm/ptrace.h" |
24 | #include "asm/elf.h" | 24 | #include "asm/elf.h" |
25 | #include "asm/user.h" | 25 | #include "asm/user.h" |
26 | #include "asm/setup.h" | ||
26 | #include "ubd_user.h" | 27 | #include "ubd_user.h" |
27 | #include "asm/current.h" | 28 | #include "asm/current.h" |
28 | #include "asm/setup.h" | 29 | #include "asm/setup.h" |
@@ -42,9 +43,9 @@ | |||
42 | #define DEFAULT_COMMAND_LINE "root=98:0" | 43 | #define DEFAULT_COMMAND_LINE "root=98:0" |
43 | 44 | ||
44 | /* Changed in linux_main and setup_arch, which run before SMP is started */ | 45 | /* Changed in linux_main and setup_arch, which run before SMP is started */ |
45 | char command_line[COMMAND_LINE_SIZE] = { 0 }; | 46 | static char command_line[COMMAND_LINE_SIZE] = { 0 }; |
46 | 47 | ||
47 | void add_arg(char *arg) | 48 | static void add_arg(char *arg) |
48 | { | 49 | { |
49 | if (strlen(command_line) + strlen(arg) + 1 > COMMAND_LINE_SIZE) { | 50 | if (strlen(command_line) + strlen(arg) + 1 > COMMAND_LINE_SIZE) { |
50 | printf("add_arg: Too many command line arguments!\n"); | 51 | printf("add_arg: Too many command line arguments!\n"); |
@@ -449,7 +450,7 @@ void __init setup_arch(char **cmdline_p) | |||
449 | { | 450 | { |
450 | notifier_chain_register(&panic_notifier_list, &panic_exit_notifier); | 451 | notifier_chain_register(&panic_notifier_list, &panic_exit_notifier); |
451 | paging_init(); | 452 | paging_init(); |
452 | strlcpy(saved_command_line, command_line, COMMAND_LINE_SIZE); | 453 | strlcpy(saved_command_line, command_line, COMMAND_LINE_SIZE); |
453 | *cmdline_p = command_line; | 454 | *cmdline_p = command_line; |
454 | setup_hostinfo(); | 455 | setup_hostinfo(); |
455 | } | 456 | } |
diff --git a/arch/um/kernel/vmlinux.lds.S b/arch/um/kernel/vmlinux.lds.S new file mode 100644 index 000000000000..1660a769674b --- /dev/null +++ b/arch/um/kernel/vmlinux.lds.S | |||
@@ -0,0 +1,6 @@ | |||
1 | #include <linux/config.h> | ||
2 | #ifdef CONFIG_LD_SCRIPT_STATIC | ||
3 | #include "uml.lds.S" | ||
4 | #else | ||
5 | #include "dyn.lds.S" | ||
6 | #endif | ||
diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c index ba9ca1cc790a..1e126bfd31a7 100644 --- a/arch/um/os-Linux/process.c +++ b/arch/um/os-Linux/process.c | |||
@@ -123,6 +123,11 @@ int os_getpid(void) | |||
123 | return(getpid()); | 123 | return(getpid()); |
124 | } | 124 | } |
125 | 125 | ||
126 | int os_getpgrp(void) | ||
127 | { | ||
128 | return getpgrp(); | ||
129 | } | ||
130 | |||
126 | int os_map_memory(void *virt, int fd, unsigned long long off, unsigned long len, | 131 | int os_map_memory(void *virt, int fd, unsigned long long off, unsigned long len, |
127 | int r, int w, int x) | 132 | int r, int w, int x) |
128 | { | 133 | { |
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c index 7eac1baf5975..c7bfd5ee3925 100644 --- a/arch/um/os-Linux/signal.c +++ b/arch/um/os-Linux/signal.c | |||
@@ -8,7 +8,7 @@ | |||
8 | #include "mode.h" | 8 | #include "mode.h" |
9 | #include "sysdep/signal.h" | 9 | #include "sysdep/signal.h" |
10 | 10 | ||
11 | void sig_handler(int sig) | 11 | void sig_handler(ARCH_SIGHDLR_PARAM) |
12 | { | 12 | { |
13 | struct sigcontext *sc; | 13 | struct sigcontext *sc; |
14 | 14 | ||
@@ -19,7 +19,7 @@ void sig_handler(int sig) | |||
19 | 19 | ||
20 | extern int timer_irq_inited; | 20 | extern int timer_irq_inited; |
21 | 21 | ||
22 | void alarm_handler(int sig) | 22 | void alarm_handler(ARCH_SIGHDLR_PARAM) |
23 | { | 23 | { |
24 | struct sigcontext *sc; | 24 | struct sigcontext *sc; |
25 | 25 | ||
diff --git a/arch/um/os-Linux/sys-i386/registers.c b/arch/um/os-Linux/sys-i386/registers.c index 148645b14480..9a0ad094d926 100644 --- a/arch/um/os-Linux/sys-i386/registers.c +++ b/arch/um/os-Linux/sys-i386/registers.c | |||
@@ -105,14 +105,15 @@ void init_registers(int pid) | |||
105 | panic("check_ptrace : PTRACE_GETREGS failed, errno = %d", | 105 | panic("check_ptrace : PTRACE_GETREGS failed, errno = %d", |
106 | err); | 106 | err); |
107 | 107 | ||
108 | errno = 0; | ||
108 | err = ptrace(PTRACE_GETFPXREGS, pid, 0, exec_fpx_regs); | 109 | err = ptrace(PTRACE_GETFPXREGS, pid, 0, exec_fpx_regs); |
109 | if(!err) | 110 | if(!err) |
110 | return; | 111 | return; |
112 | if(errno != EIO) | ||
113 | panic("check_ptrace : PTRACE_GETFPXREGS failed, errno = %d", | ||
114 | errno); | ||
111 | 115 | ||
112 | have_fpx_regs = 0; | 116 | have_fpx_regs = 0; |
113 | if(err != EIO) | ||
114 | panic("check_ptrace : PTRACE_GETFPXREGS failed, errno = %d", | ||
115 | err); | ||
116 | 117 | ||
117 | err = ptrace(PTRACE_GETFPREGS, pid, 0, exec_fp_regs); | 118 | err = ptrace(PTRACE_GETFPREGS, pid, 0, exec_fp_regs); |
118 | if(err) | 119 | if(err) |
diff --git a/arch/um/os-Linux/util/Makefile b/arch/um/os-Linux/util/Makefile index fb00ddf969bd..9778aed0c314 100644 --- a/arch/um/os-Linux/util/Makefile +++ b/arch/um/os-Linux/util/Makefile | |||
@@ -1,4 +1,4 @@ | |||
1 | hostprogs-y := mk_user_constants | 1 | hostprogs-y := mk_user_constants |
2 | always := $(hostprogs-y) | 2 | always := $(hostprogs-y) |
3 | 3 | ||
4 | mk_user_constants-objs := mk_user_constants.o | 4 | HOSTCFLAGS_mk_user_constants.o := -I$(objtree)/arch/um |
diff --git a/arch/um/os-Linux/util/mk_user_constants.c b/arch/um/os-Linux/util/mk_user_constants.c index 0933518aa8bd..4838f30eecf0 100644 --- a/arch/um/os-Linux/util/mk_user_constants.c +++ b/arch/um/os-Linux/util/mk_user_constants.c | |||
@@ -1,11 +1,5 @@ | |||
1 | #include <stdio.h> | 1 | #include <stdio.h> |
2 | #include <asm/types.h> | 2 | #include <user-offsets.h> |
3 | /* For some reason, x86_64 nowhere defines u64 and u32, even though they're | ||
4 | * used throughout the headers. | ||
5 | */ | ||
6 | typedef __u64 u64; | ||
7 | typedef __u32 u32; | ||
8 | #include <asm/user.h> | ||
9 | 3 | ||
10 | int main(int argc, char **argv) | 4 | int main(int argc, char **argv) |
11 | { | 5 | { |
@@ -20,7 +14,7 @@ int main(int argc, char **argv) | |||
20 | * x86_64 (216 vs 168 bytes). user_regs_struct is the correct size on | 14 | * x86_64 (216 vs 168 bytes). user_regs_struct is the correct size on |
21 | * both x86_64 and i386. | 15 | * both x86_64 and i386. |
22 | */ | 16 | */ |
23 | printf("#define UM_FRAME_SIZE %d\n", (int) sizeof(struct user_regs_struct)); | 17 | printf("#define UM_FRAME_SIZE %d\n", __UM_FRAME_SIZE); |
24 | 18 | ||
25 | printf("\n"); | 19 | printf("\n"); |
26 | printf("#endif\n"); | 20 | printf("#endif\n"); |
diff --git a/arch/um/scripts/Makefile.rules b/arch/um/scripts/Makefile.rules index 143f6fea0763..0b2491883d9c 100644 --- a/arch/um/scripts/Makefile.rules +++ b/arch/um/scripts/Makefile.rules | |||
@@ -2,12 +2,27 @@ | |||
2 | # arch/um: Generic definitions | 2 | # arch/um: Generic definitions |
3 | # =========================================================================== | 3 | # =========================================================================== |
4 | 4 | ||
5 | USER_SINGLE_OBJS = $(foreach f,$(patsubst %.o,%,$(obj-y) $(obj-m)),$($(f)-objs)) | 5 | USER_SINGLE_OBJS := \ |
6 | USER_OBJS += $(filter %_user.o,$(obj-y) $(obj-m) $(USER_SINGLE_OBJS)) | 6 | $(foreach f,$(patsubst %.o,%,$(obj-y) $(obj-m)),$($(f)-objs)) |
7 | 7 | USER_OBJS += $(filter %_user.o,$(obj-y) $(obj-m) $(USER_SINGLE_OBJS)) | |
8 | USER_OBJS := $(foreach file,$(USER_OBJS),$(obj)/$(file)) | 8 | USER_OBJS := $(foreach file,$(USER_OBJS),$(obj)/$(file)) |
9 | 9 | ||
10 | $(USER_OBJS): c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS) $(CFLAGS_$(notdir $@)) | 10 | $(USER_OBJS) : c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS) \ |
11 | $(CFLAGS_$(notdir $@)) | ||
11 | 12 | ||
12 | quiet_cmd_make_link = SYMLINK $@ | 13 | quiet_cmd_make_link = SYMLINK $@ |
13 | cmd_make_link = rm -f $@; ln -sf $(srctree)/arch/$(SUBARCH)/$($(notdir $@)-dir)/$(notdir $@) $@ | 14 | cmd_make_link = ln -sf $(srctree)/arch/$(SUBARCH)/$($(notdir $@)-dir)/$(notdir $@) $@ |
15 | |||
16 | # This needs to come before the foreach below, because "targets" does not | ||
17 | # accept complete paths like $(obj)/$(f): it has to capture the bare file | ||
18 | # names before SYMLINKS is prefixed with $(obj)/. | ||
19 | # For the same reason the assignment must use :=; a recursive "=" assignment | ||
20 | # would both pick up the prefixed paths and make targets reference itself, | ||
21 | # which loops forever when expanded. | ||
22 | |||
23 | targets := $(targets) $(SYMLINKS) | ||
24 | |||
25 | SYMLINKS := $(foreach f,$(SYMLINKS),$(obj)/$(f)) | ||
26 | |||
27 | $(SYMLINKS): FORCE | ||
28 | $(call if_changed,make_link) | ||
diff --git a/arch/um/sys-i386/Makefile b/arch/um/sys-i386/Makefile index 71b47e618605..fcd67c3414e4 100644 --- a/arch/um/sys-i386/Makefile +++ b/arch/um/sys-i386/Makefile | |||
@@ -1,29 +1,19 @@ | |||
1 | obj-y = bitops.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \ | 1 | obj-y = bitops.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \ |
2 | ptrace_user.o semaphore.o signal.o sigcontext.o syscalls.o sysrq.o | 2 | ptrace_user.o semaphore.o signal.o sigcontext.o syscalls.o sysrq.o \ |
3 | sys_call_table.o | ||
3 | 4 | ||
4 | obj-$(CONFIG_HIGHMEM) += highmem.o | 5 | obj-$(CONFIG_HIGHMEM) += highmem.o |
5 | obj-$(CONFIG_MODULES) += module.o | 6 | obj-$(CONFIG_MODULES) += module.o |
6 | 7 | ||
7 | USER_OBJS := bugs.o ptrace_user.o sigcontext.o fault.o | 8 | USER_OBJS := bugs.o ptrace_user.o sigcontext.o fault.o |
8 | 9 | ||
9 | include arch/um/scripts/Makefile.rules | ||
10 | |||
11 | SYMLINKS = bitops.c semaphore.c highmem.c module.c | 10 | SYMLINKS = bitops.c semaphore.c highmem.c module.c |
12 | 11 | ||
13 | # this needs to be before the foreach, because clean-files does not accept | ||
14 | # complete paths like $(src)/$f. | ||
15 | clean-files := $(SYMLINKS) | ||
16 | |||
17 | targets += $(SYMLINKS) | ||
18 | |||
19 | SYMLINKS := $(foreach f,$(SYMLINKS),$(obj)/$f) | ||
20 | |||
21 | bitops.c-dir = lib | 12 | bitops.c-dir = lib |
22 | semaphore.c-dir = kernel | 13 | semaphore.c-dir = kernel |
23 | highmem.c-dir = mm | 14 | highmem.c-dir = mm |
24 | module.c-dir = kernel | 15 | module.c-dir = kernel |
25 | 16 | ||
26 | $(SYMLINKS): FORCE | ||
27 | $(call if_changed,make_link) | ||
28 | |||
29 | subdir- := util | 17 | subdir- := util |
18 | |||
19 | include arch/um/scripts/Makefile.rules | ||
diff --git a/arch/um/sys-i386/checksum.S b/arch/um/sys-i386/checksum.S index a11171fb6223..d98b2fff3d08 100644 --- a/arch/um/sys-i386/checksum.S +++ b/arch/um/sys-i386/checksum.S | |||
@@ -38,7 +38,7 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) | |||
38 | 38 | ||
39 | .text | 39 | .text |
40 | .align 4 | 40 | .align 4 |
41 | .globl arch_csum_partial | 41 | .globl csum_partial |
42 | 42 | ||
43 | #ifndef CONFIG_X86_USE_PPRO_CHECKSUM | 43 | #ifndef CONFIG_X86_USE_PPRO_CHECKSUM |
44 | 44 | ||
@@ -49,7 +49,7 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) | |||
49 | * Fortunately, it is easy to convert 2-byte alignment to 4-byte | 49 | * Fortunately, it is easy to convert 2-byte alignment to 4-byte |
50 | * alignment for the unrolled loop. | 50 | * alignment for the unrolled loop. |
51 | */ | 51 | */ |
52 | arch_csum_partial: | 52 | csum_partial: |
53 | pushl %esi | 53 | pushl %esi |
54 | pushl %ebx | 54 | pushl %ebx |
55 | movl 20(%esp),%eax # Function arg: unsigned int sum | 55 | movl 20(%esp),%eax # Function arg: unsigned int sum |
@@ -119,7 +119,7 @@ arch_csum_partial: | |||
119 | 119 | ||
120 | /* Version for PentiumII/PPro */ | 120 | /* Version for PentiumII/PPro */ |
121 | 121 | ||
122 | arch_csum_partial: | 122 | csum_partial: |
123 | pushl %esi | 123 | pushl %esi |
124 | pushl %ebx | 124 | pushl %ebx |
125 | movl 20(%esp),%eax # Function arg: unsigned int sum | 125 | movl 20(%esp),%eax # Function arg: unsigned int sum |
diff --git a/arch/um/sys-i386/delay.c b/arch/um/sys-i386/delay.c index 20d37dbbaf08..e9892eef51ce 100644 --- a/arch/um/sys-i386/delay.c +++ b/arch/um/sys-i386/delay.c | |||
@@ -1,3 +1,6 @@ | |||
1 | #include "linux/delay.h" | ||
2 | #include "asm/param.h" | ||
3 | |||
1 | void __delay(unsigned long time) | 4 | void __delay(unsigned long time) |
2 | { | 5 | { |
3 | /* Stolen from the i386 __loop_delay */ | 6 | /* Stolen from the i386 __loop_delay */ |
@@ -12,3 +15,18 @@ void __delay(unsigned long time) | |||
12 | :"0" (time)); | 15 | :"0" (time)); |
13 | } | 16 | } |
14 | 17 | ||
18 | void __udelay(unsigned long usecs) | ||
19 | { | ||
20 | int i, n; | ||
21 | |||
22 | n = (loops_per_jiffy * HZ * usecs) / MILLION; | ||
23 | for(i=0;i<n;i++) ; | ||
24 | } | ||
25 | |||
26 | void __const_udelay(unsigned long usecs) | ||
27 | { | ||
28 | int i, n; | ||
29 | |||
30 | n = (loops_per_jiffy * HZ * usecs) / MILLION; | ||
31 | for(i=0;i<n;i++) ; | ||
32 | } | ||
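The new __udelay()/__const_udelay() implementations busy-wait on the BogoMIPS-calibrated loop count rather than using a hardware timer. As a worked example of the arithmetic (the calibration value, HZ and MILLION = 1000000 here are illustrative assumptions, not values taken from this patch): with loops_per_jiffy = 4,000,000 and HZ = 100, a 10 microsecond delay spins for n = (4,000,000 * 100 * 10) / 1,000,000 = 4,000 iterations. A standalone sketch of the same computation:

#include <stdio.h>

int main(void)
{
	/* illustrative assumptions, not taken from the patch */
	unsigned long loops_per_jiffy = 4000000UL;	/* loop iterations per tick */
	unsigned long hz = 100UL;			/* timer ticks per second */
	unsigned long million = 1000000UL;		/* UML's MILLION constant */
	unsigned long usecs = 10UL;

	unsigned long n = (loops_per_jiffy * hz * usecs) / million;
	printf("a %lu us delay spins for %lu iterations\n", usecs, n);
	return 0;
}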
diff --git a/arch/um/sys-i386/kernel-offsets.c b/arch/um/sys-i386/kernel-offsets.c new file mode 100644 index 000000000000..9f8ecd1fdd96 --- /dev/null +++ b/arch/um/sys-i386/kernel-offsets.c | |||
@@ -0,0 +1,25 @@ | |||
1 | #include <linux/config.h> | ||
2 | #include <linux/stddef.h> | ||
3 | #include <linux/sched.h> | ||
4 | #include <linux/time.h> | ||
5 | #include <asm/page.h> | ||
6 | |||
7 | #define DEFINE(sym, val) \ | ||
8 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
9 | |||
10 | #define STR(x) #x | ||
11 | #define DEFINE_STR(sym, val) asm volatile("\n->" #sym " " STR(val) " " #val: : ) | ||
12 | |||
13 | #define BLANK() asm volatile("\n->" : : ) | ||
14 | |||
15 | #define OFFSET(sym, str, mem) \ | ||
16 | DEFINE(sym, offsetof(struct str, mem)); | ||
17 | |||
18 | void foo(void) | ||
19 | { | ||
20 | OFFSET(TASK_DEBUGREGS, task_struct, thread.arch.debugregs); | ||
21 | #ifdef CONFIG_MODE_TT | ||
22 | OFFSET(TASK_EXTERN_PID, task_struct, thread.mode.tt.extern_pid); | ||
23 | #endif | ||
24 | #include <common-offsets.h> | ||
25 | } | ||
diff --git a/arch/um/sys-i386/ksyms.c b/arch/um/sys-i386/ksyms.c index 74f70a120458..db524ab3f743 100644 --- a/arch/um/sys-i386/ksyms.c +++ b/arch/um/sys-i386/ksyms.c | |||
@@ -2,6 +2,7 @@ | |||
2 | #include "linux/in6.h" | 2 | #include "linux/in6.h" |
3 | #include "linux/rwsem.h" | 3 | #include "linux/rwsem.h" |
4 | #include "asm/byteorder.h" | 4 | #include "asm/byteorder.h" |
5 | #include "asm/delay.h" | ||
5 | #include "asm/semaphore.h" | 6 | #include "asm/semaphore.h" |
6 | #include "asm/uaccess.h" | 7 | #include "asm/uaccess.h" |
7 | #include "asm/checksum.h" | 8 | #include "asm/checksum.h" |
@@ -13,5 +14,8 @@ EXPORT_SYMBOL(__down_failed_trylock); | |||
13 | EXPORT_SYMBOL(__up_wakeup); | 14 | EXPORT_SYMBOL(__up_wakeup); |
14 | 15 | ||
15 | /* Networking helper routines. */ | 16 | /* Networking helper routines. */ |
16 | EXPORT_SYMBOL(csum_partial_copy_from); | 17 | EXPORT_SYMBOL(csum_partial); |
17 | EXPORT_SYMBOL(csum_partial_copy_to); | 18 | |
19 | /* delay core functions */ | ||
20 | EXPORT_SYMBOL(__const_udelay); | ||
21 | EXPORT_SYMBOL(__udelay); | ||
diff --git a/arch/um/sys-i386/ldt.c b/arch/um/sys-i386/ldt.c index 31bcb2f997d4..dc755b0b9db8 100644 --- a/arch/um/sys-i386/ldt.c +++ b/arch/um/sys-i386/ldt.c | |||
@@ -25,7 +25,7 @@ int sys_modify_ldt_tt(int func, void __user *ptr, unsigned long bytecount) | |||
25 | #endif | 25 | #endif |
26 | 26 | ||
27 | #ifdef CONFIG_MODE_SKAS | 27 | #ifdef CONFIG_MODE_SKAS |
28 | extern int userspace_pid; | 28 | extern int userspace_pid[]; |
29 | 29 | ||
30 | #include "skas_ptrace.h" | 30 | #include "skas_ptrace.h" |
31 | 31 | ||
@@ -56,7 +56,8 @@ int sys_modify_ldt_skas(int func, void __user *ptr, unsigned long bytecount) | |||
56 | ldt = ((struct ptrace_ldt) { .func = func, | 56 | ldt = ((struct ptrace_ldt) { .func = func, |
57 | .ptr = buf, | 57 | .ptr = buf, |
58 | .bytecount = bytecount }); | 58 | .bytecount = bytecount }); |
59 | res = ptrace(PTRACE_LDT, userspace_pid, 0, (unsigned long) &ldt); | 59 | #warning Need to look up userspace_pid by cpu |
60 | res = ptrace(PTRACE_LDT, userspace_pid[0], 0, (unsigned long) &ldt); | ||
60 | if(res < 0) | 61 | if(res < 0) |
61 | goto out; | 62 | goto out; |
62 | 63 | ||
diff --git a/arch/um/sys-i386/ptrace.c b/arch/um/sys-i386/ptrace.c index e470d28cdf84..e839ce65ad28 100644 --- a/arch/um/sys-i386/ptrace.c +++ b/arch/um/sys-i386/ptrace.c | |||
@@ -73,6 +73,25 @@ int putreg(struct task_struct *child, int regno, unsigned long value) | |||
73 | return 0; | 73 | return 0; |
74 | } | 74 | } |
75 | 75 | ||
76 | int poke_user(struct task_struct *child, long addr, long data) | ||
77 | { | ||
78 | if ((addr & 3) || addr < 0) | ||
79 | return -EIO; | ||
80 | |||
81 | if (addr < MAX_REG_OFFSET) | ||
82 | return putreg(child, addr, data); | ||
83 | |||
84 | else if((addr >= offsetof(struct user, u_debugreg[0])) && | ||
85 | (addr <= offsetof(struct user, u_debugreg[7]))){ | ||
86 | addr -= offsetof(struct user, u_debugreg[0]); | ||
87 | addr = addr >> 2; | ||
88 | if((addr == 4) || (addr == 5)) return -EIO; | ||
89 | child->thread.arch.debugregs[addr] = data; | ||
90 | return 0; | ||
91 | } | ||
92 | return -EIO; | ||
93 | } | ||
94 | |||
76 | unsigned long getreg(struct task_struct *child, int regno) | 95 | unsigned long getreg(struct task_struct *child, int regno) |
77 | { | 96 | { |
78 | unsigned long retval = ~0UL; | 97 | unsigned long retval = ~0UL; |
@@ -93,6 +112,27 @@ unsigned long getreg(struct task_struct *child, int regno) | |||
93 | return retval; | 112 | return retval; |
94 | } | 113 | } |
95 | 114 | ||
115 | int peek_user(struct task_struct *child, long addr, long data) | ||
116 | { | ||
117 | /* read the word at location addr in the USER area. */ | ||
118 | unsigned long tmp; | ||
119 | |||
120 | if ((addr & 3) || addr < 0) | ||
121 | return -EIO; | ||
122 | |||
123 | tmp = 0; /* Default return condition */ | ||
124 | if(addr < MAX_REG_OFFSET){ | ||
125 | tmp = getreg(child, addr); | ||
126 | } | ||
127 | else if((addr >= offsetof(struct user, u_debugreg[0])) && | ||
128 | (addr <= offsetof(struct user, u_debugreg[7]))){ | ||
129 | addr -= offsetof(struct user, u_debugreg[0]); | ||
130 | addr = addr >> 2; | ||
131 | tmp = child->thread.arch.debugregs[addr]; | ||
132 | } | ||
133 | return put_user(tmp, (unsigned long *) data); | ||
134 | } | ||
135 | |||
96 | struct i387_fxsave_struct { | 136 | struct i387_fxsave_struct { |
97 | unsigned short cwd; | 137 | unsigned short cwd; |
98 | unsigned short swd; | 138 | unsigned short swd; |
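The new peek_user()/poke_user() helpers back the PTRACE_PEEKUSR/PTRACE_POKEUSR requests: word-aligned offsets below MAX_REG_OFFSET are routed through getreg()/putreg(), and offsets inside the u_debugreg[0..7] range of struct user map onto the per-thread debugregs array (writes to slots 4 and 5 are rejected with -EIO). A minimal host-side fragment showing how a tracer would reach this path; the glibc spelling PTRACE_PEEKUSER and the stopped, traced child are assumptions for illustration, not part of the patch:

#include <errno.h>
#include <stdio.h>
#include <stddef.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/user.h>

/* Read debug register 0 of an already-stopped, traced child; the kernel
 * side (peek_user above) turns the user-area offset into
 * child->thread.arch.debugregs[0]. */
long read_dr0(pid_t child)
{
	long off = offsetof(struct user, u_debugreg[0]);
	long val;

	errno = 0;
	val = ptrace(PTRACE_PEEKUSER, child, (void *) off, NULL);
	if (val == -1 && errno != 0)
		perror("PTRACE_PEEKUSER");
	return val;
}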
diff --git a/arch/um/sys-i386/signal.c b/arch/um/sys-i386/signal.c index 76ba87254b25..03913ca5d256 100644 --- a/arch/um/sys-i386/signal.c +++ b/arch/um/sys-i386/signal.c | |||
@@ -47,9 +47,6 @@ static int copy_sc_from_user_skas(struct pt_regs *regs, | |||
47 | REGS_CS(regs->regs.skas.regs) = sc.cs; | 47 | REGS_CS(regs->regs.skas.regs) = sc.cs; |
48 | REGS_EFLAGS(regs->regs.skas.regs) = sc.eflags; | 48 | REGS_EFLAGS(regs->regs.skas.regs) = sc.eflags; |
49 | REGS_SS(regs->regs.skas.regs) = sc.ss; | 49 | REGS_SS(regs->regs.skas.regs) = sc.ss; |
50 | regs->regs.skas.fault_addr = sc.cr2; | ||
51 | regs->regs.skas.fault_type = FAULT_WRITE(sc.err); | ||
52 | regs->regs.skas.trap_type = sc.trapno; | ||
53 | 50 | ||
54 | err = restore_fp_registers(userspace_pid[0], fpregs); | 51 | err = restore_fp_registers(userspace_pid[0], fpregs); |
55 | if(err < 0){ | 52 | if(err < 0){ |
@@ -62,11 +59,11 @@ static int copy_sc_from_user_skas(struct pt_regs *regs, | |||
62 | } | 59 | } |
63 | 60 | ||
64 | int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp, | 61 | int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp, |
65 | struct pt_regs *regs, unsigned long fault_addr, | 62 | struct pt_regs *regs) |
66 | int fault_type) | ||
67 | { | 63 | { |
68 | struct sigcontext sc; | 64 | struct sigcontext sc; |
69 | unsigned long fpregs[HOST_FP_SIZE]; | 65 | unsigned long fpregs[HOST_FP_SIZE]; |
66 | struct faultinfo * fi = &current->thread.arch.faultinfo; | ||
70 | int err; | 67 | int err; |
71 | 68 | ||
72 | sc.gs = REGS_GS(regs->regs.skas.regs); | 69 | sc.gs = REGS_GS(regs->regs.skas.regs); |
@@ -86,9 +83,9 @@ int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp, | |||
86 | sc.eflags = REGS_EFLAGS(regs->regs.skas.regs); | 83 | sc.eflags = REGS_EFLAGS(regs->regs.skas.regs); |
87 | sc.esp_at_signal = regs->regs.skas.regs[UESP]; | 84 | sc.esp_at_signal = regs->regs.skas.regs[UESP]; |
88 | sc.ss = regs->regs.skas.regs[SS]; | 85 | sc.ss = regs->regs.skas.regs[SS]; |
89 | sc.cr2 = fault_addr; | 86 | sc.cr2 = fi->cr2; |
90 | sc.err = TO_SC_ERR(fault_type); | 87 | sc.err = fi->error_code; |
91 | sc.trapno = regs->regs.skas.trap_type; | 88 | sc.trapno = fi->trap_no; |
92 | 89 | ||
93 | err = save_fp_registers(userspace_pid[0], fpregs); | 90 | err = save_fp_registers(userspace_pid[0], fpregs); |
94 | if(err < 0){ | 91 | if(err < 0){ |
@@ -167,9 +164,7 @@ static int copy_sc_to_user(struct sigcontext *to, struct _fpstate *fp, | |||
167 | { | 164 | { |
168 | return(CHOOSE_MODE(copy_sc_to_user_tt(to, fp, UPT_SC(&from->regs), | 165 | return(CHOOSE_MODE(copy_sc_to_user_tt(to, fp, UPT_SC(&from->regs), |
169 | sizeof(*fp)), | 166 | sizeof(*fp)), |
170 | copy_sc_to_user_skas(to, fp, from, | 167 | copy_sc_to_user_skas(to, fp, from))); |
171 | current->thread.cr2, | ||
172 | current->thread.err))); | ||
173 | } | 168 | } |
174 | 169 | ||
175 | static int copy_ucontext_to_user(struct ucontext *uc, struct _fpstate *fp, | 170 | static int copy_ucontext_to_user(struct ucontext *uc, struct _fpstate *fp, |
diff --git a/arch/um/sys-i386/sys_call_table.S b/arch/um/sys-i386/sys_call_table.S new file mode 100644 index 000000000000..ad75c27afe38 --- /dev/null +++ b/arch/um/sys-i386/sys_call_table.S | |||
@@ -0,0 +1,16 @@ | |||
1 | #include <linux/linkage.h> | ||
2 | /* Steal i386 syscall table for our purposes, but with some slight changes. */ | ||
3 | |||
4 | #define sys_iopl sys_ni_syscall | ||
5 | #define sys_ioperm sys_ni_syscall | ||
6 | |||
7 | #define sys_vm86old sys_ni_syscall | ||
8 | #define sys_vm86 sys_ni_syscall | ||
9 | #define sys_set_thread_area sys_ni_syscall | ||
10 | #define sys_get_thread_area sys_ni_syscall | ||
11 | |||
12 | #define sys_stime um_stime | ||
13 | #define sys_time um_time | ||
14 | #define old_mmap old_mmap_i386 | ||
15 | |||
16 | #include "../../i386/kernel/syscall_table.S" | ||
diff --git a/arch/um/sys-i386/user-offsets.c b/arch/um/sys-i386/user-offsets.c new file mode 100644 index 000000000000..3ceaabceb3d7 --- /dev/null +++ b/arch/um/sys-i386/user-offsets.c | |||
@@ -0,0 +1,69 @@ | |||
1 | #include <stdio.h> | ||
2 | #include <signal.h> | ||
3 | #include <asm/ptrace.h> | ||
4 | #include <asm/user.h> | ||
5 | #include <linux/stddef.h> | ||
6 | |||
7 | #define DEFINE(sym, val) \ | ||
8 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
9 | |||
10 | #define OFFSET(sym, str, mem) \ | ||
11 | DEFINE(sym, offsetof(struct str, mem)); | ||
12 | |||
13 | void foo(void) | ||
14 | { | ||
15 | OFFSET(SC_IP, sigcontext, eip); | ||
16 | OFFSET(SC_SP, sigcontext, esp); | ||
17 | OFFSET(SC_FS, sigcontext, fs); | ||
18 | OFFSET(SC_GS, sigcontext, gs); | ||
19 | OFFSET(SC_DS, sigcontext, ds); | ||
20 | OFFSET(SC_ES, sigcontext, es); | ||
21 | OFFSET(SC_SS, sigcontext, ss); | ||
22 | OFFSET(SC_CS, sigcontext, cs); | ||
23 | OFFSET(SC_EFLAGS, sigcontext, eflags); | ||
24 | OFFSET(SC_EAX, sigcontext, eax); | ||
25 | OFFSET(SC_EBX, sigcontext, ebx); | ||
26 | OFFSET(SC_ECX, sigcontext, ecx); | ||
27 | OFFSET(SC_EDX, sigcontext, edx); | ||
28 | OFFSET(SC_EDI, sigcontext, edi); | ||
29 | OFFSET(SC_ESI, sigcontext, esi); | ||
30 | OFFSET(SC_EBP, sigcontext, ebp); | ||
31 | OFFSET(SC_TRAPNO, sigcontext, trapno); | ||
32 | OFFSET(SC_ERR, sigcontext, err); | ||
33 | OFFSET(SC_CR2, sigcontext, cr2); | ||
34 | OFFSET(SC_FPSTATE, sigcontext, fpstate); | ||
35 | OFFSET(SC_SIGMASK, sigcontext, oldmask); | ||
36 | OFFSET(SC_FP_CW, _fpstate, cw); | ||
37 | OFFSET(SC_FP_SW, _fpstate, sw); | ||
38 | OFFSET(SC_FP_TAG, _fpstate, tag); | ||
39 | OFFSET(SC_FP_IPOFF, _fpstate, ipoff); | ||
40 | OFFSET(SC_FP_CSSEL, _fpstate, cssel); | ||
41 | OFFSET(SC_FP_DATAOFF, _fpstate, dataoff); | ||
42 | OFFSET(SC_FP_DATASEL, _fpstate, datasel); | ||
43 | OFFSET(SC_FP_ST, _fpstate, _st); | ||
44 | OFFSET(SC_FXSR_ENV, _fpstate, _fxsr_env); | ||
45 | |||
46 | DEFINE(HOST_FRAME_SIZE, FRAME_SIZE); | ||
47 | DEFINE(HOST_FP_SIZE, | ||
48 | sizeof(struct user_i387_struct) / sizeof(unsigned long)); | ||
49 | DEFINE(HOST_XFP_SIZE, | ||
50 | sizeof(struct user_fxsr_struct) / sizeof(unsigned long)); | ||
51 | |||
52 | DEFINE(HOST_IP, EIP); | ||
53 | DEFINE(HOST_SP, UESP); | ||
54 | DEFINE(HOST_EFLAGS, EFL); | ||
55 | DEFINE(HOST_EAX, EAX); | ||
56 | DEFINE(HOST_EBX, EBX); | ||
57 | DEFINE(HOST_ECX, ECX); | ||
58 | DEFINE(HOST_EDX, EDX); | ||
59 | DEFINE(HOST_ESI, ESI); | ||
60 | DEFINE(HOST_EDI, EDI); | ||
61 | DEFINE(HOST_EBP, EBP); | ||
62 | DEFINE(HOST_CS, CS); | ||
63 | DEFINE(HOST_SS, SS); | ||
64 | DEFINE(HOST_DS, DS); | ||
65 | DEFINE(HOST_FS, FS); | ||
66 | DEFINE(HOST_ES, ES); | ||
67 | DEFINE(HOST_GS, GS); | ||
68 | DEFINE(__UM_FRAME_SIZE, sizeof(struct user_regs_struct)); | ||
69 | } | ||
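The *-offsets.c files introduced here are compiled but never linked; each DEFINE()/OFFSET() invocation plants a "->SYMBOL value" marker in the compiler's assembly output, and the build turns those markers into the user-offsets.h / kernel-offsets.h headers consumed by mk_sc, mk_thread and mk_user_constants (the exact extraction step, presumably a sed pass in the Kbuild rules, is an assumption here and is not shown in this diff). A reduced sketch of the mechanism:

#include <stddef.h>

/* Emits a "->SYM value" line into the generated assembly; the "i"
 * constraint forces the value to appear as an immediate. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

struct example {
	int a;
	int b;
};

void foo(void)
{
	/* Compiling with "gcc -S" yields a line such as
	 *     ->EXAMPLE_B_OFFSET $4 offsetof(struct example, b)
	 * which the (assumed) post-processing step rewrites into
	 *     #define EXAMPLE_B_OFFSET 4
	 */
	DEFINE(EXAMPLE_B_OFFSET, offsetof(struct example, b));
}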
diff --git a/arch/um/sys-i386/util/Makefile b/arch/um/sys-i386/util/Makefile index 34860f9ca7b0..bf61afd0b045 100644 --- a/arch/um/sys-i386/util/Makefile +++ b/arch/um/sys-i386/util/Makefile | |||
@@ -1,8 +1,5 @@ | |||
1 | |||
2 | hostprogs-y := mk_sc mk_thread | 1 | hostprogs-y := mk_sc mk_thread |
3 | always := $(hostprogs-y) | 2 | always := $(hostprogs-y) |
4 | 3 | ||
5 | mk_thread-objs := mk_thread_kern.o mk_thread_user.o | 4 | HOSTCFLAGS_mk_sc.o := -I$(objtree)/arch/um |
6 | 5 | HOSTCFLAGS_mk_thread.o := -I$(objtree)/arch/um | |
7 | HOSTCFLAGS_mk_thread_kern.o := $(CFLAGS) $(CPPFLAGS) | ||
8 | HOSTCFLAGS_mk_thread_user.o := $(USER_CFLAGS) | ||
diff --git a/arch/um/sys-i386/util/mk_sc.c b/arch/um/sys-i386/util/mk_sc.c index 85cbd30396f7..04c0d73433aa 100644 --- a/arch/um/sys-i386/util/mk_sc.c +++ b/arch/um/sys-i386/util/mk_sc.c | |||
@@ -1,52 +1,51 @@ | |||
1 | #include <stdio.h> | 1 | #include <stdio.h> |
2 | #include <signal.h> | 2 | #include <user-offsets.h> |
3 | #include <linux/stddef.h> | ||
4 | 3 | ||
5 | #define SC_OFFSET(name, field) \ | 4 | #define SC_OFFSET(name, field) \ |
6 | printf("#define " name "(sc) *((unsigned long *) &(((char *) (sc))[%d]))\n",\ | 5 | printf("#define " #name "(sc) *((unsigned long *) &(((char *) (sc))[%d]))\n",\ |
7 | offsetof(struct sigcontext, field)) | 6 | name) |
8 | 7 | ||
9 | #define SC_FP_OFFSET(name, field) \ | 8 | #define SC_FP_OFFSET(name, field) \ |
10 | printf("#define " name \ | 9 | printf("#define " #name \ |
11 | "(sc) *((unsigned long *) &(((char *) (SC_FPSTATE(sc)))[%d]))\n",\ | 10 | "(sc) *((unsigned long *) &(((char *) (SC_FPSTATE(sc)))[%d]))\n",\ |
12 | offsetof(struct _fpstate, field)) | 11 | name) |
13 | 12 | ||
14 | #define SC_FP_OFFSET_PTR(name, field, type) \ | 13 | #define SC_FP_OFFSET_PTR(name, field, type) \ |
15 | printf("#define " name \ | 14 | printf("#define " #name \ |
16 | "(sc) ((" type " *) &(((char *) (SC_FPSTATE(sc)))[%d]))\n",\ | 15 | "(sc) ((" type " *) &(((char *) (SC_FPSTATE(sc)))[%d]))\n",\ |
17 | offsetof(struct _fpstate, field)) | 16 | name) |
18 | 17 | ||
19 | int main(int argc, char **argv) | 18 | int main(int argc, char **argv) |
20 | { | 19 | { |
21 | SC_OFFSET("SC_IP", eip); | 20 | SC_OFFSET(SC_IP, eip); |
22 | SC_OFFSET("SC_SP", esp); | 21 | SC_OFFSET(SC_SP, esp); |
23 | SC_OFFSET("SC_FS", fs); | 22 | SC_OFFSET(SC_FS, fs); |
24 | SC_OFFSET("SC_GS", gs); | 23 | SC_OFFSET(SC_GS, gs); |
25 | SC_OFFSET("SC_DS", ds); | 24 | SC_OFFSET(SC_DS, ds); |
26 | SC_OFFSET("SC_ES", es); | 25 | SC_OFFSET(SC_ES, es); |
27 | SC_OFFSET("SC_SS", ss); | 26 | SC_OFFSET(SC_SS, ss); |
28 | SC_OFFSET("SC_CS", cs); | 27 | SC_OFFSET(SC_CS, cs); |
29 | SC_OFFSET("SC_EFLAGS", eflags); | 28 | SC_OFFSET(SC_EFLAGS, eflags); |
30 | SC_OFFSET("SC_EAX", eax); | 29 | SC_OFFSET(SC_EAX, eax); |
31 | SC_OFFSET("SC_EBX", ebx); | 30 | SC_OFFSET(SC_EBX, ebx); |
32 | SC_OFFSET("SC_ECX", ecx); | 31 | SC_OFFSET(SC_ECX, ecx); |
33 | SC_OFFSET("SC_EDX", edx); | 32 | SC_OFFSET(SC_EDX, edx); |
34 | SC_OFFSET("SC_EDI", edi); | 33 | SC_OFFSET(SC_EDI, edi); |
35 | SC_OFFSET("SC_ESI", esi); | 34 | SC_OFFSET(SC_ESI, esi); |
36 | SC_OFFSET("SC_EBP", ebp); | 35 | SC_OFFSET(SC_EBP, ebp); |
37 | SC_OFFSET("SC_TRAPNO", trapno); | 36 | SC_OFFSET(SC_TRAPNO, trapno); |
38 | SC_OFFSET("SC_ERR", err); | 37 | SC_OFFSET(SC_ERR, err); |
39 | SC_OFFSET("SC_CR2", cr2); | 38 | SC_OFFSET(SC_CR2, cr2); |
40 | SC_OFFSET("SC_FPSTATE", fpstate); | 39 | SC_OFFSET(SC_FPSTATE, fpstate); |
41 | SC_OFFSET("SC_SIGMASK", oldmask); | 40 | SC_OFFSET(SC_SIGMASK, oldmask); |
42 | SC_FP_OFFSET("SC_FP_CW", cw); | 41 | SC_FP_OFFSET(SC_FP_CW, cw); |
43 | SC_FP_OFFSET("SC_FP_SW", sw); | 42 | SC_FP_OFFSET(SC_FP_SW, sw); |
44 | SC_FP_OFFSET("SC_FP_TAG", tag); | 43 | SC_FP_OFFSET(SC_FP_TAG, tag); |
45 | SC_FP_OFFSET("SC_FP_IPOFF", ipoff); | 44 | SC_FP_OFFSET(SC_FP_IPOFF, ipoff); |
46 | SC_FP_OFFSET("SC_FP_CSSEL", cssel); | 45 | SC_FP_OFFSET(SC_FP_CSSEL, cssel); |
47 | SC_FP_OFFSET("SC_FP_DATAOFF", dataoff); | 46 | SC_FP_OFFSET(SC_FP_DATAOFF, dataoff); |
48 | SC_FP_OFFSET("SC_FP_DATASEL", datasel); | 47 | SC_FP_OFFSET(SC_FP_DATASEL, datasel); |
49 | SC_FP_OFFSET_PTR("SC_FP_ST", _st, "struct _fpstate"); | 48 | SC_FP_OFFSET_PTR(SC_FP_ST, _st, "struct _fpstate"); |
50 | SC_FP_OFFSET_PTR("SC_FXSR_ENV", _fxsr_env, "void"); | 49 | SC_FP_OFFSET_PTR(SC_FXSR_ENV, _fxsr_env, "void"); |
51 | return(0); | 50 | return(0); |
52 | } | 51 | } |
diff --git a/arch/um/sys-i386/util/mk_thread.c b/arch/um/sys-i386/util/mk_thread.c new file mode 100644 index 000000000000..7470d0dda67e --- /dev/null +++ b/arch/um/sys-i386/util/mk_thread.c | |||
@@ -0,0 +1,22 @@ | |||
1 | #include <stdio.h> | ||
2 | #include <kernel-offsets.h> | ||
3 | |||
4 | int main(int argc, char **argv) | ||
5 | { | ||
6 | printf("/*\n"); | ||
7 | printf(" * Generated by mk_thread\n"); | ||
8 | printf(" */\n"); | ||
9 | printf("\n"); | ||
10 | printf("#ifndef __UM_THREAD_H\n"); | ||
11 | printf("#define __UM_THREAD_H\n"); | ||
12 | printf("\n"); | ||
13 | printf("#define TASK_DEBUGREGS(task) ((unsigned long *) " | ||
14 | "&(((char *) (task))[%d]))\n", TASK_DEBUGREGS); | ||
15 | #ifdef TASK_EXTERN_PID | ||
16 | printf("#define TASK_EXTERN_PID(task) *((int *) &(((char *) (task))[%d]))\n", | ||
17 | TASK_EXTERN_PID); | ||
18 | #endif | ||
19 | printf("\n"); | ||
20 | printf("#endif\n"); | ||
21 | return(0); | ||
22 | } | ||
diff --git a/arch/um/sys-i386/util/mk_thread_kern.c b/arch/um/sys-i386/util/mk_thread_kern.c deleted file mode 100644 index 948b1ce85230..000000000000 --- a/arch/um/sys-i386/util/mk_thread_kern.c +++ /dev/null | |||
@@ -1,22 +0,0 @@ | |||
1 | #include "linux/config.h" | ||
2 | #include "linux/stddef.h" | ||
3 | #include "linux/sched.h" | ||
4 | |||
5 | extern void print_head(void); | ||
6 | extern void print_constant_ptr(char *name, int value); | ||
7 | extern void print_constant(char *name, char *type, int value); | ||
8 | extern void print_tail(void); | ||
9 | |||
10 | #define THREAD_OFFSET(field) offsetof(struct task_struct, thread.field) | ||
11 | |||
12 | int main(int argc, char **argv) | ||
13 | { | ||
14 | print_head(); | ||
15 | print_constant_ptr("TASK_DEBUGREGS", THREAD_OFFSET(arch.debugregs)); | ||
16 | #ifdef CONFIG_MODE_TT | ||
17 | print_constant("TASK_EXTERN_PID", "int", THREAD_OFFSET(mode.tt.extern_pid)); | ||
18 | #endif | ||
19 | print_tail(); | ||
20 | return(0); | ||
21 | } | ||
22 | |||
diff --git a/arch/um/sys-i386/util/mk_thread_user.c b/arch/um/sys-i386/util/mk_thread_user.c deleted file mode 100644 index 2620cd6aa1f1..000000000000 --- a/arch/um/sys-i386/util/mk_thread_user.c +++ /dev/null | |||
@@ -1,30 +0,0 @@ | |||
1 | #include <stdio.h> | ||
2 | |||
3 | void print_head(void) | ||
4 | { | ||
5 | printf("/*\n"); | ||
6 | printf(" * Generated by mk_thread\n"); | ||
7 | printf(" */\n"); | ||
8 | printf("\n"); | ||
9 | printf("#ifndef __UM_THREAD_H\n"); | ||
10 | printf("#define __UM_THREAD_H\n"); | ||
11 | printf("\n"); | ||
12 | } | ||
13 | |||
14 | void print_constant_ptr(char *name, int value) | ||
15 | { | ||
16 | printf("#define %s(task) ((unsigned long *) " | ||
17 | "&(((char *) (task))[%d]))\n", name, value); | ||
18 | } | ||
19 | |||
20 | void print_constant(char *name, char *type, int value) | ||
21 | { | ||
22 | printf("#define %s(task) *((%s *) &(((char *) (task))[%d]))\n", name, type, | ||
23 | value); | ||
24 | } | ||
25 | |||
26 | void print_tail(void) | ||
27 | { | ||
28 | printf("\n"); | ||
29 | printf("#endif\n"); | ||
30 | } | ||
diff --git a/arch/um/sys-ppc/ptrace.c b/arch/um/sys-ppc/ptrace.c index a971366d3277..8e71b47f2b8e 100644 --- a/arch/um/sys-ppc/ptrace.c +++ b/arch/um/sys-ppc/ptrace.c | |||
@@ -8,6 +8,25 @@ int putreg(struct task_struct *child, unsigned long regno, | |||
8 | return 0; | 8 | return 0; |
9 | } | 9 | } |
10 | 10 | ||
11 | int poke_user(struct task_struct *child, long addr, long data) | ||
12 | { | ||
13 | if ((addr & 3) || addr < 0) | ||
14 | return -EIO; | ||
15 | |||
16 | if (addr < MAX_REG_OFFSET) | ||
17 | return putreg(child, addr, data); | ||
18 | |||
19 | else if((addr >= offsetof(struct user, u_debugreg[0])) && | ||
20 | (addr <= offsetof(struct user, u_debugreg[7]))){ | ||
21 | addr -= offsetof(struct user, u_debugreg[0]); | ||
22 | addr = addr >> 2; | ||
23 | if((addr == 4) || (addr == 5)) return -EIO; | ||
24 | child->thread.arch.debugregs[addr] = data; | ||
25 | return 0; | ||
26 | } | ||
27 | return -EIO; | ||
28 | } | ||
29 | |||
11 | unsigned long getreg(struct task_struct *child, unsigned long regno) | 30 | unsigned long getreg(struct task_struct *child, unsigned long regno) |
12 | { | 31 | { |
13 | unsigned long retval = ~0UL; | 32 | unsigned long retval = ~0UL; |
@@ -16,6 +35,27 @@ unsigned long getreg(struct task_struct *child, unsigned long regno) | |||
16 | return retval; | 35 | return retval; |
17 | } | 36 | } |
18 | 37 | ||
38 | int peek_user(struct task_struct *child, long addr, long data) | ||
39 | { | ||
40 | /* read the word at location addr in the USER area. */ | ||
41 | unsigned long tmp; | ||
42 | |||
43 | if ((addr & 3) || addr < 0) | ||
44 | return -EIO; | ||
45 | |||
46 | tmp = 0; /* Default return condition */ | ||
47 | if(addr < MAX_REG_OFFSET){ | ||
48 | tmp = getreg(child, addr); | ||
49 | } | ||
50 | else if((addr >= offsetof(struct user, u_debugreg[0])) && | ||
51 | (addr <= offsetof(struct user, u_debugreg[7]))){ | ||
52 | addr -= offsetof(struct user, u_debugreg[0]); | ||
53 | addr = addr >> 2; | ||
54 | tmp = child->thread.arch.debugregs[addr]; | ||
55 | } | ||
56 | return put_user(tmp, (unsigned long *) data); | ||
57 | } | ||
58 | |||
19 | /* | 59 | /* |
20 | * Overrides for Emacs so that we follow Linus's tabbing style. | 60 | * Overrides for Emacs so that we follow Linus's tabbing style. |
21 | * Emacs will notice this stuff at the end of the file and automatically | 61 | * Emacs will notice this stuff at the end of the file and automatically |
diff --git a/arch/um/sys-x86_64/Makefile b/arch/um/sys-x86_64/Makefile index 2129e3143559..3d7da911cc8c 100644 --- a/arch/um/sys-x86_64/Makefile +++ b/arch/um/sys-x86_64/Makefile | |||
@@ -4,24 +4,20 @@ | |||
4 | # Licensed under the GPL | 4 | # Licensed under the GPL |
5 | # | 5 | # |
6 | 6 | ||
7 | #XXX: why into lib-y? | ||
7 | lib-y = bitops.o bugs.o csum-partial.o delay.o fault.o mem.o memcpy.o \ | 8 | lib-y = bitops.o bugs.o csum-partial.o delay.o fault.o mem.o memcpy.o \ |
8 | ptrace.o ptrace_user.o semaphore.o sigcontext.o signal.o \ | 9 | ptrace.o ptrace_user.o semaphore.o sigcontext.o signal.o \ |
9 | syscalls.o sysrq.o thunk.o | 10 | syscalls.o sysrq.o thunk.o syscall_table.o |
11 | |||
12 | obj-y := ksyms.o | ||
13 | obj-$(CONFIG_MODULES) += module.o um_module.o | ||
10 | 14 | ||
11 | USER_OBJS := ptrace_user.o sigcontext.o | 15 | USER_OBJS := ptrace_user.o sigcontext.o |
12 | 16 | ||
13 | include arch/um/scripts/Makefile.rules | 17 | include arch/um/scripts/Makefile.rules |
14 | 18 | ||
15 | SYMLINKS = bitops.c csum-copy.S csum-partial.c csum-wrappers.c memcpy.S \ | 19 | SYMLINKS = bitops.c csum-copy.S csum-partial.c csum-wrappers.c memcpy.S \ |
16 | semaphore.c thunk.S | 20 | semaphore.c thunk.S module.c |
17 | |||
18 | # this needs to be before the foreach, because clean-files does not accept | ||
19 | # complete paths like $(src)/$f. | ||
20 | clean-files := $(SYMLINKS) | ||
21 | |||
22 | targets += $(SYMLINKS) | ||
23 | |||
24 | SYMLINKS := $(foreach f,$(SYMLINKS),$(obj)/$f) | ||
25 | 21 | ||
26 | bitops.c-dir = lib | 22 | bitops.c-dir = lib |
27 | csum-copy.S-dir = lib | 23 | csum-copy.S-dir = lib |
@@ -30,8 +26,8 @@ csum-wrappers.c-dir = lib | |||
30 | memcpy.S-dir = lib | 26 | memcpy.S-dir = lib |
31 | semaphore.c-dir = kernel | 27 | semaphore.c-dir = kernel |
32 | thunk.S-dir = lib | 28 | thunk.S-dir = lib |
33 | 29 | module.c-dir = kernel | |
34 | $(SYMLINKS): FORCE | ||
35 | $(call if_changed,make_link) | ||
36 | 30 | ||
37 | CFLAGS_csum-partial.o := -Dcsum_partial=arch_csum_partial | 31 | CFLAGS_csum-partial.o := -Dcsum_partial=arch_csum_partial |
32 | |||
33 | subdir- := util | ||
diff --git a/arch/um/sys-x86_64/delay.c b/arch/um/sys-x86_64/delay.c index f3b5187942b4..651332aeec22 100644 --- a/arch/um/sys-x86_64/delay.c +++ b/arch/um/sys-x86_64/delay.c | |||
@@ -5,7 +5,9 @@ | |||
5 | * Licensed under the GPL | 5 | * Licensed under the GPL |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include "linux/delay.h" | ||
8 | #include "asm/processor.h" | 9 | #include "asm/processor.h" |
10 | #include "asm/param.h" | ||
9 | 11 | ||
10 | void __delay(unsigned long loops) | 12 | void __delay(unsigned long loops) |
11 | { | 13 | { |
@@ -14,6 +16,22 @@ void __delay(unsigned long loops) | |||
14 | for(i = 0; i < loops; i++) ; | 16 | for(i = 0; i < loops; i++) ; |
15 | } | 17 | } |
16 | 18 | ||
19 | void __udelay(unsigned long usecs) | ||
20 | { | ||
21 | int i, n; | ||
22 | |||
23 | n = (loops_per_jiffy * HZ * usecs) / MILLION; | ||
24 | for(i=0;i<n;i++) ; | ||
25 | } | ||
26 | |||
27 | void __const_udelay(unsigned long usecs) | ||
28 | { | ||
29 | int i, n; | ||
30 | |||
31 | n = (loops_per_jiffy * HZ * usecs) / MILLION; | ||
32 | for(i=0;i<n;i++) ; | ||
33 | } | ||
34 | |||
17 | /* | 35 | /* |
18 | * Overrides for Emacs so that we follow Linus's tabbing style. | 36 | * Overrides for Emacs so that we follow Linus's tabbing style. |
19 | * Emacs will notice this stuff at the end of the file and automatically | 37 | * Emacs will notice this stuff at the end of the file and automatically |
diff --git a/arch/um/sys-x86_64/kernel-offsets.c b/arch/um/sys-x86_64/kernel-offsets.c new file mode 100644 index 000000000000..220e875cbe29 --- /dev/null +++ b/arch/um/sys-x86_64/kernel-offsets.c | |||
@@ -0,0 +1,24 @@ | |||
1 | #include <linux/config.h> | ||
2 | #include <linux/stddef.h> | ||
3 | #include <linux/sched.h> | ||
4 | #include <linux/time.h> | ||
5 | #include <asm/page.h> | ||
6 | |||
7 | #define DEFINE(sym, val) \ | ||
8 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
9 | |||
10 | #define DEFINE_STR1(x) #x | ||
11 | #define DEFINE_STR(sym, val) asm volatile("\n->" #sym " " DEFINE_STR1(val) " " #val: : ) | ||
12 | |||
13 | #define BLANK() asm volatile("\n->" : : ) | ||
14 | |||
15 | #define OFFSET(sym, str, mem) \ | ||
16 | DEFINE(sym, offsetof(struct str, mem)); | ||
17 | |||
18 | void foo(void) | ||
19 | { | ||
20 | #ifdef CONFIG_MODE_TT | ||
21 | OFFSET(TASK_EXTERN_PID, task_struct, thread.mode.tt.extern_pid); | ||
22 | #endif | ||
23 | #include <common-offsets.h> | ||
24 | } | ||
diff --git a/arch/um/sys-x86_64/ksyms.c b/arch/um/sys-x86_64/ksyms.c new file mode 100644 index 000000000000..a27f0ee6a4f6 --- /dev/null +++ b/arch/um/sys-x86_64/ksyms.c | |||
@@ -0,0 +1,20 @@ | |||
1 | #include "linux/module.h" | ||
2 | #include "linux/in6.h" | ||
3 | #include "linux/rwsem.h" | ||
4 | #include "asm/byteorder.h" | ||
5 | #include "asm/semaphore.h" | ||
6 | #include "asm/uaccess.h" | ||
7 | #include "asm/checksum.h" | ||
8 | #include "asm/errno.h" | ||
9 | |||
10 | EXPORT_SYMBOL(__down_failed); | ||
11 | EXPORT_SYMBOL(__down_failed_interruptible); | ||
12 | EXPORT_SYMBOL(__down_failed_trylock); | ||
13 | EXPORT_SYMBOL(__up_wakeup); | ||
14 | |||
15 | /*XXX: we need them because they would be exported by x86_64 */ | ||
16 | EXPORT_SYMBOL(__memcpy); | ||
17 | |||
18 | /* Networking helper routines. */ | ||
19 | /*EXPORT_SYMBOL(csum_partial_copy_from); | ||
20 | EXPORT_SYMBOL(csum_partial_copy_to);*/ | ||
diff --git a/arch/um/sys-x86_64/ptrace.c b/arch/um/sys-x86_64/ptrace.c index 8c146b2a1e00..b593bb256f2c 100644 --- a/arch/um/sys-x86_64/ptrace.c +++ b/arch/um/sys-x86_64/ptrace.c | |||
@@ -62,6 +62,27 @@ int putreg(struct task_struct *child, int regno, unsigned long value) | |||
62 | return 0; | 62 | return 0; |
63 | } | 63 | } |
64 | 64 | ||
65 | int poke_user(struct task_struct *child, long addr, long data) | ||
66 | { | ||
67 | if ((addr & 3) || addr < 0) | ||
68 | return -EIO; | ||
69 | |||
70 | if (addr < MAX_REG_OFFSET) | ||
71 | return putreg(child, addr, data); | ||
72 | |||
73 | #if 0 /* Need x86_64 debugregs handling */ | ||
74 | else if((addr >= offsetof(struct user, u_debugreg[0])) && | ||
75 | (addr <= offsetof(struct user, u_debugreg[7]))){ | ||
76 | addr -= offsetof(struct user, u_debugreg[0]); | ||
77 | addr = addr >> 2; | ||
78 | if((addr == 4) || (addr == 5)) return -EIO; | ||
79 | child->thread.arch.debugregs[addr] = data; | ||
80 | return 0; | ||
81 | } | ||
82 | #endif | ||
83 | return -EIO; | ||
84 | } | ||
85 | |||
65 | unsigned long getreg(struct task_struct *child, int regno) | 86 | unsigned long getreg(struct task_struct *child, int regno) |
66 | { | 87 | { |
67 | unsigned long retval = ~0UL; | 88 | unsigned long retval = ~0UL; |
@@ -84,6 +105,29 @@ unsigned long getreg(struct task_struct *child, int regno) | |||
84 | return retval; | 105 | return retval; |
85 | } | 106 | } |
86 | 107 | ||
108 | int peek_user(struct task_struct *child, long addr, long data) | ||
109 | { | ||
110 | /* read the word at location addr in the USER area. */ | ||
111 | unsigned long tmp; | ||
112 | |||
113 | if ((addr & 3) || addr < 0) | ||
114 | return -EIO; | ||
115 | |||
116 | tmp = 0; /* Default return condition */ | ||
117 | if(addr < MAX_REG_OFFSET){ | ||
118 | tmp = getreg(child, addr); | ||
119 | } | ||
120 | #if 0 /* Need x86_64 debugregs handling */ | ||
121 | else if((addr >= offsetof(struct user, u_debugreg[0])) && | ||
122 | (addr <= offsetof(struct user, u_debugreg[7]))){ | ||
123 | addr -= offsetof(struct user, u_debugreg[0]); | ||
124 | addr = addr >> 2; | ||
125 | tmp = child->thread.arch.debugregs[addr]; | ||
126 | } | ||
127 | #endif | ||
128 | return put_user(tmp, (unsigned long *) data); | ||
129 | } | ||
130 | |||
87 | void arch_switch(void) | 131 | void arch_switch(void) |
88 | { | 132 | { |
89 | /* XXX | 133 | /* XXX |
diff --git a/arch/um/sys-x86_64/signal.c b/arch/um/sys-x86_64/signal.c index 5bc5a0d796e5..73a7926f7370 100644 --- a/arch/um/sys-x86_64/signal.c +++ b/arch/um/sys-x86_64/signal.c | |||
@@ -57,7 +57,7 @@ static int copy_sc_from_user_skas(struct pt_regs *regs, | |||
57 | int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp, | 57 | int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp, |
58 | struct pt_regs *regs, unsigned long mask) | 58 | struct pt_regs *regs, unsigned long mask) |
59 | { | 59 | { |
60 | unsigned long eflags; | 60 | struct faultinfo * fi = &current->thread.arch.faultinfo; |
61 | int err = 0; | 61 | int err = 0; |
62 | 62 | ||
63 | err |= __put_user(0, &to->gs); | 63 | err |= __put_user(0, &to->gs); |
@@ -84,14 +84,16 @@ int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp, | |||
84 | err |= PUTREG(regs, R14, to, r14); | 84 | err |= PUTREG(regs, R14, to, r14); |
85 | err |= PUTREG(regs, R15, to, r15); | 85 | err |= PUTREG(regs, R15, to, r15); |
86 | err |= PUTREG(regs, CS, to, cs); /* XXX x86_64 doesn't do this */ | 86 | err |= PUTREG(regs, CS, to, cs); /* XXX x86_64 doesn't do this */ |
87 | err |= __put_user(current->thread.err, &to->err); | 87 | |
88 | err |= __put_user(current->thread.trap_no, &to->trapno); | 88 | err |= __put_user(fi->cr2, &to->cr2); |
89 | err |= __put_user(fi->error_code, &to->err); | ||
90 | err |= __put_user(fi->trap_no, &to->trapno); | ||
91 | |||
89 | err |= PUTREG(regs, RIP, to, rip); | 92 | err |= PUTREG(regs, RIP, to, rip); |
90 | err |= PUTREG(regs, EFLAGS, to, eflags); | 93 | err |= PUTREG(regs, EFLAGS, to, eflags); |
91 | #undef PUTREG | 94 | #undef PUTREG |
92 | 95 | ||
93 | err |= __put_user(mask, &to->oldmask); | 96 | err |= __put_user(mask, &to->oldmask); |
94 | err |= __put_user(current->thread.cr2, &to->cr2); | ||
95 | 97 | ||
96 | return(err); | 98 | return(err); |
97 | } | 99 | } |
@@ -166,7 +168,7 @@ int setup_signal_stack_si(unsigned long stack_top, int sig, | |||
166 | 168 | ||
167 | frame = (struct rt_sigframe __user *) | 169 | frame = (struct rt_sigframe __user *) |
168 | round_down(stack_top - sizeof(struct rt_sigframe), 16) - 8; | 170 | round_down(stack_top - sizeof(struct rt_sigframe), 16) - 8; |
169 | frame -= 128; | 171 | ((unsigned char *) frame) -= 128; |
170 | 172 | ||
171 | if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate))) | 173 | if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate))) |
172 | goto out; | 174 | goto out; |
diff --git a/arch/um/sys-x86_64/syscall_table.c b/arch/um/sys-x86_64/syscall_table.c new file mode 100644 index 000000000000..34b2e842864f --- /dev/null +++ b/arch/um/sys-x86_64/syscall_table.c | |||
@@ -0,0 +1,59 @@ | |||
1 | /* System call table for UML/x86-64, copied from arch/x86_64/kernel/syscall.c | ||
2 | * with some changes for UML. */ | ||
3 | |||
4 | #include <linux/linkage.h> | ||
5 | #include <linux/sys.h> | ||
6 | #include <linux/cache.h> | ||
7 | #include <linux/config.h> | ||
8 | |||
9 | #define __NO_STUBS | ||
10 | |||
11 | /* Below you can see, in terms of #define's, the differences between the x86-64 | ||
12 | * and the UML syscall table. */ | ||
13 | |||
14 | /* Not going to be implemented by UML, since we have no hardware. */ | ||
15 | #define stub_iopl sys_ni_syscall | ||
16 | #define sys_ioperm sys_ni_syscall | ||
17 | |||
18 | /* The UML TLS problem. Note that x86_64 does not implement this, so the below | ||
19 | * is needed only for the ia32 compatibility. */ | ||
20 | /*#define sys_set_thread_area sys_ni_syscall | ||
21 | #define sys_get_thread_area sys_ni_syscall*/ | ||
22 | |||
23 | /* For __NR_time. The x86-64 name hopefully will change from sys_time64 to | ||
24 | * sys_time (since the current situation is bogus). I've sent a patch to clean | ||
25 | * this up. Remove the obsolete line below. */ | ||
26 | #define sys_time64 um_time | ||
27 | #define sys_time um_time | ||
28 | |||
29 | /* On UML we call it this way ("old" means it's not mmap2) */ | ||
30 | #define sys_mmap old_mmap | ||
31 | /* On x86-64 sys_uname is actually sys_newuname plus a compatibility trick. | ||
32 | * See arch/x86_64/kernel/sys_x86_64.c */ | ||
33 | #define sys_uname sys_uname64 | ||
34 | |||
35 | #define stub_clone sys_clone | ||
36 | #define stub_fork sys_fork | ||
37 | #define stub_vfork sys_vfork | ||
38 | #define stub_execve sys_execve | ||
39 | #define stub_rt_sigsuspend sys_rt_sigsuspend | ||
40 | #define stub_sigaltstack sys_sigaltstack | ||
41 | #define stub_rt_sigreturn sys_rt_sigreturn | ||
42 | |||
43 | #define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ; | ||
44 | #undef _ASM_X86_64_UNISTD_H_ | ||
45 | #include <asm-x86_64/unistd.h> | ||
46 | |||
47 | #undef __SYSCALL | ||
48 | #define __SYSCALL(nr, sym) [ nr ] = sym, | ||
49 | #undef _ASM_X86_64_UNISTD_H_ | ||
50 | |||
51 | typedef void (*sys_call_ptr_t)(void); | ||
52 | |||
53 | extern void sys_ni_syscall(void); | ||
54 | |||
55 | sys_call_ptr_t sys_call_table[__NR_syscall_max+1] __cacheline_aligned = { | ||
56 | /* Smells like a compiler bug -- it doesn't work when the & below is removed. */ | ||
57 | [0 ... __NR_syscall_max] = &sys_ni_syscall, | ||
58 | #include <asm-x86_64/unistd.h> | ||
59 | }; | ||
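sys_call_table is built by expanding asm-x86_64/unistd.h twice with two different definitions of __SYSCALL: the first pass declares every handler, the second emits designated initializers, and a GNU "[0 ... N] =" range initializer fills every unlisted slot with sys_ni_syscall beforehand. A self-contained sketch of the same double-expansion pattern; the three-entry syscall list and the demo_* handlers are invented for illustration and are not the kernel's:

#include <stdio.h>

#define SYSCALL_MAX 7

/* Stand-in for the per-arch unistd.h, which lists every syscall once. */
#define SYSCALLS \
	__SYSCALL(0, demo_read)  \
	__SYSCALL(1, demo_write) \
	__SYSCALL(5, demo_open)

typedef void (*sys_call_ptr_t)(void);

static void sys_ni_syscall(void) { puts("ENOSYS"); }
static void demo_read(void)      { puts("read"); }
static void demo_write(void)     { puts("write"); }
static void demo_open(void)      { puts("open"); }

/* In the kernel the first expansion declares the handlers with
 *     #define __SYSCALL(nr, sym) extern asmlinkage void sym(void);
 * here they are defined above, so only the table-building pass remains. */
#define __SYSCALL(nr, sym) [nr] = &sym,

static sys_call_ptr_t sys_call_table[SYSCALL_MAX + 1] = {
	[0 ... SYSCALL_MAX] = &sys_ni_syscall,	/* GNU range initializer */
	SYSCALLS
};
#undef __SYSCALL

int main(void)
{
	sys_call_table[1]();	/* prints "write" */
	sys_call_table[3]();	/* unlisted slot falls back to "ENOSYS" */
	return 0;
}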
diff --git a/arch/um/sys-x86_64/syscalls.c b/arch/um/sys-x86_64/syscalls.c index 68205a03364c..dd9914642b8e 100644 --- a/arch/um/sys-x86_64/syscalls.c +++ b/arch/um/sys-x86_64/syscalls.c | |||
@@ -7,6 +7,8 @@ | |||
7 | #include "linux/linkage.h" | 7 | #include "linux/linkage.h" |
8 | #include "linux/slab.h" | 8 | #include "linux/slab.h" |
9 | #include "linux/shm.h" | 9 | #include "linux/shm.h" |
10 | #include "linux/utsname.h" | ||
11 | #include "linux/personality.h" | ||
10 | #include "asm/uaccess.h" | 12 | #include "asm/uaccess.h" |
11 | #define __FRAME_OFFSETS | 13 | #define __FRAME_OFFSETS |
12 | #include "asm/ptrace.h" | 14 | #include "asm/ptrace.h" |
@@ -14,11 +16,15 @@ | |||
14 | #include "asm/prctl.h" /* XXX This should get the constants from libc */ | 16 | #include "asm/prctl.h" /* XXX This should get the constants from libc */ |
15 | #include "choose-mode.h" | 17 | #include "choose-mode.h" |
16 | 18 | ||
17 | asmlinkage long wrap_sys_shmat(int shmid, char __user *shmaddr, int shmflg) | 19 | asmlinkage long sys_uname64(struct new_utsname __user * name) |
18 | { | 20 | { |
19 | unsigned long raddr; | 21 | int err; |
20 | 22 | down_read(&uts_sem); | |
21 | return do_shmat(shmid, shmaddr, shmflg, &raddr) ?: (long) raddr; | 23 | err = copy_to_user(name, &system_utsname, sizeof (*name)); |
24 | up_read(&uts_sem); | ||
25 | if (personality(current->personality) == PER_LINUX32) | ||
26 | err |= copy_to_user(&name->machine, "i686", 5); | ||
27 | return err ? -EFAULT : 0; | ||
22 | } | 28 | } |
23 | 29 | ||
24 | #ifdef CONFIG_MODE_TT | 30 | #ifdef CONFIG_MODE_TT |
@@ -38,6 +44,8 @@ long sys_modify_ldt_tt(int func, void *ptr, unsigned long bytecount) | |||
38 | #ifdef CONFIG_MODE_SKAS | 44 | #ifdef CONFIG_MODE_SKAS |
39 | extern int userspace_pid[]; | 45 | extern int userspace_pid[]; |
40 | 46 | ||
47 | #include "skas_ptrace.h" | ||
48 | |||
41 | long sys_modify_ldt_skas(int func, void *ptr, unsigned long bytecount) | 49 | long sys_modify_ldt_skas(int func, void *ptr, unsigned long bytecount) |
42 | { | 50 | { |
43 | struct ptrace_ldt ldt; | 51 | struct ptrace_ldt ldt; |
diff --git a/arch/um/sys-x86_64/um_module.c b/arch/um/sys-x86_64/um_module.c new file mode 100644 index 000000000000..8b8eff1bd977 --- /dev/null +++ b/arch/um/sys-x86_64/um_module.c | |||
@@ -0,0 +1,19 @@ | |||
1 | #include <linux/vmalloc.h> | ||
2 | #include <linux/moduleloader.h> | ||
3 | |||
4 | /* Copied from i386 arch/i386/kernel/module.c */ | ||
5 | void *module_alloc(unsigned long size) | ||
6 | { | ||
7 | if (size == 0) | ||
8 | return NULL; | ||
9 | return vmalloc_exec(size); | ||
10 | } | ||
11 | |||
12 | /* Free memory returned from module_alloc */ | ||
13 | void module_free(struct module *mod, void *module_region) | ||
14 | { | ||
15 | vfree(module_region); | ||
16 | /* FIXME: If module_region == mod->init_region, trim exception | ||
17 | table entries. */ | ||
18 | } | ||
19 | |||
diff --git a/arch/um/sys-x86_64/user-offsets.c b/arch/um/sys-x86_64/user-offsets.c new file mode 100644 index 000000000000..5e14792e4838 --- /dev/null +++ b/arch/um/sys-x86_64/user-offsets.c | |||
@@ -0,0 +1,78 @@ | |||
1 | #include <stdio.h> | ||
2 | #include <stddef.h> | ||
3 | #include <signal.h> | ||
4 | #define __FRAME_OFFSETS | ||
5 | #include <asm/ptrace.h> | ||
6 | #include <asm/user.h> | ||
7 | |||
8 | #define DEFINE(sym, val) \ | ||
9 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
10 | |||
11 | #define OFFSET(sym, str, mem) \ | ||
12 | DEFINE(sym, offsetof(struct str, mem)); | ||
13 | |||
14 | void foo(void) | ||
15 | { | ||
16 | OFFSET(SC_RBX, sigcontext, rbx); | ||
17 | OFFSET(SC_RCX, sigcontext, rcx); | ||
18 | OFFSET(SC_RDX, sigcontext, rdx); | ||
19 | OFFSET(SC_RSI, sigcontext, rsi); | ||
20 | OFFSET(SC_RDI, sigcontext, rdi); | ||
21 | OFFSET(SC_RBP, sigcontext, rbp); | ||
22 | OFFSET(SC_RAX, sigcontext, rax); | ||
23 | OFFSET(SC_R8, sigcontext, r8); | ||
24 | OFFSET(SC_R9, sigcontext, r9); | ||
25 | OFFSET(SC_R10, sigcontext, r10); | ||
26 | OFFSET(SC_R11, sigcontext, r11); | ||
27 | OFFSET(SC_R12, sigcontext, r12); | ||
28 | OFFSET(SC_R13, sigcontext, r13); | ||
29 | OFFSET(SC_R14, sigcontext, r14); | ||
30 | OFFSET(SC_R15, sigcontext, r15); | ||
31 | OFFSET(SC_IP, sigcontext, rip); | ||
32 | OFFSET(SC_SP, sigcontext, rsp); | ||
33 | OFFSET(SC_CR2, sigcontext, cr2); | ||
34 | OFFSET(SC_ERR, sigcontext, err); | ||
35 | OFFSET(SC_TRAPNO, sigcontext, trapno); | ||
36 | OFFSET(SC_CS, sigcontext, cs); | ||
37 | OFFSET(SC_FS, sigcontext, fs); | ||
38 | OFFSET(SC_GS, sigcontext, gs); | ||
39 | OFFSET(SC_EFLAGS, sigcontext, eflags); | ||
40 | OFFSET(SC_SIGMASK, sigcontext, oldmask); | ||
41 | #if 0 | ||
42 | OFFSET(SC_ORIG_RAX, sigcontext, orig_rax); | ||
43 | OFFSET(SC_DS, sigcontext, ds); | ||
44 | OFFSET(SC_ES, sigcontext, es); | ||
45 | OFFSET(SC_SS, sigcontext, ss); | ||
46 | #endif | ||
47 | |||
48 | DEFINE(HOST_FRAME_SIZE, FRAME_SIZE); | ||
49 | DEFINE(HOST_RBX, RBX); | ||
50 | DEFINE(HOST_RCX, RCX); | ||
51 | DEFINE(HOST_RDI, RDI); | ||
52 | DEFINE(HOST_RSI, RSI); | ||
53 | DEFINE(HOST_RDX, RDX); | ||
54 | DEFINE(HOST_RBP, RBP); | ||
55 | DEFINE(HOST_RAX, RAX); | ||
56 | DEFINE(HOST_R8, R8); | ||
57 | DEFINE(HOST_R9, R9); | ||
58 | DEFINE(HOST_R10, R10); | ||
59 | DEFINE(HOST_R11, R11); | ||
60 | DEFINE(HOST_R12, R12); | ||
61 | DEFINE(HOST_R13, R13); | ||
62 | DEFINE(HOST_R14, R14); | ||
63 | DEFINE(HOST_R15, R15); | ||
64 | DEFINE(HOST_ORIG_RAX, ORIG_RAX); | ||
65 | DEFINE(HOST_CS, CS); | ||
66 | DEFINE(HOST_SS, SS); | ||
67 | DEFINE(HOST_EFLAGS, EFLAGS); | ||
68 | #if 0 | ||
69 | DEFINE(HOST_FS, FS); | ||
70 | DEFINE(HOST_GS, GS); | ||
71 | DEFINE(HOST_DS, DS); | ||
72 | DEFINE(HOST_ES, ES); | ||
73 | #endif | ||
74 | |||
75 | DEFINE(HOST_IP, RIP); | ||
76 | DEFINE(HOST_SP, RSP); | ||
77 | DEFINE(__UM_FRAME_SIZE, sizeof(struct user_regs_struct)); | ||
78 | } | ||
diff --git a/arch/um/sys-x86_64/util/Makefile b/arch/um/sys-x86_64/util/Makefile index 002607980864..75b052cfc206 100644 --- a/arch/um/sys-x86_64/util/Makefile +++ b/arch/um/sys-x86_64/util/Makefile | |||
@@ -4,7 +4,5 @@ | |||
4 | hostprogs-y := mk_sc mk_thread | 4 | hostprogs-y := mk_sc mk_thread |
5 | always := $(hostprogs-y) | 5 | always := $(hostprogs-y) |
6 | 6 | ||
7 | mk_thread-objs := mk_thread_kern.o mk_thread_user.o | 7 | HOSTCFLAGS_mk_sc.o := -I$(objtree)/arch/um |
8 | 8 | HOSTCFLAGS_mk_thread.o := -I$(objtree)/arch/um | |
9 | HOSTCFLAGS_mk_thread_kern.o := $(CFLAGS) $(CPPFLAGS) | ||
10 | HOSTCFLAGS_mk_thread_user.o := $(USER_CFLAGS) | ||
diff --git a/arch/um/sys-x86_64/util/mk_sc.c b/arch/um/sys-x86_64/util/mk_sc.c index c236e213918d..7619bc377c1f 100644 --- a/arch/um/sys-x86_64/util/mk_sc.c +++ b/arch/um/sys-x86_64/util/mk_sc.c | |||
@@ -3,56 +3,45 @@ | |||
3 | */ | 3 | */ |
4 | 4 | ||
5 | #include <stdio.h> | 5 | #include <stdio.h> |
6 | #include <signal.h> | 6 | #include <user-offsets.h> |
7 | #include <linux/stddef.h> | ||
8 | 7 | ||
9 | #define SC_OFFSET(name, field) \ | 8 | #define SC_OFFSET(name) \ |
10 | printf("#define " name \ | 9 | printf("#define " #name \ |
11 | "(sc) *((unsigned long *) &(((char *) (sc))[%ld]))\n",\ | 10 | "(sc) *((unsigned long *) &(((char *) (sc))[%d]))\n",\ |
12 | offsetof(struct sigcontext, field)) | 11 | name) |
13 | |||
14 | #define SC_FP_OFFSET(name, field) \ | ||
15 | printf("#define " name \ | ||
16 | "(sc) *((unsigned long *) &(((char *) (SC_FPSTATE(sc)))[%ld]))\n",\ | ||
17 | offsetof(struct _fpstate, field)) | ||
18 | |||
19 | #define SC_FP_OFFSET_PTR(name, field, type) \ | ||
20 | printf("#define " name \ | ||
21 | "(sc) ((" type " *) &(((char *) (SC_FPSTATE(sc)))[%d]))\n",\ | ||
22 | offsetof(struct _fpstate, field)) | ||
23 | 12 | ||
24 | int main(int argc, char **argv) | 13 | int main(int argc, char **argv) |
25 | { | 14 | { |
26 | SC_OFFSET("SC_RBX", rbx); | 15 | SC_OFFSET(SC_RBX); |
27 | SC_OFFSET("SC_RCX", rcx); | 16 | SC_OFFSET(SC_RCX); |
28 | SC_OFFSET("SC_RDX", rdx); | 17 | SC_OFFSET(SC_RDX); |
29 | SC_OFFSET("SC_RSI", rsi); | 18 | SC_OFFSET(SC_RSI); |
30 | SC_OFFSET("SC_RDI", rdi); | 19 | SC_OFFSET(SC_RDI); |
31 | SC_OFFSET("SC_RBP", rbp); | 20 | SC_OFFSET(SC_RBP); |
32 | SC_OFFSET("SC_RAX", rax); | 21 | SC_OFFSET(SC_RAX); |
33 | SC_OFFSET("SC_R8", r8); | 22 | SC_OFFSET(SC_R8); |
34 | SC_OFFSET("SC_R9", r9); | 23 | SC_OFFSET(SC_R9); |
35 | SC_OFFSET("SC_R10", r10); | 24 | SC_OFFSET(SC_R10); |
36 | SC_OFFSET("SC_R11", r11); | 25 | SC_OFFSET(SC_R11); |
37 | SC_OFFSET("SC_R12", r12); | 26 | SC_OFFSET(SC_R12); |
38 | SC_OFFSET("SC_R13", r13); | 27 | SC_OFFSET(SC_R13); |
39 | SC_OFFSET("SC_R14", r14); | 28 | SC_OFFSET(SC_R14); |
40 | SC_OFFSET("SC_R15", r15); | 29 | SC_OFFSET(SC_R15); |
41 | SC_OFFSET("SC_IP", rip); | 30 | SC_OFFSET(SC_IP); |
42 | SC_OFFSET("SC_SP", rsp); | 31 | SC_OFFSET(SC_SP); |
43 | SC_OFFSET("SC_CR2", cr2); | 32 | SC_OFFSET(SC_CR2); |
44 | SC_OFFSET("SC_ERR", err); | 33 | SC_OFFSET(SC_ERR); |
45 | SC_OFFSET("SC_TRAPNO", trapno); | 34 | SC_OFFSET(SC_TRAPNO); |
46 | SC_OFFSET("SC_CS", cs); | 35 | SC_OFFSET(SC_CS); |
47 | SC_OFFSET("SC_FS", fs); | 36 | SC_OFFSET(SC_FS); |
48 | SC_OFFSET("SC_GS", gs); | 37 | SC_OFFSET(SC_GS); |
49 | SC_OFFSET("SC_EFLAGS", eflags); | 38 | SC_OFFSET(SC_EFLAGS); |
50 | SC_OFFSET("SC_SIGMASK", oldmask); | 39 | SC_OFFSET(SC_SIGMASK); |
51 | #if 0 | 40 | #if 0 |
52 | SC_OFFSET("SC_ORIG_RAX", orig_rax); | 41 | SC_OFFSET(SC_ORIG_RAX); |
53 | SC_OFFSET("SC_DS", ds); | 42 | SC_OFFSET(SC_DS); |
54 | SC_OFFSET("SC_ES", es); | 43 | SC_OFFSET(SC_ES); |
55 | SC_OFFSET("SC_SS", ss); | 44 | SC_OFFSET(SC_SS); |
56 | #endif | 45 | #endif |
57 | return(0); | 46 | return(0); |
58 | } | 47 | } |
diff --git a/arch/um/sys-x86_64/util/mk_thread.c b/arch/um/sys-x86_64/util/mk_thread.c new file mode 100644 index 000000000000..15517396e9cf --- /dev/null +++ b/arch/um/sys-x86_64/util/mk_thread.c | |||
@@ -0,0 +1,20 @@ | |||
1 | #include <stdio.h> | ||
2 | #include <kernel-offsets.h> | ||
3 | |||
4 | int main(int argc, char **argv) | ||
5 | { | ||
6 | printf("/*\n"); | ||
7 | printf(" * Generated by mk_thread\n"); | ||
8 | printf(" */\n"); | ||
9 | printf("\n"); | ||
10 | printf("#ifndef __UM_THREAD_H\n"); | ||
11 | printf("#define __UM_THREAD_H\n"); | ||
12 | printf("\n"); | ||
13 | #ifdef TASK_EXTERN_PID | ||
14 | printf("#define TASK_EXTERN_PID(task) *((int *) &(((char *) (task))[%d]))\n", | ||
15 | TASK_EXTERN_PID); | ||
16 | #endif | ||
17 | printf("\n"); | ||
18 | printf("#endif\n"); | ||
19 | return(0); | ||
20 | } | ||
diff --git a/arch/um/sys-x86_64/util/mk_thread_kern.c b/arch/um/sys-x86_64/util/mk_thread_kern.c deleted file mode 100644 index a281673f02b2..000000000000 --- a/arch/um/sys-x86_64/util/mk_thread_kern.c +++ /dev/null | |||
@@ -1,21 +0,0 @@ | |||
1 | #include "linux/config.h" | ||
2 | #include "linux/stddef.h" | ||
3 | #include "linux/sched.h" | ||
4 | |||
5 | extern void print_head(void); | ||
6 | extern void print_constant_ptr(char *name, int value); | ||
7 | extern void print_constant(char *name, char *type, int value); | ||
8 | extern void print_tail(void); | ||
9 | |||
10 | #define THREAD_OFFSET(field) offsetof(struct task_struct, thread.field) | ||
11 | |||
12 | int main(int argc, char **argv) | ||
13 | { | ||
14 | print_head(); | ||
15 | #ifdef CONFIG_MODE_TT | ||
16 | print_constant("TASK_EXTERN_PID", "int", THREAD_OFFSET(mode.tt.extern_pid)); | ||
17 | #endif | ||
18 | print_tail(); | ||
19 | return(0); | ||
20 | } | ||
21 | |||
diff --git a/arch/um/sys-x86_64/util/mk_thread_user.c b/arch/um/sys-x86_64/util/mk_thread_user.c deleted file mode 100644 index 7989725568b8..000000000000 --- a/arch/um/sys-x86_64/util/mk_thread_user.c +++ /dev/null | |||
@@ -1,30 +0,0 @@ | |||
1 | #include <stdio.h> | ||
2 | |||
3 | void print_head(void) | ||
4 | { | ||
5 | printf("/*\n"); | ||
6 | printf(" * Generated by mk_thread\n"); | ||
7 | printf(" */\n"); | ||
8 | printf("\n"); | ||
9 | printf("#ifndef __UM_THREAD_H\n"); | ||
10 | printf("#define __UM_THREAD_H\n"); | ||
11 | printf("\n"); | ||
12 | } | ||
13 | |||
14 | void print_constant_ptr(char *name, int value) | ||
15 | { | ||
16 | printf("#define %s(task) ((unsigned long *) " | ||
17 | "&(((char *) (task))[%d]))\n", name, value); | ||
18 | } | ||
19 | |||
20 | void print_constant(char *name, char *type, int value) | ||
21 | { | ||
22 | printf("#define %s(task) *((%s *) &(((char *) (task))[%d]))\n", name, type, | ||
23 | value); | ||
24 | } | ||
25 | |||
26 | void print_tail(void) | ||
27 | { | ||
28 | printf("\n"); | ||
29 | printf("#endif\n"); | ||
30 | } | ||
diff --git a/arch/um/util/Makefile b/arch/um/util/Makefile index e2ab71209f3f..4c7551c28033 100644 --- a/arch/um/util/Makefile +++ b/arch/um/util/Makefile | |||
@@ -1,8 +1,5 @@ | |||
1 | hostprogs-y := mk_task mk_constants | 1 | hostprogs-y := mk_task mk_constants |
2 | always := $(hostprogs-y) | 2 | always := $(hostprogs-y) |
3 | 3 | ||
4 | mk_task-objs := mk_task_user.o mk_task_kern.o | 4 | HOSTCFLAGS_mk_task.o := -I$(objtree)/arch/um |
5 | mk_constants-objs := mk_constants_user.o mk_constants_kern.o | 5 | HOSTCFLAGS_mk_constants.o := -I$(objtree)/arch/um |
6 | |||
7 | HOSTCFLAGS_mk_task_kern.o := $(CFLAGS) $(CPPFLAGS) | ||
8 | HOSTCFLAGS_mk_constants_kern.o := $(CFLAGS) $(CPPFLAGS) | ||
diff --git a/arch/um/util/mk_constants.c b/arch/um/util/mk_constants.c new file mode 100644 index 000000000000..ab217becc36a --- /dev/null +++ b/arch/um/util/mk_constants.c | |||
@@ -0,0 +1,32 @@ | |||
1 | #include <stdio.h> | ||
2 | #include <kernel-offsets.h> | ||
3 | |||
4 | #define SHOW_INT(sym) printf("#define %s %d\n", #sym, sym) | ||
5 | #define SHOW_STR(sym) printf("#define %s %s\n", #sym, sym) | ||
6 | |||
7 | int main(int argc, char **argv) | ||
8 | { | ||
9 | printf("/*\n"); | ||
10 | printf(" * Generated by mk_constants\n"); | ||
11 | printf(" */\n"); | ||
12 | printf("\n"); | ||
13 | printf("#ifndef __UM_CONSTANTS_H\n"); | ||
14 | printf("#define __UM_CONSTANTS_H\n"); | ||
15 | printf("\n"); | ||
16 | |||
17 | SHOW_INT(UM_KERN_PAGE_SIZE); | ||
18 | |||
19 | SHOW_STR(UM_KERN_EMERG); | ||
20 | SHOW_STR(UM_KERN_ALERT); | ||
21 | SHOW_STR(UM_KERN_CRIT); | ||
22 | SHOW_STR(UM_KERN_ERR); | ||
23 | SHOW_STR(UM_KERN_WARNING); | ||
24 | SHOW_STR(UM_KERN_NOTICE); | ||
25 | SHOW_STR(UM_KERN_INFO); | ||
26 | SHOW_STR(UM_KERN_DEBUG); | ||
27 | |||
28 | SHOW_INT(UM_NSEC_PER_SEC); | ||
29 | printf("\n"); | ||
30 | printf("#endif\n"); | ||
31 | return(0); | ||
32 | } | ||
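
For orientation, here is a hedged sketch of the header this host tool emits at build time. The macro names mirror the SHOW_INT/SHOW_STR calls above; the concrete values are placeholders, since the real ones are pulled in from kernel-offsets.h when the tool is compiled.

    /*
     * Generated by mk_constants
     */

    #ifndef __UM_CONSTANTS_H
    #define __UM_CONSTANTS_H

    #define UM_KERN_PAGE_SIZE 4096        /* placeholder; real value from kernel-offsets.h */
    #define UM_KERN_ERR "<3>"             /* placeholder KERN_* prefix string */
    #define UM_NSEC_PER_SEC 1000000000    /* placeholder */

    #endif
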
diff --git a/arch/um/util/mk_constants_kern.c b/arch/um/util/mk_constants_kern.c deleted file mode 100644 index cdcb1232a1ea..000000000000 --- a/arch/um/util/mk_constants_kern.c +++ /dev/null | |||
@@ -1,28 +0,0 @@ | |||
1 | #include "linux/kernel.h" | ||
2 | #include "linux/stringify.h" | ||
3 | #include "linux/time.h" | ||
4 | #include "asm/page.h" | ||
5 | |||
6 | extern void print_head(void); | ||
7 | extern void print_constant_str(char *name, char *value); | ||
8 | extern void print_constant_int(char *name, int value); | ||
9 | extern void print_tail(void); | ||
10 | |||
11 | int main(int argc, char **argv) | ||
12 | { | ||
13 | print_head(); | ||
14 | print_constant_int("UM_KERN_PAGE_SIZE", PAGE_SIZE); | ||
15 | |||
16 | print_constant_str("UM_KERN_EMERG", KERN_EMERG); | ||
17 | print_constant_str("UM_KERN_ALERT", KERN_ALERT); | ||
18 | print_constant_str("UM_KERN_CRIT", KERN_CRIT); | ||
19 | print_constant_str("UM_KERN_ERR", KERN_ERR); | ||
20 | print_constant_str("UM_KERN_WARNING", KERN_WARNING); | ||
21 | print_constant_str("UM_KERN_NOTICE", KERN_NOTICE); | ||
22 | print_constant_str("UM_KERN_INFO", KERN_INFO); | ||
23 | print_constant_str("UM_KERN_DEBUG", KERN_DEBUG); | ||
24 | |||
25 | print_constant_int("UM_NSEC_PER_SEC", NSEC_PER_SEC); | ||
26 | print_tail(); | ||
27 | return(0); | ||
28 | } | ||
diff --git a/arch/um/util/mk_constants_user.c b/arch/um/util/mk_constants_user.c deleted file mode 100644 index 8f4d7e50be7c..000000000000 --- a/arch/um/util/mk_constants_user.c +++ /dev/null | |||
@@ -1,28 +0,0 @@ | |||
1 | #include <stdio.h> | ||
2 | |||
3 | void print_head(void) | ||
4 | { | ||
5 | printf("/*\n"); | ||
6 | printf(" * Generated by mk_constants\n"); | ||
7 | printf(" */\n"); | ||
8 | printf("\n"); | ||
9 | printf("#ifndef __UM_CONSTANTS_H\n"); | ||
10 | printf("#define __UM_CONSTANTS_H\n"); | ||
11 | printf("\n"); | ||
12 | } | ||
13 | |||
14 | void print_constant_str(char *name, char *value) | ||
15 | { | ||
16 | printf("#define %s \"%s\"\n", name, value); | ||
17 | } | ||
18 | |||
19 | void print_constant_int(char *name, int value) | ||
20 | { | ||
21 | printf("#define %s %d\n", name, value); | ||
22 | } | ||
23 | |||
24 | void print_tail(void) | ||
25 | { | ||
26 | printf("\n"); | ||
27 | printf("#endif\n"); | ||
28 | } | ||
diff --git a/arch/um/util/mk_task_user.c b/arch/um/util/mk_task.c index 9db849f3f3ac..36c9606505e2 100644 --- a/arch/um/util/mk_task_user.c +++ b/arch/um/util/mk_task.c | |||
@@ -1,18 +1,19 @@ | |||
1 | #include <stdio.h> | 1 | #include <stdio.h> |
2 | #include <kernel-offsets.h> | ||
2 | 3 | ||
3 | void print(char *name, char *type, int offset) | 4 | void print_ptr(char *name, char *type, int offset) |
4 | { | 5 | { |
5 | printf("#define %s(task) *((%s *) &(((char *) (task))[%d]))\n", name, type, | 6 | printf("#define %s(task) ((%s *) &(((char *) (task))[%d]))\n", name, type, |
6 | offset); | 7 | offset); |
7 | } | 8 | } |
8 | 9 | ||
9 | void print_ptr(char *name, char *type, int offset) | 10 | void print(char *name, char *type, int offset) |
10 | { | 11 | { |
11 | printf("#define %s(task) ((%s *) &(((char *) (task))[%d]))\n", name, type, | 12 | printf("#define %s(task) *((%s *) &(((char *) (task))[%d]))\n", name, type, |
12 | offset); | 13 | offset); |
13 | } | 14 | } |
14 | 15 | ||
15 | void print_head(void) | 16 | int main(int argc, char **argv) |
16 | { | 17 | { |
17 | printf("/*\n"); | 18 | printf("/*\n"); |
18 | printf(" * Generated by mk_task\n"); | 19 | printf(" * Generated by mk_task\n"); |
@@ -21,10 +22,9 @@ void print_head(void) | |||
21 | printf("#ifndef __TASK_H\n"); | 22 | printf("#ifndef __TASK_H\n"); |
22 | printf("#define __TASK_H\n"); | 23 | printf("#define __TASK_H\n"); |
23 | printf("\n"); | 24 | printf("\n"); |
24 | } | 25 | print_ptr("TASK_REGS", "union uml_pt_regs", TASK_REGS); |
25 | 26 | print("TASK_PID", "int", TASK_PID); | |
26 | void print_tail(void) | ||
27 | { | ||
28 | printf("\n"); | 27 | printf("\n"); |
29 | printf("#endif\n"); | 28 | printf("#endif\n"); |
29 | return(0); | ||
30 | } | 30 | } |
diff --git a/arch/um/util/mk_task_kern.c b/arch/um/util/mk_task_kern.c deleted file mode 100644 index c218103315ed..000000000000 --- a/arch/um/util/mk_task_kern.c +++ /dev/null | |||
@@ -1,17 +0,0 @@ | |||
1 | #include "linux/sched.h" | ||
2 | #include "linux/stddef.h" | ||
3 | |||
4 | extern void print(char *name, char *type, int offset); | ||
5 | extern void print_ptr(char *name, char *type, int offset); | ||
6 | extern void print_head(void); | ||
7 | extern void print_tail(void); | ||
8 | |||
9 | int main(int argc, char **argv) | ||
10 | { | ||
11 | print_head(); | ||
12 | print_ptr("TASK_REGS", "union uml_pt_regs", | ||
13 | offsetof(struct task_struct, thread.regs)); | ||
14 | print("TASK_PID", "int", offsetof(struct task_struct, pid)); | ||
15 | print_tail(); | ||
16 | return(0); | ||
17 | } | ||
diff --git a/arch/v850/kernel/ptrace.c b/arch/v850/kernel/ptrace.c index 8fa780757dcd..4726b87f5e5a 100644 --- a/arch/v850/kernel/ptrace.c +++ b/arch/v850/kernel/ptrace.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/sched.h> | 23 | #include <linux/sched.h> |
24 | #include <linux/smp_lock.h> | 24 | #include <linux/smp_lock.h> |
25 | #include <linux/ptrace.h> | 25 | #include <linux/ptrace.h> |
26 | #include <linux/signal.h> | ||
26 | 27 | ||
27 | #include <asm/errno.h> | 28 | #include <asm/errno.h> |
28 | #include <asm/ptrace.h> | 29 | #include <asm/ptrace.h> |
@@ -208,7 +209,7 @@ int sys_ptrace(long request, long pid, long addr, long data) | |||
208 | /* Execute a single instruction. */ | 209 | /* Execute a single instruction. */ |
209 | case PTRACE_SINGLESTEP: | 210 | case PTRACE_SINGLESTEP: |
210 | rval = -EIO; | 211 | rval = -EIO; |
211 | if ((unsigned long) data > _NSIG) | 212 | if (!valid_signal(data)) |
212 | break; | 213 | break; |
213 | 214 | ||
214 | /* Turn CHILD's single-step flag on or off. */ | 215 | /* Turn CHILD's single-step flag on or off. */ |
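
The valid_signal() helper that these ptrace call sites are converted to lives in <linux/signal.h>; a minimal sketch of the range check it wraps, shown only for orientation and assuming the helper is the simple bounds test it appears to be:

    /* sketch of the <linux/signal.h> helper used by the converted call sites */
    static inline int valid_signal(unsigned long sig)
    {
            return sig <= _NSIG ? 1 : 0;
    }
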
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig index 80c38c5d71fe..44ee7f6acf7b 100644 --- a/arch/x86_64/Kconfig +++ b/arch/x86_64/Kconfig | |||
@@ -379,6 +379,11 @@ config GENERIC_IRQ_PROBE | |||
379 | bool | 379 | bool |
380 | default y | 380 | default y |
381 | 381 | ||
382 | # we have no ISA slots, but we do have ISA-style DMA. | ||
383 | config ISA_DMA_API | ||
384 | bool | ||
385 | default y | ||
386 | |||
382 | menu "Power management options" | 387 | menu "Power management options" |
383 | 388 | ||
384 | source kernel/power/Kconfig | 389 | source kernel/power/Kconfig |
diff --git a/arch/x86_64/boot/bootsect.S b/arch/x86_64/boot/bootsect.S index bb15d406ee95..011b7a4993d4 100644 --- a/arch/x86_64/boot/bootsect.S +++ b/arch/x86_64/boot/bootsect.S | |||
@@ -63,7 +63,7 @@ msg_loop: | |||
63 | jz die | 63 | jz die |
64 | movb $0xe, %ah | 64 | movb $0xe, %ah |
65 | movw $7, %bx | 65 | movw $7, %bx |
66 | int $0x10 | 66 | int $0x10 |
67 | jmp msg_loop | 67 | jmp msg_loop |
68 | 68 | ||
69 | die: | 69 | die: |
@@ -71,7 +71,7 @@ die: | |||
71 | xorw %ax, %ax | 71 | xorw %ax, %ax |
72 | int $0x16 | 72 | int $0x16 |
73 | int $0x19 | 73 | int $0x19 |
74 | 74 | ||
75 | # int 0x19 should never return. In case it does anyway, | 75 | # int 0x19 should never return. In case it does anyway, |
76 | # invoke the BIOS reset code... | 76 | # invoke the BIOS reset code... |
77 | ljmp $0xf000,$0xfff0 | 77 | ljmp $0xf000,$0xfff0 |
diff --git a/arch/x86_64/boot/setup.S b/arch/x86_64/boot/setup.S index 3e838be9dbe7..75d4d2ad93b3 100644 --- a/arch/x86_64/boot/setup.S +++ b/arch/x86_64/boot/setup.S | |||
@@ -160,7 +160,7 @@ ramdisk_max: .long 0xffffffff | |||
160 | trampoline: call start_of_setup | 160 | trampoline: call start_of_setup |
161 | .align 16 | 161 | .align 16 |
162 | # The offset at this point is 0x240 | 162 | # The offset at this point is 0x240 |
163 | .space (0x7ff-0x240+1) # E820 & EDD space (ending at 0x7ff) | 163 | .space (0xeff-0x240+1) # E820 & EDD space (ending at 0xeff) |
164 | # End of setup header ##################################################### | 164 | # End of setup header ##################################################### |
165 | 165 | ||
166 | start_of_setup: | 166 | start_of_setup: |
@@ -412,9 +412,9 @@ jmpe820: | |||
412 | # sizeof(e820rec). | 412 | # sizeof(e820rec). |
413 | # | 413 | # |
414 | good820: | 414 | good820: |
415 | movb (E820NR), %al # up to 32 entries | 415 | movb (E820NR), %al # up to 128 entries |
416 | cmpb $E820MAX, %al | 416 | cmpb $E820MAX, %al |
417 | jnl bail820 | 417 | jae bail820 |
418 | 418 | ||
419 | incb (E820NR) | 419 | incb (E820NR) |
420 | movw %di, %ax | 420 | movw %di, %ax |
diff --git a/arch/x86_64/ia32/vsyscall.lds b/arch/x86_64/ia32/vsyscall.lds index fa4b4dd4a9ff..f2e75ed4c6c7 100644 --- a/arch/x86_64/ia32/vsyscall.lds +++ b/arch/x86_64/ia32/vsyscall.lds | |||
@@ -36,6 +36,7 @@ SECTIONS | |||
36 | 36 | ||
37 | .text.rtsigreturn : { *(.text.rtsigreturn) } :text =0x90909090 | 37 | .text.rtsigreturn : { *(.text.rtsigreturn) } :text =0x90909090 |
38 | 38 | ||
39 | .note : { *(.note.*) } :text :note | ||
39 | .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr | 40 | .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr |
40 | .eh_frame : { KEEP (*(.eh_frame)) } :text | 41 | .eh_frame : { KEEP (*(.eh_frame)) } :text |
41 | .dynamic : { *(.dynamic) } :text :dynamic | 42 | .dynamic : { *(.dynamic) } :text :dynamic |
@@ -55,6 +56,7 @@ PHDRS | |||
55 | { | 56 | { |
56 | text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */ | 57 | text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */ |
57 | dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ | 58 | dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ |
59 | note PT_NOTE FLAGS(4); /* PF_R */ | ||
58 | eh_frame_hdr 0x6474e550; /* PT_GNU_EH_FRAME, but ld doesn't match the name */ | 60 | eh_frame_hdr 0x6474e550; /* PT_GNU_EH_FRAME, but ld doesn't match the name */ |
59 | } | 61 | } |
60 | 62 | ||
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c index 56516ac92e5d..7c154dfff64a 100644 --- a/arch/x86_64/kernel/e820.c +++ b/arch/x86_64/kernel/e820.c | |||
@@ -2,6 +2,12 @@ | |||
2 | * Handle the memory map. | 2 | * Handle the memory map. |
3 | * The functions here do the job until bootmem takes over. | 3 | * The functions here do the job until bootmem takes over. |
4 | * $Id: e820.c,v 1.4 2002/09/19 19:25:32 ak Exp $ | 4 | * $Id: e820.c,v 1.4 2002/09/19 19:25:32 ak Exp $ |
5 | * | ||
6 | * Getting sanitize_e820_map() in sync with i386 version by applying change: | ||
7 | * - Provisions for empty E820 memory regions (reported by certain BIOSes). | ||
8 | * Alex Achenbach <xela@slit.de>, December 2002. | ||
9 | * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> | ||
10 | * | ||
5 | */ | 11 | */ |
6 | #include <linux/config.h> | 12 | #include <linux/config.h> |
7 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
@@ -277,7 +283,7 @@ static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map) | |||
277 | int chgidx, still_changing; | 283 | int chgidx, still_changing; |
278 | int overlap_entries; | 284 | int overlap_entries; |
279 | int new_bios_entry; | 285 | int new_bios_entry; |
280 | int old_nr, new_nr; | 286 | int old_nr, new_nr, chg_nr; |
281 | int i; | 287 | int i; |
282 | 288 | ||
283 | /* | 289 | /* |
@@ -331,20 +337,24 @@ static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map) | |||
331 | for (i=0; i < 2*old_nr; i++) | 337 | for (i=0; i < 2*old_nr; i++) |
332 | change_point[i] = &change_point_list[i]; | 338 | change_point[i] = &change_point_list[i]; |
333 | 339 | ||
334 | /* record all known change-points (starting and ending addresses) */ | 340 | /* record all known change-points (starting and ending addresses), |
341 | omitting those that are for empty memory regions */ | ||
335 | chgidx = 0; | 342 | chgidx = 0; |
336 | for (i=0; i < old_nr; i++) { | 343 | for (i=0; i < old_nr; i++) { |
337 | change_point[chgidx]->addr = biosmap[i].addr; | 344 | if (biosmap[i].size != 0) { |
338 | change_point[chgidx++]->pbios = &biosmap[i]; | 345 | change_point[chgidx]->addr = biosmap[i].addr; |
339 | change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size; | 346 | change_point[chgidx++]->pbios = &biosmap[i]; |
340 | change_point[chgidx++]->pbios = &biosmap[i]; | 347 | change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size; |
348 | change_point[chgidx++]->pbios = &biosmap[i]; | ||
349 | } | ||
341 | } | 350 | } |
351 | chg_nr = chgidx; | ||
342 | 352 | ||
343 | /* sort change-point list by memory addresses (low -> high) */ | 353 | /* sort change-point list by memory addresses (low -> high) */ |
344 | still_changing = 1; | 354 | still_changing = 1; |
345 | while (still_changing) { | 355 | while (still_changing) { |
346 | still_changing = 0; | 356 | still_changing = 0; |
347 | for (i=1; i < 2*old_nr; i++) { | 357 | for (i=1; i < chg_nr; i++) { |
348 | /* if <current_addr> > <last_addr>, swap */ | 358 | /* if <current_addr> > <last_addr>, swap */ |
349 | /* or, if current=<start_addr> & last=<end_addr>, swap */ | 359 | /* or, if current=<start_addr> & last=<end_addr>, swap */ |
350 | if ((change_point[i]->addr < change_point[i-1]->addr) || | 360 | if ((change_point[i]->addr < change_point[i-1]->addr) || |
@@ -367,7 +377,7 @@ static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map) | |||
367 | last_type = 0; /* start with undefined memory type */ | 377 | last_type = 0; /* start with undefined memory type */ |
368 | last_addr = 0; /* start with 0 as last starting address */ | 378 | last_addr = 0; /* start with 0 as last starting address */ |
369 | /* loop through change-points, determining affect on the new bios map */ | 379 | /* loop through change-points, determining affect on the new bios map */ |
370 | for (chgidx=0; chgidx < 2*old_nr; chgidx++) | 380 | for (chgidx=0; chgidx < chg_nr; chgidx++) |
371 | { | 381 | { |
372 | /* keep track of all overlapping bios entries */ | 382 | /* keep track of all overlapping bios entries */ |
373 | if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr) | 383 | if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr) |
diff --git a/arch/x86_64/kernel/early_printk.c b/arch/x86_64/kernel/early_printk.c index 750bcd0655dc..e3a19e8ebbf8 100644 --- a/arch/x86_64/kernel/early_printk.c +++ b/arch/x86_64/kernel/early_printk.c | |||
@@ -60,7 +60,7 @@ static struct console early_vga_console = { | |||
60 | 60 | ||
61 | /* Serial functions loosely based on a similar package from Klaus P. Gerlicher */ | 61 | /* Serial functions loosely based on a similar package from Klaus P. Gerlicher */ |
62 | 62 | ||
63 | int early_serial_base = 0x3f8; /* ttyS0 */ | 63 | static int early_serial_base = 0x3f8; /* ttyS0 */ |
64 | 64 | ||
65 | #define XMTRDY 0x20 | 65 | #define XMTRDY 0x20 |
66 | 66 | ||
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S index 3233a15cc4e0..1086b5fcac21 100644 --- a/arch/x86_64/kernel/entry.S +++ b/arch/x86_64/kernel/entry.S | |||
@@ -296,6 +296,7 @@ int_very_careful: | |||
296 | call syscall_trace_leave | 296 | call syscall_trace_leave |
297 | popq %rdi | 297 | popq %rdi |
298 | andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi | 298 | andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi |
299 | cli | ||
299 | jmp int_restore_rest | 300 | jmp int_restore_rest |
300 | 301 | ||
301 | int_signal: | 302 | int_signal: |
@@ -307,6 +308,7 @@ int_signal: | |||
307 | 1: movl $_TIF_NEED_RESCHED,%edi | 308 | 1: movl $_TIF_NEED_RESCHED,%edi |
308 | int_restore_rest: | 309 | int_restore_rest: |
309 | RESTORE_REST | 310 | RESTORE_REST |
311 | cli | ||
310 | jmp int_with_check | 312 | jmp int_with_check |
311 | CFI_ENDPROC | 313 | CFI_ENDPROC |
312 | 314 | ||
@@ -490,7 +492,8 @@ retint_signal: | |||
490 | call do_notify_resume | 492 | call do_notify_resume |
491 | RESTORE_REST | 493 | RESTORE_REST |
492 | cli | 494 | cli |
493 | GET_THREAD_INFO(%rcx) | 495 | GET_THREAD_INFO(%rcx) |
496 | movl $_TIF_WORK_MASK,%edi | ||
494 | jmp retint_check | 497 | jmp retint_check |
495 | 498 | ||
496 | #ifdef CONFIG_PREEMPT | 499 | #ifdef CONFIG_PREEMPT |
diff --git a/arch/x86_64/kernel/head64.c b/arch/x86_64/kernel/head64.c index 6cad46c98a23..0f8c78dcd38c 100644 --- a/arch/x86_64/kernel/head64.c +++ b/arch/x86_64/kernel/head64.c | |||
@@ -29,8 +29,6 @@ static void __init clear_bss(void) | |||
29 | (unsigned long) __bss_end - (unsigned long) __bss_start); | 29 | (unsigned long) __bss_end - (unsigned long) __bss_start); |
30 | } | 30 | } |
31 | 31 | ||
32 | extern char x86_boot_params[2048]; | ||
33 | |||
34 | #define NEW_CL_POINTER 0x228 /* Relative to real mode data */ | 32 | #define NEW_CL_POINTER 0x228 /* Relative to real mode data */ |
35 | #define OLD_CL_MAGIC_ADDR 0x90020 | 33 | #define OLD_CL_MAGIC_ADDR 0x90020 |
36 | #define OLD_CL_MAGIC 0xA33F | 34 | #define OLD_CL_MAGIC 0xA33F |
@@ -44,7 +42,7 @@ static void __init copy_bootdata(char *real_mode_data) | |||
44 | int new_data; | 42 | int new_data; |
45 | char * command_line; | 43 | char * command_line; |
46 | 44 | ||
47 | memcpy(x86_boot_params, real_mode_data, 2048); | 45 | memcpy(x86_boot_params, real_mode_data, BOOT_PARAM_SIZE); |
48 | new_data = *(int *) (x86_boot_params + NEW_CL_POINTER); | 46 | new_data = *(int *) (x86_boot_params + NEW_CL_POINTER); |
49 | if (!new_data) { | 47 | if (!new_data) { |
50 | if (OLD_CL_MAGIC != * (u16 *) OLD_CL_MAGIC_ADDR) { | 48 | if (OLD_CL_MAGIC != * (u16 *) OLD_CL_MAGIC_ADDR) { |
@@ -93,9 +91,6 @@ void __init x86_64_start_kernel(char * real_mode_data) | |||
93 | #ifdef CONFIG_SMP | 91 | #ifdef CONFIG_SMP |
94 | cpu_set(0, cpu_online_map); | 92 | cpu_set(0, cpu_online_map); |
95 | #endif | 93 | #endif |
96 | /* default console: */ | ||
97 | if (!strstr(saved_command_line, "console=")) | ||
98 | strcat(saved_command_line, " console=tty0"); | ||
99 | s = strstr(saved_command_line, "earlyprintk="); | 94 | s = strstr(saved_command_line, "earlyprintk="); |
100 | if (s != NULL) | 95 | if (s != NULL) |
101 | setup_early_printk(s); | 96 | setup_early_printk(s); |
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c index 29a257295484..60be58617eb9 100644 --- a/arch/x86_64/kernel/io_apic.c +++ b/arch/x86_64/kernel/io_apic.c | |||
@@ -1607,7 +1607,6 @@ static inline void check_timer(void) | |||
1607 | disable_8259A_irq(0); | 1607 | disable_8259A_irq(0); |
1608 | setup_nmi(); | 1608 | setup_nmi(); |
1609 | enable_8259A_irq(0); | 1609 | enable_8259A_irq(0); |
1610 | check_nmi_watchdog(); | ||
1611 | } | 1610 | } |
1612 | return; | 1611 | return; |
1613 | } | 1612 | } |
@@ -1627,7 +1626,6 @@ static inline void check_timer(void) | |||
1627 | nmi_watchdog_default(); | 1626 | nmi_watchdog_default(); |
1628 | if (nmi_watchdog == NMI_IO_APIC) { | 1627 | if (nmi_watchdog == NMI_IO_APIC) { |
1629 | setup_nmi(); | 1628 | setup_nmi(); |
1630 | check_nmi_watchdog(); | ||
1631 | } | 1629 | } |
1632 | return; | 1630 | return; |
1633 | } | 1631 | } |
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c index 4f2a852299b6..f77f8a0ff187 100644 --- a/arch/x86_64/kernel/kprobes.c +++ b/arch/x86_64/kernel/kprobes.c | |||
@@ -355,6 +355,13 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs) | |||
355 | *tos &= ~(TF_MASK | IF_MASK); | 355 | *tos &= ~(TF_MASK | IF_MASK); |
356 | *tos |= kprobe_old_rflags; | 356 | *tos |= kprobe_old_rflags; |
357 | break; | 357 | break; |
358 | case 0xc3: /* ret/lret */ | ||
359 | case 0xcb: | ||
360 | case 0xc2: | ||
361 | case 0xca: | ||
362 | regs->eflags &= ~TF_MASK; | ||
363 | /* rip is already adjusted, no more changes required */ | ||
364 | return; | ||
358 | case 0xe8: /* call relative - Fix return addr */ | 365 | case 0xe8: /* call relative - Fix return addr */ |
359 | *tos = orig_rip + (*tos - copy_rip); | 366 | *tos = orig_rip + (*tos - copy_rip); |
360 | break; | 367 | break; |
diff --git a/arch/x86_64/kernel/module.c b/arch/x86_64/kernel/module.c index c2ffea8845ed..bac195c74bcc 100644 --- a/arch/x86_64/kernel/module.c +++ b/arch/x86_64/kernel/module.c | |||
@@ -30,9 +30,12 @@ | |||
30 | 30 | ||
31 | #define DEBUGP(fmt...) | 31 | #define DEBUGP(fmt...) |
32 | 32 | ||
33 | #ifndef CONFIG_UML | ||
33 | void module_free(struct module *mod, void *module_region) | 34 | void module_free(struct module *mod, void *module_region) |
34 | { | 35 | { |
35 | vfree(module_region); | 36 | vfree(module_region); |
37 | /* FIXME: If module_region == mod->init_region, trim exception | ||
38 | table entries. */ | ||
36 | } | 39 | } |
37 | 40 | ||
38 | void *module_alloc(unsigned long size) | 41 | void *module_alloc(unsigned long size) |
@@ -51,6 +54,7 @@ void *module_alloc(unsigned long size) | |||
51 | 54 | ||
52 | return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL_EXEC); | 55 | return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL_EXEC); |
53 | } | 56 | } |
57 | #endif | ||
54 | 58 | ||
55 | /* We don't need anything special. */ | 59 | /* We don't need anything special. */ |
56 | int module_frob_arch_sections(Elf_Ehdr *hdr, | 60 | int module_frob_arch_sections(Elf_Ehdr *hdr, |
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c index e00d4adec36b..61de0b34a01e 100644 --- a/arch/x86_64/kernel/nmi.c +++ b/arch/x86_64/kernel/nmi.c | |||
@@ -112,17 +112,20 @@ static __init int cpu_has_lapic(void) | |||
112 | } | 112 | } |
113 | } | 113 | } |
114 | 114 | ||
115 | int __init check_nmi_watchdog (void) | 115 | static int __init check_nmi_watchdog (void) |
116 | { | 116 | { |
117 | int counts[NR_CPUS]; | 117 | int counts[NR_CPUS]; |
118 | int cpu; | 118 | int cpu; |
119 | 119 | ||
120 | if (nmi_watchdog == NMI_NONE) | ||
121 | return 0; | ||
122 | |||
120 | if (nmi_watchdog == NMI_LOCAL_APIC && !cpu_has_lapic()) { | 123 | if (nmi_watchdog == NMI_LOCAL_APIC && !cpu_has_lapic()) { |
121 | nmi_watchdog = NMI_NONE; | 124 | nmi_watchdog = NMI_NONE; |
122 | return -1; | 125 | return -1; |
123 | } | 126 | } |
124 | 127 | ||
125 | printk(KERN_INFO "testing NMI watchdog ... "); | 128 | printk(KERN_INFO "Testing NMI watchdog ... "); |
126 | 129 | ||
127 | for (cpu = 0; cpu < NR_CPUS; cpu++) | 130 | for (cpu = 0; cpu < NR_CPUS; cpu++) |
128 | counts[cpu] = cpu_pda[cpu].__nmi_count; | 131 | counts[cpu] = cpu_pda[cpu].__nmi_count; |
@@ -148,6 +151,8 @@ int __init check_nmi_watchdog (void) | |||
148 | 151 | ||
149 | return 0; | 152 | return 0; |
150 | } | 153 | } |
154 | /* Have this called later during boot so counters are updating */ | ||
155 | late_initcall(check_nmi_watchdog); | ||
151 | 156 | ||
152 | int __init setup_nmi_watchdog(char *str) | 157 | int __init setup_nmi_watchdog(char *str) |
153 | { | 158 | { |
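
The conversion above leans on the late_initcall() mechanism from <linux/init.h>: the registered function runs once, near the end of boot, by which point the per-CPU NMI counters have had time to advance. A minimal sketch of the pattern, with a hypothetical function name:

    #include <linux/init.h>

    /* hypothetical example of the late_initcall pattern used above */
    static int __init example_late_check(void)
    {
            /* by this point in boot, counters sampled here have been updating */
            return 0;
    }
    late_initcall(example_late_check);
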
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c index 9922d2ba24a3..761b6d35e338 100644 --- a/arch/x86_64/kernel/process.c +++ b/arch/x86_64/kernel/process.c | |||
@@ -402,10 +402,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp, | |||
402 | p->thread.fs = me->thread.fs; | 402 | p->thread.fs = me->thread.fs; |
403 | p->thread.gs = me->thread.gs; | 403 | p->thread.gs = me->thread.gs; |
404 | 404 | ||
405 | asm("movl %%gs,%0" : "=m" (p->thread.gsindex)); | 405 | asm("mov %%gs,%0" : "=m" (p->thread.gsindex)); |
406 | asm("movl %%fs,%0" : "=m" (p->thread.fsindex)); | 406 | asm("mov %%fs,%0" : "=m" (p->thread.fsindex)); |
407 | asm("movl %%es,%0" : "=m" (p->thread.es)); | 407 | asm("mov %%es,%0" : "=m" (p->thread.es)); |
408 | asm("movl %%ds,%0" : "=m" (p->thread.ds)); | 408 | asm("mov %%ds,%0" : "=m" (p->thread.ds)); |
409 | 409 | ||
410 | if (unlikely(me->thread.io_bitmap_ptr != NULL)) { | 410 | if (unlikely(me->thread.io_bitmap_ptr != NULL)) { |
411 | p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL); | 411 | p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL); |
@@ -468,11 +468,11 @@ struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct * | |||
468 | * Switch DS and ES. | 468 | * Switch DS and ES. |
469 | * This won't pick up thread selector changes, but I guess that is ok. | 469 | * This won't pick up thread selector changes, but I guess that is ok. |
470 | */ | 470 | */ |
471 | asm volatile("movl %%es,%0" : "=m" (prev->es)); | 471 | asm volatile("mov %%es,%0" : "=m" (prev->es)); |
472 | if (unlikely(next->es | prev->es)) | 472 | if (unlikely(next->es | prev->es)) |
473 | loadsegment(es, next->es); | 473 | loadsegment(es, next->es); |
474 | 474 | ||
475 | asm volatile ("movl %%ds,%0" : "=m" (prev->ds)); | 475 | asm volatile ("mov %%ds,%0" : "=m" (prev->ds)); |
476 | if (unlikely(next->ds | prev->ds)) | 476 | if (unlikely(next->ds | prev->ds)) |
477 | loadsegment(ds, next->ds); | 477 | loadsegment(ds, next->ds); |
478 | 478 | ||
diff --git a/arch/x86_64/kernel/ptrace.c b/arch/x86_64/kernel/ptrace.c index c7011675007d..e26e86bb56fe 100644 --- a/arch/x86_64/kernel/ptrace.c +++ b/arch/x86_64/kernel/ptrace.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/security.h> | 18 | #include <linux/security.h> |
19 | #include <linux/audit.h> | 19 | #include <linux/audit.h> |
20 | #include <linux/seccomp.h> | 20 | #include <linux/seccomp.h> |
21 | #include <linux/signal.h> | ||
21 | 22 | ||
22 | #include <asm/uaccess.h> | 23 | #include <asm/uaccess.h> |
23 | #include <asm/pgtable.h> | 24 | #include <asm/pgtable.h> |
@@ -467,7 +468,7 @@ asmlinkage long sys_ptrace(long request, long pid, unsigned long addr, long data | |||
467 | case PTRACE_CONT: /* restart after signal. */ | 468 | case PTRACE_CONT: /* restart after signal. */ |
468 | 469 | ||
469 | ret = -EIO; | 470 | ret = -EIO; |
470 | if ((unsigned long) data > _NSIG) | 471 | if (!valid_signal(data)) |
471 | break; | 472 | break; |
472 | if (request == PTRACE_SYSCALL) | 473 | if (request == PTRACE_SYSCALL) |
473 | set_tsk_thread_flag(child,TIF_SYSCALL_TRACE); | 474 | set_tsk_thread_flag(child,TIF_SYSCALL_TRACE); |
@@ -529,7 +530,7 @@ asmlinkage long sys_ptrace(long request, long pid, unsigned long addr, long data | |||
529 | 530 | ||
530 | case PTRACE_SINGLESTEP: /* set the trap flag. */ | 531 | case PTRACE_SINGLESTEP: /* set the trap flag. */ |
531 | ret = -EIO; | 532 | ret = -EIO; |
532 | if ((unsigned long) data > _NSIG) | 533 | if (!valid_signal(data)) |
533 | break; | 534 | break; |
534 | clear_tsk_thread_flag(child,TIF_SYSCALL_TRACE); | 535 | clear_tsk_thread_flag(child,TIF_SYSCALL_TRACE); |
535 | set_singlestep(child); | 536 | set_singlestep(child); |
@@ -634,20 +635,29 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs) | |||
634 | /* do the secure computing check first */ | 635 | /* do the secure computing check first */ |
635 | secure_computing(regs->orig_rax); | 636 | secure_computing(regs->orig_rax); |
636 | 637 | ||
637 | if (unlikely(current->audit_context)) | ||
638 | audit_syscall_entry(current, regs->orig_rax, | ||
639 | regs->rdi, regs->rsi, | ||
640 | regs->rdx, regs->r10); | ||
641 | |||
642 | if (test_thread_flag(TIF_SYSCALL_TRACE) | 638 | if (test_thread_flag(TIF_SYSCALL_TRACE) |
643 | && (current->ptrace & PT_PTRACED)) | 639 | && (current->ptrace & PT_PTRACED)) |
644 | syscall_trace(regs); | 640 | syscall_trace(regs); |
641 | |||
642 | if (unlikely(current->audit_context)) { | ||
643 | if (test_thread_flag(TIF_IA32)) { | ||
644 | audit_syscall_entry(current, AUDIT_ARCH_I386, | ||
645 | regs->orig_rax, | ||
646 | regs->rbx, regs->rcx, | ||
647 | regs->rdx, regs->rsi); | ||
648 | } else { | ||
649 | audit_syscall_entry(current, AUDIT_ARCH_X86_64, | ||
650 | regs->orig_rax, | ||
651 | regs->rdi, regs->rsi, | ||
652 | regs->rdx, regs->r10); | ||
653 | } | ||
654 | } | ||
645 | } | 655 | } |
646 | 656 | ||
647 | asmlinkage void syscall_trace_leave(struct pt_regs *regs) | 657 | asmlinkage void syscall_trace_leave(struct pt_regs *regs) |
648 | { | 658 | { |
649 | if (unlikely(current->audit_context)) | 659 | if (unlikely(current->audit_context)) |
650 | audit_syscall_exit(current, regs->rax); | 660 | audit_syscall_exit(current, AUDITSC_RESULT(regs->rax), regs->rax); |
651 | 661 | ||
652 | if ((test_thread_flag(TIF_SYSCALL_TRACE) | 662 | if ((test_thread_flag(TIF_SYSCALL_TRACE) |
653 | || test_thread_flag(TIF_SINGLESTEP)) | 663 | || test_thread_flag(TIF_SINGLESTEP)) |
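
The AUDITSC_RESULT() wrapper used in the exit path above comes from <linux/audit.h>; to the best of my knowledge it simply maps a negative syscall return value to AUDITSC_FAILURE and everything else to AUDITSC_SUCCESS. A hedged sketch of that mapping, with the header as the authority for the exact definition:

    /* assumed shape of the helper; see <linux/audit.h> for the real definition */
    #define AUDITSC_RESULT(x) (((long)(x)) < 0 ? AUDITSC_FAILURE : AUDITSC_SUCCESS)
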
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c index b18c114c7648..2129cf9ba6b2 100644 --- a/arch/x86_64/kernel/setup.c +++ b/arch/x86_64/kernel/setup.c | |||
@@ -977,7 +977,7 @@ void __init identify_cpu(struct cpuinfo_x86 *c) | |||
977 | if ((xlvl & 0xffff0000) == 0x80000000) { | 977 | if ((xlvl & 0xffff0000) == 0x80000000) { |
978 | if (xlvl >= 0x80000001) { | 978 | if (xlvl >= 0x80000001) { |
979 | c->x86_capability[1] = cpuid_edx(0x80000001); | 979 | c->x86_capability[1] = cpuid_edx(0x80000001); |
980 | c->x86_capability[5] = cpuid_ecx(0x80000001); | 980 | c->x86_capability[6] = cpuid_ecx(0x80000001); |
981 | } | 981 | } |
982 | if (xlvl >= 0x80000004) | 982 | if (xlvl >= 0x80000004) |
983 | get_model_name(c); /* Default name */ | 983 | get_model_name(c); /* Default name */ |
@@ -1076,7 +1076,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
1076 | "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL, | 1076 | "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL, |
1077 | 1077 | ||
1078 | /* AMD-defined */ | 1078 | /* AMD-defined */ |
1079 | "pni", NULL, NULL, NULL, NULL, NULL, NULL, NULL, | 1079 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, |
1080 | NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL, | 1080 | NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL, |
1081 | NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL, | 1081 | NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL, |
1082 | NULL, "fxsr_opt", NULL, NULL, NULL, "lm", "3dnowext", "3dnow", | 1082 | NULL, "fxsr_opt", NULL, NULL, NULL, "lm", "3dnowext", "3dnow", |
@@ -1100,11 +1100,17 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
1100 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | 1100 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, |
1101 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | 1101 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, |
1102 | 1102 | ||
1103 | /* VIA/Cyrix/Centaur-defined */ | ||
1104 | NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en", | ||
1105 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
1106 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
1107 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | ||
1108 | |||
1103 | /* AMD-defined (#2) */ | 1109 | /* AMD-defined (#2) */ |
1104 | "lahf_lm", "cmp_legacy", NULL, NULL, NULL, NULL, NULL, NULL, | 1110 | "lahf_lm", "cmp_legacy", NULL, NULL, NULL, NULL, NULL, NULL, |
1105 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | 1111 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, |
1106 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, | 1112 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, |
1107 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL | 1113 | NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, |
1108 | }; | 1114 | }; |
1109 | static char *x86_power_flags[] = { | 1115 | static char *x86_power_flags[] = { |
1110 | "ts", /* temperature sensor */ | 1116 | "ts", /* temperature sensor */ |
diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c index b5305b04bc40..678b7ac33b8b 100644 --- a/arch/x86_64/kernel/setup64.c +++ b/arch/x86_64/kernel/setup64.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/string.h> | 12 | #include <linux/string.h> |
13 | #include <linux/bootmem.h> | 13 | #include <linux/bootmem.h> |
14 | #include <linux/bitops.h> | 14 | #include <linux/bitops.h> |
15 | #include <asm/bootsetup.h> | ||
15 | #include <asm/pda.h> | 16 | #include <asm/pda.h> |
16 | #include <asm/pgtable.h> | 17 | #include <asm/pgtable.h> |
17 | #include <asm/processor.h> | 18 | #include <asm/processor.h> |
@@ -26,7 +27,7 @@ | |||
26 | #include <asm/mman.h> | 27 | #include <asm/mman.h> |
27 | #include <asm/numa.h> | 28 | #include <asm/numa.h> |
28 | 29 | ||
29 | char x86_boot_params[2048] __initdata = {0,}; | 30 | char x86_boot_params[BOOT_PARAM_SIZE] __initdata = {0,}; |
30 | 31 | ||
31 | cpumask_t cpu_initialized __initdata = CPU_MASK_NONE; | 32 | cpumask_t cpu_initialized __initdata = CPU_MASK_NONE; |
32 | 33 | ||
diff --git a/arch/x86_64/kernel/signal.c b/arch/x86_64/kernel/signal.c index 7760224cdfe3..d439ced150c6 100644 --- a/arch/x86_64/kernel/signal.c +++ b/arch/x86_64/kernel/signal.c | |||
@@ -83,7 +83,7 @@ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, | |||
83 | 83 | ||
84 | struct rt_sigframe | 84 | struct rt_sigframe |
85 | { | 85 | { |
86 | char *pretcode; | 86 | char __user *pretcode; |
87 | struct ucontext uc; | 87 | struct ucontext uc; |
88 | struct siginfo info; | 88 | struct siginfo info; |
89 | }; | 89 | }; |
diff --git a/arch/x86_64/kernel/sys_x86_64.c b/arch/x86_64/kernel/sys_x86_64.c index 477d8be57d64..dbebd5ccba6b 100644 --- a/arch/x86_64/kernel/sys_x86_64.c +++ b/arch/x86_64/kernel/sys_x86_64.c | |||
@@ -152,12 +152,6 @@ asmlinkage long sys_uname(struct new_utsname __user * name) | |||
152 | return err ? -EFAULT : 0; | 152 | return err ? -EFAULT : 0; |
153 | } | 153 | } |
154 | 154 | ||
155 | asmlinkage long wrap_sys_shmat(int shmid, char __user *shmaddr, int shmflg) | ||
156 | { | ||
157 | unsigned long raddr; | ||
158 | return do_shmat(shmid,shmaddr,shmflg,&raddr) ?: (long)raddr; | ||
159 | } | ||
160 | |||
161 | asmlinkage long sys_time64(long __user * tloc) | 155 | asmlinkage long sys_time64(long __user * tloc) |
162 | { | 156 | { |
163 | struct timeval now; | 157 | struct timeval now; |
diff --git a/arch/x86_64/kernel/x8664_ksyms.c b/arch/x86_64/kernel/x8664_ksyms.c index 88626e626886..a43dedb58fa2 100644 --- a/arch/x86_64/kernel/x8664_ksyms.c +++ b/arch/x86_64/kernel/x8664_ksyms.c | |||
@@ -139,35 +139,23 @@ EXPORT_SYMBOL_GPL(unset_nmi_callback); | |||
139 | #undef memmove | 139 | #undef memmove |
140 | #undef memchr | 140 | #undef memchr |
141 | #undef strlen | 141 | #undef strlen |
142 | #undef strcpy | ||
143 | #undef strncmp | 142 | #undef strncmp |
144 | #undef strncpy | 143 | #undef strncpy |
145 | #undef strchr | 144 | #undef strchr |
146 | #undef strcmp | ||
147 | #undef strcpy | ||
148 | #undef strcat | ||
149 | #undef memcmp | ||
150 | 145 | ||
151 | extern void * memset(void *,int,__kernel_size_t); | 146 | extern void * memset(void *,int,__kernel_size_t); |
152 | extern size_t strlen(const char *); | 147 | extern size_t strlen(const char *); |
153 | extern void * memmove(void * dest,const void *src,size_t count); | 148 | extern void * memmove(void * dest,const void *src,size_t count); |
154 | extern char * strcpy(char * dest,const char *src); | ||
155 | extern int strcmp(const char * cs,const char * ct); | ||
156 | extern void *memchr(const void *s, int c, size_t n); | 149 | extern void *memchr(const void *s, int c, size_t n); |
157 | extern void * memcpy(void *,const void *,__kernel_size_t); | 150 | extern void * memcpy(void *,const void *,__kernel_size_t); |
158 | extern void * __memcpy(void *,const void *,__kernel_size_t); | 151 | extern void * __memcpy(void *,const void *,__kernel_size_t); |
159 | extern char * strcat(char *, const char *); | ||
160 | extern int memcmp(const void * cs,const void * ct,size_t count); | ||
161 | 152 | ||
162 | EXPORT_SYMBOL(memset); | 153 | EXPORT_SYMBOL(memset); |
163 | EXPORT_SYMBOL(strlen); | 154 | EXPORT_SYMBOL(strlen); |
164 | EXPORT_SYMBOL(memmove); | 155 | EXPORT_SYMBOL(memmove); |
165 | EXPORT_SYMBOL(strcpy); | ||
166 | EXPORT_SYMBOL(strncmp); | 156 | EXPORT_SYMBOL(strncmp); |
167 | EXPORT_SYMBOL(strncpy); | 157 | EXPORT_SYMBOL(strncpy); |
168 | EXPORT_SYMBOL(strchr); | 158 | EXPORT_SYMBOL(strchr); |
169 | EXPORT_SYMBOL(strcmp); | ||
170 | EXPORT_SYMBOL(strcat); | ||
171 | EXPORT_SYMBOL(strncat); | 159 | EXPORT_SYMBOL(strncat); |
172 | EXPORT_SYMBOL(memchr); | 160 | EXPORT_SYMBOL(memchr); |
173 | EXPORT_SYMBOL(strrchr); | 161 | EXPORT_SYMBOL(strrchr); |
@@ -175,7 +163,6 @@ EXPORT_SYMBOL(strnlen); | |||
175 | EXPORT_SYMBOL(memscan); | 163 | EXPORT_SYMBOL(memscan); |
176 | EXPORT_SYMBOL(memcpy); | 164 | EXPORT_SYMBOL(memcpy); |
177 | EXPORT_SYMBOL(__memcpy); | 165 | EXPORT_SYMBOL(__memcpy); |
178 | EXPORT_SYMBOL(memcmp); | ||
179 | 166 | ||
180 | #ifdef CONFIG_RWSEM_XCHGADD_ALGORITHM | 167 | #ifdef CONFIG_RWSEM_XCHGADD_ALGORITHM |
181 | /* prototypes are wrong, these are assembly with custom calling functions */ | 168 | /* prototypes are wrong, these are assembly with custom calling functions */ |
diff --git a/arch/x86_64/lib/putuser.S b/arch/x86_64/lib/putuser.S index d9d3e5ec9ad1..5828b8191667 100644 --- a/arch/x86_64/lib/putuser.S +++ b/arch/x86_64/lib/putuser.S | |||
@@ -49,8 +49,8 @@ __put_user_2: | |||
49 | jc 20f | 49 | jc 20f |
50 | cmpq threadinfo_addr_limit(%r8),%rcx | 50 | cmpq threadinfo_addr_limit(%r8),%rcx |
51 | jae 20f | 51 | jae 20f |
52 | 2: decq %rcx | 52 | decq %rcx |
53 | movw %dx,(%rcx) | 53 | 2: movw %dx,(%rcx) |
54 | xorl %eax,%eax | 54 | xorl %eax,%eax |
55 | ret | 55 | ret |
56 | 20: decq %rcx | 56 | 20: decq %rcx |
@@ -64,8 +64,8 @@ __put_user_4: | |||
64 | jc 30f | 64 | jc 30f |
65 | cmpq threadinfo_addr_limit(%r8),%rcx | 65 | cmpq threadinfo_addr_limit(%r8),%rcx |
66 | jae 30f | 66 | jae 30f |
67 | 3: subq $3,%rcx | 67 | subq $3,%rcx |
68 | movl %edx,(%rcx) | 68 | 3: movl %edx,(%rcx) |
69 | xorl %eax,%eax | 69 | xorl %eax,%eax |
70 | ret | 70 | ret |
71 | 30: subq $3,%rcx | 71 | 30: subq $3,%rcx |
@@ -79,8 +79,8 @@ __put_user_8: | |||
79 | jc 40f | 79 | jc 40f |
80 | cmpq threadinfo_addr_limit(%r8),%rcx | 80 | cmpq threadinfo_addr_limit(%r8),%rcx |
81 | jae 40f | 81 | jae 40f |
82 | 4: subq $7,%rcx | 82 | subq $7,%rcx |
83 | movq %rdx,(%rcx) | 83 | 4: movq %rdx,(%rcx) |
84 | xorl %eax,%eax | 84 | xorl %eax,%eax |
85 | ret | 85 | ret |
86 | 40: subq $7,%rcx | 86 | 40: subq $7,%rcx |