-rw-r--r--  arch/i386/Kconfig.debug  9
-rw-r--r--  arch/i386/kernel/acpi/boot.c  5
-rw-r--r--  arch/powerpc/Kconfig  6
-rw-r--r--  arch/powerpc/Makefile  4
-rw-r--r--  arch/powerpc/kernel/Makefile  2
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c  1
-rw-r--r--  arch/powerpc/kernel/entry_32.S  35
-rw-r--r--  arch/powerpc/kernel/head_64.S  49
-rw-r--r--  arch/powerpc/kernel/idle.c  4
-rw-r--r--  arch/powerpc/kernel/idle_6xx.S  63
-rw-r--r--  arch/powerpc/kernel/idle_power4.S  10
-rw-r--r--  arch/powerpc/kernel/irq.c  36
-rw-r--r--  arch/powerpc/kernel/prom_init.c  5
-rw-r--r--  arch/powerpc/kernel/rtas-proc.c  4
-rw-r--r--  arch/powerpc/platforms/cell/spufs/switch.c  2
-rw-r--r--  arch/powerpc/platforms/chrp/chrp.h  1
-rw-r--r--  arch/powerpc/platforms/chrp/pci.c  6
-rw-r--r--  arch/powerpc/platforms/chrp/setup.c  44
-rw-r--r--  arch/powerpc/platforms/iseries/setup.c  7
-rw-r--r--  arch/powerpc/platforms/pseries/eeh.c  6
-rw-r--r--  arch/powerpc/platforms/pseries/rtasd.c  2
-rw-r--r--  arch/ppc/syslib/ppc_sys.c  9
-rw-r--r--  arch/x86_64/defconfig  19
-rw-r--r--  arch/x86_64/ia32/ia32entry.S  1
-rw-r--r--  arch/x86_64/kernel/traps.c  5
-rw-r--r--  block/as-iosched.c  5
-rw-r--r--  block/cfq-iosched.c  53
-rw-r--r--  drivers/char/drm/drmP.h  4
-rw-r--r--  drivers/char/drm/drm_drv.c  4
-rw-r--r--  drivers/char/drm/drm_memory.c  134
-rw-r--r--  drivers/char/drm/drm_memory.h  128
-rw-r--r--  drivers/char/drm/drm_memory_debug.h  2
-rw-r--r--  drivers/char/drm/drm_pci.c  1
-rw-r--r--  drivers/char/drm/via_irq.c  12
-rw-r--r--  fs/open.c  24
-rw-r--r--  include/asm-powerpc/irq.h  7
-rw-r--r--  include/asm-powerpc/thread_info.h  8
-rw-r--r--  include/asm-x86_64/mmzone.h  3
-rw-r--r--  include/asm-x86_64/unistd.h  4
-rw-r--r--  init/Kconfig  9
40 files changed, 443 insertions(+), 290 deletions(-)
diff --git a/arch/i386/Kconfig.debug b/arch/i386/Kconfig.debug
index 6e97df6979e8..c92191b1fb67 100644
--- a/arch/i386/Kconfig.debug
+++ b/arch/i386/Kconfig.debug
@@ -81,4 +81,13 @@ config X86_MPPARSE
81 depends on X86_LOCAL_APIC && !X86_VISWS 81 depends on X86_LOCAL_APIC && !X86_VISWS
82 default y 82 default y
83 83
84config DOUBLEFAULT
85 default y
86 bool "Enable doublefault exception handler" if EMBEDDED
87 help
88 This option allows trapping of rare doublefault exceptions that
89 would otherwise cause a system to silently reboot. Disabling this
90 option saves about 4k and might cause you much additional grey
91 hair.
92
84endmenu 93endmenu
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index 030a0007a4e0..049a25583793 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -168,7 +168,7 @@ int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
168 unsigned long i; 168 unsigned long i;
169 int config_size; 169 int config_size;
170 170
171 if (!phys_addr || !size || !cpu_has_apic) 171 if (!phys_addr || !size)
172 return -EINVAL; 172 return -EINVAL;
173 173
174 mcfg = (struct acpi_table_mcfg *)__acpi_map_table(phys_addr, size); 174 mcfg = (struct acpi_table_mcfg *)__acpi_map_table(phys_addr, size);
@@ -1102,6 +1102,9 @@ int __init acpi_boot_table_init(void)
1102 dmi_check_system(acpi_dmi_table); 1102 dmi_check_system(acpi_dmi_table);
1103#endif 1103#endif
1104 1104
1105 if (!cpu_has_apic)
1106 return -ENODEV;
1107
1105 /* 1108 /*
1106 * If acpi_disabled, bail out 1109 * If acpi_disabled, bail out
1107 * One exception: acpi=ht continues far enough to enumerate LAPICs 1110 * One exception: acpi=ht continues far enough to enumerate LAPICs
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 167e70e95556..6729c98b66f9 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -366,6 +366,7 @@ config PPC_PMAC64
366 select U3_DART 366 select U3_DART
367 select MPIC_BROKEN_U3 367 select MPIC_BROKEN_U3
368 select GENERIC_TBSYNC 368 select GENERIC_TBSYNC
369 select PPC_970_NAP
369 default y 370 default y
370 371
371config PPC_PREP 372config PPC_PREP
@@ -383,6 +384,7 @@ config PPC_MAPLE
383 select MPIC_BROKEN_U3 384 select MPIC_BROKEN_U3
384 select GENERIC_TBSYNC 385 select GENERIC_TBSYNC
385 select PPC_UDBG_16550 386 select PPC_UDBG_16550
387 select PPC_970_NAP
386 default n 388 default n
387 help 389 help
388 This option enables support for the Maple 970FX Evaluation Board. 390 This option enables support for the Maple 970FX Evaluation Board.
@@ -457,6 +459,10 @@ config PPC_MPC106
457 bool 459 bool
458 default n 460 default n
459 461
462config PPC_970_NAP
463 bool
464 default n
465
460source "drivers/cpufreq/Kconfig" 466source "drivers/cpufreq/Kconfig"
461 467
462config CPU_FREQ_PMAC 468config CPU_FREQ_PMAC
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 6ec84d37a337..ed5b26aa8be3 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -104,6 +104,10 @@ ifndef CONFIG_FSL_BOOKE
104CFLAGS += -mstring 104CFLAGS += -mstring
105endif 105endif
106 106
107ifeq ($(CONFIG_6xx),y)
108CFLAGS += -mcpu=powerpc
109endif
110
107cpu-as-$(CONFIG_PPC64BRIDGE) += -Wa,-mppc64bridge 111cpu-as-$(CONFIG_PPC64BRIDGE) += -Wa,-mppc64bridge
108cpu-as-$(CONFIG_4xx) += -Wa,-m405 112cpu-as-$(CONFIG_4xx) += -Wa,-m405
109cpu-as-$(CONFIG_6xx) += -Wa,-maltivec 113cpu-as-$(CONFIG_6xx) += -Wa,-maltivec
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 0cc0995b81b0..803858e86160 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -20,7 +20,7 @@ obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \
20 firmware.o sysfs.o 20 firmware.o sysfs.o
21obj-$(CONFIG_PPC64) += vdso64/ 21obj-$(CONFIG_PPC64) += vdso64/
22obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o 22obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
23obj-$(CONFIG_POWER4) += idle_power4.o 23obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
24obj-$(CONFIG_PPC_OF) += of_device.o prom_parse.o 24obj-$(CONFIG_PPC_OF) += of_device.o prom_parse.o
25procfs-$(CONFIG_PPC64) := proc_ppc64.o 25procfs-$(CONFIG_PPC64) := proc_ppc64.o
26obj-$(CONFIG_PROC_FS) += $(procfs-y) 26obj-$(CONFIG_PROC_FS) += $(procfs-y)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 54b48f330051..8f85c5e8a55a 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -91,6 +91,7 @@ int main(void)
91#endif /* CONFIG_PPC64 */ 91#endif /* CONFIG_PPC64 */
92 92
93 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); 93 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
94 DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
94 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); 95 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
95 DEFINE(TI_TASK, offsetof(struct thread_info, task)); 96 DEFINE(TI_TASK, offsetof(struct thread_info, task));
96#ifdef CONFIG_PPC32 97#ifdef CONFIG_PPC32
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index b3a979467225..8866fd26c6b9 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -128,37 +128,36 @@ transfer_to_handler:
128 stw r12,4(r11) 128 stw r12,4(r11)
129#endif 129#endif
130 b 3f 130 b 3f
131
1312: /* if from kernel, check interrupted DOZE/NAP mode and 1322: /* if from kernel, check interrupted DOZE/NAP mode and
132 * check for stack overflow 133 * check for stack overflow
133 */ 134 */
135 lwz r9,THREAD_INFO-THREAD(r12)
136 cmplw r1,r9 /* if r1 <= current->thread_info */
137 ble- stack_ovf /* then the kernel stack overflowed */
1385:
134#ifdef CONFIG_6xx 139#ifdef CONFIG_6xx
135 mfspr r11,SPRN_HID0 140 tophys(r9,r9) /* check local flags */
136 mtcr r11 141 lwz r12,TI_LOCAL_FLAGS(r9)
137BEGIN_FTR_SECTION 142 mtcrf 0x01,r12
138 bt- 8,4f /* Check DOZE */ 143 bt- 31-TLF_NAPPING,4f
139END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
140BEGIN_FTR_SECTION
141 bt- 9,4f /* Check NAP */
142END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
143#endif /* CONFIG_6xx */ 144#endif /* CONFIG_6xx */
144 .globl transfer_to_handler_cont 145 .globl transfer_to_handler_cont
145transfer_to_handler_cont: 146transfer_to_handler_cont:
146 lwz r11,THREAD_INFO-THREAD(r12)
147 cmplw r1,r11 /* if r1 <= current->thread_info */
148 ble- stack_ovf /* then the kernel stack overflowed */
1493: 1473:
150 mflr r9 148 mflr r9
151 lwz r11,0(r9) /* virtual address of handler */ 149 lwz r11,0(r9) /* virtual address of handler */
152 lwz r9,4(r9) /* where to go when done */ 150 lwz r9,4(r9) /* where to go when done */
153 FIX_SRR1(r10,r12)
154 mtspr SPRN_SRR0,r11 151 mtspr SPRN_SRR0,r11
155 mtspr SPRN_SRR1,r10 152 mtspr SPRN_SRR1,r10
156 mtlr r9 153 mtlr r9
157 SYNC 154 SYNC
158 RFI /* jump to handler, enable MMU */ 155 RFI /* jump to handler, enable MMU */
159 156
160#ifdef CONFIG_6xx 157#ifdef CONFIG_6xx
1614: b power_save_6xx_restore 1584: rlwinm r12,r12,0,~_TLF_NAPPING
159 stw r12,TI_LOCAL_FLAGS(r9)
160 b power_save_6xx_restore
162#endif 161#endif
163 162
164/* 163/*
@@ -167,10 +166,10 @@ transfer_to_handler_cont:
167 */ 166 */
168stack_ovf: 167stack_ovf:
169 /* sometimes we use a statically-allocated stack, which is OK. */ 168 /* sometimes we use a statically-allocated stack, which is OK. */
170 lis r11,_end@h 169 lis r12,_end@h
171 ori r11,r11,_end@l 170 ori r12,r12,_end@l
172 cmplw r1,r11 171 cmplw r1,r12
173 ble 3b /* r1 <= &_end is OK */ 172 ble 5b /* r1 <= &_end is OK */
174 SAVE_NVGPRS(r11) 173 SAVE_NVGPRS(r11)
175 addi r3,r1,STACK_FRAME_OVERHEAD 174 addi r3,r1,STACK_FRAME_OVERHEAD
176 lis r1,init_thread_union@ha 175 lis r1,init_thread_union@ha
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index a5ae04a57c78..b7d140430a41 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -376,11 +376,28 @@ label##_common: \
376 bl hdlr; \ 376 bl hdlr; \
377 b .ret_from_except 377 b .ret_from_except
378 378
379/*
380 * Like STD_EXCEPTION_COMMON, but for exceptions that can occur
381 * in the idle task and therefore need the special idle handling.
382 */
383#define STD_EXCEPTION_COMMON_IDLE(trap, label, hdlr) \
384 .align 7; \
385 .globl label##_common; \
386label##_common: \
387 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
388 FINISH_NAP; \
389 DISABLE_INTS; \
390 bl .save_nvgprs; \
391 addi r3,r1,STACK_FRAME_OVERHEAD; \
392 bl hdlr; \
393 b .ret_from_except
394
379#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \ 395#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \
380 .align 7; \ 396 .align 7; \
381 .globl label##_common; \ 397 .globl label##_common; \
382label##_common: \ 398label##_common: \
383 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \ 399 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
400 FINISH_NAP; \
384 DISABLE_INTS; \ 401 DISABLE_INTS; \
385 bl .ppc64_runlatch_on; \ 402 bl .ppc64_runlatch_on; \
386 addi r3,r1,STACK_FRAME_OVERHEAD; \ 403 addi r3,r1,STACK_FRAME_OVERHEAD; \
@@ -388,6 +405,25 @@ label##_common: \
388 b .ret_from_except_lite 405 b .ret_from_except_lite
389 406
390/* 407/*
408 * When the idle code in power4_idle puts the CPU into NAP mode,
409 * it has to do so in a loop, and relies on the external interrupt
410 * and decrementer interrupt entry code to get it out of the loop.
411 * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
412 * to signal that it is in the loop and needs help to get out.
413 */
414#ifdef CONFIG_PPC_970_NAP
415#define FINISH_NAP \
416BEGIN_FTR_SECTION \
417 clrrdi r11,r1,THREAD_SHIFT; \
418 ld r9,TI_LOCAL_FLAGS(r11); \
419 andi. r10,r9,_TLF_NAPPING; \
420 bnel power4_fixup_nap; \
421END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
422#else
423#define FINISH_NAP
424#endif
425
426/*
391 * Start of pSeries system interrupt routines 427 * Start of pSeries system interrupt routines
392 */ 428 */
393 . = 0x100 429 . = 0x100
@@ -772,6 +808,7 @@ hardware_interrupt_iSeries_masked:
772 .globl machine_check_common 808 .globl machine_check_common
773machine_check_common: 809machine_check_common:
774 EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC) 810 EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
811 FINISH_NAP
775 DISABLE_INTS 812 DISABLE_INTS
776 bl .save_nvgprs 813 bl .save_nvgprs
777 addi r3,r1,STACK_FRAME_OVERHEAD 814 addi r3,r1,STACK_FRAME_OVERHEAD
@@ -783,7 +820,7 @@ machine_check_common:
783 STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) 820 STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
784 STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) 821 STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
785 STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) 822 STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
786 STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception) 823 STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
787 STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) 824 STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
788#ifdef CONFIG_ALTIVEC 825#ifdef CONFIG_ALTIVEC
789 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception) 826 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
@@ -1034,6 +1071,7 @@ unrecov_slb:
1034 .globl hardware_interrupt_entry 1071 .globl hardware_interrupt_entry
1035hardware_interrupt_common: 1072hardware_interrupt_common:
1036 EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN) 1073 EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
1074 FINISH_NAP
1037hardware_interrupt_entry: 1075hardware_interrupt_entry:
1038 DISABLE_INTS 1076 DISABLE_INTS
1039 bl .ppc64_runlatch_on 1077 bl .ppc64_runlatch_on
@@ -1041,6 +1079,15 @@ hardware_interrupt_entry:
1041 bl .do_IRQ 1079 bl .do_IRQ
1042 b .ret_from_except_lite 1080 b .ret_from_except_lite
1043 1081
1082#ifdef CONFIG_PPC_970_NAP
1083power4_fixup_nap:
1084 andc r9,r9,r10
1085 std r9,TI_LOCAL_FLAGS(r11)
1086 ld r10,_LINK(r1) /* make idle task do the */
1087 std r10,_NIP(r1) /* equivalent of a blr */
1088 blr
1089#endif
1090
1044 .align 7 1091 .align 7
1045 .globl alignment_common 1092 .globl alignment_common
1046alignment_common: 1093alignment_common:
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index e9f321d74d85..d491052c8e0c 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -50,9 +50,9 @@ void cpu_idle(void)
50 50
51 set_thread_flag(TIF_POLLING_NRFLAG); 51 set_thread_flag(TIF_POLLING_NRFLAG);
52 while (1) { 52 while (1) {
53 ppc64_runlatch_off();
54
55 while (!need_resched() && !cpu_should_die()) { 53 while (!need_resched() && !cpu_should_die()) {
54 ppc64_runlatch_off();
55
56 if (ppc_md.power_save) { 56 if (ppc_md.power_save) {
57 clear_thread_flag(TIF_POLLING_NRFLAG); 57 clear_thread_flag(TIF_POLLING_NRFLAG);
58 /* 58 /*
diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S
index 12a4efbaa08f..b45fa0e37212 100644
--- a/arch/powerpc/kernel/idle_6xx.S
+++ b/arch/powerpc/kernel/idle_6xx.S
@@ -22,8 +22,6 @@
22#include <asm/ppc_asm.h> 22#include <asm/ppc_asm.h>
23#include <asm/asm-offsets.h> 23#include <asm/asm-offsets.h>
24 24
25#undef DEBUG
26
27 .text 25 .text
28 26
29/* 27/*
@@ -109,12 +107,6 @@ BEGIN_FTR_SECTION
109 dcbf 0,r4 107 dcbf 0,r4
110 dcbf 0,r4 108 dcbf 0,r4
111END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR) 109END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
112#ifdef DEBUG
113 lis r6,nap_enter_count@ha
114 lwz r4,nap_enter_count@l(r6)
115 addi r4,r4,1
116 stw r4,nap_enter_count@l(r6)
117#endif
1182: 1102:
119BEGIN_FTR_SECTION 111BEGIN_FTR_SECTION
120 /* Go to low speed mode on some 750FX */ 112 /* Go to low speed mode on some 750FX */
@@ -144,48 +136,42 @@ BEGIN_FTR_SECTION
144 DSSALL 136 DSSALL
145 sync 137 sync
146END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 138END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
139 rlwinm r9,r1,0,0,31-THREAD_SHIFT /* current thread_info */
140 lwz r8,TI_LOCAL_FLAGS(r9) /* set napping bit */
141 ori r8,r8,_TLF_NAPPING /* so when we take an exception */
142 stw r8,TI_LOCAL_FLAGS(r9) /* it will return to our caller */
147 mfmsr r7 143 mfmsr r7
148 ori r7,r7,MSR_EE 144 ori r7,r7,MSR_EE
149 oris r7,r7,MSR_POW@h 145 oris r7,r7,MSR_POW@h
150 sync 1461: sync
151 isync
152 mtmsr r7 147 mtmsr r7
153 isync 148 isync
154 sync 149 b 1b
155 blr 150
156
157/* 151/*
158 * Return from NAP/DOZE mode, restore some CPU specific registers, 152 * Return from NAP/DOZE mode, restore some CPU specific registers,
159 * we are called with DR/IR still off and r2 containing physical 153 * we are called with DR/IR still off and r2 containing physical
160 * address of current. 154 * address of current. R11 points to the exception frame (physical
155 * address). We have to preserve r10.
161 */ 156 */
162_GLOBAL(power_save_6xx_restore) 157_GLOBAL(power_save_6xx_restore)
163 mfspr r11,SPRN_HID0 158 lwz r9,_LINK(r11) /* interrupted in ppc6xx_idle: */
164 rlwinm. r11,r11,0,10,8 /* Clear NAP & copy NAP bit !state to cr1 EQ */ 159 stw r9,_NIP(r11) /* make it do a blr */
165 cror 4*cr1+eq,4*cr0+eq,4*cr0+eq
166BEGIN_FTR_SECTION
167 rlwinm r11,r11,0,9,7 /* Clear DOZE */
168END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
169 mtspr SPRN_HID0, r11
170 160
171#ifdef DEBUG 161#ifdef CONFIG_SMP
172 beq cr1,1f 162 mfspr r12,SPRN_SPRG3
173 lis r11,(nap_return_count-KERNELBASE)@ha 163 lwz r11,TI_CPU(r12) /* get cpu number * 4 */
174 lwz r9,nap_return_count@l(r11)
175 addi r9,r9,1
176 stw r9,nap_return_count@l(r11)
1771:
178#endif
179
180 rlwinm r9,r1,0,0,18
181 tophys(r9,r9)
182 lwz r11,TI_CPU(r9)
183 slwi r11,r11,2 164 slwi r11,r11,2
165#else
166 li r11,0
167#endif
184 /* Todo make sure all these are in the same page 168 /* Todo make sure all these are in the same page
185 * and load r22 (@ha part + CPU offset) only once 169 * and load r11 (@ha part + CPU offset) only once
186 */ 170 */
187BEGIN_FTR_SECTION 171BEGIN_FTR_SECTION
188 beq cr1,1f 172 mfspr r9,SPRN_HID0
173 andis. r9,r9,HID0_NAP@h
174 beq 1f
189 addis r9,r11,(nap_save_msscr0-KERNELBASE)@ha 175 addis r9,r11,(nap_save_msscr0-KERNELBASE)@ha
190 lwz r9,nap_save_msscr0@l(r9) 176 lwz r9,nap_save_msscr0@l(r9)
191 mtspr SPRN_MSSCR0, r9 177 mtspr SPRN_MSSCR0, r9
@@ -210,10 +196,3 @@ _GLOBAL(nap_save_hid1)
210 196
211_GLOBAL(powersave_lowspeed) 197_GLOBAL(powersave_lowspeed)
212 .long 0 198 .long 0
213
214#ifdef DEBUG
215_GLOBAL(nap_enter_count)
216 .space 4
217_GLOBAL(nap_return_count)
218 .space 4
219#endif
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
index 6dad1c02496e..d85c7c938eed 100644
--- a/arch/powerpc/kernel/idle_power4.S
+++ b/arch/powerpc/kernel/idle_power4.S
@@ -35,12 +35,16 @@ BEGIN_FTR_SECTION
35 DSSALL 35 DSSALL
36 sync 36 sync
37END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 37END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
38 clrrdi r9,r1,THREAD_SHIFT /* current thread_info */
39 ld r8,TI_LOCAL_FLAGS(r9) /* set napping bit */
40 ori r8,r8,_TLF_NAPPING /* so when we take an exception */
41 std r8,TI_LOCAL_FLAGS(r9) /* it will return to our caller */
38 mfmsr r7 42 mfmsr r7
39 ori r7,r7,MSR_EE 43 ori r7,r7,MSR_EE
40 oris r7,r7,MSR_POW@h 44 oris r7,r7,MSR_POW@h
41 sync 451: sync
42 isync 46 isync
43 mtmsrd r7 47 mtmsrd r7
44 isync 48 isync
45 sync 49 b 1b
46 blr 50
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index bb5c9501234c..57d560c68897 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -272,18 +272,26 @@ unsigned int virt_irq_to_real_map[NR_IRQS];
272 * Don't use virtual irqs 0, 1, 2 for devices. 272 * Don't use virtual irqs 0, 1, 2 for devices.
273 * The pcnet32 driver considers interrupt numbers < 2 to be invalid, 273 * The pcnet32 driver considers interrupt numbers < 2 to be invalid,
274 * and 2 is the XICS IPI interrupt. 274 * and 2 is the XICS IPI interrupt.
275 * We limit virtual irqs to 17 less than NR_IRQS so that when we 275 * We limit virtual irqs to __irq_offet_value less than virt_irq_max so
276 * offset them by 16 (to reserve the first 16 for ISA interrupts) 276 * that when we offset them we don't end up with an interrupt
277 * we don't end up with an interrupt number >= NR_IRQS. 277 * number >= virt_irq_max.
278 */ 278 */
279#define MIN_VIRT_IRQ 3 279#define MIN_VIRT_IRQ 3
280#define MAX_VIRT_IRQ (NR_IRQS - NUM_ISA_INTERRUPTS - 1) 280
281#define NR_VIRT_IRQS (MAX_VIRT_IRQ - MIN_VIRT_IRQ + 1) 281unsigned int virt_irq_max;
282static unsigned int max_virt_irq;
283static unsigned int nr_virt_irqs;
282 284
283void 285void
284virt_irq_init(void) 286virt_irq_init(void)
285{ 287{
286 int i; 288 int i;
289
290 if ((virt_irq_max == 0) || (virt_irq_max > (NR_IRQS - 1)))
291 virt_irq_max = NR_IRQS - 1;
292 max_virt_irq = virt_irq_max - __irq_offset_value;
293 nr_virt_irqs = max_virt_irq - MIN_VIRT_IRQ + 1;
294
287 for (i = 0; i < NR_IRQS; i++) 295 for (i = 0; i < NR_IRQS; i++)
288 virt_irq_to_real_map[i] = UNDEFINED_IRQ; 296 virt_irq_to_real_map[i] = UNDEFINED_IRQ;
289} 297}
@@ -308,17 +316,17 @@ int virt_irq_create_mapping(unsigned int real_irq)
308 return real_irq; 316 return real_irq;
309 } 317 }
310 318
311 /* map to a number between MIN_VIRT_IRQ and MAX_VIRT_IRQ */ 319 /* map to a number between MIN_VIRT_IRQ and max_virt_irq */
312 virq = real_irq; 320 virq = real_irq;
313 if (virq > MAX_VIRT_IRQ) 321 if (virq > max_virt_irq)
314 virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ; 322 virq = (virq % nr_virt_irqs) + MIN_VIRT_IRQ;
315 323
316 /* search for this number or a free slot */ 324 /* search for this number or a free slot */
317 first_virq = virq; 325 first_virq = virq;
318 while (virt_irq_to_real_map[virq] != UNDEFINED_IRQ) { 326 while (virt_irq_to_real_map[virq] != UNDEFINED_IRQ) {
319 if (virt_irq_to_real_map[virq] == real_irq) 327 if (virt_irq_to_real_map[virq] == real_irq)
320 return virq; 328 return virq;
321 if (++virq > MAX_VIRT_IRQ) 329 if (++virq > max_virt_irq)
322 virq = MIN_VIRT_IRQ; 330 virq = MIN_VIRT_IRQ;
323 if (virq == first_virq) 331 if (virq == first_virq)
324 goto nospace; /* oops, no free slots */ 332 goto nospace; /* oops, no free slots */
@@ -330,8 +338,8 @@ int virt_irq_create_mapping(unsigned int real_irq)
330 nospace: 338 nospace:
331 if (!warned) { 339 if (!warned) {
332 printk(KERN_CRIT "Interrupt table is full\n"); 340 printk(KERN_CRIT "Interrupt table is full\n");
333 printk(KERN_CRIT "Increase NR_IRQS (currently %d) " 341 printk(KERN_CRIT "Increase virt_irq_max (currently %d) "
334 "in your kernel sources and rebuild.\n", NR_IRQS); 342 "in your kernel sources and rebuild.\n", virt_irq_max);
335 warned = 1; 343 warned = 1;
336 } 344 }
337 return NO_IRQ; 345 return NO_IRQ;
@@ -349,8 +357,8 @@ unsigned int real_irq_to_virt_slowpath(unsigned int real_irq)
349 357
350 virq = real_irq; 358 virq = real_irq;
351 359
352 if (virq > MAX_VIRT_IRQ) 360 if (virq > max_virt_irq)
353 virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ; 361 virq = (virq % nr_virt_irqs) + MIN_VIRT_IRQ;
354 362
355 first_virq = virq; 363 first_virq = virq;
356 364
@@ -360,7 +368,7 @@ unsigned int real_irq_to_virt_slowpath(unsigned int real_irq)
360 368
361 virq++; 369 virq++;
362 370
363 if (virq >= MAX_VIRT_IRQ) 371 if (virq >= max_virt_irq)
364 virq = 0; 372 virq = 0;
365 373
366 } while (first_virq != virq); 374 } while (first_virq != virq);
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index d66c5e77fcff..7e4d54821a07 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1528,12 +1528,11 @@ static int __init prom_find_machine_type(void)
1528 * non-IBM designs ! 1528 * non-IBM designs !
1529 * - it has /rtas 1529 * - it has /rtas
1530 */ 1530 */
1531 len = prom_getprop(_prom->root, "model", 1531 len = prom_getprop(_prom->root, "device_type",
1532 compat, sizeof(compat)-1); 1532 compat, sizeof(compat)-1);
1533 if (len <= 0) 1533 if (len <= 0)
1534 return PLATFORM_GENERIC; 1534 return PLATFORM_GENERIC;
1535 compat[len] = 0; 1535 if (strncmp(compat, RELOC("chrp"), 4))
1536 if (strcmp(compat, "chrp"))
1537 return PLATFORM_GENERIC; 1536 return PLATFORM_GENERIC;
1538 1537
1539 /* Default to pSeries. We need to know if we are running LPAR */ 1538 /* Default to pSeries. We need to know if we are running LPAR */
diff --git a/arch/powerpc/kernel/rtas-proc.c b/arch/powerpc/kernel/rtas-proc.c
index 456286cf1d14..9c9ad1fa9cce 100644
--- a/arch/powerpc/kernel/rtas-proc.c
+++ b/arch/powerpc/kernel/rtas-proc.c
@@ -258,11 +258,11 @@ static int __init proc_rtas_init(void)
258 struct proc_dir_entry *entry; 258 struct proc_dir_entry *entry;
259 259
260 if (!machine_is(pseries)) 260 if (!machine_is(pseries))
261 return 1; 261 return -ENODEV;
262 262
263 rtas_node = of_find_node_by_name(NULL, "rtas"); 263 rtas_node = of_find_node_by_name(NULL, "rtas");
264 if (rtas_node == NULL) 264 if (rtas_node == NULL)
265 return 1; 265 return -ENODEV;
266 266
267 entry = create_proc_entry("ppc64/rtas/progress", S_IRUGO|S_IWUSR, NULL); 267 entry = create_proc_entry("ppc64/rtas/progress", S_IRUGO|S_IWUSR, NULL);
268 if (entry) 268 if (entry)
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 97898d5d34e5..1726bfe38ee0 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -1297,7 +1297,7 @@ static inline void setup_decr(struct spu_state *csa, struct spu *spu)
1297 cycles_t resume_time = get_cycles(); 1297 cycles_t resume_time = get_cycles();
1298 cycles_t delta_time = resume_time - csa->suspend_time; 1298 cycles_t delta_time = resume_time - csa->suspend_time;
1299 1299
1300 csa->lscsa->decr.slot[0] = delta_time; 1300 csa->lscsa->decr.slot[0] -= delta_time;
1301 } 1301 }
1302} 1302}
1303 1303
diff --git a/arch/powerpc/platforms/chrp/chrp.h b/arch/powerpc/platforms/chrp/chrp.h
index 63f0aee4c158..996c28744e96 100644
--- a/arch/powerpc/platforms/chrp/chrp.h
+++ b/arch/powerpc/platforms/chrp/chrp.h
@@ -9,3 +9,4 @@ extern long chrp_time_init(void);
9 9
10extern void chrp_find_bridges(void); 10extern void chrp_find_bridges(void);
11extern void chrp_event_scan(unsigned long); 11extern void chrp_event_scan(unsigned long);
12extern void chrp_pcibios_fixup(void);
diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c
index 8ef279ad36ad..ac224876ce59 100644
--- a/arch/powerpc/platforms/chrp/pci.c
+++ b/arch/powerpc/platforms/chrp/pci.c
@@ -23,6 +23,8 @@
23#include <asm/grackle.h> 23#include <asm/grackle.h>
24#include <asm/rtas.h> 24#include <asm/rtas.h>
25 25
26#include "chrp.h"
27
26/* LongTrail */ 28/* LongTrail */
27void __iomem *gg2_pci_config_base; 29void __iomem *gg2_pci_config_base;
28 30
@@ -314,6 +316,6 @@ chrp_find_bridges(void)
314 } 316 }
315 317
316 /* Do not fixup interrupts from OF tree on pegasos */ 318 /* Do not fixup interrupts from OF tree on pegasos */
317 if (is_pegasos == 0) 319 if (is_pegasos)
318 ppc_md.pcibios_fixup = chrp_pcibios_fixup; 320 ppc_md.pcibios_fixup = NULL;
319} 321}
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index 23a201718704..18d89f38796b 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -440,8 +440,6 @@ void __init chrp_init_IRQ(void)
440 440
441 if (_chrp_type == _CHRP_Pegasos) 441 if (_chrp_type == _CHRP_Pegasos)
442 ppc_md.get_irq = i8259_irq; 442 ppc_md.get_irq = i8259_irq;
443 else
444 ppc_md.get_irq = mpic_get_irq;
445 443
446#if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON) 444#if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON)
447 /* see if there is a keyboard in the device tree 445 /* see if there is a keyboard in the device tree
@@ -528,26 +526,24 @@ static int __init chrp_probe(void)
528 /* Assume we have an 8259... */ 526 /* Assume we have an 8259... */
529 __irq_offset_value = NUM_ISA_INTERRUPTS; 527 __irq_offset_value = NUM_ISA_INTERRUPTS;
530 528
531 ppc_md.setup_arch = chrp_setup_arch; 529 return 1;
532 ppc_md.show_cpuinfo = chrp_show_cpuinfo;
533
534 ppc_md.init_IRQ = chrp_init_IRQ;
535 ppc_md.init = chrp_init2;
536
537 ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;
538
539 ppc_md.restart = rtas_restart;
540 ppc_md.power_off = rtas_power_off;
541 ppc_md.halt = rtas_halt;
542
543 ppc_md.time_init = chrp_time_init;
544 ppc_md.calibrate_decr = generic_calibrate_decr;
545
546 /* this may get overridden with rtas routines later... */
547 ppc_md.set_rtc_time = chrp_set_rtc_time;
548 ppc_md.get_rtc_time = chrp_get_rtc_time;
549
550#ifdef CONFIG_SMP
551 smp_ops = &chrp_smp_ops;
552#endif /* CONFIG_SMP */
553} 530}
531
532define_machine(chrp) {
533 .name = "CHRP",
534 .probe = chrp_probe,
535 .setup_arch = chrp_setup_arch,
536 .init = chrp_init2,
537 .show_cpuinfo = chrp_show_cpuinfo,
538 .init_IRQ = chrp_init_IRQ,
539 .get_irq = mpic_get_irq,
540 .pcibios_fixup = chrp_pcibios_fixup,
541 .restart = rtas_restart,
542 .power_off = rtas_power_off,
543 .halt = rtas_halt,
544 .time_init = chrp_time_init,
545 .set_rtc_time = chrp_set_rtc_time,
546 .get_rtc_time = chrp_get_rtc_time,
547 .calibrate_decr = generic_calibrate_decr,
548 .phys_mem_access_prot = pci_phys_mem_access_prot,
549};
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index 6ce8a404ba6b..a6fd9bedb074 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -54,6 +54,7 @@
54#include <asm/iseries/hv_lp_event.h> 54#include <asm/iseries/hv_lp_event.h>
55#include <asm/iseries/lpar_map.h> 55#include <asm/iseries/lpar_map.h>
56#include <asm/udbg.h> 56#include <asm/udbg.h>
57#include <asm/irq.h>
57 58
58#include "naca.h" 59#include "naca.h"
59#include "setup.h" 60#include "setup.h"
@@ -684,6 +685,12 @@ static int __init iseries_probe(void)
684 powerpc_firmware_features |= FW_FEATURE_ISERIES; 685 powerpc_firmware_features |= FW_FEATURE_ISERIES;
685 powerpc_firmware_features |= FW_FEATURE_LPAR; 686 powerpc_firmware_features |= FW_FEATURE_LPAR;
686 687
688 /*
689 * The Hypervisor only allows us up to 256 interrupt
690 * sources (the irq number is passed in a u8).
691 */
692 virt_irq_max = 255;
693
687 return 1; 694 return 1;
688} 695}
689 696
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 780fb27a0099..32eaddfa5470 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -957,8 +957,10 @@ static void eeh_remove_device(struct pci_dev *dev)
957 pci_addr_cache_remove_device(dev); 957 pci_addr_cache_remove_device(dev);
958 958
959 dn = pci_device_to_OF_node(dev); 959 dn = pci_device_to_OF_node(dev);
960 PCI_DN(dn)->pcidev = NULL; 960 if (PCI_DN(dn)->pcidev) {
961 pci_dev_put (dev); 961 PCI_DN(dn)->pcidev = NULL;
962 pci_dev_put (dev);
963 }
962} 964}
963 965
964void eeh_remove_bus_device(struct pci_dev *dev) 966void eeh_remove_bus_device(struct pci_dev *dev)
diff --git a/arch/powerpc/platforms/pseries/rtasd.c b/arch/powerpc/platforms/pseries/rtasd.c
index fcc4d561a236..e0000ce769e5 100644
--- a/arch/powerpc/platforms/pseries/rtasd.c
+++ b/arch/powerpc/platforms/pseries/rtasd.c
@@ -488,7 +488,7 @@ static int __init rtas_init(void)
488 /* No RTAS */ 488 /* No RTAS */
489 if (rtas_token("event-scan") == RTAS_UNKNOWN_SERVICE) { 489 if (rtas_token("event-scan") == RTAS_UNKNOWN_SERVICE) {
490 printk(KERN_INFO "rtasd: no event-scan on system\n"); 490 printk(KERN_INFO "rtasd: no event-scan on system\n");
491 return 1; 491 return -ENODEV;
492 } 492 }
493 493
494 entry = create_proc_entry("ppc64/rtas/error_log", S_IRUSR, NULL); 494 entry = create_proc_entry("ppc64/rtas/error_log", S_IRUSR, NULL);
diff --git a/arch/ppc/syslib/ppc_sys.c b/arch/ppc/syslib/ppc_sys.c
index 60c724e11584..7662c4e6e7d6 100644
--- a/arch/ppc/syslib/ppc_sys.c
+++ b/arch/ppc/syslib/ppc_sys.c
@@ -156,12 +156,13 @@ void platform_notify_map(const struct platform_notify_dev_map *map,
156 while (map->bus_id != NULL) { 156 while (map->bus_id != NULL) {
157 idx = -1; 157 idx = -1;
158 s = strrchr(dev->bus_id, '.'); 158 s = strrchr(dev->bus_id, '.');
159 if (s != NULL) 159 if (s != NULL) {
160 idx = (int)simple_strtol(s + 1, NULL, 10); 160 idx = (int)simple_strtol(s + 1, NULL, 10);
161 else 161 len = s - dev->bus_id;
162 } else {
162 s = dev->bus_id; 163 s = dev->bus_id;
163 164 len = strlen(dev->bus_id);
164 len = s - dev->bus_id; 165 }
165 166
166 if (!strncmp(dev->bus_id, map->bus_id, len)) { 167 if (!strncmp(dev->bus_id, map->bus_id, len)) {
167 pdev = container_of(dev, struct platform_device, dev); 168 pdev = container_of(dev, struct platform_device, dev);
diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig
index 3c45ec22b3fe..69db0c0721d1 100644
--- a/arch/x86_64/defconfig
+++ b/arch/x86_64/defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.17-rc1 3# Linux kernel version: 2.6.17-rc1-git11
4# Mon Apr 3 16:11:14 2006 4# Sun Apr 16 07:22:36 2006
5# 5#
6CONFIG_X86_64=y 6CONFIG_X86_64=y
7CONFIG_64BIT=y 7CONFIG_64BIT=y
@@ -57,6 +57,7 @@ CONFIG_FUTEX=y
57CONFIG_EPOLL=y 57CONFIG_EPOLL=y
58CONFIG_SHMEM=y 58CONFIG_SHMEM=y
59CONFIG_SLAB=y 59CONFIG_SLAB=y
60CONFIG_DOUBLEFAULT=y
60# CONFIG_TINY_SHMEM is not set 61# CONFIG_TINY_SHMEM is not set
61CONFIG_BASE_SMALL=0 62CONFIG_BASE_SMALL=0
62# CONFIG_SLOB is not set 63# CONFIG_SLOB is not set
@@ -121,6 +122,7 @@ CONFIG_PREEMPT_VOLUNTARY=y
121CONFIG_PREEMPT_BKL=y 122CONFIG_PREEMPT_BKL=y
122CONFIG_NUMA=y 123CONFIG_NUMA=y
123CONFIG_K8_NUMA=y 124CONFIG_K8_NUMA=y
125CONFIG_NODES_SHIFT=6
124CONFIG_X86_64_ACPI_NUMA=y 126CONFIG_X86_64_ACPI_NUMA=y
125CONFIG_NUMA_EMU=y 127CONFIG_NUMA_EMU=y
126CONFIG_ARCH_DISCONTIGMEM_ENABLE=y 128CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
@@ -544,7 +546,6 @@ CONFIG_SCSI_SATA_INTEL_COMBINED=y
544# CONFIG_SCSI_INIA100 is not set 546# CONFIG_SCSI_INIA100 is not set
545# CONFIG_SCSI_SYM53C8XX_2 is not set 547# CONFIG_SCSI_SYM53C8XX_2 is not set
546# CONFIG_SCSI_IPR is not set 548# CONFIG_SCSI_IPR is not set
547# CONFIG_SCSI_QLOGIC_FC is not set
548# CONFIG_SCSI_QLOGIC_1280 is not set 549# CONFIG_SCSI_QLOGIC_1280 is not set
549# CONFIG_SCSI_QLA_FC is not set 550# CONFIG_SCSI_QLA_FC is not set
550# CONFIG_SCSI_LPFC is not set 551# CONFIG_SCSI_LPFC is not set
@@ -1045,9 +1046,7 @@ CONFIG_USB_HIDINPUT=y
1045# CONFIG_USB_ACECAD is not set 1046# CONFIG_USB_ACECAD is not set
1046# CONFIG_USB_KBTAB is not set 1047# CONFIG_USB_KBTAB is not set
1047# CONFIG_USB_POWERMATE is not set 1048# CONFIG_USB_POWERMATE is not set
1048# CONFIG_USB_MTOUCH is not set 1049# CONFIG_USB_TOUCHSCREEN is not set
1049# CONFIG_USB_ITMTOUCH is not set
1050# CONFIG_USB_EGALAX is not set
1051# CONFIG_USB_YEALINK is not set 1050# CONFIG_USB_YEALINK is not set
1052# CONFIG_USB_XPAD is not set 1051# CONFIG_USB_XPAD is not set
1053# CONFIG_USB_ATI_REMOTE is not set 1052# CONFIG_USB_ATI_REMOTE is not set
@@ -1118,6 +1117,14 @@ CONFIG_USB_MON=y
1118# CONFIG_NEW_LEDS is not set 1117# CONFIG_NEW_LEDS is not set
1119 1118
1120# 1119#
1120# LED drivers
1121#
1122
1123#
1124# LED Triggers
1125#
1126
1127#
1121# InfiniBand support 1128# InfiniBand support
1122# 1129#
1123# CONFIG_INFINIBAND is not set 1130# CONFIG_INFINIBAND is not set
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index 5a9802676689..57fc37e0fb9c 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -694,4 +694,5 @@ ia32_sys_call_table:
694 .quad compat_sys_get_robust_list 694 .quad compat_sys_get_robust_list
695 .quad sys_splice 695 .quad sys_splice
696 .quad sys_sync_file_range 696 .quad sys_sync_file_range
697 .quad sys_tee
697ia32_syscall_end: 698ia32_syscall_end:
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 6bda322d3caf..2700b1375c1f 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -30,6 +30,7 @@
30#include <linux/moduleparam.h> 30#include <linux/moduleparam.h>
31#include <linux/nmi.h> 31#include <linux/nmi.h>
32#include <linux/kprobes.h> 32#include <linux/kprobes.h>
33#include <linux/kexec.h>
33 34
34#include <asm/system.h> 35#include <asm/system.h>
35#include <asm/uaccess.h> 36#include <asm/uaccess.h>
@@ -433,6 +434,8 @@ void __kprobes __die(const char * str, struct pt_regs * regs, long err)
433 printk(KERN_ALERT "RIP "); 434 printk(KERN_ALERT "RIP ");
434 printk_address(regs->rip); 435 printk_address(regs->rip);
435 printk(" RSP <%016lx>\n", regs->rsp); 436 printk(" RSP <%016lx>\n", regs->rsp);
437 if (kexec_should_crash(current))
438 crash_kexec(regs);
436} 439}
437 440
438void die(const char * str, struct pt_regs * regs, long err) 441void die(const char * str, struct pt_regs * regs, long err)
@@ -455,6 +458,8 @@ void __kprobes die_nmi(char *str, struct pt_regs *regs)
455 */ 458 */
456 printk(str, safe_smp_processor_id()); 459 printk(str, safe_smp_processor_id());
457 show_registers(regs); 460 show_registers(regs);
461 if (kexec_should_crash(current))
462 crash_kexec(regs);
458 if (panic_on_timeout || panic_on_oops) 463 if (panic_on_timeout || panic_on_oops)
459 panic("nmi watchdog"); 464 panic("nmi watchdog");
460 printk("console shuts up ...\n"); 465 printk("console shuts up ...\n");
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 296708ceceb2..e25a5d79ab27 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1844,9 +1844,10 @@ static void __exit as_exit(void)
1844 DECLARE_COMPLETION(all_gone); 1844 DECLARE_COMPLETION(all_gone);
1845 elv_unregister(&iosched_as); 1845 elv_unregister(&iosched_as);
1846 ioc_gone = &all_gone; 1846 ioc_gone = &all_gone;
1847 barrier(); 1847 /* ioc_gone's update must be visible before reading ioc_count */
1848 smp_wmb();
1848 if (atomic_read(&ioc_count)) 1849 if (atomic_read(&ioc_count))
1849 complete(ioc_gone); 1850 wait_for_completion(ioc_gone);
1850 synchronize_rcu(); 1851 synchronize_rcu();
1851 kmem_cache_destroy(arq_pool); 1852 kmem_cache_destroy(arq_pool);
1852} 1853}
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 67d446de0227..2540dfaa3e38 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1472,19 +1472,37 @@ out:
1472 return cfqq; 1472 return cfqq;
1473} 1473}
1474 1474
1475static void
1476cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
1477{
1478 read_lock(&cfq_exit_lock);
1479 rb_erase(&cic->rb_node, &ioc->cic_root);
1480 read_unlock(&cfq_exit_lock);
1481 kmem_cache_free(cfq_ioc_pool, cic);
1482 atomic_dec(&ioc_count);
1483}
1484
1475static struct cfq_io_context * 1485static struct cfq_io_context *
1476cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc) 1486cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
1477{ 1487{
1478 struct rb_node *n = ioc->cic_root.rb_node; 1488 struct rb_node *n;
1479 struct cfq_io_context *cic; 1489 struct cfq_io_context *cic;
1480 void *key = cfqd; 1490 void *k, *key = cfqd;
1481 1491
1492restart:
1493 n = ioc->cic_root.rb_node;
1482 while (n) { 1494 while (n) {
1483 cic = rb_entry(n, struct cfq_io_context, rb_node); 1495 cic = rb_entry(n, struct cfq_io_context, rb_node);
1496 /* ->key must be copied to avoid race with cfq_exit_queue() */
1497 k = cic->key;
1498 if (unlikely(!k)) {
1499 cfq_drop_dead_cic(ioc, cic);
1500 goto restart;
1501 }
1484 1502
1485 if (key < cic->key) 1503 if (key < k)
1486 n = n->rb_left; 1504 n = n->rb_left;
1487 else if (key > cic->key) 1505 else if (key > k)
1488 n = n->rb_right; 1506 n = n->rb_right;
1489 else 1507 else
1490 return cic; 1508 return cic;
@@ -1497,29 +1515,37 @@ static inline void
1497cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc, 1515cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
1498 struct cfq_io_context *cic) 1516 struct cfq_io_context *cic)
1499{ 1517{
1500 struct rb_node **p = &ioc->cic_root.rb_node; 1518 struct rb_node **p;
1501 struct rb_node *parent = NULL; 1519 struct rb_node *parent;
1502 struct cfq_io_context *__cic; 1520 struct cfq_io_context *__cic;
1503 1521 void *k;
1504 read_lock(&cfq_exit_lock);
1505 1522
1506 cic->ioc = ioc; 1523 cic->ioc = ioc;
1507 cic->key = cfqd; 1524 cic->key = cfqd;
1508 1525
1509 ioc->set_ioprio = cfq_ioc_set_ioprio; 1526 ioc->set_ioprio = cfq_ioc_set_ioprio;
1510 1527restart:
1528 parent = NULL;
1529 p = &ioc->cic_root.rb_node;
1511 while (*p) { 1530 while (*p) {
1512 parent = *p; 1531 parent = *p;
1513 __cic = rb_entry(parent, struct cfq_io_context, rb_node); 1532 __cic = rb_entry(parent, struct cfq_io_context, rb_node);
1533 /* ->key must be copied to avoid race with cfq_exit_queue() */
1534 k = __cic->key;
1535 if (unlikely(!k)) {
1536 cfq_drop_dead_cic(ioc, cic);
1537 goto restart;
1538 }
1514 1539
1515 if (cic->key < __cic->key) 1540 if (cic->key < k)
1516 p = &(*p)->rb_left; 1541 p = &(*p)->rb_left;
1517 else if (cic->key > __cic->key) 1542 else if (cic->key > k)
1518 p = &(*p)->rb_right; 1543 p = &(*p)->rb_right;
1519 else 1544 else
1520 BUG(); 1545 BUG();
1521 } 1546 }
1522 1547
1548 read_lock(&cfq_exit_lock);
1523 rb_link_node(&cic->rb_node, parent, p); 1549 rb_link_node(&cic->rb_node, parent, p);
1524 rb_insert_color(&cic->rb_node, &ioc->cic_root); 1550 rb_insert_color(&cic->rb_node, &ioc->cic_root);
1525 list_add(&cic->queue_list, &cfqd->cic_list); 1551 list_add(&cic->queue_list, &cfqd->cic_list);
@@ -2439,9 +2465,10 @@ static void __exit cfq_exit(void)
2439 DECLARE_COMPLETION(all_gone); 2465 DECLARE_COMPLETION(all_gone);
2440 elv_unregister(&iosched_cfq); 2466 elv_unregister(&iosched_cfq);
2441 ioc_gone = &all_gone; 2467 ioc_gone = &all_gone;
2442 barrier(); 2468 /* ioc_gone's update must be visible before reading ioc_count */
2469 smp_wmb();
2443 if (atomic_read(&ioc_count)) 2470 if (atomic_read(&ioc_count))
2444 complete(ioc_gone); 2471 wait_for_completion(ioc_gone);
2445 synchronize_rcu(); 2472 synchronize_rcu();
2446 cfq_slab_kill(); 2473 cfq_slab_kill();
2447} 2474}
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
index edc72a6348a7..e1aadae00623 100644
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -815,8 +815,6 @@ extern int drm_mem_info(char *buf, char **start, off_t offset,
815extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area); 815extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
816extern void *drm_ioremap(unsigned long offset, unsigned long size, 816extern void *drm_ioremap(unsigned long offset, unsigned long size,
817 drm_device_t * dev); 817 drm_device_t * dev);
818extern void *drm_ioremap_nocache(unsigned long offset, unsigned long size,
819 drm_device_t * dev);
820extern void drm_ioremapfree(void *pt, unsigned long size, drm_device_t * dev); 818extern void drm_ioremapfree(void *pt, unsigned long size, drm_device_t * dev);
821 819
822extern DRM_AGP_MEM *drm_alloc_agp(drm_device_t * dev, int pages, u32 type); 820extern DRM_AGP_MEM *drm_alloc_agp(drm_device_t * dev, int pages, u32 type);
@@ -1022,11 +1020,13 @@ static __inline__ void drm_core_ioremap(struct drm_map *map,
1022 map->handle = drm_ioremap(map->offset, map->size, dev); 1020 map->handle = drm_ioremap(map->offset, map->size, dev);
1023} 1021}
1024 1022
1023#if 0
1025static __inline__ void drm_core_ioremap_nocache(struct drm_map *map, 1024static __inline__ void drm_core_ioremap_nocache(struct drm_map *map,
1026 struct drm_device *dev) 1025 struct drm_device *dev)
1027{ 1026{
1028 map->handle = drm_ioremap_nocache(map->offset, map->size, dev); 1027 map->handle = drm_ioremap_nocache(map->offset, map->size, dev);
1029} 1028}
1029#endif /* 0 */
1030 1030
1031static __inline__ void drm_core_ioremapfree(struct drm_map *map, 1031static __inline__ void drm_core_ioremapfree(struct drm_map *map,
1032 struct drm_device *dev) 1032 struct drm_device *dev)
diff --git a/drivers/char/drm/drm_drv.c b/drivers/char/drm/drm_drv.c
index dc6bbe8a18dc..3c0b882a8e72 100644
--- a/drivers/char/drm/drm_drv.c
+++ b/drivers/char/drm/drm_drv.c
@@ -75,8 +75,8 @@ static drm_ioctl_desc_t drm_ioctls[] = {
75 [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = {drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 75 [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = {drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
76 [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = {drm_getsareactx, DRM_AUTH}, 76 [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = {drm_getsareactx, DRM_AUTH},
77 77
78 [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = {drm_addctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 78 [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = {drm_addctx, DRM_AUTH|DRM_ROOT_ONLY},
79 [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = {drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 79 [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = {drm_rmctx, DRM_AUTH|DRM_ROOT_ONLY},
80 [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = {drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 80 [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = {drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
81 [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = {drm_getctx, DRM_AUTH}, 81 [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = {drm_getctx, DRM_AUTH},
82 [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = {drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, 82 [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = {drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
diff --git a/drivers/char/drm/drm_memory.c b/drivers/char/drm/drm_memory.c
index dddf8de66143..7e3318e1d1c6 100644
--- a/drivers/char/drm/drm_memory.c
+++ b/drivers/char/drm/drm_memory.c
@@ -80,6 +80,71 @@ void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
80} 80}
81 81
82#if __OS_HAS_AGP 82#if __OS_HAS_AGP
83/*
84 * Find the drm_map that covers the range [offset, offset+size).
85 */
86static drm_map_t *drm_lookup_map(unsigned long offset,
87 unsigned long size, drm_device_t * dev)
88{
89 struct list_head *list;
90 drm_map_list_t *r_list;
91 drm_map_t *map;
92
93 list_for_each(list, &dev->maplist->head) {
94 r_list = (drm_map_list_t *) list;
95 map = r_list->map;
96 if (!map)
97 continue;
98 if (map->offset <= offset
99 && (offset + size) <= (map->offset + map->size))
100 return map;
101 }
102 return NULL;
103}
104
105static void *agp_remap(unsigned long offset, unsigned long size,
106 drm_device_t * dev)
107{
108 unsigned long *phys_addr_map, i, num_pages =
109 PAGE_ALIGN(size) / PAGE_SIZE;
110 struct drm_agp_mem *agpmem;
111 struct page **page_map;
112 void *addr;
113
114 size = PAGE_ALIGN(size);
115
116#ifdef __alpha__
117 offset -= dev->hose->mem_space->start;
118#endif
119
120 for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next)
121 if (agpmem->bound <= offset
122 && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
123 (offset + size))
124 break;
125 if (!agpmem)
126 return NULL;
127
128 /*
129 * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
130 * the CPU do not get remapped by the GART. We fix this by using the kernel's
131 * page-table instead (that's probably faster anyhow...).
132 */
133 /* note: use vmalloc() because num_pages could be large... */
134 page_map = vmalloc(num_pages * sizeof(struct page *));
135 if (!page_map)
136 return NULL;
137
138 phys_addr_map =
139 agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
140 for (i = 0; i < num_pages; ++i)
141 page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);
142 addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
143 vfree(page_map);
144
145 return addr;
146}
147
83/** Wrapper around agp_allocate_memory() */ 148/** Wrapper around agp_allocate_memory() */
84DRM_AGP_MEM *drm_alloc_agp(drm_device_t * dev, int pages, u32 type) 149DRM_AGP_MEM *drm_alloc_agp(drm_device_t * dev, int pages, u32 type)
85{ 150{
@@ -103,5 +168,74 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
103{ 168{
104 return drm_agp_unbind_memory(handle); 169 return drm_agp_unbind_memory(handle);
105} 170}
171
172#else /* __OS_HAS_AGP */
173
174static inline drm_map_t *drm_lookup_map(unsigned long offset,
175 unsigned long size, drm_device_t * dev)
176{
177 return NULL;
178}
179
180static inline void *agp_remap(unsigned long offset, unsigned long size,
181 drm_device_t * dev)
182{
183 return NULL;
184}
185
106#endif /* agp */ 186#endif /* agp */
187
188void *drm_ioremap(unsigned long offset, unsigned long size,
189 drm_device_t * dev)
190{
191 if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
192 drm_map_t *map = drm_lookup_map(offset, size, dev);
193
194 if (map && map->type == _DRM_AGP)
195 return agp_remap(offset, size, dev);
196 }
197 return ioremap(offset, size);
198}
199EXPORT_SYMBOL(drm_ioremap);
200
201#if 0
202void *drm_ioremap_nocache(unsigned long offset,
203 unsigned long size, drm_device_t * dev)
204{
205 if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
206 drm_map_t *map = drm_lookup_map(offset, size, dev);
207
208 if (map && map->type == _DRM_AGP)
209 return agp_remap(offset, size, dev);
210 }
211 return ioremap_nocache(offset, size);
212}
213#endif /* 0 */
214
215void drm_ioremapfree(void *pt, unsigned long size,
216 drm_device_t * dev)
217{
218 /*
219 * This is a bit ugly. It would be much cleaner if the DRM API would use separate
220 * routines for handling mappings in the AGP space. Hopefully this can be done in
221 * a future revision of the interface...
222 */
223 if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture
224 && ((unsigned long)pt >= VMALLOC_START
225 && (unsigned long)pt < VMALLOC_END)) {
226 unsigned long offset;
227 drm_map_t *map;
228
229 offset = drm_follow_page(pt) | ((unsigned long)pt & ~PAGE_MASK);
230 map = drm_lookup_map(offset, size, dev);
231 if (map && map->type == _DRM_AGP) {
232 vunmap(pt);
233 return;
234 }
235 }
236
237 iounmap(pt);
238}
239EXPORT_SYMBOL(drm_ioremapfree);
240
107#endif /* debug_memory */ 241#endif /* debug_memory */
diff --git a/drivers/char/drm/drm_memory.h b/drivers/char/drm/drm_memory.h
index 3732a61c3762..714d9aedcff5 100644
--- a/drivers/char/drm/drm_memory.h
+++ b/drivers/char/drm/drm_memory.h
@@ -57,71 +57,6 @@
57# endif 57# endif
58#endif 58#endif
59 59
60/*
61 * Find the drm_map that covers the range [offset, offset+size).
62 */
63static inline drm_map_t *drm_lookup_map(unsigned long offset,
64 unsigned long size, drm_device_t * dev)
65{
66 struct list_head *list;
67 drm_map_list_t *r_list;
68 drm_map_t *map;
69
70 list_for_each(list, &dev->maplist->head) {
71 r_list = (drm_map_list_t *) list;
72 map = r_list->map;
73 if (!map)
74 continue;
75 if (map->offset <= offset
76 && (offset + size) <= (map->offset + map->size))
77 return map;
78 }
79 return NULL;
80}
81
82static inline void *agp_remap(unsigned long offset, unsigned long size,
83 drm_device_t * dev)
84{
85 unsigned long *phys_addr_map, i, num_pages =
86 PAGE_ALIGN(size) / PAGE_SIZE;
87 struct drm_agp_mem *agpmem;
88 struct page **page_map;
89 void *addr;
90
91 size = PAGE_ALIGN(size);
92
93#ifdef __alpha__
94 offset -= dev->hose->mem_space->start;
95#endif
96
97 for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next)
98 if (agpmem->bound <= offset
99 && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
100 (offset + size))
101 break;
102 if (!agpmem)
103 return NULL;
104
105 /*
106 * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
107 * the CPU do not get remapped by the GART. We fix this by using the kernel's
108 * page-table instead (that's probably faster anyhow...).
109 */
110 /* note: use vmalloc() because num_pages could be large... */
111 page_map = vmalloc(num_pages * sizeof(struct page *));
112 if (!page_map)
113 return NULL;
114
115 phys_addr_map =
116 agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
117 for (i = 0; i < num_pages; ++i)
118 page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);
119 addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
120 vfree(page_map);
121
122 return addr;
123}
124
125static inline unsigned long drm_follow_page(void *vaddr) 60static inline unsigned long drm_follow_page(void *vaddr)
126{ 61{
127 pgd_t *pgd = pgd_offset_k((unsigned long)vaddr); 62 pgd_t *pgd = pgd_offset_k((unsigned long)vaddr);
@@ -133,18 +68,6 @@ static inline unsigned long drm_follow_page(void *vaddr)
133 68
134#else /* __OS_HAS_AGP */ 69#else /* __OS_HAS_AGP */
135 70
136static inline drm_map_t *drm_lookup_map(unsigned long offset,
137 unsigned long size, drm_device_t * dev)
138{
139 return NULL;
140}
141
142static inline void *agp_remap(unsigned long offset, unsigned long size,
143 drm_device_t * dev)
144{
145 return NULL;
146}
147
148static inline unsigned long drm_follow_page(void *vaddr) 71static inline unsigned long drm_follow_page(void *vaddr)
149{ 72{
150 return 0; 73 return 0;
@@ -152,51 +75,8 @@ static inline unsigned long drm_follow_page(void *vaddr)
152 75
153#endif 76#endif
154 77
155static inline void *drm_ioremap(unsigned long offset, unsigned long size, 78void *drm_ioremap(unsigned long offset, unsigned long size,
156 drm_device_t * dev) 79 drm_device_t * dev);
157{
158 if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
159 drm_map_t *map = drm_lookup_map(offset, size, dev);
160
161 if (map && map->type == _DRM_AGP)
162 return agp_remap(offset, size, dev);
163 }
164 return ioremap(offset, size);
165}
166
167static inline void *drm_ioremap_nocache(unsigned long offset,
168 unsigned long size, drm_device_t * dev)
169{
170 if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
171 drm_map_t *map = drm_lookup_map(offset, size, dev);
172
173 if (map && map->type == _DRM_AGP)
174 return agp_remap(offset, size, dev);
175 }
176 return ioremap_nocache(offset, size);
177}
178
179static inline void drm_ioremapfree(void *pt, unsigned long size,
180 drm_device_t * dev)
181{
182 /*
183 * This is a bit ugly. It would be much cleaner if the DRM API would use separate
184 * routines for handling mappings in the AGP space. Hopefully this can be done in
185 * a future revision of the interface...
186 */
187 if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture
188 && ((unsigned long)pt >= VMALLOC_START
189 && (unsigned long)pt < VMALLOC_END)) {
190 unsigned long offset;
191 drm_map_t *map;
192
193 offset = drm_follow_page(pt) | ((unsigned long)pt & ~PAGE_MASK);
194 map = drm_lookup_map(offset, size, dev);
195 if (map && map->type == _DRM_AGP) {
196 vunmap(pt);
197 return;
198 }
199 }
200 80
201 iounmap(pt); 81void drm_ioremapfree(void *pt, unsigned long size,
202} 82 drm_device_t * dev);
diff --git a/drivers/char/drm/drm_memory_debug.h b/drivers/char/drm/drm_memory_debug.h
index 7868341817da..6543b9a14c42 100644
--- a/drivers/char/drm/drm_memory_debug.h
+++ b/drivers/char/drm/drm_memory_debug.h
@@ -229,6 +229,7 @@ void *drm_ioremap (unsigned long offset, unsigned long size,
229 return pt; 229 return pt;
230} 230}
231 231
232#if 0
232void *drm_ioremap_nocache (unsigned long offset, unsigned long size, 233void *drm_ioremap_nocache (unsigned long offset, unsigned long size,
233 drm_device_t * dev) { 234 drm_device_t * dev) {
234 void *pt; 235 void *pt;
@@ -251,6 +252,7 @@ void *drm_ioremap_nocache (unsigned long offset, unsigned long size,
251 spin_unlock(&drm_mem_lock); 252 spin_unlock(&drm_mem_lock);
252 return pt; 253 return pt;
253} 254}
255#endif /* 0 */
254 256
255void drm_ioremapfree (void *pt, unsigned long size, drm_device_t * dev) { 257void drm_ioremapfree (void *pt, unsigned long size, drm_device_t * dev) {
256 int alloc_count; 258 int alloc_count;
diff --git a/drivers/char/drm/drm_pci.c b/drivers/char/drm/drm_pci.c
index b28ca9cea8a2..86a0f1c22091 100644
--- a/drivers/char/drm/drm_pci.c
+++ b/drivers/char/drm/drm_pci.c
@@ -37,6 +37,7 @@
37 */ 37 */
38 38
39#include <linux/pci.h> 39#include <linux/pci.h>
40#include <linux/dma-mapping.h>
40#include "drmP.h" 41#include "drmP.h"
41 42
42/**********************************************************************/ 43/**********************************************************************/
diff --git a/drivers/char/drm/via_irq.c b/drivers/char/drm/via_irq.c
index 6152415644e9..c33d068cde19 100644
--- a/drivers/char/drm/via_irq.c
+++ b/drivers/char/drm/via_irq.c
@@ -196,9 +196,9 @@ via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
196{ 196{
197 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 197 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
198 unsigned int cur_irq_sequence; 198 unsigned int cur_irq_sequence;
199 drm_via_irq_t *cur_irq = dev_priv->via_irqs; 199 drm_via_irq_t *cur_irq;
200 int ret = 0; 200 int ret = 0;
201 maskarray_t *masks = dev_priv->irq_masks; 201 maskarray_t *masks;
202 int real_irq; 202 int real_irq;
203 203
204 DRM_DEBUG("%s\n", __FUNCTION__); 204 DRM_DEBUG("%s\n", __FUNCTION__);
@@ -221,8 +221,9 @@ via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
221 __FUNCTION__, irq); 221 __FUNCTION__, irq);
222 return DRM_ERR(EINVAL); 222 return DRM_ERR(EINVAL);
223 } 223 }
224 224
225 cur_irq += real_irq; 225 masks = dev_priv->irq_masks;
226 cur_irq = dev_priv->via_irqs + real_irq;
226 227
227 if (masks[real_irq][2] && !force_sequence) { 228 if (masks[real_irq][2] && !force_sequence) {
228 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ, 229 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
@@ -247,11 +248,12 @@ void via_driver_irq_preinstall(drm_device_t * dev)
247{ 248{
248 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 249 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
249 u32 status; 250 u32 status;
250 drm_via_irq_t *cur_irq = dev_priv->via_irqs; 251 drm_via_irq_t *cur_irq;
251 int i; 252 int i;
252 253
253 DRM_DEBUG("driver_irq_preinstall: dev_priv: %p\n", dev_priv); 254 DRM_DEBUG("driver_irq_preinstall: dev_priv: %p\n", dev_priv);
254 if (dev_priv) { 255 if (dev_priv) {
256 cur_irq = dev_priv->via_irqs;
255 257
256 dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE; 258 dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE;
257 dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING; 259 dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING;
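Both via_irq.c hunks have the same point: dev_priv is only a cast of dev->dev_private, so cur_irq and masks must not be derived from it in the declarations, before the function has validated dev_priv (and, in the wait path, the translated real_irq index). A condensed sketch of the resulting shape — via_lookup_real_irq() is a hypothetical stand-in for the driver's irq translation; the dev_priv fields and DRM_ERR() are taken from the hunks themselves:

static int via_irq_wait_sketch(drm_device_t *dev, unsigned int irq)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	drm_via_irq_t *cur_irq;		/* no dereference of dev_priv here */
	maskarray_t *masks;		/* likewise */
	int real_irq;

	if (!dev_priv)			/* validate before touching it */
		return DRM_ERR(EINVAL);

	real_irq = via_lookup_real_irq(dev_priv, irq);	/* hypothetical helper */
	if (real_irq < 0)
		return DRM_ERR(EINVAL);

	masks = dev_priv->irq_masks;			/* safe now */
	cur_irq = dev_priv->via_irqs + real_irq;

	/* ... DRM_WAIT_ON(ret, cur_irq->irq_queue, ...) as in the original ... */
	return 0;
}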
diff --git a/fs/open.c b/fs/open.c
index c32c89d6d8db..53ec28c36777 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -331,7 +331,10 @@ out:
331 331
332asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length) 332asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length)
333{ 333{
334 return do_sys_ftruncate(fd, length, 1); 334 long ret = do_sys_ftruncate(fd, length, 1);
335 /* avoid REGPARM breakage on x86: */
336 prevent_tail_call(ret);
337 return ret;
335} 338}
336 339
337/* LFS versions of truncate are only needed on 32 bit machines */ 340/* LFS versions of truncate are only needed on 32 bit machines */
@@ -343,7 +346,10 @@ asmlinkage long sys_truncate64(const char __user * path, loff_t length)
343 346
344asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length) 347asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length)
345{ 348{
346 return do_sys_ftruncate(fd, length, 0); 349 long ret = do_sys_ftruncate(fd, length, 0);
350 /* avoid REGPARM breakage on x86: */
351 prevent_tail_call(ret);
352 return ret;
347} 353}
348#endif 354#endif
349 355
@@ -1093,20 +1099,30 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
1093 1099
1094asmlinkage long sys_open(const char __user *filename, int flags, int mode) 1100asmlinkage long sys_open(const char __user *filename, int flags, int mode)
1095{ 1101{
1102 long ret;
1103
1096 if (force_o_largefile()) 1104 if (force_o_largefile())
1097 flags |= O_LARGEFILE; 1105 flags |= O_LARGEFILE;
1098 1106
1099 return do_sys_open(AT_FDCWD, filename, flags, mode); 1107 ret = do_sys_open(AT_FDCWD, filename, flags, mode);
1108 /* avoid REGPARM breakage on x86: */
1109 prevent_tail_call(ret);
1110 return ret;
1100} 1111}
1101EXPORT_SYMBOL_GPL(sys_open); 1112EXPORT_SYMBOL_GPL(sys_open);
1102 1113
1103asmlinkage long sys_openat(int dfd, const char __user *filename, int flags, 1114asmlinkage long sys_openat(int dfd, const char __user *filename, int flags,
1104 int mode) 1115 int mode)
1105{ 1116{
1117 long ret;
1118
1106 if (force_o_largefile()) 1119 if (force_o_largefile())
1107 flags |= O_LARGEFILE; 1120 flags |= O_LARGEFILE;
1108 1121
1109 return do_sys_open(dfd, filename, flags, mode); 1122 ret = do_sys_open(dfd, filename, flags, mode);
1123 /* avoid REGPARM breakage on x86: */
1124 prevent_tail_call(ret);
1125 return ret;
1110} 1126}
1111EXPORT_SYMBOL_GPL(sys_openat); 1127EXPORT_SYMBOL_GPL(sys_openat);
1112 1128
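The pattern added to all three wrappers above exists because, with CONFIG_REGPARM on i386, gcc may turn the final call into do_sys_open()/do_sys_ftruncate() into a tail call that reuses the asmlinkage caller's on-stack argument slots; those slots hold the user registers saved by the syscall entry path, so clobbering them corrupts register state on return to userspace. prevent_tail_call() keeps the call a real call by making the return value live afterwards. A hedged sketch of the shape — do_example() is invented, and the quoted i386 definition of prevent_tail_call() is from memory and should be treated as approximate:

/*
 * Approximate i386 definition (other architectures make it a no-op):
 *   #define prevent_tail_call(ret) __asm__ ("" : "=r" (ret) : "0" (ret))
 */
asmlinkage long sys_example(unsigned int fd, unsigned long arg)
{
	long ret = do_example(fd, arg);		/* regparm callee on i386 */

	/* avoid REGPARM breakage on x86: */
	prevent_tail_call(ret);
	return ret;
}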
diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h
index 51f87d9993b6..7bc6d73b2823 100644
--- a/include/asm-powerpc/irq.h
+++ b/include/asm-powerpc/irq.h
@@ -54,6 +54,13 @@
54 */ 54 */
55extern unsigned int virt_irq_to_real_map[NR_IRQS]; 55extern unsigned int virt_irq_to_real_map[NR_IRQS];
56 56
57/* The maximum virtual IRQ number that we support. This
58 * can be set by the platform and will be reduced by the
59 * value of __irq_offset_value. It defaults to and is
60 * capped by (NR_IRQS - 1).
61 */
62extern unsigned int virt_irq_max;
63
57/* Create a mapping for a real_irq if it doesn't already exist. 64/* Create a mapping for a real_irq if it doesn't already exist.
58 * Return the virtual irq as a convenience. 65 * Return the virtual irq as a convenience.
59 */ 66 */
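virt_irq_max gives platform code a way to shrink the virtual IRQ space below the NR_IRQS - 1 default before the virtual-to-real map is populated. A hypothetical platform setup hook illustrating that intent — the function name and the 255 limit are invented:

static void __init example_platform_init_IRQ(void)
{
	/* this platform can only deal with virtual irqs 0..255 */
	if (virt_irq_max > 255)
		virt_irq_max = 255;

	/* later virtual-irq mappings are then created within the cap */
}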
diff --git a/include/asm-powerpc/thread_info.h b/include/asm-powerpc/thread_info.h
index ffc7462d77ba..88b553c6b26c 100644
--- a/include/asm-powerpc/thread_info.h
+++ b/include/asm-powerpc/thread_info.h
@@ -37,6 +37,8 @@ struct thread_info {
37 int preempt_count; /* 0 => preemptable, 37 int preempt_count; /* 0 => preemptable,
38 <0 => BUG */ 38 <0 => BUG */
39 struct restart_block restart_block; 39 struct restart_block restart_block;
40 unsigned long local_flags; /* private flags for thread */
41
40 /* low level flags - has atomic operations done on it */ 42 /* low level flags - has atomic operations done on it */
41 unsigned long flags ____cacheline_aligned_in_smp; 43 unsigned long flags ____cacheline_aligned_in_smp;
42}; 44};
@@ -143,6 +145,12 @@ static inline struct thread_info *current_thread_info(void)
143 _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK) 145 _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK)
144#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR) 146#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
145 147
148/* Bits in local_flags */
149/* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
150#define TLF_NAPPING 0 /* idle thread enabled NAP mode */
151
152#define _TLF_NAPPING (1 << TLF_NAPPING)
153
146#endif /* __KERNEL__ */ 154#endif /* __KERNEL__ */
147 155
148#endif /* _ASM_POWERPC_THREAD_INFO_H */ 156#endif /* _ASM_POWERPC_THREAD_INFO_H */
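local_flags sits with the other thread-private fields and carries bits only the owning thread touches, so no atomics are needed; TLF_NAPPING lets C code tell the low-level exception return path (entry_32.S, per the comment above) that the idle thread was napping. A hedged sketch of how the flag could be driven from C — enter_nap() is an invented placeholder for the architecture's nap entry:

static void idle_nap_sketch(void)
{
	struct thread_info *ti = current_thread_info();

	ti->local_flags |= _TLF_NAPPING;	/* plain or/and: thread-private */
	enter_nap();				/* hypothetical low-power wait */
	ti->local_flags &= ~_TLF_NAPPING;	/* cleared once we are running again */
}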
diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h
index 6b18cd8f293d..6944e7122df5 100644
--- a/include/asm-x86_64/mmzone.h
+++ b/include/asm-x86_64/mmzone.h
@@ -12,7 +12,8 @@
12 12
13#include <asm/smp.h> 13#include <asm/smp.h>
14 14
15#define NODEMAPSIZE 0xfff 15/* Should really switch to dynamic allocation at some point */
16#define NODEMAPSIZE 0x4fff
16 17
17/* Simple perfect hash to map physical addresses to node numbers */ 18/* Simple perfect hash to map physical addresses to node numbers */
18struct memnode { 19struct memnode {
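NODEMAPSIZE bounds the table behind the "simple perfect hash": a physical address is shifted by a boot-time node shift and used to index a byte map of node ids, so larger or sparser memory layouts need more slots — hence the bump from 0xfff to 0x4fff until the map is allocated dynamically, as the new comment says. A sketch of that lookup, with the struct layout written out as an approximation of the real header:

extern struct memnode {
	int	shift;			/* chosen at boot so each slot maps to one node */
	u8	map[NODEMAPSIZE];	/* one node id per hashed slot */
} memnode;				/* approximate layout, for the sketch only */

static inline int sketch_phys_to_nid(unsigned long addr)
{
	return memnode.map[addr >> memnode.shift];
}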
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index d86494e23b63..98c36eae567c 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -613,8 +613,10 @@ __SYSCALL(__NR_get_robust_list, sys_get_robust_list)
613__SYSCALL(__NR_splice, sys_splice) 613__SYSCALL(__NR_splice, sys_splice)
614#define __NR_tee 276 614#define __NR_tee 276
615__SYSCALL(__NR_tee, sys_tee) 615__SYSCALL(__NR_tee, sys_tee)
616#define __NR_sync_file_range 277
617__SYSCALL(__NR_sync_file_range, sys_sync_file_range)
616 618
617#define __NR_syscall_max __NR_tee 619#define __NR_syscall_max __NR_sync_file_range
618 620
619#ifndef __NO_STUBS 621#ifndef __NO_STUBS
620 622
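Slot 277 wires up sys_sync_file_range, and the __NR_syscall_max bump keeps the syscall table sized correctly. C libraries of the time had no wrapper, so the call is reached via syscall(2). A hedged userspace example — the file name is arbitrary, and the numeric flag values correspond to SYNC_FILE_RANGE_WRITE and SYNC_FILE_RANGE_WAIT_AFTER:

#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_sync_file_range
#define __NR_sync_file_range 277	/* x86_64 number from the hunk above */
#endif

int main(void)
{
	int fd = open("/tmp/example", O_WRONLY | O_CREAT, 0644);

	if (fd < 0)
		return 1;
	write(fd, "data", 4);
	/* start writeout of bytes [0, 4096) and wait for it to finish */
	if (syscall(__NR_sync_file_range, fd, 0LL, 4096LL,
		    2 /* SYNC_FILE_RANGE_WRITE */ | 4 /* ..._WAIT_AFTER */))
		perror("sync_file_range");
	close(fd);
	return 0;
}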
diff --git a/init/Kconfig b/init/Kconfig
index f1bc2f0e94e5..3b36a1d53656 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -374,15 +374,6 @@ config SLAB
374 SLOB is more space efficient but does not scale well and is 374 SLOB is more space efficient but does not scale well and is
375 more susceptible to fragmentation. 375 more susceptible to fragmentation.
376 376
377config DOUBLEFAULT
378 default y
379 bool "Enable doublefault exception handler" if EMBEDDED && X86_32
380 help
381 This option allows trapping of rare doublefault exceptions that
382 would otherwise cause a system to silently reboot. Disabling this
383 option saves about 4k and might cause you much additional grey
384 hair.
385
386endmenu # General setup 377endmenu # General setup
387 378
388config TINY_SHMEM 379config TINY_SHMEM