 MAINTAINERS                                  |   3
 arch/i386/kernel/Makefile                    |   1
 arch/i386/kernel/cpu/cpufreq/powernow-k7.c   |  36
 arch/i386/kernel/cpu/cpufreq/powernow-k8.c   |   2
 arch/i386/kernel/cpu/cpufreq/powernow-k8.h   |   4
 arch/i386/kernel/smp.c                       |  65
 arch/i386/kernel/smpboot.c                   |  22
 arch/i386/kernel/smpcommon.c                 |  79
 arch/i386/kernel/traps.c                     |   5
 arch/i386/mach-voyager/voyager_smp.c         | 106
 arch/m68k/lib/uaccess.c                      |   4
 arch/sparc/defconfig                         | 151
 arch/sparc/kernel/head.S                     |   2
 arch/sparc64/defconfig                       |  26
 arch/sparc64/kernel/of_device.c              |   7
 arch/sparc64/kernel/smp.c                    |   3
 arch/x86_64/kernel/traps.c                   |   3
 block/ll_rw_blk.c                            |   1
 drivers/char/ipmi/ipmi_watchdog.c            | 134
 drivers/infiniband/core/cma.c                | 106
 drivers/infiniband/hw/ehca/ehca_classes.h    |   1
 drivers/infiniband/hw/ehca/ehca_irq.c        |   7
 drivers/infiniband/hw/ehca/ehca_main.c       |  94
 drivers/infiniband/hw/ehca/ehca_qp.c         |  17
 drivers/infiniband/hw/ehca/hcp_if.c          |  13
 drivers/infiniband/hw/ipath/ipath_iba6120.c  |   7
 drivers/infiniband/hw/ipath/ipath_intr.c     |   7
 drivers/infiniband/hw/ipath/ipath_kernel.h   |   2
 drivers/infiniband/hw/ipath/ipath_verbs.c    |  12
 drivers/infiniband/hw/mlx4/main.c            |   1
 drivers/infiniband/hw/mthca/mthca_cq.c       |   4
 drivers/infiniband/hw/mthca/mthca_qp.c       |   1
 drivers/infiniband/ulp/ipoib/ipoib_cm.c      |  11
 drivers/mmc/card/block.c                     |  17
 drivers/mmc/host/au1xmmc.c                   |  35
 drivers/mmc/host/pxamci.c                    |  18
 drivers/mmc/host/sdhci.c                     |   9
 drivers/net/Kconfig                          |   1
 drivers/net/mlx4/main.c                      |   2
 drivers/net/mlx4/mlx4.h                      |   1
 drivers/sbus/char/bbc_i2c.c                  |   1
 drivers/sbus/char/display7seg.c              |   1
 drivers/scsi/Kconfig                         |   8
 drivers/serial/sunhv.c                       |   4
 drivers/video/Kconfig                        |   4
 fs/eventpoll.c                               | 561
 include/asm-alpha/mmu_context.h              |   2
 include/asm-h8300/atomic.h                   |   1
 include/asm-i386/kdebug.h                    |   1
 include/asm-i386/processor.h                 |   4
 include/asm-m68k/uaccess.h                   |   4
 include/asm-sparc/kdebug.h                   |   8
 include/asm-sparc/system.h                   |   1
 include/asm-sparc64/dma-mapping.h            |  44
 include/asm-x86_64/kdebug.h                  |   1
 include/linux/compat.h                       |   6
 include/linux/init.h                         |   7
 include/linux/io.h                           |   8
 include/linux/major.h                        |   2
 include/linux/slub_def.h                     |   6
 kernel/time/clocksource.c                    |  10
 kernel/time/timekeeping.c                    |   2
 kernel/timer.c                               |   2
 net/ipv4/proc.c                              |  21
 net/ipv6/ip6_input.c                         |   2
 net/sched/sch_prio.c                         |   2
 net/xfrm/xfrm_hash.c                         |   3
 net/xfrm/xfrm_policy.c                       |   4
 68 files changed, 850 insertions(+), 890 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 68a56add73e3..bbeb5b6b5b05 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1712,8 +1712,6 @@ L: Linux-Kernel@vger.kernel.org
 S:	Maintained
 
 i386 SETUP CODE / CPU ERRATA WORKAROUNDS
-P:	Dave Jones
-M:	davej@codemonkey.org.uk
 P:	H. Peter Anvin
 M:	hpa@zytor.com
 S:	Maintained
@@ -3269,6 +3267,7 @@ W: http://tpmdd.sourceforge.net
 P:	Marcel Selhorst
 M:	tpm@selhorst.net
 W:	http://www.prosec.rub.de/tpm/
+L:	tpmdd-devel@lists.sourceforge.net
 S:	Maintained
 
 Telecom Clock Driver for MCPL0010
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index 91cff8dc9e1a..06da59f6f837 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_X86_CPUID) += cpuid.o
 obj-$(CONFIG_MICROCODE)		+= microcode.o
 obj-$(CONFIG_APM)		+= apm.o
 obj-$(CONFIG_X86_SMP)		+= smp.o smpboot.o tsc_sync.o
+obj-$(CONFIG_SMP)		+= smpcommon.o
 obj-$(CONFIG_X86_TRAMPOLINE)	+= trampoline.o
 obj-$(CONFIG_X86_MPPARSE)	+= mpparse.o
 obj-$(CONFIG_X86_LOCAL_APIC)	+= apic.o nmi.o
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k7.c b/arch/i386/kernel/cpu/cpufreq/powernow-k7.c
index 837b04166a47..ca3e1d341889 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k7.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k7.c
@@ -341,15 +341,17 @@ static int powernow_acpi_init(void)
 	pc.val = (unsigned long) acpi_processor_perf->states[0].control;
 	for (i = 0; i < number_scales; i++) {
 		u8 fid, vid;
-		unsigned int speed;
+		struct acpi_processor_px *state =
+			&acpi_processor_perf->states[i];
+		unsigned int speed, speed_mhz;
 
-		pc.val = (unsigned long) acpi_processor_perf->states[i].control;
+		pc.val = (unsigned long) state->control;
 		dprintk ("acpi: P%d: %d MHz %d mW %d uS control %08x SGTC %d\n",
 			i,
-			(u32) acpi_processor_perf->states[i].core_frequency,
-			(u32) acpi_processor_perf->states[i].power,
-			(u32) acpi_processor_perf->states[i].transition_latency,
-			(u32) acpi_processor_perf->states[i].control,
+			(u32) state->core_frequency,
+			(u32) state->power,
+			(u32) state->transition_latency,
+			(u32) state->control,
 			pc.bits.sgtc);
 
 		vid = pc.bits.vid;
@@ -360,6 +362,18 @@ static int powernow_acpi_init(void)
 		powernow_table[i].index |= (vid << 8); /* upper 8 bits */
 
 		speed = powernow_table[i].frequency;
+		speed_mhz = speed / 1000;
+
+		/* processor_perflib will multiply the MHz value by 1000 to
+		 * get a KHz value (e.g. 1266000). However, powernow-k7 works
+		 * with true KHz values (e.g. 1266768). To ensure that all
+		 * powernow frequencies are available, we must ensure that
+		 * ACPI doesn't restrict them, so we round up the MHz value
+		 * to ensure that perflib's computed KHz value is greater than
+		 * or equal to powernow's KHz value.
+		 */
+		if (speed % 1000 > 0)
+			speed_mhz++;
 
 		if ((fid_codes[fid] % 10)==5) {
 			if (have_a0 == 1)
@@ -368,10 +382,16 @@ static int powernow_acpi_init(void)
 
 		dprintk (" FID: 0x%x (%d.%dx [%dMHz]) "
 			"VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
-			fid_codes[fid] % 10, speed/1000, vid,
+			fid_codes[fid] % 10, speed_mhz, vid,
 			mobile_vid_table[vid]/1000,
 			mobile_vid_table[vid]%1000);
 
+		if (state->core_frequency != speed_mhz) {
+			state->core_frequency = speed_mhz;
+			dprintk(" Corrected ACPI frequency to %d\n",
+				speed_mhz);
+		}
+
 		if (latency < pc.bits.sgtc)
 			latency = pc.bits.sgtc;
 
@@ -602,7 +622,7 @@ static int __init powernow_cpu_init (struct cpufreq_policy *policy)
 		result = powernow_acpi_init();
 		if (result) {
 			printk (KERN_INFO PFX "ACPI and legacy methods failed\n");
-			printk (KERN_INFO PFX "See http://www.codemonkey.org.uk/projects/cpufreq/powernow-k7.shtml\n");
+			printk (KERN_INFO PFX "See http://www.codemonkey.org.uk/projects/cpufreq/powernow-k7.html\n");
 		}
 	} else {
 		/* SGTC use the bus clock as timer */
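Aside: the MHz round-up introduced above is easiest to see in isolation. A minimal sketch of the same arithmetic (illustrative only; khz_to_mhz_round_up is a hypothetical name, not from this patch):

static unsigned int khz_to_mhz_round_up(unsigned int khz)
{
	unsigned int mhz = khz / 1000;	/* 1266768 kHz -> 1266 MHz */

	if (khz % 1000)			/* any remainder rounds up, so that */
		mhz++;			/* perflib's mhz * 1000 >= real khz */
	return mhz;
}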
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index 7cf3d207b6b3..4ade55c5f333 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -521,7 +521,7 @@ static int check_supported_cpu(unsigned int cpu)
 
 	if ((eax & CPUID_XFAM) == CPUID_XFAM_K8) {
 		if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) ||
-		    ((eax & CPUID_XMOD) > CPUID_XMOD_REV_G)) {
+		    ((eax & CPUID_XMOD) > CPUID_XMOD_REV_MASK)) {
 			printk(KERN_INFO PFX "Processor cpuid %x not supported\n", eax);
 			goto out;
 		}
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
index 95be5013c984..b06c812208ca 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
@@ -46,8 +46,8 @@ struct powernow_k8_data {
 #define CPUID_XFAM			0x0ff00000	/* extended family */
 #define CPUID_XFAM_K8			0
 #define CPUID_XMOD			0x000f0000	/* extended model */
-#define CPUID_XMOD_REV_G		0x00060000
+#define CPUID_XMOD_REV_MASK		0x00080000
 #define CPUID_XFAM_10H			0x00100000	/* family 0x10 */
 #define CPUID_USE_XFAM_XMOD		0x00000f00
 #define CPUID_GET_MAX_CAPABILITIES	0x80000000
 #define CPUID_FREQ_VOLT_CAPABILITIES	0x80000007
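Aside: CPUID_XMOD masks the extended-model field (bits 19:16) of the processor-signature CPUID value, and the change above widens the accepted limit from revision G (0x6) to 0x8 so newer K8 steppings pass. A hedged sketch of the acceptance test performed in powernow-k8.c (k8_revision_ok is a hypothetical helper):

static int k8_revision_ok(unsigned int eax)
{
	/* reject only when the extended model exceeds the mask value */
	return (eax & CPUID_XMOD) <= CPUID_XMOD_REV_MASK;
}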
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 706bda72dc60..c9a7c9835aba 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -467,7 +467,7 @@ void flush_tlb_all(void)
  * it goes straight through and wastes no time serializing
  * anything. Worst case is that we lose a reschedule ...
  */
-void native_smp_send_reschedule(int cpu)
+static void native_smp_send_reschedule(int cpu)
 {
 	WARN_ON(cpu_is_offline(cpu));
 	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
@@ -546,9 +546,10 @@ static void __smp_call_function(void (*func) (void *info), void *info,
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
-int native_smp_call_function_mask(cpumask_t mask,
-				  void (*func)(void *), void *info,
-				  int wait)
+static int
+native_smp_call_function_mask(cpumask_t mask,
+			      void (*func)(void *), void *info,
+			      int wait)
 {
 	struct call_data_struct data;
 	cpumask_t allbutself;
@@ -599,60 +600,6 @@ int native_smp_call_function_mask(cpumask_t mask,
 	return 0;
 }
 
-/**
- * smp_call_function(): Run a function on all other CPUs.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
-		      int wait)
-{
-	return smp_call_function_mask(cpu_online_map, func, info, wait);
-}
-EXPORT_SYMBOL(smp_call_function);
-
-/**
- * smp_call_function_single - Run a function on another CPU
- * @cpu: The target CPU. Cannot be the calling CPU.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- */
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-			     int nonatomic, int wait)
-{
-	/* prevent preemption and reschedule on another processor */
-	int ret;
-	int me = get_cpu();
-	if (cpu == me) {
-		WARN_ON(1);
-		put_cpu();
-		return -EBUSY;
-	}
-
-	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
-
-	put_cpu();
-	return ret;
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
 static void stop_this_cpu (void * dummy)
 {
 	local_irq_disable();
@@ -670,7 +617,7 @@ static void stop_this_cpu (void * dummy)
  * this function calls the 'stop' function on all other CPUs in the system.
  */
 
-void native_smp_send_stop(void)
+static void native_smp_send_stop(void)
 {
 	/* Don't deadlock on the call lock in panic */
 	int nolock = !spin_trylock(&call_lock);
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index b92cc4e8b3bb..08f07a74a9d3 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -98,9 +98,6 @@ EXPORT_SYMBOL(x86_cpu_to_apicid);
 
 u8 apicid_2_node[MAX_APICID];
 
-DEFINE_PER_CPU(unsigned long, this_cpu_off);
-EXPORT_PER_CPU_SYMBOL(this_cpu_off);
-
 /*
  * Trampoline 80x86 program as an array.
  */
@@ -763,25 +760,6 @@ static inline struct task_struct * alloc_idle_task(int cpu)
 #define alloc_idle_task(cpu) fork_idle(cpu)
 #endif
 
-/* Initialize the CPU's GDT. This is either the boot CPU doing itself
-   (still using the master per-cpu area), or a CPU doing it for a
-   secondary which will soon come up. */
-static __cpuinit void init_gdt(int cpu)
-{
-	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
-
-	pack_descriptor((u32 *)&gdt[GDT_ENTRY_PERCPU].a,
-			(u32 *)&gdt[GDT_ENTRY_PERCPU].b,
-			__per_cpu_offset[cpu], 0xFFFFF,
-			0x80 | DESCTYPE_S | 0x2, 0x8);
-
-	per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
-	per_cpu(cpu_number, cpu) = cpu;
-}
-
-/* Defined in head.S */
-extern struct Xgt_desc_struct early_gdt_descr;
-
 static int __cpuinit do_boot_cpu(int apicid, int cpu)
 /*
  * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
diff --git a/arch/i386/kernel/smpcommon.c b/arch/i386/kernel/smpcommon.c
new file mode 100644
index 000000000000..1868ae18eb4d
--- /dev/null
+++ b/arch/i386/kernel/smpcommon.c
@@ -0,0 +1,79 @@
+/*
+ * SMP stuff which is common to all sub-architectures.
+ */
+#include <linux/module.h>
+#include <asm/smp.h>
+
+DEFINE_PER_CPU(unsigned long, this_cpu_off);
+EXPORT_PER_CPU_SYMBOL(this_cpu_off);
+
+/* Initialize the CPU's GDT. This is either the boot CPU doing itself
+   (still using the master per-cpu area), or a CPU doing it for a
+   secondary which will soon come up. */
+__cpuinit void init_gdt(int cpu)
+{
+	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+
+	pack_descriptor((u32 *)&gdt[GDT_ENTRY_PERCPU].a,
+			(u32 *)&gdt[GDT_ENTRY_PERCPU].b,
+			__per_cpu_offset[cpu], 0xFFFFF,
+			0x80 | DESCTYPE_S | 0x2, 0x8);
+
+	per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
+	per_cpu(cpu_number, cpu) = cpu;
+}
+
+
+/**
+ * smp_call_function(): Run a function on all other CPUs.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: Unused.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
+		      int wait)
+{
+	return smp_call_function_mask(cpu_online_map, func, info, wait);
+}
+EXPORT_SYMBOL(smp_call_function);
+
+/**
+ * smp_call_function_single - Run a function on another CPU
+ * @cpu: The target CPU. Cannot be the calling CPU.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: Unused.
+ * @wait: If true, wait until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ */
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+			     int nonatomic, int wait)
+{
+	/* prevent preemption and reschedule on another processor */
+	int ret;
+	int me = get_cpu();
+	if (cpu == me) {
+		WARN_ON(1);
+		put_cpu();
+		return -EBUSY;
+	}
+
+	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
+
+	put_cpu();
+	return ret;
+}
+EXPORT_SYMBOL(smp_call_function_single);
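Aside: a minimal usage sketch for the two helpers collected above (illustrative only; count_hit and hits are hypothetical, the smp_call_function* signatures are the ones defined in this new file):

static atomic_t hits = ATOMIC_INIT(0);

static void count_hit(void *info)
{
	atomic_inc((atomic_t *)info);	/* runs on each target CPU */
}

static void poke(int cpu)
{
	/* all other online CPUs, waiting for completion */
	smp_call_function(count_hit, &hits, 0, 1);
	/* one specific other CPU; returns -EBUSY if cpu is the caller */
	smp_call_function_single(cpu, count_hit, &hits, 0, 1);
}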
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index c05e7e861b29..90da0575fcff 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -733,11 +733,6 @@ static __kprobes void default_do_nmi(struct pt_regs * regs)
 	 */
 	if (nmi_watchdog_tick(regs, reason))
 		return;
-#endif
-	if (notify_die(DIE_NMI_POST, "nmi_post", regs, reason, 2, 0)
-							== NOTIFY_STOP)
-		return;
-#ifdef CONFIG_X86_LOCAL_APIC
 	if (!do_nmi_callback(regs, smp_processor_id()))
 #endif
 		unknown_nmi_error(reason, regs);
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c
index 50d9c52070b1..b87f8548e75a 100644
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -27,7 +27,6 @@
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/arch_hooks.h>
-#include <asm/pda.h>
 
 /* TLB state -- visible externally, indexed physically */
 DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0 };
@@ -422,7 +421,7 @@ find_smp_config(void)
 		VOYAGER_SUS_IN_CONTROL_PORT);
 
 	current_thread_info()->cpu = boot_cpu_id;
-	write_pda(cpu_number, boot_cpu_id);
+	x86_write_percpu(cpu_number, boot_cpu_id);
 }
 
 /*
@@ -435,7 +434,7 @@ smp_store_cpu_info(int id)
 
 	*c = boot_cpu_data;
 
-	identify_cpu(c);
+	identify_secondary_cpu(c);
 }
 
 /* set up the trampoline and return the physical address of the code */
@@ -459,7 +458,7 @@ start_secondary(void *unused)
 	/* external functions not defined in the headers */
 	extern void calibrate_delay(void);
 
-	secondary_cpu_init();
+	cpu_init();
 
 	/* OK, we're in the routine */
 	ack_CPI(VIC_CPU_BOOT_CPI);
@@ -572,7 +571,9 @@ do_boot_cpu(__u8 cpu)
 	/* init_tasks (in sched.c) is indexed logically */
 	stack_start.esp = (void *) idle->thread.esp;
 
-	init_gdt(cpu, idle);
+	init_gdt(cpu);
+	per_cpu(current_task, cpu) = idle;
+	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
 	irq_ctx_init(cpu);
 
 	/* Note: Don't modify initial ss override */
@@ -859,8 +860,8 @@ smp_invalidate_interrupt(void)
 
 /* This routine is called with a physical cpu mask */
 static void
-flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
-		  unsigned long va)
+voyager_flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
+			  unsigned long va)
 {
 	int stuck = 50000;
 
@@ -912,7 +913,7 @@ flush_tlb_current_task(void)
 	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
 	local_flush_tlb();
 	if (cpu_mask)
-		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+		voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
 
 	preempt_enable();
 }
@@ -934,7 +935,7 @@ flush_tlb_mm (struct mm_struct * mm)
 		leave_mm(smp_processor_id());
 	}
 	if (cpu_mask)
-		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+		voyager_flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
 
 	preempt_enable();
 }
@@ -955,7 +956,7 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
 	}
 
 	if (cpu_mask)
-		flush_tlb_others(cpu_mask, mm, va);
+		voyager_flush_tlb_others(cpu_mask, mm, va);
 
 	preempt_enable();
 }
@@ -1044,10 +1045,12 @@ smp_call_function_interrupt(void)
 }
 
 static int
-__smp_call_function_mask (void (*func) (void *info), void *info, int retry,
-			  int wait, __u32 mask)
+voyager_smp_call_function_mask (cpumask_t cpumask,
+				void (*func) (void *info), void *info,
+				int wait)
 {
 	struct call_data_struct data;
+	u32 mask = cpus_addr(cpumask)[0];
 
 	mask &= ~(1<<smp_processor_id());
 
@@ -1083,47 +1086,6 @@ __smp_call_function_mask (void (*func) (void *info), void *info, int retry,
 	return 0;
 }
 
-/* Call this function on all CPUs using the function_interrupt above
-    <func> The function to run. This must be fast and non-blocking.
-    <info> An arbitrary pointer to pass to the function.
-    <retry> If true, keep retrying until ready.
-    <wait> If true, wait until function has completed on other CPUs.
-    [RETURNS] 0 on success, else a negative status code. Does not return until
-    remote CPUs are nearly ready to execute <<func>> or are or have executed.
-*/
-int
-smp_call_function(void (*func) (void *info), void *info, int retry,
-		  int wait)
-{
-	__u32 mask = cpus_addr(cpu_online_map)[0];
-
-	return __smp_call_function_mask(func, info, retry, wait, mask);
-}
-EXPORT_SYMBOL(smp_call_function);
-
-/*
- * smp_call_function_single - Run a function on another CPU
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Currently unused.
- * @wait: If true, wait until function has completed on other CPUs.
- *
- * Retrurns 0 on success, else a negative status code.
- *
- * Does not return until the remote CPU is nearly ready to execute <func>
- * or is or has executed.
- */
-
-int
-smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-			 int nonatomic, int wait)
-{
-	__u32 mask = 1 << cpu;
-
-	return __smp_call_function_mask(func, info, nonatomic, wait, mask);
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
 /* Sorry about the name. In an APIC based system, the APICs
  * themselves are programmed to send a timer interrupt. This is used
 * by linux to reschedule the processor. Voyager doesn't have this,
@@ -1237,8 +1199,8 @@ smp_alloc_memory(void)
 }
 
 /* send a reschedule CPI to one CPU by physical CPU number*/
-void
-smp_send_reschedule(int cpu)
+static void
+voyager_smp_send_reschedule(int cpu)
 {
 	send_one_CPI(cpu, VIC_RESCHEDULE_CPI);
 }
@@ -1267,8 +1229,8 @@ safe_smp_processor_id(void)
 }
 
 /* broadcast a halt to all other CPUs */
-void
-smp_send_stop(void)
+static void
+voyager_smp_send_stop(void)
 {
 	smp_call_function(smp_stop_cpu_function, NULL, 1, 1);
 }
@@ -1930,23 +1892,26 @@ smp_voyager_power_off(void *dummy)
 	smp_stop_cpu_function(NULL);
 }
 
-void __init
-smp_prepare_cpus(unsigned int max_cpus)
+static void __init
+voyager_smp_prepare_cpus(unsigned int max_cpus)
 {
 	/* FIXME: ignore max_cpus for now */
 	smp_boot_cpus();
 }
 
-void __devinit smp_prepare_boot_cpu(void)
+static void __devinit voyager_smp_prepare_boot_cpu(void)
 {
+	init_gdt(smp_processor_id());
+	switch_to_new_gdt();
+
 	cpu_set(smp_processor_id(), cpu_online_map);
 	cpu_set(smp_processor_id(), cpu_callout_map);
 	cpu_set(smp_processor_id(), cpu_possible_map);
 	cpu_set(smp_processor_id(), cpu_present_map);
 }
 
-int __devinit
-__cpu_up(unsigned int cpu)
+static int __devinit
+voyager_cpu_up(unsigned int cpu)
 {
 	/* This only works at boot for x86. See "rewrite" above. */
 	if (cpu_isset(cpu, smp_commenced_mask))
@@ -1962,8 +1927,8 @@ __cpu_up(unsigned int cpu)
 	return 0;
 }
 
-void __init
-smp_cpus_done(unsigned int max_cpus)
+static void __init
+voyager_smp_cpus_done(unsigned int max_cpus)
 {
 	zap_low_mappings();
 }
@@ -1972,5 +1937,16 @@ void __init
 smp_setup_processor_id(void)
 {
 	current_thread_info()->cpu = hard_smp_processor_id();
-	write_pda(cpu_number, hard_smp_processor_id());
+	x86_write_percpu(cpu_number, hard_smp_processor_id());
 }
+
+struct smp_ops smp_ops = {
+	.smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu,
+	.smp_prepare_cpus = voyager_smp_prepare_cpus,
+	.cpu_up = voyager_cpu_up,
+	.smp_cpus_done = voyager_smp_cpus_done,
+
+	.smp_send_stop = voyager_smp_send_stop,
+	.smp_send_reschedule = voyager_smp_send_reschedule,
+	.smp_call_function_mask = voyager_smp_call_function_mask,
+};
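Aside: the point of the smp_ops table above is that the generic i386 SMP entry points become thin dispatchers, so Voyager can slot in its implementations without strong symbol overrides. A hedged sketch of the dispatch pattern (roughly what the asm-i386 smp header does in this series; treat it as illustrative, not a quote of that header):

static inline void smp_send_reschedule(int cpu)
{
	smp_ops.smp_send_reschedule(cpu);
}

static inline int smp_call_function_mask(cpumask_t mask,
					 void (*func)(void *), void *info,
					 int wait)
{
	return smp_ops.smp_call_function_mask(mask, func, info, wait);
}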
diff --git a/arch/m68k/lib/uaccess.c b/arch/m68k/lib/uaccess.c
index 865f9fb9e686..13854ed8cd9a 100644
--- a/arch/m68k/lib/uaccess.c
+++ b/arch/m68k/lib/uaccess.c
@@ -181,7 +181,7 @@ EXPORT_SYMBOL(strnlen_user);
  * Zero Userspace
  */
 
-unsigned long clear_user(void __user *to, unsigned long n)
+unsigned long __clear_user(void __user *to, unsigned long n)
 {
 	unsigned long res;
 
@@ -219,4 +219,4 @@ unsigned long clear_user(void __user *to, unsigned long n)
 
 	return res;
 }
-EXPORT_SYMBOL(clear_user);
+EXPORT_SYMBOL(__clear_user);
diff --git a/arch/sparc/defconfig b/arch/sparc/defconfig
index 79e54894529d..38bd79fe6e75 100644
--- a/arch/sparc/defconfig
+++ b/arch/sparc/defconfig
@@ -1,10 +1,11 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.20-rc1
-# Sun Dec 17 14:20:47 2006
+# Linux kernel version: 2.6.22-rc1
+# Mon May 14 03:25:14 2007
 #
 CONFIG_MMU=y
 CONFIG_HIGHMEM=y
+CONFIG_ZONE_DMA=y
 CONFIG_GENERIC_ISA_DMA=y
 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
 
@@ -23,14 +24,17 @@ CONFIG_LOCALVERSION_AUTO=y
 CONFIG_SWAP=y
 CONFIG_SYSVIPC=y
 # CONFIG_IPC_NS is not set
+CONFIG_SYSVIPC_SYSCTL=y
 CONFIG_POSIX_MQUEUE=y
 # CONFIG_BSD_PROCESS_ACCT is not set
 # CONFIG_TASKSTATS is not set
 # CONFIG_UTS_NS is not set
 # CONFIG_AUDIT is not set
 # CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED=y
 # CONFIG_RELAY is not set
+CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE=""
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SYSCTL=y
@@ -46,14 +50,19 @@ CONFIG_BUG=y
 CONFIG_ELF_CORE=y
 CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
+CONFIG_ANON_INODES=y
 CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
 CONFIG_SHMEM=y
-CONFIG_SLAB=y
 CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
 CONFIG_RT_MUTEXES=y
 # CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
-# CONFIG_SLOB is not set
 
 #
 # Loadable module support
@@ -107,7 +116,7 @@ CONFIG_ARCH_MAY_HAVE_PC_FDC=y
 CONFIG_SUN_PM=y
 # CONFIG_SUN4 is not set
 CONFIG_PCI=y
-# CONFIG_PCI_MULTITHREAD_PROBE is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
 # CONFIG_PCI_DEBUG is not set
 CONFIG_SUN_OPENPROMFS=m
 # CONFIG_SPARC_LED is not set
@@ -124,6 +133,7 @@ CONFIG_FLAT_NODE_MEM_MAP=y
 # CONFIG_SPARSEMEM_STATIC is not set
 CONFIG_SPLIT_PTLOCK_CPUS=4
 # CONFIG_RESOURCES_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=1
 
 #
 # Networking
@@ -133,14 +143,15 @@ CONFIG_NET=y
 #
 # Networking options
 #
-# CONFIG_NETDEBUG is not set
 CONFIG_PACKET=y
 # CONFIG_PACKET_MMAP is not set
 CONFIG_UNIX=y
 CONFIG_XFRM=y
 CONFIG_XFRM_USER=m
 # CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
 CONFIG_NET_KEY=m
+# CONFIG_NET_KEY_MIGRATE is not set
 CONFIG_INET=y
 # CONFIG_IP_MULTICAST is not set
 # CONFIG_IP_ADVANCED_ROUTER is not set
@@ -170,6 +181,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
 CONFIG_IPV6=m
 CONFIG_IPV6_PRIVACY=y
 # CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
@@ -229,7 +241,18 @@ CONFIG_NET_PKTGEN=m
 # CONFIG_HAMRADIO is not set
 # CONFIG_IRDA is not set
 # CONFIG_BT is not set
+CONFIG_AF_RXRPC=m
+# CONFIG_AF_RXRPC_DEBUG is not set
+# CONFIG_RXKAD is not set
+
+#
+# Wireless
+#
+# CONFIG_CFG80211 is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_MAC80211 is not set
 # CONFIG_IEEE80211 is not set
+# CONFIG_RFKILL is not set
 
 #
 # Device Drivers
@@ -242,16 +265,13 @@ CONFIG_STANDALONE=y
 CONFIG_PREVENT_FIRMWARE_BUILD=y
 # CONFIG_FW_LOADER is not set
 # CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
 # CONFIG_SYS_HYPERVISOR is not set
 
 #
 # Connector - unified userspace <-> kernelspace linker
 #
 # CONFIG_CONNECTOR is not set
-
-#
-# Memory Technology Devices (MTD)
-#
 # CONFIG_MTD is not set
 
 #
@@ -262,6 +282,7 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
 #
 # Plug and Play support
 #
+# CONFIG_PNPACPI is not set
 
 #
 # Block devices
@@ -280,15 +301,16 @@ CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=16
 CONFIG_BLK_DEV_RAM_SIZE=4096
 CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
-CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CDROM_PKTCDVD is not set
 # CONFIG_ATA_OVER_ETH is not set
 
 #
 # Misc devices
 #
+# CONFIG_PHANTOM is not set
 # CONFIG_SGI_IOC4 is not set
 # CONFIG_TIFM_CORE is not set
+# CONFIG_BLINK is not set
 
 #
 # ATA/ATAPI/MFM/RLL support
@@ -322,11 +344,12 @@ CONFIG_CHR_DEV_SG=m
 # CONFIG_SCSI_CONSTANTS is not set
 # CONFIG_SCSI_LOGGING is not set
 # CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
 
 #
 # SCSI Transports
 #
-CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_SPI_ATTRS=y
 # CONFIG_SCSI_FC_ATTRS is not set
 # CONFIG_SCSI_ISCSI_ATTRS is not set
 # CONFIG_SCSI_SAS_ATTRS is not set
@@ -366,12 +389,9 @@ CONFIG_SCSI_QLOGICPTI=m
 # CONFIG_SCSI_DC390T is not set
 # CONFIG_SCSI_NSP32 is not set
 # CONFIG_SCSI_DEBUG is not set
+CONFIG_SCSI_ESP_CORE=y
 CONFIG_SCSI_SUNESP=y
 # CONFIG_SCSI_SRP is not set
-
-#
-# Serial ATA (prod) and Parallel ATA (experimental) drivers
-#
 # CONFIG_ATA is not set
 
 #
@@ -390,6 +410,7 @@ CONFIG_SCSI_SUNESP=y
 #
 # IEEE 1394 (FireWire) support
 #
+# CONFIG_FIREWIRE is not set
 # CONFIG_IEEE1394 is not set
 
 #
@@ -410,10 +431,6 @@ CONFIG_TUN=m
 # ARCnet devices
 #
 # CONFIG_ARCNET is not set
-
-#
-# PHY device support
-#
 # CONFIG_PHYLIB is not set
 
 #
@@ -435,10 +452,7 @@ CONFIG_SUNQE=m
 # CONFIG_NET_TULIP is not set
 # CONFIG_HP100 is not set
 # CONFIG_NET_PCI is not set
-
-#
-# Ethernet (1000 Mbit)
-#
+CONFIG_NETDEV_1000=y
 # CONFIG_ACENIC is not set
 # CONFIG_DL2K is not set
 # CONFIG_E1000 is not set
@@ -454,15 +468,16 @@ CONFIG_SUNQE=m
 # CONFIG_TIGON3 is not set
 # CONFIG_BNX2 is not set
 # CONFIG_QLA3XXX is not set
-
-#
-# Ethernet (10000 Mbit)
-#
+# CONFIG_ATL1 is not set
+CONFIG_NETDEV_10000=y
 # CONFIG_CHELSIO_T1 is not set
+# CONFIG_CHELSIO_T3 is not set
 # CONFIG_IXGB is not set
 # CONFIG_S2IO is not set
 # CONFIG_MYRI10GE is not set
 # CONFIG_NETXEN_NIC is not set
+# CONFIG_MLX4_CORE is not set
+CONFIG_MLX4_DEBUG=y
 
 #
 # Token Ring devices
@@ -470,13 +485,10 @@ CONFIG_SUNQE=m
 # CONFIG_TR is not set
 
 #
-# Wireless LAN (non-hamradio)
-#
-# CONFIG_NET_RADIO is not set
-
-#
-# Wan interfaces
+# Wireless LAN
 #
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
 # CONFIG_WAN is not set
 # CONFIG_FDDI is not set
 # CONFIG_HIPPI is not set
@@ -528,9 +540,17 @@ CONFIG_KEYBOARD_SUNKBD=m
 # CONFIG_KEYBOARD_STOWAWAY is not set
 CONFIG_INPUT_MOUSE=y
 CONFIG_MOUSE_PS2=m
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+CONFIG_MOUSE_PS2_LIFEBOOK=y
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
 CONFIG_MOUSE_SERIAL=m
+# CONFIG_MOUSE_APPLETOUCH is not set
 # CONFIG_MOUSE_VSXXXAA is not set
 # CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
 # CONFIG_INPUT_TOUCHSCREEN is not set
 # CONFIG_INPUT_MISC is not set
 
@@ -578,14 +598,9 @@ CONFIG_LEGACY_PTY_COUNT=256
 # IPMI
 #
 # CONFIG_IPMI_HANDLER is not set
-
-#
-# Watchdog Cards
-#
 # CONFIG_WATCHDOG is not set
 CONFIG_HW_RANDOM=m
 CONFIG_RTC=m
-# CONFIG_DTLK is not set
 # CONFIG_R3964 is not set
 # CONFIG_APPLICOM is not set
 # CONFIG_DRM is not set
@@ -595,10 +610,7 @@ CONFIG_RTC=m
 # TPM devices
 #
 # CONFIG_TCG_TPM is not set
-
-#
-# I2C support
-#
+CONFIG_DEVPORT=y
 # CONFIG_I2C is not set
 
 #
@@ -611,32 +623,39 @@ CONFIG_RTC=m
 # Dallas's 1-wire bus
 #
 # CONFIG_W1 is not set
-
-#
-# Hardware Monitoring support
-#
 CONFIG_HWMON=y
 # CONFIG_HWMON_VID is not set
 # CONFIG_SENSORS_ABITUGURU is not set
 # CONFIG_SENSORS_F71805F is not set
 # CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
 # CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_W83627HF is not set
 # CONFIG_HWMON_DEBUG_CHIP is not set
 
 #
+# Multifunction device drivers
+#
+# CONFIG_MFD_SM501 is not set
+
+#
 # Multimedia devices
 #
 # CONFIG_VIDEO_DEV is not set
+# CONFIG_DVB_CORE is not set
+# CONFIG_DAB is not set
 
 #
-# Digital Video Broadcasting Devices
+# Graphics support
 #
-# CONFIG_DVB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
 
 #
-# Graphics support
+# Display device support
 #
-CONFIG_FIRMWARE_EDID=y
+# CONFIG_DISPLAY_SUPPORT is not set
+# CONFIG_VGASTATE is not set
 # CONFIG_FB is not set
 
 #
@@ -644,7 +663,6 @@ CONFIG_FIRMWARE_EDID=y
 #
 # CONFIG_PROM_CONSOLE is not set
 CONFIG_DUMMY_CONSOLE=y
-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
 
 #
 # Sound
@@ -655,6 +673,7 @@ CONFIG_DUMMY_CONSOLE=y
 # HID Devices
 #
 CONFIG_HID=y
+# CONFIG_HID_DEBUG is not set
 
 #
 # USB support
@@ -672,10 +691,6 @@ CONFIG_USB_ARCH_HAS_EHCI=y
 # USB Gadget Support
 #
 # CONFIG_USB_GADGET is not set
-
-#
-# MMC/SD Card support
-#
 # CONFIG_MMC is not set
 
 #
@@ -719,10 +734,6 @@ CONFIG_USB_ARCH_HAS_EHCI=y
 #
 
 #
-# Virtualization
-#
-
-#
 # Misc Linux/SPARC drivers
 #
 CONFIG_SUN_OPENPROMIO=m
@@ -801,6 +812,7 @@ CONFIG_RAMFS=y
 #
 # CONFIG_ADFS_FS is not set
 # CONFIG_AFFS_FS is not set
+# CONFIG_ECRYPT_FS is not set
 # CONFIG_HFS_FS is not set
 # CONFIG_HFSPLUS_FS is not set
 CONFIG_BEFS_FS=m
@@ -827,6 +839,7 @@ CONFIG_LOCKD=y
 CONFIG_NFS_COMMON=y
 CONFIG_SUNRPC=y
 CONFIG_SUNRPC_GSS=m
+# CONFIG_SUNRPC_BIND34 is not set
 CONFIG_RPCSEC_GSS_KRB5=m
 # CONFIG_RPCSEC_GSS_SPKM3 is not set
 # CONFIG_SMB_FS is not set
@@ -839,7 +852,7 @@ CONFIG_CIFS=m
 # CONFIG_NCP_FS is not set
 # CONFIG_CODA_FS is not set
 CONFIG_AFS_FS=m
-CONFIG_RXRPC=m
+# CONFIG_AFS_DEBUG is not set
 # CONFIG_9P_FS is not set
 
 #
@@ -913,15 +926,14 @@ CONFIG_MAGIC_SYSRQ=y
 # CONFIG_DEBUG_FS is not set
 # CONFIG_HEADERS_CHECK is not set
 CONFIG_DEBUG_KERNEL=y
-CONFIG_LOG_BUF_SHIFT=14
 CONFIG_DETECT_SOFTLOCKUP=y
 # CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
 # CONFIG_DEBUG_SLAB is not set
 # CONFIG_DEBUG_RT_MUTEXES is not set
 # CONFIG_RT_MUTEX_TESTER is not set
 # CONFIG_DEBUG_SPINLOCK is not set
 # CONFIG_DEBUG_MUTEXES is not set
-# CONFIG_DEBUG_RWSEMS is not set
 # CONFIG_DEBUG_SPINLOCK_SLEEP is not set
 # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
 # CONFIG_DEBUG_KOBJECT is not set
@@ -932,12 +944,14 @@ CONFIG_DEBUG_BUGVERBOSE=y
 # CONFIG_DEBUG_LIST is not set
 CONFIG_FORCED_INLINING=y
 # CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_FAULT_INJECTION is not set
 # CONFIG_DEBUG_STACK_USAGE is not set
 
 #
 # Security options
 #
-# CONFIG_KEYS is not set
+CONFIG_KEYS=y
+# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
 # CONFIG_SECURITY is not set
 
 #
@@ -961,8 +975,11 @@ CONFIG_CRYPTO_SHA512=m
 # CONFIG_CRYPTO_GF128MUL is not set
 CONFIG_CRYPTO_ECB=m
 CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_PCBC=m
 # CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_CRYPTD is not set
 CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_TWOFISH_COMMON=m
@@ -977,6 +994,7 @@ CONFIG_CRYPTO_ARC4=m
 CONFIG_CRYPTO_DEFLATE=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_CRC32C=m
+# CONFIG_CRYPTO_CAMELLIA is not set
 # CONFIG_CRYPTO_TEST is not set
 
 #
@@ -989,9 +1007,12 @@ CONFIG_CRYPTO_CRC32C=m
 CONFIG_BITREVERSE=y
 # CONFIG_CRC_CCITT is not set
 # CONFIG_CRC16 is not set
+# CONFIG_CRC_ITU_T is not set
 CONFIG_CRC32=y
 CONFIG_LIBCRC32C=m
 CONFIG_ZLIB_INFLATE=y
 CONFIG_ZLIB_DEFLATE=y
 CONFIG_PLIST=y
-CONFIG_IOMAP_COPY=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
diff --git a/arch/sparc/kernel/head.S b/arch/sparc/kernel/head.S
index 97da13c52563..9a219e8b5ddb 100644
--- a/arch/sparc/kernel/head.S
+++ b/arch/sparc/kernel/head.S
@@ -19,7 +19,7 @@
 #include <asm/ptrace.h>
 #include <asm/psr.h>
 #include <asm/page.h>
-#include <linux/kdebug.h>
+#include <asm/kdebug.h>
 #include <asm/winmacro.h>
 #include <asm/thread_info.h>	/* TI_UWINMASK */
 #include <asm/errno.h>
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
index 585ef4fb7591..65840a62bb9c 100644
--- a/arch/sparc64/defconfig
+++ b/arch/sparc64/defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.21
-# Fri May 11 14:31:45 2007
+# Linux kernel version: 2.6.22-rc1
+# Mon May 14 04:17:48 2007
 #
 CONFIG_SPARC=y
 CONFIG_SPARC64=y
@@ -508,10 +508,6 @@ CONFIG_ISCSI_TCP=m
 # CONFIG_SCSI_ESP_CORE is not set
 # CONFIG_SCSI_SUNESP is not set
 # CONFIG_SCSI_SRP is not set
-
-#
-# Serial ATA (prod) and Parallel ATA (experimental) drivers
-#
 # CONFIG_ATA is not set
 
 #
@@ -568,10 +564,6 @@ CONFIG_DUMMY=m
 # ARCnet devices
 #
 # CONFIG_ARCNET is not set
-
-#
-# PHY device support
-#
 # CONFIG_PHYLIB is not set
 
 #
@@ -611,10 +603,7 @@ CONFIG_NET_PCI=y
 # CONFIG_SUNDANCE is not set
 # CONFIG_VIA_RHINE is not set
 # CONFIG_SC92031 is not set
-
-#
-# Ethernet (1000 Mbit)
-#
+CONFIG_NETDEV_1000=y
 # CONFIG_ACENIC is not set
 # CONFIG_DL2K is not set
 CONFIG_E1000=m
@@ -634,10 +623,7 @@ CONFIG_TIGON3=m
 CONFIG_BNX2=m
 # CONFIG_QLA3XXX is not set
 # CONFIG_ATL1 is not set
-
-#
-# Ethernet (10000 Mbit)
-#
+CONFIG_NETDEV_10000=y
 # CONFIG_CHELSIO_T1 is not set
 # CONFIG_CHELSIO_T3 is not set
 # CONFIG_IXGB is not set
@@ -667,10 +653,6 @@ CONFIG_MLX4_DEBUG=y
 # CONFIG_USB_RTL8150 is not set
 # CONFIG_USB_USBNET_MII is not set
 # CONFIG_USB_USBNET is not set
-
-#
-# Wan interfaces
-#
 # CONFIG_WAN is not set
 # CONFIG_FDDI is not set
 # CONFIG_HIPPI is not set
diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c
index 7455f5d05519..16cc46a71872 100644
--- a/arch/sparc64/kernel/of_device.c
+++ b/arch/sparc64/kernel/of_device.c
@@ -537,6 +537,13 @@ static int __init build_one_resource(struct device_node *parent,
 		return 0;
 	}
 
+	/* When we miss an I/O space match on PCI, just pass it up
+	 * to the next PCI bridge and/or controller.
+	 */
+	if (!strcmp(bus->name, "pci") &&
+	    (addr[0] & 0x03000000) == 0x01000000)
+		return 0;
+
 	return 1;
 }
 
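Aside: in an Open Firmware PCI address, the high cell encodes the space type in bits 25:24 (00 config, 01 I/O, 10 mem32, 11 mem64), and the new test above matches the I/O encoding. A minimal sketch of that decode (of_pci_addr_is_io is a hypothetical helper, not from this patch):

static int of_pci_addr_is_io(const u32 *addr)
{
	/* 0x03000000 selects the space-type bits; 0x01000000 means I/O */
	return (addr[0] & 0x03000000) == 0x01000000;
}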
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 8087d67a0cf8..24fdf1d0adc5 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -561,6 +561,9 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t
 	unsigned long flags, status;
 	int cnt, retries, this_cpu, prev_sent, i;
 
+	if (cpus_empty(mask))
+		return;
+
 	/* We have to do this whole thing with interrupts fully disabled.
 	 * Otherwise if we send an xcall from interrupt context it will
 	 * corrupt both our mondo block and cpu list state.
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index d28f01379b9b..cb29fb96948d 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -776,9 +776,6 @@ asmlinkage __kprobes void default_do_nmi(struct pt_regs *regs)
 	 */
 	if (nmi_watchdog_tick(regs,reason))
 		return;
-	if (notify_die(DIE_NMI_POST, "nmi_post", regs, reason, 2, 0)
-							== NOTIFY_STOP)
-		return;
 	if (!do_nmi_callback(regs,cpu))
 		unknown_nmi_error(reason, regs);
 
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 74a567afb830..6b5173ac8131 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3802,7 +3802,6 @@ static struct io_context *current_io_context(gfp_t gfp_flags, int node)
 
 	return ret;
 }
-EXPORT_SYMBOL(current_io_context);
 
 /*
  * If the current task has no IO context then create one and initialise it.
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 147c12047cf3..41f78e2c158f 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -50,18 +50,10 @@
 #include <linux/poll.h>
 #include <linux/string.h>
 #include <linux/ctype.h>
-#include <linux/delay.h>
 #include <asm/atomic.h>
 
-#ifdef CONFIG_X86
-/* This is ugly, but I've determined that x86 is the only architecture
-   that can reasonably support the IPMI NMI watchdog timeout at this
-   time. If another architecture adds this capability somehow, it
-   will have to be a somewhat different mechanism and I have no idea
-   how it will work. So in the unlikely event that another
-   architecture supports this, we can figure out a good generic
-   mechanism for it at that time. */
-#define HAVE_DIE_NMI_POST
+#ifdef CONFIG_X86_LOCAL_APIC
+#include <asm/apic.h>
 #endif
 
 #define PFX "IPMI Watchdog: "
@@ -327,11 +319,6 @@ static unsigned char ipmi_version_minor;
 /* If a pretimeout occurs, this is used to allow only one panic to happen. */
 static atomic_t preop_panic_excl = ATOMIC_INIT(-1);
 
-#ifdef HAVE_DIE_NMI_POST
-static int testing_nmi;
-static int nmi_handler_registered;
-#endif
-
 static int ipmi_heartbeat(void);
 static void panic_halt_ipmi_heartbeat(void);
 
@@ -373,10 +360,6 @@ static int i_ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
 	int hbnow = 0;
 
 
-	/* These can be cleared as we are setting the timeout. */
-	ipmi_start_timer_on_heartbeat = 0;
-	pretimeout_since_last_heartbeat = 0;
-
 	data[0] = 0;
 	WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS);
 
@@ -451,12 +434,13 @@ static int ipmi_set_timeout(int do_heartbeat)
 
 	wait_for_completion(&set_timeout_wait);
 
-	mutex_unlock(&set_timeout_lock);
-
 	if ((do_heartbeat == IPMI_SET_TIMEOUT_FORCE_HB)
 	    || ((send_heartbeat_now)
 		&& (do_heartbeat == IPMI_SET_TIMEOUT_HB_IF_NECESSARY)))
+	{
 		rv = ipmi_heartbeat();
+	}
+	mutex_unlock(&set_timeout_lock);
 
 out:
 	return rv;
@@ -536,10 +520,12 @@ static int ipmi_heartbeat(void)
 	int rv;
 	struct ipmi_system_interface_addr addr;
 
-	if (ipmi_ignore_heartbeat)
+	if (ipmi_ignore_heartbeat) {
 		return 0;
+	}
 
 	if (ipmi_start_timer_on_heartbeat) {
+		ipmi_start_timer_on_heartbeat = 0;
 		ipmi_watchdog_state = action_val;
 		return ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
 	} else if (pretimeout_since_last_heartbeat) {
@@ -547,6 +533,7 @@ static int ipmi_heartbeat(void)
 	   We don't want to set the action, though, we want to
 	   leave that alone (thus it can't be combined with the
 	   above operation. */
+		pretimeout_since_last_heartbeat = 0;
 		return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
 	}
 
@@ -934,45 +921,6 @@ static void ipmi_register_watchdog(int ipmi_intf)
 		printk(KERN_CRIT PFX "Unable to register misc device\n");
 	}
 
-#ifdef HAVE_DIE_NMI_POST
-	if (nmi_handler_registered) {
-		int old_pretimeout = pretimeout;
-		int old_timeout = timeout;
-		int old_preop_val = preop_val;
-
-		/* Set the pretimeout to go off in a second and give
-		   ourselves plenty of time to stop the timer. */
-		ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
-		preop_val = WDOG_PREOP_NONE; /* Make sure nothing happens */
-		pretimeout = 99;
-		timeout = 100;
-
-		testing_nmi = 1;
-
-		rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
-		if (rv) {
-			printk(KERN_WARNING PFX "Error starting timer to"
-			       " test NMI: 0x%x. The NMI pretimeout will"
-			       " likely not work\n", rv);
-			rv = 0;
-			goto out_restore;
-		}
-
-		msleep(1500);
-
-		if (testing_nmi != 2) {
-			printk(KERN_WARNING PFX "IPMI NMI didn't seem to"
-			       " occur. The NMI pretimeout will"
-			       " likely not work\n");
-		}
- out_restore:
-		testing_nmi = 0;
-		preop_val = old_preop_val;
-		pretimeout = old_pretimeout;
-		timeout = old_timeout;
-	}
-#endif
-
  out:
 	up_write(&register_sem);
 
@@ -982,10 +930,6 @@ static void ipmi_register_watchdog(int ipmi_intf)
 		ipmi_watchdog_state = action_val;
 		ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
 		printk(KERN_INFO PFX "Starting now!\n");
-	} else {
-		/* Stop the timer now. */
-		ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
-		ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
 	}
 }
 
@@ -1022,28 +966,17 @@ static void ipmi_unregister_watchdog(int ipmi_intf)
 	up_write(&register_sem);
 }
 
-#ifdef HAVE_DIE_NMI_POST
+#ifdef HAVE_NMI_HANDLER
 static int
-ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
+ipmi_nmi(void *dev_id, int cpu, int handled)
 {
-	if (val != DIE_NMI_POST)
-		return NOTIFY_OK;
-
-	if (testing_nmi) {
-		testing_nmi = 2;
-		return NOTIFY_STOP;
-	}
-
 	/* If we are not expecting a timeout, ignore it. */
 	if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
-		return NOTIFY_OK;
-
-	if (preaction_val != WDOG_PRETIMEOUT_NMI)
-		return NOTIFY_OK;
+		return NOTIFY_DONE;
 
 	/* If no one else handled the NMI, we assume it was the IPMI
 	   watchdog. */
1046 if (preop_val == WDOG_PREOP_PANIC) { 979 if ((!handled) && (preop_val == WDOG_PREOP_PANIC)) {
1047 /* On some machines, the heartbeat will give 980 /* On some machines, the heartbeat will give
1048 an error and not work unless we re-enable 981 an error and not work unless we re-enable
1049 the timer. So do so. */ 982 the timer. So do so. */
@@ -1052,12 +985,18 @@ ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
1052 panic(PFX "pre-timeout"); 985 panic(PFX "pre-timeout");
1053 } 986 }
1054 987
1055 return NOTIFY_STOP; 988 return NOTIFY_DONE;
1056} 989}
1057 990
1058static struct notifier_block ipmi_nmi_handler = { 991static struct nmi_handler ipmi_nmi_handler =
1059 .notifier_call = ipmi_nmi 992{
993 .link = LIST_HEAD_INIT(ipmi_nmi_handler.link),
994 .dev_name = "ipmi_watchdog",
995 .dev_id = NULL,
996 .handler = ipmi_nmi,
997 .priority = 0, /* Call us last. */
1060}; 998};
999int nmi_handler_registered;
1061#endif 1000#endif
1062 1001
1063static int wdog_reboot_handler(struct notifier_block *this, 1002static int wdog_reboot_handler(struct notifier_block *this,
@@ -1174,7 +1113,7 @@ static int preaction_op(const char *inval, char *outval)
1174 preaction_val = WDOG_PRETIMEOUT_NONE; 1113 preaction_val = WDOG_PRETIMEOUT_NONE;
1175 else if (strcmp(inval, "pre_smi") == 0) 1114 else if (strcmp(inval, "pre_smi") == 0)
1176 preaction_val = WDOG_PRETIMEOUT_SMI; 1115 preaction_val = WDOG_PRETIMEOUT_SMI;
1177#ifdef HAVE_DIE_NMI_POST 1116#ifdef HAVE_NMI_HANDLER
1178 else if (strcmp(inval, "pre_nmi") == 0) 1117 else if (strcmp(inval, "pre_nmi") == 0)
1179 preaction_val = WDOG_PRETIMEOUT_NMI; 1118 preaction_val = WDOG_PRETIMEOUT_NMI;
1180#endif 1119#endif
@@ -1208,7 +1147,7 @@ static int preop_op(const char *inval, char *outval)
1208 1147
1209static void check_parms(void) 1148static void check_parms(void)
1210{ 1149{
1211#ifdef HAVE_DIE_NMI_POST 1150#ifdef HAVE_NMI_HANDLER
1212 int do_nmi = 0; 1151 int do_nmi = 0;
1213 int rv; 1152 int rv;
1214 1153
@@ -1221,9 +1160,20 @@ static void check_parms(void)
1221 preop_op("preop_none", NULL); 1160 preop_op("preop_none", NULL);
1222 do_nmi = 0; 1161 do_nmi = 0;
1223 } 1162 }
1163#ifdef CONFIG_X86_LOCAL_APIC
1164 if (nmi_watchdog == NMI_IO_APIC) {
1165 printk(KERN_WARNING PFX "nmi_watchdog is set to IO APIC"
1166 " mode (value is %d), that is incompatible"
1167 " with using NMI in the IPMI watchdog."
1168 " Disabling IPMI nmi pretimeout.\n",
1169 nmi_watchdog);
1170 preaction_val = WDOG_PRETIMEOUT_NONE;
1171 do_nmi = 0;
1172 }
1173#endif
1224 } 1174 }
1225 if (do_nmi && !nmi_handler_registered) { 1175 if (do_nmi && !nmi_handler_registered) {
1226 rv = register_die_notifier(&ipmi_nmi_handler); 1176 rv = request_nmi(&ipmi_nmi_handler);
1227 if (rv) { 1177 if (rv) {
1228 printk(KERN_WARNING PFX 1178 printk(KERN_WARNING PFX
1229 "Can't register nmi handler\n"); 1179 "Can't register nmi handler\n");
@@ -1231,7 +1181,7 @@ static void check_parms(void)
1231 } else 1181 } else
1232 nmi_handler_registered = 1; 1182 nmi_handler_registered = 1;
1233 } else if (!do_nmi && nmi_handler_registered) { 1183 } else if (!do_nmi && nmi_handler_registered) {
1234 unregister_die_notifier(&ipmi_nmi_handler); 1184 release_nmi(&ipmi_nmi_handler);
1235 nmi_handler_registered = 0; 1185 nmi_handler_registered = 0;
1236 } 1186 }
1237#endif 1187#endif
@@ -1267,9 +1217,9 @@ static int __init ipmi_wdog_init(void)
1267 1217
1268 rv = ipmi_smi_watcher_register(&smi_watcher); 1218 rv = ipmi_smi_watcher_register(&smi_watcher);
1269 if (rv) { 1219 if (rv) {
1270#ifdef HAVE_DIE_NMI_POST 1220#ifdef HAVE_NMI_HANDLER
1271 if (nmi_handler_registered) 1221 if (preaction_val == WDOG_PRETIMEOUT_NMI)
1272 unregister_die_notifier(&ipmi_nmi_handler); 1222 release_nmi(&ipmi_nmi_handler);
1273#endif 1223#endif
1274 atomic_notifier_chain_unregister(&panic_notifier_list, 1224 atomic_notifier_chain_unregister(&panic_notifier_list,
1275 &wdog_panic_notifier); 1225 &wdog_panic_notifier);
@@ -1288,9 +1238,9 @@ static void __exit ipmi_wdog_exit(void)
1288 ipmi_smi_watcher_unregister(&smi_watcher); 1238 ipmi_smi_watcher_unregister(&smi_watcher);
1289 ipmi_unregister_watchdog(watchdog_ifnum); 1239 ipmi_unregister_watchdog(watchdog_ifnum);
1290 1240
1291#ifdef HAVE_DIE_NMI_POST 1241#ifdef HAVE_NMI_HANDLER
1292 if (nmi_handler_registered) 1242 if (nmi_handler_registered)
1293 unregister_die_notifier(&ipmi_nmi_handler); 1243 release_nmi(&ipmi_nmi_handler);
1294#endif 1244#endif
1295 1245
1296 atomic_notifier_chain_unregister(&panic_notifier_list, 1246 atomic_notifier_chain_unregister(&panic_notifier_list,
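
The net effect of the ipmi_watchdog.c hunks above is a move from the DIE_NMI_POST die-notifier (left-hand side) to the dedicated HAVE_NMI_HANDLER interface (right-hand side), dropping the NMI self-test that the notifier variant carried. A minimal sketch of the resulting registration pattern, using only the struct nmi_handler fields and the request_nmi()/release_nmi() calls visible in the hunks (the "example" names and the empty pre-timeout action are illustrative):

    /* Sketch: NMI pre-timeout handler in the HAVE_NMI_HANDLER style.
     * Assumes struct nmi_handler, request_nmi() and release_nmi()
     * behave as shown in the hunks above. */
    static int example_nmi(void *dev_id, int cpu, int handled)
    {
            if (handled)                    /* someone else claimed this NMI */
                    return NOTIFY_DONE;
            /* ... pre-timeout action (log, panic, ...) ... */
            return NOTIFY_DONE;
    }

    static struct nmi_handler example_nmi_handler = {
            .link     = LIST_HEAD_INIT(example_nmi_handler.link),
            .dev_name = "example_watchdog",
            .dev_id   = NULL,
            .handler  = example_nmi,
            .priority = 0,                  /* call us last */
    };

    /* request_nmi(&example_nmi_handler) registers the handler;
     * release_nmi(&example_nmi_handler) removes it again. */
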
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index fde92ce45153..2eb52b7a71da 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -346,12 +346,33 @@ static void cma_deref_id(struct rdma_id_private *id_priv)
346 complete(&id_priv->comp); 346 complete(&id_priv->comp);
347} 347}
348 348
349static void cma_release_remove(struct rdma_id_private *id_priv) 349static int cma_disable_remove(struct rdma_id_private *id_priv,
350 enum cma_state state)
351{
352 unsigned long flags;
353 int ret;
354
355 spin_lock_irqsave(&id_priv->lock, flags);
356 if (id_priv->state == state) {
357 atomic_inc(&id_priv->dev_remove);
358 ret = 0;
359 } else
360 ret = -EINVAL;
361 spin_unlock_irqrestore(&id_priv->lock, flags);
362 return ret;
363}
364
365static void cma_enable_remove(struct rdma_id_private *id_priv)
350{ 366{
351 if (atomic_dec_and_test(&id_priv->dev_remove)) 367 if (atomic_dec_and_test(&id_priv->dev_remove))
352 wake_up(&id_priv->wait_remove); 368 wake_up(&id_priv->wait_remove);
353} 369}
354 370
371static int cma_has_cm_dev(struct rdma_id_private *id_priv)
372{
373 return (id_priv->id.device && id_priv->cm_id.ib);
374}
375
355struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler, 376struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
356 void *context, enum rdma_port_space ps) 377 void *context, enum rdma_port_space ps)
357{ 378{
@@ -884,9 +905,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
884 struct rdma_cm_event event; 905 struct rdma_cm_event event;
885 int ret = 0; 906 int ret = 0;
886 907
887 atomic_inc(&id_priv->dev_remove); 908 if (cma_disable_remove(id_priv, CMA_CONNECT))
888 if (!cma_comp(id_priv, CMA_CONNECT)) 909 return 0;
889 goto out;
890 910
891 memset(&event, 0, sizeof event); 911 memset(&event, 0, sizeof event);
892 switch (ib_event->event) { 912 switch (ib_event->event) {
@@ -942,12 +962,12 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
942 /* Destroy the CM ID by returning a non-zero value. */ 962 /* Destroy the CM ID by returning a non-zero value. */
943 id_priv->cm_id.ib = NULL; 963 id_priv->cm_id.ib = NULL;
944 cma_exch(id_priv, CMA_DESTROYING); 964 cma_exch(id_priv, CMA_DESTROYING);
945 cma_release_remove(id_priv); 965 cma_enable_remove(id_priv);
946 rdma_destroy_id(&id_priv->id); 966 rdma_destroy_id(&id_priv->id);
947 return ret; 967 return ret;
948 } 968 }
949out: 969out:
950 cma_release_remove(id_priv); 970 cma_enable_remove(id_priv);
951 return ret; 971 return ret;
952} 972}
953 973
@@ -1057,11 +1077,8 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
1057 int offset, ret; 1077 int offset, ret;
1058 1078
1059 listen_id = cm_id->context; 1079 listen_id = cm_id->context;
1060 atomic_inc(&listen_id->dev_remove); 1080 if (cma_disable_remove(listen_id, CMA_LISTEN))
1061 if (!cma_comp(listen_id, CMA_LISTEN)) { 1081 return -ECONNABORTED;
1062 ret = -ECONNABORTED;
1063 goto out;
1064 }
1065 1082
1066 memset(&event, 0, sizeof event); 1083 memset(&event, 0, sizeof event);
1067 offset = cma_user_data_offset(listen_id->id.ps); 1084 offset = cma_user_data_offset(listen_id->id.ps);
@@ -1101,11 +1118,11 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
1101 1118
1102release_conn_id: 1119release_conn_id:
1103 cma_exch(conn_id, CMA_DESTROYING); 1120 cma_exch(conn_id, CMA_DESTROYING);
1104 cma_release_remove(conn_id); 1121 cma_enable_remove(conn_id);
1105 rdma_destroy_id(&conn_id->id); 1122 rdma_destroy_id(&conn_id->id);
1106 1123
1107out: 1124out:
1108 cma_release_remove(listen_id); 1125 cma_enable_remove(listen_id);
1109 return ret; 1126 return ret;
1110} 1127}
1111 1128
@@ -1171,9 +1188,10 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
1171 struct sockaddr_in *sin; 1188 struct sockaddr_in *sin;
1172 int ret = 0; 1189 int ret = 0;
1173 1190
1174 memset(&event, 0, sizeof event); 1191 if (cma_disable_remove(id_priv, CMA_CONNECT))
1175 atomic_inc(&id_priv->dev_remove); 1192 return 0;
1176 1193
1194 memset(&event, 0, sizeof event);
1177 switch (iw_event->event) { 1195 switch (iw_event->event) {
1178 case IW_CM_EVENT_CLOSE: 1196 case IW_CM_EVENT_CLOSE:
1179 event.event = RDMA_CM_EVENT_DISCONNECTED; 1197 event.event = RDMA_CM_EVENT_DISCONNECTED;
@@ -1214,12 +1232,12 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
1214 /* Destroy the CM ID by returning a non-zero value. */ 1232 /* Destroy the CM ID by returning a non-zero value. */
1215 id_priv->cm_id.iw = NULL; 1233 id_priv->cm_id.iw = NULL;
1216 cma_exch(id_priv, CMA_DESTROYING); 1234 cma_exch(id_priv, CMA_DESTROYING);
1217 cma_release_remove(id_priv); 1235 cma_enable_remove(id_priv);
1218 rdma_destroy_id(&id_priv->id); 1236 rdma_destroy_id(&id_priv->id);
1219 return ret; 1237 return ret;
1220 } 1238 }
1221 1239
1222 cma_release_remove(id_priv); 1240 cma_enable_remove(id_priv);
1223 return ret; 1241 return ret;
1224} 1242}
1225 1243
@@ -1234,11 +1252,8 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
1234 int ret; 1252 int ret;
1235 1253
1236 listen_id = cm_id->context; 1254 listen_id = cm_id->context;
1237 atomic_inc(&listen_id->dev_remove); 1255 if (cma_disable_remove(listen_id, CMA_LISTEN))
1238 if (!cma_comp(listen_id, CMA_LISTEN)) { 1256 return -ECONNABORTED;
1239 ret = -ECONNABORTED;
1240 goto out;
1241 }
1242 1257
1243 /* Create a new RDMA id for the new IW CM ID */ 1258 /* Create a new RDMA id for the new IW CM ID */
1244 new_cm_id = rdma_create_id(listen_id->id.event_handler, 1259 new_cm_id = rdma_create_id(listen_id->id.event_handler,
@@ -1255,13 +1270,13 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
1255 dev = ip_dev_find(iw_event->local_addr.sin_addr.s_addr); 1270 dev = ip_dev_find(iw_event->local_addr.sin_addr.s_addr);
1256 if (!dev) { 1271 if (!dev) {
1257 ret = -EADDRNOTAVAIL; 1272 ret = -EADDRNOTAVAIL;
1258 cma_release_remove(conn_id); 1273 cma_enable_remove(conn_id);
1259 rdma_destroy_id(new_cm_id); 1274 rdma_destroy_id(new_cm_id);
1260 goto out; 1275 goto out;
1261 } 1276 }
1262 ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL); 1277 ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
1263 if (ret) { 1278 if (ret) {
1264 cma_release_remove(conn_id); 1279 cma_enable_remove(conn_id);
1265 rdma_destroy_id(new_cm_id); 1280 rdma_destroy_id(new_cm_id);
1266 goto out; 1281 goto out;
1267 } 1282 }
@@ -1270,7 +1285,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
1270 ret = cma_acquire_dev(conn_id); 1285 ret = cma_acquire_dev(conn_id);
1271 mutex_unlock(&lock); 1286 mutex_unlock(&lock);
1272 if (ret) { 1287 if (ret) {
1273 cma_release_remove(conn_id); 1288 cma_enable_remove(conn_id);
1274 rdma_destroy_id(new_cm_id); 1289 rdma_destroy_id(new_cm_id);
1275 goto out; 1290 goto out;
1276 } 1291 }
@@ -1293,14 +1308,14 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
1293 /* User wants to destroy the CM ID */ 1308 /* User wants to destroy the CM ID */
1294 conn_id->cm_id.iw = NULL; 1309 conn_id->cm_id.iw = NULL;
1295 cma_exch(conn_id, CMA_DESTROYING); 1310 cma_exch(conn_id, CMA_DESTROYING);
1296 cma_release_remove(conn_id); 1311 cma_enable_remove(conn_id);
1297 rdma_destroy_id(&conn_id->id); 1312 rdma_destroy_id(&conn_id->id);
1298 } 1313 }
1299 1314
1300out: 1315out:
1301 if (dev) 1316 if (dev)
1302 dev_put(dev); 1317 dev_put(dev);
1303 cma_release_remove(listen_id); 1318 cma_enable_remove(listen_id);
1304 return ret; 1319 return ret;
1305} 1320}
1306 1321
@@ -1519,7 +1534,7 @@ static void cma_work_handler(struct work_struct *_work)
1519 destroy = 1; 1534 destroy = 1;
1520 } 1535 }
1521out: 1536out:
1522 cma_release_remove(id_priv); 1537 cma_enable_remove(id_priv);
1523 cma_deref_id(id_priv); 1538 cma_deref_id(id_priv);
1524 if (destroy) 1539 if (destroy)
1525 rdma_destroy_id(&id_priv->id); 1540 rdma_destroy_id(&id_priv->id);
@@ -1711,13 +1726,13 @@ static void addr_handler(int status, struct sockaddr *src_addr,
1711 1726
1712 if (id_priv->id.event_handler(&id_priv->id, &event)) { 1727 if (id_priv->id.event_handler(&id_priv->id, &event)) {
1713 cma_exch(id_priv, CMA_DESTROYING); 1728 cma_exch(id_priv, CMA_DESTROYING);
1714 cma_release_remove(id_priv); 1729 cma_enable_remove(id_priv);
1715 cma_deref_id(id_priv); 1730 cma_deref_id(id_priv);
1716 rdma_destroy_id(&id_priv->id); 1731 rdma_destroy_id(&id_priv->id);
1717 return; 1732 return;
1718 } 1733 }
1719out: 1734out:
1720 cma_release_remove(id_priv); 1735 cma_enable_remove(id_priv);
1721 cma_deref_id(id_priv); 1736 cma_deref_id(id_priv);
1722} 1737}
1723 1738
@@ -2042,11 +2057,10 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
2042 struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd; 2057 struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
2043 int ret = 0; 2058 int ret = 0;
2044 2059
2045 memset(&event, 0, sizeof event); 2060 if (cma_disable_remove(id_priv, CMA_CONNECT))
2046 atomic_inc(&id_priv->dev_remove); 2061 return 0;
2047 if (!cma_comp(id_priv, CMA_CONNECT))
2048 goto out;
2049 2062
2063 memset(&event, 0, sizeof event);
2050 switch (ib_event->event) { 2064 switch (ib_event->event) {
2051 case IB_CM_SIDR_REQ_ERROR: 2065 case IB_CM_SIDR_REQ_ERROR:
2052 event.event = RDMA_CM_EVENT_UNREACHABLE; 2066 event.event = RDMA_CM_EVENT_UNREACHABLE;
@@ -2084,12 +2098,12 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
2084 /* Destroy the CM ID by returning a non-zero value. */ 2098 /* Destroy the CM ID by returning a non-zero value. */
2085 id_priv->cm_id.ib = NULL; 2099 id_priv->cm_id.ib = NULL;
2086 cma_exch(id_priv, CMA_DESTROYING); 2100 cma_exch(id_priv, CMA_DESTROYING);
2087 cma_release_remove(id_priv); 2101 cma_enable_remove(id_priv);
2088 rdma_destroy_id(&id_priv->id); 2102 rdma_destroy_id(&id_priv->id);
2089 return ret; 2103 return ret;
2090 } 2104 }
2091out: 2105out:
2092 cma_release_remove(id_priv); 2106 cma_enable_remove(id_priv);
2093 return ret; 2107 return ret;
2094} 2108}
2095 2109
@@ -2413,7 +2427,7 @@ int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
2413 int ret; 2427 int ret;
2414 2428
2415 id_priv = container_of(id, struct rdma_id_private, id); 2429 id_priv = container_of(id, struct rdma_id_private, id);
2416 if (!cma_comp(id_priv, CMA_CONNECT)) 2430 if (!cma_has_cm_dev(id_priv))
2417 return -EINVAL; 2431 return -EINVAL;
2418 2432
2419 switch (id->device->node_type) { 2433 switch (id->device->node_type) {
@@ -2435,7 +2449,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
2435 int ret; 2449 int ret;
2436 2450
2437 id_priv = container_of(id, struct rdma_id_private, id); 2451 id_priv = container_of(id, struct rdma_id_private, id);
2438 if (!cma_comp(id_priv, CMA_CONNECT)) 2452 if (!cma_has_cm_dev(id_priv))
2439 return -EINVAL; 2453 return -EINVAL;
2440 2454
2441 switch (rdma_node_get_transport(id->device->node_type)) { 2455 switch (rdma_node_get_transport(id->device->node_type)) {
@@ -2466,8 +2480,7 @@ int rdma_disconnect(struct rdma_cm_id *id)
2466 int ret; 2480 int ret;
2467 2481
2468 id_priv = container_of(id, struct rdma_id_private, id); 2482 id_priv = container_of(id, struct rdma_id_private, id);
2469 if (!cma_comp(id_priv, CMA_CONNECT) && 2483 if (!cma_has_cm_dev(id_priv))
2470 !cma_comp(id_priv, CMA_DISCONNECT))
2471 return -EINVAL; 2484 return -EINVAL;
2472 2485
2473 switch (rdma_node_get_transport(id->device->node_type)) { 2486 switch (rdma_node_get_transport(id->device->node_type)) {
@@ -2499,10 +2512,9 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
2499 int ret; 2512 int ret;
2500 2513
2501 id_priv = mc->id_priv; 2514 id_priv = mc->id_priv;
2502 atomic_inc(&id_priv->dev_remove); 2515 if (cma_disable_remove(id_priv, CMA_ADDR_BOUND) &&
2503 if (!cma_comp(id_priv, CMA_ADDR_BOUND) && 2516 cma_disable_remove(id_priv, CMA_ADDR_RESOLVED))
2504 !cma_comp(id_priv, CMA_ADDR_RESOLVED)) 2517 return 0;
2505 goto out;
2506 2518
2507 if (!status && id_priv->id.qp) 2519 if (!status && id_priv->id.qp)
2508 status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid, 2520 status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
@@ -2524,12 +2536,12 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
2524 ret = id_priv->id.event_handler(&id_priv->id, &event); 2536 ret = id_priv->id.event_handler(&id_priv->id, &event);
2525 if (ret) { 2537 if (ret) {
2526 cma_exch(id_priv, CMA_DESTROYING); 2538 cma_exch(id_priv, CMA_DESTROYING);
2527 cma_release_remove(id_priv); 2539 cma_enable_remove(id_priv);
2528 rdma_destroy_id(&id_priv->id); 2540 rdma_destroy_id(&id_priv->id);
2529 return 0; 2541 return 0;
2530 } 2542 }
2531out: 2543
2532 cma_release_remove(id_priv); 2544 cma_enable_remove(id_priv);
2533 return 0; 2545 return 0;
2534} 2546}
2535 2547
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index f64d42b08674..1d286d3cc2d5 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -277,6 +277,7 @@ void ehca_cleanup_mrmw_cache(void);
277 277
278extern spinlock_t ehca_qp_idr_lock; 278extern spinlock_t ehca_qp_idr_lock;
279extern spinlock_t ehca_cq_idr_lock; 279extern spinlock_t ehca_cq_idr_lock;
280extern spinlock_t hcall_lock;
280extern struct idr ehca_qp_idr; 281extern struct idr ehca_qp_idr;
281extern struct idr ehca_cq_idr; 282extern struct idr ehca_cq_idr;
282 283
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 82dda2faf4d0..100329ba3343 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -517,12 +517,11 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
517 else { 517 else {
518 struct ehca_cq *cq = eq->eqe_cache[i].cq; 518 struct ehca_cq *cq = eq->eqe_cache[i].cq;
519 comp_event_callback(cq); 519 comp_event_callback(cq);
520 spin_lock_irqsave(&ehca_cq_idr_lock, flags); 520 spin_lock(&ehca_cq_idr_lock);
521 cq->nr_events--; 521 cq->nr_events--;
522 if (!cq->nr_events) 522 if (!cq->nr_events)
523 wake_up(&cq->wait_completion); 523 wake_up(&cq->wait_completion);
524 spin_unlock_irqrestore(&ehca_cq_idr_lock, 524 spin_unlock(&ehca_cq_idr_lock);
525 flags);
526 } 525 }
527 } else { 526 } else {
528 ehca_dbg(&shca->ib_device, "Got non completion event"); 527 ehca_dbg(&shca->ib_device, "Got non completion event");
@@ -711,6 +710,7 @@ static void destroy_comp_task(struct ehca_comp_pool *pool,
711 kthread_stop(task); 710 kthread_stop(task);
712} 711}
713 712
713#ifdef CONFIG_HOTPLUG_CPU
714static void take_over_work(struct ehca_comp_pool *pool, 714static void take_over_work(struct ehca_comp_pool *pool,
715 int cpu) 715 int cpu)
716{ 716{
@@ -735,7 +735,6 @@ static void take_over_work(struct ehca_comp_pool *pool,
735 735
736} 736}
737 737
738#ifdef CONFIG_HOTPLUG_CPU
739static int comp_pool_callback(struct notifier_block *nfb, 738static int comp_pool_callback(struct notifier_block *nfb,
740 unsigned long action, 739 unsigned long action,
741 void *hcpu) 740 void *hcpu)
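
Two independent fixes land in ehca_irq.c: the completion path drops the irqsave/irqrestore pair around ehca_cq_idr_lock (apparently safe because this branch of ehca_process_eq() already runs with interrupts disabled), and take_over_work() moves under CONFIG_HOTPLUG_CPU next to its only caller. The second change is about warning-free builds; in outline, with bodies elided:

    #ifdef CONFIG_HOTPLUG_CPU
    /* Only the hotplug notifier uses take_over_work(), so keeping
     * both inside the same guard avoids a "defined but not used"
     * warning when CONFIG_HOTPLUG_CPU is off. */
    static void take_over_work(struct ehca_comp_pool *pool, int cpu)
    {
            /* ... requeue completions from the departing CPU ... */
    }

    static int comp_pool_callback(struct notifier_block *nfb,
                                  unsigned long action, void *hcpu)
    {
            /* ... CPU hotplug notifier; calls take_over_work() ... */
            return NOTIFY_OK;
    }
    #endif
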
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index fe90e7454560..c3f99f33b49c 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@
52MODULE_LICENSE("Dual BSD/GPL"); 52MODULE_LICENSE("Dual BSD/GPL");
53MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>"); 53MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
54MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver"); 54MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
55MODULE_VERSION("SVNEHCA_0022"); 55MODULE_VERSION("SVNEHCA_0023");
56 56
57int ehca_open_aqp1 = 0; 57int ehca_open_aqp1 = 0;
58int ehca_debug_level = 0; 58int ehca_debug_level = 0;
@@ -62,7 +62,7 @@ int ehca_use_hp_mr = 0;
62int ehca_port_act_time = 30; 62int ehca_port_act_time = 30;
63int ehca_poll_all_eqs = 1; 63int ehca_poll_all_eqs = 1;
64int ehca_static_rate = -1; 64int ehca_static_rate = -1;
65int ehca_scaling_code = 1; 65int ehca_scaling_code = 0;
66 66
67module_param_named(open_aqp1, ehca_open_aqp1, int, 0); 67module_param_named(open_aqp1, ehca_open_aqp1, int, 0);
68module_param_named(debug_level, ehca_debug_level, int, 0); 68module_param_named(debug_level, ehca_debug_level, int, 0);
@@ -98,6 +98,7 @@ MODULE_PARM_DESC(scaling_code,
98 98
99spinlock_t ehca_qp_idr_lock; 99spinlock_t ehca_qp_idr_lock;
100spinlock_t ehca_cq_idr_lock; 100spinlock_t ehca_cq_idr_lock;
101spinlock_t hcall_lock;
101DEFINE_IDR(ehca_qp_idr); 102DEFINE_IDR(ehca_qp_idr);
102DEFINE_IDR(ehca_cq_idr); 103DEFINE_IDR(ehca_cq_idr);
103 104
@@ -453,15 +454,14 @@ static ssize_t ehca_store_debug_level(struct device_driver *ddp,
453DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR, 454DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR,
454 ehca_show_debug_level, ehca_store_debug_level); 455 ehca_show_debug_level, ehca_store_debug_level);
455 456
456void ehca_create_driver_sysfs(struct ibmebus_driver *drv) 457static struct attribute *ehca_drv_attrs[] = {
457{ 458 &driver_attr_debug_level.attr,
458 driver_create_file(&drv->driver, &driver_attr_debug_level); 459 NULL
459} 460};
460 461
461void ehca_remove_driver_sysfs(struct ibmebus_driver *drv) 462static struct attribute_group ehca_drv_attr_grp = {
462{ 463 .attrs = ehca_drv_attrs
463 driver_remove_file(&drv->driver, &driver_attr_debug_level); 464};
464}
465 465
466#define EHCA_RESOURCE_ATTR(name) \ 466#define EHCA_RESOURCE_ATTR(name) \
467static ssize_t ehca_show_##name(struct device *dev, \ 467static ssize_t ehca_show_##name(struct device *dev, \
@@ -523,44 +523,28 @@ static ssize_t ehca_show_adapter_handle(struct device *dev,
523} 523}
524static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL); 524static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
525 525
526static struct attribute *ehca_dev_attrs[] = {
527 &dev_attr_adapter_handle.attr,
528 &dev_attr_num_ports.attr,
529 &dev_attr_hw_ver.attr,
530 &dev_attr_max_eq.attr,
531 &dev_attr_cur_eq.attr,
532 &dev_attr_max_cq.attr,
533 &dev_attr_cur_cq.attr,
534 &dev_attr_max_qp.attr,
535 &dev_attr_cur_qp.attr,
536 &dev_attr_max_mr.attr,
537 &dev_attr_cur_mr.attr,
538 &dev_attr_max_mw.attr,
539 &dev_attr_cur_mw.attr,
540 &dev_attr_max_pd.attr,
541 &dev_attr_max_ah.attr,
542 NULL
543};
526 544
527void ehca_create_device_sysfs(struct ibmebus_dev *dev) 545static struct attribute_group ehca_dev_attr_grp = {
528{ 546 .attrs = ehca_dev_attrs
529 device_create_file(&dev->ofdev.dev, &dev_attr_adapter_handle); 547};
530 device_create_file(&dev->ofdev.dev, &dev_attr_num_ports);
531 device_create_file(&dev->ofdev.dev, &dev_attr_hw_ver);
532 device_create_file(&dev->ofdev.dev, &dev_attr_max_eq);
533 device_create_file(&dev->ofdev.dev, &dev_attr_cur_eq);
534 device_create_file(&dev->ofdev.dev, &dev_attr_max_cq);
535 device_create_file(&dev->ofdev.dev, &dev_attr_cur_cq);
536 device_create_file(&dev->ofdev.dev, &dev_attr_max_qp);
537 device_create_file(&dev->ofdev.dev, &dev_attr_cur_qp);
538 device_create_file(&dev->ofdev.dev, &dev_attr_max_mr);
539 device_create_file(&dev->ofdev.dev, &dev_attr_cur_mr);
540 device_create_file(&dev->ofdev.dev, &dev_attr_max_mw);
541 device_create_file(&dev->ofdev.dev, &dev_attr_cur_mw);
542 device_create_file(&dev->ofdev.dev, &dev_attr_max_pd);
543 device_create_file(&dev->ofdev.dev, &dev_attr_max_ah);
544}
545
546void ehca_remove_device_sysfs(struct ibmebus_dev *dev)
547{
548 device_remove_file(&dev->ofdev.dev, &dev_attr_adapter_handle);
549 device_remove_file(&dev->ofdev.dev, &dev_attr_num_ports);
550 device_remove_file(&dev->ofdev.dev, &dev_attr_hw_ver);
551 device_remove_file(&dev->ofdev.dev, &dev_attr_max_eq);
552 device_remove_file(&dev->ofdev.dev, &dev_attr_cur_eq);
553 device_remove_file(&dev->ofdev.dev, &dev_attr_max_cq);
554 device_remove_file(&dev->ofdev.dev, &dev_attr_cur_cq);
555 device_remove_file(&dev->ofdev.dev, &dev_attr_max_qp);
556 device_remove_file(&dev->ofdev.dev, &dev_attr_cur_qp);
557 device_remove_file(&dev->ofdev.dev, &dev_attr_max_mr);
558 device_remove_file(&dev->ofdev.dev, &dev_attr_cur_mr);
559 device_remove_file(&dev->ofdev.dev, &dev_attr_max_mw);
560 device_remove_file(&dev->ofdev.dev, &dev_attr_cur_mw);
561 device_remove_file(&dev->ofdev.dev, &dev_attr_max_pd);
562 device_remove_file(&dev->ofdev.dev, &dev_attr_max_ah);
563}
564 548
565static int __devinit ehca_probe(struct ibmebus_dev *dev, 549static int __devinit ehca_probe(struct ibmebus_dev *dev,
566 const struct of_device_id *id) 550 const struct of_device_id *id)
@@ -668,7 +652,10 @@ static int __devinit ehca_probe(struct ibmebus_dev *dev,
668 } 652 }
669 } 653 }
670 654
671 ehca_create_device_sysfs(dev); 655 ret = sysfs_create_group(&dev->ofdev.dev.kobj, &ehca_dev_attr_grp);
656 if (ret) /* only complain; we can live without attributes */
657 ehca_err(&shca->ib_device,
658 "Cannot create device attributes ret=%d", ret);
672 659
673 spin_lock(&shca_list_lock); 660 spin_lock(&shca_list_lock);
674 list_add(&shca->shca_list, &shca_list); 661 list_add(&shca->shca_list, &shca_list);
@@ -720,7 +707,7 @@ static int __devexit ehca_remove(struct ibmebus_dev *dev)
720 struct ehca_shca *shca = dev->ofdev.dev.driver_data; 707 struct ehca_shca *shca = dev->ofdev.dev.driver_data;
721 int ret; 708 int ret;
722 709
723 ehca_remove_device_sysfs(dev); 710 sysfs_remove_group(&dev->ofdev.dev.kobj, &ehca_dev_attr_grp);
724 711
725 if (ehca_open_aqp1 == 1) { 712 if (ehca_open_aqp1 == 1) {
726 int i; 713 int i;
@@ -812,11 +799,12 @@ int __init ehca_module_init(void)
812 int ret; 799 int ret;
813 800
814 printk(KERN_INFO "eHCA Infiniband Device Driver " 801 printk(KERN_INFO "eHCA Infiniband Device Driver "
815 "(Rel.: SVNEHCA_0022)\n"); 802 "(Rel.: SVNEHCA_0023)\n");
816 idr_init(&ehca_qp_idr); 803 idr_init(&ehca_qp_idr);
817 idr_init(&ehca_cq_idr); 804 idr_init(&ehca_cq_idr);
818 spin_lock_init(&ehca_qp_idr_lock); 805 spin_lock_init(&ehca_qp_idr_lock);
819 spin_lock_init(&ehca_cq_idr_lock); 806 spin_lock_init(&ehca_cq_idr_lock);
807 spin_lock_init(&hcall_lock);
820 808
821 INIT_LIST_HEAD(&shca_list); 809 INIT_LIST_HEAD(&shca_list);
822 spin_lock_init(&shca_list_lock); 810 spin_lock_init(&shca_list_lock);
@@ -838,7 +826,9 @@ int __init ehca_module_init(void)
838 goto module_init2; 826 goto module_init2;
839 } 827 }
840 828
841 ehca_create_driver_sysfs(&ehca_driver); 829 ret = sysfs_create_group(&ehca_driver.driver.kobj, &ehca_drv_attr_grp);
830 if (ret) /* only complain; we can live without attributes */
831 ehca_gen_err("Cannot create driver attributes ret=%d", ret);
842 832
843 if (ehca_poll_all_eqs != 1) { 833 if (ehca_poll_all_eqs != 1) {
844 ehca_gen_err("WARNING!!!"); 834 ehca_gen_err("WARNING!!!");
@@ -865,7 +855,7 @@ void __exit ehca_module_exit(void)
865 if (ehca_poll_all_eqs == 1) 855 if (ehca_poll_all_eqs == 1)
866 del_timer_sync(&poll_eqs_timer); 856 del_timer_sync(&poll_eqs_timer);
867 857
868 ehca_remove_driver_sysfs(&ehca_driver); 858 sysfs_remove_group(&ehca_driver.driver.kobj, &ehca_drv_attr_grp);
869 ibmebus_unregister_driver(&ehca_driver); 859 ibmebus_unregister_driver(&ehca_driver);
870 860
871 ehca_destroy_slab_caches(); 861 ehca_destroy_slab_caches();
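
The sysfs rework above replaces fifteen paired device_create_file()/device_remove_file() calls with a single attribute group, which is shorter and checks for errors in one place. The idiom, reduced to one hypothetical attribute (the ehca hunks show the real lists):

    static ssize_t example_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
            return snprintf(buf, PAGE_SIZE, "42\n");
    }
    static DEVICE_ATTR(example, S_IRUGO, example_show, NULL);

    static struct attribute *example_attrs[] = {
            &dev_attr_example.attr,
            NULL                            /* list must be NULL-terminated */
    };

    static struct attribute_group example_attr_grp = {
            .attrs = example_attrs
    };

    /* probe:   ret = sysfs_create_group(&kobj, &example_attr_grp);
     * remove:  sysfs_remove_group(&kobj, &example_attr_grp);      */

As the hunk's comment notes, a failure to create the group is only reported, not treated as fatal; the driver can live without its attributes.
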
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index df0516f24379..b5bc787c77b6 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -523,6 +523,8 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
523 goto create_qp_exit1; 523 goto create_qp_exit1;
524 } 524 }
525 525
526 my_qp->ib_qp.qp_num = my_qp->real_qp_num;
527
526 switch (init_attr->qp_type) { 528 switch (init_attr->qp_type) {
527 case IB_QPT_RC: 529 case IB_QPT_RC:
528 if (isdaqp == 0) { 530 if (isdaqp == 0) {
@@ -568,7 +570,7 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
568 parms.act_nr_recv_wqes = init_attr->cap.max_recv_wr; 570 parms.act_nr_recv_wqes = init_attr->cap.max_recv_wr;
569 parms.act_nr_send_sges = init_attr->cap.max_send_sge; 571 parms.act_nr_send_sges = init_attr->cap.max_send_sge;
570 parms.act_nr_recv_sges = init_attr->cap.max_recv_sge; 572 parms.act_nr_recv_sges = init_attr->cap.max_recv_sge;
571 my_qp->real_qp_num = 573 my_qp->ib_qp.qp_num =
572 (init_attr->qp_type == IB_QPT_SMI) ? 0 : 1; 574 (init_attr->qp_type == IB_QPT_SMI) ? 0 : 1;
573 } 575 }
574 576
@@ -595,7 +597,6 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
595 my_qp->ib_qp.recv_cq = init_attr->recv_cq; 597 my_qp->ib_qp.recv_cq = init_attr->recv_cq;
596 my_qp->ib_qp.send_cq = init_attr->send_cq; 598 my_qp->ib_qp.send_cq = init_attr->send_cq;
597 599
598 my_qp->ib_qp.qp_num = my_qp->real_qp_num;
599 my_qp->ib_qp.qp_type = init_attr->qp_type; 600 my_qp->ib_qp.qp_type = init_attr->qp_type;
600 601
601 my_qp->qp_type = init_attr->qp_type; 602 my_qp->qp_type = init_attr->qp_type;
@@ -968,17 +969,21 @@ static int internal_modify_qp(struct ib_qp *ibqp,
968 ((ehca_mult - 1) / ah_mult) : 0; 969 ((ehca_mult - 1) / ah_mult) : 0;
969 else 970 else
970 mqpcb->max_static_rate = 0; 971 mqpcb->max_static_rate = 0;
971
972 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE, 1); 972 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE, 1);
973 973
974 /* 974 /*
975 * Always supply the GRH flag, even if it's zero, to give the
976 * hypervisor a clear "yes" or "no" instead of a "perhaps"
977 */
978 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
979
980 /*
975 * only if GRH is TRUE we might consider SOURCE_GID_IDX 981 * only if GRH is TRUE we might consider SOURCE_GID_IDX
976 * and DEST_GID otherwise phype will return H_ATTR_PARM!!! 982 * and DEST_GID otherwise phype will return H_ATTR_PARM!!!
977 */ 983 */
978 if (attr->ah_attr.ah_flags == IB_AH_GRH) { 984 if (attr->ah_attr.ah_flags == IB_AH_GRH) {
979 mqpcb->send_grh_flag = 1 << 31; 985 mqpcb->send_grh_flag = 1;
980 update_mask |= 986
981 EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
982 mqpcb->source_gid_idx = attr->ah_attr.grh.sgid_index; 987 mqpcb->source_gid_idx = attr->ah_attr.grh.sgid_index;
983 update_mask |= 988 update_mask |=
984 EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX, 1); 989 EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX, 1);
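
Two fixes in ehca_qp.c: the qp_num assignment moves up to just after the QP is created, and the SMI/GSI special case now overrides ib_qp.qp_num directly instead of clobbering real_qp_num; separately, MQPCB_MASK_SEND_GRH_FLAG is now always set in update_mask so that, per the new comment, the hypervisor gets an explicit yes/no instead of a "perhaps". The flag value itself shrinks from 1 << 31 to 1, presumably because the control block expects it in the low-order bit. Condensed (names from the hunk, rationale hedged as above):

    /* Sketch of the new GRH handling: */
    update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);

    if (attr->ah_attr.ah_flags == IB_AH_GRH) {
            mqpcb->send_grh_flag = 1;       /* was a misplaced 1 << 31 */
            mqpcb->source_gid_idx = attr->ah_attr.grh.sgid_index;
            update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX, 1);
    }
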
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c
index b564fcd3b282..7f0beec74f70 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.c
+++ b/drivers/infiniband/hw/ehca/hcp_if.c
@@ -154,7 +154,8 @@ static long ehca_plpar_hcall9(unsigned long opcode,
154 unsigned long arg9) 154 unsigned long arg9)
155{ 155{
156 long ret; 156 long ret;
157 int i, sleep_msecs; 157 int i, sleep_msecs, lock_is_set = 0;
158 unsigned long flags;
158 159
159 ehca_gen_dbg("opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx " 160 ehca_gen_dbg("opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx "
160 "arg5=%lx arg6=%lx arg7=%lx arg8=%lx arg9=%lx", 161 "arg5=%lx arg6=%lx arg7=%lx arg8=%lx arg9=%lx",
@@ -162,10 +163,18 @@ static long ehca_plpar_hcall9(unsigned long opcode,
162 arg8, arg9); 163 arg8, arg9);
163 164
164 for (i = 0; i < 5; i++) { 165 for (i = 0; i < 5; i++) {
166 if ((opcode == H_ALLOC_RESOURCE) && (arg2 == 5)) {
167 spin_lock_irqsave(&hcall_lock, flags);
168 lock_is_set = 1;
169 }
170
165 ret = plpar_hcall9(opcode, outs, 171 ret = plpar_hcall9(opcode, outs,
166 arg1, arg2, arg3, arg4, arg5, 172 arg1, arg2, arg3, arg4, arg5,
167 arg6, arg7, arg8, arg9); 173 arg6, arg7, arg8, arg9);
168 174
175 if (lock_is_set)
176 spin_unlock_irqrestore(&hcall_lock, flags);
177
169 if (H_IS_LONG_BUSY(ret)) { 178 if (H_IS_LONG_BUSY(ret)) {
170 sleep_msecs = get_longbusy_msecs(ret); 179 sleep_msecs = get_longbusy_msecs(ret);
171 msleep_interruptible(sleep_msecs); 180 msleep_interruptible(sleep_msecs);
@@ -193,11 +202,11 @@ static long ehca_plpar_hcall9(unsigned long opcode,
193 opcode, ret, outs[0], outs[1], outs[2], outs[3], 202 opcode, ret, outs[0], outs[1], outs[2], outs[3],
194 outs[4], outs[5], outs[6], outs[7], outs[8]); 203 outs[4], outs[5], outs[6], outs[7], outs[8]);
195 return ret; 204 return ret;
196
197 } 205 }
198 206
199 return H_BUSY; 207 return H_BUSY;
200} 208}
209
201u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle, 210u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
202 struct ehca_pfeq *pfeq, 211 struct ehca_pfeq *pfeq,
203 const u32 neq_control, 212 const u32 neq_control,
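
The hcp_if.c change serializes exactly one hypercall pattern, H_ALLOC_RESOURCE with arg2 == 5 per the hunk's condition, behind the new global hcall_lock, leaving every other call unlocked. The conditional-lock shape, with the hypercall itself reduced to a placeholder:

    /* Sketch: take hcall_lock only for the one racy opcode.
     * do_hypercall() stands in for the real plpar_hcall9() call. */
    static long example_hcall(unsigned long opcode, unsigned long arg2)
    {
            unsigned long flags;
            int lock_is_set = 0;
            long ret;

            if (opcode == H_ALLOC_RESOURCE && arg2 == 5) {
                    spin_lock_irqsave(&hcall_lock, flags);
                    lock_is_set = 1;
            }

            ret = do_hypercall(opcode, arg2);       /* placeholder */

            if (lock_is_set)
                    spin_unlock_irqrestore(&hcall_lock, flags);
            return ret;
    }
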
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c
index 1b9c30857754..4e2e3dfeb2c8 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6120.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c
@@ -747,7 +747,6 @@ static void ipath_pe_quiet_serdes(struct ipath_devdata *dd)
747 747
748static int ipath_pe_intconfig(struct ipath_devdata *dd) 748static int ipath_pe_intconfig(struct ipath_devdata *dd)
749{ 749{
750 u64 val;
751 u32 chiprev; 750 u32 chiprev;
752 751
753 /* 752 /*
@@ -760,9 +759,9 @@ static int ipath_pe_intconfig(struct ipath_devdata *dd)
760 if ((chiprev & INFINIPATH_R_CHIPREVMINOR_MASK) > 1) { 759 if ((chiprev & INFINIPATH_R_CHIPREVMINOR_MASK) > 1) {
761 /* Rev2+ reports extra errors via internal GPIO pins */ 760 /* Rev2+ reports extra errors via internal GPIO pins */
762 dd->ipath_flags |= IPATH_GPIO_ERRINTRS; 761 dd->ipath_flags |= IPATH_GPIO_ERRINTRS;
763 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_mask); 762 dd->ipath_gpio_mask |= IPATH_GPIO_ERRINTR_MASK;
764 val |= IPATH_GPIO_ERRINTR_MASK; 763 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
765 ipath_write_kreg( dd, dd->ipath_kregs->kr_gpio_mask, val); 764 dd->ipath_gpio_mask);
766 } 765 }
767 return 0; 766 return 0;
768} 767}
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 45d033169c6e..a90d3b5699c4 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -1056,7 +1056,7 @@ irqreturn_t ipath_intr(int irq, void *data)
1056 gpiostatus &= ~(1 << IPATH_GPIO_PORT0_BIT); 1056 gpiostatus &= ~(1 << IPATH_GPIO_PORT0_BIT);
1057 chk0rcv = 1; 1057 chk0rcv = 1;
1058 } 1058 }
1059 if (unlikely(gpiostatus)) { 1059 if (gpiostatus) {
1060 /* 1060 /*
1061 * Some unexpected bits remain. If they could have 1061 * Some unexpected bits remain. If they could have
1062 * caused the interrupt, complain and clear. 1062 * caused the interrupt, complain and clear.
@@ -1065,9 +1065,8 @@ irqreturn_t ipath_intr(int irq, void *data)
1065 * GPIO interrupts, possibly on a "three strikes" 1065 * GPIO interrupts, possibly on a "three strikes"
1066 * basis. 1066 * basis.
1067 */ 1067 */
1068 u32 mask; 1068 const u32 mask = (u32) dd->ipath_gpio_mask;
1069 mask = ipath_read_kreg32( 1069
1070 dd, dd->ipath_kregs->kr_gpio_mask);
1071 if (mask & gpiostatus) { 1070 if (mask & gpiostatus) {
1072 ipath_dbg("Unexpected GPIO IRQ bits %x\n", 1071 ipath_dbg("Unexpected GPIO IRQ bits %x\n",
1073 gpiostatus & mask); 1072 gpiostatus & mask);
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index e900c2593f44..12194f3dd8cc 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -397,6 +397,8 @@ struct ipath_devdata {
397 unsigned long ipath_pioavailshadow[8]; 397 unsigned long ipath_pioavailshadow[8];
398 /* shadow of kr_gpio_out, for rmw ops */ 398 /* shadow of kr_gpio_out, for rmw ops */
399 u64 ipath_gpio_out; 399 u64 ipath_gpio_out;
400 /* shadow the gpio mask register */
401 u64 ipath_gpio_mask;
400 /* kr_revision shadow */ 402 /* kr_revision shadow */
401 u64 ipath_revision; 403 u64 ipath_revision;
402 /* 404 /*
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 12933e77c7e9..bb70845279b8 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -1387,13 +1387,12 @@ static int enable_timer(struct ipath_devdata *dd)
1387 * processing. 1387 * processing.
1388 */ 1388 */
1389 if (dd->ipath_flags & IPATH_GPIO_INTR) { 1389 if (dd->ipath_flags & IPATH_GPIO_INTR) {
1390 u64 val;
1391 ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect, 1390 ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
1392 0x2074076542310ULL); 1391 0x2074076542310ULL);
1393 /* Enable GPIO bit 2 interrupt */ 1392 /* Enable GPIO bit 2 interrupt */
1394 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_mask); 1393 dd->ipath_gpio_mask |= (u64) (1 << IPATH_GPIO_PORT0_BIT);
1395 val |= (u64) (1 << IPATH_GPIO_PORT0_BIT); 1394 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
1396 ipath_write_kreg( dd, dd->ipath_kregs->kr_gpio_mask, val); 1395 dd->ipath_gpio_mask);
1397 } 1396 }
1398 1397
1399 init_timer(&dd->verbs_timer); 1398 init_timer(&dd->verbs_timer);
@@ -1412,8 +1411,9 @@ static int disable_timer(struct ipath_devdata *dd)
1412 u64 val; 1411 u64 val;
1413 /* Disable GPIO bit 2 interrupt */ 1412 /* Disable GPIO bit 2 interrupt */
1414 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_mask); 1413 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_mask);
1415 val &= ~((u64) (1 << IPATH_GPIO_PORT0_BIT)); 1414 dd->ipath_gpio_mask &= ~((u64) (1 << IPATH_GPIO_PORT0_BIT));
1416 ipath_write_kreg( dd, dd->ipath_kregs->kr_gpio_mask, val); 1415 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
1416 dd->ipath_gpio_mask);
1417 /* 1417 /*
1418 * We might want to undo changes to debugportselect, 1418 * We might want to undo changes to debugportselect,
1419 * but how? 1419 * but how?
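
The four ipath hunks above all serve one change: the GPIO interrupt mask is now shadowed in dd->ipath_gpio_mask, so enabling or disabling a GPIO interrupt becomes modify-the-shadow-then-write instead of a read-modify-write of the chip register, and the interrupt handler can consult the shadow instead of issuing a register read. In sketch form, using the field and accessor names from the hunks:

    /* Sketch: shadowed GPIO mask updates. */
    static void example_gpio_irq_enable(struct ipath_devdata *dd, int bit)
    {
            dd->ipath_gpio_mask |= (u64)(1 << bit);
            ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
                             dd->ipath_gpio_mask);
    }

    static void example_gpio_irq_disable(struct ipath_devdata *dd, int bit)
    {
            dd->ipath_gpio_mask &= ~((u64)(1 << bit));
            ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
                             dd->ipath_gpio_mask);
    }
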
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 688ecb4c39f3..402f3a20ec0a 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -489,6 +489,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
489 ibdev->uar_map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE); 489 ibdev->uar_map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
490 if (!ibdev->uar_map) 490 if (!ibdev->uar_map)
491 goto err_uar; 491 goto err_uar;
492 MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
492 493
493 INIT_LIST_HEAD(&ibdev->pgdir_list); 494 INIT_LIST_HEAD(&ibdev->pgdir_list);
494 mutex_init(&ibdev->pgdir_mutex); 495 mutex_init(&ibdev->pgdir_mutex);
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index cf0868f6e965..ca224d018af2 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -284,7 +284,7 @@ void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
284{ 284{
285 struct mthca_cqe *cqe; 285 struct mthca_cqe *cqe;
286 u32 prod_index; 286 u32 prod_index;
287 int nfreed = 0; 287 int i, nfreed = 0;
288 288
289 spin_lock_irq(&cq->lock); 289 spin_lock_irq(&cq->lock);
290 290
@@ -321,6 +321,8 @@ void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
321 } 321 }
322 322
323 if (nfreed) { 323 if (nfreed) {
324 for (i = 0; i < nfreed; ++i)
325 set_cqe_hw(get_cqe(cq, (cq->cons_index + i) & cq->ibcq.cqe));
324 wmb(); 326 wmb();
325 cq->cons_index += nfreed; 327 cq->cons_index += nfreed;
326 update_cons_index(dev, cq, nfreed); 328 update_cons_index(dev, cq, nfreed);
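
The mthca_cq_clean() fix: entries freed while compacting the CQ are handed back to hardware with set_cqe_hw() before the consumer index is advanced; the existing wmb() orders the ownership-bit writes against the index update that tells the HCA the slots are reusable. Annotated form of the new block (code as in the hunk, comments added):

    if (nfreed) {
            /* Return each freed slot to hardware ownership... */
            for (i = 0; i < nfreed; ++i)
                    set_cqe_hw(get_cqe(cq, (cq->cons_index + i) & cq->ibcq.cqe));
            wmb();          /* ...before the HCA sees the new index */
            cq->cons_index += nfreed;
            update_cons_index(dev, cq, nfreed);
    }
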
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index fee60c852d14..72fabb822f1c 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1862,6 +1862,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1862 dev->kar + MTHCA_RECEIVE_DOORBELL, 1862 dev->kar + MTHCA_RECEIVE_DOORBELL,
1863 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); 1863 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
1864 1864
1865 qp->rq.next_ind = ind;
1865 qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; 1866 qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
1866 size0 = 0; 1867 size0 = 0;
1867 } 1868 }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 785bc8505f2a..eec833b81e9b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -257,10 +257,11 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
257 cm_id->context = p; 257 cm_id->context = p;
258 p->jiffies = jiffies; 258 p->jiffies = jiffies;
259 spin_lock_irq(&priv->lock); 259 spin_lock_irq(&priv->lock);
260 if (list_empty(&priv->cm.passive_ids))
261 queue_delayed_work(ipoib_workqueue,
262 &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
260 list_add(&p->list, &priv->cm.passive_ids); 263 list_add(&p->list, &priv->cm.passive_ids);
261 spin_unlock_irq(&priv->lock); 264 spin_unlock_irq(&priv->lock);
262 queue_delayed_work(ipoib_workqueue,
263 &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
264 return 0; 265 return 0;
265 266
266err_rep: 267err_rep:
@@ -378,8 +379,6 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
378 if (!list_empty(&p->list)) 379 if (!list_empty(&p->list))
379 list_move(&p->list, &priv->cm.passive_ids); 380 list_move(&p->list, &priv->cm.passive_ids);
380 spin_unlock_irqrestore(&priv->lock, flags); 381 spin_unlock_irqrestore(&priv->lock, flags);
381 queue_delayed_work(ipoib_workqueue,
382 &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
383 } 382 }
384 } 383 }
385 384
@@ -1100,6 +1099,10 @@ static void ipoib_cm_stale_task(struct work_struct *work)
1100 kfree(p); 1099 kfree(p);
1101 spin_lock_irq(&priv->lock); 1100 spin_lock_irq(&priv->lock);
1102 } 1101 }
1102
1103 if (!list_empty(&priv->cm.passive_ids))
1104 queue_delayed_work(ipoib_workqueue,
1105 &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
1103 spin_unlock_irq(&priv->lock); 1106 spin_unlock_irq(&priv->lock);
1104} 1107}
1105 1108
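
The three ipoib_cm.c hunks convert the stale-connection reaper into a self-rearming delayed work: the connection-request path queues it only on the empty-to-non-empty transition of passive_ids, the RX path no longer queues it at all, and the stale task re-queues itself while the list stays non-empty. Because every queue_delayed_work() call now happens under priv->lock, the work can neither be armed for an empty list nor left unarmed while entries remain. In outline (fragments condensed from the hunks):

    /* Arming side, in the connection request handler: */
    spin_lock_irq(&priv->lock);
    if (list_empty(&priv->cm.passive_ids))      /* first entry arms it */
            queue_delayed_work(ipoib_workqueue,
                               &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
    list_add(&p->list, &priv->cm.passive_ids);
    spin_unlock_irq(&priv->lock);

    /* Re-arming side, at the tail of ipoib_cm_stale_task()
     * (the lock is already held at this point): */
    if (!list_empty(&priv->cm.passive_ids))     /* entries left: run again */
            queue_delayed_work(ipoib_workqueue,
                               &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
    spin_unlock_irq(&priv->lock);
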
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index d24ab234394c..a7562f7fc0b3 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -45,8 +45,6 @@
45 */ 45 */
46#define MMC_SHIFT 3 46#define MMC_SHIFT 3
47 47
48static int major;
49
50/* 48/*
51 * There is one mmc_blk_data per slot. 49 * There is one mmc_blk_data per slot.
52 */ 50 */
@@ -466,7 +464,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
466 md->queue.issue_fn = mmc_blk_issue_rq; 464 md->queue.issue_fn = mmc_blk_issue_rq;
467 md->queue.data = md; 465 md->queue.data = md;
468 466
469 md->disk->major = major; 467 md->disk->major = MMC_BLOCK_MAJOR;
470 md->disk->first_minor = devidx << MMC_SHIFT; 468 md->disk->first_minor = devidx << MMC_SHIFT;
471 md->disk->fops = &mmc_bdops; 469 md->disk->fops = &mmc_bdops;
472 md->disk->private_data = md; 470 md->disk->private_data = md;
@@ -634,14 +632,9 @@ static int __init mmc_blk_init(void)
634{ 632{
635 int res = -ENOMEM; 633 int res = -ENOMEM;
636 634
637 res = register_blkdev(major, "mmc"); 635 res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
638 if (res < 0) { 636 if (res)
639 printk(KERN_WARNING "Unable to get major %d for MMC media: %d\n",
640 major, res);
641 goto out; 637 goto out;
642 }
643 if (major == 0)
644 major = res;
645 638
646 return mmc_register_driver(&mmc_driver); 639 return mmc_register_driver(&mmc_driver);
647 640
@@ -652,7 +645,7 @@ static int __init mmc_blk_init(void)
652static void __exit mmc_blk_exit(void) 645static void __exit mmc_blk_exit(void)
653{ 646{
654 mmc_unregister_driver(&mmc_driver); 647 mmc_unregister_driver(&mmc_driver);
655 unregister_blkdev(major, "mmc"); 648 unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
656} 649}
657 650
658module_init(mmc_blk_init); 651module_init(mmc_blk_init);
@@ -661,5 +654,3 @@ module_exit(mmc_blk_exit);
661MODULE_LICENSE("GPL"); 654MODULE_LICENSE("GPL");
662MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver"); 655MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
663 656
664module_param(major, int, 0444);
665MODULE_PARM_DESC(major, "specify the major device number for MMC block driver");
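
The mmc block driver switches from a module-parameter major number to the fixed MMC_BLOCK_MAJOR constant. The changed error test follows from register_blkdev()'s convention: called with major == 0 it returns the dynamically allocated major (a positive number) on success, while with a fixed major it returns 0 on success and a negative errno on failure. Hence:

    /* Fixed major: 0 means success. */
    res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
    if (res)
            goto out;

    /* Old dynamic scheme, for contrast: a positive return was the
     * allocated major, so only res < 0 indicated failure.
     * res = register_blkdev(0, "mmc"); */
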
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index b7156a4555b5..f967226d7505 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -187,9 +187,8 @@ static void au1xmmc_tasklet_finish(unsigned long param)
187} 187}
188 188
189static int au1xmmc_send_command(struct au1xmmc_host *host, int wait, 189static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
190 struct mmc_command *cmd) 190 struct mmc_command *cmd, unsigned int flags)
191{ 191{
192
193 u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT); 192 u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT);
194 193
195 switch (mmc_resp_type(cmd)) { 194 switch (mmc_resp_type(cmd)) {
@@ -213,24 +212,16 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
213 return MMC_ERR_INVALID; 212 return MMC_ERR_INVALID;
214 } 213 }
215 214
216 switch(cmd->opcode) { 215 if (flags & MMC_DATA_READ) {
217 case MMC_READ_SINGLE_BLOCK: 216 if (flags & MMC_DATA_MULTI)
218 case SD_APP_SEND_SCR: 217 mmccmd |= SD_CMD_CT_4;
219 mmccmd |= SD_CMD_CT_2; 218 else
220 break; 219 mmccmd |= SD_CMD_CT_2;
221 case MMC_READ_MULTIPLE_BLOCK: 220 } else if (flags & MMC_DATA_WRITE) {
222 mmccmd |= SD_CMD_CT_4; 221 if (flags & MMC_DATA_MULTI)
223 break; 222 mmccmd |= SD_CMD_CT_3;
224 case MMC_WRITE_BLOCK: 223 else
225 mmccmd |= SD_CMD_CT_1; 224 mmccmd |= SD_CMD_CT_1;
226 break;
227
228 case MMC_WRITE_MULTIPLE_BLOCK:
229 mmccmd |= SD_CMD_CT_3;
230 break;
231 case MMC_STOP_TRANSMISSION:
232 mmccmd |= SD_CMD_CT_7;
233 break;
234 } 225 }
235 226
236 au_writel(cmd->arg, HOST_CMDARG(host)); 227 au_writel(cmd->arg, HOST_CMDARG(host));
@@ -665,6 +656,7 @@ static void au1xmmc_request(struct mmc_host* mmc, struct mmc_request* mrq)
665{ 656{
666 657
667 struct au1xmmc_host *host = mmc_priv(mmc); 658 struct au1xmmc_host *host = mmc_priv(mmc);
659 unsigned int flags = 0;
668 int ret = MMC_ERR_NONE; 660 int ret = MMC_ERR_NONE;
669 661
670 WARN_ON(irqs_disabled()); 662 WARN_ON(irqs_disabled());
@@ -677,11 +669,12 @@ static void au1xmmc_request(struct mmc_host* mmc, struct mmc_request* mrq)
677 669
678 if (mrq->data) { 670 if (mrq->data) {
679 FLUSH_FIFO(host); 671 FLUSH_FIFO(host);
672 flags = mrq->data->flags;
680 ret = au1xmmc_prepare_data(host, mrq->data); 673 ret = au1xmmc_prepare_data(host, mrq->data);
681 } 674 }
682 675
683 if (ret == MMC_ERR_NONE) 676 if (ret == MMC_ERR_NONE)
684 ret = au1xmmc_send_command(host, 0, mrq->cmd); 677 ret = au1xmmc_send_command(host, 0, mrq->cmd, flags);
685 678
686 if (ret != MMC_ERR_NONE) { 679 if (ret != MMC_ERR_NONE) {
687 mrq->cmd->error = ret; 680 mrq->cmd->error = ret;
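
Deriving the au1xmmc controller's transfer-type bits from the data flags instead of a switch on the command opcode covers every data-bearing command, not just the five opcodes the old switch listed. The mapping the new code implements, condensed (SD_CMD_CT_* values from the hunk):

    /* read,  single -> SD_CMD_CT_2      read,  multi -> SD_CMD_CT_4
     * write, single -> SD_CMD_CT_1      write, multi -> SD_CMD_CT_3 */
    if (flags & MMC_DATA_READ)
            mmccmd |= (flags & MMC_DATA_MULTI) ? SD_CMD_CT_4 : SD_CMD_CT_2;
    else if (flags & MMC_DATA_WRITE)
            mmccmd |= (flags & MMC_DATA_MULTI) ? SD_CMD_CT_3 : SD_CMD_CT_1;

One behavioural difference is visible in the hunk itself: the old switch also gave MMC_STOP_TRANSMISSION a transfer type (SD_CMD_CT_7); with flags taken from mrq->data, a stop command without a data phase now gets none.
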
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index d97d3864b57f..f8985c508bb9 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -232,20 +232,14 @@ static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
232 /* 232 /*
233 * workaround for erratum #42: 233 * workaround for erratum #42:
234 * Intel PXA27x Family Processor Specification Update Rev 001 234 * Intel PXA27x Family Processor Specification Update Rev 001
235 * A bogus CRC error can appear if the msb of a 136 bit
236 * response is a one.
235 */ 237 */
236 if (cmd->opcode == MMC_ALL_SEND_CID || 238 if (cmd->flags & MMC_RSP_136 && cmd->resp[0] & 0x80000000) {
237 cmd->opcode == MMC_SEND_CSD || 239 pr_debug("ignoring CRC from command %d - *risky*\n", cmd->opcode);
238 cmd->opcode == MMC_SEND_CID) { 240 } else
239 /* a bogus CRC error can appear if the msb of
240 the 15 byte response is a one */
241 if ((cmd->resp[0] & 0x80000000) == 0)
242 cmd->error = MMC_ERR_BADCRC;
243 } else {
244 pr_debug("ignoring CRC from command %d - *risky*\n",cmd->opcode);
245 }
246#else
247 cmd->error = MMC_ERR_BADCRC;
248#endif 241#endif
242 cmd->error = MMC_ERR_BADCRC;
249 } 243 }
250 244
251 pxamci_disable_irq(host, END_CMD_RES); 245 pxamci_disable_irq(host, END_CMD_RES);
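
The erratum #42 workaround is re-keyed from a list of opcodes (ALL_SEND_CID, SEND_CSD, SEND_CID) to the property that actually matters per the new comment: any 136-bit response whose most significant bit is one can yield a bogus CRC error on the PXA27x. The rewrite also untangles the old inverted test; condensed, with the CONFIG guard elided:

    if (cmd->flags & MMC_RSP_136 && cmd->resp[0] & 0x80000000)
            /* erratum #42: MSB set on a 136-bit response, CRC unreliable */
            pr_debug("ignoring CRC from command %d - *risky*\n", cmd->opcode);
    else
            cmd->error = MMC_ERR_BADCRC;
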
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index ff5bf73cdd25..a359efdd77eb 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -963,6 +963,15 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
963 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) 963 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
964 sdhci_transfer_pio(host); 964 sdhci_transfer_pio(host);
965 965
966 /*
967 * We currently don't do anything fancy with DMA
968 * boundaries, but as we can't disable the feature
969 * we need to at least restart the transfer.
970 */
971 if (intmask & SDHCI_INT_DMA_END)
972 writel(readl(host->ioaddr + SDHCI_DMA_ADDRESS),
973 host->ioaddr + SDHCI_DMA_ADDRESS);
974
966 if (intmask & SDHCI_INT_DATA_END) 975 if (intmask & SDHCI_INT_DATA_END)
967 sdhci_finish_data(host); 976 sdhci_finish_data(host);
968 } 977 }
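
The added sdhci hunk handles the controller's DMA boundary interrupt: SDHCI controllers pause a DMA transfer and raise SDHCI_INT_DMA_END whenever the transfer crosses the programmed address boundary, and writing the system address register resumes the transfer from the written address. Since the driver does nothing special at boundaries, it reads the stop address back out and writes it straight back in:

    /* Boundary reached: resume DMA from where the controller stopped. */
    if (intmask & SDHCI_INT_DMA_END)
            writel(readl(host->ioaddr + SDHCI_DMA_ADDRESS),
                   host->ioaddr + SDHCI_DMA_ADDRESS);
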
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index fb99cd445504..c5baa197bc08 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2508,6 +2508,7 @@ config MLX4_CORE
2508 2508
2509config MLX4_DEBUG 2509config MLX4_DEBUG
2510 bool "Verbose debugging output" if (MLX4_CORE && EMBEDDED) 2510 bool "Verbose debugging output" if (MLX4_CORE && EMBEDDED)
2511 depends on MLX4_CORE
2511 default y 2512 default y
2512 ---help--- 2513 ---help---
2513 This option causes debugging code to be compiled into the 2514 This option causes debugging code to be compiled into the
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 4debb024eaf9..20b8c0d3ced4 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -542,8 +542,6 @@ static int __devinit mlx4_setup_hca(struct mlx4_dev *dev)
542 struct mlx4_priv *priv = mlx4_priv(dev); 542 struct mlx4_priv *priv = mlx4_priv(dev);
543 int err; 543 int err;
544 544
545 MLX4_INIT_DOORBELL_LOCK(&priv->doorbell_lock);
546
547 err = mlx4_init_uar_table(dev); 545 err = mlx4_init_uar_table(dev);
548 if (err) { 546 if (err) {
549 mlx4_err(dev, "Failed to initialize " 547 mlx4_err(dev, "Failed to initialize "
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 9befbae3d196..3d3b6d24d8d3 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -275,7 +275,6 @@ struct mlx4_priv {
275 275
276 struct mlx4_uar driver_uar; 276 struct mlx4_uar driver_uar;
277 void __iomem *kar; 277 void __iomem *kar;
278 MLX4_DECLARE_DOORBELL_LOCK(doorbell_lock)
279 278
280 u32 rev_id; 279 u32 rev_id;
281 char board_id[MLX4_BOARD_ID_LEN]; 280 char board_id[MLX4_BOARD_ID_LEN];
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
index 8410587348f1..178155bf9db6 100644
--- a/drivers/sbus/char/bbc_i2c.c
+++ b/drivers/sbus/char/bbc_i2c.c
@@ -18,6 +18,7 @@
18#include <asm/ebus.h> 18#include <asm/ebus.h>
19#include <asm/spitfire.h> 19#include <asm/spitfire.h>
20#include <asm/bbc.h> 20#include <asm/bbc.h>
21#include <asm/io.h>
21 22
22#include "bbc_i2c.h" 23#include "bbc_i2c.h"
23 24
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 2d14a29effe4..3279a1b6501d 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -20,6 +20,7 @@
20#include <asm/ebus.h> /* EBus device */ 20#include <asm/ebus.h> /* EBus device */
21#include <asm/oplib.h> /* OpenProm Library */ 21#include <asm/oplib.h> /* OpenProm Library */
22#include <asm/uaccess.h> /* put_/get_user */ 22#include <asm/uaccess.h> /* put_/get_user */
23#include <asm/io.h>
23 24
24#include <asm/display7seg.h> 25#include <asm/display7seg.h>
25 26
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index e62d23f65180..d28c14e23c32 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1757,6 +1757,14 @@ config SCSI_ESP_CORE
1757 tristate "ESP Scsi Driver Core" 1757 tristate "ESP Scsi Driver Core"
1758 depends on SCSI 1758 depends on SCSI
1759 select SCSI_SPI_ATTRS 1759 select SCSI_SPI_ATTRS
1760 help
1761 This is a core driver for NCR53c9x based scsi chipsets,
1762 also known as "ESP" for Emulex Scsi Processor or
1763 Enhanced Scsi Processor. This driver does not exist by
1764 itself, there are front-end drivers which, when enabled,
1765 select and enable this driver. One example is SCSI_SUNESP.
1766 These front-end drivers provide probing, DMA, and register
1767 access support for the core driver.
1760 1768
1761config SCSI_SUNESP 1769config SCSI_SUNESP
1762 tristate "Sparc ESP Scsi Driver" 1770 tristate "Sparc ESP Scsi Driver"
diff --git a/drivers/serial/sunhv.c b/drivers/serial/sunhv.c
index 40d48566215c..c3a6bd2e7950 100644
--- a/drivers/serial/sunhv.c
+++ b/drivers/serial/sunhv.c
@@ -493,6 +493,10 @@ static struct of_device_id hv_match[] = {
 		.name = "console",
 		.compatible = "qcn",
 	},
+	{
+		.name = "console",
+		.compatible = "SUNW,sun4v-console",
+	},
 	{},
 };
 MODULE_DEVICE_TABLE(of, hv_match);
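The added table entry lets the driver bind to sun4v console nodes that advertise the "SUNW,sun4v-console" compatible string instead of "qcn". As a hedged sketch of the matching rule (every field an of_device_id entry specifies must match the firmware node; the function below is illustrative, not the kernel's matcher):

static int of_entry_matches(const struct of_device_id *m,
			    const char *name, const char *compat)
{
	if (m->name[0] && strcmp(m->name, name))
		return 0;		/* entry names a node; it differs */
	if (m->compatible[0] && strcmp(m->compatible, compat))
		return 0;		/* compatible string differs */
	return 1;			/* all specified fields matched */
}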
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index eebcb708cff1..4d7485fa553f 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1535,7 +1535,7 @@ config FB_LEO
 
 config FB_XVR500
 	bool "Sun XVR-500 3DLABS Wildcat support"
-	depends on FB && PCI && SPARC64
+	depends on (FB = y) && PCI && SPARC64
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
@@ -1548,7 +1548,7 @@ config FB_XVR500
 
 config FB_XVR2500
 	bool "Sun XVR-2500 3DLABS Wildcat support"
-	depends on FB && PCI && SPARC64
+	depends on (FB = y) && PCI && SPARC64
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
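Both options above are bool, so when enabled the driver object is always linked into vmlinux. A hedged illustration of why "depends on FB" alone is not enough: with FB=m the framebuffer core's symbols live in fb.ko, and a built-in caller would be unresolved at vmlinux link time; "(FB = y)" forbids exactly that combination. The snippet below is a link-time demonstration only, not patch code:

struct fb_info;
/* fb core symbol; in fb.ko when FB=m, in vmlinux when FB=y */
extern int register_framebuffer(struct fb_info *info);

static int __init xvr_init_sketch(void)
{
	/* unresolved at vmlinux link time if FB=m and this file is built in */
	return register_framebuffer(/* info */ (struct fb_info *) 0);
}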
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 1aad34ea61a4..0b73cd45a06d 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1,6 +1,6 @@
 /*
- * fs/eventpoll.c ( Efficent event polling implementation )
- * Copyright (C) 2001,...,2006	 Davide Libenzi
+ * fs/eventpoll.c (Efficent event polling implementation)
+ * Copyright (C) 2001,...,2007	 Davide Libenzi
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -26,7 +26,6 @@
 #include <linux/hash.h>
 #include <linux/spinlock.h>
 #include <linux/syscalls.h>
-#include <linux/rwsem.h>
 #include <linux/rbtree.h>
 #include <linux/wait.h>
 #include <linux/eventpoll.h>
@@ -39,15 +38,14 @@
 #include <asm/io.h>
 #include <asm/mman.h>
 #include <asm/atomic.h>
-#include <asm/semaphore.h>
 
 /*
  * LOCKING:
  * There are three level of locking required by epoll :
  *
  * 1) epmutex (mutex)
- * 2) ep->sem (rw_semaphore)
- * 3) ep->lock (rw_lock)
+ * 2) ep->mtx (mutex)
+ * 3) ep->lock (spinlock)
  *
  * The acquire order is the one listed above, from 1 to 3.
  * We need a spinlock (ep->lock) because we manipulate objects
@@ -57,20 +55,20 @@
  * a spinlock. During the event transfer loop (from kernel to
  * user space) we could end up sleeping due a copy_to_user(), so
  * we need a lock that will allow us to sleep. This lock is a
- * read-write semaphore (ep->sem). It is acquired on read during
- * the event transfer loop and in write during epoll_ctl(EPOLL_CTL_DEL)
- * and during eventpoll_release_file(). Then we also need a global
- * semaphore to serialize eventpoll_release_file() and ep_free().
- * This semaphore is acquired by ep_free() during the epoll file
+ * mutex (ep->mtx). It is acquired during the event transfer loop,
+ * during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file().
+ * Then we also need a global mutex to serialize eventpoll_release_file()
+ * and ep_free().
+ * This mutex is acquired by ep_free() during the epoll file
  * cleanup path and it is also acquired by eventpoll_release_file()
  * if a file has been pushed inside an epoll set and it is then
  * close()d without a previous call toepoll_ctl(EPOLL_CTL_DEL).
- * It is possible to drop the "ep->sem" and to use the global
- * semaphore "epmutex" (together with "ep->lock") to have it working,
- * but having "ep->sem" will make the interface more scalable.
+ * It is possible to drop the "ep->mtx" and to use the global
+ * mutex "epmutex" (together with "ep->lock") to have it working,
+ * but having "ep->mtx" will make the interface more scalable.
  * Events that require holding "epmutex" are very rare, while for
- * normal operations the epoll private "ep->sem" will guarantee
- * a greater scalability.
+ * normal operations the epoll private "ep->mtx" will guarantee
+ * a better scalability.
  */
 
 #define DEBUG_EPOLL 0
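The rewritten comment block above defines the new lock hierarchy. A hedged sketch (not part of the patch) of the documented nesting when all three levels are needed:

static void lock_order_sketch(struct eventpoll *ep)
{
	unsigned long flags;

	mutex_lock(&epmutex);			/* 1: global, rarely taken */
	mutex_lock(&ep->mtx);			/* 2: per-instance, may sleep */
	spin_lock_irqsave(&ep->lock, flags);	/* 3: short, callback-safe */
	/* ... manipulate ep->rdllist / ep->ovflist here ... */
	spin_unlock_irqrestore(&ep->lock, flags);
	mutex_unlock(&ep->mtx);
	mutex_unlock(&epmutex);
}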
@@ -102,6 +100,8 @@
 
 #define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))
 
+#define EP_UNACTIVE_PTR ((void *) -1L)
+
 struct epoll_filefd {
 	struct file *file;
 	int fd;
@@ -111,7 +111,7 @@ struct epoll_filefd {
  * Node that is linked into the "wake_task_list" member of the "struct poll_safewake".
  * It is used to keep track on all tasks that are currently inside the wake_up() code
  * to 1) short-circuit the one coming from the same task and same wait queue head
- * ( loop ) 2) allow a maximum number of epoll descriptors inclusion nesting
+ * (loop) 2) allow a maximum number of epoll descriptors inclusion nesting
  * 3) let go the ones coming from other tasks.
  */
 struct wake_task_node {
@@ -130,21 +130,57 @@ struct poll_safewake {
 };
 
 /*
+ * Each file descriptor added to the eventpoll interface will
+ * have an entry of this type linked to the "rbr" RB tree.
+ */
+struct epitem {
+	/* RB tree node used to link this structure to the eventpoll RB tree */
+	struct rb_node rbn;
+
+	/* List header used to link this structure to the eventpoll ready list */
+	struct list_head rdllink;
+
+	/*
+	 * Works together "struct eventpoll"->ovflist in keeping the
+	 * single linked chain of items.
+	 */
+	struct epitem *next;
+
+	/* The file descriptor information this item refers to */
+	struct epoll_filefd ffd;
+
+	/* Number of active wait queue attached to poll operations */
+	int nwait;
+
+	/* List containing poll wait queues */
+	struct list_head pwqlist;
+
+	/* The "container" of this item */
+	struct eventpoll *ep;
+
+	/* List header used to link this item to the "struct file" items list */
+	struct list_head fllink;
+
+	/* The structure that describe the interested events and the source fd */
+	struct epoll_event event;
+};
+
+/*
  * This structure is stored inside the "private_data" member of the file
  * structure and rapresent the main data sructure for the eventpoll
  * interface.
  */
 struct eventpoll {
 	/* Protect the this structure access */
-	rwlock_t lock;
+	spinlock_t lock;
 
 	/*
-	 * This semaphore is used to ensure that files are not removed
-	 * while epoll is using them. This is read-held during the event
-	 * collection loop and it is write-held during the file cleanup
-	 * path, the epoll file exit code and the ctl operations.
+	 * This mutex is used to ensure that files are not removed
+	 * while epoll is using them. This is held during the event
+	 * collection loop, the file cleanup path, the epoll file exit
+	 * code and the ctl operations.
 	 */
-	struct rw_semaphore sem;
+	struct mutex mtx;
 
 	/* Wait queue used by sys_epoll_wait() */
 	wait_queue_head_t wq;
@@ -155,8 +191,15 @@ struct eventpoll {
 	/* List of ready file descriptors */
 	struct list_head rdllist;
 
-	/* RB-Tree root used to store monitored fd structs */
+	/* RB tree root used to store monitored fd structs */
 	struct rb_root rbr;
+
+	/*
+	 * This is a single linked list that chains all the "struct epitem" that
+	 * happened while transfering ready events to userspace w/out
+	 * holding ->lock.
+	 */
+	struct epitem *ovflist;
 };
 
 /* Wait structure used by the poll hooks */
@@ -177,42 +220,6 @@ struct eppoll_entry {
 	wait_queue_head_t *whead;
 };
 
-/*
- * Each file descriptor added to the eventpoll interface will
- * have an entry of this type linked to the "rbr" RB tree.
- */
-struct epitem {
-	/* RB-Tree node used to link this structure to the eventpoll rb-tree */
-	struct rb_node rbn;
-
-	/* List header used to link this structure to the eventpoll ready list */
-	struct list_head rdllink;
-
-	/* The file descriptor information this item refers to */
-	struct epoll_filefd ffd;
-
-	/* Number of active wait queue attached to poll operations */
-	int nwait;
-
-	/* List containing poll wait queues */
-	struct list_head pwqlist;
-
-	/* The "container" of this item */
-	struct eventpoll *ep;
-
-	/* The structure that describe the interested events and the source fd */
-	struct epoll_event event;
-
-	/*
-	 * Used to keep track of the usage count of the structure. This avoids
-	 * that the structure will desappear from underneath our processing.
-	 */
-	atomic_t usecnt;
-
-	/* List header used to link this item to the "struct file" items list */
-	struct list_head fllink;
-};
-
 /* Wrapper struct used by poll queueing */
 struct ep_pqueue {
 	poll_table pt;
@@ -220,7 +227,7 @@ struct ep_pqueue {
 };
 
 /*
- * This semaphore is used to serialize ep_free() and eventpoll_release_file().
+ * This mutex is used to serialize ep_free() and eventpoll_release_file().
 */
 static struct mutex epmutex;
 
@@ -234,7 +241,7 @@ static struct kmem_cache *epi_cache __read_mostly;
 static struct kmem_cache *pwq_cache __read_mostly;
 
 
-/* Setup the structure that is used as key for the rb-tree */
+/* Setup the structure that is used as key for the RB tree */
 static inline void ep_set_ffd(struct epoll_filefd *ffd,
 			      struct file *file, int fd)
 {
@@ -242,7 +249,7 @@ static inline void ep_set_ffd(struct epoll_filefd *ffd,
 	ffd->fd = fd;
 }
 
-/* Compare rb-tree keys */
+/* Compare RB tree keys */
 static inline int ep_cmp_ffd(struct epoll_filefd *p1,
 			     struct epoll_filefd *p2)
 {
@@ -250,20 +257,20 @@ static inline int ep_cmp_ffd(struct epoll_filefd *p1,
 		(p1->file < p2->file ? -1 : p1->fd - p2->fd));
 }
 
-/* Special initialization for the rb-tree node to detect linkage */
+/* Special initialization for the RB tree node to detect linkage */
 static inline void ep_rb_initnode(struct rb_node *n)
 {
 	rb_set_parent(n, n);
 }
 
-/* Removes a node from the rb-tree and marks it for a fast is-linked check */
+/* Removes a node from the RB tree and marks it for a fast is-linked check */
 static inline void ep_rb_erase(struct rb_node *n, struct rb_root *r)
 {
 	rb_erase(n, r);
 	rb_set_parent(n, n);
 }
 
-/* Fast check to verify that the item is linked to the main rb-tree */
+/* Fast check to verify that the item is linked to the main RB tree */
 static inline int ep_rb_linked(struct rb_node *n)
 {
 	return rb_parent(n) != n;
@@ -381,78 +388,11 @@ static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
 }
 
 /*
- * Unlink the "struct epitem" from all places it might have been hooked up.
- * This function must be called with write IRQ lock on "ep->lock".
- */
-static int ep_unlink(struct eventpoll *ep, struct epitem *epi)
-{
-	int error;
-
-	/*
-	 * It can happen that this one is called for an item already unlinked.
-	 * The check protect us from doing a double unlink ( crash ).
-	 */
-	error = -ENOENT;
-	if (!ep_rb_linked(&epi->rbn))
-		goto error_return;
-
-	/*
-	 * Clear the event mask for the unlinked item. This will avoid item
-	 * notifications to be sent after the unlink operation from inside
-	 * the kernel->userspace event transfer loop.
-	 */
-	epi->event.events = 0;
-
-	/*
-	 * At this point is safe to do the job, unlink the item from our rb-tree.
-	 * This operation togheter with the above check closes the door to
-	 * double unlinks.
-	 */
-	ep_rb_erase(&epi->rbn, &ep->rbr);
-
-	/*
-	 * If the item we are going to remove is inside the ready file descriptors
-	 * we want to remove it from this list to avoid stale events.
-	 */
-	if (ep_is_linked(&epi->rdllink))
-		list_del_init(&epi->rdllink);
-
-	error = 0;
-error_return:
-
-	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_unlink(%p, %p) = %d\n",
-		     current, ep, epi->ffd.file, error));
-
-	return error;
-}
-
-/*
- * Increment the usage count of the "struct epitem" making it sure
- * that the user will have a valid pointer to reference.
- */
-static void ep_use_epitem(struct epitem *epi)
-{
-	atomic_inc(&epi->usecnt);
-}
-
-/*
- * Decrement ( release ) the usage count by signaling that the user
- * has finished using the structure. It might lead to freeing the
- * structure itself if the count goes to zero.
- */
-static void ep_release_epitem(struct epitem *epi)
-{
-	if (atomic_dec_and_test(&epi->usecnt))
-		kmem_cache_free(epi_cache, epi);
-}
-
-/*
  * Removes a "struct epitem" from the eventpoll RB tree and deallocates
- * all the associated resources.
+ * all the associated resources. Must be called with "mtx" held.
 */
 static int ep_remove(struct eventpoll *ep, struct epitem *epi)
 {
-	int error;
 	unsigned long flags;
 	struct file *file = epi->ffd.file;
 
@@ -472,26 +412,21 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
 	list_del_init(&epi->fllink);
 	spin_unlock(&file->f_ep_lock);
 
-	/* We need to acquire the write IRQ lock before calling ep_unlink() */
-	write_lock_irqsave(&ep->lock, flags);
-
-	/* Really unlink the item from the RB tree */
-	error = ep_unlink(ep, epi);
-
-	write_unlock_irqrestore(&ep->lock, flags);
+	if (ep_rb_linked(&epi->rbn))
+		ep_rb_erase(&epi->rbn, &ep->rbr);
 
-	if (error)
-		goto error_return;
+	spin_lock_irqsave(&ep->lock, flags);
+	if (ep_is_linked(&epi->rdllink))
+		list_del_init(&epi->rdllink);
+	spin_unlock_irqrestore(&ep->lock, flags);
 
 	/* At this point it is safe to free the eventpoll item */
-	ep_release_epitem(epi);
+	kmem_cache_free(epi_cache, epi);
 
-	error = 0;
-error_return:
-	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_remove(%p, %p) = %d\n",
-		     current, ep, file, error));
+	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_remove(%p, %p)\n",
		     current, ep, file));
 
-	return error;
+	return 0;
 }
 
 static void ep_free(struct eventpoll *ep)
@@ -506,7 +441,7 @@ static void ep_free(struct eventpoll *ep)
 	/*
 	 * We need to lock this because we could be hit by
 	 * eventpoll_release_file() while we're freeing the "struct eventpoll".
-	 * We do not need to hold "ep->sem" here because the epoll file
+	 * We do not need to hold "ep->mtx" here because the epoll file
 	 * is on the way to be removed and no one has references to it
 	 * anymore. The only hit might come from eventpoll_release_file() but
 	 * holding "epmutex" is sufficent here.
@@ -525,7 +460,7 @@ static void ep_free(struct eventpoll *ep)
 	/*
 	 * Walks through the whole tree by freeing each "struct epitem". At this
 	 * point we are sure no poll callbacks will be lingering around, and also by
-	 * write-holding "sem" we can be sure that no file cleanup code will hit
+	 * holding "epmutex" we can be sure that no file cleanup code will hit
 	 * us during this operation. So we can avoid the lock on "ep->lock".
 	 */
 	while ((rbp = rb_first(&ep->rbr)) != 0) {
@@ -534,16 +469,16 @@ static void ep_free(struct eventpoll *ep)
 	}
 
 	mutex_unlock(&epmutex);
+	mutex_destroy(&ep->mtx);
+	kfree(ep);
 }
 
 static int ep_eventpoll_release(struct inode *inode, struct file *file)
 {
 	struct eventpoll *ep = file->private_data;
 
-	if (ep) {
+	if (ep)
 		ep_free(ep);
-		kfree(ep);
-	}
 
 	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: close() ep=%p\n", current, ep));
 	return 0;
@@ -559,10 +494,10 @@ static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
 	poll_wait(file, &ep->poll_wait, wait);
 
 	/* Check our condition */
-	read_lock_irqsave(&ep->lock, flags);
+	spin_lock_irqsave(&ep->lock, flags);
 	if (!list_empty(&ep->rdllist))
 		pollflags = POLLIN | POLLRDNORM;
-	read_unlock_irqrestore(&ep->lock, flags);
+	spin_unlock_irqrestore(&ep->lock, flags);
 
 	return pollflags;
 }
@@ -594,9 +529,11 @@ void eventpoll_release_file(struct file *file)
 	 * We don't want to get "file->f_ep_lock" because it is not
 	 * necessary. It is not necessary because we're in the "struct file"
 	 * cleanup path, and this means that noone is using this file anymore.
-	 * The only hit might come from ep_free() but by holding the semaphore
+	 * So, for example, epoll_ctl() cannot hit here sicne if we reach this
+	 * point, the file counter already went to zero and fget() would fail.
+	 * The only hit might come from ep_free() but by holding the mutex
 	 * will correctly serialize the operation. We do need to acquire
-	 * "ep->sem" after "epmutex" because ep_remove() requires it when called
+	 * "ep->mtx" after "epmutex" because ep_remove() requires it when called
 	 * from anywhere but ep_free().
 	 */
 	mutex_lock(&epmutex);
@@ -606,9 +543,9 @@ void eventpoll_release_file(struct file *file)
 
 		ep = epi->ep;
 		list_del_init(&epi->fllink);
-		down_write(&ep->sem);
+		mutex_lock(&ep->mtx);
 		ep_remove(ep, epi);
-		up_write(&ep->sem);
+		mutex_unlock(&ep->mtx);
 	}
 
 	mutex_unlock(&epmutex);
@@ -621,12 +558,13 @@ static int ep_alloc(struct eventpoll **pep)
 	if (!ep)
 		return -ENOMEM;
 
-	rwlock_init(&ep->lock);
-	init_rwsem(&ep->sem);
+	spin_lock_init(&ep->lock);
+	mutex_init(&ep->mtx);
 	init_waitqueue_head(&ep->wq);
 	init_waitqueue_head(&ep->poll_wait);
 	INIT_LIST_HEAD(&ep->rdllist);
 	ep->rbr = RB_ROOT;
+	ep->ovflist = EP_UNACTIVE_PTR;
 
 	*pep = ep;
 
@@ -636,20 +574,18 @@ static int ep_alloc(struct eventpoll **pep)
 }
 
 /*
- * Search the file inside the eventpoll tree. It add usage count to
- * the returned item, so the caller must call ep_release_epitem()
- * after finished using the "struct epitem".
+ * Search the file inside the eventpoll tree. The RB tree operations
+ * are protected by the "mtx" mutex, and ep_find() must be called with
+ * "mtx" held.
 */
 static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
 {
 	int kcmp;
-	unsigned long flags;
 	struct rb_node *rbp;
 	struct epitem *epi, *epir = NULL;
 	struct epoll_filefd ffd;
 
 	ep_set_ffd(&ffd, file, fd);
-	read_lock_irqsave(&ep->lock, flags);
 	for (rbp = ep->rbr.rb_node; rbp; ) {
 		epi = rb_entry(rbp, struct epitem, rbn);
 		kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
@@ -658,12 +594,10 @@ static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
 		else if (kcmp < 0)
 			rbp = rbp->rb_left;
 		else {
-			ep_use_epitem(epi);
 			epir = epi;
 			break;
 		}
 	}
-	read_unlock_irqrestore(&ep->lock, flags);
 
 	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_find(%p) -> %p\n",
 		     current, file, epir));
@@ -686,7 +620,7 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k
 	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n",
 		     current, epi->ffd.file, epi, ep));
 
-	write_lock_irqsave(&ep->lock, flags);
+	spin_lock_irqsave(&ep->lock, flags);
 
 	/*
 	 * If the event mask does not contain any poll(2) event, we consider the
@@ -695,7 +629,21 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k
 	 * until the next EPOLL_CTL_MOD will be issued.
 	 */
 	if (!(epi->event.events & ~EP_PRIVATE_BITS))
-		goto is_disabled;
+		goto out_unlock;
+
+	/*
+	 * If we are trasfering events to userspace, we can hold no locks
+	 * (because we're accessing user memory, and because of linux f_op->poll()
+	 * semantics). All the events that happens during that period of time are
+	 * chained in ep->ovflist and requeued later on.
+	 */
+	if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) {
+		if (epi->next == EP_UNACTIVE_PTR) {
+			epi->next = ep->ovflist;
+			ep->ovflist = epi;
+		}
+		goto out_unlock;
+	}
 
 	/* If this file is already in the ready list we exit soon */
 	if (ep_is_linked(&epi->rdllink))
@@ -714,8 +662,8 @@ is_linked:
 	if (waitqueue_active(&ep->poll_wait))
 		pwake++;
 
-is_disabled:
-	write_unlock_irqrestore(&ep->lock, flags);
+out_unlock:
+	spin_unlock_irqrestore(&ep->lock, flags);
 
 	/* We have to call this outside the lock */
 	if (pwake)
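The callback-side protocol added above can be read as a hedged sketch like this: epi->next == EP_UNACTIVE_PTR means "not on the overflow chain", ep->ovflist == NULL means "a transfer loop is running, divert events here", and ep->ovflist == EP_UNACTIVE_PTR means "no transfer running, queue on rdllist as usual". Since every push happens under ep->lock, a plain LIFO link suffices:

static void ovflist_push_sketch(struct eventpoll *ep, struct epitem *epi)
{
	/* caller holds ep->lock and a transfer is active (ovflist != EP_UNACTIVE_PTR) */
	if (epi->next == EP_UNACTIVE_PTR) {	/* not chained yet */
		epi->next = ep->ovflist;
		ep->ovflist = epi;		/* push in front */
	}
}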
@@ -766,6 +714,9 @@ static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
 	rb_insert_color(&epi->rbn, &ep->rbr);
 }
 
+/*
+ * Must be called with "mtx" held.
+ */
 static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 		     struct file *tfile, int fd)
 {
@@ -786,8 +737,8 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 	epi->ep = ep;
 	ep_set_ffd(&epi->ffd, tfile, fd);
 	epi->event = *event;
-	atomic_set(&epi->usecnt, 1);
 	epi->nwait = 0;
+	epi->next = EP_UNACTIVE_PTR;
 
 	/* Initialize the poll table using the queue callback */
 	epq.epi = epi;
@@ -796,7 +747,9 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 	/*
 	 * Attach the item to the poll hooks and get current event bits.
 	 * We can safely use the file* here because its usage count has
-	 * been increased by the caller of this function.
+	 * been increased by the caller of this function. Note that after
+	 * this operation completes, the poll callback can start hitting
+	 * the new item.
 	 */
 	revents = tfile->f_op->poll(tfile, &epq.pt);
 
@@ -813,12 +766,15 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 	list_add_tail(&epi->fllink, &tfile->f_ep_links);
 	spin_unlock(&tfile->f_ep_lock);
 
-	/* We have to drop the new item inside our item list to keep track of it */
-	write_lock_irqsave(&ep->lock, flags);
-
-	/* Add the current item to the rb-tree */
+	/*
+	 * Add the current item to the RB tree. All RB tree operations are
+	 * protected by "mtx", and ep_insert() is called with "mtx" held.
+	 */
 	ep_rbtree_insert(ep, epi);
 
+	/* We have to drop the new item inside our item list to keep track of it */
+	spin_lock_irqsave(&ep->lock, flags);
+
 	/* If the file is already "ready" we drop it inside the ready list */
 	if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) {
 		list_add_tail(&epi->rdllink, &ep->rdllist);
@@ -830,7 +786,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 		pwake++;
 	}
 
-	write_unlock_irqrestore(&ep->lock, flags);
+	spin_unlock_irqrestore(&ep->lock, flags);
 
 	/* We have to call this outside the lock */
 	if (pwake)
@@ -846,12 +802,14 @@ error_unregister:
 
 	/*
 	 * We need to do this because an event could have been arrived on some
-	 * allocated wait queue.
+	 * allocated wait queue. Note that we don't care about the ep->ovflist
+	 * list, since that is used/cleaned only inside a section bound by "mtx".
+	 * And ep_insert() is called with "mtx" held.
 	 */
-	write_lock_irqsave(&ep->lock, flags);
+	spin_lock_irqsave(&ep->lock, flags);
 	if (ep_is_linked(&epi->rdllink))
 		list_del_init(&epi->rdllink);
-	write_unlock_irqrestore(&ep->lock, flags);
+	spin_unlock_irqrestore(&ep->lock, flags);
 
 	kmem_cache_free(epi_cache, epi);
error_return:
@@ -860,7 +818,7 @@ error_return:
 
 /*
  * Modify the interest event mask by dropping an event if the new mask
- * has a match in the current file status.
+ * has a match in the current file status. Must be called with "mtx" held.
 */
 static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event)
 {
@@ -882,36 +840,28 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
 	 */
 	revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);
 
-	write_lock_irqsave(&ep->lock, flags);
+	spin_lock_irqsave(&ep->lock, flags);
 
 	/* Copy the data member from inside the lock */
 	epi->event.data = event->data;
 
 	/*
-	 * If the item is not linked to the RB tree it means that it's on its
-	 * way toward the removal. Do nothing in this case.
+	 * If the item is "hot" and it is not registered inside the ready
+	 * list, push it inside.
 	 */
-	if (ep_rb_linked(&epi->rbn)) {
-		/*
-		 * If the item is "hot" and it is not registered inside the ready
-		 * list, push it inside. If the item is not "hot" and it is currently
-		 * registered inside the ready list, unlink it.
-		 */
-		if (revents & event->events) {
-			if (!ep_is_linked(&epi->rdllink)) {
-				list_add_tail(&epi->rdllink, &ep->rdllist);
-
-				/* Notify waiting tasks that events are available */
-				if (waitqueue_active(&ep->wq))
-					__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
-							 TASK_INTERRUPTIBLE);
-				if (waitqueue_active(&ep->poll_wait))
-					pwake++;
-			}
+	if (revents & event->events) {
+		if (!ep_is_linked(&epi->rdllink)) {
+			list_add_tail(&epi->rdllink, &ep->rdllist);
+
+			/* Notify waiting tasks that events are available */
+			if (waitqueue_active(&ep->wq))
+				__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
+						 TASK_INTERRUPTIBLE);
+			if (waitqueue_active(&ep->poll_wait))
+				pwake++;
 		}
 	}
-
-	write_unlock_irqrestore(&ep->lock, flags);
+	spin_unlock_irqrestore(&ep->lock, flags);
 
 	/* We have to call this outside the lock */
 	if (pwake)
@@ -920,36 +870,50 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
 	return 0;
 }
 
-/*
- * This function is called without holding the "ep->lock" since the call to
- * __copy_to_user() might sleep, and also f_op->poll() might reenable the IRQ
- * because of the way poll() is traditionally implemented in Linux.
- */
-static int ep_send_events(struct eventpoll *ep, struct list_head *txlist,
-			  struct epoll_event __user *events, int maxevents)
+static int ep_send_events(struct eventpoll *ep, struct epoll_event __user *events,
+			  int maxevents)
 {
 	int eventcnt, error = -EFAULT, pwake = 0;
 	unsigned int revents;
 	unsigned long flags;
-	struct epitem *epi;
-	struct list_head injlist;
+	struct epitem *epi, *nepi;
+	struct list_head txlist;
+
+	INIT_LIST_HEAD(&txlist);
+
+	/*
+	 * We need to lock this because we could be hit by
+	 * eventpoll_release_file() and epoll_ctl(EPOLL_CTL_DEL).
+	 */
+	mutex_lock(&ep->mtx);
 
-	INIT_LIST_HEAD(&injlist);
+	/*
+	 * Steal the ready list, and re-init the original one to the
+	 * empty list. Also, set ep->ovflist to NULL so that events
+	 * happening while looping w/out locks, are not lost. We cannot
+	 * have the poll callback to queue directly on ep->rdllist,
+	 * because we are doing it in the loop below, in a lockless way.
+	 */
+	spin_lock_irqsave(&ep->lock, flags);
+	list_splice(&ep->rdllist, &txlist);
+	INIT_LIST_HEAD(&ep->rdllist);
+	ep->ovflist = NULL;
+	spin_unlock_irqrestore(&ep->lock, flags);
 
 	/*
 	 * We can loop without lock because this is a task private list.
 	 * We just splice'd out the ep->rdllist in ep_collect_ready_items().
-	 * Items cannot vanish during the loop because we are holding "sem" in
-	 * read.
+	 * Items cannot vanish during the loop because we are holding "mtx".
 	 */
-	for (eventcnt = 0; !list_empty(txlist) && eventcnt < maxevents;) {
-		epi = list_first_entry(txlist, struct epitem, rdllink);
-		prefetch(epi->rdllink.next);
+	for (eventcnt = 0; !list_empty(&txlist) && eventcnt < maxevents;) {
+		epi = list_first_entry(&txlist, struct epitem, rdllink);
+
+		list_del_init(&epi->rdllink);
 
 		/*
 		 * Get the ready file event set. We can safely use the file
-		 * because we are holding the "sem" in read and this will
-		 * guarantee that both the file and the item will not vanish.
+		 * because we are holding the "mtx" and this will guarantee
+		 * that both the file and the item will not vanish.
 		 */
 		revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);
 		revents &= epi->event.events;
@@ -957,8 +921,8 @@ static int ep_send_events(struct eventpoll *ep, struct list_head *txlist,
 		/*
 		 * Is the event mask intersect the caller-requested one,
 		 * deliver the event to userspace. Again, we are holding
-		 * "sem" in read, so no operations coming from userspace
-		 * can change the item.
+		 * "mtx", so no operations coming from userspace can change
+		 * the item.
 		 */
 		if (revents) {
 			if (__put_user(revents,
@@ -970,59 +934,59 @@ static int ep_send_events(struct eventpoll *ep, struct list_head *txlist,
 			epi->event.events &= EP_PRIVATE_BITS;
 			eventcnt++;
 		}
-
 		/*
-		 * This is tricky. We are holding the "sem" in read, and this
-		 * means that the operations that can change the "linked" status
-		 * of the epoll item (epi->rbn and epi->rdllink), cannot touch
-		 * them. Also, since we are "linked" from a epi->rdllink POV
-		 * (the item is linked to our transmission list we just
-		 * spliced), the ep_poll_callback() cannot touch us either,
-		 * because of the check present in there. Another parallel
-		 * epoll_wait() will not get the same result set, since we
-		 * spliced the ready list before. Note that list_del() still
-		 * shows the item as linked to the test in ep_poll_callback().
+		 * At this point, noone can insert into ep->rdllist besides
+		 * us. The epoll_ctl() callers are locked out by us holding
+		 * "mtx" and the poll callback will queue them in ep->ovflist.
 		 */
-		list_del(&epi->rdllink);
 		if (!(epi->event.events & EPOLLET) &&
 		    (revents & epi->event.events))
-			list_add_tail(&epi->rdllink, &injlist);
-		else {
-			/*
-			 * Be sure the item is totally detached before re-init
-			 * the list_head. After INIT_LIST_HEAD() is committed,
-			 * the ep_poll_callback() can requeue the item again,
-			 * but we don't care since we are already past it.
-			 */
-			smp_mb();
-			INIT_LIST_HEAD(&epi->rdllink);
-		}
+			list_add_tail(&epi->rdllink, &ep->rdllist);
 	}
 	error = 0;
 
-	errxit:
+errxit:
 
+	spin_lock_irqsave(&ep->lock, flags);
 	/*
-	 * If the re-injection list or the txlist are not empty, re-splice
-	 * them to the ready list and do proper wakeups.
+	 * During the time we spent in the loop above, some other events
+	 * might have been queued by the poll callback. We re-insert them
+	 * here (in case they are not already queued, or they're one-shot).
 	 */
-	if (!list_empty(&injlist) || !list_empty(txlist)) {
-		write_lock_irqsave(&ep->lock, flags);
+	for (nepi = ep->ovflist; (epi = nepi) != NULL;
+	     nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
+		if (!ep_is_linked(&epi->rdllink) &&
+		    (epi->event.events & ~EP_PRIVATE_BITS))
+			list_add_tail(&epi->rdllink, &ep->rdllist);
+	}
+	/*
+	 * We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after
+	 * releasing the lock, events will be queued in the normal way inside
+	 * ep->rdllist.
+	 */
+	ep->ovflist = EP_UNACTIVE_PTR;
+
+	/*
+	 * In case of error in the event-send loop, or in case the number of
+	 * ready events exceeds the userspace limit, we need to splice the
+	 * "txlist" back inside ep->rdllist.
+	 */
+	list_splice(&txlist, &ep->rdllist);
 
-		list_splice(txlist, &ep->rdllist);
-		list_splice(&injlist, &ep->rdllist);
+	if (!list_empty(&ep->rdllist)) {
 		/*
-		 * Wake up ( if active ) both the eventpoll wait list and the ->poll()
-		 * wait list.
+		 * Wake up (if active) both the eventpoll wait list and the ->poll()
+		 * wait list (delayed after we release the lock).
 		 */
 		if (waitqueue_active(&ep->wq))
 			__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
 					 TASK_INTERRUPTIBLE);
 		if (waitqueue_active(&ep->poll_wait))
 			pwake++;
-
-		write_unlock_irqrestore(&ep->lock, flags);
 	}
+	spin_unlock_irqrestore(&ep->lock, flags);
+
+	mutex_unlock(&ep->mtx);
 
 	/* We have to call this outside the lock */
 	if (pwake)
@@ -1031,41 +995,6 @@ static int ep_send_events(struct eventpoll *ep, struct list_head *txlist,
 	return eventcnt == 0 ? error: eventcnt;
 }
 
-/*
- * Perform the transfer of events to user space.
- */
-static int ep_events_transfer(struct eventpoll *ep,
-			      struct epoll_event __user *events, int maxevents)
-{
-	int eventcnt;
-	unsigned long flags;
-	struct list_head txlist;
-
-	INIT_LIST_HEAD(&txlist);
-
-	/*
-	 * We need to lock this because we could be hit by
-	 * eventpoll_release_file() and epoll_ctl(EPOLL_CTL_DEL).
-	 */
-	down_read(&ep->sem);
-
-	/*
-	 * Steal the ready list, and re-init the original one to the
-	 * empty list.
-	 */
-	write_lock_irqsave(&ep->lock, flags);
-	list_splice(&ep->rdllist, &txlist);
-	INIT_LIST_HEAD(&ep->rdllist);
-	write_unlock_irqrestore(&ep->lock, flags);
-
-	/* Build result set in userspace */
-	eventcnt = ep_send_events(ep, &txlist, events, maxevents);
-
-	up_read(&ep->sem);
-
-	return eventcnt;
-}
-
 static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 		   int maxevents, long timeout)
 {
@@ -1083,7 +1012,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 		MAX_SCHEDULE_TIMEOUT : (timeout * HZ + 999) / 1000;
 
 retry:
-	write_lock_irqsave(&ep->lock, flags);
+	spin_lock_irqsave(&ep->lock, flags);
 
 	res = 0;
 	if (list_empty(&ep->rdllist)) {
@@ -1093,6 +1022,7 @@ retry:
 		 * ep_poll_callback() when events will become available.
 		 */
 		init_waitqueue_entry(&wait, current);
+		wait.flags |= WQ_FLAG_EXCLUSIVE;
 		__add_wait_queue(&ep->wq, &wait);
 
 		for (;;) {
@@ -1109,9 +1039,9 @@ retry:
 				break;
 			}
 
-			write_unlock_irqrestore(&ep->lock, flags);
+			spin_unlock_irqrestore(&ep->lock, flags);
 			jtimeout = schedule_timeout(jtimeout);
-			write_lock_irqsave(&ep->lock, flags);
+			spin_lock_irqsave(&ep->lock, flags);
 		}
 		__remove_wait_queue(&ep->wq, &wait);
 
@@ -1121,7 +1051,7 @@ retry:
 	/* Is it worth to try to dig for events ? */
 	eavail = !list_empty(&ep->rdllist);
 
-	write_unlock_irqrestore(&ep->lock, flags);
+	spin_unlock_irqrestore(&ep->lock, flags);
 
 	/*
 	 * Try to transfer events to user space. In case we get 0 events and
@@ -1129,18 +1059,17 @@ retry:
 	 * more luck.
 	 */
 	if (!res && eavail &&
-	    !(res = ep_events_transfer(ep, events, maxevents)) && jtimeout)
+	    !(res = ep_send_events(ep, events, maxevents)) && jtimeout)
 		goto retry;
 
 	return res;
 }
 
 /*
- * It opens an eventpoll file descriptor by suggesting a storage of "size"
- * file descriptors. The size parameter is just an hint about how to size
- * data structures. It won't prevent the user to store more than "size"
- * file descriptors inside the epoll interface. It is the kernel part of
- * the userspace epoll_create(2).
+ * It opens an eventpoll file descriptor. The "size" parameter is there
+ * for historical reasons, when epoll was using an hash instead of an
+ * RB tree. With the current implementation, the "size" parameter is ignored
+ * (besides sanity checks).
 */
asmlinkage long sys_epoll_create(int size)
 {
@@ -1176,7 +1105,6 @@ asmlinkage long sys_epoll_create(int size)
 
error_free:
 	ep_free(ep);
-	kfree(ep);
error_return:
 	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
 		     current, size, error));
@@ -1186,8 +1114,7 @@ error_return:
 /*
  * The following function implements the controller interface for
  * the eventpoll file that enables the insertion/removal/change of
- * file descriptors inside the interest set.  It represents
- * the kernel part of the user space epoll_ctl(2).
+ * file descriptors inside the interest set.
 */
asmlinkage long sys_epoll_ctl(int epfd, int op, int fd,
			      struct epoll_event __user *event)
@@ -1237,9 +1164,13 @@ asmlinkage long sys_epoll_ctl(int epfd, int op, int fd,
 	 */
 	ep = file->private_data;
 
-	down_write(&ep->sem);
+	mutex_lock(&ep->mtx);
 
-	/* Try to lookup the file inside our RB tree */
+	/*
+	 * Try to lookup the file inside our RB tree, Since we grabbed "mtx"
+	 * above, we can be sure to be able to use the item looked up by
+	 * ep_find() till we release the mutex.
+	 */
 	epi = ep_find(ep, tfile, fd);
 
 	error = -EINVAL;
@@ -1266,13 +1197,7 @@ asmlinkage long sys_epoll_ctl(int epfd, int op, int fd,
 		error = -ENOENT;
 		break;
 	}
-	/*
-	 * The function ep_find() increments the usage count of the structure
-	 * so, if this is not NULL, we need to release it.
-	 */
-	if (epi)
-		ep_release_epitem(epi);
-	up_write(&ep->sem);
+	mutex_unlock(&ep->mtx);
 
error_tgt_fput:
 	fput(tfile);
@@ -1378,7 +1303,7 @@ asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events,
 	if (sigmask) {
 		if (error == -EINTR) {
 			memcpy(&current->saved_sigmask, &sigsaved,
-			      sizeof(sigsaved));
+			       sizeof(sigsaved));
 			set_thread_flag(TIF_RESTORE_SIGMASK);
 		} else
 			sigprocmask(SIG_SETMASK, &sigsaved, NULL);
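Taken together, the ep_send_events() rewrite above follows a steal/drain cycle. A hedged straight-line summary (event delivery and error handling elided; names follow the patch):

static void transfer_cycle_sketch(struct eventpoll *ep)
{
	LIST_HEAD(txlist);
	unsigned long flags;

	mutex_lock(&ep->mtx);				/* items cannot vanish */

	spin_lock_irqsave(&ep->lock, flags);
	list_splice_init(&ep->rdllist, &txlist);	/* steal the ready list */
	ep->ovflist = NULL;				/* divert concurrent events */
	spin_unlock_irqrestore(&ep->lock, flags);

	/* ... walk txlist lockless, __put_user() each event ... */

	spin_lock_irqsave(&ep->lock, flags);
	/* ... requeue everything chained on ep->ovflist ... */
	ep->ovflist = EP_UNACTIVE_PTR;			/* back to normal queueing */
	list_splice(&txlist, &ep->rdllist);		/* undelivered leftovers */
	spin_unlock_irqrestore(&ep->lock, flags);

	mutex_unlock(&ep->mtx);
}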
diff --git a/include/asm-alpha/mmu_context.h b/include/asm-alpha/mmu_context.h
index 0bd7bd2ccb90..6a5be1f7debf 100644
--- a/include/asm-alpha/mmu_context.h
+++ b/include/asm-alpha/mmu_context.h
@@ -85,8 +85,8 @@ __reload_thread(struct pcb_struct *pcb)
  * +-------------+----------------+--------------+
  */
 
-#ifdef CONFIG_SMP
 #include <asm/smp.h>
+#ifdef CONFIG_SMP
 #define cpu_last_asn(cpuid) (cpu_data[cpuid].last_asn)
 #else
 extern unsigned long last_asn;
diff --git a/include/asm-h8300/atomic.h b/include/asm-h8300/atomic.h
index 21f54428c86b..b4cf0ea97ede 100644
--- a/include/asm-h8300/atomic.h
+++ b/include/asm-h8300/atomic.h
@@ -37,6 +37,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
 }
 
 #define atomic_sub(i, v) atomic_sub_return(i, v)
+#define atomic_sub_and_test(i,v) (atomic_sub_return(i, v) == 0)
 
 static __inline__ int atomic_inc_return(atomic_t *v)
 {
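The new h8300 macro matches what other ports already define: it returns true exactly when the subtraction drives the counter to zero. A hedged usage sketch, refcount-style:

static void put_ref_sketch(atomic_t *refcnt, void (*destroy)(void *), void *obj)
{
	if (atomic_sub_and_test(1, refcnt))	/* true iff count just hit zero */
		destroy(obj);
}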
diff --git a/include/asm-i386/kdebug.h b/include/asm-i386/kdebug.h
index 05c3117788b9..a185b5f73e7f 100644
--- a/include/asm-i386/kdebug.h
+++ b/include/asm-i386/kdebug.h
@@ -27,7 +27,6 @@ enum die_val {
 	DIE_GPF,
 	DIE_CALL,
 	DIE_NMI_IPI,
-	DIE_NMI_POST,
 	DIE_PAGE_FAULT,
 };
 
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 70f3515c3db0..338668bfb0a2 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -749,9 +749,13 @@ extern unsigned long boot_option_idle_override;
 extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);
 
+/* Defined in head.S */
+extern struct Xgt_desc_struct early_gdt_descr;
+
 extern void cpu_set_gdt(int);
 extern void switch_to_new_gdt(void);
 extern void cpu_init(void);
+extern void init_gdt(int cpu);
 
 extern int force_mwait;
 
diff --git a/include/asm-m68k/uaccess.h b/include/asm-m68k/uaccess.h
index 6a4cf2081512..5c1264cf0c65 100644
--- a/include/asm-m68k/uaccess.h
+++ b/include/asm-m68k/uaccess.h
@@ -361,7 +361,9 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
 
 long strncpy_from_user(char *dst, const char __user *src, long count);
 long strnlen_user(const char __user *src, long n);
-unsigned long clear_user(void __user *to, unsigned long n);
+unsigned long __clear_user(void __user *to, unsigned long n);
+
+#define clear_user	__clear_user
 
 #define strlen_user(str) strnlen_user(str, 32767)
 
diff --git a/include/asm-sparc/kdebug.h b/include/asm-sparc/kdebug.h
index 404d80767323..631f15ffef73 100644
--- a/include/asm-sparc/kdebug.h
+++ b/include/asm-sparc/kdebug.h
@@ -58,6 +58,10 @@ static inline void sp_enter_debugger(void)
 		sp_enter_debugger();		\
 	} while(0)
 
+enum die_val {
+	DIE_UNUSED,
+};
+
 #endif /* !(__ASSEMBLY__) */
 
 /* Some nice offset defines for assembler code. */
@@ -66,8 +70,4 @@ static inline void sp_enter_debugger(void)
 #define KDEBUG_DUNNO2_OFF	0x8
 #define KDEBUG_TEACH_OFF	0xc
 
-enum die_val {
-	DIE_UNUSED,
-};
-
 #endif /* !(_SPARC_KDEBUG_H) */
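The move above matters because kdebug.h is also pulled into assembly sources, and the assembler front end only understands the C preprocessor, not C syntax. A hedged illustration of the rule the patch restores:

#ifndef __ASSEMBLY__
enum die_val {			/* C construct: must be hidden from .S files */
	DIE_UNUSED,
};
#endif /* !(__ASSEMBLY__) */

#define KDEBUG_TEACH_OFF 0xc	/* plain cpp: usable from .S and .c alike */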
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h
index 8b6d9c9c8b93..8b4e23b3bb38 100644
--- a/include/asm-sparc/system.h
+++ b/include/asm-sparc/system.h
@@ -11,6 +11,7 @@
 #include <asm/psr.h>
 #include <asm/ptrace.h>
 #include <asm/btfixup.h>
+#include <asm/smp.h>
 
 #ifndef __ASSEMBLY__
 
diff --git a/include/asm-sparc64/dma-mapping.h b/include/asm-sparc64/dma-mapping.h
index 2f858a2df94a..9329429fb7f6 100644
--- a/include/asm-sparc64/dma-mapping.h
+++ b/include/asm-sparc64/dma-mapping.h
@@ -10,10 +10,13 @@
 /* need struct page definitions */
 #include <linux/mm.h>
 
+#include <asm/of_device.h>
+
 static inline int
 dma_supported(struct device *dev, u64 mask)
 {
-	BUG_ON(dev->bus != &pci_bus_type);
+	BUG_ON(dev->bus != &pci_bus_type &&
+	       dev->bus != &ebus_bus_type);
 
 	return pci_dma_supported(to_pci_dev(dev), mask);
 }
@@ -21,7 +24,8 @@ dma_supported(struct device *dev, u64 mask)
 static inline int
 dma_set_mask(struct device *dev, u64 dma_mask)
 {
-	BUG_ON(dev->bus != &pci_bus_type);
+	BUG_ON(dev->bus != &pci_bus_type &&
+	       dev->bus != &ebus_bus_type);
 
 	return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
 }
@@ -30,7 +34,8 @@ static inline void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		   gfp_t flag)
 {
-	BUG_ON(dev->bus != &pci_bus_type);
+	BUG_ON(dev->bus != &pci_bus_type &&
+	       dev->bus != &ebus_bus_type);
 
 	return pci_iommu_ops->alloc_consistent(to_pci_dev(dev), size, dma_handle, flag);
 }
@@ -39,7 +44,8 @@ static inline void
 dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
 		  dma_addr_t dma_handle)
 {
-	BUG_ON(dev->bus != &pci_bus_type);
+	BUG_ON(dev->bus != &pci_bus_type &&
+	       dev->bus != &ebus_bus_type);
 
 	pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
 }
@@ -48,7 +54,8 @@ static inline dma_addr_t
 dma_map_single(struct device *dev, void *cpu_addr, size_t size,
 	       enum dma_data_direction direction)
 {
-	BUG_ON(dev->bus != &pci_bus_type);
+	BUG_ON(dev->bus != &pci_bus_type &&
+	       dev->bus != &ebus_bus_type);
 
 	return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
 }
@@ -57,7 +64,8 @@ static inline void
 dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		 enum dma_data_direction direction)
 {
-	BUG_ON(dev->bus != &pci_bus_type);
+	BUG_ON(dev->bus != &pci_bus_type &&
+	       dev->bus != &ebus_bus_type);
 
 	pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
 }
@@ -67,7 +75,8 @@ dma_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size,
 	     enum dma_data_direction direction)
 {
-	BUG_ON(dev->bus != &pci_bus_type);
+	BUG_ON(dev->bus != &pci_bus_type &&
+	       dev->bus != &ebus_bus_type);
 
 	return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction);
 }
@@ -76,7 +85,8 @@ static inline void
 dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
 	       enum dma_data_direction direction)
 {
-	BUG_ON(dev->bus != &pci_bus_type);
+	BUG_ON(dev->bus != &pci_bus_type &&
+	       dev->bus != &ebus_bus_type);
 
 	pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
 }
@@ -85,7 +95,8 @@ static inline int
 dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	   enum dma_data_direction direction)
 {
-	BUG_ON(dev->bus != &pci_bus_type);
+	BUG_ON(dev->bus != &pci_bus_type &&
+	       dev->bus != &ebus_bus_type);
 
 	return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
 }
@@ -94,7 +105,8 @@ static inline void
 dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
 	     enum dma_data_direction direction)
 {
-	BUG_ON(dev->bus != &pci_bus_type);
+	BUG_ON(dev->bus != &pci_bus_type &&
+	       dev->bus != &ebus_bus_type);
 
 	pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
 }
@@ -103,7 +115,8 @@ static inline void
 dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
 			enum dma_data_direction direction)
 {
-	BUG_ON(dev->bus != &pci_bus_type);
+	BUG_ON(dev->bus != &pci_bus_type &&
+	       dev->bus != &ebus_bus_type);
 
 	pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
 				    size, (int)direction);
@@ -113,7 +126,8 @@ static inline void
 dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
 			   enum dma_data_direction direction)
 {
-	BUG_ON(dev->bus != &pci_bus_type);
+	BUG_ON(dev->bus != &pci_bus_type &&
+	       dev->bus != &ebus_bus_type);
 
 	pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
 				       size, (int)direction);
@@ -123,7 +137,8 @@ static inline void
 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
 		    enum dma_data_direction direction)
 {
-	BUG_ON(dev->bus != &pci_bus_type);
+	BUG_ON(dev->bus != &pci_bus_type &&
+	       dev->bus != &ebus_bus_type);
 
 	pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, nelems, (int)direction);
 }
@@ -132,7 +147,8 @@ static inline void
 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
 		       enum dma_data_direction direction)
 {
-	BUG_ON(dev->bus != &pci_bus_type);
+	BUG_ON(dev->bus != &pci_bus_type &&
+	       dev->bus != &ebus_bus_type);
 
 	pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, nelems, (int)direction);
 }
diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h
index 74feae945a26..d7e2bcf49e4f 100644
--- a/include/asm-x86_64/kdebug.h
+++ b/include/asm-x86_64/kdebug.h
@@ -22,7 +22,6 @@ enum die_val {
22 DIE_GPF, 22 DIE_GPF,
23 DIE_CALL, 23 DIE_CALL,
24 DIE_NMI_IPI, 24 DIE_NMI_IPI,
25 DIE_NMI_POST,
26 DIE_PAGE_FAULT, 25 DIE_PAGE_FAULT,
27}; 26};
28 27
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 636502c02734..0e69d2cf14aa 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -261,5 +261,11 @@ asmlinkage long compat_sys_epoll_pwait(int epfd,
261asmlinkage long compat_sys_utimensat(unsigned int dfd, char __user *filename, 261asmlinkage long compat_sys_utimensat(unsigned int dfd, char __user *filename,
262 struct compat_timespec __user *t, int flags); 262 struct compat_timespec __user *t, int flags);
263 263
264asmlinkage long compat_sys_signalfd(int ufd,
265 const compat_sigset_t __user *sigmask,
266 compat_size_t sigsetsize);
267asmlinkage long compat_sys_timerfd(int ufd, int clockid, int flags,
268 const struct compat_itimerspec __user *utmr);
269
264#endif /* CONFIG_COMPAT */ 270#endif /* CONFIG_COMPAT */
265#endif /* _LINUX_COMPAT_H */ 271#endif /* _LINUX_COMPAT_H */
diff --git a/include/linux/init.h b/include/linux/init.h
index 8bc32bb2fce2..e007ae4dc41e 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -52,14 +52,9 @@
52#endif 52#endif
53 53
54/* For assembly routines */ 54/* For assembly routines */
55#ifdef CONFIG_HOTPLUG_CPU
56#define __INIT .section ".text","ax"
57#define __INITDATA .section ".data","aw"
58#else
59#define __INIT .section ".init.text","ax" 55#define __INIT .section ".init.text","ax"
60#define __INITDATA .section ".init.data","aw"
61#endif
62#define __FINIT .previous 56#define __FINIT .previous
57#define __INITDATA .section ".init.data","aw"
63 58
64#ifndef __ASSEMBLY__ 59#ifndef __ASSEMBLY__
65/* 60/*
diff --git a/include/linux/io.h b/include/linux/io.h
index 09d351236379..8423dd376514 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -27,8 +27,16 @@ struct device;
27void __iowrite32_copy(void __iomem *to, const void *from, size_t count); 27void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
28void __iowrite64_copy(void __iomem *to, const void *from, size_t count); 28void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
29 29
30#ifdef CONFIG_MMU
30int ioremap_page_range(unsigned long addr, unsigned long end, 31int ioremap_page_range(unsigned long addr, unsigned long end,
31 unsigned long phys_addr, pgprot_t prot); 32 unsigned long phys_addr, pgprot_t prot);
33#else
34static inline int ioremap_page_range(unsigned long addr, unsigned long end,
35 unsigned long phys_addr, pgprot_t prot)
36{
37 return 0;
38}
39#endif
32 40
33/* 41/*
34 * Managed iomap interface 42 * Managed iomap interface
diff --git a/include/linux/major.h b/include/linux/major.h
index 0a74c52924c9..7e7c9093919a 100644
--- a/include/linux/major.h
+++ b/include/linux/major.h
@@ -152,6 +152,8 @@
152#define USB_ACM_AUX_MAJOR 167 152#define USB_ACM_AUX_MAJOR 167
153#define USB_CHAR_MAJOR 180 153#define USB_CHAR_MAJOR 180
154 154
155#define MMC_BLOCK_MAJOR 179
156
155#define VXVM_MAJOR 199 /* VERITAS volume i/o driver */ 157#define VXVM_MAJOR 199 /* VERITAS volume i/o driver */
156#define VXSPEC_MAJOR 200 /* VERITAS volume config driver */ 158#define VXSPEC_MAJOR 200 /* VERITAS volume config driver */
157#define VXDMP_MAJOR 201 /* VERITAS volume multipath driver */ 159#define VXDMP_MAJOR 201 /* VERITAS volume multipath driver */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index ea27065e80e6..fd6627e2d115 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -60,7 +60,8 @@ struct kmem_cache {
60#define KMALLOC_SHIFT_LOW 3 60#define KMALLOC_SHIFT_LOW 3
61 61
62#ifdef CONFIG_LARGE_ALLOCS 62#ifdef CONFIG_LARGE_ALLOCS
63#define KMALLOC_SHIFT_HIGH 25 63#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT) =< 25 ? \
64 (MAX_ORDER + PAGE_SHIFT - 1) : 25)
64#else 65#else
65#if !defined(CONFIG_MMU) || NR_CPUS > 512 || MAX_NUMNODES > 256 66#if !defined(CONFIG_MMU) || NR_CPUS > 512 || MAX_NUMNODES > 256
66#define KMALLOC_SHIFT_HIGH 20 67#define KMALLOC_SHIFT_HIGH 20
@@ -87,6 +88,9 @@ static inline int kmalloc_index(int size)
87 */ 88 */
88 WARN_ON_ONCE(size == 0); 89 WARN_ON_ONCE(size == 0);
89 90
91 if (size >= (1 << KMALLOC_SHIFT_HIGH))
92 return -1;
93
90 if (size > 64 && size <= 96) 94 if (size > 64 && size <= 96)
91 return 1; 95 return 1;
92 if (size > 128 && size <= 192) 96 if (size > 128 && size <= 192)
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 3db5c3c460d7..51b6a6a6158c 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -74,7 +74,7 @@ static struct clocksource *watchdog;
74static struct timer_list watchdog_timer; 74static struct timer_list watchdog_timer;
75static DEFINE_SPINLOCK(watchdog_lock); 75static DEFINE_SPINLOCK(watchdog_lock);
76static cycle_t watchdog_last; 76static cycle_t watchdog_last;
77static int watchdog_resumed; 77static unsigned long watchdog_resumed;
78 78
79/* 79/*
80 * Interval: 0.5sec Threshold: 0.0625s 80 * Interval: 0.5sec Threshold: 0.0625s
@@ -104,9 +104,7 @@ static void clocksource_watchdog(unsigned long data)
104 104
105 spin_lock(&watchdog_lock); 105 spin_lock(&watchdog_lock);
106 106
107 resumed = watchdog_resumed; 107 resumed = test_and_clear_bit(0, &watchdog_resumed);
108 if (unlikely(resumed))
109 watchdog_resumed = 0;
110 108
111 wdnow = watchdog->read(); 109 wdnow = watchdog->read();
112 wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask); 110 wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
@@ -151,9 +149,7 @@ static void clocksource_watchdog(unsigned long data)
151} 149}
152static void clocksource_resume_watchdog(void) 150static void clocksource_resume_watchdog(void)
153{ 151{
154 spin_lock(&watchdog_lock); 152 set_bit(0, &watchdog_resumed);
155 watchdog_resumed = 1;
156 spin_unlock(&watchdog_lock);
157} 153}
158 154
159static void clocksource_check_watchdog(struct clocksource *cs) 155static void clocksource_check_watchdog(struct clocksource *cs)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index f9217bf644f6..3d1042f82a68 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -273,6 +273,8 @@ static int timekeeping_resume(struct sys_device *dev)
273 unsigned long flags; 273 unsigned long flags;
274 unsigned long now = read_persistent_clock(); 274 unsigned long now = read_persistent_clock();
275 275
276 clocksource_resume();
277
276 write_seqlock_irqsave(&xtime_lock, flags); 278 write_seqlock_irqsave(&xtime_lock, flags);
277 279
278 if (now && (now > timekeeping_suspend_time)) { 280 if (now && (now > timekeeping_suspend_time)) {
diff --git a/kernel/timer.c b/kernel/timer.c
index a6c580ac084b..5ec5490f8d85 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1499,8 +1499,6 @@ unregister_time_interpolator(struct time_interpolator *ti)
1499 prev = &curr->next; 1499 prev = &curr->next;
1500 } 1500 }
1501 1501
1502 clocksource_resume();
1503
1504 write_seqlock_irqsave(&xtime_lock, flags); 1502 write_seqlock_irqsave(&xtime_lock, flags);
1505 if (ti == time_interpolator) { 1503 if (ti == time_interpolator) {
1506 /* we lost the best time-interpolator: */ 1504 /* we lost the best time-interpolator: */
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 37ab5802ca08..cdbc6c135849 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -109,6 +109,17 @@ static const struct snmp_mib snmp4_ipstats_list[] = {
109 SNMP_MIB_SENTINEL 109 SNMP_MIB_SENTINEL
110}; 110};
111 111
112/* Following RFC4293 items are displayed in /proc/net/netstat */
113static const struct snmp_mib snmp4_ipextstats_list[] = {
114 SNMP_MIB_ITEM("InNoRoutes", IPSTATS_MIB_INNOROUTES),
115 SNMP_MIB_ITEM("InTruncatedPkts", IPSTATS_MIB_INTRUNCATEDPKTS),
116 SNMP_MIB_ITEM("InMcastPkts", IPSTATS_MIB_INMCASTPKTS),
117 SNMP_MIB_ITEM("OutMcastPkts", IPSTATS_MIB_OUTMCASTPKTS),
118 SNMP_MIB_ITEM("InBcastPkts", IPSTATS_MIB_INBCASTPKTS),
119 SNMP_MIB_ITEM("OutBcastPkts", IPSTATS_MIB_OUTBCASTPKTS),
120 SNMP_MIB_SENTINEL
121};
122
112static const struct snmp_mib snmp4_icmp_list[] = { 123static const struct snmp_mib snmp4_icmp_list[] = {
113 SNMP_MIB_ITEM("InMsgs", ICMP_MIB_INMSGS), 124 SNMP_MIB_ITEM("InMsgs", ICMP_MIB_INMSGS),
114 SNMP_MIB_ITEM("InErrors", ICMP_MIB_INERRORS), 125 SNMP_MIB_ITEM("InErrors", ICMP_MIB_INERRORS),
@@ -338,6 +349,16 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
338 snmp_fold_field((void **)net_statistics, 349 snmp_fold_field((void **)net_statistics,
339 snmp4_net_list[i].entry)); 350 snmp4_net_list[i].entry));
340 351
352 seq_puts(seq, "\nIpExt:");
353 for (i = 0; snmp4_ipextstats_list[i].name != NULL; i++)
354 seq_printf(seq, " %s", snmp4_ipextstats_list[i].name);
355
356 seq_puts(seq, "\nIpExt:");
357 for (i = 0; snmp4_ipextstats_list[i].name != NULL; i++)
358 seq_printf(seq, " %lu",
359 snmp_fold_field((void **)ip_statistics,
360 snmp4_ipextstats_list[i].entry));
361
341 seq_putc(seq, '\n'); 362 seq_putc(seq, '\n');
342 return 0; 363 return 0;
343} 364}
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index be0ee8a34f9b..30a5cb1b203e 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -235,7 +235,7 @@ int ip6_mc_input(struct sk_buff *skb)
235 IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INMCASTPKTS); 235 IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INMCASTPKTS);
236 236
237 hdr = ipv6_hdr(skb); 237 hdr = ipv6_hdr(skb);
238 deliver = likely(!(skb->dev->flags & (IFF_PROMISC|IFF_ALLMULTI))) || 238 deliver = unlikely(skb->dev->flags & (IFF_PROMISC|IFF_ALLMULTI)) ||
239 ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, NULL); 239 ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, NULL);
240 240
241 /* 241 /*
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 269a6e17c6c4..6d7542c26e47 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -75,7 +75,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
75 band = res.classid; 75 band = res.classid;
76 } 76 }
77 band = TC_H_MIN(band) - 1; 77 band = TC_H_MIN(band) - 1;
78 if (band > q->bands) 78 if (band >= q->bands)
79 return q->queues[q->prio2band[0]]; 79 return q->queues[q->prio2band[0]];
80 80
81 return q->queues[band]; 81 return q->queues[band];
diff --git a/net/xfrm/xfrm_hash.c b/net/xfrm/xfrm_hash.c
index 37643bb8768a..55ab5792af56 100644
--- a/net/xfrm/xfrm_hash.c
+++ b/net/xfrm/xfrm_hash.c
@@ -22,7 +22,8 @@ struct hlist_head *xfrm_hash_alloc(unsigned int sz)
22 n = __vmalloc(sz, GFP_KERNEL, PAGE_KERNEL); 22 n = __vmalloc(sz, GFP_KERNEL, PAGE_KERNEL);
23 else 23 else
24 n = (struct hlist_head *) 24 n = (struct hlist_head *)
25 __get_free_pages(GFP_KERNEL, get_order(sz)); 25 __get_free_pages(GFP_KERNEL | __GFP_NOWARN,
26 get_order(sz));
26 27
27 if (n) 28 if (n)
28 memset(n, 0, sz); 29 memset(n, 0, sz);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 95271e8426a1..d0882e53b6fc 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -796,6 +796,10 @@ struct xfrm_policy *xfrm_policy_byid(u8 type, int dir, u32 id, int delete,
796 struct hlist_head *chain; 796 struct hlist_head *chain;
797 struct hlist_node *entry; 797 struct hlist_node *entry;
798 798
799 *err = -ENOENT;
800 if (xfrm_policy_id2dir(id) != dir)
801 return NULL;
802
799 *err = 0; 803 *err = 0;
800 write_lock_bh(&xfrm_policy_lock); 804 write_lock_bh(&xfrm_policy_lock);
801 chain = xfrm_policy_byidx + idx_hash(id); 805 chain = xfrm_policy_byidx + idx_hash(id);