author		Thomas Gleixner <tglx@linutronix.de>	2010-05-12 19:20:04 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2010-05-12 19:20:04 -0400
commit		1540c84b5ed657ed71dce06915bba461e6b09574 (patch)
tree		a449dc166800a1b0c429bb038bfc974e577eaf72
parent		1a3a403aa98b0ccabeb12abd7da90d33250ea36b (diff)
parent		4640b4e7d9919e9629fe8456df94f71658431ef9 (diff)
Merge branch '2.6.33.4' into rt/2.6.33
Conflicts:
	Makefile

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/mach-pxa/include/mach/colibri.h | 1
-rw-r--r--  arch/mips/include/asm/mach-sibyte/war.h | 6
-rw-r--r--  arch/mips/sibyte/sb1250/setup.c | 15
-rw-r--r--  arch/powerpc/kernel/head_64.S | 11
-rw-r--r--  arch/powerpc/mm/fsl_booke_mmu.c | 5
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-cpu.c | 42
-rw-r--r--  arch/powerpc/platforms/pseries/offline_states.h | 1
-rw-r--r--  arch/sparc/include/asm/irqflags_64.h | 23
-rw-r--r--  arch/sparc/include/asm/thread_info_64.h | 2
-rw-r--r--  arch/sparc/kernel/pci_common.c | 11
-rw-r--r--  arch/sparc/kernel/rtrap_64.S | 12
-rw-r--r--  arch/sparc/kernel/traps_64.c | 26
-rw-r--r--  arch/sparc/kernel/unaligned_64.c | 6
-rw-r--r--  arch/x86/Kconfig | 4
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 3
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 3
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 21
-rw-r--r--  arch/x86/kernel/k8.c | 14
-rw-r--r--  arch/x86/kernel/pci-gart_64.c | 2
-rw-r--r--  arch/x86/kernel/process_64.c | 4
-rw-r--r--  block/blk-timeout.c | 12
-rw-r--r--  crypto/async_tx/async_raid6_recov.c | 21
-rw-r--r--  drivers/Makefile | 2
-rw-r--r--  drivers/acpi/power_meter.c | 2
-rw-r--r--  drivers/acpi/sleep.c | 144
-rw-r--r--  drivers/ata/libata-eh.c | 5
-rw-r--r--  drivers/base/memory.c | 2
-rw-r--r--  drivers/char/agp/Kconfig | 2
-rw-r--r--  drivers/cpuidle/governors/menu.c | 9
-rw-r--r--  drivers/edac/edac_mce_amd.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 16
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 26
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 24
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 18
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 56
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 2
-rw-r--r--  drivers/i2c/i2c-core.c | 21
-rw-r--r--  drivers/md/md.c | 10
-rw-r--r--  drivers/md/raid5.c | 54
-rw-r--r--  drivers/media/dvb/ttpci/budget.c | 3
-rw-r--r--  drivers/net/bnx2.c | 6
-rw-r--r--  drivers/net/r8169.c | 13
-rw-r--r--  drivers/net/sfc/efx.c | 4
-rw-r--r--  drivers/net/sfc/falcon.c | 4
-rw-r--r--  drivers/net/sfc/falcon_boards.c | 13
-rw-r--r--  drivers/net/sfc/nic.h | 2
-rw-r--r--  drivers/net/sfc/siena.c | 13
-rw-r--r--  drivers/net/tg3.c | 1
-rw-r--r--  drivers/net/tun.c | 4
-rw-r--r--  drivers/net/usb/cdc_ether.c | 1
-rw-r--r--  drivers/net/usb/dm9601.c | 2
-rw-r--r--  drivers/net/wan/hdlc_ppp.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 2
-rw-r--r--  drivers/net/wireless/p54/p54pci.c | 2
-rw-r--r--  drivers/net/wireless/p54/p54usb.c | 1
-rw-r--r--  drivers/net/wireless/p54/txrx.c | 2
-rw-r--r--  drivers/pci/pci.c | 6
-rw-r--r--  drivers/scsi/libiscsi.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_ata.c | 4
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c | 4
-rw-r--r--  drivers/scsi/scsi_debug.c | 3
-rw-r--r--  drivers/scsi/scsi_error.c | 15
-rw-r--r--  drivers/scsi/scsi_lib.c | 10
-rw-r--r--  drivers/scsi/sd.c | 1
-rw-r--r--  drivers/serial/8250_pnp.c | 2
-rw-r--r--  drivers/staging/hv/Hv.c | 2
-rw-r--r--  drivers/staging/hv/RndisFilter.c | 1
-rw-r--r--  drivers/staging/hv/netvsc_drv.c | 3
-rw-r--r--  drivers/staging/usbip/usbip_event.c | 3
-rw-r--r--  drivers/usb/core/driver.c | 32
-rw-r--r--  drivers/usb/core/generic.c | 2
-rw-r--r--  drivers/usb/core/inode.c | 4
-rw-r--r--  drivers/usb/core/message.c | 2
-rw-r--r--  drivers/usb/host/ehci-hcd.c | 1
-rw-r--r--  drivers/usb/host/ehci-mem.c | 2
-rw-r--r--  drivers/usb/host/ehci-sched.c | 38
-rw-r--r--  drivers/usb/host/ehci.h | 5
-rw-r--r--  drivers/usb/host/ohci-hub.c | 2
-rw-r--r--  drivers/usb/host/xhci-mem.c | 65
-rw-r--r--  drivers/usb/host/xhci.h | 4
-rw-r--r--  drivers/usb/serial/sierra.c | 1
-rw-r--r--  drivers/w1/slaves/w1_therm.c | 5
-rw-r--r--  fs/ext4/extents.c | 9
-rw-r--r--  fs/jfs/resize.c | 6
-rw-r--r--  fs/nfs/client.c | 2
-rw-r--r--  fs/nfs/dir.c | 2
-rw-r--r--  fs/nfsd/nfs4xdr.c | 8
-rw-r--r--  fs/ocfs2/buffer_head_io.c | 2
-rw-r--r--  fs/ocfs2/dlm/dlmfs.c | 2
-rw-r--r--  fs/ocfs2/inode.c | 1
-rw-r--r--  fs/ocfs2/refcounttree.c | 3
-rw-r--r--  fs/proc/base.c | 2
-rw-r--r--  fs/reiserfs/dir.c | 2
-rw-r--r--  fs/reiserfs/xattr.c | 19
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c | 5
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.c | 107
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.h | 7
-rw-r--r--  fs/xfs/quota/xfs_qm_syscalls.c | 3
-rw-r--r--  fs/xfs/xfs_ag.h | 1
-rw-r--r--  fs/xfs/xfs_mount.h | 1
-rw-r--r--  include/linux/ata.h | 4
-rw-r--r--  include/net/sctp/command.h | 1
-rw-r--r--  include/net/sctp/sctp.h | 1
-rw-r--r--  init/initramfs.c | 3
-rw-r--r--  kernel/cred.c | 2
-rw-r--r--  kernel/perf_event.c | 2
-rw-r--r--  lib/flex_array.c | 2
-rw-r--r--  mm/memcontrol.c | 4
-rw-r--r--  net/ieee802154/af_ieee802154.c | 3
-rw-r--r--  net/ipv4/tcp.c | 1
-rw-r--r--  net/ipv4/udp.c | 4
-rw-r--r--  net/ipv6/tcp_ipv6.c | 2
-rw-r--r--  net/ipv6/udp.c | 4
-rw-r--r--  net/mac80211/agg-tx.c | 1
-rw-r--r--  net/sctp/associola.c | 6
-rw-r--r--  net/sctp/endpointola.c | 1
-rw-r--r--  net/sctp/sm_make_chunk.c | 32
-rw-r--r--  net/sctp/sm_sideeffect.c | 26
-rw-r--r--  net/sctp/sm_statefuns.c | 8
-rw-r--r--  net/sctp/socket.c | 14
-rw-r--r--  net/tipc/bearer.c | 37
-rw-r--r--  net/tipc/bearer.h | 2
-rw-r--r--  net/tipc/net.c | 25
-rw-r--r--  security/inode.c | 4
-rw-r--r--  security/keys/request_key.c | 9
-rw-r--r--  sound/pci/hda/hda_intel.c | 1
-rw-r--r--  sound/pci/hda/patch_conexant.c | 7
-rw-r--r--  sound/pci/hda/patch_realtek.c | 2
-rw-r--r--  sound/pci/hda/patch_sigmatel.c | 6
-rw-r--r--  sound/pci/maestro3.c | 9
137 files changed, 975 insertions, 404 deletions
diff --git a/Makefile b/Makefile
index b4ec65eab23c..81ea80c75f01 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 33
-EXTRAVERSION = .3-rt19
+EXTRAVERSION = .4-rt19
 NAME = Man-Eating Seals of Antiquity
 
 # *DOCUMENTATION*
diff --git a/arch/arm/mach-pxa/include/mach/colibri.h b/arch/arm/mach-pxa/include/mach/colibri.h
index 811743c56147..5f2ba8d9015c 100644
--- a/arch/arm/mach-pxa/include/mach/colibri.h
+++ b/arch/arm/mach-pxa/include/mach/colibri.h
@@ -2,6 +2,7 @@
 #define _COLIBRI_H_
 
 #include <net/ax88796.h>
+#include <mach/mfp.h>
 
 /*
  * common settings for all modules
diff --git a/arch/mips/include/asm/mach-sibyte/war.h b/arch/mips/include/asm/mach-sibyte/war.h
index 7950ef4f032c..743385d7b5f2 100644
--- a/arch/mips/include/asm/mach-sibyte/war.h
+++ b/arch/mips/include/asm/mach-sibyte/war.h
@@ -16,7 +16,11 @@
 #if defined(CONFIG_SB1_PASS_1_WORKAROUNDS) || \
     defined(CONFIG_SB1_PASS_2_WORKAROUNDS)
 
-#define BCM1250_M3_WAR	1
+#ifndef __ASSEMBLY__
+extern int sb1250_m3_workaround_needed(void);
+#endif
+
+#define BCM1250_M3_WAR	sb1250_m3_workaround_needed()
 #define SIBYTE_1956_WAR	1
 
 #else
diff --git a/arch/mips/sibyte/sb1250/setup.c b/arch/mips/sibyte/sb1250/setup.c
index 0444da1e23c2..92da3155ce07 100644
--- a/arch/mips/sibyte/sb1250/setup.c
+++ b/arch/mips/sibyte/sb1250/setup.c
@@ -87,6 +87,21 @@ static int __init setup_bcm1250(void)
 	return ret;
 }
 
+int sb1250_m3_workaround_needed(void)
+{
+	switch (soc_type) {
+	case K_SYS_SOC_TYPE_BCM1250:
+	case K_SYS_SOC_TYPE_BCM1250_ALT:
+	case K_SYS_SOC_TYPE_BCM1250_ALT2:
+	case K_SYS_SOC_TYPE_BCM1125:
+	case K_SYS_SOC_TYPE_BCM1125H:
+		return soc_pass < K_SYS_REVISION_BCM1250_C0;
+
+	default:
+		return 0;
+	}
+}
+
 static int __init setup_bcm112x(void)
 {
 	int ret = 0;
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 925807488022..567cd57ad7b6 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -615,6 +615,17 @@ _GLOBAL(start_secondary_prolog)
 	std	r3,0(r1)		/* Zero the stack frame pointer	*/
 	bl	.start_secondary
 	b	.
+/*
+ * Reset stack pointer and call start_secondary
+ * to continue with online operation when woken up
+ * from cede in cpu offline.
+ */
+_GLOBAL(start_secondary_resume)
+	ld	r1,PACAKSAVE(r13)	/* Reload kernel stack pointer */
+	li	r3,0
+	std	r3,0(r1)		/* Zero the stack frame pointer	*/
+	bl	.start_secondary
+	b	.
 #endif
 
 /*
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index c5394728bf2e..1ce9dd51a2f6 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -155,15 +155,10 @@ static void settlbcam(int index, unsigned long virt, phys_addr_t phys,
 	if (cur_cpu_spec->cpu_features & MMU_FTR_BIG_PHYS)
 		TLBCAM[index].MAS7 = (u64)phys >> 32;
 
-#ifndef CONFIG_KGDB /* want user access for breakpoints */
 	if (flags & _PAGE_USER) {
 	   TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
 	   TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
 	}
-#else
-	TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
-	TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
-#endif
 
 	tlbcam_addrs[index].start = virt;
 	tlbcam_addrs[index].limit = virt + size - 1;
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 6ea4698d9176..b84237884737 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -122,44 +122,32 @@ static void pseries_mach_cpu_die(void)
 		if (!get_lppaca()->shared_proc)
 			get_lppaca()->donate_dedicated_cpu = 1;
 
-		printk(KERN_INFO
-			"cpu %u (hwid %u) ceding for offline with hint %d\n",
-			cpu, hwcpu, cede_latency_hint);
 		while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
 			extended_cede_processor(cede_latency_hint);
-			printk(KERN_INFO "cpu %u (hwid %u) returned from cede.\n",
-				cpu, hwcpu);
-			printk(KERN_INFO
-			"Decrementer value = %x Timebase value = %llx\n",
-			get_dec(), get_tb());
 		}
 
-		printk(KERN_INFO "cpu %u (hwid %u) got prodded to go online\n",
-			cpu, hwcpu);
-
 		if (!get_lppaca()->shared_proc)
 			get_lppaca()->donate_dedicated_cpu = 0;
 		get_lppaca()->idle = 0;
-		}
 
 		if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) {
 			unregister_slb_shadow(hwcpu, __pa(get_slb_shadow()));
 
 			/*
-			 * NOTE: Calling start_secondary() here for now to
-			 * start new context.
-			 * However, need to do it cleanly by resetting the
-			 * stack pointer.
-			 */
-			start_secondary();
+			 * Call to start_secondary_resume() will not return.
+			 * Kernel stack will be reset and start_secondary()
+			 * will be called to continue the online operation.
+			 */
+			start_secondary_resume();
+		}
+	}
 
-	} else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) {
+	/* Requested state is CPU_STATE_OFFLINE at this point */
+	WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE);
 
 	set_cpu_current_state(cpu, CPU_STATE_OFFLINE);
-	unregister_slb_shadow(hard_smp_processor_id(),
-				__pa(get_slb_shadow()));
-	rtas_stop_self();
-	}
+	unregister_slb_shadow(hwcpu, __pa(get_slb_shadow()));
+	rtas_stop_self();
 
 	/* Should never get here... */
 	BUG();
diff --git a/arch/powerpc/platforms/pseries/offline_states.h b/arch/powerpc/platforms/pseries/offline_states.h
index 202d8692c6a2..75a6f480d931 100644
--- a/arch/powerpc/platforms/pseries/offline_states.h
+++ b/arch/powerpc/platforms/pseries/offline_states.h
@@ -35,4 +35,5 @@ static inline void set_default_offline_state(int cpu)
 
 extern enum cpu_state_vals get_preferred_offline_state(int cpu);
 extern int start_secondary(void);
+extern void start_secondary_resume(void);
 #endif
diff --git a/arch/sparc/include/asm/irqflags_64.h b/arch/sparc/include/asm/irqflags_64.h
index 8b49bf920df3..bfa1ea45b4cd 100644
--- a/arch/sparc/include/asm/irqflags_64.h
+++ b/arch/sparc/include/asm/irqflags_64.h
@@ -76,9 +76,26 @@ static inline int raw_irqs_disabled(void)
  */
 static inline unsigned long __raw_local_irq_save(void)
 {
-	unsigned long flags = __raw_local_save_flags();
+	unsigned long flags, tmp;
 
-	raw_local_irq_disable();
+	/* Disable interrupts to PIL_NORMAL_MAX unless we already
+	 * are using PIL_NMI, in which case PIL_NMI is retained.
+	 *
+	 * The only values we ever program into the %pil are 0,
+	 * PIL_NORMAL_MAX and PIL_NMI.
+	 *
+	 * Since PIL_NMI is the largest %pil value and all bits are
+	 * set in it (0xf), it doesn't matter what PIL_NORMAL_MAX
+	 * actually is.
+	 */
+	__asm__ __volatile__(
+		"rdpr	%%pil, %0\n\t"
+		"or	%0, %2, %1\n\t"
+		"wrpr	%1, 0x0, %%pil"
+		: "=r" (flags), "=r" (tmp)
+		: "i" (PIL_NORMAL_MAX)
+		: "memory"
+	);
 
 	return flags;
 }
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 39be9f256e5a..3df02de1984e 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -121,7 +121,7 @@ struct thread_info {
 #define THREAD_SHIFT PAGE_SHIFT
 #endif /* PAGE_SHIFT == 13 */
 
-#define PREEMPT_ACTIVE		0x4000000
+#define PREEMPT_ACTIVE		0x10000000
 
 /*
  * macros/functions for gaining access to the thread information structure
diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c
index b775658a927d..8a000583b5cf 100644
--- a/arch/sparc/kernel/pci_common.c
+++ b/arch/sparc/kernel/pci_common.c
@@ -371,14 +371,19 @@ static void pci_register_iommu_region(struct pci_pbm_info *pbm)
 	struct resource *rp = kzalloc(sizeof(*rp), GFP_KERNEL);
 
 	if (!rp) {
-		prom_printf("Cannot allocate IOMMU resource.\n");
-		prom_halt();
+		pr_info("%s: Cannot allocate IOMMU resource.\n",
+			pbm->name);
+		return;
 	}
 	rp->name = "IOMMU";
 	rp->start = pbm->mem_space.start + (unsigned long) vdma[0];
 	rp->end = rp->start + (unsigned long) vdma[1] - 1UL;
 	rp->flags = IORESOURCE_BUSY;
-	request_resource(&pbm->mem_space, rp);
+	if (request_resource(&pbm->mem_space, rp)) {
+		pr_info("%s: Unable to request IOMMU resource.\n",
+			pbm->name);
+		kfree(rp);
+	}
 	}
 }
 
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index fd3cee4d117c..c720f0ccea1b 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -172,7 +172,17 @@ rtrap_xcall:
 		 nop
 		call	trace_hardirqs_on
 		 nop
-		wrpr	%l4, %pil
+		/* Do not actually set the %pil here.  We will do that
+		 * below after we clear PSTATE_IE in the %pstate register.
+		 * If we re-enable interrupts here, we can recurse down
+		 * the hardirq stack potentially endlessly, causing a
+		 * stack overflow.
+		 *
+		 * It is tempting to put this test and trace_hardirqs_on
+		 * call at the 'rt_continue' label, but that will not work
+		 * as that path hits unconditionally and we do not want to
+		 * execute this in NMI return paths, for example.
+		 */
 #endif
 rtrap_no_irq_enable:
 		andcc	%l1, TSTATE_PRIV, %l3
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 10f7bb9fc140..22cd4751ef8a 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2202,27 +2202,6 @@ void dump_stack(void)
 
 EXPORT_SYMBOL(dump_stack);
 
-static inline int is_kernel_stack(struct task_struct *task,
-				  struct reg_window *rw)
-{
-	unsigned long rw_addr = (unsigned long) rw;
-	unsigned long thread_base, thread_end;
-
-	if (rw_addr < PAGE_OFFSET) {
-		if (task != &init_task)
-			return 0;
-	}
-
-	thread_base = (unsigned long) task_stack_page(task);
-	thread_end = thread_base + sizeof(union thread_union);
-	if (rw_addr >= thread_base &&
-	    rw_addr < thread_end &&
-	    !(rw_addr & 0x7UL))
-		return 1;
-
-	return 0;
-}
-
 static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
 {
 	unsigned long fp = rw->ins[6];
@@ -2251,6 +2230,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
 	show_regs(regs);
 	add_taint(TAINT_DIE);
 	if (regs->tstate & TSTATE_PRIV) {
+		struct thread_info *tp = current_thread_info();
 		struct reg_window *rw = (struct reg_window *)
 			(regs->u_regs[UREG_FP] + STACK_BIAS);
 
@@ -2258,8 +2238,8 @@ void die_if_kernel(char *str, struct pt_regs *regs)
 		 * find some badly aligned kernel stack.
 		 */
 		while (rw &&
-		       count++ < 30&&
-		       is_kernel_stack(current, rw)) {
+		       count++ < 30 &&
+		       kstack_valid(tp, (unsigned long) rw)) {
 			printk("Caller[%016lx]: %pS\n", rw->ins[7],
 			       (void *) rw->ins[7]);
 
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index 378ca82b9ccc..95a8e9a5125d 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -49,7 +49,7 @@ static inline enum direction decode_direction(unsigned int insn)
 }
 
 /* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
-static inline int decode_access_size(unsigned int insn)
+static inline int decode_access_size(struct pt_regs *regs, unsigned int insn)
 {
 	unsigned int tmp;
 
@@ -65,7 +65,7 @@ static inline int decode_access_size(unsigned int insn)
 		return 2;
 	else {
 		printk("Impossible unaligned trap. insn=%08x\n", insn);
-		die_if_kernel("Byte sized unaligned access?!?!", current_thread_info()->kregs);
+		die_if_kernel("Byte sized unaligned access?!?!", regs);
 
 		/* GCC should never warn that control reaches the end
 		 * of this function without returning a value because
@@ -289,7 +289,7 @@ static void log_unaligned(struct pt_regs *regs)
 asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 {
 	enum direction dir = decode_direction(insn);
-	int size = decode_access_size(insn);
+	int size = decode_access_size(regs, insn);
 	int orig_asi, asi;
 
 	current_thread_info()->kern_una_regs = regs;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ec9733936c96..b6cc9c9dc919 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -636,7 +636,7 @@ config GART_IOMMU
 	bool "GART IOMMU support" if EMBEDDED
 	default y
 	select SWIOTLB
-	depends on X86_64 && PCI
+	depends on X86_64 && PCI && K8_NB
 	---help---
 	  Support for full DMA access of devices with 32bit memory access only
 	  on systems with more than 3GB. This is usually needed for USB,
@@ -2036,7 +2036,7 @@ endif # X86_32
 
 config K8_NB
 	def_bool y
-	depends on AGP_AMD64 || (X86_64 && (GART_IOMMU || (PCI && NUMA)))
+	depends on CPU_SUP_AMD && PCI
 
 source "drivers/pcmcia/Kconfig"
 
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index c2ac98f62d05..adcb4bfbaae4 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2539,6 +2539,9 @@ void irq_force_complete_move(int irq)
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_cfg *cfg = desc->chip_data;
 
+	if (!cfg)
+		return;
+
 	__irq_complete_move(&desc, cfg->vector);
 }
 #else
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 6e44519960c8..3b5ea3831c26 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -929,7 +929,8 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data,
 		powernow_table[i].index = index;
 
 		/* Frequency may be rounded for these */
-		if (boot_cpu_data.x86 == 0x10 || boot_cpu_data.x86 == 0x11) {
+		if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
+		    || boot_cpu_data.x86 == 0x11) {
 			powernow_table[i].frequency =
 				freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
 		} else
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 7e1cca13af35..1366c7cfd483 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -47,6 +47,27 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 
+	/*
+	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
+	 *
+	 * A race condition between speculative fetches and invalidating
+	 * a large page.  This is worked around in microcode, but we
+	 * need the microcode to have already been loaded... so if it is
+	 * not, recommend a BIOS update and disable large pages.
+	 */
+	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2) {
+		u32 ucode, junk;
+
+		wrmsr(MSR_IA32_UCODE_REV, 0, 0);
+		sync_core();
+		rdmsr(MSR_IA32_UCODE_REV, junk, ucode);
+
+		if (ucode < 0x20e) {
+			printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
+			clear_cpu_cap(c, X86_FEATURE_PSE);
+		}
+	}
+
 #ifdef CONFIG_X86_64
 	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
 #else
diff --git a/arch/x86/kernel/k8.c b/arch/x86/kernel/k8.c
index cbc4332a77b2..9b895464dd03 100644
--- a/arch/x86/kernel/k8.c
+++ b/arch/x86/kernel/k8.c
@@ -121,3 +121,17 @@ void k8_flush_garts(void)
 }
 EXPORT_SYMBOL_GPL(k8_flush_garts);
 
+static __init int init_k8_nbs(void)
+{
+	int err = 0;
+
+	err = cache_k8_northbridges();
+
+	if (err < 0)
+		printk(KERN_NOTICE "K8 NB: Cannot enumerate AMD northbridges.\n");
+
+	return err;
+}
+
+/* This has to go after the PCI subsystem */
+fs_initcall(init_k8_nbs);
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 4f41b29fde98..0ae24d9b44b3 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -738,7 +738,7 @@ int __init gart_iommu_init(void)
 	unsigned long scratch;
 	long i;
 
-	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0)
+	if (num_k8_northbridges == 0)
 		return 0;
 
 #ifndef CONFIG_AGP_AMD64
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index bb62edf74e72..259f8ce8c0fe 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -284,12 +284,12 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 
 	set_tsk_thread_flag(p, TIF_FORK);
 
-	p->thread.fs = me->thread.fs;
-	p->thread.gs = me->thread.gs;
 	p->thread.io_bitmap_ptr = NULL;
 
 	savesegment(gs, p->thread.gsindex);
+	p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
 	savesegment(fs, p->thread.fsindex);
+	p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
 	savesegment(es, p->thread.es);
 	savesegment(ds, p->thread.ds);
 
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 1ba7e0aca878..4f0c06c7a338 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -109,6 +109,7 @@ void blk_rq_timed_out_timer(unsigned long data)
 	struct request_queue *q = (struct request_queue *) data;
 	unsigned long flags, next = 0;
 	struct request *rq, *tmp;
+	int next_set = 0;
 
 	spin_lock_irqsave(q->queue_lock, flags);
 
@@ -122,16 +123,13 @@ void blk_rq_timed_out_timer(unsigned long data)
 			if (blk_mark_rq_complete(rq))
 				continue;
 			blk_rq_timed_out(rq);
-		} else if (!next || time_after(next, rq->deadline))
+		} else if (!next_set || time_after(next, rq->deadline)) {
 			next = rq->deadline;
+			next_set = 1;
+		}
 	}
 
-	/*
-	 * next can never be 0 here with the list non-empty, since we always
-	 * bump ->deadline to 1 so we can detect if the timer was ever added
-	 * or not. See comment in blk_add_timer()
-	 */
-	if (next)
+	if (next_set)
 		mod_timer(&q->timeout, round_jiffies_up(next));
 
 	spin_unlock_irqrestore(q->queue_lock, flags);
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index 943f2abac9b4..ce038d861eb9 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -324,6 +324,7 @@ struct dma_async_tx_descriptor *
 async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
 			struct page **blocks, struct async_submit_ctl *submit)
 {
+	void *scribble = submit->scribble;
 	int non_zero_srcs, i;
 
 	BUG_ON(faila == failb);
@@ -332,11 +333,13 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
 
 	pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
 
-	/* we need to preserve the contents of 'blocks' for the async
-	 * case, so punt to synchronous if a scribble buffer is not available
+	/* if a dma resource is not available or a scribble buffer is not
+	 * available punt to the synchronous path.  In the 'dma not
+	 * available' case be sure to use the scribble buffer to
+	 * preserve the content of 'blocks' as the caller intended.
 	 */
-	if (!submit->scribble) {
-		void **ptrs = (void **) blocks;
+	if (!async_dma_find_channel(DMA_PQ) || !scribble) {
+		void **ptrs = scribble ? scribble : (void **) blocks;
 
 		async_tx_quiesce(&submit->depend_tx);
 		for (i = 0; i < disks; i++)
@@ -406,11 +409,13 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
 
 	pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
 
-	/* we need to preserve the contents of 'blocks' for the async
-	 * case, so punt to synchronous if a scribble buffer is not available
+	/* if a dma resource is not available or a scribble buffer is not
+	 * available punt to the synchronous path.  In the 'dma not
+	 * available' case be sure to use the scribble buffer to
+	 * preserve the content of 'blocks' as the caller intended.
 	 */
-	if (!scribble) {
-		void **ptrs = (void **) blocks;
+	if (!async_dma_find_channel(DMA_PQ) || !scribble) {
+		void **ptrs = scribble ? scribble : (void **) blocks;
 
 		async_tx_quiesce(&submit->depend_tx);
 		for (i = 0; i < disks; i++)
diff --git a/drivers/Makefile b/drivers/Makefile
index 6ee53c7a57a1..8b0b948eb152 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_SFI) += sfi/
 obj-$(CONFIG_PNP)		+= pnp/
 obj-$(CONFIG_ARM_AMBA)		+= amba/
 
+obj-$(CONFIG_VIRTIO)		+= virtio/
 obj-$(CONFIG_XEN)		+= xen/
 
 # regulators early, since some subsystems rely on them to initialize
@@ -106,7 +107,6 @@ obj-$(CONFIG_HID)		+= hid/
 obj-$(CONFIG_PPC_PS3)		+= ps3/
 obj-$(CONFIG_OF)		+= of/
 obj-$(CONFIG_SSB)		+= ssb/
-obj-$(CONFIG_VIRTIO)		+= virtio/
 obj-$(CONFIG_VLYNQ)		+= vlynq/
 obj-$(CONFIG_STAGING)		+= staging/
 obj-y				+= platform/
diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
index dc4ffadf8122..e02d93c10c6a 100644
--- a/drivers/acpi/power_meter.c
+++ b/drivers/acpi/power_meter.c
@@ -34,7 +34,7 @@
 #define ACPI_POWER_METER_NAME		"power_meter"
 ACPI_MODULE_NAME(ACPI_POWER_METER_NAME);
 #define ACPI_POWER_METER_DEVICE_NAME	"Power Meter"
-#define ACPI_POWER_METER_CLASS		"power_meter_resource"
+#define ACPI_POWER_METER_CLASS		"pwr_meter_resource"
 
 #define NUM_SENSORS			17
 
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 79d33d908b5a..7c85265d58b6 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -450,6 +450,126 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
 	},
 	},
 	{
+	.callback = init_set_sci_en_on_resume,
+	.ident = "Lenovo ThinkPad T410",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
+		},
+	},
+	{
+	.callback = init_set_sci_en_on_resume,
+	.ident = "Lenovo ThinkPad T510",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
+		},
+	},
+	{
+	.callback = init_set_sci_en_on_resume,
+	.ident = "Lenovo ThinkPad W510",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
+		},
+	},
+	{
+	.callback = init_set_sci_en_on_resume,
+	.ident = "Lenovo ThinkPad X201",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
+		},
+	},
+	{
+	.callback = init_set_sci_en_on_resume,
+	.ident = "Lenovo ThinkPad X201",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
+		},
+	},
+	{
+	.callback = init_set_sci_en_on_resume,
+	.ident = "Lenovo ThinkPad T410",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
+		},
+	},
+	{
+	.callback = init_set_sci_en_on_resume,
+	.ident = "Lenovo ThinkPad T510",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
+		},
+	},
+	{
+	.callback = init_set_sci_en_on_resume,
+	.ident = "Lenovo ThinkPad W510",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
+		},
+	},
+	{
+	.callback = init_set_sci_en_on_resume,
+	.ident = "Lenovo ThinkPad X201",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
+		},
+	},
+	{
+	.callback = init_set_sci_en_on_resume,
+	.ident = "Lenovo ThinkPad X201",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
+		},
+	},
+	{
+	.callback = init_set_sci_en_on_resume,
+	.ident = "Lenovo ThinkPad T410",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
+		},
+	},
+	{
+	.callback = init_set_sci_en_on_resume,
+	.ident = "Lenovo ThinkPad T510",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
+		},
+	},
+	{
+	.callback = init_set_sci_en_on_resume,
+	.ident = "Lenovo ThinkPad W510",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
+		},
+	},
+	{
+	.callback = init_set_sci_en_on_resume,
+	.ident = "Lenovo ThinkPad X201",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
+		},
+	},
+	{
+	.callback = init_set_sci_en_on_resume,
+	.ident = "Lenovo ThinkPad X201",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201s"),
+		},
+	},
+	{
 	.callback = init_old_suspend_ordering,
 	.ident = "Panasonic CF51-2L",
 	.matches = {
@@ -458,6 +578,30 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
 		DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
 		},
 	},
+	{
+	.callback = init_set_sci_en_on_resume,
+	.ident = "Dell Studio 1558",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1558"),
+		},
+	},
+	{
+	.callback = init_set_sci_en_on_resume,
+	.ident = "Dell Studio 1557",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1557"),
+		},
+	},
+	{
+	.callback = init_set_sci_en_on_resume,
+	.ident = "Dell Studio 1555",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1555"),
+		},
+	},
 	{},
 };
 #endif /* CONFIG_SUSPEND */
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 9f6cfac0f2cc..228740f356c9 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -879,6 +879,8 @@ static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
 void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
+	struct request_queue *q = qc->scsicmd->device->request_queue;
+	unsigned long flags;
 
 	WARN_ON(!ap->ops->error_handler);
 
@@ -890,7 +892,9 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
 	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
 	 * this function completes.
 	 */
+	spin_lock_irqsave(q->queue_lock, flags);
 	blk_abort_request(qc->scsicmd->request);
+	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
 /**
@@ -1624,6 +1628,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
 	}
 
 	/* okay, this error is ours */
+	memset(&tf, 0, sizeof(tf));
 	rc = ata_eh_read_log_10h(dev, &tag, &tf);
 	if (rc) {
 		ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index bd025059711f..d7d77d4a402c 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -311,7 +311,7 @@ static SYSDEV_ATTR(removable, 0444, show_mem_removable, NULL);
 static ssize_t
 print_block_size(struct class *class, char *buf)
 {
-	return sprintf(buf, "%#lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE);
+	return sprintf(buf, "%lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE);
 }
 
 static CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL);
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index 2fb3a480f6b0..4b66c69eaf57 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -57,7 +57,7 @@ config AGP_AMD
 
 config AGP_AMD64
 	tristate "AMD Opteron/Athlon64 on-CPU GART support"
-	depends on AGP && X86
+	depends on AGP && X86 && K8_NB
 	help
 	  This option gives you AGP support for the GLX component of
 	  X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs.
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 73655aeb3a60..f8e57c6303f2 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -101,7 +101,6 @@ struct menu_device {
 
 	unsigned int	expected_us;
 	u64		predicted_us;
-	unsigned int	measured_us;
 	unsigned int	exit_us;
 	unsigned int	bucket;
 	u64		correction_factor[BUCKETS];
@@ -187,14 +186,14 @@ static int menu_select(struct cpuidle_device *dev)
 	int i;
 	int multiplier;
 
-	data->last_state_idx = 0;
-	data->exit_us = 0;
-
 	if (data->needs_update) {
 		menu_update(dev);
 		data->needs_update = 0;
 	}
 
+	data->last_state_idx = 0;
+	data->exit_us = 0;
+
 	/* Special case when user has set very strict latency requirement */
 	if (unlikely(latency_req == 0))
 		return 0;
@@ -294,7 +293,7 @@ static void menu_update(struct cpuidle_device *dev)
 	new_factor = data->correction_factor[data->bucket]
 			* (DECAY - 1) / DECAY;
 
-	if (data->expected_us > 0 && data->measured_us < MAX_INTERESTING)
+	if (data->expected_us > 0 && measured_us < MAX_INTERESTING)
 		new_factor += RESOLUTION * measured_us / data->expected_us;
 	else
 		/*
diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c
index f5b6d9fe4def..97e64bcdbc06 100644
--- a/drivers/edac/edac_mce_amd.c
+++ b/drivers/edac/edac_mce_amd.c
@@ -294,7 +294,6 @@ wrong_ls_mce:
 void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
 {
 	u32 ec = ERROR_CODE(regs->nbsl);
-	u32 xec = EXT_ERROR_CODE(regs->nbsl);
 
 	if (!handle_errors)
 		return;
@@ -324,7 +323,7 @@ void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
 		pr_cont("\n");
 	}
 
-	pr_emerg("%s.\n", EXT_ERR_MSG(xec));
+	pr_emerg("%s.\n", EXT_ERR_MSG(regs->nbsl));
 
 	if (BUS_ERROR(ec) && nb_bus_decoder)
 		nb_bus_decoder(node_id, regs);
@@ -374,7 +373,7 @@ static int amd_decode_mce(struct notifier_block *nb, unsigned long val,
 	       ((m->status & MCI_STATUS_PCC) ? "yes" : "no"));
 
 	/* do the two bits[14:13] together */
-	ecc = m->status & (3ULL << 45);
+	ecc = (m->status >> 45) & 0x3;
 	if (ecc)
 		pr_cont(", %sECC Error", ((ecc == 2) ? "C" : "U"));
 
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a894ade03093..1372796497c0 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -162,7 +162,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	if (!IS_IRONLAKE(dev)) {
+	if (!HAS_PCH_SPLIT(dev)) {
 		seq_printf(m, "Interrupt enable:    %08x\n",
 			   I915_READ(IER));
 		seq_printf(m, "Interrupt identity:  %08x\n",
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2307f98349f7..d642efd25f6e 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -978,15 +978,21 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
 	 * Some of the preallocated space is taken by the GTT
 	 * and popup.  GTT is 1K per MB of aperture size, and popup is 4K.
 	 */
-	if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev))
+	if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev))
 		overhead = 4096;
 	else
 		overhead = (*aperture_size / 1024) + 4096;
 
 	switch (tmp & INTEL_GMCH_GMS_MASK) {
 	case INTEL_855_GMCH_GMS_DISABLED:
-		DRM_ERROR("video memory is disabled\n");
-		return -1;
+		/* XXX: This is what my A1 silicon has. */
+		if (IS_GEN6(dev)) {
+			stolen = 64 * 1024 * 1024;
+		} else {
+			DRM_ERROR("video memory is disabled\n");
+			return -1;
+		}
+		break;
 	case INTEL_855_GMCH_GMS_STOLEN_1M:
 		stolen = 1 * 1024 * 1024;
 		break;
@@ -1064,7 +1070,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
 	int gtt_offset, gtt_size;
 
 	if (IS_I965G(dev)) {
-		if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+		if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
 			gtt_offset = 2*1024*1024;
 			gtt_size = 2*1024*1024;
 		} else {
@@ -1445,7 +1451,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
-	if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+	if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
 		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
 	}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b99b6a841d95..16ce3ba4c08d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1026,7 +1026,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define IS_845G(dev)		((dev)->pci_device == 0x2562)
 #define IS_I85X(dev)		((dev)->pci_device == 0x3582)
 #define IS_I865G(dev)		((dev)->pci_device == 0x2572)
-#define IS_I8XX(dev)		(INTEL_INFO(dev)->is_i8xx)
+#define IS_GEN2(dev)		(INTEL_INFO(dev)->is_i8xx)
 #define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
 #define IS_I915GM(dev)		((dev)->pci_device == 0x2592)
 #define IS_I945G(dev)		((dev)->pci_device == 0x2772)
@@ -1045,8 +1045,29 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define IS_I9XX(dev)		(INTEL_INFO(dev)->is_i9xx)
 #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
 
+#define IS_GEN3(dev)	(IS_I915G(dev) ||	\
+			 IS_I915GM(dev) ||	\
+			 IS_I945G(dev) ||	\
+			 IS_I945GM(dev) ||	\
+			 IS_G33(dev) ||		\
+			 IS_PINEVIEW(dev))
+#define IS_GEN4(dev)	((dev)->pci_device == 0x2972 ||	\
+			 (dev)->pci_device == 0x2982 ||	\
+			 (dev)->pci_device == 0x2992 ||	\
+			 (dev)->pci_device == 0x29A2 ||	\
+			 (dev)->pci_device == 0x2A02 ||	\
+			 (dev)->pci_device == 0x2A12 ||	\
+			 (dev)->pci_device == 0x2E02 ||	\
+			 (dev)->pci_device == 0x2E12 ||	\
+			 (dev)->pci_device == 0x2E22 ||	\
+			 (dev)->pci_device == 0x2E32 ||	\
+			 (dev)->pci_device == 0x2A42 ||	\
+			 (dev)->pci_device == 0x2E42)
+
 #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
 
+#define IS_GEN6(dev)	((dev)->pci_device == 0x0102)
+
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
  * rows, which changed the alignment requirements and fence programming.
  */
@@ -1067,6 +1088,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
 #define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
 
+#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) ||	\
+			    IS_GEN6(dev))
+
 #define PRIMARY_RINGBUFFER_SIZE         (128*1024)
 
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index fd099a1e9df0..64584009fca0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1819,7 +1819,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
 		return -EIO;
 
 	if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
-		if (IS_IRONLAKE(dev))
+		if (HAS_PCH_SPLIT(dev))
 			ier = I915_READ(DEIER) | I915_READ(GTIER);
 		else
 			ier = I915_READ(IER);
@@ -2316,6 +2316,12 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
 	pitch_val = obj_priv->stride / tile_width;
 	pitch_val = ffs(pitch_val) - 1;
 
+	if (obj_priv->tiling_mode == I915_TILING_Y &&
+	    HAS_128_BYTE_Y_TILING(dev))
+		WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
+	else
+		WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
+
 	val = obj_priv->gtt_offset;
 	if (obj_priv->tiling_mode == I915_TILING_Y)
 		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index df278b2685bf..040e80cb121c 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -209,7 +209,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
 	bool need_disable;
 
-	if (IS_IRONLAKE(dev)) {
+	if (IS_IRONLAKE(dev) || IS_GEN6(dev)) {
 		/* On Ironlake whatever DRAM config, GPU always do
 		 * same swizzling setup.
 		 */
@@ -357,21 +357,17 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 		 * reg, so dont bother to check the size */
 		if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
 			return false;
-	} else if (IS_I9XX(dev)) {
-		uint32_t pitch_val = ffs(stride / tile_width) - 1;
-
-		/* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB)
-		 * instead of 4 (2KB) on 945s.
-		 */
-		if (pitch_val > I915_FENCE_MAX_PITCH_VAL ||
-		    size > (I830_FENCE_MAX_SIZE_VAL << 20))
+	} else if (IS_GEN3(dev) || IS_GEN2(dev)) {
+		if (stride > 8192)
 			return false;
-	} else {
-		uint32_t pitch_val = ffs(stride / tile_width) - 1;
 
-		if (pitch_val > I830_FENCE_MAX_PITCH_VAL ||
-		    size > (I830_FENCE_MAX_SIZE_VAL << 19))
+		if (IS_GEN3(dev)) {
+			if (size > I830_FENCE_MAX_SIZE_VAL << 20)
 				return false;
+		} else {
+			if (size > I830_FENCE_MAX_SIZE_VAL << 19)
+				return false;
+		}
 	}
 
 	/* 965+ just needs multiples of tile width */
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a17d6bdfe63e..032f667cdedc 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -576,7 +576,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
576 576
577 atomic_inc(&dev_priv->irq_received); 577 atomic_inc(&dev_priv->irq_received);
578 578
579 if (IS_IRONLAKE(dev)) 579 if (HAS_PCH_SPLIT(dev))
580 return ironlake_irq_handler(dev); 580 return ironlake_irq_handler(dev);
581 581
582 iir = I915_READ(IIR); 582 iir = I915_READ(IIR);
@@ -737,7 +737,7 @@ void i915_user_irq_get(struct drm_device *dev)
737 737
738 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 738 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
739 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { 739 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
740 if (IS_IRONLAKE(dev)) 740 if (HAS_PCH_SPLIT(dev))
741 ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT); 741 ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
742 else 742 else
743 i915_enable_irq(dev_priv, I915_USER_INTERRUPT); 743 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
@@ -753,7 +753,7 @@ void i915_user_irq_put(struct drm_device *dev)
753 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 753 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
754 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); 754 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
755 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { 755 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
756 if (IS_IRONLAKE(dev)) 756 if (HAS_PCH_SPLIT(dev))
757 ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT); 757 ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
758 else 758 else
759 i915_disable_irq(dev_priv, I915_USER_INTERRUPT); 759 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
@@ -861,7 +861,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
861 return -EINVAL; 861 return -EINVAL;
862 862
863 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 863 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
864 if (IS_IRONLAKE(dev)) 864 if (HAS_PCH_SPLIT(dev))
865 ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 865 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
866 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); 866 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
867 else if (IS_I965G(dev)) 867 else if (IS_I965G(dev))
@@ -883,7 +883,7 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
883 unsigned long irqflags; 883 unsigned long irqflags;
884 884
885 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 885 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
886 if (IS_IRONLAKE(dev)) 886 if (HAS_PCH_SPLIT(dev))
887 ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 887 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
888 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); 888 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
889 else 889 else
@@ -897,7 +897,7 @@ void i915_enable_interrupt (struct drm_device *dev)
897{ 897{
898 struct drm_i915_private *dev_priv = dev->dev_private; 898 struct drm_i915_private *dev_priv = dev->dev_private;
899 899
900 if (!IS_IRONLAKE(dev)) 900 if (!HAS_PCH_SPLIT(dev))
901 opregion_enable_asle(dev); 901 opregion_enable_asle(dev);
902 dev_priv->irq_enabled = 1; 902 dev_priv->irq_enabled = 1;
903} 903}
@@ -1076,7 +1076,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
1076 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 1076 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
1077 INIT_WORK(&dev_priv->error_work, i915_error_work_func); 1077 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
1078 1078
1079 if (IS_IRONLAKE(dev)) { 1079 if (HAS_PCH_SPLIT(dev)) {
1080 ironlake_irq_preinstall(dev); 1080 ironlake_irq_preinstall(dev);
1081 return; 1081 return;
1082 } 1082 }
@@ -1108,7 +1108,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1108 1108
1109 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 1109 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1110 1110
1111 if (IS_IRONLAKE(dev)) 1111 if (HAS_PCH_SPLIT(dev))
1112 return ironlake_irq_postinstall(dev); 1112 return ironlake_irq_postinstall(dev);
1113 1113
1114 /* Unmask the interrupts that we always want on. */ 1114 /* Unmask the interrupts that we always want on. */
@@ -1196,7 +1196,7 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
1196 1196
1197 dev_priv->vblank_pipe = 0; 1197 dev_priv->vblank_pipe = 0;
1198 1198
1199 if (IS_IRONLAKE(dev)) { 1199 if (HAS_PCH_SPLIT(dev)) {
1200 ironlake_irq_uninstall(dev); 1200 ironlake_irq_uninstall(dev);
1201 return; 1201 return;
1202 } 1202 }
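
Note: every IS_IRONLAKE() check in the interrupt code above (and in the register, CRT, display and LVDS hunks that follow) is widened to HAS_PCH_SPLIT(), because Sandybridge moves the display I/O onto the PCH just as Ironlake does and must take the same register paths. The predicate itself is added in the i915_drv.h hunk, which is not shown in this excerpt; it presumably boils down to the sketch below, where IS_GEN6() is the new Sandybridge check:

    /* assumed shape of the new predicate in i915_drv.h; a sketch, not the exact hunk */
    #define HAS_PCH_SPLIT(dev)  (IS_IRONLAKE(dev) || IS_GEN6(dev))
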
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index ab1bd2d3d3b6..fd95bdf7e9a1 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -221,7 +221,7 @@
221#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) 221#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
222#define I830_FENCE_PITCH_SHIFT 4 222#define I830_FENCE_PITCH_SHIFT 4
223#define I830_FENCE_REG_VALID (1<<0) 223#define I830_FENCE_REG_VALID (1<<0)
224#define I915_FENCE_MAX_PITCH_VAL 0x10 224#define I915_FENCE_MAX_PITCH_VAL 4
225#define I830_FENCE_MAX_PITCH_VAL 6 225#define I830_FENCE_MAX_PITCH_VAL 6
226#define I830_FENCE_MAX_SIZE_VAL (1<<8) 226#define I830_FENCE_MAX_SIZE_VAL (1<<8)
227 227
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 15fbc1b5a83e..70c9d4ba7042 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -247,6 +247,7 @@ static void
247parse_general_features(struct drm_i915_private *dev_priv, 247parse_general_features(struct drm_i915_private *dev_priv,
248 struct bdb_header *bdb) 248 struct bdb_header *bdb)
249{ 249{
250 struct drm_device *dev = dev_priv->dev;
250 struct bdb_general_features *general; 251 struct bdb_general_features *general;
251 252
252 /* Set sensible defaults in case we can't find the general block */ 253 /* Set sensible defaults in case we can't find the general block */
@@ -263,7 +264,7 @@ parse_general_features(struct drm_i915_private *dev_priv,
263 if (IS_I85X(dev_priv->dev)) 264 if (IS_I85X(dev_priv->dev))
264 dev_priv->lvds_ssc_freq = 265 dev_priv->lvds_ssc_freq =
265 general->ssc_freq ? 66 : 48; 266 general->ssc_freq ? 66 : 48;
266 else if (IS_IRONLAKE(dev_priv->dev)) 267 else if (IS_IRONLAKE(dev_priv->dev) || IS_GEN6(dev))
267 dev_priv->lvds_ssc_freq = 268 dev_priv->lvds_ssc_freq =
268 general->ssc_freq ? 100 : 120; 269 general->ssc_freq ? 100 : 120;
269 else 270 else
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 79dd4026586f..fccf07470c8f 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -39,7 +39,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
39 struct drm_i915_private *dev_priv = dev->dev_private; 39 struct drm_i915_private *dev_priv = dev->dev_private;
40 u32 temp, reg; 40 u32 temp, reg;
41 41
42 if (IS_IRONLAKE(dev)) 42 if (HAS_PCH_SPLIT(dev))
43 reg = PCH_ADPA; 43 reg = PCH_ADPA;
44 else 44 else
45 reg = ADPA; 45 reg = ADPA;
@@ -113,7 +113,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
113 else 113 else
114 dpll_md_reg = DPLL_B_MD; 114 dpll_md_reg = DPLL_B_MD;
115 115
116 if (IS_IRONLAKE(dev)) 116 if (HAS_PCH_SPLIT(dev))
117 adpa_reg = PCH_ADPA; 117 adpa_reg = PCH_ADPA;
118 else 118 else
119 adpa_reg = ADPA; 119 adpa_reg = ADPA;
@@ -122,7 +122,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
122 * Disable separate mode multiplier used when cloning SDVO to CRT 122 * Disable separate mode multiplier used when cloning SDVO to CRT
123 * XXX this needs to be adjusted when we really are cloning 123 * XXX this needs to be adjusted when we really are cloning
124 */ 124 */
125 if (IS_I965G(dev) && !IS_IRONLAKE(dev)) { 125 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
126 dpll_md = I915_READ(dpll_md_reg); 126 dpll_md = I915_READ(dpll_md_reg);
127 I915_WRITE(dpll_md_reg, 127 I915_WRITE(dpll_md_reg,
128 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); 128 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
@@ -136,11 +136,11 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
136 136
137 if (intel_crtc->pipe == 0) { 137 if (intel_crtc->pipe == 0) {
138 adpa |= ADPA_PIPE_A_SELECT; 138 adpa |= ADPA_PIPE_A_SELECT;
139 if (!IS_IRONLAKE(dev)) 139 if (!HAS_PCH_SPLIT(dev))
140 I915_WRITE(BCLRPAT_A, 0); 140 I915_WRITE(BCLRPAT_A, 0);
141 } else { 141 } else {
142 adpa |= ADPA_PIPE_B_SELECT; 142 adpa |= ADPA_PIPE_B_SELECT;
143 if (!IS_IRONLAKE(dev)) 143 if (!HAS_PCH_SPLIT(dev))
144 I915_WRITE(BCLRPAT_B, 0); 144 I915_WRITE(BCLRPAT_B, 0);
145 } 145 }
146 146
@@ -202,7 +202,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
202 u32 hotplug_en; 202 u32 hotplug_en;
203 int i, tries = 0; 203 int i, tries = 0;
204 204
205 if (IS_IRONLAKE(dev)) 205 if (HAS_PCH_SPLIT(dev))
206 return intel_ironlake_crt_detect_hotplug(connector); 206 return intel_ironlake_crt_detect_hotplug(connector);
207 207
208 /* 208 /*
@@ -524,7 +524,7 @@ void intel_crt_init(struct drm_device *dev)
524 &intel_output->enc); 524 &intel_output->enc);
525 525
526 /* Set up the DDC bus. */ 526 /* Set up the DDC bus. */
527 if (IS_IRONLAKE(dev)) 527 if (HAS_PCH_SPLIT(dev))
528 i2c_reg = PCH_GPIOA; 528 i2c_reg = PCH_GPIOA;
529 else { 529 else {
530 i2c_reg = GPIOA; 530 i2c_reg = GPIOA;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b27202d23ebc..4b2458d8bf88 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -232,7 +232,7 @@ struct intel_limit {
232#define G4X_P2_DISPLAY_PORT_FAST 10 232#define G4X_P2_DISPLAY_PORT_FAST 10
233#define G4X_P2_DISPLAY_PORT_LIMIT 0 233#define G4X_P2_DISPLAY_PORT_LIMIT 0
234 234
235/* Ironlake */ 235/* Ironlake / Sandybridge */
236/* as we calculate clock using (register_value + 2) for 236/* as we calculate clock using (register_value + 2) for
237 N/M1/M2, so here the range value for them is (actual_value-2). 237 N/M1/M2, so here the range value for them is (actual_value-2).
238 */ 238 */
@@ -690,7 +690,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
690 struct drm_device *dev = crtc->dev; 690 struct drm_device *dev = crtc->dev;
691 const intel_limit_t *limit; 691 const intel_limit_t *limit;
692 692
693 if (IS_IRONLAKE(dev)) 693 if (HAS_PCH_SPLIT(dev))
694 limit = intel_ironlake_limit(crtc); 694 limit = intel_ironlake_limit(crtc);
695 else if (IS_G4X(dev)) { 695 else if (IS_G4X(dev)) {
696 limit = intel_g4x_limit(crtc); 696 limit = intel_g4x_limit(crtc);
@@ -1366,7 +1366,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1366 dspcntr &= ~DISPPLANE_TILED; 1366 dspcntr &= ~DISPPLANE_TILED;
1367 } 1367 }
1368 1368
1369 if (IS_IRONLAKE(dev)) 1369 if (HAS_PCH_SPLIT(dev))
1370 /* must disable */ 1370 /* must disable */
1371 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 1371 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
1372 1372
@@ -1427,7 +1427,7 @@ static void i915_disable_vga (struct drm_device *dev)
1427 u8 sr1; 1427 u8 sr1;
1428 u32 vga_reg; 1428 u32 vga_reg;
1429 1429
1430 if (IS_IRONLAKE(dev)) 1430 if (HAS_PCH_SPLIT(dev))
1431 vga_reg = CPU_VGACNTRL; 1431 vga_reg = CPU_VGACNTRL;
1432 else 1432 else
1433 vga_reg = VGACNTRL; 1433 vga_reg = VGACNTRL;
@@ -2111,7 +2111,7 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
2111 struct drm_display_mode *adjusted_mode) 2111 struct drm_display_mode *adjusted_mode)
2112{ 2112{
2113 struct drm_device *dev = crtc->dev; 2113 struct drm_device *dev = crtc->dev;
2114 if (IS_IRONLAKE(dev)) { 2114 if (HAS_PCH_SPLIT(dev)) {
2115 /* FDI link clock is fixed at 2.7G */ 2115 /* FDI link clock is fixed at 2.7G */
2116 if (mode->clock * 3 > 27000 * 4) 2116 if (mode->clock * 3 > 27000 * 4)
2117 return MODE_CLOCK_HIGH; 2117 return MODE_CLOCK_HIGH;
@@ -2967,7 +2967,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2967 refclk / 1000); 2967 refclk / 1000);
2968 } else if (IS_I9XX(dev)) { 2968 } else if (IS_I9XX(dev)) {
2969 refclk = 96000; 2969 refclk = 96000;
2970 if (IS_IRONLAKE(dev)) 2970 if (HAS_PCH_SPLIT(dev))
2971 refclk = 120000; /* 120Mhz refclk */ 2971 refclk = 120000; /* 120Mhz refclk */
2972 } else { 2972 } else {
2973 refclk = 48000; 2973 refclk = 48000;
@@ -3025,7 +3025,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3025 } 3025 }
3026 3026
3027 /* FDI link */ 3027 /* FDI link */
3028 if (IS_IRONLAKE(dev)) { 3028 if (HAS_PCH_SPLIT(dev)) {
3029 int lane, link_bw, bpp; 3029 int lane, link_bw, bpp;
3030 /* eDP doesn't require FDI link, so just set DP M/N 3030 /* eDP doesn't require FDI link, so just set DP M/N
3031 according to current link config */ 3031 according to current link config */
@@ -3102,7 +3102,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3102 * PCH B stepping, previous chipset stepping should be 3102 * PCH B stepping, previous chipset stepping should be
3103 * ignoring this setting. 3103 * ignoring this setting.
3104 */ 3104 */
3105 if (IS_IRONLAKE(dev)) { 3105 if (HAS_PCH_SPLIT(dev)) {
3106 temp = I915_READ(PCH_DREF_CONTROL); 3106 temp = I915_READ(PCH_DREF_CONTROL);
3107 /* Always enable nonspread source */ 3107 /* Always enable nonspread source */
3108 temp &= ~DREF_NONSPREAD_SOURCE_MASK; 3108 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
@@ -3149,7 +3149,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3149 reduced_clock.m2; 3149 reduced_clock.m2;
3150 } 3150 }
3151 3151
3152 if (!IS_IRONLAKE(dev)) 3152 if (!HAS_PCH_SPLIT(dev))
3153 dpll = DPLL_VGA_MODE_DIS; 3153 dpll = DPLL_VGA_MODE_DIS;
3154 3154
3155 if (IS_I9XX(dev)) { 3155 if (IS_I9XX(dev)) {
@@ -3162,7 +3162,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3162 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; 3162 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
3163 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 3163 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3164 dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; 3164 dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
3165 else if (IS_IRONLAKE(dev)) 3165 else if (HAS_PCH_SPLIT(dev))
3166 dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 3166 dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
3167 } 3167 }
3168 if (is_dp) 3168 if (is_dp)
@@ -3174,7 +3174,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3174 else { 3174 else {
3175 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 3175 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
3176 /* also FPA1 */ 3176 /* also FPA1 */
3177 if (IS_IRONLAKE(dev)) 3177 if (HAS_PCH_SPLIT(dev))
3178 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 3178 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
3179 if (IS_G4X(dev) && has_reduced_clock) 3179 if (IS_G4X(dev) && has_reduced_clock)
3180 dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 3180 dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
@@ -3193,7 +3193,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3193 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 3193 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
3194 break; 3194 break;
3195 } 3195 }
3196 if (IS_I965G(dev) && !IS_IRONLAKE(dev)) 3196 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
3197 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 3197 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
3198 } else { 3198 } else {
3199 if (is_lvds) { 3199 if (is_lvds) {
@@ -3227,7 +3227,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3227 3227
3228 /* Ironlake's plane is forced to pipe, bit 24 is to 3228 /* Ironlake's plane is forced to pipe, bit 24 is to
3229 enable color space conversion */ 3229 enable color space conversion */
3230 if (!IS_IRONLAKE(dev)) { 3230 if (!HAS_PCH_SPLIT(dev)) {
3231 if (pipe == 0) 3231 if (pipe == 0)
3232 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; 3232 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
3233 else 3233 else
@@ -3254,14 +3254,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3254 3254
3255 3255
3256 /* Disable the panel fitter if it was on our pipe */ 3256 /* Disable the panel fitter if it was on our pipe */
3257 if (!IS_IRONLAKE(dev) && intel_panel_fitter_pipe(dev) == pipe) 3257 if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
3258 I915_WRITE(PFIT_CONTROL, 0); 3258 I915_WRITE(PFIT_CONTROL, 0);
3259 3259
3260 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); 3260 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
3261 drm_mode_debug_printmodeline(mode); 3261 drm_mode_debug_printmodeline(mode);
3262 3262
3263 /* assign to Ironlake registers */ 3263 /* assign to Ironlake registers */
3264 if (IS_IRONLAKE(dev)) { 3264 if (HAS_PCH_SPLIT(dev)) {
3265 fp_reg = pch_fp_reg; 3265 fp_reg = pch_fp_reg;
3266 dpll_reg = pch_dpll_reg; 3266 dpll_reg = pch_dpll_reg;
3267 } 3267 }
@@ -3282,7 +3282,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3282 if (is_lvds) { 3282 if (is_lvds) {
3283 u32 lvds; 3283 u32 lvds;
3284 3284
3285 if (IS_IRONLAKE(dev)) 3285 if (HAS_PCH_SPLIT(dev))
3286 lvds_reg = PCH_LVDS; 3286 lvds_reg = PCH_LVDS;
3287 3287
3288 lvds = I915_READ(lvds_reg); 3288 lvds = I915_READ(lvds_reg);
@@ -3328,7 +3328,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3328 /* Wait for the clocks to stabilize. */ 3328 /* Wait for the clocks to stabilize. */
3329 udelay(150); 3329 udelay(150);
3330 3330
3331 if (IS_I965G(dev) && !IS_IRONLAKE(dev)) { 3331 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
3332 if (is_sdvo) { 3332 if (is_sdvo) {
3333 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; 3333 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
3334 I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | 3334 I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
@@ -3375,14 +3375,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3375 /* pipesrc and dspsize control the size that is scaled from, which should 3375 /* pipesrc and dspsize control the size that is scaled from, which should
3376 * always be the user's requested size. 3376 * always be the user's requested size.
3377 */ 3377 */
3378 if (!IS_IRONLAKE(dev)) { 3378 if (!HAS_PCH_SPLIT(dev)) {
3379 I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | 3379 I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
3380 (mode->hdisplay - 1)); 3380 (mode->hdisplay - 1));
3381 I915_WRITE(dsppos_reg, 0); 3381 I915_WRITE(dsppos_reg, 0);
3382 } 3382 }
3383 I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); 3383 I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
3384 3384
3385 if (IS_IRONLAKE(dev)) { 3385 if (HAS_PCH_SPLIT(dev)) {
3386 I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m); 3386 I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
3387 I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n); 3387 I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
3388 I915_WRITE(link_m1_reg, m_n.link_m); 3388 I915_WRITE(link_m1_reg, m_n.link_m);
@@ -3403,7 +3403,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3403 3403
3404 intel_wait_for_vblank(dev); 3404 intel_wait_for_vblank(dev);
3405 3405
3406 if (IS_IRONLAKE(dev)) { 3406 if (HAS_PCH_SPLIT(dev)) {
3407 /* enable address swizzle for tiling buffer */ 3407 /* enable address swizzle for tiling buffer */
3408 temp = I915_READ(DISP_ARB_CTL); 3408 temp = I915_READ(DISP_ARB_CTL);
3409 I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING); 3409 I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
@@ -3438,7 +3438,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
3438 return; 3438 return;
3439 3439
3440 /* use legacy palette for Ironlake */ 3440 /* use legacy palette for Ironlake */
3441 if (IS_IRONLAKE(dev)) 3441 if (HAS_PCH_SPLIT(dev))
3442 palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A : 3442 palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A :
3443 LGC_PALETTE_B; 3443 LGC_PALETTE_B;
3444 3444
@@ -3922,7 +3922,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
3922 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 3922 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
3923 int dpll = I915_READ(dpll_reg); 3923 int dpll = I915_READ(dpll_reg);
3924 3924
3925 if (IS_IRONLAKE(dev)) 3925 if (HAS_PCH_SPLIT(dev))
3926 return; 3926 return;
3927 3927
3928 if (!dev_priv->lvds_downclock_avail) 3928 if (!dev_priv->lvds_downclock_avail)
@@ -3961,7 +3961,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
3961 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 3961 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
3962 int dpll = I915_READ(dpll_reg); 3962 int dpll = I915_READ(dpll_reg);
3963 3963
3964 if (IS_IRONLAKE(dev)) 3964 if (HAS_PCH_SPLIT(dev))
3965 return; 3965 return;
3966 3966
3967 if (!dev_priv->lvds_downclock_avail) 3967 if (!dev_priv->lvds_downclock_avail)
@@ -4382,7 +4382,7 @@ static void intel_setup_outputs(struct drm_device *dev)
4382 if (IS_MOBILE(dev) && !IS_I830(dev)) 4382 if (IS_MOBILE(dev) && !IS_I830(dev))
4383 intel_lvds_init(dev); 4383 intel_lvds_init(dev);
4384 4384
4385 if (IS_IRONLAKE(dev)) { 4385 if (HAS_PCH_SPLIT(dev)) {
4386 int found; 4386 int found;
4387 4387
4388 if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED)) 4388 if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED))
@@ -4451,7 +4451,7 @@ static void intel_setup_outputs(struct drm_device *dev)
4451 DRM_DEBUG_KMS("probing DP_D\n"); 4451 DRM_DEBUG_KMS("probing DP_D\n");
4452 intel_dp_init(dev, DP_D); 4452 intel_dp_init(dev, DP_D);
4453 } 4453 }
4454 } else if (IS_I8XX(dev)) 4454 } else if (IS_GEN2(dev))
4455 intel_dvo_init(dev); 4455 intel_dvo_init(dev);
4456 4456
4457 if (SUPPORTS_TV(dev)) 4457 if (SUPPORTS_TV(dev))
@@ -4599,7 +4599,7 @@ void intel_init_clock_gating(struct drm_device *dev)
4599 * Disable clock gating reported to work incorrectly according to the 4599 * Disable clock gating reported to work incorrectly according to the
4600 * specs, but enable as much else as we can. 4600 * specs, but enable as much else as we can.
4601 */ 4601 */
4602 if (IS_IRONLAKE(dev)) { 4602 if (HAS_PCH_SPLIT(dev)) {
4603 return; 4603 return;
4604 } else if (IS_G4X(dev)) { 4604 } else if (IS_G4X(dev)) {
4605 uint32_t dspclk_gate; 4605 uint32_t dspclk_gate;
@@ -4672,7 +4672,7 @@ static void intel_init_display(struct drm_device *dev)
4672 struct drm_i915_private *dev_priv = dev->dev_private; 4672 struct drm_i915_private *dev_priv = dev->dev_private;
4673 4673
4674 /* We always want a DPMS function */ 4674 /* We always want a DPMS function */
4675 if (IS_IRONLAKE(dev)) 4675 if (HAS_PCH_SPLIT(dev))
4676 dev_priv->display.dpms = ironlake_crtc_dpms; 4676 dev_priv->display.dpms = ironlake_crtc_dpms;
4677 else 4677 else
4678 dev_priv->display.dpms = i9xx_crtc_dpms; 4678 dev_priv->display.dpms = i9xx_crtc_dpms;
@@ -4715,7 +4715,7 @@ static void intel_init_display(struct drm_device *dev)
4715 i830_get_display_clock_speed; 4715 i830_get_display_clock_speed;
4716 4716
4717 /* For FIFO watermark updates */ 4717 /* For FIFO watermark updates */
4718 if (IS_IRONLAKE(dev)) 4718 if (HAS_PCH_SPLIT(dev))
4719 dev_priv->display.update_wm = NULL; 4719 dev_priv->display.update_wm = NULL;
4720 else if (IS_G4X(dev)) 4720 else if (IS_G4X(dev))
4721 dev_priv->display.update_wm = g4x_update_wm; 4721 dev_priv->display.update_wm = g4x_update_wm;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 1238bc981bb4..66df0c36bef9 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -661,7 +661,7 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
661 /* ACPI lid methods were generally unreliable in this generation, so 661 /* ACPI lid methods were generally unreliable in this generation, so
662 * don't even bother. 662 * don't even bother.
663 */ 663 */
664 if (IS_I8XX(dev)) 664 if (IS_GEN2(dev))
665 return connector_status_connected; 665 return connector_status_connected;
666 666
667 if (!dmi_check_system(bad_lid_status) && !acpi_lid_open()) 667 if (!dmi_check_system(bad_lid_status) && !acpi_lid_open())
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 63f569b580b5..6b890420d17d 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -172,7 +172,7 @@ struct overlay_registers {
172#define OFC_UPDATE 0x1 172#define OFC_UPDATE 0x1
173 173
174#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev)) 174#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev))
175#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev)) 175#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev) && !IS_GEN6(dev))
176 176
177 177
178static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay) 178static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 10be7b5fbe97..855911ebdb4d 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -1210,12 +1210,23 @@ static int i2c_detect_address(struct i2c_client *temp_client,
1210 return 0; 1210 return 0;
1211 1211
1212 /* Make sure there is something at this address */ 1212 /* Make sure there is something at this address */
1213 if (i2c_smbus_xfer(adapter, addr, 0, 0, 0, I2C_SMBUS_QUICK, NULL) < 0) 1213 if (addr == 0x73 && (adapter->class & I2C_CLASS_HWMON)) {
1214 return 0; 1214 /* Special probe for FSC hwmon chips */
1215 union i2c_smbus_data dummy;
1215 1216
1216 /* Prevent 24RF08 corruption */ 1217 if (i2c_smbus_xfer(adapter, addr, 0, I2C_SMBUS_READ, 0,
1217 if ((addr & ~0x0f) == 0x50) 1218 I2C_SMBUS_BYTE_DATA, &dummy) < 0)
1218 i2c_smbus_xfer(adapter, addr, 0, 0, 0, I2C_SMBUS_QUICK, NULL); 1219 return 0;
1220 } else {
1221 if (i2c_smbus_xfer(adapter, addr, 0, I2C_SMBUS_WRITE, 0,
1222 I2C_SMBUS_QUICK, NULL) < 0)
1223 return 0;
1224
1225 /* Prevent 24RF08 corruption */
1226 if ((addr & ~0x0f) == 0x50)
1227 i2c_smbus_xfer(adapter, addr, 0, I2C_SMBUS_WRITE, 0,
1228 I2C_SMBUS_QUICK, NULL);
1229 }
1219 1230
1220 /* Finally call the custom detection function */ 1231 /* Finally call the custom detection function */
1221 memset(&info, 0, sizeof(struct i2c_board_info)); 1232 memset(&info, 0, sizeof(struct i2c_board_info));
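
Note: the detection path above stops using an SMBus Quick write for address 0x73 on hwmon-class adapters, because the FSC hardware-monitoring chips that live at that address are reported to misbehave when probed with Quick; a harmless byte-data read is used there instead, while all other addresses keep the Quick-write probe plus the 24RF08 corruption workaround. A hedged sketch of the two probe styles, using i2c_smbus_xfer() as in the hunk (a negative return means nothing answered):

    #include <linux/i2c.h>

    /* sketch: probe with an SMBus Quick write (the default case) */
    static int probe_quick_write(struct i2c_adapter *adap, u16 addr)
    {
            return i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_WRITE, 0,
                                  I2C_SMBUS_QUICK, NULL);
    }

    /* sketch: probe with a byte-data read (the 0x73 hwmon special case) */
    static int probe_read_byte_data(struct i2c_adapter *adap, u16 addr)
    {
            union i2c_smbus_data dummy;

            return i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, 0,
                                  I2C_SMBUS_BYTE_DATA, &dummy);
    }
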
diff --git a/drivers/md/md.c b/drivers/md/md.c
index a20a71e5efd3..2ecd1d5eb4fa 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2108,12 +2108,18 @@ repeat:
2108 if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */ 2108 if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
2109 /* .. if the array isn't clean, an 'even' event must also go 2109 /* .. if the array isn't clean, an 'even' event must also go
2110 * to spares. */ 2110 * to spares. */
2111 if ((mddev->events&1)==0) 2111 if ((mddev->events&1)==0) {
2112 nospares = 0; 2112 nospares = 0;
2113 sync_req = 2; /* force a second update to get the
2114 * even/odd in sync */
2115 }
2113 } else { 2116 } else {
2114 /* otherwise an 'odd' event must go to spares */ 2117 /* otherwise an 'odd' event must go to spares */
2115 if ((mddev->events&1)) 2118 if ((mddev->events&1)) {
2116 nospares = 0; 2119 nospares = 0;
2120 sync_req = 2; /* force a second update to get the
2121 * even/odd in sync */
2122 }
2117 } 2123 }
2118 } 2124 }
2119 2125
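
Note: md reserves the low bit of the superblock event count to decide whether spares need an update: for a clean array an update that must reach the spares has to land on an odd count, for a dirty array on an even one. When the parity is off, the hunk above now not only clears nospares but also forces a second superblock write (sync_req = 2) so the counts end up back in step. A rough reading aid, not kernel code:

    /* illustration only: which updates must also be written to the spare devices? */
    static int update_must_hit_spares(unsigned long long events, int array_clean)
    {
            /* clean array: odd event counts go to spares; dirty array: even ones do */
            return array_clean ? (events & 1) : !(events & 1);
    }
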
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index b61eaa63328d..bec86303d915 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1527,7 +1527,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
1527 1527
1528 clear_bit(R5_UPTODATE, &sh->dev[i].flags); 1528 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
1529 atomic_inc(&rdev->read_errors); 1529 atomic_inc(&rdev->read_errors);
1530 if (conf->mddev->degraded) 1530 if (conf->mddev->degraded >= conf->max_degraded)
1531 printk_rl(KERN_WARNING 1531 printk_rl(KERN_WARNING
1532 "raid5:%s: read error not correctable " 1532 "raid5:%s: read error not correctable "
1533 "(sector %llu on %s).\n", 1533 "(sector %llu on %s).\n",
@@ -1650,8 +1650,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1650 int previous, int *dd_idx, 1650 int previous, int *dd_idx,
1651 struct stripe_head *sh) 1651 struct stripe_head *sh)
1652{ 1652{
1653 long stripe; 1653 sector_t stripe, stripe2;
1654 unsigned long chunk_number; 1654 sector_t chunk_number;
1655 unsigned int chunk_offset; 1655 unsigned int chunk_offset;
1656 int pd_idx, qd_idx; 1656 int pd_idx, qd_idx;
1657 int ddf_layout = 0; 1657 int ddf_layout = 0;
@@ -1671,18 +1671,13 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1671 */ 1671 */
1672 chunk_offset = sector_div(r_sector, sectors_per_chunk); 1672 chunk_offset = sector_div(r_sector, sectors_per_chunk);
1673 chunk_number = r_sector; 1673 chunk_number = r_sector;
1674 BUG_ON(r_sector != chunk_number);
1675 1674
1676 /* 1675 /*
1677 * Compute the stripe number 1676 * Compute the stripe number
1678 */ 1677 */
1679 stripe = chunk_number / data_disks; 1678 stripe = chunk_number;
1680 1679 *dd_idx = sector_div(stripe, data_disks);
1681 /* 1680 stripe2 = stripe;
1682 * Compute the data disk and parity disk indexes inside the stripe
1683 */
1684 *dd_idx = chunk_number % data_disks;
1685
1686 /* 1681 /*
1687 * Select the parity disk based on the user selected algorithm. 1682 * Select the parity disk based on the user selected algorithm.
1688 */ 1683 */
@@ -1694,21 +1689,21 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1694 case 5: 1689 case 5:
1695 switch (algorithm) { 1690 switch (algorithm) {
1696 case ALGORITHM_LEFT_ASYMMETRIC: 1691 case ALGORITHM_LEFT_ASYMMETRIC:
1697 pd_idx = data_disks - stripe % raid_disks; 1692 pd_idx = data_disks - sector_div(stripe2, raid_disks);
1698 if (*dd_idx >= pd_idx) 1693 if (*dd_idx >= pd_idx)
1699 (*dd_idx)++; 1694 (*dd_idx)++;
1700 break; 1695 break;
1701 case ALGORITHM_RIGHT_ASYMMETRIC: 1696 case ALGORITHM_RIGHT_ASYMMETRIC:
1702 pd_idx = stripe % raid_disks; 1697 pd_idx = sector_div(stripe2, raid_disks);
1703 if (*dd_idx >= pd_idx) 1698 if (*dd_idx >= pd_idx)
1704 (*dd_idx)++; 1699 (*dd_idx)++;
1705 break; 1700 break;
1706 case ALGORITHM_LEFT_SYMMETRIC: 1701 case ALGORITHM_LEFT_SYMMETRIC:
1707 pd_idx = data_disks - stripe % raid_disks; 1702 pd_idx = data_disks - sector_div(stripe2, raid_disks);
1708 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 1703 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1709 break; 1704 break;
1710 case ALGORITHM_RIGHT_SYMMETRIC: 1705 case ALGORITHM_RIGHT_SYMMETRIC:
1711 pd_idx = stripe % raid_disks; 1706 pd_idx = sector_div(stripe2, raid_disks);
1712 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 1707 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1713 break; 1708 break;
1714 case ALGORITHM_PARITY_0: 1709 case ALGORITHM_PARITY_0:
@@ -1728,7 +1723,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1728 1723
1729 switch (algorithm) { 1724 switch (algorithm) {
1730 case ALGORITHM_LEFT_ASYMMETRIC: 1725 case ALGORITHM_LEFT_ASYMMETRIC:
1731 pd_idx = raid_disks - 1 - (stripe % raid_disks); 1726 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1732 qd_idx = pd_idx + 1; 1727 qd_idx = pd_idx + 1;
1733 if (pd_idx == raid_disks-1) { 1728 if (pd_idx == raid_disks-1) {
1734 (*dd_idx)++; /* Q D D D P */ 1729 (*dd_idx)++; /* Q D D D P */
@@ -1737,7 +1732,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1737 (*dd_idx) += 2; /* D D P Q D */ 1732 (*dd_idx) += 2; /* D D P Q D */
1738 break; 1733 break;
1739 case ALGORITHM_RIGHT_ASYMMETRIC: 1734 case ALGORITHM_RIGHT_ASYMMETRIC:
1740 pd_idx = stripe % raid_disks; 1735 pd_idx = sector_div(stripe2, raid_disks);
1741 qd_idx = pd_idx + 1; 1736 qd_idx = pd_idx + 1;
1742 if (pd_idx == raid_disks-1) { 1737 if (pd_idx == raid_disks-1) {
1743 (*dd_idx)++; /* Q D D D P */ 1738 (*dd_idx)++; /* Q D D D P */
@@ -1746,12 +1741,12 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1746 (*dd_idx) += 2; /* D D P Q D */ 1741 (*dd_idx) += 2; /* D D P Q D */
1747 break; 1742 break;
1748 case ALGORITHM_LEFT_SYMMETRIC: 1743 case ALGORITHM_LEFT_SYMMETRIC:
1749 pd_idx = raid_disks - 1 - (stripe % raid_disks); 1744 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1750 qd_idx = (pd_idx + 1) % raid_disks; 1745 qd_idx = (pd_idx + 1) % raid_disks;
1751 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 1746 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
1752 break; 1747 break;
1753 case ALGORITHM_RIGHT_SYMMETRIC: 1748 case ALGORITHM_RIGHT_SYMMETRIC:
1754 pd_idx = stripe % raid_disks; 1749 pd_idx = sector_div(stripe2, raid_disks);
1755 qd_idx = (pd_idx + 1) % raid_disks; 1750 qd_idx = (pd_idx + 1) % raid_disks;
1756 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 1751 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
1757 break; 1752 break;
@@ -1770,7 +1765,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1770 /* Exactly the same as RIGHT_ASYMMETRIC, but or 1765 /* Exactly the same as RIGHT_ASYMMETRIC, but or
1771 * of blocks for computing Q is different. 1766 * of blocks for computing Q is different.
1772 */ 1767 */
1773 pd_idx = stripe % raid_disks; 1768 pd_idx = sector_div(stripe2, raid_disks);
1774 qd_idx = pd_idx + 1; 1769 qd_idx = pd_idx + 1;
1775 if (pd_idx == raid_disks-1) { 1770 if (pd_idx == raid_disks-1) {
1776 (*dd_idx)++; /* Q D D D P */ 1771 (*dd_idx)++; /* Q D D D P */
@@ -1785,7 +1780,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1785 * D D D P Q rather than 1780 * D D D P Q rather than
1786 * Q D D D P 1781 * Q D D D P
1787 */ 1782 */
1788 pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks); 1783 stripe2 += 1;
1784 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1789 qd_idx = pd_idx + 1; 1785 qd_idx = pd_idx + 1;
1790 if (pd_idx == raid_disks-1) { 1786 if (pd_idx == raid_disks-1) {
1791 (*dd_idx)++; /* Q D D D P */ 1787 (*dd_idx)++; /* Q D D D P */
@@ -1797,7 +1793,7 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1797 1793
1798 case ALGORITHM_ROTATING_N_CONTINUE: 1794 case ALGORITHM_ROTATING_N_CONTINUE:
1799 /* Same as left_symmetric but Q is before P */ 1795 /* Same as left_symmetric but Q is before P */
1800 pd_idx = raid_disks - 1 - (stripe % raid_disks); 1796 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1801 qd_idx = (pd_idx + raid_disks - 1) % raid_disks; 1797 qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
1802 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 1798 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1803 ddf_layout = 1; 1799 ddf_layout = 1;
@@ -1805,27 +1801,27 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1805 1801
1806 case ALGORITHM_LEFT_ASYMMETRIC_6: 1802 case ALGORITHM_LEFT_ASYMMETRIC_6:
1807 /* RAID5 left_asymmetric, with Q on last device */ 1803 /* RAID5 left_asymmetric, with Q on last device */
1808 pd_idx = data_disks - stripe % (raid_disks-1); 1804 pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
1809 if (*dd_idx >= pd_idx) 1805 if (*dd_idx >= pd_idx)
1810 (*dd_idx)++; 1806 (*dd_idx)++;
1811 qd_idx = raid_disks - 1; 1807 qd_idx = raid_disks - 1;
1812 break; 1808 break;
1813 1809
1814 case ALGORITHM_RIGHT_ASYMMETRIC_6: 1810 case ALGORITHM_RIGHT_ASYMMETRIC_6:
1815 pd_idx = stripe % (raid_disks-1); 1811 pd_idx = sector_div(stripe2, raid_disks-1);
1816 if (*dd_idx >= pd_idx) 1812 if (*dd_idx >= pd_idx)
1817 (*dd_idx)++; 1813 (*dd_idx)++;
1818 qd_idx = raid_disks - 1; 1814 qd_idx = raid_disks - 1;
1819 break; 1815 break;
1820 1816
1821 case ALGORITHM_LEFT_SYMMETRIC_6: 1817 case ALGORITHM_LEFT_SYMMETRIC_6:
1822 pd_idx = data_disks - stripe % (raid_disks-1); 1818 pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
1823 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 1819 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
1824 qd_idx = raid_disks - 1; 1820 qd_idx = raid_disks - 1;
1825 break; 1821 break;
1826 1822
1827 case ALGORITHM_RIGHT_SYMMETRIC_6: 1823 case ALGORITHM_RIGHT_SYMMETRIC_6:
1828 pd_idx = stripe % (raid_disks-1); 1824 pd_idx = sector_div(stripe2, raid_disks-1);
1829 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 1825 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
1830 qd_idx = raid_disks - 1; 1826 qd_idx = raid_disks - 1;
1831 break; 1827 break;
@@ -1870,14 +1866,14 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
1870 : conf->algorithm; 1866 : conf->algorithm;
1871 sector_t stripe; 1867 sector_t stripe;
1872 int chunk_offset; 1868 int chunk_offset;
1873 int chunk_number, dummy1, dd_idx = i; 1869 sector_t chunk_number;
1870 int dummy1, dd_idx = i;
1874 sector_t r_sector; 1871 sector_t r_sector;
1875 struct stripe_head sh2; 1872 struct stripe_head sh2;
1876 1873
1877 1874
1878 chunk_offset = sector_div(new_sector, sectors_per_chunk); 1875 chunk_offset = sector_div(new_sector, sectors_per_chunk);
1879 stripe = new_sector; 1876 stripe = new_sector;
1880 BUG_ON(new_sector != stripe);
1881 1877
1882 if (i == sh->pd_idx) 1878 if (i == sh->pd_idx)
1883 return 0; 1879 return 0;
@@ -1970,7 +1966,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
1970 } 1966 }
1971 1967
1972 chunk_number = stripe * data_disks + i; 1968 chunk_number = stripe * data_disks + i;
1973 r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset; 1969 r_sector = chunk_number * sectors_per_chunk + chunk_offset;
1974 1970
1975 check = raid5_compute_sector(conf, r_sector, 1971 check = raid5_compute_sector(conf, r_sector,
1976 previous, &dummy1, &sh2); 1972 previous, &dummy1, &sh2);
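
Note: the raid5_compute_sector()/compute_blocknr() rework above is about 64-bit arithmetic on 32-bit hosts: chunk_number and stripe become sector_t, the overflow BUG_ONs are dropped, and every '/' and '%' on them is replaced by sector_div(). sector_div(n, base) divides n in place and returns the remainder, falling back to do_div() when sector_t is 64 bits wide, so the compiler never emits the 64-bit division helpers (__udivdi3/__umoddi3) that the kernel does not provide. A minimal sketch of the idiom, not the raid5 code itself:

    #include <linux/types.h>
    #include <linux/blkdev.h>       /* sector_div() */

    /* Map a linear sector onto (stripe number, data-disk index, offset in chunk). */
    static sector_t map_sector(sector_t r_sector, unsigned int sectors_per_chunk,
                               unsigned int data_disks, int *dd_idx,
                               unsigned int *chunk_offset)
    {
            sector_t stripe = r_sector;

            *chunk_offset = sector_div(stripe, sectors_per_chunk); /* stripe = chunk no. */
            *dd_idx = sector_div(stripe, data_disks);              /* stripe = stripe no. */
            return stripe;
    }
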
diff --git a/drivers/media/dvb/ttpci/budget.c b/drivers/media/dvb/ttpci/budget.c
index e48380c48990..95a463c1ef85 100644
--- a/drivers/media/dvb/ttpci/budget.c
+++ b/drivers/media/dvb/ttpci/budget.c
@@ -643,9 +643,6 @@ static void frontend_init(struct budget *budget)
643 &budget->i2c_adap, 643 &budget->i2c_adap,
644 &tt1600_isl6423_config); 644 &tt1600_isl6423_config);
645 645
646 } else {
647 dvb_frontend_detach(budget->dvb_frontend);
648 budget->dvb_frontend = NULL;
649 } 646 }
650 } 647 }
651 break; 648 break;
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index d0bf40d8a1ea..9be990b15692 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -4772,8 +4772,12 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4772 rc = bnx2_alloc_bad_rbuf(bp); 4772 rc = bnx2_alloc_bad_rbuf(bp);
4773 } 4773 }
4774 4774
4775 if (bp->flags & BNX2_FLAG_USING_MSIX) 4775 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4776 bnx2_setup_msix_tbl(bp); 4776 bnx2_setup_msix_tbl(bp);
4777 /* Prevent MSIX table reads and write from timing out */
4778 REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
4779 BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4780 }
4777 4781
4778 return rc; 4782 return rc;
4779} 4783}
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 3db85daa3671..787befc65322 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2832,8 +2832,13 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
2832 spin_lock_irq(&tp->lock); 2832 spin_lock_irq(&tp->lock);
2833 2833
2834 RTL_W8(Cfg9346, Cfg9346_Unlock); 2834 RTL_W8(Cfg9346, Cfg9346_Unlock);
2835 RTL_W32(MAC0, low); 2835
2836 RTL_W32(MAC4, high); 2836 RTL_W32(MAC4, high);
2837 RTL_R32(MAC4);
2838
2839 RTL_W32(MAC0, low);
2840 RTL_R32(MAC0);
2841
2837 RTL_W8(Cfg9346, Cfg9346_Lock); 2842 RTL_W8(Cfg9346, Cfg9346_Lock);
2838 2843
2839 spin_unlock_irq(&tp->lock); 2844 spin_unlock_irq(&tp->lock);
@@ -4316,7 +4321,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
4316 4321
4317 tp->cur_tx += frags + 1; 4322 tp->cur_tx += frags + 1;
4318 4323
4319 smp_wmb(); 4324 wmb();
4320 4325
4321 RTL_W8(TxPoll, NPQ); /* set polling bit */ 4326 RTL_W8(TxPoll, NPQ); /* set polling bit */
4322 4327
@@ -4675,7 +4680,7 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
4675 * until it does. 4680 * until it does.
4676 */ 4681 */
4677 tp->intr_mask = 0xffff; 4682 tp->intr_mask = 0xffff;
4678 smp_wmb(); 4683 wmb();
4679 RTL_W16(IntrMask, tp->intr_event); 4684 RTL_W16(IntrMask, tp->intr_event);
4680 } 4685 }
4681 4686
@@ -4813,8 +4818,8 @@ static void rtl_set_rx_mode(struct net_device *dev)
4813 mc_filter[1] = swab32(data); 4818 mc_filter[1] = swab32(data);
4814 } 4819 }
4815 4820
4816 RTL_W32(MAR0 + 0, mc_filter[0]);
4817 RTL_W32(MAR0 + 4, mc_filter[1]); 4821 RTL_W32(MAR0 + 4, mc_filter[1]);
4822 RTL_W32(MAR0 + 0, mc_filter[0]);
4818 4823
4819 RTL_W32(RxConfig, tmp); 4824 RTL_W32(RxConfig, tmp);
4820 4825
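
Note: the r8169 changes above are ordering fixes. The MAC and multicast filter registers are now written high dword first (MAC4/MAR0+4 before MAC0/MAR0+0, with read-backs in the MAC case), an ordering the hardware apparently needs for the update to stick, and the smp_wmb() barriers in the transmit and poll paths become full wmb(): smp_wmb() only orders normal memory accesses between CPUs (and is a bare compiler barrier on !SMP builds), while ordering a DMA-visible descriptor update against the following MMIO doorbell needs the mandatory barrier. A sketch of the descriptor-then-doorbell pattern, assuming the driver's TxDesc and ioaddr conventions rather than quoting its exact code:

    /* sketch: publish a TX descriptor to the NIC, then ring the doorbell */
    static void kick_tx(struct rtl8169_private *tp, struct TxDesc *txd, u32 opts1)
    {
            void __iomem *ioaddr = tp->mmio_addr;

            txd->opts1 = cpu_to_le32(opts1 | DescOwn);
            wmb();                  /* order the DMA-coherent write before the MMIO kick */
            RTL_W8(TxPoll, NPQ);    /* chip starts fetching from the normal-priority queue */
    }
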
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 46997e177ee3..fb52e4705f32 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1862,6 +1862,7 @@ out:
1862 } 1862 }
1863 1863
1864 if (disabled) { 1864 if (disabled) {
1865 dev_close(efx->net_dev);
1865 EFX_ERR(efx, "has been disabled\n"); 1866 EFX_ERR(efx, "has been disabled\n");
1866 efx->state = STATE_DISABLED; 1867 efx->state = STATE_DISABLED;
1867 } else { 1868 } else {
@@ -1885,8 +1886,7 @@ static void efx_reset_work(struct work_struct *data)
1885 } 1886 }
1886 1887
1887 rtnl_lock(); 1888 rtnl_lock();
1888 if (efx_reset(efx, efx->reset_pending)) 1889 (void)efx_reset(efx, efx->reset_pending);
1889 dev_close(efx->net_dev);
1890 rtnl_unlock(); 1890 rtnl_unlock();
1891} 1891}
1892 1892
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 9d009c46e962..e20a824d4393 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -1317,7 +1317,9 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
1317 1317
1318 EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad); 1318 EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
1319 1319
1320 falcon_probe_board(efx, board_rev); 1320 rc = falcon_probe_board(efx, board_rev);
1321 if (rc)
1322 goto fail2;
1321 1323
1322 kfree(nvconfig); 1324 kfree(nvconfig);
1323 return 0; 1325 return 0;
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index 5712fddd72f2..c7a933a3292e 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -728,15 +728,7 @@ static const struct falcon_board_type board_types[] = {
728 }, 728 },
729}; 729};
730 730
731static const struct falcon_board_type falcon_dummy_board = { 731int falcon_probe_board(struct efx_nic *efx, u16 revision_info)
732 .init = efx_port_dummy_op_int,
733 .init_phy = efx_port_dummy_op_void,
734 .fini = efx_port_dummy_op_void,
735 .set_id_led = efx_port_dummy_op_set_id_led,
736 .monitor = efx_port_dummy_op_int,
737};
738
739void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
740{ 732{
741 struct falcon_board *board = falcon_board(efx); 733 struct falcon_board *board = falcon_board(efx);
742 u8 type_id = FALCON_BOARD_TYPE(revision_info); 734 u8 type_id = FALCON_BOARD_TYPE(revision_info);
@@ -754,8 +746,9 @@ void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
754 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC) 746 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
755 ? board->type->ref_model : board->type->gen_type, 747 ? board->type->ref_model : board->type->gen_type,
756 'A' + board->major, board->minor); 748 'A' + board->major, board->minor);
749 return 0;
757 } else { 750 } else {
758 EFX_ERR(efx, "unknown board type %d\n", type_id); 751 EFX_ERR(efx, "unknown board type %d\n", type_id);
759 board->type = &falcon_dummy_board; 752 return -ENODEV;
760 } 753 }
761} 754}
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index 9351c0331a47..3166bafdfbef 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -156,7 +156,7 @@ extern struct efx_nic_type siena_a0_nic_type;
156 ************************************************************************** 156 **************************************************************************
157 */ 157 */
158 158
159extern void falcon_probe_board(struct efx_nic *efx, u16 revision_info); 159extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
160 160
161/* TX data path */ 161/* TX data path */
162extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue); 162extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue);
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index f8c6771e66d8..afbac2d82519 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -454,8 +454,17 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
454 454
455static void siena_update_nic_stats(struct efx_nic *efx) 455static void siena_update_nic_stats(struct efx_nic *efx)
456{ 456{
457 while (siena_try_update_nic_stats(efx) == -EAGAIN) 457 int retry;
458 cpu_relax(); 458
459 /* If we're unlucky enough to read statistics during the DMA, wait
460 * up to 10ms for it to finish (typically takes <500us) */
461 for (retry = 0; retry < 100; ++retry) {
462 if (siena_try_update_nic_stats(efx) == 0)
463 return;
464 udelay(100);
465 }
466
467 /* Use the old values instead */
459} 468}
460 469
461static void siena_start_nic_stats(struct efx_nic *efx) 470static void siena_start_nic_stats(struct efx_nic *efx)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 297680134da5..b04d3db5bc49 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -8572,6 +8572,7 @@ static int tg3_test_msi(struct tg3 *tp)
8572 pci_disable_msi(tp->pdev); 8572 pci_disable_msi(tp->pdev);
8573 8573
8574 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; 8574 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8575 tp->napi[0].irq_vec = tp->pdev->irq;
8575 8576
8576 err = tg3_request_irq(tp, 0); 8577 err = tg3_request_irq(tp, 0);
8577 if (err) 8578 if (err)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 2834a01bae24..909b73db4a99 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -380,6 +380,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
380 } 380 }
381 } 381 }
382 382
383 /* Orphan the skb - required as we might hang on to it
384 * for indefinite time. */
385 skb_orphan(skb);
386
383 /* Enqueue packet */ 387 /* Enqueue packet */
384 skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb); 388 skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb);
385 dev->trans_start = jiffies; 389 dev->trans_start = jiffies;
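
Note: the skb_orphan() added above detaches the packet from its originating socket before it is parked on the tun device's receive queue; since userspace may not read it for an arbitrarily long time, running the destructor now releases the sender's send-buffer accounting and keeps a stalled tun reader from wedging the sending socket. Roughly what skb_orphan() amounts to (paraphrasing include/linux/skbuff.h of this era, shown only for context):

    static inline void skb_orphan(struct sk_buff *skb)
    {
            if (skb->destructor)
                    skb->destructor(skb);   /* e.g. sock_wfree(): returns wmem to the sender */
            skb->destructor = NULL;
            skb->sk = NULL;
    }
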
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 5f3b9eaeb04f..8a6e02736324 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -433,6 +433,7 @@ static const struct driver_info mbm_info = {
433 .bind = cdc_bind, 433 .bind = cdc_bind,
434 .unbind = usbnet_cdc_unbind, 434 .unbind = usbnet_cdc_unbind,
435 .status = cdc_status, 435 .status = cdc_status,
436 .manage_power = cdc_manage_power,
436}; 437};
437 438
438/*-------------------------------------------------------------------------*/ 439/*-------------------------------------------------------------------------*/
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 3d406f9b2f29..c60625bb6801 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -238,7 +238,7 @@ static int dm_write_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 valu
238 goto out; 238 goto out;
239 239
240 dm_write_reg(dev, DM_SHARED_ADDR, phy ? (reg | 0x40) : reg); 240 dm_write_reg(dev, DM_SHARED_ADDR, phy ? (reg | 0x40) : reg);
241 dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0x1c : 0x14); 241 dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0x1a : 0x12);
242 242
243 for (i = 0; i < DM_TIMEOUT; i++) { 243 for (i = 0; i < DM_TIMEOUT; i++) {
244 u8 tmp; 244 u8 tmp;
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index b9b9d6b01c0b..941f053e650e 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -628,9 +628,15 @@ static void ppp_stop(struct net_device *dev)
628 ppp_cp_event(dev, PID_LCP, STOP, 0, 0, 0, NULL); 628 ppp_cp_event(dev, PID_LCP, STOP, 0, 0, 0, NULL);
629} 629}
630 630
631static void ppp_close(struct net_device *dev)
632{
633 ppp_tx_flush();
634}
635
631static struct hdlc_proto proto = { 636static struct hdlc_proto proto = {
632 .start = ppp_start, 637 .start = ppp_start,
633 .stop = ppp_stop, 638 .stop = ppp_stop,
639 .close = ppp_close,
634 .type_trans = ppp_type_trans, 640 .type_trans = ppp_type_trans,
635 .ioctl = ppp_ioctl, 641 .ioctl = ppp_ioctl,
636 .netif_rx = ppp_rx, 642 .netif_rx = ppp_rx,
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 7b1eab4d85cb..e55f7181e653 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1358,9 +1358,9 @@ void ath_cleanup(struct ath_softc *sc)
1358 free_irq(sc->irq, sc); 1358 free_irq(sc->irq, sc);
1359 ath_bus_cleanup(common); 1359 ath_bus_cleanup(common);
1360 kfree(sc->sec_wiphy); 1360 kfree(sc->sec_wiphy);
1361 ieee80211_free_hw(sc->hw);
1362 1361
1363 ath9k_uninit_hw(sc); 1362 ath9k_uninit_hw(sc);
1363 ieee80211_free_hw(sc->hw);
1364} 1364}
1365 1365
1366static int ath9k_reg_notifier(struct wiphy *wiphy, 1366static int ath9k_reg_notifier(struct wiphy *wiphy,
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 4bf4c213baec..41d33cde97c0 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -245,7 +245,7 @@ static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
245 u32 idx, i; 245 u32 idx, i;
246 246
247 i = (*index) % ring_limit; 247 i = (*index) % ring_limit;
248 (*index) = idx = le32_to_cpu(ring_control->device_idx[1]); 248 (*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
249 idx %= ring_limit; 249 idx %= ring_limit;
250 250
251 while (i != idx) { 251 while (i != idx) {
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 8742640bed8c..b3c4fbd80d8d 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -36,6 +36,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
36 /* Version 1 devices (pci chip + net2280) */ 36 /* Version 1 devices (pci chip + net2280) */
37 {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */ 37 {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */
38 {USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */ 38 {USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */
39 {USB_DEVICE(0x07aa, 0x001c)}, /* Corega CG-WLUSB2GT */
39 {USB_DEVICE(0x083a, 0x4501)}, /* Accton 802.11g WN4501 USB */ 40 {USB_DEVICE(0x083a, 0x4501)}, /* Accton 802.11g WN4501 USB */
40 {USB_DEVICE(0x083a, 0x4502)}, /* Siemens Gigaset USB Adapter */ 41 {USB_DEVICE(0x083a, 0x4502)}, /* Siemens Gigaset USB Adapter */
41 {USB_DEVICE(0x083a, 0x5501)}, /* Phillips CPWUA054 */ 42 {USB_DEVICE(0x083a, 0x5501)}, /* Phillips CPWUA054 */
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index b6dda2b27fb5..9d147ded8741 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -186,7 +186,7 @@ static int p54_tx_qos_accounting_alloc(struct p54_common *priv,
186 struct ieee80211_tx_queue_stats *queue; 186 struct ieee80211_tx_queue_stats *queue;
187 unsigned long flags; 187 unsigned long flags;
188 188
189 if (WARN_ON(p54_queue > P54_QUEUE_NUM)) 189 if (WARN_ON(p54_queue >= P54_QUEUE_NUM))
190 return -EINVAL; 190 return -EINVAL;
191 191
192 queue = &priv->tx_stats[p54_queue]; 192 queue = &priv->tx_stats[p54_queue];
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index be792519adcd..73e45f7688bf 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -624,7 +624,7 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
624 */ 624 */
625int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state) 625int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
626{ 626{
627 return state > PCI_D0 ? 627 return state >= PCI_D0 ?
628 pci_platform_power_transition(dev, state) : -EINVAL; 628 pci_platform_power_transition(dev, state) : -EINVAL;
629} 629}
630EXPORT_SYMBOL_GPL(__pci_complete_power_transition); 630EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
@@ -661,10 +661,6 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
661 */ 661 */
662 return 0; 662 return 0;
663 663
664 /* Check if we're already there */
665 if (dev->current_state == state)
666 return 0;
667
668 __pci_start_power_transition(dev, state); 664 __pci_start_power_transition(dev, state);
669 665
670 /* This device is quirked not to be put into D3, so 666 /* This device is quirked not to be put into D3, so
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index e6b67f2d3229..741672f36688 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -470,12 +470,12 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
470 470
471 WARN_ON(hdrlength >= 256); 471 WARN_ON(hdrlength >= 256);
472 hdr->hlength = hdrlength & 0xFF; 472 hdr->hlength = hdrlength & 0xFF;
473 hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
473 474
474 if (session->tt->init_task && session->tt->init_task(task)) 475 if (session->tt->init_task && session->tt->init_task(task))
475 return -EIO; 476 return -EIO;
476 477
477 task->state = ISCSI_TASK_RUNNING; 478 task->state = ISCSI_TASK_RUNNING;
478 hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
479 session->cmdsn++; 479 session->cmdsn++;
480 480
481 conn->scsicmd_pdus_cnt++; 481 conn->scsicmd_pdus_cnt++;
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index e15501170698..816ab97eb16d 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -394,11 +394,15 @@ int sas_ata_init_host_and_port(struct domain_device *found_dev,
394void sas_ata_task_abort(struct sas_task *task) 394void sas_ata_task_abort(struct sas_task *task)
395{ 395{
396 struct ata_queued_cmd *qc = task->uldd_task; 396 struct ata_queued_cmd *qc = task->uldd_task;
397 struct request_queue *q = qc->scsicmd->device->request_queue;
397 struct completion *waiting; 398 struct completion *waiting;
399 unsigned long flags;
398 400
399 /* Bounce SCSI-initiated commands to the SCSI EH */ 401 /* Bounce SCSI-initiated commands to the SCSI EH */
400 if (qc->scsicmd) { 402 if (qc->scsicmd) {
403 spin_lock_irqsave(q->queue_lock, flags);
401 blk_abort_request(qc->scsicmd->request); 404 blk_abort_request(qc->scsicmd->request);
405 spin_unlock_irqrestore(q->queue_lock, flags);
402 scsi_schedule_eh(qc->scsicmd->device->host); 406 scsi_schedule_eh(qc->scsicmd->device->host);
403 return; 407 return;
404 } 408 }
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 14b13196b22d..b672d10e2a90 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -1029,6 +1029,8 @@ int __sas_task_abort(struct sas_task *task)
1029void sas_task_abort(struct sas_task *task) 1029void sas_task_abort(struct sas_task *task)
1030{ 1030{
1031 struct scsi_cmnd *sc = task->uldd_task; 1031 struct scsi_cmnd *sc = task->uldd_task;
1032 struct request_queue *q = sc->device->request_queue;
1033 unsigned long flags;
1032 1034
1033 /* Escape for libsas internal commands */ 1035 /* Escape for libsas internal commands */
1034 if (!sc) { 1036 if (!sc) {
@@ -1043,7 +1045,9 @@ void sas_task_abort(struct sas_task *task)
1043 return; 1045 return;
1044 } 1046 }
1045 1047
1048 spin_lock_irqsave(q->queue_lock, flags);
1046 blk_abort_request(sc->request); 1049 blk_abort_request(sc->request);
1050 spin_unlock_irqrestore(q->queue_lock, flags);
1047 scsi_schedule_eh(sc->device->host); 1051 scsi_schedule_eh(sc->device->host);
1048} 1052}
1049 1053
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 0b575c871007..aa2a2dc44e23 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -956,7 +956,8 @@ static int resp_start_stop(struct scsi_cmnd * scp,
956static sector_t get_sdebug_capacity(void) 956static sector_t get_sdebug_capacity(void)
957{ 957{
958 if (scsi_debug_virtual_gb > 0) 958 if (scsi_debug_virtual_gb > 0)
959 return 2048 * 1024 * (sector_t)scsi_debug_virtual_gb; 959 return (sector_t)scsi_debug_virtual_gb *
960 (1073741824 / scsi_debug_sector_size);
960 else 961 else
961 return sdebug_store_sectors; 962 return sdebug_store_sectors;
962} 963}
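
Note: the capacity fix above makes the simulated disk honour the configured sector size: one virtual GiB is 1073741824 / scsi_debug_sector_size sectors. With 512-byte sectors that is 2097152 sectors, exactly the old hard-coded 2048 * 1024; with 4096-byte sectors it is 262144, where the old formula would have reported a capacity eight times too large.
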
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 08ed506e6059..e46155b006dc 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -301,7 +301,20 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
301 if (scmd->device->allow_restart && 301 if (scmd->device->allow_restart &&
302 (sshdr.asc == 0x04) && (sshdr.ascq == 0x02)) 302 (sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
303 return FAILED; 303 return FAILED;
304 return SUCCESS; 304
305 if (blk_barrier_rq(scmd->request))
306 /*
307 * barrier requests should always retry on UA
308 * otherwise block will get a spurious error
309 */
310 return NEEDS_RETRY;
311 else
312 /*
313 * for normal (non barrier) commands, pass the
314 * UA upwards for a determination in the
315 * completion functions
316 */
317 return SUCCESS;
305 318
306 /* these three are not supported */ 319 /* these three are not supported */
307 case COPY_ABORTED: 320 case COPY_ABORTED:
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index c6642423cc67..56977097de9f 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -773,8 +773,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
773 * we already took a copy of the original into rq->errors which 773 * we already took a copy of the original into rq->errors which
774 * is what gets returned to the user 774 * is what gets returned to the user
775 */ 775 */
776 if (sense_valid && sshdr.sense_key == RECOVERED_ERROR) { 776 if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
777 if (!(req->cmd_flags & REQ_QUIET)) 777 /* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
778 * print since caller wants ATA registers. Only occurs on
779 * SCSI ATA PASS_THROUGH commands when CK_COND=1
780 */
781 if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
782 ;
783 else if (!(req->cmd_flags & REQ_QUIET))
778 scsi_print_sense("", cmd); 784 scsi_print_sense("", cmd);
779 result = 0; 785 result = 0;
780 /* BLOCK_PC may have set error */ 786 /* BLOCK_PC may have set error */
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 255da53e5a01..bf1592098ccc 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1039,6 +1039,7 @@ static void sd_prepare_flush(struct request_queue *q, struct request *rq)
1039{ 1039{
1040 rq->cmd_type = REQ_TYPE_BLOCK_PC; 1040 rq->cmd_type = REQ_TYPE_BLOCK_PC;
1041 rq->timeout = SD_TIMEOUT; 1041 rq->timeout = SD_TIMEOUT;
1042 rq->retries = SD_MAX_RETRIES;
1042 rq->cmd[0] = SYNCHRONIZE_CACHE; 1043 rq->cmd[0] = SYNCHRONIZE_CACHE;
1043 rq->cmd_len = 10; 1044 rq->cmd_len = 10;
1044} 1045}
diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c
index 24485cc62ff8..4822cb50cd0f 100644
--- a/drivers/serial/8250_pnp.c
+++ b/drivers/serial/8250_pnp.c
@@ -348,6 +348,8 @@ static const struct pnp_device_id pnp_dev_table[] = {
348 { "FUJ02E6", 0 }, 348 { "FUJ02E6", 0 },
349 /* Fujitsu Wacom 2FGT Tablet PC device */ 349 /* Fujitsu Wacom 2FGT Tablet PC device */
350 { "FUJ02E7", 0 }, 350 { "FUJ02E7", 0 },
351 /* Fujitsu Wacom 1FGT Tablet PC device */
352 { "FUJ02E9", 0 },
351 /* 353 /*
352 * LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6 in 354 * LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6 in
353 * disguise) 355 * disguise)
diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
index c2809f2a2ce0..b12237f90db2 100644
--- a/drivers/staging/hv/Hv.c
+++ b/drivers/staging/hv/Hv.c
@@ -306,9 +306,9 @@ void HvCleanup(void)
306 DPRINT_ENTER(VMBUS); 306 DPRINT_ENTER(VMBUS);
307 307
308 if (gHvContext.SignalEventBuffer) { 308 if (gHvContext.SignalEventBuffer) {
309 kfree(gHvContext.SignalEventBuffer);
309 gHvContext.SignalEventBuffer = NULL; 310 gHvContext.SignalEventBuffer = NULL;
310 gHvContext.SignalEventParam = NULL; 311 gHvContext.SignalEventParam = NULL;
311 kfree(gHvContext.SignalEventBuffer);
312 } 312 }
313 313
314 if (gHvContext.GuestId == HV_LINUX_GUEST_ID) { 314 if (gHvContext.GuestId == HV_LINUX_GUEST_ID) {
diff --git a/drivers/staging/hv/RndisFilter.c b/drivers/staging/hv/RndisFilter.c
index 26d79975387c..f05f4e125c48 100644
--- a/drivers/staging/hv/RndisFilter.c
+++ b/drivers/staging/hv/RndisFilter.c
@@ -756,6 +756,7 @@ static int RndisFilterOpenDevice(struct rndis_device *Device)
756 756
757 ret = RndisFilterSetPacketFilter(Device, 757 ret = RndisFilterSetPacketFilter(Device,
758 NDIS_PACKET_TYPE_BROADCAST | 758 NDIS_PACKET_TYPE_BROADCAST |
759 NDIS_PACKET_TYPE_ALL_MULTICAST |
759 NDIS_PACKET_TYPE_DIRECTED); 760 NDIS_PACKET_TYPE_DIRECTED);
760 if (ret == 0) 761 if (ret == 0)
761 Device->State = RNDIS_DEV_DATAINITIALIZED; 762 Device->State = RNDIS_DEV_DATAINITIALIZED;
diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c
index 0d7459e2d036..4c3c8bc4bc38 100644
--- a/drivers/staging/hv/netvsc_drv.c
+++ b/drivers/staging/hv/netvsc_drv.c
@@ -413,8 +413,7 @@ static int netvsc_probe(struct device *device)
413 if (!net_drv_obj->Base.OnDeviceAdd) 413 if (!net_drv_obj->Base.OnDeviceAdd)
414 return -1; 414 return -1;
415 415
416 net = alloc_netdev(sizeof(struct net_device_context), "seth%d", 416 net = alloc_etherdev(sizeof(struct net_device_context));
417 ether_setup);
418 if (!net) 417 if (!net)
419 return -1; 418 return -1;
420 419
diff --git a/drivers/staging/usbip/usbip_event.c b/drivers/staging/usbip/usbip_event.c
index 6da1021e8a65..a2566f1075d5 100644
--- a/drivers/staging/usbip/usbip_event.c
+++ b/drivers/staging/usbip/usbip_event.c
@@ -117,6 +117,9 @@ void usbip_stop_eh(struct usbip_device *ud)
117{ 117{
118 struct usbip_task *eh = &ud->eh; 118 struct usbip_task *eh = &ud->eh;
119 119
120 if (eh->thread == current)
121 return; /* do not wait for myself */
122
120 wait_for_completion(&eh->thread_done); 123 wait_for_completion(&eh->thread_done);
121 usbip_dbg_eh("usbip_eh has finished\n"); 124 usbip_dbg_eh("usbip_eh has finished\n");
122} 125}
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 3abb06955b2b..4a4e7ee84f0c 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1255,9 +1255,8 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
1255 udev->state == USB_STATE_SUSPENDED) 1255 udev->state == USB_STATE_SUSPENDED)
1256 goto done; 1256 goto done;
1257 1257
1258 udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
1259
1260 if (msg.event & PM_EVENT_AUTO) { 1258 if (msg.event & PM_EVENT_AUTO) {
1259 udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
1261 status = autosuspend_check(udev, 0); 1260 status = autosuspend_check(udev, 0);
1262 if (status < 0) 1261 if (status < 0)
1263 goto done; 1262 goto done;
@@ -1789,6 +1788,34 @@ int usb_external_resume_device(struct usb_device *udev, pm_message_t msg)
1789 return status; 1788 return status;
1790} 1789}
1791 1790
1791static void choose_wakeup(struct usb_device *udev, pm_message_t msg)
1792{
1793 int w, i;
1794 struct usb_interface *intf;
1795
1796 /* Remote wakeup is needed only when we actually go to sleep.
1797 * For things like FREEZE and QUIESCE, if the device is already
1798 * autosuspended then its current wakeup setting is okay.
1799 */
1800 if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_QUIESCE) {
1801 udev->do_remote_wakeup = 0;
1802 return;
1803 }
1804
1805 /* If remote wakeup is permitted, see whether any interface drivers
1806 * actually want it.
1807 */
1808 w = 0;
1809 if (device_may_wakeup(&udev->dev) && udev->actconfig) {
1810 for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
1811 intf = udev->actconfig->interface[i];
1812 w |= intf->needs_remote_wakeup;
1813 }
1814 }
1815
1816 udev->do_remote_wakeup = w;
1817}
1818
1792int usb_suspend(struct device *dev, pm_message_t msg) 1819int usb_suspend(struct device *dev, pm_message_t msg)
1793{ 1820{
1794 struct usb_device *udev; 1821 struct usb_device *udev;
@@ -1808,6 +1835,7 @@ int usb_suspend(struct device *dev, pm_message_t msg)
1808 } 1835 }
1809 1836
1810 udev->skip_sys_resume = 0; 1837 udev->skip_sys_resume = 0;
1838 choose_wakeup(udev, msg);
1811 return usb_external_suspend_device(udev, msg); 1839 return usb_external_suspend_device(udev, msg);
1812} 1840}
1813 1841
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index bdf87a8414a1..2c95153c0f24 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -120,7 +120,7 @@ int usb_choose_configuration(struct usb_device *udev)
120 * than a vendor-specific driver. */ 120 * than a vendor-specific driver. */
121 else if (udev->descriptor.bDeviceClass != 121 else if (udev->descriptor.bDeviceClass !=
122 USB_CLASS_VENDOR_SPEC && 122 USB_CLASS_VENDOR_SPEC &&
123 (!desc || desc->bInterfaceClass != 123 (desc && desc->bInterfaceClass !=
124 USB_CLASS_VENDOR_SPEC)) { 124 USB_CLASS_VENDOR_SPEC)) {
125 best = c; 125 best = c;
126 break; 126 break;
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index 3f90d28c501b..120500e1c96d 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -514,13 +514,13 @@ static int fs_create_by_name (const char *name, mode_t mode,
514 *dentry = NULL; 514 *dentry = NULL;
515 mutex_lock(&parent->d_inode->i_mutex); 515 mutex_lock(&parent->d_inode->i_mutex);
516 *dentry = lookup_one_len(name, parent, strlen(name)); 516 *dentry = lookup_one_len(name, parent, strlen(name));
517 if (!IS_ERR(dentry)) { 517 if (!IS_ERR(*dentry)) {
518 if ((mode & S_IFMT) == S_IFDIR) 518 if ((mode & S_IFMT) == S_IFDIR)
519 error = usbfs_mkdir (parent->d_inode, *dentry, mode); 519 error = usbfs_mkdir (parent->d_inode, *dentry, mode);
520 else 520 else
521 error = usbfs_create (parent->d_inode, *dentry, mode); 521 error = usbfs_create (parent->d_inode, *dentry, mode);
522 } else 522 } else
523 error = PTR_ERR(dentry); 523 error = PTR_ERR(*dentry);
524 mutex_unlock(&parent->d_inode->i_mutex); 524 mutex_unlock(&parent->d_inode->i_mutex);
525 525
526 return error; 526 return error;
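The fs_create_by_name() hunk above fixes a level-of-indirection slip: lookup_one_len()'s result is stored through the dentry pointer, so the error test and PTR_ERR() must examine *dentry, not the address of the local variable, which never looks like an error. A user-space re-implementation of the ERR_PTR convention (for illustration only, not the kernel headers) shows why the old test always passed:

#include <stdio.h>

#define MAX_ERRNO 4095

/* Simplified user-space stand-ins for the kernel's ERR_PTR helpers. */
static void *ERR_PTR(long error)    { return (void *)error; }
static long  PTR_ERR(const void *p) { return (long)p; }
static int   IS_ERR(const void *p)  { return (unsigned long)p >= (unsigned long)-MAX_ERRNO; }

int main(void)
{
    void  *dentry = ERR_PTR(-2);   /* pretend the lookup failed with -ENOENT */
    void **pd     = &dentry;

    printf("IS_ERR(*pd) = %d\n", IS_ERR(*pd));    /* 1: error detected */
    printf("IS_ERR(pd)  = %d\n", IS_ERR(pd));     /* 0: address of a local, never an error */
    printf("PTR_ERR(*pd) = %ld\n", PTR_ERR(*pd)); /* -2 */
    return 0;
}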
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 405f479a7759..5417ab1f715c 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1472,7 +1472,7 @@ int usb_reset_configuration(struct usb_device *dev)
1472 /* If not, reinstate the old alternate settings */ 1472 /* If not, reinstate the old alternate settings */
1473 if (retval < 0) { 1473 if (retval < 0) {
1474reset_old_alts: 1474reset_old_alts:
1475 for (; i >= 0; i--) { 1475 for (i--; i >= 0; i--) {
1476 struct usb_interface *intf = config->interface[i]; 1476 struct usb_interface *intf = config->interface[i];
1477 struct usb_host_interface *alt; 1477 struct usb_host_interface *alt;
1478 1478
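The usb_reset_configuration() hunk above is an off-by-one in the error path: interface i is the one whose alternate setting could not be set, so the unwind loop must start at i - 1 and only reinstate the interfaces that were actually changed. A generic stand-alone sketch of that rollback pattern (the setup and undo steps are placeholders):

#include <stdio.h>

#define N 4

/* Do N steps; on failure, undo only the steps that completed. */
static int setup_all(int fail_at, int done[N])
{
    int i, undone = 0;

    for (i = 0; i < N; i++) {
        if (i == fail_at)
            goto rollback;
        done[i] = 1;              /* pretend step i succeeded */
    }
    return 0;

rollback:
    for (i--; i >= 0; i--) {      /* start at i - 1, as in the fix above */
        done[i] = 0;
        undone++;
    }
    return undone;
}

int main(void)
{
    int done[N] = { 0 };

    printf("%d\n", setup_all(2, done));  /* 2: steps 0 and 1 are undone, not step 2 */
    return 0;
}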
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 9c90b67af7b1..efa0372ce9aa 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -543,6 +543,7 @@ static int ehci_init(struct usb_hcd *hcd)
543 */ 543 */
544 ehci->periodic_size = DEFAULT_I_TDPS; 544 ehci->periodic_size = DEFAULT_I_TDPS;
545 INIT_LIST_HEAD(&ehci->cached_itd_list); 545 INIT_LIST_HEAD(&ehci->cached_itd_list);
546 INIT_LIST_HEAD(&ehci->cached_sitd_list);
546 if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0) 547 if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
547 return retval; 548 return retval;
548 549
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c
index aeda96e0af67..1f3f01eacaf0 100644
--- a/drivers/usb/host/ehci-mem.c
+++ b/drivers/usb/host/ehci-mem.c
@@ -136,7 +136,7 @@ static inline void qh_put (struct ehci_qh *qh)
136 136
137static void ehci_mem_cleanup (struct ehci_hcd *ehci) 137static void ehci_mem_cleanup (struct ehci_hcd *ehci)
138{ 138{
139 free_cached_itd_list(ehci); 139 free_cached_lists(ehci);
140 if (ehci->async) 140 if (ehci->async)
141 qh_put (ehci->async); 141 qh_put (ehci->async);
142 ehci->async = NULL; 142 ehci->async = NULL;
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index df533cec054b..2064045d1aef 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -2137,13 +2137,27 @@ sitd_complete (
2137 (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out"); 2137 (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
2138 } 2138 }
2139 iso_stream_put (ehci, stream); 2139 iso_stream_put (ehci, stream);
2140 /* OK to recycle this SITD now that its completion callback ran. */ 2140
2141done: 2141done:
2142 sitd->urb = NULL; 2142 sitd->urb = NULL;
2143 sitd->stream = NULL; 2143 if (ehci->clock_frame != sitd->frame) {
2144 list_move(&sitd->sitd_list, &stream->free_list); 2144 /* OK to recycle this SITD now. */
2145 iso_stream_put(ehci, stream); 2145 sitd->stream = NULL;
2146 2146 list_move(&sitd->sitd_list, &stream->free_list);
2147 iso_stream_put(ehci, stream);
2148 } else {
2149 /* HW might remember this SITD, so we can't recycle it yet.
2150 * Move it to a safe place until a new frame starts.
2151 */
2152 list_move(&sitd->sitd_list, &ehci->cached_sitd_list);
2153 if (stream->refcount == 2) {
2154 /* If iso_stream_put() were called here, stream
2155 * would be freed. Instead, just prevent reuse.
2156 */
2157 stream->ep->hcpriv = NULL;
2158 stream->ep = NULL;
2159 }
2160 }
2147 return retval; 2161 return retval;
2148} 2162}
2149 2163
@@ -2209,9 +2223,10 @@ done:
2209 2223
2210/*-------------------------------------------------------------------------*/ 2224/*-------------------------------------------------------------------------*/
2211 2225
2212static void free_cached_itd_list(struct ehci_hcd *ehci) 2226static void free_cached_lists(struct ehci_hcd *ehci)
2213{ 2227{
2214 struct ehci_itd *itd, *n; 2228 struct ehci_itd *itd, *n;
2229 struct ehci_sitd *sitd, *sn;
2215 2230
2216 list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) { 2231 list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) {
2217 struct ehci_iso_stream *stream = itd->stream; 2232 struct ehci_iso_stream *stream = itd->stream;
@@ -2219,6 +2234,13 @@ static void free_cached_itd_list(struct ehci_hcd *ehci)
2219 list_move(&itd->itd_list, &stream->free_list); 2234 list_move(&itd->itd_list, &stream->free_list);
2220 iso_stream_put(ehci, stream); 2235 iso_stream_put(ehci, stream);
2221 } 2236 }
2237
2238 list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) {
2239 struct ehci_iso_stream *stream = sitd->stream;
2240 sitd->stream = NULL;
2241 list_move(&sitd->sitd_list, &stream->free_list);
2242 iso_stream_put(ehci, stream);
2243 }
2222} 2244}
2223 2245
2224/*-------------------------------------------------------------------------*/ 2246/*-------------------------------------------------------------------------*/
@@ -2245,7 +2267,7 @@ scan_periodic (struct ehci_hcd *ehci)
2245 clock_frame = -1; 2267 clock_frame = -1;
2246 } 2268 }
2247 if (ehci->clock_frame != clock_frame) { 2269 if (ehci->clock_frame != clock_frame) {
2248 free_cached_itd_list(ehci); 2270 free_cached_lists(ehci);
2249 ehci->clock_frame = clock_frame; 2271 ehci->clock_frame = clock_frame;
2250 } 2272 }
2251 clock %= mod; 2273 clock %= mod;
@@ -2408,7 +2430,7 @@ restart:
2408 clock = now; 2430 clock = now;
2409 clock_frame = clock >> 3; 2431 clock_frame = clock >> 3;
2410 if (ehci->clock_frame != clock_frame) { 2432 if (ehci->clock_frame != clock_frame) {
2411 free_cached_itd_list(ehci); 2433 free_cached_lists(ehci);
2412 ehci->clock_frame = clock_frame; 2434 ehci->clock_frame = clock_frame;
2413 } 2435 }
2414 } else { 2436 } else {
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index b1dce96dd621..556c0b48f3ab 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -87,8 +87,9 @@ struct ehci_hcd { /* one per controller */
87 int next_uframe; /* scan periodic, start here */ 87 int next_uframe; /* scan periodic, start here */
88 unsigned periodic_sched; /* periodic activity count */ 88 unsigned periodic_sched; /* periodic activity count */
89 89
90 /* list of itds completed while clock_frame was still active */ 90 /* list of itds & sitds completed while clock_frame was still active */
91 struct list_head cached_itd_list; 91 struct list_head cached_itd_list;
92 struct list_head cached_sitd_list;
92 unsigned clock_frame; 93 unsigned clock_frame;
93 94
94 /* per root hub port */ 95 /* per root hub port */
@@ -195,7 +196,7 @@ timer_action_done (struct ehci_hcd *ehci, enum ehci_timer_action action)
195 clear_bit (action, &ehci->actions); 196 clear_bit (action, &ehci->actions);
196} 197}
197 198
198static void free_cached_itd_list(struct ehci_hcd *ehci); 199static void free_cached_lists(struct ehci_hcd *ehci);
199 200
200/*-------------------------------------------------------------------------*/ 201/*-------------------------------------------------------------------------*/
201 202
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index 32bbce9718f0..65cac8cc8921 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -697,7 +697,7 @@ static int ohci_hub_control (
697 u16 wLength 697 u16 wLength
698) { 698) {
699 struct ohci_hcd *ohci = hcd_to_ohci (hcd); 699 struct ohci_hcd *ohci = hcd_to_ohci (hcd);
700 int ports = hcd_to_bus (hcd)->root_hub->maxchild; 700 int ports = ohci->num_ports;
701 u32 temp; 701 u32 temp;
702 int retval = 0; 702 int retval = 0;
703 703
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index bffcef7a5545..6c1f673443da 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -549,6 +549,19 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
549 return EP_INTERVAL(interval); 549 return EP_INTERVAL(interval);
550} 550}
551 551
552/* The "Mult" field in the endpoint context is only set for SuperSpeed devices.
553 * High speed endpoint descriptors can define "the number of additional
554 * transaction opportunities per microframe", but that goes in the Max Burst
555 * endpoint context field.
556 */
557static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
558 struct usb_host_endpoint *ep)
559{
560 if (udev->speed != USB_SPEED_SUPER || !ep->ss_ep_comp)
561 return 0;
562 return ep->ss_ep_comp->desc.bmAttributes;
563}
564
552static inline u32 xhci_get_endpoint_type(struct usb_device *udev, 565static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
553 struct usb_host_endpoint *ep) 566 struct usb_host_endpoint *ep)
554{ 567{
@@ -579,6 +592,36 @@ static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
579 return type; 592 return type;
580} 593}
581 594
595/* Return the maximum endpoint service interval time (ESIT) payload.
596 * Basically, this is the maxpacket size, multiplied by the burst size
597 * and mult size.
598 */
599static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
600 struct usb_device *udev,
601 struct usb_host_endpoint *ep)
602{
603 int max_burst;
604 int max_packet;
605
606 /* Only applies for interrupt or isochronous endpoints */
607 if (usb_endpoint_xfer_control(&ep->desc) ||
608 usb_endpoint_xfer_bulk(&ep->desc))
609 return 0;
610
611 if (udev->speed == USB_SPEED_SUPER) {
612 if (ep->ss_ep_comp)
613 return ep->ss_ep_comp->desc.wBytesPerInterval;
614 xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
615 /* Assume no bursts, no multiple opportunities to send. */
616 return ep->desc.wMaxPacketSize;
617 }
618
619 max_packet = ep->desc.wMaxPacketSize & 0x3ff;
620 max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
621 /* A 0 in max burst means 1 transfer per ESIT */
622 return max_packet * (max_burst + 1);
623}
624
582int xhci_endpoint_init(struct xhci_hcd *xhci, 625int xhci_endpoint_init(struct xhci_hcd *xhci,
583 struct xhci_virt_device *virt_dev, 626 struct xhci_virt_device *virt_dev,
584 struct usb_device *udev, 627 struct usb_device *udev,
@@ -590,6 +633,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
590 struct xhci_ring *ep_ring; 633 struct xhci_ring *ep_ring;
591 unsigned int max_packet; 634 unsigned int max_packet;
592 unsigned int max_burst; 635 unsigned int max_burst;
636 u32 max_esit_payload;
593 637
594 ep_index = xhci_get_endpoint_index(&ep->desc); 638 ep_index = xhci_get_endpoint_index(&ep->desc);
595 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); 639 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
@@ -611,6 +655,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
611 ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state; 655 ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
612 656
613 ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep); 657 ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
658 ep_ctx->ep_info |= EP_MULT(xhci_get_endpoint_mult(udev, ep));
614 659
615 /* FIXME dig Mult and streams info out of ep companion desc */ 660 /* FIXME dig Mult and streams info out of ep companion desc */
616 661
@@ -656,6 +701,26 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
656 default: 701 default:
657 BUG(); 702 BUG();
658 } 703 }
704 max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
705 ep_ctx->tx_info = MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload);
706
707 /*
708 * XXX no idea how to calculate the average TRB buffer length for bulk
709 * endpoints, as the driver gives us no clue how big each scatter gather
710 * list entry (or buffer) is going to be.
711 *
712 * For isochronous and interrupt endpoints, we set it to the max
713 * available, until we have new API in the USB core to allow drivers to
714 * declare how much bandwidth they actually need.
715 *
716 * Normally, it would be calculated by taking the total of the buffer
717 * lengths in the TD and then dividing by the number of TRBs in a TD,
718 * including link TRBs, No-op TRBs, and Event data TRBs. Since we don't
719 * use Event Data TRBs, and we don't chain in a link TRB on short
720 * transfers, we're basically dividing by 1.
721 */
722 ep_ctx->tx_info |= AVG_TRB_LENGTH_FOR_EP(max_esit_payload);
723
659 /* FIXME Debug endpoint context */ 724 /* FIXME Debug endpoint context */
660 return 0; 725 return 0;
661} 726}
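The new xhci_get_max_esit_payload() above derives the periodic payload from wMaxPacketSize: the low bits give the packet size and bits 12:11 the number of additional transactions per microframe, with a burst of 0 still meaning one transfer per ESIT. A small user-space sketch of that arithmetic, reusing the patch's masks (the example endpoint value is hypothetical):

#include <stdio.h>
#include <stdint.h>

/* Mirrors the masks used in xhci_get_max_esit_payload() above. */
static unsigned int max_esit_payload(uint16_t wMaxPacketSize)
{
    unsigned int max_packet = wMaxPacketSize & 0x3ff;           /* packet size */
    unsigned int max_burst  = (wMaxPacketSize & 0x1800) >> 11;  /* extra transactions */

    return max_packet * (max_burst + 1);                        /* 0 bursts == 1 transfer */
}

int main(void)
{
    /* hypothetical high-bandwidth isochronous endpoint: 512 bytes, 2 extra transactions */
    printf("%u\n", max_esit_payload(0x1200));  /* 1536 */
    return 0;
}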
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 877813505ef2..9e904a6d90de 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -609,6 +609,10 @@ struct xhci_ep_ctx {
609#define MAX_PACKET_MASK (0xffff << 16) 609#define MAX_PACKET_MASK (0xffff << 16)
610#define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff) 610#define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff)
611 611
612/* tx_info bitmasks */
613#define AVG_TRB_LENGTH_FOR_EP(p) ((p) & 0xffff)
614#define MAX_ESIT_PAYLOAD_FOR_EP(p) (((p) & 0xffff) << 16)
615
612 616
613/** 617/**
614 * struct xhci_input_control_context 618 * struct xhci_input_control_context
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 0cfd62127424..a44298999e20 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -229,6 +229,7 @@ static const struct sierra_iface_info direct_ip_interface_blacklist = {
229static struct usb_device_id id_table [] = { 229static struct usb_device_id id_table [] = {
230 { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */ 230 { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */
231 { USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */ 231 { USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */
232 { USB_DEVICE(0x03F0, 0x211D) }, /* HP ev2210 a.k.a MC5725 */
232 { USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */ 233 { USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */
233 234
234 { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */ 235 { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 1ed3d554e372..17726a05a0a6 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -115,9 +115,8 @@ static struct w1_therm_family_converter w1_therm_families[] = {
115 115
116static inline int w1_DS18B20_convert_temp(u8 rom[9]) 116static inline int w1_DS18B20_convert_temp(u8 rom[9])
117{ 117{
118 int t = ((s16)rom[1] << 8) | rom[0]; 118 s16 t = le16_to_cpup((__le16 *)rom);
119 t = t*1000/16; 119 return t*1000/16;
120 return t;
121} 120}
122 121
123static inline int w1_DS18S20_convert_temp(u8 rom[9]) 122static inline int w1_DS18S20_convert_temp(u8 rom[9])
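The w1_therm hunk above replaces the open-coded byte assembly with le16_to_cpup(), so the DS18B20 scratchpad value is read as a signed little-endian 16-bit count of 1/16 °C steps before being scaled to millidegrees. A minimal user-space sketch of the same arithmetic (the sample scratchpad bytes are made-up illustrations, not taken from the patch):

#include <stdio.h>
#include <stdint.h>

/* Same scaling as the fixed w1_DS18B20_convert_temp(): the raw value is a
 * signed count of 1/16 degree steps, returned in millidegrees Celsius. */
static int ds18b20_convert_temp(const uint8_t rom[9])
{
    int16_t t = (int16_t)(rom[0] | (rom[1] << 8));  /* little-endian, sign-extended */
    return t * 1000 / 16;
}

int main(void)
{
    uint8_t warm[9] = { 0x91, 0x01 };  /* 0x0191 -> +25062 (25.0625 C) */
    uint8_t cold[9] = { 0x6f, 0xfe };  /* 0xfe6f -> -25062 (-25.0625 C) */

    printf("%d\n", ds18b20_convert_temp(warm));
    printf("%d\n", ds18b20_convert_temp(cold));
    return 0;
}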
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index c56877972b0e..cae75c122ea4 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3767,7 +3767,6 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3767 __u64 start, __u64 len) 3767 __u64 start, __u64 len)
3768{ 3768{
3769 ext4_lblk_t start_blk; 3769 ext4_lblk_t start_blk;
3770 ext4_lblk_t len_blks;
3771 int error = 0; 3770 int error = 0;
3772 3771
3773 /* fallback to generic here if not in extents fmt */ 3772 /* fallback to generic here if not in extents fmt */
@@ -3781,8 +3780,14 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3781 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { 3780 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
3782 error = ext4_xattr_fiemap(inode, fieinfo); 3781 error = ext4_xattr_fiemap(inode, fieinfo);
3783 } else { 3782 } else {
3783 ext4_lblk_t len_blks;
3784 __u64 last_blk;
3785
3784 start_blk = start >> inode->i_sb->s_blocksize_bits; 3786 start_blk = start >> inode->i_sb->s_blocksize_bits;
3785 len_blks = len >> inode->i_sb->s_blocksize_bits; 3787 last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
3788 if (last_blk >= EXT_MAX_BLOCK)
3789 last_blk = EXT_MAX_BLOCK-1;
3790 len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
3786 3791
3787 /* 3792 /*
3788 * Walk the extent tree gathering extent information. 3793 * Walk the extent tree gathering extent information.
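The ext4_fiemap() hunk above stops shifting the raw length directly into a 32-bit block count; instead it computes the last byte's logical block, clamps it below EXT_MAX_BLOCK, and derives the count from that, so an oversized request can no longer overflow ext4_lblk_t. A user-space sketch of the conversion (the EXT_MAX_BLOCK value here is assumed for illustration):

#include <stdio.h>
#include <stdint.h>

#define EXT_MAX_BLOCK 0xffffffffU   /* assumed value, for illustration only */

/* Byte range -> inclusive block count, clamped as in the hunk above. */
static uint32_t fiemap_len_blks(uint64_t start, uint64_t len, unsigned int blkbits)
{
    uint32_t start_blk = start >> blkbits;
    uint64_t last_blk  = (start + len - 1) >> blkbits;

    if (last_blk >= EXT_MAX_BLOCK)
        last_blk = EXT_MAX_BLOCK - 1;
    return (uint32_t)last_blk - start_blk + 1;
}

int main(void)
{
    printf("%u\n", fiemap_len_blks(0, 8192, 12));   /* 2: bytes 0..8191, 4 KiB blocks */
    printf("%u\n", fiemap_len_blks(0, ~0ULL, 12));  /* clamped instead of wrapping */
    return 0;
}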
diff --git a/fs/jfs/resize.c b/fs/jfs/resize.c
index 7f24a0bb08ca..1aba0039f1c9 100644
--- a/fs/jfs/resize.c
+++ b/fs/jfs/resize.c
@@ -81,6 +81,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
81 struct inode *iplist[1]; 81 struct inode *iplist[1];
82 struct jfs_superblock *j_sb, *j_sb2; 82 struct jfs_superblock *j_sb, *j_sb2;
83 uint old_agsize; 83 uint old_agsize;
84 int agsizechanged = 0;
84 struct buffer_head *bh, *bh2; 85 struct buffer_head *bh, *bh2;
85 86
86 /* If the volume hasn't grown, get out now */ 87 /* If the volume hasn't grown, get out now */
@@ -333,6 +334,9 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
333 */ 334 */
334 if ((rc = dbExtendFS(ipbmap, XAddress, nblocks))) 335 if ((rc = dbExtendFS(ipbmap, XAddress, nblocks)))
335 goto error_out; 336 goto error_out;
337
338 agsizechanged |= (bmp->db_agsize != old_agsize);
339
336 /* 340 /*
337 * the map now has extended to cover additional nblocks: 341 * the map now has extended to cover additional nblocks:
338 * dn_mapsize = oldMapsize + nblocks; 342 * dn_mapsize = oldMapsize + nblocks;
@@ -432,7 +436,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
432 * will correctly identify the new ag); 436 * will correctly identify the new ag);
433 */ 437 */
434 /* if new AG size the same as old AG size, done! */ 438 /* if new AG size the same as old AG size, done! */
435 if (bmp->db_agsize != old_agsize) { 439 if (agsizechanged) {
436 if ((rc = diExtendFS(ipimap, ipbmap))) 440 if ((rc = diExtendFS(ipimap, ipbmap)))
437 goto error_out; 441 goto error_out;
438 442
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index bd39abc51508..37d555c32cd8 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -965,6 +965,8 @@ out_error:
965static void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_server *source) 965static void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_server *source)
966{ 966{
967 target->flags = source->flags; 967 target->flags = source->flags;
968 target->rsize = source->rsize;
969 target->wsize = source->wsize;
968 target->acregmin = source->acregmin; 970 target->acregmin = source->acregmin;
969 target->acregmax = source->acregmax; 971 target->acregmax = source->acregmax;
970 target->acdirmin = source->acdirmin; 972 target->acdirmin = source->acdirmin;
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index d896d3e55024..01a0b9acb1f8 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -837,6 +837,8 @@ out_zap_parent:
837 /* If we have submounts, don't unhash ! */ 837 /* If we have submounts, don't unhash ! */
838 if (have_submounts(dentry)) 838 if (have_submounts(dentry))
839 goto out_valid; 839 goto out_valid;
840 if (dentry->d_flags & DCACHE_DISCONNECTED)
841 goto out_valid;
840 shrink_dcache_parent(dentry); 842 shrink_dcache_parent(dentry);
841 } 843 }
842 d_drop(dentry); 844 d_drop(dentry);
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index bbf72d8f9fc0..718f3fb312d0 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -160,10 +160,10 @@ static __be32 *read_buf(struct nfsd4_compoundargs *argp, u32 nbytes)
160 argp->p = page_address(argp->pagelist[0]); 160 argp->p = page_address(argp->pagelist[0]);
161 argp->pagelist++; 161 argp->pagelist++;
162 if (argp->pagelen < PAGE_SIZE) { 162 if (argp->pagelen < PAGE_SIZE) {
163 argp->end = p + (argp->pagelen>>2); 163 argp->end = argp->p + (argp->pagelen>>2);
164 argp->pagelen = 0; 164 argp->pagelen = 0;
165 } else { 165 } else {
166 argp->end = p + (PAGE_SIZE>>2); 166 argp->end = argp->p + (PAGE_SIZE>>2);
167 argp->pagelen -= PAGE_SIZE; 167 argp->pagelen -= PAGE_SIZE;
168 } 168 }
169 memcpy(((char*)p)+avail, argp->p, (nbytes - avail)); 169 memcpy(((char*)p)+avail, argp->p, (nbytes - avail));
@@ -1425,10 +1425,10 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
1425 argp->p = page_address(argp->pagelist[0]); 1425 argp->p = page_address(argp->pagelist[0]);
1426 argp->pagelist++; 1426 argp->pagelist++;
1427 if (argp->pagelen < PAGE_SIZE) { 1427 if (argp->pagelen < PAGE_SIZE) {
1428 argp->end = p + (argp->pagelen>>2); 1428 argp->end = argp->p + (argp->pagelen>>2);
1429 argp->pagelen = 0; 1429 argp->pagelen = 0;
1430 } else { 1430 } else {
1431 argp->end = p + (PAGE_SIZE>>2); 1431 argp->end = argp->p + (PAGE_SIZE>>2);
1432 argp->pagelen -= PAGE_SIZE; 1432 argp->pagelen -= PAGE_SIZE;
1433 } 1433 }
1434 } 1434 }
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index 21c808f752d8..b18c6d677f9d 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -407,6 +407,7 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
407 struct buffer_head *bh) 407 struct buffer_head *bh)
408{ 408{
409 int ret = 0; 409 int ret = 0;
410 struct ocfs2_dinode *di = (struct ocfs2_dinode *)bh->b_data;
410 411
411 mlog_entry_void(); 412 mlog_entry_void();
412 413
@@ -426,6 +427,7 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
426 427
427 get_bh(bh); /* for end_buffer_write_sync() */ 428 get_bh(bh); /* for end_buffer_write_sync() */
428 bh->b_end_io = end_buffer_write_sync; 429 bh->b_end_io = end_buffer_write_sync;
430 ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &di->i_check);
429 submit_bh(WRITE, bh); 431 submit_bh(WRITE, bh);
430 432
431 wait_on_buffer(bh); 433 wait_on_buffer(bh);
diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c
index 02bf17808bdc..18bc101d603f 100644
--- a/fs/ocfs2/dlm/dlmfs.c
+++ b/fs/ocfs2/dlm/dlmfs.c
@@ -205,7 +205,7 @@ static ssize_t dlmfs_file_read(struct file *filp,
205 if ((count + *ppos) > i_size_read(inode)) 205 if ((count + *ppos) > i_size_read(inode))
206 readlen = i_size_read(inode) - *ppos; 206 readlen = i_size_read(inode) - *ppos;
207 else 207 else
208 readlen = count - *ppos; 208 readlen = count;
209 209
210 lvb_buf = kmalloc(readlen, GFP_NOFS); 210 lvb_buf = kmalloc(readlen, GFP_NOFS);
211 if (!lvb_buf) 211 if (!lvb_buf)
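The dlmfs_file_read() hunk above fixes the short-read math: when the request fits inside the file, the read length is simply count; only when count + *ppos runs past i_size does it get clamped, and the old count - *ppos expression undersized the read (and could even go negative). A small sketch of the corrected clamp (names are illustrative):

#include <stdio.h>

/* Clamp a read of 'count' bytes at offset 'pos' to the end of a file of size 'i_size'. */
static long clamp_readlen(long count, long pos, long i_size)
{
    if (count + pos > i_size)
        return i_size - pos;
    return count;
}

int main(void)
{
    printf("%ld\n", clamp_readlen(64, 0, 32));   /* 32: truncated at EOF */
    printf("%ld\n", clamp_readlen(16, 8, 32));   /* 16: fits, returned unchanged */
    return 0;
}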
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 88459bdd1ff3..ec4d97faffbf 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -559,6 +559,7 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb,
559 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); 559 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
560 if (IS_ERR(handle)) { 560 if (IS_ERR(handle)) {
561 status = PTR_ERR(handle); 561 status = PTR_ERR(handle);
562 handle = NULL;
562 mlog_errno(status); 563 mlog_errno(status);
563 goto out; 564 goto out;
564 } 565 }
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 8ae65c9c020c..a8e85720d1f4 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -4083,6 +4083,9 @@ static int ocfs2_complete_reflink(struct inode *s_inode,
4083 di->i_attr = s_di->i_attr; 4083 di->i_attr = s_di->i_attr;
4084 4084
4085 if (preserve) { 4085 if (preserve) {
4086 t_inode->i_uid = s_inode->i_uid;
4087 t_inode->i_gid = s_inode->i_gid;
4088 t_inode->i_mode = s_inode->i_mode;
4086 di->i_uid = s_di->i_uid; 4089 di->i_uid = s_di->i_uid;
4087 di->i_gid = s_di->i_gid; 4090 di->i_gid = s_di->i_gid;
4088 di->i_mode = s_di->i_mode; 4091 di->i_mode = s_di->i_mode;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index d59e279874c7..6ab0bd692968 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2912,7 +2912,7 @@ out_no_task:
2912 */ 2912 */
2913static const struct pid_entry tid_base_stuff[] = { 2913static const struct pid_entry tid_base_stuff[] = {
2914 DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations), 2914 DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
2915 DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fd_operations), 2915 DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations),
2916 REG("environ", S_IRUSR, proc_environ_operations), 2916 REG("environ", S_IRUSR, proc_environ_operations),
2917 INF("auxv", S_IRUSR, proc_pid_auxv), 2917 INF("auxv", S_IRUSR, proc_pid_auxv),
2918 ONE("status", S_IRUGO, proc_pid_status), 2918 ONE("status", S_IRUGO, proc_pid_status),
diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
index c094f58c7448..1e686eeb2867 100644
--- a/fs/reiserfs/dir.c
+++ b/fs/reiserfs/dir.c
@@ -45,8 +45,6 @@ static inline bool is_privroot_deh(struct dentry *dir,
45 struct reiserfs_de_head *deh) 45 struct reiserfs_de_head *deh)
46{ 46{
47 struct dentry *privroot = REISERFS_SB(dir->d_sb)->priv_root; 47 struct dentry *privroot = REISERFS_SB(dir->d_sb)->priv_root;
48 if (reiserfs_expose_privroot(dir->d_sb))
49 return 0;
50 return (dir == dir->d_parent && privroot->d_inode && 48 return (dir == dir->d_parent && privroot->d_inode &&
51 deh->deh_objectid == INODE_PKEY(privroot->d_inode)->k_objectid); 49 deh->deh_objectid == INODE_PKEY(privroot->d_inode)->k_objectid);
52} 50}
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 75f4c0bf0506..1347e9f32c52 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -557,7 +557,7 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
557 if (!err && new_size < i_size_read(dentry->d_inode)) { 557 if (!err && new_size < i_size_read(dentry->d_inode)) {
558 struct iattr newattrs = { 558 struct iattr newattrs = {
559 .ia_ctime = current_fs_time(inode->i_sb), 559 .ia_ctime = current_fs_time(inode->i_sb),
560 .ia_size = buffer_size, 560 .ia_size = new_size,
561 .ia_valid = ATTR_SIZE | ATTR_CTIME, 561 .ia_valid = ATTR_SIZE | ATTR_CTIME,
562 }; 562 };
563 563
@@ -976,21 +976,13 @@ int reiserfs_permission(struct inode *inode, int mask)
976 return generic_permission(inode, mask, NULL); 976 return generic_permission(inode, mask, NULL);
977} 977}
978 978
979/* This will catch lookups from the fs root to .reiserfs_priv */ 979static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd)
980static int
981xattr_lookup_poison(struct dentry *dentry, struct qstr *q1, struct qstr *name)
982{ 980{
983 struct dentry *priv_root = REISERFS_SB(dentry->d_sb)->priv_root; 981 return -EPERM;
984 if (container_of(q1, struct dentry, d_name) == priv_root)
985 return -ENOENT;
986 if (q1->len == name->len &&
987 !memcmp(q1->name, name->name, name->len))
988 return 0;
989 return 1;
990} 982}
991 983
992static const struct dentry_operations xattr_lookup_poison_ops = { 984static const struct dentry_operations xattr_lookup_poison_ops = {
993 .d_compare = xattr_lookup_poison, 985 .d_revalidate = xattr_hide_revalidate,
994}; 986};
995 987
996int reiserfs_lookup_privroot(struct super_block *s) 988int reiserfs_lookup_privroot(struct super_block *s)
@@ -1004,8 +996,7 @@ int reiserfs_lookup_privroot(struct super_block *s)
1004 strlen(PRIVROOT_NAME)); 996 strlen(PRIVROOT_NAME));
1005 if (!IS_ERR(dentry)) { 997 if (!IS_ERR(dentry)) {
1006 REISERFS_SB(s)->priv_root = dentry; 998 REISERFS_SB(s)->priv_root = dentry;
1007 if (!reiserfs_expose_privroot(s)) 999 dentry->d_op = &xattr_lookup_poison_ops;
1008 s->s_root->d_op = &xattr_lookup_poison_ops;
1009 if (dentry->d_inode) 1000 if (dentry->d_inode)
1010 dentry->d_inode->i_flags |= S_PRIVATE; 1001 dentry->d_inode->i_flags |= S_PRIVATE;
1011 } else 1002 } else
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 5eddd6fcc3ab..64e92454ef1a 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -1160,6 +1160,7 @@ xfs_fs_put_super(
1160 1160
1161 xfs_unmountfs(mp); 1161 xfs_unmountfs(mp);
1162 xfs_freesb(mp); 1162 xfs_freesb(mp);
1163 xfs_inode_shrinker_unregister(mp);
1163 xfs_icsb_destroy_counters(mp); 1164 xfs_icsb_destroy_counters(mp);
1164 xfs_close_devices(mp); 1165 xfs_close_devices(mp);
1165 xfs_dmops_put(mp); 1166 xfs_dmops_put(mp);
@@ -1523,6 +1524,8 @@ xfs_fs_fill_super(
1523 if (error) 1524 if (error)
1524 goto fail_vnrele; 1525 goto fail_vnrele;
1525 1526
1527 xfs_inode_shrinker_register(mp);
1528
1526 kfree(mtpt); 1529 kfree(mtpt);
1527 return 0; 1530 return 0;
1528 1531
@@ -1767,6 +1770,7 @@ init_xfs_fs(void)
1767 goto out_cleanup_procfs; 1770 goto out_cleanup_procfs;
1768 1771
1769 vfs_initquota(); 1772 vfs_initquota();
1773 xfs_inode_shrinker_init();
1770 1774
1771 error = register_filesystem(&xfs_fs_type); 1775 error = register_filesystem(&xfs_fs_type);
1772 if (error) 1776 if (error)
@@ -1794,6 +1798,7 @@ exit_xfs_fs(void)
1794{ 1798{
1795 vfs_exitquota(); 1799 vfs_exitquota();
1796 unregister_filesystem(&xfs_fs_type); 1800 unregister_filesystem(&xfs_fs_type);
1801 xfs_inode_shrinker_destroy();
1797 xfs_sysctl_unregister(); 1802 xfs_sysctl_unregister();
1798 xfs_cleanup_procfs(); 1803 xfs_cleanup_procfs();
1799 xfs_buf_terminate(); 1804 xfs_buf_terminate();
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 6b6b39416ad3..57adf2d60dfc 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -95,7 +95,8 @@ xfs_inode_ag_walk(
95 struct xfs_perag *pag, int flags), 95 struct xfs_perag *pag, int flags),
96 int flags, 96 int flags,
97 int tag, 97 int tag,
98 int exclusive) 98 int exclusive,
99 int *nr_to_scan)
99{ 100{
100 struct xfs_perag *pag = &mp->m_perag[ag]; 101 struct xfs_perag *pag = &mp->m_perag[ag];
101 uint32_t first_index; 102 uint32_t first_index;
@@ -135,7 +136,7 @@ restart:
135 if (error == EFSCORRUPTED) 136 if (error == EFSCORRUPTED)
136 break; 137 break;
137 138
138 } while (1); 139 } while ((*nr_to_scan)--);
139 140
140 if (skipped) { 141 if (skipped) {
141 delay(1); 142 delay(1);
@@ -153,23 +154,30 @@ xfs_inode_ag_iterator(
153 struct xfs_perag *pag, int flags), 154 struct xfs_perag *pag, int flags),
154 int flags, 155 int flags,
155 int tag, 156 int tag,
156 int exclusive) 157 int exclusive,
158 int *nr_to_scan)
157{ 159{
158 int error = 0; 160 int error = 0;
159 int last_error = 0; 161 int last_error = 0;
160 xfs_agnumber_t ag; 162 xfs_agnumber_t ag;
163 int nr;
161 164
165 nr = nr_to_scan ? *nr_to_scan : INT_MAX;
162 for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) { 166 for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
163 if (!mp->m_perag[ag].pag_ici_init) 167 if (!mp->m_perag[ag].pag_ici_init)
164 continue; 168 continue;
165 error = xfs_inode_ag_walk(mp, ag, execute, flags, tag, 169 error = xfs_inode_ag_walk(mp, ag, execute, flags, tag,
166 exclusive); 170 exclusive, &nr);
167 if (error) { 171 if (error) {
168 last_error = error; 172 last_error = error;
169 if (error == EFSCORRUPTED) 173 if (error == EFSCORRUPTED)
170 break; 174 break;
171 } 175 }
176 if (nr <= 0)
177 break;
172 } 178 }
179 if (nr_to_scan)
180 *nr_to_scan = nr;
173 return XFS_ERROR(last_error); 181 return XFS_ERROR(last_error);
174} 182}
175 183
@@ -289,7 +297,7 @@ xfs_sync_data(
289 ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0); 297 ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);
290 298
291 error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags, 299 error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags,
292 XFS_ICI_NO_TAG, 0); 300 XFS_ICI_NO_TAG, 0, NULL);
293 if (error) 301 if (error)
294 return XFS_ERROR(error); 302 return XFS_ERROR(error);
295 303
@@ -311,7 +319,7 @@ xfs_sync_attr(
311 ASSERT((flags & ~SYNC_WAIT) == 0); 319 ASSERT((flags & ~SYNC_WAIT) == 0);
312 320
313 return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags, 321 return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags,
314 XFS_ICI_NO_TAG, 0); 322 XFS_ICI_NO_TAG, 0, NULL);
315} 323}
316 324
317STATIC int 325STATIC int
@@ -679,6 +687,7 @@ __xfs_inode_set_reclaim_tag(
679 radix_tree_tag_set(&pag->pag_ici_root, 687 radix_tree_tag_set(&pag->pag_ici_root,
680 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), 688 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
681 XFS_ICI_RECLAIM_TAG); 689 XFS_ICI_RECLAIM_TAG);
690 pag->pag_ici_reclaimable++;
682} 691}
683 692
684/* 693/*
@@ -710,6 +719,7 @@ __xfs_inode_clear_reclaim_tag(
710{ 719{
711 radix_tree_tag_clear(&pag->pag_ici_root, 720 radix_tree_tag_clear(&pag->pag_ici_root,
712 XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG); 721 XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
722 pag->pag_ici_reclaimable--;
713} 723}
714 724
715STATIC int 725STATIC int
@@ -770,5 +780,88 @@ xfs_reclaim_inodes(
770 int mode) 780 int mode)
771{ 781{
772 return xfs_inode_ag_iterator(mp, xfs_reclaim_inode, mode, 782 return xfs_inode_ag_iterator(mp, xfs_reclaim_inode, mode,
773 XFS_ICI_RECLAIM_TAG, 1); 783 XFS_ICI_RECLAIM_TAG, 1, NULL);
784}
785
786/*
787 * Shrinker infrastructure.
788 *
789 * This is all far more complex than it needs to be. It adds a global list of
790 * mounts because the shrinkers can only call a global context. We need to make
791 * the shrinkers pass a context to avoid the need for global state.
792 */
793static LIST_HEAD(xfs_mount_list);
794static struct rw_semaphore xfs_mount_list_lock;
795
796static int
797xfs_reclaim_inode_shrink(
798 int nr_to_scan,
799 gfp_t gfp_mask)
800{
801 struct xfs_mount *mp;
802 xfs_agnumber_t ag;
803 int reclaimable = 0;
804
805 if (nr_to_scan) {
806 if (!(gfp_mask & __GFP_FS))
807 return -1;
808
809 down_read(&xfs_mount_list_lock);
810 list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
811 xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0,
812 XFS_ICI_RECLAIM_TAG, 1, &nr_to_scan);
813 if (nr_to_scan <= 0)
814 break;
815 }
816 up_read(&xfs_mount_list_lock);
817 }
818
819 down_read(&xfs_mount_list_lock);
820 list_for_each_entry(mp, &xfs_mount_list, m_mplist) {
821 for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
822
823 if (!mp->m_perag[ag].pag_ici_init)
824 continue;
825 reclaimable += mp->m_perag[ag].pag_ici_reclaimable;
826 }
827 }
828 up_read(&xfs_mount_list_lock);
829 return reclaimable;
830}
831
832static struct shrinker xfs_inode_shrinker = {
833 .shrink = xfs_reclaim_inode_shrink,
834 .seeks = DEFAULT_SEEKS,
835};
836
837void __init
838xfs_inode_shrinker_init(void)
839{
840 init_rwsem(&xfs_mount_list_lock);
841 register_shrinker(&xfs_inode_shrinker);
842}
843
844void
845xfs_inode_shrinker_destroy(void)
846{
847 ASSERT(list_empty(&xfs_mount_list));
848 unregister_shrinker(&xfs_inode_shrinker);
849}
850
851void
852xfs_inode_shrinker_register(
853 struct xfs_mount *mp)
854{
855 down_write(&xfs_mount_list_lock);
856 list_add_tail(&mp->m_mplist, &xfs_mount_list);
857 up_write(&xfs_mount_list_lock);
858}
859
860void
861xfs_inode_shrinker_unregister(
862 struct xfs_mount *mp)
863{
864 down_write(&xfs_mount_list_lock);
865 list_del(&mp->m_mplist);
866 up_write(&xfs_mount_list_lock);
774} 867}
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
index ea932b43335d..0b28c13bdf94 100644
--- a/fs/xfs/linux-2.6/xfs_sync.h
+++ b/fs/xfs/linux-2.6/xfs_sync.h
@@ -54,6 +54,11 @@ void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag,
54int xfs_sync_inode_valid(struct xfs_inode *ip, struct xfs_perag *pag); 54int xfs_sync_inode_valid(struct xfs_inode *ip, struct xfs_perag *pag);
55int xfs_inode_ag_iterator(struct xfs_mount *mp, 55int xfs_inode_ag_iterator(struct xfs_mount *mp,
56 int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags), 56 int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags),
57 int flags, int tag, int write_lock); 57 int flags, int tag, int write_lock, int *nr_to_scan);
58
59void xfs_inode_shrinker_init(void);
60void xfs_inode_shrinker_destroy(void);
61void xfs_inode_shrinker_register(struct xfs_mount *mp);
62void xfs_inode_shrinker_unregister(struct xfs_mount *mp);
58 63
59#endif 64#endif
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index 873e07e29074..145f5963917d 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -891,7 +891,8 @@ xfs_qm_dqrele_all_inodes(
891 uint flags) 891 uint flags)
892{ 892{
893 ASSERT(mp->m_quotainfo); 893 ASSERT(mp->m_quotainfo);
894 xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, XFS_ICI_NO_TAG, 0); 894 xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags,
895 XFS_ICI_NO_TAG, 0, NULL);
895} 896}
896 897
897/*------------------------------------------------------------------------*/ 898/*------------------------------------------------------------------------*/
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index 6702bd865811..1182604a16ea 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -229,6 +229,7 @@ typedef struct xfs_perag
229 int pag_ici_init; /* incore inode cache initialised */ 229 int pag_ici_init; /* incore inode cache initialised */
230 rwlock_t pag_ici_lock; /* incore inode lock */ 230 rwlock_t pag_ici_lock; /* incore inode lock */
231 struct radix_tree_root pag_ici_root; /* incore inode cache root */ 231 struct radix_tree_root pag_ici_root; /* incore inode cache root */
232 int pag_ici_reclaimable; /* reclaimable inodes */
232#endif 233#endif
233} xfs_perag_t; 234} xfs_perag_t;
234 235
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 0088311d23e7..b20d59eab00a 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -257,6 +257,7 @@ typedef struct xfs_mount {
257 wait_queue_head_t m_wait_single_sync_task; 257 wait_queue_head_t m_wait_single_sync_task;
258 __int64_t m_update_flags; /* sb flags we need to update 258 __int64_t m_update_flags; /* sb flags we need to update
259 on the next remount,rw */ 259 on the next remount,rw */
260 struct list_head m_mplist; /* inode shrinker mount list */
260} xfs_mount_t; 261} xfs_mount_t;
261 262
262/* 263/*
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 20f31567ccee..f8bd0f92c1d8 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -1024,8 +1024,8 @@ static inline int ata_ok(u8 status)
1024 1024
1025static inline int lba_28_ok(u64 block, u32 n_block) 1025static inline int lba_28_ok(u64 block, u32 n_block)
1026{ 1026{
1027 /* check the ending block number */ 1027 /* check the ending block number: must be LESS THAN 0x0fffffff */
1028 return ((block + n_block) < ((u64)1 << 28)) && (n_block <= 256); 1028 return ((block + n_block) < ((1 << 28) - 1)) && (n_block <= 256);
1029} 1029}
1030 1030
1031static inline int lba_48_ok(u64 block, u32 n_block) 1031static inline int lba_48_ok(u64 block, u32 n_block)
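The lba_28_ok() hunk above tightens the range check so that, as its new comment says, the ending block number stays strictly below 0x0fffffff, while still allowing at most 256 sectors per command. A quick user-space check of the boundary, using the fixed expression verbatim:

#include <stdio.h>
#include <stdint.h>

/* The fixed check from the hunk above: the ending block number must be
 * less than 0x0fffffff, and one command covers at most 256 sectors. */
static int lba_28_ok(uint64_t block, uint32_t n_block)
{
    return ((block + n_block) < ((1 << 28) - 1)) && (n_block <= 256);
}

int main(void)
{
    printf("%d\n", lba_28_ok(0x0ffffefe, 256));  /* 1: block + n_block == 0x0ffffffe */
    printf("%d\n", lba_28_ok(0x0ffffeff, 256));  /* 0: block + n_block == 0x0fffffff */
    return 0;
}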
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index 8be5135ff7aa..2c55a7ea20af 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -107,6 +107,7 @@ typedef enum {
107 SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */ 107 SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */
108 SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */ 108 SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */
109 SCTP_CMD_SEND_MSG, /* Send the whole use message */ 109 SCTP_CMD_SEND_MSG, /* Send the whole use message */
110 SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
110 SCTP_CMD_LAST 111 SCTP_CMD_LAST
111} sctp_verb_t; 112} sctp_verb_t;
112 113
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 78740ec57d5d..fa6cde578a1d 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -128,6 +128,7 @@ extern int sctp_register_pf(struct sctp_pf *, sa_family_t);
128int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb); 128int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
129int sctp_inet_listen(struct socket *sock, int backlog); 129int sctp_inet_listen(struct socket *sock, int backlog);
130void sctp_write_space(struct sock *sk); 130void sctp_write_space(struct sock *sk);
131void sctp_data_ready(struct sock *sk, int len);
131unsigned int sctp_poll(struct file *file, struct socket *sock, 132unsigned int sctp_poll(struct file *file, struct socket *sock,
132 poll_table *wait); 133 poll_table *wait);
133void sctp_sock_rfree(struct sk_buff *skb); 134void sctp_sock_rfree(struct sk_buff *skb);
diff --git a/init/initramfs.c b/init/initramfs.c
index b37d34beb90b..b27d045a3bd0 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -457,7 +457,8 @@ static char * __init unpack_to_rootfs(char *buf, unsigned len)
457 compress_name); 457 compress_name);
458 message = msg_buf; 458 message = msg_buf;
459 } 459 }
460 } 460 } else
461 error("junk in compressed archive");
461 if (state != Reset) 462 if (state != Reset)
462 error("junk in compressed archive"); 463 error("junk in compressed archive");
463 this_header = saved_offset + my_inptr; 464 this_header = saved_offset + my_inptr;
diff --git a/kernel/cred.c b/kernel/cred.c
index 1ed8ca18790c..099f5e6fb94c 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -786,8 +786,6 @@ bool creds_are_invalid(const struct cred *cred)
786{ 786{
787 if (cred->magic != CRED_MAGIC) 787 if (cred->magic != CRED_MAGIC)
788 return true; 788 return true;
789 if (atomic_read(&cred->usage) < atomic_read(&cred->subscribers))
790 return true;
791#ifdef CONFIG_SECURITY_SELINUX 789#ifdef CONFIG_SECURITY_SELINUX
792 if (selinux_is_enabled()) { 790 if (selinux_is_enabled()) {
793 if ((unsigned long) cred->security < PAGE_SIZE) 791 if ((unsigned long) cred->security < PAGE_SIZE)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 8891ebf3cc69..43c1dfb7b386 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -4848,7 +4848,7 @@ err_fput_free_put_context:
4848 4848
4849err_free_put_context: 4849err_free_put_context:
4850 if (err < 0) 4850 if (err < 0)
4851 kfree(event); 4851 free_event(event);
4852 4852
4853err_put_context: 4853err_put_context:
4854 if (err < 0) 4854 if (err < 0)
diff --git a/lib/flex_array.c b/lib/flex_array.c
index 66eef2e4483e..41b1804fa728 100644
--- a/lib/flex_array.c
+++ b/lib/flex_array.c
@@ -99,7 +99,7 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total,
99 ret->element_size = element_size; 99 ret->element_size = element_size;
100 ret->total_nr_elements = total; 100 ret->total_nr_elements = total;
101 if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO)) 101 if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO))
102 memset(ret->parts[0], FLEX_ARRAY_FREE, 102 memset(&ret->parts[0], FLEX_ARRAY_FREE,
103 FLEX_ARRAY_BASE_BYTES_LEFT); 103 FLEX_ARRAY_BASE_BYTES_LEFT);
104 return ret; 104 return ret;
105} 105}
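The flex_array change above is a one-character fix with a large effect: when the elements fit in the base structure, the parts[] area itself is the element storage, so the initial memset() has to target &ret->parts[0] (the embedded bytes) rather than ret->parts[0] (an uninitialized pointer that would be dereferenced). A tiny stand-alone illustration of the difference, using a simplified structure:

#include <stdio.h>
#include <string.h>

struct fake_flex_array {
    int   total;
    void *parts[4];   /* doubles as inline element storage when everything fits */
};

int main(void)
{
    struct fake_flex_array fa;

    /* &fa.parts[0]: wipe the embedded storage itself - what the fix does. */
    memset(&fa.parts[0], 0x6c, sizeof(fa.parts));
    printf("0x%02x\n", ((unsigned char *)fa.parts)[0]);   /* 0x6c */

    /* fa.parts[0] (without '&') would instead be read as a pointer and
     * dereferenced while still holding garbage - the crash being fixed. */
    return 0;
}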
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d3e716fbab13..e5f611db3da0 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2217,12 +2217,12 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
2217 } 2217 }
2218 unlock_page_cgroup(pc); 2218 unlock_page_cgroup(pc);
2219 2219
2220 *ptr = mem;
2220 if (mem) { 2221 if (mem) {
2221 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false, 2222 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false,
2222 page); 2223 page);
2223 css_put(&mem->css); 2224 css_put(&mem->css);
2224 } 2225 }
2225 *ptr = mem;
2226 return ret; 2226 return ret;
2227} 2227}
2228 2228
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c
index bad1c49fd960..72340ddb6afc 100644
--- a/net/ieee802154/af_ieee802154.c
+++ b/net/ieee802154/af_ieee802154.c
@@ -147,6 +147,9 @@ static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg,
147 dev_load(sock_net(sk), ifr.ifr_name); 147 dev_load(sock_net(sk), ifr.ifr_name);
148 dev = dev_get_by_name(sock_net(sk), ifr.ifr_name); 148 dev = dev_get_by_name(sock_net(sk), ifr.ifr_name);
149 149
150 if (!dev)
151 return -ENODEV;
152
150 if (dev->type == ARPHRD_IEEE802154 && dev->netdev_ops->ndo_do_ioctl) 153 if (dev->type == ARPHRD_IEEE802154 && dev->netdev_ops->ndo_do_ioctl)
151 ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, cmd); 154 ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, cmd);
152 155
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b1234a75d6a6..27dd240ba10e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1368,6 +1368,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1368 sk_eat_skb(sk, skb, 0); 1368 sk_eat_skb(sk, skb, 0);
1369 if (!desc->count) 1369 if (!desc->count)
1370 break; 1370 break;
1371 tp->copied_seq = seq;
1371 } 1372 }
1372 tp->copied_seq = seq; 1373 tp->copied_seq = seq;
1373 1374
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 112c61135f92..16190cac0b69 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -471,8 +471,8 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
471 if (hslot->count < hslot2->count) 471 if (hslot->count < hslot2->count)
472 goto begin; 472 goto begin;
473 473
474 result = udp4_lib_lookup2(net, INADDR_ANY, sport, 474 result = udp4_lib_lookup2(net, saddr, sport,
475 daddr, hnum, dif, 475 INADDR_ANY, hnum, dif,
476 hslot2, slot2); 476 hslot2, slot2);
477 } 477 }
478 rcu_read_unlock(); 478 rcu_read_unlock();
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 548a06e66b4e..d2ef3a39dff4 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1006,7 +1006,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
1006 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len); 1006 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1007 1007
1008 t1 = (struct tcphdr *) skb_push(buff, tot_len); 1008 t1 = (struct tcphdr *) skb_push(buff, tot_len);
1009 skb_reset_transport_header(skb); 1009 skb_reset_transport_header(buff);
1010 1010
1011 /* Swap the send and the receive. */ 1011 /* Swap the send and the receive. */
1012 memset(t1, 0, sizeof(*t1)); 1012 memset(t1, 0, sizeof(*t1));
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d9714d20705d..4f57cd2b353a 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -258,8 +258,8 @@ static struct sock *__udp6_lib_lookup(struct net *net,
258 if (hslot->count < hslot2->count) 258 if (hslot->count < hslot2->count)
259 goto begin; 259 goto begin;
260 260
261 result = udp6_lib_lookup2(net, &in6addr_any, sport, 261 result = udp6_lib_lookup2(net, saddr, sport,
262 daddr, hnum, dif, 262 &in6addr_any, hnum, dif,
263 hslot2, slot2); 263 hslot2, slot2);
264 } 264 }
265 rcu_read_unlock(); 265 rcu_read_unlock();
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 304b0b65216d..dfdc138ad0d5 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -183,7 +183,6 @@ static void sta_addba_resp_timer_expired(unsigned long data)
183 HT_AGG_STATE_REQ_STOP_BA_MSK)) != 183 HT_AGG_STATE_REQ_STOP_BA_MSK)) !=
184 HT_ADDBA_REQUESTED_MSK) { 184 HT_ADDBA_REQUESTED_MSK) {
185 spin_unlock_bh(&sta->lock); 185 spin_unlock_bh(&sta->lock);
186 *state = HT_AGG_STATE_IDLE;
187#ifdef CONFIG_MAC80211_HT_DEBUG 186#ifdef CONFIG_MAC80211_HT_DEBUG
188 printk(KERN_DEBUG "timer expired on tid %d but we are not " 187 printk(KERN_DEBUG "timer expired on tid %d but we are not "
189 "(or no longer) expecting addBA response there", 188 "(or no longer) expecting addBA response there",
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index df5abbff63e2..99c93ee98ad9 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1194,8 +1194,10 @@ void sctp_assoc_update(struct sctp_association *asoc,
1194 /* Remove any peer addresses not present in the new association. */ 1194 /* Remove any peer addresses not present in the new association. */
1195 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { 1195 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
1196 trans = list_entry(pos, struct sctp_transport, transports); 1196 trans = list_entry(pos, struct sctp_transport, transports);
1197 if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) 1197 if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
1198 sctp_assoc_del_peer(asoc, &trans->ipaddr); 1198 sctp_assoc_rm_peer(asoc, trans);
1199 continue;
1200 }
1199 1201
1200 if (asoc->state >= SCTP_STATE_ESTABLISHED) 1202 if (asoc->state >= SCTP_STATE_ESTABLISHED)
1201 sctp_transport_reset(trans); 1203 sctp_transport_reset(trans);
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 905fda582b92..7ec09ba03a1c 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -144,6 +144,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
144 /* Use SCTP specific send buffer space queues. */ 144 /* Use SCTP specific send buffer space queues. */
145 ep->sndbuf_policy = sctp_sndbuf_policy; 145 ep->sndbuf_policy = sctp_sndbuf_policy;
146 146
147 sk->sk_data_ready = sctp_data_ready;
147 sk->sk_write_space = sctp_write_space; 148 sk->sk_write_space = sctp_write_space;
148 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); 149 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
149 150
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 9e732916b671..224db014fb6b 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -207,7 +207,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
 	sp = sctp_sk(asoc->base.sk);
 	num_types = sp->pf->supported_addrs(sp, types);
 
-	chunksize = sizeof(init) + addrs_len + SCTP_SAT_LEN(num_types);
+	chunksize = sizeof(init) + addrs_len;
+	chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types));
 	chunksize += sizeof(ecap_param);
 
 	if (sctp_prsctp_enable)
@@ -237,14 +238,14 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
 	/* Add HMACS parameter length if any were defined */
 	auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
 	if (auth_hmacs->length)
-		chunksize += ntohs(auth_hmacs->length);
+		chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
 	else
 		auth_hmacs = NULL;
 
 	/* Add CHUNKS parameter length */
 	auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
 	if (auth_chunks->length)
-		chunksize += ntohs(auth_chunks->length);
+		chunksize += WORD_ROUND(ntohs(auth_chunks->length));
 	else
 		auth_chunks = NULL;
 
@@ -254,7 +255,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
 
 	/* If we have any extensions to report, account for that */
 	if (num_ext)
-		chunksize += sizeof(sctp_supported_ext_param_t) + num_ext;
+		chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
+					num_ext);
 
 	/* RFC 2960 3.3.2 Initiation (INIT) (1)
 	 *
@@ -396,13 +398,13 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
 
 	auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
 	if (auth_hmacs->length)
-		chunksize += ntohs(auth_hmacs->length);
+		chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
 	else
 		auth_hmacs = NULL;
 
 	auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
 	if (auth_chunks->length)
-		chunksize += ntohs(auth_chunks->length);
+		chunksize += WORD_ROUND(ntohs(auth_chunks->length));
 	else
 		auth_chunks = NULL;
 
@@ -411,7 +413,8 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
 	}
 
 	if (num_ext)
-		chunksize += sizeof(sctp_supported_ext_param_t) + num_ext;
+		chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
+					num_ext);
 
 	/* Now allocate and fill out the chunk. */
 	retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize);
@@ -3314,21 +3317,6 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
 	sctp_chunk_free(asconf);
 	asoc->addip_last_asconf = NULL;
 
-	/* Send the next asconf chunk from the addip chunk queue. */
-	if (!list_empty(&asoc->addip_chunk_list)) {
-		struct list_head *entry = asoc->addip_chunk_list.next;
-		asconf = list_entry(entry, struct sctp_chunk, list);
-
-		list_del_init(entry);
-
-		/* Hold the chunk until an ASCONF_ACK is received. */
-		sctp_chunk_hold(asconf);
-		if (sctp_primitive_ASCONF(asoc, asconf))
-			sctp_chunk_free(asconf);
-		else
-			asoc->addip_last_asconf = asconf;
-	}
-
 	return retval;
 }
 
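The WORD_ROUND() calls added above pad each variable-length parameter up to a 4-byte boundary before it is counted into chunksize, so the INIT/INIT-ACK buffer is sized for the padded parameters that are actually written on the wire. Below is a minimal userspace sketch of that arithmetic; WORD_ROUND is re-declared locally to mirror the kernel's 4-byte-rounding helper, and the parameter lengths are made-up values used only for illustration.

/* Sketch: why parameter lengths must be rounded before sizing a chunk. */
#include <stdio.h>

#define WORD_ROUND(s) (((s) + 3) & ~3)	/* local re-declaration for the sketch */

int main(void)
{
	/* Hypothetical on-the-wire parameter lengths in bytes. */
	unsigned int hmacs_len = 6, chunks_len = 5;
	unsigned int unpadded = hmacs_len + chunks_len;
	unsigned int padded = WORD_ROUND(hmacs_len) + WORD_ROUND(chunks_len);

	/* Each parameter is emitted padded to a 4-byte boundary, so the
	 * buffer must be sized from the padded lengths: 8 + 8 = 16 bytes
	 * here, not 11.
	 */
	printf("unpadded sum: %u, padded sum: %u\n", unpadded, padded);
	return 0;
}
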
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 4e4ca65cd320..42bbb2410550 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -961,6 +961,29 @@ static int sctp_cmd_send_msg(struct sctp_association *asoc,
 }
 
 
+/* Sent the next ASCONF packet currently stored in the association.
+ * This happens after the ASCONF_ACK was succeffully processed.
+ */
+static void sctp_cmd_send_asconf(struct sctp_association *asoc)
+{
+	/* Send the next asconf chunk from the addip chunk
+	 * queue.
+	 */
+	if (!list_empty(&asoc->addip_chunk_list)) {
+		struct list_head *entry = asoc->addip_chunk_list.next;
+		struct sctp_chunk *asconf = list_entry(entry,
+						       struct sctp_chunk, list);
+		list_del_init(entry);
+
+		/* Hold the chunk until an ASCONF_ACK is received. */
+		sctp_chunk_hold(asconf);
+		if (sctp_primitive_ASCONF(asoc, asconf))
+			sctp_chunk_free(asconf);
+		else
+			asoc->addip_last_asconf = asconf;
+	}
+}
+
 
 /* These three macros allow us to pull the debugging code out of the
  * main flow of sctp_do_sm() to keep attention focused on the real
@@ -1616,6 +1639,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 			}
 			error = sctp_cmd_send_msg(asoc, cmd->obj.msg);
 			break;
+		case SCTP_CMD_SEND_NEXT_ASCONF:
+			sctp_cmd_send_asconf(asoc);
+			break;
 		default:
 			printk(KERN_WARNING "Impossible command: %u, %p\n",
 			       cmd->verb, cmd->obj.ptr);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 47bc20d3a85b..c3f75e79bedc 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3675,8 +3675,14 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
 					SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
 
 	if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
-				     asconf_ack))
+				     asconf_ack)) {
+		/* Successfully processed ASCONF_ACK. We can
+		 * release the next asconf if we have one.
+		 */
+		sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF,
+				SCTP_NULL());
 		return SCTP_DISPOSITION_CONSUME;
+	}
 
 	abort = sctp_make_abort(asoc, asconf_ack,
 				sizeof(sctp_errhdr_t));
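Taken together, the sm_make_chunk.c, sm_sideeffect.c and sm_statefuns.c hunks move the "send the next queued ASCONF" step out of sctp_process_asconf_ack() and into a state-machine side effect: at most one ASCONF is outstanding, and the next one is dequeued only once the ACK for the previous one has been processed. The sketch below models that one-in-flight queue in plain userspace C; the struct and function names are illustrative stand-ins, not the kernel API.

/* Sketch: one request in flight, next one released by the ACK handler. */
#include <stdio.h>
#include <stdlib.h>

struct request {
	int id;
	struct request *next;
};

struct assoc {
	struct request *queue_head;	/* requests waiting to be sent */
	struct request *outstanding;	/* at most one in flight */
};

static void send_next_request(struct assoc *a)
{
	struct request *req = a->queue_head;

	if (!req)
		return;
	a->queue_head = req->next;
	req->next = NULL;
	a->outstanding = req;		/* keep it until its ACK arrives */
	printf("sent request %d\n", req->id);
}

static void process_ack(struct assoc *a)
{
	free(a->outstanding);
	a->outstanding = NULL;
	/* Only after the ACK is consumed may the next request go out. */
	send_next_request(a);
}

int main(void)
{
	struct assoc a = { NULL, NULL };

	for (int i = 3; i >= 1; i--) {
		struct request *r = calloc(1, sizeof(*r));
		r->id = i;
		r->next = a.queue_head;
		a.queue_head = r;	/* queue ends up as 1, 2, 3 */
	}
	send_next_request(&a);		/* sends request 1 */
	process_ack(&a);		/* frees 1, sends 2 */
	process_ack(&a);		/* frees 2, sends 3 */
	process_ack(&a);		/* frees 3, queue empty */
	return 0;
}
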
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 9bd9d82a70c3..aa3ba60de54c 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3718,12 +3718,12 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
 	sp->hmac = NULL;
 
 	SCTP_DBG_OBJCNT_INC(sock);
-	percpu_counter_inc(&sctp_sockets_allocated);
 
 	/* Set socket backlog limit. */
 	sk->sk_backlog.limit = sysctl_sctp_rmem[1];
 
 	local_bh_disable();
+	percpu_counter_inc(&sctp_sockets_allocated);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 	local_bh_enable();
 
@@ -3740,8 +3740,8 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
 	/* Release our hold on the endpoint. */
 	ep = sctp_sk(sk)->ep;
 	sctp_endpoint_free(ep);
-	percpu_counter_dec(&sctp_sockets_allocated);
 	local_bh_disable();
+	percpu_counter_dec(&sctp_sockets_allocated);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 	local_bh_enable();
 }
@@ -6188,6 +6188,16 @@ do_nonblock:
 	goto out;
 }
 
+void sctp_data_ready(struct sock *sk, int len)
+{
+	read_lock_bh(&sk->sk_callback_lock);
+	if (sk_has_sleeper(sk))
+		wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN |
+						POLLRDNORM | POLLRDBAND);
+	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+	read_unlock_bh(&sk->sk_callback_lock);
+}
+
 /* If socket sndbuf has changed, wake up all per association waiters. */
 void sctp_write_space(struct sock *sk)
 {
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 327011fcc407..78091375ca12 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -45,10 +45,10 @@
 
 #define MAX_ADDR_STR 32
 
-static struct media *media_list = NULL;
+static struct media media_list[MAX_MEDIA];
 static u32 media_count = 0;
 
-struct bearer *tipc_bearers = NULL;
+struct bearer tipc_bearers[MAX_BEARERS];
 
 /**
  * media_name_valid - validate media name
@@ -108,9 +108,11 @@ int tipc_register_media(u32 media_type,
 	int res = -EINVAL;
 
 	write_lock_bh(&tipc_net_lock);
-	if (!media_list)
-		goto exit;
 
+	if (tipc_mode != TIPC_NET_MODE) {
+		warn("Media <%s> rejected, not in networked mode yet\n", name);
+		goto exit;
+	}
 	if (!media_name_valid(name)) {
 		warn("Media <%s> rejected, illegal name\n", name);
 		goto exit;
@@ -660,33 +662,10 @@ int tipc_disable_bearer(const char *name)
 
 
 
-int tipc_bearer_init(void)
-{
-	int res;
-
-	write_lock_bh(&tipc_net_lock);
-	tipc_bearers = kcalloc(MAX_BEARERS, sizeof(struct bearer), GFP_ATOMIC);
-	media_list = kcalloc(MAX_MEDIA, sizeof(struct media), GFP_ATOMIC);
-	if (tipc_bearers && media_list) {
-		res = 0;
-	} else {
-		kfree(tipc_bearers);
-		kfree(media_list);
-		tipc_bearers = NULL;
-		media_list = NULL;
-		res = -ENOMEM;
-	}
-	write_unlock_bh(&tipc_net_lock);
-	return res;
-}
-
 void tipc_bearer_stop(void)
 {
 	u32 i;
 
-	if (!tipc_bearers)
-		return;
-
 	for (i = 0; i < MAX_BEARERS; i++) {
 		if (tipc_bearers[i].active)
 			tipc_bearers[i].publ.blocked = 1;
@@ -695,10 +674,6 @@ void tipc_bearer_stop(void)
 		if (tipc_bearers[i].active)
 			bearer_disable(tipc_bearers[i].publ.name);
 	}
-	kfree(tipc_bearers);
-	kfree(media_list);
-	tipc_bearers = NULL;
-	media_list = NULL;
 	media_count = 0;
 }
 
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index ca5734892713..000228e93f9e 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -114,7 +114,7 @@ struct bearer_name {
 
 struct link;
 
-extern struct bearer *tipc_bearers;
+extern struct bearer tipc_bearers[];
 
 void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
 struct sk_buff *tipc_media_get_names(void);
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 7906608bf510..f25b1cdb64eb 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -116,7 +116,8 @@
 */
 
 DEFINE_RWLOCK(tipc_net_lock);
-struct network tipc_net = { NULL };
+struct _zone *tipc_zones[256] = { NULL, };
+struct network tipc_net = { tipc_zones };
 
 struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref)
 {
@@ -158,28 +159,12 @@ void tipc_net_send_external_routes(u32 dest)
 	}
 }
 
-static int net_init(void)
-{
-	memset(&tipc_net, 0, sizeof(tipc_net));
-	tipc_net.zones = kcalloc(tipc_max_zones + 1, sizeof(struct _zone *), GFP_ATOMIC);
-	if (!tipc_net.zones) {
-		return -ENOMEM;
-	}
-	return 0;
-}
-
 static void net_stop(void)
 {
 	u32 z_num;
 
-	if (!tipc_net.zones)
-		return;
-
-	for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
+	for (z_num = 1; z_num <= tipc_max_zones; z_num++)
 		tipc_zone_delete(tipc_net.zones[z_num]);
-	}
-	kfree(tipc_net.zones);
-	tipc_net.zones = NULL;
 }
 
 static void net_route_named_msg(struct sk_buff *buf)
@@ -282,9 +267,7 @@ int tipc_net_start(u32 addr)
 	tipc_named_reinit();
 	tipc_port_reinit();
 
-	if ((res = tipc_bearer_init()) ||
-	    (res = net_init()) ||
-	    (res = tipc_cltr_init()) ||
+	if ((res = tipc_cltr_init()) ||
 	    (res = tipc_bclink_init())) {
 		return res;
 	}
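The TIPC hunks above replace kcalloc()'d bearer, media and zone tables with fixed-size arrays, which removes tipc_bearer_init()/net_init() together with their failure paths, the NULL checks on every user, and the kfree() calls on shutdown. A small userspace sketch of the two shapes follows; MAX_BEARERS and the struct layout are stand-ins for illustration, not the TIPC definitions.

/* Sketch: dynamically allocated table vs. fixed-size table. */
#include <stdlib.h>
#include <string.h>

#define MAX_BEARERS 8

struct bearer { int active; };

/* Before: allocated on demand, may be NULL, must be freed on stop. */
static struct bearer *bearers_dyn;

static int bearers_init_dyn(void)
{
	bearers_dyn = calloc(MAX_BEARERS, sizeof(*bearers_dyn));
	return bearers_dyn ? 0 : -1;	/* extra failure path for callers */
}

static void bearers_stop_dyn(void)
{
	free(bearers_dyn);
	bearers_dyn = NULL;		/* every user must re-check for NULL */
}

/* After: always valid for the module lifetime, no init or free step. */
static struct bearer bearers_static[MAX_BEARERS];

static void bearers_stop_static(void)
{
	memset(bearers_static, 0, sizeof(bearers_static));
}

int main(void)
{
	if (bearers_init_dyn() == 0)
		bearers_stop_dyn();
	bearers_stop_static();
	return 0;
}
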
diff --git a/security/inode.c b/security/inode.c
index c3a793881d04..1c812e874504 100644
--- a/security/inode.c
+++ b/security/inode.c
@@ -161,13 +161,13 @@ static int create_by_name(const char *name, mode_t mode,
 
 	mutex_lock(&parent->d_inode->i_mutex);
 	*dentry = lookup_one_len(name, parent, strlen(name));
-	if (!IS_ERR(dentry)) {
+	if (!IS_ERR(*dentry)) {
 		if ((mode & S_IFMT) == S_IFDIR)
 			error = mkdir(parent->d_inode, *dentry, mode);
 		else
 			error = create(parent->d_inode, *dentry, mode);
 	} else
-		error = PTR_ERR(dentry);
+		error = PTR_ERR(*dentry);
 	mutex_unlock(&parent->d_inode->i_mutex);
 
 	return error;
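The create_by_name() fix above is a pointer-level bug: the parameter is a struct dentry **, so IS_ERR(dentry) tests the address of the local pointer rather than the value lookup_one_len() returned, and PTR_ERR(dentry) reports a bogus error code. Below is a userspace sketch of that failure mode, with ERR_PTR/IS_ERR/PTR_ERR re-declared along the lines of the kernel macros; lookup() is an invented stand-in for the lookup call.

/* Sketch: checking the wrong level of indirection defeats IS_ERR(). */
#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(ptr)	((long)(ptr))

static void *lookup(int fail)
{
	static int object;
	return fail ? ERR_PTR(-2 /* -ENOENT */) : (void *)&object;
}

int main(void)
{
	void *dentry = lookup(1);	/* simulate a failed lookup */

	/* Buggy form: &dentry is a valid stack address, never an error
	 * cookie, so this branch is always taken even on failure.
	 */
	if (!IS_ERR(&dentry))
		printf("buggy check: proceeds despite the error\n");

	/* Fixed form: test the value the lookup actually returned. */
	if (IS_ERR(dentry))
		printf("fixed check: error %ld detected\n", PTR_ERR(dentry));
	return 0;
}
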
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 03fe63ed55bd..9ac7bfd3bbdd 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -336,8 +336,10 @@ static int construct_alloc_key(struct key_type *type,
 
 key_already_present:
 	mutex_unlock(&key_construction_mutex);
-	if (dest_keyring)
+	if (dest_keyring) {
+		__key_link(dest_keyring, key_ref_to_ptr(key_ref));
 		up_write(&dest_keyring->sem);
+	}
 	mutex_unlock(&user->cons_lock);
 	key_put(key);
 	*_key = key = key_ref_to_ptr(key_ref);
@@ -428,6 +430,11 @@ struct key *request_key_and_link(struct key_type *type,
 
 	if (!IS_ERR(key_ref)) {
 		key = key_ref_to_ptr(key_ref);
+		if (dest_keyring) {
+			construct_get_dest_keyring(&dest_keyring);
+			key_link(dest_keyring, key);
+			key_put(dest_keyring);
+		}
 	} else if (PTR_ERR(key_ref) != -EAGAIN) {
 		key = ERR_CAST(key_ref);
 	} else {
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 062a8b051b83..fd831bd78aeb 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2273,6 +2273,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
 	SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x1565, 0x8218, "Biostar Microtech", POS_FIX_LPIB),
+	SND_PCI_QUIRK(0x8086, 0x2503, "DG965OT AAD63733-203", POS_FIX_LPIB),
 	SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB),
 	{}
 };
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 71b7a96b185f..1a97c81b254c 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -1174,9 +1174,10 @@ static int patch_cxt5045(struct hda_codec *codec)
 
 	switch (codec->subsystem_id >> 16) {
 	case 0x103c:
+	case 0x1631:
 	case 0x1734:
-		/* HP & Fujitsu-Siemens laptops have really bad sound over 0dB
-		 * on NID 0x17. Fix max PCM level to 0 dB
+		/* HP, Packard Bell, & Fujitsu-Siemens laptops have really bad
+		 * sound over 0dB on NID 0x17. Fix max PCM level to 0 dB
 		 * (originally it has 0x2b steps with 0dB offset 0x14)
 		 */
 		snd_hda_override_amp_caps(codec, 0x17, HDA_INPUT,
@@ -2471,6 +2472,8 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
 			 CXT5066_DELL_LAPTOP),
 	SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5),
 	SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO),
+	SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
+	SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5),
 	{}
 };
 
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index bd8a567c9367..b486daa17bd0 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4033,7 +4033,7 @@ static struct snd_pci_quirk alc880_cfg_tbl[] = {
 	SND_PCI_QUIRK(0x1695, 0x4012, "EPox EP-5LDA", ALC880_5ST_DIG),
 	SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_F1734),
 	SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FUJITSU),
-	SND_PCI_QUIRK(0x1734, 0x10ac, "FSC", ALC880_UNIWILL),
+	SND_PCI_QUIRK(0x1734, 0x10ac, "FSC AMILO Xi 1526", ALC880_F1734),
 	SND_PCI_QUIRK(0x1734, 0x10b0, "Fujitsu", ALC880_FUJITSU),
 	SND_PCI_QUIRK(0x1854, 0x0018, "LG LW20", ALC880_LG_LW),
 	SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_LG),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 799ba2570902..ac2d528feb8d 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -1602,6 +1602,10 @@ static struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = {
 		      "Dell Studio 1555", STAC_DELL_M6_DMIC),
 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02bd,
 		      "Dell Studio 1557", STAC_DELL_M6_DMIC),
+	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02fe,
+		      "Dell Studio XPS 1645", STAC_DELL_M6_BOTH),
+	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0413,
+		      "Dell Studio 1558", STAC_DELL_M6_BOTH),
 	{} /* terminator */
 };
 
@@ -1725,6 +1729,8 @@ static struct snd_pci_quirk stac92hd71bxx_cfg_tbl[] = {
 		      "HP HDX", STAC_HP_HDX),  /* HDX16 */
 	SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x3620,
 		      "HP dv6", STAC_HP_DV5),
+	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3061,
+		      "HP dv6", STAC_HP_DV5), /* HP dv6-1110ax */
 	SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x7010,
 		      "HP", STAC_HP_DV5),
 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0233,
diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
index 75283fbb4b3f..c2311f85a331 100644
--- a/sound/pci/maestro3.c
+++ b/sound/pci/maestro3.c
@@ -849,6 +849,7 @@ struct snd_m3 {
 	struct snd_kcontrol *master_switch;
 	struct snd_kcontrol *master_volume;
 	struct tasklet_struct hwvol_tq;
+	unsigned int in_suspend;
 
 #ifdef CONFIG_PM
 	u16 *suspend_mem;
@@ -884,6 +885,7 @@ static struct pci_device_id snd_m3_ids[] = {
 MODULE_DEVICE_TABLE(pci, snd_m3_ids);
 
 static struct snd_pci_quirk m3_amp_quirk_list[] __devinitdata = {
+	SND_PCI_QUIRK(0x0E11, 0x0094, "Compaq Evo N600c", 0x0c),
 	SND_PCI_QUIRK(0x10f7, 0x833e, "Panasonic CF-28", 0x0d),
 	SND_PCI_QUIRK(0x10f7, 0x833d, "Panasonic CF-72", 0x0d),
 	SND_PCI_QUIRK(0x1033, 0x80f1, "NEC LM800J/7", 0x03),
@@ -1613,6 +1615,11 @@ static void snd_m3_update_hw_volume(unsigned long private_data)
 	outb(0x88, chip->iobase + SHADOW_MIX_REG_MASTER);
 	outb(0x88, chip->iobase + HW_VOL_COUNTER_MASTER);
 
+	/* Ignore spurious HV interrupts during suspend / resume, this avoids
+	   mistaking them for a mute button press. */
+	if (chip->in_suspend)
+		return;
+
 	if (!chip->master_switch || !chip->master_volume)
 		return;
 
@@ -2424,6 +2431,7 @@ static int m3_suspend(struct pci_dev *pci, pm_message_t state)
 	if (chip->suspend_mem == NULL)
 		return 0;
 
+	chip->in_suspend = 1;
 	snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
 	snd_pcm_suspend_all(chip->pcm);
 	snd_ac97_suspend(chip->ac97);
@@ -2497,6 +2505,7 @@ static int m3_resume(struct pci_dev *pci)
 	snd_m3_hv_init(chip);
 
 	snd_power_change_state(card, SNDRV_CTL_POWER_D0);
+	chip->in_suspend = 0;
 	return 0;
 }
 #endif /* CONFIG_PM */
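The maestro3 change above sets an in_suspend flag before the suspend path touches the hardware and clears it only at the end of resume, so hardware-volume interrupts that fire in between are dropped instead of being misread as a mute-button press. A minimal sketch of that guard follows; only the flag handling mirrors the patch, while the event plumbing is invented for the example.

/* Sketch: suppress events that arrive between suspend and resume. */
#include <stdio.h>

struct chip {
	int in_suspend;
	int muted;
};

static void hw_volume_event(struct chip *chip)
{
	/* Ignore spurious events while suspend/resume is in progress. */
	if (chip->in_suspend)
		return;
	chip->muted = !chip->muted;
	printf("mute toggled to %d\n", chip->muted);
}

int main(void)
{
	struct chip chip = { 0, 0 };

	hw_volume_event(&chip);		/* normal event, toggles mute */

	chip.in_suspend = 1;		/* entering suspend */
	hw_volume_event(&chip);		/* spurious event, ignored */
	chip.in_suspend = 0;		/* resume complete */

	hw_volume_event(&chip);		/* handled again */
	return 0;
}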