-rw-r--r--  MAINTAINERS | 14
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/alpha/kernel/entry.S | 20
-rw-r--r--  arch/alpha/kernel/process.c | 2
-rw-r--r--  arch/alpha/kernel/signal.c | 56
-rw-r--r--  arch/alpha/kernel/systbls.S | 2
-rw-r--r--  arch/arm/Kconfig | 27
-rw-r--r--  arch/arm/boot/compressed/Makefile | 2
-rw-r--r--  arch/arm/include/asm/pgtable.h | 4
-rw-r--r--  arch/arm/kernel/entry-common.S | 2
-rw-r--r--  arch/arm/mach-at91/at91sam9g45_devices.c | 4
-rw-r--r--  arch/arm/mach-davinci/dm355.c | 3
-rw-r--r--  arch/arm/mach-davinci/dm365.c | 3
-rw-r--r--  arch/arm/mach-davinci/dm644x.c | 3
-rw-r--r--  arch/arm/mach-davinci/dm646x.c | 3
-rw-r--r--  arch/arm/mach-dove/include/mach/io.h | 6
-rw-r--r--  arch/arm/mach-kirkwood/include/mach/kirkwood.h | 2
-rw-r--r--  arch/arm/mach-kirkwood/pcie.c | 4
-rw-r--r--  arch/arm/mach-mmp/include/mach/system.h | 7
-rw-r--r--  arch/arm/mach-pxa/cpufreq-pxa2xx.c | 3
-rw-r--r--  arch/arm/mach-pxa/include/mach/hardware.h | 12
-rw-r--r--  arch/arm/mach-pxa/palm27x.c | 6
-rw-r--r--  arch/arm/mach-pxa/vpac270.c | 1
-rw-r--r--  arch/arm/mach-u300/include/mach/gpio.h | 3
-rw-r--r--  arch/arm/mach-vexpress/ct-ca9x4.c | 8
-rw-r--r--  arch/arm/mm/alignment.c | 19
-rw-r--r--  arch/arm/mm/mmu.c | 31
-rw-r--r--  arch/arm/mm/proc-v7.S | 62
-rw-r--r--  arch/arm/plat-nomadik/timer.c | 33
-rw-r--r--  arch/arm/plat-omap/sram.c | 25
-rw-r--r--  arch/ia64/Kconfig | 9
-rw-r--r--  arch/ia64/include/asm/compat.h | 208
-rw-r--r--  arch/ia64/include/asm/hardirq.h | 11
-rw-r--r--  arch/ia64/include/asm/iommu_table.h | 6
-rw-r--r--  arch/ia64/kernel/cyclone.c | 2
-rw-r--r--  arch/ia64/kernel/iosapic.c | 60
-rw-r--r--  arch/ia64/kernel/irq_ia64.c | 5
-rw-r--r--  arch/ia64/kernel/mca.c | 38
-rw-r--r--  arch/ia64/kernel/palinfo.c | 2
-rw-r--r--  arch/ia64/kernel/perfmon.c | 12
-rw-r--r--  arch/ia64/kernel/salinfo.c | 2
-rw-r--r--  arch/ia64/kernel/setup.c | 4
-rw-r--r--  arch/ia64/kernel/unwind.c | 23
-rw-r--r--  arch/ia64/xen/xen_pv_ops.c | 5
-rw-r--r--  arch/m32r/include/asm/signal.h | 1
-rw-r--r--  arch/m32r/include/asm/unistd.h | 1
-rw-r--r--  arch/m32r/kernel/entry.S | 5
-rw-r--r--  arch/m32r/kernel/ptrace.c | 7
-rw-r--r--  arch/m32r/kernel/signal.c | 105
-rw-r--r--  arch/mn10300/Kconfig | 1
-rw-r--r--  arch/mn10300/Kconfig.debug | 2
-rw-r--r--  arch/mn10300/include/asm/bitops.h | 4
-rw-r--r--  arch/mn10300/include/asm/signal.h | 2
-rw-r--r--  arch/mn10300/kernel/signal.c | 35
-rw-r--r--  arch/mn10300/mm/Makefile | 14
-rw-r--r--  arch/mn10300/mm/cache-disabled.c | 21
-rw-r--r--  arch/tile/kernel/intvec_32.S | 7
-rw-r--r--  arch/x86/boot/early_serial_console.c | 14
-rw-r--r--  arch/x86/include/asm/amd_iommu_proto.h | 6
-rw-r--r--  arch/x86/include/asm/amd_iommu_types.h | 12
-rw-r--r--  arch/x86/include/asm/bitops.h | 2
-rw-r--r--  arch/x86/include/asm/cpufeature.h | 1
-rw-r--r--  arch/x86/kernel/Makefile | 2
-rw-r--r--  arch/x86/kernel/amd_iommu.c | 4
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c | 67
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 12
-rw-r--r--  arch/x86/kernel/cpu/scattered.c | 1
-rw-r--r--  block/blk-merge.c | 12
-rw-r--r--  drivers/ata/ahci.c | 4
-rw-r--r--  drivers/ata/ahci.h | 12
-rw-r--r--  drivers/ata/ahci_platform.c | 6
-rw-r--r--  drivers/ata/libahci.c | 16
-rw-r--r--  drivers/block/pktcdvd.c | 2
-rw-r--r--  drivers/dma/mv_xor.c | 2
-rw-r--r--  drivers/edac/edac_mc.c | 3
-rw-r--r--  drivers/gpu/drm/drm_buffer.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/atombios.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 8
-rw-r--r--  drivers/gpu/vga/vgaarb.c | 2
-rw-r--r--  drivers/hwmon/Kconfig | 2
-rw-r--r--  drivers/hwmon/coretemp.c | 57
-rw-r--r--  drivers/hwmon/lis3lv02d.c | 4
-rw-r--r--  drivers/hwmon/pkgtemp.c | 23
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.c | 6
-rw-r--r--  drivers/leds/leds-ns2.c | 9
-rw-r--r--  drivers/mmc/host/sdhci-s3c.c | 12
-rw-r--r--  drivers/net/3c59x.c | 10
-rw-r--r--  drivers/net/atlx/atl1.c | 11
-rw-r--r--  drivers/net/e1000e/hw.h | 1
-rw-r--r--  drivers/net/e1000e/ich8lan.c | 197
-rw-r--r--  drivers/net/e1000e/netdev.c | 29
-rw-r--r--  drivers/net/ibm_newemac/core.c | 4
-rw-r--r--  drivers/net/netxen/netxen_nic_init.c | 3
-rw-r--r--  drivers/net/qlcnic/qlcnic_init.c | 7
-rw-r--r--  drivers/net/rionet.c | 2
-rw-r--r--  drivers/net/sgiseeq.c | 2
-rw-r--r--  drivers/net/smsc911x.c | 1
-rw-r--r--  drivers/net/tulip/de2104x.c | 43
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.c | 5
-rw-r--r--  drivers/pci/intel-iommu.c | 27
-rw-r--r--  drivers/pci/iov.c | 2
-rw-r--r--  drivers/pci/pci.h | 5
-rw-r--r--  drivers/pcmcia/pcmcia_resource.c | 6
-rw-r--r--  drivers/pcmcia/pd6729.c | 2
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 5
-rw-r--r--  drivers/s390/net/ctcm_main.c | 4
-rw-r--r--  drivers/serial/ioc3_serial.c | 4
-rw-r--r--  drivers/staging/ti-st/st.h | 1
-rw-r--r--  drivers/staging/ti-st/st_core.c | 9
-rw-r--r--  drivers/staging/ti-st/st_core.h | 2
-rw-r--r--  drivers/staging/ti-st/st_kim.c | 22
-rw-r--r--  drivers/usb/core/Kconfig | 6
-rw-r--r--  drivers/usb/core/file.c | 35
-rw-r--r--  drivers/usb/core/message.c | 1
-rw-r--r--  drivers/usb/musb/cppi_dma.c | 1
-rw-r--r--  drivers/usb/musb/musb_gadget.c | 75
-rw-r--r--  drivers/usb/musb/musb_gadget.h | 2
-rw-r--r--  drivers/usb/musb/musb_gadget_ep0.c | 9
-rw-r--r--  drivers/usb/musb/musb_host.c | 6
-rw-r--r--  drivers/vhost/net.c | 2
-rw-r--r--  drivers/vhost/vhost.c | 7
-rw-r--r--  drivers/video/pxa168fb.c | 4
-rw-r--r--  fs/ocfs2/acl.c | 3
-rw-r--r--  fs/ocfs2/cluster/tcp.c | 2
-rw-r--r--  fs/ocfs2/dir.c | 24
-rw-r--r--  fs/ocfs2/dlm/dlmcommon.h | 1
-rw-r--r--  fs/ocfs2/dlm/dlmdebug.c | 9
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.c | 1
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 40
-rw-r--r--  fs/ocfs2/dlmglue.h | 1
-rw-r--r--  fs/ocfs2/ocfs2_fs.h | 37
-rw-r--r--  fs/ocfs2/ocfs2_ioctl.h | 8
-rw-r--r--  fs/ocfs2/refcounttree.c | 5
-rw-r--r--  fs/ocfs2/reservations.c | 22
-rw-r--r--  fs/ocfs2/suballoc.c | 4
-rw-r--r--  fs/ocfs2/xattr.c | 4
-rw-r--r--  include/linux/netlink.h | 4
-rw-r--r--  include/linux/pci_ids.h | 3
-rw-r--r--  include/linux/socket.h | 2
-rw-r--r--  include/net/addrconf.h | 1
-rw-r--r--  include/net/dst.h | 1
-rw-r--r--  include/net/route.h | 2
-rw-r--r--  include/net/xfrm.h | 4
-rw-r--r--  mm/fremap.c | 7
-rw-r--r--  mm/hugetlb.c | 24
-rw-r--r--  mm/rmap.c | 15
-rw-r--r--  net/9p/trans_rdma.c | 29
-rw-r--r--  net/9p/trans_virtio.c | 3
-rw-r--r--  net/atm/br2684.c | 12
-rw-r--r--  net/core/iovec.c | 5
-rw-r--r--  net/core/sock.c | 8
-rw-r--r--  net/ipv4/ip_gre.c | 8
-rw-r--r--  net/ipv4/ip_output.c | 19
-rw-r--r--  net/ipv4/netfilter/ipt_REJECT.c | 1
-rw-r--r--  net/ipv4/netfilter/nf_defrag_ipv4.c | 4
-rw-r--r--  net/ipv4/netfilter/nf_nat_snmp_basic.c | 6
-rw-r--r--  net/ipv4/route.c | 2
-rw-r--r--  net/ipv4/tcp.c | 9
-rw-r--r--  net/ipv4/tcp_input.c | 5
-rw-r--r--  net/ipv4/xfrm4_policy.c | 2
-rw-r--r--  net/ipv4/xfrm4_state.c | 33
-rw-r--r--  net/ipv6/addrconf.c | 11
-rw-r--r--  net/ipv6/addrlabel.c | 5
-rw-r--r--  net/ipv6/ip6_output.c | 18
-rw-r--r--  net/ipv6/route.c | 2
-rw-r--r--  net/ipv6/xfrm6_state.c | 33
-rw-r--r--  net/netfilter/nf_conntrack_extend.c | 4
-rw-r--r--  net/netfilter/nf_conntrack_sip.c | 2
-rw-r--r--  net/netfilter/nf_tproxy_core.c | 6
-rw-r--r--  net/rds/tcp_connect.c | 4
-rw-r--r--  net/rds/tcp_listen.c | 4
-rw-r--r--  net/rds/tcp_recv.c | 4
-rw-r--r--  net/rds/tcp_send.c | 4
-rw-r--r--  net/rose/af_rose.c | 4
-rw-r--r--  net/sunrpc/xprtsock.c | 28
-rw-r--r--  net/wireless/wext-priv.c | 2
-rw-r--r--  net/xfrm/xfrm_policy.c | 5
-rw-r--r--  net/xfrm/xfrm_state.c | 45
-rw-r--r--  security/tomoyo/common.c | 6
-rw-r--r--  security/tomoyo/common.h | 3
-rw-r--r--  sound/pci/hda/patch_analog.c | 1
-rw-r--r--  sound/pci/hda/patch_realtek.c | 22
-rw-r--r--  sound/pci/oxygen/oxygen.c | 4
-rw-r--r--  sound/pci/rme9652/hdsp.c | 1
-rw-r--r--  sound/pci/rme9652/hdspm.c | 1
-rw-r--r--  sound/soc/sh/migor.c | 15
-rw-r--r--  sound/soc/soc-cache.c | 5
-rw-r--r--  virt/kvm/eventfd.c | 3
-rw-r--r--  virt/kvm/kvm_main.c | 4
195 files changed, 1445 insertions, 1023 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 4d4881d909da..668682d1f5fa 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2668,6 +2668,8 @@ M: Guenter Roeck <guenter.roeck@ericsson.com>
2668L: lm-sensors@lm-sensors.org 2668L: lm-sensors@lm-sensors.org
2669W: http://www.lm-sensors.org/ 2669W: http://www.lm-sensors.org/
2670T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/ 2670T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
2671T: quilt kernel.org/pub/linux/kernel/people/groeck/linux-staging/
2672T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
2671S: Maintained 2673S: Maintained
2672F: Documentation/hwmon/ 2674F: Documentation/hwmon/
2673F: drivers/hwmon/ 2675F: drivers/hwmon/
@@ -3905,10 +3907,8 @@ F: Documentation/serial/moxa-smartio
3905F: drivers/char/mxser.* 3907F: drivers/char/mxser.*
3906 3908
3907MSI LAPTOP SUPPORT 3909MSI LAPTOP SUPPORT
3908M: Lennart Poettering <mzxreary@0pointer.de> 3910M: Lee, Chun-Yi <jlee@novell.com>
3909L: platform-driver-x86@vger.kernel.org 3911L: platform-driver-x86@vger.kernel.org
3910W: https://tango.0pointer.de/mailman/listinfo/s270-linux
3911W: http://0pointer.de/lennart/tchibo.html
3912S: Maintained 3912S: Maintained
3913F: drivers/platform/x86/msi-laptop.c 3913F: drivers/platform/x86/msi-laptop.c
3914 3914
@@ -3925,8 +3925,10 @@ S: Supported
3925F: drivers/mfd/ 3925F: drivers/mfd/
3926 3926
3927MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM 3927MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM
3928S: Orphan 3928M: Chris Ball <cjb@laptop.org>
3929L: linux-mmc@vger.kernel.org 3929L: linux-mmc@vger.kernel.org
3930T: git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git
3931S: Maintained
3930F: drivers/mmc/ 3932F: drivers/mmc/
3931F: include/linux/mmc/ 3933F: include/linux/mmc/
3932 3934
@@ -5097,8 +5099,10 @@ S: Maintained
5097F: drivers/mmc/host/sdricoh_cs.c 5099F: drivers/mmc/host/sdricoh_cs.c
5098 5100
5099SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) DRIVER 5101SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) DRIVER
5100S: Orphan 5102M: Chris Ball <cjb@laptop.org>
5101L: linux-mmc@vger.kernel.org 5103L: linux-mmc@vger.kernel.org
5104T: git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git
5105S: Maintained
5102F: drivers/mmc/host/sdhci.* 5106F: drivers/mmc/host/sdhci.*
5103 5107
5104SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF) 5108SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF)
diff --git a/Makefile b/Makefile
index 3133a5772eeb..471c49fd2f43 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 36 3SUBLEVEL = 36
4EXTRAVERSION = -rc5 4EXTRAVERSION = -rc6
5NAME = Sheep on Meth 5NAME = Sheep on Meth
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index ab1ee0ab082b..6d159cee5f2f 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -73,8 +73,6 @@
73 ldq $20, HAE_REG($19); \ 73 ldq $20, HAE_REG($19); \
74 stq $21, HAE_CACHE($19); \ 74 stq $21, HAE_CACHE($19); \
75 stq $21, 0($20); \ 75 stq $21, 0($20); \
76 ldq $0, 0($sp); \
77 ldq $1, 8($sp); \
7899:; \ 7699:; \
79 ldq $19, 72($sp); \ 77 ldq $19, 72($sp); \
80 ldq $20, 80($sp); \ 78 ldq $20, 80($sp); \
@@ -316,7 +314,7 @@ ret_from_sys_call:
316 cmovne $26, 0, $19 /* $19 = 0 => non-restartable */ 314 cmovne $26, 0, $19 /* $19 = 0 => non-restartable */
317 ldq $0, SP_OFF($sp) 315 ldq $0, SP_OFF($sp)
318 and $0, 8, $0 316 and $0, 8, $0
319 beq $0, restore_all 317 beq $0, ret_to_kernel
320ret_to_user: 318ret_to_user:
321 /* Make sure need_resched and sigpending don't change between 319 /* Make sure need_resched and sigpending don't change between
322 sampling and the rti. */ 320 sampling and the rti. */
@@ -329,6 +327,11 @@ restore_all:
329 RESTORE_ALL 327 RESTORE_ALL
330 call_pal PAL_rti 328 call_pal PAL_rti
331 329
330ret_to_kernel:
331 lda $16, 7
332 call_pal PAL_swpipl
333 br restore_all
334
332 .align 3 335 .align 3
333$syscall_error: 336$syscall_error:
334 /* 337 /*
@@ -657,7 +660,7 @@ kernel_thread:
657 /* We don't actually care for a3 success widgetry in the kernel. 660 /* We don't actually care for a3 success widgetry in the kernel.
658 Not for positive errno values. */ 661 Not for positive errno values. */
659 stq $0, 0($sp) /* $0 */ 662 stq $0, 0($sp) /* $0 */
660 br restore_all 663 br ret_to_kernel
661.end kernel_thread 664.end kernel_thread
662 665
663/* 666/*
@@ -912,15 +915,6 @@ sys_execve:
912.end sys_execve 915.end sys_execve
913 916
914 .align 4 917 .align 4
915 .globl osf_sigprocmask
916 .ent osf_sigprocmask
917osf_sigprocmask:
918 .prologue 0
919 mov $sp, $18
920 jmp $31, sys_osf_sigprocmask
921.end osf_sigprocmask
922
923 .align 4
924 .globl alpha_ni_syscall 918 .globl alpha_ni_syscall
925 .ent alpha_ni_syscall 919 .ent alpha_ni_syscall
926alpha_ni_syscall: 920alpha_ni_syscall:
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 842dba308eab..3ec35066f1dc 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -356,7 +356,7 @@ dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti)
356 dest[27] = pt->r27; 356 dest[27] = pt->r27;
357 dest[28] = pt->r28; 357 dest[28] = pt->r28;
358 dest[29] = pt->gp; 358 dest[29] = pt->gp;
359 dest[30] = rdusp(); 359 dest[30] = ti == current_thread_info() ? rdusp() : ti->pcb.usp;
360 dest[31] = pt->pc; 360 dest[31] = pt->pc;
361 361
362 /* Once upon a time this was the PS value. Which is stupid 362 /* Once upon a time this was the PS value. Which is stupid
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
index 0f6b51ae865a..d290845aef59 100644
--- a/arch/alpha/kernel/signal.c
+++ b/arch/alpha/kernel/signal.c
@@ -41,46 +41,20 @@ static void do_signal(struct pt_regs *, struct switch_stack *,
41/* 41/*
42 * The OSF/1 sigprocmask calling sequence is different from the 42 * The OSF/1 sigprocmask calling sequence is different from the
43 * C sigprocmask() sequence.. 43 * C sigprocmask() sequence..
44 *
45 * how:
46 * 1 - SIG_BLOCK
47 * 2 - SIG_UNBLOCK
48 * 3 - SIG_SETMASK
49 *
50 * We change the range to -1 .. 1 in order to let gcc easily
51 * use the conditional move instructions.
52 *
53 * Note that we don't need to acquire the kernel lock for SMP
54 * operation, as all of this is local to this thread.
55 */ 44 */
56SYSCALL_DEFINE3(osf_sigprocmask, int, how, unsigned long, newmask, 45SYSCALL_DEFINE2(osf_sigprocmask, int, how, unsigned long, newmask)
57 struct pt_regs *, regs)
58{ 46{
59 unsigned long oldmask = -EINVAL; 47 sigset_t oldmask;
60 48 sigset_t mask;
61 if ((unsigned long)how-1 <= 2) { 49 unsigned long res;
62 long sign = how-2; /* -1 .. 1 */ 50
63 unsigned long block, unblock; 51 siginitset(&mask, newmask & ~_BLOCKABLE);
64 52 res = sigprocmask(how, &mask, &oldmask);
65 newmask &= _BLOCKABLE; 53 if (!res) {
66 spin_lock_irq(&current->sighand->siglock); 54 force_successful_syscall_return();
67 oldmask = current->blocked.sig[0]; 55 res = oldmask.sig[0];
68
69 unblock = oldmask & ~newmask;
70 block = oldmask | newmask;
71 if (!sign)
72 block = unblock;
73 if (sign <= 0)
74 newmask = block;
75 if (_NSIG_WORDS > 1 && sign > 0)
76 sigemptyset(&current->blocked);
77 current->blocked.sig[0] = newmask;
78 recalc_sigpending();
79 spin_unlock_irq(&current->sighand->siglock);
80
81 regs->r0 = 0; /* special no error return */
82 } 56 }
83 return oldmask; 57 return res;
84} 58}
85 59
86SYSCALL_DEFINE3(osf_sigaction, int, sig, 60SYSCALL_DEFINE3(osf_sigaction, int, sig,
@@ -94,9 +68,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig,
94 old_sigset_t mask; 68 old_sigset_t mask;
95 if (!access_ok(VERIFY_READ, act, sizeof(*act)) || 69 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
96 __get_user(new_ka.sa.sa_handler, &act->sa_handler) || 70 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
97 __get_user(new_ka.sa.sa_flags, &act->sa_flags)) 71 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
72 __get_user(mask, &act->sa_mask))
98 return -EFAULT; 73 return -EFAULT;
99 __get_user(mask, &act->sa_mask);
100 siginitset(&new_ka.sa.sa_mask, mask); 74 siginitset(&new_ka.sa.sa_mask, mask);
101 new_ka.ka_restorer = NULL; 75 new_ka.ka_restorer = NULL;
102 } 76 }
@@ -106,9 +80,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig,
106 if (!ret && oact) { 80 if (!ret && oact) {
107 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || 81 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
108 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || 82 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
109 __put_user(old_ka.sa.sa_flags, &oact->sa_flags)) 83 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
84 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
110 return -EFAULT; 85 return -EFAULT;
111 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
112 } 86 }
113 87
114 return ret; 88 return ret;
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index ce594ef533cc..a6a1de9db16f 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -58,7 +58,7 @@ sys_call_table:
58 .quad sys_open /* 45 */ 58 .quad sys_open /* 45 */
59 .quad alpha_ni_syscall 59 .quad alpha_ni_syscall
60 .quad sys_getxgid 60 .quad sys_getxgid
61 .quad osf_sigprocmask 61 .quad sys_osf_sigprocmask
62 .quad alpha_ni_syscall 62 .quad alpha_ni_syscall
63 .quad alpha_ni_syscall /* 50 */ 63 .quad alpha_ni_syscall /* 50 */
64 .quad sys_acct 64 .quad sys_acct
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 553b7cf17bfb..88c97bc7a6f5 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -271,7 +271,6 @@ config ARCH_AT91
271 bool "Atmel AT91" 271 bool "Atmel AT91"
272 select ARCH_REQUIRE_GPIOLIB 272 select ARCH_REQUIRE_GPIOLIB
273 select HAVE_CLK 273 select HAVE_CLK
274 select ARCH_USES_GETTIMEOFFSET
275 help 274 help
276 This enables support for systems based on the Atmel AT91RM9200, 275 This enables support for systems based on the Atmel AT91RM9200,
277 AT91SAM9 and AT91CAP9 processors. 276 AT91SAM9 and AT91CAP9 processors.
@@ -1051,6 +1050,32 @@ config ARM_ERRATA_460075
1051 ACTLR register. Note that setting specific bits in the ACTLR register 1050 ACTLR register. Note that setting specific bits in the ACTLR register
1052 may not be available in non-secure mode. 1051 may not be available in non-secure mode.
1053 1052
1053config ARM_ERRATA_742230
1054 bool "ARM errata: DMB operation may be faulty"
1055 depends on CPU_V7 && SMP
1056 help
1057 This option enables the workaround for the 742230 Cortex-A9
1058 (r1p0..r2p2) erratum. Under rare circumstances, a DMB instruction
1059 between two write operations may not ensure the correct visibility
1060 ordering of the two writes. This workaround sets a specific bit in
1061 the diagnostic register of the Cortex-A9 which causes the DMB
1062 instruction to behave as a DSB, ensuring the correct behaviour of
1063 the two writes.
1064
1065config ARM_ERRATA_742231
1066 bool "ARM errata: Incorrect hazard handling in the SCU may lead to data corruption"
1067 depends on CPU_V7 && SMP
1068 help
1069 This option enables the workaround for the 742231 Cortex-A9
1070 (r2p0..r2p2) erratum. Under certain conditions, specific to the
1071 Cortex-A9 MPCore micro-architecture, two CPUs working in SMP mode,
1072 accessing some data located in the same cache line, may get corrupted
1073 data due to bad handling of the address hazard when the line gets
1074 replaced from one of the CPUs at the same time as another CPU is
1075 accessing it. This workaround sets specific bits in the diagnostic
1076 register of the Cortex-A9 which reduces the linefill issuing
1077 capabilities of the processor.
1078
1054config PL310_ERRATA_588369 1079config PL310_ERRATA_588369
1055 bool "Clean & Invalidate maintenance operations do not invalidate clean lines" 1080 bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
1056 depends on CACHE_L2X0 && ARCH_OMAP4 1081 depends on CACHE_L2X0 && ARCH_OMAP4
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index b23f6bc46cfa..65a7c1c588a9 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -116,5 +116,5 @@ CFLAGS_font.o := -Dstatic=
116$(obj)/font.c: $(FONTC) 116$(obj)/font.c: $(FONTC)
117 $(call cmd,shipped) 117 $(call cmd,shipped)
118 118
119$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile .config 119$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile $(KCONFIG_CONFIG)
120 @sed "$(SEDFLAGS)" < $< > $@ 120 @sed "$(SEDFLAGS)" < $< > $@
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index ab68cf1ef80f..e90b167ea848 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -317,6 +317,10 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
317#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE 317#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
318#define pgprot_dmacoherent(prot) \ 318#define pgprot_dmacoherent(prot) \
319 __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE) 319 __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE)
320#define __HAVE_PHYS_MEM_ACCESS_PROT
321struct file;
322extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
323 unsigned long size, pgprot_t vma_prot);
320#else 324#else
321#define pgprot_dmacoherent(prot) \ 325#define pgprot_dmacoherent(prot) \
322 __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED) 326 __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 1b560825e1cf..7885722bdf4e 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -48,6 +48,8 @@ work_pending:
48 beq no_work_pending 48 beq no_work_pending
49 mov r0, sp @ 'regs' 49 mov r0, sp @ 'regs'
50 mov r2, why @ 'syscall' 50 mov r2, why @ 'syscall'
51 tst r1, #_TIF_SIGPENDING @ delivering a signal?
52 movne why, #0 @ prevent further restarts
51 bl do_notify_resume 53 bl do_notify_resume
52 b ret_slow_syscall @ Check work again 54 b ret_slow_syscall @ Check work again
53 55
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index 5e71ccd5e7d3..1276babf84d5 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -426,7 +426,7 @@ static struct i2c_gpio_platform_data pdata_i2c0 = {
426 .sda_is_open_drain = 1, 426 .sda_is_open_drain = 1,
427 .scl_pin = AT91_PIN_PA21, 427 .scl_pin = AT91_PIN_PA21,
428 .scl_is_open_drain = 1, 428 .scl_is_open_drain = 1,
429 .udelay = 2, /* ~100 kHz */ 429 .udelay = 5, /* ~100 kHz */
430}; 430};
431 431
432static struct platform_device at91sam9g45_twi0_device = { 432static struct platform_device at91sam9g45_twi0_device = {
@@ -440,7 +440,7 @@ static struct i2c_gpio_platform_data pdata_i2c1 = {
440 .sda_is_open_drain = 1, 440 .sda_is_open_drain = 1,
441 .scl_pin = AT91_PIN_PB11, 441 .scl_pin = AT91_PIN_PB11,
442 .scl_is_open_drain = 1, 442 .scl_is_open_drain = 1,
443 .udelay = 2, /* ~100 kHz */ 443 .udelay = 5, /* ~100 kHz */
444}; 444};
445 445
446static struct platform_device at91sam9g45_twi1_device = { 446static struct platform_device at91sam9g45_twi1_device = {
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 3d996b659ff4..9be261beae7d 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -769,8 +769,7 @@ static struct map_desc dm355_io_desc[] = {
769 .virtual = SRAM_VIRT, 769 .virtual = SRAM_VIRT,
770 .pfn = __phys_to_pfn(0x00010000), 770 .pfn = __phys_to_pfn(0x00010000),
771 .length = SZ_32K, 771 .length = SZ_32K,
772 /* MT_MEMORY_NONCACHED requires supersection alignment */ 772 .type = MT_MEMORY_NONCACHED,
773 .type = MT_DEVICE,
774 }, 773 },
775}; 774};
776 775
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 6b6f4c643709..7781e35daec3 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -969,8 +969,7 @@ static struct map_desc dm365_io_desc[] = {
969 .virtual = SRAM_VIRT, 969 .virtual = SRAM_VIRT,
970 .pfn = __phys_to_pfn(0x00010000), 970 .pfn = __phys_to_pfn(0x00010000),
971 .length = SZ_32K, 971 .length = SZ_32K,
972 /* MT_MEMORY_NONCACHED requires supersection alignment */ 972 .type = MT_MEMORY_NONCACHED,
973 .type = MT_DEVICE,
974 }, 973 },
975}; 974};
976 975
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 40fec315c99a..5e5b0a7831fb 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -653,8 +653,7 @@ static struct map_desc dm644x_io_desc[] = {
653 .virtual = SRAM_VIRT, 653 .virtual = SRAM_VIRT,
654 .pfn = __phys_to_pfn(0x00008000), 654 .pfn = __phys_to_pfn(0x00008000),
655 .length = SZ_16K, 655 .length = SZ_16K,
656 /* MT_MEMORY_NONCACHED requires supersection alignment */ 656 .type = MT_MEMORY_NONCACHED,
657 .type = MT_DEVICE,
658 }, 657 },
659}; 658};
660 659
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index e4a3df1872ac..26e8a9c7f50b 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -737,8 +737,7 @@ static struct map_desc dm646x_io_desc[] = {
737 .virtual = SRAM_VIRT, 737 .virtual = SRAM_VIRT,
738 .pfn = __phys_to_pfn(0x00010000), 738 .pfn = __phys_to_pfn(0x00010000),
739 .length = SZ_32K, 739 .length = SZ_32K,
740 /* MT_MEMORY_NONCACHED requires supersection alignment */ 740 .type = MT_MEMORY_NONCACHED,
741 .type = MT_DEVICE,
742 }, 741 },
743}; 742};
744 743
diff --git a/arch/arm/mach-dove/include/mach/io.h b/arch/arm/mach-dove/include/mach/io.h
index 3b3e4721ce2e..eb4936ff90ad 100644
--- a/arch/arm/mach-dove/include/mach/io.h
+++ b/arch/arm/mach-dove/include/mach/io.h
@@ -13,8 +13,8 @@
13 13
14#define IO_SPACE_LIMIT 0xffffffff 14#define IO_SPACE_LIMIT 0xffffffff
15 15
16#define __io(a) ((void __iomem *)(((a) - DOVE_PCIE0_IO_PHYS_BASE) +\ 16#define __io(a) ((void __iomem *)(((a) - DOVE_PCIE0_IO_BUS_BASE) + \
17 DOVE_PCIE0_IO_VIRT_BASE)) 17 DOVE_PCIE0_IO_VIRT_BASE))
18#define __mem_pci(a) (a) 18#define __mem_pci(a) (a)
19 19
20#endif 20#endif
diff --git a/arch/arm/mach-kirkwood/include/mach/kirkwood.h b/arch/arm/mach-kirkwood/include/mach/kirkwood.h
index 93fc2ec95e76..6e924b398919 100644
--- a/arch/arm/mach-kirkwood/include/mach/kirkwood.h
+++ b/arch/arm/mach-kirkwood/include/mach/kirkwood.h
@@ -38,7 +38,7 @@
38 38
39#define KIRKWOOD_PCIE1_IO_PHYS_BASE 0xf3000000 39#define KIRKWOOD_PCIE1_IO_PHYS_BASE 0xf3000000
40#define KIRKWOOD_PCIE1_IO_VIRT_BASE 0xfef00000 40#define KIRKWOOD_PCIE1_IO_VIRT_BASE 0xfef00000
41#define KIRKWOOD_PCIE1_IO_BUS_BASE 0x00000000 41#define KIRKWOOD_PCIE1_IO_BUS_BASE 0x00100000
42#define KIRKWOOD_PCIE1_IO_SIZE SZ_1M 42#define KIRKWOOD_PCIE1_IO_SIZE SZ_1M
43 43
44#define KIRKWOOD_PCIE_IO_PHYS_BASE 0xf2000000 44#define KIRKWOOD_PCIE_IO_PHYS_BASE 0xf2000000
diff --git a/arch/arm/mach-kirkwood/pcie.c b/arch/arm/mach-kirkwood/pcie.c
index 55e7f00836b7..513ad3102d7c 100644
--- a/arch/arm/mach-kirkwood/pcie.c
+++ b/arch/arm/mach-kirkwood/pcie.c
@@ -117,7 +117,7 @@ static void __init pcie0_ioresources_init(struct pcie_port *pp)
117 * IORESOURCE_IO 117 * IORESOURCE_IO
118 */ 118 */
119 pp->res[0].name = "PCIe 0 I/O Space"; 119 pp->res[0].name = "PCIe 0 I/O Space";
120 pp->res[0].start = KIRKWOOD_PCIE_IO_PHYS_BASE; 120 pp->res[0].start = KIRKWOOD_PCIE_IO_BUS_BASE;
121 pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE_IO_SIZE - 1; 121 pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE_IO_SIZE - 1;
122 pp->res[0].flags = IORESOURCE_IO; 122 pp->res[0].flags = IORESOURCE_IO;
123 123
@@ -139,7 +139,7 @@ static void __init pcie1_ioresources_init(struct pcie_port *pp)
139 * IORESOURCE_IO 139 * IORESOURCE_IO
140 */ 140 */
141 pp->res[0].name = "PCIe 1 I/O Space"; 141 pp->res[0].name = "PCIe 1 I/O Space";
142 pp->res[0].start = KIRKWOOD_PCIE1_IO_PHYS_BASE; 142 pp->res[0].start = KIRKWOOD_PCIE1_IO_BUS_BASE;
143 pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE1_IO_SIZE - 1; 143 pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE1_IO_SIZE - 1;
144 pp->res[0].flags = IORESOURCE_IO; 144 pp->res[0].flags = IORESOURCE_IO;
145 145
diff --git a/arch/arm/mach-mmp/include/mach/system.h b/arch/arm/mach-mmp/include/mach/system.h
index 4f5b0e0ce6cf..1a8a25edb1b4 100644
--- a/arch/arm/mach-mmp/include/mach/system.h
+++ b/arch/arm/mach-mmp/include/mach/system.h
@@ -9,6 +9,8 @@
9#ifndef __ASM_MACH_SYSTEM_H 9#ifndef __ASM_MACH_SYSTEM_H
10#define __ASM_MACH_SYSTEM_H 10#define __ASM_MACH_SYSTEM_H
11 11
12#include <mach/cputype.h>
13
12static inline void arch_idle(void) 14static inline void arch_idle(void)
13{ 15{
14 cpu_do_idle(); 16 cpu_do_idle();
@@ -16,6 +18,9 @@ static inline void arch_idle(void)
16 18
17static inline void arch_reset(char mode, const char *cmd) 19static inline void arch_reset(char mode, const char *cmd)
18{ 20{
19 cpu_reset(0); 21 if (cpu_is_pxa168())
22 cpu_reset(0xffff0000);
23 else
24 cpu_reset(0);
20} 25}
21#endif /* __ASM_MACH_SYSTEM_H */ 26#endif /* __ASM_MACH_SYSTEM_H */
diff --git a/arch/arm/mach-pxa/cpufreq-pxa2xx.c b/arch/arm/mach-pxa/cpufreq-pxa2xx.c
index 50d5939a78f1..58093d9e07be 100644
--- a/arch/arm/mach-pxa/cpufreq-pxa2xx.c
+++ b/arch/arm/mach-pxa/cpufreq-pxa2xx.c
@@ -312,8 +312,7 @@ static int pxa_set_target(struct cpufreq_policy *policy,
312 freqs.cpu = policy->cpu; 312 freqs.cpu = policy->cpu;
313 313
314 if (freq_debug) 314 if (freq_debug)
315 pr_debug(KERN_INFO "Changing CPU frequency to %d Mhz, " 315 pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n",
316 "(SDRAM %d Mhz)\n",
317 freqs.new / 1000, (pxa_freq_settings[idx].div2) ? 316 freqs.new / 1000, (pxa_freq_settings[idx].div2) ?
318 (new_freq_mem / 2000) : (new_freq_mem / 1000)); 317 (new_freq_mem / 2000) : (new_freq_mem / 1000));
319 318
diff --git a/arch/arm/mach-pxa/include/mach/hardware.h b/arch/arm/mach-pxa/include/mach/hardware.h
index 428cc7bda9a4..814f1458a06a 100644
--- a/arch/arm/mach-pxa/include/mach/hardware.h
+++ b/arch/arm/mach-pxa/include/mach/hardware.h
@@ -264,23 +264,35 @@
264 * <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x 264 * <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x
265 * == 0x3 for pxa300/pxa310/pxa320 265 * == 0x3 for pxa300/pxa310/pxa320
266 */ 266 */
267#if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x)
267#define __cpu_is_pxa2xx(id) \ 268#define __cpu_is_pxa2xx(id) \
268 ({ \ 269 ({ \
269 unsigned int _id = (id) >> 13 & 0x7; \ 270 unsigned int _id = (id) >> 13 & 0x7; \
270 _id <= 0x2; \ 271 _id <= 0x2; \
271 }) 272 })
273#else
274#define __cpu_is_pxa2xx(id) (0)
275#endif
272 276
277#ifdef CONFIG_PXA3xx
273#define __cpu_is_pxa3xx(id) \ 278#define __cpu_is_pxa3xx(id) \
274 ({ \ 279 ({ \
275 unsigned int _id = (id) >> 13 & 0x7; \ 280 unsigned int _id = (id) >> 13 & 0x7; \
276 _id == 0x3; \ 281 _id == 0x3; \
277 }) 282 })
283#else
284#define __cpu_is_pxa3xx(id) (0)
285#endif
278 286
287#if defined(CONFIG_CPU_PXA930) || defined(CONFIG_CPU_PXA935)
279#define __cpu_is_pxa93x(id) \ 288#define __cpu_is_pxa93x(id) \
280 ({ \ 289 ({ \
281 unsigned int _id = (id) >> 4 & 0xfff; \ 290 unsigned int _id = (id) >> 4 & 0xfff; \
282 _id == 0x683 || _id == 0x693; \ 291 _id == 0x683 || _id == 0x693; \
283 }) 292 })
293#else
294#define __cpu_is_pxa93x(id) (0)
295#endif
284 296
285#define cpu_is_pxa2xx() \ 297#define cpu_is_pxa2xx() \
286 ({ \ 298 ({ \
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c
index 77ad6d34ab5b..405b92a29793 100644
--- a/arch/arm/mach-pxa/palm27x.c
+++ b/arch/arm/mach-pxa/palm27x.c
@@ -469,9 +469,13 @@ static struct i2c_board_info __initdata palm27x_pi2c_board_info[] = {
469 }, 469 },
470}; 470};
471 471
472static struct i2c_pxa_platform_data palm27x_i2c_power_info = {
473 .use_pio = 1,
474};
475
472void __init palm27x_pmic_init(void) 476void __init palm27x_pmic_init(void)
473{ 477{
474 i2c_register_board_info(1, ARRAY_AND_SIZE(palm27x_pi2c_board_info)); 478 i2c_register_board_info(1, ARRAY_AND_SIZE(palm27x_pi2c_board_info));
475 pxa27x_set_i2c_power_info(NULL); 479 pxa27x_set_i2c_power_info(&palm27x_i2c_power_info);
476} 480}
477#endif 481#endif
diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c
index c9b747cedea8..37d6173bbb66 100644
--- a/arch/arm/mach-pxa/vpac270.c
+++ b/arch/arm/mach-pxa/vpac270.c
@@ -240,6 +240,7 @@ static void __init vpac270_onenand_init(void) {}
240#if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE) 240#if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE)
241static struct pxamci_platform_data vpac270_mci_platform_data = { 241static struct pxamci_platform_data vpac270_mci_platform_data = {
242 .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, 242 .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
243 .gpio_power = -1,
243 .gpio_card_detect = GPIO53_VPAC270_SD_DETECT_N, 244 .gpio_card_detect = GPIO53_VPAC270_SD_DETECT_N,
244 .gpio_card_ro = GPIO52_VPAC270_SD_READONLY, 245 .gpio_card_ro = GPIO52_VPAC270_SD_READONLY,
245 .detect_delay_ms = 200, 246 .detect_delay_ms = 200,
diff --git a/arch/arm/mach-u300/include/mach/gpio.h b/arch/arm/mach-u300/include/mach/gpio.h
index 7b1fc984abb6..d5a71abcbaea 100644
--- a/arch/arm/mach-u300/include/mach/gpio.h
+++ b/arch/arm/mach-u300/include/mach/gpio.h
@@ -273,6 +273,9 @@ extern void gpio_pullup(unsigned gpio, int value);
273extern int gpio_get_value(unsigned gpio); 273extern int gpio_get_value(unsigned gpio);
274extern void gpio_set_value(unsigned gpio, int value); 274extern void gpio_set_value(unsigned gpio, int value);
275 275
276#define gpio_get_value_cansleep gpio_get_value
277#define gpio_set_value_cansleep gpio_set_value
278
276/* wrappers to sleep-enable the previous two functions */ 279/* wrappers to sleep-enable the previous two functions */
277static inline unsigned gpio_to_irq(unsigned gpio) 280static inline unsigned gpio_to_irq(unsigned gpio)
278{ 281{
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c
index 577df6cccb08..efb127022d42 100644
--- a/arch/arm/mach-vexpress/ct-ca9x4.c
+++ b/arch/arm/mach-vexpress/ct-ca9x4.c
@@ -227,7 +227,13 @@ static void ct_ca9x4_init(void)
227 int i; 227 int i;
228 228
229#ifdef CONFIG_CACHE_L2X0 229#ifdef CONFIG_CACHE_L2X0
230 l2x0_init(MMIO_P2V(CT_CA9X4_L2CC), 0x00000000, 0xfe0fffff); 230 void __iomem *l2x0_base = MMIO_P2V(CT_CA9X4_L2CC);
231
232 /* set RAM latencies to 1 cycle for this core tile. */
233 writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL);
234 writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL);
235
236 l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
231#endif 237#endif
232 238
233 clkdev_add_table(lookups, ARRAY_SIZE(lookups)); 239 clkdev_add_table(lookups, ARRAY_SIZE(lookups));
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index d073b64ae87e..724ba3bce72c 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -885,8 +885,23 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
885 885
886 if (ai_usermode & UM_SIGNAL) 886 if (ai_usermode & UM_SIGNAL)
887 force_sig(SIGBUS, current); 887 force_sig(SIGBUS, current);
888 else 888 else {
889 set_cr(cr_no_alignment); 889 /*
890 * We're about to disable the alignment trap and return to
891 * user space. But if an interrupt occurs before actually
892 * reaching user space, then the IRQ vector entry code will
893 * notice that we were still in kernel space and therefore
894 * the alignment trap won't be re-enabled in that case as it
895 * is presumed to be always on from kernel space.
896 * Let's prevent that race by disabling interrupts here (they
897 * are disabled on the way back to user space anyway in
898 * entry-common.S) and disable the alignment trap only if
899 * there is no work pending for this thread.
900 */
901 raw_local_irq_disable();
902 if (!(current_thread_info()->flags & _TIF_WORK_MASK))
903 set_cr(cr_no_alignment);
904 }
890 905
891 return 0; 906 return 0;
892} 907}
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 6e1c4f6a2b3f..6a3a2d0cd6db 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -15,6 +15,7 @@
15#include <linux/nodemask.h> 15#include <linux/nodemask.h>
16#include <linux/memblock.h> 16#include <linux/memblock.h>
17#include <linux/sort.h> 17#include <linux/sort.h>
18#include <linux/fs.h>
18 19
19#include <asm/cputype.h> 20#include <asm/cputype.h>
20#include <asm/sections.h> 21#include <asm/sections.h>
@@ -246,6 +247,9 @@ static struct mem_type mem_types[] = {
246 .domain = DOMAIN_USER, 247 .domain = DOMAIN_USER,
247 }, 248 },
248 [MT_MEMORY] = { 249 [MT_MEMORY] = {
250 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
251 L_PTE_USER | L_PTE_EXEC,
252 .prot_l1 = PMD_TYPE_TABLE,
249 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, 253 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
250 .domain = DOMAIN_KERNEL, 254 .domain = DOMAIN_KERNEL,
251 }, 255 },
@@ -254,6 +258,9 @@ static struct mem_type mem_types[] = {
254 .domain = DOMAIN_KERNEL, 258 .domain = DOMAIN_KERNEL,
255 }, 259 },
256 [MT_MEMORY_NONCACHED] = { 260 [MT_MEMORY_NONCACHED] = {
261 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
262 L_PTE_USER | L_PTE_EXEC | L_PTE_MT_BUFFERABLE,
263 .prot_l1 = PMD_TYPE_TABLE,
257 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, 264 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
258 .domain = DOMAIN_KERNEL, 265 .domain = DOMAIN_KERNEL,
259 }, 266 },
@@ -411,9 +418,12 @@ static void __init build_mem_type_table(void)
411 * Enable CPU-specific coherency if supported. 418 * Enable CPU-specific coherency if supported.
412 * (Only available on XSC3 at the moment.) 419 * (Only available on XSC3 at the moment.)
413 */ 420 */
414 if (arch_is_coherent() && cpu_is_xsc3()) 421 if (arch_is_coherent() && cpu_is_xsc3()) {
415 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; 422 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
416 423 mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
424 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
425 mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
426 }
417 /* 427 /*
418 * ARMv6 and above have extended page tables. 428 * ARMv6 and above have extended page tables.
419 */ 429 */
@@ -438,7 +448,9 @@ static void __init build_mem_type_table(void)
438 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; 448 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
439 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; 449 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
440 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; 450 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
451 mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
441 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; 452 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
453 mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
442#endif 454#endif
443 } 455 }
444 456
@@ -475,6 +487,8 @@ static void __init build_mem_type_table(void)
475 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; 487 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
476 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; 488 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
477 mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd; 489 mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
490 mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
491 mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
478 mem_types[MT_ROM].prot_sect |= cp->pmd; 492 mem_types[MT_ROM].prot_sect |= cp->pmd;
479 493
480 switch (cp->pmd) { 494 switch (cp->pmd) {
@@ -498,6 +512,19 @@ static void __init build_mem_type_table(void)
498 } 512 }
499} 513}
500 514
515#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
516pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
517 unsigned long size, pgprot_t vma_prot)
518{
519 if (!pfn_valid(pfn))
520 return pgprot_noncached(vma_prot);
521 else if (file->f_flags & O_SYNC)
522 return pgprot_writecombine(vma_prot);
523 return vma_prot;
524}
525EXPORT_SYMBOL(phys_mem_access_prot);
526#endif
527
501#define vectors_base() (vectors_high() ? 0xffff0000 : 0) 528#define vectors_base() (vectors_high() ? 0xffff0000 : 0)
502 529
503static void __init *early_alloc(unsigned long sz) 530static void __init *early_alloc(unsigned long sz)
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 6a8506d99ee9..7563ff0141bd 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -186,13 +186,14 @@ cpu_v7_name:
186 * It is assumed that: 186 * It is assumed that:
187 * - cache type register is implemented 187 * - cache type register is implemented
188 */ 188 */
189__v7_setup: 189__v7_ca9mp_setup:
190#ifdef CONFIG_SMP 190#ifdef CONFIG_SMP
191 mrc p15, 0, r0, c1, c0, 1 191 mrc p15, 0, r0, c1, c0, 1
192 tst r0, #(1 << 6) @ SMP/nAMP mode enabled? 192 tst r0, #(1 << 6) @ SMP/nAMP mode enabled?
193 orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and 193 orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and
194 mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting 194 mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting
195#endif 195#endif
196__v7_setup:
196 adr r12, __v7_setup_stack @ the local stack 197 adr r12, __v7_setup_stack @ the local stack
197 stmia r12, {r0-r5, r7, r9, r11, lr} 198 stmia r12, {r0-r5, r7, r9, r11, lr}
198 bl v7_flush_dcache_all 199 bl v7_flush_dcache_all
@@ -201,11 +202,16 @@ __v7_setup:
201 mrc p15, 0, r0, c0, c0, 0 @ read main ID register 202 mrc p15, 0, r0, c0, c0, 0 @ read main ID register
202 and r10, r0, #0xff000000 @ ARM? 203 and r10, r0, #0xff000000 @ ARM?
203 teq r10, #0x41000000 204 teq r10, #0x41000000
204 bne 2f 205 bne 3f
205 and r5, r0, #0x00f00000 @ variant 206 and r5, r0, #0x00f00000 @ variant
206 and r6, r0, #0x0000000f @ revision 207 and r6, r0, #0x0000000f @ revision
207 orr r0, r6, r5, lsr #20-4 @ combine variant and revision 208 orr r6, r6, r5, lsr #20-4 @ combine variant and revision
209 ubfx r0, r0, #4, #12 @ primary part number
208 210
211 /* Cortex-A8 Errata */
212 ldr r10, =0x00000c08 @ Cortex-A8 primary part number
213 teq r0, r10
214 bne 2f
209#ifdef CONFIG_ARM_ERRATA_430973 215#ifdef CONFIG_ARM_ERRATA_430973
210 teq r5, #0x00100000 @ only present in r1p* 216 teq r5, #0x00100000 @ only present in r1p*
211 mrceq p15, 0, r10, c1, c0, 1 @ read aux control register 217 mrceq p15, 0, r10, c1, c0, 1 @ read aux control register
@@ -213,21 +219,42 @@ __v7_setup:
213 mcreq p15, 0, r10, c1, c0, 1 @ write aux control register 219 mcreq p15, 0, r10, c1, c0, 1 @ write aux control register
214#endif 220#endif
215#ifdef CONFIG_ARM_ERRATA_458693 221#ifdef CONFIG_ARM_ERRATA_458693
216 teq r0, #0x20 @ only present in r2p0 222 teq r6, #0x20 @ only present in r2p0
217 mrceq p15, 0, r10, c1, c0, 1 @ read aux control register 223 mrceq p15, 0, r10, c1, c0, 1 @ read aux control register
218 orreq r10, r10, #(1 << 5) @ set L1NEON to 1 224 orreq r10, r10, #(1 << 5) @ set L1NEON to 1
219 orreq r10, r10, #(1 << 9) @ set PLDNOP to 1 225 orreq r10, r10, #(1 << 9) @ set PLDNOP to 1
220 mcreq p15, 0, r10, c1, c0, 1 @ write aux control register 226 mcreq p15, 0, r10, c1, c0, 1 @ write aux control register
221#endif 227#endif
222#ifdef CONFIG_ARM_ERRATA_460075 228#ifdef CONFIG_ARM_ERRATA_460075
223 teq r0, #0x20 @ only present in r2p0 229 teq r6, #0x20 @ only present in r2p0
224 mrceq p15, 1, r10, c9, c0, 2 @ read L2 cache aux ctrl register 230 mrceq p15, 1, r10, c9, c0, 2 @ read L2 cache aux ctrl register
225 tsteq r10, #1 << 22 231 tsteq r10, #1 << 22
226 orreq r10, r10, #(1 << 22) @ set the Write Allocate disable bit 232 orreq r10, r10, #(1 << 22) @ set the Write Allocate disable bit
227 mcreq p15, 1, r10, c9, c0, 2 @ write the L2 cache aux ctrl register 233 mcreq p15, 1, r10, c9, c0, 2 @ write the L2 cache aux ctrl register
228#endif 234#endif
235 b 3f
236
237 /* Cortex-A9 Errata */
2382: ldr r10, =0x00000c09 @ Cortex-A9 primary part number
239 teq r0, r10
240 bne 3f
241#ifdef CONFIG_ARM_ERRATA_742230
242 cmp r6, #0x22 @ only present up to r2p2
243 mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register
244 orrle r10, r10, #1 << 4 @ set bit #4
245 mcrle p15, 0, r10, c15, c0, 1 @ write diagnostic register
246#endif
247#ifdef CONFIG_ARM_ERRATA_742231
248 teq r6, #0x20 @ present in r2p0
249 teqne r6, #0x21 @ present in r2p1
250 teqne r6, #0x22 @ present in r2p2
251 mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register
252 orreq r10, r10, #1 << 12 @ set bit #12
253 orreq r10, r10, #1 << 22 @ set bit #22
254 mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
255#endif
229 256
2302: mov r10, #0 2573: mov r10, #0
231#ifdef HARVARD_CACHE 258#ifdef HARVARD_CACHE
232 mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate 259 mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
233#endif 260#endif
@@ -323,6 +350,29 @@ cpu_elf_name:
323 350
324 .section ".proc.info.init", #alloc, #execinstr 351 .section ".proc.info.init", #alloc, #execinstr
325 352
353 .type __v7_ca9mp_proc_info, #object
354__v7_ca9mp_proc_info:
355 .long 0x410fc090 @ Required ID value
356 .long 0xff0ffff0 @ Mask for ID
357 .long PMD_TYPE_SECT | \
358 PMD_SECT_AP_WRITE | \
359 PMD_SECT_AP_READ | \
360 PMD_FLAGS
361 .long PMD_TYPE_SECT | \
362 PMD_SECT_XN | \
363 PMD_SECT_AP_WRITE | \
364 PMD_SECT_AP_READ
365 b __v7_ca9mp_setup
366 .long cpu_arch_name
367 .long cpu_elf_name
368 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
369 .long cpu_v7_name
370 .long v7_processor_functions
371 .long v7wbi_tlb_fns
372 .long v6_user_fns
373 .long v7_cache_fns
374 .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
375
326 /* 376 /*
327 * Match any ARMv7 processor core. 377 * Match any ARMv7 processor core.
328 */ 378 */
diff --git a/arch/arm/plat-nomadik/timer.c b/arch/arm/plat-nomadik/timer.c
index ea3ca86c5283..aedf9c1d645e 100644
--- a/arch/arm/plat-nomadik/timer.c
+++ b/arch/arm/plat-nomadik/timer.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/arch/arm/mach-nomadik/timer.c 2 * linux/arch/arm/plat-nomadik/timer.c
3 * 3 *
4 * Copyright (C) 2008 STMicroelectronics 4 * Copyright (C) 2008 STMicroelectronics
5 * Copyright (C) 2010 Alessandro Rubini 5 * Copyright (C) 2010 Alessandro Rubini
@@ -75,7 +75,7 @@ static void nmdk_clkevt_mode(enum clock_event_mode mode,
75 cr = readl(mtu_base + MTU_CR(1)); 75 cr = readl(mtu_base + MTU_CR(1));
76 writel(0, mtu_base + MTU_LR(1)); 76 writel(0, mtu_base + MTU_LR(1));
77 writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1)); 77 writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1));
78 writel(0x2, mtu_base + MTU_IMSC); 78 writel(1 << 1, mtu_base + MTU_IMSC);
79 break; 79 break;
80 case CLOCK_EVT_MODE_SHUTDOWN: 80 case CLOCK_EVT_MODE_SHUTDOWN:
81 case CLOCK_EVT_MODE_UNUSED: 81 case CLOCK_EVT_MODE_UNUSED:
@@ -131,25 +131,23 @@ void __init nmdk_timer_init(void)
131{ 131{
132 unsigned long rate; 132 unsigned long rate;
133 struct clk *clk0; 133 struct clk *clk0;
134 struct clk *clk1; 134 u32 cr = MTU_CRn_32BITS;
135 u32 cr;
136 135
137 clk0 = clk_get_sys("mtu0", NULL); 136 clk0 = clk_get_sys("mtu0", NULL);
138 BUG_ON(IS_ERR(clk0)); 137 BUG_ON(IS_ERR(clk0));
139 138
140 clk1 = clk_get_sys("mtu1", NULL);
141 BUG_ON(IS_ERR(clk1));
142
143 clk_enable(clk0); 139 clk_enable(clk0);
144 clk_enable(clk1);
145 140
146 /* 141 /*
147 * Tick rate is 2.4MHz for Nomadik and 110MHz for ux500: 142 * Tick rate is 2.4MHz for Nomadik and 2.4Mhz, 100MHz or 133 MHz
148 * use a divide-by-16 counter if it's more than 16MHz 143 * for ux500.
144 * Use a divide-by-16 counter if the tick rate is more than 32MHz.
145 * At 32 MHz, the timer (with 32 bit counter) can be programmed
146 * to wake-up at a max 127s a head in time. Dividing a 2.4 MHz timer
147 * with 16 gives too low timer resolution.
149 */ 148 */
150 cr = MTU_CRn_32BITS;;
151 rate = clk_get_rate(clk0); 149 rate = clk_get_rate(clk0);
152 if (rate > 16 << 20) { 150 if (rate > 32000000) {
153 rate /= 16; 151 rate /= 16;
154 cr |= MTU_CRn_PRESCALE_16; 152 cr |= MTU_CRn_PRESCALE_16;
155 } else { 153 } else {
@@ -170,15 +168,8 @@ void __init nmdk_timer_init(void)
170 pr_err("timer: failed to initialize clock source %s\n", 168 pr_err("timer: failed to initialize clock source %s\n",
171 nmdk_clksrc.name); 169 nmdk_clksrc.name);
172 170
173 /* Timer 1 is used for events, fix according to rate */ 171 /* Timer 1 is used for events */
174 cr = MTU_CRn_32BITS; 172
175 rate = clk_get_rate(clk1);
176 if (rate > 16 << 20) {
177 rate /= 16;
178 cr |= MTU_CRn_PRESCALE_16;
179 } else {
180 cr |= MTU_CRn_PRESCALE_1;
181 }
182 clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE); 173 clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE);
183 174
184 writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */ 175 writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 226b2e858d6c..10b3b4c63372 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -220,20 +220,7 @@ void __init omap_map_sram(void)
220 if (omap_sram_size == 0) 220 if (omap_sram_size == 0)
221 return; 221 return;
222 222
223 if (cpu_is_omap24xx()) {
224 omap_sram_io_desc[0].virtual = OMAP2_SRAM_VA;
225
226 base = OMAP2_SRAM_PA;
227 base = ROUND_DOWN(base, PAGE_SIZE);
228 omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
229 }
230
231 if (cpu_is_omap34xx()) { 223 if (cpu_is_omap34xx()) {
232 omap_sram_io_desc[0].virtual = OMAP3_SRAM_VA;
233 base = OMAP3_SRAM_PA;
234 base = ROUND_DOWN(base, PAGE_SIZE);
235 omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
236
237 /* 224 /*
238 * SRAM must be marked as non-cached on OMAP3 since the 225 * SRAM must be marked as non-cached on OMAP3 since the
239 * CORE DPLL M2 divider change code (in SRAM) runs with the 226 * CORE DPLL M2 divider change code (in SRAM) runs with the
@@ -244,13 +231,11 @@ void __init omap_map_sram(void)
244 omap_sram_io_desc[0].type = MT_MEMORY_NONCACHED; 231 omap_sram_io_desc[0].type = MT_MEMORY_NONCACHED;
245 } 232 }
246 233
247 if (cpu_is_omap44xx()) { 234 omap_sram_io_desc[0].virtual = omap_sram_base;
248 omap_sram_io_desc[0].virtual = OMAP4_SRAM_VA; 235 base = omap_sram_start;
249 base = OMAP4_SRAM_PA; 236 base = ROUND_DOWN(base, PAGE_SIZE);
250 base = ROUND_DOWN(base, PAGE_SIZE); 237 omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
251 omap_sram_io_desc[0].pfn = __phys_to_pfn(base); 238 omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE);
252 }
253 omap_sram_io_desc[0].length = 1024 * 1024; /* Use section desc */
254 iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc)); 239 iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc));
255 240
256 printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n", 241 printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n",
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index e93f44e62edb..7c82fa1fc911 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -53,6 +53,9 @@ config MMU
53 bool 53 bool
54 default y 54 default y
55 55
56config ARCH_DMA_ADDR_T_64BIT
57 def_bool y
58
56config NEED_DMA_MAP_STATE 59config NEED_DMA_MAP_STATE
57 def_bool y 60 def_bool y
58 61
@@ -686,8 +689,10 @@ source "lib/Kconfig"
686# Use the generic interrupt handling code in kernel/irq/: 689# Use the generic interrupt handling code in kernel/irq/:
687# 690#
688config GENERIC_HARDIRQS 691config GENERIC_HARDIRQS
689 bool 692 def_bool y
690 default y 693
694config GENERIC_HARDIRQS_NO__DO_IRQ
695 def_bool y
691 696
692config GENERIC_IRQ_PROBE 697config GENERIC_IRQ_PROBE
693 bool 698 bool
diff --git a/arch/ia64/include/asm/compat.h b/arch/ia64/include/asm/compat.h
deleted file mode 100644
index 9301a2821615..000000000000
--- a/arch/ia64/include/asm/compat.h
+++ /dev/null
@@ -1,208 +0,0 @@
1#ifndef _ASM_IA64_COMPAT_H
2#define _ASM_IA64_COMPAT_H
3/*
4 * Architecture specific compatibility types
5 */
6#include <linux/types.h>
7
8#define COMPAT_USER_HZ 100
9#define COMPAT_UTS_MACHINE "i686\0\0\0"
10
11typedef u32 compat_size_t;
12typedef s32 compat_ssize_t;
13typedef s32 compat_time_t;
14typedef s32 compat_clock_t;
15typedef s32 compat_key_t;
16typedef s32 compat_pid_t;
17typedef u16 __compat_uid_t;
18typedef u16 __compat_gid_t;
19typedef u32 __compat_uid32_t;
20typedef u32 __compat_gid32_t;
21typedef u16 compat_mode_t;
22typedef u32 compat_ino_t;
23typedef u16 compat_dev_t;
24typedef s32 compat_off_t;
25typedef s64 compat_loff_t;
26typedef u16 compat_nlink_t;
27typedef u16 compat_ipc_pid_t;
28typedef s32 compat_daddr_t;
29typedef u32 compat_caddr_t;
30typedef __kernel_fsid_t compat_fsid_t;
31typedef s32 compat_timer_t;
32
33typedef s32 compat_int_t;
34typedef s32 compat_long_t;
35typedef s64 __attribute__((aligned(4))) compat_s64;
36typedef u32 compat_uint_t;
37typedef u32 compat_ulong_t;
38typedef u64 __attribute__((aligned(4))) compat_u64;
39
40struct compat_timespec {
41 compat_time_t tv_sec;
42 s32 tv_nsec;
43};
44
45struct compat_timeval {
46 compat_time_t tv_sec;
47 s32 tv_usec;
48};
49
50struct compat_stat {
51 compat_dev_t st_dev;
52 u16 __pad1;
53 compat_ino_t st_ino;
54 compat_mode_t st_mode;
55 compat_nlink_t st_nlink;
56 __compat_uid_t st_uid;
57 __compat_gid_t st_gid;
58 compat_dev_t st_rdev;
59 u16 __pad2;
60 u32 st_size;
61 u32 st_blksize;
62 u32 st_blocks;
63 u32 st_atime;
64 u32 st_atime_nsec;
65 u32 st_mtime;
66 u32 st_mtime_nsec;
67 u32 st_ctime;
68 u32 st_ctime_nsec;
69 u32 __unused4;
70 u32 __unused5;
71};
72
73struct compat_flock {
74 short l_type;
75 short l_whence;
76 compat_off_t l_start;
77 compat_off_t l_len;
78 compat_pid_t l_pid;
79};
80
81#define F_GETLK64 12
82#define F_SETLK64 13
83#define F_SETLKW64 14
84
85/*
86 * IA32 uses 4 byte alignment for 64 bit quantities,
87 * so we need to pack this structure.
88 */
89struct compat_flock64 {
90 short l_type;
91 short l_whence;
92 compat_loff_t l_start;
93 compat_loff_t l_len;
94 compat_pid_t l_pid;
95} __attribute__((packed));
96
97struct compat_statfs {
98 int f_type;
99 int f_bsize;
100 int f_blocks;
101 int f_bfree;
102 int f_bavail;
103 int f_files;
104 int f_ffree;
105 compat_fsid_t f_fsid;
106 int f_namelen; /* SunOS ignores this field. */
107 int f_frsize;
108 int f_spare[5];
109};
110
111#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff
112#define COMPAT_RLIM_INFINITY 0xffffffff
113
114typedef u32 compat_old_sigset_t; /* at least 32 bits */
115
116#define _COMPAT_NSIG 64
117#define _COMPAT_NSIG_BPW 32
118
119typedef u32 compat_sigset_word;
120
121#define COMPAT_OFF_T_MAX 0x7fffffff
122#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
123
124struct compat_ipc64_perm {
125 compat_key_t key;
126 __compat_uid32_t uid;
127 __compat_gid32_t gid;
128 __compat_uid32_t cuid;
129 __compat_gid32_t cgid;
130 unsigned short mode;
131 unsigned short __pad1;
132 unsigned short seq;
133 unsigned short __pad2;
134 compat_ulong_t unused1;
135 compat_ulong_t unused2;
136};
137
138struct compat_semid64_ds {
139 struct compat_ipc64_perm sem_perm;
140 compat_time_t sem_otime;
141 compat_ulong_t __unused1;
142 compat_time_t sem_ctime;
143 compat_ulong_t __unused2;
144 compat_ulong_t sem_nsems;
145 compat_ulong_t __unused3;
146 compat_ulong_t __unused4;
147};
148
149struct compat_msqid64_ds {
150 struct compat_ipc64_perm msg_perm;
151 compat_time_t msg_stime;
152 compat_ulong_t __unused1;
153 compat_time_t msg_rtime;
154 compat_ulong_t __unused2;
155 compat_time_t msg_ctime;
156 compat_ulong_t __unused3;
157 compat_ulong_t msg_cbytes;
158 compat_ulong_t msg_qnum;
159 compat_ulong_t msg_qbytes;
160 compat_pid_t msg_lspid;
161 compat_pid_t msg_lrpid;
162 compat_ulong_t __unused4;
163 compat_ulong_t __unused5;
164};
165
166struct compat_shmid64_ds {
167 struct compat_ipc64_perm shm_perm;
168 compat_size_t shm_segsz;
169 compat_time_t shm_atime;
170 compat_ulong_t __unused1;
171 compat_time_t shm_dtime;
172 compat_ulong_t __unused2;
173 compat_time_t shm_ctime;
174 compat_ulong_t __unused3;
175 compat_pid_t shm_cpid;
176 compat_pid_t shm_lpid;
177 compat_ulong_t shm_nattch;
178 compat_ulong_t __unused4;
179 compat_ulong_t __unused5;
180};
181
182/*
183 * A pointer passed in from user mode. This should not be used for syscall parameters,
184 * just declare them as pointers because the syscall entry code will have appropriately
185 * converted them already.
186 */
187typedef u32 compat_uptr_t;
188
189static inline void __user *
190compat_ptr (compat_uptr_t uptr)
191{
192 return (void __user *) (unsigned long) uptr;
193}
194
195static inline compat_uptr_t
196ptr_to_compat(void __user *uptr)
197{
198 return (u32)(unsigned long)uptr;
199}
200
201static __inline__ void __user *
202arch_compat_alloc_user_space (long len)
203{
204 struct pt_regs *regs = task_pt_regs(current);
205 return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);
206}
207
208#endif /* _ASM_IA64_COMPAT_H */
diff --git a/arch/ia64/include/asm/hardirq.h b/arch/ia64/include/asm/hardirq.h
index d514cd9edb49..8fb7d33a661f 100644
--- a/arch/ia64/include/asm/hardirq.h
+++ b/arch/ia64/include/asm/hardirq.h
@@ -6,12 +6,6 @@
6 * David Mosberger-Tang <davidm@hpl.hp.com> 6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 */ 7 */
8 8
9
10#include <linux/threads.h>
11#include <linux/irq.h>
12
13#include <asm/processor.h>
14
15/* 9/*
16 * No irq_cpustat_t for IA-64. The data is held in the per-CPU data structure. 10 * No irq_cpustat_t for IA-64. The data is held in the per-CPU data structure.
17 */ 11 */
@@ -20,6 +14,11 @@
20 14
21#define local_softirq_pending() (local_cpu_data->softirq_pending) 15#define local_softirq_pending() (local_cpu_data->softirq_pending)
22 16
17#include <linux/threads.h>
18#include <linux/irq.h>
19
20#include <asm/processor.h>
21
23extern void __iomem *ipi_base_addr; 22extern void __iomem *ipi_base_addr;
24 23
25void ack_bad_irq(unsigned int irq); 24void ack_bad_irq(unsigned int irq);
diff --git a/arch/ia64/include/asm/iommu_table.h b/arch/ia64/include/asm/iommu_table.h
new file mode 100644
index 000000000000..92c8d36ae5ae
--- /dev/null
+++ b/arch/ia64/include/asm/iommu_table.h
@@ -0,0 +1,6 @@
1#ifndef _ASM_IA64_IOMMU_TABLE_H
2#define _ASM_IA64_IOMMU_TABLE_H
3
4#define IOMMU_INIT_POST(_detect)
5
6#endif /* _ASM_IA64_IOMMU_TABLE_H */
diff --git a/arch/ia64/kernel/cyclone.c b/arch/ia64/kernel/cyclone.c
index 71e35864d2e2..d52f1f78eff2 100644
--- a/arch/ia64/kernel/cyclone.c
+++ b/arch/ia64/kernel/cyclone.c
@@ -59,13 +59,13 @@ int __init init_cyclone_clock(void)
59 return -ENODEV; 59 return -ENODEV;
60 } 60 }
61 base = readq(reg); 61 base = readq(reg);
62 iounmap(reg);
62 if(!base){ 63 if(!base){
63 printk(KERN_ERR "Summit chipset: Could not find valid CBAR" 64 printk(KERN_ERR "Summit chipset: Could not find valid CBAR"
64 " value.\n"); 65 " value.\n");
65 use_cyclone = 0; 66 use_cyclone = 0;
66 return -ENODEV; 67 return -ENODEV;
67 } 68 }
68 iounmap(reg);
69 69
70 /* setup PMCC */ 70 /* setup PMCC */
71 offset = (base + CYCLONE_PMCC_OFFSET); 71 offset = (base + CYCLONE_PMCC_OFFSET);
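
The hunk above moves iounmap(reg) ahead of the !base check so the temporary CBAR mapping is released on the error path as well as on success. The general shape of that pattern, as a hedged sketch (demo_read_cbar() and DEMO_CBAR_ADDR are illustrative, not taken from this file):

    #define DEMO_CBAR_ADDR 0x40000000UL         /* placeholder address */

    static int demo_read_cbar(u64 *out)
    {
            void __iomem *reg = ioremap_nocache(DEMO_CBAR_ADDR, sizeof(u64));
            u64 base;

            if (!reg)
                    return -ENODEV;
            base = readq(reg);
            iounmap(reg);           /* copy the value out first, then unmap:
                                     * no leak on either branch below */
            if (!base)
                    return -ENODEV;
            *out = base;
            return 0;
    }
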
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 7ded76658d2d..22c38404f539 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -108,10 +108,6 @@
108#define DBG(fmt...) 108#define DBG(fmt...)
109#endif 109#endif
110 110
111#define NR_PREALLOCATE_RTE_ENTRIES \
112 (PAGE_SIZE / sizeof(struct iosapic_rte_info))
113#define RTE_PREALLOCATED (1)
114
115static DEFINE_SPINLOCK(iosapic_lock); 111static DEFINE_SPINLOCK(iosapic_lock);
116 112
117/* 113/*
@@ -136,7 +132,6 @@ struct iosapic_rte_info {
136 struct list_head rte_list; /* RTEs sharing the same vector */ 132 struct list_head rte_list; /* RTEs sharing the same vector */
137 char rte_index; /* IOSAPIC RTE index */ 133 char rte_index; /* IOSAPIC RTE index */
138 int refcnt; /* reference counter */ 134 int refcnt; /* reference counter */
139 unsigned int flags; /* flags */
140 struct iosapic *iosapic; 135 struct iosapic *iosapic;
141} ____cacheline_aligned; 136} ____cacheline_aligned;
142 137
@@ -155,9 +150,6 @@ static struct iosapic_intr_info {
155 150
156static unsigned char pcat_compat __devinitdata; /* 8259 compatibility flag */ 151static unsigned char pcat_compat __devinitdata; /* 8259 compatibility flag */
157 152
158static int iosapic_kmalloc_ok;
159static LIST_HEAD(free_rte_list);
160
161static inline void 153static inline void
162iosapic_write(struct iosapic *iosapic, unsigned int reg, u32 val) 154iosapic_write(struct iosapic *iosapic, unsigned int reg, u32 val)
163{ 155{
@@ -394,7 +386,7 @@ iosapic_startup_level_irq (unsigned int irq)
394} 386}
395 387
396static void 388static void
397iosapic_end_level_irq (unsigned int irq) 389iosapic_unmask_level_irq (unsigned int irq)
398{ 390{
399 ia64_vector vec = irq_to_vector(irq); 391 ia64_vector vec = irq_to_vector(irq);
400 struct iosapic_rte_info *rte; 392 struct iosapic_rte_info *rte;
@@ -404,7 +396,8 @@ iosapic_end_level_irq (unsigned int irq)
404 if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) { 396 if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
405 do_unmask_irq = 1; 397 do_unmask_irq = 1;
406 mask_irq(irq); 398 mask_irq(irq);
407 } 399 } else
400 unmask_irq(irq);
408 401
409 list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) 402 list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
410 iosapic_eoi(rte->iosapic->addr, vec); 403 iosapic_eoi(rte->iosapic->addr, vec);
@@ -427,9 +420,8 @@ static struct irq_chip irq_type_iosapic_level = {
427 .enable = iosapic_enable_level_irq, 420 .enable = iosapic_enable_level_irq,
428 .disable = iosapic_disable_level_irq, 421 .disable = iosapic_disable_level_irq,
429 .ack = iosapic_ack_level_irq, 422 .ack = iosapic_ack_level_irq,
430 .end = iosapic_end_level_irq,
431 .mask = mask_irq, 423 .mask = mask_irq,
432 .unmask = unmask_irq, 424 .unmask = iosapic_unmask_level_irq,
433 .set_affinity = iosapic_set_affinity 425 .set_affinity = iosapic_set_affinity
434}; 426};
435 427
@@ -552,37 +544,6 @@ iosapic_reassign_vector (int irq)
552 } 544 }
553} 545}
554 546
555static struct iosapic_rte_info * __init_refok iosapic_alloc_rte (void)
556{
557 int i;
558 struct iosapic_rte_info *rte;
559 int preallocated = 0;
560
561 if (!iosapic_kmalloc_ok && list_empty(&free_rte_list)) {
562 rte = alloc_bootmem(sizeof(struct iosapic_rte_info) *
563 NR_PREALLOCATE_RTE_ENTRIES);
564 for (i = 0; i < NR_PREALLOCATE_RTE_ENTRIES; i++, rte++)
565 list_add(&rte->rte_list, &free_rte_list);
566 }
567
568 if (!list_empty(&free_rte_list)) {
569 rte = list_entry(free_rte_list.next, struct iosapic_rte_info,
570 rte_list);
571 list_del(&rte->rte_list);
572 preallocated++;
573 } else {
574 rte = kmalloc(sizeof(struct iosapic_rte_info), GFP_ATOMIC);
575 if (!rte)
576 return NULL;
577 }
578
579 memset(rte, 0, sizeof(struct iosapic_rte_info));
580 if (preallocated)
581 rte->flags |= RTE_PREALLOCATED;
582
583 return rte;
584}
585
586static inline int irq_is_shared (int irq) 547static inline int irq_is_shared (int irq)
587{ 548{
588 return (iosapic_intr_info[irq].count > 1); 549 return (iosapic_intr_info[irq].count > 1);
@@ -615,7 +576,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
615 576
616 rte = find_rte(irq, gsi); 577 rte = find_rte(irq, gsi);
617 if (!rte) { 578 if (!rte) {
618 rte = iosapic_alloc_rte(); 579 rte = kzalloc(sizeof (*rte), GFP_ATOMIC);
619 if (!rte) { 580 if (!rte) {
620 printk(KERN_WARNING "%s: cannot allocate memory\n", 581 printk(KERN_WARNING "%s: cannot allocate memory\n",
621 __func__); 582 __func__);
@@ -658,6 +619,10 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery,
658 idesc->chip->name, irq_type->name); 619 idesc->chip->name, irq_type->name);
659 idesc->chip = irq_type; 620 idesc->chip = irq_type;
660 } 621 }
622 if (trigger == IOSAPIC_EDGE)
623 __set_irq_handler_unlocked(irq, handle_edge_irq);
624 else
625 __set_irq_handler_unlocked(irq, handle_level_irq);
661 return 0; 626 return 0;
662} 627}
663 628
@@ -1161,10 +1126,3 @@ map_iosapic_to_node(unsigned int gsi_base, int node)
1161 return; 1126 return;
1162} 1127}
1163#endif 1128#endif
1164
1165static int __init iosapic_enable_kmalloc (void)
1166{
1167 iosapic_kmalloc_ok = 1;
1168 return 0;
1169}
1170core_initcall (iosapic_enable_kmalloc);
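
With the bootmem-backed RTE pool gone, an RTE is just a zeroed atomic allocation; kzalloc() covers the kmalloc()+memset() pair the old helper open-coded. A hedged sketch of what the allocation reduces to (demo_alloc_rte() is illustrative, register_intr() above calls kzalloc() directly):

    static struct iosapic_rte_info *demo_alloc_rte(void)
    {
            /* slab is up by the time RTEs are registered, so a plain
             * zeroed GFP_ATOMIC allocation replaces the private pool. */
            return kzalloc(sizeof(struct iosapic_rte_info), GFP_ATOMIC);
    }
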
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index f14c35f9b03a..9a26015c3e50 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -30,6 +30,7 @@
30#include <linux/bitops.h> 30#include <linux/bitops.h>
31#include <linux/irq.h> 31#include <linux/irq.h>
32#include <linux/ratelimit.h> 32#include <linux/ratelimit.h>
33#include <linux/acpi.h>
33 34
34#include <asm/delay.h> 35#include <asm/delay.h>
35#include <asm/intrinsics.h> 36#include <asm/intrinsics.h>
@@ -635,6 +636,7 @@ ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action)
635 desc->chip = &irq_type_ia64_lsapic; 636 desc->chip = &irq_type_ia64_lsapic;
636 if (action) 637 if (action)
637 setup_irq(irq, action); 638 setup_irq(irq, action);
639 set_irq_handler(irq, handle_percpu_irq);
638} 640}
639 641
640void __init 642void __init
@@ -650,6 +652,9 @@ ia64_native_register_ipi(void)
650void __init 652void __init
651init_IRQ (void) 653init_IRQ (void)
652{ 654{
655#ifdef CONFIG_ACPI
656 acpi_boot_init();
657#endif
653 ia64_register_ipi(); 658 ia64_register_ipi();
654 register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL); 659 register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
655#ifdef CONFIG_SMP 660#ifdef CONFIG_SMP
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index a0220dc5ff42..1753f6a30d55 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -2055,25 +2055,6 @@ ia64_mca_init(void)
2055 2055
2056 IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __func__); 2056 IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __func__);
2057 2057
2058 /*
2059 * Configure the CMCI/P vector and handler. Interrupts for CMC are
2060 * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
2061 */
2062 register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
2063 register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
2064 ia64_mca_cmc_vector_setup(); /* Setup vector on BSP */
2065
2066 /* Setup the MCA rendezvous interrupt vector */
2067 register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);
2068
2069 /* Setup the MCA wakeup interrupt vector */
2070 register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);
2071
2072#ifdef CONFIG_ACPI
2073 /* Setup the CPEI/P handler */
2074 register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
2075#endif
2076
2077 /* Initialize the areas set aside by the OS to buffer the 2058 /* Initialize the areas set aside by the OS to buffer the
2078 * platform/processor error states for MCA/INIT/CMC 2059 * platform/processor error states for MCA/INIT/CMC
2079 * handling. 2060 * handling.
@@ -2103,6 +2084,25 @@ ia64_mca_late_init(void)
2103 if (!mca_init) 2084 if (!mca_init)
2104 return 0; 2085 return 0;
2105 2086
2087 /*
2088 * Configure the CMCI/P vector and handler. Interrupts for CMC are
2089 * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
2090 */
2091 register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
2092 register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
2093 ia64_mca_cmc_vector_setup(); /* Setup vector on BSP */
2094
2095 /* Setup the MCA rendezvous interrupt vector */
2096 register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);
2097
2098 /* Setup the MCA wakeup interrupt vector */
2099 register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);
2100
2101#ifdef CONFIG_ACPI
2102 /* Setup the CPEI/P handler */
2103 register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
2104#endif
2105
2106 register_hotcpu_notifier(&mca_cpu_notifier); 2106 register_hotcpu_notifier(&mca_cpu_notifier);
2107 2107
2108 /* Setup the CMCI/P vector and handler */ 2108 /* Setup the CMCI/P vector and handler */
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index fdf6f9d013e5..77597e5ea60a 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -434,7 +434,7 @@ register_info(char *page)
434 unsigned long phys_stacked; 434 unsigned long phys_stacked;
435 pal_hints_u_t hints; 435 pal_hints_u_t hints;
436 unsigned long iregs, dregs; 436 unsigned long iregs, dregs;
437 char *info_type[]={ 437 static const char * const info_type[] = {
438 "Implemented AR(s)", 438 "Implemented AR(s)",
439 "AR(s) with read side-effects", 439 "AR(s) with read side-effects",
440 "Implemented CR(s)", 440 "Implemented CR(s)",
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index cce050e85c73..6b1852f7f972 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -1573,7 +1573,7 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
1573 return -EINVAL; 1573 return -EINVAL;
1574 } 1574 }
1575 1575
1576 ctx = (pfm_context_t *)filp->private_data; 1576 ctx = filp->private_data;
1577 if (ctx == NULL) { 1577 if (ctx == NULL) {
1578 printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current)); 1578 printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current));
1579 return -EINVAL; 1579 return -EINVAL;
@@ -1673,7 +1673,7 @@ pfm_poll(struct file *filp, poll_table * wait)
1673 return 0; 1673 return 0;
1674 } 1674 }
1675 1675
1676 ctx = (pfm_context_t *)filp->private_data; 1676 ctx = filp->private_data;
1677 if (ctx == NULL) { 1677 if (ctx == NULL) {
1678 printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current)); 1678 printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current));
1679 return 0; 1679 return 0;
@@ -1733,7 +1733,7 @@ pfm_fasync(int fd, struct file *filp, int on)
1733 return -EBADF; 1733 return -EBADF;
1734 } 1734 }
1735 1735
1736 ctx = (pfm_context_t *)filp->private_data; 1736 ctx = filp->private_data;
1737 if (ctx == NULL) { 1737 if (ctx == NULL) {
1738 printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current)); 1738 printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current));
1739 return -EBADF; 1739 return -EBADF;
@@ -1841,7 +1841,7 @@ pfm_flush(struct file *filp, fl_owner_t id)
1841 return -EBADF; 1841 return -EBADF;
1842 } 1842 }
1843 1843
1844 ctx = (pfm_context_t *)filp->private_data; 1844 ctx = filp->private_data;
1845 if (ctx == NULL) { 1845 if (ctx == NULL) {
1846 printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current)); 1846 printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current));
1847 return -EBADF; 1847 return -EBADF;
@@ -1984,7 +1984,7 @@ pfm_close(struct inode *inode, struct file *filp)
1984 return -EBADF; 1984 return -EBADF;
1985 } 1985 }
1986 1986
1987 ctx = (pfm_context_t *)filp->private_data; 1987 ctx = filp->private_data;
1988 if (ctx == NULL) { 1988 if (ctx == NULL) {
1989 printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current)); 1989 printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current));
1990 return -EBADF; 1990 return -EBADF;
@@ -4907,7 +4907,7 @@ restart_args:
4907 goto error_args; 4907 goto error_args;
4908 } 4908 }
4909 4909
4910 ctx = (pfm_context_t *)file->private_data; 4910 ctx = file->private_data;
4911 if (unlikely(ctx == NULL)) { 4911 if (unlikely(ctx == NULL)) {
4912 DPRINT(("no context for fd %d\n", fd)); 4912 DPRINT(("no context for fd %d\n", fd));
4913 goto error_args; 4913 goto error_args;
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index aa8b5fa1a8de..45d7543b69cc 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -642,7 +642,7 @@ salinfo_init(void)
642 for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) { 642 for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) {
643 data = salinfo_data + i; 643 data = salinfo_data + i;
644 data->type = i; 644 data->type = i;
645 init_MUTEX(&data->mutex); 645 sema_init(&data->mutex, 1);
646 dir = proc_mkdir(salinfo_log_name[i], salinfo_dir); 646 dir = proc_mkdir(salinfo_log_name[i], salinfo_dir);
647 if (!dir) 647 if (!dir)
648 continue; 648 continue;
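
init_MUTEX(&sem) was shorthand for a semaphore initialised with a count of one; the replacement spells that count out. A minimal equivalent sketch (the demo names are illustrative):

    static struct semaphore demo_sem;

    static void demo_init(void)
    {
            /* what init_MUTEX() expanded to: a binary, initially-free
             * semaphore */
            sema_init(&demo_sem, 1);
    }
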
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 8fb958abf8d0..911cf9749700 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -594,10 +594,6 @@ setup_arch (char **cmdline_p)
594 cpu_init(); /* initialize the bootstrap CPU */ 594 cpu_init(); /* initialize the bootstrap CPU */
595 mmu_context_init(); /* initialize context_id bitmap */ 595 mmu_context_init(); /* initialize context_id bitmap */
596 596
597#ifdef CONFIG_ACPI
598 acpi_boot_init();
599#endif
600
601 paravirt_banner(); 597 paravirt_banner();
602 paravirt_arch_setup_console(cmdline_p); 598 paravirt_arch_setup_console(cmdline_p);
603 599
diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c
index b6c0e63a0bf6..fed6afa2e8a9 100644
--- a/arch/ia64/kernel/unwind.c
+++ b/arch/ia64/kernel/unwind.c
@@ -1204,10 +1204,10 @@ desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word
1204static inline unw_hash_index_t 1204static inline unw_hash_index_t
1205hash (unsigned long ip) 1205hash (unsigned long ip)
1206{ 1206{
1207# define hashmagic 0x9e3779b97f4a7c16UL /* based on (sqrt(5)/2-1)*2^64 */ 1207 /* magic number = ((sqrt(5)-1)/2)*2^64 */
1208 static const unsigned long hashmagic = 0x9e3779b97f4a7c16UL;
1208 1209
1209 return (ip >> 4)*hashmagic >> (64 - UNW_LOG_HASH_SIZE); 1210 return (ip >> 4) * hashmagic >> (64 - UNW_LOG_HASH_SIZE);
1210#undef hashmagic
1211} 1211}
1212 1212
1213static inline long 1213static inline long
@@ -1531,7 +1531,7 @@ build_script (struct unw_frame_info *info)
1531 struct unw_labeled_state *ls, *next; 1531 struct unw_labeled_state *ls, *next;
1532 unsigned long ip = info->ip; 1532 unsigned long ip = info->ip;
1533 struct unw_state_record sr; 1533 struct unw_state_record sr;
1534 struct unw_table *table; 1534 struct unw_table *table, *prev;
1535 struct unw_reg_info *r; 1535 struct unw_reg_info *r;
1536 struct unw_insn insn; 1536 struct unw_insn insn;
1537 u8 *dp, *desc_end; 1537 u8 *dp, *desc_end;
@@ -1560,11 +1560,26 @@ build_script (struct unw_frame_info *info)
1560 1560
1561 STAT(parse_start = ia64_get_itc()); 1561 STAT(parse_start = ia64_get_itc());
1562 1562
1563 prev = NULL;
1563 for (table = unw.tables; table; table = table->next) { 1564 for (table = unw.tables; table; table = table->next) {
1564 if (ip >= table->start && ip < table->end) { 1565 if (ip >= table->start && ip < table->end) {
1566 /*
1567 * Leave the kernel unwind table at the very front,
1568 * lest moving it breaks some assumption elsewhere.
1569 * Otherwise, move the matching table to the second
1570 * position in the list so that traversals can benefit
1571 * from commonality in backtrace paths.
1572 */
1573 if (prev && prev != unw.tables) {
1574 /* unw is safe - we're already spinlocked */
1575 prev->next = table->next;
1576 table->next = unw.tables->next;
1577 unw.tables->next = table;
1578 }
1565 e = lookup(table, ip - table->segment_base); 1579 e = lookup(table, ip - table->segment_base);
1566 break; 1580 break;
1567 } 1581 }
1582 prev = table;
1568 } 1583 }
1569 if (!e) { 1584 if (!e) {
1570 /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */ 1585 /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */
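
The table walk above turns the unwind table list into a self-organising list: the kernel table stays pinned at the head, and whichever module table matched is spliced in directly behind it so repeated backtraces through the same module find it quickly. The same trick in generic form, as a hedged sketch (struct demo_node and demo_lookup() are illustrative):

    struct demo_node {
            struct demo_node *next;
            unsigned long start, end;
    };

    /* The caller is expected to hold whatever lock protects the list,
     * just as build_script() runs under the unwind spinlock above.   */
    static struct demo_node *demo_lookup(struct demo_node *head, unsigned long ip)
    {
            struct demo_node *t, *prev = NULL;

            for (t = head; t; prev = t, t = t->next) {
                    if (ip < t->start || ip >= t->end)
                            continue;
                    if (prev && prev != head) {     /* not head, not 2nd */
                            prev->next = t->next;   /* unlink the match  */
                            t->next = head->next;   /* splice it in just */
                            head->next = t;         /* behind the head   */
                    }
                    return t;
            }
            return NULL;
    }
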
diff --git a/arch/ia64/xen/xen_pv_ops.c b/arch/ia64/xen/xen_pv_ops.c
index 8adc6a14272a..3e8d350fdf39 100644
--- a/arch/ia64/xen/xen_pv_ops.c
+++ b/arch/ia64/xen/xen_pv_ops.c
@@ -1136,7 +1136,6 @@ __initconst = {
1136static void __init 1136static void __init
1137xen_patch_branch(unsigned long tag, unsigned long type) 1137xen_patch_branch(unsigned long tag, unsigned long type)
1138{ 1138{
1139 const unsigned long nelem = 1139 __paravirt_patch_apply_branch(tag, type, xen_branch_target,
1140 sizeof(xen_branch_target) / sizeof(xen_branch_target[0]); 1140 ARRAY_SIZE(xen_branch_target));
1141 __paravirt_patch_apply_branch(tag, type, xen_branch_target, nelem);
1142} 1141}
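
ARRAY_SIZE() is the kernel's type-checked spelling of sizeof(arr)/sizeof(arr[0]); the hunk above just swaps the open-coded form for it and drops the temporary. A minimal sketch of the equivalence (demo_targets and demo_check() are illustrative):

    static unsigned long demo_targets[8];

    static void demo_check(void)
    {
            /* both expressions evaluate to 8 for the array above */
            BUILD_BUG_ON(ARRAY_SIZE(demo_targets) !=
                         sizeof(demo_targets) / sizeof(demo_targets[0]));
    }
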
diff --git a/arch/m32r/include/asm/signal.h b/arch/m32r/include/asm/signal.h
index 9c1acb2b1a92..b2eeb0de1c8d 100644
--- a/arch/m32r/include/asm/signal.h
+++ b/arch/m32r/include/asm/signal.h
@@ -157,7 +157,6 @@ typedef struct sigaltstack {
157#undef __HAVE_ARCH_SIG_BITOPS 157#undef __HAVE_ARCH_SIG_BITOPS
158 158
159struct pt_regs; 159struct pt_regs;
160extern int do_signal(struct pt_regs *regs, sigset_t *oldset);
161 160
162#define ptrace_signal_deliver(regs, cookie) do { } while (0) 161#define ptrace_signal_deliver(regs, cookie) do { } while (0)
163 162
diff --git a/arch/m32r/include/asm/unistd.h b/arch/m32r/include/asm/unistd.h
index 76125777483c..c70545689da8 100644
--- a/arch/m32r/include/asm/unistd.h
+++ b/arch/m32r/include/asm/unistd.h
@@ -351,6 +351,7 @@
351#define __ARCH_WANT_SYS_OLD_GETRLIMIT /*will be unused*/ 351#define __ARCH_WANT_SYS_OLD_GETRLIMIT /*will be unused*/
352#define __ARCH_WANT_SYS_OLDUMOUNT 352#define __ARCH_WANT_SYS_OLDUMOUNT
353#define __ARCH_WANT_SYS_RT_SIGACTION 353#define __ARCH_WANT_SYS_RT_SIGACTION
354#define __ARCH_WANT_SYS_RT_SIGSUSPEND
354 355
355#define __IGNORE_lchown 356#define __IGNORE_lchown
356#define __IGNORE_setuid 357#define __IGNORE_setuid
diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S
index 403869833b98..225412bc227e 100644
--- a/arch/m32r/kernel/entry.S
+++ b/arch/m32r/kernel/entry.S
@@ -235,10 +235,9 @@ work_resched:
235work_notifysig: ; deal with pending signals and 235work_notifysig: ; deal with pending signals and
236 ; notify-resume requests 236 ; notify-resume requests
237 mv r0, sp ; arg1 : struct pt_regs *regs 237 mv r0, sp ; arg1 : struct pt_regs *regs
238 ldi r1, #0 ; arg2 : sigset_t *oldset 238 mv r1, r9 ; arg2 : __u32 thread_info_flags
239 mv r2, r9 ; arg3 : __u32 thread_info_flags
240 bl do_notify_resume 239 bl do_notify_resume
241 bra restore_all 240 bra resume_userspace
242 241
243 ; perform syscall exit tracing 242 ; perform syscall exit tracing
244 ALIGN 243 ALIGN
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index e555091eb97c..0021ade4cba8 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -592,16 +592,17 @@ void user_enable_single_step(struct task_struct *child)
592 592
593 if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0) 593 if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
594 != sizeof(insn)) 594 != sizeof(insn))
595 break; 595 return -EIO;
596 596
597 compute_next_pc(insn, pc, &next_pc, child); 597 compute_next_pc(insn, pc, &next_pc, child);
598 if (next_pc & 0x80000000) 598 if (next_pc & 0x80000000)
599 break; 599 return -EIO;
600 600
601 if (embed_debug_trap(child, next_pc)) 601 if (embed_debug_trap(child, next_pc))
602 break; 602 return -EIO;
603 603
604 invalidate_cache(); 604 invalidate_cache();
605 return 0;
605} 606}
606 607
607void user_disable_single_step(struct task_struct *child) 608void user_disable_single_step(struct task_struct *child)
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c
index 144b0f124fc7..7bbe38645ed5 100644
--- a/arch/m32r/kernel/signal.c
+++ b/arch/m32r/kernel/signal.c
@@ -28,37 +28,6 @@
28 28
29#define DEBUG_SIG 0 29#define DEBUG_SIG 0
30 30
31#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
32
33int do_signal(struct pt_regs *, sigset_t *);
34
35asmlinkage int
36sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize,
37 unsigned long r2, unsigned long r3, unsigned long r4,
38 unsigned long r5, unsigned long r6, struct pt_regs *regs)
39{
40 sigset_t newset;
41
42 /* XXX: Don't preclude handling different sized sigset_t's. */
43 if (sigsetsize != sizeof(sigset_t))
44 return -EINVAL;
45
46 if (copy_from_user(&newset, unewset, sizeof(newset)))
47 return -EFAULT;
48 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
49
50 spin_lock_irq(&current->sighand->siglock);
51 current->saved_sigmask = current->blocked;
52 current->blocked = newset;
53 recalc_sigpending();
54 spin_unlock_irq(&current->sighand->siglock);
55
56 current->state = TASK_INTERRUPTIBLE;
57 schedule();
58 set_thread_flag(TIF_RESTORE_SIGMASK);
59 return -ERESTARTNOHAND;
60}
61
62asmlinkage int 31asmlinkage int
63sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, 32sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
64 unsigned long r2, unsigned long r3, unsigned long r4, 33 unsigned long r2, unsigned long r3, unsigned long r4,
@@ -218,7 +187,7 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
218 return (void __user *)((sp - frame_size) & -8ul); 187 return (void __user *)((sp - frame_size) & -8ul);
219} 188}
220 189
221static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, 190static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
222 sigset_t *set, struct pt_regs *regs) 191 sigset_t *set, struct pt_regs *regs)
223{ 192{
224 struct rt_sigframe __user *frame; 193 struct rt_sigframe __user *frame;
@@ -275,22 +244,34 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
275 current->comm, current->pid, frame, regs->pc); 244 current->comm, current->pid, frame, regs->pc);
276#endif 245#endif
277 246
278 return; 247 return 0;
279 248
280give_sigsegv: 249give_sigsegv:
281 force_sigsegv(sig, current); 250 force_sigsegv(sig, current);
251 return -EFAULT;
252}
253
254static int prev_insn(struct pt_regs *regs)
255{
256 u16 inst;
257 if (get_user(&inst, (u16 __user *)(regs->bpc - 2)))
258 return -EFAULT;
259 if ((inst & 0xfff0) == 0x10f0) /* trap ? */
260 regs->bpc -= 2;
261 else
262 regs->bpc -= 4;
263 regs->syscall_nr = -1;
264 return 0;
282} 265}
283 266
284/* 267/*
285 * OK, we're invoking a handler 268 * OK, we're invoking a handler
286 */ 269 */
287 270
288static void 271static int
289handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, 272handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
290 sigset_t *oldset, struct pt_regs *regs) 273 sigset_t *oldset, struct pt_regs *regs)
291{ 274{
292 unsigned short inst;
293
294 /* Are we from a system call? */ 275 /* Are we from a system call? */
295 if (regs->syscall_nr >= 0) { 276 if (regs->syscall_nr >= 0) {
296 /* If so, check system call restarting.. */ 277 /* If so, check system call restarting.. */
@@ -308,16 +289,14 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
308 /* fallthrough */ 289 /* fallthrough */
309 case -ERESTARTNOINTR: 290 case -ERESTARTNOINTR:
310 regs->r0 = regs->orig_r0; 291 regs->r0 = regs->orig_r0;
311 inst = *(unsigned short *)(regs->bpc - 2); 292 if (prev_insn(regs) < 0)
312 if ((inst & 0xfff0) == 0x10f0) /* trap ? */ 293 return -EFAULT;
313 regs->bpc -= 2;
314 else
315 regs->bpc -= 4;
316 } 294 }
317 } 295 }
318 296
319 /* Set up the stack frame */ 297 /* Set up the stack frame */
320 setup_rt_frame(sig, ka, info, oldset, regs); 298 if (setup_rt_frame(sig, ka, info, oldset, regs))
299 return -EFAULT;
321 300
322 spin_lock_irq(&current->sighand->siglock); 301 spin_lock_irq(&current->sighand->siglock);
323 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); 302 sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -325,6 +304,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
325 sigaddset(&current->blocked,sig); 304 sigaddset(&current->blocked,sig);
326 recalc_sigpending(); 305 recalc_sigpending();
327 spin_unlock_irq(&current->sighand->siglock); 306 spin_unlock_irq(&current->sighand->siglock);
307 return 0;
328} 308}
329 309
330/* 310/*
@@ -332,12 +312,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
332 * want to handle. Thus you cannot kill init even with a SIGKILL even by 312 * want to handle. Thus you cannot kill init even with a SIGKILL even by
333 * mistake. 313 * mistake.
334 */ 314 */
335int do_signal(struct pt_regs *regs, sigset_t *oldset) 315static void do_signal(struct pt_regs *regs)
336{ 316{
337 siginfo_t info; 317 siginfo_t info;
338 int signr; 318 int signr;
339 struct k_sigaction ka; 319 struct k_sigaction ka;
340 unsigned short inst; 320 sigset_t *oldset;
341 321
342 /* 322 /*
343 * We want the common case to go fast, which 323 * We want the common case to go fast, which
@@ -346,12 +326,14 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
346 * if so. 326 * if so.
347 */ 327 */
348 if (!user_mode(regs)) 328 if (!user_mode(regs))
349 return 1; 329 return;
350 330
351 if (try_to_freeze()) 331 if (try_to_freeze())
352 goto no_signal; 332 goto no_signal;
353 333
354 if (!oldset) 334 if (test_thread_flag(TIF_RESTORE_SIGMASK))
335 oldset = &current->saved_sigmask;
336 else
355 oldset = &current->blocked; 337 oldset = &current->blocked;
356 338
357 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 339 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
@@ -363,8 +345,10 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
363 */ 345 */
364 346
365 /* Whee! Actually deliver the signal. */ 347 /* Whee! Actually deliver the signal. */
366 handle_signal(signr, &ka, &info, oldset, regs); 348 if (handle_signal(signr, &ka, &info, oldset, regs) == 0)
367 return 1; 349 clear_thread_flag(TIF_RESTORE_SIGMASK);
350
351 return;
368 } 352 }
369 353
370 no_signal: 354 no_signal:
@@ -375,31 +359,24 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
375 regs->r0 == -ERESTARTSYS || 359 regs->r0 == -ERESTARTSYS ||
376 regs->r0 == -ERESTARTNOINTR) { 360 regs->r0 == -ERESTARTNOINTR) {
377 regs->r0 = regs->orig_r0; 361 regs->r0 = regs->orig_r0;
378 inst = *(unsigned short *)(regs->bpc - 2); 362 prev_insn(regs);
379 if ((inst & 0xfff0) == 0x10f0) /* trap ? */ 363 } else if (regs->r0 == -ERESTART_RESTARTBLOCK){
380 regs->bpc -= 2;
381 else
382 regs->bpc -= 4;
383 }
384 if (regs->r0 == -ERESTART_RESTARTBLOCK){
385 regs->r0 = regs->orig_r0; 364 regs->r0 = regs->orig_r0;
386 regs->r7 = __NR_restart_syscall; 365 regs->r7 = __NR_restart_syscall;
387 inst = *(unsigned short *)(regs->bpc - 2); 366 prev_insn(regs);
388 if ((inst & 0xfff0) == 0x10f0) /* trap ? */
389 regs->bpc -= 2;
390 else
391 regs->bpc -= 4;
392 } 367 }
393 } 368 }
394 return 0; 369 if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
370 clear_thread_flag(TIF_RESTORE_SIGMASK);
371 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
372 }
395} 373}
396 374
397/* 375/*
398 * notification of userspace execution resumption 376 * notification of userspace execution resumption
399 * - triggered by current->work.notify_resume 377 * - triggered by current->work.notify_resume
400 */ 378 */
401void do_notify_resume(struct pt_regs *regs, sigset_t *oldset, 379void do_notify_resume(struct pt_regs *regs, __u32 thread_info_flags)
402 __u32 thread_info_flags)
403{ 380{
404 /* Pending single-step? */ 381 /* Pending single-step? */
405 if (thread_info_flags & _TIF_SINGLESTEP) 382 if (thread_info_flags & _TIF_SINGLESTEP)
@@ -407,7 +384,7 @@ void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
407 384
408 /* deal with pending signal delivery */ 385 /* deal with pending signal delivery */
409 if (thread_info_flags & _TIF_SIGPENDING) 386 if (thread_info_flags & _TIF_SIGPENDING)
410 do_signal(regs,oldset); 387 do_signal(regs);
411 388
412 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 389 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
413 clear_thread_flag(TIF_NOTIFY_RESUME); 390 clear_thread_flag(TIF_NOTIFY_RESUME);
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 444b9f918fdf..7c2a2f7f8dc1 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -8,7 +8,6 @@ mainmenu "Linux Kernel Configuration"
8config MN10300 8config MN10300
9 def_bool y 9 def_bool y
10 select HAVE_OPROFILE 10 select HAVE_OPROFILE
11 select HAVE_ARCH_TRACEHOOK
12 11
13config AM33 12config AM33
14 def_bool y 13 def_bool y
diff --git a/arch/mn10300/Kconfig.debug b/arch/mn10300/Kconfig.debug
index ff80e86b9bd2..ce83c74b3fd7 100644
--- a/arch/mn10300/Kconfig.debug
+++ b/arch/mn10300/Kconfig.debug
@@ -101,7 +101,7 @@ config GDBSTUB_DEBUG_BREAKPOINT
101 101
102choice 102choice
103 prompt "GDB stub port" 103 prompt "GDB stub port"
104 default GDBSTUB_TTYSM0 104 default GDBSTUB_ON_TTYSM0
105 depends on GDBSTUB 105 depends on GDBSTUB
106 help 106 help
107 Select the serial port used for GDB-stub. 107 Select the serial port used for GDB-stub.
diff --git a/arch/mn10300/include/asm/bitops.h b/arch/mn10300/include/asm/bitops.h
index f49ac49e09ad..3f50e9661076 100644
--- a/arch/mn10300/include/asm/bitops.h
+++ b/arch/mn10300/include/asm/bitops.h
@@ -229,9 +229,9 @@ int ffs(int x)
229#include <asm-generic/bitops/hweight.h> 229#include <asm-generic/bitops/hweight.h>
230 230
231#define ext2_set_bit_atomic(lock, nr, addr) \ 231#define ext2_set_bit_atomic(lock, nr, addr) \
232 test_and_set_bit((nr) ^ 0x18, (addr)) 232 test_and_set_bit((nr), (addr))
233#define ext2_clear_bit_atomic(lock, nr, addr) \ 233#define ext2_clear_bit_atomic(lock, nr, addr) \
234 test_and_clear_bit((nr) ^ 0x18, (addr)) 234 test_and_clear_bit((nr), (addr))
235 235
236#include <asm-generic/bitops/ext2-non-atomic.h> 236#include <asm-generic/bitops/ext2-non-atomic.h>
237#include <asm-generic/bitops/minix-le.h> 237#include <asm-generic/bitops/minix-le.h>
diff --git a/arch/mn10300/include/asm/signal.h b/arch/mn10300/include/asm/signal.h
index 7e891fce2370..1865d72a86ff 100644
--- a/arch/mn10300/include/asm/signal.h
+++ b/arch/mn10300/include/asm/signal.h
@@ -78,7 +78,7 @@ typedef unsigned long sigset_t;
78 78
79/* These should not be considered constants from userland. */ 79/* These should not be considered constants from userland. */
80#define SIGRTMIN 32 80#define SIGRTMIN 32
81#define SIGRTMAX (_NSIG-1) 81#define SIGRTMAX _NSIG
82 82
83/* 83/*
84 * SA_FLAGS values: 84 * SA_FLAGS values:
diff --git a/arch/mn10300/kernel/signal.c b/arch/mn10300/kernel/signal.c
index 717db14c2cc3..d4de05ab7864 100644
--- a/arch/mn10300/kernel/signal.c
+++ b/arch/mn10300/kernel/signal.c
@@ -65,10 +65,10 @@ asmlinkage long sys_sigaction(int sig,
65 old_sigset_t mask; 65 old_sigset_t mask;
66 if (verify_area(VERIFY_READ, act, sizeof(*act)) || 66 if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
67 __get_user(new_ka.sa.sa_handler, &act->sa_handler) || 67 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
68 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer)) 68 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
69 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
70 __get_user(mask, &act->sa_mask))
69 return -EFAULT; 71 return -EFAULT;
70 __get_user(new_ka.sa.sa_flags, &act->sa_flags);
71 __get_user(mask, &act->sa_mask);
72 siginitset(&new_ka.sa.sa_mask, mask); 72 siginitset(&new_ka.sa.sa_mask, mask);
73 } 73 }
74 74
@@ -77,10 +77,10 @@ asmlinkage long sys_sigaction(int sig,
77 if (!ret && oact) { 77 if (!ret && oact) {
78 if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) || 78 if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
79 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || 79 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
80 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer)) 80 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
81 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
82 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
81 return -EFAULT; 83 return -EFAULT;
82 __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
83 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
84 } 84 }
85 85
86 return ret; 86 return ret;
@@ -102,6 +102,9 @@ static int restore_sigcontext(struct pt_regs *regs,
102{ 102{
103 unsigned int err = 0; 103 unsigned int err = 0;
104 104
105 /* Always make any pending restarted system calls return -EINTR */
106 current_thread_info()->restart_block.fn = do_no_restart_syscall;
107
105 if (is_using_fpu(current)) 108 if (is_using_fpu(current))
106 fpu_kill_state(current); 109 fpu_kill_state(current);
107 110
@@ -330,8 +333,6 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
330 regs->d0 = sig; 333 regs->d0 = sig;
331 regs->d1 = (unsigned long) &frame->sc; 334 regs->d1 = (unsigned long) &frame->sc;
332 335
333 set_fs(USER_DS);
334
335 /* the tracer may want to single-step inside the handler */ 336 /* the tracer may want to single-step inside the handler */
336 if (test_thread_flag(TIF_SINGLESTEP)) 337 if (test_thread_flag(TIF_SINGLESTEP))
337 ptrace_notify(SIGTRAP); 338 ptrace_notify(SIGTRAP);
@@ -345,7 +346,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
345 return 0; 346 return 0;
346 347
347give_sigsegv: 348give_sigsegv:
348 force_sig(SIGSEGV, current); 349 force_sigsegv(sig, current);
349 return -EFAULT; 350 return -EFAULT;
350} 351}
351 352
@@ -413,8 +414,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
413 regs->d0 = sig; 414 regs->d0 = sig;
414 regs->d1 = (long) &frame->info; 415 regs->d1 = (long) &frame->info;
415 416
416 set_fs(USER_DS);
417
418 /* the tracer may want to single-step inside the handler */ 417 /* the tracer may want to single-step inside the handler */
419 if (test_thread_flag(TIF_SINGLESTEP)) 418 if (test_thread_flag(TIF_SINGLESTEP))
420 ptrace_notify(SIGTRAP); 419 ptrace_notify(SIGTRAP);
@@ -428,10 +427,16 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
428 return 0; 427 return 0;
429 428
430give_sigsegv: 429give_sigsegv:
431 force_sig(SIGSEGV, current); 430 force_sigsegv(sig, current);
432 return -EFAULT; 431 return -EFAULT;
433} 432}
434 433
434static inline void stepback(struct pt_regs *regs)
435{
436 regs->pc -= 2;
437 regs->orig_d0 = -1;
438}
439
435/* 440/*
436 * handle the actual delivery of a signal to userspace 441 * handle the actual delivery of a signal to userspace
437 */ 442 */
@@ -459,7 +464,7 @@ static int handle_signal(int sig,
459 /* fallthrough */ 464 /* fallthrough */
460 case -ERESTARTNOINTR: 465 case -ERESTARTNOINTR:
461 regs->d0 = regs->orig_d0; 466 regs->d0 = regs->orig_d0;
462 regs->pc -= 2; 467 stepback(regs);
463 } 468 }
464 } 469 }
465 470
@@ -527,12 +532,12 @@ static void do_signal(struct pt_regs *regs)
527 case -ERESTARTSYS: 532 case -ERESTARTSYS:
528 case -ERESTARTNOINTR: 533 case -ERESTARTNOINTR:
529 regs->d0 = regs->orig_d0; 534 regs->d0 = regs->orig_d0;
530 regs->pc -= 2; 535 stepback(regs);
531 break; 536 break;
532 537
533 case -ERESTART_RESTARTBLOCK: 538 case -ERESTART_RESTARTBLOCK:
534 regs->d0 = __NR_restart_syscall; 539 regs->d0 = __NR_restart_syscall;
535 regs->pc -= 2; 540 stepback(regs);
536 break; 541 break;
537 } 542 }
538 } 543 }
diff --git a/arch/mn10300/mm/Makefile b/arch/mn10300/mm/Makefile
index 28b9d983db0c..1557277fbc5c 100644
--- a/arch/mn10300/mm/Makefile
+++ b/arch/mn10300/mm/Makefile
@@ -2,13 +2,11 @@
2# Makefile for the MN10300-specific memory management code 2# Makefile for the MN10300-specific memory management code
3# 3#
4 4
5cacheflush-y := cache.o cache-mn10300.o
6cacheflush-$(CONFIG_MN10300_CACHE_WBACK) += cache-flush-mn10300.o
7
8cacheflush-$(CONFIG_MN10300_CACHE_DISABLED) := cache-disabled.o
9
5obj-y := \ 10obj-y := \
6 init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \ 11 init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \
7 misalignment.o dma-alloc.o 12 misalignment.o dma-alloc.o $(cacheflush-y)
8
9ifneq ($(CONFIG_MN10300_CACHE_DISABLED),y)
10obj-y += cache.o cache-mn10300.o
11ifeq ($(CONFIG_MN10300_CACHE_WBACK),y)
12obj-y += cache-flush-mn10300.o
13endif
14endif
diff --git a/arch/mn10300/mm/cache-disabled.c b/arch/mn10300/mm/cache-disabled.c
new file mode 100644
index 000000000000..f669ea42aba6
--- /dev/null
+++ b/arch/mn10300/mm/cache-disabled.c
@@ -0,0 +1,21 @@
1/* Handle the cache being disabled
2 *
3 * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11#include <linux/mm.h>
12
13/*
14 * allow userspace to flush the instruction cache
15 */
16asmlinkage long sys_cacheflush(unsigned long start, unsigned long end)
17{
18 if (end < start)
19 return -EINVAL;
20 return 0;
21}
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index 84f296ca9e63..8f58bdff20d7 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -1506,13 +1506,6 @@ handle_ill:
1506 } 1506 }
1507 STD_ENDPROC(handle_ill) 1507 STD_ENDPROC(handle_ill)
1508 1508
1509 .pushsection .rodata, "a"
1510 .align 8
1511bpt_code:
1512 bpt
1513 ENDPROC(bpt_code)
1514 .popsection
1515
1516/* Various stub interrupt handlers and syscall handlers */ 1509/* Various stub interrupt handlers and syscall handlers */
1517 1510
1518STD_ENTRY_LOCAL(_kernel_double_fault) 1511STD_ENTRY_LOCAL(_kernel_double_fault)
diff --git a/arch/x86/boot/early_serial_console.c b/arch/x86/boot/early_serial_console.c
index 030f4b93e255..5df2869c874b 100644
--- a/arch/x86/boot/early_serial_console.c
+++ b/arch/x86/boot/early_serial_console.c
@@ -58,7 +58,19 @@ static void parse_earlyprintk(void)
58 if (arg[pos] == ',') 58 if (arg[pos] == ',')
59 pos++; 59 pos++;
60 60
61 if (!strncmp(arg, "ttyS", 4)) { 61 /*
62 * make sure we have
63 * "serial,0x3f8,115200"
64 * "serial,ttyS0,115200"
65 * "ttyS0,115200"
66 */
67 if (pos == 7 && !strncmp(arg + pos, "0x", 2)) {
68 port = simple_strtoull(arg + pos, &e, 16);
69 if (port == 0 || arg + pos == e)
70 port = DEFAULT_SERIAL_PORT;
71 else
72 pos = e - arg;
73 } else if (!strncmp(arg + pos, "ttyS", 4)) {
62 static const int bases[] = { 0x3f8, 0x2f8 }; 74 static const int bases[] = { 0x3f8, 0x2f8 };
63 int idx = 0; 75 int idx = 0;
64 76
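
The new branch lets earlyprintk name the UART by I/O port ("serial,0x3f8,115200") as well as by ttyS index ("serial,ttyS0,115200" or plain "ttyS0,115200"). A simplified, hedged sketch of the same decision (demo_parse_port() mirrors the logic, it is not the boot code itself):

    static unsigned long demo_parse_port(const char *arg)
    {
            char *end;

            if (!strncmp(arg, "serial,", 7))        /* the pos == 7 case  */
                    arg += 7;
            if (!strncmp(arg, "0x", 2))             /* "0x3f8,115200"     */
                    return simple_strtoull(arg, &end, 16);
            if (!strncmp(arg, "ttyS", 4))           /* "ttyS0" or "ttyS1" */
                    return (arg[4] == '1') ? 0x2f8 : 0x3f8;
            return 0x3f8;                           /* default to COM1    */
    }
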
diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h
index d2544f1d705d..cb030374b90a 100644
--- a/arch/x86/include/asm/amd_iommu_proto.h
+++ b/arch/x86/include/asm/amd_iommu_proto.h
@@ -38,4 +38,10 @@ static inline void amd_iommu_stats_init(void) { }
38 38
39#endif /* !CONFIG_AMD_IOMMU_STATS */ 39#endif /* !CONFIG_AMD_IOMMU_STATS */
40 40
41static inline bool is_rd890_iommu(struct pci_dev *pdev)
42{
43 return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
44 (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
45}
46
41#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */ 47#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 7014e88bc779..08616180deaf 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -368,6 +368,9 @@ struct amd_iommu {
368 /* capabilities of that IOMMU read from ACPI */ 368 /* capabilities of that IOMMU read from ACPI */
369 u32 cap; 369 u32 cap;
370 370
371 /* flags read from acpi table */
372 u8 acpi_flags;
373
371 /* 374 /*
372 * Capability pointer. There could be more than one IOMMU per PCI 375 * Capability pointer. There could be more than one IOMMU per PCI
373 * device function if there are more than one AMD IOMMU capability 376 * device function if there are more than one AMD IOMMU capability
@@ -411,6 +414,15 @@ struct amd_iommu {
411 414
412 /* default dma_ops domain for that IOMMU */ 415 /* default dma_ops domain for that IOMMU */
413 struct dma_ops_domain *default_dom; 416 struct dma_ops_domain *default_dom;
417
418 /*
419 * This array is required to work around a potential BIOS bug.
420 * The BIOS may miss to restore parts of the PCI configuration
421 * space when the system resumes from S3. The result is that the
422 * IOMMU does not execute commands anymore which leads to system
423 * failure.
424 */
425 u32 cache_cfg[4];
414}; 426};
415 427
416/* 428/*
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 545776efeb16..bafd80defa43 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -309,7 +309,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
309static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr) 309static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
310{ 310{
311 return ((1UL << (nr % BITS_PER_LONG)) & 311 return ((1UL << (nr % BITS_PER_LONG)) &
312 (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0; 312 (addr[nr / BITS_PER_LONG])) != 0;
313} 313}
314 314
315static inline int variable_test_bit(int nr, volatile const unsigned long *addr) 315static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index c6fbb7b430d1..3f76523589af 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -168,6 +168,7 @@
168#define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */ 168#define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */
169#define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */ 169#define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */
170#define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */ 170#define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */
171#define X86_FEATURE_DTS (7*32+ 7) /* Digital Thermal Sensor */
171 172
172/* Virtualization flags: Linux defined, word 8 */ 173/* Virtualization flags: Linux defined, word 8 */
173#define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */ 174#define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 0925676266bd..fedf32a8c3ec 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -11,6 +11,8 @@ ifdef CONFIG_FUNCTION_TRACER
11CFLAGS_REMOVE_tsc.o = -pg 11CFLAGS_REMOVE_tsc.o = -pg
12CFLAGS_REMOVE_rtc.o = -pg 12CFLAGS_REMOVE_rtc.o = -pg
13CFLAGS_REMOVE_paravirt-spinlocks.o = -pg 13CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
14CFLAGS_REMOVE_pvclock.o = -pg
15CFLAGS_REMOVE_kvmclock.o = -pg
14CFLAGS_REMOVE_ftrace.o = -pg 16CFLAGS_REMOVE_ftrace.o = -pg
15CFLAGS_REMOVE_early_printk.o = -pg 17CFLAGS_REMOVE_early_printk.o = -pg
16endif 18endif
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index fa044e1e30a2..679b6450382b 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1953,6 +1953,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
1953 size_t size, 1953 size_t size,
1954 int dir) 1954 int dir)
1955{ 1955{
1956 dma_addr_t flush_addr;
1956 dma_addr_t i, start; 1957 dma_addr_t i, start;
1957 unsigned int pages; 1958 unsigned int pages;
1958 1959
@@ -1960,6 +1961,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
1960 (dma_addr + size > dma_dom->aperture_size)) 1961 (dma_addr + size > dma_dom->aperture_size))
1961 return; 1962 return;
1962 1963
1964 flush_addr = dma_addr;
1963 pages = iommu_num_pages(dma_addr, size, PAGE_SIZE); 1965 pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
1964 dma_addr &= PAGE_MASK; 1966 dma_addr &= PAGE_MASK;
1965 start = dma_addr; 1967 start = dma_addr;
@@ -1974,7 +1976,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
1974 dma_ops_free_addresses(dma_dom, dma_addr, pages); 1976 dma_ops_free_addresses(dma_dom, dma_addr, pages);
1975 1977
1976 if (amd_iommu_unmap_flush || dma_dom->need_flush) { 1978 if (amd_iommu_unmap_flush || dma_dom->need_flush) {
1977 iommu_flush_pages(&dma_dom->domain, dma_addr, size); 1979 iommu_flush_pages(&dma_dom->domain, flush_addr, size);
1978 dma_dom->need_flush = false; 1980 dma_dom->need_flush = false;
1979 } 1981 }
1980} 1982}
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 3cc63e2b8dd4..5a170cbbbed8 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -632,6 +632,13 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu)
632 iommu->last_device = calc_devid(MMIO_GET_BUS(range), 632 iommu->last_device = calc_devid(MMIO_GET_BUS(range),
633 MMIO_GET_LD(range)); 633 MMIO_GET_LD(range));
634 iommu->evt_msi_num = MMIO_MSI_NUM(misc); 634 iommu->evt_msi_num = MMIO_MSI_NUM(misc);
635
636 if (is_rd890_iommu(iommu->dev)) {
637 pci_read_config_dword(iommu->dev, 0xf0, &iommu->cache_cfg[0]);
638 pci_read_config_dword(iommu->dev, 0xf4, &iommu->cache_cfg[1]);
639 pci_read_config_dword(iommu->dev, 0xf8, &iommu->cache_cfg[2]);
640 pci_read_config_dword(iommu->dev, 0xfc, &iommu->cache_cfg[3]);
641 }
635} 642}
636 643
637/* 644/*
@@ -649,29 +656,9 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
649 struct ivhd_entry *e; 656 struct ivhd_entry *e;
650 657
651 /* 658 /*
652 * First set the recommended feature enable bits from ACPI 659 * First save the recommended feature enable bits from ACPI
653 * into the IOMMU control registers
654 */ 660 */
655 h->flags & IVHD_FLAG_HT_TUN_EN_MASK ? 661 iommu->acpi_flags = h->flags;
656 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
657 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
658
659 h->flags & IVHD_FLAG_PASSPW_EN_MASK ?
660 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
661 iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
662
663 h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
664 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
665 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
666
667 h->flags & IVHD_FLAG_ISOC_EN_MASK ?
668 iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
669 iommu_feature_disable(iommu, CONTROL_ISOC_EN);
670
671 /*
672 * make IOMMU memory accesses cache coherent
673 */
674 iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
675 662
676 /* 663 /*
677 * Done. Now parse the device entries 664 * Done. Now parse the device entries
@@ -1116,6 +1103,40 @@ static void init_device_table(void)
1116 } 1103 }
1117} 1104}
1118 1105
1106static void iommu_init_flags(struct amd_iommu *iommu)
1107{
1108 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
1109 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
1110 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
1111
1112 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
1113 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
1114 iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
1115
1116 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
1117 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
1118 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
1119
1120 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
1121 iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
1122 iommu_feature_disable(iommu, CONTROL_ISOC_EN);
1123
1124 /*
1125 * make IOMMU memory accesses cache coherent
1126 */
1127 iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
1128}
1129
1130static void iommu_apply_quirks(struct amd_iommu *iommu)
1131{
1132 if (is_rd890_iommu(iommu->dev)) {
1133 pci_write_config_dword(iommu->dev, 0xf0, iommu->cache_cfg[0]);
1134 pci_write_config_dword(iommu->dev, 0xf4, iommu->cache_cfg[1]);
1135 pci_write_config_dword(iommu->dev, 0xf8, iommu->cache_cfg[2]);
1136 pci_write_config_dword(iommu->dev, 0xfc, iommu->cache_cfg[3]);
1137 }
1138}
1139
1119/* 1140/*
1120 * This function finally enables all IOMMUs found in the system after 1141 * This function finally enables all IOMMUs found in the system after
1121 * they have been initialized 1142 * they have been initialized
@@ -1126,6 +1147,8 @@ static void enable_iommus(void)
1126 1147
1127 for_each_iommu(iommu) { 1148 for_each_iommu(iommu) {
1128 iommu_disable(iommu); 1149 iommu_disable(iommu);
1150 iommu_apply_quirks(iommu);
1151 iommu_init_flags(iommu);
1129 iommu_set_device_table(iommu); 1152 iommu_set_device_table(iommu);
1130 iommu_enable_command_buffer(iommu); 1153 iommu_enable_command_buffer(iommu);
1131 iommu_enable_event_buffer(iommu); 1154 iommu_enable_event_buffer(iommu);
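
The RD890 handling is a save-and-replay quirk: the four config dwords at 0xf0-0xfc are captured once at probe time (init_iommu_from_pci) and written back every time the IOMMU is re-enabled, so a BIOS that fails to restore them across S3 does no harm. The same pattern in generic form, as a hedged sketch (struct demo_dev_state is illustrative):

    struct demo_dev_state {
            struct pci_dev *pdev;
            u32 saved[4];                   /* config dwords 0xf0..0xfc */
    };

    static void demo_save_config(struct demo_dev_state *s)
    {
            int i;

            for (i = 0; i < 4; i++)
                    pci_read_config_dword(s->pdev, 0xf0 + 4 * i, &s->saved[i]);
    }

    static void demo_restore_config(struct demo_dev_state *s)
    {
            int i;

            /* replayed on every (re)enable, so a resume path that lost
             * the registers gets them back before the device is used. */
            for (i = 0; i < 4; i++)
                    pci_write_config_dword(s->pdev, 0xf0 + 4 * i, s->saved[i]);
    }
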
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 3efdf2870a35..03a5b0385ad6 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -102,6 +102,7 @@ struct cpu_hw_events {
102 */ 102 */
103 struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */ 103 struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
104 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; 104 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
105 unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
105 int enabled; 106 int enabled;
106 107
107 int n_events; 108 int n_events;
@@ -1010,6 +1011,7 @@ static int x86_pmu_start(struct perf_event *event)
1010 x86_perf_event_set_period(event); 1011 x86_perf_event_set_period(event);
1011 cpuc->events[idx] = event; 1012 cpuc->events[idx] = event;
1012 __set_bit(idx, cpuc->active_mask); 1013 __set_bit(idx, cpuc->active_mask);
1014 __set_bit(idx, cpuc->running);
1013 x86_pmu.enable(event); 1015 x86_pmu.enable(event);
1014 perf_event_update_userpage(event); 1016 perf_event_update_userpage(event);
1015 1017
@@ -1141,8 +1143,16 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
1141 cpuc = &__get_cpu_var(cpu_hw_events); 1143 cpuc = &__get_cpu_var(cpu_hw_events);
1142 1144
1143 for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1145 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1144 if (!test_bit(idx, cpuc->active_mask)) 1146 if (!test_bit(idx, cpuc->active_mask)) {
1147 /*
1148 * Though we deactivated the counter some cpus
1149 * might still deliver spurious interrupts still
1150 * in flight. Catch them:
1151 */
1152 if (__test_and_clear_bit(idx, cpuc->running))
1153 handled++;
1145 continue; 1154 continue;
1155 }
1146 1156
1147 event = cpuc->events[idx]; 1157 event = cpuc->events[idx];
1148 hwc = &event->hw; 1158 hwc = &event->hw;
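
The new 'running' bitmap closes a small race: a counter can be stopped while a PMI it raised is still in flight, and without the extra state that late interrupt would be reported as unknown. The bit is set in x86_pmu_start() and consumed only in the handler, so at most one such interrupt per counter is claimed. A hedged sketch of the check in isolation (demo_claim_spurious() is illustrative, not part of the patch):

    static int demo_claim_spurious(unsigned long *active, unsigned long *running,
                                   int idx)
    {
            if (test_bit(idx, active))
                    return 0;       /* counter is live, handled normally */
            /* counter already stopped: claim at most one late interrupt */
            return __test_and_clear_bit(idx, running);
    }
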
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 34b4dad6f0b8..d49079515122 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -31,6 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
31 const struct cpuid_bit *cb; 31 const struct cpuid_bit *cb;
32 32
33 static const struct cpuid_bit __cpuinitconst cpuid_bits[] = { 33 static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
34 { X86_FEATURE_DTS, CR_EAX, 0, 0x00000006, 0 },
34 { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 }, 35 { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 },
35 { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 }, 36 { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 },
36 { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 }, 37 { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 },
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 3b0cd4249671..eafc94f68d79 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -362,6 +362,18 @@ static int attempt_merge(struct request_queue *q, struct request *req,
362 return 0; 362 return 0;
363 363
364 /* 364 /*
365 * Don't merge file system requests and discard requests
366 */
367 if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD))
368 return 0;
369
370 /*
371 * Don't merge discard requests and secure discard requests
372 */
373 if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE))
374 return 0;
375
376 /*
365 * not contiguous 377 * not contiguous
366 */ 378 */
367 if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next)) 379 if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
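
Both new tests enforce the same rule: two requests may only be merged when they agree on a given cmd_flags bit, so discard and secure-discard traffic never gets mixed with ordinary file-system I/O. A hedged helper form of that rule (demo_flags_match() is not part of the patch):

    static inline bool demo_flags_match(struct request *a, struct request *b,
                                        unsigned int flag)
    {
            return (a->cmd_flags & flag) == (b->cmd_flags & flag);
    }

    /* usage sketch:
     *      if (!demo_flags_match(req, next, REQ_DISCARD) ||
     *          !demo_flags_match(req, next, REQ_SECURE))
     *              return 0;
     */
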
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index ff1c945fba98..99d0e5a51148 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -90,6 +90,10 @@ static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
90static int ahci_pci_device_resume(struct pci_dev *pdev); 90static int ahci_pci_device_resume(struct pci_dev *pdev);
91#endif 91#endif
92 92
93static struct scsi_host_template ahci_sht = {
94 AHCI_SHT("ahci"),
95};
96
93static struct ata_port_operations ahci_vt8251_ops = { 97static struct ata_port_operations ahci_vt8251_ops = {
94 .inherits = &ahci_ops, 98 .inherits = &ahci_ops,
95 .hardreset = ahci_vt8251_hardreset, 99 .hardreset = ahci_vt8251_hardreset,
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 474427b6f99f..e5fdeebf9ef0 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -298,7 +298,17 @@ struct ahci_host_priv {
298 298
299extern int ahci_ignore_sss; 299extern int ahci_ignore_sss;
300 300
301extern struct scsi_host_template ahci_sht; 301extern struct device_attribute *ahci_shost_attrs[];
302extern struct device_attribute *ahci_sdev_attrs[];
303
304#define AHCI_SHT(drv_name) \
305 ATA_NCQ_SHT(drv_name), \
306 .can_queue = AHCI_MAX_CMDS - 1, \
307 .sg_tablesize = AHCI_MAX_SG, \
308 .dma_boundary = AHCI_DMA_BOUNDARY, \
309 .shost_attrs = ahci_shost_attrs, \
310 .sdev_attrs = ahci_sdev_attrs
311
302extern struct ata_port_operations ahci_ops; 312extern struct ata_port_operations ahci_ops;
303 313
304void ahci_save_initial_config(struct device *dev, 314void ahci_save_initial_config(struct device *dev,
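
Turning the shared template into the AHCI_SHT() macro lets each AHCI front-end stamp out its own scsi_host_template under its own driver name, as the ahci and ahci_platform diffs in this series do. A hypothetical additional front-end would follow the same pattern:

    static struct scsi_host_template demo_ahci_sht = {
            AHCI_SHT("demo_ahci"),          /* hypothetical driver name */
    };
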
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 4e97f33cca44..84b643270e7a 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -23,6 +23,10 @@
23#include <linux/ahci_platform.h> 23#include <linux/ahci_platform.h>
24#include "ahci.h" 24#include "ahci.h"
25 25
26static struct scsi_host_template ahci_platform_sht = {
27 AHCI_SHT("ahci_platform"),
28};
29
26static int __init ahci_probe(struct platform_device *pdev) 30static int __init ahci_probe(struct platform_device *pdev)
27{ 31{
28 struct device *dev = &pdev->dev; 32 struct device *dev = &pdev->dev;
@@ -145,7 +149,7 @@ static int __init ahci_probe(struct platform_device *pdev)
145 ahci_print_info(host, "platform"); 149 ahci_print_info(host, "platform");
146 150
147 rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED, 151 rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
148 &ahci_sht); 152 &ahci_platform_sht);
149 if (rc) 153 if (rc)
150 goto err0; 154 goto err0;
151 155
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 68dc6785472f..8eea309ea212 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -121,7 +121,7 @@ static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
121static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO, 121static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
122 ahci_read_em_buffer, ahci_store_em_buffer); 122 ahci_read_em_buffer, ahci_store_em_buffer);
123 123
124static struct device_attribute *ahci_shost_attrs[] = { 124struct device_attribute *ahci_shost_attrs[] = {
125 &dev_attr_link_power_management_policy, 125 &dev_attr_link_power_management_policy,
126 &dev_attr_em_message_type, 126 &dev_attr_em_message_type,
127 &dev_attr_em_message, 127 &dev_attr_em_message,
@@ -132,22 +132,14 @@ static struct device_attribute *ahci_shost_attrs[] = {
132 &dev_attr_em_buffer, 132 &dev_attr_em_buffer,
133 NULL 133 NULL
134}; 134};
135EXPORT_SYMBOL_GPL(ahci_shost_attrs);
135 136
136static struct device_attribute *ahci_sdev_attrs[] = { 137struct device_attribute *ahci_sdev_attrs[] = {
137 &dev_attr_sw_activity, 138 &dev_attr_sw_activity,
138 &dev_attr_unload_heads, 139 &dev_attr_unload_heads,
139 NULL 140 NULL
140}; 141};
141 142EXPORT_SYMBOL_GPL(ahci_sdev_attrs);
142struct scsi_host_template ahci_sht = {
143 ATA_NCQ_SHT("ahci"),
144 .can_queue = AHCI_MAX_CMDS - 1,
145 .sg_tablesize = AHCI_MAX_SG,
146 .dma_boundary = AHCI_DMA_BOUNDARY,
147 .shost_attrs = ahci_shost_attrs,
148 .sdev_attrs = ahci_sdev_attrs,
149};
150EXPORT_SYMBOL_GPL(ahci_sht);
151 143
152struct ata_port_operations ahci_ops = { 144struct ata_port_operations ahci_ops = {
153 .inherits = &sata_pmp_port_ops, 145 .inherits = &sata_pmp_port_ops,
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index b1cbeb59bb76..37a2bb595076 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2369,7 +2369,7 @@ static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
2369 pkt_shrink_pktlist(pd); 2369 pkt_shrink_pktlist(pd);
2370} 2370}
2371 2371
2372static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor) 2372static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
2373{ 2373{
2374 if (dev_minor >= MAX_WRITERS) 2374 if (dev_minor >= MAX_WRITERS)
2375 return NULL; 2375 return NULL;
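
Making dev_minor unsigned means the single ">= MAX_WRITERS" bound also rejects values that would have been negative as a signed int, instead of letting them index the device table with a negative offset. A short sketch of the difference (MAX_WRITERS value here is hypothetical):

#include <stdio.h>
#include <stdbool.h>

#define MAX_WRITERS 8 /* hypothetical table size */

/* signed check: a negative minor slips past the bound */
static bool accepted_signed(int minor)
{
        return !(minor >= MAX_WRITERS);
}

/* unsigned check: -1 converts to a huge value and is rejected */
static bool accepted_unsigned(unsigned int minor)
{
        return !(minor >= MAX_WRITERS);
}

int main(void)
{
        printf("signed   -1 accepted: %d\n", accepted_signed(-1));   /* 1: bad */
        printf("unsigned -1 accepted: %d\n", accepted_unsigned(-1)); /* 0: rejected */
        return 0;
}
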
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 86c5ae9fde34..411d5bf50fc4 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -162,7 +162,7 @@ static int mv_is_err_intr(u32 intr_cause)
162 162
163static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) 163static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
164{ 164{
165 u32 val = (1 << (1 + (chan->idx * 16))); 165 u32 val = ~(1 << (chan->idx * 16));
166 dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val); 166 dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
167 __raw_writel(val, XOR_INTR_CAUSE(chan)); 167 __raw_writel(val, XOR_INTR_CAUSE(chan));
168} 168}
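
The mv_xor change replaces a single shifted bit (at the wrong position) with the complement of the channel's bit as the value written to the interrupt-cause register. A tiny sketch that just prints the 32-bit values produced by the old and new expressions for channel indices 0 and 1, so the difference is visible:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        for (unsigned int idx = 0; idx < 2; idx++) {
                uint32_t old_val = (1u << (1 + (idx * 16))); /* old expression */
                uint32_t new_val = ~(1u << (idx * 16));      /* new expression */
                printf("idx=%u old=0x%08x new=0x%08x\n", idx, old_val, new_val);
        }
        return 0;
}
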
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 3630308e7b81..6b21e25f7a84 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -339,6 +339,9 @@ static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
339{ 339{
340 int status; 340 int status;
341 341
342 if (mci->op_state != OP_RUNNING_POLL)
343 return;
344
342 status = cancel_delayed_work(&mci->work); 345 status = cancel_delayed_work(&mci->work);
343 if (status == 0) { 346 if (status == 0) {
344 debugf0("%s() not canceled, flush the queue\n", 347 debugf0("%s() not canceled, flush the queue\n",
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c
index 55d03ed05000..529a0dbe9fc6 100644
--- a/drivers/gpu/drm/drm_buffer.c
+++ b/drivers/gpu/drm/drm_buffer.c
@@ -98,8 +98,8 @@ EXPORT_SYMBOL(drm_buffer_alloc);
98 * user_data: A pointer the data that is copied to the buffer. 98 * user_data: A pointer the data that is copied to the buffer.
99 * size: The Number of bytes to copy. 99 * size: The Number of bytes to copy.
100 */ 100 */
101extern int drm_buffer_copy_from_user(struct drm_buffer *buf, 101int drm_buffer_copy_from_user(struct drm_buffer *buf,
102 void __user *user_data, int size) 102 void __user *user_data, int size)
103{ 103{
104 int nr_pages = size / PAGE_SIZE + 1; 104 int nr_pages = size / PAGE_SIZE + 1;
105 int idx; 105 int idx;
@@ -163,7 +163,7 @@ void *drm_buffer_read_object(struct drm_buffer *buf,
163{ 163{
164 int idx = drm_buffer_index(buf); 164 int idx = drm_buffer_index(buf);
165 int page = drm_buffer_page(buf); 165 int page = drm_buffer_page(buf);
166 void *obj = 0; 166 void *obj = NULL;
167 167
168 if (idx + objsize <= PAGE_SIZE) { 168 if (idx + objsize <= PAGE_SIZE) {
169 obj = &buf->data[page][idx]; 169 obj = &buf->data[page][idx];
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index cf4ffbee1c00..bced9b25c71e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2400,7 +2400,7 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
2400 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); 2400 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
2401 break; 2401 break;
2402 case 3: 2402 case 3:
2403 if (obj_priv->fence_reg > 8) 2403 if (obj_priv->fence_reg >= 8)
2404 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4; 2404 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
2405 else 2405 else
2406 case 2: 2406 case 2:
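
The ">" to ">=" change matters because fence register 8 is the first one addressed relative to FENCE_REG_945_8; with ">", register 8 was computed against the wrong base. A simplified sketch with hypothetical base offsets (the fall-through switch in the driver is reduced to a plain if/else here) showing which bank register 8 selects under each test:

#include <stdio.h>

/* hypothetical register offsets, only for illustration */
#define FENCE_REG_830_0 0x2000
#define FENCE_REG_945_8 0x3000

static int fence_offset(int reg, int use_ge)
{
        /* use_ge selects the fixed ">= 8" test vs the old "> 8" test */
        if (use_ge ? (reg >= 8) : (reg > 8))
                return FENCE_REG_945_8 + (reg - 8) * 4;
        return FENCE_REG_830_0 + reg * 4;
}

int main(void)
{
        /* register 8 is the interesting case */
        printf("reg 8, old test: 0x%x\n", fence_offset(8, 0)); /* 0x2020: wrong bank */
        printf("reg 8, new test: 0x%x\n", fence_offset(8, 1)); /* 0x3000: correct bank */
        return 0;
}
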
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e8e902d614ed..ee73e428a84a 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2170,8 +2170,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
2170 return true; 2170 return true;
2171 2171
2172err: 2172err:
2173 intel_sdvo_destroy_enhance_property(connector); 2173 intel_sdvo_destroy(connector);
2174 kfree(intel_sdvo_connector);
2175 return false; 2174 return false;
2176} 2175}
2177 2176
@@ -2243,8 +2242,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
2243 return true; 2242 return true;
2244 2243
2245err: 2244err:
2246 intel_sdvo_destroy_enhance_property(connector); 2245 intel_sdvo_destroy(connector);
2247 kfree(intel_sdvo_connector);
2248 return false; 2246 return false;
2249} 2247}
2250 2248
@@ -2522,11 +2520,10 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
2522 uint16_t response; 2520 uint16_t response;
2523 } enhancements; 2521 } enhancements;
2524 2522
2525 if (!intel_sdvo_get_value(intel_sdvo, 2523 enhancements.response = 0;
2526 SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, 2524 intel_sdvo_get_value(intel_sdvo,
2527 &enhancements, sizeof(enhancements))) 2525 SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
2528 return false; 2526 &enhancements, sizeof(enhancements));
2529
2530 if (enhancements.response == 0) { 2527 if (enhancements.response == 0) {
2531 DRM_DEBUG_KMS("No enhancement is supported\n"); 2528 DRM_DEBUG_KMS("No enhancement is supported\n");
2532 return true; 2529 return true;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 87186a4bbf03..fc737037f751 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -558,8 +558,10 @@ nouveau_connector_get_modes(struct drm_connector *connector)
558 if (nv_encoder->dcb->type == OUTPUT_LVDS && 558 if (nv_encoder->dcb->type == OUTPUT_LVDS &&
559 (nv_encoder->dcb->lvdsconf.use_straps_for_mode || 559 (nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
560 dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) { 560 dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) {
561 nv_connector->native_mode = drm_mode_create(dev); 561 struct drm_display_mode mode;
562 nouveau_bios_fp_mode(dev, nv_connector->native_mode); 562
563 nouveau_bios_fp_mode(dev, &mode);
564 nv_connector->native_mode = drm_mode_duplicate(dev, &mode);
563 } 565 }
564 566
565 /* Find the native mode if this is a digital panel, if we didn't 567 /* Find the native mode if this is a digital panel, if we didn't
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 1bc72c3190a9..fe359a239df3 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -4999,7 +4999,7 @@ typedef struct _SW_I2C_IO_DATA_PARAMETERS
4999#define SW_I2C_CNTL_WRITE1BIT 6 4999#define SW_I2C_CNTL_WRITE1BIT 6
5000 5000
5001//==============================VESA definition Portion=============================== 5001//==============================VESA definition Portion===============================
5002#define VESA_OEM_PRODUCT_REV '01.00' 5002#define VESA_OEM_PRODUCT_REV "01.00"
5003#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support 5003#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support
5004#define VESA_MODE_WIN_ATTRIBUTE 7 5004#define VESA_MODE_WIN_ATTRIBUTE 7
5005#define VESA_WIN_SIZE 64 5005#define VESA_WIN_SIZE 64
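
The atombios.h fix swaps a multi-character character constant for a string literal. A single-quoted token with several characters is an implementation-defined int (and typically draws a -Wmultichar warning; five characters usually don't even fit), not a NUL-terminated string. A tiny demonstration using a four-character constant for illustration:

#include <stdio.h>

int main(void)
{
        /* multi-character constant: an implementation-defined int value */
        int rev_char = '01.0';          /* compilers typically warn: -Wmultichar */
        /* string literal: an array of char ending in '\0' */
        const char rev_str[] = "01.00";

        printf("as char constant: 0x%x (size %zu)\n",
               (unsigned int)rev_char, sizeof('01.0'));
        printf("as string:        %s (size %zu)\n", rev_str, sizeof(rev_str));
        return 0;
}
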
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index afc18d87fdca..ddc3adea1dda 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2729,7 +2729,7 @@ int r600_ib_test(struct radeon_device *rdev)
2729 if (i < rdev->usec_timeout) { 2729 if (i < rdev->usec_timeout) {
2730 DRM_INFO("ib test succeeded in %u usecs\n", i); 2730 DRM_INFO("ib test succeeded in %u usecs\n", i);
2731 } else { 2731 } else {
2732 DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n", 2732 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
2733 scratch, tmp); 2733 scratch, tmp);
2734 r = -EINVAL; 2734 r = -EINVAL;
2735 } 2735 }
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 5eee3c41d124..8fbbe1c6ebbd 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -203,6 +203,10 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
203 */ 203 */
204int radeon_driver_firstopen_kms(struct drm_device *dev) 204int radeon_driver_firstopen_kms(struct drm_device *dev)
205{ 205{
206 struct radeon_device *rdev = dev->dev_private;
207
208 if (rdev->powered_down)
209 return -EINVAL;
206 return 0; 210 return 0;
207} 211}
208 212
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 7cffb3e04232..3451a82adba7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -351,6 +351,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
351 INIT_LIST_HEAD(&fbo->lru); 351 INIT_LIST_HEAD(&fbo->lru);
352 INIT_LIST_HEAD(&fbo->swap); 352 INIT_LIST_HEAD(&fbo->swap);
353 fbo->vm_node = NULL; 353 fbo->vm_node = NULL;
354 atomic_set(&fbo->cpu_writers, 0);
354 355
355 fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); 356 fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
356 kref_init(&fbo->list_kref); 357 kref_init(&fbo->list_kref);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index ca904799f018..b1e02fffd3cc 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -69,7 +69,7 @@ struct ttm_page_pool {
69 spinlock_t lock; 69 spinlock_t lock;
70 bool fill_lock; 70 bool fill_lock;
71 struct list_head list; 71 struct list_head list;
72 int gfp_flags; 72 gfp_t gfp_flags;
73 unsigned npages; 73 unsigned npages;
74 char *name; 74 char *name;
75 unsigned long nfrees; 75 unsigned long nfrees;
@@ -475,7 +475,7 @@ static void ttm_handle_caching_state_failure(struct list_head *pages,
475 * This function is reentrant if caller updates count depending on number of 475 * This function is reentrant if caller updates count depending on number of
476 * pages returned in pages array. 476 * pages returned in pages array.
477 */ 477 */
478static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags, 478static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
479 int ttm_flags, enum ttm_caching_state cstate, unsigned count) 479 int ttm_flags, enum ttm_caching_state cstate, unsigned count)
480{ 480{
481 struct page **caching_array; 481 struct page **caching_array;
@@ -666,7 +666,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
666{ 666{
667 struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); 667 struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
668 struct page *p = NULL; 668 struct page *p = NULL;
669 int gfp_flags = GFP_USER; 669 gfp_t gfp_flags = GFP_USER;
670 int r; 670 int r;
671 671
672 /* set zero flag for page allocation if required */ 672 /* set zero flag for page allocation if required */
@@ -818,7 +818,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
818 return 0; 818 return 0;
819} 819}
820 820
821void ttm_page_alloc_fini() 821void ttm_page_alloc_fini(void)
822{ 822{
823 int i; 823 int i;
824 824
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index b87569e96b16..f366f968155a 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -598,7 +598,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
598 pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count); 598 pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count);
599} 599}
600 600
601void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace) 601static void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
602{ 602{
603 struct vga_device *vgadev; 603 struct vga_device *vgadev;
604 unsigned long flags; 604 unsigned long flags;
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 4d4d09bdec0a..97499d00615a 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -409,7 +409,7 @@ config SENSORS_CORETEMP
409 409
410config SENSORS_PKGTEMP 410config SENSORS_PKGTEMP
411 tristate "Intel processor package temperature sensor" 411 tristate "Intel processor package temperature sensor"
412 depends on X86 && PCI && EXPERIMENTAL 412 depends on X86 && EXPERIMENTAL
413 help 413 help
414 If you say yes here you get support for the package level temperature 414 If you say yes here you get support for the package level temperature
415 sensor inside your CPU. Check documentation/driver for details. 415 sensor inside your CPU. Check documentation/driver for details.
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index de8111114f46..a23b17a78ace 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -36,6 +36,7 @@
36#include <linux/pci.h> 36#include <linux/pci.h>
37#include <asm/msr.h> 37#include <asm/msr.h>
38#include <asm/processor.h> 38#include <asm/processor.h>
39#include <asm/smp.h>
39 40
40#define DRVNAME "coretemp" 41#define DRVNAME "coretemp"
41 42
@@ -423,9 +424,18 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
423 int err; 424 int err;
424 struct platform_device *pdev; 425 struct platform_device *pdev;
425 struct pdev_entry *pdev_entry; 426 struct pdev_entry *pdev_entry;
426#ifdef CONFIG_SMP
427 struct cpuinfo_x86 *c = &cpu_data(cpu); 427 struct cpuinfo_x86 *c = &cpu_data(cpu);
428#endif 428
429 /*
430 * CPUID.06H.EAX[0] indicates whether the CPU has thermal
431 * sensors. We check this bit only, all the early CPUs
432 * without thermal sensors will be filtered out.
433 */
434 if (!cpu_has(c, X86_FEATURE_DTS)) {
435 printk(KERN_INFO DRVNAME ": CPU (model=0x%x)"
436 " has no thermal sensor.\n", c->x86_model);
437 return 0;
438 }
429 439
430 mutex_lock(&pdev_list_mutex); 440 mutex_lock(&pdev_list_mutex);
431 441
@@ -482,14 +492,22 @@ exit:
482 492
483static void coretemp_device_remove(unsigned int cpu) 493static void coretemp_device_remove(unsigned int cpu)
484{ 494{
485 struct pdev_entry *p, *n; 495 struct pdev_entry *p;
496 unsigned int i;
497
486 mutex_lock(&pdev_list_mutex); 498 mutex_lock(&pdev_list_mutex);
487 list_for_each_entry_safe(p, n, &pdev_list, list) { 499 list_for_each_entry(p, &pdev_list, list) {
488 if (p->cpu == cpu) { 500 if (p->cpu != cpu)
489 platform_device_unregister(p->pdev); 501 continue;
490 list_del(&p->list); 502
491 kfree(p); 503 platform_device_unregister(p->pdev);
492 } 504 list_del(&p->list);
505 mutex_unlock(&pdev_list_mutex);
506 kfree(p);
507 for_each_cpu(i, cpu_sibling_mask(cpu))
508 if (i != cpu && !coretemp_device_add(i))
509 break;
510 return;
493 } 511 }
494 mutex_unlock(&pdev_list_mutex); 512 mutex_unlock(&pdev_list_mutex);
495} 513}
@@ -527,30 +545,21 @@ static int __init coretemp_init(void)
527 if (err) 545 if (err)
528 goto exit; 546 goto exit;
529 547
530 for_each_online_cpu(i) { 548 for_each_online_cpu(i)
531 struct cpuinfo_x86 *c = &cpu_data(i); 549 coretemp_device_add(i);
532 /* 550
533 * CPUID.06H.EAX[0] indicates whether the CPU has thermal 551#ifndef CONFIG_HOTPLUG_CPU
534 * sensors. We check this bit only, all the early CPUs
535 * without thermal sensors will be filtered out.
536 */
537 if (c->cpuid_level >= 6 && (cpuid_eax(0x06) & 0x01))
538 coretemp_device_add(i);
539 else {
540 printk(KERN_INFO DRVNAME ": CPU (model=0x%x)"
541 " has no thermal sensor.\n", c->x86_model);
542 }
543 }
544 if (list_empty(&pdev_list)) { 552 if (list_empty(&pdev_list)) {
545 err = -ENODEV; 553 err = -ENODEV;
546 goto exit_driver_unreg; 554 goto exit_driver_unreg;
547 } 555 }
556#endif
548 557
549 register_hotcpu_notifier(&coretemp_cpu_notifier); 558 register_hotcpu_notifier(&coretemp_cpu_notifier);
550 return 0; 559 return 0;
551 560
552exit_driver_unreg:
553#ifndef CONFIG_HOTPLUG_CPU 561#ifndef CONFIG_HOTPLUG_CPU
562exit_driver_unreg:
554 platform_driver_unregister(&coretemp_driver); 563 platform_driver_unregister(&coretemp_driver);
555#endif 564#endif
556exit: 565exit:
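
The coretemp patch moves the thermal-sensor test into coretemp_device_add() as cpu_has(c, X86_FEATURE_DTS), which corresponds to CPUID leaf 6, EAX bit 0 (digital thermal sensor). A userspace sketch of the same capability check, assuming an x86 machine and the GCC/Clang cpuid.h helper:

#include <stdio.h>
#include <cpuid.h> /* GCC/Clang helper for the CPUID instruction (x86 only) */

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* leaf 6: thermal and power management; EAX bit 0 = digital thermal sensor */
        if (!__get_cpuid(6, &eax, &ebx, &ecx, &edx)) {
                printf("CPUID leaf 6 not supported\n");
                return 1;
        }
        printf("digital thermal sensor: %s\n", (eax & 1) ? "present" : "absent");
        return 0;
}
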
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
index 6138f036b159..fc591ae53107 100644
--- a/drivers/hwmon/lis3lv02d.c
+++ b/drivers/hwmon/lis3lv02d.c
@@ -277,7 +277,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
277 wake_up_interruptible(&lis3_dev.misc_wait); 277 wake_up_interruptible(&lis3_dev.misc_wait);
278 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN); 278 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
279out: 279out:
280 if (lis3_dev.whoami == WAI_8B && lis3_dev.idev && 280 if (lis3_dev.pdata && lis3_dev.whoami == WAI_8B && lis3_dev.idev &&
281 lis3_dev.idev->input->users) 281 lis3_dev.idev->input->users)
282 return IRQ_WAKE_THREAD; 282 return IRQ_WAKE_THREAD;
283 return IRQ_HANDLED; 283 return IRQ_HANDLED;
@@ -718,7 +718,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev)
718 * io-apic is not configurable (and generates a warning) but I keep it 718 * io-apic is not configurable (and generates a warning) but I keep it
719 * in case of support for other hardware. 719 * in case of support for other hardware.
720 */ 720 */
721 if (dev->whoami == WAI_8B) 721 if (dev->pdata && dev->whoami == WAI_8B)
722 thread_fn = lis302dl_interrupt_thread1_8b; 722 thread_fn = lis302dl_interrupt_thread1_8b;
723 else 723 else
724 thread_fn = NULL; 724 thread_fn = NULL;
diff --git a/drivers/hwmon/pkgtemp.c b/drivers/hwmon/pkgtemp.c
index 74157fcda6ed..f11903936c8b 100644
--- a/drivers/hwmon/pkgtemp.c
+++ b/drivers/hwmon/pkgtemp.c
@@ -33,7 +33,6 @@
33#include <linux/list.h> 33#include <linux/list.h>
34#include <linux/platform_device.h> 34#include <linux/platform_device.h>
35#include <linux/cpu.h> 35#include <linux/cpu.h>
36#include <linux/pci.h>
37#include <asm/msr.h> 36#include <asm/msr.h>
38#include <asm/processor.h> 37#include <asm/processor.h>
39 38
@@ -224,7 +223,7 @@ static int __devinit pkgtemp_probe(struct platform_device *pdev)
224 223
225 err = sysfs_create_group(&pdev->dev.kobj, &pkgtemp_group); 224 err = sysfs_create_group(&pdev->dev.kobj, &pkgtemp_group);
226 if (err) 225 if (err)
227 goto exit_free; 226 goto exit_dev;
228 227
229 data->hwmon_dev = hwmon_device_register(&pdev->dev); 228 data->hwmon_dev = hwmon_device_register(&pdev->dev);
230 if (IS_ERR(data->hwmon_dev)) { 229 if (IS_ERR(data->hwmon_dev)) {
@@ -238,6 +237,8 @@ static int __devinit pkgtemp_probe(struct platform_device *pdev)
238 237
239exit_class: 238exit_class:
240 sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group); 239 sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group);
240exit_dev:
241 device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr);
241exit_free: 242exit_free:
242 kfree(data); 243 kfree(data);
243exit: 244exit:
@@ -250,6 +251,7 @@ static int __devexit pkgtemp_remove(struct platform_device *pdev)
250 251
251 hwmon_device_unregister(data->hwmon_dev); 252 hwmon_device_unregister(data->hwmon_dev);
252 sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group); 253 sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group);
254 device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr);
253 platform_set_drvdata(pdev, NULL); 255 platform_set_drvdata(pdev, NULL);
254 kfree(data); 256 kfree(data);
255 return 0; 257 return 0;
@@ -281,9 +283,10 @@ static int __cpuinit pkgtemp_device_add(unsigned int cpu)
281 int err; 283 int err;
282 struct platform_device *pdev; 284 struct platform_device *pdev;
283 struct pdev_entry *pdev_entry; 285 struct pdev_entry *pdev_entry;
284#ifdef CONFIG_SMP
285 struct cpuinfo_x86 *c = &cpu_data(cpu); 286 struct cpuinfo_x86 *c = &cpu_data(cpu);
286#endif 287
288 if (!cpu_has(c, X86_FEATURE_PTS))
289 return 0;
287 290
288 mutex_lock(&pdev_list_mutex); 291 mutex_lock(&pdev_list_mutex);
289 292
@@ -339,17 +342,18 @@ exit:
339#ifdef CONFIG_HOTPLUG_CPU 342#ifdef CONFIG_HOTPLUG_CPU
340static void pkgtemp_device_remove(unsigned int cpu) 343static void pkgtemp_device_remove(unsigned int cpu)
341{ 344{
342 struct pdev_entry *p, *n; 345 struct pdev_entry *p;
343 unsigned int i; 346 unsigned int i;
344 int err; 347 int err;
345 348
346 mutex_lock(&pdev_list_mutex); 349 mutex_lock(&pdev_list_mutex);
347 list_for_each_entry_safe(p, n, &pdev_list, list) { 350 list_for_each_entry(p, &pdev_list, list) {
348 if (p->cpu != cpu) 351 if (p->cpu != cpu)
349 continue; 352 continue;
350 353
351 platform_device_unregister(p->pdev); 354 platform_device_unregister(p->pdev);
352 list_del(&p->list); 355 list_del(&p->list);
356 mutex_unlock(&pdev_list_mutex);
353 kfree(p); 357 kfree(p);
354 for_each_cpu(i, cpu_core_mask(cpu)) { 358 for_each_cpu(i, cpu_core_mask(cpu)) {
355 if (i != cpu) { 359 if (i != cpu) {
@@ -358,7 +362,7 @@ static void pkgtemp_device_remove(unsigned int cpu)
358 break; 362 break;
359 } 363 }
360 } 364 }
361 break; 365 return;
362 } 366 }
363 mutex_unlock(&pdev_list_mutex); 367 mutex_unlock(&pdev_list_mutex);
364} 368}
@@ -399,11 +403,6 @@ static int __init pkgtemp_init(void)
399 goto exit; 403 goto exit;
400 404
401 for_each_online_cpu(i) { 405 for_each_online_cpu(i) {
402 struct cpuinfo_x86 *c = &cpu_data(i);
403
404 if (!cpu_has(c, X86_FEATURE_PTS))
405 continue;
406
407 err = pkgtemp_device_add(i); 406 err = pkgtemp_device_add(i);
408 if (err) 407 if (err)
409 goto exit_devices_unreg; 408 goto exit_devices_unreg;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index d88077a21994..13c88871dc3b 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -463,7 +463,8 @@ static int send_connect(struct iwch_ep *ep)
463 V_MSS_IDX(mtu_idx) | 463 V_MSS_IDX(mtu_idx) |
464 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx); 464 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
465 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10); 465 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
466 opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor); 466 opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
467 V_CONG_CONTROL_FLAVOR(cong_flavor);
467 skb->priority = CPL_PRIORITY_SETUP; 468 skb->priority = CPL_PRIORITY_SETUP;
468 set_arp_failure_handler(skb, act_open_req_arp_failure); 469 set_arp_failure_handler(skb, act_open_req_arp_failure);
469 470
@@ -1280,7 +1281,8 @@ static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
1280 V_MSS_IDX(mtu_idx) | 1281 V_MSS_IDX(mtu_idx) |
1281 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx); 1282 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
1282 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10); 1283 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
1283 opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor); 1284 opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
1285 V_CONG_CONTROL_FLAVOR(cong_flavor);
1284 1286
1285 rpl = cplhdr(skb); 1287 rpl = cplhdr(skb);
1286 rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 1288 rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index 74dce4ba0262..350eb34f049c 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -81,7 +81,7 @@ static int ns2_led_get_mode(struct ns2_led_data *led_dat,
81 int cmd_level; 81 int cmd_level;
82 int slow_level; 82 int slow_level;
83 83
84 read_lock(&led_dat->rw_lock); 84 read_lock_irq(&led_dat->rw_lock);
85 85
86 cmd_level = gpio_get_value(led_dat->cmd); 86 cmd_level = gpio_get_value(led_dat->cmd);
87 slow_level = gpio_get_value(led_dat->slow); 87 slow_level = gpio_get_value(led_dat->slow);
@@ -95,7 +95,7 @@ static int ns2_led_get_mode(struct ns2_led_data *led_dat,
95 } 95 }
96 } 96 }
97 97
98 read_unlock(&led_dat->rw_lock); 98 read_unlock_irq(&led_dat->rw_lock);
99 99
100 return ret; 100 return ret;
101} 101}
@@ -104,8 +104,9 @@ static void ns2_led_set_mode(struct ns2_led_data *led_dat,
104 enum ns2_led_modes mode) 104 enum ns2_led_modes mode)
105{ 105{
106 int i; 106 int i;
107 unsigned long flags;
107 108
108 write_lock(&led_dat->rw_lock); 109 write_lock_irqsave(&led_dat->rw_lock, flags);
109 110
110 for (i = 0; i < ARRAY_SIZE(ns2_led_modval); i++) { 111 for (i = 0; i < ARRAY_SIZE(ns2_led_modval); i++) {
111 if (mode == ns2_led_modval[i].mode) { 112 if (mode == ns2_led_modval[i].mode) {
@@ -116,7 +117,7 @@ static void ns2_led_set_mode(struct ns2_led_data *led_dat,
116 } 117 }
117 } 118 }
118 119
119 write_unlock(&led_dat->rw_lock); 120 write_unlock_irqrestore(&led_dat->rw_lock, flags);
120} 121}
121 122
122static void ns2_led_set(struct led_classdev *led_cdev, 123static void ns2_led_set(struct led_classdev *led_cdev,
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 71ad4163b95e..aacb862ecc8a 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -241,8 +241,10 @@ static struct sdhci_ops sdhci_s3c_ops = {
241static void sdhci_s3c_notify_change(struct platform_device *dev, int state) 241static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
242{ 242{
243 struct sdhci_host *host = platform_get_drvdata(dev); 243 struct sdhci_host *host = platform_get_drvdata(dev);
244 unsigned long flags;
245
244 if (host) { 246 if (host) {
245 spin_lock(&host->lock); 247 spin_lock_irqsave(&host->lock, flags);
246 if (state) { 248 if (state) {
247 dev_dbg(&dev->dev, "card inserted.\n"); 249 dev_dbg(&dev->dev, "card inserted.\n");
248 host->flags &= ~SDHCI_DEVICE_DEAD; 250 host->flags &= ~SDHCI_DEVICE_DEAD;
@@ -253,7 +255,7 @@ static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
253 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; 255 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
254 } 256 }
255 tasklet_schedule(&host->card_tasklet); 257 tasklet_schedule(&host->card_tasklet);
256 spin_unlock(&host->lock); 258 spin_unlock_irqrestore(&host->lock, flags);
257 } 259 }
258} 260}
259 261
@@ -481,8 +483,10 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
481 sdhci_remove_host(host, 1); 483 sdhci_remove_host(host, 1);
482 484
483 for (ptr = 0; ptr < 3; ptr++) { 485 for (ptr = 0; ptr < 3; ptr++) {
484 clk_disable(sc->clk_bus[ptr]); 486 if (sc->clk_bus[ptr]) {
485 clk_put(sc->clk_bus[ptr]); 487 clk_disable(sc->clk_bus[ptr]);
488 clk_put(sc->clk_bus[ptr]);
489 }
486 } 490 }
487 clk_disable(sc->clk_io); 491 clk_disable(sc->clk_io);
488 clk_put(sc->clk_io); 492 clk_put(sc->clk_io);
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index fa42103b2874..179871d9e71f 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -2942,6 +2942,9 @@ static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2942{ 2942{
2943 struct vortex_private *vp = netdev_priv(dev); 2943 struct vortex_private *vp = netdev_priv(dev);
2944 2944
2945 if (!VORTEX_PCI(vp))
2946 return;
2947
2945 wol->supported = WAKE_MAGIC; 2948 wol->supported = WAKE_MAGIC;
2946 2949
2947 wol->wolopts = 0; 2950 wol->wolopts = 0;
@@ -2952,6 +2955,10 @@ static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2952static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2955static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2953{ 2956{
2954 struct vortex_private *vp = netdev_priv(dev); 2957 struct vortex_private *vp = netdev_priv(dev);
2958
2959 if (!VORTEX_PCI(vp))
2960 return -EOPNOTSUPP;
2961
2955 if (wol->wolopts & ~WAKE_MAGIC) 2962 if (wol->wolopts & ~WAKE_MAGIC)
2956 return -EINVAL; 2963 return -EINVAL;
2957 2964
@@ -3201,6 +3208,9 @@ static void acpi_set_WOL(struct net_device *dev)
3201 return; 3208 return;
3202 } 3209 }
3203 3210
3211 if (VORTEX_PCI(vp)->current_state < PCI_D3hot)
3212 return;
3213
3204 /* Change the power state to D3; RxEnable doesn't take effect. */ 3214 /* Change the power state to D3; RxEnable doesn't take effect. */
3205 pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot); 3215 pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot);
3206 } 3216 }
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 63b9ba0cc67e..c73be2848319 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -1251,6 +1251,12 @@ static void atl1_free_ring_resources(struct atl1_adapter *adapter)
1251 1251
1252 rrd_ring->desc = NULL; 1252 rrd_ring->desc = NULL;
1253 rrd_ring->dma = 0; 1253 rrd_ring->dma = 0;
1254
1255 adapter->cmb.dma = 0;
1256 adapter->cmb.cmb = NULL;
1257
1258 adapter->smb.dma = 0;
1259 adapter->smb.smb = NULL;
1254} 1260}
1255 1261
1256static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter) 1262static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
@@ -2847,10 +2853,11 @@ static int atl1_resume(struct pci_dev *pdev)
2847 pci_enable_wake(pdev, PCI_D3cold, 0); 2853 pci_enable_wake(pdev, PCI_D3cold, 0);
2848 2854
2849 atl1_reset_hw(&adapter->hw); 2855 atl1_reset_hw(&adapter->hw);
2850 adapter->cmb.cmb->int_stats = 0;
2851 2856
2852 if (netif_running(netdev)) 2857 if (netif_running(netdev)) {
2858 adapter->cmb.cmb->int_stats = 0;
2853 atl1_up(adapter); 2859 atl1_up(adapter);
2860 }
2854 netif_device_attach(netdev); 2861 netif_device_attach(netdev);
2855 2862
2856 return 0; 2863 return 0;
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 66ed08f726fb..ba302a5c2c30 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -57,6 +57,7 @@ enum e1e_registers {
57 E1000_SCTL = 0x00024, /* SerDes Control - RW */ 57 E1000_SCTL = 0x00024, /* SerDes Control - RW */
58 E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */ 58 E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */
59 E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */ 59 E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */
60 E1000_FEXTNVM4 = 0x00024, /* Future Extended NVM 4 - RW */
60 E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */ 61 E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */
61 E1000_FCT = 0x00030, /* Flow Control Type - RW */ 62 E1000_FCT = 0x00030, /* Flow Control Type - RW */
62 E1000_VET = 0x00038, /* VLAN Ether Type - RW */ 63 E1000_VET = 0x00038, /* VLAN Ether Type - RW */
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 63930d12711c..57b5435599ab 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -105,6 +105,10 @@
105#define E1000_FEXTNVM_SW_CONFIG 1 105#define E1000_FEXTNVM_SW_CONFIG 1
106#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */ 106#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */
107 107
108#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7
109#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
110#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
111
108#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL 112#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
109 113
110#define E1000_ICH_RAR_ENTRIES 7 114#define E1000_ICH_RAR_ENTRIES 7
@@ -125,6 +129,7 @@
125 129
126/* SMBus Address Phy Register */ 130/* SMBus Address Phy Register */
127#define HV_SMB_ADDR PHY_REG(768, 26) 131#define HV_SMB_ADDR PHY_REG(768, 26)
132#define HV_SMB_ADDR_MASK 0x007F
128#define HV_SMB_ADDR_PEC_EN 0x0200 133#define HV_SMB_ADDR_PEC_EN 0x0200
129#define HV_SMB_ADDR_VALID 0x0080 134#define HV_SMB_ADDR_VALID 0x0080
130 135
@@ -237,6 +242,8 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
237static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); 242static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
238static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); 243static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
239static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); 244static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
245static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
246static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
240 247
241static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) 248static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
242{ 249{
@@ -272,7 +279,7 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
272static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) 279static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
273{ 280{
274 struct e1000_phy_info *phy = &hw->phy; 281 struct e1000_phy_info *phy = &hw->phy;
275 u32 ctrl; 282 u32 ctrl, fwsm;
276 s32 ret_val = 0; 283 s32 ret_val = 0;
277 284
278 phy->addr = 1; 285 phy->addr = 1;
@@ -294,7 +301,8 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
294 * disabled, then toggle the LANPHYPC Value bit to force 301 * disabled, then toggle the LANPHYPC Value bit to force
295 * the interconnect to PCIe mode. 302 * the interconnect to PCIe mode.
296 */ 303 */
297 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { 304 fwsm = er32(FWSM);
305 if (!(fwsm & E1000_ICH_FWSM_FW_VALID)) {
298 ctrl = er32(CTRL); 306 ctrl = er32(CTRL);
299 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; 307 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
300 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE; 308 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
@@ -303,6 +311,13 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
303 ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE; 311 ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
304 ew32(CTRL, ctrl); 312 ew32(CTRL, ctrl);
305 msleep(50); 313 msleep(50);
314
315 /*
316 * Gate automatic PHY configuration by hardware on
317 * non-managed 82579
318 */
319 if (hw->mac.type == e1000_pch2lan)
320 e1000_gate_hw_phy_config_ich8lan(hw, true);
306 } 321 }
307 322
308 /* 323 /*
@@ -315,6 +330,13 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
315 if (ret_val) 330 if (ret_val)
316 goto out; 331 goto out;
317 332
333 /* Ungate automatic PHY configuration on non-managed 82579 */
334 if ((hw->mac.type == e1000_pch2lan) &&
335 !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
336 msleep(10);
337 e1000_gate_hw_phy_config_ich8lan(hw, false);
338 }
339
318 phy->id = e1000_phy_unknown; 340 phy->id = e1000_phy_unknown;
319 ret_val = e1000e_get_phy_id(hw); 341 ret_val = e1000e_get_phy_id(hw);
320 if (ret_val) 342 if (ret_val)
@@ -561,13 +583,10 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
561 if (mac->type == e1000_ich8lan) 583 if (mac->type == e1000_ich8lan)
562 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); 584 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
563 585
564 /* Disable PHY configuration by hardware, config by software */ 586 /* Gate automatic PHY configuration by hardware on managed 82579 */
565 if (mac->type == e1000_pch2lan) { 587 if ((mac->type == e1000_pch2lan) &&
566 u32 extcnf_ctrl = er32(EXTCNF_CTRL); 588 (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
567 589 e1000_gate_hw_phy_config_ich8lan(hw, true);
568 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
569 ew32(EXTCNF_CTRL, extcnf_ctrl);
570 }
571 590
572 return 0; 591 return 0;
573} 592}
@@ -652,6 +671,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
652 goto out; 671 goto out;
653 } 672 }
654 673
674 if (hw->mac.type == e1000_pch2lan) {
675 ret_val = e1000_k1_workaround_lv(hw);
676 if (ret_val)
677 goto out;
678 }
679
655 /* 680 /*
656 * Check if there was DownShift, must be checked 681 * Check if there was DownShift, must be checked
657 * immediately after link-up 682 * immediately after link-up
@@ -895,6 +920,34 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
895} 920}
896 921
897/** 922/**
923 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
924 * @hw: pointer to the HW structure
925 *
926 * Assumes semaphore already acquired.
927 *
928 **/
929static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
930{
931 u16 phy_data;
932 u32 strap = er32(STRAP);
933 s32 ret_val = 0;
934
935 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
936
937 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
938 if (ret_val)
939 goto out;
940
941 phy_data &= ~HV_SMB_ADDR_MASK;
942 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
943 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
944 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
945
946out:
947 return ret_val;
948}
949
950/**
898 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration 951 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
899 * @hw: pointer to the HW structure 952 * @hw: pointer to the HW structure
900 * 953 *
@@ -903,7 +956,6 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
903 **/ 956 **/
904static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) 957static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
905{ 958{
906 struct e1000_adapter *adapter = hw->adapter;
907 struct e1000_phy_info *phy = &hw->phy; 959 struct e1000_phy_info *phy = &hw->phy;
908 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask; 960 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
909 s32 ret_val = 0; 961 s32 ret_val = 0;
@@ -921,7 +973,8 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
921 if (phy->type != e1000_phy_igp_3) 973 if (phy->type != e1000_phy_igp_3)
922 return ret_val; 974 return ret_val;
923 975
924 if (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) { 976 if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) ||
977 (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) {
925 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; 978 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
926 break; 979 break;
927 } 980 }
@@ -961,21 +1014,16 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
961 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; 1014 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
962 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; 1015 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
963 1016
964 if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) && 1017 if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
965 ((hw->mac.type == e1000_pchlan) || 1018 (hw->mac.type == e1000_pchlan)) ||
966 (hw->mac.type == e1000_pch2lan))) { 1019 (hw->mac.type == e1000_pch2lan)) {
967 /* 1020 /*
968 * HW configures the SMBus address and LEDs when the 1021 * HW configures the SMBus address and LEDs when the
969 * OEM and LCD Write Enable bits are set in the NVM. 1022 * OEM and LCD Write Enable bits are set in the NVM.
970 * When both NVM bits are cleared, SW will configure 1023 * When both NVM bits are cleared, SW will configure
971 * them instead. 1024 * them instead.
972 */ 1025 */
973 data = er32(STRAP); 1026 ret_val = e1000_write_smbus_addr(hw);
974 data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
975 reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
976 reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
977 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
978 reg_data);
979 if (ret_val) 1027 if (ret_val)
980 goto out; 1028 goto out;
981 1029
@@ -1440,10 +1488,6 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1440 goto out; 1488 goto out;
1441 1489
1442 /* Enable jumbo frame workaround in the PHY */ 1490 /* Enable jumbo frame workaround in the PHY */
1443 e1e_rphy(hw, PHY_REG(769, 20), &data);
1444 ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14));
1445 if (ret_val)
1446 goto out;
1447 e1e_rphy(hw, PHY_REG(769, 23), &data); 1491 e1e_rphy(hw, PHY_REG(769, 23), &data);
1448 data &= ~(0x7F << 5); 1492 data &= ~(0x7F << 5);
1449 data |= (0x37 << 5); 1493 data |= (0x37 << 5);
@@ -1452,7 +1496,6 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1452 goto out; 1496 goto out;
1453 e1e_rphy(hw, PHY_REG(769, 16), &data); 1497 e1e_rphy(hw, PHY_REG(769, 16), &data);
1454 data &= ~(1 << 13); 1498 data &= ~(1 << 13);
1455 data |= (1 << 12);
1456 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); 1499 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
1457 if (ret_val) 1500 if (ret_val)
1458 goto out; 1501 goto out;
@@ -1477,7 +1520,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1477 1520
1478 mac_reg = er32(RCTL); 1521 mac_reg = er32(RCTL);
1479 mac_reg &= ~E1000_RCTL_SECRC; 1522 mac_reg &= ~E1000_RCTL_SECRC;
1480 ew32(FFLT_DBG, mac_reg); 1523 ew32(RCTL, mac_reg);
1481 1524
1482 ret_val = e1000e_read_kmrn_reg(hw, 1525 ret_val = e1000e_read_kmrn_reg(hw,
1483 E1000_KMRNCTRLSTA_CTRL_OFFSET, 1526 E1000_KMRNCTRLSTA_CTRL_OFFSET,
@@ -1503,17 +1546,12 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1503 goto out; 1546 goto out;
1504 1547
1505 /* Write PHY register values back to h/w defaults */ 1548 /* Write PHY register values back to h/w defaults */
1506 e1e_rphy(hw, PHY_REG(769, 20), &data);
1507 ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14));
1508 if (ret_val)
1509 goto out;
1510 e1e_rphy(hw, PHY_REG(769, 23), &data); 1549 e1e_rphy(hw, PHY_REG(769, 23), &data);
1511 data &= ~(0x7F << 5); 1550 data &= ~(0x7F << 5);
1512 ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); 1551 ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
1513 if (ret_val) 1552 if (ret_val)
1514 goto out; 1553 goto out;
1515 e1e_rphy(hw, PHY_REG(769, 16), &data); 1554 e1e_rphy(hw, PHY_REG(769, 16), &data);
1516 data &= ~(1 << 12);
1517 data |= (1 << 13); 1555 data |= (1 << 13);
1518 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); 1556 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
1519 if (ret_val) 1557 if (ret_val)
@@ -1559,6 +1597,69 @@ out:
1559} 1597}
1560 1598
1561/** 1599/**
1600 * e1000_k1_gig_workaround_lv - K1 Si workaround
1601 * @hw: pointer to the HW structure
1602 *
1603 * Workaround to set the K1 beacon duration for 82579 parts
1604 **/
1605static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
1606{
1607 s32 ret_val = 0;
1608 u16 status_reg = 0;
1609 u32 mac_reg;
1610
1611 if (hw->mac.type != e1000_pch2lan)
1612 goto out;
1613
1614 /* Set K1 beacon duration based on 1Gbps speed or otherwise */
1615 ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
1616 if (ret_val)
1617 goto out;
1618
1619 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
1620 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
1621 mac_reg = er32(FEXTNVM4);
1622 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1623
1624 if (status_reg & HV_M_STATUS_SPEED_1000)
1625 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1626 else
1627 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
1628
1629 ew32(FEXTNVM4, mac_reg);
1630 }
1631
1632out:
1633 return ret_val;
1634}
1635
1636/**
1637 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
1638 * @hw: pointer to the HW structure
1639 * @gate: boolean set to true to gate, false to ungate
1640 *
1641 * Gate/ungate the automatic PHY configuration via hardware; perform
1642 * the configuration via software instead.
1643 **/
1644static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
1645{
1646 u32 extcnf_ctrl;
1647
1648 if (hw->mac.type != e1000_pch2lan)
1649 return;
1650
1651 extcnf_ctrl = er32(EXTCNF_CTRL);
1652
1653 if (gate)
1654 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1655 else
1656 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1657
1658 ew32(EXTCNF_CTRL, extcnf_ctrl);
1659 return;
1660}
1661
1662/**
1562 * e1000_lan_init_done_ich8lan - Check for PHY config completion 1663 * e1000_lan_init_done_ich8lan - Check for PHY config completion
1563 * @hw: pointer to the HW structure 1664 * @hw: pointer to the HW structure
1564 * 1665 *
@@ -1602,6 +1703,9 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
1602 if (e1000_check_reset_block(hw)) 1703 if (e1000_check_reset_block(hw))
1603 goto out; 1704 goto out;
1604 1705
1706 /* Allow time for h/w to get to quiescent state after reset */
1707 msleep(10);
1708
1605 /* Perform any necessary post-reset workarounds */ 1709 /* Perform any necessary post-reset workarounds */
1606 switch (hw->mac.type) { 1710 switch (hw->mac.type) {
1607 case e1000_pchlan: 1711 case e1000_pchlan:
@@ -1630,6 +1734,13 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
1630 /* Configure the LCD with the OEM bits in NVM */ 1734 /* Configure the LCD with the OEM bits in NVM */
1631 ret_val = e1000_oem_bits_config_ich8lan(hw, true); 1735 ret_val = e1000_oem_bits_config_ich8lan(hw, true);
1632 1736
1737 /* Ungate automatic PHY configuration on non-managed 82579 */
1738 if ((hw->mac.type == e1000_pch2lan) &&
1739 !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
1740 msleep(10);
1741 e1000_gate_hw_phy_config_ich8lan(hw, false);
1742 }
1743
1633out: 1744out:
1634 return ret_val; 1745 return ret_val;
1635} 1746}
@@ -1646,6 +1757,11 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
1646{ 1757{
1647 s32 ret_val = 0; 1758 s32 ret_val = 0;
1648 1759
1760 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
1761 if ((hw->mac.type == e1000_pch2lan) &&
1762 !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
1763 e1000_gate_hw_phy_config_ich8lan(hw, true);
1764
1649 ret_val = e1000e_phy_hw_reset_generic(hw); 1765 ret_val = e1000e_phy_hw_reset_generic(hw);
1650 if (ret_val) 1766 if (ret_val)
1651 goto out; 1767 goto out;
@@ -2910,6 +3026,14 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
2910 * external PHY is reset. 3026 * external PHY is reset.
2911 */ 3027 */
2912 ctrl |= E1000_CTRL_PHY_RST; 3028 ctrl |= E1000_CTRL_PHY_RST;
3029
3030 /*
3031 * Gate automatic PHY configuration by hardware on
3032 * non-managed 82579
3033 */
3034 if ((hw->mac.type == e1000_pch2lan) &&
3035 !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
3036 e1000_gate_hw_phy_config_ich8lan(hw, true);
2913 } 3037 }
2914 ret_val = e1000_acquire_swflag_ich8lan(hw); 3038 ret_val = e1000_acquire_swflag_ich8lan(hw);
2915 e_dbg("Issuing a global reset to ich8lan\n"); 3039 e_dbg("Issuing a global reset to ich8lan\n");
@@ -3460,13 +3584,20 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
3460void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw) 3584void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
3461{ 3585{
3462 u32 phy_ctrl; 3586 u32 phy_ctrl;
3587 s32 ret_val;
3463 3588
3464 phy_ctrl = er32(PHY_CTRL); 3589 phy_ctrl = er32(PHY_CTRL);
3465 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE; 3590 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE;
3466 ew32(PHY_CTRL, phy_ctrl); 3591 ew32(PHY_CTRL, phy_ctrl);
3467 3592
3468 if (hw->mac.type >= e1000_pchlan) 3593 if (hw->mac.type >= e1000_pchlan) {
3469 e1000_phy_hw_reset_ich8lan(hw); 3594 e1000_oem_bits_config_ich8lan(hw, true);
3595 ret_val = hw->phy.ops.acquire(hw);
3596 if (ret_val)
3597 return;
3598 e1000_write_smbus_addr(hw);
3599 hw->phy.ops.release(hw);
3600 }
3470} 3601}
3471 3602
3472/** 3603/**
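
The new e1000_k1_workaround_lv() updates only the beacon-duration field of FEXTNVM4 by clearing the field mask and OR-ing in the value chosen for the link speed. A standalone sketch of that read-modify-write pattern using the mask and duration values from the diff (the starting register value is just a test constant, not real hardware state):

#include <stdio.h>
#include <stdint.h>

#define E1000_FEXTNVM4_BEACON_DURATION_MASK   0x7
#define E1000_FEXTNVM4_BEACON_DURATION_8USEC  0x7
#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3

/* clear the 3-bit duration field, then set it for 1 Gb/s or slower link */
static uint32_t set_beacon_duration(uint32_t reg, int link_is_1000)
{
        reg &= ~(uint32_t)E1000_FEXTNVM4_BEACON_DURATION_MASK;
        reg |= link_is_1000 ? E1000_FEXTNVM4_BEACON_DURATION_8USEC
                            : E1000_FEXTNVM4_BEACON_DURATION_16USEC;
        return reg;
}

int main(void)
{
        uint32_t reg = 0xabcdef05; /* arbitrary starting register value */

        printf("1000 Mb/s: 0x%08x\n", set_beacon_duration(reg, 1)); /* ...ef07 */
        printf("100 Mb/s:  0x%08x\n", set_beacon_duration(reg, 0)); /* ...ef03 */
        return 0;
}
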
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 2b8ef44bd2b1..e561d15c3eb1 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -2704,6 +2704,16 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2704 u32 psrctl = 0; 2704 u32 psrctl = 0;
2705 u32 pages = 0; 2705 u32 pages = 0;
2706 2706
2707 /* Workaround Si errata on 82579 - configure jumbo frame flow */
2708 if (hw->mac.type == e1000_pch2lan) {
2709 s32 ret_val;
2710
2711 if (adapter->netdev->mtu > ETH_DATA_LEN)
2712 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
2713 else
2714 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
2715 }
2716
2707 /* Program MC offset vector base */ 2717 /* Program MC offset vector base */
2708 rctl = er32(RCTL); 2718 rctl = er32(RCTL);
2709 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 2719 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
@@ -2744,16 +2754,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2744 e1e_wphy(hw, 22, phy_data); 2754 e1e_wphy(hw, 22, phy_data);
2745 } 2755 }
2746 2756
2747 /* Workaround Si errata on 82579 - configure jumbo frame flow */
2748 if (hw->mac.type == e1000_pch2lan) {
2749 s32 ret_val;
2750
2751 if (rctl & E1000_RCTL_LPE)
2752 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
2753 else
2754 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
2755 }
2756
2757 /* Setup buffer sizes */ 2757 /* Setup buffer sizes */
2758 rctl &= ~E1000_RCTL_SZ_4096; 2758 rctl &= ~E1000_RCTL_SZ_4096;
2759 rctl |= E1000_RCTL_BSEX; 2759 rctl |= E1000_RCTL_BSEX;
@@ -4833,6 +4833,15 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
4833 return -EINVAL; 4833 return -EINVAL;
4834 } 4834 }
4835 4835
4836 /* Jumbo frame workaround on 82579 requires CRC be stripped */
4837 if ((adapter->hw.mac.type == e1000_pch2lan) &&
4838 !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
4839 (new_mtu > ETH_DATA_LEN)) {
4840 e_err("Jumbo Frames not supported on 82579 when CRC "
4841 "stripping is disabled.\n");
4842 return -EINVAL;
4843 }
4844
4836 /* 82573 Errata 17 */ 4845 /* 82573 Errata 17 */
4837 if (((adapter->hw.mac.type == e1000_82573) || 4846 if (((adapter->hw.mac.type == e1000_82573) ||
4838 (adapter->hw.mac.type == e1000_82574)) && 4847 (adapter->hw.mac.type == e1000_82574)) &&
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 3506fd6ad726..519e19e23955 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2928,7 +2928,7 @@ static int __devinit emac_probe(struct platform_device *ofdev,
2928 if (dev->emac_irq != NO_IRQ) 2928 if (dev->emac_irq != NO_IRQ)
2929 irq_dispose_mapping(dev->emac_irq); 2929 irq_dispose_mapping(dev->emac_irq);
2930 err_free: 2930 err_free:
2931 kfree(ndev); 2931 free_netdev(ndev);
2932 err_gone: 2932 err_gone:
2933 /* if we were on the bootlist, remove us as we won't show up and 2933 /* if we were on the bootlist, remove us as we won't show up and
2934 * wake up all waiters to notify them in case they were waiting 2934 * wake up all waiters to notify them in case they were waiting
@@ -2971,7 +2971,7 @@ static int __devexit emac_remove(struct platform_device *ofdev)
2971 if (dev->emac_irq != NO_IRQ) 2971 if (dev->emac_irq != NO_IRQ)
2972 irq_dispose_mapping(dev->emac_irq); 2972 irq_dispose_mapping(dev->emac_irq);
2973 2973
2974 kfree(dev->ndev); 2974 free_netdev(dev->ndev);
2975 2975
2976 return 0; 2976 return 0;
2977} 2977}
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index cabae7bb1fc6..b075a35b85d4 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -1540,7 +1540,6 @@ netxen_process_rcv(struct netxen_adapter *adapter,
1540 if (pkt_offset) 1540 if (pkt_offset)
1541 skb_pull(skb, pkt_offset); 1541 skb_pull(skb, pkt_offset);
1542 1542
1543 skb->truesize = skb->len + sizeof(struct sk_buff);
1544 skb->protocol = eth_type_trans(skb, netdev); 1543 skb->protocol = eth_type_trans(skb, netdev);
1545 1544
1546 napi_gro_receive(&sds_ring->napi, skb); 1545 napi_gro_receive(&sds_ring->napi, skb);
@@ -1602,8 +1601,6 @@ netxen_process_lro(struct netxen_adapter *adapter,
1602 1601
1603 skb_put(skb, lro_length + data_offset); 1602 skb_put(skb, lro_length + data_offset);
1604 1603
1605 skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
1606
1607 skb_pull(skb, l2_hdr_offset); 1604 skb_pull(skb, l2_hdr_offset);
1608 skb->protocol = eth_type_trans(skb, netdev); 1605 skb->protocol = eth_type_trans(skb, netdev);
1609 1606
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 75ba744b173c..2c7cf0b64811 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -1316,7 +1316,7 @@ qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
1316 return -ENOMEM; 1316 return -ENOMEM;
1317 } 1317 }
1318 1318
1319 skb_reserve(skb, 2); 1319 skb_reserve(skb, NET_IP_ALIGN);
1320 1320
1321 dma = pci_map_single(pdev, skb->data, 1321 dma = pci_map_single(pdev, skb->data,
1322 rds_ring->dma_size, PCI_DMA_FROMDEVICE); 1322 rds_ring->dma_size, PCI_DMA_FROMDEVICE);
@@ -1404,7 +1404,6 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
1404 if (pkt_offset) 1404 if (pkt_offset)
1405 skb_pull(skb, pkt_offset); 1405 skb_pull(skb, pkt_offset);
1406 1406
1407 skb->truesize = skb->len + sizeof(struct sk_buff);
1408 skb->protocol = eth_type_trans(skb, netdev); 1407 skb->protocol = eth_type_trans(skb, netdev);
1409 1408
1410 napi_gro_receive(&sds_ring->napi, skb); 1409 napi_gro_receive(&sds_ring->napi, skb);
@@ -1466,8 +1465,6 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
1466 1465
1467 skb_put(skb, lro_length + data_offset); 1466 skb_put(skb, lro_length + data_offset);
1468 1467
1469 skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
1470
1471 skb_pull(skb, l2_hdr_offset); 1468 skb_pull(skb, l2_hdr_offset);
1472 skb->protocol = eth_type_trans(skb, netdev); 1469 skb->protocol = eth_type_trans(skb, netdev);
1473 1470
@@ -1700,8 +1697,6 @@ qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter,
1700 if (pkt_offset) 1697 if (pkt_offset)
1701 skb_pull(skb, pkt_offset); 1698 skb_pull(skb, pkt_offset);
1702 1699
1703 skb->truesize = skb->len + sizeof(struct sk_buff);
1704
1705 if (!qlcnic_check_loopback_buff(skb->data)) 1700 if (!qlcnic_check_loopback_buff(skb->data))
1706 adapter->diag_cnt++; 1701 adapter->diag_cnt++;
1707 1702
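
Replacing the bare 2 with NET_IP_ALIGN in the qlcnic receive path spells out the intent: reserving 2 bytes ahead of the 14-byte Ethernet header leaves the IP header on a 4-byte boundary. A quick sketch of the offset arithmetic (NET_IP_ALIGN is normally 2, though some architectures define it as 0):

#include <stdio.h>

#define NET_IP_ALIGN 2  /* the usual value; some architectures use 0 */
#define ETH_HLEN     14 /* Ethernet header length */

int main(void)
{
        /* offset of the IP header within a buffer that starts 4-byte aligned */
        unsigned int no_pad   = ETH_HLEN;                /* 14: misaligned */
        unsigned int with_pad = NET_IP_ALIGN + ETH_HLEN; /* 16: 4-byte aligned */

        printf("without padding: offset %u (mod 4 = %u)\n", no_pad, no_pad % 4);
        printf("with padding:    offset %u (mod 4 = %u)\n", with_pad, with_pad % 4);
        return 0;
}
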
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 07eb884ff982..44150f2f7bfd 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -384,7 +384,7 @@ static void rionet_remove(struct rio_dev *rdev)
384 free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ? 384 free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ?
385 __ilog2(sizeof(void *)) + 4 : 0); 385 __ilog2(sizeof(void *)) + 4 : 0);
386 unregister_netdev(ndev); 386 unregister_netdev(ndev);
387 kfree(ndev); 387 free_netdev(ndev);
388 388
389 list_for_each_entry_safe(peer, tmp, &rionet_peers, node) { 389 list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
390 list_del(&peer->node); 390 list_del(&peer->node);
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index cc4bd8c65f8b..9265315baa0b 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -804,7 +804,7 @@ static int __devinit sgiseeq_probe(struct platform_device *pdev)
804err_out_free_page: 804err_out_free_page:
805 free_page((unsigned long) sp->srings); 805 free_page((unsigned long) sp->srings);
806err_out_free_dev: 806err_out_free_dev:
807 kfree(dev); 807 free_netdev(dev);
808 808
809err_out: 809err_out:
810 return err; 810 return err;
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 0909ae934ad0..8150ba154116 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -58,6 +58,7 @@
58 58
59MODULE_LICENSE("GPL"); 59MODULE_LICENSE("GPL");
60MODULE_VERSION(SMSC_DRV_VERSION); 60MODULE_VERSION(SMSC_DRV_VERSION);
61MODULE_ALIAS("platform:smsc911x");
61 62
62#if USE_DEBUG > 0 63#if USE_DEBUG > 0
63static int debug = 16; 64static int debug = 16;
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 5efa57757a2c..6888e3d41462 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -243,6 +243,7 @@ enum {
243 NWayState = (1 << 14) | (1 << 13) | (1 << 12), 243 NWayState = (1 << 14) | (1 << 13) | (1 << 12),
244 NWayRestart = (1 << 12), 244 NWayRestart = (1 << 12),
245 NonselPortActive = (1 << 9), 245 NonselPortActive = (1 << 9),
246 SelPortActive = (1 << 8),
246 LinkFailStatus = (1 << 2), 247 LinkFailStatus = (1 << 2),
247 NetCxnErr = (1 << 1), 248 NetCxnErr = (1 << 1),
248}; 249};
@@ -363,7 +364,9 @@ static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
363 364
364/* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/ 365/* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/
365static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, }; 366static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
366static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, }; 367static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
368/* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */
369static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
367static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, }; 370static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
368 371
369 372
@@ -1064,6 +1067,9 @@ static void de21041_media_timer (unsigned long data)
1064 unsigned int carrier; 1067 unsigned int carrier;
1065 unsigned long flags; 1068 unsigned long flags;
1066 1069
1070 /* clear port active bits */
1071 dw32(SIAStatus, NonselPortActive | SelPortActive);
1072
1067 carrier = (status & NetCxnErr) ? 0 : 1; 1073 carrier = (status & NetCxnErr) ? 0 : 1;
1068 1074
1069 if (carrier) { 1075 if (carrier) {
@@ -1158,14 +1164,29 @@ no_link_yet:
1158static void de_media_interrupt (struct de_private *de, u32 status) 1164static void de_media_interrupt (struct de_private *de, u32 status)
1159{ 1165{
1160 if (status & LinkPass) { 1166 if (status & LinkPass) {
1167 /* Ignore if current media is AUI or BNC and we can't use TP */
1168 if ((de->media_type == DE_MEDIA_AUI ||
1169 de->media_type == DE_MEDIA_BNC) &&
1170 (de->media_lock ||
1171 !de_ok_to_advertise(de, DE_MEDIA_TP_AUTO)))
1172 return;
1173 /* If current media is not TP, change it to TP */
1174 if ((de->media_type == DE_MEDIA_AUI ||
1175 de->media_type == DE_MEDIA_BNC)) {
1176 de->media_type = DE_MEDIA_TP_AUTO;
1177 de_stop_rxtx(de);
1178 de_set_media(de);
1179 de_start_rxtx(de);
1180 }
1161 de_link_up(de); 1181 de_link_up(de);
1162 mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK); 1182 mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
1163 return; 1183 return;
1164 } 1184 }
1165 1185
1166 BUG_ON(!(status & LinkFail)); 1186 BUG_ON(!(status & LinkFail));
1167 1187 /* Mark the link as down only if current media is TP */
1168 if (netif_carrier_ok(de->dev)) { 1188 if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI &&
1189 de->media_type != DE_MEDIA_BNC) {
1169 de_link_down(de); 1190 de_link_down(de);
1170 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK); 1191 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1171 } 1192 }
@@ -1229,6 +1250,7 @@ static void de_adapter_sleep (struct de_private *de)
1229 if (de->de21040) 1250 if (de->de21040)
1230 return; 1251 return;
1231 1252
1253 dw32(CSR13, 0); /* Reset phy */
1232 pci_read_config_dword(de->pdev, PCIPM, &pmctl); 1254 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1233 pmctl |= PM_Sleep; 1255 pmctl |= PM_Sleep;
1234 pci_write_config_dword(de->pdev, PCIPM, pmctl); 1256 pci_write_config_dword(de->pdev, PCIPM, pmctl);
@@ -1574,12 +1596,15 @@ static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1574 return 0; /* nothing to change */ 1596 return 0; /* nothing to change */
1575 1597
1576 de_link_down(de); 1598 de_link_down(de);
1599 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1577 de_stop_rxtx(de); 1600 de_stop_rxtx(de);
1578 1601
1579 de->media_type = new_media; 1602 de->media_type = new_media;
1580 de->media_lock = media_lock; 1603 de->media_lock = media_lock;
1581 de->media_advertise = ecmd->advertising; 1604 de->media_advertise = ecmd->advertising;
1582 de_set_media(de); 1605 de_set_media(de);
1606 if (netif_running(de->dev))
1607 de_start_rxtx(de);
1583 1608
1584 return 0; 1609 return 0;
1585} 1610}
@@ -1911,8 +1936,14 @@ fill_defaults:
1911 for (i = 0; i < DE_MAX_MEDIA; i++) { 1936 for (i = 0; i < DE_MAX_MEDIA; i++) {
1912 if (de->media[i].csr13 == 0xffff) 1937 if (de->media[i].csr13 == 0xffff)
1913 de->media[i].csr13 = t21041_csr13[i]; 1938 de->media[i].csr13 = t21041_csr13[i];
1914 if (de->media[i].csr14 == 0xffff) 1939 if (de->media[i].csr14 == 0xffff) {
1915 de->media[i].csr14 = t21041_csr14[i]; 1940 /* autonegotiation is broken at least on some chip
1941 revisions - rev. 0x21 works, 0x11 does not */
1942 if (de->pdev->revision < 0x20)
1943 de->media[i].csr14 = t21041_csr14_brk[i];
1944 else
1945 de->media[i].csr14 = t21041_csr14[i];
1946 }
1916 if (de->media[i].csr15 == 0xffff) 1947 if (de->media[i].csr15 == 0xffff)
1917 de->media[i].csr15 = t21041_csr15[i]; 1948 de->media[i].csr15 = t21041_csr15[i];
1918 } 1949 }
@@ -2158,6 +2189,8 @@ static int de_resume (struct pci_dev *pdev)
2158 dev_err(&dev->dev, "pci_enable_device failed in resume\n"); 2189 dev_err(&dev->dev, "pci_enable_device failed in resume\n");
2159 goto out; 2190 goto out;
2160 } 2191 }
2192 pci_set_master(pdev);
2193 de_init_rings(de);
2161 de_init_hw(de); 2194 de_init_hw(de);
2162out_attach: 2195out_attach:
2163 netif_device_attach(dev); 2196 netif_device_attach(dev);
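
Two things stand out in the de2104x hunks: the default csr14 table is now picked by PCI revision because on-chip autonegotiation is broken on early 21041 steppings, and de_resume() regains bus mastering and rebuilds the descriptor rings before reprogramming the chip, since both are lost across suspend. A hedged sketch of that resume ordering; struct my_nic and the my_init_*() helpers are stand-ins for the driver's own code, not the de2104x functions:

    #include <linux/netdevice.h>
    #include <linux/pci.h>

    struct my_nic {
            struct net_device *dev;
            /* rings, register base, ... */
    };

    static void my_init_rings(struct my_nic *nic);  /* driver helper (stand-in) */
    static void my_init_hw(struct my_nic *nic);     /* driver helper (stand-in) */

    static int my_resume(struct pci_dev *pdev)
    {
            struct my_nic *nic = pci_get_drvdata(pdev);
            int err;

            err = pci_enable_device(pdev);
            if (err)
                    return err;
            pci_set_master(pdev);   /* bus mastering is lost across suspend */
            my_init_rings(nic);     /* rebuild RX/TX descriptor rings first */
            my_init_hw(nic);        /* only then reprogram the chip */
            netif_device_attach(nic->dev);
            return 0;
    }
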
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 07dbc2796448..e23c4060a0f0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -2613,6 +2613,11 @@ int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
2613 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 2613 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2614 return -EINVAL; 2614 return -EINVAL;
2615 2615
2616 if (test_bit(STATUS_SCANNING, &priv->status)) {
2617 IWL_DEBUG_INFO(priv, "scan in progress.\n");
2618 return -EINVAL;
2619 }
2620
2616 if (mode >= IWL_MAX_FORCE_RESET) { 2621 if (mode >= IWL_MAX_FORCE_RESET) {
2617 IWL_DEBUG_INFO(priv, "invalid reset request.\n"); 2622 IWL_DEBUG_INFO(priv, "invalid reset request.\n");
2618 return -EINVAL; 2623 return -EINVAL;
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 5ac2aa783f58..4789f8e8bf7a 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -3757,6 +3757,33 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
3757 3757
3758DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf); 3758DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
3759 3759
3760#define GGC 0x52
3761#define GGC_MEMORY_SIZE_MASK (0xf << 8)
3762#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
3763#define GGC_MEMORY_SIZE_1M (0x1 << 8)
3764#define GGC_MEMORY_SIZE_2M (0x3 << 8)
3765#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
3766#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
3767#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
3768#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
3769
3770static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
3771{
3772 unsigned short ggc;
3773
3774 if (pci_read_config_word(dev, GGC, &ggc))
3775 return;
3776
3777 if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
3778 printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
3779 dmar_map_gfx = 0;
3780 }
3781}
3782DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
3783DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
3784DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
3785DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
3786
3760/* On Tylersburg chipsets, some BIOSes have been known to enable the 3787/* On Tylersburg chipsets, some BIOSes have been known to enable the
3761 ISOCH DMAR unit for the Azalia sound device, but not give it any 3788 ISOCH DMAR unit for the Azalia sound device, but not give it any
3762 TLB entries, which causes it to deadlock. Check for that. We do 3789 TLB entries, which causes it to deadlock. Check for that. We do
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index ce6a3666b3d9..553d8ee55c1c 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -608,7 +608,7 @@ int pci_iov_resource_bar(struct pci_dev *dev, int resno,
608 * the VF BAR size multiplied by the number of VFs. The alignment 608 * the VF BAR size multiplied by the number of VFs. The alignment
609 * is just the VF BAR size. 609 * is just the VF BAR size.
610 */ 610 */
611int pci_sriov_resource_alignment(struct pci_dev *dev, int resno) 611resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
612{ 612{
613 struct resource tmp; 613 struct resource tmp;
614 enum pci_bar_type type; 614 enum pci_bar_type type;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 7754a678ab15..6beb11b617a9 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -264,7 +264,8 @@ extern int pci_iov_init(struct pci_dev *dev);
264extern void pci_iov_release(struct pci_dev *dev); 264extern void pci_iov_release(struct pci_dev *dev);
265extern int pci_iov_resource_bar(struct pci_dev *dev, int resno, 265extern int pci_iov_resource_bar(struct pci_dev *dev, int resno,
266 enum pci_bar_type *type); 266 enum pci_bar_type *type);
267extern int pci_sriov_resource_alignment(struct pci_dev *dev, int resno); 267extern resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev,
268 int resno);
268extern void pci_restore_iov_state(struct pci_dev *dev); 269extern void pci_restore_iov_state(struct pci_dev *dev);
269extern int pci_iov_bus_range(struct pci_bus *bus); 270extern int pci_iov_bus_range(struct pci_bus *bus);
270 271
@@ -320,7 +321,7 @@ static inline int pci_ats_enabled(struct pci_dev *dev)
320} 321}
321#endif /* CONFIG_PCI_IOV */ 322#endif /* CONFIG_PCI_IOV */
322 323
323static inline int pci_resource_alignment(struct pci_dev *dev, 324static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
324 struct resource *res) 325 struct resource *res)
325{ 326{
326#ifdef CONFIG_PCI_IOV 327#ifdef CONFIG_PCI_IOV
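
The pci_sriov_resource_alignment()/pci_resource_alignment() signature change matters on 64-bit builds: resource_size_t follows phys_addr_t, which is 64 bits wide with CONFIG_PHYS_ADDR_T_64BIT, so returning a plain int silently truncates alignments of 4 GiB and above. A tiny illustration of the hazard, assuming a 64-bit resource_size_t; both functions are hypothetical:

    #include <linux/types.h>

    static int broken_alignment(void)               /* old-style int return */
    {
            resource_size_t align = 1ULL << 32;     /* e.g. a 4 GiB VF BAR alignment */
            return align;                           /* truncates to 0 */
    }

    static resource_size_t fixed_alignment(void)
    {
            return 1ULL << 32;                      /* preserved */
    }
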
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index a5c176598d95..9ba4dade69a4 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -595,7 +595,13 @@ int pcmcia_request_io(struct pcmcia_device *p_dev)
595 if (c->io[1].end) { 595 if (c->io[1].end) {
596 ret = alloc_io_space(s, &c->io[1], p_dev->io_lines); 596 ret = alloc_io_space(s, &c->io[1], p_dev->io_lines);
597 if (ret) { 597 if (ret) {
598 struct resource tmp = c->io[0];
599 /* release the previously allocated resource */
598 release_io_space(s, &c->io[0]); 600 release_io_space(s, &c->io[0]);
601 /* but preserve the settings, for they worked... */
602 c->io[0].end = resource_size(&tmp);
603 c->io[0].start = tmp.start;
604 c->io[0].flags = tmp.flags;
599 goto out; 605 goto out;
600 } 606 }
601 } else 607 } else
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index b8a869af0f44..deef6656ab7b 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -646,7 +646,7 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev,
646 if (!pci_resource_start(dev, 0)) { 646 if (!pci_resource_start(dev, 0)) {
647 dev_warn(&dev->dev, "refusing to load the driver as the " 647 dev_warn(&dev->dev, "refusing to load the driver as the "
648 "io_base is NULL.\n"); 648 "io_base is NULL.\n");
649 goto err_out_free_mem; 649 goto err_out_disable;
650 } 650 }
651 651
652 dev_info(&dev->dev, "Cirrus PD6729 PCI to PCMCIA Bridge at 0x%llx " 652 dev_info(&dev->dev, "Cirrus PD6729 PCI to PCMCIA Bridge at 0x%llx "
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e35ed128bdef..2d61186ad5a2 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -3093,7 +3093,8 @@ static const struct tpacpi_quirk tpacpi_hotkey_qtable[] __initconst = {
3093 TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */ 3093 TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */
3094}; 3094};
3095 3095
3096typedef u16 tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN]; 3096typedef u16 tpacpi_keymap_entry_t;
3097typedef tpacpi_keymap_entry_t tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN];
3097 3098
3098static int __init hotkey_init(struct ibm_init_struct *iibm) 3099static int __init hotkey_init(struct ibm_init_struct *iibm)
3099{ 3100{
@@ -3230,7 +3231,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3230 }; 3231 };
3231 3232
3232#define TPACPI_HOTKEY_MAP_SIZE sizeof(tpacpi_keymap_t) 3233#define TPACPI_HOTKEY_MAP_SIZE sizeof(tpacpi_keymap_t)
3233#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(tpacpi_keymap_t[0]) 3234#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(tpacpi_keymap_entry_t)
3234 3235
3235 int res, i; 3236 int res, i;
3236 int status; 3237 int status;
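
The thinkpad_acpi change splits the array typedef so that the per-entry size has its own named type: sizeof(tpacpi_keymap_t) is the whole keymap, while sizeof(tpacpi_keymap_entry_t) is one u16 slot, and TPACPI_HOTKEY_MAP_TYPESIZE now uses the latter. A runnable userspace illustration of that distinction; MAP_LEN of 4 is arbitrary and the names are not the driver's:

    #include <stdio.h>

    #define MAP_LEN 4
    typedef unsigned short keymap_entry_t;
    typedef keymap_entry_t keymap_t[MAP_LEN];

    int main(void)
    {
            printf("whole map: %zu bytes\n", sizeof(keymap_t));       /* 8 here */
            printf("one entry: %zu bytes\n", sizeof(keymap_entry_t)); /* 2 here */
            return 0;
    }
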
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 6edf20b62de5..2c7d2d9be4d0 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1154,7 +1154,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
1154 dev_fsm, dev_fsm_len, GFP_KERNEL); 1154 dev_fsm, dev_fsm_len, GFP_KERNEL);
1155 if (priv->fsm == NULL) { 1155 if (priv->fsm == NULL) {
1156 CTCMY_DBF_DEV(SETUP, dev, "init_fsm error"); 1156 CTCMY_DBF_DEV(SETUP, dev, "init_fsm error");
1157 kfree(dev); 1157 free_netdev(dev);
1158 return NULL; 1158 return NULL;
1159 } 1159 }
1160 fsm_newstate(priv->fsm, DEV_STATE_STOPPED); 1160 fsm_newstate(priv->fsm, DEV_STATE_STOPPED);
@@ -1165,7 +1165,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
1165 grp = ctcmpc_init_mpc_group(priv); 1165 grp = ctcmpc_init_mpc_group(priv);
1166 if (grp == NULL) { 1166 if (grp == NULL) {
1167 MPC_DBF_DEV(SETUP, dev, "init_mpc_group error"); 1167 MPC_DBF_DEV(SETUP, dev, "init_mpc_group error");
1168 kfree(dev); 1168 free_netdev(dev);
1169 return NULL; 1169 return NULL;
1170 } 1170 }
1171 tasklet_init(&grp->mpc_tasklet2, 1171 tasklet_init(&grp->mpc_tasklet2,
diff --git a/drivers/serial/ioc3_serial.c b/drivers/serial/ioc3_serial.c
index 93de907b1208..08d77b041bd8 100644
--- a/drivers/serial/ioc3_serial.c
+++ b/drivers/serial/ioc3_serial.c
@@ -2017,6 +2017,7 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd)
2017 struct ioc3_port *port; 2017 struct ioc3_port *port;
2018 struct ioc3_port *ports[PORTS_PER_CARD]; 2018 struct ioc3_port *ports[PORTS_PER_CARD];
2019 int phys_port; 2019 int phys_port;
2020 int cnt;
2020 2021
2021 DPRINT_CONFIG(("%s (0x%p, 0x%p)\n", __func__, is, idd)); 2022 DPRINT_CONFIG(("%s (0x%p, 0x%p)\n", __func__, is, idd));
2022 2023
@@ -2146,6 +2147,9 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd)
2146 2147
2147 /* error exits that give back resources */ 2148 /* error exits that give back resources */
2148out4: 2149out4:
2150 for (cnt = 0; cnt < phys_port; cnt++)
2151 kfree(ports[cnt]);
2152
2149 kfree(card_ptr); 2153 kfree(card_ptr);
2150 return ret; 2154 return ret;
2151} 2155}
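
The ioc3_serial fix adds the classic partial-allocation unwind: if probing fails after some of the per-card port structures have been allocated, each one allocated so far must be freed before the card structure itself, otherwise they leak. A generic sketch of the pattern with hypothetical names:

    #include <linux/errno.h>
    #include <linux/slab.h>

    #define NPORTS 4

    struct some_port { int id; };           /* stand-in for the per-port struct */

    static int setup_ports(struct some_port **ports)
    {
            int i;

            for (i = 0; i < NPORTS; i++) {
                    ports[i] = kzalloc(sizeof(*ports[i]), GFP_KERNEL);
                    if (!ports[i])
                            goto unwind;
            }
            return 0;

    unwind:
            while (--i >= 0)                /* free only what was allocated */
                    kfree(ports[i]);
            return -ENOMEM;
    }
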
diff --git a/drivers/staging/ti-st/st.h b/drivers/staging/ti-st/st.h
index 9952579425b9..1b3060eb2921 100644
--- a/drivers/staging/ti-st/st.h
+++ b/drivers/staging/ti-st/st.h
@@ -80,5 +80,4 @@ struct st_proto_s {
80extern long st_register(struct st_proto_s *); 80extern long st_register(struct st_proto_s *);
81extern long st_unregister(enum proto_type); 81extern long st_unregister(enum proto_type);
82 82
83extern struct platform_device *st_get_plat_device(void);
84#endif /* ST_H */ 83#endif /* ST_H */
diff --git a/drivers/staging/ti-st/st_core.c b/drivers/staging/ti-st/st_core.c
index 063c9b1db1ab..b85d8bfdf600 100644
--- a/drivers/staging/ti-st/st_core.c
+++ b/drivers/staging/ti-st/st_core.c
@@ -38,7 +38,6 @@
38#include "st_ll.h" 38#include "st_ll.h"
39#include "st.h" 39#include "st.h"
40 40
41#define VERBOSE
42/* strings to be used for rfkill entries and by 41/* strings to be used for rfkill entries and by
43 * ST Core to be used for sysfs debug entry 42 * ST Core to be used for sysfs debug entry
44 */ 43 */
@@ -581,7 +580,7 @@ long st_register(struct st_proto_s *new_proto)
581 long err = 0; 580 long err = 0;
582 unsigned long flags = 0; 581 unsigned long flags = 0;
583 582
584 st_kim_ref(&st_gdata); 583 st_kim_ref(&st_gdata, 0);
585 pr_info("%s(%d) ", __func__, new_proto->type); 584 pr_info("%s(%d) ", __func__, new_proto->type);
586 if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL 585 if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL
587 || new_proto->reg_complete_cb == NULL) { 586 || new_proto->reg_complete_cb == NULL) {
@@ -713,7 +712,7 @@ long st_unregister(enum proto_type type)
713 712
714 pr_debug("%s: %d ", __func__, type); 713 pr_debug("%s: %d ", __func__, type);
715 714
716 st_kim_ref(&st_gdata); 715 st_kim_ref(&st_gdata, 0);
717 if (type < ST_BT || type >= ST_MAX) { 716 if (type < ST_BT || type >= ST_MAX) {
718 pr_err(" protocol %d not supported", type); 717 pr_err(" protocol %d not supported", type);
719 return -EPROTONOSUPPORT; 718 return -EPROTONOSUPPORT;
@@ -767,7 +766,7 @@ long st_write(struct sk_buff *skb)
767#endif 766#endif
768 long len; 767 long len;
769 768
770 st_kim_ref(&st_gdata); 769 st_kim_ref(&st_gdata, 0);
771 if (unlikely(skb == NULL || st_gdata == NULL 770 if (unlikely(skb == NULL || st_gdata == NULL
772 || st_gdata->tty == NULL)) { 771 || st_gdata->tty == NULL)) {
773 pr_err("data/tty unavailable to perform write"); 772 pr_err("data/tty unavailable to perform write");
@@ -818,7 +817,7 @@ static int st_tty_open(struct tty_struct *tty)
818 struct st_data_s *st_gdata; 817 struct st_data_s *st_gdata;
819 pr_info("%s ", __func__); 818 pr_info("%s ", __func__);
820 819
821 st_kim_ref(&st_gdata); 820 st_kim_ref(&st_gdata, 0);
822 st_gdata->tty = tty; 821 st_gdata->tty = tty;
823 tty->disc_data = st_gdata; 822 tty->disc_data = st_gdata;
824 823
diff --git a/drivers/staging/ti-st/st_core.h b/drivers/staging/ti-st/st_core.h
index e0c32d149f5f..8601320a679e 100644
--- a/drivers/staging/ti-st/st_core.h
+++ b/drivers/staging/ti-st/st_core.h
@@ -117,7 +117,7 @@ int st_core_init(struct st_data_s **);
117void st_core_exit(struct st_data_s *); 117void st_core_exit(struct st_data_s *);
118 118
119/* ask for reference from KIM */ 119/* ask for reference from KIM */
120void st_kim_ref(struct st_data_s **); 120void st_kim_ref(struct st_data_s **, int);
121 121
122#define GPS_STUB_TEST 122#define GPS_STUB_TEST
123#ifdef GPS_STUB_TEST 123#ifdef GPS_STUB_TEST
diff --git a/drivers/staging/ti-st/st_kim.c b/drivers/staging/ti-st/st_kim.c
index b4a6c7fdc4e6..9e99463f76e8 100644
--- a/drivers/staging/ti-st/st_kim.c
+++ b/drivers/staging/ti-st/st_kim.c
@@ -72,11 +72,26 @@ const unsigned char *protocol_names[] = {
72 PROTO_ENTRY(ST_GPS, "GPS"), 72 PROTO_ENTRY(ST_GPS, "GPS"),
73}; 73};
74 74
75#define MAX_ST_DEVICES 3 /* Imagine 1 on each UART for now */
76struct platform_device *st_kim_devices[MAX_ST_DEVICES];
75 77
76/**********************************************************************/ 78/**********************************************************************/
77/* internal functions */ 79/* internal functions */
78 80
79/** 81/**
82 * st_get_plat_device -
83 * function which returns the reference to the platform device
 84 * requested by id. As of now only one such device exists (id=0).
 85 * The id to request can be derived from the context asking for the
 86 * reference: either (a) the protocol driver that is registering, or
 87 * (b) the tty device that has been opened.
88 */
89static struct platform_device *st_get_plat_device(int id)
90{
91 return st_kim_devices[id];
92}
93
94/**
80 * validate_firmware_response - 95 * validate_firmware_response -
81 * function to return whether the firmware response was proper 96 * function to return whether the firmware response was proper
82 * in case of error don't complete so that waiting for proper 97 * in case of error don't complete so that waiting for proper
@@ -353,7 +368,7 @@ void st_kim_chip_toggle(enum proto_type type, enum kim_gpio_state state)
353 struct kim_data_s *kim_gdata; 368 struct kim_data_s *kim_gdata;
354 pr_info(" %s ", __func__); 369 pr_info(" %s ", __func__);
355 370
356 kim_pdev = st_get_plat_device(); 371 kim_pdev = st_get_plat_device(0);
357 kim_gdata = dev_get_drvdata(&kim_pdev->dev); 372 kim_gdata = dev_get_drvdata(&kim_pdev->dev);
358 373
359 if (kim_gdata->gpios[type] == -1) { 374 if (kim_gdata->gpios[type] == -1) {
@@ -574,12 +589,12 @@ static int kim_toggle_radio(void *data, bool blocked)
574 * This would enable multiple such platform devices to exist 589 * This would enable multiple such platform devices to exist
575 * on a given platform 590 * on a given platform
576 */ 591 */
577void st_kim_ref(struct st_data_s **core_data) 592void st_kim_ref(struct st_data_s **core_data, int id)
578{ 593{
579 struct platform_device *pdev; 594 struct platform_device *pdev;
580 struct kim_data_s *kim_gdata; 595 struct kim_data_s *kim_gdata;
581 /* get kim_gdata reference from platform device */ 596 /* get kim_gdata reference from platform device */
582 pdev = st_get_plat_device(); 597 pdev = st_get_plat_device(id);
583 kim_gdata = dev_get_drvdata(&pdev->dev); 598 kim_gdata = dev_get_drvdata(&pdev->dev);
584 *core_data = kim_gdata->core_data; 599 *core_data = kim_gdata->core_data;
585} 600}
@@ -623,6 +638,7 @@ static int kim_probe(struct platform_device *pdev)
623 long *gpios = pdev->dev.platform_data; 638 long *gpios = pdev->dev.platform_data;
624 struct kim_data_s *kim_gdata; 639 struct kim_data_s *kim_gdata;
625 640
641 st_kim_devices[pdev->id] = pdev;
626 kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC); 642 kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC);
627 if (!kim_gdata) { 643 if (!kim_gdata) {
628 pr_err("no mem to allocate"); 644 pr_err("no mem to allocate");
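
The ti-st rework stops exporting st_get_plat_device() from platform code; instead each probed platform device is recorded in st_kim_devices[], keyed by pdev->id, so st_kim_ref() can be asked for a specific instance. A hedged sketch of the lookup pattern; the names mirror the hunk loosely and error handling is simplified:

    #include <linux/platform_device.h>

    #define MAX_DEVICES 3
    static struct platform_device *devices[MAX_DEVICES];

    static int my_probe(struct platform_device *pdev)
    {
            devices[pdev->id] = pdev;       /* remember this instance by id */
            return 0;
    }

    static void *get_core_data(int id)      /* what st_kim_ref(..., id) does, roughly */
    {
            struct platform_device *pdev = devices[id];

            return pdev ? dev_get_drvdata(&pdev->dev) : NULL;
    }
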
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index 7e594449600e..9eed5b52d9de 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -91,12 +91,12 @@ config USB_DYNAMIC_MINORS
91 If you are unsure about this, say N here. 91 If you are unsure about this, say N here.
92 92
93config USB_SUSPEND 93config USB_SUSPEND
94 bool "USB runtime power management (suspend/resume and wakeup)" 94 bool "USB runtime power management (autosuspend) and wakeup"
95 depends on USB && PM_RUNTIME 95 depends on USB && PM_RUNTIME
96 help 96 help
97 If you say Y here, you can use driver calls or the sysfs 97 If you say Y here, you can use driver calls or the sysfs
98 "power/level" file to suspend or resume individual USB 98 "power/control" file to enable or disable autosuspend for
99 peripherals and to enable or disable autosuspend (see 99 individual USB peripherals (see
100 Documentation/usb/power-management.txt for more details). 100 Documentation/usb/power-management.txt for more details).
101 101
102 Also, USB "remote wakeup" signaling is supported, whereby some 102 Also, USB "remote wakeup" signaling is supported, whereby some
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index f06f5dbc8cdc..1e6ccef2cf0c 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -159,9 +159,9 @@ void usb_major_cleanup(void)
159int usb_register_dev(struct usb_interface *intf, 159int usb_register_dev(struct usb_interface *intf,
160 struct usb_class_driver *class_driver) 160 struct usb_class_driver *class_driver)
161{ 161{
162 int retval = -EINVAL; 162 int retval;
163 int minor_base = class_driver->minor_base; 163 int minor_base = class_driver->minor_base;
164 int minor = 0; 164 int minor;
165 char name[20]; 165 char name[20];
166 char *temp; 166 char *temp;
167 167
@@ -173,12 +173,17 @@ int usb_register_dev(struct usb_interface *intf,
173 */ 173 */
174 minor_base = 0; 174 minor_base = 0;
175#endif 175#endif
176 intf->minor = -1;
177
178 dbg ("looking for a minor, starting at %d", minor_base);
179 176
180 if (class_driver->fops == NULL) 177 if (class_driver->fops == NULL)
181 goto exit; 178 return -EINVAL;
179 if (intf->minor >= 0)
180 return -EADDRINUSE;
181
182 retval = init_usb_class();
183 if (retval)
184 return retval;
185
186 dev_dbg(&intf->dev, "looking for a minor, starting at %d", minor_base);
182 187
183 down_write(&minor_rwsem); 188 down_write(&minor_rwsem);
184 for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) { 189 for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) {
@@ -186,20 +191,12 @@ int usb_register_dev(struct usb_interface *intf,
186 continue; 191 continue;
187 192
188 usb_minors[minor] = class_driver->fops; 193 usb_minors[minor] = class_driver->fops;
189 194 intf->minor = minor;
190 retval = 0;
191 break; 195 break;
192 } 196 }
193 up_write(&minor_rwsem); 197 up_write(&minor_rwsem);
194 198 if (intf->minor < 0)
195 if (retval) 199 return -EXFULL;
196 goto exit;
197
198 retval = init_usb_class();
199 if (retval)
200 goto exit;
201
202 intf->minor = minor;
203 200
204 /* create a usb class device for this usb interface */ 201 /* create a usb class device for this usb interface */
205 snprintf(name, sizeof(name), class_driver->name, minor - minor_base); 202 snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
@@ -213,11 +210,11 @@ int usb_register_dev(struct usb_interface *intf,
213 "%s", temp); 210 "%s", temp);
214 if (IS_ERR(intf->usb_dev)) { 211 if (IS_ERR(intf->usb_dev)) {
215 down_write(&minor_rwsem); 212 down_write(&minor_rwsem);
216 usb_minors[intf->minor] = NULL; 213 usb_minors[minor] = NULL;
214 intf->minor = -1;
217 up_write(&minor_rwsem); 215 up_write(&minor_rwsem);
218 retval = PTR_ERR(intf->usb_dev); 216 retval = PTR_ERR(intf->usb_dev);
219 } 217 }
220exit:
221 return retval; 218 return retval;
222} 219}
223EXPORT_SYMBOL_GPL(usb_register_dev); 220EXPORT_SYMBOL_GPL(usb_register_dev);
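
The usb_register_dev() rewrite reorders the steps so that everything that can fail without side effects (the argument checks, init_usb_class()) happens before a minor number is claimed; once the minor is taken, intf->minor records what would need undoing, and the error paths become plain returns instead of a goto-exit chain. A sketch of that shape only; the table, limit, locking and global_init() are placeholders, not the USB core's API:

    #include <linux/errno.h>
    #include <linux/fs.h>

    static int global_init(void);                   /* fallible setup (stand-in) */
    static const struct file_operations *minors[16];

    static int claim_minor(const struct file_operations *fops)
    {
            int m;

            if (!fops)
                    return -EINVAL;         /* cheap checks first */
            if (global_init())
                    return -ENODEV;         /* fallible setup before claiming anything */

            for (m = 0; m < 16; m++) {
                    if (!minors[m]) {
                            minors[m] = fops;       /* claim the scarce resource last */
                            return m;
                    }
            }
            return -EXFULL;                 /* nothing to roll back on any error path */
    }
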
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 844683e50383..9f0ce7de0e36 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1802,6 +1802,7 @@ free_interfaces:
1802 intf->dev.groups = usb_interface_groups; 1802 intf->dev.groups = usb_interface_groups;
1803 intf->dev.dma_mask = dev->dev.dma_mask; 1803 intf->dev.dma_mask = dev->dev.dma_mask;
1804 INIT_WORK(&intf->reset_ws, __usb_queue_reset_device); 1804 INIT_WORK(&intf->reset_ws, __usb_queue_reset_device);
1805 intf->minor = -1;
1805 device_initialize(&intf->dev); 1806 device_initialize(&intf->dev);
1806 dev_set_name(&intf->dev, "%d-%s:%d.%d", 1807 dev_set_name(&intf->dev, "%d-%s:%d.%d",
1807 dev->bus->busnum, dev->devpath, 1808 dev->bus->busnum, dev->devpath,
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index 59dc3d351b60..5ab5bb89bae3 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -322,6 +322,7 @@ cppi_channel_allocate(struct dma_controller *c,
322 index, transmit ? 'T' : 'R', cppi_ch); 322 index, transmit ? 'T' : 'R', cppi_ch);
323 cppi_ch->hw_ep = ep; 323 cppi_ch->hw_ep = ep;
324 cppi_ch->channel.status = MUSB_DMA_STATUS_FREE; 324 cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
325 cppi_ch->channel.max_len = 0x7fffffff;
325 326
326 DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R'); 327 DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
327 return &cppi_ch->channel; 328 return &cppi_ch->channel;
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 6fca870e957e..d065e23f123e 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -300,6 +300,11 @@ static void txstate(struct musb *musb, struct musb_request *req)
300#ifndef CONFIG_MUSB_PIO_ONLY 300#ifndef CONFIG_MUSB_PIO_ONLY
301 if (is_dma_capable() && musb_ep->dma) { 301 if (is_dma_capable() && musb_ep->dma) {
302 struct dma_controller *c = musb->dma_controller; 302 struct dma_controller *c = musb->dma_controller;
303 size_t request_size;
304
305 /* setup DMA, then program endpoint CSR */
306 request_size = min_t(size_t, request->length - request->actual,
307 musb_ep->dma->max_len);
303 308
304 use_dma = (request->dma != DMA_ADDR_INVALID); 309 use_dma = (request->dma != DMA_ADDR_INVALID);
305 310
@@ -307,11 +312,6 @@ static void txstate(struct musb *musb, struct musb_request *req)
307 312
308#ifdef CONFIG_USB_INVENTRA_DMA 313#ifdef CONFIG_USB_INVENTRA_DMA
309 { 314 {
310 size_t request_size;
311
312 /* setup DMA, then program endpoint CSR */
313 request_size = min_t(size_t, request->length,
314 musb_ep->dma->max_len);
315 if (request_size < musb_ep->packet_sz) 315 if (request_size < musb_ep->packet_sz)
316 musb_ep->dma->desired_mode = 0; 316 musb_ep->dma->desired_mode = 0;
317 else 317 else
@@ -373,8 +373,8 @@ static void txstate(struct musb *musb, struct musb_request *req)
373 use_dma = use_dma && c->channel_program( 373 use_dma = use_dma && c->channel_program(
374 musb_ep->dma, musb_ep->packet_sz, 374 musb_ep->dma, musb_ep->packet_sz,
375 0, 375 0,
376 request->dma, 376 request->dma + request->actual,
377 request->length); 377 request_size);
378 if (!use_dma) { 378 if (!use_dma) {
379 c->channel_release(musb_ep->dma); 379 c->channel_release(musb_ep->dma);
380 musb_ep->dma = NULL; 380 musb_ep->dma = NULL;
@@ -386,8 +386,8 @@ static void txstate(struct musb *musb, struct musb_request *req)
386 use_dma = use_dma && c->channel_program( 386 use_dma = use_dma && c->channel_program(
387 musb_ep->dma, musb_ep->packet_sz, 387 musb_ep->dma, musb_ep->packet_sz,
388 request->zero, 388 request->zero,
389 request->dma, 389 request->dma + request->actual,
390 request->length); 390 request_size);
391#endif 391#endif
392 } 392 }
393#endif 393#endif
@@ -501,26 +501,14 @@ void musb_g_tx(struct musb *musb, u8 epnum)
501 request->zero = 0; 501 request->zero = 0;
502 } 502 }
503 503
504 /* ... or if not, then complete it. */ 504 if (request->actual == request->length) {
505 musb_g_giveback(musb_ep, request, 0); 505 musb_g_giveback(musb_ep, request, 0);
506 506 request = musb_ep->desc ? next_request(musb_ep) : NULL;
507 /* 507 if (!request) {
508 * Kickstart next transfer if appropriate; 508 DBG(4, "%s idle now\n",
509 * the packet that just completed might not 509 musb_ep->end_point.name);
510 * be transmitted for hours or days. 510 return;
511 * REVISIT for double buffering... 511 }
512 * FIXME revisit for stalls too...
513 */
514 musb_ep_select(mbase, epnum);
515 csr = musb_readw(epio, MUSB_TXCSR);
516 if (csr & MUSB_TXCSR_FIFONOTEMPTY)
517 return;
518
519 request = musb_ep->desc ? next_request(musb_ep) : NULL;
520 if (!request) {
521 DBG(4, "%s idle now\n",
522 musb_ep->end_point.name);
523 return;
524 } 512 }
525 } 513 }
526 514
@@ -568,11 +556,19 @@ static void rxstate(struct musb *musb, struct musb_request *req)
568{ 556{
569 const u8 epnum = req->epnum; 557 const u8 epnum = req->epnum;
570 struct usb_request *request = &req->request; 558 struct usb_request *request = &req->request;
571 struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; 559 struct musb_ep *musb_ep;
572 void __iomem *epio = musb->endpoints[epnum].regs; 560 void __iomem *epio = musb->endpoints[epnum].regs;
573 unsigned fifo_count = 0; 561 unsigned fifo_count = 0;
574 u16 len = musb_ep->packet_sz; 562 u16 len;
575 u16 csr = musb_readw(epio, MUSB_RXCSR); 563 u16 csr = musb_readw(epio, MUSB_RXCSR);
564 struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
565
566 if (hw_ep->is_shared_fifo)
567 musb_ep = &hw_ep->ep_in;
568 else
569 musb_ep = &hw_ep->ep_out;
570
571 len = musb_ep->packet_sz;
576 572
577 /* We shouldn't get here while DMA is active, but we do... */ 573 /* We shouldn't get here while DMA is active, but we do... */
578 if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { 574 if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
@@ -647,8 +643,8 @@ static void rxstate(struct musb *musb, struct musb_request *req)
647 */ 643 */
648 644
649 csr |= MUSB_RXCSR_DMAENAB; 645 csr |= MUSB_RXCSR_DMAENAB;
650#ifdef USE_MODE1
651 csr |= MUSB_RXCSR_AUTOCLEAR; 646 csr |= MUSB_RXCSR_AUTOCLEAR;
647#ifdef USE_MODE1
652 /* csr |= MUSB_RXCSR_DMAMODE; */ 648 /* csr |= MUSB_RXCSR_DMAMODE; */
653 649
654 /* this special sequence (enabling and then 650 /* this special sequence (enabling and then
@@ -663,10 +659,11 @@ static void rxstate(struct musb *musb, struct musb_request *req)
663 if (request->actual < request->length) { 659 if (request->actual < request->length) {
664 int transfer_size = 0; 660 int transfer_size = 0;
665#ifdef USE_MODE1 661#ifdef USE_MODE1
666 transfer_size = min(request->length, 662 transfer_size = min(request->length - request->actual,
667 channel->max_len); 663 channel->max_len);
668#else 664#else
669 transfer_size = len; 665 transfer_size = min(request->length - request->actual,
666 (unsigned)len);
670#endif 667#endif
671 if (transfer_size <= musb_ep->packet_sz) 668 if (transfer_size <= musb_ep->packet_sz)
672 musb_ep->dma->desired_mode = 0; 669 musb_ep->dma->desired_mode = 0;
@@ -740,9 +737,15 @@ void musb_g_rx(struct musb *musb, u8 epnum)
740 u16 csr; 737 u16 csr;
741 struct usb_request *request; 738 struct usb_request *request;
742 void __iomem *mbase = musb->mregs; 739 void __iomem *mbase = musb->mregs;
743 struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; 740 struct musb_ep *musb_ep;
744 void __iomem *epio = musb->endpoints[epnum].regs; 741 void __iomem *epio = musb->endpoints[epnum].regs;
745 struct dma_channel *dma; 742 struct dma_channel *dma;
743 struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
744
745 if (hw_ep->is_shared_fifo)
746 musb_ep = &hw_ep->ep_in;
747 else
748 musb_ep = &hw_ep->ep_out;
746 749
747 musb_ep_select(mbase, epnum); 750 musb_ep_select(mbase, epnum);
748 751
@@ -1081,7 +1084,7 @@ struct free_record {
1081/* 1084/*
1082 * Context: controller locked, IRQs blocked. 1085 * Context: controller locked, IRQs blocked.
1083 */ 1086 */
1084static void musb_ep_restart(struct musb *musb, struct musb_request *req) 1087void musb_ep_restart(struct musb *musb, struct musb_request *req)
1085{ 1088{
1086 DBG(3, "<== %s request %p len %u on hw_ep%d\n", 1089 DBG(3, "<== %s request %p len %u on hw_ep%d\n",
1087 req->tx ? "TX/IN" : "RX/OUT", 1090 req->tx ? "TX/IN" : "RX/OUT",
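
The txstate()/rxstate() changes compute the transfer size once, as the remaining bytes (request->length - request->actual) clamped to the channel's max_len, and program DMA from request->dma + request->actual, so large requests move in successive chunks instead of re-submitting the whole buffer each time. A hedged sketch of that chunking arithmetic; dma_program() stands in for the controller's channel_program callback:

    #include <linux/kernel.h>       /* min_t() */
    #include <linux/types.h>

    static bool dma_program(dma_addr_t addr, size_t len);   /* stand-in */

    static void queue_next_chunk(dma_addr_t dma_base, size_t length,
                                 size_t actual, size_t max_len)
    {
            size_t chunk = min_t(size_t, length - actual, max_len);

            /* start this chunk where the previous one left off */
            dma_program(dma_base + actual, chunk);
    }
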
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
index c8b140325d82..572b1da7f2dc 100644
--- a/drivers/usb/musb/musb_gadget.h
+++ b/drivers/usb/musb/musb_gadget.h
@@ -105,4 +105,6 @@ extern void musb_gadget_cleanup(struct musb *);
105 105
106extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int); 106extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int);
107 107
108extern void musb_ep_restart(struct musb *, struct musb_request *);
109
108#endif /* __MUSB_GADGET_H */ 110#endif /* __MUSB_GADGET_H */
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 59bef8f3a358..6dd03f4c5f49 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -261,6 +261,7 @@ __acquires(musb->lock)
261 ctrlrequest->wIndex & 0x0f; 261 ctrlrequest->wIndex & 0x0f;
262 struct musb_ep *musb_ep; 262 struct musb_ep *musb_ep;
263 struct musb_hw_ep *ep; 263 struct musb_hw_ep *ep;
264 struct musb_request *request;
264 void __iomem *regs; 265 void __iomem *regs;
265 int is_in; 266 int is_in;
266 u16 csr; 267 u16 csr;
@@ -302,6 +303,14 @@ __acquires(musb->lock)
302 musb_writew(regs, MUSB_RXCSR, csr); 303 musb_writew(regs, MUSB_RXCSR, csr);
303 } 304 }
304 305
306 /* Maybe start the first request in the queue */
307 request = to_musb_request(
308 next_request(musb_ep));
309 if (!musb_ep->busy && request) {
310 DBG(3, "restarting the request\n");
311 musb_ep_restart(musb, request);
312 }
313
305 /* select ep0 again */ 314 /* select ep0 again */
306 musb_ep_select(mbase, 0); 315 musb_ep_select(mbase, 0);
307 } break; 316 } break;
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 877d20b1dff9..9e65c47cc98b 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -660,6 +660,12 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
660 660
661 qh->segsize = length; 661 qh->segsize = length;
662 662
663 /*
664 * Ensure the data reaches to main memory before starting
665 * DMA transfer
666 */
667 wmb();
668
663 if (!dma->channel_program(channel, pkt_size, mode, 669 if (!dma->channel_program(channel, pkt_size, mode,
664 urb->transfer_dma + offset, length)) { 670 urb->transfer_dma + offset, length)) {
665 dma->channel_release(channel); 671 dma->channel_release(channel);
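
The musb_host hunk inserts wmb() so the buffer contents written by the CPU are visible in main memory before the DMA controller is told to start; on weakly ordered ARM-class systems those writes could otherwise still be sitting in write buffers when channel_program() kicks the transfer. The shape of the pattern, as a fragment with stand-in helpers (wmb() comes from the arch barrier headers):

    static void fill_buffer(void *buf, size_t len);   /* CPU-side payload writes (stand-in) */
    static void hw_kick_dma(void *buf, size_t len);   /* program and start the channel (stand-in) */

    static void start_dma(void *buf, size_t len)
    {
            fill_buffer(buf, len);
            wmb();          /* order the payload writes before the doorbell */
            hw_kick_dma(buf, len);
    }
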
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 29e850a7a2f9..7c8008225ee3 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -243,7 +243,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
243 int r, nlogs = 0; 243 int r, nlogs = 0;
244 244
245 while (datalen > 0) { 245 while (datalen > 0) {
246 if (unlikely(headcount >= VHOST_NET_MAX_SG)) { 246 if (unlikely(seg >= VHOST_NET_MAX_SG)) {
247 r = -ENOBUFS; 247 r = -ENOBUFS;
248 goto err; 248 goto err;
249 } 249 }
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index c579dcc9200c..dd3d6f7406f8 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -858,11 +858,12 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
858 if (r < 0) 858 if (r < 0)
859 return r; 859 return r;
860 len -= l; 860 len -= l;
861 if (!len) 861 if (!len) {
862 if (vq->log_ctx)
863 eventfd_signal(vq->log_ctx, 1);
862 return 0; 864 return 0;
865 }
863 } 866 }
864 if (vq->log_ctx)
865 eventfd_signal(vq->log_ctx, 1);
866 /* Length written exceeds what we have stored. This is a bug. */ 867 /* Length written exceeds what we have stored. This is a bug. */
867 BUG(); 868 BUG();
868 return 0; 869 return 0;
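
The vhost_log_write() fix moves the eventfd_signal() into the early-return branch: when the loop consumed the whole length and returned from inside it, the signalling code after the loop was never reached, so a userspace reader waiting on the log eventfd was not woken. A sketch of the corrected control flow with stand-in helpers:

    #include <linux/errno.h>
    #include <linux/types.h>

    static void notify_done(void);              /* stand-in for eventfd_signal(ctx, 1) */
    static size_t do_one_chunk(size_t len);     /* stand-in; returns bytes handled */

    static int write_all(size_t len)
    {
            while (len > 0) {
                    size_t l = do_one_chunk(len);

                    if (!l)
                            return -EFAULT;     /* stand-in error path */
                    len -= l;
                    if (!len) {
                            notify_done();      /* signal on the path that returns */
                            return 0;
                    }
            }
            return 0;
    }
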
diff --git a/drivers/video/pxa168fb.c b/drivers/video/pxa168fb.c
index 5d786bd3e304..a31a77ff6f3d 100644
--- a/drivers/video/pxa168fb.c
+++ b/drivers/video/pxa168fb.c
@@ -298,8 +298,8 @@ static void set_dma_control0(struct pxa168fb_info *fbi)
298 * Set bit to enable graphics DMA. 298 * Set bit to enable graphics DMA.
299 */ 299 */
300 x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0); 300 x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0);
301 x |= fbi->active ? 0x00000100 : 0; 301 x &= ~CFG_GRA_ENA_MASK;
302 fbi->active = 0; 302 x |= fbi->active ? CFG_GRA_ENA(1) : CFG_GRA_ENA(0);
303 303
304 /* 304 /*
305 * If we are in a pseudo-color mode, we need to enable 305 * If we are in a pseudo-color mode, we need to enable
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index a76e0aa5cd3f..391915093fe1 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -209,7 +209,10 @@ static int ocfs2_acl_set_mode(struct inode *inode, struct buffer_head *di_bh,
209 } 209 }
210 210
211 inode->i_mode = new_mode; 211 inode->i_mode = new_mode;
212 inode->i_ctime = CURRENT_TIME;
212 di->i_mode = cpu_to_le16(inode->i_mode); 213 di->i_mode = cpu_to_le16(inode->i_mode);
214 di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
215 di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
213 216
214 ocfs2_journal_dirty(handle, di_bh); 217 ocfs2_journal_dirty(handle, di_bh);
215 218
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 1361997cf205..cbe2f057cc28 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -977,7 +977,7 @@ static int o2net_tx_can_proceed(struct o2net_node *nn,
977int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec, 977int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
978 size_t caller_veclen, u8 target_node, int *status) 978 size_t caller_veclen, u8 target_node, int *status)
979{ 979{
980 int ret; 980 int ret = 0;
981 struct o2net_msg *msg = NULL; 981 struct o2net_msg *msg = NULL;
982 size_t veclen, caller_bytes = 0; 982 size_t veclen, caller_bytes = 0;
983 struct kvec *vec = NULL; 983 struct kvec *vec = NULL;
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index f04ebcfffc4a..c49f6de0e7ab 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -3931,6 +3931,15 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
3931 goto out_commit; 3931 goto out_commit;
3932 } 3932 }
3933 3933
3934 cpos = split_hash;
3935 ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle,
3936 data_ac, meta_ac, new_dx_leaves,
3937 num_dx_leaves);
3938 if (ret) {
3939 mlog_errno(ret);
3940 goto out_commit;
3941 }
3942
3934 for (i = 0; i < num_dx_leaves; i++) { 3943 for (i = 0; i < num_dx_leaves; i++) {
3935 ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), 3944 ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
3936 orig_dx_leaves[i], 3945 orig_dx_leaves[i],
@@ -3939,15 +3948,14 @@ static int ocfs2_dx_dir_rebalance(struct ocfs2_super *osb, struct inode *dir,
3939 mlog_errno(ret); 3948 mlog_errno(ret);
3940 goto out_commit; 3949 goto out_commit;
3941 } 3950 }
3942 }
3943 3951
3944 cpos = split_hash; 3952 ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir),
3945 ret = ocfs2_dx_dir_new_cluster(dir, &et, cpos, handle, 3953 new_dx_leaves[i],
3946 data_ac, meta_ac, new_dx_leaves, 3954 OCFS2_JOURNAL_ACCESS_WRITE);
3947 num_dx_leaves); 3955 if (ret) {
3948 if (ret) { 3956 mlog_errno(ret);
3949 mlog_errno(ret); 3957 goto out_commit;
3950 goto out_commit; 3958 }
3951 } 3959 }
3952 3960
3953 ocfs2_dx_dir_transfer_leaf(dir, split_hash, handle, tmp_dx_leaf, 3961 ocfs2_dx_dir_transfer_leaf(dir, split_hash, handle, tmp_dx_leaf,
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index 4b6ae2c13b47..765298908f1d 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -1030,6 +1030,7 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm,
1030 struct dlm_lock_resource *res); 1030 struct dlm_lock_resource *res);
1031void dlm_clean_master_list(struct dlm_ctxt *dlm, 1031void dlm_clean_master_list(struct dlm_ctxt *dlm,
1032 u8 dead_node); 1032 u8 dead_node);
1033void dlm_force_free_mles(struct dlm_ctxt *dlm);
1033int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock); 1034int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);
1034int __dlm_lockres_has_locks(struct dlm_lock_resource *res); 1035int __dlm_lockres_has_locks(struct dlm_lock_resource *res);
1035int __dlm_lockres_unused(struct dlm_lock_resource *res); 1036int __dlm_lockres_unused(struct dlm_lock_resource *res);
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index 5efdd37dfe48..901ca52bf86b 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -636,8 +636,14 @@ static void *lockres_seq_start(struct seq_file *m, loff_t *pos)
636 spin_lock(&dlm->track_lock); 636 spin_lock(&dlm->track_lock);
637 if (oldres) 637 if (oldres)
638 track_list = &oldres->tracking; 638 track_list = &oldres->tracking;
639 else 639 else {
640 track_list = &dlm->tracking_list; 640 track_list = &dlm->tracking_list;
641 if (list_empty(track_list)) {
642 dl = NULL;
643 spin_unlock(&dlm->track_lock);
644 goto bail;
645 }
646 }
641 647
642 list_for_each_entry(res, track_list, tracking) { 648 list_for_each_entry(res, track_list, tracking) {
643 if (&res->tracking == &dlm->tracking_list) 649 if (&res->tracking == &dlm->tracking_list)
@@ -660,6 +666,7 @@ static void *lockres_seq_start(struct seq_file *m, loff_t *pos)
660 } else 666 } else
661 dl = NULL; 667 dl = NULL;
662 668
669bail:
663 /* passed to seq_show */ 670 /* passed to seq_show */
664 return dl; 671 return dl;
665} 672}
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 153abb5abef0..11a5c87fd7f7 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -693,6 +693,7 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm)
693 693
694 dlm_mark_domain_leaving(dlm); 694 dlm_mark_domain_leaving(dlm);
695 dlm_leave_domain(dlm); 695 dlm_leave_domain(dlm);
696 dlm_force_free_mles(dlm);
696 dlm_complete_dlm_shutdown(dlm); 697 dlm_complete_dlm_shutdown(dlm);
697 } 698 }
698 dlm_put(dlm); 699 dlm_put(dlm);
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index ffb4c68dafa4..f564b0e5f80d 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -3433,3 +3433,43 @@ void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
3433 wake_up(&res->wq); 3433 wake_up(&res->wq);
3434 wake_up(&dlm->migration_wq); 3434 wake_up(&dlm->migration_wq);
3435} 3435}
3436
3437void dlm_force_free_mles(struct dlm_ctxt *dlm)
3438{
3439 int i;
3440 struct hlist_head *bucket;
3441 struct dlm_master_list_entry *mle;
3442 struct hlist_node *tmp, *list;
3443
3444 /*
3445 * We notified all other nodes that we are exiting the domain and
3446 * marked the dlm state to DLM_CTXT_LEAVING. If any mles are still
3447 * around we force free them and wake any processes that are waiting
3448 * on the mles
3449 */
3450 spin_lock(&dlm->spinlock);
3451 spin_lock(&dlm->master_lock);
3452
3453 BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING);
3454 BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES));
3455
3456 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
3457 bucket = dlm_master_hash(dlm, i);
3458 hlist_for_each_safe(list, tmp, bucket) {
3459 mle = hlist_entry(list, struct dlm_master_list_entry,
3460 master_hash_node);
3461 if (mle->type != DLM_MLE_BLOCK) {
3462 mlog(ML_ERROR, "bad mle: %p\n", mle);
3463 dlm_print_one_mle(mle);
3464 }
3465 atomic_set(&mle->woken, 1);
3466 wake_up(&mle->wq);
3467
3468 __dlm_unlink_mle(dlm, mle);
3469 __dlm_mle_detach_hb_events(dlm, mle);
3470 __dlm_put_mle(mle);
3471 }
3472 }
3473 spin_unlock(&dlm->master_lock);
3474 spin_unlock(&dlm->spinlock);
3475}
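
dlm_force_free_mles() walks every master hash bucket with hlist_for_each_safe(), which is the required form when entries are unlinked and dropped during the walk: the _safe variant caches the next pointer before the current entry is removed. A minimal illustration of that iteration pattern; struct item and purge_bucket() are hypothetical, not ocfs2 code:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct item {
            struct hlist_node node;
            int key;
    };

    static void purge_bucket(struct hlist_head *bucket)
    {
            struct hlist_node *pos, *tmp;
            struct item *it;

            hlist_for_each_safe(pos, tmp, bucket) {
                    it = hlist_entry(pos, struct item, node);
                    hlist_del(&it->node);   /* safe: tmp already holds the next node */
                    kfree(it);
            }
    }
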
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h
index d1ce48e1b3d6..1d596d8c4a4a 100644
--- a/fs/ocfs2/dlmglue.h
+++ b/fs/ocfs2/dlmglue.h
@@ -84,6 +84,7 @@ enum {
84 OI_LS_PARENT, 84 OI_LS_PARENT,
85 OI_LS_RENAME1, 85 OI_LS_RENAME1,
86 OI_LS_RENAME2, 86 OI_LS_RENAME2,
87 OI_LS_REFLINK_TARGET,
87}; 88};
88 89
89int ocfs2_dlm_init(struct ocfs2_super *osb); 90int ocfs2_dlm_init(struct ocfs2_super *osb);
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 33f1c9a8258d..fa31d05e41b7 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -235,18 +235,31 @@
235#define OCFS2_HAS_REFCOUNT_FL (0x0010) 235#define OCFS2_HAS_REFCOUNT_FL (0x0010)
236 236
237/* Inode attributes, keep in sync with EXT2 */ 237/* Inode attributes, keep in sync with EXT2 */
238#define OCFS2_SECRM_FL (0x00000001) /* Secure deletion */ 238#define OCFS2_SECRM_FL FS_SECRM_FL /* Secure deletion */
239#define OCFS2_UNRM_FL (0x00000002) /* Undelete */ 239#define OCFS2_UNRM_FL FS_UNRM_FL /* Undelete */
240#define OCFS2_COMPR_FL (0x00000004) /* Compress file */ 240#define OCFS2_COMPR_FL FS_COMPR_FL /* Compress file */
241#define OCFS2_SYNC_FL (0x00000008) /* Synchronous updates */ 241#define OCFS2_SYNC_FL FS_SYNC_FL /* Synchronous updates */
242#define OCFS2_IMMUTABLE_FL (0x00000010) /* Immutable file */ 242#define OCFS2_IMMUTABLE_FL FS_IMMUTABLE_FL /* Immutable file */
243#define OCFS2_APPEND_FL (0x00000020) /* writes to file may only append */ 243#define OCFS2_APPEND_FL FS_APPEND_FL /* writes to file may only append */
244#define OCFS2_NODUMP_FL (0x00000040) /* do not dump file */ 244#define OCFS2_NODUMP_FL FS_NODUMP_FL /* do not dump file */
245#define OCFS2_NOATIME_FL (0x00000080) /* do not update atime */ 245#define OCFS2_NOATIME_FL FS_NOATIME_FL /* do not update atime */
246#define OCFS2_DIRSYNC_FL (0x00010000) /* dirsync behaviour (directories only) */ 246/* Reserved for compression usage... */
247 247#define OCFS2_DIRTY_FL FS_DIRTY_FL
248#define OCFS2_FL_VISIBLE (0x000100FF) /* User visible flags */ 248#define OCFS2_COMPRBLK_FL FS_COMPRBLK_FL /* One or more compressed clusters */
249#define OCFS2_FL_MODIFIABLE (0x000100FF) /* User modifiable flags */ 249#define OCFS2_NOCOMP_FL FS_NOCOMP_FL /* Don't compress */
250#define OCFS2_ECOMPR_FL FS_ECOMPR_FL /* Compression error */
251/* End compression flags --- maybe not all used */
252#define OCFS2_BTREE_FL FS_BTREE_FL /* btree format dir */
253#define OCFS2_INDEX_FL FS_INDEX_FL /* hash-indexed directory */
254#define OCFS2_IMAGIC_FL FS_IMAGIC_FL /* AFS directory */
255#define OCFS2_JOURNAL_DATA_FL FS_JOURNAL_DATA_FL /* Reserved for ext3 */
256#define OCFS2_NOTAIL_FL FS_NOTAIL_FL /* file tail should not be merged */
257#define OCFS2_DIRSYNC_FL FS_DIRSYNC_FL /* dirsync behaviour (directories only) */
258#define OCFS2_TOPDIR_FL FS_TOPDIR_FL /* Top of directory hierarchies*/
259#define OCFS2_RESERVED_FL FS_RESERVED_FL /* reserved for ext2 lib */
260
261#define OCFS2_FL_VISIBLE FS_FL_USER_VISIBLE /* User visible flags */
262#define OCFS2_FL_MODIFIABLE FS_FL_USER_MODIFIABLE /* User modifiable flags */
250 263
251/* 264/*
252 * Extent record flags (e_node.leaf.flags) 265 * Extent record flags (e_node.leaf.flags)
diff --git a/fs/ocfs2/ocfs2_ioctl.h b/fs/ocfs2/ocfs2_ioctl.h
index 2d3420af1a83..5d241505690b 100644
--- a/fs/ocfs2/ocfs2_ioctl.h
+++ b/fs/ocfs2/ocfs2_ioctl.h
@@ -23,10 +23,10 @@
23/* 23/*
24 * ioctl commands 24 * ioctl commands
25 */ 25 */
26#define OCFS2_IOC_GETFLAGS _IOR('f', 1, long) 26#define OCFS2_IOC_GETFLAGS FS_IOC_GETFLAGS
27#define OCFS2_IOC_SETFLAGS _IOW('f', 2, long) 27#define OCFS2_IOC_SETFLAGS FS_IOC_SETFLAGS
28#define OCFS2_IOC32_GETFLAGS _IOR('f', 1, int) 28#define OCFS2_IOC32_GETFLAGS FS_IOC32_GETFLAGS
29#define OCFS2_IOC32_SETFLAGS _IOW('f', 2, int) 29#define OCFS2_IOC32_SETFLAGS FS_IOC32_SETFLAGS
30 30
31/* 31/*
32 * Space reservation / allocation / free ioctls and argument structure 32 * Space reservation / allocation / free ioctls and argument structure
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 0afeda83120f..efdd75607406 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -4201,8 +4201,9 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
4201 goto out; 4201 goto out;
4202 } 4202 }
4203 4203
4204 mutex_lock(&new_inode->i_mutex); 4204 mutex_lock_nested(&new_inode->i_mutex, I_MUTEX_CHILD);
4205 ret = ocfs2_inode_lock(new_inode, &new_bh, 1); 4205 ret = ocfs2_inode_lock_nested(new_inode, &new_bh, 1,
4206 OI_LS_REFLINK_TARGET);
4206 if (ret) { 4207 if (ret) {
4207 mlog_errno(ret); 4208 mlog_errno(ret);
4208 goto out_unlock; 4209 goto out_unlock;
diff --git a/fs/ocfs2/reservations.c b/fs/ocfs2/reservations.c
index d8b6e4259b80..3e78db361bc7 100644
--- a/fs/ocfs2/reservations.c
+++ b/fs/ocfs2/reservations.c
@@ -732,25 +732,23 @@ int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap,
732 struct ocfs2_alloc_reservation *resv, 732 struct ocfs2_alloc_reservation *resv,
733 int *cstart, int *clen) 733 int *cstart, int *clen)
734{ 734{
735 unsigned int wanted = *clen;
736
737 if (resv == NULL || ocfs2_resmap_disabled(resmap)) 735 if (resv == NULL || ocfs2_resmap_disabled(resmap))
738 return -ENOSPC; 736 return -ENOSPC;
739 737
740 spin_lock(&resv_lock); 738 spin_lock(&resv_lock);
741 739
742 /*
743 * We don't want to over-allocate for temporary
744 * windows. Otherwise, we run the risk of fragmenting the
745 * allocation space.
746 */
747 wanted = ocfs2_resv_window_bits(resmap, resv);
748 if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen)
749 wanted = *clen;
750
751 if (ocfs2_resv_empty(resv)) { 740 if (ocfs2_resv_empty(resv)) {
752 mlog(0, "empty reservation, find new window\n"); 741 /*
742 * We don't want to over-allocate for temporary
743 * windows. Otherwise, we run the risk of fragmenting the
744 * allocation space.
745 */
746 unsigned int wanted = ocfs2_resv_window_bits(resmap, resv);
753 747
748 if ((resv->r_flags & OCFS2_RESV_FLAG_TMP) || wanted < *clen)
749 wanted = *clen;
750
751 mlog(0, "empty reservation, find new window\n");
754 /* 752 /*
755 * Try to get a window here. If it works, we must fall 753 * Try to get a window here. If it works, we must fall
756 * through and test the bitmap . This avoids some 754 * through and test the bitmap . This avoids some
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 8a286f54dca1..849c2f0e0a0e 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -357,7 +357,7 @@ out:
357static void ocfs2_bg_discontig_add_extent(struct ocfs2_super *osb, 357static void ocfs2_bg_discontig_add_extent(struct ocfs2_super *osb,
358 struct ocfs2_group_desc *bg, 358 struct ocfs2_group_desc *bg,
359 struct ocfs2_chain_list *cl, 359 struct ocfs2_chain_list *cl,
360 u64 p_blkno, u32 clusters) 360 u64 p_blkno, unsigned int clusters)
361{ 361{
362 struct ocfs2_extent_list *el = &bg->bg_list; 362 struct ocfs2_extent_list *el = &bg->bg_list;
363 struct ocfs2_extent_rec *rec; 363 struct ocfs2_extent_rec *rec;
@@ -369,7 +369,7 @@ static void ocfs2_bg_discontig_add_extent(struct ocfs2_super *osb,
369 rec->e_blkno = cpu_to_le64(p_blkno); 369 rec->e_blkno = cpu_to_le64(p_blkno);
370 rec->e_cpos = cpu_to_le32(le16_to_cpu(bg->bg_bits) / 370 rec->e_cpos = cpu_to_le32(le16_to_cpu(bg->bg_bits) /
371 le16_to_cpu(cl->cl_bpc)); 371 le16_to_cpu(cl->cl_bpc));
372 rec->e_leaf_clusters = cpu_to_le32(clusters); 372 rec->e_leaf_clusters = cpu_to_le16(clusters);
373 le16_add_cpu(&bg->bg_bits, clusters * le16_to_cpu(cl->cl_bpc)); 373 le16_add_cpu(&bg->bg_bits, clusters * le16_to_cpu(cl->cl_bpc));
374 le16_add_cpu(&bg->bg_free_bits_count, 374 le16_add_cpu(&bg->bg_free_bits_count,
375 clusters * le16_to_cpu(cl->cl_bpc)); 375 clusters * le16_to_cpu(cl->cl_bpc));
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index d03469f61801..06fa5e77c40e 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -1286,13 +1286,11 @@ int ocfs2_xattr_get_nolock(struct inode *inode,
1286 xis.inode_bh = xbs.inode_bh = di_bh; 1286 xis.inode_bh = xbs.inode_bh = di_bh;
1287 di = (struct ocfs2_dinode *)di_bh->b_data; 1287 di = (struct ocfs2_dinode *)di_bh->b_data;
1288 1288
1289 down_read(&oi->ip_xattr_sem);
1290 ret = ocfs2_xattr_ibody_get(inode, name_index, name, buffer, 1289 ret = ocfs2_xattr_ibody_get(inode, name_index, name, buffer,
1291 buffer_size, &xis); 1290 buffer_size, &xis);
1292 if (ret == -ENODATA && di->i_xattr_loc) 1291 if (ret == -ENODATA && di->i_xattr_loc)
1293 ret = ocfs2_xattr_block_get(inode, name_index, name, buffer, 1292 ret = ocfs2_xattr_block_get(inode, name_index, name, buffer,
1294 buffer_size, &xbs); 1293 buffer_size, &xbs);
1295 up_read(&oi->ip_xattr_sem);
1296 1294
1297 return ret; 1295 return ret;
1298} 1296}
@@ -1316,8 +1314,10 @@ static int ocfs2_xattr_get(struct inode *inode,
1316 mlog_errno(ret); 1314 mlog_errno(ret);
1317 return ret; 1315 return ret;
1318 } 1316 }
1317 down_read(&OCFS2_I(inode)->ip_xattr_sem);
1319 ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index, 1318 ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index,
1320 name, buffer, buffer_size); 1319 name, buffer, buffer_size);
1320 up_read(&OCFS2_I(inode)->ip_xattr_sem);
1321 1321
1322 ocfs2_inode_unlock(inode, 0); 1322 ocfs2_inode_unlock(inode, 0);
1323 1323
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 59d066936ab9..123566912d73 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -27,8 +27,6 @@
27 27
28#define MAX_LINKS 32 28#define MAX_LINKS 32
29 29
30struct net;
31
32struct sockaddr_nl { 30struct sockaddr_nl {
33 sa_family_t nl_family; /* AF_NETLINK */ 31 sa_family_t nl_family; /* AF_NETLINK */
34 unsigned short nl_pad; /* zero */ 32 unsigned short nl_pad; /* zero */
@@ -151,6 +149,8 @@ struct nlattr {
151#include <linux/capability.h> 149#include <linux/capability.h>
152#include <linux/skbuff.h> 150#include <linux/skbuff.h>
153 151
152struct net;
153
154static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb) 154static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
155{ 155{
156 return (struct nlmsghdr *)skb->data; 156 return (struct nlmsghdr *)skb->data;
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 10d33309e9a6..570fddeb0388 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -393,6 +393,9 @@
393#define PCI_DEVICE_ID_VLSI_82C147 0x0105 393#define PCI_DEVICE_ID_VLSI_82C147 0x0105
394#define PCI_DEVICE_ID_VLSI_VAS96011 0x0702 394#define PCI_DEVICE_ID_VLSI_VAS96011 0x0702
395 395
396/* AMD RD890 Chipset */
397#define PCI_DEVICE_ID_RD890_IOMMU 0x5a23
398
396#define PCI_VENDOR_ID_ADL 0x1005 399#define PCI_VENDOR_ID_ADL 0x1005
397#define PCI_DEVICE_ID_ADL_2301 0x2301 400#define PCI_DEVICE_ID_ADL_2301 0x2301
398 401
diff --git a/include/linux/socket.h b/include/linux/socket.h
index a2fada9becb6..a8f56e1ec760 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -322,7 +322,7 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
322 int offset, 322 int offset,
323 unsigned int len, __wsum *csump); 323 unsigned int len, __wsum *csump);
324 324
325extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode); 325extern long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
326extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len); 326extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
327extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, 327extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
328 int offset, int len); 328 int offset, int len);
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 45375b41a2a0..4d40c4d0230b 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -121,6 +121,7 @@ static inline int addrconf_finite_timeout(unsigned long timeout)
121 * IPv6 Address Label subsystem (addrlabel.c) 121 * IPv6 Address Label subsystem (addrlabel.c)
122 */ 122 */
123extern int ipv6_addr_label_init(void); 123extern int ipv6_addr_label_init(void);
124extern void ipv6_addr_label_cleanup(void);
124extern void ipv6_addr_label_rtnl_register(void); 125extern void ipv6_addr_label_rtnl_register(void);
125extern u32 ipv6_addr_label(struct net *net, 126extern u32 ipv6_addr_label(struct net *net,
126 const struct in6_addr *addr, 127 const struct in6_addr *addr,
diff --git a/include/net/dst.h b/include/net/dst.h
index 81d1413a8701..02386505033d 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -242,6 +242,7 @@ static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
242 dev->stats.rx_packets++; 242 dev->stats.rx_packets++;
243 dev->stats.rx_bytes += skb->len; 243 dev->stats.rx_bytes += skb->len;
244 skb->rxhash = 0; 244 skb->rxhash = 0;
245 skb_set_queue_mapping(skb, 0);
245 skb_dst_drop(skb); 246 skb_dst_drop(skb);
246 nf_reset(skb); 247 nf_reset(skb);
247} 248}
diff --git a/include/net/route.h b/include/net/route.h
index bd732d62e1c3..7e5e73bfa4de 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -199,6 +199,8 @@ static inline int ip_route_newports(struct rtable **rp, u8 protocol,
199 fl.fl_ip_sport = sport; 199 fl.fl_ip_sport = sport;
200 fl.fl_ip_dport = dport; 200 fl.fl_ip_dport = dport;
201 fl.proto = protocol; 201 fl.proto = protocol;
202 if (inet_sk(sk)->transparent)
203 fl.flags |= FLOWI_FLAG_ANYSRC;
202 ip_rt_put(*rp); 204 ip_rt_put(*rp);
203 *rp = NULL; 205 *rp = NULL;
204 security_sk_classify_flow(sk, &fl); 206 security_sk_classify_flow(sk, &fl);
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index fc8f36dd0f5c..4f53532d4c2f 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -298,8 +298,8 @@ struct xfrm_state_afinfo {
298 const struct xfrm_type *type_map[IPPROTO_MAX]; 298 const struct xfrm_type *type_map[IPPROTO_MAX];
299 struct xfrm_mode *mode_map[XFRM_MODE_MAX]; 299 struct xfrm_mode *mode_map[XFRM_MODE_MAX];
300 int (*init_flags)(struct xfrm_state *x); 300 int (*init_flags)(struct xfrm_state *x);
301 void (*init_tempsel)(struct xfrm_state *x, struct flowi *fl, 301 void (*init_tempsel)(struct xfrm_selector *sel, struct flowi *fl);
302 struct xfrm_tmpl *tmpl, 302 void (*init_temprop)(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
303 xfrm_address_t *daddr, xfrm_address_t *saddr); 303 xfrm_address_t *daddr, xfrm_address_t *saddr);
304 int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n); 304 int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
305 int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n); 305 int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
diff --git a/mm/fremap.c b/mm/fremap.c
index 46f5dacf90a2..ec520c7b28df 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -125,7 +125,6 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
125{ 125{
126 struct mm_struct *mm = current->mm; 126 struct mm_struct *mm = current->mm;
127 struct address_space *mapping; 127 struct address_space *mapping;
128 unsigned long end = start + size;
129 struct vm_area_struct *vma; 128 struct vm_area_struct *vma;
130 int err = -EINVAL; 129 int err = -EINVAL;
131 int has_write_lock = 0; 130 int has_write_lock = 0;
@@ -142,6 +141,10 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
142 if (start + size <= start) 141 if (start + size <= start)
143 return err; 142 return err;
144 143
144 /* Does pgoff wrap? */
145 if (pgoff + (size >> PAGE_SHIFT) < pgoff)
146 return err;
147
145 /* Can we represent this offset inside this architecture's pte's? */ 148 /* Can we represent this offset inside this architecture's pte's? */
146#if PTE_FILE_MAX_BITS < BITS_PER_LONG 149#if PTE_FILE_MAX_BITS < BITS_PER_LONG
147 if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS)) 150 if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
@@ -168,7 +171,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
168 if (!(vma->vm_flags & VM_CAN_NONLINEAR)) 171 if (!(vma->vm_flags & VM_CAN_NONLINEAR))
169 goto out; 172 goto out;
170 173
171 if (end <= start || start < vma->vm_start || end > vma->vm_end) 174 if (start < vma->vm_start || start + size > vma->vm_end)
172 goto out; 175 goto out;
173 176
174 /* Must set VM_NONLINEAR before any pages are populated. */ 177 /* Must set VM_NONLINEAR before any pages are populated. */
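
[editor's note] The check added above guards remap_file_pages() against pgoff + (size >> PAGE_SHIFT) wrapping past the top of the unsigned range. A minimal userspace sketch of the same wrap-detection idiom follows; the names are illustrative, not kernel API.

#include <stdbool.h>
#include <stdio.h>

/* A sum of two unsigned values that wraps around is strictly smaller
 * than either operand, so "sum < off" detects the overflow. */
static bool range_wraps(unsigned long off, unsigned long len)
{
        return off + len < off;
}

int main(void)
{
        printf("%d\n", range_wraps(10UL, 20UL));       /* 0: fits  */
        printf("%d\n", range_wraps(~0UL - 5UL, 10UL)); /* 1: wraps */
        return 0;
}
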
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cc5be788a39f..c03273807182 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2324,11 +2324,8 @@ retry_avoidcopy:
2324 * and just make the page writable */ 2324 * and just make the page writable */
2325 avoidcopy = (page_mapcount(old_page) == 1); 2325 avoidcopy = (page_mapcount(old_page) == 1);
2326 if (avoidcopy) { 2326 if (avoidcopy) {
2327 if (!trylock_page(old_page)) { 2327 if (PageAnon(old_page))
2328 if (PageAnon(old_page)) 2328 page_move_anon_rmap(old_page, vma, address);
2329 page_move_anon_rmap(old_page, vma, address);
2330 } else
2331 unlock_page(old_page);
2332 set_huge_ptep_writable(vma, address, ptep); 2329 set_huge_ptep_writable(vma, address, ptep);
2333 return 0; 2330 return 0;
2334 } 2331 }
@@ -2404,7 +2401,7 @@ retry_avoidcopy:
2404 set_huge_pte_at(mm, address, ptep, 2401 set_huge_pte_at(mm, address, ptep,
2405 make_huge_pte(vma, new_page, 1)); 2402 make_huge_pte(vma, new_page, 1));
2406 page_remove_rmap(old_page); 2403 page_remove_rmap(old_page);
2407 hugepage_add_anon_rmap(new_page, vma, address); 2404 hugepage_add_new_anon_rmap(new_page, vma, address);
2408 /* Make the old page be freed below */ 2405 /* Make the old page be freed below */
2409 new_page = old_page; 2406 new_page = old_page;
2410 mmu_notifier_invalidate_range_end(mm, 2407 mmu_notifier_invalidate_range_end(mm,
@@ -2631,10 +2628,16 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2631 vma, address); 2628 vma, address);
2632 } 2629 }
2633 2630
2634 if (!pagecache_page) { 2631 /*
2635 page = pte_page(entry); 2632 * hugetlb_cow() requires page locks of pte_page(entry) and
2633 * pagecache_page, so here we need take the former one
2634 * when page != pagecache_page or !pagecache_page.
2635 * Note that locking order is always pagecache_page -> page,
2636 * so no worry about deadlock.
2637 */
2638 page = pte_page(entry);
2639 if (page != pagecache_page)
2636 lock_page(page); 2640 lock_page(page);
2637 }
2638 2641
2639 spin_lock(&mm->page_table_lock); 2642 spin_lock(&mm->page_table_lock);
2640 /* Check for a racing update before calling hugetlb_cow */ 2643 /* Check for a racing update before calling hugetlb_cow */
@@ -2661,9 +2664,8 @@ out_page_table_lock:
2661 if (pagecache_page) { 2664 if (pagecache_page) {
2662 unlock_page(pagecache_page); 2665 unlock_page(pagecache_page);
2663 put_page(pagecache_page); 2666 put_page(pagecache_page);
2664 } else {
2665 unlock_page(page);
2666 } 2667 }
2668 unlock_page(page);
2667 2669
2668out_mutex: 2670out_mutex:
2669 mutex_unlock(&hugetlb_instantiation_mutex); 2671 mutex_unlock(&hugetlb_instantiation_mutex);
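
[editor's note] The comment introduced in hugetlb_fault() leans on a fixed lock order (pagecache_page, then page) to rule out an AB/BA deadlock. Below is a small pthread sketch of that convention under hypothetical lock names; it is an analogy, not the hugetlb code.

#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

/* Every path that needs both locks takes them in the same order,
 * a then b.  Two threads can then never each hold one lock while
 * waiting for the other, which is the classic AB/BA deadlock. */
static void update_both(void)
{
        pthread_mutex_lock(&lock_a);
        pthread_mutex_lock(&lock_b);
        /* ... work that touches both protected objects ... */
        pthread_mutex_unlock(&lock_b);
        pthread_mutex_unlock(&lock_a);
}

int main(void)
{
        update_both();
        return 0;
}
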
diff --git a/mm/rmap.c b/mm/rmap.c
index f6f0d2dda2ea..9d2ba01bd4f9 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1564,13 +1564,14 @@ static void __hugepage_set_anon_rmap(struct page *page,
1564 struct vm_area_struct *vma, unsigned long address, int exclusive) 1564 struct vm_area_struct *vma, unsigned long address, int exclusive)
1565{ 1565{
1566 struct anon_vma *anon_vma = vma->anon_vma; 1566 struct anon_vma *anon_vma = vma->anon_vma;
1567
1567 BUG_ON(!anon_vma); 1568 BUG_ON(!anon_vma);
1568 if (!exclusive) { 1569
1569 struct anon_vma_chain *avc; 1570 if (PageAnon(page))
1570 avc = list_entry(vma->anon_vma_chain.prev, 1571 return;
1571 struct anon_vma_chain, same_vma); 1572 if (!exclusive)
1572 anon_vma = avc->anon_vma; 1573 anon_vma = anon_vma->root;
1573 } 1574
1574 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 1575 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1575 page->mapping = (struct address_space *) anon_vma; 1576 page->mapping = (struct address_space *) anon_vma;
1576 page->index = linear_page_index(vma, address); 1577 page->index = linear_page_index(vma, address);
@@ -1581,6 +1582,8 @@ void hugepage_add_anon_rmap(struct page *page,
1581{ 1582{
1582 struct anon_vma *anon_vma = vma->anon_vma; 1583 struct anon_vma *anon_vma = vma->anon_vma;
1583 int first; 1584 int first;
1585
1586 BUG_ON(!PageLocked(page));
1584 BUG_ON(!anon_vma); 1587 BUG_ON(!anon_vma);
1585 BUG_ON(address < vma->vm_start || address >= vma->vm_end); 1588 BUG_ON(address < vma->vm_start || address >= vma->vm_end);
1586 first = atomic_inc_and_test(&page->_mapcount); 1589 first = atomic_inc_and_test(&page->_mapcount);
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 0ea20c30466c..17c5ba7551a5 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -426,8 +426,10 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
426 426
427 /* Allocate an fcall for the reply */ 427 /* Allocate an fcall for the reply */
428 rpl_context = kmalloc(sizeof *rpl_context, GFP_KERNEL); 428 rpl_context = kmalloc(sizeof *rpl_context, GFP_KERNEL);
429 if (!rpl_context) 429 if (!rpl_context) {
430 err = -ENOMEM;
430 goto err_close; 431 goto err_close;
432 }
431 433
432 /* 434 /*
433 * If the request has a buffer, steal it, otherwise 435 * If the request has a buffer, steal it, otherwise
@@ -445,8 +447,8 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
445 } 447 }
446 rpl_context->rc = req->rc; 448 rpl_context->rc = req->rc;
447 if (!rpl_context->rc) { 449 if (!rpl_context->rc) {
448 kfree(rpl_context); 450 err = -ENOMEM;
449 goto err_close; 451 goto err_free2;
450 } 452 }
451 453
452 /* 454 /*
@@ -458,11 +460,8 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
458 */ 460 */
459 if (atomic_inc_return(&rdma->rq_count) <= rdma->rq_depth) { 461 if (atomic_inc_return(&rdma->rq_count) <= rdma->rq_depth) {
460 err = post_recv(client, rpl_context); 462 err = post_recv(client, rpl_context);
461 if (err) { 463 if (err)
462 kfree(rpl_context->rc); 464 goto err_free1;
463 kfree(rpl_context);
464 goto err_close;
465 }
466 } else 465 } else
467 atomic_dec(&rdma->rq_count); 466 atomic_dec(&rdma->rq_count);
468 467
@@ -471,8 +470,10 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
471 470
472 /* Post the request */ 471 /* Post the request */
473 c = kmalloc(sizeof *c, GFP_KERNEL); 472 c = kmalloc(sizeof *c, GFP_KERNEL);
474 if (!c) 473 if (!c) {
475 goto err_close; 474 err = -ENOMEM;
475 goto err_free1;
476 }
476 c->req = req; 477 c->req = req;
477 478
478 c->busa = ib_dma_map_single(rdma->cm_id->device, 479 c->busa = ib_dma_map_single(rdma->cm_id->device,
@@ -499,9 +500,15 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
499 return ib_post_send(rdma->qp, &wr, &bad_wr); 500 return ib_post_send(rdma->qp, &wr, &bad_wr);
500 501
501 error: 502 error:
503 kfree(c);
504 kfree(rpl_context->rc);
505 kfree(rpl_context);
502 P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n"); 506 P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n");
503 return -EIO; 507 return -EIO;
504 508 err_free1:
509 kfree(rpl_context->rc);
510 err_free2:
511 kfree(rpl_context);
505 err_close: 512 err_close:
506 spin_lock_irqsave(&rdma->req_lock, flags); 513 spin_lock_irqsave(&rdma->req_lock, flags);
507 if (rdma->state < P9_RDMA_CLOSING) { 514 if (rdma->state < P9_RDMA_CLOSING) {
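
[editor's note] The rdma_request() error paths above are restructured around layered labels so that each failure frees only what was already allocated. A self-contained sketch of that C idiom, stripped of the 9p types:

#include <stdlib.h>

struct ctx {
        char *buf_a;
        char *buf_b;
};

/* Returns 0 on success; on failure, frees exactly what was already
 * allocated and returns -1, so the caller never sees a half-built ctx. */
static int ctx_setup(struct ctx *c)
{
        c->buf_a = malloc(64);
        if (!c->buf_a)
                goto err;

        c->buf_b = malloc(64);
        if (!c->buf_b)
                goto err_free_a;        /* only buf_a exists at this point */

        return 0;

err_free_a:
        free(c->buf_a);
err:
        return -1;
}

int main(void)
{
        struct ctx c;

        if (ctx_setup(&c))
                return 1;
        free(c.buf_b);
        free(c.buf_a);
        return 0;
}
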
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index dcfbe99ff81c..b88515936e4b 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -329,7 +329,8 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
329 329
330 mutex_lock(&virtio_9p_lock); 330 mutex_lock(&virtio_9p_lock);
331 list_for_each_entry(chan, &virtio_chan_list, chan_list) { 331 list_for_each_entry(chan, &virtio_chan_list, chan_list) {
332 if (!strncmp(devname, chan->tag, chan->tag_len)) { 332 if (!strncmp(devname, chan->tag, chan->tag_len) &&
333 strlen(devname) == chan->tag_len) {
333 if (!chan->inuse) { 334 if (!chan->inuse) {
334 chan->inuse = true; 335 chan->inuse = true;
335 found = 1; 336 found = 1;
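
[editor's note] The mount-tag comparison now also checks strlen(devname) == chan->tag_len, because strncmp() over tag_len bytes alone would accept any device name that merely starts with the tag. A quick standalone illustration with made-up strings:

#include <stdio.h>
#include <string.h>

static int prefix_match(const char *name, const char *tag)
{
        return strncmp(name, tag, strlen(tag)) == 0;
}

static int exact_match(const char *name, const char *tag)
{
        size_t len = strlen(tag);

        return strncmp(name, tag, len) == 0 && strlen(name) == len;
}

int main(void)
{
        /* "vm1-extra" begins with "vm1", so the prefix test wrongly says yes. */
        printf("prefix: %d  exact: %d\n",
               prefix_match("vm1-extra", "vm1"),
               exact_match("vm1-extra", "vm1"));   /* prints: prefix: 1  exact: 0 */
        return 0;
}
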
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 651babdfab38..ad2b232a2055 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -399,12 +399,6 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
399 unregister_netdev(net_dev); 399 unregister_netdev(net_dev);
400 free_netdev(net_dev); 400 free_netdev(net_dev);
401 } 401 }
402 read_lock_irq(&devs_lock);
403 if (list_empty(&br2684_devs)) {
404 /* last br2684 device */
405 unregister_atmdevice_notifier(&atm_dev_notifier);
406 }
407 read_unlock_irq(&devs_lock);
408 return; 402 return;
409 } 403 }
410 404
@@ -675,7 +669,6 @@ static int br2684_create(void __user *arg)
675 669
676 if (list_empty(&br2684_devs)) { 670 if (list_empty(&br2684_devs)) {
677 /* 1st br2684 device */ 671 /* 1st br2684 device */
678 register_atmdevice_notifier(&atm_dev_notifier);
679 brdev->number = 1; 672 brdev->number = 1;
680 } else 673 } else
681 brdev->number = BRPRIV(list_entry_brdev(br2684_devs.prev))->number + 1; 674 brdev->number = BRPRIV(list_entry_brdev(br2684_devs.prev))->number + 1;
@@ -815,6 +808,7 @@ static int __init br2684_init(void)
815 return -ENOMEM; 808 return -ENOMEM;
816#endif 809#endif
817 register_atm_ioctl(&br2684_ioctl_ops); 810 register_atm_ioctl(&br2684_ioctl_ops);
811 register_atmdevice_notifier(&atm_dev_notifier);
818 return 0; 812 return 0;
819} 813}
820 814
@@ -830,9 +824,7 @@ static void __exit br2684_exit(void)
830#endif 824#endif
831 825
832 826
833 /* if not already empty */ 827 unregister_atmdevice_notifier(&atm_dev_notifier);
834 if (!list_empty(&br2684_devs))
835 unregister_atmdevice_notifier(&atm_dev_notifier);
836 828
837 while (!list_empty(&br2684_devs)) { 829 while (!list_empty(&br2684_devs)) {
838 net_dev = list_entry_brdev(br2684_devs.next); 830 net_dev = list_entry_brdev(br2684_devs.next);
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 1cd98df412df..e6b133b77ccb 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -35,9 +35,10 @@
35 * in any case. 35 * in any case.
36 */ 36 */
37 37
38int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode) 38long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
39{ 39{
40 int size, err, ct; 40 int size, ct;
41 long err;
41 42
42 if (m->msg_namelen) { 43 if (m->msg_namelen) {
43 if (mode == VERIFY_READ) { 44 if (mode == VERIFY_READ) {
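
[editor's note] verify_iovec() now returns long, presumably so that a combined iovec length too large for int is reported rather than truncated. A hedged userspace sketch of overflow-aware accumulation in that spirit (total_iov_len() is a hypothetical helper, not the kernel function):

#include <limits.h>
#include <stddef.h>
#include <sys/uio.h>

/* Sum the iov_len fields; return -1 if the total would not fit in a long. */
static long total_iov_len(const struct iovec *iov, int n)
{
        long total = 0;

        for (int i = 0; i < n; i++) {
                size_t len = iov[i].iov_len;

                if (len > (size_t)(LONG_MAX - total))
                        return -1;      /* would overflow a long */
                total += (long)len;
        }
        return total;
}

int main(void)
{
        char a[4], b[8];
        struct iovec iov[2] = {
                { .iov_base = a, .iov_len = sizeof(a) },
                { .iov_base = b, .iov_len = sizeof(b) },
        };

        return total_iov_len(iov, 2) == 12 ? 0 : 1;
}
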
diff --git a/net/core/sock.c b/net/core/sock.c
index b05b9b6ddb87..ef30e9d286e7 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1351,9 +1351,9 @@ int sock_i_uid(struct sock *sk)
1351{ 1351{
1352 int uid; 1352 int uid;
1353 1353
1354 read_lock(&sk->sk_callback_lock); 1354 read_lock_bh(&sk->sk_callback_lock);
1355 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0; 1355 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
1356 read_unlock(&sk->sk_callback_lock); 1356 read_unlock_bh(&sk->sk_callback_lock);
1357 return uid; 1357 return uid;
1358} 1358}
1359EXPORT_SYMBOL(sock_i_uid); 1359EXPORT_SYMBOL(sock_i_uid);
@@ -1362,9 +1362,9 @@ unsigned long sock_i_ino(struct sock *sk)
1362{ 1362{
1363 unsigned long ino; 1363 unsigned long ino;
1364 1364
1365 read_lock(&sk->sk_callback_lock); 1365 read_lock_bh(&sk->sk_callback_lock);
1366 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0; 1366 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
1367 read_unlock(&sk->sk_callback_lock); 1367 read_unlock_bh(&sk->sk_callback_lock);
1368 return ino; 1368 return ino;
1369} 1369}
1370EXPORT_SYMBOL(sock_i_ino); 1370EXPORT_SYMBOL(sock_i_ino);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 945b20a5ad50..35c93e8b6a46 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -45,7 +45,7 @@
45#include <net/netns/generic.h> 45#include <net/netns/generic.h>
46#include <net/rtnetlink.h> 46#include <net/rtnetlink.h>
47 47
48#ifdef CONFIG_IPV6 48#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
49#include <net/ipv6.h> 49#include <net/ipv6.h>
50#include <net/ip6_fib.h> 50#include <net/ip6_fib.h>
51#include <net/ip6_route.h> 51#include <net/ip6_route.h>
@@ -699,7 +699,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
699 if ((dst = rt->rt_gateway) == 0) 699 if ((dst = rt->rt_gateway) == 0)
700 goto tx_error_icmp; 700 goto tx_error_icmp;
701 } 701 }
702#ifdef CONFIG_IPV6 702#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
703 else if (skb->protocol == htons(ETH_P_IPV6)) { 703 else if (skb->protocol == htons(ETH_P_IPV6)) {
704 struct in6_addr *addr6; 704 struct in6_addr *addr6;
705 int addr_type; 705 int addr_type;
@@ -774,7 +774,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
774 goto tx_error; 774 goto tx_error;
775 } 775 }
776 } 776 }
777#ifdef CONFIG_IPV6 777#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
778 else if (skb->protocol == htons(ETH_P_IPV6)) { 778 else if (skb->protocol == htons(ETH_P_IPV6)) {
779 struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb); 779 struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
780 780
@@ -850,7 +850,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
850 if ((iph->ttl = tiph->ttl) == 0) { 850 if ((iph->ttl = tiph->ttl) == 0) {
851 if (skb->protocol == htons(ETH_P_IP)) 851 if (skb->protocol == htons(ETH_P_IP))
852 iph->ttl = old_iph->ttl; 852 iph->ttl = old_iph->ttl;
853#ifdef CONFIG_IPV6 853#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
854 else if (skb->protocol == htons(ETH_P_IPV6)) 854 else if (skb->protocol == htons(ETH_P_IPV6))
855 iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit; 855 iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit;
856#endif 856#endif
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 04b69896df5f..7649d7750075 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -488,9 +488,8 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
488 * we can switch to copy when see the first bad fragment. 488 * we can switch to copy when see the first bad fragment.
489 */ 489 */
490 if (skb_has_frags(skb)) { 490 if (skb_has_frags(skb)) {
491 struct sk_buff *frag; 491 struct sk_buff *frag, *frag2;
492 int first_len = skb_pagelen(skb); 492 int first_len = skb_pagelen(skb);
493 int truesizes = 0;
494 493
495 if (first_len - hlen > mtu || 494 if (first_len - hlen > mtu ||
496 ((first_len - hlen) & 7) || 495 ((first_len - hlen) & 7) ||
@@ -503,18 +502,18 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
503 if (frag->len > mtu || 502 if (frag->len > mtu ||
504 ((frag->len & 7) && frag->next) || 503 ((frag->len & 7) && frag->next) ||
505 skb_headroom(frag) < hlen) 504 skb_headroom(frag) < hlen)
506 goto slow_path; 505 goto slow_path_clean;
507 506
508 /* Partially cloned skb? */ 507 /* Partially cloned skb? */
509 if (skb_shared(frag)) 508 if (skb_shared(frag))
510 goto slow_path; 509 goto slow_path_clean;
511 510
512 BUG_ON(frag->sk); 511 BUG_ON(frag->sk);
513 if (skb->sk) { 512 if (skb->sk) {
514 frag->sk = skb->sk; 513 frag->sk = skb->sk;
515 frag->destructor = sock_wfree; 514 frag->destructor = sock_wfree;
516 } 515 }
517 truesizes += frag->truesize; 516 skb->truesize -= frag->truesize;
518 } 517 }
519 518
520 /* Everything is OK. Generate! */ 519 /* Everything is OK. Generate! */
@@ -524,7 +523,6 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
524 frag = skb_shinfo(skb)->frag_list; 523 frag = skb_shinfo(skb)->frag_list;
525 skb_frag_list_init(skb); 524 skb_frag_list_init(skb);
526 skb->data_len = first_len - skb_headlen(skb); 525 skb->data_len = first_len - skb_headlen(skb);
527 skb->truesize -= truesizes;
528 skb->len = first_len; 526 skb->len = first_len;
529 iph->tot_len = htons(first_len); 527 iph->tot_len = htons(first_len);
530 iph->frag_off = htons(IP_MF); 528 iph->frag_off = htons(IP_MF);
@@ -576,6 +574,15 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
576 } 574 }
577 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS); 575 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
578 return err; 576 return err;
577
578slow_path_clean:
579 skb_walk_frags(skb, frag2) {
580 if (frag2 == frag)
581 break;
582 frag2->sk = NULL;
583 frag2->destructor = NULL;
584 skb->truesize += frag2->truesize;
585 }
579 } 586 }
580 587
581slow_path: 588slow_path:
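
[editor's note] The new slow_path_clean label in ip_fragment() walks back over the fragments the fast path had already touched and restores their sk, destructor and truesize accounting. The general shape of that rollback pattern, sketched without skb types (prepare()/unprepare() are placeholders):

#include <stdbool.h>

struct item {
        bool prepared;
};

/* Always succeeds here; a real prepare step may fail partway through. */
static bool prepare(struct item *it)
{
        it->prepared = true;
        return true;
}

static void unprepare(struct item *it)
{
        it->prepared = false;
}

/* Prepare items[0..n); if item i fails, undo items[0..i) so the caller
 * can fall back to the slow path with everything in its original state. */
static int try_fast_path(struct item *items, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                if (!prepare(&items[i]))
                        goto rollback;
        }
        return 0;               /* fast path proceeds */

rollback:
        while (--i >= 0)
                unprepare(&items[i]);
        return -1;              /* caller takes the slow path instead */
}

int main(void)
{
        struct item items[3] = { {0}, {0}, {0} };

        return try_fast_path(items, 3);
}
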
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index b254dafaf429..43eec80c0e7c 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -112,6 +112,7 @@ static void send_reset(struct sk_buff *oldskb, int hook)
112 /* ip_route_me_harder expects skb->dst to be set */ 112 /* ip_route_me_harder expects skb->dst to be set */
113 skb_dst_set_noref(nskb, skb_dst(oldskb)); 113 skb_dst_set_noref(nskb, skb_dst(oldskb));
114 114
115 nskb->protocol = htons(ETH_P_IP);
115 if (ip_route_me_harder(nskb, addr_type)) 116 if (ip_route_me_harder(nskb, addr_type))
116 goto free_nskb; 117 goto free_nskb;
117 118
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index eab8de32f200..f3a9b42b16c6 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -66,9 +66,11 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
66 const struct net_device *out, 66 const struct net_device *out,
67 int (*okfn)(struct sk_buff *)) 67 int (*okfn)(struct sk_buff *))
68{ 68{
69 struct sock *sk = skb->sk;
69 struct inet_sock *inet = inet_sk(skb->sk); 70 struct inet_sock *inet = inet_sk(skb->sk);
70 71
71 if (inet && inet->nodefrag) 72 if (sk && (sk->sk_family == PF_INET) &&
73 inet->nodefrag)
72 return NF_ACCEPT; 74 return NF_ACCEPT;
73 75
74#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 76#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 1679e2c0963d..ee5f419d0a56 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -893,13 +893,15 @@ static void fast_csum(__sum16 *csum,
893 unsigned char s[4]; 893 unsigned char s[4];
894 894
895 if (offset & 1) { 895 if (offset & 1) {
896 s[0] = s[2] = 0; 896 s[0] = ~0;
897 s[1] = ~*optr; 897 s[1] = ~*optr;
898 s[2] = 0;
898 s[3] = *nptr; 899 s[3] = *nptr;
899 } else { 900 } else {
900 s[1] = s[3] = 0;
901 s[0] = ~*optr; 901 s[0] = ~*optr;
902 s[1] = ~0;
902 s[2] = *nptr; 903 s[2] = *nptr;
904 s[3] = 0;
903 } 905 }
904 906
905 *csum = csum_fold(csum_partial(s, 4, ~csum_unfold(*csum))); 907 *csum = csum_fold(csum_partial(s, 4, ~csum_unfold(*csum)));
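
[editor's note] fast_csum() folds the difference between an old and a new byte into an existing one's-complement checksum; the fix above puts the ~0 and 0 filler bytes into the correct slots for odd offsets. The underlying incremental-update rule, HC' = ~(~HC + ~m + m') from RFC 1624, in standalone C that does not use the kernel csum helpers:

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit one's-complement accumulator down to 16 bits. */
static uint16_t csum_fold16(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

/* Update checksum "check" after a 16-bit field changed from m_old to
 * m_new, without re-summing the whole packet: HC' = ~(~HC + ~m + m'). */
static uint16_t csum_update(uint16_t check, uint16_t m_old, uint16_t m_new)
{
        uint32_t sum = (uint16_t)~check;

        sum += (uint16_t)~m_old;
        sum += m_new;
        return (uint16_t)~csum_fold16(sum);
}

int main(void)
{
        /* e.g. a 16-bit header field rewritten from 0x1234 to 0xabcd */
        printf("0x%04x\n", csum_update(0xdead, 0x1234, 0xabcd));
        return 0;
}
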
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 6298f75d5e93..ac6559cb54f9 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1231,7 +1231,7 @@ restart:
1231 } 1231 }
1232 1232
1233 if (net_ratelimit()) 1233 if (net_ratelimit())
1234 printk(KERN_WARNING "Neighbour table overflow.\n"); 1234 printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
1235 rt_drop(rt); 1235 rt_drop(rt);
1236 return -ENOBUFS; 1236 return -ENOBUFS;
1237 } 1237 }
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 3fb1428e526e..f115ea68a4ef 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -386,8 +386,6 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
386 */ 386 */
387 387
388 mask = 0; 388 mask = 0;
389 if (sk->sk_err)
390 mask = POLLERR;
391 389
392 /* 390 /*
393 * POLLHUP is certainly not done right. But poll() doesn't 391 * POLLHUP is certainly not done right. But poll() doesn't
@@ -457,6 +455,11 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
457 if (tp->urg_data & TCP_URG_VALID) 455 if (tp->urg_data & TCP_URG_VALID)
458 mask |= POLLPRI; 456 mask |= POLLPRI;
459 } 457 }
458 /* This barrier is coupled with smp_wmb() in tcp_reset() */
459 smp_rmb();
460 if (sk->sk_err)
461 mask |= POLLERR;
462
460 return mask; 463 return mask;
461} 464}
462EXPORT_SYMBOL(tcp_poll); 465EXPORT_SYMBOL(tcp_poll);
@@ -940,7 +943,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
940 sg = sk->sk_route_caps & NETIF_F_SG; 943 sg = sk->sk_route_caps & NETIF_F_SG;
941 944
942 while (--iovlen >= 0) { 945 while (--iovlen >= 0) {
943 int seglen = iov->iov_len; 946 size_t seglen = iov->iov_len;
944 unsigned char __user *from = iov->iov_base; 947 unsigned char __user *from = iov->iov_base;
945 948
946 iov++; 949 iov++;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e663b78a2ef6..b55f60f6fcbe 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2545,7 +2545,8 @@ static void tcp_mark_head_lost(struct sock *sk, int packets)
2545 cnt += tcp_skb_pcount(skb); 2545 cnt += tcp_skb_pcount(skb);
2546 2546
2547 if (cnt > packets) { 2547 if (cnt > packets) {
2548 if (tcp_is_sack(tp) || (oldcnt >= packets)) 2548 if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) ||
2549 (oldcnt >= packets))
2549 break; 2550 break;
2550 2551
2551 mss = skb_shinfo(skb)->gso_size; 2552 mss = skb_shinfo(skb)->gso_size;
@@ -4048,6 +4049,8 @@ static void tcp_reset(struct sock *sk)
4048 default: 4049 default:
4049 sk->sk_err = ECONNRESET; 4050 sk->sk_err = ECONNRESET;
4050 } 4051 }
4052 /* This barrier is coupled with smp_rmb() in tcp_poll() */
4053 smp_wmb();
4051 4054
4052 if (!sock_flag(sk, SOCK_DEAD)) 4055 if (!sock_flag(sk, SOCK_DEAD))
4053 sk->sk_error_report(sk); 4056 sk->sk_error_report(sk);
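
[editor's note] tcp_reset() now publishes sk_err behind smp_wmb() and tcp_poll() reads it after smp_rmb(), so a waiter that sees the wakeup state also sees the error code. A userspace analogue of that pairing using C11 release/acquire atomics rather than the kernel barrier macros (variable names are invented):

#include <stdatomic.h>
#include <stdio.h>

static int sk_err;              /* plain data published by the writer */
static atomic_int err_ready;    /* flag the reader polls              */

/* Writer side, cf. tcp_reset(): store the error, then publish it. */
static void report_error(int err)
{
        sk_err = err;
        /* release: the sk_err store is visible before the flag flips */
        atomic_store_explicit(&err_ready, 1, memory_order_release);
}

/* Reader side, cf. tcp_poll(): check the flag, then read the error. */
static int poll_error(void)
{
        if (atomic_load_explicit(&err_ready, memory_order_acquire))
                return sk_err;  /* acquire guarantees we see the store */
        return 0;
}

int main(void)
{
        report_error(104);      /* ECONNRESET on many systems */
        printf("%d\n", poll_error());
        return 0;
}
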
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 869078d4eeb9..a580349f0b8a 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -61,7 +61,7 @@ static int xfrm4_get_saddr(struct net *net,
61 61
62static int xfrm4_get_tos(struct flowi *fl) 62static int xfrm4_get_tos(struct flowi *fl)
63{ 63{
64 return fl->fl4_tos; 64 return IPTOS_RT_MASK & fl->fl4_tos; /* Strip ECN bits */
65} 65}
66 66
67static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst, 67static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index 1ef1366a0a03..47947624eccc 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -21,21 +21,25 @@ static int xfrm4_init_flags(struct xfrm_state *x)
21} 21}
22 22
23static void 23static void
24__xfrm4_init_tempsel(struct xfrm_state *x, struct flowi *fl, 24__xfrm4_init_tempsel(struct xfrm_selector *sel, struct flowi *fl)
25 struct xfrm_tmpl *tmpl, 25{
26 xfrm_address_t *daddr, xfrm_address_t *saddr) 26 sel->daddr.a4 = fl->fl4_dst;
27 sel->saddr.a4 = fl->fl4_src;
28 sel->dport = xfrm_flowi_dport(fl);
29 sel->dport_mask = htons(0xffff);
30 sel->sport = xfrm_flowi_sport(fl);
31 sel->sport_mask = htons(0xffff);
32 sel->family = AF_INET;
33 sel->prefixlen_d = 32;
34 sel->prefixlen_s = 32;
35 sel->proto = fl->proto;
36 sel->ifindex = fl->oif;
37}
38
39static void
40xfrm4_init_temprop(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
41 xfrm_address_t *daddr, xfrm_address_t *saddr)
27{ 42{
28 x->sel.daddr.a4 = fl->fl4_dst;
29 x->sel.saddr.a4 = fl->fl4_src;
30 x->sel.dport = xfrm_flowi_dport(fl);
31 x->sel.dport_mask = htons(0xffff);
32 x->sel.sport = xfrm_flowi_sport(fl);
33 x->sel.sport_mask = htons(0xffff);
34 x->sel.family = AF_INET;
35 x->sel.prefixlen_d = 32;
36 x->sel.prefixlen_s = 32;
37 x->sel.proto = fl->proto;
38 x->sel.ifindex = fl->oif;
39 x->id = tmpl->id; 43 x->id = tmpl->id;
40 if (x->id.daddr.a4 == 0) 44 if (x->id.daddr.a4 == 0)
41 x->id.daddr.a4 = daddr->a4; 45 x->id.daddr.a4 = daddr->a4;
@@ -70,6 +74,7 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = {
70 .owner = THIS_MODULE, 74 .owner = THIS_MODULE,
71 .init_flags = xfrm4_init_flags, 75 .init_flags = xfrm4_init_flags,
72 .init_tempsel = __xfrm4_init_tempsel, 76 .init_tempsel = __xfrm4_init_tempsel,
77 .init_temprop = xfrm4_init_temprop,
73 .output = xfrm4_output, 78 .output = xfrm4_output,
74 .extract_input = xfrm4_extract_input, 79 .extract_input = xfrm4_extract_input,
75 .extract_output = xfrm4_extract_output, 80 .extract_output = xfrm4_extract_output,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index ab70a3fbcafa..324fac3b6c16 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4637,10 +4637,12 @@ int __init addrconf_init(void)
4637 if (err < 0) { 4637 if (err < 0) {
4638 printk(KERN_CRIT "IPv6 Addrconf:" 4638 printk(KERN_CRIT "IPv6 Addrconf:"
4639 " cannot initialize default policy table: %d.\n", err); 4639 " cannot initialize default policy table: %d.\n", err);
4640 return err; 4640 goto out;
4641 } 4641 }
4642 4642
4643 register_pernet_subsys(&addrconf_ops); 4643 err = register_pernet_subsys(&addrconf_ops);
4644 if (err < 0)
4645 goto out_addrlabel;
4644 4646
4645 /* The addrconf netdev notifier requires that loopback_dev 4647 /* The addrconf netdev notifier requires that loopback_dev
4646 * has it's ipv6 private information allocated and setup 4648 * has it's ipv6 private information allocated and setup
@@ -4692,7 +4694,9 @@ errout:
4692 unregister_netdevice_notifier(&ipv6_dev_notf); 4694 unregister_netdevice_notifier(&ipv6_dev_notf);
4693errlo: 4695errlo:
4694 unregister_pernet_subsys(&addrconf_ops); 4696 unregister_pernet_subsys(&addrconf_ops);
4695 4697out_addrlabel:
4698 ipv6_addr_label_cleanup();
4699out:
4696 return err; 4700 return err;
4697} 4701}
4698 4702
@@ -4703,6 +4707,7 @@ void addrconf_cleanup(void)
4703 4707
4704 unregister_netdevice_notifier(&ipv6_dev_notf); 4708 unregister_netdevice_notifier(&ipv6_dev_notf);
4705 unregister_pernet_subsys(&addrconf_ops); 4709 unregister_pernet_subsys(&addrconf_ops);
4710 ipv6_addr_label_cleanup();
4706 4711
4707 rtnl_lock(); 4712 rtnl_lock();
4708 4713
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index f0e774cea386..8175f802651b 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -393,6 +393,11 @@ int __init ipv6_addr_label_init(void)
393 return register_pernet_subsys(&ipv6_addr_label_ops); 393 return register_pernet_subsys(&ipv6_addr_label_ops);
394} 394}
395 395
396void ipv6_addr_label_cleanup(void)
397{
398 unregister_pernet_subsys(&ipv6_addr_label_ops);
399}
400
396static const struct nla_policy ifal_policy[IFAL_MAX+1] = { 401static const struct nla_policy ifal_policy[IFAL_MAX+1] = {
397 [IFAL_ADDRESS] = { .len = sizeof(struct in6_addr), }, 402 [IFAL_ADDRESS] = { .len = sizeof(struct in6_addr), },
398 [IFAL_LABEL] = { .len = sizeof(u32), }, 403 [IFAL_LABEL] = { .len = sizeof(u32), },
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index d40b330c0ee6..980912ed7a38 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -639,7 +639,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
639 639
640 if (skb_has_frags(skb)) { 640 if (skb_has_frags(skb)) {
641 int first_len = skb_pagelen(skb); 641 int first_len = skb_pagelen(skb);
642 int truesizes = 0; 642 struct sk_buff *frag2;
643 643
644 if (first_len - hlen > mtu || 644 if (first_len - hlen > mtu ||
645 ((first_len - hlen) & 7) || 645 ((first_len - hlen) & 7) ||
@@ -651,18 +651,18 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
651 if (frag->len > mtu || 651 if (frag->len > mtu ||
652 ((frag->len & 7) && frag->next) || 652 ((frag->len & 7) && frag->next) ||
653 skb_headroom(frag) < hlen) 653 skb_headroom(frag) < hlen)
654 goto slow_path; 654 goto slow_path_clean;
655 655
656 /* Partially cloned skb? */ 656 /* Partially cloned skb? */
657 if (skb_shared(frag)) 657 if (skb_shared(frag))
658 goto slow_path; 658 goto slow_path_clean;
659 659
660 BUG_ON(frag->sk); 660 BUG_ON(frag->sk);
661 if (skb->sk) { 661 if (skb->sk) {
662 frag->sk = skb->sk; 662 frag->sk = skb->sk;
663 frag->destructor = sock_wfree; 663 frag->destructor = sock_wfree;
664 truesizes += frag->truesize;
665 } 664 }
665 skb->truesize -= frag->truesize;
666 } 666 }
667 667
668 err = 0; 668 err = 0;
@@ -693,7 +693,6 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
693 693
694 first_len = skb_pagelen(skb); 694 first_len = skb_pagelen(skb);
695 skb->data_len = first_len - skb_headlen(skb); 695 skb->data_len = first_len - skb_headlen(skb);
696 skb->truesize -= truesizes;
697 skb->len = first_len; 696 skb->len = first_len;
698 ipv6_hdr(skb)->payload_len = htons(first_len - 697 ipv6_hdr(skb)->payload_len = htons(first_len -
699 sizeof(struct ipv6hdr)); 698 sizeof(struct ipv6hdr));
@@ -756,6 +755,15 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
756 IPSTATS_MIB_FRAGFAILS); 755 IPSTATS_MIB_FRAGFAILS);
757 dst_release(&rt->dst); 756 dst_release(&rt->dst);
758 return err; 757 return err;
758
759slow_path_clean:
760 skb_walk_frags(skb, frag2) {
761 if (frag2 == frag)
762 break;
763 frag2->sk = NULL;
764 frag2->destructor = NULL;
765 skb->truesize += frag2->truesize;
766 }
759 } 767 }
760 768
761slow_path: 769slow_path:
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index d126365ac046..8323136bdc54 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -670,7 +670,7 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *dad
670 670
671 if (net_ratelimit()) 671 if (net_ratelimit())
672 printk(KERN_WARNING 672 printk(KERN_WARNING
673 "Neighbour table overflow.\n"); 673 "ipv6: Neighbour table overflow.\n");
674 dst_free(&rt->dst); 674 dst_free(&rt->dst);
675 return NULL; 675 return NULL;
676 } 676 }
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index f417b77fa0e1..a67575d472a3 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -20,23 +20,27 @@
20#include <net/addrconf.h> 20#include <net/addrconf.h>
21 21
22static void 22static void
23__xfrm6_init_tempsel(struct xfrm_state *x, struct flowi *fl, 23__xfrm6_init_tempsel(struct xfrm_selector *sel, struct flowi *fl)
24 struct xfrm_tmpl *tmpl,
25 xfrm_address_t *daddr, xfrm_address_t *saddr)
26{ 24{
27 /* Initialize temporary selector matching only 25 /* Initialize temporary selector matching only
28 * to current session. */ 26 * to current session. */
29 ipv6_addr_copy((struct in6_addr *)&x->sel.daddr, &fl->fl6_dst); 27 ipv6_addr_copy((struct in6_addr *)&sel->daddr, &fl->fl6_dst);
30 ipv6_addr_copy((struct in6_addr *)&x->sel.saddr, &fl->fl6_src); 28 ipv6_addr_copy((struct in6_addr *)&sel->saddr, &fl->fl6_src);
31 x->sel.dport = xfrm_flowi_dport(fl); 29 sel->dport = xfrm_flowi_dport(fl);
32 x->sel.dport_mask = htons(0xffff); 30 sel->dport_mask = htons(0xffff);
33 x->sel.sport = xfrm_flowi_sport(fl); 31 sel->sport = xfrm_flowi_sport(fl);
34 x->sel.sport_mask = htons(0xffff); 32 sel->sport_mask = htons(0xffff);
35 x->sel.family = AF_INET6; 33 sel->family = AF_INET6;
36 x->sel.prefixlen_d = 128; 34 sel->prefixlen_d = 128;
37 x->sel.prefixlen_s = 128; 35 sel->prefixlen_s = 128;
38 x->sel.proto = fl->proto; 36 sel->proto = fl->proto;
39 x->sel.ifindex = fl->oif; 37 sel->ifindex = fl->oif;
38}
39
40static void
41xfrm6_init_temprop(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
42 xfrm_address_t *daddr, xfrm_address_t *saddr)
43{
40 x->id = tmpl->id; 44 x->id = tmpl->id;
41 if (ipv6_addr_any((struct in6_addr*)&x->id.daddr)) 45 if (ipv6_addr_any((struct in6_addr*)&x->id.daddr))
42 memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr)); 46 memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr));
@@ -168,6 +172,7 @@ static struct xfrm_state_afinfo xfrm6_state_afinfo = {
168 .eth_proto = htons(ETH_P_IPV6), 172 .eth_proto = htons(ETH_P_IPV6),
169 .owner = THIS_MODULE, 173 .owner = THIS_MODULE,
170 .init_tempsel = __xfrm6_init_tempsel, 174 .init_tempsel = __xfrm6_init_tempsel,
175 .init_temprop = xfrm6_init_temprop,
171 .tmpl_sort = __xfrm6_tmpl_sort, 176 .tmpl_sort = __xfrm6_tmpl_sort,
172 .state_sort = __xfrm6_state_sort, 177 .state_sort = __xfrm6_state_sort,
173 .output = xfrm6_output, 178 .output = xfrm6_output,
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 7dcf7a404190..8d9e4c949b96 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -48,15 +48,17 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
48{ 48{
49 unsigned int off, len; 49 unsigned int off, len;
50 struct nf_ct_ext_type *t; 50 struct nf_ct_ext_type *t;
51 size_t alloc_size;
51 52
52 rcu_read_lock(); 53 rcu_read_lock();
53 t = rcu_dereference(nf_ct_ext_types[id]); 54 t = rcu_dereference(nf_ct_ext_types[id]);
54 BUG_ON(t == NULL); 55 BUG_ON(t == NULL);
55 off = ALIGN(sizeof(struct nf_ct_ext), t->align); 56 off = ALIGN(sizeof(struct nf_ct_ext), t->align);
56 len = off + t->len; 57 len = off + t->len;
58 alloc_size = t->alloc_size;
57 rcu_read_unlock(); 59 rcu_read_unlock();
58 60
59 *ext = kzalloc(t->alloc_size, gfp); 61 *ext = kzalloc(alloc_size, gfp);
60 if (!*ext) 62 if (!*ext)
61 return NULL; 63 return NULL;
62 64
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 53d892210a04..f64de9544866 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1376,7 +1376,7 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
1376 unsigned int msglen, origlen; 1376 unsigned int msglen, origlen;
1377 const char *dptr, *end; 1377 const char *dptr, *end;
1378 s16 diff, tdiff = 0; 1378 s16 diff, tdiff = 0;
1379 int ret; 1379 int ret = NF_ACCEPT;
1380 typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust; 1380 typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust;
1381 1381
1382 if (ctinfo != IP_CT_ESTABLISHED && 1382 if (ctinfo != IP_CT_ESTABLISHED &&
diff --git a/net/netfilter/nf_tproxy_core.c b/net/netfilter/nf_tproxy_core.c
index 5490fc37c92d..daab8c4a903c 100644
--- a/net/netfilter/nf_tproxy_core.c
+++ b/net/netfilter/nf_tproxy_core.c
@@ -70,7 +70,11 @@ nf_tproxy_destructor(struct sk_buff *skb)
70int 70int
71nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk) 71nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
72{ 72{
73 if (inet_sk(sk)->transparent) { 73 bool transparent = (sk->sk_state == TCP_TIME_WAIT) ?
74 inet_twsk(sk)->tw_transparent :
75 inet_sk(sk)->transparent;
76
77 if (transparent) {
74 skb_orphan(skb); 78 skb_orphan(skb);
75 skb->sk = sk; 79 skb->sk = sk;
76 skb->destructor = nf_tproxy_destructor; 80 skb->destructor = nf_tproxy_destructor;
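
[editor's note] nf_tproxy_assign_sock() has to look at sk_state first because a TCP_TIME_WAIT socket is represented by a different, smaller structure, and the transparent flag lives in a different field there. A generic sketch of that state-dependent access pattern with made-up types:

#include <stdbool.h>

enum conn_state { CONN_LIVE, CONN_TIME_WAIT };

struct live_conn { bool transparent;    /* ... full connection state ...   */ };
struct tw_conn   { bool tw_transparent; /* ... minimal timewait state ...  */ };

struct conn {
        enum conn_state state;
        union {
                struct live_conn live;
                struct tw_conn tw;
        } u;
};

/* Read the flag from whichever representation this object actually uses. */
static bool conn_transparent(const struct conn *c)
{
        return c->state == CONN_TIME_WAIT ? c->u.tw.tw_transparent
                                          : c->u.live.transparent;
}

int main(void)
{
        struct conn c = { .state = CONN_TIME_WAIT };

        c.u.tw.tw_transparent = true;
        return conn_transparent(&c) ? 0 : 1;
}
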
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index c397524c039c..c519939e8da9 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -43,7 +43,7 @@ void rds_tcp_state_change(struct sock *sk)
43 struct rds_connection *conn; 43 struct rds_connection *conn;
44 struct rds_tcp_connection *tc; 44 struct rds_tcp_connection *tc;
45 45
46 read_lock(&sk->sk_callback_lock); 46 read_lock_bh(&sk->sk_callback_lock);
47 conn = sk->sk_user_data; 47 conn = sk->sk_user_data;
48 if (conn == NULL) { 48 if (conn == NULL) {
49 state_change = sk->sk_state_change; 49 state_change = sk->sk_state_change;
@@ -68,7 +68,7 @@ void rds_tcp_state_change(struct sock *sk)
68 break; 68 break;
69 } 69 }
70out: 70out:
71 read_unlock(&sk->sk_callback_lock); 71 read_unlock_bh(&sk->sk_callback_lock);
72 state_change(sk); 72 state_change(sk);
73} 73}
74 74
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 975183fe6950..27844f231d10 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -114,7 +114,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
114 114
115 rdsdebug("listen data ready sk %p\n", sk); 115 rdsdebug("listen data ready sk %p\n", sk);
116 116
117 read_lock(&sk->sk_callback_lock); 117 read_lock_bh(&sk->sk_callback_lock);
118 ready = sk->sk_user_data; 118 ready = sk->sk_user_data;
119 if (ready == NULL) { /* check for teardown race */ 119 if (ready == NULL) { /* check for teardown race */
120 ready = sk->sk_data_ready; 120 ready = sk->sk_data_ready;
@@ -131,7 +131,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
131 queue_work(rds_wq, &rds_tcp_listen_work); 131 queue_work(rds_wq, &rds_tcp_listen_work);
132 132
133out: 133out:
134 read_unlock(&sk->sk_callback_lock); 134 read_unlock_bh(&sk->sk_callback_lock);
135 ready(sk, bytes); 135 ready(sk, bytes);
136} 136}
137 137
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index 1aba6878fa5d..e43797404102 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -324,7 +324,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
324 324
325 rdsdebug("data ready sk %p bytes %d\n", sk, bytes); 325 rdsdebug("data ready sk %p bytes %d\n", sk, bytes);
326 326
327 read_lock(&sk->sk_callback_lock); 327 read_lock_bh(&sk->sk_callback_lock);
328 conn = sk->sk_user_data; 328 conn = sk->sk_user_data;
329 if (conn == NULL) { /* check for teardown race */ 329 if (conn == NULL) { /* check for teardown race */
330 ready = sk->sk_data_ready; 330 ready = sk->sk_data_ready;
@@ -338,7 +338,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
338 if (rds_tcp_read_sock(conn, GFP_ATOMIC, KM_SOFTIRQ0) == -ENOMEM) 338 if (rds_tcp_read_sock(conn, GFP_ATOMIC, KM_SOFTIRQ0) == -ENOMEM)
339 queue_delayed_work(rds_wq, &conn->c_recv_w, 0); 339 queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
340out: 340out:
341 read_unlock(&sk->sk_callback_lock); 341 read_unlock_bh(&sk->sk_callback_lock);
342 ready(sk, bytes); 342 ready(sk, bytes);
343} 343}
344 344
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index a28b895ff0d1..2f012a07d94d 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -224,7 +224,7 @@ void rds_tcp_write_space(struct sock *sk)
224 struct rds_connection *conn; 224 struct rds_connection *conn;
225 struct rds_tcp_connection *tc; 225 struct rds_tcp_connection *tc;
226 226
227 read_lock(&sk->sk_callback_lock); 227 read_lock_bh(&sk->sk_callback_lock);
228 conn = sk->sk_user_data; 228 conn = sk->sk_user_data;
229 if (conn == NULL) { 229 if (conn == NULL) {
230 write_space = sk->sk_write_space; 230 write_space = sk->sk_write_space;
@@ -244,7 +244,7 @@ void rds_tcp_write_space(struct sock *sk)
244 queue_delayed_work(rds_wq, &conn->c_send_w, 0); 244 queue_delayed_work(rds_wq, &conn->c_send_w, 0);
245 245
246out: 246out:
247 read_unlock(&sk->sk_callback_lock); 247 read_unlock_bh(&sk->sk_callback_lock);
248 248
249 /* 249 /*
250 * write_space is only called when data leaves tcp's send queue if 250 * write_space is only called when data leaves tcp's send queue if
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 8e45e76a95f5..d952e7eac188 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -679,7 +679,7 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
679 if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) 679 if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
680 return -EINVAL; 680 return -EINVAL;
681 681
682 if (addr->srose_ndigis > ROSE_MAX_DIGIS) 682 if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
683 return -EINVAL; 683 return -EINVAL;
684 684
685 if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) { 685 if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) {
@@ -739,7 +739,7 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
739 if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) 739 if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
740 return -EINVAL; 740 return -EINVAL;
741 741
742 if (addr->srose_ndigis > ROSE_MAX_DIGIS) 742 if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
743 return -EINVAL; 743 return -EINVAL;
744 744
745 /* Source + Destination digis should not exceed ROSE_MAX_DIGIS */ 745 /* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
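
[editor's note] rose_bind() and rose_connect() now cast srose_ndigis to unsigned int before the ROSE_MAX_DIGIS comparison, which also rejects negative values that a plain signed compare would let through. The pitfall in a few lines of plain C (MAX_DIGIS here is a stand-in, not the ROSE constant):

#include <stdio.h>

#define MAX_DIGIS 6     /* stand-in limit for the example */

int main(void)
{
        int n = -1;     /* hostile or corrupted input from userspace */

        /* Signed compare: -1 > 6 is false, so the bad value slips through. */
        printf("signed:   %s\n", n > MAX_DIGIS ? "reject" : "accept");

        /* Cast first: (unsigned int)-1 is huge, so it is rejected. */
        printf("unsigned: %s\n", (unsigned int)n > MAX_DIGIS ? "reject" : "accept");
        return 0;
}
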
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index b6309db56226..fe9306bf10cc 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -800,7 +800,7 @@ static void xs_udp_data_ready(struct sock *sk, int len)
800 u32 _xid; 800 u32 _xid;
801 __be32 *xp; 801 __be32 *xp;
802 802
803 read_lock(&sk->sk_callback_lock); 803 read_lock_bh(&sk->sk_callback_lock);
804 dprintk("RPC: xs_udp_data_ready...\n"); 804 dprintk("RPC: xs_udp_data_ready...\n");
805 if (!(xprt = xprt_from_sock(sk))) 805 if (!(xprt = xprt_from_sock(sk)))
806 goto out; 806 goto out;
@@ -852,7 +852,7 @@ static void xs_udp_data_ready(struct sock *sk, int len)
852 dropit: 852 dropit:
853 skb_free_datagram(sk, skb); 853 skb_free_datagram(sk, skb);
854 out: 854 out:
855 read_unlock(&sk->sk_callback_lock); 855 read_unlock_bh(&sk->sk_callback_lock);
856} 856}
857 857
858static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc) 858static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
@@ -1229,7 +1229,7 @@ static void xs_tcp_data_ready(struct sock *sk, int bytes)
1229 1229
1230 dprintk("RPC: xs_tcp_data_ready...\n"); 1230 dprintk("RPC: xs_tcp_data_ready...\n");
1231 1231
1232 read_lock(&sk->sk_callback_lock); 1232 read_lock_bh(&sk->sk_callback_lock);
1233 if (!(xprt = xprt_from_sock(sk))) 1233 if (!(xprt = xprt_from_sock(sk)))
1234 goto out; 1234 goto out;
1235 if (xprt->shutdown) 1235 if (xprt->shutdown)
@@ -1248,7 +1248,7 @@ static void xs_tcp_data_ready(struct sock *sk, int bytes)
1248 read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv); 1248 read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
1249 } while (read > 0); 1249 } while (read > 0);
1250out: 1250out:
1251 read_unlock(&sk->sk_callback_lock); 1251 read_unlock_bh(&sk->sk_callback_lock);
1252} 1252}
1253 1253
1254/* 1254/*
@@ -1301,7 +1301,7 @@ static void xs_tcp_state_change(struct sock *sk)
1301{ 1301{
1302 struct rpc_xprt *xprt; 1302 struct rpc_xprt *xprt;
1303 1303
1304 read_lock(&sk->sk_callback_lock); 1304 read_lock_bh(&sk->sk_callback_lock);
1305 if (!(xprt = xprt_from_sock(sk))) 1305 if (!(xprt = xprt_from_sock(sk)))
1306 goto out; 1306 goto out;
1307 dprintk("RPC: xs_tcp_state_change client %p...\n", xprt); 1307 dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
@@ -1313,7 +1313,7 @@ static void xs_tcp_state_change(struct sock *sk)
1313 1313
1314 switch (sk->sk_state) { 1314 switch (sk->sk_state) {
1315 case TCP_ESTABLISHED: 1315 case TCP_ESTABLISHED:
1316 spin_lock_bh(&xprt->transport_lock); 1316 spin_lock(&xprt->transport_lock);
1317 if (!xprt_test_and_set_connected(xprt)) { 1317 if (!xprt_test_and_set_connected(xprt)) {
1318 struct sock_xprt *transport = container_of(xprt, 1318 struct sock_xprt *transport = container_of(xprt,
1319 struct sock_xprt, xprt); 1319 struct sock_xprt, xprt);
@@ -1327,7 +1327,7 @@ static void xs_tcp_state_change(struct sock *sk)
1327 1327
1328 xprt_wake_pending_tasks(xprt, -EAGAIN); 1328 xprt_wake_pending_tasks(xprt, -EAGAIN);
1329 } 1329 }
1330 spin_unlock_bh(&xprt->transport_lock); 1330 spin_unlock(&xprt->transport_lock);
1331 break; 1331 break;
1332 case TCP_FIN_WAIT1: 1332 case TCP_FIN_WAIT1:
1333 /* The client initiated a shutdown of the socket */ 1333 /* The client initiated a shutdown of the socket */
@@ -1365,7 +1365,7 @@ static void xs_tcp_state_change(struct sock *sk)
1365 xs_sock_mark_closed(xprt); 1365 xs_sock_mark_closed(xprt);
1366 } 1366 }
1367 out: 1367 out:
1368 read_unlock(&sk->sk_callback_lock); 1368 read_unlock_bh(&sk->sk_callback_lock);
1369} 1369}
1370 1370
1371/** 1371/**
@@ -1376,7 +1376,7 @@ static void xs_error_report(struct sock *sk)
1376{ 1376{
1377 struct rpc_xprt *xprt; 1377 struct rpc_xprt *xprt;
1378 1378
1379 read_lock(&sk->sk_callback_lock); 1379 read_lock_bh(&sk->sk_callback_lock);
1380 if (!(xprt = xprt_from_sock(sk))) 1380 if (!(xprt = xprt_from_sock(sk)))
1381 goto out; 1381 goto out;
1382 dprintk("RPC: %s client %p...\n" 1382 dprintk("RPC: %s client %p...\n"
@@ -1384,7 +1384,7 @@ static void xs_error_report(struct sock *sk)
1384 __func__, xprt, sk->sk_err); 1384 __func__, xprt, sk->sk_err);
1385 xprt_wake_pending_tasks(xprt, -EAGAIN); 1385 xprt_wake_pending_tasks(xprt, -EAGAIN);
1386out: 1386out:
1387 read_unlock(&sk->sk_callback_lock); 1387 read_unlock_bh(&sk->sk_callback_lock);
1388} 1388}
1389 1389
1390static void xs_write_space(struct sock *sk) 1390static void xs_write_space(struct sock *sk)
@@ -1416,13 +1416,13 @@ static void xs_write_space(struct sock *sk)
1416 */ 1416 */
1417static void xs_udp_write_space(struct sock *sk) 1417static void xs_udp_write_space(struct sock *sk)
1418{ 1418{
1419 read_lock(&sk->sk_callback_lock); 1419 read_lock_bh(&sk->sk_callback_lock);
1420 1420
1421 /* from net/core/sock.c:sock_def_write_space */ 1421 /* from net/core/sock.c:sock_def_write_space */
1422 if (sock_writeable(sk)) 1422 if (sock_writeable(sk))
1423 xs_write_space(sk); 1423 xs_write_space(sk);
1424 1424
1425 read_unlock(&sk->sk_callback_lock); 1425 read_unlock_bh(&sk->sk_callback_lock);
1426} 1426}
1427 1427
1428/** 1428/**
@@ -1437,13 +1437,13 @@ static void xs_udp_write_space(struct sock *sk)
1437 */ 1437 */
1438static void xs_tcp_write_space(struct sock *sk) 1438static void xs_tcp_write_space(struct sock *sk)
1439{ 1439{
1440 read_lock(&sk->sk_callback_lock); 1440 read_lock_bh(&sk->sk_callback_lock);
1441 1441
1442 /* from net/core/stream.c:sk_stream_write_space */ 1442 /* from net/core/stream.c:sk_stream_write_space */
1443 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) 1443 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
1444 xs_write_space(sk); 1444 xs_write_space(sk);
1445 1445
1446 read_unlock(&sk->sk_callback_lock); 1446 read_unlock_bh(&sk->sk_callback_lock);
1447} 1447}
1448 1448
1449static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt) 1449static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
diff --git a/net/wireless/wext-priv.c b/net/wireless/wext-priv.c
index 3feb28e41c53..674d426a9d24 100644
--- a/net/wireless/wext-priv.c
+++ b/net/wireless/wext-priv.c
@@ -152,7 +152,7 @@ static int ioctl_private_iw_point(struct iw_point *iwp, unsigned int cmd,
152 } else if (!iwp->pointer) 152 } else if (!iwp->pointer)
153 return -EFAULT; 153 return -EFAULT;
154 154
155 extra = kmalloc(extra_size, GFP_KERNEL); 155 extra = kzalloc(extra_size, GFP_KERNEL);
156 if (!extra) 156 if (!extra)
157 return -ENOMEM; 157 return -ENOMEM;
158 158
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 2b3ed7ad4933..cbab6e1a8c9c 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1175,9 +1175,8 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
1175 tmpl->mode == XFRM_MODE_BEET) { 1175 tmpl->mode == XFRM_MODE_BEET) {
1176 remote = &tmpl->id.daddr; 1176 remote = &tmpl->id.daddr;
1177 local = &tmpl->saddr; 1177 local = &tmpl->saddr;
1178 family = tmpl->encap_family; 1178 if (xfrm_addr_any(local, tmpl->encap_family)) {
1179 if (xfrm_addr_any(local, family)) { 1179 error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
1180 error = xfrm_get_saddr(net, &tmp, remote, family);
1181 if (error) 1180 if (error)
1182 goto fail; 1181 goto fail;
1183 local = &tmp; 1182 local = &tmp;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 5208b12fbfb4..eb96ce52f178 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -656,15 +656,23 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
656EXPORT_SYMBOL(xfrm_sad_getinfo); 656EXPORT_SYMBOL(xfrm_sad_getinfo);
657 657
658static int 658static int
659xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl, 659xfrm_init_tempstate(struct xfrm_state *x, struct flowi *fl,
660 struct xfrm_tmpl *tmpl, 660 struct xfrm_tmpl *tmpl,
661 xfrm_address_t *daddr, xfrm_address_t *saddr, 661 xfrm_address_t *daddr, xfrm_address_t *saddr,
662 unsigned short family) 662 unsigned short family)
663{ 663{
664 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); 664 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
665 if (!afinfo) 665 if (!afinfo)
666 return -1; 666 return -1;
667 afinfo->init_tempsel(x, fl, tmpl, daddr, saddr); 667 afinfo->init_tempsel(&x->sel, fl);
668
669 if (family != tmpl->encap_family) {
670 xfrm_state_put_afinfo(afinfo);
671 afinfo = xfrm_state_get_afinfo(tmpl->encap_family);
672 if (!afinfo)
673 return -1;
674 }
675 afinfo->init_temprop(x, tmpl, daddr, saddr);
668 xfrm_state_put_afinfo(afinfo); 676 xfrm_state_put_afinfo(afinfo);
669 return 0; 677 return 0;
670} 678}
@@ -790,37 +798,38 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
790 int error = 0; 798 int error = 0;
791 struct xfrm_state *best = NULL; 799 struct xfrm_state *best = NULL;
792 u32 mark = pol->mark.v & pol->mark.m; 800 u32 mark = pol->mark.v & pol->mark.m;
801 unsigned short encap_family = tmpl->encap_family;
793 802
794 to_put = NULL; 803 to_put = NULL;
795 804
796 spin_lock_bh(&xfrm_state_lock); 805 spin_lock_bh(&xfrm_state_lock);
797 h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, family); 806 h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
798 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) { 807 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
799 if (x->props.family == family && 808 if (x->props.family == encap_family &&
800 x->props.reqid == tmpl->reqid && 809 x->props.reqid == tmpl->reqid &&
801 (mark & x->mark.m) == x->mark.v && 810 (mark & x->mark.m) == x->mark.v &&
802 !(x->props.flags & XFRM_STATE_WILDRECV) && 811 !(x->props.flags & XFRM_STATE_WILDRECV) &&
803 xfrm_state_addr_check(x, daddr, saddr, family) && 812 xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
804 tmpl->mode == x->props.mode && 813 tmpl->mode == x->props.mode &&
805 tmpl->id.proto == x->id.proto && 814 tmpl->id.proto == x->id.proto &&
806 (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) 815 (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
807 xfrm_state_look_at(pol, x, fl, family, daddr, saddr, 816 xfrm_state_look_at(pol, x, fl, encap_family, daddr, saddr,
808 &best, &acquire_in_progress, &error); 817 &best, &acquire_in_progress, &error);
809 } 818 }
810 if (best) 819 if (best)
811 goto found; 820 goto found;
812 821
813 h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family); 822 h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
814 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) { 823 hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) {
815 if (x->props.family == family && 824 if (x->props.family == encap_family &&
816 x->props.reqid == tmpl->reqid && 825 x->props.reqid == tmpl->reqid &&
817 (mark & x->mark.m) == x->mark.v && 826 (mark & x->mark.m) == x->mark.v &&
818 !(x->props.flags & XFRM_STATE_WILDRECV) && 827 !(x->props.flags & XFRM_STATE_WILDRECV) &&
819 xfrm_state_addr_check(x, daddr, saddr, family) && 828 xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
820 tmpl->mode == x->props.mode && 829 tmpl->mode == x->props.mode &&
821 tmpl->id.proto == x->id.proto && 830 tmpl->id.proto == x->id.proto &&
822 (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) 831 (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
823 xfrm_state_look_at(pol, x, fl, family, daddr, saddr, 832 xfrm_state_look_at(pol, x, fl, encap_family, daddr, saddr,
824 &best, &acquire_in_progress, &error); 833 &best, &acquire_in_progress, &error);
825 } 834 }
826 835
@@ -829,7 +838,7 @@ found:
829 if (!x && !error && !acquire_in_progress) { 838 if (!x && !error && !acquire_in_progress) {
830 if (tmpl->id.spi && 839 if (tmpl->id.spi &&
831 (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi, 840 (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi,
832 tmpl->id.proto, family)) != NULL) { 841 tmpl->id.proto, encap_family)) != NULL) {
833 to_put = x0; 842 to_put = x0;
834 error = -EEXIST; 843 error = -EEXIST;
835 goto out; 844 goto out;
@@ -839,9 +848,9 @@ found:
839 error = -ENOMEM; 848 error = -ENOMEM;
840 goto out; 849 goto out;
841 } 850 }
842 /* Initialize temporary selector matching only 851 /* Initialize temporary state matching only
843 * to current session. */ 852 * to current session. */
844 xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family); 853 xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
845 memcpy(&x->mark, &pol->mark, sizeof(x->mark)); 854 memcpy(&x->mark, &pol->mark, sizeof(x->mark));
846 855
847 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid); 856 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
@@ -856,10 +865,10 @@ found:
 		x->km.state = XFRM_STATE_ACQ;
 		list_add(&x->km.all, &net->xfrm.state_all);
 		hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);
-		h = xfrm_src_hash(net, daddr, saddr, family);
+		h = xfrm_src_hash(net, daddr, saddr, encap_family);
 		hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h);
 		if (x->id.spi) {
-			h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, family);
+			h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
 			hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
 		}
 		x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index ef43995119a4..c668b447c725 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -1416,15 +1416,19 @@ static char *tomoyo_print_header(struct tomoyo_request_info *r)
 	const pid_t gpid = task_pid_nr(current);
 	static const int tomoyo_buffer_len = 4096;
 	char *buffer = kmalloc(tomoyo_buffer_len, GFP_NOFS);
+	pid_t ppid;
 	if (!buffer)
 		return NULL;
 	do_gettimeofday(&tv);
+	rcu_read_lock();
+	ppid = task_tgid_vnr(current->real_parent);
+	rcu_read_unlock();
 	snprintf(buffer, tomoyo_buffer_len - 1,
 		 "#timestamp=%lu profile=%u mode=%s (global-pid=%u)"
 		 " task={ pid=%u ppid=%u uid=%u gid=%u euid=%u"
 		 " egid=%u suid=%u sgid=%u fsuid=%u fsgid=%u }",
 		 tv.tv_sec, r->profile, tomoyo_mode[r->mode], gpid,
-		 (pid_t) sys_getpid(), (pid_t) sys_getppid(),
+		 task_tgid_vnr(current), ppid,
 		 current_uid(), current_gid(), current_euid(),
 		 current_egid(), current_suid(), current_sgid(),
 		 current_fsuid(), current_fsgid());
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index 04454cb7b24a..7c66bd898782 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -689,9 +689,6 @@ struct tomoyo_profile {
 
 /********** Function prototypes. **********/
 
-extern asmlinkage long sys_getpid(void);
-extern asmlinkage long sys_getppid(void);
-
 /* Check whether the given string starts with the given keyword. */
 bool tomoyo_str_starts(char **src, const char *find);
 /* Get tomoyo_realpath() of current process. */
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index b697fd2a6f8b..10bbbaf6ebc3 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -3641,6 +3641,7 @@ static struct snd_pci_quirk ad1984_cfg_tbl[] = {
 	/* Lenovo Thinkpad T61/X61 */
 	SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1984_THINKPAD),
 	SND_PCI_QUIRK(0x1028, 0x0214, "Dell T3400", AD1984_DELL_DESKTOP),
+	SND_PCI_QUIRK(0x1028, 0x0233, "Dell Latitude E6400", AD1984_DELL_DESKTOP),
 	{}
 };
 
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index a1312a6c8af2..a432e6efd19b 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1594,12 +1594,22 @@ static void alc_auto_parse_digital(struct hda_codec *codec)
 	}
 
 	if (spec->autocfg.dig_in_pin) {
-		hda_nid_t dig_nid;
-		err = snd_hda_get_connections(codec,
-					      spec->autocfg.dig_in_pin,
-					      &dig_nid, 1);
-		if (err > 0)
-			spec->dig_in_nid = dig_nid;
+		dig_nid = codec->start_nid;
+		for (i = 0; i < codec->num_nodes; i++, dig_nid++) {
+			unsigned int wcaps = get_wcaps(codec, dig_nid);
+			if (get_wcaps_type(wcaps) != AC_WID_AUD_IN)
+				continue;
+			if (!(wcaps & AC_WCAP_DIGITAL))
+				continue;
+			if (!(wcaps & AC_WCAP_CONN_LIST))
+				continue;
+			err = get_connection_index(codec, dig_nid,
+						   spec->autocfg.dig_in_pin);
+			if (err >= 0) {
+				spec->dig_in_nid = dig_nid;
+				break;
+			}
+		}
 	}
 }
 
diff --git a/sound/pci/oxygen/oxygen.c b/sound/pci/oxygen/oxygen.c
index 289cb4dacfc7..6c0a11adb2a8 100644
--- a/sound/pci/oxygen/oxygen.c
+++ b/sound/pci/oxygen/oxygen.c
@@ -543,6 +543,10 @@ static int __devinit get_oxygen_model(struct oxygen *chip,
 		chip->model.suspend = claro_suspend;
 		chip->model.resume = claro_resume;
 		chip->model.set_adc_params = set_ak5385_params;
+		chip->model.device_config = PLAYBACK_0_TO_I2S |
+					    PLAYBACK_1_TO_SPDIF |
+					    CAPTURE_0_FROM_I2S_2 |
+					    CAPTURE_1_FROM_SPDIF;
 		break;
 	}
 	if (id->driver_data == MODEL_MERIDIAN ||
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index b92adef8e81e..d6fa7bfd9aa1 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -4609,6 +4609,7 @@ static int snd_hdsp_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, unsigne
 		if (err < 0)
 			return err;
 
+		memset(&info, 0, sizeof(info));
 		spin_lock_irqsave(&hdsp->lock, flags);
 		info.pref_sync_ref = (unsigned char)hdsp_pref_sync_ref(hdsp);
 		info.wordclock_sync_check = (unsigned char)hdsp_wc_sync_check(hdsp);
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 547b713d7204..0c98ef9156d8 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -4127,6 +4127,7 @@ static int snd_hdspm_hwdep_ioctl(struct snd_hwdep * hw, struct file *file,
 
 	case SNDRV_HDSPM_IOCTL_GET_CONFIG_INFO:
 
+		memset(&info, 0, sizeof(info));
 		spin_lock_irq(&hdspm->lock);
 		info.pref_sync_ref = hdspm_pref_sync_ref(hdspm);
 		info.wordclock_sync_check = hdspm_wc_sync_check(hdspm);
diff --git a/sound/soc/sh/migor.c b/sound/soc/sh/migor.c
index b823a5c9b9bc..87e2b7fcbf17 100644
--- a/sound/soc/sh/migor.c
+++ b/sound/soc/sh/migor.c
@@ -12,6 +12,7 @@
 #include <linux/firmware.h>
 #include <linux/module.h>
 
+#include <asm/clkdev.h>
 #include <asm/clock.h>
 
 #include <cpu/sh7722.h>
@@ -40,12 +41,12 @@ static struct clk_ops siumckb_clk_ops = {
 };
 
 static struct clk siumckb_clk = {
-	.name = "siumckb_clk",
-	.id = -1,
 	.ops = &siumckb_clk_ops,
 	.rate = 0, /* initialised at run-time */
 };
 
+static struct clk_lookup *siumckb_lookup;
+
 static int migor_hw_params(struct snd_pcm_substream *substream,
 			   struct snd_pcm_hw_params *params)
 {
@@ -180,6 +181,13 @@ static int __init migor_init(void)
 	if (ret < 0)
 		return ret;
 
+	siumckb_lookup = clkdev_alloc(&siumckb_clk, "siumckb_clk", NULL);
+	if (!siumckb_lookup) {
+		ret = -ENOMEM;
+		goto eclkdevalloc;
+	}
+	clkdev_add(siumckb_lookup);
+
 	/* Port number used on this machine: port B */
 	migor_snd_device = platform_device_alloc("soc-audio", 1);
 	if (!migor_snd_device) {
@@ -200,12 +208,15 @@ static int __init migor_init(void)
 epdevadd:
 	platform_device_put(migor_snd_device);
 epdevalloc:
+	clkdev_drop(siumckb_lookup);
+eclkdevalloc:
 	clk_unregister(&siumckb_clk);
 	return ret;
 }
 
 static void __exit migor_exit(void)
 {
+	clkdev_drop(siumckb_lookup);
 	clk_unregister(&siumckb_clk);
 	platform_device_unregister(migor_snd_device);
 }
diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c
index adbc68ce9050..f6b0d2829ea9 100644
--- a/sound/soc/soc-cache.c
+++ b/sound/soc/soc-cache.c
@@ -203,8 +203,9 @@ static int snd_soc_8_16_write(struct snd_soc_codec *codec, unsigned int reg,
 	data[1] = (value >> 8) & 0xff;
 	data[2] = value & 0xff;
 
-	if (!snd_soc_codec_volatile_register(codec, reg))
-		reg_cache[reg] = value;
+	if (!snd_soc_codec_volatile_register(codec, reg)
+		&& reg < codec->reg_cache_size)
+		reg_cache[reg] = value;
 
 	if (codec->cache_only) {
 		codec->cache_sync = 1;
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 66cf65b510b1..c1f1e3c62984 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -218,7 +218,6 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
 	events = file->f_op->poll(file, &irqfd->pt);
 
 	list_add_tail(&irqfd->list, &kvm->irqfds.items);
-	spin_unlock_irq(&kvm->irqfds.lock);
 
 	/*
 	 * Check if there was an event already pending on the eventfd
@@ -227,6 +226,8 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
 	if (events & POLLIN)
 		schedule_work(&irqfd->inject);
 
+	spin_unlock_irq(&kvm->irqfds.lock);
+
 	/*
 	 * do not drop the file until the irqfd is fully initialized, otherwise
 	 * we might race against the POLLHUP
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d4853a54771a..5186e728c53e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1970,10 +1970,12 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
 
 asmlinkage void kvm_handle_fault_on_reboot(void)
 {
-	if (kvm_rebooting)
+	if (kvm_rebooting) {
 		/* spin while reset goes on */
+		local_irq_enable();
 		while (true)
 			;
+	}
 	/* Fault while not rebooting. We want the trace. */
 	BUG();
 }