-rw-r--r--  Documentation/ABI/testing/ima_policy | 12
-rw-r--r--  Documentation/cpu-freq/governors.txt | 4
-rw-r--r--  Documentation/fault-injection/fault-injection.txt | 4
-rw-r--r--  MAINTAINERS | 7
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/Kconfig | 1
-rw-r--r--  arch/arm/Makefile | 2
-rw-r--r--  arch/arm/mach-mx25/clock.c | 58
-rw-r--r--  arch/arm/mach-mx25/mx25pdk.c | 2
-rw-r--r--  arch/arm/mach-mx3/mx31ads.c | 4
-rw-r--r--  arch/arm/mach-omap2/gpmc.c | 5
-rw-r--r--  arch/arm/mach-omap2/irq.c | 4
-rw-r--r--  arch/arm/mach-omap2/mux.c | 10
-rw-r--r--  arch/arm/mach-omap2/mux34xx.c | 47
-rw-r--r--  arch/arm/mach-omap2/serial.c | 11
-rw-r--r--  arch/arm/mach-realview/realview_pbx.c | 4
-rw-r--r--  arch/arm/mm/proc-arm6_7.S | 2
-rw-r--r--  arch/arm/plat-mxc/audmux-v2.c | 137
-rw-r--r--  arch/arm/plat-mxc/include/mach/board-mx31lite.h | 2
-rw-r--r--  arch/arm/plat-mxc/include/mach/common.h | 2
-rw-r--r--  arch/arm/plat-mxc/include/mach/iomux-mx35.h | 2
-rw-r--r--  arch/arm/plat-mxc/include/mach/irqs.h | 5
-rw-r--r--  arch/arm/plat-omap/clock.c | 4
-rw-r--r--  arch/arm/plat-omap/gpio.c | 4
-rw-r--r--  arch/arm/plat-omap/omap_device.c | 10
-rw-r--r--  arch/arm/vfp/vfpmodule.c | 5
-rw-r--r--  arch/avr32/mach-at32ap/at32ap700x.c | 15
-rw-r--r--  arch/microblaze/configs/mmu_defconfig | 112
-rw-r--r--  arch/microblaze/configs/nommu_defconfig | 101
-rw-r--r--  arch/microblaze/kernel/entry-nommu.S | 10
-rw-r--r--  arch/microblaze/kernel/setup.c | 1
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c | 12
-rw-r--r--  arch/powerpc/platforms/pseries/xics.c | 14
-rw-r--r--  arch/s390/include/asm/lowcore.h | 4
-rw-r--r--  arch/sh/kernel/cpu/sh3/entry.S | 3
-rw-r--r--  arch/sh/kernel/dwarf.c | 20
-rw-r--r--  arch/sh/kernel/entry-common.S | 8
-rw-r--r--  arch/sparc/kernel/process_32.c | 2
-rw-r--r--  arch/sparc/kernel/process_64.c | 8
-rw-r--r--  arch/sparc/kernel/signal32.c | 10
-rw-r--r--  arch/sparc/kernel/signal_32.c | 6
-rw-r--r--  arch/sparc/kernel/signal_64.c | 8
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 3
-rw-r--r--  arch/x86/kvm/i8254.c | 3
-rw-r--r--  arch/x86/kvm/x86.c | 7
-rw-r--r--  arch/x86/mm/init_64.c | 19
-rw-r--r--  drivers/ata/ahci.c | 15
-rw-r--r--  drivers/ata/libata-scsi.c | 2
-rw-r--r--  drivers/ata/libata-sff.c | 3
-rw-r--r--  drivers/block/pktcdvd.c | 2
-rw-r--r--  drivers/bluetooth/Kconfig | 13
-rw-r--r--  drivers/bluetooth/Makefile | 1
-rw-r--r--  drivers/bluetooth/ath3k.c | 187
-rw-r--r--  drivers/bluetooth/bluecard_cs.c | 4
-rw-r--r--  drivers/bluetooth/bt3c_cs.c | 4
-rw-r--r--  drivers/bluetooth/btmrvl_sdio.c | 1
-rw-r--r--  drivers/bluetooth/btuart_cs.c | 4
-rw-r--r--  drivers/bluetooth/dtl1_cs.c | 4
-rw-r--r--  drivers/char/mem.c | 30
-rw-r--r--  drivers/char/tpm/tpm_infineon.c | 79
-rw-r--r--  drivers/char/tty_io.c | 4
-rw-r--r--  drivers/connector/connector.c | 175
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 3
-rw-r--r--  drivers/edac/mpc85xx_edac.c | 8
-rw-r--r--  drivers/gpu/drm/ati_pcigart.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 42
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 33
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 48
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 28
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 24
-rw-r--r--  drivers/hwmon/adt7462.c | 2
-rw-r--r--  drivers/hwmon/lm78.c | 25
-rw-r--r--  drivers/hwmon/w83781d.c | 26
-rw-r--r--  drivers/i2c/busses/i2c-tiny-usb.c | 12
-rw-r--r--  drivers/md/md.c | 14
-rw-r--r--  drivers/md/raid5.c | 14
-rw-r--r--  drivers/media/common/saa7146_video.c | 4
-rw-r--r--  drivers/media/dvb/dvb-core/dmxdev.c | 2
-rw-r--r--  drivers/media/dvb/dvb-core/dvb_demux.c | 20
-rw-r--r--  drivers/media/video/saa7134/saa7134-empress.c | 8
-rw-r--r--  drivers/mmc/card/mmc_test.c | 9
-rw-r--r--  drivers/net/ax88796.c | 2
-rw-r--r--  drivers/net/benet/be.h | 5
-rw-r--r--  drivers/net/benet/be_cmds.h | 3
-rw-r--r--  drivers/net/benet/be_main.c | 27
-rw-r--r--  drivers/net/bonding/bond_main.c | 2
-rw-r--r--  drivers/net/cxgb3/sge.c | 20
-rw-r--r--  drivers/net/igb/igb_main.c | 20
-rw-r--r--  drivers/net/igbvf/netdev.c | 1
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_nl.c | 16
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 7
-rw-r--r--  drivers/net/ks8851_mll.c | 4
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 4
-rw-r--r--  drivers/net/sky2.c | 8
-rw-r--r--  drivers/net/starfire.c | 5
-rw-r--r--  drivers/net/usb/cdc_ether.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 7
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-sta.c | 4
-rw-r--r--  drivers/pci/quirks.c | 17
-rw-r--r--  drivers/rtc/rtc-fm3130.c | 6
-rw-r--r--  drivers/s390/cio/qdio_main.c | 4
-rw-r--r--  drivers/serial/uartlite.c | 2
-rw-r--r--  drivers/usb/host/r8a66597-hcd.c | 41
-rw-r--r--  drivers/video/imxfb.c | 6
-rw-r--r--  drivers/video/mx3fb.c | 12
-rw-r--r--  fs/9p/v9fs.c | 33
-rw-r--r--  fs/9p/v9fs_vfs.h | 1
-rw-r--r--  fs/9p/vfs_file.c | 19
-rw-r--r--  fs/9p/vfs_inode.c | 2
-rw-r--r--  fs/befs/linuxvfs.c | 1
-rw-r--r--  fs/block_dev.c | 7
-rw-r--r--  fs/btrfs/disk-io.c | 7
-rw-r--r--  fs/btrfs/extent-tree.c | 8
-rw-r--r--  fs/btrfs/extent_io.c | 3
-rw-r--r--  fs/btrfs/file.c | 2
-rw-r--r--  fs/btrfs/inode.c | 50
-rw-r--r--  fs/btrfs/relocation.c | 3
-rw-r--r--  fs/cifs/CHANGES | 4
-rw-r--r--  fs/cifs/cifsfs.h | 2
-rw-r--r--  fs/cifs/cifsglob.h | 1
-rw-r--r--  fs/cifs/connect.c | 30
-rw-r--r--  fs/cifs/inode.c | 12
-rw-r--r--  fs/cifs/readdir.c | 8
-rw-r--r--  fs/cifs/sess.c | 11
-rw-r--r--  fs/compat_ioctl.c | 3
-rw-r--r--  fs/exec.c | 21
-rw-r--r--  fs/fcntl.c | 6
-rw-r--r--  fs/file_table.c | 1
-rw-r--r--  fs/fuse/file.c | 3
-rw-r--r--  fs/gfs2/glock.c | 4
-rw-r--r--  fs/gfs2/glock.h | 2
-rw-r--r--  fs/gfs2/incore.h | 2
-rw-r--r--  fs/gfs2/lock_dlm.c | 11
-rw-r--r--  fs/gfs2/ops_fstype.c | 12
-rw-r--r--  fs/gfs2/super.c | 1
-rw-r--r--  fs/namei.c | 6
-rw-r--r--  fs/nfs/file.c | 2
-rw-r--r--  fs/nfs/inode.c | 4
-rw-r--r--  fs/nfs/nfs4_fs.h | 2
-rw-r--r--  fs/nfs/nfs4proc.c | 78
-rw-r--r--  fs/nfs/nfs4state.c | 2
-rw-r--r--  fs/nfs/pagelist.c | 17
-rw-r--r--  fs/nfs/super.c | 15
-rw-r--r--  fs/nfs/sysctl.c | 2
-rw-r--r--  fs/nfs/write.c | 3
-rw-r--r--  fs/nfsd/export.c | 10
-rw-r--r--  fs/nfsd/vfs.c | 2
-rw-r--r--  fs/nilfs2/segment.c | 2
-rw-r--r--  fs/ocfs2/aops.c | 4
-rw-r--r--  fs/ocfs2/buffer_head_io.c | 2
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c | 6
-rw-r--r--  fs/ocfs2/cluster/tcp.c | 10
-rw-r--r--  fs/ocfs2/cluster/tcp_internal.h | 4
-rw-r--r--  fs/ocfs2/dlm/dlmapi.h | 2
-rw-r--r--  fs/ocfs2/dlm/dlmast.c | 2
-rw-r--r--  fs/ocfs2/dlm/dlmconvert.c | 2
-rw-r--r--  fs/ocfs2/dlm/dlmdebug.c | 2
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.c | 2
-rw-r--r--  fs/ocfs2/dlm/dlmlock.c | 2
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 38
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c | 147
-rw-r--r--  fs/ocfs2/dlm/dlmunlock.c | 8
-rw-r--r--  fs/ocfs2/dlmglue.c | 85
-rw-r--r--  fs/ocfs2/export.c | 2
-rw-r--r--  fs/ocfs2/extent_map.c | 2
-rw-r--r--  fs/ocfs2/file.c | 18
-rw-r--r--  fs/ocfs2/inode.c | 4
-rw-r--r--  fs/ocfs2/ioctl.c | 14
-rw-r--r--  fs/ocfs2/journal.c | 2
-rw-r--r--  fs/ocfs2/ocfs2.h | 4
-rw-r--r--  fs/ocfs2/ocfs2_fs.h | 11
-rw-r--r--  fs/ocfs2/refcounttree.c | 12
-rw-r--r--  fs/ocfs2/stack_o2cb.c | 12
-rw-r--r--  fs/ocfs2/super.c | 2
-rw-r--r--  fs/ocfs2/symlink.c | 10
-rw-r--r--  fs/ocfs2/uptodate.c | 4
-rw-r--r--  include/linux/ata.h | 4
-rw-r--r--  include/linux/compiler.h | 2
-rw-r--r--  include/linux/connector.h | 32
-rw-r--r--  include/linux/ima.h | 4
-rw-r--r--  include/linux/sched.h | 4
-rw-r--r--  include/net/netns/conntrack.h | 3
-rw-r--r--  include/net/netns/ipv4.h | 1
-rw-r--r--  init/main.c | 2
-rw-r--r--  kernel/cgroup.c | 7
-rw-r--r--  kernel/cred.c | 2
-rw-r--r--  kernel/futex.c | 30
-rw-r--r--  kernel/kfifo.c | 3
-rw-r--r--  kernel/kgdb.c | 6
-rw-r--r--  kernel/softlockup.c | 15
-rw-r--r--  kernel/time/timekeeping.c | 2
-rw-r--r--  mm/filemap.c | 3
-rw-r--r--  mm/hugetlb.c | 7
-rw-r--r--  mm/migrate.c | 3
-rw-r--r--  net/9p/client.c | 53
-rw-r--r--  net/9p/trans_fd.c | 10
-rw-r--r--  net/9p/trans_rdma.c | 9
-rw-r--r--  net/9p/trans_virtio.c | 4
-rw-r--r--  net/bluetooth/hci_conn.c | 3
-rw-r--r--  net/bluetooth/hci_event.c | 1
-rw-r--r--  net/bluetooth/hidp/core.c | 119
-rw-r--r--  net/bluetooth/hidp/hidp.h | 4
-rw-r--r--  net/bluetooth/l2cap.c | 14
-rw-r--r--  net/bluetooth/rfcomm/core.c | 8
-rw-r--r--  net/core/dst.c | 2
-rw-r--r--  net/core/pktgen.c | 1
-rw-r--r--  net/dccp/ccid.c | 2
-rw-r--r--  net/dccp/ccid.h | 8
-rw-r--r--  net/dccp/probe.c | 4
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 4
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 4
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c | 4
-rw-r--r--  net/ipv4/netfilter/nf_nat_core.c | 22
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 4
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c | 1
-rw-r--r--  net/irda/irnet/irnet_ppp.c | 5
-rw-r--r--  net/key/af_key.c | 15
-rw-r--r--  net/mac80211/driver-trace.h | 2
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 116
-rw-r--r--  net/netfilter/nf_conntrack_expect.c | 4
-rw-r--r--  net/netfilter/nf_conntrack_helper.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 5
-rw-r--r--  net/netfilter/nf_conntrack_sip.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_standalone.c | 7
-rw-r--r--  net/netlink/af_netlink.c | 5
-rw-r--r--  net/sched/Kconfig | 16
-rwxr-xr-x  scripts/get_maintainer.pl | 4
-rw-r--r--  scripts/markup_oops.pl | 2
-rw-r--r--  security/integrity/ima/ima.h | 3
-rw-r--r--  security/integrity/ima/ima_api.c | 4
-rw-r--r--  security/integrity/ima/ima_iint.c | 9
-rw-r--r--  security/integrity/ima/ima_main.c | 239
-rw-r--r--  security/integrity/ima/ima_policy.c | 9
-rw-r--r--  security/security.c | 2
-rw-r--r--  sound/pci/ctxfi/ctatc.c | 15
-rw-r--r--  sound/pci/ctxfi/ctvmem.c | 38
-rw-r--r--  sound/pci/ctxfi/ctvmem.h | 8
-rw-r--r--  sound/pci/hda/hda_intel.c | 21
-rw-r--r--  sound/pci/ice1712/aureon.c | 12
-rw-r--r--  sound/soc/omap/omap3pandora.c | 1
260 files changed, 2361 insertions, 1329 deletions
diff --git a/Documentation/ABI/testing/ima_policy b/Documentation/ABI/testing/ima_policy
index 6434f0df012e..6cd6daefaaed 100644
--- a/Documentation/ABI/testing/ima_policy
+++ b/Documentation/ABI/testing/ima_policy
@@ -20,7 +20,7 @@ Description:
20 lsm: [[subj_user=] [subj_role=] [subj_type=] 20 lsm: [[subj_user=] [subj_role=] [subj_type=]
21 [obj_user=] [obj_role=] [obj_type=]] 21 [obj_user=] [obj_role=] [obj_type=]]
22 22
23 base: func:= [BPRM_CHECK][FILE_MMAP][INODE_PERMISSION] 23 base: func:= [BPRM_CHECK][FILE_MMAP][FILE_CHECK]
24 mask:= [MAY_READ] [MAY_WRITE] [MAY_APPEND] [MAY_EXEC] 24 mask:= [MAY_READ] [MAY_WRITE] [MAY_APPEND] [MAY_EXEC]
25 fsmagic:= hex value 25 fsmagic:= hex value
26 uid:= decimal value 26 uid:= decimal value
@@ -40,11 +40,11 @@ Description:
40 40
41 measure func=BPRM_CHECK 41 measure func=BPRM_CHECK
42 measure func=FILE_MMAP mask=MAY_EXEC 42 measure func=FILE_MMAP mask=MAY_EXEC
43 measure func=INODE_PERM mask=MAY_READ uid=0 43 measure func=FILE_CHECK mask=MAY_READ uid=0
44 44
45 The default policy measures all executables in bprm_check, 45 The default policy measures all executables in bprm_check,
46 all files mmapped executable in file_mmap, and all files 46 all files mmapped executable in file_mmap, and all files
47 open for read by root in inode_permission. 47 open for read by root in do_filp_open.
48 48
49 Examples of LSM specific definitions: 49 Examples of LSM specific definitions:
50 50
@@ -54,8 +54,8 @@ Description:
54 54
55 dont_measure obj_type=var_log_t 55 dont_measure obj_type=var_log_t
56 dont_measure obj_type=auditd_log_t 56 dont_measure obj_type=auditd_log_t
57 measure subj_user=system_u func=INODE_PERM mask=MAY_READ 57 measure subj_user=system_u func=FILE_CHECK mask=MAY_READ
58 measure subj_role=system_r func=INODE_PERM mask=MAY_READ 58 measure subj_role=system_r func=FILE_CHECK mask=MAY_READ
59 59
60 Smack: 60 Smack:
61 measure subj_user=_ func=INODE_PERM mask=MAY_READ 61 measure subj_user=_ func=FILE_CHECK mask=MAY_READ
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index aed082f49d09..737988fca64d 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -145,8 +145,8 @@ show_sampling_rate_max: THIS INTERFACE IS DEPRECATED, DON'T USE IT.
145up_threshold: defines what the average CPU usage between the samplings 145up_threshold: defines what the average CPU usage between the samplings
146of 'sampling_rate' needs to be for the kernel to make a decision on 146of 'sampling_rate' needs to be for the kernel to make a decision on
147whether it should increase the frequency. For example when it is set 147whether it should increase the frequency. For example when it is set
148to its default value of '80' it means that between the checking 148to its default value of '95' it means that between the checking
149intervals the CPU needs to be on average more than 80% in use to then 149intervals the CPU needs to be on average more than 95% in use to then
150decide that the CPU frequency needs to be increased. 150decide that the CPU frequency needs to be increased.
151 151
152ignore_nice_load: this parameter takes a value of '0' or '1'. When 152ignore_nice_load: this parameter takes a value of '0' or '1'. When
diff --git a/Documentation/fault-injection/fault-injection.txt b/Documentation/fault-injection/fault-injection.txt
index 079305640790..7be15e44d481 100644
--- a/Documentation/fault-injection/fault-injection.txt
+++ b/Documentation/fault-injection/fault-injection.txt
@@ -143,8 +143,8 @@ o provide a way to configure fault attributes
143 failslab, fail_page_alloc, and fail_make_request use this way. 143 failslab, fail_page_alloc, and fail_make_request use this way.
144 Helper functions: 144 Helper functions:
145 145
146 init_fault_attr_entries(entries, attr, name); 146 init_fault_attr_dentries(entries, attr, name);
147 void cleanup_fault_attr_entries(entries); 147 void cleanup_fault_attr_dentries(entries);
148 148
149- module parameters 149- module parameters
150 150
diff --git a/MAINTAINERS b/MAINTAINERS
index 03f38c18f323..412eff60c33d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3411,8 +3411,10 @@ S: Maintained
3411F: drivers/scsi/sym53c8xx_2/ 3411F: drivers/scsi/sym53c8xx_2/
3412 3412
3413LTP (Linux Test Project) 3413LTP (Linux Test Project)
3414M: Subrata Modak <subrata@linux.vnet.ibm.com> 3414M: Rishikesh K Rajak <risrajak@linux.vnet.ibm.com>
3415M: Mike Frysinger <vapier@gentoo.org> 3415M: Garrett Cooper <yanegomi@gmail.com>
3416M: Mike Frysinger <vapier@gentoo.org>
3417M: Subrata Modak <subrata@linux.vnet.ibm.com>
3416L: ltp-list@lists.sourceforge.net (subscribers-only) 3418L: ltp-list@lists.sourceforge.net (subscribers-only)
3417W: http://ltp.sourceforge.net/ 3419W: http://ltp.sourceforge.net/
3418T: git git://git.kernel.org/pub/scm/linux/kernel/git/galak/ltp.git 3420T: git git://git.kernel.org/pub/scm/linux/kernel/git/galak/ltp.git
@@ -3836,6 +3838,7 @@ NETWORKING DRIVERS
3836L: netdev@vger.kernel.org 3838L: netdev@vger.kernel.org
3837W: http://www.linuxfoundation.org/en/Net 3839W: http://www.linuxfoundation.org/en/Net
3838T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git 3840T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git
3841T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6.git
3839S: Odd Fixes 3842S: Odd Fixes
3840F: drivers/net/ 3843F: drivers/net/
3841F: include/linux/if_* 3844F: include/linux/if_*
diff --git a/Makefile b/Makefile
index 394aec712c7d..f8e02e9491d0 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 33 3SUBLEVEL = 33
4EXTRAVERSION = -rc6 4EXTRAVERSION = -rc7
5NAME = Man-Eating Seals of Antiquity 5NAME = Man-Eating Seals of Antiquity
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 4c33ca82f9b1..184a6bd54825 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -702,6 +702,7 @@ config ARCH_OMAP
702 select ARCH_HAS_CPUFREQ 702 select ARCH_HAS_CPUFREQ
703 select GENERIC_TIME 703 select GENERIC_TIME
704 select GENERIC_CLOCKEVENTS 704 select GENERIC_CLOCKEVENTS
705 select ARCH_HAS_HOLES_MEMORYMODEL
705 help 706 help
706 Support for TI's OMAP platform (OMAP1 and OMAP2). 707 Support for TI's OMAP platform (OMAP1 and OMAP2).
707 708
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 9e7582572741..356d702c0808 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -94,7 +94,7 @@ CFLAGS_ABI +=-funwind-tables
94endif 94endif
95 95
96ifeq ($(CONFIG_THUMB2_KERNEL),y) 96ifeq ($(CONFIG_THUMB2_KERNEL),y)
97AFLAGS_AUTOIT :=$(call as-option,-Wa$(comma)-mimplicit-it=thumb,-Wa$(comma)-mauto-it) 97AFLAGS_AUTOIT :=$(call as-option,-Wa$(comma)-mimplicit-it=always,-Wa$(comma)-mauto-it)
98AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W) 98AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
99CFLAGS_THUMB2 :=-mthumb $(AFLAGS_AUTOIT) $(AFLAGS_NOWARN) 99CFLAGS_THUMB2 :=-mthumb $(AFLAGS_AUTOIT) $(AFLAGS_NOWARN)
100AFLAGS_THUMB2 :=$(CFLAGS_THUMB2) -Wa$(comma)-mthumb 100AFLAGS_THUMB2 :=$(CFLAGS_THUMB2) -Wa$(comma)-mthumb
diff --git a/arch/arm/mach-mx25/clock.c b/arch/arm/mach-mx25/clock.c
index 6e838b857712..6acc88bcdc40 100644
--- a/arch/arm/mach-mx25/clock.c
+++ b/arch/arm/mach-mx25/clock.c
@@ -119,6 +119,11 @@ static unsigned long get_rate_nfc(struct clk *clk)
119 return get_rate_per(8); 119 return get_rate_per(8);
120} 120}
121 121
122static unsigned long get_rate_gpt(struct clk *clk)
123{
124 return get_rate_per(5);
125}
126
122static unsigned long get_rate_otg(struct clk *clk) 127static unsigned long get_rate_otg(struct clk *clk)
123{ 128{
124 return 48000000; /* FIXME */ 129 return 48000000; /* FIXME */
@@ -144,7 +149,7 @@ static void clk_cgcr_disable(struct clk *clk)
144 __raw_writel(reg, clk->enable_reg); 149 __raw_writel(reg, clk->enable_reg);
145} 150}
146 151
147#define DEFINE_CLOCK(name, i, er, es, gr, sr) \ 152#define DEFINE_CLOCK(name, i, er, es, gr, sr, s) \
148 static struct clk name = { \ 153 static struct clk name = { \
149 .id = i, \ 154 .id = i, \
150 .enable_reg = CRM_BASE + er, \ 155 .enable_reg = CRM_BASE + er, \
@@ -153,27 +158,30 @@ static void clk_cgcr_disable(struct clk *clk)
153 .set_rate = sr, \ 158 .set_rate = sr, \
154 .enable = clk_cgcr_enable, \ 159 .enable = clk_cgcr_enable, \
155 .disable = clk_cgcr_disable, \ 160 .disable = clk_cgcr_disable, \
161 .secondary = s, \
156 } 162 }
157 163
158DEFINE_CLOCK(gpt_clk, 0, CCM_CGCR0, 5, get_rate_ipg, NULL); 164DEFINE_CLOCK(gpt_clk, 0, CCM_CGCR0, 5, get_rate_gpt, NULL, NULL);
159DEFINE_CLOCK(cspi1_clk, 0, CCM_CGCR1, 5, get_rate_ipg, NULL); 165DEFINE_CLOCK(uart_per_clk, 0, CCM_CGCR0, 15, get_rate_uart, NULL, NULL);
160DEFINE_CLOCK(cspi2_clk, 0, CCM_CGCR1, 6, get_rate_ipg, NULL); 166DEFINE_CLOCK(cspi1_clk, 0, CCM_CGCR1, 5, get_rate_ipg, NULL, NULL);
161DEFINE_CLOCK(cspi3_clk, 0, CCM_CGCR1, 7, get_rate_ipg, NULL); 167DEFINE_CLOCK(cspi2_clk, 0, CCM_CGCR1, 6, get_rate_ipg, NULL, NULL);
162DEFINE_CLOCK(uart1_clk, 0, CCM_CGCR2, 14, get_rate_uart, NULL); 168DEFINE_CLOCK(cspi3_clk, 0, CCM_CGCR1, 7, get_rate_ipg, NULL, NULL);
163DEFINE_CLOCK(uart2_clk, 0, CCM_CGCR2, 15, get_rate_uart, NULL); 169DEFINE_CLOCK(fec_ahb_clk, 0, CCM_CGCR0, 23, NULL, NULL, NULL);
164DEFINE_CLOCK(uart3_clk, 0, CCM_CGCR2, 16, get_rate_uart, NULL); 170DEFINE_CLOCK(uart1_clk, 0, CCM_CGCR2, 14, get_rate_uart, NULL, &uart_per_clk);
165DEFINE_CLOCK(uart4_clk, 0, CCM_CGCR2, 17, get_rate_uart, NULL); 171DEFINE_CLOCK(uart2_clk, 0, CCM_CGCR2, 15, get_rate_uart, NULL, &uart_per_clk);
166DEFINE_CLOCK(uart5_clk, 0, CCM_CGCR2, 18, get_rate_uart, NULL); 172DEFINE_CLOCK(uart3_clk, 0, CCM_CGCR2, 16, get_rate_uart, NULL, &uart_per_clk);
167DEFINE_CLOCK(nfc_clk, 0, CCM_CGCR0, 8, get_rate_nfc, NULL); 173DEFINE_CLOCK(uart4_clk, 0, CCM_CGCR2, 17, get_rate_uart, NULL, &uart_per_clk);
168DEFINE_CLOCK(usbotg_clk, 0, CCM_CGCR0, 28, get_rate_otg, NULL); 174DEFINE_CLOCK(uart5_clk, 0, CCM_CGCR2, 18, get_rate_uart, NULL, &uart_per_clk);
169DEFINE_CLOCK(pwm1_clk, 0, CCM_CGCR1, 31, get_rate_ipg, NULL); 175DEFINE_CLOCK(nfc_clk, 0, CCM_CGCR0, 8, get_rate_nfc, NULL, NULL);
170DEFINE_CLOCK(pwm2_clk, 0, CCM_CGCR2, 0, get_rate_ipg, NULL); 176DEFINE_CLOCK(usbotg_clk, 0, CCM_CGCR0, 28, get_rate_otg, NULL, NULL);
171DEFINE_CLOCK(pwm3_clk, 0, CCM_CGCR2, 1, get_rate_ipg, NULL); 177DEFINE_CLOCK(pwm1_clk, 0, CCM_CGCR1, 31, get_rate_ipg, NULL, NULL);
172DEFINE_CLOCK(pwm4_clk, 0, CCM_CGCR2, 2, get_rate_ipg, NULL); 178DEFINE_CLOCK(pwm2_clk, 0, CCM_CGCR2, 0, get_rate_ipg, NULL, NULL);
173DEFINE_CLOCK(kpp_clk, 0, CCM_CGCR1, 28, get_rate_ipg, NULL); 179DEFINE_CLOCK(pwm3_clk, 0, CCM_CGCR2, 1, get_rate_ipg, NULL, NULL);
174DEFINE_CLOCK(tsc_clk, 0, CCM_CGCR2, 13, get_rate_ipg, NULL); 180DEFINE_CLOCK(pwm4_clk, 0, CCM_CGCR2, 2, get_rate_ipg, NULL, NULL);
175DEFINE_CLOCK(i2c_clk, 0, CCM_CGCR0, 6, get_rate_i2c, NULL); 181DEFINE_CLOCK(kpp_clk, 0, CCM_CGCR1, 28, get_rate_ipg, NULL, NULL);
176DEFINE_CLOCK(fec_clk, 0, CCM_CGCR0, 23, get_rate_ipg, NULL); 182DEFINE_CLOCK(tsc_clk, 0, CCM_CGCR2, 13, get_rate_ipg, NULL, NULL);
183DEFINE_CLOCK(i2c_clk, 0, CCM_CGCR0, 6, get_rate_i2c, NULL, NULL);
184DEFINE_CLOCK(fec_clk, 0, CCM_CGCR1, 15, get_rate_ipg, NULL, &fec_ahb_clk);
177 185
178#define _REGISTER_CLOCK(d, n, c) \ 186#define _REGISTER_CLOCK(d, n, c) \
179 { \ 187 { \
@@ -208,13 +216,21 @@ static struct clk_lookup lookups[] = {
208 _REGISTER_CLOCK("fec.0", NULL, fec_clk) 216 _REGISTER_CLOCK("fec.0", NULL, fec_clk)
209}; 217};
210 218
211int __init mx25_clocks_init(unsigned long fref) 219int __init mx25_clocks_init(void)
212{ 220{
213 int i; 221 int i;
214 222
215 for (i = 0; i < ARRAY_SIZE(lookups); i++) 223 for (i = 0; i < ARRAY_SIZE(lookups); i++)
216 clkdev_add(&lookups[i]); 224 clkdev_add(&lookups[i]);
217 225
226 /* Turn off all clocks except the ones we need to survive, namely:
227 * EMI, GPIO1-3 (CCM_CGCR1[18:16]), GPT1, IOMUXC (CCM_CGCR1[27]), IIM,
228 * SCC
229 */
230 __raw_writel((1 << 19), CRM_BASE + CCM_CGCR0);
231 __raw_writel((0xf << 16) | (3 << 26), CRM_BASE + CCM_CGCR1);
232 __raw_writel((1 << 5), CRM_BASE + CCM_CGCR2);
233
218 mxc_timer_init(&gpt_clk, MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54); 234 mxc_timer_init(&gpt_clk, MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54);
219 235
220 return 0; 236 return 0;
diff --git a/arch/arm/mach-mx25/mx25pdk.c b/arch/arm/mach-mx25/mx25pdk.c
index 921bc99ea231..6f06089246eb 100644
--- a/arch/arm/mach-mx25/mx25pdk.c
+++ b/arch/arm/mach-mx25/mx25pdk.c
@@ -91,7 +91,7 @@ static void __init mx25pdk_init(void)
91 91
92static void __init mx25pdk_timer_init(void) 92static void __init mx25pdk_timer_init(void)
93{ 93{
94 mx25_clocks_init(26000000); 94 mx25_clocks_init();
95} 95}
96 96
97static struct sys_timer mx25pdk_timer = { 97static struct sys_timer mx25pdk_timer = {
diff --git a/arch/arm/mach-mx3/mx31ads.c b/arch/arm/mach-mx3/mx31ads.c
index 3e7bafa2ddbb..938c549767dc 100644
--- a/arch/arm/mach-mx3/mx31ads.c
+++ b/arch/arm/mach-mx3/mx31ads.c
@@ -173,6 +173,7 @@ static void expio_unmask_irq(u32 irq)
173} 173}
174 174
175static struct irq_chip expio_irq_chip = { 175static struct irq_chip expio_irq_chip = {
176 .name = "EXPIO(CPLD)",
176 .ack = expio_ack_irq, 177 .ack = expio_ack_irq,
177 .mask = expio_mask_irq, 178 .mask = expio_mask_irq,
178 .unmask = expio_unmask_irq, 179 .unmask = expio_unmask_irq,
@@ -302,6 +303,7 @@ static struct regulator_init_data ldo1_data = {
302 .min_uV = 2800000, 303 .min_uV = 2800000,
303 .max_uV = 2800000, 304 .max_uV = 2800000,
304 .valid_modes_mask = REGULATOR_MODE_NORMAL, 305 .valid_modes_mask = REGULATOR_MODE_NORMAL,
306 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
305 .apply_uV = 1, 307 .apply_uV = 1,
306 }, 308 },
307}; 309};
@@ -322,6 +324,7 @@ static struct regulator_init_data ldo2_data = {
322 .min_uV = 3300000, 324 .min_uV = 3300000,
323 .max_uV = 3300000, 325 .max_uV = 3300000,
324 .valid_modes_mask = REGULATOR_MODE_NORMAL, 326 .valid_modes_mask = REGULATOR_MODE_NORMAL,
327 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
325 .apply_uV = 1, 328 .apply_uV = 1,
326 }, 329 },
327 .num_consumer_supplies = ARRAY_SIZE(ldo2_consumers), 330 .num_consumer_supplies = ARRAY_SIZE(ldo2_consumers),
@@ -459,6 +462,7 @@ static int mx31_wm8350_init(struct wm8350 *wm8350)
459 462
460static struct wm8350_platform_data __initdata mx31_wm8350_pdata = { 463static struct wm8350_platform_data __initdata mx31_wm8350_pdata = {
461 .init = mx31_wm8350_init, 464 .init = mx31_wm8350_init,
465 .irq_base = MXC_BOARD_IRQ_START + MXC_MAX_EXP_IO_LINES,
462}; 466};
463#endif 467#endif
464 468
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 3f1334f62e7a..7027cdc1ba49 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -505,7 +505,7 @@ static void __init gpmc_mem_init(void)
505void __init gpmc_init(void) 505void __init gpmc_init(void)
506{ 506{
507 u32 l; 507 u32 l;
508 char *ck; 508 char *ck = NULL;
509 509
510 if (cpu_is_omap24xx()) { 510 if (cpu_is_omap24xx()) {
511 ck = "core_l3_ck"; 511 ck = "core_l3_ck";
@@ -521,6 +521,9 @@ void __init gpmc_init(void)
521 l = OMAP44XX_GPMC_BASE; 521 l = OMAP44XX_GPMC_BASE;
522 } 522 }
523 523
524 if (WARN_ON(!ck))
525 return;
526
524 gpmc_l3_clk = clk_get(NULL, ck); 527 gpmc_l3_clk = clk_get(NULL, ck);
525 if (IS_ERR(gpmc_l3_clk)) { 528 if (IS_ERR(gpmc_l3_clk)) {
526 printk(KERN_ERR "Could not get GPMC clock %s\n", ck); 529 printk(KERN_ERR "Could not get GPMC clock %s\n", ck);
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
index 27054025da2b..26aeef560aa3 100644
--- a/arch/arm/mach-omap2/irq.c
+++ b/arch/arm/mach-omap2/irq.c
@@ -194,7 +194,7 @@ void __init omap_init_irq(void)
194 int i; 194 int i;
195 195
196 for (i = 0; i < ARRAY_SIZE(irq_banks); i++) { 196 for (i = 0; i < ARRAY_SIZE(irq_banks); i++) {
197 unsigned long base; 197 unsigned long base = 0;
198 struct omap_irq_bank *bank = irq_banks + i; 198 struct omap_irq_bank *bank = irq_banks + i;
199 199
200 if (cpu_is_omap24xx()) 200 if (cpu_is_omap24xx())
@@ -202,6 +202,8 @@ void __init omap_init_irq(void)
202 else if (cpu_is_omap34xx()) 202 else if (cpu_is_omap34xx())
203 base = OMAP34XX_IC_BASE; 203 base = OMAP34XX_IC_BASE;
204 204
205 BUG_ON(!base);
206
205 /* Static mapping, never released */ 207 /* Static mapping, never released */
206 bank->base_reg = ioremap(base, SZ_4K); 208 bank->base_reg = ioremap(base, SZ_4K);
207 if (!bank->base_reg) { 209 if (!bank->base_reg) {
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index 3f59bd12cbbf..5fedc50c58e4 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -486,7 +486,7 @@ int __init omap_mux_init_signal(char *muxname, int val)
486static inline void omap_mux_decode(struct seq_file *s, u16 val) 486static inline void omap_mux_decode(struct seq_file *s, u16 val)
487{ 487{
488 char *flags[OMAP_MUX_MAX_NR_FLAGS]; 488 char *flags[OMAP_MUX_MAX_NR_FLAGS];
489 char mode[14]; 489 char mode[sizeof("OMAP_MUX_MODE") + 1];
490 int i = -1; 490 int i = -1;
491 491
492 sprintf(mode, "OMAP_MUX_MODE%d", val & 0x7); 492 sprintf(mode, "OMAP_MUX_MODE%d", val & 0x7);
@@ -553,6 +553,7 @@ static int omap_mux_dbg_board_show(struct seq_file *s, void *unused)
553 if (!m0_name) 553 if (!m0_name)
554 continue; 554 continue;
555 555
556 /* REVISIT: Needs to be updated if mode0 names get longer */
556 for (i = 0; i < OMAP_MUX_DEFNAME_LEN; i++) { 557 for (i = 0; i < OMAP_MUX_DEFNAME_LEN; i++) {
557 if (m0_name[i] == '\0') { 558 if (m0_name[i] == '\0') {
558 m0_def[i] = m0_name[i]; 559 m0_def[i] = m0_name[i];
@@ -968,6 +969,13 @@ static void __init omap_mux_init_list(struct omap_mux *superset)
968 } 969 }
969#endif 970#endif
970 971
972#if defined(CONFIG_OMAP_MUX) && defined(CONFIG_DEBUG_FS)
973 if (!superset->muxnames || !superset->muxnames[0]) {
974 superset++;
975 continue;
976 }
977#endif
978
971 entry = omap_mux_list_add(superset); 979 entry = omap_mux_list_add(superset);
972 if (!entry) { 980 if (!entry) {
973 printk(KERN_ERR "mux: Could not add entry\n"); 981 printk(KERN_ERR "mux: Could not add entry\n");
diff --git a/arch/arm/mach-omap2/mux34xx.c b/arch/arm/mach-omap2/mux34xx.c
index 68e0a595f9a1..07aa7b3c95f7 100644
--- a/arch/arm/mach-omap2/mux34xx.c
+++ b/arch/arm/mach-omap2/mux34xx.c
@@ -649,6 +649,53 @@ static struct omap_mux __initdata omap3_muxmodes[] = {
649 _OMAP3_MUXENTRY(UART3_TX_IRTX, 166, 649 _OMAP3_MUXENTRY(UART3_TX_IRTX, 166,
650 "uart3_tx_irtx", NULL, NULL, NULL, 650 "uart3_tx_irtx", NULL, NULL, NULL,
651 "gpio_166", NULL, NULL, "safe_mode"), 651 "gpio_166", NULL, NULL, "safe_mode"),
652
653 /* Only on 3630, see omap36xx_cbp_subset for the signals */
654 _OMAP3_MUXENTRY(GPMC_A11, 0,
655 NULL, NULL, NULL, NULL,
656 NULL, NULL, NULL, NULL),
657 _OMAP3_MUXENTRY(SAD2D_MBUSFLAG, 0,
658 NULL, NULL, NULL, NULL,
659 NULL, NULL, NULL, NULL),
660 _OMAP3_MUXENTRY(SAD2D_MREAD, 0,
661 NULL, NULL, NULL, NULL,
662 NULL, NULL, NULL, NULL),
663 _OMAP3_MUXENTRY(SAD2D_MWRITE, 0,
664 NULL, NULL, NULL, NULL,
665 NULL, NULL, NULL, NULL),
666 _OMAP3_MUXENTRY(SAD2D_SBUSFLAG, 0,
667 NULL, NULL, NULL, NULL,
668 NULL, NULL, NULL, NULL),
669 _OMAP3_MUXENTRY(SAD2D_SREAD, 0,
670 NULL, NULL, NULL, NULL,
671 NULL, NULL, NULL, NULL),
672 _OMAP3_MUXENTRY(SAD2D_SWRITE, 0,
673 NULL, NULL, NULL, NULL,
674 NULL, NULL, NULL, NULL),
675 _OMAP3_MUXENTRY(GPMC_A11, 0,
676 NULL, NULL, NULL, NULL,
677 NULL, NULL, NULL, NULL),
678 _OMAP3_MUXENTRY(SAD2D_MCAD28, 0,
679 NULL, NULL, NULL, NULL,
680 NULL, NULL, NULL, NULL),
681 _OMAP3_MUXENTRY(SAD2D_MCAD29, 0,
682 NULL, NULL, NULL, NULL,
683 NULL, NULL, NULL, NULL),
684 _OMAP3_MUXENTRY(SAD2D_MCAD32, 0,
685 NULL, NULL, NULL, NULL,
686 NULL, NULL, NULL, NULL),
687 _OMAP3_MUXENTRY(SAD2D_MCAD33, 0,
688 NULL, NULL, NULL, NULL,
689 NULL, NULL, NULL, NULL),
690 _OMAP3_MUXENTRY(SAD2D_MCAD34, 0,
691 NULL, NULL, NULL, NULL,
692 NULL, NULL, NULL, NULL),
693 _OMAP3_MUXENTRY(SAD2D_MCAD35, 0,
694 NULL, NULL, NULL, NULL,
695 NULL, NULL, NULL, NULL),
696 _OMAP3_MUXENTRY(SAD2D_MCAD36, 0,
697 NULL, NULL, NULL, NULL,
698 NULL, NULL, NULL, NULL),
652 { .reg_offset = OMAP_MUX_TERMINATOR }, 699 { .reg_offset = OMAP_MUX_TERMINATOR },
653}; 700};
654 701
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 8c964bec8159..e10a02df6e1d 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -36,7 +36,13 @@
36#define UART_OMAP_NO_EMPTY_FIFO_READ_IP_REV 0x52 36#define UART_OMAP_NO_EMPTY_FIFO_READ_IP_REV 0x52
37#define UART_OMAP_WER 0x17 /* Wake-up enable register */ 37#define UART_OMAP_WER 0x17 /* Wake-up enable register */
38 38
39#define DEFAULT_TIMEOUT (5 * HZ) 39/*
40 * NOTE: By default the serial timeout is disabled as it causes lost characters
41 * over the serial ports. This means that the UART clocks will stay on until
42 * disabled via sysfs. This also causes that any deeper omap sleep states are
43 * blocked.
44 */
45#define DEFAULT_TIMEOUT 0
40 46
41struct omap_uart_state { 47struct omap_uart_state {
42 int num; 48 int num;
@@ -422,7 +428,8 @@ static void omap_uart_idle_init(struct omap_uart_state *uart)
422 uart->timeout = DEFAULT_TIMEOUT; 428 uart->timeout = DEFAULT_TIMEOUT;
423 setup_timer(&uart->timer, omap_uart_idle_timer, 429 setup_timer(&uart->timer, omap_uart_idle_timer,
424 (unsigned long) uart); 430 (unsigned long) uart);
425 mod_timer(&uart->timer, jiffies + uart->timeout); 431 if (uart->timeout)
432 mod_timer(&uart->timer, jiffies + uart->timeout);
426 omap_uart_smart_idle_enable(uart, 0); 433 omap_uart_smart_idle_enable(uart, 0);
427 434
428 if (cpu_is_omap34xx()) { 435 if (cpu_is_omap34xx()) {
diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c
index a21a4b395f73..d94857eb0690 100644
--- a/arch/arm/mach-realview/realview_pbx.c
+++ b/arch/arm/mach-realview/realview_pbx.c
@@ -334,8 +334,8 @@ static void realview_pbx_reset(char mode)
334 * in the system FPGA 334 * in the system FPGA
335 */ 335 */
336 __raw_writel(REALVIEW_SYS_LOCK_VAL, lock_ctrl); 336 __raw_writel(REALVIEW_SYS_LOCK_VAL, lock_ctrl);
337 __raw_writel(0x0000, reset_ctrl); 337 __raw_writel(0x00F0, reset_ctrl);
338 __raw_writel(0x0004, reset_ctrl); 338 __raw_writel(0x00F4, reset_ctrl);
339} 339}
340 340
341static void __init realview_pbx_init(void) 341static void __init realview_pbx_init(void)
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S
index 3f9cd3d8f6d5..795dc615f43b 100644
--- a/arch/arm/mm/proc-arm6_7.S
+++ b/arch/arm/mm/proc-arm6_7.S
@@ -41,7 +41,7 @@ ENTRY(cpu_arm7_dcache_clean_area)
41ENTRY(cpu_arm7_data_abort) 41ENTRY(cpu_arm7_data_abort)
42 mrc p15, 0, r1, c5, c0, 0 @ get FSR 42 mrc p15, 0, r1, c5, c0, 0 @ get FSR
43 mrc p15, 0, r0, c6, c0, 0 @ get FAR 43 mrc p15, 0, r0, c6, c0, 0 @ get FAR
44 ldr r8, [r0] @ read arm instruction 44 ldr r8, [r2] @ read arm instruction
45 tst r8, #1 << 20 @ L = 0 -> write? 45 tst r8, #1 << 20 @ L = 0 -> write?
46 orreq r1, r1, #1 << 11 @ yes. 46 orreq r1, r1, #1 << 11 @ yes.
47 and r7, r8, #15 << 24 47 and r7, r8, #15 << 24
diff --git a/arch/arm/plat-mxc/audmux-v2.c b/arch/arm/plat-mxc/audmux-v2.c
index 6f21096086fd..b06954a84436 100644
--- a/arch/arm/plat-mxc/audmux-v2.c
+++ b/arch/arm/plat-mxc/audmux-v2.c
@@ -23,6 +23,7 @@
23#include <linux/err.h> 23#include <linux/err.h>
24#include <linux/io.h> 24#include <linux/io.h>
25#include <linux/clk.h> 25#include <linux/clk.h>
26#include <linux/debugfs.h>
26#include <mach/audmux.h> 27#include <mach/audmux.h>
27#include <mach/hardware.h> 28#include <mach/hardware.h>
28 29
@@ -32,6 +33,140 @@ static void __iomem *audmux_base;
32#define MXC_AUDMUX_V2_PTCR(x) ((x) * 8) 33#define MXC_AUDMUX_V2_PTCR(x) ((x) * 8)
33#define MXC_AUDMUX_V2_PDCR(x) ((x) * 8 + 4) 34#define MXC_AUDMUX_V2_PDCR(x) ((x) * 8 + 4)
34 35
36#ifdef CONFIG_DEBUG_FS
37static struct dentry *audmux_debugfs_root;
38
39static int audmux_open_file(struct inode *inode, struct file *file)
40{
41 file->private_data = inode->i_private;
42 return 0;
43}
44
45/* There is an annoying discontinuity in the SSI numbering with regard
46 * to the Linux number of the devices */
47static const char *audmux_port_string(int port)
48{
49 switch (port) {
50 case MX31_AUDMUX_PORT1_SSI0:
51 return "imx-ssi.0";
52 case MX31_AUDMUX_PORT2_SSI1:
53 return "imx-ssi.1";
54 case MX31_AUDMUX_PORT3_SSI_PINS_3:
55 return "SSI3";
56 case MX31_AUDMUX_PORT4_SSI_PINS_4:
57 return "SSI4";
58 case MX31_AUDMUX_PORT5_SSI_PINS_5:
59 return "SSI5";
60 case MX31_AUDMUX_PORT6_SSI_PINS_6:
61 return "SSI6";
62 default:
63 return "UNKNOWN";
64 }
65}
66
67static ssize_t audmux_read_file(struct file *file, char __user *user_buf,
68 size_t count, loff_t *ppos)
69{
70 ssize_t ret;
71 char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
72 int port = (int)file->private_data;
73 u32 pdcr, ptcr;
74
75 if (!buf)
76 return -ENOMEM;
77
78 if (audmux_clk)
79 clk_enable(audmux_clk);
80
81 ptcr = readl(audmux_base + MXC_AUDMUX_V2_PTCR(port));
82 pdcr = readl(audmux_base + MXC_AUDMUX_V2_PDCR(port));
83
84 if (audmux_clk)
85 clk_disable(audmux_clk);
86
87 ret = snprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n",
88 pdcr, ptcr);
89
90 if (ptcr & MXC_AUDMUX_V2_PTCR_TFSDIR)
91 ret += snprintf(buf + ret, PAGE_SIZE - ret,
92 "TxFS output from %s, ",
93 audmux_port_string((ptcr >> 27) & 0x7));
94 else
95 ret += snprintf(buf + ret, PAGE_SIZE - ret,
96 "TxFS input, ");
97
98 if (ptcr & MXC_AUDMUX_V2_PTCR_TCLKDIR)
99 ret += snprintf(buf + ret, PAGE_SIZE - ret,
100 "TxClk output from %s",
101 audmux_port_string((ptcr >> 22) & 0x7));
102 else
103 ret += snprintf(buf + ret, PAGE_SIZE - ret,
104 "TxClk input");
105
106 ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
107
108 if (ptcr & MXC_AUDMUX_V2_PTCR_SYN) {
109 ret += snprintf(buf + ret, PAGE_SIZE - ret,
110 "Port is symmetric");
111 } else {
112 if (ptcr & MXC_AUDMUX_V2_PTCR_RFSDIR)
113 ret += snprintf(buf + ret, PAGE_SIZE - ret,
114 "RxFS output from %s, ",
115 audmux_port_string((ptcr >> 17) & 0x7));
116 else
117 ret += snprintf(buf + ret, PAGE_SIZE - ret,
118 "RxFS input, ");
119
120 if (ptcr & MXC_AUDMUX_V2_PTCR_RCLKDIR)
121 ret += snprintf(buf + ret, PAGE_SIZE - ret,
122 "RxClk output from %s",
123 audmux_port_string((ptcr >> 12) & 0x7));
124 else
125 ret += snprintf(buf + ret, PAGE_SIZE - ret,
126 "RxClk input");
127 }
128
129 ret += snprintf(buf + ret, PAGE_SIZE - ret,
130 "\nData received from %s\n",
131 audmux_port_string((pdcr >> 13) & 0x7));
132
133 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
134
135 kfree(buf);
136
137 return ret;
138}
139
140static const struct file_operations audmux_debugfs_fops = {
141 .open = audmux_open_file,
142 .read = audmux_read_file,
143};
144
145static void audmux_debugfs_init(void)
146{
147 int i;
148 char buf[20];
149
150 audmux_debugfs_root = debugfs_create_dir("audmux", NULL);
151 if (!audmux_debugfs_root) {
152 pr_warning("Failed to create AUDMUX debugfs root\n");
153 return;
154 }
155
156 for (i = 1; i < 8; i++) {
157 snprintf(buf, sizeof(buf), "ssi%d", i);
158 if (!debugfs_create_file(buf, 0444, audmux_debugfs_root,
159 (void *)i, &audmux_debugfs_fops))
160 pr_warning("Failed to create AUDMUX port %d debugfs file\n",
161 i);
162 }
163}
164#else
165static inline void audmux_debugfs_init(void)
166{
167}
168#endif
169
35int mxc_audmux_v2_configure_port(unsigned int port, unsigned int ptcr, 170int mxc_audmux_v2_configure_port(unsigned int port, unsigned int ptcr,
36 unsigned int pdcr) 171 unsigned int pdcr)
37{ 172{
@@ -68,6 +203,8 @@ static int mxc_audmux_v2_init(void)
68 if (cpu_is_mx31() || cpu_is_mx35()) 203 if (cpu_is_mx31() || cpu_is_mx35())
69 audmux_base = IO_ADDRESS(AUDMUX_BASE_ADDR); 204 audmux_base = IO_ADDRESS(AUDMUX_BASE_ADDR);
70 205
206 audmux_debugfs_init();
207
71 return 0; 208 return 0;
72} 209}
73 210
diff --git a/arch/arm/plat-mxc/include/mach/board-mx31lite.h b/arch/arm/plat-mxc/include/mach/board-mx31lite.h
index 0184b638c268..2b2da0367578 100644
--- a/arch/arm/plat-mxc/include/mach/board-mx31lite.h
+++ b/arch/arm/plat-mxc/include/mach/board-mx31lite.h
@@ -25,7 +25,7 @@
25 25
26#ifndef __ASSEMBLY__ 26#ifndef __ASSEMBLY__
27 27
28enum mx31lilly_boards { 28enum mx31lite_boards {
29 MX31LITE_NOBOARD = 0, 29 MX31LITE_NOBOARD = 0,
30 MX31LITE_DB = 1, 30 MX31LITE_DB = 1,
31}; 31};
diff --git a/arch/arm/plat-mxc/include/mach/common.h b/arch/arm/plat-mxc/include/mach/common.h
index 286cb9b0a25b..4bf1068ffad9 100644
--- a/arch/arm/plat-mxc/include/mach/common.h
+++ b/arch/arm/plat-mxc/include/mach/common.h
@@ -32,7 +32,7 @@ extern void mxc91231_init_irq(void);
32extern void mxc_timer_init(struct clk *timer_clk, void __iomem *, int); 32extern void mxc_timer_init(struct clk *timer_clk, void __iomem *, int);
33extern int mx1_clocks_init(unsigned long fref); 33extern int mx1_clocks_init(unsigned long fref);
34extern int mx21_clocks_init(unsigned long lref, unsigned long fref); 34extern int mx21_clocks_init(unsigned long lref, unsigned long fref);
35extern int mx25_clocks_init(unsigned long fref); 35extern int mx25_clocks_init(void);
36extern int mx27_clocks_init(unsigned long fref); 36extern int mx27_clocks_init(unsigned long fref);
37extern int mx31_clocks_init(unsigned long fref); 37extern int mx31_clocks_init(unsigned long fref);
38extern int mx35_clocks_init(void); 38extern int mx35_clocks_init(void);
diff --git a/arch/arm/plat-mxc/include/mach/iomux-mx35.h b/arch/arm/plat-mxc/include/mach/iomux-mx35.h
index 00b0ac1db225..c88d40795f7a 100644
--- a/arch/arm/plat-mxc/include/mach/iomux-mx35.h
+++ b/arch/arm/plat-mxc/include/mach/iomux-mx35.h
@@ -671,7 +671,7 @@
671#define MX35_PAD_LD8__SDMA_SDMA_DEBUG_PC_8 IOMUX_PAD(0x634, 0x1d0, 6, 0x0, 0, NO_PAD_CTRL) 671#define MX35_PAD_LD8__SDMA_SDMA_DEBUG_PC_8 IOMUX_PAD(0x634, 0x1d0, 6, 0x0, 0, NO_PAD_CTRL)
672 672
673#define MX35_PAD_LD9__IPU_DISPB_DAT_9 IOMUX_PAD(0x638, 0x1d4, 0, 0x0, 0, NO_PAD_CTRL) 673#define MX35_PAD_LD9__IPU_DISPB_DAT_9 IOMUX_PAD(0x638, 0x1d4, 0, 0x0, 0, NO_PAD_CTRL)
674#define MX35_PAD_LD9__GPIO2_9 IOMUX_PAD(0x638, 0x1d4, 5, 0x8e4 0, NO_PAD_CTRL) 674#define MX35_PAD_LD9__GPIO2_9 IOMUX_PAD(0x638, 0x1d4, 5, 0x8e4, 0, NO_PAD_CTRL)
675#define MX35_PAD_LD9__SDMA_SDMA_DEBUG_PC_9 IOMUX_PAD(0x638, 0x1d4, 6, 0x0, 0, NO_PAD_CTRL) 675#define MX35_PAD_LD9__SDMA_SDMA_DEBUG_PC_9 IOMUX_PAD(0x638, 0x1d4, 6, 0x0, 0, NO_PAD_CTRL)
676 676
677#define MX35_PAD_LD10__IPU_DISPB_DAT_10 IOMUX_PAD(0x63c, 0x1d8, 0, 0x0, 0, NO_PAD_CTRL) 677#define MX35_PAD_LD10__IPU_DISPB_DAT_10 IOMUX_PAD(0x63c, 0x1d8, 0, 0x0, 0, NO_PAD_CTRL)
diff --git a/arch/arm/plat-mxc/include/mach/irqs.h b/arch/arm/plat-mxc/include/mach/irqs.h
index ead9d592168d..0cb347645db4 100644
--- a/arch/arm/plat-mxc/include/mach/irqs.h
+++ b/arch/arm/plat-mxc/include/mach/irqs.h
@@ -37,7 +37,12 @@
37 * within sensible limits. 37 * within sensible limits.
38 */ 38 */
39#define MXC_BOARD_IRQ_START (MXC_INTERNAL_IRQS + MXC_GPIO_IRQS) 39#define MXC_BOARD_IRQ_START (MXC_INTERNAL_IRQS + MXC_GPIO_IRQS)
40
41#ifdef CONFIG_MACH_MX31ADS_WM1133_EV1
42#define MXC_BOARD_IRQS 80
43#else
40#define MXC_BOARD_IRQS 16 44#define MXC_BOARD_IRQS 16
45#endif
41 46
42#define MXC_IPU_IRQ_START (MXC_BOARD_IRQ_START + MXC_BOARD_IRQS) 47#define MXC_IPU_IRQ_START (MXC_BOARD_IRQ_START + MXC_BOARD_IRQS)
43 48
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index d9f8c844c385..4becbdd1935c 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -391,7 +391,7 @@ static struct dentry *clk_debugfs_root;
391static int clk_debugfs_register_one(struct clk *c) 391static int clk_debugfs_register_one(struct clk *c)
392{ 392{
393 int err; 393 int err;
394 struct dentry *d, *child; 394 struct dentry *d, *child, *child_tmp;
395 struct clk *pa = c->parent; 395 struct clk *pa = c->parent;
396 char s[255]; 396 char s[255];
397 char *p = s; 397 char *p = s;
@@ -423,7 +423,7 @@ static int clk_debugfs_register_one(struct clk *c)
423 423
424err_out: 424err_out:
425 d = c->dent; 425 d = c->dent;
426 list_for_each_entry(child, &d->d_subdirs, d_u.d_child) 426 list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
427 debugfs_remove(child); 427 debugfs_remove(child);
428 debugfs_remove(c->dent); 428 debugfs_remove(c->dent);
429 return err; 429 return err;
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index d17620c50c28..d2422c766cca 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -750,6 +750,7 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
750} 750}
751#endif 751#endif
752 752
753#ifdef CONFIG_ARCH_OMAP1
753/* 754/*
754 * This only applies to chips that can't do both rising and falling edge 755 * This only applies to chips that can't do both rising and falling edge
755 * detection at once. For all other chips, this function is a noop. 756 * detection at once. For all other chips, this function is a noop.
@@ -760,11 +761,9 @@ static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
760 u32 l = 0; 761 u32 l = 0;
761 762
762 switch (bank->method) { 763 switch (bank->method) {
763#ifdef CONFIG_ARCH_OMAP1
764 case METHOD_MPUIO: 764 case METHOD_MPUIO:
765 reg += OMAP_MPUIO_GPIO_INT_EDGE; 765 reg += OMAP_MPUIO_GPIO_INT_EDGE;
766 break; 766 break;
767#endif
768#ifdef CONFIG_ARCH_OMAP15XX 767#ifdef CONFIG_ARCH_OMAP15XX
769 case METHOD_GPIO_1510: 768 case METHOD_GPIO_1510:
770 reg += OMAP1510_GPIO_INT_CONTROL; 769 reg += OMAP1510_GPIO_INT_CONTROL;
@@ -787,6 +786,7 @@ static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
787 786
788 __raw_writel(l, reg); 787 __raw_writel(l, reg);
789} 788}
789#endif
790 790
791static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger) 791static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
792{ 792{
diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c
index 1e5648d3e3d8..2ed72013c2e2 100644
--- a/arch/arm/plat-omap/omap_device.c
+++ b/arch/arm/plat-omap/omap_device.c
@@ -89,16 +89,6 @@
89#define USE_WAKEUP_LAT 0 89#define USE_WAKEUP_LAT 0
90#define IGNORE_WAKEUP_LAT 1 90#define IGNORE_WAKEUP_LAT 1
91 91
92/* XXX this should be moved into a separate file */
93#if defined(CONFIG_ARCH_OMAP2420)
94# define OMAP_32KSYNCT_BASE 0x48004000
95#elif defined(CONFIG_ARCH_OMAP2430)
96# define OMAP_32KSYNCT_BASE 0x49020000
97#elif defined(CONFIG_ARCH_OMAP3430)
98# define OMAP_32KSYNCT_BASE 0x48320000
99#else
100# error Unknown OMAP device
101#endif
102 92
103/* Private functions */ 93/* Private functions */
104 94
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index f60a5400a25b..a63c4be99b36 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -197,10 +197,13 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_
197 } 197 }
198 198
199 /* 199 /*
200 * Update the FPSCR with the additional exception flags. 200 * If any of the status flags are set, update the FPSCR.
201 * Comparison instructions always return at least one of 201 * Comparison instructions always return at least one of
202 * these flags set. 202 * these flags set.
203 */ 203 */
204 if (exceptions & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V))
205 fpscr &= ~(FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V);
206
204 fpscr |= exceptions; 207 fpscr |= exceptions;
205 208
206 fmxr(FPSCR, fpscr); 209 fmxr(FPSCR, fpscr);
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index 1aa1ea5e9212..b13d1879e51b 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -1325,7 +1325,7 @@ struct platform_device *__init
1325at32_add_device_mci(unsigned int id, struct mci_platform_data *data) 1325at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
1326{ 1326{
1327 struct platform_device *pdev; 1327 struct platform_device *pdev;
1328 struct mci_dma_slave *slave; 1328 struct mci_dma_data *slave;
1329 u32 pioa_mask; 1329 u32 pioa_mask;
1330 u32 piob_mask; 1330 u32 piob_mask;
1331 1331
@@ -1344,7 +1344,9 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
1344 ARRAY_SIZE(atmel_mci0_resource))) 1344 ARRAY_SIZE(atmel_mci0_resource)))
1345 goto fail; 1345 goto fail;
1346 1346
1347 slave = kzalloc(sizeof(struct mci_dma_slave), GFP_KERNEL); 1347 slave = kzalloc(sizeof(struct mci_dma_data), GFP_KERNEL);
1348 if (!slave)
1349 goto fail;
1348 1350
1349 slave->sdata.dma_dev = &dw_dmac0_device.dev; 1351 slave->sdata.dma_dev = &dw_dmac0_device.dev;
1350 slave->sdata.reg_width = DW_DMA_SLAVE_WIDTH_32BIT; 1352 slave->sdata.reg_width = DW_DMA_SLAVE_WIDTH_32BIT;
@@ -1357,7 +1359,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
1357 1359
1358 if (platform_device_add_data(pdev, data, 1360 if (platform_device_add_data(pdev, data,
1359 sizeof(struct mci_platform_data))) 1361 sizeof(struct mci_platform_data)))
1360 goto fail; 1362 goto fail_free;
1361 1363
1362 /* CLK line is common to both slots */ 1364 /* CLK line is common to both slots */
1363 pioa_mask = 1 << 10; 1365 pioa_mask = 1 << 10;
@@ -1381,7 +1383,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
1381 /* Slot is unused */ 1383 /* Slot is unused */
1382 break; 1384 break;
1383 default: 1385 default:
1384 goto fail; 1386 goto fail_free;
1385 } 1387 }
1386 1388
1387 select_peripheral(PIOA, pioa_mask, PERIPH_A, 0); 1389 select_peripheral(PIOA, pioa_mask, PERIPH_A, 0);
@@ -1408,7 +1410,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
1408 break; 1410 break;
1409 default: 1411 default:
1410 if (!data->slot[0].bus_width) 1412 if (!data->slot[0].bus_width)
1411 goto fail; 1413 goto fail_free;
1412 1414
1413 data->slot[1].bus_width = 0; 1415 data->slot[1].bus_width = 0;
1414 break; 1416 break;
@@ -1419,9 +1421,10 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
1419 platform_device_add(pdev); 1421 platform_device_add(pdev);
1420 return pdev; 1422 return pdev;
1421 1423
1424fail_free:
1425 kfree(slave);
1422fail: 1426fail:
1423 data->dma_slave = NULL; 1427 data->dma_slave = NULL;
1424 kfree(slave);
1425 platform_device_put(pdev); 1428 platform_device_put(pdev);
1426 return NULL; 1429 return NULL;
1427} 1430}
diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig
index bb7c374713ad..6fced1fe3bf0 100644
--- a/arch/microblaze/configs/mmu_defconfig
+++ b/arch/microblaze/configs/mmu_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.31 3# Linux kernel version: 2.6.33-rc6
4# Thu Sep 24 10:28:50 2009 4# Wed Feb 3 10:02:59 2010
5# 5#
6CONFIG_MICROBLAZE=y 6CONFIG_MICROBLAZE=y
7# CONFIG_SWAP is not set 7# CONFIG_SWAP is not set
@@ -19,8 +19,12 @@ CONFIG_GENERIC_CLOCKEVENTS=y
19CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y 19CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
20CONFIG_GENERIC_GPIO=y 20CONFIG_GENERIC_GPIO=y
21CONFIG_GENERIC_CSUM=y 21CONFIG_GENERIC_CSUM=y
22CONFIG_STACKTRACE_SUPPORT=y
23CONFIG_LOCKDEP_SUPPORT=y
24CONFIG_HAVE_LATENCYTOP_SUPPORT=y
22# CONFIG_PCI is not set 25# CONFIG_PCI is not set
23CONFIG_NO_DMA=y 26CONFIG_NO_DMA=y
27CONFIG_DTC=y
24CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 28CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
25CONFIG_CONSTRUCTORS=y 29CONFIG_CONSTRUCTORS=y
26 30
@@ -44,6 +48,7 @@ CONFIG_SYSVIPC_SYSCTL=y
44# 48#
45CONFIG_TREE_RCU=y 49CONFIG_TREE_RCU=y
46# CONFIG_TREE_PREEMPT_RCU is not set 50# CONFIG_TREE_PREEMPT_RCU is not set
51# CONFIG_TINY_RCU is not set
47# CONFIG_RCU_TRACE is not set 52# CONFIG_RCU_TRACE is not set
48CONFIG_RCU_FANOUT=32 53CONFIG_RCU_FANOUT=32
49# CONFIG_RCU_FANOUT_EXACT is not set 54# CONFIG_RCU_FANOUT_EXACT is not set
@@ -64,10 +69,12 @@ CONFIG_INITRAMFS_ROOT_GID=0
64CONFIG_RD_GZIP=y 69CONFIG_RD_GZIP=y
65# CONFIG_RD_BZIP2 is not set 70# CONFIG_RD_BZIP2 is not set
66# CONFIG_RD_LZMA is not set 71# CONFIG_RD_LZMA is not set
72# CONFIG_RD_LZO is not set
67# CONFIG_INITRAMFS_COMPRESSION_NONE is not set 73# CONFIG_INITRAMFS_COMPRESSION_NONE is not set
68CONFIG_INITRAMFS_COMPRESSION_GZIP=y 74CONFIG_INITRAMFS_COMPRESSION_GZIP=y
69# CONFIG_INITRAMFS_COMPRESSION_BZIP2 is not set 75# CONFIG_INITRAMFS_COMPRESSION_BZIP2 is not set
70# CONFIG_INITRAMFS_COMPRESSION_LZMA is not set 76# CONFIG_INITRAMFS_COMPRESSION_LZMA is not set
77# CONFIG_INITRAMFS_COMPRESSION_LZO is not set
71# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 78# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
72CONFIG_SYSCTL=y 79CONFIG_SYSCTL=y
73CONFIG_ANON_INODES=y 80CONFIG_ANON_INODES=y
@@ -90,21 +97,20 @@ CONFIG_EVENTFD=y
90CONFIG_AIO=y 97CONFIG_AIO=y
91 98
92# 99#
93# Performance Counters 100# Kernel Performance Events And Counters
94# 101#
95CONFIG_VM_EVENT_COUNTERS=y 102CONFIG_VM_EVENT_COUNTERS=y
96# CONFIG_STRIP_ASM_SYMS is not set
97CONFIG_COMPAT_BRK=y 103CONFIG_COMPAT_BRK=y
98CONFIG_SLAB=y 104CONFIG_SLAB=y
99# CONFIG_SLUB is not set 105# CONFIG_SLUB is not set
100# CONFIG_SLOB is not set 106# CONFIG_SLOB is not set
101# CONFIG_PROFILING is not set 107# CONFIG_PROFILING is not set
102# CONFIG_MARKERS is not set 108CONFIG_HAVE_OPROFILE=y
103 109
104# 110#
105# GCOV-based kernel profiling 111# GCOV-based kernel profiling
106# 112#
107# CONFIG_SLOW_WORK is not set 113CONFIG_SLOW_WORK=y
108# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set 114# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
109CONFIG_SLABINFO=y 115CONFIG_SLABINFO=y
110CONFIG_BASE_SMALL=1 116CONFIG_BASE_SMALL=1
@@ -123,14 +129,41 @@ CONFIG_LBDAF=y
123# IO Schedulers 129# IO Schedulers
124# 130#
125CONFIG_IOSCHED_NOOP=y 131CONFIG_IOSCHED_NOOP=y
126CONFIG_IOSCHED_AS=y
127CONFIG_IOSCHED_DEADLINE=y 132CONFIG_IOSCHED_DEADLINE=y
128CONFIG_IOSCHED_CFQ=y 133CONFIG_IOSCHED_CFQ=y
129# CONFIG_DEFAULT_AS is not set
130# CONFIG_DEFAULT_DEADLINE is not set 134# CONFIG_DEFAULT_DEADLINE is not set
131CONFIG_DEFAULT_CFQ=y 135CONFIG_DEFAULT_CFQ=y
132# CONFIG_DEFAULT_NOOP is not set 136# CONFIG_DEFAULT_NOOP is not set
133CONFIG_DEFAULT_IOSCHED="cfq" 137CONFIG_DEFAULT_IOSCHED="cfq"
138# CONFIG_INLINE_SPIN_TRYLOCK is not set
139# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
140# CONFIG_INLINE_SPIN_LOCK is not set
141# CONFIG_INLINE_SPIN_LOCK_BH is not set
142# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
143# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
144# CONFIG_INLINE_SPIN_UNLOCK is not set
145# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
146# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set
147# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
148# CONFIG_INLINE_READ_TRYLOCK is not set
149# CONFIG_INLINE_READ_LOCK is not set
150# CONFIG_INLINE_READ_LOCK_BH is not set
151# CONFIG_INLINE_READ_LOCK_IRQ is not set
152# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
153# CONFIG_INLINE_READ_UNLOCK is not set
154# CONFIG_INLINE_READ_UNLOCK_BH is not set
155# CONFIG_INLINE_READ_UNLOCK_IRQ is not set
156# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
157# CONFIG_INLINE_WRITE_TRYLOCK is not set
158# CONFIG_INLINE_WRITE_LOCK is not set
159# CONFIG_INLINE_WRITE_LOCK_BH is not set
160# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
161# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
162# CONFIG_INLINE_WRITE_UNLOCK is not set
163# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
164# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set
165# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
166# CONFIG_MUTEX_SPIN_ON_OWNER is not set
134# CONFIG_FREEZER is not set 167# CONFIG_FREEZER is not set
135 168
136# 169#
@@ -139,11 +172,6 @@ CONFIG_DEFAULT_IOSCHED="cfq"
139CONFIG_PLATFORM_GENERIC=y 172CONFIG_PLATFORM_GENERIC=y
140CONFIG_OPT_LIB_FUNCTION=y 173CONFIG_OPT_LIB_FUNCTION=y
141CONFIG_OPT_LIB_ASM=y 174CONFIG_OPT_LIB_ASM=y
142CONFIG_ALLOW_EDIT_AUTO=y
143
144#
145# Automatic platform settings from Kconfig.auto
146#
147 175
148# 176#
149# Definitions for MICROBLAZE0 177# Definitions for MICROBLAZE0
@@ -203,12 +231,11 @@ CONFIG_FLATMEM_MANUAL=y
203CONFIG_FLATMEM=y 231CONFIG_FLATMEM=y
204CONFIG_FLAT_NODE_MEM_MAP=y 232CONFIG_FLAT_NODE_MEM_MAP=y
205CONFIG_PAGEFLAGS_EXTENDED=y 233CONFIG_PAGEFLAGS_EXTENDED=y
206CONFIG_SPLIT_PTLOCK_CPUS=4 234CONFIG_SPLIT_PTLOCK_CPUS=999999
207# CONFIG_PHYS_ADDR_T_64BIT is not set 235# CONFIG_PHYS_ADDR_T_64BIT is not set
208CONFIG_ZONE_DMA_FLAG=0 236CONFIG_ZONE_DMA_FLAG=0
209CONFIG_VIRT_TO_BUS=y 237CONFIG_VIRT_TO_BUS=y
210CONFIG_HAVE_MLOCK=y 238# CONFIG_KSM is not set
211CONFIG_HAVE_MLOCKED_PAGE_BIT=y
212CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 239CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
213 240
214# 241#
@@ -289,7 +316,13 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
289# CONFIG_IRDA is not set 316# CONFIG_IRDA is not set
290# CONFIG_BT is not set 317# CONFIG_BT is not set
291# CONFIG_AF_RXRPC is not set 318# CONFIG_AF_RXRPC is not set
292# CONFIG_WIRELESS is not set 319CONFIG_WIRELESS=y
320# CONFIG_CFG80211 is not set
321# CONFIG_LIB80211 is not set
322
323#
324# CFG80211 needs to be enabled for MAC80211
325#
293# CONFIG_WIMAX is not set 326# CONFIG_WIMAX is not set
294# CONFIG_RFKILL is not set 327# CONFIG_RFKILL is not set
295# CONFIG_NET_9P is not set 328# CONFIG_NET_9P is not set
@@ -313,6 +346,10 @@ CONFIG_OF_DEVICE=y
313CONFIG_BLK_DEV=y 346CONFIG_BLK_DEV=y
314# CONFIG_BLK_DEV_COW_COMMON is not set 347# CONFIG_BLK_DEV_COW_COMMON is not set
315# CONFIG_BLK_DEV_LOOP is not set 348# CONFIG_BLK_DEV_LOOP is not set
349
350#
351# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
352#
316# CONFIG_BLK_DEV_NBD is not set 353# CONFIG_BLK_DEV_NBD is not set
317CONFIG_BLK_DEV_RAM=y 354CONFIG_BLK_DEV_RAM=y
318CONFIG_BLK_DEV_RAM_COUNT=16 355CONFIG_BLK_DEV_RAM_COUNT=16
@@ -349,7 +386,6 @@ CONFIG_NETDEVICES=y
349# CONFIG_PHYLIB is not set 386# CONFIG_PHYLIB is not set
350CONFIG_NET_ETHERNET=y 387CONFIG_NET_ETHERNET=y
351# CONFIG_MII is not set 388# CONFIG_MII is not set
352# CONFIG_ETHOC is not set
353# CONFIG_DNET is not set 389# CONFIG_DNET is not set
354# CONFIG_IBM_NEW_EMAC_ZMII is not set 390# CONFIG_IBM_NEW_EMAC_ZMII is not set
355# CONFIG_IBM_NEW_EMAC_RGMII is not set 391# CONFIG_IBM_NEW_EMAC_RGMII is not set
@@ -359,12 +395,12 @@ CONFIG_NET_ETHERNET=y
359# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set 395# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
360# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set 396# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
361# CONFIG_KS8842 is not set 397# CONFIG_KS8842 is not set
398# CONFIG_KS8851_MLL is not set
362CONFIG_XILINX_EMACLITE=y 399CONFIG_XILINX_EMACLITE=y
363CONFIG_NETDEV_1000=y 400CONFIG_NETDEV_1000=y
364CONFIG_NETDEV_10000=y 401CONFIG_NETDEV_10000=y
365CONFIG_WLAN=y 402CONFIG_WLAN=y
366# CONFIG_WLAN_PRE80211 is not set 403# CONFIG_HOSTAP is not set
367# CONFIG_WLAN_80211 is not set
368 404
369# 405#
370# Enable WiMAX (Networking options) to see the WiMAX drivers 406# Enable WiMAX (Networking options) to see the WiMAX drivers
@@ -408,6 +444,7 @@ CONFIG_SERIAL_UARTLITE=y
408CONFIG_SERIAL_UARTLITE_CONSOLE=y 444CONFIG_SERIAL_UARTLITE_CONSOLE=y
409CONFIG_SERIAL_CORE=y 445CONFIG_SERIAL_CORE=y
410CONFIG_SERIAL_CORE_CONSOLE=y 446CONFIG_SERIAL_CORE_CONSOLE=y
447# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
411CONFIG_UNIX98_PTYS=y 448CONFIG_UNIX98_PTYS=y
412# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set 449# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
413CONFIG_LEGACY_PTYS=y 450CONFIG_LEGACY_PTYS=y
@@ -433,7 +470,6 @@ CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
433# CONFIG_POWER_SUPPLY is not set 470# CONFIG_POWER_SUPPLY is not set
434# CONFIG_HWMON is not set 471# CONFIG_HWMON is not set
435# CONFIG_THERMAL is not set 472# CONFIG_THERMAL is not set
436# CONFIG_THERMAL_HWMON is not set
437# CONFIG_WATCHDOG is not set 473# CONFIG_WATCHDOG is not set
438 474
439# 475#
@@ -526,8 +562,6 @@ CONFIG_PROC_FS=y
526CONFIG_PROC_SYSCTL=y 562CONFIG_PROC_SYSCTL=y
527CONFIG_PROC_PAGE_MONITOR=y 563CONFIG_PROC_PAGE_MONITOR=y
528CONFIG_SYSFS=y 564CONFIG_SYSFS=y
529CONFIG_TMPFS=y
530# CONFIG_TMPFS_POSIX_ACL is not set
531# CONFIG_HUGETLB_PAGE is not set 565# CONFIG_HUGETLB_PAGE is not set
532# CONFIG_CONFIGFS_FS is not set 566# CONFIG_CONFIGFS_FS is not set
533CONFIG_MISC_FILESYSTEMS=y 567CONFIG_MISC_FILESYSTEMS=y
@@ -638,11 +672,13 @@ CONFIG_NLS_DEFAULT="iso8859-1"
638# 672#
639# Kernel hacking 673# Kernel hacking
640# 674#
675CONFIG_TRACE_IRQFLAGS_SUPPORT=y
641# CONFIG_PRINTK_TIME is not set 676# CONFIG_PRINTK_TIME is not set
642CONFIG_ENABLE_WARN_DEPRECATED=y 677CONFIG_ENABLE_WARN_DEPRECATED=y
643CONFIG_ENABLE_MUST_CHECK=y 678CONFIG_ENABLE_MUST_CHECK=y
644CONFIG_FRAME_WARN=1024 679CONFIG_FRAME_WARN=1024
645# CONFIG_MAGIC_SYSRQ is not set 680# CONFIG_MAGIC_SYSRQ is not set
681# CONFIG_STRIP_ASM_SYMS is not set
646# CONFIG_UNUSED_SYMBOLS is not set 682# CONFIG_UNUSED_SYMBOLS is not set
647# CONFIG_DEBUG_FS is not set 683# CONFIG_DEBUG_FS is not set
648# CONFIG_HEADERS_CHECK is not set 684# CONFIG_HEADERS_CHECK is not set
@@ -662,6 +698,9 @@ CONFIG_DEBUG_SLAB=y
662# CONFIG_DEBUG_SLAB_LEAK is not set 698# CONFIG_DEBUG_SLAB_LEAK is not set
663CONFIG_DEBUG_SPINLOCK=y 699CONFIG_DEBUG_SPINLOCK=y
664# CONFIG_DEBUG_MUTEXES is not set 700# CONFIG_DEBUG_MUTEXES is not set
701# CONFIG_DEBUG_LOCK_ALLOC is not set
702# CONFIG_PROVE_LOCKING is not set
703# CONFIG_LOCK_STAT is not set
665# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 704# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
666# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 705# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
667# CONFIG_DEBUG_KOBJECT is not set 706# CONFIG_DEBUG_KOBJECT is not set
@@ -680,10 +719,29 @@ CONFIG_DEBUG_INFO=y
680# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set 719# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
681# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set 720# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
682# CONFIG_FAULT_INJECTION is not set 721# CONFIG_FAULT_INJECTION is not set
722# CONFIG_LATENCYTOP is not set
683# CONFIG_SYSCTL_SYSCALL_CHECK is not set 723# CONFIG_SYSCTL_SYSCALL_CHECK is not set
684# CONFIG_PAGE_POISONING is not set 724# CONFIG_PAGE_POISONING is not set
725CONFIG_HAVE_FUNCTION_TRACER=y
726CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
727CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
728CONFIG_HAVE_DYNAMIC_FTRACE=y
729CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
730CONFIG_TRACING_SUPPORT=y
731CONFIG_FTRACE=y
732# CONFIG_FUNCTION_TRACER is not set
733# CONFIG_IRQSOFF_TRACER is not set
734# CONFIG_SCHED_TRACER is not set
735# CONFIG_ENABLE_DEFAULT_TRACERS is not set
736# CONFIG_BOOT_TRACER is not set
737CONFIG_BRANCH_PROFILE_NONE=y
738# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
739# CONFIG_PROFILE_ALL_BRANCHES is not set
740# CONFIG_STACK_TRACER is not set
741# CONFIG_KMEMTRACE is not set
742# CONFIG_WORKQUEUE_TRACER is not set
743# CONFIG_BLK_DEV_IO_TRACE is not set
685# CONFIG_SAMPLES is not set 744# CONFIG_SAMPLES is not set
686# CONFIG_KMEMCHECK is not set
687CONFIG_EARLY_PRINTK=y 745CONFIG_EARLY_PRINTK=y
688# CONFIG_HEART_BEAT is not set 746# CONFIG_HEART_BEAT is not set
689CONFIG_DEBUG_BOOTMEM=y 747CONFIG_DEBUG_BOOTMEM=y
@@ -694,7 +752,11 @@ CONFIG_DEBUG_BOOTMEM=y
694# CONFIG_KEYS is not set 752# CONFIG_KEYS is not set
695# CONFIG_SECURITY is not set 753# CONFIG_SECURITY is not set
696# CONFIG_SECURITYFS is not set 754# CONFIG_SECURITYFS is not set
697# CONFIG_SECURITY_FILE_CAPABILITIES is not set 755# CONFIG_DEFAULT_SECURITY_SELINUX is not set
756# CONFIG_DEFAULT_SECURITY_SMACK is not set
757# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
758CONFIG_DEFAULT_SECURITY_DAC=y
759CONFIG_DEFAULT_SECURITY=""
698CONFIG_CRYPTO=y 760CONFIG_CRYPTO=y
699 761
700# 762#
diff --git a/arch/microblaze/configs/nommu_defconfig b/arch/microblaze/configs/nommu_defconfig
index adb839bab704..ce2da535246a 100644
--- a/arch/microblaze/configs/nommu_defconfig
+++ b/arch/microblaze/configs/nommu_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.31 3# Linux kernel version: 2.6.33-rc6
4# Thu Sep 24 10:29:43 2009 4# Wed Feb 3 10:03:21 2010
5# 5#
6CONFIG_MICROBLAZE=y 6CONFIG_MICROBLAZE=y
7# CONFIG_SWAP is not set 7# CONFIG_SWAP is not set
@@ -19,8 +19,12 @@ CONFIG_GENERIC_CLOCKEVENTS=y
19CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y 19CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
20CONFIG_GENERIC_GPIO=y 20CONFIG_GENERIC_GPIO=y
21CONFIG_GENERIC_CSUM=y 21CONFIG_GENERIC_CSUM=y
22CONFIG_STACKTRACE_SUPPORT=y
23CONFIG_LOCKDEP_SUPPORT=y
24CONFIG_HAVE_LATENCYTOP_SUPPORT=y
22# CONFIG_PCI is not set 25# CONFIG_PCI is not set
23CONFIG_NO_DMA=y 26CONFIG_NO_DMA=y
27CONFIG_DTC=y
24CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 28CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
25CONFIG_CONSTRUCTORS=y 29CONFIG_CONSTRUCTORS=y
26 30
@@ -46,6 +50,7 @@ CONFIG_BSD_PROCESS_ACCT_V3=y
46# 50#
47CONFIG_TREE_RCU=y 51CONFIG_TREE_RCU=y
48# CONFIG_TREE_PREEMPT_RCU is not set 52# CONFIG_TREE_PREEMPT_RCU is not set
53# CONFIG_TINY_RCU is not set
49# CONFIG_RCU_TRACE is not set 54# CONFIG_RCU_TRACE is not set
50CONFIG_RCU_FANOUT=32 55CONFIG_RCU_FANOUT=32
51# CONFIG_RCU_FANOUT_EXACT is not set 56# CONFIG_RCU_FANOUT_EXACT is not set
@@ -81,16 +86,16 @@ CONFIG_EVENTFD=y
81CONFIG_AIO=y 86CONFIG_AIO=y
82 87
83# 88#
84# Performance Counters 89# Kernel Performance Events And Counters
85# 90#
86CONFIG_VM_EVENT_COUNTERS=y 91CONFIG_VM_EVENT_COUNTERS=y
87# CONFIG_STRIP_ASM_SYMS is not set
88CONFIG_COMPAT_BRK=y 92CONFIG_COMPAT_BRK=y
89CONFIG_SLAB=y 93CONFIG_SLAB=y
90# CONFIG_SLUB is not set 94# CONFIG_SLUB is not set
91# CONFIG_SLOB is not set 95# CONFIG_SLOB is not set
96# CONFIG_MMAP_ALLOW_UNINITIALIZED is not set
92# CONFIG_PROFILING is not set 97# CONFIG_PROFILING is not set
93# CONFIG_MARKERS is not set 98CONFIG_HAVE_OPROFILE=y
94 99
95# 100#
96# GCOV-based kernel profiling 101# GCOV-based kernel profiling
@@ -116,14 +121,41 @@ CONFIG_LBDAF=y
116# IO Schedulers 121# IO Schedulers
117# 122#
118CONFIG_IOSCHED_NOOP=y 123CONFIG_IOSCHED_NOOP=y
119CONFIG_IOSCHED_AS=y
120CONFIG_IOSCHED_DEADLINE=y 124CONFIG_IOSCHED_DEADLINE=y
121CONFIG_IOSCHED_CFQ=y 125CONFIG_IOSCHED_CFQ=y
122# CONFIG_DEFAULT_AS is not set
123# CONFIG_DEFAULT_DEADLINE is not set 126# CONFIG_DEFAULT_DEADLINE is not set
124CONFIG_DEFAULT_CFQ=y 127CONFIG_DEFAULT_CFQ=y
125# CONFIG_DEFAULT_NOOP is not set 128# CONFIG_DEFAULT_NOOP is not set
126CONFIG_DEFAULT_IOSCHED="cfq" 129CONFIG_DEFAULT_IOSCHED="cfq"
130# CONFIG_INLINE_SPIN_TRYLOCK is not set
131# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
132# CONFIG_INLINE_SPIN_LOCK is not set
133# CONFIG_INLINE_SPIN_LOCK_BH is not set
134# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
135# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
136CONFIG_INLINE_SPIN_UNLOCK=y
137# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
138CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
139# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
140# CONFIG_INLINE_READ_TRYLOCK is not set
141# CONFIG_INLINE_READ_LOCK is not set
142# CONFIG_INLINE_READ_LOCK_BH is not set
143# CONFIG_INLINE_READ_LOCK_IRQ is not set
144# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
145CONFIG_INLINE_READ_UNLOCK=y
146# CONFIG_INLINE_READ_UNLOCK_BH is not set
147CONFIG_INLINE_READ_UNLOCK_IRQ=y
148# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
149# CONFIG_INLINE_WRITE_TRYLOCK is not set
150# CONFIG_INLINE_WRITE_LOCK is not set
151# CONFIG_INLINE_WRITE_LOCK_BH is not set
152# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
153# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
154CONFIG_INLINE_WRITE_UNLOCK=y
155# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
156CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
157# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
158# CONFIG_MUTEX_SPIN_ON_OWNER is not set
127# CONFIG_FREEZER is not set 159# CONFIG_FREEZER is not set
128 160
129# 161#
@@ -132,7 +164,10 @@ CONFIG_DEFAULT_IOSCHED="cfq"
132CONFIG_PLATFORM_GENERIC=y 164CONFIG_PLATFORM_GENERIC=y
133# CONFIG_SELFMOD is not set 165# CONFIG_SELFMOD is not set
134# CONFIG_OPT_LIB_FUNCTION is not set 166# CONFIG_OPT_LIB_FUNCTION is not set
135# CONFIG_ALLOW_EDIT_AUTO is not set 167
168#
169# Definitions for MICROBLAZE0
170#
136CONFIG_KERNEL_BASE_ADDR=0x90000000 171CONFIG_KERNEL_BASE_ADDR=0x90000000
137CONFIG_XILINX_MICROBLAZE0_FAMILY="virtex5" 172CONFIG_XILINX_MICROBLAZE0_FAMILY="virtex5"
138CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1 173CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1
@@ -190,7 +225,6 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
190# CONFIG_PHYS_ADDR_T_64BIT is not set 225# CONFIG_PHYS_ADDR_T_64BIT is not set
191CONFIG_ZONE_DMA_FLAG=0 226CONFIG_ZONE_DMA_FLAG=0
192CONFIG_VIRT_TO_BUS=y 227CONFIG_VIRT_TO_BUS=y
193CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
194CONFIG_NOMMU_INITIAL_TRIM_EXCESS=1 228CONFIG_NOMMU_INITIAL_TRIM_EXCESS=1
195 229
196# 230#
@@ -274,9 +308,6 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
274# CONFIG_AF_RXRPC is not set 308# CONFIG_AF_RXRPC is not set
275CONFIG_WIRELESS=y 309CONFIG_WIRELESS=y
276# CONFIG_CFG80211 is not set 310# CONFIG_CFG80211 is not set
277CONFIG_CFG80211_DEFAULT_PS_VALUE=0
278CONFIG_WIRELESS_OLD_REGULATORY=y
279# CONFIG_WIRELESS_EXT is not set
280# CONFIG_LIB80211 is not set 311# CONFIG_LIB80211 is not set
281 312
282# 313#
@@ -301,9 +332,9 @@ CONFIG_STANDALONE=y
301# CONFIG_CONNECTOR is not set 332# CONFIG_CONNECTOR is not set
302CONFIG_MTD=y 333CONFIG_MTD=y
303# CONFIG_MTD_DEBUG is not set 334# CONFIG_MTD_DEBUG is not set
335# CONFIG_MTD_TESTS is not set
304CONFIG_MTD_CONCAT=y 336CONFIG_MTD_CONCAT=y
305CONFIG_MTD_PARTITIONS=y 337CONFIG_MTD_PARTITIONS=y
306# CONFIG_MTD_TESTS is not set
307# CONFIG_MTD_REDBOOT_PARTS is not set 338# CONFIG_MTD_REDBOOT_PARTS is not set
308CONFIG_MTD_CMDLINE_PARTS=y 339CONFIG_MTD_CMDLINE_PARTS=y
309# CONFIG_MTD_OF_PARTS is not set 340# CONFIG_MTD_OF_PARTS is not set
@@ -387,6 +418,10 @@ CONFIG_OF_DEVICE=y
387CONFIG_BLK_DEV=y 418CONFIG_BLK_DEV=y
388# CONFIG_BLK_DEV_COW_COMMON is not set 419# CONFIG_BLK_DEV_COW_COMMON is not set
389# CONFIG_BLK_DEV_LOOP is not set 420# CONFIG_BLK_DEV_LOOP is not set
421
422#
423# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
424#
390CONFIG_BLK_DEV_NBD=y 425CONFIG_BLK_DEV_NBD=y
391CONFIG_BLK_DEV_RAM=y 426CONFIG_BLK_DEV_RAM=y
392CONFIG_BLK_DEV_RAM_COUNT=16 427CONFIG_BLK_DEV_RAM_COUNT=16
@@ -423,7 +458,6 @@ CONFIG_NETDEVICES=y
423# CONFIG_PHYLIB is not set 458# CONFIG_PHYLIB is not set
424CONFIG_NET_ETHERNET=y 459CONFIG_NET_ETHERNET=y
425# CONFIG_MII is not set 460# CONFIG_MII is not set
426# CONFIG_ETHOC is not set
427# CONFIG_DNET is not set 461# CONFIG_DNET is not set
428# CONFIG_IBM_NEW_EMAC_ZMII is not set 462# CONFIG_IBM_NEW_EMAC_ZMII is not set
429# CONFIG_IBM_NEW_EMAC_RGMII is not set 463# CONFIG_IBM_NEW_EMAC_RGMII is not set
@@ -433,12 +467,12 @@ CONFIG_NET_ETHERNET=y
433# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set 467# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
434# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set 468# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
435# CONFIG_KS8842 is not set 469# CONFIG_KS8842 is not set
470# CONFIG_KS8851_MLL is not set
436# CONFIG_XILINX_EMACLITE is not set 471# CONFIG_XILINX_EMACLITE is not set
437CONFIG_NETDEV_1000=y 472CONFIG_NETDEV_1000=y
438CONFIG_NETDEV_10000=y 473CONFIG_NETDEV_10000=y
439CONFIG_WLAN=y 474CONFIG_WLAN=y
440# CONFIG_WLAN_PRE80211 is not set 475# CONFIG_HOSTAP is not set
441# CONFIG_WLAN_80211 is not set
442 476
443# 477#
444# Enable WiMAX (Networking options) to see the WiMAX drivers 478# Enable WiMAX (Networking options) to see the WiMAX drivers
@@ -482,6 +516,7 @@ CONFIG_SERIAL_UARTLITE=y
482CONFIG_SERIAL_UARTLITE_CONSOLE=y 516CONFIG_SERIAL_UARTLITE_CONSOLE=y
483CONFIG_SERIAL_CORE=y 517CONFIG_SERIAL_CORE=y
484CONFIG_SERIAL_CORE_CONSOLE=y 518CONFIG_SERIAL_CORE_CONSOLE=y
519# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
485CONFIG_UNIX98_PTYS=y 520CONFIG_UNIX98_PTYS=y
486# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set 521# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
487CONFIG_LEGACY_PTYS=y 522CONFIG_LEGACY_PTYS=y
@@ -508,7 +543,6 @@ CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
508# CONFIG_POWER_SUPPLY is not set 543# CONFIG_POWER_SUPPLY is not set
509# CONFIG_HWMON is not set 544# CONFIG_HWMON is not set
510# CONFIG_THERMAL is not set 545# CONFIG_THERMAL is not set
511# CONFIG_THERMAL_HWMON is not set
512# CONFIG_WATCHDOG is not set 546# CONFIG_WATCHDOG is not set
513 547
514# 548#
@@ -616,7 +650,6 @@ CONFIG_INOTIFY_USER=y
616CONFIG_PROC_FS=y 650CONFIG_PROC_FS=y
617CONFIG_PROC_SYSCTL=y 651CONFIG_PROC_SYSCTL=y
618CONFIG_SYSFS=y 652CONFIG_SYSFS=y
619# CONFIG_TMPFS is not set
620# CONFIG_HUGETLB_PAGE is not set 653# CONFIG_HUGETLB_PAGE is not set
621# CONFIG_CONFIGFS_FS is not set 654# CONFIG_CONFIGFS_FS is not set
622CONFIG_MISC_FILESYSTEMS=y 655CONFIG_MISC_FILESYSTEMS=y
@@ -672,11 +705,13 @@ CONFIG_MSDOS_PARTITION=y
672# 705#
673# Kernel hacking 706# Kernel hacking
674# 707#
708CONFIG_TRACE_IRQFLAGS_SUPPORT=y
675# CONFIG_PRINTK_TIME is not set 709# CONFIG_PRINTK_TIME is not set
676CONFIG_ENABLE_WARN_DEPRECATED=y 710CONFIG_ENABLE_WARN_DEPRECATED=y
677CONFIG_ENABLE_MUST_CHECK=y 711CONFIG_ENABLE_MUST_CHECK=y
678CONFIG_FRAME_WARN=1024 712CONFIG_FRAME_WARN=1024
679# CONFIG_MAGIC_SYSRQ is not set 713# CONFIG_MAGIC_SYSRQ is not set
714# CONFIG_STRIP_ASM_SYMS is not set
680CONFIG_UNUSED_SYMBOLS=y 715CONFIG_UNUSED_SYMBOLS=y
681CONFIG_DEBUG_FS=y 716CONFIG_DEBUG_FS=y
682# CONFIG_HEADERS_CHECK is not set 717# CONFIG_HEADERS_CHECK is not set
@@ -695,12 +730,16 @@ CONFIG_DEBUG_OBJECTS=y
695CONFIG_DEBUG_OBJECTS_SELFTEST=y 730CONFIG_DEBUG_OBJECTS_SELFTEST=y
696CONFIG_DEBUG_OBJECTS_FREE=y 731CONFIG_DEBUG_OBJECTS_FREE=y
697CONFIG_DEBUG_OBJECTS_TIMERS=y 732CONFIG_DEBUG_OBJECTS_TIMERS=y
733# CONFIG_DEBUG_OBJECTS_WORK is not set
698CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1 734CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1
699# CONFIG_DEBUG_SLAB is not set 735# CONFIG_DEBUG_SLAB is not set
700# CONFIG_DEBUG_RT_MUTEXES is not set 736# CONFIG_DEBUG_RT_MUTEXES is not set
701# CONFIG_RT_MUTEX_TESTER is not set 737# CONFIG_RT_MUTEX_TESTER is not set
702# CONFIG_DEBUG_SPINLOCK is not set 738# CONFIG_DEBUG_SPINLOCK is not set
703# CONFIG_DEBUG_MUTEXES is not set 739# CONFIG_DEBUG_MUTEXES is not set
740# CONFIG_DEBUG_LOCK_ALLOC is not set
741# CONFIG_PROVE_LOCKING is not set
742# CONFIG_LOCK_STAT is not set
704# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 743# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
705# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 744# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
706# CONFIG_DEBUG_KOBJECT is not set 745# CONFIG_DEBUG_KOBJECT is not set
@@ -720,8 +759,28 @@ CONFIG_DEBUG_SG=y
720# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set 759# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
721# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set 760# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
722# CONFIG_FAULT_INJECTION is not set 761# CONFIG_FAULT_INJECTION is not set
762# CONFIG_LATENCYTOP is not set
723CONFIG_SYSCTL_SYSCALL_CHECK=y 763CONFIG_SYSCTL_SYSCALL_CHECK=y
724# CONFIG_PAGE_POISONING is not set 764# CONFIG_PAGE_POISONING is not set
765CONFIG_HAVE_FUNCTION_TRACER=y
766CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
767CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
768CONFIG_HAVE_DYNAMIC_FTRACE=y
769CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
770CONFIG_TRACING_SUPPORT=y
771CONFIG_FTRACE=y
772# CONFIG_FUNCTION_TRACER is not set
773# CONFIG_IRQSOFF_TRACER is not set
774# CONFIG_SCHED_TRACER is not set
775# CONFIG_ENABLE_DEFAULT_TRACERS is not set
776# CONFIG_BOOT_TRACER is not set
777CONFIG_BRANCH_PROFILE_NONE=y
778# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
779# CONFIG_PROFILE_ALL_BRANCHES is not set
780# CONFIG_STACK_TRACER is not set
781# CONFIG_KMEMTRACE is not set
782# CONFIG_WORKQUEUE_TRACER is not set
783# CONFIG_BLK_DEV_IO_TRACE is not set
725# CONFIG_DYNAMIC_DEBUG is not set 784# CONFIG_DYNAMIC_DEBUG is not set
726# CONFIG_SAMPLES is not set 785# CONFIG_SAMPLES is not set
727CONFIG_EARLY_PRINTK=y 786CONFIG_EARLY_PRINTK=y
@@ -734,7 +793,11 @@ CONFIG_EARLY_PRINTK=y
734# CONFIG_KEYS is not set 793# CONFIG_KEYS is not set
735# CONFIG_SECURITY is not set 794# CONFIG_SECURITY is not set
736# CONFIG_SECURITYFS is not set 795# CONFIG_SECURITYFS is not set
737# CONFIG_SECURITY_FILE_CAPABILITIES is not set 796# CONFIG_DEFAULT_SECURITY_SELINUX is not set
797# CONFIG_DEFAULT_SECURITY_SMACK is not set
798# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
799CONFIG_DEFAULT_SECURITY_DAC=y
800CONFIG_DEFAULT_SECURITY=""
738CONFIG_CRYPTO=y 801CONFIG_CRYPTO=y
739 802
740# 803#
diff --git a/arch/microblaze/kernel/entry-nommu.S b/arch/microblaze/kernel/entry-nommu.S
index 95b0855802df..391d6197fc3b 100644
--- a/arch/microblaze/kernel/entry-nommu.S
+++ b/arch/microblaze/kernel/entry-nommu.S
@@ -122,7 +122,7 @@ ENTRY(_interrupt)
122 122
123ret_from_intr: 123ret_from_intr:
124 lwi r11, r1, PT_MODE 124 lwi r11, r1, PT_MODE
125 bneid r11, 3f 125 bneid r11, no_intr_resched
126 126
127 lwi r6, r31, TS_THREAD_INFO /* get thread info */ 127 lwi r6, r31, TS_THREAD_INFO /* get thread info */
128 lwi r19, r6, TI_FLAGS /* get flags in thread info */ 128 lwi r19, r6, TI_FLAGS /* get flags in thread info */
@@ -133,16 +133,18 @@ ret_from_intr:
133 bralid r15, schedule 133 bralid r15, schedule
134 nop 134 nop
1351: andi r11, r19, _TIF_SIGPENDING 1351: andi r11, r19, _TIF_SIGPENDING
136 beqid r11, no_intr_reshed 136 beqid r11, no_intr_resched
137 addk r5, r1, r0 137 addk r5, r1, r0
138 addk r7, r0, r0 138 addk r7, r0, r0
139 bralid r15, do_signal 139 bralid r15, do_signal
140 addk r6, r0, r0 140 addk r6, r0, r0
141 141
142no_intr_reshed: 142no_intr_resched:
143 /* Disable interrupts, we are now committed to the state restore */
144 disable_irq
145
143 /* save mode indicator */ 146 /* save mode indicator */
144 lwi r11, r1, PT_MODE 147 lwi r11, r1, PT_MODE
1453:
146 swi r11, r0, PER_CPU(KM) 148 swi r11, r0, PER_CPU(KM)
147 149
148 /* save r31 */ 150 /* save r31 */
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 5372b24ad049..bb8c4b9ccb80 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -54,6 +54,7 @@ void __init setup_arch(char **cmdline_p)
54 54
55 microblaze_cache_init(); 55 microblaze_cache_init();
56 56
57 invalidate_dcache();
57 enable_dcache(); 58 enable_dcache();
58 59
59 invalidate_icache(); 60 invalidate_icache();
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 282d9306361f..1ec06576f619 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -63,15 +63,21 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
63 if (huge) { 63 if (huge) {
64#ifdef CONFIG_HUGETLB_PAGE 64#ifdef CONFIG_HUGETLB_PAGE
65 psize = get_slice_psize(mm, addr); 65 psize = get_slice_psize(mm, addr);
66 /* Mask the address for the correct page size */
67 addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
66#else 68#else
67 BUG(); 69 BUG();
68 psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */ 70 psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
69#endif 71#endif
70 } else 72 } else {
71 psize = pte_pagesize_index(mm, addr, pte); 73 psize = pte_pagesize_index(mm, addr, pte);
74 /* Mask the address for the standard page size. If we
75 * have a 64k page kernel, but the hardware does not
76 * support 64k pages, this might be different from the
77 * hardware page size encoded in the slice table. */
78 addr &= PAGE_MASK;
79 }
72 80
73 /* Mask the address for the correct page size */
74 addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
75 81
76 /* Build full vaddr */ 82 /* Build full vaddr */
77 if (!is_kernel_addr(addr)) { 83 if (!is_kernel_addr(addr)) {
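
The hunk above moves the page-size masking into each branch: huge pages keep masking with the slice page-size shift, while normal pages now simply use PAGE_MASK, which matters when a 64k-page kernel runs on hardware that does not support 64k pages. As a rough standalone illustration of the masking arithmetic itself (userspace sketch, not kernel code; the addresses are invented):

    #include <stdio.h>

    /* Round an address down to the start of its page for a given shift. */
    static unsigned long mask_to_page(unsigned long addr, unsigned int shift)
    {
        return addr & ~((1UL << shift) - 1);
    }

    int main(void)
    {
        unsigned long addr = 0x10003456UL;

        printf("4K page:  %#lx\n", mask_to_page(addr, 12)); /* 0x10003000 */
        printf("64K page: %#lx\n", mask_to_page(addr, 16)); /* 0x10000000 */
        return 0;
    }
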
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 1ee66db003be..f5f79196721c 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -784,9 +784,13 @@ static void xics_set_cpu_priority(unsigned char cppr)
784{ 784{
785 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); 785 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
786 786
787 BUG_ON(os_cppr->index != 0); 787 /*
788 * we only really want to set the priority when there's
789 * just one cppr value on the stack
790 */
791 WARN_ON(os_cppr->index != 0);
788 792
789 os_cppr->stack[os_cppr->index] = cppr; 793 os_cppr->stack[0] = cppr;
790 794
791 if (firmware_has_feature(FW_FEATURE_LPAR)) 795 if (firmware_has_feature(FW_FEATURE_LPAR))
792 lpar_cppr_info(cppr); 796 lpar_cppr_info(cppr);
@@ -821,8 +825,14 @@ void xics_setup_cpu(void)
821 825
822void xics_teardown_cpu(void) 826void xics_teardown_cpu(void)
823{ 827{
828 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
824 int cpu = smp_processor_id(); 829 int cpu = smp_processor_id();
825 830
831 /*
832 * we have to reset the cppr index to 0 because we're
833 * not going to return from the IPI
834 */
835 os_cppr->index = 0;
826 xics_set_cpu_priority(0); 836 xics_set_cpu_priority(0);
827 837
828 /* Clear any pending IPI request */ 838 /* Clear any pending IPI request */
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index f2ef4b619ce1..c25dfac7dd76 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -293,12 +293,12 @@ struct _lowcore
293 __u64 clock_comparator; /* 0x02d0 */ 293 __u64 clock_comparator; /* 0x02d0 */
294 __u32 machine_flags; /* 0x02d8 */ 294 __u32 machine_flags; /* 0x02d8 */
295 __u32 ftrace_func; /* 0x02dc */ 295 __u32 ftrace_func; /* 0x02dc */
296 __u8 pad_0x02f0[0x0300-0x02f0]; /* 0x02f0 */ 296 __u8 pad_0x02e0[0x0300-0x02e0]; /* 0x02e0 */
297 297
298 /* Interrupt response block */ 298 /* Interrupt response block */
299 __u8 irb[64]; /* 0x0300 */ 299 __u8 irb[64]; /* 0x0300 */
300 300
301 __u8 pad_0x0400[0x0e00-0x0400]; /* 0x0400 */ 301 __u8 pad_0x0340[0x0e00-0x0340]; /* 0x0340 */
302 302
303 /* 303 /*
304 * 0xe00 contains the address of the IPL Parameter Information 304 * 0xe00 contains the address of the IPL Parameter Information
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index 3f7e2a22c7c2..f6a389c996cb 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -132,7 +132,6 @@ ENTRY(tlb_protection_violation_store)
132 mov #1, r5 132 mov #1, r5
133 133
134call_handle_tlbmiss: 134call_handle_tlbmiss:
135 setup_frame_reg
136 mov.l 1f, r0 135 mov.l 1f, r0
137 mov r5, r8 136 mov r5, r8
138 mov.l @r0, r6 137 mov.l @r0, r6
@@ -365,6 +364,8 @@ handle_exception:
365 mov.l @k2, k2 ! read out vector and keep in k2 364 mov.l @k2, k2 ! read out vector and keep in k2
366 365
367handle_exception_special: 366handle_exception_special:
367 setup_frame_reg
368
368 ! Setup return address and jump to exception handler 369 ! Setup return address and jump to exception handler
369 mov.l 7f, r9 ! fetch return address 370 mov.l 7f, r9 ! fetch return address
370 stc r2_bank, r0 ! k2 (vector) 371 stc r2_bank, r0 ! k2 (vector)
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index 88d28ec3780a..e51168064e56 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -540,6 +540,8 @@ void dwarf_free_frame(struct dwarf_frame *frame)
540 mempool_free(frame, dwarf_frame_pool); 540 mempool_free(frame, dwarf_frame_pool);
541} 541}
542 542
543extern void ret_from_irq(void);
544
543/** 545/**
544 * dwarf_unwind_stack - unwind the stack 546 * dwarf_unwind_stack - unwind the stack
545 * 547 *
@@ -678,6 +680,24 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
678 addr = frame->cfa + reg->addr; 680 addr = frame->cfa + reg->addr;
679 frame->return_addr = __raw_readl(addr); 681 frame->return_addr = __raw_readl(addr);
680 682
683 /*
684 * Ah, the joys of unwinding through interrupts.
685 *
686 * Interrupts are tricky - the DWARF info needs to be _really_
687 * accurate and unfortunately I'm seeing a lot of bogus DWARF
688 * info. For example, I've seen interrupts occur in epilogues
689 * just after the frame pointer (r14) had been restored. The
690 * problem was that the DWARF info claimed that the CFA could be
691 * reached by using the value of the frame pointer before it was
692 * restored.
693 *
694 * So until the compiler can be trusted to produce reliable
695 * DWARF info when it really matters, let's stop unwinding once
696 * we've calculated the function that was interrupted.
697 */
698 if (prev && prev->pc == (unsigned long)ret_from_irq)
699 frame->return_addr = 0;
700
681 return frame; 701 return frame;
682 702
683bail: 703bail:
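
The long comment in this hunk carries the reasoning: the DWARF info emitted around interrupt entry cannot be trusted, so once the unwinder has identified the interrupted function it forces the return address to 0 and stops. A minimal userspace sketch of a walker that honours such a sentinel (illustrative only; the frame values are invented):

    #include <stdio.h>

    struct frame { unsigned long return_addr; };

    static void walk(const struct frame *frames, int n)
    {
        /* A return address of 0 plays the same role as the sentinel the
         * patch sets at ret_from_irq: stop unwinding cleanly. */
        for (int i = 0; i < n && frames[i].return_addr; i++)
            printf("frame %d -> %#lx\n", i, frames[i].return_addr);
    }

    int main(void)
    {
        struct frame f[] = { { 0x80001234 }, { 0x80005678 }, { 0 }, { 0xdeadbeef } };

        walk(f, 4);
        return 0;
    }
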
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index f0abd58c3a69..2b15ae60c3a0 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -70,8 +70,14 @@ ret_from_exception:
70 CFI_STARTPROC simple 70 CFI_STARTPROC simple
71 CFI_DEF_CFA r14, 0 71 CFI_DEF_CFA r14, 0
72 CFI_REL_OFFSET 17, 64 72 CFI_REL_OFFSET 17, 64
73 CFI_REL_OFFSET 15, 0 73 CFI_REL_OFFSET 15, 60
74 CFI_REL_OFFSET 14, 56 74 CFI_REL_OFFSET 14, 56
75 CFI_REL_OFFSET 13, 52
76 CFI_REL_OFFSET 12, 48
77 CFI_REL_OFFSET 11, 44
78 CFI_REL_OFFSET 10, 40
79 CFI_REL_OFFSET 9, 36
80 CFI_REL_OFFSET 8, 32
75 preempt_stop() 81 preempt_stop()
76ENTRY(ret_from_irq) 82ENTRY(ret_from_irq)
77 ! 83 !
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index 2830b415e214..c49865b30719 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -526,7 +526,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
526 * Set some valid stack frames to give to the child. 526 * Set some valid stack frames to give to the child.
527 */ 527 */
528 childstack = (struct sparc_stackf __user *) 528 childstack = (struct sparc_stackf __user *)
529 (sp & ~0x7UL); 529 (sp & ~0xfUL);
530 parentstack = (struct sparc_stackf __user *) 530 parentstack = (struct sparc_stackf __user *)
531 regs->u_regs[UREG_FP]; 531 regs->u_regs[UREG_FP];
532 532
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index c3f1cce0e95e..cb70476bd8f5 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -398,11 +398,11 @@ static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
398 } else 398 } else
399 __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6])); 399 __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));
400 400
401 /* Now 8-byte align the stack as this is mandatory in the 401 /* Now align the stack as this is mandatory in the Sparc ABI
402 * Sparc ABI due to how register windows work. This hides 402 * due to how register windows work. This hides the
403 * the restriction from thread libraries etc. -DaveM 403 * restriction from thread libraries etc.
404 */ 404 */
405 csp &= ~7UL; 405 csp &= ~15UL;
406 406
407 distance = fp - psp; 407 distance = fp - psp;
408 rval = (csp - distance); 408 rval = (csp - distance);
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index ba5b09ad6666..ea22cd373c64 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -120,8 +120,8 @@ struct rt_signal_frame32 {
120}; 120};
121 121
122/* Align macros */ 122/* Align macros */
123#define SF_ALIGNEDSZ (((sizeof(struct signal_frame32) + 7) & (~7))) 123#define SF_ALIGNEDSZ (((sizeof(struct signal_frame32) + 15) & (~15)))
124#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame32) + 7) & (~7))) 124#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame32) + 15) & (~15)))
125 125
126int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) 126int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
127{ 127{
@@ -420,15 +420,17 @@ static void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, uns
420 sp = current->sas_ss_sp + current->sas_ss_size; 420 sp = current->sas_ss_sp + current->sas_ss_size;
421 } 421 }
422 422
423 sp -= framesize;
424
423 /* Always align the stack frame. This handles two cases. First, 425 /* Always align the stack frame. This handles two cases. First,
424 * sigaltstack need not be mindful of platform specific stack 426 * sigaltstack need not be mindful of platform specific stack
425 * alignment. Second, if we took this signal because the stack 427 * alignment. Second, if we took this signal because the stack
426 * is not aligned properly, we'd like to take the signal cleanly 428 * is not aligned properly, we'd like to take the signal cleanly
427 * and report that. 429 * and report that.
428 */ 430 */
429 sp &= ~7UL; 431 sp &= ~15UL;
430 432
431 return (void __user *)(sp - framesize); 433 return (void __user *) sp;
432} 434}
433 435
434static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) 436static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 7ce1a1005b1d..9882df92ba0a 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -267,15 +267,17 @@ static inline void __user *get_sigframe(struct sigaction *sa, struct pt_regs *re
267 sp = current->sas_ss_sp + current->sas_ss_size; 267 sp = current->sas_ss_sp + current->sas_ss_size;
268 } 268 }
269 269
270 sp -= framesize;
271
270 /* Always align the stack frame. This handles two cases. First, 272 /* Always align the stack frame. This handles two cases. First,
271 * sigaltstack need not be mindful of platform specific stack 273 * sigaltstack need not be mindful of platform specific stack
272 * alignment. Second, if we took this signal because the stack 274 * alignment. Second, if we took this signal because the stack
273 * is not aligned properly, we'd like to take the signal cleanly 275 * is not aligned properly, we'd like to take the signal cleanly
274 * and report that. 276 * and report that.
275 */ 277 */
276 sp &= ~7UL; 278 sp &= ~15UL;
277 279
278 return (void __user *)(sp - framesize); 280 return (void __user *) sp;
279} 281}
280 282
281static inline int 283static inline int
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 647afbda7ae1..9fa48c30037e 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -353,7 +353,7 @@ segv:
353/* Checks if the fp is valid */ 353/* Checks if the fp is valid */
354static int invalid_frame_pointer(void __user *fp, int fplen) 354static int invalid_frame_pointer(void __user *fp, int fplen)
355{ 355{
356 if (((unsigned long) fp) & 7) 356 if (((unsigned long) fp) & 15)
357 return 1; 357 return 1;
358 return 0; 358 return 0;
359} 359}
@@ -396,15 +396,17 @@ static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *
396 sp = current->sas_ss_sp + current->sas_ss_size; 396 sp = current->sas_ss_sp + current->sas_ss_size;
397 } 397 }
398 398
399 sp -= framesize;
400
399 /* Always align the stack frame. This handles two cases. First, 401 /* Always align the stack frame. This handles two cases. First,
400 * sigaltstack need not be mindful of platform specific stack 402 * sigaltstack need not be mindful of platform specific stack
401 * alignment. Second, if we took this signal because the stack 403 * alignment. Second, if we took this signal because the stack
402 * is not aligned properly, we'd like to take the signal cleanly 404 * is not aligned properly, we'd like to take the signal cleanly
403 * and report that. 405 * and report that.
404 */ 406 */
405 sp &= ~7UL; 407 sp &= ~15UL;
406 408
407 return (void __user *)(sp - framesize); 409 return (void __user *) sp;
408} 410}
409 411
410static inline void 412static inline void
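
The sparc changes above all raise the stack-alignment guarantee from 8 to 16 bytes: child stack frames, the frame-pointer validity check, the aligned-size macros and the signal-frame placement. Note the order in get_sigframe(): the frame size is subtracted first and the result is then aligned down, whereas the old code aligned first and subtracted afterwards, which only stays aligned when framesize is itself a multiple of the alignment. A standalone sketch of the new pattern (userspace code with invented values, not the kernel routine):

    #include <stdio.h>

    static unsigned long get_frame(unsigned long sp, unsigned long framesize)
    {
        sp -= framesize;    /* reserve room for the signal frame first */
        sp &= ~15UL;        /* then round down to a 16-byte boundary */
        return sp;
    }

    int main(void)
    {
        /* 0x7ffff2c8 - 0x1d0 = 0x7ffff0f8, aligned down -> 0x7ffff0f0 */
        printf("%#lx\n", get_frame(0x7ffff2c8UL, 0x1d0UL));
        return 0;
    }
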
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index f125e5c551c0..6e44519960c8 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1356,6 +1356,7 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol)
1356 1356
1357 kfree(data->powernow_table); 1357 kfree(data->powernow_table);
1358 kfree(data); 1358 kfree(data);
1359 per_cpu(powernow_data, pol->cpu) = NULL;
1359 1360
1360 return 0; 1361 return 0;
1361} 1362}
@@ -1375,7 +1376,7 @@ static unsigned int powernowk8_get(unsigned int cpu)
1375 int err; 1376 int err;
1376 1377
1377 if (!data) 1378 if (!data)
1378 return -EINVAL; 1379 return 0;
1379 1380
1380 smp_call_function_single(cpu, query_values_on_cpu, &err, true); 1381 smp_call_function_single(cpu, query_values_on_cpu, &err, true);
1381 if (err) 1382 if (err)
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 296aba49472a..15578f180e59 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -467,6 +467,9 @@ static int pit_ioport_read(struct kvm_io_device *this,
467 return -EOPNOTSUPP; 467 return -EOPNOTSUPP;
468 468
469 addr &= KVM_PIT_CHANNEL_MASK; 469 addr &= KVM_PIT_CHANNEL_MASK;
470 if (addr == 3)
471 return 0;
472
470 s = &pit_state->channels[addr]; 473 s = &pit_state->channels[addr];
471 474
472 mutex_lock(&pit_state->lock); 475 mutex_lock(&pit_state->lock);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1ddcad452add..a1e1bc9d412d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -670,7 +670,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
670{ 670{
671 static int version; 671 static int version;
672 struct pvclock_wall_clock wc; 672 struct pvclock_wall_clock wc;
673 struct timespec now, sys, boot; 673 struct timespec boot;
674 674
675 if (!wall_clock) 675 if (!wall_clock)
676 return; 676 return;
@@ -685,9 +685,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
685 * wall clock specified here. guest system time equals host 685 * wall clock specified here. guest system time equals host
686 * system time for us, thus we must fill in host boot time here. 686 * system time for us, thus we must fill in host boot time here.
687 */ 687 */
688 now = current_kernel_time(); 688 getboottime(&boot);
689 ktime_get_ts(&sys);
690 boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));
691 689
692 wc.sec = boot.tv_sec; 690 wc.sec = boot.tv_sec;
693 wc.nsec = boot.tv_nsec; 691 wc.nsec = boot.tv_nsec;
@@ -762,6 +760,7 @@ static void kvm_write_guest_time(struct kvm_vcpu *v)
762 local_irq_save(flags); 760 local_irq_save(flags);
763 kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp); 761 kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
764 ktime_get_ts(&ts); 762 ktime_get_ts(&ts);
763 monotonic_to_bootbased(&ts);
765 local_irq_restore(flags); 764 local_irq_restore(flags);
766 765
767 /* With all the info we got, fill in the values */ 766 /* With all the info we got, fill in the values */
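
The removed lines computed the boot epoch by hand as "wall clock now minus monotonic now"; the patch replaces that with getboottime() and, in kvm_write_guest_time(), converts the sampled timestamp with monotonic_to_bootbased(). A userspace analogue of the removed arithmetic, only to show the relationship involved (illustrative; it ignores nanoseconds and any time spent suspended):

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        struct timespec now, mono;

        clock_gettime(CLOCK_REALTIME, &now);    /* wall-clock time */
        clock_gettime(CLOCK_MONOTONIC, &mono);  /* time since boot */

        /* boot epoch ~= wall clock minus how long the system has been up */
        printf("approx. boot epoch: %ld s\n", (long)(now.tv_sec - mono.tv_sec));
        return 0;
    }
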
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 5198b9bb34ef..69ddfbd91135 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -49,6 +49,7 @@
49#include <asm/numa.h> 49#include <asm/numa.h>
50#include <asm/cacheflush.h> 50#include <asm/cacheflush.h>
51#include <asm/init.h> 51#include <asm/init.h>
52#include <linux/bootmem.h>
52 53
53static unsigned long dma_reserve __initdata; 54static unsigned long dma_reserve __initdata;
54 55
@@ -616,6 +617,21 @@ void __init paging_init(void)
616 */ 617 */
617#ifdef CONFIG_MEMORY_HOTPLUG 618#ifdef CONFIG_MEMORY_HOTPLUG
618/* 619/*
620 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
621 * updating.
622 */
623static void update_end_of_memory_vars(u64 start, u64 size)
624{
625 unsigned long end_pfn = PFN_UP(start + size);
626
627 if (end_pfn > max_pfn) {
628 max_pfn = end_pfn;
629 max_low_pfn = end_pfn;
630 high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
631 }
632}
633
634/*
619 * Memory is added always to NORMAL zone. This means you will never get 635 * Memory is added always to NORMAL zone. This means you will never get
620 * additional DMA/DMA32 memory. 636 * additional DMA/DMA32 memory.
621 */ 637 */
@@ -634,6 +650,9 @@ int arch_add_memory(int nid, u64 start, u64 size)
634 ret = __add_pages(nid, zone, start_pfn, nr_pages); 650 ret = __add_pages(nid, zone, start_pfn, nr_pages);
635 WARN_ON_ONCE(ret); 651 WARN_ON_ONCE(ret);
636 652
653 /* update max_pfn, max_low_pfn and high_memory */
654 update_end_of_memory_vars(start, size);
655
637 return ret; 656 return ret;
638} 657}
639EXPORT_SYMBOL_GPL(arch_add_memory); 658EXPORT_SYMBOL_GPL(arch_add_memory);
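
update_end_of_memory_vars() in the hunk above bumps max_pfn, max_low_pfn and high_memory when hot-added memory extends past the current end of memory. The only arithmetic involved is PFN_UP(), which rounds a byte address up to the next page frame number; a small standalone sketch (assuming 4 KiB pages, with an invented start and size):

    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PAGE_SIZE   (1UL << PAGE_SHIFT)
    #define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

    int main(void)
    {
        unsigned long start = 0x10000000UL;     /* 256 MiB */
        unsigned long size  = 0x08000000UL;     /* 128 MiB hot-added */

        /* 0x18000000 >> 12 = 0x18000: the candidate new max_pfn */
        printf("end_pfn = %#lx\n", PFN_UP(start + size));
        return 0;
    }
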
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index b8bea100a160..b34390347c16 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -2868,6 +2868,21 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
2868 }, 2868 },
2869 .driver_data = "F.23", /* cutoff BIOS version */ 2869 .driver_data = "F.23", /* cutoff BIOS version */
2870 }, 2870 },
2871 /*
2872 * Acer eMachines G725 has the same problem. BIOS
2873 * V1.03 is known to be broken. V3.04 is known to
2874 * work. Inbetween, there are V1.06, V2.06 and V3.03
2875 * that we don't have much idea about. For now,
2876 * blacklist anything older than V3.04.
2877 */
2878 {
2879 .ident = "G725",
2880 .matches = {
2881 DMI_MATCH(DMI_SYS_VENDOR, "eMachines"),
2882 DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"),
2883 },
2884 .driver_data = "V3.04", /* cutoff BIOS version */
2885 },
2871 { } /* terminate list */ 2886 { } /* terminate list */
2872 }; 2887 };
2873 const struct dmi_system_id *dmi = dmi_first_match(sysids); 2888 const struct dmi_system_id *dmi = dmi_first_match(sysids);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index f4ea5a8c325b..d096fbcbc771 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2875,7 +2875,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
2875 * write indication (used for PIO/DMA setup), result TF is 2875 * write indication (used for PIO/DMA setup), result TF is
2876 * copied back and we don't whine too much about its failure. 2876 * copied back and we don't whine too much about its failure.
2877 */ 2877 */
2878 tf->flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 2878 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2879 if (scmd->sc_data_direction == DMA_TO_DEVICE) 2879 if (scmd->sc_data_direction == DMA_TO_DEVICE)
2880 tf->flags |= ATA_TFLAG_WRITE; 2880 tf->flags |= ATA_TFLAG_WRITE;
2881 2881
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 741065c9da67..730ef3c384ca 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -893,6 +893,9 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
893 do_write); 893 do_write);
894 } 894 }
895 895
896 if (!do_write)
897 flush_dcache_page(page);
898
896 qc->curbytes += qc->sect_size; 899 qc->curbytes += qc->sect_size;
897 qc->cursg_ofs += qc->sect_size; 900 qc->cursg_ofs += qc->sect_size;
898 901
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 2ddf03ae034e..68b5957f107c 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -322,7 +322,7 @@ static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
322 pkt_kobj_remove(pd->kobj_stat); 322 pkt_kobj_remove(pd->kobj_stat);
323 pkt_kobj_remove(pd->kobj_wqueue); 323 pkt_kobj_remove(pd->kobj_wqueue);
324 if (class_pktcdvd) 324 if (class_pktcdvd)
325 device_destroy(class_pktcdvd, pd->pkt_dev); 325 device_unregister(pd->dev);
326} 326}
327 327
328 328
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 652367aa6546..058fbccf2f52 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -195,5 +195,16 @@ config BT_MRVL_SDIO
195 Say Y here to compile support for Marvell BT-over-SDIO driver 195 Say Y here to compile support for Marvell BT-over-SDIO driver
196 into the kernel or say M to compile it as module. 196 into the kernel or say M to compile it as module.
197 197
198endmenu 198config BT_ATH3K
199 tristate "Atheros firmware download driver"
200 depends on BT_HCIBTUSB
201 select FW_LOADER
202 help
203 Bluetooth firmware download driver.
204 This driver loads the firmware into the Atheros Bluetooth
205 chipset.
199 206
207 Say Y here to compile support for "Atheros firmware download driver"
208 into the kernel or say M to compile it as module (ath3k).
209
210endmenu
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index b3f57d2d4eb0..7e5aed598121 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_BT_HCIBTUART) += btuart_cs.o
15obj-$(CONFIG_BT_HCIBTUSB) += btusb.o 15obj-$(CONFIG_BT_HCIBTUSB) += btusb.o
16obj-$(CONFIG_BT_HCIBTSDIO) += btsdio.o 16obj-$(CONFIG_BT_HCIBTSDIO) += btsdio.o
17 17
18obj-$(CONFIG_BT_ATH3K) += ath3k.o
18obj-$(CONFIG_BT_MRVL) += btmrvl.o 19obj-$(CONFIG_BT_MRVL) += btmrvl.o
19obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o 20obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o
20 21
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
new file mode 100644
index 000000000000..add9485ca5b6
--- /dev/null
+++ b/drivers/bluetooth/ath3k.c
@@ -0,0 +1,187 @@
1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 */
19
20
21#include <linux/module.h>
22#include <linux/kernel.h>
23#include <linux/init.h>
24#include <linux/slab.h>
25#include <linux/types.h>
26#include <linux/errno.h>
27#include <linux/device.h>
28#include <linux/firmware.h>
29#include <linux/usb.h>
30#include <net/bluetooth/bluetooth.h>
31
32#define VERSION "1.0"
33
34
35static struct usb_device_id ath3k_table[] = {
36 /* Atheros AR3011 */
37 { USB_DEVICE(0x0CF3, 0x3000) },
38 { } /* Terminating entry */
39};
40
41MODULE_DEVICE_TABLE(usb, ath3k_table);
42
43#define USB_REQ_DFU_DNLOAD 1
44#define BULK_SIZE 4096
45
46struct ath3k_data {
47 struct usb_device *udev;
48 u8 *fw_data;
49 u32 fw_size;
50 u32 fw_sent;
51};
52
53static int ath3k_load_firmware(struct ath3k_data *data,
54 unsigned char *firmware,
55 int count)
56{
57 u8 *send_buf;
58 int err, pipe, len, size, sent = 0;
59
60 BT_DBG("ath3k %p udev %p", data, data->udev);
61
62 pipe = usb_sndctrlpipe(data->udev, 0);
63
64 if ((usb_control_msg(data->udev, pipe,
65 USB_REQ_DFU_DNLOAD,
66 USB_TYPE_VENDOR, 0, 0,
67 firmware, 20, USB_CTRL_SET_TIMEOUT)) < 0) {
68 BT_ERR("Can't change to loading configuration err");
69 return -EBUSY;
70 }
71 sent += 20;
72 count -= 20;
73
74 send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC);
75 if (!send_buf) {
76 BT_ERR("Can't allocate memory chunk for firmware");
77 return -ENOMEM;
78 }
79
80 while (count) {
81 size = min_t(uint, count, BULK_SIZE);
82 pipe = usb_sndbulkpipe(data->udev, 0x02);
83 memcpy(send_buf, firmware + sent, size);
84
85 err = usb_bulk_msg(data->udev, pipe, send_buf, size,
86 &len, 3000);
87
88 if (err || (len != size)) {
89 BT_ERR("Error in firmware loading err = %d,"
90 "len = %d, size = %d", err, len, size);
91 goto error;
92 }
93
94 sent += size;
95 count -= size;
96 }
97
98 kfree(send_buf);
99 return 0;
100
101error:
102 kfree(send_buf);
103 return err;
104}
105
106static int ath3k_probe(struct usb_interface *intf,
107 const struct usb_device_id *id)
108{
109 const struct firmware *firmware;
110 struct usb_device *udev = interface_to_usbdev(intf);
111 struct ath3k_data *data;
112 int size;
113
114 BT_DBG("intf %p id %p", intf, id);
115
116 if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
117 return -ENODEV;
118
119 data = kzalloc(sizeof(*data), GFP_KERNEL);
120 if (!data)
121 return -ENOMEM;
122
123 data->udev = udev;
124
125 if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) {
126 kfree(data);
127 return -EIO;
128 }
129
130 size = max_t(uint, firmware->size, 4096);
131 data->fw_data = kmalloc(size, GFP_KERNEL);
132 if (!data->fw_data) {
133 release_firmware(firmware);
134 kfree(data);
135 return -ENOMEM;
136 }
137
138 memcpy(data->fw_data, firmware->data, firmware->size);
139 data->fw_size = firmware->size;
140 data->fw_sent = 0;
141 release_firmware(firmware);
142
143 usb_set_intfdata(intf, data);
144 if (ath3k_load_firmware(data, data->fw_data, data->fw_size)) {
145 usb_set_intfdata(intf, NULL);
146 return -EIO;
147 }
148
149 return 0;
150}
151
152static void ath3k_disconnect(struct usb_interface *intf)
153{
154 struct ath3k_data *data = usb_get_intfdata(intf);
155
156 BT_DBG("ath3k_disconnect intf %p", intf);
157
158 kfree(data->fw_data);
159 kfree(data);
160}
161
162static struct usb_driver ath3k_driver = {
163 .name = "ath3k",
164 .probe = ath3k_probe,
165 .disconnect = ath3k_disconnect,
166 .id_table = ath3k_table,
167};
168
169static int __init ath3k_init(void)
170{
171 BT_INFO("Atheros AR30xx firmware driver ver %s", VERSION);
172 return usb_register(&ath3k_driver);
173}
174
175static void __exit ath3k_exit(void)
176{
177 usb_deregister(&ath3k_driver);
178}
179
180module_init(ath3k_init);
181module_exit(ath3k_exit);
182
183MODULE_AUTHOR("Atheros Communications");
184MODULE_DESCRIPTION("Atheros AR30xx firmware driver");
185MODULE_VERSION(VERSION);
186MODULE_LICENSE("GPL");
187MODULE_FIRMWARE("ath3k-1.fw");
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 2acdc605cb4b..c2cf81144715 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -503,7 +503,9 @@ static irqreturn_t bluecard_interrupt(int irq, void *dev_inst)
503 unsigned int iobase; 503 unsigned int iobase;
504 unsigned char reg; 504 unsigned char reg;
505 505
506 BUG_ON(!info->hdev); 506 if (!info || !info->hdev)
507 /* our irq handler is shared */
508 return IRQ_NONE;
507 509
508 if (!test_bit(CARD_READY, &(info->hw_state))) 510 if (!test_bit(CARD_READY, &(info->hw_state)))
509 return IRQ_HANDLED; 511 return IRQ_HANDLED;
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index d814a2755ccb..9f5926aaf57f 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -345,7 +345,9 @@ static irqreturn_t bt3c_interrupt(int irq, void *dev_inst)
345 int iir; 345 int iir;
346 irqreturn_t r = IRQ_NONE; 346 irqreturn_t r = IRQ_NONE;
347 347
348 BUG_ON(!info->hdev); 348 if (!info || !info->hdev)
349 /* our irq handler is shared */
350 return IRQ_NONE;
349 351
350 iobase = info->p_dev->io.BasePort1; 352 iobase = info->p_dev->io.BasePort1;
351 353
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index f36defa37764..57d965b7f521 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -808,6 +808,7 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv,
808 808
809exit: 809exit:
810 sdio_release_host(card->func); 810 sdio_release_host(card->func);
811 kfree(tmpbuf);
811 812
812 return ret; 813 return ret;
813} 814}
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index d339464dc15e..91c523099804 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -295,7 +295,9 @@ static irqreturn_t btuart_interrupt(int irq, void *dev_inst)
295 int iir, lsr; 295 int iir, lsr;
296 irqreturn_t r = IRQ_NONE; 296 irqreturn_t r = IRQ_NONE;
297 297
298 BUG_ON(!info->hdev); 298 if (!info || !info->hdev)
299 /* our irq handler is shared */
300 return IRQ_NONE;
299 301
300 iobase = info->p_dev->io.BasePort1; 302 iobase = info->p_dev->io.BasePort1;
301 303
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 4f02a6f3c980..697591941e17 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -299,7 +299,9 @@ static irqreturn_t dtl1_interrupt(int irq, void *dev_inst)
299 int iir, lsr; 299 int iir, lsr;
300 irqreturn_t r = IRQ_NONE; 300 irqreturn_t r = IRQ_NONE;
301 301
302 BUG_ON(!info->hdev); 302 if (!info || !info->hdev)
303 /* our irq handler is shared */
304 return IRQ_NONE;
303 305
304 iobase = info->p_dev->io.BasePort1; 306 iobase = info->p_dev->io.BasePort1;
305 307
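
All four PCMCIA Bluetooth drivers above (bluecard_cs, bt3c_cs, btuart_cs, dtl1_cs) get the same fix: their interrupt line can be shared, so the handler may be invoked for another device and has to return IRQ_NONE instead of BUG()ing when its own state is missing. A skeleton of the pattern (hypothetical example_info and example_interrupt names, not the real driver code):

    #include <linux/interrupt.h>

    struct example_info {
        struct hci_dev *hdev;
        /* ... driver state ... */
    };

    static irqreturn_t example_interrupt(int irq, void *dev_inst)
    {
        struct example_info *info = dev_inst;

        if (!info || !info->hdev)
            return IRQ_NONE;    /* shared irq: not our device */

        /* ... service the hardware ... */
        return IRQ_HANDLED;
    }
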
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index be832b6f8279..48788db4e280 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -395,6 +395,7 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
395 unsigned long p = *ppos; 395 unsigned long p = *ppos;
396 ssize_t low_count, read, sz; 396 ssize_t low_count, read, sz;
397 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */ 397 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
398 int err = 0;
398 399
399 read = 0; 400 read = 0;
400 if (p < (unsigned long) high_memory) { 401 if (p < (unsigned long) high_memory) {
@@ -441,12 +442,16 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
441 return -ENOMEM; 442 return -ENOMEM;
442 while (count > 0) { 443 while (count > 0) {
443 sz = size_inside_page(p, count); 444 sz = size_inside_page(p, count);
445 if (!is_vmalloc_or_module_addr((void *)p)) {
446 err = -ENXIO;
447 break;
448 }
444 sz = vread(kbuf, (char *)p, sz); 449 sz = vread(kbuf, (char *)p, sz);
445 if (!sz) 450 if (!sz)
446 break; 451 break;
447 if (copy_to_user(buf, kbuf, sz)) { 452 if (copy_to_user(buf, kbuf, sz)) {
448 free_page((unsigned long)kbuf); 453 err = -EFAULT;
449 return -EFAULT; 454 break;
450 } 455 }
451 count -= sz; 456 count -= sz;
452 buf += sz; 457 buf += sz;
@@ -455,8 +460,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
455 } 460 }
456 free_page((unsigned long)kbuf); 461 free_page((unsigned long)kbuf);
457 } 462 }
458 *ppos = p; 463 *ppos = p;
459 return read; 464 return read ? read : err;
460} 465}
461 466
462 467
@@ -520,6 +525,7 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
520 ssize_t wrote = 0; 525 ssize_t wrote = 0;
521 ssize_t virtr = 0; 526 ssize_t virtr = 0;
522 char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */ 527 char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
528 int err = 0;
523 529
524 if (p < (unsigned long) high_memory) { 530 if (p < (unsigned long) high_memory) {
525 unsigned long to_write = min_t(unsigned long, count, 531 unsigned long to_write = min_t(unsigned long, count,
@@ -540,14 +546,16 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
540 unsigned long sz = size_inside_page(p, count); 546 unsigned long sz = size_inside_page(p, count);
541 unsigned long n; 547 unsigned long n;
542 548
549 if (!is_vmalloc_or_module_addr((void *)p)) {
550 err = -ENXIO;
551 break;
552 }
543 n = copy_from_user(kbuf, buf, sz); 553 n = copy_from_user(kbuf, buf, sz);
544 if (n) { 554 if (n) {
545 if (wrote + virtr) 555 err = -EFAULT;
546 break; 556 break;
547 free_page((unsigned long)kbuf);
548 return -EFAULT;
549 } 557 }
550 sz = vwrite(kbuf, (char *)p, sz); 558 vwrite(kbuf, (char *)p, sz);
551 count -= sz; 559 count -= sz;
552 buf += sz; 560 buf += sz;
553 virtr += sz; 561 virtr += sz;
@@ -556,8 +564,8 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
556 free_page((unsigned long)kbuf); 564 free_page((unsigned long)kbuf);
557 } 565 }
558 566
559 *ppos = p; 567 *ppos = p;
560 return virtr + wrote; 568 return virtr + wrote ? : err;
561} 569}
562#endif 570#endif
563 571
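
The read_kmem()/write_kmem() rework above also changes the return convention: instead of bailing out with an error as soon as anything fails, the functions now report a partial count when some bytes were already transferred and surface the error only when nothing was. A tiny sketch of that convention (illustrative, not the driver code):

    #include <stdio.h>
    #include <errno.h>

    /* Return bytes transferred if any, otherwise the (negative) error. */
    static long finish_transfer(long transferred, int err)
    {
        return transferred ? transferred : err;
    }

    int main(void)
    {
        printf("%ld\n", finish_transfer(128, -EFAULT));  /* 128 */
        printf("%ld\n", finish_transfer(0, -ENXIO));     /* -6 (-ENXIO) */
        return 0;
    }
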
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index ecba4942fc8e..f58440791e65 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -39,12 +39,12 @@
39struct tpm_inf_dev { 39struct tpm_inf_dev {
40 int iotype; 40 int iotype;
41 41
42 void __iomem *mem_base; /* MMIO ioremap'd addr */ 42 void __iomem *mem_base; /* MMIO ioremap'd addr */
43 unsigned long map_base; /* phys MMIO base */ 43 unsigned long map_base; /* phys MMIO base */
44 unsigned long map_size; /* MMIO region size */ 44 unsigned long map_size; /* MMIO region size */
45 unsigned int index_off; /* index register offset */ 45 unsigned int index_off; /* index register offset */
46 46
47 unsigned int data_regs; /* Data registers */ 47 unsigned int data_regs; /* Data registers */
48 unsigned int data_size; 48 unsigned int data_size;
49 49
50 unsigned int config_port; /* IO Port config index reg */ 50 unsigned int config_port; /* IO Port config index reg */
@@ -406,14 +406,14 @@ static const struct tpm_vendor_specific tpm_inf = {
406 .miscdev = {.fops = &inf_ops,}, 406 .miscdev = {.fops = &inf_ops,},
407}; 407};
408 408
409static const struct pnp_device_id tpm_pnp_tbl[] = { 409static const struct pnp_device_id tpm_inf_pnp_tbl[] = {
410 /* Infineon TPMs */ 410 /* Infineon TPMs */
411 {"IFX0101", 0}, 411 {"IFX0101", 0},
412 {"IFX0102", 0}, 412 {"IFX0102", 0},
413 {"", 0} 413 {"", 0}
414}; 414};
415 415
416MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl); 416MODULE_DEVICE_TABLE(pnp, tpm_inf_pnp_tbl);
417 417
418static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev, 418static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
419 const struct pnp_device_id *dev_id) 419 const struct pnp_device_id *dev_id)
@@ -430,7 +430,7 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
430 if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && 430 if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) &&
431 !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) { 431 !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) {
432 432
433 tpm_dev.iotype = TPM_INF_IO_PORT; 433 tpm_dev.iotype = TPM_INF_IO_PORT;
434 434
435 tpm_dev.config_port = pnp_port_start(dev, 0); 435 tpm_dev.config_port = pnp_port_start(dev, 0);
436 tpm_dev.config_size = pnp_port_len(dev, 0); 436 tpm_dev.config_size = pnp_port_len(dev, 0);
@@ -459,9 +459,9 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
459 goto err_last; 459 goto err_last;
460 } 460 }
461 } else if (pnp_mem_valid(dev, 0) && 461 } else if (pnp_mem_valid(dev, 0) &&
462 !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) { 462 !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) {
463 463
464 tpm_dev.iotype = TPM_INF_IO_MEM; 464 tpm_dev.iotype = TPM_INF_IO_MEM;
465 465
466 tpm_dev.map_base = pnp_mem_start(dev, 0); 466 tpm_dev.map_base = pnp_mem_start(dev, 0);
467 tpm_dev.map_size = pnp_mem_len(dev, 0); 467 tpm_dev.map_size = pnp_mem_len(dev, 0);
@@ -563,11 +563,11 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
563 "product id 0x%02x%02x" 563 "product id 0x%02x%02x"
564 "%s\n", 564 "%s\n",
565 tpm_dev.iotype == TPM_INF_IO_PORT ? 565 tpm_dev.iotype == TPM_INF_IO_PORT ?
566 tpm_dev.config_port : 566 tpm_dev.config_port :
567 tpm_dev.map_base + tpm_dev.index_off, 567 tpm_dev.map_base + tpm_dev.index_off,
568 tpm_dev.iotype == TPM_INF_IO_PORT ? 568 tpm_dev.iotype == TPM_INF_IO_PORT ?
569 tpm_dev.data_regs : 569 tpm_dev.data_regs :
570 tpm_dev.map_base + tpm_dev.data_regs, 570 tpm_dev.map_base + tpm_dev.data_regs,
571 version[0], version[1], 571 version[0], version[1],
572 vendorid[0], vendorid[1], 572 vendorid[0], vendorid[1],
573 productid[0], productid[1], chipname); 573 productid[0], productid[1], chipname);
@@ -607,20 +607,55 @@ static __devexit void tpm_inf_pnp_remove(struct pnp_dev *dev)
607 iounmap(tpm_dev.mem_base); 607 iounmap(tpm_dev.mem_base);
608 release_mem_region(tpm_dev.map_base, tpm_dev.map_size); 608 release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
609 } 609 }
610 tpm_dev_vendor_release(chip);
610 tpm_remove_hardware(chip->dev); 611 tpm_remove_hardware(chip->dev);
611 } 612 }
612} 613}
613 614
615static int tpm_inf_pnp_suspend(struct pnp_dev *dev, pm_message_t pm_state)
616{
617 struct tpm_chip *chip = pnp_get_drvdata(dev);
618 int rc;
619 if (chip) {
620 u8 savestate[] = {
621 0, 193, /* TPM_TAG_RQU_COMMAND */
622 0, 0, 0, 10, /* blob length (in bytes) */
623 0, 0, 0, 152 /* TPM_ORD_SaveState */
624 };
625 dev_info(&dev->dev, "saving TPM state\n");
626 rc = tpm_inf_send(chip, savestate, sizeof(savestate));
627 if (rc < 0) {
628 dev_err(&dev->dev, "error while saving TPM state\n");
629 return rc;
630 }
631 }
632 return 0;
633}
634
635static int tpm_inf_pnp_resume(struct pnp_dev *dev)
636{
637 /* Re-configure TPM after suspending */
638 tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR);
639 tpm_config_out(IOLIMH, TPM_INF_ADDR);
640 tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA);
641 tpm_config_out(IOLIML, TPM_INF_ADDR);
642 tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA);
643 /* activate register */
644 tpm_config_out(TPM_DAR, TPM_INF_ADDR);
645 tpm_config_out(0x01, TPM_INF_DATA);
646 tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR);
647 /* disable RESET, LP and IRQC */
648 tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);
649 return tpm_pm_resume(&dev->dev);
650}
651
614static struct pnp_driver tpm_inf_pnp_driver = { 652static struct pnp_driver tpm_inf_pnp_driver = {
615 .name = "tpm_inf_pnp", 653 .name = "tpm_inf_pnp",
616 .driver = { 654 .id_table = tpm_inf_pnp_tbl,
617 .owner = THIS_MODULE,
618 .suspend = tpm_pm_suspend,
619 .resume = tpm_pm_resume,
620 },
621 .id_table = tpm_pnp_tbl,
622 .probe = tpm_inf_pnp_probe, 655 .probe = tpm_inf_pnp_probe,
623 .remove = __devexit_p(tpm_inf_pnp_remove), 656 .suspend = tpm_inf_pnp_suspend,
657 .resume = tpm_inf_pnp_resume,
658 .remove = __devexit_p(tpm_inf_pnp_remove)
624}; 659};
625 660
626static int __init init_inf(void) 661static int __init init_inf(void)
@@ -638,5 +673,5 @@ module_exit(cleanup_inf);
638 673
639MODULE_AUTHOR("Marcel Selhorst <m.selhorst@sirrix.com>"); 674MODULE_AUTHOR("Marcel Selhorst <m.selhorst@sirrix.com>");
640MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2"); 675MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
641MODULE_VERSION("1.9"); 676MODULE_VERSION("1.9.2");
642MODULE_LICENSE("GPL"); 677MODULE_LICENSE("GPL");
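
The suspend handler introduced above hand-assembles a bare TPM 1.2 command: a 2-byte tag (TPM_TAG_RQU_COMMAND, 0x00C1), a 4-byte big-endian total length of 10, and a 4-byte ordinal (TPM_ORD_SaveState, 152), which tpm_inf_send() then pushes to the chip. The standalone sketch below only illustrates that byte layout; the helper name and the userspace framing are assumptions, not part of the driver.

/* Illustrative userspace sketch of the TPM 1.2 command layout used by the
 * suspend handler above (tag, big-endian length, ordinal).  Not driver code. */
#include <stdio.h>
#include <stdint.h>

static size_t tpm_build_cmd(uint8_t *buf, uint16_t tag, uint32_t ordinal)
{
	uint32_t len = 10;		/* header-only command: 2 + 4 + 4 bytes */

	buf[0] = tag >> 8;  buf[1] = tag & 0xff;
	buf[2] = len >> 24; buf[3] = len >> 16; buf[4] = len >> 8; buf[5] = len & 0xff;
	buf[6] = ordinal >> 24; buf[7] = ordinal >> 16;
	buf[8] = ordinal >> 8;  buf[9] = ordinal & 0xff;
	return len;
}

int main(void)
{
	uint8_t cmd[10];
	size_t n = tpm_build_cmd(cmd, 0x00c1 /* TPM_TAG_RQU_COMMAND */,
				 152 /* TPM_ORD_SaveState */);

	for (size_t i = 0; i < n; i++)
		printf("%02x ", cmd[i]);
	printf("\n");	/* expected: 00 c1 00 00 00 0a 00 00 00 98 */
	return 0;
}

Built this way, the blob is byte-for-byte the same as the savestate[] array the driver declares inline.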
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index c6f3b48be9dd..dcb9083ecde0 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1951,8 +1951,10 @@ static int tty_fasync(int fd, struct file *filp, int on)
1951 pid = task_pid(current); 1951 pid = task_pid(current);
1952 type = PIDTYPE_PID; 1952 type = PIDTYPE_PID;
1953 } 1953 }
1954 retval = __f_setown(filp, pid, type, 0); 1954 get_pid(pid);
1955 spin_unlock_irqrestore(&tty->ctrl_lock, flags); 1955 spin_unlock_irqrestore(&tty->ctrl_lock, flags);
1956 retval = __f_setown(filp, pid, type, 0);
1957 put_pid(pid);
1956 if (retval) 1958 if (retval)
1957 goto out; 1959 goto out;
1958 } else { 1960 } else {
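
The tty_fasync change above pins the pid with get_pid() while ctrl_lock is still held, drops the spinlock, and only then calls __f_setown(), putting the reference afterwards; __f_setown() takes other locks and must not run under ctrl_lock. A minimal userspace sketch of that ordering follows, with toy types and a pthread mutex standing in for the spinlock; it is not the tty code itself.

/* Sketch of the "take a reference under the lock, use the object after
 * unlocking" pattern the tty_fasync fix follows; toy types, not kernel code. */
#include <stdio.h>
#include <pthread.h>

struct pid_obj {
	int refcount;
	int nr;
};

static pthread_mutex_t ctrl_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pid_obj session = { .refcount = 1, .nr = 42 };

static struct pid_obj *get_pid(struct pid_obj *p) { p->refcount++; return p; }
static void put_pid(struct pid_obj *p) { p->refcount--; }

static int set_owner(struct pid_obj *p)
{
	/* stands in for __f_setown(), which must not run under ctrl_lock */
	printf("owner set to pid %d\n", p->nr);
	return 0;
}

int main(void)
{
	struct pid_obj *pid;
	int ret;

	pthread_mutex_lock(&ctrl_lock);
	pid = get_pid(&session);	/* pin the object while still locked */
	pthread_mutex_unlock(&ctrl_lock);

	ret = set_owner(pid);		/* safe: we hold our own reference */
	put_pid(pid);
	return ret;
}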
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index f06024668f99..537c29ac4487 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -36,17 +36,6 @@ MODULE_LICENSE("GPL");
36MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); 36MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
37MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector."); 37MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector.");
38 38
39static u32 cn_idx = CN_IDX_CONNECTOR;
40static u32 cn_val = CN_VAL_CONNECTOR;
41
42module_param(cn_idx, uint, 0);
43module_param(cn_val, uint, 0);
44MODULE_PARM_DESC(cn_idx, "Connector's main device idx.");
45MODULE_PARM_DESC(cn_val, "Connector's main device val.");
46
47static DEFINE_MUTEX(notify_lock);
48static LIST_HEAD(notify_list);
49
50static struct cn_dev cdev; 39static struct cn_dev cdev;
51 40
52static int cn_already_initialized; 41static int cn_already_initialized;
@@ -210,54 +199,6 @@ static void cn_rx_skb(struct sk_buff *__skb)
210} 199}
211 200
212/* 201/*
213 * Notification routing.
214 *
215 * Gets id and checks if there are notification request for it's idx
216 * and val. If there are such requests notify the listeners with the
217 * given notify event.
218 *
219 */
220static void cn_notify(struct cb_id *id, u32 notify_event)
221{
222 struct cn_ctl_entry *ent;
223
224 mutex_lock(&notify_lock);
225 list_for_each_entry(ent, &notify_list, notify_entry) {
226 int i;
227 struct cn_notify_req *req;
228 struct cn_ctl_msg *ctl = ent->msg;
229 int idx_found, val_found;
230
231 idx_found = val_found = 0;
232
233 req = (struct cn_notify_req *)ctl->data;
234 for (i = 0; i < ctl->idx_notify_num; ++i, ++req) {
235 if (id->idx >= req->first &&
236 id->idx < req->first + req->range) {
237 idx_found = 1;
238 break;
239 }
240 }
241
242 for (i = 0; i < ctl->val_notify_num; ++i, ++req) {
243 if (id->val >= req->first &&
244 id->val < req->first + req->range) {
245 val_found = 1;
246 break;
247 }
248 }
249
250 if (idx_found && val_found) {
251 struct cn_msg m = { .ack = notify_event, };
252
253 memcpy(&m.id, id, sizeof(m.id));
254 cn_netlink_send(&m, ctl->group, GFP_KERNEL);
255 }
256 }
257 mutex_unlock(&notify_lock);
258}
259
260/*
261 * Callback add routing - adds callback with given ID and name. 202 * Callback add routing - adds callback with given ID and name.
262 * If there is registered callback with the same ID it will not be added. 203 * If there is registered callback with the same ID it will not be added.
263 * 204 *
@@ -276,8 +217,6 @@ int cn_add_callback(struct cb_id *id, char *name,
276 if (err) 217 if (err)
277 return err; 218 return err;
278 219
279 cn_notify(id, 0);
280
281 return 0; 220 return 0;
282} 221}
283EXPORT_SYMBOL_GPL(cn_add_callback); 222EXPORT_SYMBOL_GPL(cn_add_callback);
@@ -295,111 +234,9 @@ void cn_del_callback(struct cb_id *id)
295 struct cn_dev *dev = &cdev; 234 struct cn_dev *dev = &cdev;
296 235
297 cn_queue_del_callback(dev->cbdev, id); 236 cn_queue_del_callback(dev->cbdev, id);
298 cn_notify(id, 1);
299} 237}
300EXPORT_SYMBOL_GPL(cn_del_callback); 238EXPORT_SYMBOL_GPL(cn_del_callback);
301 239
302/*
303 * Checks two connector's control messages to be the same.
304 * Returns 1 if they are the same or if the first one is corrupted.
305 */
306static int cn_ctl_msg_equals(struct cn_ctl_msg *m1, struct cn_ctl_msg *m2)
307{
308 int i;
309 struct cn_notify_req *req1, *req2;
310
311 if (m1->idx_notify_num != m2->idx_notify_num)
312 return 0;
313
314 if (m1->val_notify_num != m2->val_notify_num)
315 return 0;
316
317 if (m1->len != m2->len)
318 return 0;
319
320 if ((m1->idx_notify_num + m1->val_notify_num) * sizeof(*req1) !=
321 m1->len)
322 return 1;
323
324 req1 = (struct cn_notify_req *)m1->data;
325 req2 = (struct cn_notify_req *)m2->data;
326
327 for (i = 0; i < m1->idx_notify_num; ++i) {
328 if (req1->first != req2->first || req1->range != req2->range)
329 return 0;
330 req1++;
331 req2++;
332 }
333
334 for (i = 0; i < m1->val_notify_num; ++i) {
335 if (req1->first != req2->first || req1->range != req2->range)
336 return 0;
337 req1++;
338 req2++;
339 }
340
341 return 1;
342}
343
344/*
345 * Main connector device's callback.
346 *
347 * Used for notification of a request's processing.
348 */
349static void cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
350{
351 struct cn_ctl_msg *ctl;
352 struct cn_ctl_entry *ent;
353 u32 size;
354
355 if (msg->len < sizeof(*ctl))
356 return;
357
358 ctl = (struct cn_ctl_msg *)msg->data;
359
360 size = (sizeof(*ctl) + ((ctl->idx_notify_num +
361 ctl->val_notify_num) *
362 sizeof(struct cn_notify_req)));
363
364 if (msg->len != size)
365 return;
366
367 if (ctl->len + sizeof(*ctl) != msg->len)
368 return;
369
370 /*
371 * Remove notification.
372 */
373 if (ctl->group == 0) {
374 struct cn_ctl_entry *n;
375
376 mutex_lock(&notify_lock);
377 list_for_each_entry_safe(ent, n, &notify_list, notify_entry) {
378 if (cn_ctl_msg_equals(ent->msg, ctl)) {
379 list_del(&ent->notify_entry);
380 kfree(ent);
381 }
382 }
383 mutex_unlock(&notify_lock);
384
385 return;
386 }
387
388 size += sizeof(*ent);
389
390 ent = kzalloc(size, GFP_KERNEL);
391 if (!ent)
392 return;
393
394 ent->msg = (struct cn_ctl_msg *)(ent + 1);
395
396 memcpy(ent->msg, ctl, size - sizeof(*ent));
397
398 mutex_lock(&notify_lock);
399 list_add(&ent->notify_entry, &notify_list);
400 mutex_unlock(&notify_lock);
401}
402
403static int cn_proc_show(struct seq_file *m, void *v) 240static int cn_proc_show(struct seq_file *m, void *v)
404{ 241{
405 struct cn_queue_dev *dev = cdev.cbdev; 242 struct cn_queue_dev *dev = cdev.cbdev;
@@ -437,11 +274,8 @@ static const struct file_operations cn_file_ops = {
437static int __devinit cn_init(void) 274static int __devinit cn_init(void)
438{ 275{
439 struct cn_dev *dev = &cdev; 276 struct cn_dev *dev = &cdev;
440 int err;
441 277
442 dev->input = cn_rx_skb; 278 dev->input = cn_rx_skb;
443 dev->id.idx = cn_idx;
444 dev->id.val = cn_val;
445 279
446 dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, 280 dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR,
447 CN_NETLINK_USERS + 0xf, 281 CN_NETLINK_USERS + 0xf,
@@ -457,14 +291,6 @@ static int __devinit cn_init(void)
457 291
458 cn_already_initialized = 1; 292 cn_already_initialized = 1;
459 293
460 err = cn_add_callback(&dev->id, "connector", &cn_callback);
461 if (err) {
462 cn_already_initialized = 0;
463 cn_queue_free_dev(dev->cbdev);
464 netlink_kernel_release(dev->nls);
465 return -EINVAL;
466 }
467
468 proc_net_fops_create(&init_net, "connector", S_IRUGO, &cn_file_ops); 294 proc_net_fops_create(&init_net, "connector", S_IRUGO, &cn_file_ops);
469 295
470 return 0; 296 return 0;
@@ -478,7 +304,6 @@ static void __devexit cn_fini(void)
478 304
479 proc_net_remove(&init_net, "connector"); 305 proc_net_remove(&init_net, "connector");
480 306
481 cn_del_callback(&dev->id);
482 cn_queue_free_dev(dev->cbdev); 307 cn_queue_free_dev(dev->cbdev);
483 netlink_kernel_release(dev->nls); 308 netlink_kernel_release(dev->nls);
484} 309}
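
The removed notification machinery routed events by checking whether an id's idx and val fell inside any of the caller-supplied {first, range} windows. The short sketch below reproduces just that interval test with made-up sample data, since the lists, locking and netlink send that surrounded it are now gone from the driver.

/* Sketch of the {first, range} interval test the removed cn_notify() used to
 * route notifications; standalone, with invented sample data. */
#include <stdio.h>
#include <stdint.h>

struct notify_req {
	uint32_t first;
	uint32_t range;
};

static int id_in_reqs(uint32_t id, const struct notify_req *req, int num)
{
	for (int i = 0; i < num; i++)
		if (id >= req[i].first && id < req[i].first + req[i].range)
			return 1;
	return 0;
}

int main(void)
{
	const struct notify_req idx_reqs[] = { { .first = 1, .range = 4 } };

	printf("idx 3 matches: %d\n", id_in_reqs(3, idx_reqs, 1));	/* 1 */
	printf("idx 5 matches: %d\n", id_in_reqs(5, idx_reqs, 1));	/* 0 */
	return 0;
}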
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 4b34ade2332b..bd444dc93cf2 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -554,6 +554,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
554 (dbs_tuners_ins.up_threshold - 554 (dbs_tuners_ins.up_threshold -
555 dbs_tuners_ins.down_differential); 555 dbs_tuners_ins.down_differential);
556 556
557 if (freq_next < policy->min)
558 freq_next = policy->min;
559
557 if (!dbs_tuners_ins.powersave_bias) { 560 if (!dbs_tuners_ins.powersave_bias) {
558 __cpufreq_driver_target(policy, freq_next, 561 __cpufreq_driver_target(policy, freq_next,
559 CPUFREQ_RELATION_L); 562 CPUFREQ_RELATION_L);
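
The added lines clamp the computed target so the governor never requests a frequency below policy->min when scaling down. A trivial sketch of the same clamp, with illustrative kHz values:

/* Minimal sketch of the clamp added above: never request a target below the
 * policy minimum.  Values are illustrative only. */
#include <stdio.h>

static unsigned int clamp_to_min(unsigned int freq_next, unsigned int policy_min)
{
	return freq_next < policy_min ? policy_min : freq_next;
}

int main(void)
{
	printf("%u\n", clamp_to_min(180000, 250000));	/* 250000 */
	printf("%u\n", clamp_to_min(800000, 250000));	/* 800000 */
	return 0;
}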
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index cf27402af97b..ecd5928d7110 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -804,8 +804,8 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
804 end <<= (24 - PAGE_SHIFT); 804 end <<= (24 - PAGE_SHIFT);
805 end |= (1 << (24 - PAGE_SHIFT)) - 1; 805 end |= (1 << (24 - PAGE_SHIFT)) - 1;
806 806
807 csrow->first_page = start >> PAGE_SHIFT; 807 csrow->first_page = start;
808 csrow->last_page = end >> PAGE_SHIFT; 808 csrow->last_page = end;
809 csrow->nr_pages = end + 1 - start; 809 csrow->nr_pages = end + 1 - start;
810 csrow->grain = 8; 810 csrow->grain = 8;
811 csrow->mtype = mtype; 811 csrow->mtype = mtype;
@@ -892,10 +892,6 @@ static int __devinit mpc85xx_mc_err_probe(struct of_device *op,
892 892
893 mpc85xx_init_csrows(mci); 893 mpc85xx_init_csrows(mci);
894 894
895#ifdef CONFIG_EDAC_DEBUG
896 edac_mc_register_mcidev_debug((struct attribute **)debug_attr);
897#endif
898
899 /* store the original error disable bits */ 895 /* store the original error disable bits */
900 orig_ddr_err_disable = 896 orig_ddr_err_disable =
901 in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE); 897 in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE);
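
The csrow fix above stops shifting start and end by PAGE_SHIFT a second time: after the << (24 - PAGE_SHIFT) earlier in the function they are already page numbers. A small worked example, assuming 4 KiB pages and treating the boundary values as 16 MiB units (an assumption made here purely for illustration):

/* Worked example of the unit fix above.  With 4 KiB pages, one 16 MiB
 * boundary unit is 4096 pages, so the shifted values are page numbers and
 * need no further >> PAGE_SHIFT. */
#include <stdio.h>

#define PAGE_SHIFT 12			/* 4 KiB pages, assumed */

int main(void)
{
	unsigned long start_bnd = 0, end_bnd = 15;	/* illustrative boundary values */
	unsigned long start, end;

	start = start_bnd << (24 - PAGE_SHIFT);		/* first page: 0 */
	end   = end_bnd << (24 - PAGE_SHIFT);
	end  |= (1 << (24 - PAGE_SHIFT)) - 1;		/* last page: 65535 */

	printf("first_page=%lu last_page=%lu nr_pages=%lu\n",
	       start, end, end + 1 - start);		/* 0 65535 65536 -> 256 MiB */
	return 0;
}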
diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
index a1fce68e3bbe..17be051b7aa3 100644
--- a/drivers/gpu/drm/ati_pcigart.c
+++ b/drivers/gpu/drm/ati_pcigart.c
@@ -113,7 +113,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
113 113
114 if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) { 114 if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
115 DRM_ERROR("fail to set dma mask to 0x%Lx\n", 115 DRM_ERROR("fail to set dma mask to 0x%Lx\n",
116 gart_info->table_mask); 116 (unsigned long long)gart_info->table_mask);
117 ret = 1; 117 ret = 1;
118 goto done; 118 goto done;
119 } 119 }
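
The one-line change casts the DMA mask explicitly because a 64-bit value passed to a %Lx/%llx conversion must have type unsigned long long regardless of how the mask typedef is defined on a given architecture. A minimal sketch of the portable form, with a stand-in variable:

/* Sketch of the printf-format fix above: pass 64-bit values to %llx as
 * unsigned long long, whatever the underlying typedef is. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t table_mask = 0xffffffffULL;	/* stand-in for gart_info->table_mask */

	printf("fail to set dma mask to 0x%llx\n",
	       (unsigned long long)table_mask);
	return 0;
}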
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 46d88965852a..ecac882e1d54 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -120,7 +120,7 @@ const static struct intel_device_info intel_gm45_info = {
120 120
121const static struct intel_device_info intel_pineview_info = { 121const static struct intel_device_info intel_pineview_info = {
122 .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1, 122 .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
123 .has_pipe_cxsr = 1, 123 .need_gfx_hws = 1,
124 .has_hotplug = 1, 124 .has_hotplug = 1,
125}; 125};
126 126
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dda787aafcc6..b4c8c0230689 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3564,6 +3564,9 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
3564 uint32_t reloc_count = 0, i; 3564 uint32_t reloc_count = 0, i;
3565 int ret = 0; 3565 int ret = 0;
3566 3566
3567 if (relocs == NULL)
3568 return 0;
3569
3567 for (i = 0; i < buffer_count; i++) { 3570 for (i = 0; i < buffer_count; i++) {
3568 struct drm_i915_gem_relocation_entry __user *user_relocs; 3571 struct drm_i915_gem_relocation_entry __user *user_relocs;
3569 int unwritten; 3572 int unwritten;
@@ -3653,7 +3656,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3653 struct drm_gem_object *batch_obj; 3656 struct drm_gem_object *batch_obj;
3654 struct drm_i915_gem_object *obj_priv; 3657 struct drm_i915_gem_object *obj_priv;
3655 struct drm_clip_rect *cliprects = NULL; 3658 struct drm_clip_rect *cliprects = NULL;
3656 struct drm_i915_gem_relocation_entry *relocs; 3659 struct drm_i915_gem_relocation_entry *relocs = NULL;
3657 int ret = 0, ret2, i, pinned = 0; 3660 int ret = 0, ret2, i, pinned = 0;
3658 uint64_t exec_offset; 3661 uint64_t exec_offset;
3659 uint32_t seqno, flush_domains, reloc_index; 3662 uint32_t seqno, flush_domains, reloc_index;
@@ -3722,6 +3725,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3722 if (object_list[i] == NULL) { 3725 if (object_list[i] == NULL) {
3723 DRM_ERROR("Invalid object handle %d at index %d\n", 3726 DRM_ERROR("Invalid object handle %d at index %d\n",
3724 exec_list[i].handle, i); 3727 exec_list[i].handle, i);
3728 /* prevent error path from reading uninitialized data */
3729 args->buffer_count = i + 1;
3725 ret = -EBADF; 3730 ret = -EBADF;
3726 goto err; 3731 goto err;
3727 } 3732 }
@@ -3730,6 +3735,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3730 if (obj_priv->in_execbuffer) { 3735 if (obj_priv->in_execbuffer) {
3731 DRM_ERROR("Object %p appears more than once in object list\n", 3736 DRM_ERROR("Object %p appears more than once in object list\n",
3732 object_list[i]); 3737 object_list[i]);
3738 /* prevent error path from reading uninitialized data */
3739 args->buffer_count = i + 1;
3733 ret = -EBADF; 3740 ret = -EBADF;
3734 goto err; 3741 goto err;
3735 } 3742 }
@@ -3926,6 +3933,7 @@ err:
3926 3933
3927 mutex_unlock(&dev->struct_mutex); 3934 mutex_unlock(&dev->struct_mutex);
3928 3935
3936pre_mutex_err:
3929 /* Copy the updated relocations out regardless of current error 3937 /* Copy the updated relocations out regardless of current error
3930 * state. Failure to update the relocs would mean that the next 3938 * state. Failure to update the relocs would mean that the next
3931 * time userland calls execbuf, it would do so with presumed offset 3939 * time userland calls execbuf, it would do so with presumed offset
@@ -3940,7 +3948,6 @@ err:
3940 ret = ret2; 3948 ret = ret2;
3941 } 3949 }
3942 3950
3943pre_mutex_err:
3944 drm_free_large(object_list); 3951 drm_free_large(object_list);
3945 kfree(cliprects); 3952 kfree(cliprects);
3946 3953
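
The execbuffer hardening above does two things: it lets the relocation write-back helper accept a NULL table (the pre_mutex_err path can now reach it before relocs is allocated), and it records args->buffer_count = i + 1 before bailing so the error path never walks uninitialised slots. The toy sketch below mirrors that shape with invented names; it is not the GEM code.

/* Toy sketch of the error-path pattern above: remember how far initialisation
 * got before jumping to cleanup, and let cleanup tolerate a NULL table. */
#include <stdio.h>
#include <stdlib.h>

static void put_entries(const int *entries, int count)
{
	if (entries == NULL)			/* cleanup may run before allocation */
		return;
	for (int i = 0; i < count; i++)
		printf("releasing entry %d\n", entries[i]);
}

int main(void)
{
	int *entries = NULL;
	int count = 5, valid = 0;

	entries = calloc(count, sizeof(*entries));
	if (!entries)
		goto err;

	for (int i = 0; i < count; i++) {
		if (i == 3) {
			/* simulated lookup failure: include the failing slot so
			 * cleanup sees a consistent count (it is zeroed here). */
			valid = i + 1;
			goto err;
		}
		entries[i] = (i + 1) * 10;
		valid = i + 1;
	}
err:
	put_entries(entries, valid);
	free(entries);
	return 0;
}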
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 89a071a3e6fb..50ddf4a95c5e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -309,6 +309,22 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
309 if (de_iir & DE_GSE) 309 if (de_iir & DE_GSE)
310 ironlake_opregion_gse_intr(dev); 310 ironlake_opregion_gse_intr(dev);
311 311
312 if (de_iir & DE_PLANEA_FLIP_DONE)
313 intel_prepare_page_flip(dev, 0);
314
315 if (de_iir & DE_PLANEB_FLIP_DONE)
316 intel_prepare_page_flip(dev, 1);
317
318 if (de_iir & DE_PIPEA_VBLANK) {
319 drm_handle_vblank(dev, 0);
320 intel_finish_page_flip(dev, 0);
321 }
322
323 if (de_iir & DE_PIPEB_VBLANK) {
324 drm_handle_vblank(dev, 1);
325 intel_finish_page_flip(dev, 1);
326 }
327
312 /* check event from PCH */ 328 /* check event from PCH */
313 if ((de_iir & DE_PCH_EVENT) && 329 if ((de_iir & DE_PCH_EVENT) &&
314 (pch_iir & SDE_HOTPLUG_MASK)) { 330 (pch_iir & SDE_HOTPLUG_MASK)) {
@@ -844,11 +860,11 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
844 if (!(pipeconf & PIPEACONF_ENABLE)) 860 if (!(pipeconf & PIPEACONF_ENABLE))
845 return -EINVAL; 861 return -EINVAL;
846 862
847 if (IS_IRONLAKE(dev))
848 return 0;
849
850 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 863 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
851 if (IS_I965G(dev)) 864 if (IS_IRONLAKE(dev))
865 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
866 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
867 else if (IS_I965G(dev))
852 i915_enable_pipestat(dev_priv, pipe, 868 i915_enable_pipestat(dev_priv, pipe,
853 PIPE_START_VBLANK_INTERRUPT_ENABLE); 869 PIPE_START_VBLANK_INTERRUPT_ENABLE);
854 else 870 else
@@ -866,13 +882,14 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
866 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 882 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
867 unsigned long irqflags; 883 unsigned long irqflags;
868 884
869 if (IS_IRONLAKE(dev))
870 return;
871
872 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 885 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
873 i915_disable_pipestat(dev_priv, pipe, 886 if (IS_IRONLAKE(dev))
874 PIPE_VBLANK_INTERRUPT_ENABLE | 887 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
875 PIPE_START_VBLANK_INTERRUPT_ENABLE); 888 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
889 else
890 i915_disable_pipestat(dev_priv, pipe,
891 PIPE_VBLANK_INTERRUPT_ENABLE |
892 PIPE_START_VBLANK_INTERRUPT_ENABLE);
876 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 893 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
877} 894}
878 895
@@ -1015,13 +1032,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1015{ 1032{
1016 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1033 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1017 /* enable kind of interrupts always enabled */ 1034 /* enable kind of interrupts always enabled */
1018 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT; 1035 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
1036 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
1019 u32 render_mask = GT_USER_INTERRUPT; 1037 u32 render_mask = GT_USER_INTERRUPT;
1020 u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | 1038 u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
1021 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; 1039 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
1022 1040
1023 dev_priv->irq_mask_reg = ~display_mask; 1041 dev_priv->irq_mask_reg = ~display_mask;
1024 dev_priv->de_irq_enable_reg = display_mask; 1042 dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
1025 1043
1026 /* should always can generate irq */ 1044 /* should always can generate irq */
1027 I915_WRITE(DEIIR, I915_READ(DEIIR)); 1045 I915_WRITE(DEIIR, I915_READ(DEIIR));
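
The Ironlake interrupt rework stops treating vblanks as unsupported: enable/disable now picks DE_PIPEA_VBLANK or DE_PIPEB_VBLANK from the pipe index, and postinstall folds the flip-done and vblank bits into the DE enable mask. The sketch below shows only that mask arithmetic; the bit positions are placeholders, not the real register layout.

/* Sketch of the mask arithmetic in the Ironlake IRQ changes above.  Bit
 * positions are placeholders. */
#include <stdio.h>
#include <stdint.h>

#define DE_PIPEA_VBLANK	(1u << 7)
#define DE_PIPEB_VBLANK	(1u << 15)

static uint32_t vblank_bit(int pipe)
{
	return pipe == 0 ? DE_PIPEA_VBLANK : DE_PIPEB_VBLANK;
}

int main(void)
{
	uint32_t display_mask = 0x1;		/* stands in for the always-on sources */
	uint32_t enable = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;

	printf("de_irq_enable_reg: 0x%08x\n", enable);
	printf("pipe 1 vblank bit: 0x%08x\n", vblank_bit(1));
	return 0;
}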
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index ddefc871edfe..79dd4026586f 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -157,6 +157,9 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
157 adpa = I915_READ(PCH_ADPA); 157 adpa = I915_READ(PCH_ADPA);
158 158
159 adpa &= ~ADPA_CRT_HOTPLUG_MASK; 159 adpa &= ~ADPA_CRT_HOTPLUG_MASK;
160 /* disable HPD first */
161 I915_WRITE(PCH_ADPA, adpa);
162 (void)I915_READ(PCH_ADPA);
160 163
161 adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | 164 adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
162 ADPA_CRT_HOTPLUG_WARMUP_10MS | 165 ADPA_CRT_HOTPLUG_WARMUP_10MS |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 45da78ef4a92..12775df1bbfd 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1638,6 +1638,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1638 case DRM_MODE_DPMS_OFF: 1638 case DRM_MODE_DPMS_OFF:
1639 DRM_DEBUG_KMS("crtc %d dpms off\n", pipe); 1639 DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
1640 1640
1641 drm_vblank_off(dev, pipe);
1641 /* Disable display plane */ 1642 /* Disable display plane */
1642 temp = I915_READ(dspcntr_reg); 1643 temp = I915_READ(dspcntr_reg);
1643 if ((temp & DISPLAY_PLANE_ENABLE) != 0) { 1644 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
@@ -2519,6 +2520,10 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
2519 sr_entries = roundup(sr_entries / cacheline_size, 1); 2520 sr_entries = roundup(sr_entries / cacheline_size, 1);
2520 DRM_DEBUG("self-refresh entries: %d\n", sr_entries); 2521 DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
2521 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 2522 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
2523 } else {
2524 /* Turn off self refresh if both pipes are enabled */
2525 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
2526 & ~FW_BLC_SELF_EN);
2522 } 2527 }
2523 2528
2524 DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n", 2529 DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
@@ -2562,6 +2567,10 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
2562 srwm = 1; 2567 srwm = 1;
2563 srwm &= 0x3f; 2568 srwm &= 0x3f;
2564 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 2569 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
2570 } else {
2571 /* Turn off self refresh if both pipes are enabled */
2572 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
2573 & ~FW_BLC_SELF_EN);
2565 } 2574 }
2566 2575
2567 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", 2576 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -2630,6 +2639,10 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
2630 if (srwm < 0) 2639 if (srwm < 0)
2631 srwm = 1; 2640 srwm = 1;
2632 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); 2641 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
2642 } else {
2643 /* Turn off self refresh if both pipes are enabled */
2644 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
2645 & ~FW_BLC_SELF_EN);
2633 } 2646 }
2634 2647
2635 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", 2648 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
@@ -3984,6 +3997,12 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
3984 spin_lock_irqsave(&dev->event_lock, flags); 3997 spin_lock_irqsave(&dev->event_lock, flags);
3985 work = intel_crtc->unpin_work; 3998 work = intel_crtc->unpin_work;
3986 if (work == NULL || !work->pending) { 3999 if (work == NULL || !work->pending) {
4000 if (work && !work->pending) {
4001 obj_priv = work->obj->driver_private;
4002 DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
4003 obj_priv,
4004 atomic_read(&obj_priv->pending_flip));
4005 }
3987 spin_unlock_irqrestore(&dev->event_lock, flags); 4006 spin_unlock_irqrestore(&dev->event_lock, flags);
3988 return; 4007 return;
3989 } 4008 }
@@ -4005,7 +4024,10 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
4005 spin_unlock_irqrestore(&dev->event_lock, flags); 4024 spin_unlock_irqrestore(&dev->event_lock, flags);
4006 4025
4007 obj_priv = work->obj->driver_private; 4026 obj_priv = work->obj->driver_private;
4008 if (atomic_dec_and_test(&obj_priv->pending_flip)) 4027
4028 /* Initial scanout buffer will have a 0 pending flip count */
4029 if ((atomic_read(&obj_priv->pending_flip) == 0) ||
4030 atomic_dec_and_test(&obj_priv->pending_flip))
4009 DRM_WAKEUP(&dev_priv->pending_flip_queue); 4031 DRM_WAKEUP(&dev_priv->pending_flip_queue);
4010 schedule_work(&work->work); 4032 schedule_work(&work->work);
4011} 4033}
@@ -4018,8 +4040,11 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
4018 unsigned long flags; 4040 unsigned long flags;
4019 4041
4020 spin_lock_irqsave(&dev->event_lock, flags); 4042 spin_lock_irqsave(&dev->event_lock, flags);
4021 if (intel_crtc->unpin_work) 4043 if (intel_crtc->unpin_work) {
4022 intel_crtc->unpin_work->pending = 1; 4044 intel_crtc->unpin_work->pending = 1;
4045 } else {
4046 DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
4047 }
4023 spin_unlock_irqrestore(&dev->event_lock, flags); 4048 spin_unlock_irqrestore(&dev->event_lock, flags);
4024} 4049}
4025 4050
@@ -4053,6 +4078,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4053 /* We borrow the event spin lock for protecting unpin_work */ 4078 /* We borrow the event spin lock for protecting unpin_work */
4054 spin_lock_irqsave(&dev->event_lock, flags); 4079 spin_lock_irqsave(&dev->event_lock, flags);
4055 if (intel_crtc->unpin_work) { 4080 if (intel_crtc->unpin_work) {
4081 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
4056 spin_unlock_irqrestore(&dev->event_lock, flags); 4082 spin_unlock_irqrestore(&dev->event_lock, flags);
4057 kfree(work); 4083 kfree(work);
4058 mutex_unlock(&dev->struct_mutex); 4084 mutex_unlock(&dev->struct_mutex);
@@ -4066,7 +4092,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4066 4092
4067 ret = intel_pin_and_fence_fb_obj(dev, obj); 4093 ret = intel_pin_and_fence_fb_obj(dev, obj);
4068 if (ret != 0) { 4094 if (ret != 0) {
4095 DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
4096 obj->driver_private);
4069 kfree(work); 4097 kfree(work);
4098 intel_crtc->unpin_work = NULL;
4070 mutex_unlock(&dev->struct_mutex); 4099 mutex_unlock(&dev->struct_mutex);
4071 return ret; 4100 return ret;
4072 } 4101 }
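
Each watermark path now clears FW_BLC_SELF_EN when both pipes are active instead of leaving self-refresh in whatever state the last single-pipe configuration set. A mocked read-modify-write sketch of that toggle (the bit position is a placeholder and the register is a plain variable):

/* Sketch of the self-refresh toggle added to the watermark paths above. */
#include <stdio.h>
#include <stdint.h>

#define FW_BLC_SELF_EN	(1u << 15)	/* placeholder bit position */

static uint32_t fw_blc_self;		/* stands in for the FW_BLC_SELF register */

static void update_self_refresh(int active_pipes)
{
	if (active_pipes == 1)
		fw_blc_self |= FW_BLC_SELF_EN;		/* single pipe: allow SR */
	else
		fw_blc_self &= ~FW_BLC_SELF_EN;		/* both pipes: force SR off */
}

int main(void)
{
	update_self_refresh(1);
	printf("one pipe:  0x%08x\n", fw_blc_self);
	update_self_refresh(2);
	printf("two pipes: 0x%08x\n", fw_blc_self);
	return 0;
}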
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index aa74e59bec61..b1d0acbae4e4 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -611,7 +611,7 @@ static const struct dmi_system_id bad_lid_status[] = {
611 { 611 {
612 .ident = "Samsung SX20S", 612 .ident = "Samsung SX20S",
613 .matches = { 613 .matches = {
614 DMI_MATCH(DMI_SYS_VENDOR, "Phoenix Technologies LTD"), 614 DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"),
615 DMI_MATCH(DMI_BOARD_NAME, "SX20S"), 615 DMI_MATCH(DMI_BOARD_NAME, "SX20S"),
616 }, 616 },
617 }, 617 },
@@ -623,6 +623,13 @@ static const struct dmi_system_id bad_lid_status[] = {
623 }, 623 },
624 }, 624 },
625 { 625 {
626 .ident = "Aspire 1810T",
627 .matches = {
628 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
629 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"),
630 },
631 },
632 {
626 .ident = "PC-81005", 633 .ident = "PC-81005",
627 .matches = { 634 .matches = {
628 DMI_MATCH(DMI_SYS_VENDOR, "MALATA"), 635 DMI_MATCH(DMI_SYS_VENDOR, "MALATA"),
@@ -643,7 +650,7 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
643{ 650{
644 enum drm_connector_status status = connector_status_connected; 651 enum drm_connector_status status = connector_status_connected;
645 652
646 if (!acpi_lid_open() && !dmi_check_system(bad_lid_status)) 653 if (!dmi_check_system(bad_lid_status) && !acpi_lid_open())
647 status = connector_status_disconnected; 654 status = connector_status_disconnected;
648 655
649 return status; 656 return status;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index eaacfd0920df..82678d30ab06 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2345,6 +2345,14 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2345 connector->connector_type = DRM_MODE_CONNECTOR_VGA; 2345 connector->connector_type = DRM_MODE_CONNECTOR_VGA;
2346 intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2346 intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
2347 (1 << INTEL_ANALOG_CLONE_BIT); 2347 (1 << INTEL_ANALOG_CLONE_BIT);
2348 } else if (flags & SDVO_OUTPUT_CVBS0) {
2349
2350 sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0;
2351 encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
2352 connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
2353 sdvo_priv->is_tv = true;
2354 intel_output->needs_tv_clock = true;
2355 intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
2348 } else if (flags & SDVO_OUTPUT_LVDS0) { 2356 } else if (flags & SDVO_OUTPUT_LVDS0) {
2349 2357
2350 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; 2358 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 11c9a3fe6810..c0d4650cdb79 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -354,11 +354,17 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
354 return RREG32(RADEON_CRTC2_CRNT_FRAME); 354 return RREG32(RADEON_CRTC2_CRNT_FRAME);
355} 355}
356 356
357/* Who ever call radeon_fence_emit should call ring_lock and ask
358 * for enough space (today caller are ib schedule and buffer move) */
357void r100_fence_ring_emit(struct radeon_device *rdev, 359void r100_fence_ring_emit(struct radeon_device *rdev,
358 struct radeon_fence *fence) 360 struct radeon_fence *fence)
359{ 361{
360 /* Who ever call radeon_fence_emit should call ring_lock and ask 362 /* We have to make sure that caches are flushed before
361 * for enough space (today caller are ib schedule and buffer move) */ 363 * CPU might read something from VRAM. */
364 radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
365 radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
366 radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
367 radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
362 /* Wait until IDLE & CLEAN */ 368 /* Wait until IDLE & CLEAN */
363 radeon_ring_write(rdev, PACKET0(0x1720, 0)); 369 radeon_ring_write(rdev, PACKET0(0x1720, 0));
364 radeon_ring_write(rdev, (1 << 16) | (1 << 17)); 370 radeon_ring_write(rdev, (1 << 16) | (1 << 17));
@@ -3369,7 +3375,6 @@ int r100_suspend(struct radeon_device *rdev)
3369 3375
3370void r100_fini(struct radeon_device *rdev) 3376void r100_fini(struct radeon_device *rdev)
3371{ 3377{
3372 r100_suspend(rdev);
3373 r100_cp_fini(rdev); 3378 r100_cp_fini(rdev);
3374 r100_wb_fini(rdev); 3379 r100_wb_fini(rdev);
3375 r100_ib_fini(rdev); 3380 r100_ib_fini(rdev);
@@ -3481,13 +3486,12 @@ int r100_init(struct radeon_device *rdev)
3481 if (r) { 3486 if (r) {
3482 /* Somethings want wront with the accel init stop accel */ 3487 /* Somethings want wront with the accel init stop accel */
3483 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 3488 dev_err(rdev->dev, "Disabling GPU acceleration\n");
3484 r100_suspend(rdev);
3485 r100_cp_fini(rdev); 3489 r100_cp_fini(rdev);
3486 r100_wb_fini(rdev); 3490 r100_wb_fini(rdev);
3487 r100_ib_fini(rdev); 3491 r100_ib_fini(rdev);
3492 radeon_irq_kms_fini(rdev);
3488 if (rdev->flags & RADEON_IS_PCI) 3493 if (rdev->flags & RADEON_IS_PCI)
3489 r100_pci_gart_fini(rdev); 3494 r100_pci_gart_fini(rdev);
3490 radeon_irq_kms_fini(rdev);
3491 rdev->accel_working = false; 3495 rdev->accel_working = false;
3492 } 3496 }
3493 return 0; 3497 return 0;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 0051d11b907c..43b55a030b4d 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -506,11 +506,14 @@ void r300_vram_info(struct radeon_device *rdev)
506 506
507 /* DDR for all card after R300 & IGP */ 507 /* DDR for all card after R300 & IGP */
508 rdev->mc.vram_is_ddr = true; 508 rdev->mc.vram_is_ddr = true;
509
509 tmp = RREG32(RADEON_MEM_CNTL); 510 tmp = RREG32(RADEON_MEM_CNTL);
510 if (tmp & R300_MEM_NUM_CHANNELS_MASK) { 511 tmp &= R300_MEM_NUM_CHANNELS_MASK;
511 rdev->mc.vram_width = 128; 512 switch (tmp) {
512 } else { 513 case 0: rdev->mc.vram_width = 64; break;
513 rdev->mc.vram_width = 64; 514 case 1: rdev->mc.vram_width = 128; break;
515 case 2: rdev->mc.vram_width = 256; break;
516 default: rdev->mc.vram_width = 128; break;
514 } 517 }
515 518
516 r100_vram_init_sizes(rdev); 519 r100_vram_init_sizes(rdev);
@@ -1327,7 +1330,6 @@ int r300_suspend(struct radeon_device *rdev)
1327 1330
1328void r300_fini(struct radeon_device *rdev) 1331void r300_fini(struct radeon_device *rdev)
1329{ 1332{
1330 r300_suspend(rdev);
1331 r100_cp_fini(rdev); 1333 r100_cp_fini(rdev);
1332 r100_wb_fini(rdev); 1334 r100_wb_fini(rdev);
1333 r100_ib_fini(rdev); 1335 r100_ib_fini(rdev);
@@ -1418,15 +1420,15 @@ int r300_init(struct radeon_device *rdev)
1418 if (r) { 1420 if (r) {
1419 /* Somethings want wront with the accel init stop accel */ 1421 /* Somethings want wront with the accel init stop accel */
1420 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 1422 dev_err(rdev->dev, "Disabling GPU acceleration\n");
1421 r300_suspend(rdev);
1422 r100_cp_fini(rdev); 1423 r100_cp_fini(rdev);
1423 r100_wb_fini(rdev); 1424 r100_wb_fini(rdev);
1424 r100_ib_fini(rdev); 1425 r100_ib_fini(rdev);
1426 radeon_irq_kms_fini(rdev);
1425 if (rdev->flags & RADEON_IS_PCIE) 1427 if (rdev->flags & RADEON_IS_PCIE)
1426 rv370_pcie_gart_fini(rdev); 1428 rv370_pcie_gart_fini(rdev);
1427 if (rdev->flags & RADEON_IS_PCI) 1429 if (rdev->flags & RADEON_IS_PCI)
1428 r100_pci_gart_fini(rdev); 1430 r100_pci_gart_fini(rdev);
1429 radeon_irq_kms_fini(rdev); 1431 radeon_agp_fini(rdev);
1430 rdev->accel_working = false; 1432 rdev->accel_working = false;
1431 } 1433 }
1432 return 0; 1434 return 0;
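
The VRAM width probe now decodes the two-bit channel-count field instead of treating it as a boolean, so 256-bit boards are no longer reported as 128-bit. A standalone sketch of the decode, with a placeholder mask value:

/* Sketch of the memory-width decode introduced above.  The mask is a
 * placeholder for R300_MEM_NUM_CHANNELS_MASK. */
#include <stdio.h>
#include <stdint.h>

#define MEM_NUM_CHANNELS_MASK	0x3

static int vram_width(uint32_t mem_cntl)
{
	switch (mem_cntl & MEM_NUM_CHANNELS_MASK) {
	case 0:  return 64;
	case 1:  return 128;
	case 2:  return 256;
	default: return 128;	/* unknown encoding: assume the common case */
	}
}

int main(void)
{
	for (uint32_t v = 0; v < 4; v++)
		printf("field %u -> %d-bit\n", v, vram_width(v));
	return 0;
}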
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 4526faaacca8..d9373246c97f 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -389,16 +389,15 @@ int r420_init(struct radeon_device *rdev)
389 if (r) { 389 if (r) {
390 /* Somethings want wront with the accel init stop accel */ 390 /* Somethings want wront with the accel init stop accel */
391 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 391 dev_err(rdev->dev, "Disabling GPU acceleration\n");
392 r420_suspend(rdev);
393 r100_cp_fini(rdev); 392 r100_cp_fini(rdev);
394 r100_wb_fini(rdev); 393 r100_wb_fini(rdev);
395 r100_ib_fini(rdev); 394 r100_ib_fini(rdev);
395 radeon_irq_kms_fini(rdev);
396 if (rdev->flags & RADEON_IS_PCIE) 396 if (rdev->flags & RADEON_IS_PCIE)
397 rv370_pcie_gart_fini(rdev); 397 rv370_pcie_gart_fini(rdev);
398 if (rdev->flags & RADEON_IS_PCI) 398 if (rdev->flags & RADEON_IS_PCI)
399 r100_pci_gart_fini(rdev); 399 r100_pci_gart_fini(rdev);
400 radeon_agp_fini(rdev); 400 radeon_agp_fini(rdev);
401 radeon_irq_kms_fini(rdev);
402 rdev->accel_working = false; 401 rdev->accel_working = false;
403 } 402 }
404 return 0; 403 return 0;
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 9a189072f2b9..ddf5731eba0d 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -294,13 +294,12 @@ int r520_init(struct radeon_device *rdev)
294 if (r) { 294 if (r) {
295 /* Somethings want wront with the accel init stop accel */ 295 /* Somethings want wront with the accel init stop accel */
296 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 296 dev_err(rdev->dev, "Disabling GPU acceleration\n");
297 rv515_suspend(rdev);
298 r100_cp_fini(rdev); 297 r100_cp_fini(rdev);
299 r100_wb_fini(rdev); 298 r100_wb_fini(rdev);
300 r100_ib_fini(rdev); 299 r100_ib_fini(rdev);
300 radeon_irq_kms_fini(rdev);
301 rv370_pcie_gart_fini(rdev); 301 rv370_pcie_gart_fini(rdev);
302 radeon_agp_fini(rdev); 302 radeon_agp_fini(rdev);
303 radeon_irq_kms_fini(rdev);
304 rdev->accel_working = false; 303 rdev->accel_working = false;
305 } 304 }
306 return 0; 305 return 0;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 1b6d0001b20e..a1198d99cdf9 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1654,6 +1654,12 @@ void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
1654 rdev->cp.align_mask = 16 - 1; 1654 rdev->cp.align_mask = 16 - 1;
1655} 1655}
1656 1656
1657void r600_cp_fini(struct radeon_device *rdev)
1658{
1659 r600_cp_stop(rdev);
1660 radeon_ring_fini(rdev);
1661}
1662
1657 1663
1658/* 1664/*
1659 * GPU scratch registers helpers function. 1665 * GPU scratch registers helpers function.
@@ -1861,6 +1867,12 @@ int r600_startup(struct radeon_device *rdev)
1861 return r; 1867 return r;
1862 } 1868 }
1863 r600_gpu_init(rdev); 1869 r600_gpu_init(rdev);
1870 r = r600_blit_init(rdev);
1871 if (r) {
1872 r600_blit_fini(rdev);
1873 rdev->asic->copy = NULL;
1874 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
1875 }
1864 /* pin copy shader into vram */ 1876 /* pin copy shader into vram */
1865 if (rdev->r600_blit.shader_obj) { 1877 if (rdev->r600_blit.shader_obj) {
1866 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); 1878 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
@@ -2045,19 +2057,15 @@ int r600_init(struct radeon_device *rdev)
2045 r = r600_pcie_gart_init(rdev); 2057 r = r600_pcie_gart_init(rdev);
2046 if (r) 2058 if (r)
2047 return r; 2059 return r;
2048 r = r600_blit_init(rdev);
2049 if (r) {
2050 r600_blit_fini(rdev);
2051 rdev->asic->copy = NULL;
2052 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
2053 }
2054 2060
2055 rdev->accel_working = true; 2061 rdev->accel_working = true;
2056 r = r600_startup(rdev); 2062 r = r600_startup(rdev);
2057 if (r) { 2063 if (r) {
2058 r600_suspend(rdev); 2064 dev_err(rdev->dev, "disabling GPU acceleration\n");
2065 r600_cp_fini(rdev);
2059 r600_wb_fini(rdev); 2066 r600_wb_fini(rdev);
2060 radeon_ring_fini(rdev); 2067 r600_irq_fini(rdev);
2068 radeon_irq_kms_fini(rdev);
2061 r600_pcie_gart_fini(rdev); 2069 r600_pcie_gart_fini(rdev);
2062 rdev->accel_working = false; 2070 rdev->accel_working = false;
2063 } 2071 }
@@ -2083,20 +2091,17 @@ int r600_init(struct radeon_device *rdev)
2083 2091
2084void r600_fini(struct radeon_device *rdev) 2092void r600_fini(struct radeon_device *rdev)
2085{ 2093{
2086 /* Suspend operations */
2087 r600_suspend(rdev);
2088
2089 r600_audio_fini(rdev); 2094 r600_audio_fini(rdev);
2090 r600_blit_fini(rdev); 2095 r600_blit_fini(rdev);
2096 r600_cp_fini(rdev);
2097 r600_wb_fini(rdev);
2091 r600_irq_fini(rdev); 2098 r600_irq_fini(rdev);
2092 radeon_irq_kms_fini(rdev); 2099 radeon_irq_kms_fini(rdev);
2093 radeon_ring_fini(rdev);
2094 r600_wb_fini(rdev);
2095 r600_pcie_gart_fini(rdev); 2100 r600_pcie_gart_fini(rdev);
2101 radeon_agp_fini(rdev);
2096 radeon_gem_fini(rdev); 2102 radeon_gem_fini(rdev);
2097 radeon_fence_driver_fini(rdev); 2103 radeon_fence_driver_fini(rdev);
2098 radeon_clocks_fini(rdev); 2104 radeon_clocks_fini(rdev);
2099 radeon_agp_fini(rdev);
2100 radeon_bo_fini(rdev); 2105 radeon_bo_fini(rdev);
2101 radeon_atombios_fini(rdev); 2106 radeon_atombios_fini(rdev);
2102 kfree(rdev->bios); 2107 kfree(rdev->bios);
@@ -2900,3 +2905,18 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
2900 return 0; 2905 return 0;
2901#endif 2906#endif
2902} 2907}
2908
2909/**
2910 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
2911 * rdev: radeon device structure
2912 * bo: buffer object struct which userspace is waiting for idle
2913 *
2914 * Some R6XX/R7XX doesn't seems to take into account HDP flush performed
2915 * through ring buffer, this leads to corruption in rendering, see
2916 * http://bugzilla.kernel.org/show_bug.cgi?id=15186 to avoid this we
2917 * directly perform HDP flush by writing register through MMIO.
2918 */
2919void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
2920{
2921 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
2922}
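
Two structural changes stand out above: r600_cp_fini() wraps the stop-then-free pair so every teardown path releases the CP ring the same way, and r600_ioctl_wait_idle() performs the HDP flush as a single MMIO register write, because some R6xx/R7xx parts ignore the flush when it is queued through the ring. The sketch below mocks that kind of direct register store; the offset and mapping are invented, and real code would go through the driver's WREG32() on the ioremapped BAR.

/* Sketch of a direct MMIO register write like the HDP flush added above:
 * one 32-bit store through a volatile mapping.  Everything here is mocked. */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_regs[0x2000];	/* stands in for the ioremapped register BAR */

#define HDP_MEM_COHERENCY_FLUSH_CNTL	0x1480	/* placeholder offset */

static void wreg32(uint32_t reg, uint32_t val)
{
	volatile uint32_t *mmio = fake_regs;	/* would be the mapped BAR */

	mmio[reg / 4] = val;
}

int main(void)
{
	wreg32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);	/* kick the HDP flush */
	printf("reg 0x%04x = 0x%x\n", HDP_MEM_COHERENCY_FLUSH_CNTL,
	       fake_regs[HDP_MEM_COHERENCY_FLUSH_CNTL / 4]);
	return 0;
}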
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 99e2c3891a7d..b1c1d3433454 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -35,7 +35,7 @@
35 */ 35 */
36static int r600_audio_chipset_supported(struct radeon_device *rdev) 36static int r600_audio_chipset_supported(struct radeon_device *rdev)
37{ 37{
38 return rdev->family >= CHIP_R600 38 return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710)
39 || rdev->family == CHIP_RS600 39 || rdev->family == CHIP_RS600
40 || rdev->family == CHIP_RS690 40 || rdev->family == CHIP_RS690
41 || rdev->family == CHIP_RS740; 41 || rdev->family == CHIP_RS740;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 2d5f2bfa7201..f57480ba1355 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -661,6 +661,13 @@ struct radeon_asic {
661 void (*hpd_fini)(struct radeon_device *rdev); 661 void (*hpd_fini)(struct radeon_device *rdev);
662 bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd); 662 bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
663 void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd); 663 void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
664 /* ioctl hw specific callback. Some hw might want to perform special
665 * operation on specific ioctl. For instance on wait idle some hw
666 * might want to perform and HDP flush through MMIO as it seems that
667 * some R6XX/R7XX hw doesn't take HDP flush into account if programmed
668 * through ring.
669 */
670 void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
664}; 671};
665 672
666/* 673/*
@@ -1143,6 +1150,7 @@ extern bool r600_card_posted(struct radeon_device *rdev);
1143extern void r600_cp_stop(struct radeon_device *rdev); 1150extern void r600_cp_stop(struct radeon_device *rdev);
1144extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); 1151extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
1145extern int r600_cp_resume(struct radeon_device *rdev); 1152extern int r600_cp_resume(struct radeon_device *rdev);
1153extern void r600_cp_fini(struct radeon_device *rdev);
1146extern int r600_count_pipe_bits(uint32_t val); 1154extern int r600_count_pipe_bits(uint32_t val);
1147extern int r600_gart_clear_page(struct radeon_device *rdev, int i); 1155extern int r600_gart_clear_page(struct radeon_device *rdev, int i);
1148extern int r600_mc_wait_for_idle(struct radeon_device *rdev); 1156extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index f2fbd2e4e9df..05ee1aeac3fd 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -117,6 +117,7 @@ static struct radeon_asic r100_asic = {
117 .hpd_fini = &r100_hpd_fini, 117 .hpd_fini = &r100_hpd_fini,
118 .hpd_sense = &r100_hpd_sense, 118 .hpd_sense = &r100_hpd_sense,
119 .hpd_set_polarity = &r100_hpd_set_polarity, 119 .hpd_set_polarity = &r100_hpd_set_polarity,
120 .ioctl_wait_idle = NULL,
120}; 121};
121 122
122 123
@@ -176,6 +177,7 @@ static struct radeon_asic r300_asic = {
176 .hpd_fini = &r100_hpd_fini, 177 .hpd_fini = &r100_hpd_fini,
177 .hpd_sense = &r100_hpd_sense, 178 .hpd_sense = &r100_hpd_sense,
178 .hpd_set_polarity = &r100_hpd_set_polarity, 179 .hpd_set_polarity = &r100_hpd_set_polarity,
180 .ioctl_wait_idle = NULL,
179}; 181};
180 182
181/* 183/*
@@ -219,6 +221,7 @@ static struct radeon_asic r420_asic = {
219 .hpd_fini = &r100_hpd_fini, 221 .hpd_fini = &r100_hpd_fini,
220 .hpd_sense = &r100_hpd_sense, 222 .hpd_sense = &r100_hpd_sense,
221 .hpd_set_polarity = &r100_hpd_set_polarity, 223 .hpd_set_polarity = &r100_hpd_set_polarity,
224 .ioctl_wait_idle = NULL,
222}; 225};
223 226
224 227
@@ -267,6 +270,7 @@ static struct radeon_asic rs400_asic = {
267 .hpd_fini = &r100_hpd_fini, 270 .hpd_fini = &r100_hpd_fini,
268 .hpd_sense = &r100_hpd_sense, 271 .hpd_sense = &r100_hpd_sense,
269 .hpd_set_polarity = &r100_hpd_set_polarity, 272 .hpd_set_polarity = &r100_hpd_set_polarity,
273 .ioctl_wait_idle = NULL,
270}; 274};
271 275
272 276
@@ -323,6 +327,7 @@ static struct radeon_asic rs600_asic = {
323 .hpd_fini = &rs600_hpd_fini, 327 .hpd_fini = &rs600_hpd_fini,
324 .hpd_sense = &rs600_hpd_sense, 328 .hpd_sense = &rs600_hpd_sense,
325 .hpd_set_polarity = &rs600_hpd_set_polarity, 329 .hpd_set_polarity = &rs600_hpd_set_polarity,
330 .ioctl_wait_idle = NULL,
326}; 331};
327 332
328 333
@@ -370,6 +375,7 @@ static struct radeon_asic rs690_asic = {
370 .hpd_fini = &rs600_hpd_fini, 375 .hpd_fini = &rs600_hpd_fini,
371 .hpd_sense = &rs600_hpd_sense, 376 .hpd_sense = &rs600_hpd_sense,
372 .hpd_set_polarity = &rs600_hpd_set_polarity, 377 .hpd_set_polarity = &rs600_hpd_set_polarity,
378 .ioctl_wait_idle = NULL,
373}; 379};
374 380
375 381
@@ -421,6 +427,7 @@ static struct radeon_asic rv515_asic = {
421 .hpd_fini = &rs600_hpd_fini, 427 .hpd_fini = &rs600_hpd_fini,
422 .hpd_sense = &rs600_hpd_sense, 428 .hpd_sense = &rs600_hpd_sense,
423 .hpd_set_polarity = &rs600_hpd_set_polarity, 429 .hpd_set_polarity = &rs600_hpd_set_polarity,
430 .ioctl_wait_idle = NULL,
424}; 431};
425 432
426 433
@@ -463,6 +470,7 @@ static struct radeon_asic r520_asic = {
463 .hpd_fini = &rs600_hpd_fini, 470 .hpd_fini = &rs600_hpd_fini,
464 .hpd_sense = &rs600_hpd_sense, 471 .hpd_sense = &rs600_hpd_sense,
465 .hpd_set_polarity = &rs600_hpd_set_polarity, 472 .hpd_set_polarity = &rs600_hpd_set_polarity,
473 .ioctl_wait_idle = NULL,
466}; 474};
467 475
468/* 476/*
@@ -504,6 +512,7 @@ void r600_hpd_fini(struct radeon_device *rdev);
504bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); 512bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
505void r600_hpd_set_polarity(struct radeon_device *rdev, 513void r600_hpd_set_polarity(struct radeon_device *rdev,
506 enum radeon_hpd_id hpd); 514 enum radeon_hpd_id hpd);
515extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
507 516
508static struct radeon_asic r600_asic = { 517static struct radeon_asic r600_asic = {
509 .init = &r600_init, 518 .init = &r600_init,
@@ -538,6 +547,7 @@ static struct radeon_asic r600_asic = {
538 .hpd_fini = &r600_hpd_fini, 547 .hpd_fini = &r600_hpd_fini,
539 .hpd_sense = &r600_hpd_sense, 548 .hpd_sense = &r600_hpd_sense,
540 .hpd_set_polarity = &r600_hpd_set_polarity, 549 .hpd_set_polarity = &r600_hpd_set_polarity,
550 .ioctl_wait_idle = r600_ioctl_wait_idle,
541}; 551};
542 552
543/* 553/*
@@ -582,6 +592,7 @@ static struct radeon_asic rv770_asic = {
582 .hpd_fini = &r600_hpd_fini, 592 .hpd_fini = &r600_hpd_fini,
583 .hpd_sense = &r600_hpd_sense, 593 .hpd_sense = &r600_hpd_sense,
584 .hpd_set_polarity = &r600_hpd_set_polarity, 594 .hpd_set_polarity = &r600_hpd_set_polarity,
595 .ioctl_wait_idle = r600_ioctl_wait_idle,
585}; 596};
586 597
587#endif 598#endif
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 579c8920e081..e7b19440102e 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -971,8 +971,7 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
971 lvds->native_mode.vdisplay); 971 lvds->native_mode.vdisplay);
972 972
973 lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c); 973 lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c);
974 if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0) 974 lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000);
975 lvds->panel_vcc_delay = 2000;
976 975
977 lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24); 976 lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24);
978 lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf; 977 lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 55266416fa47..2d8e5a70f284 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1343,7 +1343,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
1343 radeon_connector->dac_load_detect = false; 1343 radeon_connector->dac_load_detect = false;
1344 drm_connector_attach_property(&radeon_connector->base, 1344 drm_connector_attach_property(&radeon_connector->base,
1345 rdev->mode_info.load_detect_property, 1345 rdev->mode_info.load_detect_property,
1346 1); 1346 radeon_connector->dac_load_detect);
1347 drm_connector_attach_property(&radeon_connector->base, 1347 drm_connector_attach_property(&radeon_connector->base,
1348 rdev->mode_info.tv_std_property, 1348 rdev->mode_info.tv_std_property,
1349 radeon_combios_get_tv_info(rdev)); 1349 radeon_combios_get_tv_info(rdev));
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 0e1325e18534..db8e9a355a01 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -308,6 +308,9 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
308 } 308 }
309 robj = gobj->driver_private; 309 robj = gobj->driver_private;
310 r = radeon_bo_wait(robj, NULL, false); 310 r = radeon_bo_wait(robj, NULL, false);
311 /* callback hw specific functions if any */
312 if (robj->rdev->asic->ioctl_wait_idle)
313 robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
311 mutex_lock(&dev->struct_mutex); 314 mutex_lock(&dev->struct_mutex);
312 drm_gem_object_unreference(gobj); 315 drm_gem_object_unreference(gobj);
313 mutex_unlock(&dev->struct_mutex); 316 mutex_unlock(&dev->struct_mutex);
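
The wait-idle ioctl now consults an optional per-ASIC hook: chips that need the MMIO HDP flush set ioctl_wait_idle in their radeon_asic table, everything else leaves it NULL and the call is skipped. A toy sketch of that NULL-checked callback pattern, with invented types:

/* Sketch of the optional per-ASIC hook pattern used above.  Toy types, not
 * the radeon structures. */
#include <stdio.h>

struct asic_ops {
	void (*ioctl_wait_idle)(int bo_id);	/* NULL when the hw needs nothing extra */
};

static void r600_wait_idle_hook(int bo_id)
{
	printf("MMIO HDP flush for bo %d\n", bo_id);
}

static const struct asic_ops r100_ops = { .ioctl_wait_idle = NULL };
static const struct asic_ops r600_ops = { .ioctl_wait_idle = r600_wait_idle_hook };

static void wait_idle_ioctl(const struct asic_ops *ops, int bo_id)
{
	/* ... wait for the buffer to go idle ... */
	if (ops->ioctl_wait_idle)		/* hw-specific fixup, if any */
		ops->ioctl_wait_idle(bo_id);
}

int main(void)
{
	wait_idle_ioctl(&r100_ops, 1);		/* no extra work */
	wait_idle_ioctl(&r600_ops, 2);		/* triggers the MMIO flush */
	return 0;
}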
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 9f5418983e2a..287fcebfb4e6 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -223,15 +223,31 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
223 return 0; 223 return 0;
224} 224}
225 225
226int rs400_mc_wait_for_idle(struct radeon_device *rdev)
227{
228 unsigned i;
229 uint32_t tmp;
230
231 for (i = 0; i < rdev->usec_timeout; i++) {
232 /* read MC_STATUS */
233 tmp = RREG32(0x0150);
234 if (tmp & (1 << 2)) {
235 return 0;
236 }
237 DRM_UDELAY(1);
238 }
239 return -1;
240}
241
226void rs400_gpu_init(struct radeon_device *rdev) 242void rs400_gpu_init(struct radeon_device *rdev)
227{ 243{
228 /* FIXME: HDP same place on rs400 ? */ 244 /* FIXME: HDP same place on rs400 ? */
229 r100_hdp_reset(rdev); 245 r100_hdp_reset(rdev);
230 /* FIXME: is this correct ? */ 246 /* FIXME: is this correct ? */
231 r420_pipes_init(rdev); 247 r420_pipes_init(rdev);
232 if (r300_mc_wait_for_idle(rdev)) { 248 if (rs400_mc_wait_for_idle(rdev)) {
233 printk(KERN_WARNING "Failed to wait MC idle while " 249 printk(KERN_WARNING "rs400: Failed to wait MC idle while "
234 "programming pipes. Bad things might happen.\n"); 250 "programming pipes. Bad things might happen. %08x\n", RREG32(0x150));
235 } 251 }
236} 252}
237 253
@@ -370,8 +386,8 @@ void rs400_mc_program(struct radeon_device *rdev)
370 r100_mc_stop(rdev, &save); 386 r100_mc_stop(rdev, &save);
371 387
372 /* Wait for mc idle */ 388 /* Wait for mc idle */
373 if (r300_mc_wait_for_idle(rdev)) 389 if (rs400_mc_wait_for_idle(rdev))
374 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); 390 dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
375 WREG32(R_000148_MC_FB_LOCATION, 391 WREG32(R_000148_MC_FB_LOCATION,
376 S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | 392 S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
377 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); 393 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
@@ -448,7 +464,6 @@ int rs400_suspend(struct radeon_device *rdev)
448 464
449void rs400_fini(struct radeon_device *rdev) 465void rs400_fini(struct radeon_device *rdev)
450{ 466{
451 rs400_suspend(rdev);
452 r100_cp_fini(rdev); 467 r100_cp_fini(rdev);
453 r100_wb_fini(rdev); 468 r100_wb_fini(rdev);
454 r100_ib_fini(rdev); 469 r100_ib_fini(rdev);
@@ -527,7 +542,6 @@ int rs400_init(struct radeon_device *rdev)
527 if (r) { 542 if (r) {
528 /* Somethings want wront with the accel init stop accel */ 543 /* Somethings want wront with the accel init stop accel */
529 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 544 dev_err(rdev->dev, "Disabling GPU acceleration\n");
530 rs400_suspend(rdev);
531 r100_cp_fini(rdev); 545 r100_cp_fini(rdev);
532 r100_wb_fini(rdev); 546 r100_wb_fini(rdev);
533 r100_ib_fini(rdev); 547 r100_ib_fini(rdev);
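
rs400_mc_wait_for_idle() added above is a bounded poll: read the MC status register each microsecond, return 0 as soon as bit 2 reports idle, and give up with -1 after usec_timeout iterations. The sketch below mocks the register read so the loop can run standalone; bit 2 and the timeout shape follow the hunk, the rest is illustrative.

/* Sketch of the poll-with-timeout loop rs400_mc_wait_for_idle() adds above.
 * The register read is mocked. */
#include <stdio.h>
#include <stdint.h>

static int reads;

static uint32_t read_mc_status(void)	/* stands in for RREG32(0x0150) */
{
	return ++reads >= 5 ? (1u << 2) : 0;	/* pretend idle after 5 polls */
}

static int mc_wait_for_idle(unsigned usec_timeout)
{
	for (unsigned i = 0; i < usec_timeout; i++) {
		if (read_mc_status() & (1u << 2))
			return 0;		/* MC reports idle */
		/* DRM_UDELAY(1) in the driver; nothing to wait on here */
	}
	return -1;				/* timed out */
}

int main(void)
{
	printf("wait_for_idle: %d after %d reads\n",
	       mc_wait_for_idle(100000), reads);
	return 0;
}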
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index d5255751e7b3..c3818562a13e 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -610,7 +610,6 @@ int rs600_suspend(struct radeon_device *rdev)
610 610
611void rs600_fini(struct radeon_device *rdev) 611void rs600_fini(struct radeon_device *rdev)
612{ 612{
613 rs600_suspend(rdev);
614 r100_cp_fini(rdev); 613 r100_cp_fini(rdev);
615 r100_wb_fini(rdev); 614 r100_wb_fini(rdev);
616 r100_ib_fini(rdev); 615 r100_ib_fini(rdev);
@@ -689,7 +688,6 @@ int rs600_init(struct radeon_device *rdev)
689 if (r) { 688 if (r) {
690 /* Somethings want wront with the accel init stop accel */ 689 /* Somethings want wront with the accel init stop accel */
691 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 690 dev_err(rdev->dev, "Disabling GPU acceleration\n");
692 rs600_suspend(rdev);
693 r100_cp_fini(rdev); 691 r100_cp_fini(rdev);
694 r100_wb_fini(rdev); 692 r100_wb_fini(rdev);
695 r100_ib_fini(rdev); 693 r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index cd31da913771..06e2771aee5a 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -676,7 +676,6 @@ int rs690_suspend(struct radeon_device *rdev)
676 676
677void rs690_fini(struct radeon_device *rdev) 677void rs690_fini(struct radeon_device *rdev)
678{ 678{
679 rs690_suspend(rdev);
680 r100_cp_fini(rdev); 679 r100_cp_fini(rdev);
681 r100_wb_fini(rdev); 680 r100_wb_fini(rdev);
682 r100_ib_fini(rdev); 681 r100_ib_fini(rdev);
@@ -756,7 +755,6 @@ int rs690_init(struct radeon_device *rdev)
756 if (r) { 755 if (r) {
757 /* Somethings want wront with the accel init stop accel */ 756 /* Somethings want wront with the accel init stop accel */
758 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 757 dev_err(rdev->dev, "Disabling GPU acceleration\n");
759 rs690_suspend(rdev);
760 r100_cp_fini(rdev); 758 r100_cp_fini(rdev);
761 r100_wb_fini(rdev); 759 r100_wb_fini(rdev);
762 r100_ib_fini(rdev); 760 r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 62756717b044..0e1e6b8632b8 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -537,7 +537,6 @@ void rv515_set_safe_registers(struct radeon_device *rdev)
537 537
538void rv515_fini(struct radeon_device *rdev) 538void rv515_fini(struct radeon_device *rdev)
539{ 539{
540 rv515_suspend(rdev);
541 r100_cp_fini(rdev); 540 r100_cp_fini(rdev);
542 r100_wb_fini(rdev); 541 r100_wb_fini(rdev);
543 r100_ib_fini(rdev); 542 r100_ib_fini(rdev);
@@ -615,13 +614,12 @@ int rv515_init(struct radeon_device *rdev)
615 if (r) { 614 if (r) {
616 /* Somethings want wront with the accel init stop accel */ 615 /* Somethings want wront with the accel init stop accel */
617 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 616 dev_err(rdev->dev, "Disabling GPU acceleration\n");
618 rv515_suspend(rdev);
619 r100_cp_fini(rdev); 617 r100_cp_fini(rdev);
620 r100_wb_fini(rdev); 618 r100_wb_fini(rdev);
621 r100_ib_fini(rdev); 619 r100_ib_fini(rdev);
620 radeon_irq_kms_fini(rdev);
622 rv370_pcie_gart_fini(rdev); 621 rv370_pcie_gart_fini(rdev);
623 radeon_agp_fini(rdev); 622 radeon_agp_fini(rdev);
624 radeon_irq_kms_fini(rdev);
625 rdev->accel_working = false; 623 rdev->accel_working = false;
626 } 624 }
627 return 0; 625 return 0;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index afd9e8213c29..5943d561fd1e 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -887,6 +887,12 @@ static int rv770_startup(struct radeon_device *rdev)
887 return r; 887 return r;
888 } 888 }
889 rv770_gpu_init(rdev); 889 rv770_gpu_init(rdev);
890 r = r600_blit_init(rdev);
891 if (r) {
892 r600_blit_fini(rdev);
893 rdev->asic->copy = NULL;
894 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
895 }
890 /* pin copy shader into vram */ 896 /* pin copy shader into vram */
891 if (rdev->r600_blit.shader_obj) { 897 if (rdev->r600_blit.shader_obj) {
892 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); 898 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
@@ -1055,19 +1061,15 @@ int rv770_init(struct radeon_device *rdev)
1055 r = r600_pcie_gart_init(rdev); 1061 r = r600_pcie_gart_init(rdev);
1056 if (r) 1062 if (r)
1057 return r; 1063 return r;
1058 r = r600_blit_init(rdev);
1059 if (r) {
1060 r600_blit_fini(rdev);
1061 rdev->asic->copy = NULL;
1062 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
1063 }
1064 1064
1065 rdev->accel_working = true; 1065 rdev->accel_working = true;
1066 r = rv770_startup(rdev); 1066 r = rv770_startup(rdev);
1067 if (r) { 1067 if (r) {
1068 rv770_suspend(rdev); 1068 dev_err(rdev->dev, "disabling GPU acceleration\n");
1069 r600_cp_fini(rdev);
1069 r600_wb_fini(rdev); 1070 r600_wb_fini(rdev);
1070 radeon_ring_fini(rdev); 1071 r600_irq_fini(rdev);
1072 radeon_irq_kms_fini(rdev);
1071 rv770_pcie_gart_fini(rdev); 1073 rv770_pcie_gart_fini(rdev);
1072 rdev->accel_working = false; 1074 rdev->accel_working = false;
1073 } 1075 }
@@ -1089,13 +1091,11 @@ int rv770_init(struct radeon_device *rdev)
1089 1091
1090void rv770_fini(struct radeon_device *rdev) 1092void rv770_fini(struct radeon_device *rdev)
1091{ 1093{
1092 rv770_suspend(rdev);
1093
1094 r600_blit_fini(rdev); 1094 r600_blit_fini(rdev);
1095 r600_cp_fini(rdev);
1096 r600_wb_fini(rdev);
1095 r600_irq_fini(rdev); 1097 r600_irq_fini(rdev);
1096 radeon_irq_kms_fini(rdev); 1098 radeon_irq_kms_fini(rdev);
1097 radeon_ring_fini(rdev);
1098 r600_wb_fini(rdev);
1099 rv770_pcie_gart_fini(rdev); 1099 rv770_pcie_gart_fini(rdev);
1100 radeon_gem_fini(rdev); 1100 radeon_gem_fini(rdev);
1101 radeon_fence_driver_fini(rdev); 1101 radeon_fence_driver_fini(rdev);
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index a31e77c776ae..b8156b4893bb 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -179,7 +179,7 @@ static const unsigned short normal_i2c[] = { 0x58, 0x5C, I2C_CLIENT_END };
179 * 179 *
180 * Some, but not all, of these voltages have low/high limits. 180 * Some, but not all, of these voltages have low/high limits.
181 */ 181 */
182#define ADT7462_VOLT_COUNT 12 182#define ADT7462_VOLT_COUNT 13
183 183
184#define ADT7462_VENDOR 0x41 184#define ADT7462_VENDOR 0x41
185#define ADT7462_DEVICE 0x62 185#define ADT7462_DEVICE 0x62
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index cadcbd90ff3b..72ff2c4e757d 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -851,17 +851,16 @@ static struct lm78_data *lm78_update_device(struct device *dev)
851static int __init lm78_isa_found(unsigned short address) 851static int __init lm78_isa_found(unsigned short address)
852{ 852{
853 int val, save, found = 0; 853 int val, save, found = 0;
854 854 int port;
855 /* We have to request the region in two parts because some 855
856 boards declare base+4 to base+7 as a PNP device */ 856 /* Some boards declare base+0 to base+7 as a PNP device, some base+4
857 if (!request_region(address, 4, "lm78")) { 857 * to base+7 and some base+5 to base+6. So we better request each port
858 pr_debug("lm78: Failed to request low part of region\n"); 858 * individually for the probing phase. */
859 return 0; 859 for (port = address; port < address + LM78_EXTENT; port++) {
860 } 860 if (!request_region(port, 1, "lm78")) {
861 if (!request_region(address + 4, 4, "lm78")) { 861 pr_debug("lm78: Failed to request port 0x%x\n", port);
862 pr_debug("lm78: Failed to request high part of region\n"); 862 goto release;
863 release_region(address, 4); 863 }
864 return 0;
865 } 864 }
866 865
867#define REALLY_SLOW_IO 866#define REALLY_SLOW_IO
@@ -925,8 +924,8 @@ static int __init lm78_isa_found(unsigned short address)
925 val & 0x80 ? "LM79" : "LM78", (int)address); 924 val & 0x80 ? "LM79" : "LM78", (int)address);
926 925
927 release: 926 release:
928 release_region(address + 4, 4); 927 for (port--; port >= address; port--)
929 release_region(address, 4); 928 release_region(port, 1);
930 return found; 929 return found;
931} 930}
932 931
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index 05f9225b6f94..32d4adee73db 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -1793,17 +1793,17 @@ static int __init
1793w83781d_isa_found(unsigned short address) 1793w83781d_isa_found(unsigned short address)
1794{ 1794{
1795 int val, save, found = 0; 1795 int val, save, found = 0;
1796 1796 int port;
1797 /* We have to request the region in two parts because some 1797
1798 boards declare base+4 to base+7 as a PNP device */ 1798 /* Some boards declare base+0 to base+7 as a PNP device, some base+4
1799 if (!request_region(address, 4, "w83781d")) { 1799 * to base+7 and some base+5 to base+6. So we better request each port
1800 pr_debug("w83781d: Failed to request low part of region\n"); 1800 * individually for the probing phase. */
1801 return 0; 1801 for (port = address; port < address + W83781D_EXTENT; port++) {
1802 } 1802 if (!request_region(port, 1, "w83781d")) {
1803 if (!request_region(address + 4, 4, "w83781d")) { 1803 pr_debug("w83781d: Failed to request port 0x%x\n",
1804 pr_debug("w83781d: Failed to request high part of region\n"); 1804 port);
1805 release_region(address, 4); 1805 goto release;
1806 return 0; 1806 }
1807 } 1807 }
1808 1808
1809#define REALLY_SLOW_IO 1809#define REALLY_SLOW_IO
@@ -1877,8 +1877,8 @@ w83781d_isa_found(unsigned short address)
1877 val == 0x30 ? "W83782D" : "W83781D", (int)address); 1877 val == 0x30 ? "W83782D" : "W83781D", (int)address);
1878 1878
1879 release: 1879 release:
1880 release_region(address + 4, 4); 1880 for (port--; port >= address; port--)
1881 release_region(address, 4); 1881 release_region(port, 1);
1882 return found; 1882 return found;
1883} 1883}
1884 1884
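The lm78 and w83781d hunks above replace the two fixed 4-byte request_region() calls with a per-port loop that, on failure, walks back over whatever was already claimed and releases it in reverse order. A minimal userspace sketch of that acquire-with-rollback pattern follows; try_acquire() and release() are hypothetical stand-ins for request_region()/release_region(), and the failing port is chosen arbitrarily so the rollback path is exercised.

#include <stdbool.h>
#include <stdio.h>

#define EXTENT 8  /* number of consecutive ports to claim, like LM78_EXTENT */

/* Stand-ins for request_region()/release_region(); ports whose number is
 * congruent to 4 mod 5 are "already taken" so the rollback path runs. */
static bool try_acquire(unsigned int port) { return port % 5 != 4; }
static void release(unsigned int port)     { printf("released 0x%x\n", port); }

/* Claim [base, base + EXTENT) one port at a time; on failure, release
 * everything acquired so far in reverse order and report failure. */
static bool acquire_range(unsigned int base)
{
        unsigned int port;

        for (port = base; port < base + EXTENT; port++) {
                if (!try_acquire(port))
                        goto release_acquired;
        }
        return true;

release_acquired:
        for (port--; port >= base; port--)
                release(port);
        return false;
}

int main(void)
{
        printf("range claimed: %s\n", acquire_range(0x290) ? "yes" : "no");
        return 0;
}

The reverse-walking release loop is the same shape as the patch's "for (port--; port >= address; port--)" cleanup, which only releases ports that were actually requested.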
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index b1c050ff311d..e29b6d5ba8ef 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -13,6 +13,7 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/types.h>
16 17
17/* include interfaces to usb layer */ 18/* include interfaces to usb layer */
18#include <linux/usb.h> 19#include <linux/usb.h>
@@ -31,8 +32,8 @@
31#define CMD_I2C_IO_END (1<<1) 32#define CMD_I2C_IO_END (1<<1)
32 33
33/* i2c bit delay, default is 10us -> 100kHz */ 34/* i2c bit delay, default is 10us -> 100kHz */
34static int delay = 10; 35static unsigned short delay = 10;
35module_param(delay, int, 0); 36module_param(delay, ushort, 0);
36MODULE_PARM_DESC(delay, "bit delay in microseconds, " 37MODULE_PARM_DESC(delay, "bit delay in microseconds, "
37 "e.g. 10 for 100kHz (default is 100kHz)"); 38 "e.g. 10 for 100kHz (default is 100kHz)");
38 39
@@ -109,7 +110,7 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
109 110
110static u32 usb_func(struct i2c_adapter *adapter) 111static u32 usb_func(struct i2c_adapter *adapter)
111{ 112{
112 u32 func; 113 __le32 func;
113 114
114 /* get functionality from adapter */ 115 /* get functionality from adapter */
115 if (usb_read(adapter, CMD_GET_FUNC, 0, 0, &func, sizeof(func)) != 116 if (usb_read(adapter, CMD_GET_FUNC, 0, 0, &func, sizeof(func)) !=
@@ -118,7 +119,7 @@ static u32 usb_func(struct i2c_adapter *adapter)
118 return 0; 119 return 0;
119 } 120 }
120 121
121 return func; 122 return le32_to_cpu(func);
122} 123}
123 124
124/* This is the actual algorithm we define */ 125/* This is the actual algorithm we define */
@@ -216,8 +217,7 @@ static int i2c_tiny_usb_probe(struct usb_interface *interface,
216 "i2c-tiny-usb at bus %03d device %03d", 217 "i2c-tiny-usb at bus %03d device %03d",
217 dev->usb_dev->bus->busnum, dev->usb_dev->devnum); 218 dev->usb_dev->bus->busnum, dev->usb_dev->devnum);
218 219
219 if (usb_write(&dev->adapter, CMD_SET_DELAY, 220 if (usb_write(&dev->adapter, CMD_SET_DELAY, delay, 0, NULL, 0) != 0) {
220 cpu_to_le16(delay), 0, NULL, 0) != 0) {
221 dev_err(&dev->adapter.dev, 221 dev_err(&dev->adapter.dev,
222 "failure setting delay to %dus\n", delay); 222 "failure setting delay to %dus\n", delay);
223 retval = -EIO; 223 retval = -EIO;
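The i2c-tiny-usb change declares the value read back from the adapter as __le32 and converts it with le32_to_cpu() before returning, since the device always reports its functionality word in little-endian byte order. A self-contained sketch of the equivalent conversion is below; the byte values are made up for illustration and are not taken from the driver.

#include <stdint.h>
#include <stdio.h>

/* Build a host-order 32-bit value from a little-endian byte buffer,
 * independent of the host's endianness (the property le32_to_cpu()
 * provides in the kernel). */
static uint32_t le32_to_host(const uint8_t b[4])
{
        return (uint32_t)b[0] |
               ((uint32_t)b[1] << 8) |
               ((uint32_t)b[2] << 16) |
               ((uint32_t)b[3] << 24);
}

int main(void)
{
        /* Bytes as they would arrive over the wire, least significant first. */
        const uint8_t wire[4] = { 0x0d, 0x00, 0xef, 0x0c };

        printf("functionality word: 0x%08x\n", le32_to_host(wire));
        return 0;
}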
diff --git a/drivers/md/md.c b/drivers/md/md.c
index dd3dfe42d5a9..a20a71e5efd3 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -4075,8 +4075,10 @@ static void mddev_delayed_delete(struct work_struct *ws)
4075{ 4075{
4076 mddev_t *mddev = container_of(ws, mddev_t, del_work); 4076 mddev_t *mddev = container_of(ws, mddev_t, del_work);
4077 4077
4078 if (mddev->private == &md_redundancy_group) { 4078 if (mddev->private) {
4079 sysfs_remove_group(&mddev->kobj, &md_redundancy_group); 4079 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
4080 if (mddev->private != (void*)1)
4081 sysfs_remove_group(&mddev->kobj, mddev->private);
4080 if (mddev->sysfs_action) 4082 if (mddev->sysfs_action)
4081 sysfs_put(mddev->sysfs_action); 4083 sysfs_put(mddev->sysfs_action);
4082 mddev->sysfs_action = NULL; 4084 mddev->sysfs_action = NULL;
@@ -4287,10 +4289,7 @@ static int do_md_run(mddev_t * mddev)
4287 sysfs_notify_dirent(rdev->sysfs_state); 4289 sysfs_notify_dirent(rdev->sysfs_state);
4288 } 4290 }
4289 4291
4290 md_probe(mddev->unit, NULL, NULL);
4291 disk = mddev->gendisk; 4292 disk = mddev->gendisk;
4292 if (!disk)
4293 return -ENOMEM;
4294 4293
4295 spin_lock(&pers_lock); 4294 spin_lock(&pers_lock);
4296 pers = find_pers(mddev->level, mddev->clevel); 4295 pers = find_pers(mddev->level, mddev->clevel);
@@ -4530,8 +4529,8 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
4530 mddev->queue->unplug_fn = NULL; 4529 mddev->queue->unplug_fn = NULL;
4531 mddev->queue->backing_dev_info.congested_fn = NULL; 4530 mddev->queue->backing_dev_info.congested_fn = NULL;
4532 module_put(mddev->pers->owner); 4531 module_put(mddev->pers->owner);
4533 if (mddev->pers->sync_request) 4532 if (mddev->pers->sync_request && mddev->private == NULL)
4534 mddev->private = &md_redundancy_group; 4533 mddev->private = (void*)1;
4535 mddev->pers = NULL; 4534 mddev->pers = NULL;
4536 /* tell userspace to handle 'inactive' */ 4535 /* tell userspace to handle 'inactive' */
4537 sysfs_notify_dirent(mddev->sysfs_state); 4536 sysfs_notify_dirent(mddev->sysfs_state);
@@ -4578,9 +4577,6 @@ out:
4578 } 4577 }
4579 mddev->bitmap_info.offset = 0; 4578 mddev->bitmap_info.offset = 0;
4580 4579
4581 /* make sure all md_delayed_delete calls have finished */
4582 flush_scheduled_work();
4583
4584 export_array(mddev); 4580 export_array(mddev);
4585 4581
4586 mddev->array_sectors = 0; 4582 mddev->array_sectors = 0;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e84204eb12df..ceb24afdc147 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5136,9 +5136,8 @@ static int stop(mddev_t *mddev)
5136 mddev->thread = NULL; 5136 mddev->thread = NULL;
5137 mddev->queue->backing_dev_info.congested_fn = NULL; 5137 mddev->queue->backing_dev_info.congested_fn = NULL;
5138 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 5138 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
5139 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
5140 free_conf(conf); 5139 free_conf(conf);
5141 mddev->private = NULL; 5140 mddev->private = &raid5_attrs_group;
5142 return 0; 5141 return 0;
5143} 5142}
5144 5143
@@ -5464,11 +5463,11 @@ static int raid5_start_reshape(mddev_t *mddev)
5464 !test_bit(Faulty, &rdev->flags)) { 5463 !test_bit(Faulty, &rdev->flags)) {
5465 if (raid5_add_disk(mddev, rdev) == 0) { 5464 if (raid5_add_disk(mddev, rdev) == 0) {
5466 char nm[20]; 5465 char nm[20];
5467 if (rdev->raid_disk >= conf->previous_raid_disks) 5466 if (rdev->raid_disk >= conf->previous_raid_disks) {
5468 set_bit(In_sync, &rdev->flags); 5467 set_bit(In_sync, &rdev->flags);
5469 else 5468 added_devices++;
5469 } else
5470 rdev->recovery_offset = 0; 5470 rdev->recovery_offset = 0;
5471 added_devices++;
5472 sprintf(nm, "rd%d", rdev->raid_disk); 5471 sprintf(nm, "rd%d", rdev->raid_disk);
5473 if (sysfs_create_link(&mddev->kobj, 5472 if (sysfs_create_link(&mddev->kobj,
5474 &rdev->kobj, nm)) 5473 &rdev->kobj, nm))
@@ -5480,9 +5479,12 @@ static int raid5_start_reshape(mddev_t *mddev)
5480 break; 5479 break;
5481 } 5480 }
5482 5481
5482 /* When a reshape changes the number of devices, ->degraded
5483 * is measured against the large of the pre and post number of
5484 * devices.*/
5483 if (mddev->delta_disks > 0) { 5485 if (mddev->delta_disks > 0) {
5484 spin_lock_irqsave(&conf->device_lock, flags); 5486 spin_lock_irqsave(&conf->device_lock, flags);
5485 mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) 5487 mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
5486 - added_devices; 5488 - added_devices;
5487 spin_unlock_irqrestore(&conf->device_lock, flags); 5489 spin_unlock_irqrestore(&conf->device_lock, flags);
5488 } 5490 }
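The raid5_start_reshape() hunk switches to "degraded +=" because, once a reshape grows the array, missing devices are measured against the larger post-reshape device count: every new slot that did not receive a working disk adds to the existing degraded count. A small arithmetic sketch of that bookkeeping, with invented example numbers:

#include <stdio.h>

/* After growing an array, ->degraded is measured against the larger
 * (post-reshape) device count: each new slot without a working disk is
 * degraded, on top of whatever was already missing before the reshape. */
static int degraded_after_grow(int degraded_before, int prev_disks,
                               int new_disks, int added_devices)
{
        return degraded_before + (new_disks - prev_disks) - added_devices;
}

int main(void)
{
        /* Grow a clean 4-disk array to 6 disks but supply only one new disk:
         * the array now runs degraded by one device. */
        printf("degraded = %d\n", degraded_after_grow(0, 4, 6, 1));
        return 0;
}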
diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c
index becbaadb3b77..5ed75263340a 100644
--- a/drivers/media/common/saa7146_video.c
+++ b/drivers/media/common/saa7146_video.c
@@ -1333,9 +1333,9 @@ static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
1333 1333
1334 DEB_CAP(("vbuf:%p\n",vb)); 1334 DEB_CAP(("vbuf:%p\n",vb));
1335 1335
1336 release_all_pagetables(dev, buf);
1337
1338 saa7146_dma_free(dev,q,buf); 1336 saa7146_dma_free(dev,q,buf);
1337
1338 release_all_pagetables(dev, buf);
1339} 1339}
1340 1340
1341static struct videobuf_queue_ops video_qops = { 1341static struct videobuf_queue_ops video_qops = {
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index c37790ad92d0..9ddc57909d49 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -761,7 +761,6 @@ static int dvb_demux_open(struct inode *inode, struct file *file)
761 dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192); 761 dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
762 dmxdevfilter->type = DMXDEV_TYPE_NONE; 762 dmxdevfilter->type = DMXDEV_TYPE_NONE;
763 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED); 763 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
764 INIT_LIST_HEAD(&dmxdevfilter->feed.ts);
765 init_timer(&dmxdevfilter->timer); 764 init_timer(&dmxdevfilter->timer);
766 765
767 dvbdev->users++; 766 dvbdev->users++;
@@ -887,6 +886,7 @@ static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev,
887 dmxdevfilter->type = DMXDEV_TYPE_PES; 886 dmxdevfilter->type = DMXDEV_TYPE_PES;
888 memcpy(&dmxdevfilter->params, params, 887 memcpy(&dmxdevfilter->params, params,
889 sizeof(struct dmx_pes_filter_params)); 888 sizeof(struct dmx_pes_filter_params));
889 INIT_LIST_HEAD(&dmxdevfilter->feed.ts);
890 890
891 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET); 891 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);
892 892
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index b78cfb7d1897..67f189b7aa1f 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -426,16 +426,7 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
426 }; 426 };
427 }; 427 };
428 428
429 if (dvb_demux_tscheck) { 429 if (demux->cnt_storage) {
430 if (!demux->cnt_storage)
431 demux->cnt_storage = vmalloc(MAX_PID + 1);
432
433 if (!demux->cnt_storage) {
434 printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n");
435 dvb_demux_tscheck = 0;
436 goto no_dvb_demux_tscheck;
437 }
438
439 /* check pkt counter */ 430 /* check pkt counter */
440 if (pid < MAX_PID) { 431 if (pid < MAX_PID) {
441 if (buf[1] & 0x80) 432 if (buf[1] & 0x80)
@@ -454,7 +445,6 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
454 }; 445 };
455 /* end check */ 446 /* end check */
456 }; 447 };
457no_dvb_demux_tscheck:
458 448
459 list_for_each_entry(feed, &demux->feed_list, list_head) { 449 list_for_each_entry(feed, &demux->feed_list, list_head) {
460 if ((feed->pid != pid) && (feed->pid != 0x2000)) 450 if ((feed->pid != pid) && (feed->pid != 0x2000))
@@ -1246,6 +1236,7 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
1246 dvbdemux->feed = vmalloc(dvbdemux->feednum * sizeof(struct dvb_demux_feed)); 1236 dvbdemux->feed = vmalloc(dvbdemux->feednum * sizeof(struct dvb_demux_feed));
1247 if (!dvbdemux->feed) { 1237 if (!dvbdemux->feed) {
1248 vfree(dvbdemux->filter); 1238 vfree(dvbdemux->filter);
1239 dvbdemux->filter = NULL;
1249 return -ENOMEM; 1240 return -ENOMEM;
1250 } 1241 }
1251 for (i = 0; i < dvbdemux->filternum; i++) { 1242 for (i = 0; i < dvbdemux->filternum; i++) {
@@ -1257,6 +1248,13 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
1257 dvbdemux->feed[i].index = i; 1248 dvbdemux->feed[i].index = i;
1258 } 1249 }
1259 1250
1251 if (dvb_demux_tscheck) {
1252 dvbdemux->cnt_storage = vmalloc(MAX_PID + 1);
1253
1254 if (!dvbdemux->cnt_storage)
1255 printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n");
1256 }
1257
1260 INIT_LIST_HEAD(&dvbdemux->frontend_list); 1258 INIT_LIST_HEAD(&dvbdemux->frontend_list);
1261 1259
1262 for (i = 0; i < DMX_TS_PES_OTHER; i++) { 1260 for (i = 0; i < DMX_TS_PES_OTHER; i++) {
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index 7dfecfc6017c..ee5bff02a92c 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -93,9 +93,9 @@ static int ts_open(struct file *file)
93 dprintk("open dev=%s\n", video_device_node_name(vdev)); 93 dprintk("open dev=%s\n", video_device_node_name(vdev));
94 err = -EBUSY; 94 err = -EBUSY;
95 if (!mutex_trylock(&dev->empress_tsq.vb_lock)) 95 if (!mutex_trylock(&dev->empress_tsq.vb_lock))
96 goto done; 96 return err;
97 if (atomic_read(&dev->empress_users)) 97 if (atomic_read(&dev->empress_users))
98 goto done_up; 98 goto done;
99 99
100 /* Unmute audio */ 100 /* Unmute audio */
101 saa_writeb(SAA7134_AUDIO_MUTE_CTRL, 101 saa_writeb(SAA7134_AUDIO_MUTE_CTRL,
@@ -105,10 +105,8 @@ static int ts_open(struct file *file)
105 file->private_data = dev; 105 file->private_data = dev;
106 err = 0; 106 err = 0;
107 107
108done_up:
109 mutex_unlock(&dev->empress_tsq.vb_lock);
110done: 108done:
111 unlock_kernel(); 109 mutex_unlock(&dev->empress_tsq.vb_lock);
112 return err; 110 return err;
113} 111}
114 112
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index b9f1e84897cc..e7f8027165e6 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -74,6 +74,9 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test,
74 } 74 }
75 75
76 mrq->cmd->arg = dev_addr; 76 mrq->cmd->arg = dev_addr;
77 if (!mmc_card_blockaddr(test->card))
78 mrq->cmd->arg <<= 9;
79
77 mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC; 80 mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
78 81
79 if (blocks == 1) 82 if (blocks == 1)
@@ -190,7 +193,7 @@ static int __mmc_test_prepare(struct mmc_test_card *test, int write)
190 } 193 }
191 194
192 for (i = 0;i < BUFFER_SIZE / 512;i++) { 195 for (i = 0;i < BUFFER_SIZE / 512;i++) {
193 ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1); 196 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
194 if (ret) 197 if (ret)
195 return ret; 198 return ret;
196 } 199 }
@@ -219,7 +222,7 @@ static int mmc_test_cleanup(struct mmc_test_card *test)
219 memset(test->buffer, 0, 512); 222 memset(test->buffer, 0, 512);
220 223
221 for (i = 0;i < BUFFER_SIZE / 512;i++) { 224 for (i = 0;i < BUFFER_SIZE / 512;i++) {
222 ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1); 225 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
223 if (ret) 226 if (ret)
224 return ret; 227 return ret;
225 } 228 }
@@ -426,7 +429,7 @@ static int mmc_test_transfer(struct mmc_test_card *test,
426 for (i = 0;i < sectors;i++) { 429 for (i = 0;i < sectors;i++) {
427 ret = mmc_test_buffer_transfer(test, 430 ret = mmc_test_buffer_transfer(test,
428 test->buffer + i * 512, 431 test->buffer + i * 512,
429 dev_addr + i * 512, 512, 0); 432 dev_addr + i, 512, 0);
430 if (ret) 433 if (ret)
431 return ret; 434 return ret;
432 } 435 }
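The mmc_test hunks make mmc_test_buffer_transfer() take sector numbers and let mmc_test_prepare_mrq() shift the address left by 9 only when the card is not block-addressed. The convention being encoded — block-addressed (high-capacity) cards take a sector number in the command argument, byte-addressed (standard-capacity) cards take sector * 512 — can be sketched as follows; the helper name and example sector are illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Turn a sector number into a read/write command argument: block-addressed
 * cards take the sector number directly, byte-addressed cards take a byte
 * offset, i.e. sector << 9 for 512-byte sectors. */
static uint32_t mmc_cmd_arg(uint32_t sector, bool block_addressed)
{
        return block_addressed ? sector : sector << 9;
}

int main(void)
{
        printf("block-addressed arg for sector 100: 0x%08x\n",
               mmc_cmd_arg(100, true));
        printf("byte-addressed arg for sector 100:  0x%08x\n",
               mmc_cmd_arg(100, false));
        return 0;
}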
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index 62d9c9cc5671..1dd4403247ca 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -921,7 +921,7 @@ static int ax_probe(struct platform_device *pdev)
921 size = (res->end - res->start) + 1; 921 size = (res->end - res->start) + 1;
922 922
923 ax->mem2 = request_mem_region(res->start, size, pdev->name); 923 ax->mem2 = request_mem_region(res->start, size, pdev->name);
924 if (ax->mem == NULL) { 924 if (ax->mem2 == NULL) {
925 dev_err(&pdev->dev, "cannot reserve registers\n"); 925 dev_err(&pdev->dev, "cannot reserve registers\n");
926 ret = -ENXIO; 926 ret = -ENXIO;
927 goto exit_mem1; 927 goto exit_mem1;
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 9fd8e5ecd5d7..5bc74590c73e 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -276,8 +276,13 @@ struct be_adapter {
276 int link_speed; 276 int link_speed;
277 u8 port_type; 277 u8 port_type;
278 u8 transceiver; 278 u8 transceiver;
279 u8 generation; /* BladeEngine ASIC generation */
279}; 280};
280 281
282/* BladeEngine Generation numbers */
283#define BE_GEN2 2
284#define BE_GEN3 3
285
281extern const struct ethtool_ops be_ethtool_ops; 286extern const struct ethtool_ops be_ethtool_ops;
282 287
283#define drvr_stats(adapter) (&adapter->stats.drvr_stats) 288#define drvr_stats(adapter) (&adapter->stats.drvr_stats)
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index c002b8391b4d..13b33c841083 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -164,7 +164,8 @@ struct be_cmd_req_hdr {
164 u8 domain; /* dword 0 */ 164 u8 domain; /* dword 0 */
165 u32 timeout; /* dword 1 */ 165 u32 timeout; /* dword 1 */
166 u32 request_length; /* dword 2 */ 166 u32 request_length; /* dword 2 */
167 u32 rsvd; /* dword 3 */ 167 u8 version; /* dword 3 */
168 u8 rsvd[3]; /* dword 3 */
168}; 169};
169 170
170#define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */ 171#define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 33ab8c7f14fe..626b76c0ebc7 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -1350,7 +1350,7 @@ static irqreturn_t be_intx(int irq, void *dev)
1350 int isr; 1350 int isr;
1351 1351
1352 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET + 1352 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1353 be_pci_func(adapter) * CEV_ISR_SIZE); 1353 (adapter->tx_eq.q.id/ 8) * CEV_ISR_SIZE);
1354 if (!isr) 1354 if (!isr)
1355 return IRQ_NONE; 1355 return IRQ_NONE;
1356 1356
@@ -2051,6 +2051,7 @@ static void be_unmap_pci_bars(struct be_adapter *adapter)
2051static int be_map_pci_bars(struct be_adapter *adapter) 2051static int be_map_pci_bars(struct be_adapter *adapter)
2052{ 2052{
2053 u8 __iomem *addr; 2053 u8 __iomem *addr;
2054 int pcicfg_reg;
2054 2055
2055 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2), 2056 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2056 pci_resource_len(adapter->pdev, 2)); 2057 pci_resource_len(adapter->pdev, 2));
@@ -2064,8 +2065,13 @@ static int be_map_pci_bars(struct be_adapter *adapter)
2064 goto pci_map_err; 2065 goto pci_map_err;
2065 adapter->db = addr; 2066 adapter->db = addr;
2066 2067
2067 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 1), 2068 if (adapter->generation == BE_GEN2)
2068 pci_resource_len(adapter->pdev, 1)); 2069 pcicfg_reg = 1;
2070 else
2071 pcicfg_reg = 0;
2072
2073 addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg),
2074 pci_resource_len(adapter->pdev, pcicfg_reg));
2069 if (addr == NULL) 2075 if (addr == NULL)
2070 goto pci_map_err; 2076 goto pci_map_err;
2071 adapter->pcicfg = addr; 2077 adapter->pcicfg = addr;
@@ -2162,6 +2168,7 @@ static int be_stats_init(struct be_adapter *adapter)
2162 cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma); 2168 cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
2163 if (cmd->va == NULL) 2169 if (cmd->va == NULL)
2164 return -1; 2170 return -1;
2171 memset(cmd->va, 0, cmd->size);
2165 return 0; 2172 return 0;
2166} 2173}
2167 2174
@@ -2240,6 +2247,20 @@ static int __devinit be_probe(struct pci_dev *pdev,
2240 goto rel_reg; 2247 goto rel_reg;
2241 } 2248 }
2242 adapter = netdev_priv(netdev); 2249 adapter = netdev_priv(netdev);
2250
2251 switch (pdev->device) {
2252 case BE_DEVICE_ID1:
2253 case OC_DEVICE_ID1:
2254 adapter->generation = BE_GEN2;
2255 break;
2256 case BE_DEVICE_ID2:
2257 case OC_DEVICE_ID2:
2258 adapter->generation = BE_GEN3;
2259 break;
2260 default:
2261 adapter->generation = 0;
2262 }
2263
2243 adapter->pdev = pdev; 2264 adapter->pdev = pdev;
2244 pci_set_drvdata(pdev, adapter); 2265 pci_set_drvdata(pdev, adapter);
2245 adapter->netdev = netdev; 2266 adapter->netdev = netdev;
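The be2net changes record the ASIC generation from the PCI device ID at probe time and then use it to pick which BAR holds the PCICFG window (BAR 1 on Gen2 parts, BAR 0 otherwise). A standalone sketch of that mapping is below; the numeric device IDs are hypothetical placeholders, not the real values from be.h.

#include <stdio.h>

#define BE_GEN2 2
#define BE_GEN3 3

/* Hypothetical device IDs for illustration only. */
#define BE_DEVICE_ID1 0x0211
#define OC_DEVICE_ID1 0x0700
#define BE_DEVICE_ID2 0x0221
#define OC_DEVICE_ID2 0x0710

/* Map a PCI device ID to the ASIC generation. */
static int be_gen(unsigned int device)
{
        switch (device) {
        case BE_DEVICE_ID1:
        case OC_DEVICE_ID1:
                return BE_GEN2;
        case BE_DEVICE_ID2:
        case OC_DEVICE_ID2:
                return BE_GEN3;
        default:
                return 0;
        }
}

/* Pick the BAR that exposes the PCICFG window for that generation. */
static int be_pcicfg_bar(unsigned int device)
{
        return be_gen(device) == BE_GEN2 ? 1 : 0;
}

int main(void)
{
        printf("0x%04x -> gen %d, pcicfg BAR %d\n",
               BE_DEVICE_ID1, be_gen(BE_DEVICE_ID1), be_pcicfg_bar(BE_DEVICE_ID1));
        printf("0x%04x -> gen %d, pcicfg BAR %d\n",
               BE_DEVICE_ID2, be_gen(BE_DEVICE_ID2), be_pcicfg_bar(BE_DEVICE_ID2));
        return 0;
}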
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 3f0071cfe56b..efa0e41bf3ec 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3639,7 +3639,7 @@ static int bond_open(struct net_device *bond_dev)
3639 */ 3639 */
3640 if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB))) { 3640 if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB))) {
3641 /* something went wrong - fail the open operation */ 3641 /* something went wrong - fail the open operation */
3642 return -1; 3642 return -ENOMEM;
3643 } 3643 }
3644 3644
3645 INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor); 3645 INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index bdbd14727e4b..318a018ca7c5 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -2079,6 +2079,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2079 struct sge_fl *fl, int len, int complete) 2079 struct sge_fl *fl, int len, int complete)
2080{ 2080{
2081 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; 2081 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
2082 struct port_info *pi = netdev_priv(qs->netdev);
2082 struct sk_buff *skb = NULL; 2083 struct sk_buff *skb = NULL;
2083 struct cpl_rx_pkt *cpl; 2084 struct cpl_rx_pkt *cpl;
2084 struct skb_frag_struct *rx_frag; 2085 struct skb_frag_struct *rx_frag;
@@ -2116,11 +2117,18 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2116 2117
2117 if (!nr_frags) { 2118 if (!nr_frags) {
2118 offset = 2 + sizeof(struct cpl_rx_pkt); 2119 offset = 2 + sizeof(struct cpl_rx_pkt);
2119 qs->lro_va = sd->pg_chunk.va + 2; 2120 cpl = qs->lro_va = sd->pg_chunk.va + 2;
2120 }
2121 len -= offset;
2122 2121
2123 prefetch(qs->lro_va); 2122 if ((pi->rx_offload & T3_RX_CSUM) &&
2123 cpl->csum_valid && cpl->csum == htons(0xffff)) {
2124 skb->ip_summed = CHECKSUM_UNNECESSARY;
2125 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2126 } else
2127 skb->ip_summed = CHECKSUM_NONE;
2128 } else
2129 cpl = qs->lro_va;
2130
2131 len -= offset;
2124 2132
2125 rx_frag += nr_frags; 2133 rx_frag += nr_frags;
2126 rx_frag->page = sd->pg_chunk.page; 2134 rx_frag->page = sd->pg_chunk.page;
@@ -2136,12 +2144,8 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2136 return; 2144 return;
2137 2145
2138 skb_record_rx_queue(skb, qs - &adap->sge.qs[0]); 2146 skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
2139 skb->ip_summed = CHECKSUM_UNNECESSARY;
2140 cpl = qs->lro_va;
2141 2147
2142 if (unlikely(cpl->vlan_valid)) { 2148 if (unlikely(cpl->vlan_valid)) {
2143 struct net_device *dev = qs->netdev;
2144 struct port_info *pi = netdev_priv(dev);
2145 struct vlan_group *grp = pi->vlan_grp; 2149 struct vlan_group *grp = pi->vlan_grp;
2146 2150
2147 if (likely(grp != NULL)) { 2151 if (likely(grp != NULL)) {
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 997124d2992a..c881347cb26d 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -421,6 +421,8 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
421 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; 421 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
422 if (tx_queue > IGB_N0_QUEUE) 422 if (tx_queue > IGB_N0_QUEUE)
423 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; 423 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
424 if (!adapter->msix_entries && msix_vector == 0)
425 msixbm |= E1000_EIMS_OTHER;
424 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); 426 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
425 q_vector->eims_value = msixbm; 427 q_vector->eims_value = msixbm;
426 break; 428 break;
@@ -877,7 +879,6 @@ static int igb_request_irq(struct igb_adapter *adapter)
877{ 879{
878 struct net_device *netdev = adapter->netdev; 880 struct net_device *netdev = adapter->netdev;
879 struct pci_dev *pdev = adapter->pdev; 881 struct pci_dev *pdev = adapter->pdev;
880 struct e1000_hw *hw = &adapter->hw;
881 int err = 0; 882 int err = 0;
882 883
883 if (adapter->msix_entries) { 884 if (adapter->msix_entries) {
@@ -909,20 +910,7 @@ static int igb_request_irq(struct igb_adapter *adapter)
909 igb_setup_all_tx_resources(adapter); 910 igb_setup_all_tx_resources(adapter);
910 igb_setup_all_rx_resources(adapter); 911 igb_setup_all_rx_resources(adapter);
911 } else { 912 } else {
912 switch (hw->mac.type) { 913 igb_assign_vector(adapter->q_vector[0], 0);
913 case e1000_82575:
914 wr32(E1000_MSIXBM(0),
915 (E1000_EICR_RX_QUEUE0 |
916 E1000_EICR_TX_QUEUE0 |
917 E1000_EIMS_OTHER));
918 break;
919 case e1000_82580:
920 case e1000_82576:
921 wr32(E1000_IVAR0, E1000_IVAR_VALID);
922 break;
923 default:
924 break;
925 }
926 } 914 }
927 915
928 if (adapter->flags & IGB_FLAG_HAS_MSI) { 916 if (adapter->flags & IGB_FLAG_HAS_MSI) {
@@ -1140,6 +1128,8 @@ int igb_up(struct igb_adapter *adapter)
1140 } 1128 }
1141 if (adapter->msix_entries) 1129 if (adapter->msix_entries)
1142 igb_configure_msix(adapter); 1130 igb_configure_msix(adapter);
1131 else
1132 igb_assign_vector(adapter->q_vector[0], 0);
1143 1133
1144 /* Clear any pending interrupts. */ 1134 /* Clear any pending interrupts. */
1145 rd32(E1000_ICR); 1135 rd32(E1000_ICR);
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 297a5ddd77f0..2aa71a766c35 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -2117,6 +2117,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
2117 /* set time_stamp *before* dma to help avoid a possible race */ 2117 /* set time_stamp *before* dma to help avoid a possible race */
2118 buffer_info->time_stamp = jiffies; 2118 buffer_info->time_stamp = jiffies;
2119 buffer_info->next_to_watch = i; 2119 buffer_info->next_to_watch = i;
2120 buffer_info->mapped_as_page = false;
2120 buffer_info->dma = pci_map_single(pdev, skb->data, len, 2121 buffer_info->dma = pci_map_single(pdev, skb->data, len,
2121 PCI_DMA_TODEVICE); 2122 PCI_DMA_TODEVICE);
2122 if (pci_dma_mapping_error(pdev, buffer_info->dma)) 2123 if (pci_dma_mapping_error(pdev, buffer_info->dma))
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index 56f37f66b696..dd4883f642be 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -223,7 +223,7 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
223 223
224 if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] != 224 if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
225 adapter->dcb_cfg.bw_percentage[0][bwg_id]) { 225 adapter->dcb_cfg.bw_percentage[0][bwg_id]) {
226 adapter->dcb_set_bitmap |= BIT_PG_RX; 226 adapter->dcb_set_bitmap |= BIT_PG_TX;
227 adapter->dcb_set_bitmap |= BIT_RESETLINK; 227 adapter->dcb_set_bitmap |= BIT_RESETLINK;
228 } 228 }
229} 229}
@@ -341,6 +341,12 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
341 if (!adapter->dcb_set_bitmap) 341 if (!adapter->dcb_set_bitmap)
342 return DCB_NO_HW_CHG; 342 return DCB_NO_HW_CHG;
343 343
344 ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
345 adapter->ring_feature[RING_F_DCB].indices);
346
347 if (ret)
348 return DCB_NO_HW_CHG;
349
344 /* 350 /*
345 * Only take down the adapter if the configuration change 351 * Only take down the adapter if the configuration change
346 * requires a reset. 352 * requires a reset.
@@ -359,14 +365,6 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
359 } 365 }
360 } 366 }
361 367
362 ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
363 adapter->ring_feature[RING_F_DCB].indices);
364 if (ret) {
365 if (adapter->dcb_set_bitmap & BIT_RESETLINK)
366 clear_bit(__IXGBE_RESETTING, &adapter->state);
367 return DCB_NO_HW_CHG;
368 }
369
370 if (adapter->dcb_cfg.pfc_mode_enable) { 368 if (adapter->dcb_cfg.pfc_mode_enable) {
371 if ((adapter->hw.mac.type != ixgbe_mac_82598EB) && 369 if ((adapter->hw.mac.type != ixgbe_mac_82598EB) &&
372 (adapter->hw.fc.current_mode != ixgbe_fc_pfc)) 370 (adapter->hw.fc.current_mode != ixgbe_fc_pfc))
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index b5f64ad67975..7b7c8486c0bf 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -5179,7 +5179,7 @@ dma_error:
5179 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); 5179 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
5180 } 5180 }
5181 5181
5182 return count; 5182 return 0;
5183} 5183}
5184 5184
5185static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, 5185static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
@@ -5329,8 +5329,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
5329 struct ixgbe_adapter *adapter = netdev_priv(dev); 5329 struct ixgbe_adapter *adapter = netdev_priv(dev);
5330 int txq = smp_processor_id(); 5330 int txq = smp_processor_id();
5331 5331
5332 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) 5332 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
5333 while (unlikely(txq >= dev->real_num_tx_queues))
5334 txq -= dev->real_num_tx_queues;
5333 return txq; 5335 return txq;
5336 }
5334 5337
5335#ifdef IXGBE_FCOE 5338#ifdef IXGBE_FCOE
5336 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && 5339 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
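The ixgbe_select_queue() hunk keeps using the CPU id as the transmit queue but now folds it back into the valid range with repeated subtraction whenever it exceeds real_num_tx_queues. A tiny sketch of that folding, with arbitrary example CPU ids and queue count:

#include <stdio.h>

/* Fold an arbitrary CPU id onto [0, num_queues), as the ixgbe hunk does:
 * repeated subtraction instead of '%', cheap because the id rarely exceeds
 * the queue count by more than a few multiples. */
static unsigned int select_queue(unsigned int cpu, unsigned int num_queues)
{
        unsigned int txq = cpu;

        while (txq >= num_queues)
                txq -= num_queues;
        return txq;
}

int main(void)
{
        printf("cpu 0  -> queue %u\n", select_queue(0, 8));
        printf("cpu 13 -> queue %u\n", select_queue(13, 8));
        printf("cpu 23 -> queue %u\n", select_queue(23, 8));
        return 0;
}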
diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c
index c146304d8d6c..c0ceebccaa49 100644
--- a/drivers/net/ks8851_mll.c
+++ b/drivers/net/ks8851_mll.c
@@ -854,8 +854,8 @@ static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
854 854
855static irqreturn_t ks_irq(int irq, void *pw) 855static irqreturn_t ks_irq(int irq, void *pw)
856{ 856{
857 struct ks_net *ks = pw; 857 struct net_device *netdev = pw;
858 struct net_device *netdev = ks->netdev; 858 struct ks_net *ks = netdev_priv(netdev);
859 u16 status; 859 u16 status;
860 860
861 /*this should be the first in IRQ handler */ 861 /*this should be the first in IRQ handler */
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 9f9d6081959b..24279e6e55f5 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1941,7 +1941,7 @@ static void netxen_tx_timeout_task(struct work_struct *work)
1941 netif_wake_queue(adapter->netdev); 1941 netif_wake_queue(adapter->netdev);
1942 1942
1943 clear_bit(__NX_RESETTING, &adapter->state); 1943 clear_bit(__NX_RESETTING, &adapter->state);
1944 1944 return;
1945 } else { 1945 } else {
1946 clear_bit(__NX_RESETTING, &adapter->state); 1946 clear_bit(__NX_RESETTING, &adapter->state);
1947 if (!netxen_nic_reset_context(adapter)) { 1947 if (!netxen_nic_reset_context(adapter)) {
@@ -2240,7 +2240,9 @@ netxen_detach_work(struct work_struct *work)
2240 2240
2241 netxen_nic_down(adapter, netdev); 2241 netxen_nic_down(adapter, netdev);
2242 2242
2243 rtnl_lock();
2243 netxen_nic_detach(adapter); 2244 netxen_nic_detach(adapter);
2245 rtnl_unlock();
2244 2246
2245 status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); 2247 status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
2246 2248
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index d760650c5c04..67249c3c9f50 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1025,11 +1025,8 @@ static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
1025static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot) 1025static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot)
1026{ 1026{
1027 struct sky2_tx_le *le = sky2->tx_le + *slot; 1027 struct sky2_tx_le *le = sky2->tx_le + *slot;
1028 struct tx_ring_info *re = sky2->tx_ring + *slot;
1029 1028
1030 *slot = RING_NEXT(*slot, sky2->tx_ring_size); 1029 *slot = RING_NEXT(*slot, sky2->tx_ring_size);
1031 re->flags = 0;
1032 re->skb = NULL;
1033 le->ctrl = 0; 1030 le->ctrl = 0;
1034 return le; 1031 return le;
1035} 1032}
@@ -1622,8 +1619,7 @@ static unsigned tx_le_req(const struct sk_buff *skb)
1622 return count; 1619 return count;
1623} 1620}
1624 1621
1625static void sky2_tx_unmap(struct pci_dev *pdev, 1622static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re)
1626 const struct tx_ring_info *re)
1627{ 1623{
1628 if (re->flags & TX_MAP_SINGLE) 1624 if (re->flags & TX_MAP_SINGLE)
1629 pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr), 1625 pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr),
@@ -1633,6 +1629,7 @@ static void sky2_tx_unmap(struct pci_dev *pdev,
1633 pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr), 1629 pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr),
1634 pci_unmap_len(re, maplen), 1630 pci_unmap_len(re, maplen),
1635 PCI_DMA_TODEVICE); 1631 PCI_DMA_TODEVICE);
1632 re->flags = 0;
1636} 1633}
1637 1634
1638/* 1635/*
@@ -1839,6 +1836,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1839 dev->stats.tx_packets++; 1836 dev->stats.tx_packets++;
1840 dev->stats.tx_bytes += skb->len; 1837 dev->stats.tx_bytes += skb->len;
1841 1838
1839 re->skb = NULL;
1842 dev_kfree_skb_any(skb); 1840 dev_kfree_skb_any(skb);
1843 1841
1844 sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size); 1842 sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size);
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 95db60adde41..f9521136a869 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -1063,7 +1063,7 @@ static int netdev_open(struct net_device *dev)
1063 if (retval) { 1063 if (retval) {
1064 printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n", 1064 printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
1065 FIRMWARE_RX); 1065 FIRMWARE_RX);
1066 return retval; 1066 goto out_init;
1067 } 1067 }
1068 if (fw_rx->size % 4) { 1068 if (fw_rx->size % 4) {
1069 printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n", 1069 printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
@@ -1108,6 +1108,9 @@ out_tx:
1108 release_firmware(fw_tx); 1108 release_firmware(fw_tx);
1109out_rx: 1109out_rx:
1110 release_firmware(fw_rx); 1110 release_firmware(fw_rx);
1111out_init:
1112 if (retval)
1113 netdev_close(dev);
1111 return retval; 1114 return retval;
1112} 1115}
1113 1116
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 21e183a83b99..4f27f022fbf7 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -419,7 +419,7 @@ static int cdc_manage_power(struct usbnet *dev, int on)
419 419
420static const struct driver_info cdc_info = { 420static const struct driver_info cdc_info = {
421 .description = "CDC Ethernet Device", 421 .description = "CDC Ethernet Device",
422 .flags = FLAG_ETHER | FLAG_LINK_INTR, 422 .flags = FLAG_ETHER,
423 // .check_connect = cdc_check_connect, 423 // .check_connect = cdc_check_connect,
424 .bind = cdc_bind, 424 .bind = cdc_bind,
425 .unbind = usbnet_cdc_unbind, 425 .unbind = usbnet_cdc_unbind,
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 2ec61f08cfdb..ae371448b5a0 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -855,12 +855,11 @@ static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
855 } 855 }
856} 856}
857 857
858static void ath9k_hw_init_11a_eeprom_fix(struct ath_hw *ah) 858static void ath9k_hw_init_eeprom_fix(struct ath_hw *ah)
859{ 859{
860 u32 i, j; 860 u32 i, j;
861 861
862 if ((ah->hw_version.devid == AR9280_DEVID_PCI) && 862 if (ah->hw_version.devid == AR9280_DEVID_PCI) {
863 test_bit(ATH9K_MODE_11A, ah->caps.wireless_modes)) {
864 863
865 /* EEPROM Fixup */ 864 /* EEPROM Fixup */
866 for (i = 0; i < ah->iniModes.ia_rows; i++) { 865 for (i = 0; i < ah->iniModes.ia_rows; i++) {
@@ -980,7 +979,7 @@ int ath9k_hw_init(struct ath_hw *ah)
980 if (r) 979 if (r)
981 return r; 980 return r;
982 981
983 ath9k_hw_init_11a_eeprom_fix(ah); 982 ath9k_hw_init_eeprom_fix(ah);
984 983
985 r = ath9k_hw_init_macaddr(ah); 984 r = ath9k_hw_init_macaddr(ah);
986 if (r) { 985 if (r) {
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 996eb90263cc..643bea35686f 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -2655,10 +2655,10 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
2655 (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) { 2655 (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) {
2656 ath9k_ps_wakeup(sc); 2656 ath9k_ps_wakeup(sc);
2657 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); 2657 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
2658 ath_beacon_return(sc, avp);
2659 ath9k_ps_restore(sc); 2658 ath9k_ps_restore(sc);
2660 } 2659 }
2661 2660
2661 ath_beacon_return(sc, avp);
2662 sc->sc_flags &= ~SC_OP_BEACONS; 2662 sc->sc_flags &= ~SC_OP_BEACONS;
2663 2663
2664 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) { 2664 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index cde09a890b73..90fbdb25399e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -297,7 +297,7 @@ u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags,
297} 297}
298EXPORT_SYMBOL(iwl_add_station); 298EXPORT_SYMBOL(iwl_add_station);
299 299
300static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, const char *addr) 300static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, const u8 *addr)
301{ 301{
302 unsigned long flags; 302 unsigned long flags;
303 u8 sta_id = iwl_find_station(priv, addr); 303 u8 sta_id = iwl_find_station(priv, addr);
@@ -324,7 +324,7 @@ static void iwl_remove_sta_callback(struct iwl_priv *priv,
324{ 324{
325 struct iwl_rem_sta_cmd *rm_sta = 325 struct iwl_rem_sta_cmd *rm_sta =
326 (struct iwl_rem_sta_cmd *)cmd->cmd.payload; 326 (struct iwl_rem_sta_cmd *)cmd->cmd.payload;
327 const char *addr = rm_sta->addr; 327 const u8 *addr = rm_sta->addr;
328 328
329 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { 329 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
330 IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n", 330 IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index c74694345b6e..d58b94030ef3 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -338,6 +338,23 @@ static void __devinit quirk_s3_64M(struct pci_dev *dev)
338DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); 338DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M);
339DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); 339DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M);
340 340
341/*
342 * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS
343 * ver. 1.33 20070103) don't set the correct ISA PCI region header info.
344 * BAR0 should be 8 bytes; instead, it may be set to something like 8k
345 * (which conflicts w/ BAR1's memory range).
346 */
347static void __devinit quirk_cs5536_vsa(struct pci_dev *dev)
348{
349 if (pci_resource_len(dev, 0) != 8) {
350 struct resource *res = &dev->resource[0];
351 res->end = res->start + 8 - 1;
352 dev_info(&dev->dev, "CS5536 ISA bridge bug detected "
353 "(incorrect header); workaround applied.\n");
354 }
355}
356DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
357
341static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, 358static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region,
342 unsigned size, int nr, const char *name) 359 unsigned size, int nr, const char *name)
343{ 360{
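The new quirk_cs5536_vsa() fixup relies on the convention that a resource describes an inclusive [start, end] range, so forcing an N-byte window means setting end to start + N - 1. A standalone sketch of that arithmetic follows; the struct is a mock of the kernel's resource and the addresses are invented.

#include <stdint.h>
#include <stdio.h>

struct resource { uint64_t start, end; };  /* inclusive range, as in the kernel */

/* Length of an inclusive [start, end] resource. */
static uint64_t resource_len(const struct resource *r)
{
        return r->end - r->start + 1;
}

/* Clamp a resource to 'size' bytes, mirroring the CS5536 quirk that forces
 * BAR0 back to its real 8-byte window when the BIOS reports something else. */
static void clamp_resource(struct resource *r, uint64_t size)
{
        if (resource_len(r) != size)
                r->end = r->start + size - 1;
}

int main(void)
{
        struct resource bar0 = { 0x6000, 0x7fff };  /* bogus 8 KiB window */

        clamp_resource(&bar0, 8);
        printf("BAR0: 0x%llx-0x%llx (%llu bytes)\n",
               (unsigned long long)bar0.start,
               (unsigned long long)bar0.end,
               (unsigned long long)resource_len(&bar0));
        return 0;
}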
diff --git a/drivers/rtc/rtc-fm3130.c b/drivers/rtc/rtc-fm3130.c
index 3a7be11cc6b9..812c66755083 100644
--- a/drivers/rtc/rtc-fm3130.c
+++ b/drivers/rtc/rtc-fm3130.c
@@ -376,20 +376,22 @@ static int __devinit fm3130_probe(struct i2c_client *client,
376 } 376 }
377 377
378 /* Disabling calibration mode */ 378 /* Disabling calibration mode */
379 if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_CAL) 379 if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_CAL) {
380 i2c_smbus_write_byte_data(client, FM3130_RTC_CONTROL, 380 i2c_smbus_write_byte_data(client, FM3130_RTC_CONTROL,
381 fm3130->regs[FM3130_RTC_CONTROL] & 381 fm3130->regs[FM3130_RTC_CONTROL] &
382 ~(FM3130_RTC_CONTROL_BIT_CAL)); 382 ~(FM3130_RTC_CONTROL_BIT_CAL));
383 dev_warn(&client->dev, "Disabling calibration mode!\n"); 383 dev_warn(&client->dev, "Disabling calibration mode!\n");
384 }
384 385
385 /* Disabling read and write modes */ 386 /* Disabling read and write modes */
386 if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_WRITE || 387 if (fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_WRITE ||
387 fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_READ) 388 fm3130->regs[FM3130_RTC_CONTROL] & FM3130_RTC_CONTROL_BIT_READ) {
388 i2c_smbus_write_byte_data(client, FM3130_RTC_CONTROL, 389 i2c_smbus_write_byte_data(client, FM3130_RTC_CONTROL,
389 fm3130->regs[FM3130_RTC_CONTROL] & 390 fm3130->regs[FM3130_RTC_CONTROL] &
390 ~(FM3130_RTC_CONTROL_BIT_READ | 391 ~(FM3130_RTC_CONTROL_BIT_READ |
391 FM3130_RTC_CONTROL_BIT_WRITE)); 392 FM3130_RTC_CONTROL_BIT_WRITE));
392 dev_warn(&client->dev, "Disabling READ or WRITE mode!\n"); 393 dev_warn(&client->dev, "Disabling READ or WRITE mode!\n");
394 }
393 395
394 /* oscillator off? turn it on, so clock can tick. */ 396 /* oscillator off? turn it on, so clock can tick. */
395 if (fm3130->regs[FM3130_CAL_CONTROL] & FM3130_CAL_CONTROL_BIT_nOSCEN) 397 if (fm3130->regs[FM3130_CAL_CONTROL] & FM3130_CAL_CONTROL_BIT_nOSCEN)
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 999fe80c4051..62b654af9237 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -531,7 +531,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
531 qdio_siga_sync_q(q); 531 qdio_siga_sync_q(q);
532 get_buf_state(q, q->first_to_check, &state, 0); 532 get_buf_state(q, q->first_to_check, &state, 0);
533 533
534 if (state == SLSB_P_INPUT_PRIMED) 534 if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
535 /* more work coming */ 535 /* more work coming */
536 return 0; 536 return 0;
537 537
@@ -960,6 +960,8 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
960 qdio_handle_activate_check(cdev, intparm, cstat, 960 qdio_handle_activate_check(cdev, intparm, cstat,
961 dstat); 961 dstat);
962 break; 962 break;
963 case QDIO_IRQ_STATE_STOPPED:
964 break;
963 default: 965 default:
964 WARN_ON(1); 966 WARN_ON(1);
965 } 967 }
diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c
index 377f2712289e..ab2ab3c81834 100644
--- a/drivers/serial/uartlite.c
+++ b/drivers/serial/uartlite.c
@@ -394,7 +394,7 @@ static void ulite_console_write(struct console *co, const char *s,
394 spin_unlock_irqrestore(&port->lock, flags); 394 spin_unlock_irqrestore(&port->lock, flags);
395} 395}
396 396
397static int __init ulite_console_setup(struct console *co, char *options) 397static int __devinit ulite_console_setup(struct console *co, char *options)
398{ 398{
399 struct uart_port *port; 399 struct uart_port *port;
400 int baud = 9600; 400 int baud = 9600;
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index 0ceec123ddfd..bee558aed427 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -35,7 +35,9 @@
35#include <linux/usb.h> 35#include <linux/usb.h>
36#include <linux/platform_device.h> 36#include <linux/platform_device.h>
37#include <linux/io.h> 37#include <linux/io.h>
38#include <linux/mm.h>
38#include <linux/irq.h> 39#include <linux/irq.h>
40#include <asm/cacheflush.h>
39 41
40#include "../core/hcd.h" 42#include "../core/hcd.h"
41#include "r8a66597.h" 43#include "r8a66597.h"
@@ -820,6 +822,26 @@ static void enable_r8a66597_pipe(struct r8a66597 *r8a66597, struct urb *urb,
820 enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb); 822 enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb);
821} 823}
822 824
825static void r8a66597_urb_done(struct r8a66597 *r8a66597, struct urb *urb,
826 int status)
827__releases(r8a66597->lock)
828__acquires(r8a66597->lock)
829{
830 if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) {
831 void *ptr;
832
833 for (ptr = urb->transfer_buffer;
834 ptr < urb->transfer_buffer + urb->transfer_buffer_length;
835 ptr += PAGE_SIZE)
836 flush_dcache_page(virt_to_page(ptr));
837 }
838
839 usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb);
840 spin_unlock(&r8a66597->lock);
841 usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, status);
842 spin_lock(&r8a66597->lock);
843}
844
823/* this function must be called with interrupt disabled */ 845/* this function must be called with interrupt disabled */
824static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address) 846static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address)
825{ 847{
@@ -838,15 +860,9 @@ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address)
838 list_del(&td->queue); 860 list_del(&td->queue);
839 kfree(td); 861 kfree(td);
840 862
841 if (urb) { 863 if (urb)
842 usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), 864 r8a66597_urb_done(r8a66597, urb, -ENODEV);
843 urb);
844 865
845 spin_unlock(&r8a66597->lock);
846 usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb,
847 -ENODEV);
848 spin_lock(&r8a66597->lock);
849 }
850 break; 866 break;
851 } 867 }
852} 868}
@@ -1006,6 +1022,8 @@ static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port,
1006/* this function must be called with interrupt disabled */ 1022/* this function must be called with interrupt disabled */
1007static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port, 1023static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port,
1008 u16 syssts) 1024 u16 syssts)
1025__releases(r8a66597->lock)
1026__acquires(r8a66597->lock)
1009{ 1027{
1010 if (syssts == SE0) { 1028 if (syssts == SE0) {
1011 r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port)); 1029 r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port));
@@ -1023,7 +1041,9 @@ static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port,
1023 usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597)); 1041 usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597));
1024 } 1042 }
1025 1043
1044 spin_unlock(&r8a66597->lock);
1026 usb_hcd_poll_rh_status(r8a66597_to_hcd(r8a66597)); 1045 usb_hcd_poll_rh_status(r8a66597_to_hcd(r8a66597));
1046 spin_lock(&r8a66597->lock);
1027} 1047}
1028 1048
1029/* this function must be called with interrupt disabled */ 1049/* this function must be called with interrupt disabled */
@@ -1283,10 +1303,7 @@ __releases(r8a66597->lock) __acquires(r8a66597->lock)
1283 if (usb_pipeisoc(urb->pipe)) 1303 if (usb_pipeisoc(urb->pipe))
1284 urb->start_frame = r8a66597_get_frame(hcd); 1304 urb->start_frame = r8a66597_get_frame(hcd);
1285 1305
1286 usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb); 1306 r8a66597_urb_done(r8a66597, urb, status);
1287 spin_unlock(&r8a66597->lock);
1288 usb_hcd_giveback_urb(hcd, urb, status);
1289 spin_lock(&r8a66597->lock);
1290 } 1307 }
1291 1308
1292 if (restart) { 1309 if (restart) {
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index 66358fa825f3..b4b6deceed15 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -593,7 +593,8 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
593 */ 593 */
594static int imxfb_suspend(struct platform_device *dev, pm_message_t state) 594static int imxfb_suspend(struct platform_device *dev, pm_message_t state)
595{ 595{
596 struct imxfb_info *fbi = platform_get_drvdata(dev); 596 struct fb_info *info = platform_get_drvdata(dev);
597 struct imxfb_info *fbi = info->par;
597 598
598 pr_debug("%s\n", __func__); 599 pr_debug("%s\n", __func__);
599 600
@@ -603,7 +604,8 @@ static int imxfb_suspend(struct platform_device *dev, pm_message_t state)
603 604
604static int imxfb_resume(struct platform_device *dev) 605static int imxfb_resume(struct platform_device *dev)
605{ 606{
606 struct imxfb_info *fbi = platform_get_drvdata(dev); 607 struct fb_info *info = platform_get_drvdata(dev);
608 struct imxfb_info *fbi = info->par;
607 609
608 pr_debug("%s\n", __func__); 610 pr_debug("%s\n", __func__);
609 611
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index 054ef29be479..772ba3f45e6f 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -324,8 +324,11 @@ static void sdc_enable_channel(struct mx3fb_info *mx3_fbi)
324 unsigned long flags; 324 unsigned long flags;
325 dma_cookie_t cookie; 325 dma_cookie_t cookie;
326 326
327 dev_dbg(mx3fb->dev, "mx3fbi %p, desc %p, sg %p\n", mx3_fbi, 327 if (mx3_fbi->txd)
328 to_tx_desc(mx3_fbi->txd), to_tx_desc(mx3_fbi->txd)->sg); 328 dev_dbg(mx3fb->dev, "mx3fbi %p, desc %p, sg %p\n", mx3_fbi,
329 to_tx_desc(mx3_fbi->txd), to_tx_desc(mx3_fbi->txd)->sg);
330 else
331 dev_dbg(mx3fb->dev, "mx3fbi %p, txd = NULL\n", mx3_fbi);
329 332
330 /* This enables the channel */ 333 /* This enables the channel */
331 if (mx3_fbi->cookie < 0) { 334 if (mx3_fbi->cookie < 0) {
@@ -646,6 +649,7 @@ static int sdc_set_global_alpha(struct mx3fb_data *mx3fb, bool enable, uint8_t a
646 649
647static void sdc_set_brightness(struct mx3fb_data *mx3fb, uint8_t value) 650static void sdc_set_brightness(struct mx3fb_data *mx3fb, uint8_t value)
648{ 651{
652 dev_dbg(mx3fb->dev, "%s: value = %d\n", __func__, value);
649 /* This might be board-specific */ 653 /* This might be board-specific */
650 mx3fb_write_reg(mx3fb, 0x03000000UL | value << 16, SDC_PWM_CTRL); 654 mx3fb_write_reg(mx3fb, 0x03000000UL | value << 16, SDC_PWM_CTRL);
651 return; 655 return;
@@ -1486,12 +1490,12 @@ static int mx3fb_probe(struct platform_device *pdev)
1486 goto ersdc0; 1490 goto ersdc0;
1487 } 1491 }
1488 1492
1493 mx3fb->backlight_level = 255;
1494
1489 ret = init_fb_chan(mx3fb, to_idmac_chan(chan)); 1495 ret = init_fb_chan(mx3fb, to_idmac_chan(chan));
1490 if (ret < 0) 1496 if (ret < 0)
1491 goto eisdc0; 1497 goto eisdc0;
1492 1498
1493 mx3fb->backlight_level = 255;
1494
1495 return 0; 1499 return 0;
1496 1500
1497eisdc0: 1501eisdc0:
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index cf62b05e296a..7d6c2139891d 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -84,7 +84,7 @@ static const match_table_t tokens = {
84 84
85static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts) 85static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
86{ 86{
87 char *options; 87 char *options, *tmp_options;
88 substring_t args[MAX_OPT_ARGS]; 88 substring_t args[MAX_OPT_ARGS];
89 char *p; 89 char *p;
90 int option = 0; 90 int option = 0;
@@ -102,9 +102,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
102 if (!opts) 102 if (!opts)
103 return 0; 103 return 0;
104 104
105 options = kstrdup(opts, GFP_KERNEL); 105 tmp_options = kstrdup(opts, GFP_KERNEL);
106 if (!options) 106 if (!tmp_options) {
107 ret = -ENOMEM;
107 goto fail_option_alloc; 108 goto fail_option_alloc;
109 }
110 options = tmp_options;
108 111
109 while ((p = strsep(&options, ",")) != NULL) { 112 while ((p = strsep(&options, ",")) != NULL) {
110 int token; 113 int token;
@@ -159,8 +162,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
159 break; 162 break;
160 case Opt_cache: 163 case Opt_cache:
161 s = match_strdup(&args[0]); 164 s = match_strdup(&args[0]);
162 if (!s) 165 if (!s) {
163 goto fail_option_alloc; 166 ret = -ENOMEM;
167 P9_DPRINTK(P9_DEBUG_ERROR,
168 "problem allocating copy of cache arg\n");
169 goto free_and_return;
170 }
164 171
165 if (strcmp(s, "loose") == 0) 172 if (strcmp(s, "loose") == 0)
166 v9ses->cache = CACHE_LOOSE; 173 v9ses->cache = CACHE_LOOSE;
@@ -173,8 +180,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
173 180
174 case Opt_access: 181 case Opt_access:
175 s = match_strdup(&args[0]); 182 s = match_strdup(&args[0]);
176 if (!s) 183 if (!s) {
177 goto fail_option_alloc; 184 ret = -ENOMEM;
185 P9_DPRINTK(P9_DEBUG_ERROR,
186 "problem allocating copy of access arg\n");
187 goto free_and_return;
188 }
178 189
179 v9ses->flags &= ~V9FS_ACCESS_MASK; 190 v9ses->flags &= ~V9FS_ACCESS_MASK;
180 if (strcmp(s, "user") == 0) 191 if (strcmp(s, "user") == 0)
@@ -194,13 +205,11 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
194 continue; 205 continue;
195 } 206 }
196 } 207 }
197 kfree(options);
198 return ret;
199 208
209free_and_return:
210 kfree(tmp_options);
200fail_option_alloc: 211fail_option_alloc:
201 P9_DPRINTK(P9_DEBUG_ERROR, 212 return ret;
202 "failed to allocate copy of option argument\n");
203 return -ENOMEM;
204} 213}
205 214
206/** 215/**
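
Two details in the v9fs_parse_options() hunks are worth spelling out: the kstrdup() result is kept in tmp_options because strsep() advances the working pointer and only the original address may be freed, and every allocation failure now sets ret = -ENOMEM before jumping to the cleanup label. A minimal userspace sketch of that pointer-ownership pattern (names invented, plain strdup/free standing in for kstrdup/kfree):

#define _DEFAULT_SOURCE
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* strsep() moves 'cursor' forward as it splits, so the original
 * allocation must be remembered separately for free(). */
static int parse_options(const char *opts)
{
    char *tmp, *cursor, *token;
    int ret = 0;

    tmp = strdup(opts);
    if (!tmp)
        return -ENOMEM;
    cursor = tmp;

    while ((token = strsep(&cursor, ",")) != NULL) {
        if (*token == '\0')
            continue;
        printf("option: %s\n", token);
    }

    free(tmp);            /* freeing 'cursor' here would be wrong */
    return ret;
}

int main(void)
{
    return parse_options("uname=alice,cache=loose,access=user") < 0;
}
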
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index 3a7560e35865..ed835836e0dc 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -60,3 +60,4 @@ void v9fs_dentry_release(struct dentry *);
60int v9fs_uflags2omode(int uflags, int extended); 60int v9fs_uflags2omode(int uflags, int extended);
61 61
62ssize_t v9fs_file_readn(struct file *, char *, char __user *, u32, u64); 62ssize_t v9fs_file_readn(struct file *, char *, char __user *, u32, u64);
63void v9fs_blank_wstat(struct p9_wstat *wstat);
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 3902bf43a088..74a0461a9ac0 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -257,6 +257,23 @@ v9fs_file_write(struct file *filp, const char __user * data,
257 return total; 257 return total;
258} 258}
259 259
260static int v9fs_file_fsync(struct file *filp, struct dentry *dentry,
261 int datasync)
262{
263 struct p9_fid *fid;
264 struct p9_wstat wstat;
265 int retval;
266
267 P9_DPRINTK(P9_DEBUG_VFS, "filp %p dentry %p datasync %x\n", filp,
268 dentry, datasync);
269
270 fid = filp->private_data;
271 v9fs_blank_wstat(&wstat);
272
273 retval = p9_client_wstat(fid, &wstat);
274 return retval;
275}
276
260static const struct file_operations v9fs_cached_file_operations = { 277static const struct file_operations v9fs_cached_file_operations = {
261 .llseek = generic_file_llseek, 278 .llseek = generic_file_llseek,
262 .read = do_sync_read, 279 .read = do_sync_read,
@@ -266,6 +283,7 @@ static const struct file_operations v9fs_cached_file_operations = {
266 .release = v9fs_dir_release, 283 .release = v9fs_dir_release,
267 .lock = v9fs_file_lock, 284 .lock = v9fs_file_lock,
268 .mmap = generic_file_readonly_mmap, 285 .mmap = generic_file_readonly_mmap,
286 .fsync = v9fs_file_fsync,
269}; 287};
270 288
271const struct file_operations v9fs_file_operations = { 289const struct file_operations v9fs_file_operations = {
@@ -276,4 +294,5 @@ const struct file_operations v9fs_file_operations = {
276 .release = v9fs_dir_release, 294 .release = v9fs_dir_release,
277 .lock = v9fs_file_lock, 295 .lock = v9fs_file_lock,
278 .mmap = generic_file_readonly_mmap, 296 .mmap = generic_file_readonly_mmap,
297 .fsync = v9fs_file_fsync,
279}; 298};
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 9d03d1ebca6f..a407fa3388c0 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -176,7 +176,7 @@ int v9fs_uflags2omode(int uflags, int extended)
176 * 176 *
177 */ 177 */
178 178
179static void 179void
180v9fs_blank_wstat(struct p9_wstat *wstat) 180v9fs_blank_wstat(struct p9_wstat *wstat)
181{ 181{
182 wstat->type = ~0; 182 wstat->type = ~0;
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 33baf27fac78..34ddda888e63 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -873,6 +873,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
873 brelse(bh); 873 brelse(bh);
874 874
875 unacquire_priv_sbp: 875 unacquire_priv_sbp:
876 kfree(befs_sb->mount_opts.iocharset);
876 kfree(sb->s_fs_info); 877 kfree(sb->s_fs_info);
877 878
878 unacquire_none: 879 unacquire_none:
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 73d6a735b8f3..d11d0289f3d2 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -246,7 +246,8 @@ struct super_block *freeze_bdev(struct block_device *bdev)
246 if (!sb) 246 if (!sb)
247 goto out; 247 goto out;
248 if (sb->s_flags & MS_RDONLY) { 248 if (sb->s_flags & MS_RDONLY) {
249 deactivate_locked_super(sb); 249 sb->s_frozen = SB_FREEZE_TRANS;
250 up_write(&sb->s_umount);
250 mutex_unlock(&bdev->bd_fsfreeze_mutex); 251 mutex_unlock(&bdev->bd_fsfreeze_mutex);
251 return sb; 252 return sb;
252 } 253 }
@@ -307,7 +308,7 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb)
307 BUG_ON(sb->s_bdev != bdev); 308 BUG_ON(sb->s_bdev != bdev);
308 down_write(&sb->s_umount); 309 down_write(&sb->s_umount);
309 if (sb->s_flags & MS_RDONLY) 310 if (sb->s_flags & MS_RDONLY)
310 goto out_deactivate; 311 goto out_unfrozen;
311 312
312 if (sb->s_op->unfreeze_fs) { 313 if (sb->s_op->unfreeze_fs) {
313 error = sb->s_op->unfreeze_fs(sb); 314 error = sb->s_op->unfreeze_fs(sb);
@@ -321,11 +322,11 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb)
321 } 322 }
322 } 323 }
323 324
325out_unfrozen:
324 sb->s_frozen = SB_UNFROZEN; 326 sb->s_frozen = SB_UNFROZEN;
325 smp_wmb(); 327 smp_wmb();
326 wake_up(&sb->s_wait_unfrozen); 328 wake_up(&sb->s_wait_unfrozen);
327 329
328out_deactivate:
329 if (sb) 330 if (sb)
330 deactivate_locked_super(sb); 331 deactivate_locked_super(sb);
331out_unlock: 332out_unlock:
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 87b25543d7d1..2b59201b955c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1982,7 +1982,12 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1982 1982
1983 if (!(sb->s_flags & MS_RDONLY)) { 1983 if (!(sb->s_flags & MS_RDONLY)) {
1984 ret = btrfs_recover_relocation(tree_root); 1984 ret = btrfs_recover_relocation(tree_root);
1985 BUG_ON(ret); 1985 if (ret < 0) {
1986 printk(KERN_WARNING
1987 "btrfs: failed to recover relocation\n");
1988 err = -EINVAL;
1989 goto fail_trans_kthread;
1990 }
1986 } 1991 }
1987 1992
1988 location.objectid = BTRFS_FS_TREE_OBJECTID; 1993 location.objectid = BTRFS_FS_TREE_OBJECTID;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 432a2da4641e..559f72489b3b 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -5402,10 +5402,6 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
5402 int ret; 5402 int ret;
5403 5403
5404 while (level >= 0) { 5404 while (level >= 0) {
5405 if (path->slots[level] >=
5406 btrfs_header_nritems(path->nodes[level]))
5407 break;
5408
5409 ret = walk_down_proc(trans, root, path, wc, lookup_info); 5405 ret = walk_down_proc(trans, root, path, wc, lookup_info);
5410 if (ret > 0) 5406 if (ret > 0)
5411 break; 5407 break;
@@ -5413,6 +5409,10 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
5413 if (level == 0) 5409 if (level == 0)
5414 break; 5410 break;
5415 5411
5412 if (path->slots[level] >=
5413 btrfs_header_nritems(path->nodes[level]))
5414 break;
5415
5416 ret = do_walk_down(trans, root, path, wc, &lookup_info); 5416 ret = do_walk_down(trans, root, path, wc, &lookup_info);
5417 if (ret > 0) { 5417 if (ret > 0) {
5418 path->slots[level]++; 5418 path->slots[level]++;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 96577e8bf9fd..b177ed319612 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3165,10 +3165,9 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
3165 spin_unlock(&tree->buffer_lock); 3165 spin_unlock(&tree->buffer_lock);
3166 goto free_eb; 3166 goto free_eb;
3167 } 3167 }
3168 spin_unlock(&tree->buffer_lock);
3169
3170 /* add one reference for the tree */ 3168 /* add one reference for the tree */
3171 atomic_inc(&eb->refs); 3169 atomic_inc(&eb->refs);
3170 spin_unlock(&tree->buffer_lock);
3172 return eb; 3171 return eb;
3173 3172
3174free_eb: 3173free_eb:
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index c02033596f02..9d0809629967 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1133,7 +1133,7 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
1133 } 1133 }
1134 mutex_lock(&dentry->d_inode->i_mutex); 1134 mutex_lock(&dentry->d_inode->i_mutex);
1135out: 1135out:
1136 return ret > 0 ? EIO : ret; 1136 return ret > 0 ? -EIO : ret;
1137} 1137}
1138 1138
1139static const struct vm_operations_struct btrfs_file_vm_ops = { 1139static const struct vm_operations_struct btrfs_file_vm_ops = {
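
The one-character fix in btrfs_sync_file() above matters because in-kernel code reports failure as a negative errno; returning a positive EIO would look like success (or a byte count) to most callers. A tiny illustration of the convention, with an invented sync_file() stub:

#include <errno.h>
#include <stdio.h>

/* Kernel-style convention: 0 on success, negative errno on failure. */
static int sync_file(int fail)
{
    if (fail)
        return -EIO;      /* not EIO: callers test for ret < 0 */
    return 0;
}

int main(void)
{
    int ret = sync_file(1);

    if (ret < 0)
        printf("error %d\n", ret);   /* only taken for a negative code */
    return 0;
}
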
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 8cd109972fa6..4deb280f8969 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1681,24 +1681,6 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1681 * before we start the transaction. It limits the amount of btree 1681 * before we start the transaction. It limits the amount of btree
1682 * reads required while inside the transaction. 1682 * reads required while inside the transaction.
1683 */ 1683 */
1684static noinline void reada_csum(struct btrfs_root *root,
1685 struct btrfs_path *path,
1686 struct btrfs_ordered_extent *ordered_extent)
1687{
1688 struct btrfs_ordered_sum *sum;
1689 u64 bytenr;
1690
1691 sum = list_entry(ordered_extent->list.next, struct btrfs_ordered_sum,
1692 list);
1693 bytenr = sum->sums[0].bytenr;
1694
1695 /*
1696 * we don't care about the results, the point of this search is
1697 * just to get the btree leaves into ram
1698 */
1699 btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, bytenr, 0);
1700}
1701
1702/* as ordered data IO finishes, this gets called so we can finish 1684/* as ordered data IO finishes, this gets called so we can finish
1703 * an ordered extent if the range of bytes in the file it covers are 1685 * an ordered extent if the range of bytes in the file it covers are
1704 * fully written. 1686 * fully written.
@@ -1709,7 +1691,6 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1709 struct btrfs_trans_handle *trans; 1691 struct btrfs_trans_handle *trans;
1710 struct btrfs_ordered_extent *ordered_extent = NULL; 1692 struct btrfs_ordered_extent *ordered_extent = NULL;
1711 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 1693 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1712 struct btrfs_path *path;
1713 int compressed = 0; 1694 int compressed = 0;
1714 int ret; 1695 int ret;
1715 1696
@@ -1717,32 +1698,9 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1717 if (!ret) 1698 if (!ret)
1718 return 0; 1699 return 0;
1719 1700
1720 /* 1701 ordered_extent = btrfs_lookup_ordered_extent(inode, start);
1721 * before we join the transaction, try to do some of our IO.
1722 * This will limit the amount of IO that we have to do with
1723 * the transaction running. We're unlikely to need to do any
1724 * IO if the file extents are new, the disk_i_size checks
1725 * covers the most common case.
1726 */
1727 if (start < BTRFS_I(inode)->disk_i_size) {
1728 path = btrfs_alloc_path();
1729 if (path) {
1730 ret = btrfs_lookup_file_extent(NULL, root, path,
1731 inode->i_ino,
1732 start, 0);
1733 ordered_extent = btrfs_lookup_ordered_extent(inode,
1734 start);
1735 if (!list_empty(&ordered_extent->list)) {
1736 btrfs_release_path(root, path);
1737 reada_csum(root, path, ordered_extent);
1738 }
1739 btrfs_free_path(path);
1740 }
1741 }
1742
1743 if (!ordered_extent)
1744 ordered_extent = btrfs_lookup_ordered_extent(inode, start);
1745 BUG_ON(!ordered_extent); 1702 BUG_ON(!ordered_extent);
1703
1746 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { 1704 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
1747 BUG_ON(!list_empty(&ordered_extent->list)); 1705 BUG_ON(!list_empty(&ordered_extent->list));
1748 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); 1706 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
@@ -5841,7 +5799,9 @@ static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
5841 inode->i_ctime = CURRENT_TIME; 5799 inode->i_ctime = CURRENT_TIME;
5842 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; 5800 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
5843 if (!(mode & FALLOC_FL_KEEP_SIZE) && 5801 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
5844 cur_offset > inode->i_size) { 5802 (actual_len > inode->i_size) &&
5803 (cur_offset > inode->i_size)) {
5804
5845 if (cur_offset > actual_len) 5805 if (cur_offset > actual_len)
5846 i_size = actual_len; 5806 i_size = actual_len;
5847 else 5807 else
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index ed3e4a2ec2c8..ab7ab5318745 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -3764,7 +3764,8 @@ out:
3764 BTRFS_DATA_RELOC_TREE_OBJECTID); 3764 BTRFS_DATA_RELOC_TREE_OBJECTID);
3765 if (IS_ERR(fs_root)) 3765 if (IS_ERR(fs_root))
3766 err = PTR_ERR(fs_root); 3766 err = PTR_ERR(fs_root);
3767 btrfs_orphan_cleanup(fs_root); 3767 else
3768 btrfs_orphan_cleanup(fs_root);
3768 } 3769 }
3769 return err; 3770 return err;
3770} 3771}
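
The relocation hunk only runs btrfs_orphan_cleanup() when the root lookup did not return an error pointer; handing an ERR_PTR-encoded value to code that dereferences it would crash. A rough userspace sketch of the error-pointer idea, simplified and not the kernel's actual macros:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified take on the ERR_PTR scheme: the top 4095 pointer values
 * are never valid object addresses, so errno codes can live there. */
#define MAX_ERRNO 4095

static void *err_ptr(long error)      { return (void *)error; }
static long  ptr_err(const void *ptr) { return (long)ptr; }
static int   is_err(const void *ptr)
{
    return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static void *lookup_root(int fail)
{
    static int root = 42;
    return fail ? err_ptr(-ENOENT) : &root;
}

int main(void)
{
    void *root = lookup_root(1);

    if (is_err(root))
        printf("lookup failed: %ld\n", ptr_err(root));
    else
        printf("root found\n");   /* only dereference in this branch */
    return 0;
}
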
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 7b2600b380d7..49503d2edc7e 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,3 +1,7 @@
1Version 1.62
2------------
3Add sockopt=TCP_NODELAY mount option.
4
1Version 1.61 5Version 1.61
2------------ 6------------
3Fix append problem to Samba servers (files opened with O_APPEND could 7Fix append problem to Samba servers (files opened with O_APPEND could
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index ac2b24c192f8..78c1b86d55f6 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -113,5 +113,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
113extern const struct export_operations cifs_export_ops; 113extern const struct export_operations cifs_export_ops;
114#endif /* EXPERIMENTAL */ 114#endif /* EXPERIMENTAL */
115 115
116#define CIFS_VERSION "1.61" 116#define CIFS_VERSION "1.62"
117#endif /* _CIFSFS_H */ 117#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 4b35f7ec0583..ed751bb657db 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -149,6 +149,7 @@ struct TCP_Server_Info {
149 bool svlocal:1; /* local server or remote */ 149 bool svlocal:1; /* local server or remote */
150 bool noblocksnd; /* use blocking sendmsg */ 150 bool noblocksnd; /* use blocking sendmsg */
151 bool noautotune; /* do not autotune send buf sizes */ 151 bool noautotune; /* do not autotune send buf sizes */
152 bool tcp_nodelay;
152 atomic_t inFlight; /* number of requests on the wire to server */ 153 atomic_t inFlight; /* number of requests on the wire to server */
153#ifdef CONFIG_CIFS_STATS2 154#ifdef CONFIG_CIFS_STATS2
154 atomic_t inSend; /* requests trying to send */ 155 atomic_t inSend; /* requests trying to send */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 3bbcaa716b3c..2e9e09ca0e30 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -98,7 +98,7 @@ struct smb_vol {
98 bool nostrictsync:1; /* do not force expensive SMBflush on every sync */ 98 bool nostrictsync:1; /* do not force expensive SMBflush on every sync */
99 unsigned int rsize; 99 unsigned int rsize;
100 unsigned int wsize; 100 unsigned int wsize;
101 unsigned int sockopt; 101 bool sockopt_tcp_nodelay:1;
102 unsigned short int port; 102 unsigned short int port;
103 char *prepath; 103 char *prepath;
104}; 104};
@@ -1142,9 +1142,11 @@ cifs_parse_mount_options(char *options, const char *devname,
1142 simple_strtoul(value, &value, 0); 1142 simple_strtoul(value, &value, 0);
1143 } 1143 }
1144 } else if (strnicmp(data, "sockopt", 5) == 0) { 1144 } else if (strnicmp(data, "sockopt", 5) == 0) {
1145 if (value && *value) { 1145 if (!value || !*value) {
1146 vol->sockopt = 1146 cERROR(1, ("no socket option specified"));
1147 simple_strtoul(value, &value, 0); 1147 continue;
1148 } else if (strnicmp(value, "TCP_NODELAY", 11) == 0) {
1149 vol->sockopt_tcp_nodelay = 1;
1148 } 1150 }
1149 } else if (strnicmp(data, "netbiosname", 4) == 0) { 1151 } else if (strnicmp(data, "netbiosname", 4) == 0) {
1150 if (!value || !*value || (*value == ' ')) { 1152 if (!value || !*value || (*value == ' ')) {
@@ -1514,6 +1516,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
1514 1516
1515 tcp_ses->noblocksnd = volume_info->noblocksnd; 1517 tcp_ses->noblocksnd = volume_info->noblocksnd;
1516 tcp_ses->noautotune = volume_info->noautotune; 1518 tcp_ses->noautotune = volume_info->noautotune;
1519 tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay;
1517 atomic_set(&tcp_ses->inFlight, 0); 1520 atomic_set(&tcp_ses->inFlight, 0);
1518 init_waitqueue_head(&tcp_ses->response_q); 1521 init_waitqueue_head(&tcp_ses->response_q);
1519 init_waitqueue_head(&tcp_ses->request_q); 1522 init_waitqueue_head(&tcp_ses->request_q);
@@ -1764,6 +1767,7 @@ static int
1764ipv4_connect(struct TCP_Server_Info *server) 1767ipv4_connect(struct TCP_Server_Info *server)
1765{ 1768{
1766 int rc = 0; 1769 int rc = 0;
1770 int val;
1767 bool connected = false; 1771 bool connected = false;
1768 __be16 orig_port = 0; 1772 __be16 orig_port = 0;
1769 struct socket *socket = server->ssocket; 1773 struct socket *socket = server->ssocket;
@@ -1845,6 +1849,14 @@ ipv4_connect(struct TCP_Server_Info *server)
1845 socket->sk->sk_rcvbuf = 140 * 1024; 1849 socket->sk->sk_rcvbuf = 140 * 1024;
1846 } 1850 }
1847 1851
1852 if (server->tcp_nodelay) {
1853 val = 1;
1854 rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY,
1855 (char *)&val, sizeof(val));
1856 if (rc)
1857 cFYI(1, ("set TCP_NODELAY socket option error %d", rc));
1858 }
1859
1848 cFYI(1, ("sndbuf %d rcvbuf %d rcvtimeo 0x%lx", 1860 cFYI(1, ("sndbuf %d rcvbuf %d rcvtimeo 0x%lx",
1849 socket->sk->sk_sndbuf, 1861 socket->sk->sk_sndbuf,
1850 socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo)); 1862 socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo));
@@ -1916,6 +1928,7 @@ static int
1916ipv6_connect(struct TCP_Server_Info *server) 1928ipv6_connect(struct TCP_Server_Info *server)
1917{ 1929{
1918 int rc = 0; 1930 int rc = 0;
1931 int val;
1919 bool connected = false; 1932 bool connected = false;
1920 __be16 orig_port = 0; 1933 __be16 orig_port = 0;
1921 struct socket *socket = server->ssocket; 1934 struct socket *socket = server->ssocket;
@@ -1987,6 +2000,15 @@ ipv6_connect(struct TCP_Server_Info *server)
1987 */ 2000 */
1988 socket->sk->sk_rcvtimeo = 7 * HZ; 2001 socket->sk->sk_rcvtimeo = 7 * HZ;
1989 socket->sk->sk_sndtimeo = 5 * HZ; 2002 socket->sk->sk_sndtimeo = 5 * HZ;
2003
2004 if (server->tcp_nodelay) {
2005 val = 1;
2006 rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY,
2007 (char *)&val, sizeof(val));
2008 if (rc)
2009 cFYI(1, ("set TCP_NODELAY socket option error %d", rc));
2010 }
2011
1990 server->ssocket = socket; 2012 server->ssocket = socket;
1991 2013
1992 return rc; 2014 return rc;
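
The connect.c hunks turn "sockopt=TCP_NODELAY" into a one-bit flag and, when it is set, disable Nagle on the freshly connected socket via kernel_setsockopt() on both the IPv4 and IPv6 paths. For reference, the userspace equivalent is a plain setsockopt(); a small test-client sketch (not part of the patch):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <unistd.h>

int main(void)
{
    int fd = socket(AF_INET, SOCK_STREAM, 0);
    int val = 1;

    if (fd < 0) {
        perror("socket");
        return 1;
    }
    /* Same knob the patch flips from inside the kernel: disable Nagle
     * so small requests are sent immediately instead of being coalesced. */
    if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &val, sizeof(val)) < 0)
        perror("setsockopt(TCP_NODELAY)");

    close(fd);
    return 0;
}
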
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index cf18ee765590..e3fda978f481 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1762,8 +1762,18 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
1762 CIFS_MOUNT_MAP_SPECIAL_CHR); 1762 CIFS_MOUNT_MAP_SPECIAL_CHR);
1763 } 1763 }
1764 1764
1765 if (!rc) 1765 if (!rc) {
1766 rc = inode_setattr(inode, attrs); 1766 rc = inode_setattr(inode, attrs);
1767
1768 /* force revalidate when any of these times are set since some
1769 of the fs types (eg ext3, fat) do not have fine enough
1770 time granularity to match protocol, and we do not have
1771 a way (yet) to query the server fs's time granularity (and
1772 whether it rounds times down).
1773 */
1774 if (!rc && (attrs->ia_valid & (ATTR_MTIME | ATTR_CTIME)))
1775 cifsInode->time = 0;
1776 }
1767out: 1777out:
1768 kfree(args); 1778 kfree(args);
1769 kfree(full_path); 1779 kfree(full_path);
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index f84062f9a985..c343b14ba2d3 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -77,6 +77,11 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
77 77
78 cFYI(1, ("For %s", name->name)); 78 cFYI(1, ("For %s", name->name));
79 79
80 if (parent->d_op && parent->d_op->d_hash)
81 parent->d_op->d_hash(parent, name);
82 else
83 name->hash = full_name_hash(name->name, name->len);
84
80 dentry = d_lookup(parent, name); 85 dentry = d_lookup(parent, name);
81 if (dentry) { 86 if (dentry) {
82 /* FIXME: check for inode number changes? */ 87 /* FIXME: check for inode number changes? */
@@ -666,12 +671,11 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst,
666 min(len, max_len), nlt, 671 min(len, max_len), nlt,
667 cifs_sb->mnt_cifs_flags & 672 cifs_sb->mnt_cifs_flags &
668 CIFS_MOUNT_MAP_SPECIAL_CHR); 673 CIFS_MOUNT_MAP_SPECIAL_CHR);
674 pqst->len -= nls_nullsize(nlt);
669 } else { 675 } else {
670 pqst->name = filename; 676 pqst->name = filename;
671 pqst->len = len; 677 pqst->len = len;
672 } 678 }
673 pqst->hash = full_name_hash(pqst->name, pqst->len);
674/* cFYI(1, ("filldir on %s",pqst->name)); */
675 return rc; 679 return rc;
676} 680}
677 681
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 7085a6275c4c..aaa9c1c5a5bd 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -223,9 +223,9 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
223 /* null user mount */ 223 /* null user mount */
224 *bcc_ptr = 0; 224 *bcc_ptr = 0;
225 *(bcc_ptr+1) = 0; 225 *(bcc_ptr+1) = 0;
226 } else { /* 300 should be long enough for any conceivable user name */ 226 } else {
227 bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName, 227 bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName,
228 300, nls_cp); 228 MAX_USERNAME_SIZE, nls_cp);
229 } 229 }
230 bcc_ptr += 2 * bytes_ret; 230 bcc_ptr += 2 * bytes_ret;
231 bcc_ptr += 2; /* account for null termination */ 231 bcc_ptr += 2; /* account for null termination */
@@ -246,11 +246,10 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
246 /* copy user */ 246 /* copy user */
247 if (ses->userName == NULL) { 247 if (ses->userName == NULL) {
248 /* BB what about null user mounts - check that we do this BB */ 248 /* BB what about null user mounts - check that we do this BB */
249 } else { /* 300 should be long enough for any conceivable user name */ 249 } else {
250 strncpy(bcc_ptr, ses->userName, 300); 250 strncpy(bcc_ptr, ses->userName, MAX_USERNAME_SIZE);
251 } 251 }
252 /* BB improve check for overflow */ 252 bcc_ptr += strnlen(ses->userName, MAX_USERNAME_SIZE);
253 bcc_ptr += strnlen(ses->userName, 300);
254 *bcc_ptr = 0; 253 *bcc_ptr = 0;
255 bcc_ptr++; /* account for null termination */ 254 bcc_ptr++; /* account for null termination */
256 255
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index c5c45de1a2ee..30698a13fb22 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -936,6 +936,7 @@ COMPATIBLE_IOCTL(TCSETSF)
936COMPATIBLE_IOCTL(TIOCLINUX) 936COMPATIBLE_IOCTL(TIOCLINUX)
937COMPATIBLE_IOCTL(TIOCSBRK) 937COMPATIBLE_IOCTL(TIOCSBRK)
938COMPATIBLE_IOCTL(TIOCCBRK) 938COMPATIBLE_IOCTL(TIOCCBRK)
939COMPATIBLE_IOCTL(TIOCGSID)
939COMPATIBLE_IOCTL(TIOCGICOUNT) 940COMPATIBLE_IOCTL(TIOCGICOUNT)
940/* Little t */ 941/* Little t */
941COMPATIBLE_IOCTL(TIOCGETD) 942COMPATIBLE_IOCTL(TIOCGETD)
@@ -1038,6 +1039,8 @@ COMPATIBLE_IOCTL(FIOQSIZE)
1038#ifdef CONFIG_BLOCK 1039#ifdef CONFIG_BLOCK
1039/* loop */ 1040/* loop */
1040IGNORE_IOCTL(LOOP_CLR_FD) 1041IGNORE_IOCTL(LOOP_CLR_FD)
1042/* md calls this on random blockdevs */
1043IGNORE_IOCTL(RAID_VERSION)
1041/* SG stuff */ 1044/* SG stuff */
1042COMPATIBLE_IOCTL(SG_SET_TIMEOUT) 1045COMPATIBLE_IOCTL(SG_SET_TIMEOUT)
1043COMPATIBLE_IOCTL(SG_GET_TIMEOUT) 1046COMPATIBLE_IOCTL(SG_GET_TIMEOUT)
diff --git a/fs/exec.c b/fs/exec.c
index 0790a107ff7e..e95c692ef0e4 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -571,6 +571,9 @@ int setup_arg_pages(struct linux_binprm *bprm,
571 struct vm_area_struct *prev = NULL; 571 struct vm_area_struct *prev = NULL;
572 unsigned long vm_flags; 572 unsigned long vm_flags;
573 unsigned long stack_base; 573 unsigned long stack_base;
574 unsigned long stack_size;
575 unsigned long stack_expand;
576 unsigned long rlim_stack;
574 577
575#ifdef CONFIG_STACK_GROWSUP 578#ifdef CONFIG_STACK_GROWSUP
576 /* Limit stack size to 1GB */ 579 /* Limit stack size to 1GB */
@@ -627,10 +630,24 @@ int setup_arg_pages(struct linux_binprm *bprm,
627 goto out_unlock; 630 goto out_unlock;
628 } 631 }
629 632
633 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
634 stack_size = vma->vm_end - vma->vm_start;
635 /*
636 * Align this down to a page boundary as expand_stack
637 * will align it up.
638 */
639 rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
640 rlim_stack = min(rlim_stack, stack_size);
630#ifdef CONFIG_STACK_GROWSUP 641#ifdef CONFIG_STACK_GROWSUP
631 stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE; 642 if (stack_size + stack_expand > rlim_stack)
643 stack_base = vma->vm_start + rlim_stack;
644 else
645 stack_base = vma->vm_end + stack_expand;
632#else 646#else
633 stack_base = vma->vm_start - EXTRA_STACK_VM_PAGES * PAGE_SIZE; 647 if (stack_size + stack_expand > rlim_stack)
648 stack_base = vma->vm_end - rlim_stack;
649 else
650 stack_base = vma->vm_start - stack_expand;
634#endif 651#endif
635 ret = expand_stack(vma, stack_base); 652 ret = expand_stack(vma, stack_base);
636 if (ret) 653 if (ret)
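
The setup_arg_pages() hunk bounds the initial stack expansion by RLIMIT_STACK: the limit is aligned down to a page, compared against the current VMA size plus the extra head-room, and the clamped value is used when the requested growth would exceed it. A standalone rendering of the grows-down arithmetic from the hunk, with invented constants and helpers:

#include <stdio.h>

#define PAGE_SIZE    4096UL
#define PAGE_MASK    (~(PAGE_SIZE - 1))
#define EXTRA_PAGES  20UL            /* stand-in for EXTRA_STACK_VM_PAGES */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
    return a < b ? a : b;
}

/* Grows-down case from the hunk above: pick the lowest address that
 * expand_stack() may move the start of the stack VMA to. */
static unsigned long pick_stack_base(unsigned long vm_start,
                                     unsigned long vm_end,
                                     unsigned long rlim)
{
    unsigned long stack_size   = vm_end - vm_start;
    unsigned long stack_expand = EXTRA_PAGES * PAGE_SIZE;
    unsigned long rlim_stack   = min_ul(rlim & PAGE_MASK, stack_size);

    if (stack_size + stack_expand > rlim_stack)
        return vm_end - rlim_stack;      /* clamped to the rlimit */
    return vm_start - stack_expand;      /* full extra head-room */
}

int main(void)
{
    printf("stack base: %#lx\n",
           pick_stack_base(0x7ffffffdd000UL, 0x7ffffffff000UL,
                           8UL * 1024 * 1024));
    return 0;
}
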
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 5ef953e6f908..97e01dc0d95f 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -199,9 +199,7 @@ static int setfl(int fd, struct file * filp, unsigned long arg)
199static void f_modown(struct file *filp, struct pid *pid, enum pid_type type, 199static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
200 int force) 200 int force)
201{ 201{
202 unsigned long flags; 202 write_lock_irq(&filp->f_owner.lock);
203
204 write_lock_irqsave(&filp->f_owner.lock, flags);
205 if (force || !filp->f_owner.pid) { 203 if (force || !filp->f_owner.pid) {
206 put_pid(filp->f_owner.pid); 204 put_pid(filp->f_owner.pid);
207 filp->f_owner.pid = get_pid(pid); 205 filp->f_owner.pid = get_pid(pid);
@@ -213,7 +211,7 @@ static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
213 filp->f_owner.euid = cred->euid; 211 filp->f_owner.euid = cred->euid;
214 } 212 }
215 } 213 }
216 write_unlock_irqrestore(&filp->f_owner.lock, flags); 214 write_unlock_irq(&filp->f_owner.lock);
217} 215}
218 216
219int __f_setown(struct file *filp, struct pid *pid, enum pid_type type, 217int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
diff --git a/fs/file_table.c b/fs/file_table.c
index 69652c5bd5f0..b98404b54383 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -253,6 +253,7 @@ void __fput(struct file *file)
253 if (file->f_op && file->f_op->release) 253 if (file->f_op && file->f_op->release)
254 file->f_op->release(inode, file); 254 file->f_op->release(inode, file);
255 security_file_free(file); 255 security_file_free(file);
256 ima_file_free(file);
256 if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL)) 257 if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
257 cdev_put(inode->i_cdev); 258 cdev_put(inode->i_cdev);
258 fops_put(file->f_op); 259 fops_put(file->f_op);
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index c18913a777ae..a9f5e137f1d3 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -828,6 +828,9 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
828 if (!page) 828 if (!page)
829 break; 829 break;
830 830
831 if (mapping_writably_mapped(mapping))
832 flush_dcache_page(page);
833
831 pagefault_disable(); 834 pagefault_disable();
832 tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes); 835 tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
833 pagefault_enable(); 836 pagefault_enable();
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index f455a03a09e2..f42663325931 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -769,6 +769,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
769 if (!gl) 769 if (!gl)
770 return -ENOMEM; 770 return -ENOMEM;
771 771
772 atomic_inc(&sdp->sd_glock_disposal);
772 gl->gl_flags = 0; 773 gl->gl_flags = 0;
773 gl->gl_name = name; 774 gl->gl_name = name;
774 atomic_set(&gl->gl_ref, 1); 775 atomic_set(&gl->gl_ref, 1);
@@ -1538,6 +1539,9 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
1538 up_write(&gfs2_umount_flush_sem); 1539 up_write(&gfs2_umount_flush_sem);
1539 msleep(10); 1540 msleep(10);
1540 } 1541 }
1542 flush_workqueue(glock_workqueue);
1543 wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
1544 gfs2_dump_lockstate(sdp);
1541} 1545}
1542 1546
1543void gfs2_glock_finish_truncate(struct gfs2_inode *ip) 1547void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 13f0bd228132..c0262faf4725 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -123,7 +123,7 @@ struct lm_lockops {
123 int (*lm_mount) (struct gfs2_sbd *sdp, const char *fsname); 123 int (*lm_mount) (struct gfs2_sbd *sdp, const char *fsname);
124 void (*lm_unmount) (struct gfs2_sbd *sdp); 124 void (*lm_unmount) (struct gfs2_sbd *sdp);
125 void (*lm_withdraw) (struct gfs2_sbd *sdp); 125 void (*lm_withdraw) (struct gfs2_sbd *sdp);
126 void (*lm_put_lock) (struct kmem_cache *cachep, void *gl); 126 void (*lm_put_lock) (struct kmem_cache *cachep, struct gfs2_glock *gl);
127 unsigned int (*lm_lock) (struct gfs2_glock *gl, 127 unsigned int (*lm_lock) (struct gfs2_glock *gl,
128 unsigned int req_state, unsigned int flags); 128 unsigned int req_state, unsigned int flags);
129 void (*lm_cancel) (struct gfs2_glock *gl); 129 void (*lm_cancel) (struct gfs2_glock *gl);
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 4792200978c8..bc0ad158e6b4 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -544,6 +544,8 @@ struct gfs2_sbd {
544 struct gfs2_holder sd_live_gh; 544 struct gfs2_holder sd_live_gh;
545 struct gfs2_glock *sd_rename_gl; 545 struct gfs2_glock *sd_rename_gl;
546 struct gfs2_glock *sd_trans_gl; 546 struct gfs2_glock *sd_trans_gl;
547 wait_queue_head_t sd_glock_wait;
548 atomic_t sd_glock_disposal;
547 549
548 /* Inode Stuff */ 550 /* Inode Stuff */
549 551
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 46df988323bc..0e5e0e7022e5 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -21,6 +21,7 @@ static void gdlm_ast(void *arg)
21{ 21{
22 struct gfs2_glock *gl = arg; 22 struct gfs2_glock *gl = arg;
23 unsigned ret = gl->gl_state; 23 unsigned ret = gl->gl_state;
24 struct gfs2_sbd *sdp = gl->gl_sbd;
24 25
25 BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED); 26 BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);
26 27
@@ -30,6 +31,8 @@ static void gdlm_ast(void *arg)
30 switch (gl->gl_lksb.sb_status) { 31 switch (gl->gl_lksb.sb_status) {
31 case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */ 32 case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
32 kmem_cache_free(gfs2_glock_cachep, gl); 33 kmem_cache_free(gfs2_glock_cachep, gl);
34 if (atomic_dec_and_test(&sdp->sd_glock_disposal))
35 wake_up(&sdp->sd_glock_wait);
33 return; 36 return;
34 case -DLM_ECANCEL: /* Cancel while getting lock */ 37 case -DLM_ECANCEL: /* Cancel while getting lock */
35 ret |= LM_OUT_CANCELED; 38 ret |= LM_OUT_CANCELED;
@@ -164,14 +167,16 @@ static unsigned int gdlm_lock(struct gfs2_glock *gl,
164 return LM_OUT_ASYNC; 167 return LM_OUT_ASYNC;
165} 168}
166 169
167static void gdlm_put_lock(struct kmem_cache *cachep, void *ptr) 170static void gdlm_put_lock(struct kmem_cache *cachep, struct gfs2_glock *gl)
168{ 171{
169 struct gfs2_glock *gl = ptr; 172 struct gfs2_sbd *sdp = gl->gl_sbd;
170 struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct; 173 struct lm_lockstruct *ls = &sdp->sd_lockstruct;
171 int error; 174 int error;
172 175
173 if (gl->gl_lksb.sb_lkid == 0) { 176 if (gl->gl_lksb.sb_lkid == 0) {
174 kmem_cache_free(cachep, gl); 177 kmem_cache_free(cachep, gl);
178 if (atomic_dec_and_test(&sdp->sd_glock_disposal))
179 wake_up(&sdp->sd_glock_wait);
175 return; 180 return;
176 } 181 }
177 182
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index edfee24f3636..8a102f731003 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -82,6 +82,8 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
82 82
83 gfs2_tune_init(&sdp->sd_tune); 83 gfs2_tune_init(&sdp->sd_tune);
84 84
85 init_waitqueue_head(&sdp->sd_glock_wait);
86 atomic_set(&sdp->sd_glock_disposal, 0);
85 spin_lock_init(&sdp->sd_statfs_spin); 87 spin_lock_init(&sdp->sd_statfs_spin);
86 88
87 spin_lock_init(&sdp->sd_rindex_spin); 89 spin_lock_init(&sdp->sd_rindex_spin);
@@ -983,9 +985,17 @@ static const match_table_t nolock_tokens = {
983 { Opt_err, NULL }, 985 { Opt_err, NULL },
984}; 986};
985 987
988static void nolock_put_lock(struct kmem_cache *cachep, struct gfs2_glock *gl)
989{
990 struct gfs2_sbd *sdp = gl->gl_sbd;
991 kmem_cache_free(cachep, gl);
992 if (atomic_dec_and_test(&sdp->sd_glock_disposal))
993 wake_up(&sdp->sd_glock_wait);
994}
995
986static const struct lm_lockops nolock_ops = { 996static const struct lm_lockops nolock_ops = {
987 .lm_proto_name = "lock_nolock", 997 .lm_proto_name = "lock_nolock",
988 .lm_put_lock = kmem_cache_free, 998 .lm_put_lock = nolock_put_lock,
989 .lm_tokens = &nolock_tokens, 999 .lm_tokens = &nolock_tokens,
990}; 1000};
991 1001
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index c282ad41f3d1..b9dd3da22c0a 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -21,6 +21,7 @@
21#include <linux/gfs2_ondisk.h> 21#include <linux/gfs2_ondisk.h>
22#include <linux/crc32.h> 22#include <linux/crc32.h>
23#include <linux/time.h> 23#include <linux/time.h>
24#include <linux/wait.h>
24 25
25#include "gfs2.h" 26#include "gfs2.h"
26#include "incore.h" 27#include "incore.h"
diff --git a/fs/namei.c b/fs/namei.c
index 94a5e60779f9..d62fdc875f22 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1736,8 +1736,7 @@ do_last:
1736 if (nd.root.mnt) 1736 if (nd.root.mnt)
1737 path_put(&nd.root); 1737 path_put(&nd.root);
1738 if (!IS_ERR(filp)) { 1738 if (!IS_ERR(filp)) {
1739 error = ima_path_check(&filp->f_path, filp->f_mode & 1739 error = ima_file_check(filp, acc_mode);
1740 (MAY_READ | MAY_WRITE | MAY_EXEC));
1741 if (error) { 1740 if (error) {
1742 fput(filp); 1741 fput(filp);
1743 filp = ERR_PTR(error); 1742 filp = ERR_PTR(error);
@@ -1797,8 +1796,7 @@ ok:
1797 } 1796 }
1798 filp = nameidata_to_filp(&nd); 1797 filp = nameidata_to_filp(&nd);
1799 if (!IS_ERR(filp)) { 1798 if (!IS_ERR(filp)) {
1800 error = ima_path_check(&filp->f_path, filp->f_mode & 1799 error = ima_file_check(filp, acc_mode);
1801 (MAY_READ | MAY_WRITE | MAY_EXEC));
1802 if (error) { 1800 if (error) {
1803 fput(filp); 1801 fput(filp);
1804 filp = ERR_PTR(error); 1802 filp = ERR_PTR(error);
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 6b891328f332..63f2071d6445 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -486,6 +486,8 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
486{ 486{
487 dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page); 487 dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
488 488
489 if (gfp & __GFP_WAIT)
490 nfs_wb_page(page->mapping->host, page);
489 /* If PagePrivate() is set, then the page is not freeable */ 491 /* If PagePrivate() is set, then the page is not freeable */
490 if (PagePrivate(page)) 492 if (PagePrivate(page))
491 return 0; 493 return 0;
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index faa091865ad0..f141bde7756a 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1261,8 +1261,10 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1261 1261
1262 if (fattr->valid & NFS_ATTR_FATTR_MODE) { 1262 if (fattr->valid & NFS_ATTR_FATTR_MODE) {
1263 if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) { 1263 if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) {
1264 umode_t newmode = inode->i_mode & S_IFMT;
1265 newmode |= fattr->mode & S_IALLUGO;
1266 inode->i_mode = newmode;
1264 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL; 1267 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
1265 inode->i_mode = fattr->mode;
1266 } 1268 }
1267 } else if (server->caps & NFS_CAP_MODE) 1269 } else if (server->caps & NFS_CAP_MODE)
1268 invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR 1270 invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
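
The nfs_update_inode() hunk merges the server-supplied permission bits into the existing mode instead of overwriting it wholesale, so the S_IFMT file-type bits can no longer be clobbered. The masking step in isolation (S_IALLUGO defined locally, since userspace headers may not provide it):

#include <stdio.h>
#include <sys/stat.h>

#ifndef S_IALLUGO
#define S_IALLUGO (S_ISUID | S_ISGID | S_ISVTX | S_IRWXU | S_IRWXG | S_IRWXO)
#endif

int main(void)
{
    unsigned int cached = S_IFREG | 0644;   /* mode currently held in the inode */
    unsigned int server = 0755;             /* permission bits from the server */
    unsigned int newmode = (cached & S_IFMT) | (server & S_IALLUGO);

    printf("old %o, server %o, merged %o\n", cached, server, newmode);
    return 0;                               /* merged keeps S_IFREG, adopts 0755 */
}
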
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 865265bdca03..0c6fda33d66e 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -146,6 +146,7 @@ enum {
146 NFS_O_RDWR_STATE, /* OPEN stateid has read/write state */ 146 NFS_O_RDWR_STATE, /* OPEN stateid has read/write state */
147 NFS_STATE_RECLAIM_REBOOT, /* OPEN stateid server rebooted */ 147 NFS_STATE_RECLAIM_REBOOT, /* OPEN stateid server rebooted */
148 NFS_STATE_RECLAIM_NOGRACE, /* OPEN stateid needs to recover state */ 148 NFS_STATE_RECLAIM_NOGRACE, /* OPEN stateid needs to recover state */
149 NFS_STATE_POSIX_LOCKS, /* Posix locks are supported */
149}; 150};
150 151
151struct nfs4_state { 152struct nfs4_state {
@@ -277,6 +278,7 @@ extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t);
277extern void nfs4_schedule_state_recovery(struct nfs_client *); 278extern void nfs4_schedule_state_recovery(struct nfs_client *);
278extern void nfs4_schedule_state_manager(struct nfs_client *); 279extern void nfs4_schedule_state_manager(struct nfs_client *);
279extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state); 280extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state);
281extern int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state);
280extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags); 282extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
281extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp); 283extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
282extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl); 284extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 198d51d17c13..375f0fae2c6a 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -249,19 +249,15 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
249 if (state == NULL) 249 if (state == NULL)
250 break; 250 break;
251 nfs4_state_mark_reclaim_nograce(clp, state); 251 nfs4_state_mark_reclaim_nograce(clp, state);
252 case -NFS4ERR_STALE_CLIENTID: 252 goto do_state_recovery;
253 case -NFS4ERR_STALE_STATEID: 253 case -NFS4ERR_STALE_STATEID:
254 case -NFS4ERR_EXPIRED: 254 if (state == NULL)
255 nfs4_schedule_state_recovery(clp);
256 ret = nfs4_wait_clnt_recover(clp);
257 if (ret == 0)
258 exception->retry = 1;
259#if !defined(CONFIG_NFS_V4_1)
260 break;
261#else /* !defined(CONFIG_NFS_V4_1) */
262 if (!nfs4_has_session(server->nfs_client))
263 break; 255 break;
264 /* FALLTHROUGH */ 256 nfs4_state_mark_reclaim_reboot(clp, state);
257 case -NFS4ERR_STALE_CLIENTID:
258 case -NFS4ERR_EXPIRED:
259 goto do_state_recovery;
260#if defined(CONFIG_NFS_V4_1)
265 case -NFS4ERR_BADSESSION: 261 case -NFS4ERR_BADSESSION:
266 case -NFS4ERR_BADSLOT: 262 case -NFS4ERR_BADSLOT:
267 case -NFS4ERR_BAD_HIGH_SLOT: 263 case -NFS4ERR_BAD_HIGH_SLOT:
@@ -274,7 +270,7 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
274 nfs4_schedule_state_recovery(clp); 270 nfs4_schedule_state_recovery(clp);
275 exception->retry = 1; 271 exception->retry = 1;
276 break; 272 break;
277#endif /* !defined(CONFIG_NFS_V4_1) */ 273#endif /* defined(CONFIG_NFS_V4_1) */
278 case -NFS4ERR_FILE_OPEN: 274 case -NFS4ERR_FILE_OPEN:
279 if (exception->timeout > HZ) { 275 if (exception->timeout > HZ) {
280 /* We have retried a decent amount, time to 276 /* We have retried a decent amount, time to
@@ -293,6 +289,12 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
293 } 289 }
294 /* We failed to handle the error */ 290 /* We failed to handle the error */
295 return nfs4_map_errors(ret); 291 return nfs4_map_errors(ret);
292do_state_recovery:
293 nfs4_schedule_state_recovery(clp);
294 ret = nfs4_wait_clnt_recover(clp);
295 if (ret == 0)
296 exception->retry = 1;
297 return ret;
296} 298}
297 299
298 300
@@ -1658,6 +1660,8 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in
1658 status = PTR_ERR(state); 1660 status = PTR_ERR(state);
1659 if (IS_ERR(state)) 1661 if (IS_ERR(state))
1660 goto err_opendata_put; 1662 goto err_opendata_put;
1663 if ((opendata->o_res.rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) != 0)
1664 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
1661 nfs4_opendata_put(opendata); 1665 nfs4_opendata_put(opendata);
1662 nfs4_put_state_owner(sp); 1666 nfs4_put_state_owner(sp);
1663 *res = state; 1667 *res = state;
@@ -3422,15 +3426,14 @@ _nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
3422 if (state == NULL) 3426 if (state == NULL)
3423 break; 3427 break;
3424 nfs4_state_mark_reclaim_nograce(clp, state); 3428 nfs4_state_mark_reclaim_nograce(clp, state);
3425 case -NFS4ERR_STALE_CLIENTID: 3429 goto do_state_recovery;
3426 case -NFS4ERR_STALE_STATEID: 3430 case -NFS4ERR_STALE_STATEID:
3431 if (state == NULL)
3432 break;
3433 nfs4_state_mark_reclaim_reboot(clp, state);
3434 case -NFS4ERR_STALE_CLIENTID:
3427 case -NFS4ERR_EXPIRED: 3435 case -NFS4ERR_EXPIRED:
3428 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); 3436 goto do_state_recovery;
3429 nfs4_schedule_state_recovery(clp);
3430 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
3431 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
3432 task->tk_status = 0;
3433 return -EAGAIN;
3434#if defined(CONFIG_NFS_V4_1) 3437#if defined(CONFIG_NFS_V4_1)
3435 case -NFS4ERR_BADSESSION: 3438 case -NFS4ERR_BADSESSION:
3436 case -NFS4ERR_BADSLOT: 3439 case -NFS4ERR_BADSLOT:
@@ -3458,6 +3461,13 @@ _nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
3458 } 3461 }
3459 task->tk_status = nfs4_map_errors(task->tk_status); 3462 task->tk_status = nfs4_map_errors(task->tk_status);
3460 return 0; 3463 return 0;
3464do_state_recovery:
3465 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
3466 nfs4_schedule_state_recovery(clp);
3467 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
3468 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
3469 task->tk_status = 0;
3470 return -EAGAIN;
3461} 3471}
3462 3472
3463static int 3473static int
@@ -4088,6 +4098,28 @@ static const struct rpc_call_ops nfs4_recover_lock_ops = {
4088 .rpc_release = nfs4_lock_release, 4098 .rpc_release = nfs4_lock_release,
4089}; 4099};
4090 4100
4101static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
4102{
4103 struct nfs_client *clp = server->nfs_client;
4104 struct nfs4_state *state = lsp->ls_state;
4105
4106 switch (error) {
4107 case -NFS4ERR_ADMIN_REVOKED:
4108 case -NFS4ERR_BAD_STATEID:
4109 case -NFS4ERR_EXPIRED:
4110 if (new_lock_owner != 0 ||
4111 (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
4112 nfs4_state_mark_reclaim_nograce(clp, state);
4113 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
4114 break;
4115 case -NFS4ERR_STALE_STATEID:
4116 if (new_lock_owner != 0 ||
4117 (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
4118 nfs4_state_mark_reclaim_reboot(clp, state);
4119 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
4120 };
4121}
4122
4091static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type) 4123static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
4092{ 4124{
4093 struct nfs4_lockdata *data; 4125 struct nfs4_lockdata *data;
@@ -4126,6 +4158,9 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
4126 ret = nfs4_wait_for_completion_rpc_task(task); 4158 ret = nfs4_wait_for_completion_rpc_task(task);
4127 if (ret == 0) { 4159 if (ret == 0) {
4128 ret = data->rpc_status; 4160 ret = data->rpc_status;
4161 if (ret)
4162 nfs4_handle_setlk_error(data->server, data->lsp,
4163 data->arg.new_lock_owner, ret);
4129 } else 4164 } else
4130 data->cancelled = 1; 4165 data->cancelled = 1;
4131 rpc_put_task(task); 4166 rpc_put_task(task);
@@ -4181,8 +4216,11 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
4181{ 4216{
4182 struct nfs_inode *nfsi = NFS_I(state->inode); 4217 struct nfs_inode *nfsi = NFS_I(state->inode);
4183 unsigned char fl_flags = request->fl_flags; 4218 unsigned char fl_flags = request->fl_flags;
4184 int status; 4219 int status = -ENOLCK;
4185 4220
4221 if ((fl_flags & FL_POSIX) &&
4222 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
4223 goto out;
4186 /* Is this a delegated open? */ 4224 /* Is this a delegated open? */
4187 status = nfs4_set_lock_state(state, request); 4225 status = nfs4_set_lock_state(state, request);
4188 if (status != 0) 4226 if (status != 0)
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 6d263ed79e92..c1e2733f4fa4 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -901,7 +901,7 @@ void nfs4_schedule_state_recovery(struct nfs_client *clp)
901 nfs4_schedule_state_manager(clp); 901 nfs4_schedule_state_manager(clp);
902} 902}
903 903
904static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state) 904int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
905{ 905{
906 906
907 set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); 907 set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index e2975939126a..a12c45b65dd4 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -176,6 +176,12 @@ void nfs_release_request(struct nfs_page *req)
176 kref_put(&req->wb_kref, nfs_free_request); 176 kref_put(&req->wb_kref, nfs_free_request);
177} 177}
178 178
179static int nfs_wait_bit_uninterruptible(void *word)
180{
181 io_schedule();
182 return 0;
183}
184
179/** 185/**
180 * nfs_wait_on_request - Wait for a request to complete. 186 * nfs_wait_on_request - Wait for a request to complete.
181 * @req: request to wait upon. 187 * @req: request to wait upon.
@@ -186,14 +192,9 @@ void nfs_release_request(struct nfs_page *req)
186int 192int
187nfs_wait_on_request(struct nfs_page *req) 193nfs_wait_on_request(struct nfs_page *req)
188{ 194{
189 int ret = 0; 195 return wait_on_bit(&req->wb_flags, PG_BUSY,
190 196 nfs_wait_bit_uninterruptible,
191 if (!test_bit(PG_BUSY, &req->wb_flags)) 197 TASK_UNINTERRUPTIBLE);
192 goto out;
193 ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
194 nfs_wait_bit_killable, TASK_KILLABLE);
195out:
196 return ret;
197} 198}
198 199
199/** 200/**
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index ce907efc5508..f1afee4eea77 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -243,6 +243,7 @@ static int nfs_show_stats(struct seq_file *, struct vfsmount *);
243static int nfs_get_sb(struct file_system_type *, int, const char *, void *, struct vfsmount *); 243static int nfs_get_sb(struct file_system_type *, int, const char *, void *, struct vfsmount *);
244static int nfs_xdev_get_sb(struct file_system_type *fs_type, 244static int nfs_xdev_get_sb(struct file_system_type *fs_type,
245 int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt); 245 int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt);
246static void nfs_put_super(struct super_block *);
246static void nfs_kill_super(struct super_block *); 247static void nfs_kill_super(struct super_block *);
247static int nfs_remount(struct super_block *sb, int *flags, char *raw_data); 248static int nfs_remount(struct super_block *sb, int *flags, char *raw_data);
248 249
@@ -266,6 +267,7 @@ static const struct super_operations nfs_sops = {
266 .alloc_inode = nfs_alloc_inode, 267 .alloc_inode = nfs_alloc_inode,
267 .destroy_inode = nfs_destroy_inode, 268 .destroy_inode = nfs_destroy_inode,
268 .write_inode = nfs_write_inode, 269 .write_inode = nfs_write_inode,
270 .put_super = nfs_put_super,
269 .statfs = nfs_statfs, 271 .statfs = nfs_statfs,
270 .clear_inode = nfs_clear_inode, 272 .clear_inode = nfs_clear_inode,
271 .umount_begin = nfs_umount_begin, 273 .umount_begin = nfs_umount_begin,
@@ -335,6 +337,7 @@ static const struct super_operations nfs4_sops = {
335 .alloc_inode = nfs_alloc_inode, 337 .alloc_inode = nfs_alloc_inode,
336 .destroy_inode = nfs_destroy_inode, 338 .destroy_inode = nfs_destroy_inode,
337 .write_inode = nfs_write_inode, 339 .write_inode = nfs_write_inode,
340 .put_super = nfs_put_super,
338 .statfs = nfs_statfs, 341 .statfs = nfs_statfs,
339 .clear_inode = nfs4_clear_inode, 342 .clear_inode = nfs4_clear_inode,
340 .umount_begin = nfs_umount_begin, 343 .umount_begin = nfs_umount_begin,
@@ -2258,6 +2261,17 @@ error_splat_super:
2258} 2261}
2259 2262
2260/* 2263/*
2264 * Ensure that we unregister the bdi before kill_anon_super
2265 * releases the device name
2266 */
2267static void nfs_put_super(struct super_block *s)
2268{
2269 struct nfs_server *server = NFS_SB(s);
2270
2271 bdi_unregister(&server->backing_dev_info);
2272}
2273
2274/*
2261 * Destroy an NFS2/3 superblock 2275 * Destroy an NFS2/3 superblock
2262 */ 2276 */
2263static void nfs_kill_super(struct super_block *s) 2277static void nfs_kill_super(struct super_block *s)
@@ -2265,7 +2279,6 @@ static void nfs_kill_super(struct super_block *s)
2265 struct nfs_server *server = NFS_SB(s); 2279 struct nfs_server *server = NFS_SB(s);
2266 2280
2267 kill_anon_super(s); 2281 kill_anon_super(s);
2268 bdi_unregister(&server->backing_dev_info);
2269 nfs_fscache_release_super_cookie(s); 2282 nfs_fscache_release_super_cookie(s);
2270 nfs_free_server(server); 2283 nfs_free_server(server);
2271} 2284}
diff --git a/fs/nfs/sysctl.c b/fs/nfs/sysctl.c
index 70e1fbbaaeab..ad4d2e787b20 100644
--- a/fs/nfs/sysctl.c
+++ b/fs/nfs/sysctl.c
@@ -15,8 +15,10 @@
15 15
16#include "callback.h" 16#include "callback.h"
17 17
18#ifdef CONFIG_NFS_V4
18static const int nfs_set_port_min = 0; 19static const int nfs_set_port_min = 0;
19static const int nfs_set_port_max = 65535; 20static const int nfs_set_port_max = 65535;
21#endif
20static struct ctl_table_header *nfs_callback_sysctl_table; 22static struct ctl_table_header *nfs_callback_sysctl_table;
21 23
22static ctl_table nfs_cb_sysctls[] = { 24static ctl_table nfs_cb_sysctls[] = {
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index d171696017f4..7b54b8bb101f 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1233,7 +1233,7 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
1233 1233
1234 1234
1235#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) 1235#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
1236void nfs_commitdata_release(void *data) 1236static void nfs_commitdata_release(void *data)
1237{ 1237{
1238 struct nfs_write_data *wdata = data; 1238 struct nfs_write_data *wdata = data;
1239 1239
@@ -1541,6 +1541,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
1541 break; 1541 break;
1542 } 1542 }
1543 ret = nfs_wait_on_request(req); 1543 ret = nfs_wait_on_request(req);
1544 nfs_release_request(req);
1544 if (ret < 0) 1545 if (ret < 0)
1545 goto out; 1546 goto out;
1546 } 1547 }
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index c487810a2366..a0c4016413f1 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -1316,19 +1316,11 @@ rqst_exp_parent(struct svc_rqst *rqstp, struct path *path)
1316 1316
1317static struct svc_export *find_fsidzero_export(struct svc_rqst *rqstp) 1317static struct svc_export *find_fsidzero_export(struct svc_rqst *rqstp)
1318{ 1318{
1319 struct svc_export *exp;
1320 u32 fsidv[2]; 1319 u32 fsidv[2];
1321 1320
1322 mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL); 1321 mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL);
1323 1322
1324 exp = rqst_exp_find(rqstp, FSID_NUM, fsidv); 1323 return rqst_exp_find(rqstp, FSID_NUM, fsidv);
1325 /*
1326 * We shouldn't have accepting an nfsv4 request at all if we
1327 * don't have a pseudoexport!:
1328 */
1329 if (IS_ERR(exp) && PTR_ERR(exp) == -ENOENT)
1330 exp = ERR_PTR(-ESERVERFAULT);
1331 return exp;
1332} 1324}
1333 1325
1334/* 1326/*
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index c194793b642b..97d79eff6b7f 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -752,6 +752,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
752 flags, current_cred()); 752 flags, current_cred());
753 if (IS_ERR(*filp)) 753 if (IS_ERR(*filp))
754 host_err = PTR_ERR(*filp); 754 host_err = PTR_ERR(*filp);
755 host_err = ima_file_check(*filp, access);
755out_nfserr: 756out_nfserr:
756 err = nfserrno(host_err); 757 err = nfserrno(host_err);
757out: 758out:
@@ -2127,7 +2128,6 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
2127 */ 2128 */
2128 path.mnt = exp->ex_path.mnt; 2129 path.mnt = exp->ex_path.mnt;
2129 path.dentry = dentry; 2130 path.dentry = dentry;
2130 err = ima_path_check(&path, acc & (MAY_READ | MAY_WRITE | MAY_EXEC));
2131nfsd_out: 2131nfsd_out:
2132 return err? nfserrno(err) : 0; 2132 return err? nfserrno(err) : 0;
2133} 2133}
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 17584c524486..105b508b47a8 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2829,7 +2829,7 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
2829 || sci->sc_seq_request != sci->sc_seq_done); 2829 || sci->sc_seq_request != sci->sc_seq_done);
2830 spin_unlock(&sci->sc_state_lock); 2830 spin_unlock(&sci->sc_state_lock);
2831 2831
2832 if (flag || nilfs_segctor_confirm(sci)) 2832 if (flag || !nilfs_segctor_confirm(sci))
2833 nilfs_segctor_write_out(sci); 2833 nilfs_segctor_write_out(sci);
2834 2834
2835 WARN_ON(!list_empty(&sci->sc_copied_buffers)); 2835 WARN_ON(!list_empty(&sci->sc_copied_buffers));
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 3dae4a13f6e4..7e9df11260f4 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -599,7 +599,7 @@ bail:
599 return ret; 599 return ret;
600} 600}
601 601
602/* 602/*
603 * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're 603 * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're
604 * particularly interested in the aio/dio case. Like the core uses 604 * particularly interested in the aio/dio case. Like the core uses
605 * i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from 605 * i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from
@@ -670,7 +670,7 @@ static ssize_t ocfs2_direct_IO(int rw,
670 670
671 ret = blockdev_direct_IO_no_locking(rw, iocb, inode, 671 ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
672 inode->i_sb->s_bdev, iov, offset, 672 inode->i_sb->s_bdev, iov, offset,
673 nr_segs, 673 nr_segs,
674 ocfs2_direct_IO_get_blocks, 674 ocfs2_direct_IO_get_blocks,
675 ocfs2_dio_end_io); 675 ocfs2_dio_end_io);
676 676
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index d43d34a1dd31..21c808f752d8 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -368,7 +368,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
368 } 368 }
369 ocfs2_metadata_cache_io_unlock(ci); 369 ocfs2_metadata_cache_io_unlock(ci);
370 370
371 mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n", 371 mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n",
372 (unsigned long long)block, nr, 372 (unsigned long long)block, nr,
373 ((flags & OCFS2_BH_IGNORE_CACHE) || ignore_cache) ? "no" : "yes", 373 ((flags & OCFS2_BH_IGNORE_CACHE) || ignore_cache) ? "no" : "yes",
374 flags); 374 flags);
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index eda5b8bcddd5..5c9890006708 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -78,7 +78,7 @@ static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type);
78 78
79unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD; 79unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD;
80 80
81/* Only sets a new threshold if there are no active regions. 81/* Only sets a new threshold if there are no active regions.
82 * 82 *
83 * No locking or otherwise interesting code is required for reading 83 * No locking or otherwise interesting code is required for reading
84 * o2hb_dead_threshold as it can't change once regions are active and 84 * o2hb_dead_threshold as it can't change once regions are active and
@@ -170,7 +170,7 @@ static void o2hb_write_timeout(struct work_struct *work)

 	mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u "
 	     "milliseconds\n", reg->hr_dev_name,
 	     jiffies_to_msecs(jiffies - reg->hr_last_timeout_start));
 	o2quo_disk_timeout();
 }

@@ -624,7 +624,7 @@ static int o2hb_check_slot(struct o2hb_region *reg,
624 "seq %llu last %llu changed %u equal %u\n", 624 "seq %llu last %llu changed %u equal %u\n",
625 slot->ds_node_num, (long long)slot->ds_last_generation, 625 slot->ds_node_num, (long long)slot->ds_last_generation,
626 le32_to_cpu(hb_block->hb_cksum), 626 le32_to_cpu(hb_block->hb_cksum),
627 (unsigned long long)le64_to_cpu(hb_block->hb_seq), 627 (unsigned long long)le64_to_cpu(hb_block->hb_seq),
628 (unsigned long long)slot->ds_last_time, slot->ds_changed_samples, 628 (unsigned long long)slot->ds_last_time, slot->ds_changed_samples,
629 slot->ds_equal_samples); 629 slot->ds_equal_samples);
630 630
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 334f231a422c..d8d0c65ac03c 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -485,7 +485,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
 	}

 	if (was_valid && !valid) {
-		printk(KERN_INFO "o2net: no longer connected to "
+		printk(KERN_NOTICE "o2net: no longer connected to "
 		       SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc));
 		o2net_complete_nodes_nsw(nn);
 	}
@@ -493,7 +493,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
 	if (!was_valid && valid) {
 		o2quo_conn_up(o2net_num_from_nn(nn));
 		cancel_delayed_work(&nn->nn_connect_expired);
-		printk(KERN_INFO "o2net: %s " SC_NODEF_FMT "\n",
+		printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n",
 		       o2nm_this_node() > sc->sc_node->nd_num ?
 		       "connected to" : "accepted connection from",
 		       SC_NODEF_ARGS(sc));
@@ -930,7 +930,7 @@ static void o2net_sendpage(struct o2net_sock_container *sc,
 			cond_resched();
 			continue;
 		}
 		mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT
 		     " failed with %zd\n", size, SC_NODEF_ARGS(sc), ret);
 		o2net_ensure_shutdown(nn, sc, 0);
 		break;
@@ -1476,14 +1476,14 @@ static void o2net_idle_timer(unsigned long data)

 	do_gettimeofday(&now);

-	printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u "
+	printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u "
 	       "seconds, shutting it down.\n", SC_NODEF_ARGS(sc),
 	       o2net_idle_timeout() / 1000,
 	       o2net_idle_timeout() % 1000);
 	mlog(ML_NOTICE, "here are some times that might help debug the "
 	     "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv "
 	     "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n",
 	     sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec,
 	     now.tv_sec, (long) now.tv_usec,
 	     sc->sc_tv_data_ready.tv_sec, (long) sc->sc_tv_data_ready.tv_usec,
 	     sc->sc_tv_advance_start.tv_sec,
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index 8d58cfe410b1..96fa7ebc530c 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -32,10 +32,10 @@
  * on their number */
 #define O2NET_QUORUM_DELAY_MS	((o2hb_dead_threshold + 2) * O2HB_REGION_TIMEOUT_MS)

 /*
  * This version number represents quite a lot, unfortunately. It not
  * only represents the raw network message protocol on the wire but also
  * locking semantics of the file system using the protocol. It should
  * be somewhere else, I'm sure, but right now it isn't.
  *
  * With version 11, we separate out the filesystem locking portion. The
diff --git a/fs/ocfs2/dlm/dlmapi.h b/fs/ocfs2/dlm/dlmapi.h
index b5786a787fab..3cfa114aa391 100644
--- a/fs/ocfs2/dlm/dlmapi.h
+++ b/fs/ocfs2/dlm/dlmapi.h
@@ -95,7 +95,7 @@ const char *dlm_errname(enum dlm_status err);
 		mlog(ML_ERROR, "dlm status = %s\n", dlm_errname((st)));	\
 } while (0)

 #define DLM_LKSB_UNUSED1	0x01
 #define DLM_LKSB_PUT_LVB	0x02
 #define DLM_LKSB_GET_LVB	0x04
 #define DLM_LKSB_UNUSED2	0x08
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index 01cf8cc3d286..dccc439fa087 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -123,7 +123,7 @@ static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 		dlm_lock_put(lock);
 		/* free up the reserved bast that we are cancelling.
 		 * guaranteed that this will not be the last reserved
 		 * ast because *both* an ast and a bast were reserved
 		 * to get to this point. the res->spinlock will not be
 		 * taken here */
 		dlm_lockres_release_ast(dlm, res);
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
index ca96bce50e18..f283bce776b4 100644
--- a/fs/ocfs2/dlm/dlmconvert.c
+++ b/fs/ocfs2/dlm/dlmconvert.c
@@ -396,7 +396,7 @@ static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
 			/* instead of logging the same network error over
 			 * and over, sleep here and wait for the heartbeat
 			 * to notice the node is dead. times out after 5s. */
 			dlm_wait_for_node_death(dlm, res->owner,
 						DLM_NODE_DEATH_WAIT_MAX);
 			ret = DLM_RECOVERING;
 			mlog(0, "node %u died so returning DLM_RECOVERING "
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index 42b0bad7a612..0cd24cf54396 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -102,7 +102,7 @@ void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
 	assert_spin_locked(&res->spinlock);

 	stringify_lockname(res->lockname.name, res->lockname.len,
-			   buf, sizeof(buf) - 1);
+			   buf, sizeof(buf));
 	printk("lockres: %s, owner=%u, state=%u\n",
 	       buf, res->owner, res->state);
 	printk(" last used: %lu, refcnt: %u, on purge list: %s\n",
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 0334000676d3..988c9055fd4e 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -816,7 +816,7 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
 	}

 	/* Once the dlm ctxt is marked as leaving then we don't want
 	 * to be put in someone's domain map.
 	 * Also, explicitly disallow joining at certain troublesome
 	 * times (ie. during recovery). */
 	if (dlm && dlm->dlm_state != DLM_CTXT_LEAVING) {
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 437698e9465f..733337772671 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -269,7 +269,7 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
 		}
 		dlm_revert_pending_lock(res, lock);
 		dlm_lock_put(lock);
 	} else if (dlm_is_recovery_lock(res->lockname.name,
 					res->lockname.len)) {
 		/* special case for the $RECOVERY lock.
 		 * there will never be an AST delivered to put
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 03ccf9a7b1f4..a659606dcb95 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -366,7 +366,7 @@ void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
 	struct dlm_master_list_entry *mle;

 	assert_spin_locked(&dlm->spinlock);

 	list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
 		if (node_up)
 			dlm_mle_node_up(dlm, mle, NULL, idx);
@@ -833,7 +833,7 @@ lookup:
 	__dlm_insert_mle(dlm, mle);

 	/* still holding the dlm spinlock, check the recovery map
 	 * to see if there are any nodes that still need to be
 	 * considered. these will not appear in the mle nodemap
 	 * but they might own this lockres. wait on them. */
 	bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
@@ -883,7 +883,7 @@ redo_request:
 				msleep(500);
 			}
 			continue;
 		}

 		dlm_kick_recovery_thread(dlm);
 		msleep(1000);
@@ -939,8 +939,8 @@ wait:
 			     res->lockname.name, blocked);
 		if (++tries > 20) {
 			mlog(ML_ERROR, "%s:%.*s: spinning on "
 			     "dlm_wait_for_lock_mastery, blocked=%d\n",
 			     dlm->name, res->lockname.len,
 			     res->lockname.name, blocked);
 			dlm_print_one_lock_resource(res);
 			dlm_print_one_mle(mle);
@@ -1029,7 +1029,7 @@ recheck:
 		ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
 		b = (mle->type == DLM_MLE_BLOCK);
 		if ((*blocked && !b) || (!*blocked && b)) {
 			mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
 			     dlm->name, res->lockname.len, res->lockname.name,
 			     *blocked, b);
 			*blocked = b;
@@ -1602,7 +1602,7 @@ send_response:
 	}
 	mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
 	     dlm->node_num, res->lockname.len, res->lockname.name);
 	ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
 					 DLM_ASSERT_MASTER_MLE_CLEANUP);
 	if (ret < 0) {
 		mlog(ML_ERROR, "failed to dispatch assert master work\n");
@@ -1701,7 +1701,7 @@ again:

 		if (r & DLM_ASSERT_RESPONSE_REASSERT) {
 			mlog(0, "%.*s: node %u create mles on other "
 			     "nodes and requests a re-assert\n",
 			     namelen, lockname, to);
 			reassert = 1;
 		}
@@ -1812,7 +1812,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
 				spin_unlock(&dlm->master_lock);
 				spin_unlock(&dlm->spinlock);
 				goto done;
 			}
 		}
 	}
 	spin_unlock(&dlm->master_lock);
@@ -1883,7 +1883,7 @@ ok:
 		int extra_ref = 0;
 		int nn = -1;
 		int rr, err = 0;

 		spin_lock(&mle->spinlock);
 		if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
 			extra_ref = 1;
@@ -1891,7 +1891,7 @@ ok:
 			/* MASTER mle: if any bits set in the response map
 			 * then the calling node needs to re-assert to clear
 			 * up nodes that this node contacted */
 			while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
 						    nn+1)) < O2NM_MAX_NODES) {
 				if (nn != dlm->node_num && nn != assert->node_idx)
 					master_request = 1;
@@ -2002,7 +2002,7 @@ kill:
 	__dlm_print_one_lock_resource(res);
 	spin_unlock(&res->spinlock);
 	spin_unlock(&dlm->spinlock);
 	*ret_data = (void *)res;
 	dlm_put(dlm);
 	return -EINVAL;
 }
@@ -2040,10 +2040,10 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
 	item->u.am.request_from = request_from;
 	item->u.am.flags = flags;

 	if (ignore_higher)
 		mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
 		     res->lockname.name);

 	spin_lock(&dlm->work_lock);
 	list_add_tail(&item->list, &dlm->work_list);
 	spin_unlock(&dlm->work_lock);
@@ -2133,7 +2133,7 @@ put:
  * think that $RECOVERY is currently mastered by a dead node. If so,
  * we wait a short time to allow that node to get notified by its own
  * heartbeat stack, then check again. All $RECOVERY lock resources
  * mastered by dead nodes are purged when the hearbeat callback is
  * fired, so we can know for sure that it is safe to continue once
  * the node returns a live node or no node. */
 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
@@ -2174,7 +2174,7 @@ static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
 			ret = -EAGAIN;
 		}
 		spin_unlock(&dlm->spinlock);
 		mlog(0, "%s: reco lock master is %u\n", dlm->name,
 		     master);
 		break;
 	}
@@ -2602,7 +2602,7 @@ fail:

 	mlog(0, "%s:%.*s: timed out during migration\n",
 	     dlm->name, res->lockname.len, res->lockname.name);
 	/* avoid hang during shutdown when migrating lockres
 	 * to a node which also goes down */
 	if (dlm_is_node_dead(dlm, target)) {
 		mlog(0, "%s:%.*s: expected migration "
@@ -2738,7 +2738,7 @@ static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
 	can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
 	spin_unlock(&res->spinlock);

 	/* target has died, so make the caller break out of the
 	 * wait_event, but caller must recheck the domain_map */
 	spin_lock(&dlm->spinlock);
 	if (!test_bit(mig_target, dlm->domain_map))
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 2f9e4e19a4f2..344bcf90cbf4 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1050,7 +1050,7 @@ static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
 			if (lock->ml.node == dead_node) {
 				mlog(0, "AHA! there was "
 				     "a $RECOVERY lock for dead "
 				     "node %u (%s)!\n",
 				     dead_node, dlm->name);
 				list_del_init(&lock->list);
 				dlm_lock_put(lock);
@@ -1164,6 +1164,39 @@ static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
 	mres->master = master;
 }

+static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock,
+					  struct dlm_migratable_lockres *mres,
+					  int queue)
+{
+	if (!lock->lksb)
+		return;
+
+	/* Ignore lvb in all locks in the blocked list */
+	if (queue == DLM_BLOCKED_LIST)
+		return;
+
+	/* Only consider lvbs in locks with granted EX or PR lock levels */
+	if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE)
+		return;
+
+	if (dlm_lvb_is_empty(mres->lvb)) {
+		memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
+		return;
+	}
+
+	/* Ensure the lvb copied for migration matches in other valid locks */
+	if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))
+		return;
+
+	mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, "
+	     "node=%u\n",
+	     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+	     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+	     lock->lockres->lockname.len, lock->lockres->lockname.name,
+	     lock->ml.node);
+	dlm_print_one_lock_resource(lock->lockres);
+	BUG();
+}

 /* returns 1 if this lock fills the network structure,
  * 0 otherwise */
@@ -1181,20 +1214,7 @@ static int dlm_add_lock_to_array(struct dlm_lock *lock,
 	ml->list = queue;
 	if (lock->lksb) {
 		ml->flags = lock->lksb->flags;
-		/* send our current lvb */
-		if (ml->type == LKM_EXMODE ||
-		    ml->type == LKM_PRMODE) {
-			/* if it is already set, this had better be a PR
-			 * and it has to match */
-			if (!dlm_lvb_is_empty(mres->lvb) &&
-			    (ml->type == LKM_EXMODE ||
-			     memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) {
-				mlog(ML_ERROR, "mismatched lvbs!\n");
-				dlm_print_one_lock_resource(lock->lockres);
-				BUG();
-			}
-			memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
-		}
+		dlm_prepare_lvb_for_migration(lock, mres, queue);
 	}
 	ml->node = lock->ml.node;
 	mres->num_locks++;
@@ -1730,6 +1750,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
 	struct dlm_lock *lock = NULL;
 	u8 from = O2NM_MAX_NODES;
 	unsigned int added = 0;
+	__be64 c;

 	mlog(0, "running %d locks for this lockres\n", mres->num_locks);
 	for (i=0; i<mres->num_locks; i++) {
@@ -1777,19 +1798,48 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
 		/* lock is always created locally first, and
 		 * destroyed locally last. it must be on the list */
 		if (!lock) {
-			__be64 c = ml->cookie;
-			mlog(ML_ERROR, "could not find local lock "
-			     "with cookie %u:%llu!\n",
+			c = ml->cookie;
+			mlog(ML_ERROR, "Could not find local lock "
+			     "with cookie %u:%llu, node %u, "
+			     "list %u, flags 0x%x, type %d, "
+			     "conv %d, highest blocked %d\n",
 			     dlm_get_lock_cookie_node(be64_to_cpu(c)),
-			     dlm_get_lock_cookie_seq(be64_to_cpu(c)));
+			     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
+			     ml->node, ml->list, ml->flags, ml->type,
+			     ml->convert_type, ml->highest_blocked);
+			__dlm_print_one_lock_resource(res);
+			BUG();
+		}
+
+		if (lock->ml.node != ml->node) {
+			c = lock->ml.cookie;
+			mlog(ML_ERROR, "Mismatched node# in lock "
+			     "cookie %u:%llu, name %.*s, node %u\n",
+			     dlm_get_lock_cookie_node(be64_to_cpu(c)),
+			     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
+			     res->lockname.len, res->lockname.name,
+			     lock->ml.node);
+			c = ml->cookie;
+			mlog(ML_ERROR, "Migrate lock cookie %u:%llu, "
+			     "node %u, list %u, flags 0x%x, type %d, "
+			     "conv %d, highest blocked %d\n",
+			     dlm_get_lock_cookie_node(be64_to_cpu(c)),
+			     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
+			     ml->node, ml->list, ml->flags, ml->type,
+			     ml->convert_type, ml->highest_blocked);
 			__dlm_print_one_lock_resource(res);
 			BUG();
 		}
-		BUG_ON(lock->ml.node != ml->node);

 		if (tmpq != queue) {
-			mlog(0, "lock was on %u instead of %u for %.*s\n",
-			     j, ml->list, res->lockname.len, res->lockname.name);
+			c = ml->cookie;
+			mlog(0, "Lock cookie %u:%llu was on list %u "
+			     "instead of list %u for %.*s\n",
+			     dlm_get_lock_cookie_node(be64_to_cpu(c)),
+			     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
+			     j, ml->list, res->lockname.len,
+			     res->lockname.name);
+			__dlm_print_one_lock_resource(res);
 			spin_unlock(&res->spinlock);
 			continue;
 		}
@@ -1839,7 +1889,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
 			 * the lvb. */
 			memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
 		} else {
 			/* otherwise, the node is sending its
 			 * most recent valid lvb info */
 			BUG_ON(ml->type != LKM_EXMODE &&
 			       ml->type != LKM_PRMODE);
@@ -1886,7 +1936,7 @@ skip_lvb:
 		spin_lock(&res->spinlock);
 		list_for_each_entry(lock, queue, list) {
 			if (lock->ml.cookie == ml->cookie) {
-				__be64 c = lock->ml.cookie;
+				c = lock->ml.cookie;
 				mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
 				     "exists on this lockres!\n", dlm->name,
 				     res->lockname.len, res->lockname.name,
@@ -2114,7 +2164,7 @@ static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
 	assert_spin_locked(&res->spinlock);

 	if (res->owner == dlm->node_num)
 		/* if this node owned the lockres, and if the dead node
 		 * had an EX when he died, blank out the lvb */
 		search_node = dead_node;
 	else {
@@ -2152,7 +2202,7 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,

 	/* this node is the lockres master:
 	 * 1) remove any stale locks for the dead node
 	 * 2) if the dead node had an EX when he died, blank out the lvb
 	 */
 	assert_spin_locked(&dlm->spinlock);
 	assert_spin_locked(&res->spinlock);
@@ -2193,7 +2243,12 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
 		mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
 		     "dropping ref from lockres\n", dlm->name,
 		     res->lockname.len, res->lockname.name, freed, dead_node);
-		BUG_ON(!test_bit(dead_node, res->refmap));
+		if(!test_bit(dead_node, res->refmap)) {
+			mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, "
+			     "but ref was not set\n", dlm->name,
+			     res->lockname.len, res->lockname.name, freed, dead_node);
+			__dlm_print_one_lock_resource(res);
+		}
 		dlm_lockres_clear_refmap_bit(dead_node, res);
 	} else if (test_bit(dead_node, res->refmap)) {
 		mlog(0, "%s:%.*s: dead node %u had a ref, but had "
@@ -2260,7 +2315,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
 			}
 			spin_unlock(&res->spinlock);
 			continue;
 		}
 		spin_lock(&res->spinlock);
 		/* zero the lvb if necessary */
 		dlm_revalidate_lvb(dlm, res, dead_node);
@@ -2411,7 +2466,7 @@ static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
  * this function on each node racing to become the recovery
  * master will not stop attempting this until either:
  * a) this node gets the EX (and becomes the recovery master),
  * or b) dlm->reco.new_master gets set to some nodenum
  * != O2NM_INVALID_NODE_NUM (another node will do the reco).
  * so each time a recovery master is needed, the entire cluster
  * will sync at this point. if the new master dies, that will
@@ -2424,7 +2479,7 @@ static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)

 	mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
 	     dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
 again:
 	memset(&lksb, 0, sizeof(lksb));

 	ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
@@ -2437,8 +2492,8 @@ again:
 	if (ret == DLM_NORMAL) {
 		mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
 		     dlm->name, dlm->node_num);

 		/* got the EX lock. check to see if another node
 		 * just became the reco master */
 		if (dlm_reco_master_ready(dlm)) {
 			mlog(0, "%s: got reco EX lock, but %u will "
@@ -2451,12 +2506,12 @@ again:
 		/* see if recovery was already finished elsewhere */
 		spin_lock(&dlm->spinlock);
 		if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
 			status = -EINVAL;
 			mlog(0, "%s: got reco EX lock, but "
 			     "node got recovered already\n", dlm->name);
 			if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
 				mlog(ML_ERROR, "%s: new master is %u "
 				     "but no dead node!\n",
 				     dlm->name, dlm->reco.new_master);
 				BUG();
 			}
@@ -2468,7 +2523,7 @@ again:
 	 * set the master and send the messages to begin recovery */
 	if (!status) {
 		mlog(0, "%s: dead=%u, this=%u, sending "
 		     "begin_reco now\n", dlm->name,
 		     dlm->reco.dead_node, dlm->node_num);
 		status = dlm_send_begin_reco_message(dlm,
 			      dlm->reco.dead_node);
@@ -2501,7 +2556,7 @@ again:
 		mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
 		     dlm->name, dlm->node_num);
 		/* another node is master. wait on
 		 * reco.new_master != O2NM_INVALID_NODE_NUM
 		 * for at most one second */
 		wait_event_timeout(dlm->dlm_reco_thread_wq,
 				   dlm_reco_master_ready(dlm),
@@ -2589,7 +2644,13 @@ retry:
2589 "begin reco msg (%d)\n", dlm->name, nodenum, ret); 2644 "begin reco msg (%d)\n", dlm->name, nodenum, ret);
2590 ret = 0; 2645 ret = 0;
2591 } 2646 }
2592 if (ret == -EAGAIN) { 2647
2648 /*
2649 * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8,
2650 * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN.
2651 * We are handling both for compatibility reasons.
2652 */
2653 if (ret == -EAGAIN || ret == EAGAIN) {
2593 mlog(0, "%s: trying to start recovery of node " 2654 mlog(0, "%s: trying to start recovery of node "
2594 "%u, but node %u is waiting for last recovery " 2655 "%u, but node %u is waiting for last recovery "
2595 "to complete, backoff for a bit\n", dlm->name, 2656 "to complete, backoff for a bit\n", dlm->name,
@@ -2599,7 +2660,7 @@ retry:
 		}
 		if (ret < 0) {
 			struct dlm_lock_resource *res;
 			/* this is now a serious problem, possibly ENOMEM
 			 * in the network stack. must retry */
 			mlog_errno(ret);
 			mlog(ML_ERROR, "begin reco of dlm %s to node %u "
@@ -2612,7 +2673,7 @@ retry:
 			} else {
 				mlog(ML_ERROR, "recovery lock not found\n");
 			}
 			/* sleep for a bit in hopes that we can avoid
 			 * another ENOMEM */
 			msleep(100);
 			goto retry;
@@ -2664,7 +2725,7 @@ int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
 	}
 	if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
 		mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
 		     "node %u changing it to %u\n", dlm->name,
 		     dlm->reco.dead_node, br->node_idx, br->dead_node);
 	}
 	dlm_set_reco_master(dlm, br->node_idx);
@@ -2730,8 +2791,8 @@ stage2:
 	if (ret < 0) {
 		mlog_errno(ret);
 		if (dlm_is_host_down(ret)) {
 			/* this has no effect on this recovery
 			 * session, so set the status to zero to
 			 * finish out the last recovery */
 			mlog(ML_ERROR, "node %u went down after this "
 			     "node finished recovery.\n", nodenum);
@@ -2768,7 +2829,7 @@ int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
 	mlog(0, "%s: node %u finalizing recovery stage%d of "
 	     "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
 	     fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);

 	spin_lock(&dlm->spinlock);

 	if (dlm->reco.new_master != fr->node_idx) {
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index 00f53b2aea76..49e29ecd0201 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -190,8 +190,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
 			actions &= ~(DLM_UNLOCK_REMOVE_LOCK|
 				     DLM_UNLOCK_REGRANT_LOCK|
 				     DLM_UNLOCK_CLEAR_CONVERT_TYPE);
 		} else if (status == DLM_RECOVERING ||
 			   status == DLM_MIGRATING ||
 			   status == DLM_FORWARD) {
 			/* must clear the actions because this unlock
 			 * is about to be retried. cannot free or do
@@ -661,14 +661,14 @@ retry:
 	if (call_ast) {
 		mlog(0, "calling unlockast(%p, %d)\n", data, status);
 		if (is_master) {
 			/* it is possible that there is one last bast
 			 * pending. make sure it is flushed, then
 			 * call the unlockast.
 			 * not an issue if this is a mastered remotely,
 			 * since this lock has been removed from the
 			 * lockres queues and cannot be found. */
 			dlm_kick_thread(dlm, NULL);
 			wait_event(dlm->ast_wq,
 				   dlm_lock_basts_flushed(dlm, lock));
 		}
 		(*unlockast)(data, status);
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index c5e4a49e3a12..e044019cb3b1 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -875,6 +875,14 @@ static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lo
 		lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);

 	lockres->l_level = lockres->l_requested;
+
+	/*
+	 * We set the OCFS2_LOCK_UPCONVERT_FINISHING flag before clearing
+	 * the OCFS2_LOCK_BUSY flag to prevent the dc thread from
+	 * downconverting the lock before the upconvert has fully completed.
+	 */
+	lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
+
 	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);

 	mlog_exit_void();
@@ -907,8 +915,6 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,

 	assert_spin_locked(&lockres->l_lock);

-	lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
-
 	if (level > lockres->l_blocking) {
 		/* only schedule a downconvert if we haven't already scheduled
 		 * one that goes low enough to satisfy the level we're
@@ -921,6 +927,9 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
 		lockres->l_blocking = level;
 	}

+	if (needs_downconvert)
+		lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
+
 	mlog_exit(needs_downconvert);
 	return needs_downconvert;
 }
@@ -1133,6 +1142,7 @@ static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
 	mlog_entry_void();
 	spin_lock_irqsave(&lockres->l_lock, flags);
 	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
+	lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
 	if (convert)
 		lockres->l_action = OCFS2_AST_INVALID;
 	else
@@ -1323,13 +1333,13 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
 again:
 	wait = 0;

+	spin_lock_irqsave(&lockres->l_lock, flags);
+
 	if (catch_signals && signal_pending(current)) {
 		ret = -ERESTARTSYS;
-		goto out;
+		goto unlock;
 	}

-	spin_lock_irqsave(&lockres->l_lock, flags);
-
 	mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
 			"Cluster lock called on freeing lockres %s! flags "
 			"0x%lx\n", lockres->l_name, lockres->l_flags);
@@ -1346,6 +1356,25 @@ again:
 		goto unlock;
 	}

+	if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) {
+		/*
+		 * We've upconverted. If the lock now has a level we can
+		 * work with, we take it. If, however, the lock is not at the
+		 * required level, we go thru the full cycle. One way this could
+		 * happen is if a process requesting an upconvert to PR is
+		 * closely followed by another requesting upconvert to an EX.
+		 * If the process requesting EX lands here, we want it to
+		 * continue attempting to upconvert and let the process
+		 * requesting PR take the lock.
+		 * If multiple processes request upconvert to PR, the first one
+		 * here will take the lock. The others will have to go thru the
+		 * OCFS2_LOCK_BLOCKED check to ensure that there is no pending
+		 * downconvert request.
+		 */
+		if (level <= lockres->l_level)
+			goto update_holders;
+	}
+
 	if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
 	    !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
 		/* is the lock is currently blocked on behalf of
@@ -1416,11 +1445,14 @@ again:
 		goto again;
 	}

+update_holders:
 	/* Ok, if we get here then we're good to go. */
 	ocfs2_inc_holders(lockres, level);

 	ret = 0;
 unlock:
+	lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
+
 	spin_unlock_irqrestore(&lockres->l_lock, flags);
 out:
 	/*
@@ -3155,7 +3187,7 @@ out:
 /* Mark the lockres as being dropped. It will no longer be
  * queued if blocking, but we still may have to wait on it
  * being dequeued from the downconvert thread before we can consider
  * it safe to drop.
  *
  * You can *not* attempt to call cluster_lock on this lockres anymore. */
 void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres)
@@ -3352,6 +3384,7 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb,
 	unsigned long flags;
 	int blocking;
 	int new_level;
+	int level;
 	int ret = 0;
 	int set_lvb = 0;
 	unsigned int gen;
@@ -3360,9 +3393,17 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb,

 	spin_lock_irqsave(&lockres->l_lock, flags);

-	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
-
 recheck:
+	/*
+	 * Is it still blocking? If not, we have no more work to do.
+	 */
+	if (!(lockres->l_flags & OCFS2_LOCK_BLOCKED)) {
+		BUG_ON(lockres->l_blocking != DLM_LOCK_NL);
+		spin_unlock_irqrestore(&lockres->l_lock, flags);
+		ret = 0;
+		goto leave;
+	}
+
 	if (lockres->l_flags & OCFS2_LOCK_BUSY) {
 		/* XXX
 		 * This is a *big* race. The OCFS2_LOCK_PENDING flag
@@ -3401,6 +3442,31 @@ recheck:
 		goto leave;
 	}

+	/*
+	 * This prevents livelocks. OCFS2_LOCK_UPCONVERT_FINISHING flag is
+	 * set when the ast is received for an upconvert just before the
+	 * OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast
+	 * on the heels of the ast, we want to delay the downconvert just
+	 * enough to allow the up requestor to do its task. Because this
+	 * lock is in the blocked queue, the lock will be downconverted
+	 * as soon as the requestor is done with the lock.
+	 */
+	if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING)
+		goto leave_requeue;
+
+	/*
+	 * How can we block and yet be at NL? We were trying to upconvert
+	 * from NL and got canceled. The code comes back here, and now
+	 * we notice and clear BLOCKING.
+	 */
+	if (lockres->l_level == DLM_LOCK_NL) {
+		BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders);
+		lockres->l_blocking = DLM_LOCK_NL;
+		lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
+		spin_unlock_irqrestore(&lockres->l_lock, flags);
+		goto leave;
+	}
+
 	/* if we're blocking an exclusive and we have *any* holders,
 	 * then requeue. */
 	if ((lockres->l_blocking == DLM_LOCK_EX)
@@ -3438,6 +3504,7 @@ recheck:
 	 * may sleep, so we save off a copy of what we're blocking as
 	 * it may change while we're not holding the spin lock. */
 	blocking = lockres->l_blocking;
+	level = lockres->l_level;
 	spin_unlock_irqrestore(&lockres->l_lock, flags);

 	ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
@@ -3446,7 +3513,7 @@ recheck:
 		goto leave;

 	spin_lock_irqsave(&lockres->l_lock, flags);
-	if (blocking != lockres->l_blocking) {
+	if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) {
 		/* If this changed underneath us, then we can't drop
 		 * it just yet. */
 		goto recheck;
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index 15713cbb865c..19ad145d2af3 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -239,7 +239,7 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len,
 		mlog(0, "Encoding parent: blkno: %llu, generation: %u\n",
 		     (unsigned long long)blkno, generation);
 	}

 	*max_len = len;

 bail:
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index d35a27f4523e..5328529e7fd2 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -192,7 +192,7 @@ static int ocfs2_try_to_merge_extent_map(struct ocfs2_extent_map_item *emi,
 		emi->ei_clusters += ins->ei_clusters;
 		return 1;
 	} else if ((ins->ei_phys + ins->ei_clusters) == emi->ei_phys &&
-		   (ins->ei_cpos + ins->ei_clusters) == emi->ei_phys &&
+		   (ins->ei_cpos + ins->ei_clusters) == emi->ei_cpos &&
 		   ins->ei_flags == emi->ei_flags) {
 		emi->ei_phys = ins->ei_phys;
 		emi->ei_cpos = ins->ei_cpos;
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 06ccf6a86d35..558ce0312421 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -749,7 +749,7 @@ static int ocfs2_write_zero_page(struct inode *inode,
 	int ret;

 	offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
 	/* ugh. in prepare/commit_write, if from==to==start of block, we
 	** skip the prepare. make sure we never send an offset for the start
 	** of a block
 	*/
@@ -1779,7 +1779,7 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
 	struct inode *inode = dentry->d_inode;
 	loff_t saved_pos, end;

 	/*
 	 * We start with a read level meta lock and only jump to an ex
 	 * if we need to make modifications here.
 	 */
@@ -2013,8 +2013,8 @@ out_dio:
 	/* buffered aio wouldn't have proper lock coverage today */
 	BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));

-	if ((file->f_flags & O_DSYNC && !direct_io) || IS_SYNC(inode) ||
-	    (file->f_flags & O_DIRECT && has_refcount)) {
+	if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
+	    ((file->f_flags & O_DIRECT) && has_refcount)) {
 		ret = filemap_fdatawrite_range(file->f_mapping, pos,
 					       pos + count - 1);
 		if (ret < 0)
@@ -2033,7 +2033,7 @@ out_dio:
 					       pos + count - 1);
 	}

 	/*
 	 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
 	 * function pointer which is called when o_direct io completes so that
 	 * it can unlock our rw lock. (it's the clustered equivalent of
@@ -2198,7 +2198,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
 		goto bail;
 	}

 	/*
 	 * buffered reads protect themselves in ->readpage(). O_DIRECT reads
 	 * need locks to protect pending reads from racing with truncate.
 	 */
@@ -2220,10 +2220,10 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
 	 * We're fine letting folks race truncates and extending
 	 * writes with read across the cluster, just like they can
 	 * locally. Hence no rw_lock during read.
 	 *
 	 * Take and drop the meta data lock to update inode fields
 	 * like i_size. This allows the checks down below
 	 * generic_file_aio_read() a chance of actually working.
 	 */
 	ret = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level);
 	if (ret < 0) {
@@ -2248,7 +2248,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
 bail:
 	if (have_alloc_sem)
 		up_read(&inode->i_alloc_sem);
 	if (rw_level != -1)
 		ocfs2_rw_unlock(inode, rw_level);
 	mlog_exit(ret);

diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 0297fb8982b8..88459bdd1ff3 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -475,7 +475,7 @@ static int ocfs2_read_locked_inode(struct inode *inode,
 	if (args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY) {
 		status = ocfs2_try_open_lock(inode, 0);
 		if (status) {
 			make_bad_inode(inode);
 			return status;
 		}
 	}
@@ -684,7 +684,7 @@ bail:
 	return status;
 }

 /*
  * Serialize with orphan dir recovery. If the process doing
  * recovery on this orphan dir does an iget() with the dir
  * i_mutex held, we'll deadlock here. Instead we detect this
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index 31fbb0619510..7d9d9c132cef 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -7,6 +7,7 @@

 #include <linux/fs.h>
 #include <linux/mount.h>
+#include <linux/compat.h>

 #define MLOG_MASK_PREFIX ML_INODE
 #include <cluster/masklog.h>
@@ -181,6 +182,10 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 #ifdef CONFIG_COMPAT
 long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 {
+	bool preserve;
+	struct reflink_arguments args;
+	struct inode *inode = file->f_path.dentry->d_inode;
+
 	switch (cmd) {
 	case OCFS2_IOC32_GETFLAGS:
 		cmd = OCFS2_IOC_GETFLAGS;
@@ -195,8 +200,15 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 	case OCFS2_IOC_GROUP_EXTEND:
 	case OCFS2_IOC_GROUP_ADD:
 	case OCFS2_IOC_GROUP_ADD64:
-	case OCFS2_IOC_REFLINK:
 		break;
+	case OCFS2_IOC_REFLINK:
+		if (copy_from_user(&args, (struct reflink_arguments *)arg,
+				   sizeof(args)))
+			return -EFAULT;
+		preserve = (args.preserve != 0);
+
+		return ocfs2_reflink_ioctl(inode, compat_ptr(args.old_path),
+					   compat_ptr(args.new_path), preserve);
 	default:
 		return -ENOIOCTLCMD;
 	}
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index bf34c491ae96..9336c60e3a36 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -2034,7 +2034,7 @@ static int ocfs2_queue_orphans(struct ocfs2_super *osb,
 		status = -ENOENT;
 		mlog_errno(status);
 		return status;
 	}
 
 	mutex_lock(&orphan_dir_inode->i_mutex);
 	status = ocfs2_inode_lock(orphan_dir_inode, NULL, 0);
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 9362eea7424b..740f448041e2 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -136,6 +136,10 @@ enum ocfs2_unlock_action {
 #define OCFS2_LOCK_PENDING       (0x00000400) /* This lockres is pending a
                                                  call to dlm_lock.  Only
                                                  exists with BUSY set. */
+#define OCFS2_LOCK_UPCONVERT_FINISHING (0x00000800) /* blocks the dc thread
+						     * from downconverting
+						     * before the upconvert
+						     * has completed */
 
 struct ocfs2_lock_res_ops;
 
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 1a1a679e51b5..7638a38c32bc 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -1417,9 +1417,16 @@ static inline int ocfs2_fast_symlink_chars(int blocksize)
 	return blocksize - offsetof(struct ocfs2_dinode, id2.i_symlink);
 }
 
-static inline int ocfs2_max_inline_data(int blocksize)
+static inline int ocfs2_max_inline_data_with_xattr(int blocksize,
+						   struct ocfs2_dinode *di)
 {
-	return blocksize - offsetof(struct ocfs2_dinode, id2.i_data.id_data);
+	if (di && (di->i_dyn_features & OCFS2_INLINE_XATTR_FL))
+		return blocksize -
+			offsetof(struct ocfs2_dinode, id2.i_data.id_data) -
+			di->i_xattr_inline_size;
+	else
+		return blocksize -
+			offsetof(struct ocfs2_dinode, id2.i_data.id_data);
 }
 
 static inline int ocfs2_extent_recs_per_inode(int blocksize)
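The ocfs2_fs.h change above makes the inline-data capacity account for an inline extended-attribute area: the usable space is everything in the block past the id2.i_data.id_data offset, minus i_xattr_inline_size when OCFS2_INLINE_XATTR_FL is set. A rough standalone sketch of the offsetof() arithmetic, using an invented stand-in for the real ocfs2_dinode layout:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical, much smaller stand-in for ocfs2_dinode. */
struct fake_dinode {
	unsigned char header[64];	/* fixed metadata before inline data */
	unsigned char id_data[];	/* inline data runs to the end of the block */
};

static int max_inline_data(int blocksize, int xattr_inline_size)
{
	return blocksize - (int)offsetof(struct fake_dinode, id_data)
			 - xattr_inline_size;
}

int main(void)
{
	printf("%d\n", max_inline_data(4096, 0));	/* 4032 */
	printf("%d\n", max_inline_data(4096, 256));	/* 3776 with inline xattrs */
	return 0;
}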
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 74db2be75dd6..8ae65c9c020c 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -2945,7 +2945,7 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
 
 	while (offset < end) {
 		page_index = offset >> PAGE_CACHE_SHIFT;
-		map_end = (page_index + 1) << PAGE_CACHE_SHIFT;
+		map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
 		if (map_end > end)
 			map_end = end;
 
@@ -2957,8 +2957,12 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
 
 		page = grab_cache_page(mapping, page_index);
 
-		/* This page can't be dirtied before we CoW it out. */
-		BUG_ON(PageDirty(page));
+		/*
+		 * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page
+		 * can't be dirtied before we CoW it out.
+		 */
+		if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
+			BUG_ON(PageDirty(page));
 
 		if (!PageUptodate(page)) {
 			ret = block_read_full_page(page, ocfs2_get_block);
@@ -3170,7 +3174,7 @@ static int ocfs2_cow_sync_writeback(struct super_block *sb,
 
 	while (offset < end) {
 		page_index = offset >> PAGE_CACHE_SHIFT;
-		map_end = (page_index + 1) << PAGE_CACHE_SHIFT;
+		map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
 		if (map_end > end)
 			map_end = end;
 
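Both map_end hunks above fix the same truncation: page_index is a 32-bit quantity on 32-bit builds, so (page_index + 1) << PAGE_CACHE_SHIFT wraps once the file offset passes 4 GB, and casting to loff_t first performs the shift in 64 bits. A minimal userspace illustration (a PAGE_SHIFT of 12 and the demo types are assumptions, not the kernel definitions):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint32_t page_index = 0x00100000;	/* page index at the 4 GB boundary */

	uint32_t wrong = (page_index + 1) << PAGE_SHIFT;		/* wraps to 0x1000 */
	int64_t right = ((int64_t)page_index + 1) << PAGE_SHIFT;	/* 0x100001000 */

	printf("32-bit shift: 0x%x\n", wrong);
	printf("64-bit shift: 0x%llx\n", (unsigned long long)right);
	return 0;
}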
diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c
index e49c41050264..3038c92af493 100644
--- a/fs/ocfs2/stack_o2cb.c
+++ b/fs/ocfs2/stack_o2cb.c
@@ -277,7 +277,7 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
 	u32 dlm_key;
 	struct dlm_ctxt *dlm;
 	struct o2dlm_private *priv;
-	struct dlm_protocol_version dlm_version;
+	struct dlm_protocol_version fs_version;
 
 	BUG_ON(conn == NULL);
 	BUG_ON(o2cb_stack.sp_proto == NULL);
@@ -304,18 +304,18 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
 	/* used by the dlm code to make message headers unique, each
 	 * node in this domain must agree on this. */
 	dlm_key = crc32_le(0, conn->cc_name, conn->cc_namelen);
-	dlm_version.pv_major = conn->cc_version.pv_major;
-	dlm_version.pv_minor = conn->cc_version.pv_minor;
+	fs_version.pv_major = conn->cc_version.pv_major;
+	fs_version.pv_minor = conn->cc_version.pv_minor;
 
-	dlm = dlm_register_domain(conn->cc_name, dlm_key, &dlm_version);
+	dlm = dlm_register_domain(conn->cc_name, dlm_key, &fs_version);
 	if (IS_ERR(dlm)) {
 		rc = PTR_ERR(dlm);
 		mlog_errno(rc);
 		goto out_free;
 	}
 
-	conn->cc_version.pv_major = dlm_version.pv_major;
-	conn->cc_version.pv_minor = dlm_version.pv_minor;
+	conn->cc_version.pv_major = fs_version.pv_major;
+	conn->cc_version.pv_minor = fs_version.pv_minor;
 	conn->cc_lockspace = dlm;
 
 	dlm_register_eviction_cb(dlm, &priv->op_eviction_cb);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 26069917a9f5..755cd49a5ef3 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1062,7 +1062,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
1062 "file system, but write access is " 1062 "file system, but write access is "
1063 "unavailable.\n"); 1063 "unavailable.\n");
1064 else 1064 else
1065 mlog_errno(status); 1065 mlog_errno(status);
1066 goto read_super_error; 1066 goto read_super_error;
1067 } 1067 }
1068 1068
diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
index 49b133ccbf11..32499d213fc4 100644
--- a/fs/ocfs2/symlink.c
+++ b/fs/ocfs2/symlink.c
@@ -137,20 +137,20 @@ static void *ocfs2_fast_follow_link(struct dentry *dentry,
 	}
 
 	memcpy(link, target, len);
-	nd_set_link(nd, link);
 
 bail:
+	nd_set_link(nd, status ? ERR_PTR(status) : link);
 	brelse(bh);
 
 	mlog_exit(status);
-	return status ? ERR_PTR(status) : link;
+	return NULL;
 }
 
 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
 {
-	char *link = cookie;
-
+	char *link = nd_get_link(nd);
+	if (!IS_ERR(link))
 		kfree(link);
 }
 
 const struct inode_operations ocfs2_symlink_inode_operations = {
diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c
index c61369342a27..a0a120e82b97 100644
--- a/fs/ocfs2/uptodate.c
+++ b/fs/ocfs2/uptodate.c
@@ -267,8 +267,8 @@ static int ocfs2_buffer_cached(struct ocfs2_caching_info *ci,
 }
 
 /* Warning: even if it returns true, this does *not* guarantee that
  * the block is stored in our inode metadata cache.
  *
  * This can be called under lock_buffer()
  */
 int ocfs2_buffer_uptodate(struct ocfs2_caching_info *ci,
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 38a6948ce0c2..20f31567ccee 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -647,9 +647,9 @@ static inline int ata_id_has_large_logical_sectors(const u16 *id)
 	return id[ATA_ID_SECTOR_SIZE] & (1 << 13);
 }
 
-static inline u8 ata_id_logical_per_physical_sectors(const u16 *id)
+static inline u16 ata_id_logical_per_physical_sectors(const u16 *id)
 {
-	return id[ATA_ID_SECTOR_SIZE] & 0xf;
+	return 1 << (id[ATA_ID_SECTOR_SIZE] & 0xf);
 }
 
 static inline int ata_id_has_lba48(const u16 *id)
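The ata.h fix reflects that the low nibble of IDENTIFY word 106 (ATA_ID_SECTOR_SIZE) stores the logical-to-physical sector ratio as a power-of-two exponent, so the helper previously returned the exponent rather than the sector count; converting with 1 << n also needs the wider u16 return type. A small decoding sketch, with a made-up word value:

#include <stdint.h>
#include <stdio.h>

/* Low nibble of IDENTIFY word 106 = log2(logical sectors per physical sector). */
static uint16_t logical_per_physical(uint16_t word106)
{
	return 1u << (word106 & 0xf);
}

int main(void)
{
	uint16_t word106 = 0x6003;	/* example value: exponent 3 */

	printf("%u logical sectors per physical sector\n",
	       logical_per_physical(word106));	/* prints 8 */
	return 0;
}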
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 5be3dab4a695..188fcae10a99 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -15,6 +15,7 @@
 # define __acquire(x)	__context__(x,1)
 # define __release(x)	__context__(x,-1)
 # define __cond_lock(x,c)	((c) ? ({ __acquire(x); 1; }) : 0)
+# define __percpu	__attribute__((noderef, address_space(3)))
 extern void __chk_user_ptr(const volatile void __user *);
 extern void __chk_io_ptr(const volatile void __iomem *);
 #else
@@ -32,6 +33,7 @@ extern void __chk_io_ptr(const volatile void __iomem *);
 # define __acquire(x) (void)0
 # define __release(x) (void)0
 # define __cond_lock(x,c) (c)
+# define __percpu
 #endif
 
 #ifdef __KERNEL__
diff --git a/include/linux/connector.h b/include/linux/connector.h
index 72ba63eb83c5..3a779ffba60b 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -24,9 +24,6 @@
 
 #include <linux/types.h>
 
-#define CN_IDX_CONNECTOR		0xffffffff
-#define CN_VAL_CONNECTOR		0xffffffff
-
 /*
  * Process Events connector unique ids -- used for message routing
  */
@@ -75,30 +72,6 @@ struct cn_msg {
 	__u8 data[0];
 };
 
-/*
- * Notify structure - requests notification about
- * registering/unregistering idx/val in range [first, first+range].
- */
-struct cn_notify_req {
-	__u32 first;
-	__u32 range;
-};
-
-/*
- * Main notification control message
- * *_notify_num - number of appropriate cn_notify_req structures after
- *	this struct.
- * group - notification receiver's idx.
- * len - total length of the attached data.
- */
-struct cn_ctl_msg {
-	__u32 idx_notify_num;
-	__u32 val_notify_num;
-	__u32 group;
-	__u32 len;
-	__u8 data[0];
-};
-
 #ifdef __KERNEL__
 
 #include <asm/atomic.h>
@@ -151,11 +124,6 @@ struct cn_callback_entry {
 	u32 seq, group;
 };
 
-struct cn_ctl_entry {
-	struct list_head notify_entry;
-	struct cn_ctl_msg *msg;
-};
-
 struct cn_dev {
 	struct cb_id id;
 
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 99dc6d5cf7e5..975837e7d6c0 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -17,7 +17,7 @@ struct linux_binprm;
 extern int ima_bprm_check(struct linux_binprm *bprm);
 extern int ima_inode_alloc(struct inode *inode);
 extern void ima_inode_free(struct inode *inode);
-extern int ima_path_check(struct path *path, int mask);
+extern int ima_file_check(struct file *file, int mask);
 extern void ima_file_free(struct file *file);
 extern int ima_file_mmap(struct file *file, unsigned long prot);
 extern void ima_counts_get(struct file *file);
@@ -38,7 +38,7 @@ static inline void ima_inode_free(struct inode *inode)
 	return;
 }
 
-static inline int ima_path_check(struct path *path, int mask)
+static inline int ima_file_check(struct file *file, int mask)
 {
 	return 0;
 }
diff --git a/include/linux/sched.h b/include/linux/sched.h
index abdfacc58653..78efe7c485ac 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -310,6 +310,7 @@ extern void sched_show_task(struct task_struct *p);
 #ifdef CONFIG_DETECT_SOFTLOCKUP
 extern void softlockup_tick(void);
 extern void touch_softlockup_watchdog(void);
+extern void touch_softlockup_watchdog_sync(void);
 extern void touch_all_softlockup_watchdogs(void);
 extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
 				    void __user *buffer,
@@ -323,6 +324,9 @@ static inline void softlockup_tick(void)
 static inline void touch_softlockup_watchdog(void)
 {
 }
+static inline void touch_softlockup_watchdog_sync(void)
+{
+}
 static inline void touch_all_softlockup_watchdogs(void)
 {
 }
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index ba1ba0c5efd1..63d449807d9b 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -11,6 +11,8 @@ struct nf_conntrack_ecache;
 struct netns_ct {
 	atomic_t		count;
 	unsigned int		expect_count;
+	unsigned int		htable_size;
+	struct kmem_cache	*nf_conntrack_cachep;
 	struct hlist_nulls_head	*hash;
 	struct hlist_head	*expect_hash;
 	struct hlist_nulls_head	unconfirmed;
@@ -28,5 +30,6 @@ struct netns_ct {
 #endif
 	int			hash_vmalloc;
 	int			expect_vmalloc;
+	char			*slabname;
 };
 #endif
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 2eb3814d6258..9a4b8b714079 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -40,6 +40,7 @@ struct netns_ipv4 {
 	struct xt_table		*iptable_security;
 	struct xt_table		*nat_table;
 	struct hlist_head	*nat_bysource;
+	unsigned int		nat_htable_size;
 	int			nat_vmalloced;
 #endif
 
diff --git a/init/main.c b/init/main.c
index dac44a9356a5..4cb47a159f02 100644
--- a/init/main.c
+++ b/init/main.c
@@ -657,9 +657,9 @@ asmlinkage void __init start_kernel(void)
 	proc_caches_init();
 	buffer_init();
 	key_init();
+	radix_tree_init();
 	security_init();
 	vfs_caches_init(totalram_pages);
-	radix_tree_init();
 	signals_init();
 	/* rootfs populating might need page-writeback */
 	page_writeback_init();
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 1fbcc748044a..aa3bee566446 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2936,14 +2936,17 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
 
 	for_each_subsys(root, ss) {
 		struct cgroup_subsys_state *css = ss->create(ss, cgrp);
+
 		if (IS_ERR(css)) {
 			err = PTR_ERR(css);
 			goto err_destroy;
 		}
 		init_cgroup_css(css, ss, cgrp);
-		if (ss->use_id)
-			if (alloc_css_id(ss, parent, cgrp))
+		if (ss->use_id) {
+			err = alloc_css_id(ss, parent, cgrp);
+			if (err)
 				goto err_destroy;
+		}
 		/* At error, ->destroy() callback has to free assigned ID. */
 	}
 
diff --git a/kernel/cred.c b/kernel/cred.c
index dd76cfe5f5b0..1ed8ca18790c 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -224,7 +224,7 @@ struct cred *cred_alloc_blank(void)
 #ifdef CONFIG_KEYS
 	new->tgcred = kzalloc(sizeof(*new->tgcred), GFP_KERNEL);
 	if (!new->tgcred) {
-		kfree(new);
+		kmem_cache_free(cred_jar, new);
 		return NULL;
 	}
 	atomic_set(&new->tgcred->usage, 1);
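The cred.c fix matters because the blank cred came from the dedicated cred_jar slab cache, so on the error path it must be returned with kmem_cache_free(); handing it to kfree() gives the object to the wrong allocator. The same pairing rule holds for any custom allocator, as in this userspace sketch built around an invented toy cache:

#include <stdlib.h>

/* Toy fixed-size cache: objects taken with cache_alloc() must go back
 * through cache_free(), never plain free(), or the cache's accounting
 * (and, in a real slab, its free lists) goes wrong. */
struct cache { size_t obj_size; long outstanding; };

static void *cache_alloc(struct cache *c)
{
	void *obj = calloc(1, c->obj_size);

	if (obj)
		c->outstanding++;
	return obj;
}

static void cache_free(struct cache *c, void *obj)
{
	c->outstanding--;
	free(obj);
}

int main(void)
{
	struct cache cred_jar = { 128, 0 };
	void *cred = cache_alloc(&cred_jar);

	cache_free(&cred_jar, cred);	/* matches cache_alloc(); count stays balanced */
	return cred_jar.outstanding != 0;
}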
diff --git a/kernel/futex.c b/kernel/futex.c
index d9b3a2228f9d..e7a35f1039e7 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -530,8 +530,25 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
 		return -EINVAL;
 
 	WARN_ON(!atomic_read(&pi_state->refcount));
-	WARN_ON(pid && pi_state->owner &&
-			pi_state->owner->pid != pid);
+
+	/*
+	 * When pi_state->owner is NULL then the owner died
+	 * and another waiter is on the fly. pi_state->owner
+	 * is fixed up by the task which acquires
+	 * pi_state->rt_mutex.
+	 *
+	 * We do not check for pid == 0 which can happen when
+	 * the owner died and robust_list_exit() cleared the
+	 * TID.
+	 */
+	if (pid && pi_state->owner) {
+		/*
+		 * Bail out if user space manipulated the
+		 * futex value.
+		 */
+		if (pid != task_pid_vnr(pi_state->owner))
+			return -EINVAL;
+	}
 
 	atomic_inc(&pi_state->refcount);
 	*ps = pi_state;
@@ -758,6 +775,13 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
 	if (!pi_state)
 		return -EINVAL;
 
+	/*
+	 * If current does not own the pi_state then the futex is
+	 * inconsistent and user space fiddled with the futex value.
+	 */
+	if (pi_state->owner != current)
+		return -EINVAL;
+
 	raw_spin_lock(&pi_state->pi_mutex.wait_lock);
 	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
 
@@ -1971,7 +1995,7 @@ retry_private:
 	/* Unqueue and drop the lock */
 	unqueue_me_pi(&q);
 
-	goto out;
+	goto out_put_key;
 
 out_unlock_put_key:
 	queue_unlock(&q, hb);
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index 32c5c15d750d..498cabba225e 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -349,6 +349,7 @@ EXPORT_SYMBOL(__kfifo_from_user_n);
  * @fifo: the fifo to be used.
  * @from: pointer to the data to be added.
  * @len: the length of the data to be added.
+ * @total: the actual returned data length.
  *
  * This function copies at most @len bytes from the @from into the
  * FIFO depending and returns -EFAULT/0.
@@ -399,7 +400,7 @@ EXPORT_SYMBOL(__kfifo_to_user_n);
  * @fifo: the fifo to be used.
  * @to: where the data must be copied.
  * @len: the size of the destination buffer.
- @ @lenout: pointer to output variable with copied data
+ * @lenout: pointer to output variable with copied data
  *
  * This function copies at most @len bytes from the FIFO into the
  * @to buffer and 0 or -EFAULT.
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index c7ade62e4ef0..761fdd2b3034 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -599,7 +599,7 @@ static void kgdb_wait(struct pt_regs *regs)
 
 	/* Signal the primary CPU that we are done: */
 	atomic_set(&cpu_in_kgdb[cpu], 0);
-	touch_softlockup_watchdog();
+	touch_softlockup_watchdog_sync();
 	clocksource_touch_watchdog();
 	local_irq_restore(flags);
 }
@@ -1453,7 +1453,7 @@ acquirelock:
 	    (kgdb_info[cpu].task &&
 	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
 		atomic_set(&kgdb_active, -1);
-		touch_softlockup_watchdog();
+		touch_softlockup_watchdog_sync();
 		clocksource_touch_watchdog();
 		local_irq_restore(flags);
 
@@ -1553,7 +1553,7 @@ kgdb_restore:
 	}
 	/* Free kgdb_active */
 	atomic_set(&kgdb_active, -1);
-	touch_softlockup_watchdog();
+	touch_softlockup_watchdog_sync();
 	clocksource_touch_watchdog();
 	local_irq_restore(flags);
 
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index d22579087e27..0d4c7898ab80 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -25,6 +25,7 @@ static DEFINE_SPINLOCK(print_lock);
 static DEFINE_PER_CPU(unsigned long, softlockup_touch_ts); /* touch timestamp */
 static DEFINE_PER_CPU(unsigned long, softlockup_print_ts); /* print timestamp */
 static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
+static DEFINE_PER_CPU(bool, softlock_touch_sync);
 
 static int __read_mostly did_panic;
 int __read_mostly softlockup_thresh = 60;
@@ -79,6 +80,12 @@ void touch_softlockup_watchdog(void)
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
+void touch_softlockup_watchdog_sync(void)
+{
+	__raw_get_cpu_var(softlock_touch_sync) = true;
+	__raw_get_cpu_var(softlockup_touch_ts) = 0;
+}
+
 void touch_all_softlockup_watchdogs(void)
 {
 	int cpu;
@@ -118,6 +125,14 @@ void softlockup_tick(void)
 	}
 
 	if (touch_ts == 0) {
+		if (unlikely(per_cpu(softlock_touch_sync, this_cpu))) {
+			/*
+			 * If the time stamp was touched atomically
+			 * make sure the scheduler tick is up to date.
+			 */
+			per_cpu(softlock_touch_sync, this_cpu) = false;
+			sched_clock_tick();
+		}
 		__touch_softlockup_watchdog();
 		return;
 	}
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 7faaa32fbf4f..e2ab064c6d41 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -880,6 +880,7 @@ void getboottime(struct timespec *ts)
 
 	set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
 }
+EXPORT_SYMBOL_GPL(getboottime);
 
 /**
  * monotonic_to_bootbased - Convert the monotonic time to boot based.
@@ -889,6 +890,7 @@ void monotonic_to_bootbased(struct timespec *ts)
 {
 	*ts = timespec_add_safe(*ts, total_sleep_time);
 }
+EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
 
 unsigned long get_seconds(void)
 {
diff --git a/mm/filemap.c b/mm/filemap.c
index e3736923220e..698ea80f2102 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2232,6 +2232,9 @@ again:
 		if (unlikely(status))
 			break;
 
+		if (mapping_writably_mapped(mapping))
+			flush_dcache_page(page);
+
 		pagefault_disable();
 		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 		pagefault_enable();
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e91b81b63670..2d16fa6b8c2d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1515,10 +1515,9 @@ static struct attribute_group hstate_attr_group = {
 	.attrs = hstate_attrs,
 };
 
-static int __init hugetlb_sysfs_add_hstate(struct hstate *h,
-				 struct kobject *parent,
-				 struct kobject **hstate_kobjs,
-				 struct attribute_group *hstate_attr_group)
+static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
+				    struct kobject **hstate_kobjs,
+				    struct attribute_group *hstate_attr_group)
 {
 	int retval;
 	int hi = h - hstates;
diff --git a/mm/migrate.c b/mm/migrate.c
index efddbf0926b2..9a0db5bbabe4 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -912,6 +912,9 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
 			goto out_pm;
 
 		err = -ENODEV;
+		if (node < 0 || node >= MAX_NUMNODES)
+			goto out_pm;
+
 		if (!node_state(node, N_HIGH_MEMORY))
 			goto out_pm;
 
diff --git a/net/9p/client.c b/net/9p/client.c
index 8af95b2dddd6..09d4f1e2e4a8 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -69,7 +69,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);
 
 static int parse_opts(char *opts, struct p9_client *clnt)
 {
-	char *options;
+	char *options, *tmp_options;
 	char *p;
 	substring_t args[MAX_OPT_ARGS];
 	int option;
@@ -81,12 +81,13 @@ static int parse_opts(char *opts, struct p9_client *clnt)
 	if (!opts)
 		return 0;
 
-	options = kstrdup(opts, GFP_KERNEL);
-	if (!options) {
+	tmp_options = kstrdup(opts, GFP_KERNEL);
+	if (!tmp_options) {
 		P9_DPRINTK(P9_DEBUG_ERROR,
 			   "failed to allocate copy of option string\n");
 		return -ENOMEM;
 	}
+	options = tmp_options;
 
 	while ((p = strsep(&options, ",")) != NULL) {
 		int token;
@@ -108,6 +109,13 @@ static int parse_opts(char *opts, struct p9_client *clnt)
 			break;
 		case Opt_trans:
 			clnt->trans_mod = v9fs_get_trans_by_name(&args[0]);
+			if(clnt->trans_mod == NULL) {
+				P9_DPRINTK(P9_DEBUG_ERROR,
+				   "Could not find request transport: %s\n",
+				   (char *) &args[0]);
+				ret = -EINVAL;
+				goto free_and_return;
+			}
 			break;
 		case Opt_legacy:
 			clnt->dotu = 0;
@@ -117,7 +125,8 @@ static int parse_opts(char *opts, struct p9_client *clnt)
 		}
 	}
 
-	kfree(options);
+free_and_return:
+	kfree(tmp_options);
 	return ret;
 }
 
@@ -667,18 +676,12 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
 	clnt->trans = NULL;
 	spin_lock_init(&clnt->lock);
 	INIT_LIST_HEAD(&clnt->fidlist);
-	clnt->fidpool = p9_idpool_create();
-	if (IS_ERR(clnt->fidpool)) {
-		err = PTR_ERR(clnt->fidpool);
-		clnt->fidpool = NULL;
-		goto error;
-	}
 
 	p9_tag_init(clnt);
 
 	err = parse_opts(options, clnt);
 	if (err < 0)
-		goto error;
+		goto free_client;
 
 	if (!clnt->trans_mod)
 		clnt->trans_mod = v9fs_get_default_trans();
@@ -687,7 +690,14 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
 		err = -EPROTONOSUPPORT;
 		P9_DPRINTK(P9_DEBUG_ERROR,
 				"No transport defined or default transport\n");
-		goto error;
+		goto free_client;
+	}
+
+	clnt->fidpool = p9_idpool_create();
+	if (IS_ERR(clnt->fidpool)) {
+		err = PTR_ERR(clnt->fidpool);
+		clnt->fidpool = NULL;
+		goto put_trans;
 	}
 
 	P9_DPRINTK(P9_DEBUG_MUX, "clnt %p trans %p msize %d dotu %d\n",
@@ -695,19 +705,25 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
 
 	err = clnt->trans_mod->create(clnt, dev_name, options);
 	if (err)
-		goto error;
+		goto destroy_fidpool;
 
 	if ((clnt->msize+P9_IOHDRSZ) > clnt->trans_mod->maxsize)
 		clnt->msize = clnt->trans_mod->maxsize-P9_IOHDRSZ;
 
 	err = p9_client_version(clnt);
 	if (err)
-		goto error;
+		goto close_trans;
 
 	return clnt;
 
-error:
-	p9_client_destroy(clnt);
+close_trans:
+	clnt->trans_mod->close(clnt);
+destroy_fidpool:
+	p9_idpool_destroy(clnt->fidpool);
+put_trans:
+	v9fs_put_trans(clnt->trans_mod);
+free_client:
+	kfree(clnt);
 	return ERR_PTR(err);
 }
 EXPORT_SYMBOL(p9_client_create);
@@ -1214,10 +1230,11 @@ static int p9_client_statsize(struct p9_wstat *wst, int optional)
 {
 	int ret;
 
+	/* NOTE: size shouldn't include its own length */
 	/* size[2] type[2] dev[4] qid[13] */
 	/* mode[4] atime[4] mtime[4] length[8]*/
 	/* name[s] uid[s] gid[s] muid[s] */
-	ret = 2+2+4+13+4+4+4+8+2+2+2+2;
+	ret = 2+4+13+4+4+4+8+2+2+2+2;
 
 	if (wst->name)
 		ret += strlen(wst->name);
@@ -1258,7 +1275,7 @@ int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
 		wst->name, wst->uid, wst->gid, wst->muid, wst->extension,
 		wst->n_uid, wst->n_gid, wst->n_muid);
 
-	req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size, wst);
+	req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size+2, wst);
 	if (IS_ERR(req)) {
 		err = PTR_ERR(req);
 		goto error;
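The three parse_opts() changes in this series (net/9p/client.c above, trans_fd.c and trans_rdma.c below) all plug the same leak: strsep() advances the pointer it is handed, so after the option loop `options` no longer points at the kstrdup() buffer and freeing it releases nothing. Keeping the original pointer in tmp_options and freeing that is the usual pattern; a userspace sketch of the behaviour:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *tmp_options = strdup("trans=tcp,port=564,noextend");
	char *options = tmp_options;	/* strsep() consumes this working copy */
	char *p;

	while ((p = strsep(&options, ",")) != NULL)
		printf("token: %s\n", p);

	/* options is NULL here; freeing it would leak the strdup() buffer */
	free(tmp_options);
	return 0;
}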
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index be1cb909d8c0..31d0b05582a9 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -714,7 +714,7 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
 	char *p;
 	substring_t args[MAX_OPT_ARGS];
 	int option;
-	char *options;
+	char *options, *tmp_options;
 	int ret;
 
 	opts->port = P9_PORT;
@@ -724,12 +724,13 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
 	if (!params)
 		return 0;
 
-	options = kstrdup(params, GFP_KERNEL);
-	if (!options) {
+	tmp_options = kstrdup(params, GFP_KERNEL);
+	if (!tmp_options) {
 		P9_DPRINTK(P9_DEBUG_ERROR,
 			   "failed to allocate copy of option string\n");
 		return -ENOMEM;
 	}
+	options = tmp_options;
 
 	while ((p = strsep(&options, ",")) != NULL) {
 		int token;
@@ -760,7 +761,8 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
 			continue;
 		}
 	}
-	kfree(options);
+
+	kfree(tmp_options);
 	return 0;
 }
 
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 65cb29db03f8..2c95a89c0f46 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -166,7 +166,7 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
 	char *p;
 	substring_t args[MAX_OPT_ARGS];
 	int option;
-	char *options;
+	char *options, *tmp_options;
 	int ret;
 
 	opts->port = P9_PORT;
@@ -177,12 +177,13 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
 	if (!params)
 		return 0;
 
-	options = kstrdup(params, GFP_KERNEL);
-	if (!options) {
+	tmp_options = kstrdup(params, GFP_KERNEL);
+	if (!tmp_options) {
 		P9_DPRINTK(P9_DEBUG_ERROR,
 			   "failed to allocate copy of option string\n");
 		return -ENOMEM;
 	}
+	options = tmp_options;
 
 	while ((p = strsep(&options, ",")) != NULL) {
 		int token;
@@ -216,7 +217,7 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
 	}
 	/* RQ must be at least as large as the SQ */
 	opts->rq_depth = max(opts->rq_depth, opts->sq_depth);
-	kfree(options);
+	kfree(tmp_options);
 	return 0;
 }
 
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index ea1e3daabefe..cb50f4ae5eef 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -102,7 +102,8 @@ static void p9_virtio_close(struct p9_client *client)
 	struct virtio_chan *chan = client->trans;
 
 	mutex_lock(&virtio_9p_lock);
-	chan->inuse = false;
+	if (chan)
+		chan->inuse = false;
 	mutex_unlock(&virtio_9p_lock);
 }
 
@@ -311,6 +312,7 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
 	}
 
 	client->trans = (void *)chan;
+	client->status = Connected;
 	chan->client = client;
 
 	return 0;
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index b7c4224f4e7d..b10e3cdb08f8 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -377,6 +377,9 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
 
 	if (acl->state == BT_CONNECTED &&
 			(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
+		acl->power_save = 1;
+		hci_conn_enter_active_mode(acl);
+
 		if (lmp_esco_capable(hdev))
 			hci_setup_sync(sco, acl->handle);
 		else
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 28517bad796c..592da5c909c1 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1699,6 +1699,7 @@ static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_bu
 		break;
 
 	case 0x1c:	/* SCO interval rejected */
+	case 0x1a:	/* Unsupported Remote Feature */
 	case 0x1f:	/* Unspecified error */
 		if (conn->out && conn->attempt < 2) {
 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 18e7f5a43dc4..fc6ec1e72652 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -243,6 +243,39 @@ static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb)
 	input_sync(dev);
 }
 
+static int __hidp_send_ctrl_message(struct hidp_session *session,
+			unsigned char hdr, unsigned char *data, int size)
+{
+	struct sk_buff *skb;
+
+	BT_DBG("session %p data %p size %d", session, data, size);
+
+	if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) {
+		BT_ERR("Can't allocate memory for new frame");
+		return -ENOMEM;
+	}
+
+	*skb_put(skb, 1) = hdr;
+	if (data && size > 0)
+		memcpy(skb_put(skb, size), data, size);
+
+	skb_queue_tail(&session->ctrl_transmit, skb);
+
+	return 0;
+}
+
+static inline int hidp_send_ctrl_message(struct hidp_session *session,
+			unsigned char hdr, unsigned char *data, int size)
+{
+	int err;
+
+	err = __hidp_send_ctrl_message(session, hdr, data, size);
+
+	hidp_schedule(session);
+
+	return err;
+}
+
 static int hidp_queue_report(struct hidp_session *session,
 			unsigned char *data, int size)
 {
@@ -282,7 +315,9 @@ static int hidp_send_report(struct hidp_session *session, struct hid_report *rep
 
 static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count)
 {
-	if (hidp_queue_report(hid->driver_data, data, count))
+	if (hidp_send_ctrl_message(hid->driver_data,
+			HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE,
+			data, count))
 		return -ENOMEM;
 	return count;
 }
@@ -307,39 +342,6 @@ static inline void hidp_del_timer(struct hidp_session *session)
 	del_timer(&session->timer);
 }
 
-static int __hidp_send_ctrl_message(struct hidp_session *session,
-			unsigned char hdr, unsigned char *data, int size)
-{
-	struct sk_buff *skb;
-
-	BT_DBG("session %p data %p size %d", session, data, size);
-
-	if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) {
-		BT_ERR("Can't allocate memory for new frame");
-		return -ENOMEM;
-	}
-
-	*skb_put(skb, 1) = hdr;
-	if (data && size > 0)
-		memcpy(skb_put(skb, size), data, size);
-
-	skb_queue_tail(&session->ctrl_transmit, skb);
-
-	return 0;
-}
-
-static inline int hidp_send_ctrl_message(struct hidp_session *session,
-			unsigned char hdr, unsigned char *data, int size)
-{
-	int err;
-
-	err = __hidp_send_ctrl_message(session, hdr, data, size);
-
-	hidp_schedule(session);
-
-	return err;
-}
-
 static void hidp_process_handshake(struct hidp_session *session,
 					unsigned char param)
 {
@@ -701,29 +703,9 @@ static void hidp_close(struct hid_device *hid)
 static int hidp_parse(struct hid_device *hid)
 {
 	struct hidp_session *session = hid->driver_data;
-	struct hidp_connadd_req *req = session->req;
-	unsigned char *buf;
-	int ret;
-
-	buf = kmalloc(req->rd_size, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	if (copy_from_user(buf, req->rd_data, req->rd_size)) {
-		kfree(buf);
-		return -EFAULT;
-	}
-
-	ret = hid_parse_report(session->hid, buf, req->rd_size);
-
-	kfree(buf);
-
-	if (ret)
-		return ret;
 
-	session->req = NULL;
-
-	return 0;
+	return hid_parse_report(session->hid, session->rd_data,
+						session->rd_size);
 }
 
 static int hidp_start(struct hid_device *hid)
@@ -768,12 +750,24 @@ static int hidp_setup_hid(struct hidp_session *session,
 	bdaddr_t src, dst;
 	int err;
 
+	session->rd_data = kzalloc(req->rd_size, GFP_KERNEL);
+	if (!session->rd_data)
+		return -ENOMEM;
+
+	if (copy_from_user(session->rd_data, req->rd_data, req->rd_size)) {
+		err = -EFAULT;
+		goto fault;
+	}
+	session->rd_size = req->rd_size;
+
 	hid = hid_allocate_device();
-	if (IS_ERR(hid))
-		return PTR_ERR(hid);
+	if (IS_ERR(hid)) {
+		err = PTR_ERR(hid);
+		goto fault;
+	}
 
 	session->hid = hid;
-	session->req = req;
+
 	hid->driver_data = session;
 
 	baswap(&src, &bt_sk(session->ctrl_sock->sk)->src);
@@ -804,6 +798,10 @@ failed:
 	hid_destroy_device(hid);
 	session->hid = NULL;
 
+fault:
+	kfree(session->rd_data);
+	session->rd_data = NULL;
+
 	return err;
 }
 
@@ -898,6 +896,9 @@ unlink:
 		session->hid = NULL;
 	}
 
+	kfree(session->rd_data);
+	session->rd_data = NULL;
+
 purge:
 	skb_queue_purge(&session->ctrl_transmit);
 	skb_queue_purge(&session->intr_transmit);
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index faf3d74c3586..a4e215d50c10 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -154,7 +154,9 @@ struct hidp_session {
 	struct sk_buff_head ctrl_transmit;
 	struct sk_buff_head intr_transmit;
 
-	struct hidp_connadd_req *req;
+	/* Report descriptor */
+	__u8 *rd_data;
+	uint rd_size;
 };
 
 static inline void hidp_schedule(struct hidp_session *session)
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 1120cf14a548..400efa26ddba 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -1368,7 +1368,6 @@ static int l2cap_ertm_send(struct sock *sk)
 
 	while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
 			!(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
-		tx_skb = skb_clone(skb, GFP_ATOMIC);
 
 		if (pi->remote_max_tx &&
 				bt_cb(skb)->retries == pi->remote_max_tx) {
@@ -1376,6 +1375,8 @@ static int l2cap_ertm_send(struct sock *sk)
 			break;
 		}
 
+		tx_skb = skb_clone(skb, GFP_ATOMIC);
+
 		bt_cb(skb)->retries++;
 
 		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
@@ -3518,7 +3519,6 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
 	struct l2cap_pinfo *pi;
 	u16 control, len;
 	u8 tx_seq;
-	int err;
 
 	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
 	if (!sk) {
@@ -3570,13 +3570,11 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
 			goto drop;
 
 		if (__is_iframe(control))
-			err = l2cap_data_channel_iframe(sk, control, skb);
+			l2cap_data_channel_iframe(sk, control, skb);
 		else
-			err = l2cap_data_channel_sframe(sk, control, skb);
+			l2cap_data_channel_sframe(sk, control, skb);
 
-		if (!err)
-			goto done;
-		break;
+		goto done;
 
 	case L2CAP_MODE_STREAMING:
 		control = get_unaligned_le16(skb->data);
@@ -3602,7 +3600,7 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
 		else
 			pi->expected_tx_seq = tx_seq + 1;
 
-		err = l2cap_sar_reassembly_sdu(sk, skb, control);
+		l2cap_sar_reassembly_sdu(sk, skb, control);
 
 		goto done;
 
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index fc5ee3296e22..89f4a59eb82b 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -252,7 +252,6 @@ static void rfcomm_session_timeout(unsigned long arg)
 	BT_DBG("session %p state %ld", s, s->state);
 
 	set_bit(RFCOMM_TIMED_OUT, &s->flags);
-	rfcomm_session_put(s);
 	rfcomm_schedule(RFCOMM_SCHED_TIMEO);
 }
 
@@ -1151,7 +1150,11 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
 			break;
 
 		case BT_DISCONN:
-			rfcomm_session_put(s);
+			/* When socket is closed and we are not RFCOMM
+			 * initiator rfcomm_process_rx already calls
+			 * rfcomm_session_put() */
+			if (s->sock->sk->sk_state != BT_CLOSED)
+				rfcomm_session_put(s);
 			break;
 		}
 	}
@@ -1920,6 +1923,7 @@ static inline void rfcomm_process_sessions(void)
 		if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) {
 			s->state = BT_DISCONN;
 			rfcomm_send_disc(s, 0);
+			rfcomm_session_put(s);
 			continue;
 		}
 
diff --git a/net/core/dst.c b/net/core/dst.c
index 57bc4d5b8d08..cb1b3488b739 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -17,6 +17,7 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <net/net_namespace.h>
+#include <linux/sched.h>
 
 #include <net/dst.h>
 
@@ -79,6 +80,7 @@ loop:
 	while ((dst = next) != NULL) {
 		next = dst->next;
 		prefetch(&next->next);
+		cond_resched();
 		if (likely(atomic_read(&dst->__refcnt))) {
 			last->next = dst;
 			last = dst;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index de0c2c726420..2e692afdc55d 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3524,6 +3524,7 @@ static int pktgen_thread_worker(void *arg)
 			wait_event_interruptible_timeout(t->queue,
 							 t->control != 0,
 							 HZ/10);
+			try_to_freeze();
 			continue;
 		}
 
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index 57dfb9c8c4f2..ff16e9df1969 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -83,7 +83,7 @@ static struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_f
 	va_list args;
 
 	va_start(args, fmt);
-	vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args);
+	vsnprintf(slab_name_fmt, CCID_SLAB_NAME_LENGTH, fmt, args);
 	va_end(args);
 
 	slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0,
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index 269958bf7fe9..6df6f8ac9636 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -19,7 +19,9 @@
 #include <linux/list.h>
 #include <linux/module.h>
 
-#define CCID_MAX 255
+/* maximum value for a CCID (RFC 4340, 19.5) */
+#define CCID_MAX		255
+#define CCID_SLAB_NAME_LENGTH	32
 
 struct tcp_info;
 
@@ -49,8 +51,8 @@ struct ccid_operations {
 	const char *ccid_name;
 	struct kmem_cache *ccid_hc_rx_slab,
 			  *ccid_hc_tx_slab;
-	char ccid_hc_rx_slab_name[32];
-	char ccid_hc_tx_slab_name[32];
+	char ccid_hc_rx_slab_name[CCID_SLAB_NAME_LENGTH];
+	char ccid_hc_tx_slab_name[CCID_SLAB_NAME_LENGTH];
 	__u32 ccid_hc_rx_obj_size,
 	      ccid_hc_tx_obj_size;
 	/* Interface Routines */
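The matching ccid.c hunk earlier in this patch fixes a classic sizeof pitfall: slab_name_fmt is a function parameter, so inside ccid_kmem_cache_create() it has decayed to char * and sizeof(slab_name_fmt) yields the pointer size, truncating every generated slab name. The new CCID_SLAB_NAME_LENGTH constant carries the real buffer length instead. A minimal illustration of the decay:

#include <stdio.h>

#define NAME_LENGTH 32

static void fill(char buf[NAME_LENGTH])	/* parameter decays to char * */
{
	printf("sizeof inside callee: %zu\n", sizeof(buf));	/* pointer size, e.g. 8 */
	snprintf(buf, NAME_LENGTH, "ccid%d_hc_tx", 3);	/* so pass the length explicitly */
}

int main(void)
{
	char name[NAME_LENGTH];

	printf("sizeof at definition: %zu\n", sizeof(name));	/* 32 */
	fill(name);
	printf("%s\n", name);
	return 0;
}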
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
index bace1d8cbcfd..f5b3464f1242 100644
--- a/net/dccp/probe.c
+++ b/net/dccp/probe.c
@@ -161,8 +161,8 @@ static __init int dccpprobe_init(void)
 	if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops))
 		goto err0;
 
-	ret = try_then_request_module((register_jprobe(&dccp_send_probe) == 0),
+	try_then_request_module((ret = register_jprobe(&dccp_send_probe)) == 0,
 				      "dccp");
 	if (ret)
 		goto err1;
 
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 06632762ba5f..90203e1b9187 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -925,10 +925,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
 	if (t && !IS_ERR(t)) {
 		struct arpt_getinfo info;
 		const struct xt_table_info *private = t->private;
-
 #ifdef CONFIG_COMPAT
+		struct xt_table_info tmp;
+
 		if (compat) {
-			struct xt_table_info tmp;
 			ret = compat_table_info(private, &tmp);
 			xt_compat_flush_offsets(NFPROTO_ARP);
 			private = &tmp;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 572330a552ef..3ce53cf13d5a 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1132,10 +1132,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
 	if (t && !IS_ERR(t)) {
 		struct ipt_getinfo info;
 		const struct xt_table_info *private = t->private;
-
 #ifdef CONFIG_COMPAT
+		struct xt_table_info tmp;
+
 		if (compat) {
-			struct xt_table_info tmp;
 			ret = compat_table_info(private, &tmp);
 			xt_compat_flush_offsets(AF_INET);
 			private = &tmp;
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index d171b123a656..d1ea38a7c490 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -210,7 +210,7 @@ static ctl_table ip_ct_sysctl_table[] = {
 	},
 	{
 		.procname	= "ip_conntrack_buckets",
-		.data		= &nf_conntrack_htable_size,
+		.data		= &init_net.ct.htable_size,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0444,
 		.proc_handler	= proc_dointvec,
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 8668a3defda6..2fb7b76da94f 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -32,7 +32,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
 	struct hlist_nulls_node *n;
 
 	for (st->bucket = 0;
-	     st->bucket < nf_conntrack_htable_size;
+	     st->bucket < net->ct.htable_size;
 	     st->bucket++) {
 		n = rcu_dereference(net->ct.hash[st->bucket].first);
 		if (!is_a_nulls(n))
@@ -50,7 +50,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
 		head = rcu_dereference(head->next);
 	while (is_a_nulls(head)) {
 		if (likely(get_nulls_value(head) == st->bucket)) {
-			if (++st->bucket >= nf_conntrack_htable_size)
+			if (++st->bucket >= net->ct.htable_size)
 				return NULL;
 		}
 		head = rcu_dereference(net->ct.hash[st->bucket].first);
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index fe1a64479dd0..26066a2327ad 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -35,9 +35,6 @@ static DEFINE_SPINLOCK(nf_nat_lock);
 
 static struct nf_conntrack_l3proto *l3proto __read_mostly;
 
-/* Calculated at init based on memory size */
-static unsigned int nf_nat_htable_size __read_mostly;
-
 #define MAX_IP_NAT_PROTO 256
 static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO]
 						__read_mostly;
@@ -72,7 +69,7 @@ EXPORT_SYMBOL_GPL(nf_nat_proto_put);
 
 /* We keep an extra hash for each conntrack, for fast searching. */
 static inline unsigned int
-hash_by_src(const struct nf_conntrack_tuple *tuple)
+hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple)
 {
 	unsigned int hash;
 
@@ -80,7 +77,7 @@ hash_by_src(const struct nf_conntrack_tuple *tuple)
 	hash = jhash_3words((__force u32)tuple->src.u3.ip,
 			    (__force u32)tuple->src.u.all,
 			    tuple->dst.protonum, 0);
-	return ((u64)hash * nf_nat_htable_size) >> 32;
+	return ((u64)hash * net->ipv4.nat_htable_size) >> 32;
 }
 
 /* Is this tuple already taken? (not by us) */
@@ -147,7 +144,7 @@ find_appropriate_src(struct net *net,
147 struct nf_conntrack_tuple *result, 144 struct nf_conntrack_tuple *result,
148 const struct nf_nat_range *range) 145 const struct nf_nat_range *range)
149{ 146{
150 unsigned int h = hash_by_src(tuple); 147 unsigned int h = hash_by_src(net, tuple);
151 const struct nf_conn_nat *nat; 148 const struct nf_conn_nat *nat;
152 const struct nf_conn *ct; 149 const struct nf_conn *ct;
153 const struct hlist_node *n; 150 const struct hlist_node *n;
@@ -330,7 +327,7 @@ nf_nat_setup_info(struct nf_conn *ct,
330 if (have_to_hash) { 327 if (have_to_hash) {
331 unsigned int srchash; 328 unsigned int srchash;
332 329
333 srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 330 srchash = hash_by_src(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
334 spin_lock_bh(&nf_nat_lock); 331 spin_lock_bh(&nf_nat_lock);
335 /* nf_conntrack_alter_reply might re-allocate exntension aera */ 332 /* nf_conntrack_alter_reply might re-allocate exntension aera */
336 nat = nfct_nat(ct); 333 nat = nfct_nat(ct);
@@ -679,8 +676,10 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct,
679 676
680static int __net_init nf_nat_net_init(struct net *net) 677static int __net_init nf_nat_net_init(struct net *net)
681{ 678{
682 net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 679 /* Leave them the same for the moment. */
683 &net->ipv4.nat_vmalloced, 0); 680 net->ipv4.nat_htable_size = net->ct.htable_size;
681 net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size,
682 &net->ipv4.nat_vmalloced, 0);
684 if (!net->ipv4.nat_bysource) 683 if (!net->ipv4.nat_bysource)
685 return -ENOMEM; 684 return -ENOMEM;
686 return 0; 685 return 0;
@@ -703,7 +702,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
703 nf_ct_iterate_cleanup(net, &clean_nat, NULL); 702 nf_ct_iterate_cleanup(net, &clean_nat, NULL);
704 synchronize_rcu(); 703 synchronize_rcu();
705 nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced, 704 nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced,
706 nf_nat_htable_size); 705 net->ipv4.nat_htable_size);
707} 706}
708 707
709static struct pernet_operations nf_nat_net_ops = { 708static struct pernet_operations nf_nat_net_ops = {
@@ -724,9 +723,6 @@ static int __init nf_nat_init(void)
724 return ret; 723 return ret;
725 } 724 }
726 725
727 /* Leave them the same for the moment. */
728 nf_nat_htable_size = nf_conntrack_htable_size;
729
730 ret = register_pernet_subsys(&nf_nat_net_ops); 726 ret = register_pernet_subsys(&nf_nat_net_ops);
731 if (ret < 0) 727 if (ret < 0)
732 goto cleanup_extend; 728 goto cleanup_extend;
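Note on the nf_nat_core.c hunks above: the NAT by-source hash table size becomes per network namespace (for now net->ipv4.nat_htable_size simply mirrors net->ct.htable_size), so hash_by_src() needs the namespace to know which size to scale against. A minimal stand-alone sketch of that multiply-shift scaling, compiled in userspace with made-up table sizes, not the kernel code:

    #include <stdint.h>
    #include <stdio.h>

    /* Scale a 32-bit hash into [0, size) without a modulo -- the same
     * ((u64)hash * size) >> 32 idiom hash_by_src() keeps using; only the
     * size it scales against is now taken from the namespace. */
    static unsigned int bucket_of(uint32_t hash, unsigned int size)
    {
        return (unsigned int)(((uint64_t)hash * size) >> 32);
    }

    int main(void)
    {
        unsigned int sizes[2] = { 256, 16384 };  /* made-up per-netns sizes */
        uint32_t h = 0xdeadbeef;

        for (int i = 0; i < 2; i++)
            printf("hash %#x -> bucket %u of %u\n",
                   (unsigned int)h, bucket_of(h, sizes[i]), sizes[i]);
        return 0;
    }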
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 480d7f8c9802..8a7e0f52e177 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1164,10 +1164,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
1164 if (t && !IS_ERR(t)) { 1164 if (t && !IS_ERR(t)) {
1165 struct ip6t_getinfo info; 1165 struct ip6t_getinfo info;
1166 const struct xt_table_info *private = t->private; 1166 const struct xt_table_info *private = t->private;
1167
1168#ifdef CONFIG_COMPAT 1167#ifdef CONFIG_COMPAT
1168 struct xt_table_info tmp;
1169
1169 if (compat) { 1170 if (compat) {
1170 struct xt_table_info tmp;
1171 ret = compat_table_info(private, &tmp); 1171 ret = compat_table_info(private, &tmp);
1172 xt_compat_flush_offsets(AF_INET6); 1172 xt_compat_flush_offsets(AF_INET6);
1173 private = &tmp; 1173 private = &tmp;
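Note on the ip6_tables.c hunk above: in the old code, struct xt_table_info tmp was declared inside the if (compat) branch, so private = &tmp left private pointing at storage whose lifetime ended at the closing brace; the later use of private read a dead object. Hoisting the declaration to the enclosing CONFIG_COMPAT scope fixes that. A stand-alone sketch of the corrected lifetime, with purely illustrative names rather than the xtables types:

    #include <stdio.h>
    #include <string.h>

    struct info { char name[8]; unsigned int size; };

    /* Corrected pattern: the temporary lives in the enclosing scope, so a
     * pointer taken inside the branch is still valid after the branch ends. */
    static void get_info(const struct info *priv, int compat)
    {
        struct info tmp;                 /* declared at function scope */

        if (compat) {
            memset(&tmp, 0, sizeof(tmp));
            strcpy(tmp.name, "compat");
            priv = &tmp;                 /* safe: tmp outlives the branch */
        }
        printf("using table '%s' (%u)\n", priv->name, priv->size);
    }

    int main(void)
    {
        struct info real = { "ip6", 42 };

        get_info(&real, 0);
        get_info(&real, 1);
        return 0;
    }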
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 312c20adc83f..624a54832a7c 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -63,6 +63,7 @@ struct nf_ct_frag6_queue
63 struct inet_frag_queue q; 63 struct inet_frag_queue q;
64 64
65 __be32 id; /* fragment id */ 65 __be32 id; /* fragment id */
66 u32 user;
66 struct in6_addr saddr; 67 struct in6_addr saddr;
67 struct in6_addr daddr; 68 struct in6_addr daddr;
68 69
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index 156020d138b5..6b3602de359a 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -698,15 +698,18 @@ dev_irnet_ioctl(
698 698
699 /* Query PPP channel and unit number */ 699 /* Query PPP channel and unit number */
700 case PPPIOCGCHAN: 700 case PPPIOCGCHAN:
701 lock_kernel();
701 if(ap->ppp_open && !put_user(ppp_channel_index(&ap->chan), 702 if(ap->ppp_open && !put_user(ppp_channel_index(&ap->chan),
702 (int __user *)argp)) 703 (int __user *)argp))
703 err = 0; 704 err = 0;
705 unlock_kernel();
704 break; 706 break;
705 case PPPIOCGUNIT: 707 case PPPIOCGUNIT:
706 lock_kernel(); 708 lock_kernel();
707 if(ap->ppp_open && !put_user(ppp_unit_number(&ap->chan), 709 if(ap->ppp_open && !put_user(ppp_unit_number(&ap->chan),
708 (int __user *)argp)) 710 (int __user *)argp))
709 err = 0; 711 err = 0;
712 unlock_kernel();
710 break; 713 break;
711 714
712 /* All these ioctls can be passed both directly and from ppp_generic, 715 /* All these ioctls can be passed both directly and from ppp_generic,
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 76fa6fef6473..539f43bc97db 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3794,9 +3794,9 @@ static struct pernet_operations pfkey_net_ops = {
3794 3794
3795static void __exit ipsec_pfkey_exit(void) 3795static void __exit ipsec_pfkey_exit(void)
3796{ 3796{
3797 unregister_pernet_subsys(&pfkey_net_ops);
3798 xfrm_unregister_km(&pfkeyv2_mgr); 3797 xfrm_unregister_km(&pfkeyv2_mgr);
3799 sock_unregister(PF_KEY); 3798 sock_unregister(PF_KEY);
3799 unregister_pernet_subsys(&pfkey_net_ops);
3800 proto_unregister(&key_proto); 3800 proto_unregister(&key_proto);
3801} 3801}
3802 3802
@@ -3807,21 +3807,22 @@ static int __init ipsec_pfkey_init(void)
3807 if (err != 0) 3807 if (err != 0)
3808 goto out; 3808 goto out;
3809 3809
3810 err = sock_register(&pfkey_family_ops); 3810 err = register_pernet_subsys(&pfkey_net_ops);
3811 if (err != 0) 3811 if (err != 0)
3812 goto out_unregister_key_proto; 3812 goto out_unregister_key_proto;
3813 err = sock_register(&pfkey_family_ops);
3814 if (err != 0)
3815 goto out_unregister_pernet;
3813 err = xfrm_register_km(&pfkeyv2_mgr); 3816 err = xfrm_register_km(&pfkeyv2_mgr);
3814 if (err != 0) 3817 if (err != 0)
3815 goto out_sock_unregister; 3818 goto out_sock_unregister;
3816 err = register_pernet_subsys(&pfkey_net_ops);
3817 if (err != 0)
3818 goto out_xfrm_unregister_km;
3819out: 3819out:
3820 return err; 3820 return err;
3821out_xfrm_unregister_km: 3821
3822 xfrm_unregister_km(&pfkeyv2_mgr);
3823out_sock_unregister: 3822out_sock_unregister:
3824 sock_unregister(PF_KEY); 3823 sock_unregister(PF_KEY);
3824out_unregister_pernet:
3825 unregister_pernet_subsys(&pfkey_net_ops);
3825out_unregister_key_proto: 3826out_unregister_key_proto:
3826 proto_unregister(&key_proto); 3827 proto_unregister(&key_proto);
3827 goto out; 3828 goto out;
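Note on the af_key.c hunks above: ipsec_pfkey_init() now registers the pernet subsystem before the socket family, and both the error unwind and ipsec_pfkey_exit() release resources in exactly the reverse order of acquisition. A stand-alone sketch of that acquire/unwind pattern follows; the register_*/unregister_* names are placeholders, not the pfkey functions:

    #include <stdio.h>

    /* Stand-ins for the real registration calls; each returns 0 on success. */
    static int  register_proto(void)    { puts("register proto");    return 0; }
    static int  register_pernet(void)   { puts("register pernet");   return 0; }
    static int  register_socket(void)   { puts("register socket");   return 0; }
    static void unregister_proto(void)  { puts("unregister proto");  }
    static void unregister_pernet(void) { puts("unregister pernet"); }
    static void unregister_socket(void) { puts("unregister socket"); }

    /* Acquire in one order, unwind in exactly the reverse order, both on
     * the error path and at module exit. */
    static int init(void)
    {
        int err;

        err = register_proto();
        if (err)
            goto out;
        err = register_pernet();
        if (err)
            goto out_proto;
        err = register_socket();
        if (err)
            goto out_pernet;
        return 0;

    out_pernet:
        unregister_pernet();
    out_proto:
        unregister_proto();
    out:
        return err;
    }

    static void exit_module(void)
    {
        unregister_socket();
        unregister_pernet();
        unregister_proto();
    }

    int main(void)
    {
        if (!init())
            exit_module();
        return 0;
    }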
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index ee94ea0c67e9..da8497ef7063 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -680,7 +680,7 @@ TRACE_EVENT(drv_ampdu_action,
680 __entry->ret = ret; 680 __entry->ret = ret;
681 __entry->action = action; 681 __entry->action = action;
682 __entry->tid = tid; 682 __entry->tid = tid;
683 __entry->ssn = *ssn; 683 __entry->ssn = ssn ? *ssn : 0;
684 ), 684 ),
685 685
686 TP_printk( 686 TP_printk(
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 0e98c3282d42..4d79e3c1616c 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -30,6 +30,7 @@
30#include <linux/netdevice.h> 30#include <linux/netdevice.h>
31#include <linux/socket.h> 31#include <linux/socket.h>
32#include <linux/mm.h> 32#include <linux/mm.h>
33#include <linux/nsproxy.h>
33#include <linux/rculist_nulls.h> 34#include <linux/rculist_nulls.h>
34 35
35#include <net/netfilter/nf_conntrack.h> 36#include <net/netfilter/nf_conntrack.h>
@@ -63,8 +64,6 @@ EXPORT_SYMBOL_GPL(nf_conntrack_max);
63struct nf_conn nf_conntrack_untracked __read_mostly; 64struct nf_conn nf_conntrack_untracked __read_mostly;
64EXPORT_SYMBOL_GPL(nf_conntrack_untracked); 65EXPORT_SYMBOL_GPL(nf_conntrack_untracked);
65 66
66static struct kmem_cache *nf_conntrack_cachep __read_mostly;
67
68static int nf_conntrack_hash_rnd_initted; 67static int nf_conntrack_hash_rnd_initted;
69static unsigned int nf_conntrack_hash_rnd; 68static unsigned int nf_conntrack_hash_rnd;
70 69
@@ -86,9 +85,10 @@ static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
86 return ((u64)h * size) >> 32; 85 return ((u64)h * size) >> 32;
87} 86}
88 87
89static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple) 88static inline u_int32_t hash_conntrack(const struct net *net,
89 const struct nf_conntrack_tuple *tuple)
90{ 90{
91 return __hash_conntrack(tuple, nf_conntrack_htable_size, 91 return __hash_conntrack(tuple, net->ct.htable_size,
92 nf_conntrack_hash_rnd); 92 nf_conntrack_hash_rnd);
93} 93}
94 94
@@ -296,7 +296,7 @@ __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
296{ 296{
297 struct nf_conntrack_tuple_hash *h; 297 struct nf_conntrack_tuple_hash *h;
298 struct hlist_nulls_node *n; 298 struct hlist_nulls_node *n;
299 unsigned int hash = hash_conntrack(tuple); 299 unsigned int hash = hash_conntrack(net, tuple);
300 300
301 /* Disable BHs the entire time since we normally need to disable them 301 /* Disable BHs the entire time since we normally need to disable them
302 * at least once for the stats anyway. 302 * at least once for the stats anyway.
@@ -366,10 +366,11 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
366 366
367void nf_conntrack_hash_insert(struct nf_conn *ct) 367void nf_conntrack_hash_insert(struct nf_conn *ct)
368{ 368{
369 struct net *net = nf_ct_net(ct);
369 unsigned int hash, repl_hash; 370 unsigned int hash, repl_hash;
370 371
371 hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 372 hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
372 repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); 373 repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
373 374
374 __nf_conntrack_hash_insert(ct, hash, repl_hash); 375 __nf_conntrack_hash_insert(ct, hash, repl_hash);
375} 376}
@@ -397,8 +398,8 @@ __nf_conntrack_confirm(struct sk_buff *skb)
397 if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) 398 if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
398 return NF_ACCEPT; 399 return NF_ACCEPT;
399 400
400 hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 401 hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
401 repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); 402 repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
402 403
403 /* We're not in hash table, and we refuse to set up related 404 /* We're not in hash table, and we refuse to set up related
404 connections for unconfirmed conns. But packet copies and 405 connections for unconfirmed conns. But packet copies and
@@ -468,7 +469,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
468 struct net *net = nf_ct_net(ignored_conntrack); 469 struct net *net = nf_ct_net(ignored_conntrack);
469 struct nf_conntrack_tuple_hash *h; 470 struct nf_conntrack_tuple_hash *h;
470 struct hlist_nulls_node *n; 471 struct hlist_nulls_node *n;
471 unsigned int hash = hash_conntrack(tuple); 472 unsigned int hash = hash_conntrack(net, tuple);
472 473
473 /* Disable BHs the entire time since we need to disable them at 474 /* Disable BHs the entire time since we need to disable them at
474 * least once for the stats anyway. 475 * least once for the stats anyway.
@@ -503,7 +504,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
503 int dropped = 0; 504 int dropped = 0;
504 505
505 rcu_read_lock(); 506 rcu_read_lock();
506 for (i = 0; i < nf_conntrack_htable_size; i++) { 507 for (i = 0; i < net->ct.htable_size; i++) {
507 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], 508 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
508 hnnode) { 509 hnnode) {
509 tmp = nf_ct_tuplehash_to_ctrack(h); 510 tmp = nf_ct_tuplehash_to_ctrack(h);
@@ -523,7 +524,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
523 if (cnt >= NF_CT_EVICTION_RANGE) 524 if (cnt >= NF_CT_EVICTION_RANGE)
524 break; 525 break;
525 526
526 hash = (hash + 1) % nf_conntrack_htable_size; 527 hash = (hash + 1) % net->ct.htable_size;
527 } 528 }
528 rcu_read_unlock(); 529 rcu_read_unlock();
529 530
@@ -557,7 +558,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
557 558
558 if (nf_conntrack_max && 559 if (nf_conntrack_max &&
559 unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) { 560 unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
560 unsigned int hash = hash_conntrack(orig); 561 unsigned int hash = hash_conntrack(net, orig);
561 if (!early_drop(net, hash)) { 562 if (!early_drop(net, hash)) {
562 atomic_dec(&net->ct.count); 563 atomic_dec(&net->ct.count);
563 if (net_ratelimit()) 564 if (net_ratelimit())
@@ -572,7 +573,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
572 * Do not use kmem_cache_zalloc(), as this cache uses 573 * Do not use kmem_cache_zalloc(), as this cache uses
573 * SLAB_DESTROY_BY_RCU. 574 * SLAB_DESTROY_BY_RCU.
574 */ 575 */
575 ct = kmem_cache_alloc(nf_conntrack_cachep, gfp); 576 ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
576 if (ct == NULL) { 577 if (ct == NULL) {
577 pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n"); 578 pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
578 atomic_dec(&net->ct.count); 579 atomic_dec(&net->ct.count);
@@ -611,7 +612,7 @@ void nf_conntrack_free(struct nf_conn *ct)
611 nf_ct_ext_destroy(ct); 612 nf_ct_ext_destroy(ct);
612 atomic_dec(&net->ct.count); 613 atomic_dec(&net->ct.count);
613 nf_ct_ext_free(ct); 614 nf_ct_ext_free(ct);
614 kmem_cache_free(nf_conntrack_cachep, ct); 615 kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
615} 616}
616EXPORT_SYMBOL_GPL(nf_conntrack_free); 617EXPORT_SYMBOL_GPL(nf_conntrack_free);
617 618
@@ -1014,7 +1015,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
1014 struct hlist_nulls_node *n; 1015 struct hlist_nulls_node *n;
1015 1016
1016 spin_lock_bh(&nf_conntrack_lock); 1017 spin_lock_bh(&nf_conntrack_lock);
1017 for (; *bucket < nf_conntrack_htable_size; (*bucket)++) { 1018 for (; *bucket < net->ct.htable_size; (*bucket)++) {
1018 hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) { 1019 hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
1019 ct = nf_ct_tuplehash_to_ctrack(h); 1020 ct = nf_ct_tuplehash_to_ctrack(h);
1020 if (iter(ct, data)) 1021 if (iter(ct, data))
@@ -1113,9 +1114,12 @@ static void nf_ct_release_dying_list(struct net *net)
1113 1114
1114static void nf_conntrack_cleanup_init_net(void) 1115static void nf_conntrack_cleanup_init_net(void)
1115{ 1116{
1117 /* wait until all references to nf_conntrack_untracked are dropped */
1118 while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
1119 schedule();
1120
1116 nf_conntrack_helper_fini(); 1121 nf_conntrack_helper_fini();
1117 nf_conntrack_proto_fini(); 1122 nf_conntrack_proto_fini();
1118 kmem_cache_destroy(nf_conntrack_cachep);
1119} 1123}
1120 1124
1121static void nf_conntrack_cleanup_net(struct net *net) 1125static void nf_conntrack_cleanup_net(struct net *net)
@@ -1127,15 +1131,14 @@ static void nf_conntrack_cleanup_net(struct net *net)
1127 schedule(); 1131 schedule();
1128 goto i_see_dead_people; 1132 goto i_see_dead_people;
1129 } 1133 }
1130 /* wait until all references to nf_conntrack_untracked are dropped */
1131 while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
1132 schedule();
1133 1134
1134 nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, 1135 nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
1135 nf_conntrack_htable_size); 1136 net->ct.htable_size);
1136 nf_conntrack_ecache_fini(net); 1137 nf_conntrack_ecache_fini(net);
1137 nf_conntrack_acct_fini(net); 1138 nf_conntrack_acct_fini(net);
1138 nf_conntrack_expect_fini(net); 1139 nf_conntrack_expect_fini(net);
1140 kmem_cache_destroy(net->ct.nf_conntrack_cachep);
1141 kfree(net->ct.slabname);
1139 free_percpu(net->ct.stat); 1142 free_percpu(net->ct.stat);
1140} 1143}
1141 1144
@@ -1190,10 +1193,12 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1190{ 1193{
1191 int i, bucket, vmalloced, old_vmalloced; 1194 int i, bucket, vmalloced, old_vmalloced;
1192 unsigned int hashsize, old_size; 1195 unsigned int hashsize, old_size;
1193 int rnd;
1194 struct hlist_nulls_head *hash, *old_hash; 1196 struct hlist_nulls_head *hash, *old_hash;
1195 struct nf_conntrack_tuple_hash *h; 1197 struct nf_conntrack_tuple_hash *h;
1196 1198
1199 if (current->nsproxy->net_ns != &init_net)
1200 return -EOPNOTSUPP;
1201
1197 /* On boot, we can set this without any fancy locking. */ 1202 /* On boot, we can set this without any fancy locking. */
1198 if (!nf_conntrack_htable_size) 1203 if (!nf_conntrack_htable_size)
1199 return param_set_uint(val, kp); 1204 return param_set_uint(val, kp);
@@ -1206,33 +1211,29 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1206 if (!hash) 1211 if (!hash)
1207 return -ENOMEM; 1212 return -ENOMEM;
1208 1213
1209 /* We have to rehahs for the new table anyway, so we also can
1210 * use a newrandom seed */
1211 get_random_bytes(&rnd, sizeof(rnd));
1212
1213 /* Lookups in the old hash might happen in parallel, which means we 1214 /* Lookups in the old hash might happen in parallel, which means we
1214 * might get false negatives during connection lookup. New connections 1215 * might get false negatives during connection lookup. New connections
1215 * created because of a false negative won't make it into the hash 1216 * created because of a false negative won't make it into the hash
1216 * though since that required taking the lock. 1217 * though since that required taking the lock.
1217 */ 1218 */
1218 spin_lock_bh(&nf_conntrack_lock); 1219 spin_lock_bh(&nf_conntrack_lock);
1219 for (i = 0; i < nf_conntrack_htable_size; i++) { 1220 for (i = 0; i < init_net.ct.htable_size; i++) {
1220 while (!hlist_nulls_empty(&init_net.ct.hash[i])) { 1221 while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
1221 h = hlist_nulls_entry(init_net.ct.hash[i].first, 1222 h = hlist_nulls_entry(init_net.ct.hash[i].first,
1222 struct nf_conntrack_tuple_hash, hnnode); 1223 struct nf_conntrack_tuple_hash, hnnode);
1223 hlist_nulls_del_rcu(&h->hnnode); 1224 hlist_nulls_del_rcu(&h->hnnode);
1224 bucket = __hash_conntrack(&h->tuple, hashsize, rnd); 1225 bucket = __hash_conntrack(&h->tuple, hashsize,
1226 nf_conntrack_hash_rnd);
1225 hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); 1227 hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
1226 } 1228 }
1227 } 1229 }
1228 old_size = nf_conntrack_htable_size; 1230 old_size = init_net.ct.htable_size;
1229 old_vmalloced = init_net.ct.hash_vmalloc; 1231 old_vmalloced = init_net.ct.hash_vmalloc;
1230 old_hash = init_net.ct.hash; 1232 old_hash = init_net.ct.hash;
1231 1233
1232 nf_conntrack_htable_size = hashsize; 1234 init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
1233 init_net.ct.hash_vmalloc = vmalloced; 1235 init_net.ct.hash_vmalloc = vmalloced;
1234 init_net.ct.hash = hash; 1236 init_net.ct.hash = hash;
1235 nf_conntrack_hash_rnd = rnd;
1236 spin_unlock_bh(&nf_conntrack_lock); 1237 spin_unlock_bh(&nf_conntrack_lock);
1237 1238
1238 nf_ct_free_hashtable(old_hash, old_vmalloced, old_size); 1239 nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
@@ -1271,15 +1272,6 @@ static int nf_conntrack_init_init_net(void)
1271 NF_CONNTRACK_VERSION, nf_conntrack_htable_size, 1272 NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
1272 nf_conntrack_max); 1273 nf_conntrack_max);
1273 1274
1274 nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
1275 sizeof(struct nf_conn),
1276 0, SLAB_DESTROY_BY_RCU, NULL);
1277 if (!nf_conntrack_cachep) {
1278 printk(KERN_ERR "Unable to create nf_conn slab cache\n");
1279 ret = -ENOMEM;
1280 goto err_cache;
1281 }
1282
1283 ret = nf_conntrack_proto_init(); 1275 ret = nf_conntrack_proto_init();
1284 if (ret < 0) 1276 if (ret < 0)
1285 goto err_proto; 1277 goto err_proto;
@@ -1288,13 +1280,19 @@ static int nf_conntrack_init_init_net(void)
1288 if (ret < 0) 1280 if (ret < 0)
1289 goto err_helper; 1281 goto err_helper;
1290 1282
1283 /* Set up fake conntrack: to never be deleted, not in any hashes */
1284#ifdef CONFIG_NET_NS
1285 nf_conntrack_untracked.ct_net = &init_net;
1286#endif
1287 atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
1288 /* - and look it like as a confirmed connection */
1289 set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);
1290
1291 return 0; 1291 return 0;
1292 1292
1293err_helper: 1293err_helper:
1294 nf_conntrack_proto_fini(); 1294 nf_conntrack_proto_fini();
1295err_proto: 1295err_proto:
1296 kmem_cache_destroy(nf_conntrack_cachep);
1297err_cache:
1298 return ret; 1296 return ret;
1299} 1297}
1300 1298
@@ -1316,7 +1314,24 @@ static int nf_conntrack_init_net(struct net *net)
1316 ret = -ENOMEM; 1314 ret = -ENOMEM;
1317 goto err_stat; 1315 goto err_stat;
1318 } 1316 }
1319 net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1317
1318 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
1319 if (!net->ct.slabname) {
1320 ret = -ENOMEM;
1321 goto err_slabname;
1322 }
1323
1324 net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
1325 sizeof(struct nf_conn), 0,
1326 SLAB_DESTROY_BY_RCU, NULL);
1327 if (!net->ct.nf_conntrack_cachep) {
1328 printk(KERN_ERR "Unable to create nf_conn slab cache\n");
1329 ret = -ENOMEM;
1330 goto err_cache;
1331 }
1332
1333 net->ct.htable_size = nf_conntrack_htable_size;
1334 net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size,
1320 &net->ct.hash_vmalloc, 1); 1335 &net->ct.hash_vmalloc, 1);
1321 if (!net->ct.hash) { 1336 if (!net->ct.hash) {
1322 ret = -ENOMEM; 1337 ret = -ENOMEM;
@@ -1333,15 +1348,6 @@ static int nf_conntrack_init_net(struct net *net)
1333 if (ret < 0) 1348 if (ret < 0)
1334 goto err_ecache; 1349 goto err_ecache;
1335 1350
1336 /* Set up fake conntrack:
1337 - to never be deleted, not in any hashes */
1338#ifdef CONFIG_NET_NS
1339 nf_conntrack_untracked.ct_net = &init_net;
1340#endif
1341 atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
1342 /* - and look it like as a confirmed connection */
1343 set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);
1344
1345 return 0; 1351 return 0;
1346 1352
1347err_ecache: 1353err_ecache:
@@ -1350,8 +1356,12 @@ err_acct:
1350 nf_conntrack_expect_fini(net); 1356 nf_conntrack_expect_fini(net);
1351err_expect: 1357err_expect:
1352 nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, 1358 nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
1353 nf_conntrack_htable_size); 1359 net->ct.htable_size);
1354err_hash: 1360err_hash:
1361 kmem_cache_destroy(net->ct.nf_conntrack_cachep);
1362err_cache:
1363 kfree(net->ct.slabname);
1364err_slabname:
1355 free_percpu(net->ct.stat); 1365 free_percpu(net->ct.stat);
1356err_stat: 1366err_stat:
1357 return ret; 1367 return ret;
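Note on the nf_conntrack_core.c hunks above: the conntrack hash table size and the nf_conn slab cache become per network namespace (net->ct.htable_size and net->ct.nf_conntrack_cachep, with a kasprintf()-generated cache name), resizing through nf_conntrack_set_hashsize() is restricted to init_net, and the fake "untracked" conntrack setup moves into the init_net-wide init path. The following userspace analogue of the per-instance naming, using malloc()/asprintf() in place of the kernel's kmem_cache_create()/kasprintf(), only illustrates the idea:

    #define _GNU_SOURCE          /* for asprintf() */
    #include <stdio.h>
    #include <stdlib.h>

    struct net_sim { char *slabname; unsigned int htable_size; };

    /* Each "namespace" gets its own cache name derived from its pointer
     * and inherits the global default table size, mirroring what
     * nf_conntrack_init_net() does above. */
    static struct net_sim *net_init(unsigned int default_size)
    {
        struct net_sim *net = calloc(1, sizeof(*net));

        if (!net)
            return NULL;
        if (asprintf(&net->slabname, "nf_conntrack_%p", (void *)net) < 0) {
            free(net);
            return NULL;
        }
        net->htable_size = default_size;
        return net;
    }

    static void net_exit(struct net_sim *net)
    {
        free(net->slabname);
        free(net);
    }

    int main(void)
    {
        struct net_sim *a = net_init(16384), *b = net_init(16384);

        if (a && b)
            printf("%s (%u buckets)\n%s (%u buckets)\n",
                   a->slabname, a->htable_size,
                   b->slabname, b->htable_size);
        if (a)
            net_exit(a);
        if (b)
            net_exit(b);
        return 0;
    }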
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index fdf5d2a1d9b4..2f25ff610982 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -569,7 +569,7 @@ static void exp_proc_remove(struct net *net)
569#endif /* CONFIG_PROC_FS */ 569#endif /* CONFIG_PROC_FS */
570} 570}
571 571
572module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0600); 572module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);
573 573
574int nf_conntrack_expect_init(struct net *net) 574int nf_conntrack_expect_init(struct net *net)
575{ 575{
@@ -577,7 +577,7 @@ int nf_conntrack_expect_init(struct net *net)
577 577
578 if (net_eq(net, &init_net)) { 578 if (net_eq(net, &init_net)) {
579 if (!nf_ct_expect_hsize) { 579 if (!nf_ct_expect_hsize) {
580 nf_ct_expect_hsize = nf_conntrack_htable_size / 256; 580 nf_ct_expect_hsize = net->ct.htable_size / 256;
581 if (!nf_ct_expect_hsize) 581 if (!nf_ct_expect_hsize)
582 nf_ct_expect_hsize = 1; 582 nf_ct_expect_hsize = 1;
583 } 583 }
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 65c2a7bc3afc..4b1a56bd074c 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -192,7 +192,7 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
192 /* Get rid of expecteds, set helpers to NULL. */ 192 /* Get rid of expecteds, set helpers to NULL. */
193 hlist_nulls_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode) 193 hlist_nulls_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode)
194 unhelp(h, me); 194 unhelp(h, me);
195 for (i = 0; i < nf_conntrack_htable_size; i++) { 195 for (i = 0; i < net->ct.htable_size; i++) {
196 hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode) 196 hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
197 unhelp(h, me); 197 unhelp(h, me);
198 } 198 }
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 59d8064eb522..0ffe689dfe97 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -594,7 +594,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
594 594
595 rcu_read_lock(); 595 rcu_read_lock();
596 last = (struct nf_conn *)cb->args[1]; 596 last = (struct nf_conn *)cb->args[1];
597 for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) { 597 for (; cb->args[0] < init_net.ct.htable_size; cb->args[0]++) {
598restart: 598restart:
599 hlist_nulls_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]], 599 hlist_nulls_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]],
600 hnnode) { 600 hnnode) {
@@ -1437,8 +1437,9 @@ ctnetlink_exp_dump_mask(struct sk_buff *skb,
1437 struct nlattr *nest_parms; 1437 struct nlattr *nest_parms;
1438 1438
1439 memset(&m, 0xFF, sizeof(m)); 1439 memset(&m, 0xFF, sizeof(m));
1440 m.src.u.all = mask->src.u.all;
1441 memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3)); 1440 memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3));
1441 m.src.u.all = mask->src.u.all;
1442 m.dst.protonum = tuple->dst.protonum;
1442 1443
1443 nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK | NLA_F_NESTED); 1444 nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK | NLA_F_NESTED);
1444 if (!nest_parms) 1445 if (!nest_parms)
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 4b572163784b..023966b569bf 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -376,7 +376,7 @@ int ct_sip_get_header(const struct nf_conn *ct, const char *dptr,
376 dptr += hdr->len; 376 dptr += hdr->len;
377 else if (hdr->cname && limit - dptr >= hdr->clen + 1 && 377 else if (hdr->cname && limit - dptr >= hdr->clen + 1 &&
378 strnicmp(dptr, hdr->cname, hdr->clen) == 0 && 378 strnicmp(dptr, hdr->cname, hdr->clen) == 0 &&
379 !isalpha(*(dptr + hdr->clen + 1))) 379 !isalpha(*(dptr + hdr->clen)))
380 dptr += hdr->clen; 380 dptr += hdr->clen;
381 else 381 else
382 continue; 382 continue;
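Note on the nf_conntrack_sip.c hunk above: when matching a compact header name, the byte tested for being a letter should be the one immediately after the abbreviation; the old dptr + hdr->clen + 1 skipped a byte, so a longer token could be misread as a compact header whenever the byte after the skipped one happened not to be a letter. A stand-alone sketch of the corrected predicate, with simplified names:

    #include <ctype.h>
    #include <stdio.h>
    #include <strings.h>

    /* The byte tested for being a letter is the one immediately after the
     * abbreviation; the old code looked one byte further. */
    static int matches_compact(const char *dptr, const char *cname, int clen)
    {
        return strncasecmp(dptr, cname, clen) == 0 &&
               !isalpha((unsigned char)dptr[clen]);   /* was dptr[clen + 1] */
    }

    int main(void)
    {
        printf("\"v: SIP/2.0\" vs \"v\" -> %d\n",
               matches_compact("v: SIP/2.0", "v", 1));
        printf("\"via: host\"  vs \"v\" -> %d\n",
               matches_compact("via: host", "v", 1));
        return 0;
    }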
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 028aba667ef7..e310f1561bb2 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -51,7 +51,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
51 struct hlist_nulls_node *n; 51 struct hlist_nulls_node *n;
52 52
53 for (st->bucket = 0; 53 for (st->bucket = 0;
54 st->bucket < nf_conntrack_htable_size; 54 st->bucket < net->ct.htable_size;
55 st->bucket++) { 55 st->bucket++) {
56 n = rcu_dereference(net->ct.hash[st->bucket].first); 56 n = rcu_dereference(net->ct.hash[st->bucket].first);
57 if (!is_a_nulls(n)) 57 if (!is_a_nulls(n))
@@ -69,7 +69,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
69 head = rcu_dereference(head->next); 69 head = rcu_dereference(head->next);
70 while (is_a_nulls(head)) { 70 while (is_a_nulls(head)) {
71 if (likely(get_nulls_value(head) == st->bucket)) { 71 if (likely(get_nulls_value(head) == st->bucket)) {
72 if (++st->bucket >= nf_conntrack_htable_size) 72 if (++st->bucket >= net->ct.htable_size)
73 return NULL; 73 return NULL;
74 } 74 }
75 head = rcu_dereference(net->ct.hash[st->bucket].first); 75 head = rcu_dereference(net->ct.hash[st->bucket].first);
@@ -355,7 +355,7 @@ static ctl_table nf_ct_sysctl_table[] = {
355 }, 355 },
356 { 356 {
357 .procname = "nf_conntrack_buckets", 357 .procname = "nf_conntrack_buckets",
358 .data = &nf_conntrack_htable_size, 358 .data = &init_net.ct.htable_size,
359 .maxlen = sizeof(unsigned int), 359 .maxlen = sizeof(unsigned int),
360 .mode = 0444, 360 .mode = 0444,
361 .proc_handler = proc_dointvec, 361 .proc_handler = proc_dointvec,
@@ -421,6 +421,7 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
421 goto out_kmemdup; 421 goto out_kmemdup;
422 422
423 table[1].data = &net->ct.count; 423 table[1].data = &net->ct.count;
424 table[2].data = &net->ct.htable_size;
424 table[3].data = &net->ct.sysctl_checksum; 425 table[3].data = &net->ct.sysctl_checksum;
425 table[4].data = &net->ct.sysctl_log_invalid; 426 table[4].data = &net->ct.sysctl_log_invalid;
426 427
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a4957bf2ca60..4c5972ba8c78 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -455,9 +455,14 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
455 if (nl_table[protocol].registered && 455 if (nl_table[protocol].registered &&
456 try_module_get(nl_table[protocol].module)) 456 try_module_get(nl_table[protocol].module))
457 module = nl_table[protocol].module; 457 module = nl_table[protocol].module;
458 else
459 err = -EPROTONOSUPPORT;
458 cb_mutex = nl_table[protocol].cb_mutex; 460 cb_mutex = nl_table[protocol].cb_mutex;
459 netlink_unlock_table(); 461 netlink_unlock_table();
460 462
463 if (err < 0)
464 goto out;
465
461 err = __netlink_create(net, sock, cb_mutex, protocol); 466 err = __netlink_create(net, sock, cb_mutex, protocol);
462 if (err < 0) 467 if (err < 0)
463 goto out_module; 468 goto out_module;
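Note on the af_netlink.c hunk above: netlink_create() now records -EPROTONOSUPPORT while the table lock is held if the protocol's module cannot be pinned, then bails out after dropping the lock instead of continuing without a module. A toy sketch of that record-under-lock / fail-after-unlock shape; try_get_module() here is a stand-in, not a kernel function:

    #include <errno.h>
    #include <stdio.h>

    static int try_get_module(int registered)
    {
        return registered;
    }

    static int create(int registered)
    {
        int err = 0;

        /* netlink_lock_table(); */
        if (!try_get_module(registered))
            err = -EPROTONOSUPPORT;  /* remember the error under the lock */
        /* netlink_unlock_table(); */

        if (err < 0)
            return err;              /* fail instead of continuing without a module */
        return 0;
    }

    int main(void)
    {
        printf("registered protocol:   %d\n", create(1));
        printf("unregistered protocol: %d\n", create(0));
        return 0;
    }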
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 929218a47620..21f9c7678aa3 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -433,7 +433,7 @@ config NET_ACT_POLICE
433 module. 433 module.
434 434
435 To compile this code as a module, choose M here: the 435 To compile this code as a module, choose M here: the
436 module will be called police. 436 module will be called act_police.
437 437
438config NET_ACT_GACT 438config NET_ACT_GACT
439 tristate "Generic actions" 439 tristate "Generic actions"
@@ -443,7 +443,7 @@ config NET_ACT_GACT
443 accepting packets. 443 accepting packets.
444 444
445 To compile this code as a module, choose M here: the 445 To compile this code as a module, choose M here: the
446 module will be called gact. 446 module will be called act_gact.
447 447
448config GACT_PROB 448config GACT_PROB
449 bool "Probability support" 449 bool "Probability support"
@@ -459,7 +459,7 @@ config NET_ACT_MIRRED
459 other devices. 459 other devices.
460 460
461 To compile this code as a module, choose M here: the 461 To compile this code as a module, choose M here: the
462 module will be called mirred. 462 module will be called act_mirred.
463 463
464config NET_ACT_IPT 464config NET_ACT_IPT
465 tristate "IPtables targets" 465 tristate "IPtables targets"
@@ -469,7 +469,7 @@ config NET_ACT_IPT
469 classification. 469 classification.
470 470
471 To compile this code as a module, choose M here: the 471 To compile this code as a module, choose M here: the
472 module will be called ipt. 472 module will be called act_ipt.
473 473
474config NET_ACT_NAT 474config NET_ACT_NAT
475 tristate "Stateless NAT" 475 tristate "Stateless NAT"
@@ -479,7 +479,7 @@ config NET_ACT_NAT
479 netfilter for NAT unless you know what you are doing. 479 netfilter for NAT unless you know what you are doing.
480 480
481 To compile this code as a module, choose M here: the 481 To compile this code as a module, choose M here: the
482 module will be called nat. 482 module will be called act_nat.
483 483
484config NET_ACT_PEDIT 484config NET_ACT_PEDIT
485 tristate "Packet Editing" 485 tristate "Packet Editing"
@@ -488,7 +488,7 @@ config NET_ACT_PEDIT
488 Say Y here if you want to mangle the content of packets. 488 Say Y here if you want to mangle the content of packets.
489 489
490 To compile this code as a module, choose M here: the 490 To compile this code as a module, choose M here: the
491 module will be called pedit. 491 module will be called act_pedit.
492 492
493config NET_ACT_SIMP 493config NET_ACT_SIMP
494 tristate "Simple Example (Debug)" 494 tristate "Simple Example (Debug)"
@@ -502,7 +502,7 @@ config NET_ACT_SIMP
502 If unsure, say N. 502 If unsure, say N.
503 503
504 To compile this code as a module, choose M here: the 504 To compile this code as a module, choose M here: the
505 module will be called simple. 505 module will be called act_simple.
506 506
507config NET_ACT_SKBEDIT 507config NET_ACT_SKBEDIT
508 tristate "SKB Editing" 508 tristate "SKB Editing"
@@ -513,7 +513,7 @@ config NET_ACT_SKBEDIT
513 If unsure, say N. 513 If unsure, say N.
514 514
515 To compile this code as a module, choose M here: the 515 To compile this code as a module, choose M here: the
516 module will be called skbedit. 516 module will be called act_skbedit.
517 517
518config NET_CLS_IND 518config NET_CLS_IND
519 bool "Incoming device classification" 519 bool "Incoming device classification"
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index 090f24839700..2f3230db7ffb 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -74,8 +74,8 @@ my %VCS_cmds;
74my %VCS_cmds_git = ( 74my %VCS_cmds_git = (
75 "execute_cmd" => \&git_execute_cmd, 75 "execute_cmd" => \&git_execute_cmd,
76 "available" => '(which("git") ne "") && (-d ".git")', 76 "available" => '(which("git") ne "") && (-d ".git")',
77 "find_signers_cmd" => "git log --since=\$email_git_since -- \$file", 77 "find_signers_cmd" => "git log --no-color --since=\$email_git_since -- \$file",
78 "find_commit_signers_cmd" => "git log -1 \$commit", 78 "find_commit_signers_cmd" => "git log --no-color -1 \$commit",
79 "blame_range_cmd" => "git blame -l -L \$diff_start,+\$diff_length \$file", 79 "blame_range_cmd" => "git blame -l -L \$diff_start,+\$diff_length \$file",
80 "blame_file_cmd" => "git blame -l \$file", 80 "blame_file_cmd" => "git blame -l \$file",
81 "commit_pattern" => "^commit [0-9a-f]{40,40}", 81 "commit_pattern" => "^commit [0-9a-f]{40,40}",
diff --git a/scripts/markup_oops.pl b/scripts/markup_oops.pl
index ce3e40b01e48..e950f9cde019 100644
--- a/scripts/markup_oops.pl
+++ b/scripts/markup_oops.pl
@@ -158,7 +158,7 @@ while (<STDIN>) {
158 $function = $1; 158 $function = $1;
159 $func_offset = $2; 159 $func_offset = $2;
160 } 160 }
161 if ($line =~ /RIP: 0010:\[\<[0-9a-f]+\>\] \[\<[0-9a-f]+\>\] ([a-zA-Z0-9\_]+)\+(0x[0-9a-f]+)\/0x[a-f0-9]/) { 161 if ($line =~ /RIP: 0010:\[\<[0-9a-f]+\>\] \[\<[0-9a-f]+\>\] ([a-zA-Z0-9\_]+)\+0x([0-9a-f]+)\/0x[a-f0-9]/) {
162 $function = $1; 162 $function = $1;
163 $func_offset = $2; 163 $func_offset = $2;
164 } 164 }
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index c41afe6639a0..47fb65d1fcbd 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -65,7 +65,6 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode,
65 const char *cause, int result, int info); 65 const char *cause, int result, int info);
66 66
67/* Internal IMA function definitions */ 67/* Internal IMA function definitions */
68void ima_iintcache_init(void);
69int ima_init(void); 68int ima_init(void);
70void ima_cleanup(void); 69void ima_cleanup(void);
71int ima_fs_init(void); 70int ima_fs_init(void);
@@ -131,7 +130,7 @@ void iint_free(struct kref *kref);
131void iint_rcu_free(struct rcu_head *rcu); 130void iint_rcu_free(struct rcu_head *rcu);
132 131
133/* IMA policy related functions */ 132/* IMA policy related functions */
134enum ima_hooks { PATH_CHECK = 1, FILE_MMAP, BPRM_CHECK }; 133enum ima_hooks { FILE_CHECK = 1, FILE_MMAP, BPRM_CHECK };
135 134
136int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask); 135int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask);
137void ima_init_policy(void); 136void ima_init_policy(void);
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index 3cd58b60afd2..2a5e0bcf3887 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -95,12 +95,12 @@ err_out:
95 * ima_must_measure - measure decision based on policy. 95 * ima_must_measure - measure decision based on policy.
96 * @inode: pointer to inode to measure 96 * @inode: pointer to inode to measure
97 * @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXECUTE) 97 * @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXECUTE)
98 * @function: calling function (PATH_CHECK, BPRM_CHECK, FILE_MMAP) 98 * @function: calling function (FILE_CHECK, BPRM_CHECK, FILE_MMAP)
99 * 99 *
100 * The policy is defined in terms of keypairs: 100 * The policy is defined in terms of keypairs:
101 * subj=, obj=, type=, func=, mask=, fsmagic= 101 * subj=, obj=, type=, func=, mask=, fsmagic=
102 * subj,obj, and type: are LSM specific. 102 * subj,obj, and type: are LSM specific.
103 * func: PATH_CHECK | BPRM_CHECK | FILE_MMAP 103 * func: FILE_CHECK | BPRM_CHECK | FILE_MMAP
104 * mask: contains the permission mask 104 * mask: contains the permission mask
105 * fsmagic: hex value 105 * fsmagic: hex value
106 * 106 *
diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c
index fa592ff1ac1c..0d83edcfc402 100644
--- a/security/integrity/ima/ima_iint.c
+++ b/security/integrity/ima/ima_iint.c
@@ -52,9 +52,6 @@ int ima_inode_alloc(struct inode *inode)
52 struct ima_iint_cache *iint = NULL; 52 struct ima_iint_cache *iint = NULL;
53 int rc = 0; 53 int rc = 0;
54 54
55 if (!ima_initialized)
56 return 0;
57
58 iint = kmem_cache_alloc(iint_cache, GFP_NOFS); 55 iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
59 if (!iint) 56 if (!iint)
60 return -ENOMEM; 57 return -ENOMEM;
@@ -118,8 +115,6 @@ void ima_inode_free(struct inode *inode)
118{ 115{
119 struct ima_iint_cache *iint; 116 struct ima_iint_cache *iint;
120 117
121 if (!ima_initialized)
122 return;
123 spin_lock(&ima_iint_lock); 118 spin_lock(&ima_iint_lock);
124 iint = radix_tree_delete(&ima_iint_store, (unsigned long)inode); 119 iint = radix_tree_delete(&ima_iint_store, (unsigned long)inode);
125 spin_unlock(&ima_iint_lock); 120 spin_unlock(&ima_iint_lock);
@@ -141,9 +136,11 @@ static void init_once(void *foo)
141 kref_set(&iint->refcount, 1); 136 kref_set(&iint->refcount, 1);
142} 137}
143 138
144void __init ima_iintcache_init(void) 139static int __init ima_iintcache_init(void)
145{ 140{
146 iint_cache = 141 iint_cache =
147 kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0, 142 kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0,
148 SLAB_PANIC, init_once); 143 SLAB_PANIC, init_once);
144 return 0;
149} 145}
146security_initcall(ima_iintcache_init);
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index a89f44d5e030..294b005d6520 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -14,7 +14,7 @@
14 * 14 *
15 * File: ima_main.c 15 * File: ima_main.c
16 * implements the IMA hooks: ima_bprm_check, ima_file_mmap, 16 * implements the IMA hooks: ima_bprm_check, ima_file_mmap,
17 * and ima_path_check. 17 * and ima_file_check.
18 */ 18 */
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/file.h> 20#include <linux/file.h>
@@ -84,6 +84,36 @@ out:
84 return found; 84 return found;
85} 85}
86 86
87/* ima_read_write_check - reflect possible reading/writing errors in the PCR.
88 *
89 * When opening a file for read, if the file is already open for write,
90 * the file could change, resulting in a file measurement error.
91 *
92 * Opening a file for write, if the file is already open for read, results
93 * in a time of measure, time of use (ToMToU) error.
94 *
95 * In either case invalidate the PCR.
96 */
97enum iint_pcr_error { TOMTOU, OPEN_WRITERS };
98static void ima_read_write_check(enum iint_pcr_error error,
99 struct ima_iint_cache *iint,
100 struct inode *inode,
101 const unsigned char *filename)
102{
103 switch (error) {
104 case TOMTOU:
105 if (iint->readcount > 0)
106 ima_add_violation(inode, filename, "invalid_pcr",
107 "ToMToU");
108 break;
109 case OPEN_WRITERS:
110 if (iint->writecount > 0)
111 ima_add_violation(inode, filename, "invalid_pcr",
112 "open_writers");
113 break;
114 }
115}
116
87/* 117/*
88 * Update the counts given an fmode_t 118 * Update the counts given an fmode_t
89 */ 119 */
@@ -99,6 +129,47 @@ static void ima_inc_counts(struct ima_iint_cache *iint, fmode_t mode)
99} 129}
100 130
101/* 131/*
132 * ima_counts_get - increment file counts
133 *
134 * Maintain read/write counters for all files, but only
135 * invalidate the PCR for measured files:
136 * - Opening a file for write when already open for read,
137 * results in a time of measure, time of use (ToMToU) error.
138 * - Opening a file for read when already open for write,
139 * could result in a file measurement error.
140 *
141 */
142void ima_counts_get(struct file *file)
143{
144 struct dentry *dentry = file->f_path.dentry;
145 struct inode *inode = dentry->d_inode;
146 fmode_t mode = file->f_mode;
147 struct ima_iint_cache *iint;
148 int rc;
149
150 if (!ima_initialized || !S_ISREG(inode->i_mode))
151 return;
152 iint = ima_iint_find_get(inode);
153 if (!iint)
154 return;
155 mutex_lock(&iint->mutex);
156 rc = ima_must_measure(iint, inode, MAY_READ, FILE_CHECK);
157 if (rc < 0)
158 goto out;
159
160 if (mode & FMODE_WRITE) {
161 ima_read_write_check(TOMTOU, iint, inode, dentry->d_name.name);
162 goto out;
163 }
164 ima_read_write_check(OPEN_WRITERS, iint, inode, dentry->d_name.name);
165out:
166 ima_inc_counts(iint, file->f_mode);
167 mutex_unlock(&iint->mutex);
168
169 kref_put(&iint->refcount, iint_free);
170}
171
172/*
102 * Decrement ima counts 173 * Decrement ima counts
103 */ 174 */
104static void ima_dec_counts(struct ima_iint_cache *iint, struct inode *inode, 175static void ima_dec_counts(struct ima_iint_cache *iint, struct inode *inode,
@@ -153,123 +224,6 @@ void ima_file_free(struct file *file)
153 kref_put(&iint->refcount, iint_free); 224 kref_put(&iint->refcount, iint_free);
154} 225}
155 226
156/* ima_read_write_check - reflect possible reading/writing errors in the PCR.
157 *
158 * When opening a file for read, if the file is already open for write,
159 * the file could change, resulting in a file measurement error.
160 *
161 * Opening a file for write, if the file is already open for read, results
162 * in a time of measure, time of use (ToMToU) error.
163 *
164 * In either case invalidate the PCR.
165 */
166enum iint_pcr_error { TOMTOU, OPEN_WRITERS };
167static void ima_read_write_check(enum iint_pcr_error error,
168 struct ima_iint_cache *iint,
169 struct inode *inode,
170 const unsigned char *filename)
171{
172 switch (error) {
173 case TOMTOU:
174 if (iint->readcount > 0)
175 ima_add_violation(inode, filename, "invalid_pcr",
176 "ToMToU");
177 break;
178 case OPEN_WRITERS:
179 if (iint->writecount > 0)
180 ima_add_violation(inode, filename, "invalid_pcr",
181 "open_writers");
182 break;
183 }
184}
185
186static int get_path_measurement(struct ima_iint_cache *iint, struct file *file,
187 const unsigned char *filename)
188{
189 int rc = 0;
190
191 ima_inc_counts(iint, file->f_mode);
192
193 rc = ima_collect_measurement(iint, file);
194 if (!rc)
195 ima_store_measurement(iint, file, filename);
196 return rc;
197}
198
199/**
200 * ima_path_check - based on policy, collect/store measurement.
201 * @path: contains a pointer to the path to be measured
202 * @mask: contains MAY_READ, MAY_WRITE or MAY_EXECUTE
203 *
204 * Measure the file being open for readonly, based on the
205 * ima_must_measure() policy decision.
206 *
207 * Keep read/write counters for all files, but only
208 * invalidate the PCR for measured files:
209 * - Opening a file for write when already open for read,
210 * results in a time of measure, time of use (ToMToU) error.
211 * - Opening a file for read when already open for write,
212 * could result in a file measurement error.
213 *
214 * Always return 0 and audit dentry_open failures.
215 * (Return code will be based upon measurement appraisal.)
216 */
217int ima_path_check(struct path *path, int mask)
218{
219 struct inode *inode = path->dentry->d_inode;
220 struct ima_iint_cache *iint;
221 struct file *file = NULL;
222 int rc;
223
224 if (!ima_initialized || !S_ISREG(inode->i_mode))
225 return 0;
226 iint = ima_iint_find_get(inode);
227 if (!iint)
228 return 0;
229
230 mutex_lock(&iint->mutex);
231
232 rc = ima_must_measure(iint, inode, MAY_READ, PATH_CHECK);
233 if (rc < 0)
234 goto out;
235
236 if ((mask & MAY_WRITE) || (mask == 0))
237 ima_read_write_check(TOMTOU, iint, inode,
238 path->dentry->d_name.name);
239
240 if ((mask & (MAY_WRITE | MAY_READ | MAY_EXEC)) != MAY_READ)
241 goto out;
242
243 ima_read_write_check(OPEN_WRITERS, iint, inode,
244 path->dentry->d_name.name);
245 if (!(iint->flags & IMA_MEASURED)) {
246 struct dentry *dentry = dget(path->dentry);
247 struct vfsmount *mnt = mntget(path->mnt);
248
249 file = dentry_open(dentry, mnt, O_RDONLY | O_LARGEFILE,
250 current_cred());
251 if (IS_ERR(file)) {
252 int audit_info = 0;
253
254 integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode,
255 dentry->d_name.name,
256 "add_measurement",
257 "dentry_open failed",
258 1, audit_info);
259 file = NULL;
260 goto out;
261 }
262 rc = get_path_measurement(iint, file, dentry->d_name.name);
263 }
264out:
265 mutex_unlock(&iint->mutex);
266 if (file)
267 fput(file);
268 kref_put(&iint->refcount, iint_free);
269 return 0;
270}
271EXPORT_SYMBOL_GPL(ima_path_check);
272
273static int process_measurement(struct file *file, const unsigned char *filename, 227static int process_measurement(struct file *file, const unsigned char *filename,
274 int mask, int function) 228 int mask, int function)
275{ 229{
@@ -297,33 +251,6 @@ out:
297 return rc; 251 return rc;
298} 252}
299 253
300/*
301 * ima_counts_get - increment file counts
302 *
303 * - for IPC shm and shmat file.
304 * - for nfsd exported files.
305 *
306 * Increment the counts for these files to prevent unnecessary
307 * imbalance messages.
308 */
309void ima_counts_get(struct file *file)
310{
311 struct inode *inode = file->f_dentry->d_inode;
312 struct ima_iint_cache *iint;
313
314 if (!ima_initialized || !S_ISREG(inode->i_mode))
315 return;
316 iint = ima_iint_find_get(inode);
317 if (!iint)
318 return;
319 mutex_lock(&iint->mutex);
320 ima_inc_counts(iint, file->f_mode);
321 mutex_unlock(&iint->mutex);
322
323 kref_put(&iint->refcount, iint_free);
324}
325EXPORT_SYMBOL_GPL(ima_counts_get);
326
327/** 254/**
328 * ima_file_mmap - based on policy, collect/store measurement. 255 * ima_file_mmap - based on policy, collect/store measurement.
329 * @file: pointer to the file to be measured (May be NULL) 256 * @file: pointer to the file to be measured (May be NULL)
@@ -369,11 +296,31 @@ int ima_bprm_check(struct linux_binprm *bprm)
369 return 0; 296 return 0;
370} 297}
371 298
299/**
300 * ima_path_check - based on policy, collect/store measurement.
301 * @file: pointer to the file to be measured
302 * @mask: contains MAY_READ, MAY_WRITE or MAY_EXECUTE
303 *
304 * Measure files based on the ima_must_measure() policy decision.
305 *
306 * Always return 0 and audit dentry_open failures.
307 * (Return code will be based upon measurement appraisal.)
308 */
309int ima_file_check(struct file *file, int mask)
310{
311 int rc;
312
313 rc = process_measurement(file, file->f_dentry->d_name.name,
314 mask & (MAY_READ | MAY_WRITE | MAY_EXEC),
315 FILE_CHECK);
316 return 0;
317}
318EXPORT_SYMBOL_GPL(ima_file_check);
319
372static int __init init_ima(void) 320static int __init init_ima(void)
373{ 321{
374 int error; 322 int error;
375 323
376 ima_iintcache_init();
377 error = ima_init(); 324 error = ima_init();
378 ima_initialized = 1; 325 ima_initialized = 1;
379 return error; 326 return error;
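Note on the ima_main.c hunks above: ima_path_check() is replaced by ima_file_check(), which simply funnels into process_measurement(), while the ToMToU / open-writers violation checks move into ima_counts_get(), which now also consults the measurement policy. The decision rules themselves are small; a stand-alone sketch with simplified names, not the kernel code:

    #include <stdio.h>

    enum violation { NONE, TOMTOU, OPEN_WRITERS };

    /* Opening for write while readers exist is a ToMToU violation; opening
     * for read while writers exist may invalidate the measurement. */
    static enum violation check_open(int opening_for_write,
                                     int readers, int writers)
    {
        if (opening_for_write)
            return readers > 0 ? TOMTOU : NONE;
        return writers > 0 ? OPEN_WRITERS : NONE;
    }

    int main(void)
    {
        printf("write while 2 readers: %d\n", check_open(1, 2, 0));
        printf("read while 1 writer:   %d\n", check_open(0, 0, 1));
        printf("plain read:            %d\n", check_open(0, 0, 0));
        return 0;
    }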
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index e1278399b345..4759d0f99335 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -67,7 +67,7 @@ static struct ima_measure_rule_entry default_rules[] = {
67 .flags = IMA_FUNC | IMA_MASK}, 67 .flags = IMA_FUNC | IMA_MASK},
68 {.action = MEASURE,.func = BPRM_CHECK,.mask = MAY_EXEC, 68 {.action = MEASURE,.func = BPRM_CHECK,.mask = MAY_EXEC,
69 .flags = IMA_FUNC | IMA_MASK}, 69 .flags = IMA_FUNC | IMA_MASK},
70 {.action = MEASURE,.func = PATH_CHECK,.mask = MAY_READ,.uid = 0, 70 {.action = MEASURE,.func = FILE_CHECK,.mask = MAY_READ,.uid = 0,
71 .flags = IMA_FUNC | IMA_MASK | IMA_UID}, 71 .flags = IMA_FUNC | IMA_MASK | IMA_UID},
72}; 72};
73 73
@@ -282,8 +282,11 @@ static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
282 break; 282 break;
283 case Opt_func: 283 case Opt_func:
284 audit_log_format(ab, "func=%s ", args[0].from); 284 audit_log_format(ab, "func=%s ", args[0].from);
285 if (strcmp(args[0].from, "PATH_CHECK") == 0) 285 if (strcmp(args[0].from, "FILE_CHECK") == 0)
286 entry->func = PATH_CHECK; 286 entry->func = FILE_CHECK;
287 /* PATH_CHECK is for backwards compat */
288 else if (strcmp(args[0].from, "PATH_CHECK") == 0)
289 entry->func = FILE_CHECK;
287 else if (strcmp(args[0].from, "FILE_MMAP") == 0) 290 else if (strcmp(args[0].from, "FILE_MMAP") == 0)
288 entry->func = FILE_MMAP; 291 entry->func = FILE_MMAP;
289 else if (strcmp(args[0].from, "BPRM_CHECK") == 0) 292 else if (strcmp(args[0].from, "BPRM_CHECK") == 0)
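Note on the ima_policy.c hunk above: the policy parser accepts the new FILE_CHECK keyword and keeps the legacy PATH_CHECK spelling as an alias for it. A stand-alone sketch of that backwards-compatible mapping; parse_func() is illustrative only:

    #include <stdio.h>
    #include <string.h>

    enum hooks { FILE_CHECK = 1, FILE_MMAP, BPRM_CHECK };

    /* Both the new FILE_CHECK keyword and the legacy PATH_CHECK spelling
     * map to the same hook value. */
    static int parse_func(const char *s)
    {
        if (strcmp(s, "FILE_CHECK") == 0 || strcmp(s, "PATH_CHECK") == 0)
            return FILE_CHECK;
        if (strcmp(s, "FILE_MMAP") == 0)
            return FILE_MMAP;
        if (strcmp(s, "BPRM_CHECK") == 0)
            return BPRM_CHECK;
        return -1;
    }

    int main(void)
    {
        printf("FILE_CHECK -> %d\n", parse_func("FILE_CHECK"));
        printf("PATH_CHECK -> %d\n", parse_func("PATH_CHECK"));
        printf("OTHER      -> %d\n", parse_func("OTHER"));
        return 0;
    }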
diff --git a/security/security.c b/security/security.c
index 24e060be9fa5..122b748d0f4c 100644
--- a/security/security.c
+++ b/security/security.c
@@ -666,8 +666,6 @@ int security_file_alloc(struct file *file)
666void security_file_free(struct file *file) 666void security_file_free(struct file *file)
667{ 667{
668 security_ops->file_free_security(file); 668 security_ops->file_free_security(file);
669 if (file->f_dentry)
670 ima_file_free(file);
671} 669}
672 670
673int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 671int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c
index cb65bd0dd35b..459c1f62783b 100644
--- a/sound/pci/ctxfi/ctatc.c
+++ b/sound/pci/ctxfi/ctatc.c
@@ -166,18 +166,7 @@ static void ct_unmap_audio_buffer(struct ct_atc *atc, struct ct_atc_pcm *apcm)
166 166
167static unsigned long atc_get_ptp_phys(struct ct_atc *atc, int index) 167static unsigned long atc_get_ptp_phys(struct ct_atc *atc, int index)
168{ 168{
169 struct ct_vm *vm; 169 return atc->vm->get_ptp_phys(atc->vm, index);
170 void *kvirt_addr;
171 unsigned long phys_addr;
172
173 vm = atc->vm;
174 kvirt_addr = vm->get_ptp_virt(vm, index);
175 if (kvirt_addr == NULL)
176 phys_addr = (~0UL);
177 else
178 phys_addr = virt_to_phys(kvirt_addr);
179
180 return phys_addr;
181} 170}
182 171
183static unsigned int convert_format(snd_pcm_format_t snd_format) 172static unsigned int convert_format(snd_pcm_format_t snd_format)
@@ -1669,7 +1658,7 @@ int __devinit ct_atc_create(struct snd_card *card, struct pci_dev *pci,
1669 } 1658 }
1670 1659
1671 /* Set up device virtual memory management object */ 1660 /* Set up device virtual memory management object */
1672 err = ct_vm_create(&atc->vm); 1661 err = ct_vm_create(&atc->vm, pci);
1673 if (err < 0) 1662 if (err < 0)
1674 goto error1; 1663 goto error1;
1675 1664
diff --git a/sound/pci/ctxfi/ctvmem.c b/sound/pci/ctxfi/ctvmem.c
index 6b78752e9503..65da6e466f80 100644
--- a/sound/pci/ctxfi/ctvmem.c
+++ b/sound/pci/ctxfi/ctvmem.c
@@ -138,7 +138,7 @@ ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size)
138 return NULL; 138 return NULL;
139 } 139 }
140 140
141 ptp = vm->ptp[0]; 141 ptp = (unsigned long *)vm->ptp[0].area;
142 pte_start = (block->addr >> CT_PAGE_SHIFT); 142 pte_start = (block->addr >> CT_PAGE_SHIFT);
143 pages = block->size >> CT_PAGE_SHIFT; 143 pages = block->size >> CT_PAGE_SHIFT;
144 for (i = 0; i < pages; i++) { 144 for (i = 0; i < pages; i++) {
@@ -158,25 +158,25 @@ static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block)
158} 158}
159 159
160/* * 160/* *
161 * return the host (kmalloced) addr of the @index-th device 161 * return the host physical addr of the @index-th device
162 * page talbe page on success, or NULL on failure. 162 * page table page on success, or ~0UL on failure.
163 * The first returned NULL indicates the termination. 163 * The first returned ~0UL indicates the termination.
164 * */ 164 * */
165static void * 165static dma_addr_t
166ct_get_ptp_virt(struct ct_vm *vm, int index) 166ct_get_ptp_phys(struct ct_vm *vm, int index)
167{ 167{
168 void *addr; 168 dma_addr_t addr;
169 169
170 addr = (index >= CT_PTP_NUM) ? NULL : vm->ptp[index]; 170 addr = (index >= CT_PTP_NUM) ? ~0UL : vm->ptp[index].addr;
171 171
172 return addr; 172 return addr;
173} 173}
174 174
175int ct_vm_create(struct ct_vm **rvm) 175int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci)
176{ 176{
177 struct ct_vm *vm; 177 struct ct_vm *vm;
178 struct ct_vm_block *block; 178 struct ct_vm_block *block;
179 int i; 179 int i, err = 0;
180 180
181 *rvm = NULL; 181 *rvm = NULL;
182 182
@@ -188,23 +188,21 @@ int ct_vm_create(struct ct_vm **rvm)
188 188
189 /* Allocate page table pages */ 189 /* Allocate page table pages */
190 for (i = 0; i < CT_PTP_NUM; i++) { 190 for (i = 0; i < CT_PTP_NUM; i++) {
191 vm->ptp[i] = kmalloc(PAGE_SIZE, GFP_KERNEL); 191 err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
192 if (!vm->ptp[i]) 192 snd_dma_pci_data(pci),
193 PAGE_SIZE, &vm->ptp[i]);
194 if (err < 0)
193 break; 195 break;
194 } 196 }
195 if (!i) { 197 if (err < 0) {
196 /* no page table pages are allocated */ 198 /* no page table pages are allocated */
197 kfree(vm); 199 ct_vm_destroy(vm);
198 return -ENOMEM; 200 return -ENOMEM;
199 } 201 }
200 vm->size = CT_ADDRS_PER_PAGE * i; 202 vm->size = CT_ADDRS_PER_PAGE * i;
201 /* Initialise remaining ptps */
202 for (; i < CT_PTP_NUM; i++)
203 vm->ptp[i] = NULL;
204
205 vm->map = ct_vm_map; 203 vm->map = ct_vm_map;
206 vm->unmap = ct_vm_unmap; 204 vm->unmap = ct_vm_unmap;
207 vm->get_ptp_virt = ct_get_ptp_virt; 205 vm->get_ptp_phys = ct_get_ptp_phys;
208 INIT_LIST_HEAD(&vm->unused); 206 INIT_LIST_HEAD(&vm->unused);
209 INIT_LIST_HEAD(&vm->used); 207 INIT_LIST_HEAD(&vm->used);
210 block = kzalloc(sizeof(*block), GFP_KERNEL); 208 block = kzalloc(sizeof(*block), GFP_KERNEL);
@@ -242,7 +240,7 @@ void ct_vm_destroy(struct ct_vm *vm)
242 240
243 /* free allocated page table pages */ 241 /* free allocated page table pages */
244 for (i = 0; i < CT_PTP_NUM; i++) 242 for (i = 0; i < CT_PTP_NUM; i++)
245 kfree(vm->ptp[i]); 243 snd_dma_free_pages(&vm->ptp[i]);
246 244
247 vm->size = 0; 245 vm->size = 0;
248 246
diff --git a/sound/pci/ctxfi/ctvmem.h b/sound/pci/ctxfi/ctvmem.h
index 01e4fd0386a3..b23adfca4de6 100644
--- a/sound/pci/ctxfi/ctvmem.h
+++ b/sound/pci/ctxfi/ctvmem.h
@@ -22,6 +22,8 @@
22 22
23#include <linux/mutex.h> 23#include <linux/mutex.h>
24#include <linux/list.h> 24#include <linux/list.h>
25#include <linux/pci.h>
26#include <sound/memalloc.h>
25 27
26/* The chip can handle the page table of 4k pages 28/* The chip can handle the page table of 4k pages
27 * (emu20k1 can handle even 8k pages, but we don't use it right now) 29 * (emu20k1 can handle even 8k pages, but we don't use it right now)
@@ -41,7 +43,7 @@ struct snd_pcm_substream;
 
 /* Virtual memory management object for card device */
 struct ct_vm {
-	void *ptp[CT_PTP_NUM];		/* Device page table pages */
+	struct snd_dma_buffer ptp[CT_PTP_NUM];	/* Device page table pages */
 	unsigned int size;		/* Available addr space in bytes */
 	struct list_head unused;	/* List of unused blocks */
 	struct list_head used;		/* List of used blocks */
@@ -52,10 +54,10 @@ struct ct_vm {
 				   int size);
 	/* Unmap device logical addr area. */
 	void (*unmap)(struct ct_vm *, struct ct_vm_block *block);
-	void *(*get_ptp_virt)(struct ct_vm *vm, int index);
+	dma_addr_t (*get_ptp_phys)(struct ct_vm *vm, int index);
 };
 
-int ct_vm_create(struct ct_vm **rvm);
+int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci);
 void ct_vm_destroy(struct ct_vm *vm);
 
 #endif /* CTVMEM_H */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 565de38a3fc7..b8faa6dc5abe 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -426,6 +426,7 @@ struct azx {
 
 	/* flags */
 	int position_fix;
+	int poll_count;
 	unsigned int running :1;
 	unsigned int initialized :1;
 	unsigned int single_cmd :1;
@@ -506,7 +507,7 @@ static char *driver_short_names[] __devinitdata = {
 #define get_azx_dev(substream) (substream->runtime->private_data)
 
 static int azx_acquire_irq(struct azx *chip, int do_disconnect);
-
+static int azx_send_cmd(struct hda_bus *bus, unsigned int val);
 /*
  * Interface for HD codec
  */
@@ -664,11 +665,12 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
 {
 	struct azx *chip = bus->private_data;
 	unsigned long timeout;
+	int do_poll = 0;
 
  again:
 	timeout = jiffies + msecs_to_jiffies(1000);
 	for (;;) {
-		if (chip->polling_mode) {
+		if (chip->polling_mode || do_poll) {
 			spin_lock_irq(&chip->reg_lock);
 			azx_update_rirb(chip);
 			spin_unlock_irq(&chip->reg_lock);
@@ -676,6 +678,9 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
 		if (!chip->rirb.cmds[addr]) {
 			smp_rmb();
 			bus->rirb_error = 0;
+
+			if (!do_poll)
+				chip->poll_count = 0;
 			return chip->rirb.res[addr]; /* the last value */
 		}
 		if (time_after(jiffies, timeout))
@@ -688,6 +693,16 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
 		}
 	}
 
+	if (!chip->polling_mode && chip->poll_count < 2) {
+		snd_printdd(SFX "azx_get_response timeout, "
+			    "polling the codec once: last cmd=0x%08x\n",
+			    chip->last_cmd[addr]);
+		do_poll = 1;
+		chip->poll_count++;
+		goto again;
+	}
+
+
 	if (!chip->polling_mode) {
 		snd_printk(KERN_WARNING SFX "azx_get_response timeout, "
 			   "switching to polling mode: last cmd=0x%08x\n",
@@ -2043,7 +2058,7 @@ static int azx_acquire_irq(struct azx *chip, int do_disconnect)
 {
 	if (request_irq(chip->pci->irq, azx_interrupt,
 			chip->msi ? 0 : IRQF_SHARED,
-			"HDA Intel", chip)) {
+			"hda_intel", chip)) {
 		printk(KERN_ERR "hda-intel: unable to grab IRQ %d, "
 		       "disabling device\n", chip->pci->irq);
 		if (do_disconnect)
diff --git a/sound/pci/ice1712/aureon.c b/sound/pci/ice1712/aureon.c
index 765d7bd4c3d4..9e66f6d306f8 100644
--- a/sound/pci/ice1712/aureon.c
+++ b/sound/pci/ice1712/aureon.c
@@ -703,11 +703,13 @@ static void wm_set_vol(struct snd_ice1712 *ice, unsigned int index, unsigned sho
 {
 	unsigned char nvol;
 
-	if ((master & WM_VOL_MUTE) || (vol & WM_VOL_MUTE))
+	if ((master & WM_VOL_MUTE) || (vol & WM_VOL_MUTE)) {
 		nvol = 0;
-	else
+	} else {
 		nvol = ((vol % WM_VOL_CNT) * (master % WM_VOL_CNT)) /
 				WM_VOL_MAX;
+		nvol += 0x1b;
+	}
 
 	wm_put(ice, index, nvol);
 	wm_put_nocache(ice, index, 0x180 | nvol);
@@ -778,7 +780,7 @@ static int wm_master_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_
 	for (ch = 0; ch < 2; ch++) {
 		unsigned int vol = ucontrol->value.integer.value[ch];
 		if (vol > WM_VOL_MAX)
-			continue;
+			vol = WM_VOL_MAX;
 		vol |= spec->master[ch] & WM_VOL_MUTE;
 		if (vol != spec->master[ch]) {
 			int dac;
@@ -834,8 +836,8 @@ static int wm_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *
 	for (i = 0; i < voices; i++) {
 		unsigned int vol = ucontrol->value.integer.value[i];
 		if (vol > WM_VOL_MAX)
-			continue;
-		vol |= spec->vol[ofs+i];
+			vol = WM_VOL_MAX;
+		vol |= spec->vol[ofs+i] & WM_VOL_MUTE;
 		if (vol != spec->vol[ofs+i]) {
 			spec->vol[ofs+i] = vol;
 			idx = WM_DAC_ATTEN + ofs + i;
diff --git a/sound/soc/omap/omap3pandora.c b/sound/soc/omap/omap3pandora.c
index 71b2c161158d..68980c19a3bc 100644
--- a/sound/soc/omap/omap3pandora.c
+++ b/sound/soc/omap/omap3pandora.c
@@ -145,6 +145,7 @@ static const struct snd_soc_dapm_widget omap3pandora_in_dapm_widgets[] = {
 };
 
 static const struct snd_soc_dapm_route omap3pandora_out_map[] = {
+	{"PCM DAC", NULL, "APLL Enable"},
 	{"Headphone Amplifier", NULL, "PCM DAC"},
 	{"Line Out", NULL, "PCM DAC"},
 	{"Headphone Jack", NULL, "Headphone Amplifier"},