-rw-r--r--  Documentation/ABI/testing/ima_policy | 12
-rw-r--r--  Documentation/cpu-freq/governors.txt | 4
-rw-r--r--  MAINTAINERS | 1
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/avr32/mach-at32ap/at32ap700x.c | 15
-rw-r--r--  arch/microblaze/kernel/setup.c | 1
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c | 12
-rw-r--r--  arch/powerpc/platforms/pseries/xics.c | 14
-rw-r--r--  arch/s390/include/asm/lowcore.h | 4
-rw-r--r--  arch/sh/kernel/cpu/sh3/entry.S | 3
-rw-r--r--  arch/sh/kernel/dwarf.c | 20
-rw-r--r--  arch/sh/kernel/entry-common.S | 8
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 3
-rw-r--r--  arch/x86/kvm/i8254.c | 3
-rw-r--r--  arch/x86/kvm/x86.c | 7
-rw-r--r--  drivers/ata/ahci.c | 15
-rw-r--r--  drivers/ata/libata-scsi.c | 2
-rw-r--r--  drivers/ata/libata-sff.c | 3
-rw-r--r--  drivers/bluetooth/btmrvl_sdio.c | 1
-rw-r--r--  drivers/char/tty_io.c | 4
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 3
-rw-r--r--  drivers/gpu/drm/ati_pcigart.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 42
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 33
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 48
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 28
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 24
-rw-r--r--  drivers/hwmon/adt7462.c | 2
-rw-r--r--  drivers/hwmon/lm78.c | 25
-rw-r--r--  drivers/hwmon/w83781d.c | 26
-rw-r--r--  drivers/md/md.c | 14
-rw-r--r--  drivers/md/raid5.c | 14
-rw-r--r--  drivers/media/dvb/dvb-core/dmxdev.c | 2
-rw-r--r--  drivers/media/dvb/dvb-core/dvb_demux.c | 20
-rw-r--r--  drivers/net/ax88796.c | 2
-rw-r--r--  drivers/net/cxgb3/sge.c | 20
-rw-r--r--  drivers/net/igb/igb_main.c | 20
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 7
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 4
-rw-r--r--  drivers/net/sky2.c | 8
-rw-r--r--  drivers/pci/quirks.c | 17
-rw-r--r--  drivers/s390/cio/qdio_main.c | 4
-rw-r--r--  drivers/usb/host/r8a66597-hcd.c | 41
-rw-r--r--  fs/9p/v9fs.c | 33
-rw-r--r--  fs/9p/v9fs_vfs.h | 1
-rw-r--r--  fs/9p/vfs_file.c | 19
-rw-r--r--  fs/9p/vfs_inode.c | 2
-rw-r--r--  fs/befs/linuxvfs.c | 1
-rw-r--r--  fs/block_dev.c | 7
-rw-r--r--  fs/btrfs/disk-io.c | 7
-rw-r--r--  fs/btrfs/extent-tree.c | 8
-rw-r--r--  fs/btrfs/extent_io.c | 3
-rw-r--r--  fs/btrfs/file.c | 2
-rw-r--r--  fs/btrfs/inode.c | 50
-rw-r--r--  fs/btrfs/relocation.c | 3
-rw-r--r--  fs/cifs/CHANGES | 4
-rw-r--r--  fs/cifs/cifsfs.h | 2
-rw-r--r--  fs/cifs/cifsglob.h | 1
-rw-r--r--  fs/cifs/connect.c | 30
-rw-r--r--  fs/cifs/inode.c | 12
-rw-r--r--  fs/cifs/readdir.c | 8
-rw-r--r--  fs/cifs/sess.c | 11
-rw-r--r--  fs/fcntl.c | 6
-rw-r--r--  fs/file_table.c | 1
-rw-r--r--  fs/namei.c | 6
-rw-r--r--  fs/nfsd/export.c | 10
-rw-r--r--  fs/nfsd/vfs.c | 2
-rw-r--r--  fs/ocfs2/aops.c | 4
-rw-r--r--  fs/ocfs2/buffer_head_io.c | 2
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c | 6
-rw-r--r--  fs/ocfs2/cluster/tcp.c | 10
-rw-r--r--  fs/ocfs2/cluster/tcp_internal.h | 4
-rw-r--r--  fs/ocfs2/dlm/dlmapi.h | 2
-rw-r--r--  fs/ocfs2/dlm/dlmast.c | 2
-rw-r--r--  fs/ocfs2/dlm/dlmconvert.c | 2
-rw-r--r--  fs/ocfs2/dlm/dlmdebug.c | 2
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.c | 2
-rw-r--r--  fs/ocfs2/dlm/dlmlock.c | 2
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 38
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c | 147
-rw-r--r--  fs/ocfs2/dlm/dlmunlock.c | 8
-rw-r--r--  fs/ocfs2/dlmglue.c | 85
-rw-r--r--  fs/ocfs2/export.c | 2
-rw-r--r--  fs/ocfs2/extent_map.c | 2
-rw-r--r--  fs/ocfs2/file.c | 18
-rw-r--r--  fs/ocfs2/inode.c | 4
-rw-r--r--  fs/ocfs2/ioctl.c | 14
-rw-r--r--  fs/ocfs2/journal.c | 2
-rw-r--r--  fs/ocfs2/ocfs2.h | 4
-rw-r--r--  fs/ocfs2/ocfs2_fs.h | 11
-rw-r--r--  fs/ocfs2/refcounttree.c | 12
-rw-r--r--  fs/ocfs2/stack_o2cb.c | 12
-rw-r--r--  fs/ocfs2/super.c | 2
-rw-r--r--  fs/ocfs2/symlink.c | 10
-rw-r--r--  fs/ocfs2/uptodate.c | 4
-rw-r--r--  include/linux/ata.h | 4
-rw-r--r--  include/linux/compiler.h | 2
-rw-r--r--  include/linux/ima.h | 4
-rw-r--r--  include/net/netns/conntrack.h | 3
-rw-r--r--  include/net/netns/ipv4.h | 1
-rw-r--r--  init/main.c | 2
-rw-r--r--  kernel/time/timekeeping.c | 2
-rw-r--r--  mm/migrate.c | 3
-rw-r--r--  net/9p/client.c | 53
-rw-r--r--  net/9p/trans_fd.c | 10
-rw-r--r--  net/9p/trans_rdma.c | 9
-rw-r--r--  net/9p/trans_virtio.c | 4
-rw-r--r--  net/bluetooth/hci_conn.c | 3
-rw-r--r--  net/bluetooth/hci_event.c | 1
-rw-r--r--  net/bluetooth/hidp/core.c | 49
-rw-r--r--  net/bluetooth/hidp/hidp.h | 4
-rw-r--r--  net/bluetooth/rfcomm/core.c | 8
-rw-r--r--  net/core/dst.c | 2
-rw-r--r--  net/core/pktgen.c | 1
-rw-r--r--  net/dccp/ccid.c | 2
-rw-r--r--  net/dccp/ccid.h | 8
-rw-r--r--  net/dccp/probe.c | 4
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 4
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 4
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c | 4
-rw-r--r--  net/ipv4/netfilter/nf_nat_core.c | 22
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 4
-rw-r--r--  net/irda/irnet/irnet_ppp.c | 5
-rw-r--r--  net/key/af_key.c | 15
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 116
-rw-r--r--  net/netfilter/nf_conntrack_expect.c | 4
-rw-r--r--  net/netfilter/nf_conntrack_helper.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_standalone.c | 7
-rw-r--r--  net/netlink/af_netlink.c | 5
-rw-r--r--  net/sched/Kconfig | 16
-rw-r--r--  security/integrity/ima/ima.h | 3
-rw-r--r--  security/integrity/ima/ima_api.c | 4
-rw-r--r--  security/integrity/ima/ima_iint.c | 9
-rw-r--r--  security/integrity/ima/ima_main.c | 239
-rw-r--r--  security/integrity/ima/ima_policy.c | 9
-rw-r--r--  security/security.c | 2
-rw-r--r--  sound/pci/ctxfi/ctatc.c | 15
-rw-r--r--  sound/pci/ctxfi/ctvmem.c | 38
-rw-r--r--  sound/pci/ctxfi/ctvmem.h | 8
-rw-r--r--  sound/pci/hda/hda_intel.c | 21
-rw-r--r--  sound/pci/ice1712/aureon.c | 12
-rw-r--r--  sound/soc/omap/omap3pandora.c | 1
162 files changed, 1232 insertions(+), 810 deletions(-)
diff --git a/Documentation/ABI/testing/ima_policy b/Documentation/ABI/testing/ima_policy
index 6434f0df012e..6cd6daefaaed 100644
--- a/Documentation/ABI/testing/ima_policy
+++ b/Documentation/ABI/testing/ima_policy
@@ -20,7 +20,7 @@ Description:
 		lsm:	[[subj_user=] [subj_role=] [subj_type=]
 			 [obj_user=] [obj_role=] [obj_type=]]
 
-	base:	func:= [BPRM_CHECK][FILE_MMAP][INODE_PERMISSION]
+	base:	func:= [BPRM_CHECK][FILE_MMAP][FILE_CHECK]
 		mask:= [MAY_READ] [MAY_WRITE] [MAY_APPEND] [MAY_EXEC]
 		fsmagic:= hex value
 		uid:= decimal value
@@ -40,11 +40,11 @@ Description:
 
 		measure func=BPRM_CHECK
 		measure func=FILE_MMAP mask=MAY_EXEC
-		measure func=INODE_PERM mask=MAY_READ uid=0
+		measure func=FILE_CHECK mask=MAY_READ uid=0
 
 	The default policy measures all executables in bprm_check,
 	all files mmapped executable in file_mmap, and all files
-	open for read by root in inode_permission.
+	open for read by root in do_filp_open.
 
 	Examples of LSM specific definitions:
 
@@ -54,8 +54,8 @@ Description:
 
 		dont_measure obj_type=var_log_t
 		dont_measure obj_type=auditd_log_t
-		measure subj_user=system_u func=INODE_PERM mask=MAY_READ
-		measure subj_role=system_r func=INODE_PERM mask=MAY_READ
+		measure subj_user=system_u func=FILE_CHECK mask=MAY_READ
+		measure subj_role=system_r func=FILE_CHECK mask=MAY_READ
 
 	Smack:
-		measure subj_user=_ func=INODE_PERM mask=MAY_READ
+		measure subj_user=_ func=FILE_CHECK mask=MAY_READ
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index aed082f49d09..737988fca64d 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -145,8 +145,8 @@ show_sampling_rate_max: THIS INTERFACE IS DEPRECATED, DON'T USE IT.
 up_threshold: defines what the average CPU usage between the samplings
 of 'sampling_rate' needs to be for the kernel to make a decision on
 whether it should increase the frequency.  For example when it is set
-to its default value of '80' it means that between the checking
-intervals the CPU needs to be on average more than 80% in use to then
+to its default value of '95' it means that between the checking
+intervals the CPU needs to be on average more than 95% in use to then
 decide that the CPU frequency needs to be increased.
 
 ignore_nice_load: this parameter takes a value of '0' or '1'. When
diff --git a/MAINTAINERS b/MAINTAINERS
index 03f38c18f323..602022d2c7a5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3836,6 +3836,7 @@ NETWORKING DRIVERS
 L:	netdev@vger.kernel.org
 W:	http://www.linuxfoundation.org/en/Net
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6.git
 S:	Odd Fixes
 F:	drivers/net/
 F:	include/linux/if_*
diff --git a/Makefile b/Makefile
index 394aec712c7d..f8e02e9491d0 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 33
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Man-Eating Seals of Antiquity
 
 # *DOCUMENTATION*
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index 1aa1ea5e9212..b13d1879e51b 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -1325,7 +1325,7 @@ struct platform_device *__init
 at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 {
 	struct platform_device *pdev;
-	struct mci_dma_slave *slave;
+	struct mci_dma_data *slave;
 	u32 pioa_mask;
 	u32 piob_mask;
 
@@ -1344,7 +1344,9 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 				ARRAY_SIZE(atmel_mci0_resource)))
 		goto fail;
 
-	slave = kzalloc(sizeof(struct mci_dma_slave), GFP_KERNEL);
+	slave = kzalloc(sizeof(struct mci_dma_data), GFP_KERNEL);
+	if (!slave)
+		goto fail;
 
 	slave->sdata.dma_dev = &dw_dmac0_device.dev;
 	slave->sdata.reg_width = DW_DMA_SLAVE_WIDTH_32BIT;
@@ -1357,7 +1359,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 
 	if (platform_device_add_data(pdev, data,
 				sizeof(struct mci_platform_data)))
-		goto fail;
+		goto fail_free;
 
 	/* CLK line is common to both slots */
 	pioa_mask = 1 << 10;
@@ -1381,7 +1383,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 		/* Slot is unused */
 		break;
 	default:
-		goto fail;
+		goto fail_free;
 	}
 
 	select_peripheral(PIOA, pioa_mask, PERIPH_A, 0);
@@ -1408,7 +1410,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 		break;
 	default:
 		if (!data->slot[0].bus_width)
-			goto fail;
+			goto fail_free;
 
 		data->slot[1].bus_width = 0;
 		break;
@@ -1419,9 +1421,10 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 	platform_device_add(pdev);
 	return pdev;
 
+fail_free:
+	kfree(slave);
 fail:
 	data->dma_slave = NULL;
-	kfree(slave);
 	platform_device_put(pdev);
 	return NULL;
 }
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 5372b24ad049..bb8c4b9ccb80 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -54,6 +54,7 @@ void __init setup_arch(char **cmdline_p)
 
 	microblaze_cache_init();
 
+	invalidate_dcache();
 	enable_dcache();
 
 	invalidate_icache();
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 282d9306361f..1ec06576f619 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -63,15 +63,21 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	if (huge) {
 #ifdef CONFIG_HUGETLB_PAGE
 		psize = get_slice_psize(mm, addr);
+		/* Mask the address for the correct page size */
+		addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
 #else
 		BUG();
 		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
 #endif
-	} else
+	} else {
 		psize = pte_pagesize_index(mm, addr, pte);
+		/* Mask the address for the standard page size. If we
+		 * have a 64k page kernel, but the hardware does not
+		 * support 64k pages, this might be different from the
+		 * hardware page size encoded in the slice table. */
+		addr &= PAGE_MASK;
+	}
 
-	/* Mask the address for the correct page size */
-	addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
 
 	/* Build full vaddr */
 	if (!is_kernel_addr(addr)) {
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 1ee66db003be..f5f79196721c 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -784,9 +784,13 @@ static void xics_set_cpu_priority(unsigned char cppr)
 {
 	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
 
-	BUG_ON(os_cppr->index != 0);
+	/*
+	 * we only really want to set the priority when there's
+	 * just one cppr value on the stack
+	 */
+	WARN_ON(os_cppr->index != 0);
 
-	os_cppr->stack[os_cppr->index] = cppr;
+	os_cppr->stack[0] = cppr;
 
 	if (firmware_has_feature(FW_FEATURE_LPAR))
 		lpar_cppr_info(cppr);
@@ -821,8 +825,14 @@ void xics_setup_cpu(void)
 
 void xics_teardown_cpu(void)
 {
+	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
 	int cpu = smp_processor_id();
 
+	/*
+	 * we have to reset the cppr index to 0 because we're
+	 * not going to return from the IPI
+	 */
+	os_cppr->index = 0;
 	xics_set_cpu_priority(0);
 
 	/* Clear any pending IPI request */
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index f2ef4b619ce1..c25dfac7dd76 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -293,12 +293,12 @@ struct _lowcore
 	__u64	clock_comparator;		/* 0x02d0 */
 	__u32	machine_flags;			/* 0x02d8 */
 	__u32	ftrace_func;			/* 0x02dc */
-	__u8	pad_0x02f0[0x0300-0x02f0];	/* 0x02f0 */
+	__u8	pad_0x02e0[0x0300-0x02e0];	/* 0x02e0 */
 
 	/* Interrupt response block */
 	__u8	irb[64];			/* 0x0300 */
 
-	__u8	pad_0x0400[0x0e00-0x0400];	/* 0x0400 */
+	__u8	pad_0x0340[0x0e00-0x0340];	/* 0x0340 */
 
 	/*
 	 * 0xe00 contains the address of the IPL Parameter Information
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index 3f7e2a22c7c2..f6a389c996cb 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -132,7 +132,6 @@ ENTRY(tlb_protection_violation_store)
 	mov	#1, r5
 
 call_handle_tlbmiss:
-	setup_frame_reg
 	mov.l	1f, r0
 	mov	r5, r8
 	mov.l	@r0, r6
@@ -365,6 +364,8 @@ handle_exception:
 	mov.l	@k2, k2		! read out vector and keep in k2
 
 handle_exception_special:
+	setup_frame_reg
+
 	! Setup return address and jump to exception handler
 	mov.l	7f, r9		! fetch return address
 	stc	r2_bank, r0	! k2 (vector)
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index 88d28ec3780a..e51168064e56 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -540,6 +540,8 @@ void dwarf_free_frame(struct dwarf_frame *frame)
 	mempool_free(frame, dwarf_frame_pool);
 }
 
+extern void ret_from_irq(void);
+
 /**
  * dwarf_unwind_stack - unwind the stack
  *
@@ -678,6 +680,24 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
 		addr = frame->cfa + reg->addr;
 		frame->return_addr = __raw_readl(addr);
 
+	/*
+	 * Ah, the joys of unwinding through interrupts.
+	 *
+	 * Interrupts are tricky - the DWARF info needs to be _really_
+	 * accurate and unfortunately I'm seeing a lot of bogus DWARF
+	 * info. For example, I've seen interrupts occur in epilogues
+	 * just after the frame pointer (r14) had been restored. The
+	 * problem was that the DWARF info claimed that the CFA could be
+	 * reached by using the value of the frame pointer before it was
+	 * restored.
+	 *
+	 * So until the compiler can be trusted to produce reliable
+	 * DWARF info when it really matters, let's stop unwinding once
+	 * we've calculated the function that was interrupted.
+	 */
+	if (prev && prev->pc == (unsigned long)ret_from_irq)
+		frame->return_addr = 0;
+
 	return frame;
 
 bail:
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index f0abd58c3a69..2b15ae60c3a0 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -70,8 +70,14 @@ ret_from_exception:
 	CFI_STARTPROC simple
 	CFI_DEF_CFA r14, 0
 	CFI_REL_OFFSET 17, 64
-	CFI_REL_OFFSET 15, 0
+	CFI_REL_OFFSET 15, 60
 	CFI_REL_OFFSET 14, 56
+	CFI_REL_OFFSET 13, 52
+	CFI_REL_OFFSET 12, 48
+	CFI_REL_OFFSET 11, 44
+	CFI_REL_OFFSET 10, 40
+	CFI_REL_OFFSET 9, 36
+	CFI_REL_OFFSET 8, 32
 	preempt_stop()
ENTRY(ret_from_irq)
 	!
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index f125e5c551c0..6e44519960c8 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1356,6 +1356,7 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol)
 
 	kfree(data->powernow_table);
 	kfree(data);
+	per_cpu(powernow_data, pol->cpu) = NULL;
 
 	return 0;
 }
@@ -1375,7 +1376,7 @@ static unsigned int powernowk8_get(unsigned int cpu)
 	int err;
 
 	if (!data)
-		return -EINVAL;
+		return 0;
 
 	smp_call_function_single(cpu, query_values_on_cpu, &err, true);
 	if (err)
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 296aba49472a..15578f180e59 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -467,6 +467,9 @@ static int pit_ioport_read(struct kvm_io_device *this,
 		return -EOPNOTSUPP;
 
 	addr &= KVM_PIT_CHANNEL_MASK;
+	if (addr == 3)
+		return 0;
+
 	s = &pit_state->channels[addr];
 
 	mutex_lock(&pit_state->lock);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1ddcad452add..a1e1bc9d412d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -670,7 +670,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
 {
 	static int version;
 	struct pvclock_wall_clock wc;
-	struct timespec now, sys, boot;
+	struct timespec boot;
 
 	if (!wall_clock)
 		return;
@@ -685,9 +685,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
 	 * wall clock specified here.  guest system time equals host
 	 * system time for us, thus we must fill in host boot time here.
 	 */
-	now = current_kernel_time();
-	ktime_get_ts(&sys);
-	boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));
+	getboottime(&boot);
 
 	wc.sec = boot.tv_sec;
 	wc.nsec = boot.tv_nsec;
@@ -762,6 +760,7 @@ static void kvm_write_guest_time(struct kvm_vcpu *v)
 	local_irq_save(flags);
 	kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
 	ktime_get_ts(&ts);
+	monotonic_to_bootbased(&ts);
 	local_irq_restore(flags);
 
 	/* With all the info we got, fill in the values */
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index b8bea100a160..b34390347c16 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -2868,6 +2868,21 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
 		},
 		.driver_data = "F.23",	/* cutoff BIOS version */
 	},
+	/*
+	 * Acer eMachines G725 has the same problem.  BIOS
+	 * V1.03 is known to be broken.  V3.04 is known to
+	 * work.  Inbetween, there are V1.06, V2.06 and V3.03
+	 * that we don't have much idea about.  For now,
+	 * blacklist anything older than V3.04.
+	 */
+	{
+		.ident = "G725",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "eMachines"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"),
+		},
+		.driver_data = "V3.04",	/* cutoff BIOS version */
+	},
 	{ }	/* terminate list */
 	};
 	const struct dmi_system_id *dmi = dmi_first_match(sysids);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index f4ea5a8c325b..d096fbcbc771 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2875,7 +2875,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
 	 * write indication (used for PIO/DMA setup), result TF is
 	 * copied back and we don't whine too much about its failure.
 	 */
-	tf->flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 	if (scmd->sc_data_direction == DMA_TO_DEVICE)
 		tf->flags |= ATA_TFLAG_WRITE;
 
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 741065c9da67..730ef3c384ca 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -893,6 +893,9 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 				       do_write);
 	}
 
+	if (!do_write)
+		flush_dcache_page(page);
+
 	qc->curbytes += qc->sect_size;
 	qc->cursg_ofs += qc->sect_size;
 
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index f36defa37764..57d965b7f521 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -808,6 +808,7 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv,
 
 exit:
 	sdio_release_host(card->func);
+	kfree(tmpbuf);
 
 	return ret;
 }
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index c6f3b48be9dd..dcb9083ecde0 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1951,8 +1951,10 @@ static int tty_fasync(int fd, struct file *filp, int on)
 			pid = task_pid(current);
 			type = PIDTYPE_PID;
 		}
-		retval = __f_setown(filp, pid, type, 0);
+		get_pid(pid);
 		spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+		retval = __f_setown(filp, pid, type, 0);
+		put_pid(pid);
 		if (retval)
 			goto out;
 	} else {
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 4b34ade2332b..bd444dc93cf2 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -554,6 +554,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 				(dbs_tuners_ins.up_threshold -
 				 dbs_tuners_ins.down_differential);
 
+		if (freq_next < policy->min)
+			freq_next = policy->min;
+
 		if (!dbs_tuners_ins.powersave_bias) {
 			__cpufreq_driver_target(policy, freq_next,
 					CPUFREQ_RELATION_L);
diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
index a1fce68e3bbe..17be051b7aa3 100644
--- a/drivers/gpu/drm/ati_pcigart.c
+++ b/drivers/gpu/drm/ati_pcigart.c
@@ -113,7 +113,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
 
 	if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
 		DRM_ERROR("fail to set dma mask to 0x%Lx\n",
-			  gart_info->table_mask);
+			  (unsigned long long)gart_info->table_mask);
 		ret = 1;
 		goto done;
 	}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 46d88965852a..ecac882e1d54 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -120,7 +120,7 @@ const static struct intel_device_info intel_gm45_info = {
 
 const static struct intel_device_info intel_pineview_info = {
 	.is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
-	.has_pipe_cxsr = 1,
+	.need_gfx_hws = 1,
 	.has_hotplug = 1,
 };
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dda787aafcc6..b4c8c0230689 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3564,6 +3564,9 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
 	uint32_t reloc_count = 0, i;
 	int ret = 0;
 
+	if (relocs == NULL)
+		return 0;
+
 	for (i = 0; i < buffer_count; i++) {
 		struct drm_i915_gem_relocation_entry __user *user_relocs;
 		int unwritten;
@@ -3653,7 +3656,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct drm_gem_object *batch_obj;
 	struct drm_i915_gem_object *obj_priv;
 	struct drm_clip_rect *cliprects = NULL;
-	struct drm_i915_gem_relocation_entry *relocs;
+	struct drm_i915_gem_relocation_entry *relocs = NULL;
 	int ret = 0, ret2, i, pinned = 0;
 	uint64_t exec_offset;
 	uint32_t seqno, flush_domains, reloc_index;
@@ -3722,6 +3725,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		if (object_list[i] == NULL) {
 			DRM_ERROR("Invalid object handle %d at index %d\n",
 				   exec_list[i].handle, i);
+			/* prevent error path from reading uninitialized data */
+			args->buffer_count = i + 1;
 			ret = -EBADF;
 			goto err;
 		}
@@ -3730,6 +3735,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		if (obj_priv->in_execbuffer) {
 			DRM_ERROR("Object %p appears more than once in object list\n",
 				   object_list[i]);
+			/* prevent error path from reading uninitialized data */
+			args->buffer_count = i + 1;
 			ret = -EBADF;
 			goto err;
 		}
@@ -3926,6 +3933,7 @@ err:
 
 	mutex_unlock(&dev->struct_mutex);
 
+pre_mutex_err:
 	/* Copy the updated relocations out regardless of current error
 	 * state.  Failure to update the relocs would mean that the next
 	 * time userland calls execbuf, it would do so with presumed offset
@@ -3940,7 +3948,6 @@ err:
 			ret = ret2;
 	}
 
-pre_mutex_err:
 	drm_free_large(object_list);
 	kfree(cliprects);
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 89a071a3e6fb..50ddf4a95c5e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -309,6 +309,22 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 	if (de_iir & DE_GSE)
 		ironlake_opregion_gse_intr(dev);
 
+	if (de_iir & DE_PLANEA_FLIP_DONE)
+		intel_prepare_page_flip(dev, 0);
+
+	if (de_iir & DE_PLANEB_FLIP_DONE)
+		intel_prepare_page_flip(dev, 1);
+
+	if (de_iir & DE_PIPEA_VBLANK) {
+		drm_handle_vblank(dev, 0);
+		intel_finish_page_flip(dev, 0);
+	}
+
+	if (de_iir & DE_PIPEB_VBLANK) {
+		drm_handle_vblank(dev, 1);
+		intel_finish_page_flip(dev, 1);
+	}
+
 	/* check event from PCH */
 	if ((de_iir & DE_PCH_EVENT) &&
 	    (pch_iir & SDE_HOTPLUG_MASK)) {
@@ -844,11 +860,11 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
 	if (!(pipeconf & PIPEACONF_ENABLE))
 		return -EINVAL;
 
-	if (IS_IRONLAKE(dev))
-		return 0;
-
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	if (IS_I965G(dev))
+	if (IS_IRONLAKE(dev))
+		ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
+					    DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
+	else if (IS_I965G(dev))
 		i915_enable_pipestat(dev_priv, pipe,
 				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
 	else
@@ -866,13 +882,14 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
 
-	if (IS_IRONLAKE(dev))
-		return;
-
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	i915_disable_pipestat(dev_priv, pipe,
-			      PIPE_VBLANK_INTERRUPT_ENABLE |
-			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
+	if (IS_IRONLAKE(dev))
+		ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
+					     DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
+	else
+		i915_disable_pipestat(dev_priv, pipe,
+				      PIPE_VBLANK_INTERRUPT_ENABLE |
+				      PIPE_START_VBLANK_INTERRUPT_ENABLE);
 	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 }
 
@@ -1015,13 +1032,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	/* enable kind of interrupts always enabled */
-	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT;
+	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
 	u32 render_mask = GT_USER_INTERRUPT;
 	u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
 			   SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
 
 	dev_priv->irq_mask_reg = ~display_mask;
-	dev_priv->de_irq_enable_reg = display_mask;
+	dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
 
 	/* should always can generate irq */
 	I915_WRITE(DEIIR, I915_READ(DEIIR));
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index ddefc871edfe..79dd4026586f 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -157,6 +157,9 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
 	adpa = I915_READ(PCH_ADPA);
 
 	adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+	/* disable HPD first */
+	I915_WRITE(PCH_ADPA, adpa);
+	(void)I915_READ(PCH_ADPA);
 
 	adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
 		 ADPA_CRT_HOTPLUG_WARMUP_10MS |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 45da78ef4a92..12775df1bbfd 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1638,6 +1638,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 	case DRM_MODE_DPMS_OFF:
 		DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
 
+		drm_vblank_off(dev, pipe);
 		/* Disable display plane */
 		temp = I915_READ(dspcntr_reg);
 		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
@@ -2519,6 +2520,10 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
 		sr_entries = roundup(sr_entries / cacheline_size, 1);
 		DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
 		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+	} else {
+		/* Turn off self refresh if both pipes are enabled */
+		I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+			   & ~FW_BLC_SELF_EN);
 	}
 
 	DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
@@ -2562,6 +2567,10 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
 			srwm = 1;
 		srwm &= 0x3f;
 		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+	} else {
+		/* Turn off self refresh if both pipes are enabled */
+		I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+			   & ~FW_BLC_SELF_EN);
 	}
 
 	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -2630,6 +2639,10 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
 		if (srwm < 0)
 			srwm = 1;
 		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
+	} else {
+		/* Turn off self refresh if both pipes are enabled */
+		I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+			   & ~FW_BLC_SELF_EN);
 	}
 
 	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
@@ -3984,6 +3997,12 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
 	spin_lock_irqsave(&dev->event_lock, flags);
 	work = intel_crtc->unpin_work;
 	if (work == NULL || !work->pending) {
+		if (work && !work->pending) {
+			obj_priv = work->obj->driver_private;
+			DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
+					 obj_priv,
+					 atomic_read(&obj_priv->pending_flip));
+		}
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 		return;
 	}
@@ -4005,7 +4024,10 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
 	obj_priv = work->obj->driver_private;
-	if (atomic_dec_and_test(&obj_priv->pending_flip))
+
+	/* Initial scanout buffer will have a 0 pending flip count */
+	if ((atomic_read(&obj_priv->pending_flip) == 0) ||
+	    atomic_dec_and_test(&obj_priv->pending_flip))
 		DRM_WAKEUP(&dev_priv->pending_flip_queue);
 	schedule_work(&work->work);
 }
@@ -4018,8 +4040,11 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev->event_lock, flags);
-	if (intel_crtc->unpin_work)
+	if (intel_crtc->unpin_work) {
 		intel_crtc->unpin_work->pending = 1;
+	} else {
+		DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
+	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
@@ -4053,6 +4078,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	/* We borrow the event spin lock for protecting unpin_work */
 	spin_lock_irqsave(&dev->event_lock, flags);
 	if (intel_crtc->unpin_work) {
+		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 		kfree(work);
 		mutex_unlock(&dev->struct_mutex);
@@ -4066,7 +4092,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 	ret = intel_pin_and_fence_fb_obj(dev, obj);
 	if (ret != 0) {
+		DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
+				 obj->driver_private);
 		kfree(work);
+		intel_crtc->unpin_work = NULL;
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
 	}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index aa74e59bec61..b1d0acbae4e4 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -611,7 +611,7 @@ static const struct dmi_system_id bad_lid_status[] = {
 	{
 		.ident = "Samsung SX20S",
 		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Phoenix Technologies LTD"),
+			DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"),
 			DMI_MATCH(DMI_BOARD_NAME, "SX20S"),
 		},
 	},
@@ -623,6 +623,13 @@ static const struct dmi_system_id bad_lid_status[] = {
 		},
 	},
 	{
+		.ident = "Aspire 1810T",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"),
+		},
+	},
+	{
 		.ident = "PC-81005",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "MALATA"),
@@ -643,7 +650,7 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
 {
 	enum drm_connector_status status = connector_status_connected;
 
-	if (!acpi_lid_open() && !dmi_check_system(bad_lid_status))
+	if (!dmi_check_system(bad_lid_status) && !acpi_lid_open())
 		status = connector_status_disconnected;
 
 	return status;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index eaacfd0920df..82678d30ab06 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2345,6 +2345,14 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
 		connector->connector_type = DRM_MODE_CONNECTOR_VGA;
 		intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
 					(1 << INTEL_ANALOG_CLONE_BIT);
+	} else if (flags & SDVO_OUTPUT_CVBS0) {
+
+		sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0;
+		encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+		connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+		sdvo_priv->is_tv = true;
+		intel_output->needs_tv_clock = true;
+		intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
 	} else if (flags & SDVO_OUTPUT_LVDS0) {
 
 		sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 11c9a3fe6810..c0d4650cdb79 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -354,11 +354,17 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
 		return RREG32(RADEON_CRTC2_CRNT_FRAME);
 }
 
+/* Who ever call radeon_fence_emit should call ring_lock and ask
+ * for enough space (today caller are ib schedule and buffer move) */
 void r100_fence_ring_emit(struct radeon_device *rdev,
 			  struct radeon_fence *fence)
 {
-	/* Who ever call radeon_fence_emit should call ring_lock and ask
-	 * for enough space (today caller are ib schedule and buffer move) */
+	/* We have to make sure that caches are flushed before
+	 * CPU might read something from VRAM. */
+	radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
+	radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
+	radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
 	/* Wait until IDLE & CLEAN */
 	radeon_ring_write(rdev, PACKET0(0x1720, 0));
 	radeon_ring_write(rdev, (1 << 16) | (1 << 17));
@@ -3369,7 +3375,6 @@ int r100_suspend(struct radeon_device *rdev)
 
 void r100_fini(struct radeon_device *rdev)
 {
-	r100_suspend(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
@@ -3481,13 +3486,12 @@ int r100_init(struct radeon_device *rdev)
 	if (r) {
 		/* Somethings want wront with the accel init stop accel */
 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
-		r100_suspend(rdev);
 		r100_cp_fini(rdev);
 		r100_wb_fini(rdev);
 		r100_ib_fini(rdev);
+		radeon_irq_kms_fini(rdev);
 		if (rdev->flags & RADEON_IS_PCI)
 			r100_pci_gart_fini(rdev);
-		radeon_irq_kms_fini(rdev);
 		rdev->accel_working = false;
 	}
 	return 0;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 0051d11b907c..43b55a030b4d 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -506,11 +506,14 @@ void r300_vram_info(struct radeon_device *rdev)
 
 	/* DDR for all card after R300 & IGP */
 	rdev->mc.vram_is_ddr = true;
+
 	tmp = RREG32(RADEON_MEM_CNTL);
-	if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
-		rdev->mc.vram_width = 128;
-	} else {
-		rdev->mc.vram_width = 64;
+	tmp &= R300_MEM_NUM_CHANNELS_MASK;
+	switch (tmp) {
+	case 0: rdev->mc.vram_width = 64; break;
+	case 1: rdev->mc.vram_width = 128; break;
+	case 2: rdev->mc.vram_width = 256; break;
+	default: rdev->mc.vram_width = 128; break;
 	}
 
 	r100_vram_init_sizes(rdev);
@@ -1327,7 +1330,6 @@ int r300_suspend(struct radeon_device *rdev)
 
 void r300_fini(struct radeon_device *rdev)
 {
-	r300_suspend(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
@@ -1418,15 +1420,15 @@ int r300_init(struct radeon_device *rdev)
 	if (r) {
 		/* Somethings want wront with the accel init stop accel */
 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
-		r300_suspend(rdev);
 		r100_cp_fini(rdev);
 		r100_wb_fini(rdev);
 		r100_ib_fini(rdev);
+		radeon_irq_kms_fini(rdev);
 		if (rdev->flags & RADEON_IS_PCIE)
 			rv370_pcie_gart_fini(rdev);
 		if (rdev->flags & RADEON_IS_PCI)
 			r100_pci_gart_fini(rdev);
-		radeon_irq_kms_fini(rdev);
+		radeon_agp_fini(rdev);
 		rdev->accel_working = false;
 	}
 	return 0;
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 4526faaacca8..d9373246c97f 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -389,16 +389,15 @@ int r420_init(struct radeon_device *rdev)
 	if (r) {
 		/* Somethings want wront with the accel init stop accel */
 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
-		r420_suspend(rdev);
 		r100_cp_fini(rdev);
 		r100_wb_fini(rdev);
 		r100_ib_fini(rdev);
+		radeon_irq_kms_fini(rdev);
 		if (rdev->flags & RADEON_IS_PCIE)
 			rv370_pcie_gart_fini(rdev);
 		if (rdev->flags & RADEON_IS_PCI)
 			r100_pci_gart_fini(rdev);
 		radeon_agp_fini(rdev);
-		radeon_irq_kms_fini(rdev);
 		rdev->accel_working = false;
 	}
 	return 0;
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 9a189072f2b9..ddf5731eba0d 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -294,13 +294,12 @@ int r520_init(struct radeon_device *rdev)
 	if (r) {
 		/* Somethings want wront with the accel init stop accel */
 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
-		rv515_suspend(rdev);
 		r100_cp_fini(rdev);
 		r100_wb_fini(rdev);
 		r100_ib_fini(rdev);
+		radeon_irq_kms_fini(rdev);
 		rv370_pcie_gart_fini(rdev);
 		radeon_agp_fini(rdev);
-		radeon_irq_kms_fini(rdev);
 		rdev->accel_working = false;
 	}
 	return 0;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 1b6d0001b20e..a1198d99cdf9 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1654,6 +1654,12 @@ void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
 	rdev->cp.align_mask = 16 - 1;
 }
 
+void r600_cp_fini(struct radeon_device *rdev)
+{
+	r600_cp_stop(rdev);
+	radeon_ring_fini(rdev);
+}
+
 
 /*
  * GPU scratch registers helpers function.
@@ -1861,6 +1867,12 @@ int r600_startup(struct radeon_device *rdev)
 		return r;
 	}
 	r600_gpu_init(rdev);
+	r = r600_blit_init(rdev);
+	if (r) {
+		r600_blit_fini(rdev);
+		rdev->asic->copy = NULL;
+		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+	}
 	/* pin copy shader into vram */
 	if (rdev->r600_blit.shader_obj) {
 		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
@@ -2045,19 +2057,15 @@ int r600_init(struct radeon_device *rdev)
 	r = r600_pcie_gart_init(rdev);
 	if (r)
 		return r;
-	r = r600_blit_init(rdev);
-	if (r) {
-		r600_blit_fini(rdev);
-		rdev->asic->copy = NULL;
-		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
-	}
 
 	rdev->accel_working = true;
 	r = r600_startup(rdev);
 	if (r) {
-		r600_suspend(rdev);
+		dev_err(rdev->dev, "disabling GPU acceleration\n");
+		r600_cp_fini(rdev);
 		r600_wb_fini(rdev);
-		radeon_ring_fini(rdev);
+		r600_irq_fini(rdev);
+		radeon_irq_kms_fini(rdev);
 		r600_pcie_gart_fini(rdev);
 		rdev->accel_working = false;
 	}
@@ -2083,20 +2091,17 @@ int r600_init(struct radeon_device *rdev)
 
 void r600_fini(struct radeon_device *rdev)
 {
-	/* Suspend operations */
-	r600_suspend(rdev);
-
 	r600_audio_fini(rdev);
 	r600_blit_fini(rdev);
+	r600_cp_fini(rdev);
+	r600_wb_fini(rdev);
 	r600_irq_fini(rdev);
 	radeon_irq_kms_fini(rdev);
-	radeon_ring_fini(rdev);
-	r600_wb_fini(rdev);
 	r600_pcie_gart_fini(rdev);
+	radeon_agp_fini(rdev);
 	radeon_gem_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_clocks_fini(rdev);
-	radeon_agp_fini(rdev);
 	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
@@ -2900,3 +2905,18 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
 	return 0;
 #endif
 }
+
+/**
+ * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
+ * rdev: radeon device structure
+ * bo: buffer object struct which userspace is waiting for idle
+ *
+ * Some R6XX/R7XX doesn't seems to take into account HDP flush performed
+ * through ring buffer, this leads to corruption in rendering, see
+ * http://bugzilla.kernel.org/show_bug.cgi?id=15186 to avoid this we
+ * directly perform HDP flush by writing register through MMIO.
+ */
+void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
+{
+	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+}
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 99e2c3891a7d..b1c1d3433454 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -35,7 +35,7 @@
  */
 static int r600_audio_chipset_supported(struct radeon_device *rdev)
 {
-	return rdev->family >= CHIP_R600
+	return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710)
 		|| rdev->family == CHIP_RS600
 		|| rdev->family == CHIP_RS690
 		|| rdev->family == CHIP_RS740;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 2d5f2bfa7201..f57480ba1355 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -661,6 +661,13 @@ struct radeon_asic {
661 void (*hpd_fini)(struct radeon_device *rdev); 661 void (*hpd_fini)(struct radeon_device *rdev);
662 bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd); 662 bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
663 void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd); 663 void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
 664 /* ioctl hw specific callback. Some hw might want to perform a special
 665 * operation on a specific ioctl. For instance, on wait idle some hw
 666 * might want to perform an HDP flush through MMIO, as it seems that
 667 * some R6XX/R7XX hw doesn't take the HDP flush into account if it is
 668 * programmed through the ring.
669 */
670 void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
664}; 671};
665 672
666/* 673/*
@@ -1143,6 +1150,7 @@ extern bool r600_card_posted(struct radeon_device *rdev);
1143extern void r600_cp_stop(struct radeon_device *rdev); 1150extern void r600_cp_stop(struct radeon_device *rdev);
1144extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); 1151extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
1145extern int r600_cp_resume(struct radeon_device *rdev); 1152extern int r600_cp_resume(struct radeon_device *rdev);
1153extern void r600_cp_fini(struct radeon_device *rdev);
1146extern int r600_count_pipe_bits(uint32_t val); 1154extern int r600_count_pipe_bits(uint32_t val);
1147extern int r600_gart_clear_page(struct radeon_device *rdev, int i); 1155extern int r600_gart_clear_page(struct radeon_device *rdev, int i);
1148extern int r600_mc_wait_for_idle(struct radeon_device *rdev); 1156extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
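Note on the new ->ioctl_wait_idle member: it is deliberately optional, so every caller must NULL-check it before dispatching (the actual check appears in the radeon_gem.c hunk further down). A minimal standalone sketch of that optional-callback pattern, with illustrative names rather than the driver's own, might look like:

    #include <stdio.h>

    struct bo { int id; };

    struct asic_ops {
            /* optional hook: NULL means "no hw specific work needed" */
            void (*ioctl_wait_idle)(struct bo *bo);
    };

    static void r600_style_flush(struct bo *bo)
    {
            /* stand-in for the MMIO HDP flush done by r600_ioctl_wait_idle() */
            printf("flushing HDP for bo %d via MMIO\n", bo->id);
    }

    static void wait_idle(struct asic_ops *ops, struct bo *bo)
    {
            /* ... wait for the bo to go idle ... */
            if (ops->ioctl_wait_idle)  /* dispatch only if the ASIC provides it */
                    ops->ioctl_wait_idle(bo);
    }

    int main(void)
    {
            struct bo bo = { .id = 1 };
            struct asic_ops r100 = { .ioctl_wait_idle = NULL };
            struct asic_ops r600 = { .ioctl_wait_idle = r600_style_flush };

            wait_idle(&r100, &bo);  /* no-op: hook is NULL on pre-R600 parts */
            wait_idle(&r600, &bo);  /* performs the extra flush */
            return 0;
    }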
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index f2fbd2e4e9df..05ee1aeac3fd 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -117,6 +117,7 @@ static struct radeon_asic r100_asic = {
117 .hpd_fini = &r100_hpd_fini, 117 .hpd_fini = &r100_hpd_fini,
118 .hpd_sense = &r100_hpd_sense, 118 .hpd_sense = &r100_hpd_sense,
119 .hpd_set_polarity = &r100_hpd_set_polarity, 119 .hpd_set_polarity = &r100_hpd_set_polarity,
120 .ioctl_wait_idle = NULL,
120}; 121};
121 122
122 123
@@ -176,6 +177,7 @@ static struct radeon_asic r300_asic = {
176 .hpd_fini = &r100_hpd_fini, 177 .hpd_fini = &r100_hpd_fini,
177 .hpd_sense = &r100_hpd_sense, 178 .hpd_sense = &r100_hpd_sense,
178 .hpd_set_polarity = &r100_hpd_set_polarity, 179 .hpd_set_polarity = &r100_hpd_set_polarity,
180 .ioctl_wait_idle = NULL,
179}; 181};
180 182
181/* 183/*
@@ -219,6 +221,7 @@ static struct radeon_asic r420_asic = {
219 .hpd_fini = &r100_hpd_fini, 221 .hpd_fini = &r100_hpd_fini,
220 .hpd_sense = &r100_hpd_sense, 222 .hpd_sense = &r100_hpd_sense,
221 .hpd_set_polarity = &r100_hpd_set_polarity, 223 .hpd_set_polarity = &r100_hpd_set_polarity,
224 .ioctl_wait_idle = NULL,
222}; 225};
223 226
224 227
@@ -267,6 +270,7 @@ static struct radeon_asic rs400_asic = {
267 .hpd_fini = &r100_hpd_fini, 270 .hpd_fini = &r100_hpd_fini,
268 .hpd_sense = &r100_hpd_sense, 271 .hpd_sense = &r100_hpd_sense,
269 .hpd_set_polarity = &r100_hpd_set_polarity, 272 .hpd_set_polarity = &r100_hpd_set_polarity,
273 .ioctl_wait_idle = NULL,
270}; 274};
271 275
272 276
@@ -323,6 +327,7 @@ static struct radeon_asic rs600_asic = {
323 .hpd_fini = &rs600_hpd_fini, 327 .hpd_fini = &rs600_hpd_fini,
324 .hpd_sense = &rs600_hpd_sense, 328 .hpd_sense = &rs600_hpd_sense,
325 .hpd_set_polarity = &rs600_hpd_set_polarity, 329 .hpd_set_polarity = &rs600_hpd_set_polarity,
330 .ioctl_wait_idle = NULL,
326}; 331};
327 332
328 333
@@ -370,6 +375,7 @@ static struct radeon_asic rs690_asic = {
370 .hpd_fini = &rs600_hpd_fini, 375 .hpd_fini = &rs600_hpd_fini,
371 .hpd_sense = &rs600_hpd_sense, 376 .hpd_sense = &rs600_hpd_sense,
372 .hpd_set_polarity = &rs600_hpd_set_polarity, 377 .hpd_set_polarity = &rs600_hpd_set_polarity,
378 .ioctl_wait_idle = NULL,
373}; 379};
374 380
375 381
@@ -421,6 +427,7 @@ static struct radeon_asic rv515_asic = {
421 .hpd_fini = &rs600_hpd_fini, 427 .hpd_fini = &rs600_hpd_fini,
422 .hpd_sense = &rs600_hpd_sense, 428 .hpd_sense = &rs600_hpd_sense,
423 .hpd_set_polarity = &rs600_hpd_set_polarity, 429 .hpd_set_polarity = &rs600_hpd_set_polarity,
430 .ioctl_wait_idle = NULL,
424}; 431};
425 432
426 433
@@ -463,6 +470,7 @@ static struct radeon_asic r520_asic = {
463 .hpd_fini = &rs600_hpd_fini, 470 .hpd_fini = &rs600_hpd_fini,
464 .hpd_sense = &rs600_hpd_sense, 471 .hpd_sense = &rs600_hpd_sense,
465 .hpd_set_polarity = &rs600_hpd_set_polarity, 472 .hpd_set_polarity = &rs600_hpd_set_polarity,
473 .ioctl_wait_idle = NULL,
466}; 474};
467 475
468/* 476/*
@@ -504,6 +512,7 @@ void r600_hpd_fini(struct radeon_device *rdev);
504bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); 512bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
505void r600_hpd_set_polarity(struct radeon_device *rdev, 513void r600_hpd_set_polarity(struct radeon_device *rdev,
506 enum radeon_hpd_id hpd); 514 enum radeon_hpd_id hpd);
515extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
507 516
508static struct radeon_asic r600_asic = { 517static struct radeon_asic r600_asic = {
509 .init = &r600_init, 518 .init = &r600_init,
@@ -538,6 +547,7 @@ static struct radeon_asic r600_asic = {
538 .hpd_fini = &r600_hpd_fini, 547 .hpd_fini = &r600_hpd_fini,
539 .hpd_sense = &r600_hpd_sense, 548 .hpd_sense = &r600_hpd_sense,
540 .hpd_set_polarity = &r600_hpd_set_polarity, 549 .hpd_set_polarity = &r600_hpd_set_polarity,
550 .ioctl_wait_idle = r600_ioctl_wait_idle,
541}; 551};
542 552
543/* 553/*
@@ -582,6 +592,7 @@ static struct radeon_asic rv770_asic = {
582 .hpd_fini = &r600_hpd_fini, 592 .hpd_fini = &r600_hpd_fini,
583 .hpd_sense = &r600_hpd_sense, 593 .hpd_sense = &r600_hpd_sense,
584 .hpd_set_polarity = &r600_hpd_set_polarity, 594 .hpd_set_polarity = &r600_hpd_set_polarity,
595 .ioctl_wait_idle = r600_ioctl_wait_idle,
585}; 596};
586 597
587#endif 598#endif
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 579c8920e081..e7b19440102e 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -971,8 +971,7 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
971 lvds->native_mode.vdisplay); 971 lvds->native_mode.vdisplay);
972 972
973 lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c); 973 lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c);
974 if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0) 974 lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000);
975 lvds->panel_vcc_delay = 2000;
976 975
977 lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24); 976 lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24);
978 lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf; 977 lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 55266416fa47..2d8e5a70f284 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1343,7 +1343,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
1343 radeon_connector->dac_load_detect = false; 1343 radeon_connector->dac_load_detect = false;
1344 drm_connector_attach_property(&radeon_connector->base, 1344 drm_connector_attach_property(&radeon_connector->base,
1345 rdev->mode_info.load_detect_property, 1345 rdev->mode_info.load_detect_property,
1346 1); 1346 radeon_connector->dac_load_detect);
1347 drm_connector_attach_property(&radeon_connector->base, 1347 drm_connector_attach_property(&radeon_connector->base,
1348 rdev->mode_info.tv_std_property, 1348 rdev->mode_info.tv_std_property,
1349 radeon_combios_get_tv_info(rdev)); 1349 radeon_combios_get_tv_info(rdev));
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 0e1325e18534..db8e9a355a01 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -308,6 +308,9 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
308 } 308 }
309 robj = gobj->driver_private; 309 robj = gobj->driver_private;
310 r = radeon_bo_wait(robj, NULL, false); 310 r = radeon_bo_wait(robj, NULL, false);
 311 /* call the hw specific callback, if any */
312 if (robj->rdev->asic->ioctl_wait_idle)
313 robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
311 mutex_lock(&dev->struct_mutex); 314 mutex_lock(&dev->struct_mutex);
312 drm_gem_object_unreference(gobj); 315 drm_gem_object_unreference(gobj);
313 mutex_unlock(&dev->struct_mutex); 316 mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 9f5418983e2a..287fcebfb4e6 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -223,15 +223,31 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
223 return 0; 223 return 0;
224} 224}
225 225
226int rs400_mc_wait_for_idle(struct radeon_device *rdev)
227{
228 unsigned i;
229 uint32_t tmp;
230
231 for (i = 0; i < rdev->usec_timeout; i++) {
232 /* read MC_STATUS */
233 tmp = RREG32(0x0150);
234 if (tmp & (1 << 2)) {
235 return 0;
236 }
237 DRM_UDELAY(1);
238 }
239 return -1;
240}
241
226void rs400_gpu_init(struct radeon_device *rdev) 242void rs400_gpu_init(struct radeon_device *rdev)
227{ 243{
228 /* FIXME: HDP same place on rs400 ? */ 244 /* FIXME: HDP same place on rs400 ? */
229 r100_hdp_reset(rdev); 245 r100_hdp_reset(rdev);
230 /* FIXME: is this correct ? */ 246 /* FIXME: is this correct ? */
231 r420_pipes_init(rdev); 247 r420_pipes_init(rdev);
232 if (r300_mc_wait_for_idle(rdev)) { 248 if (rs400_mc_wait_for_idle(rdev)) {
233 printk(KERN_WARNING "Failed to wait MC idle while " 249 printk(KERN_WARNING "rs400: Failed to wait MC idle while "
234 "programming pipes. Bad things might happen.\n"); 250 "programming pipes. Bad things might happen. %08x\n", RREG32(0x150));
235 } 251 }
236} 252}
237 253
@@ -370,8 +386,8 @@ void rs400_mc_program(struct radeon_device *rdev)
370 r100_mc_stop(rdev, &save); 386 r100_mc_stop(rdev, &save);
371 387
372 /* Wait for mc idle */ 388 /* Wait for mc idle */
373 if (r300_mc_wait_for_idle(rdev)) 389 if (rs400_mc_wait_for_idle(rdev))
374 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); 390 dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
375 WREG32(R_000148_MC_FB_LOCATION, 391 WREG32(R_000148_MC_FB_LOCATION,
376 S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | 392 S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
377 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); 393 S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
@@ -448,7 +464,6 @@ int rs400_suspend(struct radeon_device *rdev)
448 464
449void rs400_fini(struct radeon_device *rdev) 465void rs400_fini(struct radeon_device *rdev)
450{ 466{
451 rs400_suspend(rdev);
452 r100_cp_fini(rdev); 467 r100_cp_fini(rdev);
453 r100_wb_fini(rdev); 468 r100_wb_fini(rdev);
454 r100_ib_fini(rdev); 469 r100_ib_fini(rdev);
@@ -527,7 +542,6 @@ int rs400_init(struct radeon_device *rdev)
527 if (r) { 542 if (r) {
 528 /* Something went wrong with the accel init, stop accel */ 543 /* Something went wrong with the accel init, stop accel */
529 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 544 dev_err(rdev->dev, "Disabling GPU acceleration\n");
530 rs400_suspend(rdev);
531 r100_cp_fini(rdev); 545 r100_cp_fini(rdev);
532 r100_wb_fini(rdev); 546 r100_wb_fini(rdev);
533 r100_ib_fini(rdev); 547 r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index d5255751e7b3..c3818562a13e 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -610,7 +610,6 @@ int rs600_suspend(struct radeon_device *rdev)
610 610
611void rs600_fini(struct radeon_device *rdev) 611void rs600_fini(struct radeon_device *rdev)
612{ 612{
613 rs600_suspend(rdev);
614 r100_cp_fini(rdev); 613 r100_cp_fini(rdev);
615 r100_wb_fini(rdev); 614 r100_wb_fini(rdev);
616 r100_ib_fini(rdev); 615 r100_ib_fini(rdev);
@@ -689,7 +688,6 @@ int rs600_init(struct radeon_device *rdev)
689 if (r) { 688 if (r) {
 690 /* Something went wrong with the accel init, stop accel */ 689 /* Something went wrong with the accel init, stop accel */
691 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 690 dev_err(rdev->dev, "Disabling GPU acceleration\n");
692 rs600_suspend(rdev);
693 r100_cp_fini(rdev); 691 r100_cp_fini(rdev);
694 r100_wb_fini(rdev); 692 r100_wb_fini(rdev);
695 r100_ib_fini(rdev); 693 r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index cd31da913771..06e2771aee5a 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -676,7 +676,6 @@ int rs690_suspend(struct radeon_device *rdev)
676 676
677void rs690_fini(struct radeon_device *rdev) 677void rs690_fini(struct radeon_device *rdev)
678{ 678{
679 rs690_suspend(rdev);
680 r100_cp_fini(rdev); 679 r100_cp_fini(rdev);
681 r100_wb_fini(rdev); 680 r100_wb_fini(rdev);
682 r100_ib_fini(rdev); 681 r100_ib_fini(rdev);
@@ -756,7 +755,6 @@ int rs690_init(struct radeon_device *rdev)
756 if (r) { 755 if (r) {
 757 /* Something went wrong with the accel init, stop accel */ 756 /* Something went wrong with the accel init, stop accel */
758 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 757 dev_err(rdev->dev, "Disabling GPU acceleration\n");
759 rs690_suspend(rdev);
760 r100_cp_fini(rdev); 758 r100_cp_fini(rdev);
761 r100_wb_fini(rdev); 759 r100_wb_fini(rdev);
762 r100_ib_fini(rdev); 760 r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 62756717b044..0e1e6b8632b8 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -537,7 +537,6 @@ void rv515_set_safe_registers(struct radeon_device *rdev)
537 537
538void rv515_fini(struct radeon_device *rdev) 538void rv515_fini(struct radeon_device *rdev)
539{ 539{
540 rv515_suspend(rdev);
541 r100_cp_fini(rdev); 540 r100_cp_fini(rdev);
542 r100_wb_fini(rdev); 541 r100_wb_fini(rdev);
543 r100_ib_fini(rdev); 542 r100_ib_fini(rdev);
@@ -615,13 +614,12 @@ int rv515_init(struct radeon_device *rdev)
615 if (r) { 614 if (r) {
 616 /* Something went wrong with the accel init, stop accel */ 615 /* Something went wrong with the accel init, stop accel */
617 dev_err(rdev->dev, "Disabling GPU acceleration\n"); 616 dev_err(rdev->dev, "Disabling GPU acceleration\n");
618 rv515_suspend(rdev);
619 r100_cp_fini(rdev); 617 r100_cp_fini(rdev);
620 r100_wb_fini(rdev); 618 r100_wb_fini(rdev);
621 r100_ib_fini(rdev); 619 r100_ib_fini(rdev);
620 radeon_irq_kms_fini(rdev);
622 rv370_pcie_gart_fini(rdev); 621 rv370_pcie_gart_fini(rdev);
623 radeon_agp_fini(rdev); 622 radeon_agp_fini(rdev);
624 radeon_irq_kms_fini(rdev);
625 rdev->accel_working = false; 623 rdev->accel_working = false;
626 } 624 }
627 return 0; 625 return 0;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index afd9e8213c29..5943d561fd1e 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -887,6 +887,12 @@ static int rv770_startup(struct radeon_device *rdev)
887 return r; 887 return r;
888 } 888 }
889 rv770_gpu_init(rdev); 889 rv770_gpu_init(rdev);
890 r = r600_blit_init(rdev);
891 if (r) {
892 r600_blit_fini(rdev);
893 rdev->asic->copy = NULL;
894 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
895 }
890 /* pin copy shader into vram */ 896 /* pin copy shader into vram */
891 if (rdev->r600_blit.shader_obj) { 897 if (rdev->r600_blit.shader_obj) {
892 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); 898 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
@@ -1055,19 +1061,15 @@ int rv770_init(struct radeon_device *rdev)
1055 r = r600_pcie_gart_init(rdev); 1061 r = r600_pcie_gart_init(rdev);
1056 if (r) 1062 if (r)
1057 return r; 1063 return r;
1058 r = r600_blit_init(rdev);
1059 if (r) {
1060 r600_blit_fini(rdev);
1061 rdev->asic->copy = NULL;
1062 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
1063 }
1064 1064
1065 rdev->accel_working = true; 1065 rdev->accel_working = true;
1066 r = rv770_startup(rdev); 1066 r = rv770_startup(rdev);
1067 if (r) { 1067 if (r) {
1068 rv770_suspend(rdev); 1068 dev_err(rdev->dev, "disabling GPU acceleration\n");
1069 r600_cp_fini(rdev);
1069 r600_wb_fini(rdev); 1070 r600_wb_fini(rdev);
1070 radeon_ring_fini(rdev); 1071 r600_irq_fini(rdev);
1072 radeon_irq_kms_fini(rdev);
1071 rv770_pcie_gart_fini(rdev); 1073 rv770_pcie_gart_fini(rdev);
1072 rdev->accel_working = false; 1074 rdev->accel_working = false;
1073 } 1075 }
@@ -1089,13 +1091,11 @@ int rv770_init(struct radeon_device *rdev)
1089 1091
1090void rv770_fini(struct radeon_device *rdev) 1092void rv770_fini(struct radeon_device *rdev)
1091{ 1093{
1092 rv770_suspend(rdev);
1093
1094 r600_blit_fini(rdev); 1094 r600_blit_fini(rdev);
1095 r600_cp_fini(rdev);
1096 r600_wb_fini(rdev);
1095 r600_irq_fini(rdev); 1097 r600_irq_fini(rdev);
1096 radeon_irq_kms_fini(rdev); 1098 radeon_irq_kms_fini(rdev);
1097 radeon_ring_fini(rdev);
1098 r600_wb_fini(rdev);
1099 rv770_pcie_gart_fini(rdev); 1099 rv770_pcie_gart_fini(rdev);
1100 radeon_gem_fini(rdev); 1100 radeon_gem_fini(rdev);
1101 radeon_fence_driver_fini(rdev); 1101 radeon_fence_driver_fini(rdev);
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index a31e77c776ae..b8156b4893bb 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -179,7 +179,7 @@ static const unsigned short normal_i2c[] = { 0x58, 0x5C, I2C_CLIENT_END };
179 * 179 *
180 * Some, but not all, of these voltages have low/high limits. 180 * Some, but not all, of these voltages have low/high limits.
181 */ 181 */
182#define ADT7462_VOLT_COUNT 12 182#define ADT7462_VOLT_COUNT 13
183 183
184#define ADT7462_VENDOR 0x41 184#define ADT7462_VENDOR 0x41
185#define ADT7462_DEVICE 0x62 185#define ADT7462_DEVICE 0x62
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index cadcbd90ff3b..72ff2c4e757d 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -851,17 +851,16 @@ static struct lm78_data *lm78_update_device(struct device *dev)
851static int __init lm78_isa_found(unsigned short address) 851static int __init lm78_isa_found(unsigned short address)
852{ 852{
853 int val, save, found = 0; 853 int val, save, found = 0;
854 854 int port;
855 /* We have to request the region in two parts because some 855
856 boards declare base+4 to base+7 as a PNP device */ 856 /* Some boards declare base+0 to base+7 as a PNP device, some base+4
 857 if (!request_region(address, 4, "lm78")) { 857 * to base+7 and some base+5 to base+6. So we request each port
858 pr_debug("lm78: Failed to request low part of region\n"); 858 * individually for the probing phase. */
859 return 0; 859 for (port = address; port < address + LM78_EXTENT; port++) {
860 } 860 if (!request_region(port, 1, "lm78")) {
861 if (!request_region(address + 4, 4, "lm78")) { 861 pr_debug("lm78: Failed to request port 0x%x\n", port);
862 pr_debug("lm78: Failed to request high part of region\n"); 862 goto release;
863 release_region(address, 4); 863 }
864 return 0;
865 } 864 }
866 865
867#define REALLY_SLOW_IO 866#define REALLY_SLOW_IO
@@ -925,8 +924,8 @@ static int __init lm78_isa_found(unsigned short address)
925 val & 0x80 ? "LM79" : "LM78", (int)address); 924 val & 0x80 ? "LM79" : "LM78", (int)address);
926 925
927 release: 926 release:
928 release_region(address + 4, 4); 927 for (port--; port >= address; port--)
929 release_region(address, 4); 928 release_region(port, 1);
930 return found; 929 return found;
931} 930}
932 931
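The per-port loop above pairs with the reverse-order release at the end of the function: because port stops at the first port that failed, releasing from port - 1 back down to address frees exactly the ports that were successfully requested. A hedged userspace analogue of that partial-unwind idiom (request_one/release_one are stubs, not the kernel request_region API):

    #include <stdio.h>
    #include <stdbool.h>

    #define EXTENT 8  /* mirrors LM78_EXTENT */

    /* stubs standing in for request_region()/release_region() */
    static bool request_one(unsigned short port)
    {
            return port != 0x295 + 5;  /* pretend base+5 is already claimed */
    }

    static void release_one(unsigned short port)
    {
            printf("released 0x%x\n", port);
    }

    int main(void)
    {
            unsigned short address = 0x295, port;

            for (port = address; port < address + EXTENT; port++) {
                    if (!request_one(port)) {
                            printf("failed at 0x%x\n", port);
                            goto release;
                    }
            }
            /* the probe itself would run here before the unwind */
            printf("all %d ports requested\n", EXTENT);
    release:
            /* unwind: release only the ports requested so far, in reverse */
            for (port--; port >= address; port--)
                    release_one(port);
            return 0;
    }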
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index 05f9225b6f94..32d4adee73db 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -1793,17 +1793,17 @@ static int __init
1793w83781d_isa_found(unsigned short address) 1793w83781d_isa_found(unsigned short address)
1794{ 1794{
1795 int val, save, found = 0; 1795 int val, save, found = 0;
1796 1796 int port;
1797 /* We have to request the region in two parts because some 1797
1798 boards declare base+4 to base+7 as a PNP device */ 1798 /* Some boards declare base+0 to base+7 as a PNP device, some base+4
 1799 if (!request_region(address, 4, "w83781d")) { 1799 * to base+7 and some base+5 to base+6. So we request each port
1800 pr_debug("w83781d: Failed to request low part of region\n"); 1800 * individually for the probing phase. */
1801 return 0; 1801 for (port = address; port < address + W83781D_EXTENT; port++) {
1802 } 1802 if (!request_region(port, 1, "w83781d")) {
1803 if (!request_region(address + 4, 4, "w83781d")) { 1803 pr_debug("w83781d: Failed to request port 0x%x\n",
1804 pr_debug("w83781d: Failed to request high part of region\n"); 1804 port);
1805 release_region(address, 4); 1805 goto release;
1806 return 0; 1806 }
1807 } 1807 }
1808 1808
1809#define REALLY_SLOW_IO 1809#define REALLY_SLOW_IO
@@ -1877,8 +1877,8 @@ w83781d_isa_found(unsigned short address)
1877 val == 0x30 ? "W83782D" : "W83781D", (int)address); 1877 val == 0x30 ? "W83782D" : "W83781D", (int)address);
1878 1878
1879 release: 1879 release:
1880 release_region(address + 4, 4); 1880 for (port--; port >= address; port--)
1881 release_region(address, 4); 1881 release_region(port, 1);
1882 return found; 1882 return found;
1883} 1883}
1884 1884
diff --git a/drivers/md/md.c b/drivers/md/md.c
index dd3dfe42d5a9..a20a71e5efd3 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -4075,8 +4075,10 @@ static void mddev_delayed_delete(struct work_struct *ws)
4075{ 4075{
4076 mddev_t *mddev = container_of(ws, mddev_t, del_work); 4076 mddev_t *mddev = container_of(ws, mddev_t, del_work);
4077 4077
4078 if (mddev->private == &md_redundancy_group) { 4078 if (mddev->private) {
4079 sysfs_remove_group(&mddev->kobj, &md_redundancy_group); 4079 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
4080 if (mddev->private != (void*)1)
4081 sysfs_remove_group(&mddev->kobj, mddev->private);
4080 if (mddev->sysfs_action) 4082 if (mddev->sysfs_action)
4081 sysfs_put(mddev->sysfs_action); 4083 sysfs_put(mddev->sysfs_action);
4082 mddev->sysfs_action = NULL; 4084 mddev->sysfs_action = NULL;
@@ -4287,10 +4289,7 @@ static int do_md_run(mddev_t * mddev)
4287 sysfs_notify_dirent(rdev->sysfs_state); 4289 sysfs_notify_dirent(rdev->sysfs_state);
4288 } 4290 }
4289 4291
4290 md_probe(mddev->unit, NULL, NULL);
4291 disk = mddev->gendisk; 4292 disk = mddev->gendisk;
4292 if (!disk)
4293 return -ENOMEM;
4294 4293
4295 spin_lock(&pers_lock); 4294 spin_lock(&pers_lock);
4296 pers = find_pers(mddev->level, mddev->clevel); 4295 pers = find_pers(mddev->level, mddev->clevel);
@@ -4530,8 +4529,8 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
4530 mddev->queue->unplug_fn = NULL; 4529 mddev->queue->unplug_fn = NULL;
4531 mddev->queue->backing_dev_info.congested_fn = NULL; 4530 mddev->queue->backing_dev_info.congested_fn = NULL;
4532 module_put(mddev->pers->owner); 4531 module_put(mddev->pers->owner);
4533 if (mddev->pers->sync_request) 4532 if (mddev->pers->sync_request && mddev->private == NULL)
4534 mddev->private = &md_redundancy_group; 4533 mddev->private = (void*)1;
4535 mddev->pers = NULL; 4534 mddev->pers = NULL;
4536 /* tell userspace to handle 'inactive' */ 4535 /* tell userspace to handle 'inactive' */
4537 sysfs_notify_dirent(mddev->sysfs_state); 4536 sysfs_notify_dirent(mddev->sysfs_state);
@@ -4578,9 +4577,6 @@ out:
4578 } 4577 }
4579 mddev->bitmap_info.offset = 0; 4578 mddev->bitmap_info.offset = 0;
4580 4579
4581 /* make sure all md_delayed_delete calls have finished */
4582 flush_scheduled_work();
4583
4584 export_array(mddev); 4580 export_array(mddev);
4585 4581
4586 mddev->array_sectors = 0; 4582 mddev->array_sectors = 0;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e84204eb12df..ceb24afdc147 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5136,9 +5136,8 @@ static int stop(mddev_t *mddev)
5136 mddev->thread = NULL; 5136 mddev->thread = NULL;
5137 mddev->queue->backing_dev_info.congested_fn = NULL; 5137 mddev->queue->backing_dev_info.congested_fn = NULL;
5138 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 5138 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
5139 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
5140 free_conf(conf); 5139 free_conf(conf);
5141 mddev->private = NULL; 5140 mddev->private = &raid5_attrs_group;
5142 return 0; 5141 return 0;
5143} 5142}
5144 5143
@@ -5464,11 +5463,11 @@ static int raid5_start_reshape(mddev_t *mddev)
5464 !test_bit(Faulty, &rdev->flags)) { 5463 !test_bit(Faulty, &rdev->flags)) {
5465 if (raid5_add_disk(mddev, rdev) == 0) { 5464 if (raid5_add_disk(mddev, rdev) == 0) {
5466 char nm[20]; 5465 char nm[20];
5467 if (rdev->raid_disk >= conf->previous_raid_disks) 5466 if (rdev->raid_disk >= conf->previous_raid_disks) {
5468 set_bit(In_sync, &rdev->flags); 5467 set_bit(In_sync, &rdev->flags);
5469 else 5468 added_devices++;
5469 } else
5470 rdev->recovery_offset = 0; 5470 rdev->recovery_offset = 0;
5471 added_devices++;
5472 sprintf(nm, "rd%d", rdev->raid_disk); 5471 sprintf(nm, "rd%d", rdev->raid_disk);
5473 if (sysfs_create_link(&mddev->kobj, 5472 if (sysfs_create_link(&mddev->kobj,
5474 &rdev->kobj, nm)) 5473 &rdev->kobj, nm))
@@ -5480,9 +5479,12 @@ static int raid5_start_reshape(mddev_t *mddev)
5480 break; 5479 break;
5481 } 5480 }
5482 5481
5482 /* When a reshape changes the number of devices, ->degraded
 5483 * is measured against the larger of the pre-reshape and post-reshape
 5484 * number of devices. */
5483 if (mddev->delta_disks > 0) { 5485 if (mddev->delta_disks > 0) {
5484 spin_lock_irqsave(&conf->device_lock, flags); 5486 spin_lock_irqsave(&conf->device_lock, flags);
5485 mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) 5487 mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
5486 - added_devices; 5488 - added_devices;
5487 spin_unlock_irqrestore(&conf->device_lock, flags); 5489 spin_unlock_irqrestore(&conf->device_lock, flags);
5488 } 5490 }
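The accounting change above is easiest to see with numbers: growing a 4-disk array to 6 while only one spare could be activated leaves one missing slot, so ->degraded must rise by (6 - 4) - 1 = 1. The old code overwrote ->degraded instead of adding to it, which lost any degradation that existed before the reshape. A quick check of the arithmetic (values are illustrative):

    #include <stdio.h>

    int main(void)
    {
            int previous_raid_disks = 4;  /* array size before the reshape */
            int raid_disks = 6;           /* array size after the reshape */
            int added_devices = 1;        /* spares actually activated */
            int degraded = 0;             /* devices already missing beforehand */

            /* mirrors: mddev->degraded += (conf->raid_disks -
             *          conf->previous_raid_disks) - added_devices; */
            degraded += (raid_disks - previous_raid_disks) - added_devices;
            printf("degraded = %d\n", degraded);  /* prints 1 */
            return 0;
    }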
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index c37790ad92d0..9ddc57909d49 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -761,7 +761,6 @@ static int dvb_demux_open(struct inode *inode, struct file *file)
761 dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192); 761 dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
762 dmxdevfilter->type = DMXDEV_TYPE_NONE; 762 dmxdevfilter->type = DMXDEV_TYPE_NONE;
763 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED); 763 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
764 INIT_LIST_HEAD(&dmxdevfilter->feed.ts);
765 init_timer(&dmxdevfilter->timer); 764 init_timer(&dmxdevfilter->timer);
766 765
767 dvbdev->users++; 766 dvbdev->users++;
@@ -887,6 +886,7 @@ static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev,
887 dmxdevfilter->type = DMXDEV_TYPE_PES; 886 dmxdevfilter->type = DMXDEV_TYPE_PES;
888 memcpy(&dmxdevfilter->params, params, 887 memcpy(&dmxdevfilter->params, params,
889 sizeof(struct dmx_pes_filter_params)); 888 sizeof(struct dmx_pes_filter_params));
889 INIT_LIST_HEAD(&dmxdevfilter->feed.ts);
890 890
891 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET); 891 dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);
892 892
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index b78cfb7d1897..67f189b7aa1f 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -426,16 +426,7 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
426 }; 426 };
427 }; 427 };
428 428
429 if (dvb_demux_tscheck) { 429 if (demux->cnt_storage) {
430 if (!demux->cnt_storage)
431 demux->cnt_storage = vmalloc(MAX_PID + 1);
432
433 if (!demux->cnt_storage) {
434 printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n");
435 dvb_demux_tscheck = 0;
436 goto no_dvb_demux_tscheck;
437 }
438
439 /* check pkt counter */ 430 /* check pkt counter */
440 if (pid < MAX_PID) { 431 if (pid < MAX_PID) {
441 if (buf[1] & 0x80) 432 if (buf[1] & 0x80)
@@ -454,7 +445,6 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
454 }; 445 };
455 /* end check */ 446 /* end check */
456 }; 447 };
457no_dvb_demux_tscheck:
458 448
459 list_for_each_entry(feed, &demux->feed_list, list_head) { 449 list_for_each_entry(feed, &demux->feed_list, list_head) {
460 if ((feed->pid != pid) && (feed->pid != 0x2000)) 450 if ((feed->pid != pid) && (feed->pid != 0x2000))
@@ -1246,6 +1236,7 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
1246 dvbdemux->feed = vmalloc(dvbdemux->feednum * sizeof(struct dvb_demux_feed)); 1236 dvbdemux->feed = vmalloc(dvbdemux->feednum * sizeof(struct dvb_demux_feed));
1247 if (!dvbdemux->feed) { 1237 if (!dvbdemux->feed) {
1248 vfree(dvbdemux->filter); 1238 vfree(dvbdemux->filter);
1239 dvbdemux->filter = NULL;
1249 return -ENOMEM; 1240 return -ENOMEM;
1250 } 1241 }
1251 for (i = 0; i < dvbdemux->filternum; i++) { 1242 for (i = 0; i < dvbdemux->filternum; i++) {
@@ -1257,6 +1248,13 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
1257 dvbdemux->feed[i].index = i; 1248 dvbdemux->feed[i].index = i;
1258 } 1249 }
1259 1250
1251 if (dvb_demux_tscheck) {
1252 dvbdemux->cnt_storage = vmalloc(MAX_PID + 1);
1253
1254 if (!dvbdemux->cnt_storage)
1255 printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n");
1256 }
1257
1260 INIT_LIST_HEAD(&dvbdemux->frontend_list); 1258 INIT_LIST_HEAD(&dvbdemux->frontend_list);
1261 1259
1262 for (i = 0; i < DMX_TS_PES_OTHER; i++) { 1260 for (i = 0; i < DMX_TS_PES_OTHER; i++) {
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index 62d9c9cc5671..1dd4403247ca 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -921,7 +921,7 @@ static int ax_probe(struct platform_device *pdev)
921 size = (res->end - res->start) + 1; 921 size = (res->end - res->start) + 1;
922 922
923 ax->mem2 = request_mem_region(res->start, size, pdev->name); 923 ax->mem2 = request_mem_region(res->start, size, pdev->name);
924 if (ax->mem == NULL) { 924 if (ax->mem2 == NULL) {
925 dev_err(&pdev->dev, "cannot reserve registers\n"); 925 dev_err(&pdev->dev, "cannot reserve registers\n");
926 ret = -ENXIO; 926 ret = -ENXIO;
927 goto exit_mem1; 927 goto exit_mem1;
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index bdbd14727e4b..318a018ca7c5 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -2079,6 +2079,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2079 struct sge_fl *fl, int len, int complete) 2079 struct sge_fl *fl, int len, int complete)
2080{ 2080{
2081 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; 2081 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
2082 struct port_info *pi = netdev_priv(qs->netdev);
2082 struct sk_buff *skb = NULL; 2083 struct sk_buff *skb = NULL;
2083 struct cpl_rx_pkt *cpl; 2084 struct cpl_rx_pkt *cpl;
2084 struct skb_frag_struct *rx_frag; 2085 struct skb_frag_struct *rx_frag;
@@ -2116,11 +2117,18 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2116 2117
2117 if (!nr_frags) { 2118 if (!nr_frags) {
2118 offset = 2 + sizeof(struct cpl_rx_pkt); 2119 offset = 2 + sizeof(struct cpl_rx_pkt);
2119 qs->lro_va = sd->pg_chunk.va + 2; 2120 cpl = qs->lro_va = sd->pg_chunk.va + 2;
2120 }
2121 len -= offset;
2122 2121
2123 prefetch(qs->lro_va); 2122 if ((pi->rx_offload & T3_RX_CSUM) &&
2123 cpl->csum_valid && cpl->csum == htons(0xffff)) {
2124 skb->ip_summed = CHECKSUM_UNNECESSARY;
2125 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2126 } else
2127 skb->ip_summed = CHECKSUM_NONE;
2128 } else
2129 cpl = qs->lro_va;
2130
2131 len -= offset;
2124 2132
2125 rx_frag += nr_frags; 2133 rx_frag += nr_frags;
2126 rx_frag->page = sd->pg_chunk.page; 2134 rx_frag->page = sd->pg_chunk.page;
@@ -2136,12 +2144,8 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2136 return; 2144 return;
2137 2145
2138 skb_record_rx_queue(skb, qs - &adap->sge.qs[0]); 2146 skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
2139 skb->ip_summed = CHECKSUM_UNNECESSARY;
2140 cpl = qs->lro_va;
2141 2147
2142 if (unlikely(cpl->vlan_valid)) { 2148 if (unlikely(cpl->vlan_valid)) {
2143 struct net_device *dev = qs->netdev;
2144 struct port_info *pi = netdev_priv(dev);
2145 struct vlan_group *grp = pi->vlan_grp; 2149 struct vlan_group *grp = pi->vlan_grp;
2146 2150
2147 if (likely(grp != NULL)) { 2151 if (likely(grp != NULL)) {
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 997124d2992a..c881347cb26d 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -421,6 +421,8 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
421 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; 421 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
422 if (tx_queue > IGB_N0_QUEUE) 422 if (tx_queue > IGB_N0_QUEUE)
423 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; 423 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
424 if (!adapter->msix_entries && msix_vector == 0)
425 msixbm |= E1000_EIMS_OTHER;
424 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); 426 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
425 q_vector->eims_value = msixbm; 427 q_vector->eims_value = msixbm;
426 break; 428 break;
@@ -877,7 +879,6 @@ static int igb_request_irq(struct igb_adapter *adapter)
877{ 879{
878 struct net_device *netdev = adapter->netdev; 880 struct net_device *netdev = adapter->netdev;
879 struct pci_dev *pdev = adapter->pdev; 881 struct pci_dev *pdev = adapter->pdev;
880 struct e1000_hw *hw = &adapter->hw;
881 int err = 0; 882 int err = 0;
882 883
883 if (adapter->msix_entries) { 884 if (adapter->msix_entries) {
@@ -909,20 +910,7 @@ static int igb_request_irq(struct igb_adapter *adapter)
909 igb_setup_all_tx_resources(adapter); 910 igb_setup_all_tx_resources(adapter);
910 igb_setup_all_rx_resources(adapter); 911 igb_setup_all_rx_resources(adapter);
911 } else { 912 } else {
912 switch (hw->mac.type) { 913 igb_assign_vector(adapter->q_vector[0], 0);
913 case e1000_82575:
914 wr32(E1000_MSIXBM(0),
915 (E1000_EICR_RX_QUEUE0 |
916 E1000_EICR_TX_QUEUE0 |
917 E1000_EIMS_OTHER));
918 break;
919 case e1000_82580:
920 case e1000_82576:
921 wr32(E1000_IVAR0, E1000_IVAR_VALID);
922 break;
923 default:
924 break;
925 }
926 } 914 }
927 915
928 if (adapter->flags & IGB_FLAG_HAS_MSI) { 916 if (adapter->flags & IGB_FLAG_HAS_MSI) {
@@ -1140,6 +1128,8 @@ int igb_up(struct igb_adapter *adapter)
1140 } 1128 }
1141 if (adapter->msix_entries) 1129 if (adapter->msix_entries)
1142 igb_configure_msix(adapter); 1130 igb_configure_msix(adapter);
1131 else
1132 igb_assign_vector(adapter->q_vector[0], 0);
1143 1133
1144 /* Clear any pending interrupts. */ 1134 /* Clear any pending interrupts. */
1145 rd32(E1000_ICR); 1135 rd32(E1000_ICR);
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index b5f64ad67975..7b7c8486c0bf 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -5179,7 +5179,7 @@ dma_error:
5179 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); 5179 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
5180 } 5180 }
5181 5181
5182 return count; 5182 return 0;
5183} 5183}
5184 5184
5185static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, 5185static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
@@ -5329,8 +5329,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
5329 struct ixgbe_adapter *adapter = netdev_priv(dev); 5329 struct ixgbe_adapter *adapter = netdev_priv(dev);
5330 int txq = smp_processor_id(); 5330 int txq = smp_processor_id();
5331 5331
5332 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) 5332 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
5333 while (unlikely(txq >= dev->real_num_tx_queues))
5334 txq -= dev->real_num_tx_queues;
5333 return txq; 5335 return txq;
5336 }
5334 5337
5335#ifdef IXGBE_FCOE 5338#ifdef IXGBE_FCOE
5336 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && 5339 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
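The new wrap loop above is a cheap substitute for a modulo: smp_processor_id() can exceed the number of real TX queues, and repeated subtraction folds it back into range, e.g. CPU 13 with 8 queues maps to queue 5. A standalone check of that reduction (values are illustrative):

    #include <stdio.h>

    int main(void)
    {
            unsigned int txq = 13;                 /* e.g. smp_processor_id() */
            unsigned int real_num_tx_queues = 8;   /* queues actually configured */

            /* mirrors the driver's subtract-until-in-range loop */
            while (txq >= real_num_tx_queues)
                    txq -= real_num_tx_queues;

            printf("selected queue %u\n", txq);    /* prints 5 */
            return 0;
    }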
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 9f9d6081959b..24279e6e55f5 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1941,7 +1941,7 @@ static void netxen_tx_timeout_task(struct work_struct *work)
1941 netif_wake_queue(adapter->netdev); 1941 netif_wake_queue(adapter->netdev);
1942 1942
1943 clear_bit(__NX_RESETTING, &adapter->state); 1943 clear_bit(__NX_RESETTING, &adapter->state);
1944 1944 return;
1945 } else { 1945 } else {
1946 clear_bit(__NX_RESETTING, &adapter->state); 1946 clear_bit(__NX_RESETTING, &adapter->state);
1947 if (!netxen_nic_reset_context(adapter)) { 1947 if (!netxen_nic_reset_context(adapter)) {
@@ -2240,7 +2240,9 @@ netxen_detach_work(struct work_struct *work)
2240 2240
2241 netxen_nic_down(adapter, netdev); 2241 netxen_nic_down(adapter, netdev);
2242 2242
2243 rtnl_lock();
2243 netxen_nic_detach(adapter); 2244 netxen_nic_detach(adapter);
2245 rtnl_unlock();
2244 2246
2245 status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); 2247 status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
2246 2248
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index d760650c5c04..67249c3c9f50 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1025,11 +1025,8 @@ static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
1025static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot) 1025static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot)
1026{ 1026{
1027 struct sky2_tx_le *le = sky2->tx_le + *slot; 1027 struct sky2_tx_le *le = sky2->tx_le + *slot;
1028 struct tx_ring_info *re = sky2->tx_ring + *slot;
1029 1028
1030 *slot = RING_NEXT(*slot, sky2->tx_ring_size); 1029 *slot = RING_NEXT(*slot, sky2->tx_ring_size);
1031 re->flags = 0;
1032 re->skb = NULL;
1033 le->ctrl = 0; 1030 le->ctrl = 0;
1034 return le; 1031 return le;
1035} 1032}
@@ -1622,8 +1619,7 @@ static unsigned tx_le_req(const struct sk_buff *skb)
1622 return count; 1619 return count;
1623} 1620}
1624 1621
1625static void sky2_tx_unmap(struct pci_dev *pdev, 1622static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re)
1626 const struct tx_ring_info *re)
1627{ 1623{
1628 if (re->flags & TX_MAP_SINGLE) 1624 if (re->flags & TX_MAP_SINGLE)
1629 pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr), 1625 pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr),
@@ -1633,6 +1629,7 @@ static void sky2_tx_unmap(struct pci_dev *pdev,
1633 pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr), 1629 pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr),
1634 pci_unmap_len(re, maplen), 1630 pci_unmap_len(re, maplen),
1635 PCI_DMA_TODEVICE); 1631 PCI_DMA_TODEVICE);
1632 re->flags = 0;
1636} 1633}
1637 1634
1638/* 1635/*
@@ -1839,6 +1836,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1839 dev->stats.tx_packets++; 1836 dev->stats.tx_packets++;
1840 dev->stats.tx_bytes += skb->len; 1837 dev->stats.tx_bytes += skb->len;
1841 1838
1839 re->skb = NULL;
1842 dev_kfree_skb_any(skb); 1840 dev_kfree_skb_any(skb);
1843 1841
1844 sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size); 1842 sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index c74694345b6e..d58b94030ef3 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -338,6 +338,23 @@ static void __devinit quirk_s3_64M(struct pci_dev *dev)
338DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); 338DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M);
339DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); 339DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M);
340 340
341/*
342 * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS
343 * ver. 1.33 20070103) don't set the correct ISA PCI region header info.
344 * BAR0 should be 8 bytes; instead, it may be set to something like 8k
345 * (which conflicts w/ BAR1's memory range).
346 */
347static void __devinit quirk_cs5536_vsa(struct pci_dev *dev)
348{
349 if (pci_resource_len(dev, 0) != 8) {
350 struct resource *res = &dev->resource[0];
351 res->end = res->start + 8 - 1;
352 dev_info(&dev->dev, "CS5536 ISA bridge bug detected "
353 "(incorrect header); workaround applied.\n");
354 }
355}
356DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
357
341static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, 358static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region,
342 unsigned size, int nr, const char *name) 359 unsigned size, int nr, const char *name)
343{ 360{
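The quirk above relies on resource lengths being inclusive: a BAR that should decode 8 bytes must have end = start + 8 - 1, and pci_resource_len() computes end - start + 1. A small illustration of the clamp with made-up addresses (struct and helper are local stand-ins, not the kernel definitions):

    #include <stdio.h>

    struct resource { unsigned long start, end; };

    static unsigned long resource_len(const struct resource *res)
    {
            /* same inclusive arithmetic as pci_resource_len() */
            return res->end - res->start + 1;
    }

    int main(void)
    {
            /* BIOS wrongly advertised an 8 KiB window starting at 0x6000 */
            struct resource bar0 = { .start = 0x6000, .end = 0x7fff };

            printf("before: len = %lu\n", resource_len(&bar0));  /* 8192 */
            if (resource_len(&bar0) != 8)
                    bar0.end = bar0.start + 8 - 1;  /* the quirk's clamp */
            printf("after:  len = %lu\n", resource_len(&bar0));  /* 8 */
            return 0;
    }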
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 999fe80c4051..62b654af9237 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -531,7 +531,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
531 qdio_siga_sync_q(q); 531 qdio_siga_sync_q(q);
532 get_buf_state(q, q->first_to_check, &state, 0); 532 get_buf_state(q, q->first_to_check, &state, 0);
533 533
534 if (state == SLSB_P_INPUT_PRIMED) 534 if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
535 /* more work coming */ 535 /* more work coming */
536 return 0; 536 return 0;
537 537
@@ -960,6 +960,8 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
960 qdio_handle_activate_check(cdev, intparm, cstat, 960 qdio_handle_activate_check(cdev, intparm, cstat,
961 dstat); 961 dstat);
962 break; 962 break;
963 case QDIO_IRQ_STATE_STOPPED:
964 break;
963 default: 965 default:
964 WARN_ON(1); 966 WARN_ON(1);
965 } 967 }
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index 0ceec123ddfd..bee558aed427 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -35,7 +35,9 @@
35#include <linux/usb.h> 35#include <linux/usb.h>
36#include <linux/platform_device.h> 36#include <linux/platform_device.h>
37#include <linux/io.h> 37#include <linux/io.h>
38#include <linux/mm.h>
38#include <linux/irq.h> 39#include <linux/irq.h>
40#include <asm/cacheflush.h>
39 41
40#include "../core/hcd.h" 42#include "../core/hcd.h"
41#include "r8a66597.h" 43#include "r8a66597.h"
@@ -820,6 +822,26 @@ static void enable_r8a66597_pipe(struct r8a66597 *r8a66597, struct urb *urb,
820 enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb); 822 enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb);
821} 823}
822 824
825static void r8a66597_urb_done(struct r8a66597 *r8a66597, struct urb *urb,
826 int status)
827__releases(r8a66597->lock)
828__acquires(r8a66597->lock)
829{
830 if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) {
831 void *ptr;
832
833 for (ptr = urb->transfer_buffer;
834 ptr < urb->transfer_buffer + urb->transfer_buffer_length;
835 ptr += PAGE_SIZE)
836 flush_dcache_page(virt_to_page(ptr));
837 }
838
839 usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb);
840 spin_unlock(&r8a66597->lock);
841 usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, status);
842 spin_lock(&r8a66597->lock);
843}
844
823/* this function must be called with interrupt disabled */ 845/* this function must be called with interrupt disabled */
824static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address) 846static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address)
825{ 847{
@@ -838,15 +860,9 @@ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address)
838 list_del(&td->queue); 860 list_del(&td->queue);
839 kfree(td); 861 kfree(td);
840 862
841 if (urb) { 863 if (urb)
842 usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), 864 r8a66597_urb_done(r8a66597, urb, -ENODEV);
843 urb);
844 865
845 spin_unlock(&r8a66597->lock);
846 usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb,
847 -ENODEV);
848 spin_lock(&r8a66597->lock);
849 }
850 break; 866 break;
851 } 867 }
852} 868}
@@ -1006,6 +1022,8 @@ static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port,
1006/* this function must be called with interrupt disabled */ 1022/* this function must be called with interrupt disabled */
1007static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port, 1023static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port,
1008 u16 syssts) 1024 u16 syssts)
1025__releases(r8a66597->lock)
1026__acquires(r8a66597->lock)
1009{ 1027{
1010 if (syssts == SE0) { 1028 if (syssts == SE0) {
1011 r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port)); 1029 r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port));
@@ -1023,7 +1041,9 @@ static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port,
1023 usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597)); 1041 usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597));
1024 } 1042 }
1025 1043
1044 spin_unlock(&r8a66597->lock);
1026 usb_hcd_poll_rh_status(r8a66597_to_hcd(r8a66597)); 1045 usb_hcd_poll_rh_status(r8a66597_to_hcd(r8a66597));
1046 spin_lock(&r8a66597->lock);
1027} 1047}
1028 1048
1029/* this function must be called with interrupt disabled */ 1049/* this function must be called with interrupt disabled */
@@ -1283,10 +1303,7 @@ __releases(r8a66597->lock) __acquires(r8a66597->lock)
1283 if (usb_pipeisoc(urb->pipe)) 1303 if (usb_pipeisoc(urb->pipe))
1284 urb->start_frame = r8a66597_get_frame(hcd); 1304 urb->start_frame = r8a66597_get_frame(hcd);
1285 1305
1286 usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb); 1306 r8a66597_urb_done(r8a66597, urb, status);
1287 spin_unlock(&r8a66597->lock);
1288 usb_hcd_giveback_urb(hcd, urb, status);
1289 spin_lock(&r8a66597->lock);
1290 } 1307 }
1291 1308
1292 if (restart) { 1309 if (restart) {
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index cf62b05e296a..7d6c2139891d 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -84,7 +84,7 @@ static const match_table_t tokens = {
84 84
85static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts) 85static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
86{ 86{
87 char *options; 87 char *options, *tmp_options;
88 substring_t args[MAX_OPT_ARGS]; 88 substring_t args[MAX_OPT_ARGS];
89 char *p; 89 char *p;
90 int option = 0; 90 int option = 0;
@@ -102,9 +102,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
102 if (!opts) 102 if (!opts)
103 return 0; 103 return 0;
104 104
105 options = kstrdup(opts, GFP_KERNEL); 105 tmp_options = kstrdup(opts, GFP_KERNEL);
106 if (!options) 106 if (!tmp_options) {
107 ret = -ENOMEM;
107 goto fail_option_alloc; 108 goto fail_option_alloc;
109 }
110 options = tmp_options;
108 111
109 while ((p = strsep(&options, ",")) != NULL) { 112 while ((p = strsep(&options, ",")) != NULL) {
110 int token; 113 int token;
@@ -159,8 +162,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
159 break; 162 break;
160 case Opt_cache: 163 case Opt_cache:
161 s = match_strdup(&args[0]); 164 s = match_strdup(&args[0]);
162 if (!s) 165 if (!s) {
163 goto fail_option_alloc; 166 ret = -ENOMEM;
167 P9_DPRINTK(P9_DEBUG_ERROR,
168 "problem allocating copy of cache arg\n");
169 goto free_and_return;
170 }
164 171
165 if (strcmp(s, "loose") == 0) 172 if (strcmp(s, "loose") == 0)
166 v9ses->cache = CACHE_LOOSE; 173 v9ses->cache = CACHE_LOOSE;
@@ -173,8 +180,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
173 180
174 case Opt_access: 181 case Opt_access:
175 s = match_strdup(&args[0]); 182 s = match_strdup(&args[0]);
176 if (!s) 183 if (!s) {
177 goto fail_option_alloc; 184 ret = -ENOMEM;
185 P9_DPRINTK(P9_DEBUG_ERROR,
186 "problem allocating copy of access arg\n");
187 goto free_and_return;
188 }
178 189
179 v9ses->flags &= ~V9FS_ACCESS_MASK; 190 v9ses->flags &= ~V9FS_ACCESS_MASK;
180 if (strcmp(s, "user") == 0) 191 if (strcmp(s, "user") == 0)
@@ -194,13 +205,11 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
194 continue; 205 continue;
195 } 206 }
196 } 207 }
197 kfree(options);
198 return ret;
199 208
209free_and_return:
210 kfree(tmp_options);
200fail_option_alloc: 211fail_option_alloc:
201 P9_DPRINTK(P9_DEBUG_ERROR, 212 return ret;
202 "failed to allocate copy of option argument\n");
203 return -ENOMEM;
204} 213}
205 214
206/** 215/**
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index 3a7560e35865..ed835836e0dc 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -60,3 +60,4 @@ void v9fs_dentry_release(struct dentry *);
60int v9fs_uflags2omode(int uflags, int extended); 60int v9fs_uflags2omode(int uflags, int extended);
61 61
62ssize_t v9fs_file_readn(struct file *, char *, char __user *, u32, u64); 62ssize_t v9fs_file_readn(struct file *, char *, char __user *, u32, u64);
63void v9fs_blank_wstat(struct p9_wstat *wstat);
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 3902bf43a088..74a0461a9ac0 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -257,6 +257,23 @@ v9fs_file_write(struct file *filp, const char __user * data,
257 return total; 257 return total;
258} 258}
259 259
260static int v9fs_file_fsync(struct file *filp, struct dentry *dentry,
261 int datasync)
262{
263 struct p9_fid *fid;
264 struct p9_wstat wstat;
265 int retval;
266
267 P9_DPRINTK(P9_DEBUG_VFS, "filp %p dentry %p datasync %x\n", filp,
268 dentry, datasync);
269
270 fid = filp->private_data;
271 v9fs_blank_wstat(&wstat);
272
273 retval = p9_client_wstat(fid, &wstat);
274 return retval;
275}
276
260static const struct file_operations v9fs_cached_file_operations = { 277static const struct file_operations v9fs_cached_file_operations = {
261 .llseek = generic_file_llseek, 278 .llseek = generic_file_llseek,
262 .read = do_sync_read, 279 .read = do_sync_read,
@@ -266,6 +283,7 @@ static const struct file_operations v9fs_cached_file_operations = {
266 .release = v9fs_dir_release, 283 .release = v9fs_dir_release,
267 .lock = v9fs_file_lock, 284 .lock = v9fs_file_lock,
268 .mmap = generic_file_readonly_mmap, 285 .mmap = generic_file_readonly_mmap,
286 .fsync = v9fs_file_fsync,
269}; 287};
270 288
271const struct file_operations v9fs_file_operations = { 289const struct file_operations v9fs_file_operations = {
@@ -276,4 +294,5 @@ const struct file_operations v9fs_file_operations = {
276 .release = v9fs_dir_release, 294 .release = v9fs_dir_release,
277 .lock = v9fs_file_lock, 295 .lock = v9fs_file_lock,
278 .mmap = generic_file_readonly_mmap, 296 .mmap = generic_file_readonly_mmap,
297 .fsync = v9fs_file_fsync,
279}; 298};
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 9d03d1ebca6f..a407fa3388c0 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -176,7 +176,7 @@ int v9fs_uflags2omode(int uflags, int extended)
176 * 176 *
177 */ 177 */
178 178
179static void 179void
180v9fs_blank_wstat(struct p9_wstat *wstat) 180v9fs_blank_wstat(struct p9_wstat *wstat)
181{ 181{
182 wstat->type = ~0; 182 wstat->type = ~0;
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 33baf27fac78..34ddda888e63 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -873,6 +873,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
873 brelse(bh); 873 brelse(bh);
874 874
875 unacquire_priv_sbp: 875 unacquire_priv_sbp:
876 kfree(befs_sb->mount_opts.iocharset);
876 kfree(sb->s_fs_info); 877 kfree(sb->s_fs_info);
877 878
878 unacquire_none: 879 unacquire_none:
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 73d6a735b8f3..d11d0289f3d2 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -246,7 +246,8 @@ struct super_block *freeze_bdev(struct block_device *bdev)
246 if (!sb) 246 if (!sb)
247 goto out; 247 goto out;
248 if (sb->s_flags & MS_RDONLY) { 248 if (sb->s_flags & MS_RDONLY) {
249 deactivate_locked_super(sb); 249 sb->s_frozen = SB_FREEZE_TRANS;
250 up_write(&sb->s_umount);
250 mutex_unlock(&bdev->bd_fsfreeze_mutex); 251 mutex_unlock(&bdev->bd_fsfreeze_mutex);
251 return sb; 252 return sb;
252 } 253 }
@@ -307,7 +308,7 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb)
307 BUG_ON(sb->s_bdev != bdev); 308 BUG_ON(sb->s_bdev != bdev);
308 down_write(&sb->s_umount); 309 down_write(&sb->s_umount);
309 if (sb->s_flags & MS_RDONLY) 310 if (sb->s_flags & MS_RDONLY)
310 goto out_deactivate; 311 goto out_unfrozen;
311 312
312 if (sb->s_op->unfreeze_fs) { 313 if (sb->s_op->unfreeze_fs) {
313 error = sb->s_op->unfreeze_fs(sb); 314 error = sb->s_op->unfreeze_fs(sb);
@@ -321,11 +322,11 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb)
321 } 322 }
322 } 323 }
323 324
325out_unfrozen:
324 sb->s_frozen = SB_UNFROZEN; 326 sb->s_frozen = SB_UNFROZEN;
325 smp_wmb(); 327 smp_wmb();
326 wake_up(&sb->s_wait_unfrozen); 328 wake_up(&sb->s_wait_unfrozen);
327 329
328out_deactivate:
329 if (sb) 330 if (sb)
330 deactivate_locked_super(sb); 331 deactivate_locked_super(sb);
331out_unlock: 332out_unlock:
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 87b25543d7d1..2b59201b955c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1982,7 +1982,12 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1982 1982
1983 if (!(sb->s_flags & MS_RDONLY)) { 1983 if (!(sb->s_flags & MS_RDONLY)) {
1984 ret = btrfs_recover_relocation(tree_root); 1984 ret = btrfs_recover_relocation(tree_root);
1985 BUG_ON(ret); 1985 if (ret < 0) {
1986 printk(KERN_WARNING
1987 "btrfs: failed to recover relocation\n");
1988 err = -EINVAL;
1989 goto fail_trans_kthread;
1990 }
1986 } 1991 }
1987 1992
1988 location.objectid = BTRFS_FS_TREE_OBJECTID; 1993 location.objectid = BTRFS_FS_TREE_OBJECTID;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 432a2da4641e..559f72489b3b 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -5402,10 +5402,6 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
5402 int ret; 5402 int ret;
5403 5403
5404 while (level >= 0) { 5404 while (level >= 0) {
5405 if (path->slots[level] >=
5406 btrfs_header_nritems(path->nodes[level]))
5407 break;
5408
5409 ret = walk_down_proc(trans, root, path, wc, lookup_info); 5405 ret = walk_down_proc(trans, root, path, wc, lookup_info);
5410 if (ret > 0) 5406 if (ret > 0)
5411 break; 5407 break;
@@ -5413,6 +5409,10 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
5413 if (level == 0) 5409 if (level == 0)
5414 break; 5410 break;
5415 5411
5412 if (path->slots[level] >=
5413 btrfs_header_nritems(path->nodes[level]))
5414 break;
5415
5416 ret = do_walk_down(trans, root, path, wc, &lookup_info); 5416 ret = do_walk_down(trans, root, path, wc, &lookup_info);
5417 if (ret > 0) { 5417 if (ret > 0) {
5418 path->slots[level]++; 5418 path->slots[level]++;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 96577e8bf9fd..b177ed319612 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3165,10 +3165,9 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
3165 spin_unlock(&tree->buffer_lock); 3165 spin_unlock(&tree->buffer_lock);
3166 goto free_eb; 3166 goto free_eb;
3167 } 3167 }
3168 spin_unlock(&tree->buffer_lock);
3169
3170 /* add one reference for the tree */ 3168 /* add one reference for the tree */
3171 atomic_inc(&eb->refs); 3169 atomic_inc(&eb->refs);
3170 spin_unlock(&tree->buffer_lock);
3172 return eb; 3171 return eb;
3173 3172
3174free_eb: 3173free_eb:
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index c02033596f02..9d0809629967 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1133,7 +1133,7 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
1133 } 1133 }
1134 mutex_lock(&dentry->d_inode->i_mutex); 1134 mutex_lock(&dentry->d_inode->i_mutex);
1135out: 1135out:
1136 return ret > 0 ? EIO : ret; 1136 return ret > 0 ? -EIO : ret;
1137} 1137}
1138 1138
1139static const struct vm_operations_struct btrfs_file_vm_ops = { 1139static const struct vm_operations_struct btrfs_file_vm_ops = {
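The one-character btrfs_sync_file() fix restores the kernel convention that failure is reported as a negative errno: returning positive EIO looked like success (or a positive count) to callers. Illustrated standalone:

#include <errno.h>
#include <stdio.h>

static int sync_file(int ret)
{
        return ret > 0 ? -EIO : ret;    /* was: EIO (positive, wrong) */
}

int main(void)
{
        printf("%d\n", sync_file(1));   /* prints -5 on Linux */
        return 0;
}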
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 8cd109972fa6..4deb280f8969 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1681,24 +1681,6 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1681 * before we start the transaction. It limits the amount of btree 1681 * before we start the transaction. It limits the amount of btree
1682 * reads required while inside the transaction. 1682 * reads required while inside the transaction.
1683 */ 1683 */
1684static noinline void reada_csum(struct btrfs_root *root,
1685 struct btrfs_path *path,
1686 struct btrfs_ordered_extent *ordered_extent)
1687{
1688 struct btrfs_ordered_sum *sum;
1689 u64 bytenr;
1690
1691 sum = list_entry(ordered_extent->list.next, struct btrfs_ordered_sum,
1692 list);
1693 bytenr = sum->sums[0].bytenr;
1694
1695 /*
1696 * we don't care about the results, the point of this search is
1697 * just to get the btree leaves into ram
1698 */
1699 btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, bytenr, 0);
1700}
1701
1702/* as ordered data IO finishes, this gets called so we can finish 1684/* as ordered data IO finishes, this gets called so we can finish
1703 * an ordered extent if the range of bytes in the file it covers are 1685 * an ordered extent if the range of bytes in the file it covers are
1704 * fully written. 1686 * fully written.
@@ -1709,7 +1691,6 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1709 struct btrfs_trans_handle *trans; 1691 struct btrfs_trans_handle *trans;
1710 struct btrfs_ordered_extent *ordered_extent = NULL; 1692 struct btrfs_ordered_extent *ordered_extent = NULL;
1711 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 1693 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1712 struct btrfs_path *path;
1713 int compressed = 0; 1694 int compressed = 0;
1714 int ret; 1695 int ret;
1715 1696
@@ -1717,32 +1698,9 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1717 if (!ret) 1698 if (!ret)
1718 return 0; 1699 return 0;
1719 1700
1720 /* 1701 ordered_extent = btrfs_lookup_ordered_extent(inode, start);
1721 * before we join the transaction, try to do some of our IO.
1722 * This will limit the amount of IO that we have to do with
1723 * the transaction running. We're unlikely to need to do any
1724 * IO if the file extents are new, the disk_i_size checks
1725 * covers the most common case.
1726 */
1727 if (start < BTRFS_I(inode)->disk_i_size) {
1728 path = btrfs_alloc_path();
1729 if (path) {
1730 ret = btrfs_lookup_file_extent(NULL, root, path,
1731 inode->i_ino,
1732 start, 0);
1733 ordered_extent = btrfs_lookup_ordered_extent(inode,
1734 start);
1735 if (!list_empty(&ordered_extent->list)) {
1736 btrfs_release_path(root, path);
1737 reada_csum(root, path, ordered_extent);
1738 }
1739 btrfs_free_path(path);
1740 }
1741 }
1742
1743 if (!ordered_extent)
1744 ordered_extent = btrfs_lookup_ordered_extent(inode, start);
1745 BUG_ON(!ordered_extent); 1702 BUG_ON(!ordered_extent);
1703
1746 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { 1704 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
1747 BUG_ON(!list_empty(&ordered_extent->list)); 1705 BUG_ON(!list_empty(&ordered_extent->list));
1748 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); 1706 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
@@ -5841,7 +5799,9 @@ static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
5841 inode->i_ctime = CURRENT_TIME; 5799 inode->i_ctime = CURRENT_TIME;
5842 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; 5800 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
5843 if (!(mode & FALLOC_FL_KEEP_SIZE) && 5801 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
5844 cur_offset > inode->i_size) { 5802 (actual_len > inode->i_size) &&
5803 (cur_offset > inode->i_size)) {
5804
5845 if (cur_offset > actual_len) 5805 if (cur_offset > actual_len)
5846 i_size = actual_len; 5806 i_size = actual_len;
5847 else 5807 else
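The prealloc_file_range() hunk tightens the condition under which i_size is advanced: the preallocated range must actually extend the file (both actual_len and cur_offset beyond the current i_size) and FALLOC_FL_KEEP_SIZE must be unset. A standalone sketch of the size computation:

#include <stdio.h>

#define FALLOC_FL_KEEP_SIZE 0x01

static long long new_isize(int mode, long long i_size,
                           long long actual_len, long long cur_offset)
{
        if (!(mode & FALLOC_FL_KEEP_SIZE) &&
            actual_len > i_size && cur_offset > i_size)
                return cur_offset > actual_len ? actual_len : cur_offset;
        return i_size;          /* unchanged */
}

int main(void)
{
        /* range entirely inside the file: size must not move */
        printf("%lld\n", new_isize(0, 4096, 1024, 2048));
        return 0;
}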
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index ed3e4a2ec2c8..ab7ab5318745 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -3764,7 +3764,8 @@ out:
3764 BTRFS_DATA_RELOC_TREE_OBJECTID); 3764 BTRFS_DATA_RELOC_TREE_OBJECTID);
3765 if (IS_ERR(fs_root)) 3765 if (IS_ERR(fs_root))
3766 err = PTR_ERR(fs_root); 3766 err = PTR_ERR(fs_root);
3767 btrfs_orphan_cleanup(fs_root); 3767 else
3768 btrfs_orphan_cleanup(fs_root);
3768 } 3769 }
3769 return err; 3770 return err;
3770} 3771}
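The relocation.c hunk adds the missing else so btrfs_orphan_cleanup() is no longer called on an ERR_PTR value. A userspace analogue of guarding the error-pointer case (a NULL return stands in for IS_ERR here, and the names are hypothetical):

#include <stdio.h>

struct root { const char *name; };

static struct root *read_root(int fail)
{
        static struct root r = { "data_reloc_tree" };

        if (fail)
                return NULL;    /* stands in for ERR_PTR(err) */
        return &r;
}

static int cleanup_orphans(int fail)
{
        int err = 0;
        struct root *fs_root = read_root(fail);

        if (!fs_root)
                err = -1;       /* stands in for PTR_ERR(fs_root) */
        else                    /* old code ran cleanup even on error */
                printf("orphan cleanup on %s\n", fs_root->name);
        return err;
}

int main(void)
{
        return cleanup_orphans(0);
}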
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 7b2600b380d7..49503d2edc7e 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,3 +1,7 @@
1Version 1.62
2------------
3Add sockopt=TCP_NODELAY mount option.
4
1Version 1.61 5Version 1.61
2------------ 6------------
3Fix append problem to Samba servers (files opened with O_APPEND could 7Fix append problem to Samba servers (files opened with O_APPEND could
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index ac2b24c192f8..78c1b86d55f6 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -113,5 +113,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
113extern const struct export_operations cifs_export_ops; 113extern const struct export_operations cifs_export_ops;
114#endif /* EXPERIMENTAL */ 114#endif /* EXPERIMENTAL */
115 115
116#define CIFS_VERSION "1.61" 116#define CIFS_VERSION "1.62"
117#endif /* _CIFSFS_H */ 117#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 4b35f7ec0583..ed751bb657db 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -149,6 +149,7 @@ struct TCP_Server_Info {
149 bool svlocal:1; /* local server or remote */ 149 bool svlocal:1; /* local server or remote */
150 bool noblocksnd; /* use blocking sendmsg */ 150 bool noblocksnd; /* use blocking sendmsg */
151 bool noautotune; /* do not autotune send buf sizes */ 151 bool noautotune; /* do not autotune send buf sizes */
152 bool tcp_nodelay;
152 atomic_t inFlight; /* number of requests on the wire to server */ 153 atomic_t inFlight; /* number of requests on the wire to server */
153#ifdef CONFIG_CIFS_STATS2 154#ifdef CONFIG_CIFS_STATS2
154 atomic_t inSend; /* requests trying to send */ 155 atomic_t inSend; /* requests trying to send */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 3bbcaa716b3c..2e9e09ca0e30 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -98,7 +98,7 @@ struct smb_vol {
98 bool nostrictsync:1; /* do not force expensive SMBflush on every sync */ 98 bool nostrictsync:1; /* do not force expensive SMBflush on every sync */
99 unsigned int rsize; 99 unsigned int rsize;
100 unsigned int wsize; 100 unsigned int wsize;
101 unsigned int sockopt; 101 bool sockopt_tcp_nodelay:1;
102 unsigned short int port; 102 unsigned short int port;
103 char *prepath; 103 char *prepath;
104}; 104};
@@ -1142,9 +1142,11 @@ cifs_parse_mount_options(char *options, const char *devname,
1142 simple_strtoul(value, &value, 0); 1142 simple_strtoul(value, &value, 0);
1143 } 1143 }
1144 } else if (strnicmp(data, "sockopt", 5) == 0) { 1144 } else if (strnicmp(data, "sockopt", 5) == 0) {
1145 if (value && *value) { 1145 if (!value || !*value) {
1146 vol->sockopt = 1146 cERROR(1, ("no socket option specified"));
1147 simple_strtoul(value, &value, 0); 1147 continue;
1148 } else if (strnicmp(value, "TCP_NODELAY", 11) == 0) {
1149 vol->sockopt_tcp_nodelay = 1;
1148 } 1150 }
1149 } else if (strnicmp(data, "netbiosname", 4) == 0) { 1151 } else if (strnicmp(data, "netbiosname", 4) == 0) {
1150 if (!value || !*value || (*value == ' ')) { 1152 if (!value || !*value || (*value == ' ')) {
@@ -1514,6 +1516,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
1514 1516
1515 tcp_ses->noblocksnd = volume_info->noblocksnd; 1517 tcp_ses->noblocksnd = volume_info->noblocksnd;
1516 tcp_ses->noautotune = volume_info->noautotune; 1518 tcp_ses->noautotune = volume_info->noautotune;
1519 tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay;
1517 atomic_set(&tcp_ses->inFlight, 0); 1520 atomic_set(&tcp_ses->inFlight, 0);
1518 init_waitqueue_head(&tcp_ses->response_q); 1521 init_waitqueue_head(&tcp_ses->response_q);
1519 init_waitqueue_head(&tcp_ses->request_q); 1522 init_waitqueue_head(&tcp_ses->request_q);
@@ -1764,6 +1767,7 @@ static int
1764ipv4_connect(struct TCP_Server_Info *server) 1767ipv4_connect(struct TCP_Server_Info *server)
1765{ 1768{
1766 int rc = 0; 1769 int rc = 0;
1770 int val;
1767 bool connected = false; 1771 bool connected = false;
1768 __be16 orig_port = 0; 1772 __be16 orig_port = 0;
1769 struct socket *socket = server->ssocket; 1773 struct socket *socket = server->ssocket;
@@ -1845,6 +1849,14 @@ ipv4_connect(struct TCP_Server_Info *server)
1845 socket->sk->sk_rcvbuf = 140 * 1024; 1849 socket->sk->sk_rcvbuf = 140 * 1024;
1846 } 1850 }
1847 1851
1852 if (server->tcp_nodelay) {
1853 val = 1;
1854 rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY,
1855 (char *)&val, sizeof(val));
1856 if (rc)
1857 cFYI(1, ("set TCP_NODELAY socket option error %d", rc));
1858 }
1859
1848 cFYI(1, ("sndbuf %d rcvbuf %d rcvtimeo 0x%lx", 1860 cFYI(1, ("sndbuf %d rcvbuf %d rcvtimeo 0x%lx",
1849 socket->sk->sk_sndbuf, 1861 socket->sk->sk_sndbuf,
1850 socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo)); 1862 socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo));
@@ -1916,6 +1928,7 @@ static int
1916ipv6_connect(struct TCP_Server_Info *server) 1928ipv6_connect(struct TCP_Server_Info *server)
1917{ 1929{
1918 int rc = 0; 1930 int rc = 0;
1931 int val;
1919 bool connected = false; 1932 bool connected = false;
1920 __be16 orig_port = 0; 1933 __be16 orig_port = 0;
1921 struct socket *socket = server->ssocket; 1934 struct socket *socket = server->ssocket;
@@ -1987,6 +2000,15 @@ ipv6_connect(struct TCP_Server_Info *server)
1987 */ 2000 */
1988 socket->sk->sk_rcvtimeo = 7 * HZ; 2001 socket->sk->sk_rcvtimeo = 7 * HZ;
1989 socket->sk->sk_sndtimeo = 5 * HZ; 2002 socket->sk->sk_sndtimeo = 5 * HZ;
2003
2004 if (server->tcp_nodelay) {
2005 val = 1;
2006 rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY,
2007 (char *)&val, sizeof(val));
2008 if (rc)
2009 cFYI(1, ("set TCP_NODELAY socket option error %d", rc));
2010 }
2011
1990 server->ssocket = socket; 2012 server->ssocket = socket;
1991 2013
1992 return rc; 2014 return rc;
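Both connect paths now set TCP_NODELAY on the server socket when the new mount option asks for it, disabling Nagle so small SMB requests are sent immediately; kernel_setsockopt() in the hunks is the in-kernel spelling of the plain socket call. A userspace demonstration of the same option:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        int val = 1;

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        /* SOL_TCP and IPPROTO_TCP are the same level on Linux */
        if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &val, sizeof(val)))
                perror("setsockopt(TCP_NODELAY)");
        close(fd);
        return 0;
}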
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index cf18ee765590..e3fda978f481 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1762,8 +1762,18 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
1762 CIFS_MOUNT_MAP_SPECIAL_CHR); 1762 CIFS_MOUNT_MAP_SPECIAL_CHR);
1763 } 1763 }
1764 1764
1765 if (!rc) 1765 if (!rc) {
1766 rc = inode_setattr(inode, attrs); 1766 rc = inode_setattr(inode, attrs);
1767
1768 /* force revalidate when any of these times are set since some
1769 of the fs types (eg ext3, fat) do not have fine enough
1770 time granularity to match protocol, and we do not have a
 1771 way (yet) to query the server fs's time granularity (and
1772 whether it rounds times down).
1773 */
1774 if (!rc && (attrs->ia_valid & (ATTR_MTIME | ATTR_CTIME)))
1775 cifsInode->time = 0;
1776 }
1767out: 1777out:
1768 kfree(args); 1778 kfree(args);
1769 kfree(full_path); 1779 kfree(full_path);
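The cifs_setattr_unix() hunk forces a revalidation (cifsInode->time = 0) whenever mtime or ctime was explicitly set, because a server filesystem with coarser timestamp granularity may store a rounded value that no longer matches the cached one. A toy illustration with FAT-style 2-second rounding (values hypothetical):

#include <stdio.h>
#include <time.h>

int main(void)
{
        time_t requested = 1001;                        /* odd second */
        time_t stored = requested - (requested % 2);    /* 2s granularity */

        if (stored != requested)
                printf("cached time stale: %ld vs %ld -> revalidate\n",
                       (long)requested, (long)stored);
        return 0;
}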
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index f84062f9a985..c343b14ba2d3 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -77,6 +77,11 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
77 77
78 cFYI(1, ("For %s", name->name)); 78 cFYI(1, ("For %s", name->name));
79 79
80 if (parent->d_op && parent->d_op->d_hash)
81 parent->d_op->d_hash(parent, name);
82 else
83 name->hash = full_name_hash(name->name, name->len);
84
80 dentry = d_lookup(parent, name); 85 dentry = d_lookup(parent, name);
81 if (dentry) { 86 if (dentry) {
82 /* FIXME: check for inode number changes? */ 87 /* FIXME: check for inode number changes? */
@@ -666,12 +671,11 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst,
666 min(len, max_len), nlt, 671 min(len, max_len), nlt,
667 cifs_sb->mnt_cifs_flags & 672 cifs_sb->mnt_cifs_flags &
668 CIFS_MOUNT_MAP_SPECIAL_CHR); 673 CIFS_MOUNT_MAP_SPECIAL_CHR);
674 pqst->len -= nls_nullsize(nlt);
669 } else { 675 } else {
670 pqst->name = filename; 676 pqst->name = filename;
671 pqst->len = len; 677 pqst->len = len;
672 } 678 }
673 pqst->hash = full_name_hash(pqst->name, pqst->len);
674/* cFYI(1, ("filldir on %s",pqst->name)); */
675 return rc; 679 return rc;
676} 680}
677 681
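The readdir change hashes the name with the parent's d_hash operation when the filesystem provides one (for example, case-insensitive hashing) before calling d_lookup(), falling back to full_name_hash(); the redundant hash computation in cifs_get_name_from_search_buf() is dropped. A standalone sketch of the optional-callback-with-default pattern (types hypothetical, hash function simplified):

#include <stdio.h>
#include <string.h>

struct qstr { const char *name; size_t len; unsigned hash; };
struct dentry { unsigned (*d_hash)(const struct qstr *); };

static unsigned default_hash(const struct qstr *q)
{
        unsigned h = 0;

        for (size_t i = 0; i < q->len; i++)
                h = h * 31 + (unsigned char)q->name[i];
        return h;
}

static void hash_name(const struct dentry *parent, struct qstr *q)
{
        if (parent->d_hash)
                q->hash = parent->d_hash(q);    /* fs-specific hash */
        else
                q->hash = default_hash(q);      /* full_name_hash() */
}

int main(void)
{
        struct dentry parent = { 0 };
        struct qstr q = { "file.txt", strlen("file.txt"), 0 };

        hash_name(&parent, &q);
        printf("hash=%u\n", q.hash);
        return 0;
}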
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 7085a6275c4c..aaa9c1c5a5bd 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -223,9 +223,9 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
223 /* null user mount */ 223 /* null user mount */
224 *bcc_ptr = 0; 224 *bcc_ptr = 0;
225 *(bcc_ptr+1) = 0; 225 *(bcc_ptr+1) = 0;
226 } else { /* 300 should be long enough for any conceivable user name */ 226 } else {
227 bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName, 227 bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName,
228 300, nls_cp); 228 MAX_USERNAME_SIZE, nls_cp);
229 } 229 }
230 bcc_ptr += 2 * bytes_ret; 230 bcc_ptr += 2 * bytes_ret;
231 bcc_ptr += 2; /* account for null termination */ 231 bcc_ptr += 2; /* account for null termination */
@@ -246,11 +246,10 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
246 /* copy user */ 246 /* copy user */
247 if (ses->userName == NULL) { 247 if (ses->userName == NULL) {
248 /* BB what about null user mounts - check that we do this BB */ 248 /* BB what about null user mounts - check that we do this BB */
249 } else { /* 300 should be long enough for any conceivable user name */ 249 } else {
250 strncpy(bcc_ptr, ses->userName, 300); 250 strncpy(bcc_ptr, ses->userName, MAX_USERNAME_SIZE);
251 } 251 }
252 /* BB improve check for overflow */ 252 bcc_ptr += strnlen(ses->userName, MAX_USERNAME_SIZE);
253 bcc_ptr += strnlen(ses->userName, 300);
254 *bcc_ptr = 0; 253 *bcc_ptr = 0;
255 bcc_ptr++; /* account for null termination */ 254 bcc_ptr++; /* account for null termination */
256 255
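The sess.c hunk retires the "300 should be long enough" guess in favor of the named MAX_USERNAME_SIZE bound for both the copy and the cursor advance. A standalone sketch of the bounded append; the constant's value here is an assumption for the sketch, not the kernel's definition:

#include <stdio.h>
#include <string.h>

#define MAX_USERNAME_SIZE 256   /* assumed value for illustration */

static size_t append_user(char *bcc_ptr, const char *user)
{
        size_t n;

        strncpy(bcc_ptr, user, MAX_USERNAME_SIZE);
        n = strnlen(user, MAX_USERNAME_SIZE);
        bcc_ptr[n] = '\0';      /* explicit null termination */
        return n + 1;           /* advance past name and terminator */
}

int main(void)
{
        char buf[MAX_USERNAME_SIZE + 1];

        printf("wrote %zu bytes\n", append_user(buf, "guest"));
        return 0;
}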
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 5ef953e6f908..97e01dc0d95f 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -199,9 +199,7 @@ static int setfl(int fd, struct file * filp, unsigned long arg)
199static void f_modown(struct file *filp, struct pid *pid, enum pid_type type, 199static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
200 int force) 200 int force)
201{ 201{
202 unsigned long flags; 202 write_lock_irq(&filp->f_owner.lock);
203
204 write_lock_irqsave(&filp->f_owner.lock, flags);
205 if (force || !filp->f_owner.pid) { 203 if (force || !filp->f_owner.pid) {
206 put_pid(filp->f_owner.pid); 204 put_pid(filp->f_owner.pid);
207 filp->f_owner.pid = get_pid(pid); 205 filp->f_owner.pid = get_pid(pid);
@@ -213,7 +211,7 @@ static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
213 filp->f_owner.euid = cred->euid; 211 filp->f_owner.euid = cred->euid;
214 } 212 }
215 } 213 }
216 write_unlock_irqrestore(&filp->f_owner.lock, flags); 214 write_unlock_irq(&filp->f_owner.lock);
217} 215}
218 216
219int __f_setown(struct file *filp, struct pid *pid, enum pid_type type, 217int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
diff --git a/fs/file_table.c b/fs/file_table.c
index 69652c5bd5f0..b98404b54383 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -253,6 +253,7 @@ void __fput(struct file *file)
253 if (file->f_op && file->f_op->release) 253 if (file->f_op && file->f_op->release)
254 file->f_op->release(inode, file); 254 file->f_op->release(inode, file);
255 security_file_free(file); 255 security_file_free(file);
256 ima_file_free(file);
256 if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL)) 257 if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
257 cdev_put(inode->i_cdev); 258 cdev_put(inode->i_cdev);
258 fops_put(file->f_op); 259 fops_put(file->f_op);
diff --git a/fs/namei.c b/fs/namei.c
index 94a5e60779f9..d62fdc875f22 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1736,8 +1736,7 @@ do_last:
1736 if (nd.root.mnt) 1736 if (nd.root.mnt)
1737 path_put(&nd.root); 1737 path_put(&nd.root);
1738 if (!IS_ERR(filp)) { 1738 if (!IS_ERR(filp)) {
1739 error = ima_path_check(&filp->f_path, filp->f_mode & 1739 error = ima_file_check(filp, acc_mode);
1740 (MAY_READ | MAY_WRITE | MAY_EXEC));
1741 if (error) { 1740 if (error) {
1742 fput(filp); 1741 fput(filp);
1743 filp = ERR_PTR(error); 1742 filp = ERR_PTR(error);
@@ -1797,8 +1796,7 @@ ok:
1797 } 1796 }
1798 filp = nameidata_to_filp(&nd); 1797 filp = nameidata_to_filp(&nd);
1799 if (!IS_ERR(filp)) { 1798 if (!IS_ERR(filp)) {
1800 error = ima_path_check(&filp->f_path, filp->f_mode & 1799 error = ima_file_check(filp, acc_mode);
1801 (MAY_READ | MAY_WRITE | MAY_EXEC));
1802 if (error) { 1800 if (error) {
1803 fput(filp); 1801 fput(filp);
1804 filp = ERR_PTR(error); 1802 filp = ERR_PTR(error);
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index c487810a2366..a0c4016413f1 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -1316,19 +1316,11 @@ rqst_exp_parent(struct svc_rqst *rqstp, struct path *path)
1316 1316
1317static struct svc_export *find_fsidzero_export(struct svc_rqst *rqstp) 1317static struct svc_export *find_fsidzero_export(struct svc_rqst *rqstp)
1318{ 1318{
1319 struct svc_export *exp;
1320 u32 fsidv[2]; 1319 u32 fsidv[2];
1321 1320
1322 mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL); 1321 mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL);
1323 1322
1324 exp = rqst_exp_find(rqstp, FSID_NUM, fsidv); 1323 return rqst_exp_find(rqstp, FSID_NUM, fsidv);
1325 /*
1326 * We shouldn't have accepting an nfsv4 request at all if we
1327 * don't have a pseudoexport!:
1328 */
1329 if (IS_ERR(exp) && PTR_ERR(exp) == -ENOENT)
1330 exp = ERR_PTR(-ESERVERFAULT);
1331 return exp;
1332} 1324}
1333 1325
1334/* 1326/*
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index c194793b642b..97d79eff6b7f 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -752,6 +752,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
752 flags, current_cred()); 752 flags, current_cred());
753 if (IS_ERR(*filp)) 753 if (IS_ERR(*filp))
754 host_err = PTR_ERR(*filp); 754 host_err = PTR_ERR(*filp);
755 host_err = ima_file_check(*filp, access);
755out_nfserr: 756out_nfserr:
756 err = nfserrno(host_err); 757 err = nfserrno(host_err);
757out: 758out:
@@ -2127,7 +2128,6 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
2127 */ 2128 */
2128 path.mnt = exp->ex_path.mnt; 2129 path.mnt = exp->ex_path.mnt;
2129 path.dentry = dentry; 2130 path.dentry = dentry;
2130 err = ima_path_check(&path, acc & (MAY_READ | MAY_WRITE | MAY_EXEC));
2131nfsd_out: 2131nfsd_out:
2132 return err? nfserrno(err) : 0; 2132 return err? nfserrno(err) : 0;
2133} 2133}
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 3dae4a13f6e4..7e9df11260f4 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -599,7 +599,7 @@ bail:
599 return ret; 599 return ret;
600} 600}
601 601
602/* 602/*
603 * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're 603 * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're
604 * particularly interested in the aio/dio case. Like the core uses 604 * particularly interested in the aio/dio case. Like the core uses
605 * i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from 605 * i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from
@@ -670,7 +670,7 @@ static ssize_t ocfs2_direct_IO(int rw,
670 670
671 ret = blockdev_direct_IO_no_locking(rw, iocb, inode, 671 ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
672 inode->i_sb->s_bdev, iov, offset, 672 inode->i_sb->s_bdev, iov, offset,
673 nr_segs, 673 nr_segs,
674 ocfs2_direct_IO_get_blocks, 674 ocfs2_direct_IO_get_blocks,
675 ocfs2_dio_end_io); 675 ocfs2_dio_end_io);
676 676
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index d43d34a1dd31..21c808f752d8 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -368,7 +368,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
368 } 368 }
369 ocfs2_metadata_cache_io_unlock(ci); 369 ocfs2_metadata_cache_io_unlock(ci);
370 370
371 mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n", 371 mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n",
372 (unsigned long long)block, nr, 372 (unsigned long long)block, nr,
373 ((flags & OCFS2_BH_IGNORE_CACHE) || ignore_cache) ? "no" : "yes", 373 ((flags & OCFS2_BH_IGNORE_CACHE) || ignore_cache) ? "no" : "yes",
374 flags); 374 flags);
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index eda5b8bcddd5..5c9890006708 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -78,7 +78,7 @@ static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type);
78 78
79unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD; 79unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD;
80 80
81/* Only sets a new threshold if there are no active regions. 81/* Only sets a new threshold if there are no active regions.
82 * 82 *
83 * No locking or otherwise interesting code is required for reading 83 * No locking or otherwise interesting code is required for reading
84 * o2hb_dead_threshold as it can't change once regions are active and 84 * o2hb_dead_threshold as it can't change once regions are active and
@@ -170,7 +170,7 @@ static void o2hb_write_timeout(struct work_struct *work)
170 170
171 mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u " 171 mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u "
172 "milliseconds\n", reg->hr_dev_name, 172 "milliseconds\n", reg->hr_dev_name,
173 jiffies_to_msecs(jiffies - reg->hr_last_timeout_start)); 173 jiffies_to_msecs(jiffies - reg->hr_last_timeout_start));
174 o2quo_disk_timeout(); 174 o2quo_disk_timeout();
175} 175}
176 176
@@ -624,7 +624,7 @@ static int o2hb_check_slot(struct o2hb_region *reg,
624 "seq %llu last %llu changed %u equal %u\n", 624 "seq %llu last %llu changed %u equal %u\n",
625 slot->ds_node_num, (long long)slot->ds_last_generation, 625 slot->ds_node_num, (long long)slot->ds_last_generation,
626 le32_to_cpu(hb_block->hb_cksum), 626 le32_to_cpu(hb_block->hb_cksum),
627 (unsigned long long)le64_to_cpu(hb_block->hb_seq), 627 (unsigned long long)le64_to_cpu(hb_block->hb_seq),
628 (unsigned long long)slot->ds_last_time, slot->ds_changed_samples, 628 (unsigned long long)slot->ds_last_time, slot->ds_changed_samples,
629 slot->ds_equal_samples); 629 slot->ds_equal_samples);
630 630
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 334f231a422c..d8d0c65ac03c 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -485,7 +485,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
485 } 485 }
486 486
487 if (was_valid && !valid) { 487 if (was_valid && !valid) {
488 printk(KERN_INFO "o2net: no longer connected to " 488 printk(KERN_NOTICE "o2net: no longer connected to "
489 SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc)); 489 SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc));
490 o2net_complete_nodes_nsw(nn); 490 o2net_complete_nodes_nsw(nn);
491 } 491 }
@@ -493,7 +493,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
493 if (!was_valid && valid) { 493 if (!was_valid && valid) {
494 o2quo_conn_up(o2net_num_from_nn(nn)); 494 o2quo_conn_up(o2net_num_from_nn(nn));
495 cancel_delayed_work(&nn->nn_connect_expired); 495 cancel_delayed_work(&nn->nn_connect_expired);
496 printk(KERN_INFO "o2net: %s " SC_NODEF_FMT "\n", 496 printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n",
497 o2nm_this_node() > sc->sc_node->nd_num ? 497 o2nm_this_node() > sc->sc_node->nd_num ?
498 "connected to" : "accepted connection from", 498 "connected to" : "accepted connection from",
499 SC_NODEF_ARGS(sc)); 499 SC_NODEF_ARGS(sc));
@@ -930,7 +930,7 @@ static void o2net_sendpage(struct o2net_sock_container *sc,
930 cond_resched(); 930 cond_resched();
931 continue; 931 continue;
932 } 932 }
933 mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT 933 mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT
934 " failed with %zd\n", size, SC_NODEF_ARGS(sc), ret); 934 " failed with %zd\n", size, SC_NODEF_ARGS(sc), ret);
935 o2net_ensure_shutdown(nn, sc, 0); 935 o2net_ensure_shutdown(nn, sc, 0);
936 break; 936 break;
@@ -1476,14 +1476,14 @@ static void o2net_idle_timer(unsigned long data)
1476 1476
1477 do_gettimeofday(&now); 1477 do_gettimeofday(&now);
1478 1478
1479 printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u " 1479 printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u "
1480 "seconds, shutting it down.\n", SC_NODEF_ARGS(sc), 1480 "seconds, shutting it down.\n", SC_NODEF_ARGS(sc),
1481 o2net_idle_timeout() / 1000, 1481 o2net_idle_timeout() / 1000,
1482 o2net_idle_timeout() % 1000); 1482 o2net_idle_timeout() % 1000);
1483 mlog(ML_NOTICE, "here are some times that might help debug the " 1483 mlog(ML_NOTICE, "here are some times that might help debug the "
1484 "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv " 1484 "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv "
1485 "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n", 1485 "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n",
1486 sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec, 1486 sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec,
1487 now.tv_sec, (long) now.tv_usec, 1487 now.tv_sec, (long) now.tv_usec,
1488 sc->sc_tv_data_ready.tv_sec, (long) sc->sc_tv_data_ready.tv_usec, 1488 sc->sc_tv_data_ready.tv_sec, (long) sc->sc_tv_data_ready.tv_usec,
1489 sc->sc_tv_advance_start.tv_sec, 1489 sc->sc_tv_advance_start.tv_sec,
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index 8d58cfe410b1..96fa7ebc530c 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -32,10 +32,10 @@
32 * on their number */ 32 * on their number */
33#define O2NET_QUORUM_DELAY_MS ((o2hb_dead_threshold + 2) * O2HB_REGION_TIMEOUT_MS) 33#define O2NET_QUORUM_DELAY_MS ((o2hb_dead_threshold + 2) * O2HB_REGION_TIMEOUT_MS)
34 34
35/* 35/*
36 * This version number represents quite a lot, unfortunately. It not 36 * This version number represents quite a lot, unfortunately. It not
37 * only represents the raw network message protocol on the wire but also 37 * only represents the raw network message protocol on the wire but also
38 * locking semantics of the file system using the protocol. It should 38 * locking semantics of the file system using the protocol. It should
39 * be somewhere else, I'm sure, but right now it isn't. 39 * be somewhere else, I'm sure, but right now it isn't.
40 * 40 *
41 * With version 11, we separate out the filesystem locking portion. The 41 * With version 11, we separate out the filesystem locking portion. The
diff --git a/fs/ocfs2/dlm/dlmapi.h b/fs/ocfs2/dlm/dlmapi.h
index b5786a787fab..3cfa114aa391 100644
--- a/fs/ocfs2/dlm/dlmapi.h
+++ b/fs/ocfs2/dlm/dlmapi.h
@@ -95,7 +95,7 @@ const char *dlm_errname(enum dlm_status err);
95 mlog(ML_ERROR, "dlm status = %s\n", dlm_errname((st))); \ 95 mlog(ML_ERROR, "dlm status = %s\n", dlm_errname((st))); \
96} while (0) 96} while (0)
97 97
98#define DLM_LKSB_UNUSED1 0x01 98#define DLM_LKSB_UNUSED1 0x01
99#define DLM_LKSB_PUT_LVB 0x02 99#define DLM_LKSB_PUT_LVB 0x02
100#define DLM_LKSB_GET_LVB 0x04 100#define DLM_LKSB_GET_LVB 0x04
101#define DLM_LKSB_UNUSED2 0x08 101#define DLM_LKSB_UNUSED2 0x08
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index 01cf8cc3d286..dccc439fa087 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -123,7 +123,7 @@ static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
123 dlm_lock_put(lock); 123 dlm_lock_put(lock);
124 /* free up the reserved bast that we are cancelling. 124 /* free up the reserved bast that we are cancelling.
125 * guaranteed that this will not be the last reserved 125 * guaranteed that this will not be the last reserved
126 * ast because *both* an ast and a bast were reserved 126 * ast because *both* an ast and a bast were reserved
127 * to get to this point. the res->spinlock will not be 127 * to get to this point. the res->spinlock will not be
128 * taken here */ 128 * taken here */
129 dlm_lockres_release_ast(dlm, res); 129 dlm_lockres_release_ast(dlm, res);
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
index ca96bce50e18..f283bce776b4 100644
--- a/fs/ocfs2/dlm/dlmconvert.c
+++ b/fs/ocfs2/dlm/dlmconvert.c
@@ -396,7 +396,7 @@ static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
396 /* instead of logging the same network error over 396 /* instead of logging the same network error over
397 * and over, sleep here and wait for the heartbeat 397 * and over, sleep here and wait for the heartbeat
398 * to notice the node is dead. times out after 5s. */ 398 * to notice the node is dead. times out after 5s. */
399 dlm_wait_for_node_death(dlm, res->owner, 399 dlm_wait_for_node_death(dlm, res->owner,
400 DLM_NODE_DEATH_WAIT_MAX); 400 DLM_NODE_DEATH_WAIT_MAX);
401 ret = DLM_RECOVERING; 401 ret = DLM_RECOVERING;
402 mlog(0, "node %u died so returning DLM_RECOVERING " 402 mlog(0, "node %u died so returning DLM_RECOVERING "
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index 42b0bad7a612..0cd24cf54396 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -102,7 +102,7 @@ void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
102 assert_spin_locked(&res->spinlock); 102 assert_spin_locked(&res->spinlock);
103 103
104 stringify_lockname(res->lockname.name, res->lockname.len, 104 stringify_lockname(res->lockname.name, res->lockname.len,
105 buf, sizeof(buf) - 1); 105 buf, sizeof(buf));
106 printk("lockres: %s, owner=%u, state=%u\n", 106 printk("lockres: %s, owner=%u, state=%u\n",
107 buf, res->owner, res->state); 107 buf, res->owner, res->state);
108 printk(" last used: %lu, refcnt: %u, on purge list: %s\n", 108 printk(" last used: %lu, refcnt: %u, on purge list: %s\n",
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 0334000676d3..988c9055fd4e 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -816,7 +816,7 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
816 } 816 }
817 817
818 /* Once the dlm ctxt is marked as leaving then we don't want 818 /* Once the dlm ctxt is marked as leaving then we don't want
819 * to be put in someone's domain map. 819 * to be put in someone's domain map.
820 * Also, explicitly disallow joining at certain troublesome 820 * Also, explicitly disallow joining at certain troublesome
821 * times (ie. during recovery). */ 821 * times (ie. during recovery). */
822 if (dlm && dlm->dlm_state != DLM_CTXT_LEAVING) { 822 if (dlm && dlm->dlm_state != DLM_CTXT_LEAVING) {
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 437698e9465f..733337772671 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -269,7 +269,7 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
269 } 269 }
270 dlm_revert_pending_lock(res, lock); 270 dlm_revert_pending_lock(res, lock);
271 dlm_lock_put(lock); 271 dlm_lock_put(lock);
272 } else if (dlm_is_recovery_lock(res->lockname.name, 272 } else if (dlm_is_recovery_lock(res->lockname.name,
273 res->lockname.len)) { 273 res->lockname.len)) {
274 /* special case for the $RECOVERY lock. 274 /* special case for the $RECOVERY lock.
275 * there will never be an AST delivered to put 275 * there will never be an AST delivered to put
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 03ccf9a7b1f4..a659606dcb95 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -366,7 +366,7 @@ void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
366 struct dlm_master_list_entry *mle; 366 struct dlm_master_list_entry *mle;
367 367
368 assert_spin_locked(&dlm->spinlock); 368 assert_spin_locked(&dlm->spinlock);
369 369
370 list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) { 370 list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
371 if (node_up) 371 if (node_up)
372 dlm_mle_node_up(dlm, mle, NULL, idx); 372 dlm_mle_node_up(dlm, mle, NULL, idx);
@@ -833,7 +833,7 @@ lookup:
833 __dlm_insert_mle(dlm, mle); 833 __dlm_insert_mle(dlm, mle);
834 834
835 /* still holding the dlm spinlock, check the recovery map 835 /* still holding the dlm spinlock, check the recovery map
836 * to see if there are any nodes that still need to be 836 * to see if there are any nodes that still need to be
837 * considered. these will not appear in the mle nodemap 837 * considered. these will not appear in the mle nodemap
838 * but they might own this lockres. wait on them. */ 838 * but they might own this lockres. wait on them. */
839 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); 839 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
@@ -883,7 +883,7 @@ redo_request:
883 msleep(500); 883 msleep(500);
884 } 884 }
885 continue; 885 continue;
886 } 886 }
887 887
888 dlm_kick_recovery_thread(dlm); 888 dlm_kick_recovery_thread(dlm);
889 msleep(1000); 889 msleep(1000);
@@ -939,8 +939,8 @@ wait:
939 res->lockname.name, blocked); 939 res->lockname.name, blocked);
940 if (++tries > 20) { 940 if (++tries > 20) {
941 mlog(ML_ERROR, "%s:%.*s: spinning on " 941 mlog(ML_ERROR, "%s:%.*s: spinning on "
942 "dlm_wait_for_lock_mastery, blocked=%d\n", 942 "dlm_wait_for_lock_mastery, blocked=%d\n",
943 dlm->name, res->lockname.len, 943 dlm->name, res->lockname.len,
944 res->lockname.name, blocked); 944 res->lockname.name, blocked);
945 dlm_print_one_lock_resource(res); 945 dlm_print_one_lock_resource(res);
946 dlm_print_one_mle(mle); 946 dlm_print_one_mle(mle);
@@ -1029,7 +1029,7 @@ recheck:
1029 ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked); 1029 ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
1030 b = (mle->type == DLM_MLE_BLOCK); 1030 b = (mle->type == DLM_MLE_BLOCK);
1031 if ((*blocked && !b) || (!*blocked && b)) { 1031 if ((*blocked && !b) || (!*blocked && b)) {
1032 mlog(0, "%s:%.*s: status change: old=%d new=%d\n", 1032 mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
1033 dlm->name, res->lockname.len, res->lockname.name, 1033 dlm->name, res->lockname.len, res->lockname.name,
1034 *blocked, b); 1034 *blocked, b);
1035 *blocked = b; 1035 *blocked = b;
@@ -1602,7 +1602,7 @@ send_response:
1602 } 1602 }
1603 mlog(0, "%u is the owner of %.*s, cleaning everyone else\n", 1603 mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
1604 dlm->node_num, res->lockname.len, res->lockname.name); 1604 dlm->node_num, res->lockname.len, res->lockname.name);
1605 ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx, 1605 ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
1606 DLM_ASSERT_MASTER_MLE_CLEANUP); 1606 DLM_ASSERT_MASTER_MLE_CLEANUP);
1607 if (ret < 0) { 1607 if (ret < 0) {
1608 mlog(ML_ERROR, "failed to dispatch assert master work\n"); 1608 mlog(ML_ERROR, "failed to dispatch assert master work\n");
@@ -1701,7 +1701,7 @@ again:
1701 1701
1702 if (r & DLM_ASSERT_RESPONSE_REASSERT) { 1702 if (r & DLM_ASSERT_RESPONSE_REASSERT) {
1703 mlog(0, "%.*s: node %u create mles on other " 1703 mlog(0, "%.*s: node %u create mles on other "
1704 "nodes and requests a re-assert\n", 1704 "nodes and requests a re-assert\n",
1705 namelen, lockname, to); 1705 namelen, lockname, to);
1706 reassert = 1; 1706 reassert = 1;
1707 } 1707 }
@@ -1812,7 +1812,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
1812 spin_unlock(&dlm->master_lock); 1812 spin_unlock(&dlm->master_lock);
1813 spin_unlock(&dlm->spinlock); 1813 spin_unlock(&dlm->spinlock);
1814 goto done; 1814 goto done;
1815 } 1815 }
1816 } 1816 }
1817 } 1817 }
1818 spin_unlock(&dlm->master_lock); 1818 spin_unlock(&dlm->master_lock);
@@ -1883,7 +1883,7 @@ ok:
1883 int extra_ref = 0; 1883 int extra_ref = 0;
1884 int nn = -1; 1884 int nn = -1;
1885 int rr, err = 0; 1885 int rr, err = 0;
1886 1886
1887 spin_lock(&mle->spinlock); 1887 spin_lock(&mle->spinlock);
1888 if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION) 1888 if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
1889 extra_ref = 1; 1889 extra_ref = 1;
@@ -1891,7 +1891,7 @@ ok:
1891 /* MASTER mle: if any bits set in the response map 1891 /* MASTER mle: if any bits set in the response map
1892 * then the calling node needs to re-assert to clear 1892 * then the calling node needs to re-assert to clear
1893 * up nodes that this node contacted */ 1893 * up nodes that this node contacted */
1894 while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, 1894 while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
1895 nn+1)) < O2NM_MAX_NODES) { 1895 nn+1)) < O2NM_MAX_NODES) {
1896 if (nn != dlm->node_num && nn != assert->node_idx) 1896 if (nn != dlm->node_num && nn != assert->node_idx)
1897 master_request = 1; 1897 master_request = 1;
@@ -2002,7 +2002,7 @@ kill:
2002 __dlm_print_one_lock_resource(res); 2002 __dlm_print_one_lock_resource(res);
2003 spin_unlock(&res->spinlock); 2003 spin_unlock(&res->spinlock);
2004 spin_unlock(&dlm->spinlock); 2004 spin_unlock(&dlm->spinlock);
2005 *ret_data = (void *)res; 2005 *ret_data = (void *)res;
2006 dlm_put(dlm); 2006 dlm_put(dlm);
2007 return -EINVAL; 2007 return -EINVAL;
2008} 2008}
@@ -2040,10 +2040,10 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
2040 item->u.am.request_from = request_from; 2040 item->u.am.request_from = request_from;
2041 item->u.am.flags = flags; 2041 item->u.am.flags = flags;
2042 2042
2043 if (ignore_higher) 2043 if (ignore_higher)
2044 mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len, 2044 mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
2045 res->lockname.name); 2045 res->lockname.name);
2046 2046
2047 spin_lock(&dlm->work_lock); 2047 spin_lock(&dlm->work_lock);
2048 list_add_tail(&item->list, &dlm->work_list); 2048 list_add_tail(&item->list, &dlm->work_list);
2049 spin_unlock(&dlm->work_lock); 2049 spin_unlock(&dlm->work_lock);
@@ -2133,7 +2133,7 @@ put:
2133 * think that $RECOVERY is currently mastered by a dead node. If so, 2133 * think that $RECOVERY is currently mastered by a dead node. If so,
2134 * we wait a short time to allow that node to get notified by its own 2134 * we wait a short time to allow that node to get notified by its own
2135 * heartbeat stack, then check again. All $RECOVERY lock resources 2135 * heartbeat stack, then check again. All $RECOVERY lock resources
 2136 * mastered by dead nodes are purged when the heartbeat callback is 2136 * mastered by dead nodes are purged when the heartbeat callback is
2137 * fired, so we can know for sure that it is safe to continue once 2137 * fired, so we can know for sure that it is safe to continue once
2138 * the node returns a live node or no node. */ 2138 * the node returns a live node or no node. */
2139static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm, 2139static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
@@ -2174,7 +2174,7 @@ static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
2174 ret = -EAGAIN; 2174 ret = -EAGAIN;
2175 } 2175 }
2176 spin_unlock(&dlm->spinlock); 2176 spin_unlock(&dlm->spinlock);
2177 mlog(0, "%s: reco lock master is %u\n", dlm->name, 2177 mlog(0, "%s: reco lock master is %u\n", dlm->name,
2178 master); 2178 master);
2179 break; 2179 break;
2180 } 2180 }
@@ -2602,7 +2602,7 @@ fail:
2602 2602
2603 mlog(0, "%s:%.*s: timed out during migration\n", 2603 mlog(0, "%s:%.*s: timed out during migration\n",
2604 dlm->name, res->lockname.len, res->lockname.name); 2604 dlm->name, res->lockname.len, res->lockname.name);
2605 /* avoid hang during shutdown when migrating lockres 2605 /* avoid hang during shutdown when migrating lockres
2606 * to a node which also goes down */ 2606 * to a node which also goes down */
2607 if (dlm_is_node_dead(dlm, target)) { 2607 if (dlm_is_node_dead(dlm, target)) {
2608 mlog(0, "%s:%.*s: expected migration " 2608 mlog(0, "%s:%.*s: expected migration "
@@ -2738,7 +2738,7 @@ static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
2738 can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING); 2738 can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
2739 spin_unlock(&res->spinlock); 2739 spin_unlock(&res->spinlock);
2740 2740
2741 /* target has died, so make the caller break out of the 2741 /* target has died, so make the caller break out of the
2742 * wait_event, but caller must recheck the domain_map */ 2742 * wait_event, but caller must recheck the domain_map */
2743 spin_lock(&dlm->spinlock); 2743 spin_lock(&dlm->spinlock);
2744 if (!test_bit(mig_target, dlm->domain_map)) 2744 if (!test_bit(mig_target, dlm->domain_map))
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 2f9e4e19a4f2..344bcf90cbf4 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1050,7 +1050,7 @@ static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
1050 if (lock->ml.node == dead_node) { 1050 if (lock->ml.node == dead_node) {
1051 mlog(0, "AHA! there was " 1051 mlog(0, "AHA! there was "
1052 "a $RECOVERY lock for dead " 1052 "a $RECOVERY lock for dead "
1053 "node %u (%s)!\n", 1053 "node %u (%s)!\n",
1054 dead_node, dlm->name); 1054 dead_node, dlm->name);
1055 list_del_init(&lock->list); 1055 list_del_init(&lock->list);
1056 dlm_lock_put(lock); 1056 dlm_lock_put(lock);
@@ -1164,6 +1164,39 @@ static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
1164 mres->master = master; 1164 mres->master = master;
1165} 1165}
1166 1166
1167static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock,
1168 struct dlm_migratable_lockres *mres,
1169 int queue)
1170{
1171 if (!lock->lksb)
1172 return;
1173
1174 /* Ignore lvb in all locks in the blocked list */
1175 if (queue == DLM_BLOCKED_LIST)
1176 return;
1177
1178 /* Only consider lvbs in locks with granted EX or PR lock levels */
1179 if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE)
1180 return;
1181
1182 if (dlm_lvb_is_empty(mres->lvb)) {
1183 memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
1184 return;
1185 }
1186
1187 /* Ensure the lvb copied for migration matches in other valid locks */
1188 if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))
1189 return;
1190
1191 mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, "
1192 "node=%u\n",
1193 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
1194 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
1195 lock->lockres->lockname.len, lock->lockres->lockname.name,
1196 lock->ml.node);
1197 dlm_print_one_lock_resource(lock->lockres);
1198 BUG();
1199}
1167 1200
1168/* returns 1 if this lock fills the network structure, 1201/* returns 1 if this lock fills the network structure,
1169 * 0 otherwise */ 1202 * 0 otherwise */
@@ -1181,20 +1214,7 @@ static int dlm_add_lock_to_array(struct dlm_lock *lock,
1181 ml->list = queue; 1214 ml->list = queue;
1182 if (lock->lksb) { 1215 if (lock->lksb) {
1183 ml->flags = lock->lksb->flags; 1216 ml->flags = lock->lksb->flags;
1184 /* send our current lvb */ 1217 dlm_prepare_lvb_for_migration(lock, mres, queue);
1185 if (ml->type == LKM_EXMODE ||
1186 ml->type == LKM_PRMODE) {
1187 /* if it is already set, this had better be a PR
1188 * and it has to match */
1189 if (!dlm_lvb_is_empty(mres->lvb) &&
1190 (ml->type == LKM_EXMODE ||
1191 memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) {
1192 mlog(ML_ERROR, "mismatched lvbs!\n");
1193 dlm_print_one_lock_resource(lock->lockres);
1194 BUG();
1195 }
1196 memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
1197 }
1198 } 1218 }
1199 ml->node = lock->ml.node; 1219 ml->node = lock->ml.node;
1200 mres->num_locks++; 1220 mres->num_locks++;
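dlm_prepare_lvb_for_migration(), factored out in the hunk above, copies the first valid lock value block into the migration message and then insists every other granted EX/PR lock carries an identical LVB, BUG()ing on a mismatch; locks on the blocked list are skipped. A standalone sketch of that consistency check (types and lengths hypothetical):

#include <stdio.h>
#include <string.h>

#define LVB_LEN 64

static int prepare_lvb(unsigned char mres_lvb[LVB_LEN],
                       const unsigned char lock_lvb[LVB_LEN], int blocked)
{
        static const unsigned char empty[LVB_LEN];

        if (blocked)                    /* ignore blocked-list locks */
                return 0;
        if (!memcmp(mres_lvb, empty, LVB_LEN)) {
                memcpy(mres_lvb, lock_lvb, LVB_LEN); /* first valid lvb */
                return 0;
        }
        if (!memcmp(mres_lvb, lock_lvb, LVB_LEN))
                return 0;               /* matches: fine */
        return -1;                      /* mismatch: the kernel BUG()s */
}

int main(void)
{
        unsigned char mres[LVB_LEN] = { 0 }, lvb[LVB_LEN] = { 1 };

        printf("%d %d\n", prepare_lvb(mres, lvb, 0),
               prepare_lvb(mres, lvb, 0));
        return 0;
}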
@@ -1730,6 +1750,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1730 struct dlm_lock *lock = NULL; 1750 struct dlm_lock *lock = NULL;
1731 u8 from = O2NM_MAX_NODES; 1751 u8 from = O2NM_MAX_NODES;
1732 unsigned int added = 0; 1752 unsigned int added = 0;
1753 __be64 c;
1733 1754
1734 mlog(0, "running %d locks for this lockres\n", mres->num_locks); 1755 mlog(0, "running %d locks for this lockres\n", mres->num_locks);
1735 for (i=0; i<mres->num_locks; i++) { 1756 for (i=0; i<mres->num_locks; i++) {
@@ -1777,19 +1798,48 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1777 /* lock is always created locally first, and 1798 /* lock is always created locally first, and
1778 * destroyed locally last. it must be on the list */ 1799 * destroyed locally last. it must be on the list */
1779 if (!lock) { 1800 if (!lock) {
1780 __be64 c = ml->cookie; 1801 c = ml->cookie;
1781 mlog(ML_ERROR, "could not find local lock " 1802 mlog(ML_ERROR, "Could not find local lock "
1782 "with cookie %u:%llu!\n", 1803 "with cookie %u:%llu, node %u, "
1804 "list %u, flags 0x%x, type %d, "
1805 "conv %d, highest blocked %d\n",
1783 dlm_get_lock_cookie_node(be64_to_cpu(c)), 1806 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1784 dlm_get_lock_cookie_seq(be64_to_cpu(c))); 1807 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1808 ml->node, ml->list, ml->flags, ml->type,
1809 ml->convert_type, ml->highest_blocked);
1810 __dlm_print_one_lock_resource(res);
1811 BUG();
1812 }
1813
1814 if (lock->ml.node != ml->node) {
1815 c = lock->ml.cookie;
1816 mlog(ML_ERROR, "Mismatched node# in lock "
1817 "cookie %u:%llu, name %.*s, node %u\n",
1818 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1819 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1820 res->lockname.len, res->lockname.name,
1821 lock->ml.node);
1822 c = ml->cookie;
1823 mlog(ML_ERROR, "Migrate lock cookie %u:%llu, "
1824 "node %u, list %u, flags 0x%x, type %d, "
1825 "conv %d, highest blocked %d\n",
1826 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1827 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1828 ml->node, ml->list, ml->flags, ml->type,
1829 ml->convert_type, ml->highest_blocked);
1785 __dlm_print_one_lock_resource(res); 1830 __dlm_print_one_lock_resource(res);
1786 BUG(); 1831 BUG();
1787 } 1832 }
1788 BUG_ON(lock->ml.node != ml->node);
1789 1833
1790 if (tmpq != queue) { 1834 if (tmpq != queue) {
1791 mlog(0, "lock was on %u instead of %u for %.*s\n", 1835 c = ml->cookie;
1792 j, ml->list, res->lockname.len, res->lockname.name); 1836 mlog(0, "Lock cookie %u:%llu was on list %u "
1837 "instead of list %u for %.*s\n",
1838 dlm_get_lock_cookie_node(be64_to_cpu(c)),
1839 dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1840 j, ml->list, res->lockname.len,
1841 res->lockname.name);
1842 __dlm_print_one_lock_resource(res);
1793 spin_unlock(&res->spinlock); 1843 spin_unlock(&res->spinlock);
1794 continue; 1844 continue;
1795 } 1845 }
@@ -1839,7 +1889,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1839 * the lvb. */ 1889 * the lvb. */
1840 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN); 1890 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1841 } else { 1891 } else {
1842 /* otherwise, the node is sending its 1892 /* otherwise, the node is sending its
1843 * most recent valid lvb info */ 1893 * most recent valid lvb info */
1844 BUG_ON(ml->type != LKM_EXMODE && 1894 BUG_ON(ml->type != LKM_EXMODE &&
1845 ml->type != LKM_PRMODE); 1895 ml->type != LKM_PRMODE);
@@ -1886,7 +1936,7 @@ skip_lvb:
1886 spin_lock(&res->spinlock); 1936 spin_lock(&res->spinlock);
1887 list_for_each_entry(lock, queue, list) { 1937 list_for_each_entry(lock, queue, list) {
1888 if (lock->ml.cookie == ml->cookie) { 1938 if (lock->ml.cookie == ml->cookie) {
1889 __be64 c = lock->ml.cookie; 1939 c = lock->ml.cookie;
1890 mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already " 1940 mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
1891 "exists on this lockres!\n", dlm->name, 1941 "exists on this lockres!\n", dlm->name,
1892 res->lockname.len, res->lockname.name, 1942 res->lockname.len, res->lockname.name,
@@ -2114,7 +2164,7 @@ static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
2114 assert_spin_locked(&res->spinlock); 2164 assert_spin_locked(&res->spinlock);
2115 2165
2116 if (res->owner == dlm->node_num) 2166 if (res->owner == dlm->node_num)
2117 /* if this node owned the lockres, and if the dead node 2167 /* if this node owned the lockres, and if the dead node
2118 * had an EX when he died, blank out the lvb */ 2168 * had an EX when he died, blank out the lvb */
2119 search_node = dead_node; 2169 search_node = dead_node;
2120 else { 2170 else {
@@ -2152,7 +2202,7 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
2152 2202
2153 /* this node is the lockres master: 2203 /* this node is the lockres master:
2154 * 1) remove any stale locks for the dead node 2204 * 1) remove any stale locks for the dead node
2155 * 2) if the dead node had an EX when he died, blank out the lvb 2205 * 2) if the dead node had an EX when he died, blank out the lvb
2156 */ 2206 */
2157 assert_spin_locked(&dlm->spinlock); 2207 assert_spin_locked(&dlm->spinlock);
2158 assert_spin_locked(&res->spinlock); 2208 assert_spin_locked(&res->spinlock);
@@ -2193,7 +2243,12 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
2193 mlog(0, "%s:%.*s: freed %u locks for dead node %u, " 2243 mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
2194 "dropping ref from lockres\n", dlm->name, 2244 "dropping ref from lockres\n", dlm->name,
2195 res->lockname.len, res->lockname.name, freed, dead_node); 2245 res->lockname.len, res->lockname.name, freed, dead_node);
2196 BUG_ON(!test_bit(dead_node, res->refmap)); 2246 if (!test_bit(dead_node, res->refmap)) {
2247 mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, "
2248 "but ref was not set\n", dlm->name,
2249 res->lockname.len, res->lockname.name, freed, dead_node);
2250 __dlm_print_one_lock_resource(res);
2251 }
2197 dlm_lockres_clear_refmap_bit(dead_node, res); 2252 dlm_lockres_clear_refmap_bit(dead_node, res);
2198 } else if (test_bit(dead_node, res->refmap)) { 2253 } else if (test_bit(dead_node, res->refmap)) {
2199 mlog(0, "%s:%.*s: dead node %u had a ref, but had " 2254 mlog(0, "%s:%.*s: dead node %u had a ref, but had "
@@ -2260,7 +2315,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
2260 } 2315 }
2261 spin_unlock(&res->spinlock); 2316 spin_unlock(&res->spinlock);
2262 continue; 2317 continue;
2263 } 2318 }
2264 spin_lock(&res->spinlock); 2319 spin_lock(&res->spinlock);
2265 /* zero the lvb if necessary */ 2320 /* zero the lvb if necessary */
2266 dlm_revalidate_lvb(dlm, res, dead_node); 2321 dlm_revalidate_lvb(dlm, res, dead_node);
@@ -2411,7 +2466,7 @@ static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
2411 * this function on each node racing to become the recovery 2466 * this function on each node racing to become the recovery
2412 * master will not stop attempting this until either: 2467 * master will not stop attempting this until either:
2413 * a) this node gets the EX (and becomes the recovery master), 2468 * a) this node gets the EX (and becomes the recovery master),
2414 * or b) dlm->reco.new_master gets set to some nodenum 2469 * or b) dlm->reco.new_master gets set to some nodenum
2415 * != O2NM_INVALID_NODE_NUM (another node will do the reco). 2470 * != O2NM_INVALID_NODE_NUM (another node will do the reco).
2416 * so each time a recovery master is needed, the entire cluster 2471 * so each time a recovery master is needed, the entire cluster
2417 * will sync at this point. if the new master dies, that will 2472 * will sync at this point. if the new master dies, that will
@@ -2424,7 +2479,7 @@ static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
2424 2479
2425 mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n", 2480 mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
2426 dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num); 2481 dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
2427again: 2482again:
2428 memset(&lksb, 0, sizeof(lksb)); 2483 memset(&lksb, 0, sizeof(lksb));
2429 2484
2430 ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY, 2485 ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
@@ -2437,8 +2492,8 @@ again:
2437 if (ret == DLM_NORMAL) { 2492 if (ret == DLM_NORMAL) {
2438 mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n", 2493 mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
2439 dlm->name, dlm->node_num); 2494 dlm->name, dlm->node_num);
2440 2495
2441 /* got the EX lock. check to see if another node 2496 /* got the EX lock. check to see if another node
2442 * just became the reco master */ 2497 * just became the reco master */
2443 if (dlm_reco_master_ready(dlm)) { 2498 if (dlm_reco_master_ready(dlm)) {
2444 mlog(0, "%s: got reco EX lock, but %u will " 2499 mlog(0, "%s: got reco EX lock, but %u will "
@@ -2451,12 +2506,12 @@ again:
2451 /* see if recovery was already finished elsewhere */ 2506 /* see if recovery was already finished elsewhere */
2452 spin_lock(&dlm->spinlock); 2507 spin_lock(&dlm->spinlock);
2453 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { 2508 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
2454 status = -EINVAL; 2509 status = -EINVAL;
 			mlog(0, "%s: got reco EX lock, but "
 			     "node got recovered already\n", dlm->name);
 			if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
 				mlog(ML_ERROR, "%s: new master is %u "
 				     "but no dead node!\n",
 				     dlm->name, dlm->reco.new_master);
 				BUG();
 			}
@@ -2468,7 +2523,7 @@ again:
 	 * set the master and send the messages to begin recovery */
 	if (!status) {
 		mlog(0, "%s: dead=%u, this=%u, sending "
 		     "begin_reco now\n", dlm->name,
 		     dlm->reco.dead_node, dlm->node_num);
 		status = dlm_send_begin_reco_message(dlm,
 			      dlm->reco.dead_node);
@@ -2501,7 +2556,7 @@ again:
 		mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
 		     dlm->name, dlm->node_num);
 		/* another node is master. wait on
 		 * reco.new_master != O2NM_INVALID_NODE_NUM
 		 * for at most one second */
 		wait_event_timeout(dlm->dlm_reco_thread_wq,
 				   dlm_reco_master_ready(dlm),
@@ -2589,7 +2644,13 @@ retry:
 			     "begin reco msg (%d)\n", dlm->name, nodenum, ret);
 			ret = 0;
 		}
-		if (ret == -EAGAIN) {
+
+		/*
+		 * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8,
+		 * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN.
+		 * We are handling both for compatibility reasons.
+		 */
+		if (ret == -EAGAIN || ret == EAGAIN) {
 			mlog(0, "%s: trying to start recovery of node "
 			     "%u, but node %u is waiting for last recovery "
 			     "to complete, backoff for a bit\n", dlm->name,
@@ -2599,7 +2660,7 @@ retry:
 		}
 		if (ret < 0) {
 			struct dlm_lock_resource *res;
 			/* this is now a serious problem, possibly ENOMEM
 			 * in the network stack. must retry */
 			mlog_errno(ret);
 			mlog(ML_ERROR, "begin reco of dlm %s to node %u "
@@ -2612,7 +2673,7 @@ retry:
 			} else {
 				mlog(ML_ERROR, "recovery lock not found\n");
 			}
 			/* sleep for a bit in hopes that we can avoid
 			 * another ENOMEM */
 			msleep(100);
 			goto retry;
@@ -2664,7 +2725,7 @@ int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
 	}
 	if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
 		mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
 		     "node %u changing it to %u\n", dlm->name,
 		     dlm->reco.dead_node, br->node_idx, br->dead_node);
 	}
 	dlm_set_reco_master(dlm, br->node_idx);
@@ -2730,8 +2791,8 @@ stage2:
 	if (ret < 0) {
 		mlog_errno(ret);
 		if (dlm_is_host_down(ret)) {
 			/* this has no effect on this recovery
 			 * session, so set the status to zero to
 			 * finish out the last recovery */
 			mlog(ML_ERROR, "node %u went down after this "
 			     "node finished recovery.\n", nodenum);
@@ -2768,7 +2829,7 @@ int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
 	mlog(0, "%s: node %u finalizing recovery stage%d of "
 	     "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
 	     fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);
 
 	spin_lock(&dlm->spinlock);
 
 	if (dlm->reco.new_master != fr->node_idx) {
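
The begin-reco hunk above tolerates both polarities of the busy return code so new nodes interoperate with peers running the older handler. A minimal standalone C sketch of that dual check; do_request() is a hypothetical stand-in for the message send, not a kernel function:

#include <errno.h>

/* Sketch only: old peers return EAGAIN, newer ones -EAGAIN. */
static int send_with_backoff(int (*do_request)(void))
{
	int ret = do_request();

	if (ret == -EAGAIN || ret == EAGAIN)
		return 1;	/* busy: caller should back off and retry */
	return ret;		/* 0 on success, other negative errno on failure */
}
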
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index 00f53b2aea76..49e29ecd0201 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -190,8 +190,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
 		actions &= ~(DLM_UNLOCK_REMOVE_LOCK|
 			     DLM_UNLOCK_REGRANT_LOCK|
 			     DLM_UNLOCK_CLEAR_CONVERT_TYPE);
 	} else if (status == DLM_RECOVERING ||
 		   status == DLM_MIGRATING ||
 		   status == DLM_FORWARD) {
 		/* must clear the actions because this unlock
 		 * is about to be retried. cannot free or do
@@ -661,14 +661,14 @@ retry:
 	if (call_ast) {
 		mlog(0, "calling unlockast(%p, %d)\n", data, status);
 		if (is_master) {
 			/* it is possible that there is one last bast
 			 * pending. make sure it is flushed, then
 			 * call the unlockast.
 			 * not an issue if this is a mastered remotely,
 			 * since this lock has been removed from the
 			 * lockres queues and cannot be found. */
 			dlm_kick_thread(dlm, NULL);
 			wait_event(dlm->ast_wq,
 				   dlm_lock_basts_flushed(dlm, lock));
 		}
 		(*unlockast)(data, status);
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index c5e4a49e3a12..e044019cb3b1 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -875,6 +875,14 @@ static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lo
 	lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
 
 	lockres->l_level = lockres->l_requested;
+
+	/*
+	 * We set the OCFS2_LOCK_UPCONVERT_FINISHING flag before clearing
+	 * the OCFS2_LOCK_BUSY flag to prevent the dc thread from
+	 * downconverting the lock before the upconvert has fully completed.
+	 */
+	lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
+
 	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
 
 	mlog_exit_void();
@@ -907,8 +915,6 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
 
 	assert_spin_locked(&lockres->l_lock);
 
-	lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
-
 	if (level > lockres->l_blocking) {
 		/* only schedule a downconvert if we haven't already scheduled
 		 * one that goes low enough to satisfy the level we're
@@ -921,6 +927,9 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
 		lockres->l_blocking = level;
 	}
 
+	if (needs_downconvert)
+		lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
+
 	mlog_exit(needs_downconvert);
 	return needs_downconvert;
 }
@@ -1133,6 +1142,7 @@ static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
 	mlog_entry_void();
 	spin_lock_irqsave(&lockres->l_lock, flags);
 	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
+	lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
 	if (convert)
 		lockres->l_action = OCFS2_AST_INVALID;
 	else
@@ -1323,13 +1333,13 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
 again:
 	wait = 0;
 
+	spin_lock_irqsave(&lockres->l_lock, flags);
+
 	if (catch_signals && signal_pending(current)) {
 		ret = -ERESTARTSYS;
-		goto out;
+		goto unlock;
 	}
 
-	spin_lock_irqsave(&lockres->l_lock, flags);
-
 	mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
 			"Cluster lock called on freeing lockres %s! flags "
 			"0x%lx\n", lockres->l_name, lockres->l_flags);
@@ -1346,6 +1356,25 @@ again:
 		goto unlock;
 	}
 
+	if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) {
+		/*
+		 * We've upconverted. If the lock now has a level we can
+		 * work with, we take it. If, however, the lock is not at the
+		 * required level, we go thru the full cycle. One way this could
+		 * happen is if a process requesting an upconvert to PR is
+		 * closely followed by another requesting upconvert to an EX.
+		 * If the process requesting EX lands here, we want it to
+		 * continue attempting to upconvert and let the process
+		 * requesting PR take the lock.
+		 * If multiple processes request upconvert to PR, the first one
+		 * here will take the lock. The others will have to go thru the
+		 * OCFS2_LOCK_BLOCKED check to ensure that there is no pending
+		 * downconvert request.
+		 */
+		if (level <= lockres->l_level)
+			goto update_holders;
+	}
+
 	if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
 	    !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
 		/* is the lock is currently blocked on behalf of
@@ -1416,11 +1445,14 @@ again:
 		goto again;
 	}
 
+update_holders:
 	/* Ok, if we get here then we're good to go. */
 	ocfs2_inc_holders(lockres, level);
 
 	ret = 0;
 unlock:
+	lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
+
 	spin_unlock_irqrestore(&lockres->l_lock, flags);
 out:
 	/*
@@ -3155,7 +3187,7 @@ out:
 /* Mark the lockres as being dropped. It will no longer be
  * queued if blocking, but we still may have to wait on it
  * being dequeued from the downconvert thread before we can consider
  * it safe to drop.
  *
  * You can *not* attempt to call cluster_lock on this lockres anymore. */
 void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres)
@@ -3352,6 +3384,7 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb,
 	unsigned long flags;
 	int blocking;
 	int new_level;
+	int level;
 	int ret = 0;
 	int set_lvb = 0;
 	unsigned int gen;
@@ -3360,9 +3393,17 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb,
 
 	spin_lock_irqsave(&lockres->l_lock, flags);
 
-	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
-
 recheck:
+	/*
+	 * Is it still blocking? If not, we have no more work to do.
+	 */
+	if (!(lockres->l_flags & OCFS2_LOCK_BLOCKED)) {
+		BUG_ON(lockres->l_blocking != DLM_LOCK_NL);
+		spin_unlock_irqrestore(&lockres->l_lock, flags);
+		ret = 0;
+		goto leave;
+	}
+
 	if (lockres->l_flags & OCFS2_LOCK_BUSY) {
 		/* XXX
 		 * This is a *big* race.  The OCFS2_LOCK_PENDING flag
@@ -3401,6 +3442,31 @@ recheck:
 		goto leave;
 	}
 
+	/*
+	 * This prevents livelocks. OCFS2_LOCK_UPCONVERT_FINISHING flag is
+	 * set when the ast is received for an upconvert just before the
+	 * OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast
+	 * on the heels of the ast, we want to delay the downconvert just
+	 * enough to allow the up requestor to do its task. Because this
+	 * lock is in the blocked queue, the lock will be downconverted
+	 * as soon as the requestor is done with the lock.
+	 */
+	if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING)
+		goto leave_requeue;
+
+	/*
+	 * How can we block and yet be at NL?  We were trying to upconvert
+	 * from NL and got canceled.  The code comes back here, and now
+	 * we notice and clear BLOCKING.
+	 */
+	if (lockres->l_level == DLM_LOCK_NL) {
+		BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders);
+		lockres->l_blocking = DLM_LOCK_NL;
+		lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
+		spin_unlock_irqrestore(&lockres->l_lock, flags);
+		goto leave;
+	}
+
 	/* if we're blocking an exclusive and we have *any* holders,
 	 * then requeue. */
 	if ((lockres->l_blocking == DLM_LOCK_EX)
@@ -3438,6 +3504,7 @@ recheck:
 	 * may sleep, so we save off a copy of what we're blocking as
 	 * it may change while we're not holding the spin lock. */
 	blocking = lockres->l_blocking;
+	level = lockres->l_level;
 	spin_unlock_irqrestore(&lockres->l_lock, flags);
 
 	ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
@@ -3446,7 +3513,7 @@ recheck:
 		goto leave;
 
 	spin_lock_irqsave(&lockres->l_lock, flags);
-	if (blocking != lockres->l_blocking) {
+	if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) {
 		/* If this changed underneath us, then we can't drop
 		 * it just yet. */
 		goto recheck;
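
The dlmglue.c changes above hinge on publishing OCFS2_LOCK_UPCONVERT_FINISHING before clearing OCFS2_LOCK_BUSY, so the downconvert thread never observes a window with neither flag set. A loose userspace analogy of that ordering idea, with invented RES_* names and pthreads in place of the kernel spinlock:

#include <pthread.h>

#define RES_BUSY                 0x1UL
#define RES_UPCONVERT_FINISHING  0x2UL

struct res {
	pthread_mutex_t lock;
	unsigned long flags;
};

/* Grant handler: set FINISHING before dropping BUSY, so a concurrent
 * downconvert that sees !BUSY still sees FINISHING and requeues. */
static void grant_done(struct res *r)
{
	pthread_mutex_lock(&r->lock);
	r->flags |= RES_UPCONVERT_FINISHING;
	r->flags &= ~RES_BUSY;
	pthread_mutex_unlock(&r->lock);
}
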
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index 15713cbb865c..19ad145d2af3 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -239,7 +239,7 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len,
 		mlog(0, "Encoding parent: blkno: %llu, generation: %u\n",
 		     (unsigned long long)blkno, generation);
 	}
 
 	*max_len = len;
 
 bail:
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index d35a27f4523e..5328529e7fd2 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -192,7 +192,7 @@ static int ocfs2_try_to_merge_extent_map(struct ocfs2_extent_map_item *emi,
 		emi->ei_clusters += ins->ei_clusters;
 		return 1;
 	} else if ((ins->ei_phys + ins->ei_clusters) == emi->ei_phys &&
-		   (ins->ei_cpos + ins->ei_clusters) == emi->ei_phys &&
+		   (ins->ei_cpos + ins->ei_clusters) == emi->ei_cpos &&
 		   ins->ei_flags == emi->ei_flags) {
 		emi->ei_phys = ins->ei_phys;
 		emi->ei_cpos = ins->ei_cpos;
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 06ccf6a86d35..558ce0312421 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -749,7 +749,7 @@ static int ocfs2_write_zero_page(struct inode *inode,
 	int ret;
 
 	offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
 	/* ugh. in prepare/commit_write, if from==to==start of block, we
 	** skip the prepare. make sure we never send an offset for the start
 	** of a block
 	*/
@@ -1779,7 +1779,7 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
 	struct inode *inode = dentry->d_inode;
 	loff_t saved_pos, end;
 
 	/*
 	 * We start with a read level meta lock and only jump to an ex
 	 * if we need to make modifications here.
 	 */
@@ -2013,8 +2013,8 @@ out_dio:
 	/* buffered aio wouldn't have proper lock coverage today */
 	BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
 
-	if ((file->f_flags & O_DSYNC && !direct_io) || IS_SYNC(inode) ||
-	    (file->f_flags & O_DIRECT && has_refcount)) {
+	if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
+	    ((file->f_flags & O_DIRECT) && has_refcount)) {
 		ret = filemap_fdatawrite_range(file->f_mapping, pos,
 					       pos + count - 1);
 		if (ret < 0)
@@ -2033,7 +2033,7 @@ out_dio:
 						      pos + count - 1);
 	}
 
 	/*
 	 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
 	 * function pointer which is called when o_direct io completes so that
 	 * it can unlock our rw lock. (it's the clustered equivalent of
@@ -2198,7 +2198,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
 		goto bail;
 	}
 
 	/*
 	 * buffered reads protect themselves in ->readpage(). O_DIRECT reads
 	 * need locks to protect pending reads from racing with truncate.
 	 */
@@ -2220,10 +2220,10 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
 	 * We're fine letting folks race truncates and extending
 	 * writes with read across the cluster, just like they can
 	 * locally. Hence no rw_lock during read.
 	 *
 	 * Take and drop the meta data lock to update inode fields
 	 * like i_size. This allows the checks down below
 	 * generic_file_aio_read() a chance of actually working.
 	 */
 	ret = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level);
 	if (ret < 0) {
@@ -2248,7 +2248,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
 bail:
 	if (have_alloc_sem)
 		up_read(&inode->i_alloc_sem);
 	if (rw_level != -1)
 		ocfs2_rw_unlock(inode, rw_level);
 	mlog_exit(ret);
 
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 0297fb8982b8..88459bdd1ff3 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -475,7 +475,7 @@ static int ocfs2_read_locked_inode(struct inode *inode,
 	if (args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY) {
 		status = ocfs2_try_open_lock(inode, 0);
 		if (status) {
 			make_bad_inode(inode);
 			return status;
 		}
 	}
@@ -684,7 +684,7 @@ bail:
 	return status;
 }
 
 /*
  * Serialize with orphan dir recovery. If the process doing
  * recovery on this orphan dir does an iget() with the dir
  * i_mutex held, we'll deadlock here. Instead we detect this
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index 31fbb0619510..7d9d9c132cef 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -7,6 +7,7 @@
 
 #include <linux/fs.h>
 #include <linux/mount.h>
+#include <linux/compat.h>
 
 #define MLOG_MASK_PREFIX ML_INODE
 #include <cluster/masklog.h>
@@ -181,6 +182,10 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 #ifdef CONFIG_COMPAT
 long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 {
+	bool preserve;
+	struct reflink_arguments args;
+	struct inode *inode = file->f_path.dentry->d_inode;
+
 	switch (cmd) {
 	case OCFS2_IOC32_GETFLAGS:
 		cmd = OCFS2_IOC_GETFLAGS;
@@ -195,8 +200,15 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 	case OCFS2_IOC_GROUP_EXTEND:
 	case OCFS2_IOC_GROUP_ADD:
 	case OCFS2_IOC_GROUP_ADD64:
-	case OCFS2_IOC_REFLINK:
 		break;
+	case OCFS2_IOC_REFLINK:
+		if (copy_from_user(&args, (struct reflink_arguments *)arg,
+				   sizeof(args)))
+			return -EFAULT;
+		preserve = (args.preserve != 0);
+
+		return ocfs2_reflink_ioctl(inode, compat_ptr(args.old_path),
+					   compat_ptr(args.new_path), preserve);
 	default:
 		return -ENOIOCTLCMD;
 	}
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index bf34c491ae96..9336c60e3a36 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -2034,7 +2034,7 @@ static int ocfs2_queue_orphans(struct ocfs2_super *osb,
 		status = -ENOENT;
 		mlog_errno(status);
 		return status;
 	}
 
 	mutex_lock(&orphan_dir_inode->i_mutex);
 	status = ocfs2_inode_lock(orphan_dir_inode, NULL, 0);
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 9362eea7424b..740f448041e2 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -136,6 +136,10 @@ enum ocfs2_unlock_action {
 #define OCFS2_LOCK_PENDING       (0x00000400) /* This lockres is pending a
 						 call to dlm_lock.  Only
 						 exists with BUSY set. */
+#define OCFS2_LOCK_UPCONVERT_FINISHING (0x00000800) /* blocks the dc thread
+						     * from downconverting
+						     * before the upconvert
+						     * has completed */
 
 struct ocfs2_lock_res_ops;
 
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 1a1a679e51b5..7638a38c32bc 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -1417,9 +1417,16 @@ static inline int ocfs2_fast_symlink_chars(int blocksize)
 	return blocksize - offsetof(struct ocfs2_dinode, id2.i_symlink);
 }
 
-static inline int ocfs2_max_inline_data(int blocksize)
+static inline int ocfs2_max_inline_data_with_xattr(int blocksize,
+						   struct ocfs2_dinode *di)
 {
-	return blocksize - offsetof(struct ocfs2_dinode, id2.i_data.id_data);
+	if (di && (di->i_dyn_features & OCFS2_INLINE_XATTR_FL))
+		return blocksize -
+			offsetof(struct ocfs2_dinode, id2.i_data.id_data) -
+			di->i_xattr_inline_size;
+	else
+		return blocksize -
+			offsetof(struct ocfs2_dinode, id2.i_data.id_data);
 }
 
 static inline int ocfs2_extent_recs_per_inode(int blocksize)
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 74db2be75dd6..8ae65c9c020c 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -2945,7 +2945,7 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
 
 	while (offset < end) {
 		page_index = offset >> PAGE_CACHE_SHIFT;
-		map_end = (page_index + 1) << PAGE_CACHE_SHIFT;
+		map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
 		if (map_end > end)
 			map_end = end;
 
@@ -2957,8 +2957,12 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
 
 		page = grab_cache_page(mapping, page_index);
 
-		/* This page can't be dirtied before we CoW it out. */
-		BUG_ON(PageDirty(page));
+		/*
+		 * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page
+		 * can't be dirtied before we CoW it out.
+		 */
+		if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
+			BUG_ON(PageDirty(page));
 
 		if (!PageUptodate(page)) {
 			ret = block_read_full_page(page, ocfs2_get_block);
@@ -3170,7 +3174,7 @@ static int ocfs2_cow_sync_writeback(struct super_block *sb,
 
 	while (offset < end) {
 		page_index = offset >> PAGE_CACHE_SHIFT;
-		map_end = (page_index + 1) << PAGE_CACHE_SHIFT;
+		map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
 		if (map_end > end)
 			map_end = end;
 
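
The two map_end hunks above widen the page index before shifting because the product can exceed 32 bits on 32-bit systems. A standalone demonstration of the wraparound, assuming a 12-bit page shift (4 KiB pages):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t page_index = 0x100000;	/* byte offset 4 GiB at 4 KiB pages */

	/* 32-bit arithmetic wraps: yields 0x1000 instead of 0x100001000 */
	uint64_t wrong = (page_index + 1) << 12;
	/* widening before the shift keeps the full 64-bit byte offset */
	uint64_t right = ((uint64_t)page_index + 1) << 12;

	printf("wrong=%#llx right=%#llx\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}
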
diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c
index e49c41050264..3038c92af493 100644
--- a/fs/ocfs2/stack_o2cb.c
+++ b/fs/ocfs2/stack_o2cb.c
@@ -277,7 +277,7 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
 	u32 dlm_key;
 	struct dlm_ctxt *dlm;
 	struct o2dlm_private *priv;
-	struct dlm_protocol_version dlm_version;
+	struct dlm_protocol_version fs_version;
 
 	BUG_ON(conn == NULL);
 	BUG_ON(o2cb_stack.sp_proto == NULL);
@@ -304,18 +304,18 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
 	/* used by the dlm code to make message headers unique, each
 	 * node in this domain must agree on this. */
 	dlm_key = crc32_le(0, conn->cc_name, conn->cc_namelen);
-	dlm_version.pv_major = conn->cc_version.pv_major;
-	dlm_version.pv_minor = conn->cc_version.pv_minor;
+	fs_version.pv_major = conn->cc_version.pv_major;
+	fs_version.pv_minor = conn->cc_version.pv_minor;
 
-	dlm = dlm_register_domain(conn->cc_name, dlm_key, &dlm_version);
+	dlm = dlm_register_domain(conn->cc_name, dlm_key, &fs_version);
 	if (IS_ERR(dlm)) {
 		rc = PTR_ERR(dlm);
 		mlog_errno(rc);
 		goto out_free;
 	}
 
-	conn->cc_version.pv_major = dlm_version.pv_major;
-	conn->cc_version.pv_minor = dlm_version.pv_minor;
+	conn->cc_version.pv_major = fs_version.pv_major;
+	conn->cc_version.pv_minor = fs_version.pv_minor;
 	conn->cc_lockspace = dlm;
 
 	dlm_register_eviction_cb(dlm, &priv->op_eviction_cb);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 26069917a9f5..755cd49a5ef3 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1062,7 +1062,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
1062 "file system, but write access is " 1062 "file system, but write access is "
1063 "unavailable.\n"); 1063 "unavailable.\n");
1064 else 1064 else
1065 mlog_errno(status); 1065 mlog_errno(status);
1066 goto read_super_error; 1066 goto read_super_error;
1067 } 1067 }
1068 1068
diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
index 49b133ccbf11..32499d213fc4 100644
--- a/fs/ocfs2/symlink.c
+++ b/fs/ocfs2/symlink.c
@@ -137,20 +137,20 @@ static void *ocfs2_fast_follow_link(struct dentry *dentry,
 	}
 
 	memcpy(link, target, len);
-	nd_set_link(nd, link);
 
 bail:
+	nd_set_link(nd, status ? ERR_PTR(status) : link);
 	brelse(bh);
 
 	mlog_exit(status);
-	return status ? ERR_PTR(status) : link;
+	return NULL;
 }
 
 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
 {
-	char *link = cookie;
-
-	kfree(link);
+	char *link = nd_get_link(nd);
+	if (!IS_ERR(link))
+		kfree(link);
 }
 
 const struct inode_operations ocfs2_symlink_inode_operations = {
diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c
index c61369342a27..a0a120e82b97 100644
--- a/fs/ocfs2/uptodate.c
+++ b/fs/ocfs2/uptodate.c
@@ -267,8 +267,8 @@ static int ocfs2_buffer_cached(struct ocfs2_caching_info *ci,
 }
 
 /* Warning: even if it returns true, this does *not* guarantee that
  * the block is stored in our inode metadata cache.
  *
  * This can be called under lock_buffer()
  */
 int ocfs2_buffer_uptodate(struct ocfs2_caching_info *ci,
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 38a6948ce0c2..20f31567ccee 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -647,9 +647,9 @@ static inline int ata_id_has_large_logical_sectors(const u16 *id)
 	return id[ATA_ID_SECTOR_SIZE] & (1 << 13);
 }
 
-static inline u8 ata_id_logical_per_physical_sectors(const u16 *id)
+static inline u16 ata_id_logical_per_physical_sectors(const u16 *id)
 {
-	return id[ATA_ID_SECTOR_SIZE] & 0xf;
+	return 1 << (id[ATA_ID_SECTOR_SIZE] & 0xf);
 }
 
 static inline int ata_id_has_lba48(const u16 *id)
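
The ata.h fix above reflects that bits 3:0 of the IDENTIFY sector-size word hold the log2 of logical sectors per physical sector, so the helper must return 1 << n (which can reach 1 << 15, hence u16 rather than u8). A self-contained illustration of the decoding:

#include <stdint.h>
#include <assert.h>

static uint16_t logical_per_physical(uint16_t sector_size_word)
{
	return 1 << (sector_size_word & 0xf);	/* field is log2, not a count */
}

int main(void)
{
	/* e.g. a 4 KiB-physical / 512-byte-logical drive reports n = 3 */
	assert(logical_per_physical(0x6003) == 8);
	return 0;
}
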
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 5be3dab4a695..188fcae10a99 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -15,6 +15,7 @@
 # define __acquire(x)	__context__(x,1)
 # define __release(x)	__context__(x,-1)
 # define __cond_lock(x,c)	((c) ? ({ __acquire(x); 1; }) : 0)
+# define __percpu	__attribute__((noderef, address_space(3)))
 extern void __chk_user_ptr(const volatile void __user *);
 extern void __chk_io_ptr(const volatile void __iomem *);
 #else
@@ -32,6 +33,7 @@ extern void __chk_io_ptr(const volatile void __iomem *);
 # define __acquire(x)	(void)0
 # define __release(x)	(void)0
 # define __cond_lock(x,c) (c)
+# define __percpu
 #endif
 
 #ifdef __KERNEL__
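
The __percpu annotation added above is only meaningful to sparse; with a regular compiler it expands to nothing. A sketch of how such an address-space annotation is consumed (the struct below is invented for illustration):

#ifdef __CHECKER__
# define __percpu	__attribute__((noderef, address_space(3)))
#else
# define __percpu
#endif

/* Under sparse, mixing the two pointer kinds below warns;
 * __percpu pointers must go through the per-cpu accessors. */
struct counters {
	int __percpu *hits;	/* per-cpu allocation */
	int *plain;		/* ordinary pointer */
};
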
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 99dc6d5cf7e5..975837e7d6c0 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -17,7 +17,7 @@ struct linux_binprm;
 extern int ima_bprm_check(struct linux_binprm *bprm);
 extern int ima_inode_alloc(struct inode *inode);
 extern void ima_inode_free(struct inode *inode);
-extern int ima_path_check(struct path *path, int mask);
+extern int ima_file_check(struct file *file, int mask);
 extern void ima_file_free(struct file *file);
 extern int ima_file_mmap(struct file *file, unsigned long prot);
 extern void ima_counts_get(struct file *file);
@@ -38,7 +38,7 @@ static inline void ima_inode_free(struct inode *inode)
 	return;
 }
 
-static inline int ima_path_check(struct path *path, int mask)
+static inline int ima_file_check(struct file *file, int mask)
 {
 	return 0;
 }
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index ba1ba0c5efd1..63d449807d9b 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -11,6 +11,8 @@ struct nf_conntrack_ecache;
 struct netns_ct {
 	atomic_t		count;
 	unsigned int		expect_count;
+	unsigned int		htable_size;
+	struct kmem_cache	*nf_conntrack_cachep;
 	struct hlist_nulls_head	*hash;
 	struct hlist_head	*expect_hash;
 	struct hlist_nulls_head	unconfirmed;
@@ -28,5 +30,6 @@ struct netns_ct {
 #endif
 	int			hash_vmalloc;
 	int			expect_vmalloc;
+	char			*slabname;
 };
 #endif
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 2eb3814d6258..9a4b8b714079 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -40,6 +40,7 @@ struct netns_ipv4 {
 	struct xt_table		*iptable_security;
 	struct xt_table		*nat_table;
 	struct hlist_head	*nat_bysource;
+	unsigned int		nat_htable_size;
 	int			nat_vmalloced;
 #endif
 
diff --git a/init/main.c b/init/main.c
index dac44a9356a5..4cb47a159f02 100644
--- a/init/main.c
+++ b/init/main.c
@@ -657,9 +657,9 @@ asmlinkage void __init start_kernel(void)
 	proc_caches_init();
 	buffer_init();
 	key_init();
+	radix_tree_init();
 	security_init();
 	vfs_caches_init(totalram_pages);
-	radix_tree_init();
 	signals_init();
 	/* rootfs populating might need page-writeback */
 	page_writeback_init();
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 7faaa32fbf4f..e2ab064c6d41 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -880,6 +880,7 @@ void getboottime(struct timespec *ts)
 
 	set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
 }
+EXPORT_SYMBOL_GPL(getboottime);
 
 /**
  * monotonic_to_bootbased - Convert the monotonic time to boot based.
@@ -889,6 +890,7 @@ void monotonic_to_bootbased(struct timespec *ts)
 {
 	*ts = timespec_add_safe(*ts, total_sleep_time);
 }
+EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
 
 unsigned long get_seconds(void)
 {
diff --git a/mm/migrate.c b/mm/migrate.c
index efddbf0926b2..9a0db5bbabe4 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -912,6 +912,9 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
 			goto out_pm;
 
 		err = -ENODEV;
+		if (node < 0 || node >= MAX_NUMNODES)
+			goto out_pm;
+
 		if (!node_state(node, N_HIGH_MEMORY))
 			goto out_pm;
 
diff --git a/net/9p/client.c b/net/9p/client.c
index 8af95b2dddd6..09d4f1e2e4a8 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -69,7 +69,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);
 
 static int parse_opts(char *opts, struct p9_client *clnt)
 {
-	char *options;
+	char *options, *tmp_options;
 	char *p;
 	substring_t args[MAX_OPT_ARGS];
 	int option;
@@ -81,12 +81,13 @@ static int parse_opts(char *opts, struct p9_client *clnt)
 	if (!opts)
 		return 0;
 
-	options = kstrdup(opts, GFP_KERNEL);
-	if (!options) {
+	tmp_options = kstrdup(opts, GFP_KERNEL);
+	if (!tmp_options) {
 		P9_DPRINTK(P9_DEBUG_ERROR,
 			   "failed to allocate copy of option string\n");
 		return -ENOMEM;
 	}
+	options = tmp_options;
 
 	while ((p = strsep(&options, ",")) != NULL) {
 		int token;
@@ -108,6 +109,13 @@ static int parse_opts(char *opts, struct p9_client *clnt)
 			break;
 		case Opt_trans:
 			clnt->trans_mod = v9fs_get_trans_by_name(&args[0]);
+			if(clnt->trans_mod == NULL) {
+				P9_DPRINTK(P9_DEBUG_ERROR,
+				   "Could not find request transport: %s\n",
+				   (char *) &args[0]);
+				ret = -EINVAL;
+				goto free_and_return;
+			}
 			break;
 		case Opt_legacy:
 			clnt->dotu = 0;
@@ -117,7 +125,8 @@ static int parse_opts(char *opts, struct p9_client *clnt)
 		}
 	}
 
-	kfree(options);
+free_and_return:
+	kfree(tmp_options);
 	return ret;
 }
 
@@ -667,18 +676,12 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
 	clnt->trans = NULL;
 	spin_lock_init(&clnt->lock);
 	INIT_LIST_HEAD(&clnt->fidlist);
-	clnt->fidpool = p9_idpool_create();
-	if (IS_ERR(clnt->fidpool)) {
-		err = PTR_ERR(clnt->fidpool);
-		clnt->fidpool = NULL;
-		goto error;
-	}
 
 	p9_tag_init(clnt);
 
 	err = parse_opts(options, clnt);
 	if (err < 0)
-		goto error;
+		goto free_client;
 
 	if (!clnt->trans_mod)
 		clnt->trans_mod = v9fs_get_default_trans();
@@ -687,7 +690,14 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
 		err = -EPROTONOSUPPORT;
 		P9_DPRINTK(P9_DEBUG_ERROR,
 			   "No transport defined or default transport\n");
-		goto error;
+		goto free_client;
+	}
+
+	clnt->fidpool = p9_idpool_create();
+	if (IS_ERR(clnt->fidpool)) {
+		err = PTR_ERR(clnt->fidpool);
+		clnt->fidpool = NULL;
+		goto put_trans;
 	}
 
 	P9_DPRINTK(P9_DEBUG_MUX, "clnt %p trans %p msize %d dotu %d\n",
@@ -695,19 +705,25 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
 
 	err = clnt->trans_mod->create(clnt, dev_name, options);
 	if (err)
-		goto error;
+		goto close_trans;
 
 	if ((clnt->msize+P9_IOHDRSZ) > clnt->trans_mod->maxsize)
 		clnt->msize = clnt->trans_mod->maxsize-P9_IOHDRSZ;
 
 	err = p9_client_version(clnt);
 	if (err)
-		goto error;
+		goto close_trans;
 
 	return clnt;
 
-error:
-	p9_client_destroy(clnt);
+close_trans:
+	clnt->trans_mod->close(clnt);
+destroy_fidpool:
+	p9_idpool_destroy(clnt->fidpool);
+put_trans:
+	v9fs_put_trans(clnt->trans_mod);
+free_client:
+	kfree(clnt);
 	return ERR_PTR(err);
 }
 EXPORT_SYMBOL(p9_client_create);
@@ -1214,10 +1230,11 @@ static int p9_client_statsize(struct p9_wstat *wst, int optional)
 {
 	int ret;
 
+	/* NOTE: size shouldn't include its own length */
 	/* size[2] type[2] dev[4] qid[13] */
 	/* mode[4] atime[4] mtime[4] length[8]*/
 	/* name[s] uid[s] gid[s] muid[s] */
-	ret = 2+2+4+13+4+4+4+8+2+2+2+2;
+	ret = 2+4+13+4+4+4+8+2+2+2+2;
 
 	if (wst->name)
 		ret += strlen(wst->name);
@@ -1258,7 +1275,7 @@ int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
 		wst->name, wst->uid, wst->gid, wst->muid, wst->extension,
 		wst->n_uid, wst->n_gid, wst->n_muid);
 
-	req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size, wst);
+	req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size+2, wst);
 	if (IS_ERR(req)) {
 		err = PTR_ERR(req);
 		goto error;
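
All three 9p parse_opts() fixes in this series follow from the same property: strsep() advances the pointer it is handed (leaving it NULL at the end), so the original kstrdup() return value must be kept separately for kfree(). A userspace sketch of the pattern, with strdup/free standing in for kstrdup/kfree:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void parse(const char *opts)
{
	char *tmp_options = strdup(opts);	/* keep this for free() */
	char *options = tmp_options;		/* strsep() cursor */
	char *p;

	if (!tmp_options)
		return;
	while ((p = strsep(&options, ",")) != NULL)
		printf("token: %s\n", p);
	free(tmp_options);	/* options itself is NULL by now */
}
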
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index be1cb909d8c0..31d0b05582a9 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -714,7 +714,7 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
 	char *p;
 	substring_t args[MAX_OPT_ARGS];
 	int option;
-	char *options;
+	char *options, *tmp_options;
 	int ret;
 
 	opts->port = P9_PORT;
@@ -724,12 +724,13 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
 	if (!params)
 		return 0;
 
-	options = kstrdup(params, GFP_KERNEL);
-	if (!options) {
+	tmp_options = kstrdup(params, GFP_KERNEL);
+	if (!tmp_options) {
 		P9_DPRINTK(P9_DEBUG_ERROR,
 			   "failed to allocate copy of option string\n");
 		return -ENOMEM;
 	}
+	options = tmp_options;
 
 	while ((p = strsep(&options, ",")) != NULL) {
 		int token;
@@ -760,7 +761,8 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
 			continue;
 		}
 	}
-	kfree(options);
+
+	kfree(tmp_options);
 	return 0;
 }
 
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 65cb29db03f8..2c95a89c0f46 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -166,7 +166,7 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
 	char *p;
 	substring_t args[MAX_OPT_ARGS];
 	int option;
-	char *options;
+	char *options, *tmp_options;
 	int ret;
 
 	opts->port = P9_PORT;
@@ -177,12 +177,13 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
 	if (!params)
 		return 0;
 
-	options = kstrdup(params, GFP_KERNEL);
-	if (!options) {
+	tmp_options = kstrdup(params, GFP_KERNEL);
+	if (!tmp_options) {
 		P9_DPRINTK(P9_DEBUG_ERROR,
 			   "failed to allocate copy of option string\n");
 		return -ENOMEM;
 	}
+	options = tmp_options;
 
 	while ((p = strsep(&options, ",")) != NULL) {
 		int token;
@@ -216,7 +217,7 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
 	}
 	/* RQ must be at least as large as the SQ */
 	opts->rq_depth = max(opts->rq_depth, opts->sq_depth);
-	kfree(options);
+	kfree(tmp_options);
 	return 0;
 }
 
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index ea1e3daabefe..cb50f4ae5eef 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -102,7 +102,8 @@ static void p9_virtio_close(struct p9_client *client)
 	struct virtio_chan *chan = client->trans;
 
 	mutex_lock(&virtio_9p_lock);
-	chan->inuse = false;
+	if (chan)
+		chan->inuse = false;
 	mutex_unlock(&virtio_9p_lock);
 }
 
@@ -311,6 +312,7 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
 	}
 
 	client->trans = (void *)chan;
+	client->status = Connected;
 	chan->client = client;
 
 	return 0;
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index b7c4224f4e7d..b10e3cdb08f8 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -377,6 +377,9 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
 
 	if (acl->state == BT_CONNECTED &&
 			(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
+		acl->power_save = 1;
+		hci_conn_enter_active_mode(acl);
+
 		if (lmp_esco_capable(hdev))
 			hci_setup_sync(sco, acl->handle);
 		else
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 28517bad796c..592da5c909c1 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1699,6 +1699,7 @@ static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_bu
 		break;
 
 	case 0x1c:	/* SCO interval rejected */
+	case 0x1a:	/* Unsupported Remote Feature */
 	case 0x1f:	/* Unspecified error */
 		if (conn->out && conn->attempt < 2) {
 			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 6cf526d06e21..fc6ec1e72652 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -703,29 +703,9 @@ static void hidp_close(struct hid_device *hid)
 static int hidp_parse(struct hid_device *hid)
 {
 	struct hidp_session *session = hid->driver_data;
-	struct hidp_connadd_req *req = session->req;
-	unsigned char *buf;
-	int ret;
-
-	buf = kmalloc(req->rd_size, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	if (copy_from_user(buf, req->rd_data, req->rd_size)) {
-		kfree(buf);
-		return -EFAULT;
-	}
-
-	ret = hid_parse_report(session->hid, buf, req->rd_size);
-
-	kfree(buf);
-
-	if (ret)
-		return ret;
-
-	session->req = NULL;
 
-	return 0;
+	return hid_parse_report(session->hid, session->rd_data,
+			session->rd_size);
 }
 
 static int hidp_start(struct hid_device *hid)
@@ -770,12 +750,24 @@ static int hidp_setup_hid(struct hidp_session *session,
 	bdaddr_t src, dst;
 	int err;
 
+	session->rd_data = kzalloc(req->rd_size, GFP_KERNEL);
+	if (!session->rd_data)
+		return -ENOMEM;
+
+	if (copy_from_user(session->rd_data, req->rd_data, req->rd_size)) {
+		err = -EFAULT;
+		goto fault;
+	}
+	session->rd_size = req->rd_size;
+
 	hid = hid_allocate_device();
-	if (IS_ERR(hid))
-		return PTR_ERR(hid);
+	if (IS_ERR(hid)) {
+		err = PTR_ERR(hid);
+		goto fault;
+	}
 
 	session->hid = hid;
-	session->req = req;
+
 	hid->driver_data = session;
 
 	baswap(&src, &bt_sk(session->ctrl_sock->sk)->src);
@@ -806,6 +798,10 @@ failed:
 	hid_destroy_device(hid);
 	session->hid = NULL;
 
+fault:
+	kfree(session->rd_data);
+	session->rd_data = NULL;
+
 	return err;
 }
 
@@ -900,6 +896,9 @@ unlink:
 		session->hid = NULL;
 	}
 
+	kfree(session->rd_data);
+	session->rd_data = NULL;
+
 purge:
 	skb_queue_purge(&session->ctrl_transmit);
 	skb_queue_purge(&session->intr_transmit);
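
The hidp change above moves the copy_from_user() of the report descriptor out of hidp_parse() into setup, stashing one kernel copy in the session. A condensed sketch of that setup-time pattern; the struct and function names below are ours, not the driver's:

#include <linux/slab.h>
#include <linux/uaccess.h>

struct rd_holder {
	u8 *rd_data;
	unsigned int rd_size;
};

static int stash_descriptor(struct rd_holder *h,
			    const void __user *buf, unsigned int len)
{
	h->rd_data = kzalloc(len, GFP_KERNEL);
	if (!h->rd_data)
		return -ENOMEM;
	if (copy_from_user(h->rd_data, buf, len)) {
		kfree(h->rd_data);
		h->rd_data = NULL;
		return -EFAULT;
	}
	h->rd_size = len;	/* later parsing reads only this kernel copy */
	return 0;
}
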
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index faf3d74c3586..a4e215d50c10 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -154,7 +154,9 @@ struct hidp_session {
 	struct sk_buff_head ctrl_transmit;
 	struct sk_buff_head intr_transmit;
 
-	struct hidp_connadd_req *req;
+	/* Report descriptor */
+	__u8 *rd_data;
+	uint rd_size;
 };
 
 static inline void hidp_schedule(struct hidp_session *session)
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index fc5ee3296e22..89f4a59eb82b 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -252,7 +252,6 @@ static void rfcomm_session_timeout(unsigned long arg)
 	BT_DBG("session %p state %ld", s, s->state);
 
 	set_bit(RFCOMM_TIMED_OUT, &s->flags);
-	rfcomm_session_put(s);
 	rfcomm_schedule(RFCOMM_SCHED_TIMEO);
 }
 
@@ -1151,7 +1150,11 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
 			break;
 
 		case BT_DISCONN:
-			rfcomm_session_put(s);
+			/* When socket is closed and we are not RFCOMM
+			 * initiator rfcomm_process_rx already calls
+			 * rfcomm_session_put() */
+			if (s->sock->sk->sk_state != BT_CLOSED)
+				rfcomm_session_put(s);
 			break;
 		}
 	}
@@ -1920,6 +1923,7 @@ static inline void rfcomm_process_sessions(void)
 		if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) {
 			s->state = BT_DISCONN;
 			rfcomm_send_disc(s, 0);
+			rfcomm_session_put(s);
 			continue;
 		}
 
diff --git a/net/core/dst.c b/net/core/dst.c
index 57bc4d5b8d08..cb1b3488b739 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -17,6 +17,7 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <net/net_namespace.h>
+#include <linux/sched.h>
 
 #include <net/dst.h>
 
@@ -79,6 +80,7 @@ loop:
 	while ((dst = next) != NULL) {
 		next = dst->next;
 		prefetch(&next->next);
+		cond_resched();
 		if (likely(atomic_read(&dst->__refcnt))) {
 			last->next = dst;
 			last = dst;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index de0c2c726420..2e692afdc55d 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3524,6 +3524,7 @@ static int pktgen_thread_worker(void *arg)
 			wait_event_interruptible_timeout(t->queue,
 							 t->control != 0,
 							 HZ/10);
+			try_to_freeze();
 			continue;
 		}
 
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index 57dfb9c8c4f2..ff16e9df1969 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -83,7 +83,7 @@ static struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_f
 	va_list args;
 
 	va_start(args, fmt);
-	vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args);
+	vsnprintf(slab_name_fmt, CCID_SLAB_NAME_LENGTH, fmt, args);
 	va_end(args);
 
 	slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0,
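
The vsnprintf fix above addresses a classic C pitfall: slab_name_fmt arrives as a char * parameter, so sizeof() inside the callee measures the pointer (4 or 8 bytes), not the 32-byte buffer, silently truncating the slab name. A standalone demonstration:

#include <stdio.h>

static void fill(char *buf)
{
	/* WRONG: sizeof(char *) is 4 or 8, not the buffer size */
	snprintf(buf, sizeof(buf), "ccid-rx-slab");
}

static void fill_fixed(char *buf, size_t len)
{
	snprintf(buf, len, "ccid-rx-slab");	/* pass the real capacity */
}

int main(void)
{
	char name[32];

	fill(name);			/* truncated, e.g. "ccid-rx" on LP64 */
	fill_fixed(name, sizeof(name));	/* full string */
	printf("%s\n", name);
	return 0;
}
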
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index 269958bf7fe9..6df6f8ac9636 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -19,7 +19,9 @@
 #include <linux/list.h>
 #include <linux/module.h>
 
-#define CCID_MAX 255
+/* maximum value for a CCID (RFC 4340, 19.5) */
+#define CCID_MAX		255
+#define CCID_SLAB_NAME_LENGTH	32
 
 struct tcp_info;
 
@@ -49,8 +51,8 @@ struct ccid_operations {
 	const char		*ccid_name;
 	struct kmem_cache	*ccid_hc_rx_slab,
 				*ccid_hc_tx_slab;
-	char			ccid_hc_rx_slab_name[32];
-	char			ccid_hc_tx_slab_name[32];
+	char			ccid_hc_rx_slab_name[CCID_SLAB_NAME_LENGTH];
+	char			ccid_hc_tx_slab_name[CCID_SLAB_NAME_LENGTH];
 	__u32			ccid_hc_rx_obj_size,
 				ccid_hc_tx_obj_size;
 	/* Interface Routines */
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
index bace1d8cbcfd..f5b3464f1242 100644
--- a/net/dccp/probe.c
+++ b/net/dccp/probe.c
@@ -161,8 +161,8 @@ static __init int dccpprobe_init(void)
 	if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops))
 		goto err0;
 
-	ret = try_then_request_module((register_jprobe(&dccp_send_probe) == 0),
-					"dccp");
+	try_then_request_module((ret = register_jprobe(&dccp_send_probe)) == 0,
+				"dccp");
 	if (ret)
 		goto err1;
 
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 06632762ba5f..90203e1b9187 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -925,10 +925,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
 	if (t && !IS_ERR(t)) {
 		struct arpt_getinfo info;
 		const struct xt_table_info *private = t->private;
-
 #ifdef CONFIG_COMPAT
+		struct xt_table_info tmp;
+
 		if (compat) {
-			struct xt_table_info tmp;
 			ret = compat_table_info(private, &tmp);
 			xt_compat_flush_offsets(NFPROTO_ARP);
 			private = &tmp;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 572330a552ef..3ce53cf13d5a 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1132,10 +1132,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
 	if (t && !IS_ERR(t)) {
 		struct ipt_getinfo info;
 		const struct xt_table_info *private = t->private;
-
 #ifdef CONFIG_COMPAT
+		struct xt_table_info tmp;
+
 		if (compat) {
-			struct xt_table_info tmp;
 			ret = compat_table_info(private, &tmp);
 			xt_compat_flush_offsets(AF_INET);
 			private = &tmp;
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index d171b123a656..d1ea38a7c490 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -210,7 +210,7 @@ static ctl_table ip_ct_sysctl_table[] = {
         },
         {
                 .procname       = "ip_conntrack_buckets",
-                .data           = &nf_conntrack_htable_size,
+                .data           = &init_net.ct.htable_size,
                 .maxlen         = sizeof(unsigned int),
                 .mode           = 0444,
                 .proc_handler   = proc_dointvec,
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 8668a3defda6..2fb7b76da94f 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -32,7 +32,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
         struct hlist_nulls_node *n;

         for (st->bucket = 0;
-             st->bucket < nf_conntrack_htable_size;
+             st->bucket < net->ct.htable_size;
              st->bucket++) {
                 n = rcu_dereference(net->ct.hash[st->bucket].first);
                 if (!is_a_nulls(n))
@@ -50,7 +50,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
         head = rcu_dereference(head->next);
         while (is_a_nulls(head)) {
                 if (likely(get_nulls_value(head) == st->bucket)) {
-                        if (++st->bucket >= nf_conntrack_htable_size)
+                        if (++st->bucket >= net->ct.htable_size)
                                 return NULL;
                 }
                 head = rcu_dereference(net->ct.hash[st->bucket].first);
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index fe1a64479dd0..26066a2327ad 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -35,9 +35,6 @@ static DEFINE_SPINLOCK(nf_nat_lock);

 static struct nf_conntrack_l3proto *l3proto __read_mostly;

-/* Calculated at init based on memory size */
-static unsigned int nf_nat_htable_size __read_mostly;
-
 #define MAX_IP_NAT_PROTO 256
 static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO]
                                                 __read_mostly;
@@ -72,7 +69,7 @@ EXPORT_SYMBOL_GPL(nf_nat_proto_put);

 /* We keep an extra hash for each conntrack, for fast searching. */
 static inline unsigned int
-hash_by_src(const struct nf_conntrack_tuple *tuple)
+hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple)
 {
         unsigned int hash;

@@ -80,7 +77,7 @@ hash_by_src(const struct nf_conntrack_tuple *tuple)
         hash = jhash_3words((__force u32)tuple->src.u3.ip,
                             (__force u32)tuple->src.u.all,
                             tuple->dst.protonum, 0);
-        return ((u64)hash * nf_nat_htable_size) >> 32;
+        return ((u64)hash * net->ipv4.nat_htable_size) >> 32;
 }

 /* Is this tuple already taken? (not by us) */
@@ -147,7 +144,7 @@ find_appropriate_src(struct net *net,
                      struct nf_conntrack_tuple *result,
                      const struct nf_nat_range *range)
 {
-        unsigned int h = hash_by_src(tuple);
+        unsigned int h = hash_by_src(net, tuple);
         const struct nf_conn_nat *nat;
         const struct nf_conn *ct;
         const struct hlist_node *n;
@@ -330,7 +327,7 @@ nf_nat_setup_info(struct nf_conn *ct,
         if (have_to_hash) {
                 unsigned int srchash;

-                srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+                srchash = hash_by_src(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                 spin_lock_bh(&nf_nat_lock);
                 /* nf_conntrack_alter_reply might re-allocate exntension aera */
                 nat = nfct_nat(ct);
@@ -679,8 +676,10 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct,

 static int __net_init nf_nat_net_init(struct net *net)
 {
-        net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size,
-                                                       &net->ipv4.nat_vmalloced, 0);
+        /* Leave them the same for the moment. */
+        net->ipv4.nat_htable_size = net->ct.htable_size;
+        net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size,
+                                                       &net->ipv4.nat_vmalloced, 0);
         if (!net->ipv4.nat_bysource)
                 return -ENOMEM;
         return 0;
@@ -703,7 +702,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
         nf_ct_iterate_cleanup(net, &clean_nat, NULL);
         synchronize_rcu();
         nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced,
-                             nf_nat_htable_size);
+                             net->ipv4.nat_htable_size);
 }

 static struct pernet_operations nf_nat_net_ops = {
@@ -724,9 +723,6 @@ static int __init nf_nat_init(void)
                 return ret;
         }

-        /* Leave them the same for the moment. */
-        nf_nat_htable_size = nf_conntrack_htable_size;
-
         ret = register_pernet_subsys(&nf_nat_net_ops);
         if (ret < 0)
                 goto cleanup_extend;
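
A pattern worth naming, since every hash lookup in this series relies on it: the bucket index comes from ((u64)hash * size) >> 32 rather than hash % size, which avoids a division and maps a 32-bit hash uniformly onto a table of any size. Standalone demonstration (the size value is arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    /* Map a 32-bit hash onto [0, size) by taking the high 32 bits of
     * the 64-bit product -- the same arithmetic as hash_by_src() above. */
    static uint32_t bucket_of(uint32_t hash, uint32_t size)
    {
            return (uint32_t)(((uint64_t)hash * size) >> 32);
    }

    int main(void)
    {
            uint32_t size = 16384;

            printf("%u %u %u\n",
                   bucket_of(0x00000000u, size),  /* 0        */
                   bucket_of(0x80000000u, size),  /* size / 2 */
                   bucket_of(0xffffffffu, size)); /* size - 1 */
            return 0;
    }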
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 480d7f8c9802..8a7e0f52e177 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1164,10 +1164,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
         if (t && !IS_ERR(t)) {
                 struct ip6t_getinfo info;
                 const struct xt_table_info *private = t->private;
-
 #ifdef CONFIG_COMPAT
+                struct xt_table_info tmp;
+
                 if (compat) {
-                        struct xt_table_info tmp;
                         ret = compat_table_info(private, &tmp);
                         xt_compat_flush_offsets(AF_INET6);
                         private = &tmp;
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index 156020d138b5..6b3602de359a 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -698,15 +698,18 @@ dev_irnet_ioctl(

       /* Query PPP channel and unit number */
     case PPPIOCGCHAN:
+      lock_kernel();
       if(ap->ppp_open && !put_user(ppp_channel_index(&ap->chan),
                                    (int __user *)argp))
         err = 0;
+      unlock_kernel();
       break;
     case PPPIOCGUNIT:
       lock_kernel();
       if(ap->ppp_open && !put_user(ppp_unit_number(&ap->chan),
                                    (int __user *)argp))
         err = 0;
+      unlock_kernel();
       break;

       /* All these ioctls can be passed both directly and from ppp_generic,
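
The irnet fix restores a mechanical invariant: every path through an ioctl case must release exactly the locks it acquired, and sibling cases should follow the same locking pattern (PPPIOCGCHAN previously ran without the lock its PPPIOCGUNIT twin took). A pthread sketch of the shape (all names hypothetical):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

    static int do_ioctl(int ppp_open, int *out)
    {
            int err = -25;                    /* default: -ENOTTY */

            pthread_mutex_lock(&big_lock);    /* was missing on one path */
            if (ppp_open) {
                    *out = 42;                /* stand-in for channel index */
                    err = 0;
            }
            pthread_mutex_unlock(&big_lock);  /* ...and so was this */
            return err;
    }

    int main(void)
    {
            int chan = 0;

            printf("err=%d chan=%d\n", do_ioctl(1, &chan), chan);
            return 0;
    }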
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 76fa6fef6473..539f43bc97db 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3794,9 +3794,9 @@ static struct pernet_operations pfkey_net_ops = {

 static void __exit ipsec_pfkey_exit(void)
 {
-        unregister_pernet_subsys(&pfkey_net_ops);
         xfrm_unregister_km(&pfkeyv2_mgr);
         sock_unregister(PF_KEY);
+        unregister_pernet_subsys(&pfkey_net_ops);
         proto_unregister(&key_proto);
 }

@@ -3807,21 +3807,22 @@ static int __init ipsec_pfkey_init(void)
         if (err != 0)
                 goto out;

-        err = sock_register(&pfkey_family_ops);
+        err = register_pernet_subsys(&pfkey_net_ops);
         if (err != 0)
                 goto out_unregister_key_proto;
+        err = sock_register(&pfkey_family_ops);
+        if (err != 0)
+                goto out_unregister_pernet;
         err = xfrm_register_km(&pfkeyv2_mgr);
         if (err != 0)
                 goto out_sock_unregister;
-        err = register_pernet_subsys(&pfkey_net_ops);
-        if (err != 0)
-                goto out_xfrm_unregister_km;
 out:
         return err;
-out_xfrm_unregister_km:
-        xfrm_unregister_km(&pfkeyv2_mgr);
+
 out_sock_unregister:
         sock_unregister(PF_KEY);
+out_unregister_pernet:
+        unregister_pernet_subsys(&pfkey_net_ops);
 out_unregister_key_proto:
         proto_unregister(&key_proto);
         goto out;
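
The af_key reordering applies the usual kernel discipline: exit (and every error unwind) tears down in exactly the reverse order of init, with one goto label per completed step so a failure midway releases only what was actually acquired. A skeleton of the pattern (step names hypothetical):

    #include <stdio.h>

    static int step(const char *name, int fail)
    {
            printf("init %s\n", name);
            return fail ? -1 : 0;
    }

    static void undo(const char *name)
    {
            printf("undo %s\n", name);
    }

    static int my_init(void)
    {
            int err;

            err = step("proto", 0);
            if (err)
                    goto out;
            err = step("pernet", 0);
            if (err)
                    goto out_proto;
            err = step("socket", 1);          /* simulate a failure here */
            if (err)
                    goto out_pernet;
            return 0;

    out_pernet:
            undo("pernet");                   /* reverse order of init */
    out_proto:
            undo("proto");
    out:
            return err;
    }

    int main(void)
    {
            return my_init() ? 1 : 0;
    }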
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 0e98c3282d42..4d79e3c1616c 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -30,6 +30,7 @@
 #include <linux/netdevice.h>
 #include <linux/socket.h>
 #include <linux/mm.h>
+#include <linux/nsproxy.h>
 #include <linux/rculist_nulls.h>

 #include <net/netfilter/nf_conntrack.h>
@@ -63,8 +64,6 @@ EXPORT_SYMBOL_GPL(nf_conntrack_max);
 struct nf_conn nf_conntrack_untracked __read_mostly;
 EXPORT_SYMBOL_GPL(nf_conntrack_untracked);

-static struct kmem_cache *nf_conntrack_cachep __read_mostly;
-
 static int nf_conntrack_hash_rnd_initted;
 static unsigned int nf_conntrack_hash_rnd;

@@ -86,9 +85,10 @@ static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
         return ((u64)h * size) >> 32;
 }

-static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
+static inline u_int32_t hash_conntrack(const struct net *net,
+                                       const struct nf_conntrack_tuple *tuple)
 {
-        return __hash_conntrack(tuple, nf_conntrack_htable_size,
+        return __hash_conntrack(tuple, net->ct.htable_size,
                                 nf_conntrack_hash_rnd);
 }

@@ -296,7 +296,7 @@ __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
 {
         struct nf_conntrack_tuple_hash *h;
         struct hlist_nulls_node *n;
-        unsigned int hash = hash_conntrack(tuple);
+        unsigned int hash = hash_conntrack(net, tuple);

         /* Disable BHs the entire time since we normally need to disable them
          * at least once for the stats anyway.
@@ -366,10 +366,11 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,

 void nf_conntrack_hash_insert(struct nf_conn *ct)
 {
+        struct net *net = nf_ct_net(ct);
         unsigned int hash, repl_hash;

-        hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-        repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+        hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+        repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

         __nf_conntrack_hash_insert(ct, hash, repl_hash);
 }
@@ -397,8 +398,8 @@ __nf_conntrack_confirm(struct sk_buff *skb)
         if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
                 return NF_ACCEPT;

-        hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-        repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+        hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+        repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

         /* We're not in hash table, and we refuse to set up related
            connections for unconfirmed conns. But packet copies and
@@ -468,7 +469,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
         struct net *net = nf_ct_net(ignored_conntrack);
         struct nf_conntrack_tuple_hash *h;
         struct hlist_nulls_node *n;
-        unsigned int hash = hash_conntrack(tuple);
+        unsigned int hash = hash_conntrack(net, tuple);

         /* Disable BHs the entire time since we need to disable them at
          * least once for the stats anyway.
@@ -503,7 +504,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
         int dropped = 0;

         rcu_read_lock();
-        for (i = 0; i < nf_conntrack_htable_size; i++) {
+        for (i = 0; i < net->ct.htable_size; i++) {
                 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
                                                hnnode) {
                         tmp = nf_ct_tuplehash_to_ctrack(h);
@@ -523,7 +524,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
                 if (cnt >= NF_CT_EVICTION_RANGE)
                         break;

-                hash = (hash + 1) % nf_conntrack_htable_size;
+                hash = (hash + 1) % net->ct.htable_size;
         }
         rcu_read_unlock();

@@ -557,7 +558,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,

         if (nf_conntrack_max &&
             unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
-                unsigned int hash = hash_conntrack(orig);
+                unsigned int hash = hash_conntrack(net, orig);
                 if (!early_drop(net, hash)) {
                         atomic_dec(&net->ct.count);
                         if (net_ratelimit())
@@ -572,7 +573,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
          * Do not use kmem_cache_zalloc(), as this cache uses
          * SLAB_DESTROY_BY_RCU.
          */
-        ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
+        ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
         if (ct == NULL) {
                 pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
                 atomic_dec(&net->ct.count);
@@ -611,7 +612,7 @@ void nf_conntrack_free(struct nf_conn *ct)
         nf_ct_ext_destroy(ct);
         atomic_dec(&net->ct.count);
         nf_ct_ext_free(ct);
-        kmem_cache_free(nf_conntrack_cachep, ct);
+        kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_free);

@@ -1014,7 +1015,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
         struct hlist_nulls_node *n;

         spin_lock_bh(&nf_conntrack_lock);
-        for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
+        for (; *bucket < net->ct.htable_size; (*bucket)++) {
                 hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
                         ct = nf_ct_tuplehash_to_ctrack(h);
                         if (iter(ct, data))
@@ -1113,9 +1114,12 @@ static void nf_ct_release_dying_list(struct net *net)

 static void nf_conntrack_cleanup_init_net(void)
 {
+        /* wait until all references to nf_conntrack_untracked are dropped */
+        while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
+                schedule();
+
         nf_conntrack_helper_fini();
         nf_conntrack_proto_fini();
-        kmem_cache_destroy(nf_conntrack_cachep);
 }

 static void nf_conntrack_cleanup_net(struct net *net)
@@ -1127,15 +1131,14 @@ static void nf_conntrack_cleanup_net(struct net *net)
                 schedule();
                 goto i_see_dead_people;
         }
-        /* wait until all references to nf_conntrack_untracked are dropped */
-        while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
-                schedule();

         nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
-                             nf_conntrack_htable_size);
+                             net->ct.htable_size);
         nf_conntrack_ecache_fini(net);
         nf_conntrack_acct_fini(net);
         nf_conntrack_expect_fini(net);
+        kmem_cache_destroy(net->ct.nf_conntrack_cachep);
+        kfree(net->ct.slabname);
         free_percpu(net->ct.stat);
 }

@@ -1190,10 +1193,12 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
 {
         int i, bucket, vmalloced, old_vmalloced;
         unsigned int hashsize, old_size;
-        int rnd;
         struct hlist_nulls_head *hash, *old_hash;
         struct nf_conntrack_tuple_hash *h;

+        if (current->nsproxy->net_ns != &init_net)
+                return -EOPNOTSUPP;
+
         /* On boot, we can set this without any fancy locking. */
         if (!nf_conntrack_htable_size)
                 return param_set_uint(val, kp);
@@ -1206,33 +1211,29 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
         if (!hash)
                 return -ENOMEM;

-        /* We have to rehahs for the new table anyway, so we also can
-         * use a newrandom seed */
-        get_random_bytes(&rnd, sizeof(rnd));
-
         /* Lookups in the old hash might happen in parallel, which means we
          * might get false negatives during connection lookup. New connections
          * created because of a false negative won't make it into the hash
          * though since that required taking the lock.
          */
         spin_lock_bh(&nf_conntrack_lock);
-        for (i = 0; i < nf_conntrack_htable_size; i++) {
+        for (i = 0; i < init_net.ct.htable_size; i++) {
                 while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
                         h = hlist_nulls_entry(init_net.ct.hash[i].first,
                                         struct nf_conntrack_tuple_hash, hnnode);
                         hlist_nulls_del_rcu(&h->hnnode);
-                        bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
+                        bucket = __hash_conntrack(&h->tuple, hashsize,
+                                                  nf_conntrack_hash_rnd);
                         hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
                 }
         }
-        old_size = nf_conntrack_htable_size;
+        old_size = init_net.ct.htable_size;
         old_vmalloced = init_net.ct.hash_vmalloc;
         old_hash = init_net.ct.hash;

-        nf_conntrack_htable_size = hashsize;
+        init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
         init_net.ct.hash_vmalloc = vmalloced;
         init_net.ct.hash = hash;
-        nf_conntrack_hash_rnd = rnd;
         spin_unlock_bh(&nf_conntrack_lock);

         nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
@@ -1271,15 +1272,6 @@ static int nf_conntrack_init_init_net(void)
                NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
                nf_conntrack_max);

-        nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
-                                                sizeof(struct nf_conn),
-                                                0, SLAB_DESTROY_BY_RCU, NULL);
-        if (!nf_conntrack_cachep) {
-                printk(KERN_ERR "Unable to create nf_conn slab cache\n");
-                ret = -ENOMEM;
-                goto err_cache;
-        }
-
         ret = nf_conntrack_proto_init();
         if (ret < 0)
                 goto err_proto;
@@ -1288,13 +1280,19 @@ static int nf_conntrack_init_init_net(void)
         if (ret < 0)
                 goto err_helper;

+        /* Set up fake conntrack: to never be deleted, not in any hashes */
+#ifdef CONFIG_NET_NS
+        nf_conntrack_untracked.ct_net = &init_net;
+#endif
+        atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
+        /* - and look it like as a confirmed connection */
+        set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);
+
         return 0;

 err_helper:
         nf_conntrack_proto_fini();
 err_proto:
-        kmem_cache_destroy(nf_conntrack_cachep);
-err_cache:
         return ret;
 }

1300 1298
@@ -1316,7 +1314,24 @@ static int nf_conntrack_init_net(struct net *net)
1316 ret = -ENOMEM; 1314 ret = -ENOMEM;
1317 goto err_stat; 1315 goto err_stat;
1318 } 1316 }
1319 net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1317
1318 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
1319 if (!net->ct.slabname) {
1320 ret = -ENOMEM;
1321 goto err_slabname;
1322 }
1323
1324 net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
1325 sizeof(struct nf_conn), 0,
1326 SLAB_DESTROY_BY_RCU, NULL);
1327 if (!net->ct.nf_conntrack_cachep) {
1328 printk(KERN_ERR "Unable to create nf_conn slab cache\n");
1329 ret = -ENOMEM;
1330 goto err_cache;
1331 }
1332
1333 net->ct.htable_size = nf_conntrack_htable_size;
1334 net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size,
1320 &net->ct.hash_vmalloc, 1); 1335 &net->ct.hash_vmalloc, 1);
1321 if (!net->ct.hash) { 1336 if (!net->ct.hash) {
1322 ret = -ENOMEM; 1337 ret = -ENOMEM;
@@ -1333,15 +1348,6 @@ static int nf_conntrack_init_net(struct net *net)
1333 if (ret < 0) 1348 if (ret < 0)
1334 goto err_ecache; 1349 goto err_ecache;
1335 1350
1336 /* Set up fake conntrack:
1337 - to never be deleted, not in any hashes */
1338#ifdef CONFIG_NET_NS
1339 nf_conntrack_untracked.ct_net = &init_net;
1340#endif
1341 atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
1342 /* - and look it like as a confirmed connection */
1343 set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);
1344
1345 return 0; 1351 return 0;
1346 1352
1347err_ecache: 1353err_ecache:
@@ -1350,8 +1356,12 @@ err_acct:
1350 nf_conntrack_expect_fini(net); 1356 nf_conntrack_expect_fini(net);
1351err_expect: 1357err_expect:
1352 nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, 1358 nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
1353 nf_conntrack_htable_size); 1359 net->ct.htable_size);
1354err_hash: 1360err_hash:
1361 kmem_cache_destroy(net->ct.nf_conntrack_cachep);
1362err_cache:
1363 kfree(net->ct.slabname);
1364err_slabname:
1355 free_percpu(net->ct.stat); 1365 free_percpu(net->ct.stat);
1356err_stat: 1366err_stat:
1357 return ret; 1367 return ret;
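
Making the conntrack slab per-namespace forces a naming decision, because kmem cache names must be unique system-wide; the hunk above solves it by baking the netns pointer into the name via kasprintf("nf_conntrack_%p", net). A user-space sketch of the same trick (hypothetical struct):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>

    struct netns { char *slabname; };

    static int netns_init(struct netns *net)
    {
            /* embed the instance address to guarantee a unique name */
            if (asprintf(&net->slabname, "nf_conntrack_%p", (void *)net) < 0)
                    return -1;
            return 0;
    }

    int main(void)
    {
            struct netns a, b;

            if (netns_init(&a) || netns_init(&b))
                    return 1;
            printf("%s\n%s\n", a.slabname, b.slabname); /* two distinct names */
            free(a.slabname);
            free(b.slabname);
            return 0;
    }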
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index fdf5d2a1d9b4..2f25ff610982 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -569,7 +569,7 @@ static void exp_proc_remove(struct net *net)
 #endif /* CONFIG_PROC_FS */
 }

-module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0600);
+module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);

 int nf_conntrack_expect_init(struct net *net)
 {
@@ -577,7 +577,7 @@ int nf_conntrack_expect_init(struct net *net)

         if (net_eq(net, &init_net)) {
                 if (!nf_ct_expect_hsize) {
-                        nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
+                        nf_ct_expect_hsize = net->ct.htable_size / 256;
                         if (!nf_ct_expect_hsize)
                                 nf_ct_expect_hsize = 1;
                 }
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 65c2a7bc3afc..4b1a56bd074c 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -192,7 +192,7 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
         /* Get rid of expecteds, set helpers to NULL. */
         hlist_nulls_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode)
                 unhelp(h, me);
-        for (i = 0; i < nf_conntrack_htable_size; i++) {
+        for (i = 0; i < net->ct.htable_size; i++) {
                 hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
                         unhelp(h, me);
         }
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 42f21c01a93e..0ffe689dfe97 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -594,7 +594,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)

         rcu_read_lock();
         last = (struct nf_conn *)cb->args[1];
-        for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
+        for (; cb->args[0] < init_net.ct.htable_size; cb->args[0]++) {
 restart:
                 hlist_nulls_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]],
                                                hnnode) {
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 028aba667ef7..e310f1561bb2 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -51,7 +51,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
         struct hlist_nulls_node *n;

         for (st->bucket = 0;
-             st->bucket < nf_conntrack_htable_size;
+             st->bucket < net->ct.htable_size;
              st->bucket++) {
                 n = rcu_dereference(net->ct.hash[st->bucket].first);
                 if (!is_a_nulls(n))
@@ -69,7 +69,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
         head = rcu_dereference(head->next);
         while (is_a_nulls(head)) {
                 if (likely(get_nulls_value(head) == st->bucket)) {
-                        if (++st->bucket >= nf_conntrack_htable_size)
+                        if (++st->bucket >= net->ct.htable_size)
                                 return NULL;
                 }
                 head = rcu_dereference(net->ct.hash[st->bucket].first);
@@ -355,7 +355,7 @@ static ctl_table nf_ct_sysctl_table[] = {
         },
         {
                 .procname       = "nf_conntrack_buckets",
-                .data           = &nf_conntrack_htable_size,
+                .data           = &init_net.ct.htable_size,
                 .maxlen         = sizeof(unsigned int),
                 .mode           = 0444,
                 .proc_handler   = proc_dointvec,
@@ -421,6 +421,7 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
                 goto out_kmemdup;

         table[1].data = &net->ct.count;
+        table[2].data = &net->ct.htable_size;
         table[3].data = &net->ct.sysctl_checksum;
         table[4].data = &net->ct.sysctl_log_invalid;

diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a4957bf2ca60..4c5972ba8c78 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -455,9 +455,14 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
         if (nl_table[protocol].registered &&
             try_module_get(nl_table[protocol].module))
                 module = nl_table[protocol].module;
+        else
+                err = -EPROTONOSUPPORT;
         cb_mutex = nl_table[protocol].cb_mutex;
         netlink_unlock_table();

+        if (err < 0)
+                goto out;
+
         err = __netlink_create(net, sock, cb_mutex, protocol);
         if (err < 0)
                 goto out_module;
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 929218a47620..21f9c7678aa3 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -433,7 +433,7 @@ config NET_ACT_POLICE
           module.

           To compile this code as a module, choose M here: the
-          module will be called police.
+          module will be called act_police.

 config NET_ACT_GACT
         tristate "Generic actions"
@@ -443,7 +443,7 @@ config NET_ACT_GACT
           accepting packets.

           To compile this code as a module, choose M here: the
-          module will be called gact.
+          module will be called act_gact.

 config GACT_PROB
         bool "Probability support"
@@ -459,7 +459,7 @@ config NET_ACT_MIRRED
           other devices.

           To compile this code as a module, choose M here: the
-          module will be called mirred.
+          module will be called act_mirred.

 config NET_ACT_IPT
         tristate "IPtables targets"
@@ -469,7 +469,7 @@ config NET_ACT_IPT
           classification.

           To compile this code as a module, choose M here: the
-          module will be called ipt.
+          module will be called act_ipt.

 config NET_ACT_NAT
         tristate "Stateless NAT"
@@ -479,7 +479,7 @@ config NET_ACT_NAT
           netfilter for NAT unless you know what you are doing.

           To compile this code as a module, choose M here: the
-          module will be called nat.
+          module will be called act_nat.

 config NET_ACT_PEDIT
         tristate "Packet Editing"
@@ -488,7 +488,7 @@ config NET_ACT_PEDIT
           Say Y here if you want to mangle the content of packets.

           To compile this code as a module, choose M here: the
-          module will be called pedit.
+          module will be called act_pedit.

 config NET_ACT_SIMP
         tristate "Simple Example (Debug)"
@@ -502,7 +502,7 @@ config NET_ACT_SIMP
           If unsure, say N.

           To compile this code as a module, choose M here: the
-          module will be called simple.
+          module will be called act_simple.

 config NET_ACT_SKBEDIT
         tristate "SKB Editing"
@@ -513,7 +513,7 @@ config NET_ACT_SKBEDIT
           If unsure, say N.

           To compile this code as a module, choose M here: the
-          module will be called skbedit.
+          module will be called act_skbedit.

 config NET_CLS_IND
         bool "Incoming device classification"
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index c41afe6639a0..47fb65d1fcbd 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -65,7 +65,6 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode,
                          const char *cause, int result, int info);

 /* Internal IMA function definitions */
-void ima_iintcache_init(void);
 int ima_init(void);
 void ima_cleanup(void);
 int ima_fs_init(void);
@@ -131,7 +130,7 @@ void iint_free(struct kref *kref);
 void iint_rcu_free(struct rcu_head *rcu);

 /* IMA policy related functions */
-enum ima_hooks { PATH_CHECK = 1, FILE_MMAP, BPRM_CHECK };
+enum ima_hooks { FILE_CHECK = 1, FILE_MMAP, BPRM_CHECK };

 int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask);
 void ima_init_policy(void);
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index 3cd58b60afd2..2a5e0bcf3887 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -95,12 +95,12 @@ err_out:
  * ima_must_measure - measure decision based on policy.
  * @inode: pointer to inode to measure
  * @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXECUTE)
- * @function: calling function (PATH_CHECK, BPRM_CHECK, FILE_MMAP)
+ * @function: calling function (FILE_CHECK, BPRM_CHECK, FILE_MMAP)
  *
  * The policy is defined in terms of keypairs:
  *         subj=, obj=, type=, func=, mask=, fsmagic=
  *         subj,obj, and type: are LSM specific.
- *         func: PATH_CHECK | BPRM_CHECK | FILE_MMAP
+ *         func: FILE_CHECK | BPRM_CHECK | FILE_MMAP
  *         mask: contains the permission mask
  *         fsmagic: hex value
  *
diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c
index fa592ff1ac1c..0d83edcfc402 100644
--- a/security/integrity/ima/ima_iint.c
+++ b/security/integrity/ima/ima_iint.c
@@ -52,9 +52,6 @@ int ima_inode_alloc(struct inode *inode)
         struct ima_iint_cache *iint = NULL;
         int rc = 0;

-        if (!ima_initialized)
-                return 0;
-
         iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
         if (!iint)
                 return -ENOMEM;
@@ -118,8 +115,6 @@ void ima_inode_free(struct inode *inode)
 {
         struct ima_iint_cache *iint;

-        if (!ima_initialized)
-                return;
         spin_lock(&ima_iint_lock);
         iint = radix_tree_delete(&ima_iint_store, (unsigned long)inode);
         spin_unlock(&ima_iint_lock);
@@ -141,9 +136,11 @@ static void init_once(void *foo)
         kref_set(&iint->refcount, 1);
 }

-void __init ima_iintcache_init(void)
+static int __init ima_iintcache_init(void)
 {
         iint_cache =
             kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0,
                               SLAB_PANIC, init_once);
+        return 0;
 }
+security_initcall(ima_iintcache_init);
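
Turning ima_iintcache_init() into a security_initcall() means the cache is created automatically during boot, before any inode can reach ima_inode_alloc(), instead of waiting for init_ima() — which is why the "!ima_initialized" guards above could be deleted. A rough user-space analogue is a constructor that runs before main():

    #include <stdio.h>

    static int cache_ready;

    /* Runs automatically before main(), much as an initcall runs
     * during boot without an explicit call site. */
    __attribute__((constructor))
    static void iintcache_init(void)
    {
            cache_ready = 1;
    }

    int main(void)
    {
            printf("cache_ready = %d\n", cache_ready); /* prints 1 */
            return 0;
    }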
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index a89f44d5e030..294b005d6520 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -14,7 +14,7 @@
  *
  * File: ima_main.c
  *        implements the IMA hooks: ima_bprm_check, ima_file_mmap,
- *        and ima_path_check.
+ *        and ima_file_check.
  */
 #include <linux/module.h>
 #include <linux/file.h>
@@ -84,6 +84,36 @@ out:
         return found;
 }

+/* ima_read_write_check - reflect possible reading/writing errors in the PCR.
+ *
+ * When opening a file for read, if the file is already open for write,
+ * the file could change, resulting in a file measurement error.
+ *
+ * Opening a file for write, if the file is already open for read, results
+ * in a time of measure, time of use (ToMToU) error.
+ *
+ * In either case invalidate the PCR.
+ */
+enum iint_pcr_error { TOMTOU, OPEN_WRITERS };
+static void ima_read_write_check(enum iint_pcr_error error,
+                                 struct ima_iint_cache *iint,
+                                 struct inode *inode,
+                                 const unsigned char *filename)
+{
+        switch (error) {
+        case TOMTOU:
+                if (iint->readcount > 0)
+                        ima_add_violation(inode, filename, "invalid_pcr",
+                                          "ToMToU");
+                break;
+        case OPEN_WRITERS:
+                if (iint->writecount > 0)
+                        ima_add_violation(inode, filename, "invalid_pcr",
+                                          "open_writers");
+                break;
+        }
+}
+
 /*
  * Update the counts given an fmode_t
  */
@@ -99,6 +129,47 @@ static void ima_inc_counts(struct ima_iint_cache *iint, fmode_t mode)
 }

 /*
+ * ima_counts_get - increment file counts
+ *
+ * Maintain read/write counters for all files, but only
+ * invalidate the PCR for measured files:
+ *        - Opening a file for write when already open for read,
+ *          results in a time of measure, time of use (ToMToU) error.
+ *        - Opening a file for read when already open for write,
+ *          could result in a file measurement error.
+ *
+ */
+void ima_counts_get(struct file *file)
+{
+        struct dentry *dentry = file->f_path.dentry;
+        struct inode *inode = dentry->d_inode;
+        fmode_t mode = file->f_mode;
+        struct ima_iint_cache *iint;
+        int rc;
+
+        if (!ima_initialized || !S_ISREG(inode->i_mode))
+                return;
+        iint = ima_iint_find_get(inode);
+        if (!iint)
+                return;
+        mutex_lock(&iint->mutex);
+        rc = ima_must_measure(iint, inode, MAY_READ, FILE_CHECK);
+        if (rc < 0)
+                goto out;
+
+        if (mode & FMODE_WRITE) {
+                ima_read_write_check(TOMTOU, iint, inode, dentry->d_name.name);
+                goto out;
+        }
+        ima_read_write_check(OPEN_WRITERS, iint, inode, dentry->d_name.name);
+out:
+        ima_inc_counts(iint, file->f_mode);
+        mutex_unlock(&iint->mutex);
+
+        kref_put(&iint->refcount, iint_free);
+}
+
+/*
  * Decrement ima counts
  */
 static void ima_dec_counts(struct ima_iint_cache *iint, struct inode *inode,
@@ -153,123 +224,6 @@ void ima_file_free(struct file *file)
         kref_put(&iint->refcount, iint_free);
 }

-/* ima_read_write_check - reflect possible reading/writing errors in the PCR.
- *
- * When opening a file for read, if the file is already open for write,
- * the file could change, resulting in a file measurement error.
- *
- * Opening a file for write, if the file is already open for read, results
- * in a time of measure, time of use (ToMToU) error.
- *
- * In either case invalidate the PCR.
- */
-enum iint_pcr_error { TOMTOU, OPEN_WRITERS };
-static void ima_read_write_check(enum iint_pcr_error error,
-                                 struct ima_iint_cache *iint,
-                                 struct inode *inode,
-                                 const unsigned char *filename)
-{
-        switch (error) {
-        case TOMTOU:
-                if (iint->readcount > 0)
-                        ima_add_violation(inode, filename, "invalid_pcr",
-                                          "ToMToU");
-                break;
-        case OPEN_WRITERS:
-                if (iint->writecount > 0)
-                        ima_add_violation(inode, filename, "invalid_pcr",
-                                          "open_writers");
-                break;
-        }
-}
-
-static int get_path_measurement(struct ima_iint_cache *iint, struct file *file,
-                                const unsigned char *filename)
-{
-        int rc = 0;
-
-        ima_inc_counts(iint, file->f_mode);
-
-        rc = ima_collect_measurement(iint, file);
-        if (!rc)
-                ima_store_measurement(iint, file, filename);
-        return rc;
-}
-
-/**
- * ima_path_check - based on policy, collect/store measurement.
- * @path: contains a pointer to the path to be measured
- * @mask: contains MAY_READ, MAY_WRITE or MAY_EXECUTE
- *
- * Measure the file being open for readonly, based on the
- * ima_must_measure() policy decision.
- *
- * Keep read/write counters for all files, but only
- * invalidate the PCR for measured files:
- *        - Opening a file for write when already open for read,
- *          results in a time of measure, time of use (ToMToU) error.
- *        - Opening a file for read when already open for write,
- *          could result in a file measurement error.
- *
- * Always return 0 and audit dentry_open failures.
- * (Return code will be based upon measurement appraisal.)
- */
-int ima_path_check(struct path *path, int mask)
-{
-        struct inode *inode = path->dentry->d_inode;
-        struct ima_iint_cache *iint;
-        struct file *file = NULL;
-        int rc;
-
-        if (!ima_initialized || !S_ISREG(inode->i_mode))
-                return 0;
-        iint = ima_iint_find_get(inode);
-        if (!iint)
-                return 0;
-
-        mutex_lock(&iint->mutex);
-
-        rc = ima_must_measure(iint, inode, MAY_READ, PATH_CHECK);
-        if (rc < 0)
-                goto out;
-
-        if ((mask & MAY_WRITE) || (mask == 0))
-                ima_read_write_check(TOMTOU, iint, inode,
-                                     path->dentry->d_name.name);
-
-        if ((mask & (MAY_WRITE | MAY_READ | MAY_EXEC)) != MAY_READ)
-                goto out;
-
-        ima_read_write_check(OPEN_WRITERS, iint, inode,
-                             path->dentry->d_name.name);
-        if (!(iint->flags & IMA_MEASURED)) {
-                struct dentry *dentry = dget(path->dentry);
-                struct vfsmount *mnt = mntget(path->mnt);
-
-                file = dentry_open(dentry, mnt, O_RDONLY | O_LARGEFILE,
-                                   current_cred());
-                if (IS_ERR(file)) {
-                        int audit_info = 0;
-
-                        integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode,
-                                            dentry->d_name.name,
-                                            "add_measurement",
-                                            "dentry_open failed",
-                                            1, audit_info);
-                        file = NULL;
-                        goto out;
-                }
-                rc = get_path_measurement(iint, file, dentry->d_name.name);
-        }
-out:
-        mutex_unlock(&iint->mutex);
-        if (file)
-                fput(file);
-        kref_put(&iint->refcount, iint_free);
-        return 0;
-}
-EXPORT_SYMBOL_GPL(ima_path_check);
-
 static int process_measurement(struct file *file, const unsigned char *filename,
                                int mask, int function)
 {
@@ -297,33 +251,6 @@ out:
         return rc;
 }

-/*
- * ima_counts_get - increment file counts
- *
- * - for IPC shm and shmat file.
- * - for nfsd exported files.
- *
- * Increment the counts for these files to prevent unnecessary
- * imbalance messages.
- */
-void ima_counts_get(struct file *file)
-{
-        struct inode *inode = file->f_dentry->d_inode;
-        struct ima_iint_cache *iint;
-
-        if (!ima_initialized || !S_ISREG(inode->i_mode))
-                return;
-        iint = ima_iint_find_get(inode);
-        if (!iint)
-                return;
-        mutex_lock(&iint->mutex);
-        ima_inc_counts(iint, file->f_mode);
-        mutex_unlock(&iint->mutex);
-
-        kref_put(&iint->refcount, iint_free);
-}
-EXPORT_SYMBOL_GPL(ima_counts_get);
-
 /**
  * ima_file_mmap - based on policy, collect/store measurement.
  * @file: pointer to the file to be measured (May be NULL)
@@ -369,11 +296,31 @@ int ima_bprm_check(struct linux_binprm *bprm)
         return 0;
 }

+/**
+ * ima_file_check - based on policy, collect/store measurement.
+ * @file: pointer to the file to be measured
+ * @mask: contains MAY_READ, MAY_WRITE or MAY_EXECUTE
+ *
+ * Measure files based on the ima_must_measure() policy decision.
+ *
+ * Always return 0 and audit dentry_open failures.
+ * (Return code will be based upon measurement appraisal.)
+ */
+int ima_file_check(struct file *file, int mask)
+{
+        int rc;
+
+        rc = process_measurement(file, file->f_dentry->d_name.name,
+                                 mask & (MAY_READ | MAY_WRITE | MAY_EXEC),
+                                 FILE_CHECK);
+        return 0;
+}
+EXPORT_SYMBOL_GPL(ima_file_check);
+
 static int __init init_ima(void)
 {
         int error;

-        ima_iintcache_init();
         error = ima_init();
         ima_initialized = 1;
         return error;
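
Stripped of the locking and policy calls, the ToMToU logic that ima_counts_get() now centralizes is two counter checks made at open time. A reduced sketch (hypothetical types, no locking):

    #include <stdio.h>

    struct iint { int readcount, writecount; };

    static void open_check(struct iint *iint, int write, const char *name)
    {
            /* writer arriving while readers hold the file: ToMToU */
            if (write && iint->readcount > 0)
                    printf("%s: ToMToU violation\n", name);
            /* reader arriving while a writer holds the file */
            if (!write && iint->writecount > 0)
                    printf("%s: open_writers violation\n", name);
            if (write)
                    iint->writecount++;
            else
                    iint->readcount++;
    }

    int main(void)
    {
            struct iint ii = { 0, 0 };

            open_check(&ii, 0, "f");  /* first reader: quiet */
            open_check(&ii, 1, "f");  /* writer joins: ToMToU */
            open_check(&ii, 0, "f");  /* reader joins: open_writers */
            return 0;
    }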
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index e1278399b345..4759d0f99335 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -67,7 +67,7 @@ static struct ima_measure_rule_entry default_rules[] = {
          .flags = IMA_FUNC | IMA_MASK},
         {.action = MEASURE,.func = BPRM_CHECK,.mask = MAY_EXEC,
          .flags = IMA_FUNC | IMA_MASK},
-        {.action = MEASURE,.func = PATH_CHECK,.mask = MAY_READ,.uid = 0,
+        {.action = MEASURE,.func = FILE_CHECK,.mask = MAY_READ,.uid = 0,
          .flags = IMA_FUNC | IMA_MASK | IMA_UID},
 };

@@ -282,8 +282,11 @@ static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
                 break;
         case Opt_func:
                 audit_log_format(ab, "func=%s ", args[0].from);
-                if (strcmp(args[0].from, "PATH_CHECK") == 0)
-                        entry->func = PATH_CHECK;
+                if (strcmp(args[0].from, "FILE_CHECK") == 0)
+                        entry->func = FILE_CHECK;
+                /* PATH_CHECK is for backwards compat */
+                else if (strcmp(args[0].from, "PATH_CHECK") == 0)
+                        entry->func = FILE_CHECK;
                 else if (strcmp(args[0].from, "FILE_MMAP") == 0)
                         entry->func = FILE_MMAP;
                 else if (strcmp(args[0].from, "BPRM_CHECK") == 0)
diff --git a/security/security.c b/security/security.c
index 24e060be9fa5..122b748d0f4c 100644
--- a/security/security.c
+++ b/security/security.c
@@ -666,8 +666,6 @@ int security_file_alloc(struct file *file)
 void security_file_free(struct file *file)
 {
         security_ops->file_free_security(file);
-        if (file->f_dentry)
-                ima_file_free(file);
 }

 int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c
index cb65bd0dd35b..459c1f62783b 100644
--- a/sound/pci/ctxfi/ctatc.c
+++ b/sound/pci/ctxfi/ctatc.c
@@ -166,18 +166,7 @@ static void ct_unmap_audio_buffer(struct ct_atc *atc, struct ct_atc_pcm *apcm)

 static unsigned long atc_get_ptp_phys(struct ct_atc *atc, int index)
 {
-        struct ct_vm *vm;
-        void *kvirt_addr;
-        unsigned long phys_addr;
-
-        vm = atc->vm;
-        kvirt_addr = vm->get_ptp_virt(vm, index);
-        if (kvirt_addr == NULL)
-                phys_addr = (~0UL);
-        else
-                phys_addr = virt_to_phys(kvirt_addr);
-
-        return phys_addr;
+        return atc->vm->get_ptp_phys(atc->vm, index);
 }

 static unsigned int convert_format(snd_pcm_format_t snd_format)
@@ -1669,7 +1658,7 @@ int __devinit ct_atc_create(struct snd_card *card, struct pci_dev *pci,
         }

         /* Set up device virtual memory management object */
-        err = ct_vm_create(&atc->vm);
+        err = ct_vm_create(&atc->vm, pci);
         if (err < 0)
                 goto error1;

diff --git a/sound/pci/ctxfi/ctvmem.c b/sound/pci/ctxfi/ctvmem.c
index 6b78752e9503..65da6e466f80 100644
--- a/sound/pci/ctxfi/ctvmem.c
+++ b/sound/pci/ctxfi/ctvmem.c
@@ -138,7 +138,7 @@ ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size)
                 return NULL;
         }

-        ptp = vm->ptp[0];
+        ptp = (unsigned long *)vm->ptp[0].area;
         pte_start = (block->addr >> CT_PAGE_SHIFT);
         pages = block->size >> CT_PAGE_SHIFT;
         for (i = 0; i < pages; i++) {
@@ -158,25 +158,25 @@ static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block)
 }

 /* *
- * return the host (kmalloced) addr of the @index-th device
- * page talbe page on success, or NULL on failure.
- * The first returned NULL indicates the termination.
+ * return the host physical addr of the @index-th device
+ * page table page on success, or ~0UL on failure.
+ * The first returned ~0UL indicates the termination.
  * */
-static void *
-ct_get_ptp_virt(struct ct_vm *vm, int index)
+static dma_addr_t
+ct_get_ptp_phys(struct ct_vm *vm, int index)
 {
-        void *addr;
+        dma_addr_t addr;

-        addr = (index >= CT_PTP_NUM) ? NULL : vm->ptp[index];
+        addr = (index >= CT_PTP_NUM) ? ~0UL : vm->ptp[index].addr;

         return addr;
 }

-int ct_vm_create(struct ct_vm **rvm)
+int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci)
 {
         struct ct_vm *vm;
         struct ct_vm_block *block;
-        int i;
+        int i, err = 0;

         *rvm = NULL;

@@ -188,23 +188,21 @@ int ct_vm_create(struct ct_vm **rvm)

         /* Allocate page table pages */
         for (i = 0; i < CT_PTP_NUM; i++) {
-                vm->ptp[i] = kmalloc(PAGE_SIZE, GFP_KERNEL);
-                if (!vm->ptp[i])
+                err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
+                                          snd_dma_pci_data(pci),
+                                          PAGE_SIZE, &vm->ptp[i]);
+                if (err < 0)
                         break;
         }
-        if (!i) {
+        if (err < 0) {
                 /* no page table pages are allocated */
-                kfree(vm);
+                ct_vm_destroy(vm);
                 return -ENOMEM;
         }
         vm->size = CT_ADDRS_PER_PAGE * i;
-        /* Initialise remaining ptps */
-        for (; i < CT_PTP_NUM; i++)
-                vm->ptp[i] = NULL;
-
         vm->map = ct_vm_map;
         vm->unmap = ct_vm_unmap;
-        vm->get_ptp_virt = ct_get_ptp_virt;
+        vm->get_ptp_phys = ct_get_ptp_phys;
         INIT_LIST_HEAD(&vm->unused);
         INIT_LIST_HEAD(&vm->used);
         block = kzalloc(sizeof(*block), GFP_KERNEL);
@@ -242,7 +240,7 @@ void ct_vm_destroy(struct ct_vm *vm)

         /* free allocated page table pages */
         for (i = 0; i < CT_PTP_NUM; i++)
-                kfree(vm->ptp[i]);
+                snd_dma_free_pages(&vm->ptp[i]);

         vm->size = 0;

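
Besides switching to DMA-safe allocations, the ctvmem change replaces a NULL failure value with ~0UL: a DMA (bus) address is an integer, zero can be a legitimate bus address, so an all-ones sentinel is the conventional "invalid" marker. A sketch of the accessor contract (values hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t dma_addr_t;
    #define INVALID_PTP ((dma_addr_t)~0ULL)
    #define PTP_NUM 4

    static const dma_addr_t ptp_addr[PTP_NUM] = { 0x0, 0x1000, 0x2000, 0x3000 };

    /* 0x0 is a valid bus address here, so NULL-style checks would
     * wrongly reject the first page; the all-ones sentinel cannot
     * collide with a real page-aligned address. */
    static dma_addr_t get_ptp_phys(int index)
    {
            return (index >= PTP_NUM) ? INVALID_PTP : ptp_addr[index];
    }

    int main(void)
    {
            for (int i = 0; i <= PTP_NUM; i++) {
                    dma_addr_t a = get_ptp_phys(i);

                    if (a == INVALID_PTP) {
                            printf("index %d: invalid\n", i);
                            break;
                    }
                    printf("index %d: %#llx\n", i, (unsigned long long)a);
            }
            return 0;
    }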
diff --git a/sound/pci/ctxfi/ctvmem.h b/sound/pci/ctxfi/ctvmem.h
index 01e4fd0386a3..b23adfca4de6 100644
--- a/sound/pci/ctxfi/ctvmem.h
+++ b/sound/pci/ctxfi/ctvmem.h
@@ -22,6 +22,8 @@
 
 #include <linux/mutex.h>
 #include <linux/list.h>
+#include <linux/pci.h>
+#include <sound/memalloc.h>
 
 /* The chip can handle the page table of 4k pages
  * (emu20k1 can handle even 8k pages, but we don't use it right now)
@@ -41,7 +43,7 @@ struct snd_pcm_substream;
 
 /* Virtual memory management object for card device */
 struct ct_vm {
-	void *ptp[CT_PTP_NUM];			/* Device page table pages */
+	struct snd_dma_buffer ptp[CT_PTP_NUM];	/* Device page table pages */
 	unsigned int size;		/* Available addr space in bytes */
 	struct list_head unused;	/* List of unused blocks */
 	struct list_head used;		/* List of used blocks */
@@ -52,10 +54,10 @@ struct ct_vm {
 		   int size);
 	/* Unmap device logical addr area. */
 	void (*unmap)(struct ct_vm *, struct ct_vm_block *block);
-	void *(*get_ptp_virt)(struct ct_vm *vm, int index);
+	dma_addr_t (*get_ptp_phys)(struct ct_vm *vm, int index);
 };
 
-int ct_vm_create(struct ct_vm **rvm);
+int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci);
 void ct_vm_destroy(struct ct_vm *vm);
 
 #endif	/* CTVMEM_H */
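With ptp[] now holding struct snd_dma_buffer rather than raw pointers, the CPU mapping and the bus address of each page travel together. A sketch of how the two views are used, under the assumption that the driver writes entries through .area while the hardware is pointed at .addr; fill_ptp_entry is a hypothetical helper, not part of the patch:

    #include <linux/types.h>
    #include <sound/memalloc.h>

    /* struct snd_dma_buffer carries both views of one allocation:
     *   .area  - kernel virtual address, for CPU-side writes
     *   .addr  - dma_addr_t bus address, for programming the device
     *   .bytes - size of the buffer
     */
    static void fill_ptp_entry(struct snd_dma_buffer *buf, int idx, u32 entry)
    {
    	u32 *tbl = (u32 *)buf->area;	/* CPU writes go through .area */

    	tbl[idx] = entry;
    	/* the device is given buf->addr, never the tbl pointer */
    }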
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 565de38a3fc7..b8faa6dc5abe 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -426,6 +426,7 @@ struct azx {
 
 	/* flags */
 	int position_fix;
+	int poll_count;
 	unsigned int running :1;
 	unsigned int initialized :1;
 	unsigned int single_cmd :1;
@@ -506,7 +507,7 @@ static char *driver_short_names[] __devinitdata = {
 #define get_azx_dev(substream) (substream->runtime->private_data)
 
 static int azx_acquire_irq(struct azx *chip, int do_disconnect);
-
+static int azx_send_cmd(struct hda_bus *bus, unsigned int val);
 /*
  * Interface for HD codec
  */
@@ -664,11 +665,12 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
 {
 	struct azx *chip = bus->private_data;
 	unsigned long timeout;
+	int do_poll = 0;
 
  again:
 	timeout = jiffies + msecs_to_jiffies(1000);
 	for (;;) {
-		if (chip->polling_mode) {
+		if (chip->polling_mode || do_poll) {
 			spin_lock_irq(&chip->reg_lock);
 			azx_update_rirb(chip);
 			spin_unlock_irq(&chip->reg_lock);
@@ -676,6 +678,9 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
 		if (!chip->rirb.cmds[addr]) {
 			smp_rmb();
 			bus->rirb_error = 0;
+
+			if (!do_poll)
+				chip->poll_count = 0;
 			return chip->rirb.res[addr]; /* the last value */
 		}
 		if (time_after(jiffies, timeout))
@@ -688,6 +693,16 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
 		}
 	}
 
+	if (!chip->polling_mode && chip->poll_count < 2) {
+		snd_printdd(SFX "azx_get_response timeout, "
+			    "polling the codec once: last cmd=0x%08x\n",
+			    chip->last_cmd[addr]);
+		do_poll = 1;
+		chip->poll_count++;
+		goto again;
+	}
+
+
 	if (!chip->polling_mode) {
 		snd_printk(KERN_WARNING SFX "azx_get_response timeout, "
 			   "switching to polling mode: last cmd=0x%08x\n",
@@ -2043,7 +2058,7 @@ static int azx_acquire_irq(struct azx *chip, int do_disconnect)
 {
 	if (request_irq(chip->pci->irq, azx_interrupt,
 			chip->msi ? 0 : IRQF_SHARED,
-			"HDA Intel", chip)) {
+			"hda_intel", chip)) {
 		printk(KERN_ERR "hda-intel: unable to grab IRQ %d, "
 		       "disabling device\n", chip->pci->irq);
 		if (do_disconnect)
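The azx_rirb_get_response() change gives a timed-out request up to two one-shot polls of the RIRB before committing the whole driver to polling mode. A minimal user-space sketch of that bounded-retry shape; wait_for_irq_response() and poll_response() are hypothetical stand-ins for the interrupt wait and the direct register read:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_ONE_SHOT_POLLS 2	/* mirrors chip->poll_count < 2 */

    /* Hypothetical hardware accessors. */
    static bool wait_for_irq_response(void) { return false; /* pretend timeout */ }
    static bool poll_response(void) { return true; /* direct register read */ }

    /* On timeout, poll just this request a bounded number of times;
     * only if that also fails, fall back to polling mode permanently. */
    static bool get_response(bool *polling_mode, int *poll_count)
    {
    	bool do_poll = false;

    again:
    	if ((*polling_mode || do_poll) ? poll_response()
    				       : wait_for_irq_response()) {
    		if (!do_poll)
    			*poll_count = 0;	/* IRQ path is healthy again */
    		return true;
    	}
    	if (!*polling_mode && *poll_count < MAX_ONE_SHOT_POLLS) {
    		do_poll = true;		/* retry this one request by polling */
    		(*poll_count)++;
    		goto again;
    	}
    	if (!*polling_mode) {
    		fprintf(stderr, "timeout, switching to polling mode\n");
    		*polling_mode = true;
    		goto again;
    	}
    	return false;			/* give up */
    }

    int main(void)
    {
    	bool mode = false;
    	int count = 0;

    	return get_response(&mode, &count) ? 0 : 1;
    }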
diff --git a/sound/pci/ice1712/aureon.c b/sound/pci/ice1712/aureon.c
index 765d7bd4c3d4..9e66f6d306f8 100644
--- a/sound/pci/ice1712/aureon.c
+++ b/sound/pci/ice1712/aureon.c
@@ -703,11 +703,13 @@ static void wm_set_vol(struct snd_ice1712 *ice, unsigned int index, unsigned sho
 {
 	unsigned char nvol;
 
-	if ((master & WM_VOL_MUTE) || (vol & WM_VOL_MUTE))
+	if ((master & WM_VOL_MUTE) || (vol & WM_VOL_MUTE)) {
 		nvol = 0;
-	else
+	} else {
 		nvol = ((vol % WM_VOL_CNT) * (master % WM_VOL_CNT)) /
 		       WM_VOL_MAX;
+		nvol += 0x1b;
+	}
 
 	wm_put(ice, index, nvol);
 	wm_put_nocache(ice, index, 0x180 | nvol);
@@ -778,7 +780,7 @@ static int wm_master_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_
 	for (ch = 0; ch < 2; ch++) {
 		unsigned int vol = ucontrol->value.integer.value[ch];
 		if (vol > WM_VOL_MAX)
-			continue;
+			vol = WM_VOL_MAX;
 		vol |= spec->master[ch] & WM_VOL_MUTE;
 		if (vol != spec->master[ch]) {
 			int dac;
@@ -834,8 +836,8 @@ static int wm_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *
 	for (i = 0; i < voices; i++) {
 		unsigned int vol = ucontrol->value.integer.value[i];
 		if (vol > WM_VOL_MAX)
-			continue;
-		vol |= spec->vol[ofs+i];
+			vol = WM_VOL_MAX;
+		vol |= spec->vol[ofs+i] & WM_VOL_MUTE;
 		if (vol != spec->vol[ofs+i]) {
 			spec->vol[ofs+i] = vol;
 			idx = WM_DAC_ATTEN + ofs + i;
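Both wm_*_vol_put() hunks replace `continue` with a clamp: an out-of-range volume from user space is now limited to WM_VOL_MAX rather than silently dropped, and only the stored mute bit is merged back in. The same idiom, sketched with hypothetical limits (VOL_MAX and VOL_MUTE stand in for the driver's WM_* constants):

    #include <stdio.h>

    #define VOL_MAX  100	/* hypothetical, stands in for WM_VOL_MAX */
    #define VOL_MUTE 0x8000	/* hypothetical, stands in for WM_VOL_MUTE */

    /* Clamp-and-merge: never drop a user value; clamp it into range and
     * carry only the stored mute flag across. */
    static unsigned int merge_vol(unsigned int user_vol, unsigned int stored)
    {
    	if (user_vol > VOL_MAX)
    		user_vol = VOL_MAX;		/* clamp, don't skip */
    	return user_vol | (stored & VOL_MUTE);	/* preserve mute bit only */
    }

    int main(void)
    {
    	printf("%#x\n", merge_vol(250, VOL_MUTE | 42));	/* -> 0x8064 */
    	return 0;
    }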
diff --git a/sound/soc/omap/omap3pandora.c b/sound/soc/omap/omap3pandora.c
index 71b2c161158d..68980c19a3bc 100644
--- a/sound/soc/omap/omap3pandora.c
+++ b/sound/soc/omap/omap3pandora.c
@@ -145,6 +145,7 @@ static const struct snd_soc_dapm_widget omap3pandora_in_dapm_widgets[] = {
 };
 
 static const struct snd_soc_dapm_route omap3pandora_out_map[] = {
+	{"PCM DAC", NULL, "APLL Enable"},
 	{"Headphone Amplifier", NULL, "PCM DAC"},
 	{"Line Out", NULL, "PCM DAC"},
 	{"Headphone Jack", NULL, "Headphone Amplifier"},