-rw-r--r--  Documentation/feature-removal-schedule.txt | 8
-rw-r--r--  arch/sh/Kconfig | 1
-rw-r--r--  arch/sh/boards/mach-landisk/gio.c | 10
-rw-r--r--  arch/sh/mm/cache-sh4.c | 22
-rw-r--r--  arch/sh/mm/cache.c | 10
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c | 10
-rw-r--r--  arch/x86/kvm/i8254.c | 2
-rw-r--r--  arch/x86/kvm/lapic.c | 2
-rw-r--r--  arch/x86/kvm/mmu.c | 16
-rw-r--r--  drivers/crypto/padlock-sha.c | 14
-rw-r--r--  drivers/input/input.c | 65
-rw-r--r--  drivers/input/keyboard/atkbd.c | 96
-rw-r--r--  drivers/input/misc/hp_sdc_rtc.c | 2
-rw-r--r--  drivers/input/mouse/logips2pp.c | 2
-rw-r--r--  drivers/input/mouse/synaptics.c | 10
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h | 13
-rw-r--r--  drivers/md/dm-exception-store.c | 38
-rw-r--r--  drivers/md/dm-exception-store.h | 8
-rw-r--r--  drivers/md/dm-log-userspace-base.c | 2
-rw-r--r--  drivers/md/dm-snap-persistent.c | 16
-rw-r--r--  drivers/md/dm-snap.c | 25
-rw-r--r--  drivers/md/dm.c | 11
-rw-r--r--  drivers/mmc/host/at91_mci.c | 1
-rw-r--r--  drivers/net/benet/be_cmds.c | 33
-rw-r--r--  drivers/net/benet/be_cmds.h | 5
-rw-r--r--  drivers/net/benet/be_main.c | 27
-rw-r--r--  drivers/net/ethoc.c | 21
-rw-r--r--  drivers/net/fec.c | 2
-rw-r--r--  drivers/net/ks8851.c | 42
-rw-r--r--  drivers/net/ks8851.h | 1
-rw-r--r--  drivers/net/niu.c | 2
-rw-r--r--  drivers/net/virtio_net.c | 2
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 27
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 2
-rw-r--r--  fs/nfs/super.c | 1
-rw-r--r--  fs/notify/dnotify/dnotify.c | 3
-rw-r--r--  fs/notify/inode_mark.c | 6
-rw-r--r--  fs/notify/notification.c | 2
-rw-r--r--  fs/pipe.c | 41
-rw-r--r--  include/net/inet_timewait_sock.h | 8
-rw-r--r--  kernel/power/suspend_test.c | 5
-rw-r--r--  net/bluetooth/hci_sysfs.c | 4
-rw-r--r--  net/bluetooth/l2cap.c | 9
-rw-r--r--  net/ipv4/inet_connection_sock.c | 34
-rw-r--r--  net/ipv4/ip_sockglue.c | 7
-rw-r--r--  net/ipv4/tcp.c | 59
-rw-r--r--  net/ipv4/tcp_minisocks.c | 5
-rw-r--r--  net/ipv6/ipv6_sockglue.c | 6
-rw-r--r--  net/unix/af_unix.c | 2
-rw-r--r--  virt/kvm/kvm_main.c | 7
50 files changed, 498 insertions, 249 deletions
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 04e6c819b28a..bc693fffabe0 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -418,6 +418,14 @@ When: 2.6.33
 Why: Should be implemented in userspace, policy daemon.
 Who: Johannes Berg <johannes@sipsolutions.net>
 
+---------------------------
+
+What: CONFIG_INOTIFY
+When: 2.6.33
+Why: last user (audit) will be converted to the newer more generic
+     and more easily maintained fsnotify subsystem
+Who: Eric Paris <eparis@redhat.com>
+
 ----------------------------
 
 What: lock_policy_rwsem_* and unlock_policy_rwsem_* will not be
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index b940424f8ccc..0dc7e3cbeffa 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -37,7 +37,6 @@ config SUPERH32
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
-	select HAVE_FTRACE_SYSCALLS
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_ARCH_KGDB
 	select ARCH_HIBERNATION_POSSIBLE if MMU
diff --git a/arch/sh/boards/mach-landisk/gio.c b/arch/sh/boards/mach-landisk/gio.c
index 25cdf7358000..528013188196 100644
--- a/arch/sh/boards/mach-landisk/gio.c
+++ b/arch/sh/boards/mach-landisk/gio.c
@@ -14,7 +14,6 @@
  */
 #include <linux/module.h>
 #include <linux/init.h>
-#include <linux/smp_lock.h>
 #include <linux/kdev_t.h>
 #include <linux/cdev.h>
 #include <linux/fs.h>
@@ -35,7 +34,7 @@ static int gio_open(struct inode *inode, struct file *filp)
 	int minor;
 	int ret = -ENOENT;
 
-	lock_kernel();
+	preempt_disable();
 	minor = MINOR(inode->i_rdev);
 	if (minor < DEVCOUNT) {
 		if (openCnt > 0) {
@@ -45,7 +44,7 @@ static int gio_open(struct inode *inode, struct file *filp)
 			ret = 0;
 		}
 	}
-	unlock_kernel();
+	preempt_enable();
 	return ret;
 }
 
@@ -60,8 +59,7 @@ static int gio_close(struct inode *inode, struct file *filp)
 	return 0;
 }
 
-static int gio_ioctl(struct inode *inode, struct file *filp,
-		     unsigned int cmd, unsigned long arg)
+static long gio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	unsigned int data;
 	static unsigned int addr = 0;
@@ -129,7 +127,7 @@ static const struct file_operations gio_fops = {
 	.owner = THIS_MODULE,
 	.open = gio_open,	/* open */
 	.release = gio_close,	/* release */
-	.ioctl = gio_ioctl,	/* ioctl */
+	.unlocked_ioctl = gio_ioctl,
 };
 
 static int __init gio_init(void)
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index a98c7d8984fa..519e2d16cd06 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -26,7 +26,7 @@
 #define MAX_DCACHE_PAGES	64	/* XXX: Tune for ways */
 #define MAX_ICACHE_PAGES	32
 
-static void __flush_cache_4096(unsigned long addr, unsigned long phys,
+static void __flush_cache_one(unsigned long addr, unsigned long phys,
 			       unsigned long exec_offset);
 
 /*
@@ -89,8 +89,7 @@ static void __uses_jump_to_uncached sh4_flush_icache_range(void *args)
 	local_irq_restore(flags);
 }
 
-static inline void flush_cache_4096(unsigned long start,
-				    unsigned long phys)
+static inline void flush_cache_one(unsigned long start, unsigned long phys)
 {
 	unsigned long flags, exec_offset = 0;
 
@@ -103,8 +102,7 @@ static inline void flush_cache_4096(unsigned long start,
 		exec_offset = 0x20000000;
 
 	local_irq_save(flags);
-	__flush_cache_4096(start | SH_CACHE_ASSOC,
-			   P1SEGADDR(phys), exec_offset);
+	__flush_cache_one(start | SH_CACHE_ASSOC, P1SEGADDR(phys), exec_offset);
 	local_irq_restore(flags);
 }
 
@@ -129,8 +127,8 @@ static void sh4_flush_dcache_page(void *arg)
 
 		/* Loop all the D-cache */
 		n = boot_cpu_data.dcache.n_aliases;
-		for (i = 0; i < n; i++, addr += 4096)
-			flush_cache_4096(addr, phys);
+		for (i = 0; i < n; i++, addr += PAGE_SIZE)
+			flush_cache_one(addr, phys);
 	}
 
 	wmb();
@@ -318,11 +316,11 @@ static void sh4_flush_cache_page(void *args)
 	/* We only need to flush D-cache when we have alias */
 	if ((address^phys) & alias_mask) {
 		/* Loop 4K of the D-cache */
-		flush_cache_4096(
+		flush_cache_one(
 			CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
 			phys);
 		/* Loop another 4K of the D-cache */
-		flush_cache_4096(
+		flush_cache_one(
 			CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
 			phys);
 	}
@@ -337,7 +335,7 @@ static void sh4_flush_cache_page(void *args)
 		 * kernel has never executed the code through its identity
 		 * translation.
 		 */
-		flush_cache_4096(
+		flush_cache_one(
 			CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
 			phys);
 	}
@@ -393,7 +391,7 @@ static void sh4_flush_cache_range(void *args)
 }
 
 /**
- * __flush_cache_4096
+ * __flush_cache_one
  *
  * @addr:  address in memory mapped cache array
  * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
@@ -406,7 +404,7 @@ static void sh4_flush_cache_range(void *args)
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
-static void __flush_cache_4096(unsigned long addr, unsigned long phys,
+static void __flush_cache_one(unsigned long addr, unsigned long phys,
 			       unsigned long exec_offset)
 {
 	int way_count;
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 5e1091be9dc4..a2dc7f9ecc51 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -265,6 +265,8 @@ static void __init emit_cache_params(void)
 
 void __init cpu_cache_init(void)
 {
+	unsigned int cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
+
 	compute_alias(&boot_cpu_data.icache);
 	compute_alias(&boot_cpu_data.dcache);
 	compute_alias(&boot_cpu_data.scache);
@@ -273,6 +275,13 @@ void __init cpu_cache_init(void)
 	__flush_purge_region = noop__flush_region;
 	__flush_invalidate_region = noop__flush_region;
 
+	/*
+	 * No flushing is necessary in the disabled cache case so we can
+	 * just keep the noop functions in local_flush_..() and __flush_..()
+	 */
+	if (unlikely(cache_disabled))
+		goto skip;
+
 	if (boot_cpu_data.family == CPU_FAMILY_SH2) {
 		extern void __weak sh2_cache_init(void);
 
@@ -312,5 +321,6 @@ void __init cpu_cache_init(void)
 		sh5_cache_init();
 	}
 
+skip:
 	emit_cache_params();
 }
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 585edebe12cf..49c552c060e9 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -82,7 +82,7 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
 		return -EINVAL;
 	}
 
-	if (irq_fpu_usable())
+	if (!irq_fpu_usable())
 		err = crypto_aes_expand_key(ctx, in_key, key_len);
 	else {
 		kernel_fpu_begin();
@@ -103,7 +103,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 
-	if (irq_fpu_usable())
+	if (!irq_fpu_usable())
 		crypto_aes_encrypt_x86(ctx, dst, src);
 	else {
 		kernel_fpu_begin();
@@ -116,7 +116,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 
-	if (irq_fpu_usable())
+	if (!irq_fpu_usable())
 		crypto_aes_decrypt_x86(ctx, dst, src);
 	else {
 		kernel_fpu_begin();
@@ -342,7 +342,7 @@ static int ablk_encrypt(struct ablkcipher_request *req)
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 
-	if (irq_fpu_usable()) {
+	if (!irq_fpu_usable()) {
 		struct ablkcipher_request *cryptd_req =
 			ablkcipher_request_ctx(req);
 		memcpy(cryptd_req, req, sizeof(*req));
@@ -363,7 +363,7 @@ static int ablk_decrypt(struct ablkcipher_request *req)
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 
-	if (irq_fpu_usable()) {
+	if (!irq_fpu_usable()) {
 		struct ablkcipher_request *cryptd_req =
 			ablkcipher_request_ctx(req);
 		memcpy(cryptd_req, req, sizeof(*req));
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 82ad523b4901..144e7f60b5e2 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -116,7 +116,7 @@ static s64 __kpit_elapsed(struct kvm *kvm)
 	 * itself with the initial count and continues counting
 	 * from there.
 	 */
-	remaining = hrtimer_expires_remaining(&ps->pit_timer.timer);
+	remaining = hrtimer_get_remaining(&ps->pit_timer.timer);
 	elapsed = ps->pit_timer.period - ktime_to_ns(remaining);
 	elapsed = mod_64(elapsed, ps->pit_timer.period);
 
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 7024224f0fc8..23c217692ea9 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -521,7 +521,7 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic)
 	if (apic_get_reg(apic, APIC_TMICT) == 0)
 		return 0;
 
-	remaining = hrtimer_expires_remaining(&apic->lapic_timer.timer);
+	remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
 	if (ktime_to_ns(remaining) < 0)
 		remaining = ktime_set(0, 0);
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 685a4ffac8e6..818b92ad82cf 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -748,7 +748,8 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 	return write_protected;
 }
 
-static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
+static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
+			   unsigned long data)
 {
 	u64 *spte;
 	int need_tlb_flush = 0;
@@ -763,7 +764,8 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
 	return need_tlb_flush;
 }
 
-static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
+static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
+			     unsigned long data)
 {
 	int need_flush = 0;
 	u64 *spte, new_spte;
@@ -799,9 +801,10 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
 	return 0;
 }
 
-static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, u64 data,
+static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
+			  unsigned long data,
 			  int (*handler)(struct kvm *kvm, unsigned long *rmapp,
-					 u64 data))
+					 unsigned long data))
 {
 	int i, j;
 	int retval = 0;
@@ -846,10 +849,11 @@ int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
 
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
-	kvm_handle_hva(kvm, hva, (u64)&pte, kvm_set_pte_rmapp);
+	kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
 }
 
-static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
+static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
+			 unsigned long data)
 {
 	u64 *spte;
 	int young = 0;
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 76cb6b345e7b..0af80577dc7b 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -24,6 +24,12 @@
 #include <asm/i387.h>
 #include "padlock.h"
 
+#ifdef CONFIG_64BIT
+#define STACK_ALIGN 16
+#else
+#define STACK_ALIGN 4
+#endif
+
 struct padlock_sha_desc {
 	struct shash_desc fallback;
 };
@@ -64,7 +70,9 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
 	/* We can't store directly to *out as it may be unaligned. */
 	/* BTW Don't reduce the buffer size below 128 Bytes!
 	 * PadLock microcode needs it that big. */
-	char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
+	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
+		((aligned(STACK_ALIGN)));
+	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
 	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
 	struct sha1_state state;
 	unsigned int space;
@@ -128,7 +136,9 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
 	/* We can't store directly to *out as it may be unaligned. */
 	/* BTW Don't reduce the buffer size below 128 Bytes!
 	 * PadLock microcode needs it that big. */
-	char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
+	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
+		((aligned(STACK_ALIGN)));
+	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
 	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
 	struct sha256_state state;
 	unsigned int space;
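The padlock-sha change above relies on a common trick: over-allocate the scratch buffer by the difference between the desired alignment and whatever alignment the stack actually guarantees, then round the pointer up, so the result is PADLOCK_ALIGNMENT-aligned regardless of how the compiler laid out the frame. A minimal standalone C sketch of the same idea follows; align_up() is only a stand-in for the kernel's PTR_ALIGN macro, and the alignment values are illustrative.

#include <stdint.h>
#include <stdio.h>

#define DESIRED_ALIGN 16   /* stand-in for PADLOCK_ALIGNMENT */
#define MIN_ALIGN      4   /* worst-case alignment the ABI guarantees */

/* Round a pointer up to the next 'align' boundary (align must be a power of 2). */
static void *align_up(void *p, uintptr_t align)
{
	return (void *)(((uintptr_t)p + align - 1) & ~(align - 1));
}

int main(void)
{
	/* Over-allocate so an aligned 128-byte window always fits inside buf. */
	char buf[128 + DESIRED_ALIGN - MIN_ALIGN];
	char *result = align_up(buf, DESIRED_ALIGN);

	printf("buf=%p result=%p aligned=%d\n", (void *)buf, (void *)result,
	       (int)(((uintptr_t)result % DESIRED_ALIGN) == 0));
	return 0;
}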
diff --git a/drivers/input/input.c b/drivers/input/input.c
index c6f88ebb40c7..cc763c96fada 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -782,10 +782,29 @@ static unsigned int input_proc_devices_poll(struct file *file, poll_table *wait)
 	return 0;
 }
 
+union input_seq_state {
+	struct {
+		unsigned short pos;
+		bool mutex_acquired;
+	};
+	void *p;
+};
+
 static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos)
 {
-	if (mutex_lock_interruptible(&input_mutex))
-		return NULL;
+	union input_seq_state *state = (union input_seq_state *)&seq->private;
+	int error;
+
+	/* We need to fit into seq->private pointer */
+	BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));
+
+	error = mutex_lock_interruptible(&input_mutex);
+	if (error) {
+		state->mutex_acquired = false;
+		return ERR_PTR(error);
+	}
+
+	state->mutex_acquired = true;
 
 	return seq_list_start(&input_dev_list, *pos);
 }
@@ -795,9 +814,12 @@ static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	return seq_list_next(v, &input_dev_list, pos);
 }
 
-static void input_devices_seq_stop(struct seq_file *seq, void *v)
+static void input_seq_stop(struct seq_file *seq, void *v)
 {
-	mutex_unlock(&input_mutex);
+	union input_seq_state *state = (union input_seq_state *)&seq->private;
+
+	if (state->mutex_acquired)
+		mutex_unlock(&input_mutex);
 }
 
 static void input_seq_print_bitmap(struct seq_file *seq, const char *name,
@@ -861,7 +883,7 @@ static int input_devices_seq_show(struct seq_file *seq, void *v)
 static const struct seq_operations input_devices_seq_ops = {
 	.start = input_devices_seq_start,
 	.next = input_devices_seq_next,
-	.stop = input_devices_seq_stop,
+	.stop = input_seq_stop,
 	.show = input_devices_seq_show,
 };
 
@@ -881,40 +903,49 @@ static const struct file_operations input_devices_fileops = {
 
 static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos)
 {
-	if (mutex_lock_interruptible(&input_mutex))
-		return NULL;
+	union input_seq_state *state = (union input_seq_state *)&seq->private;
+	int error;
+
+	/* We need to fit into seq->private pointer */
+	BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));
+
+	error = mutex_lock_interruptible(&input_mutex);
+	if (error) {
+		state->mutex_acquired = false;
+		return ERR_PTR(error);
+	}
+
+	state->mutex_acquired = true;
+	state->pos = *pos;
 
-	seq->private = (void *)(unsigned long)*pos;
 	return seq_list_start(&input_handler_list, *pos);
 }
 
 static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-	seq->private = (void *)(unsigned long)(*pos + 1);
-	return seq_list_next(v, &input_handler_list, pos);
-}
+	union input_seq_state *state = (union input_seq_state *)&seq->private;
 
-static void input_handlers_seq_stop(struct seq_file *seq, void *v)
-{
-	mutex_unlock(&input_mutex);
+	state->pos = *pos + 1;
+	return seq_list_next(v, &input_handler_list, pos);
 }
 
 static int input_handlers_seq_show(struct seq_file *seq, void *v)
 {
 	struct input_handler *handler = container_of(v, struct input_handler, node);
+	union input_seq_state *state = (union input_seq_state *)&seq->private;
 
-	seq_printf(seq, "N: Number=%ld Name=%s",
-		   (unsigned long)seq->private, handler->name);
+	seq_printf(seq, "N: Number=%u Name=%s", state->pos, handler->name);
 	if (handler->fops)
 		seq_printf(seq, " Minor=%d", handler->minor);
 	seq_putc(seq, '\n');
 
 	return 0;
 }
+
 static const struct seq_operations input_handlers_seq_ops = {
 	.start = input_handlers_seq_start,
 	.next = input_handlers_seq_next,
-	.stop = input_handlers_seq_stop,
+	.stop = input_seq_stop,
 	.show = input_handlers_seq_show,
 };
 
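The input.c changes above avoid a separate allocation by packing the iterator state into the pointer-sized seq->private field through a union, and by recording whether the mutex was actually taken so ->stop() can skip the unlock after an interrupted ->start(). A minimal standalone sketch of the union-packing part is below; the BUILD_BUG_ON in the patch becomes a _Static_assert here, and the struct and field names are illustrative rather than the kernel's.

#include <stdbool.h>
#include <stdio.h>

/* Pointer-sized slot, playing the role of struct seq_file::private. */
struct seq_file_like {
	void *private;
};

/* Small per-open state packed so it occupies exactly one pointer. */
union iter_state {
	struct {
		unsigned short pos;
		bool mutex_acquired;
	};
	void *p;
};

_Static_assert(sizeof(union iter_state) == sizeof(void *),
	       "state must fit in the pointer-sized private field");

int main(void)
{
	struct seq_file_like seq = { .private = NULL };
	union iter_state *state = (union iter_state *)&seq.private;

	state->pos = 42;		/* written by ->start()/->next() */
	state->mutex_acquired = true;	/* checked later by ->stop() */

	printf("pos=%u acquired=%d\n", state->pos, (int)state->mutex_acquired);
	return 0;
}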
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 4709e15af607..a6512372c7a3 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -574,11 +574,22 @@ static void atkbd_event_work(struct work_struct *work)
 
 	mutex_lock(&atkbd->event_mutex);
 
-	if (test_and_clear_bit(ATKBD_LED_EVENT_BIT, &atkbd->event_mask))
-		atkbd_set_leds(atkbd);
+	if (!atkbd->enabled) {
+		/*
+		 * Serio ports are resumed asynchronously so while driver core
+		 * thinks that device is already fully operational in reality
+		 * it may not be ready yet. In this case we need to keep
+		 * rescheduling till reconnect completes.
+		 */
+		schedule_delayed_work(&atkbd->event_work,
+					msecs_to_jiffies(100));
+	} else {
+		if (test_and_clear_bit(ATKBD_LED_EVENT_BIT, &atkbd->event_mask))
+			atkbd_set_leds(atkbd);
 
-	if (test_and_clear_bit(ATKBD_REP_EVENT_BIT, &atkbd->event_mask))
-		atkbd_set_repeat_rate(atkbd);
+		if (test_and_clear_bit(ATKBD_REP_EVENT_BIT, &atkbd->event_mask))
+			atkbd_set_repeat_rate(atkbd);
+	}
 
 	mutex_unlock(&atkbd->event_mutex);
 }
@@ -770,6 +781,30 @@ static int atkbd_select_set(struct atkbd *atkbd, int target_set, int allow_extra
 	return 3;
 }
 
+static int atkbd_reset_state(struct atkbd *atkbd)
+{
+	struct ps2dev *ps2dev = &atkbd->ps2dev;
+	unsigned char param[1];
+
+/*
+ * Set the LEDs to a predefined state (all off).
+ */
+
+	param[0] = 0;
+	if (ps2_command(ps2dev, param, ATKBD_CMD_SETLEDS))
+		return -1;
+
+/*
+ * Set autorepeat to fastest possible.
+ */
+
+	param[0] = 0;
+	if (ps2_command(ps2dev, param, ATKBD_CMD_SETREP))
+		return -1;
+
+	return 0;
+}
+
 static int atkbd_activate(struct atkbd *atkbd)
 {
 	struct ps2dev *ps2dev = &atkbd->ps2dev;
@@ -852,29 +887,6 @@ static unsigned int atkbd_hp_forced_release_keys[] = {
 };
 
 /*
- * Inventec system with broken key release on volume keys
- */
-static unsigned int atkbd_inventec_forced_release_keys[] = {
-	0xae, 0xb0, -1U
-};
-
-/*
- * Perform fixup for HP Pavilion ZV6100 laptop that doesn't generate release
- * for its volume buttons
- */
-static unsigned int atkbd_hp_zv6100_forced_release_keys[] = {
-	0xae, 0xb0, -1U
-};
-
-/*
- * Perform fixup for HP (Compaq) Presario R4000 R4100 R4200 that don't generate
- * release for their volume buttons
- */
-static unsigned int atkbd_hp_r4000_forced_release_keys[] = {
-	0xae, 0xb0, -1U
-};
-
-/*
  * Samsung NC10,NC20 with Fn+F? key release not working
  */
 static unsigned int atkbd_samsung_forced_release_keys[] = {
@@ -882,14 +894,6 @@ static unsigned int atkbd_samsung_forced_release_keys[] = {
 };
 
 /*
- * The volume up and volume down special keys on a Fujitsu Amilo PA 1510 laptop
- * do not generate release events so we have to do it ourselves.
- */
-static unsigned int atkbd_amilo_pa1510_forced_release_keys[] = {
-	0xb0, 0xae, -1U
-};
-
-/*
  * Amilo Pi 3525 key release for Fn+Volume keys not working
  */
 static unsigned int atkbd_amilo_pi3525_forced_release_keys[] = {
@@ -911,6 +915,14 @@ static unsigned int atkdb_soltech_ta12_forced_release_keys[] = {
 };
 
 /*
+ * Many notebooks don't send key release event for volume up/down
+ * keys, with key list below common among them
+ */
+static unsigned int atkbd_volume_forced_release_keys[] = {
+	0xae, 0xb0, -1U
+};
+
+/*
 * atkbd_set_keycode_table() initializes keyboard's keycode table
 * according to the selected scancode set
 */
@@ -1087,6 +1099,7 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
 		}
 
 		atkbd->set = atkbd_select_set(atkbd, atkbd_set, atkbd_extra);
+		atkbd_reset_state(atkbd);
 		atkbd_activate(atkbd);
 
 	} else {
@@ -1267,6 +1280,7 @@ static ssize_t atkbd_set_extra(struct atkbd *atkbd, const char *buf, size_t coun
 
 	atkbd->dev = new_dev;
 	atkbd->set = atkbd_select_set(atkbd, atkbd->set, value);
+	atkbd_reset_state(atkbd);
 	atkbd_activate(atkbd);
 	atkbd_set_keycode_table(atkbd);
 	atkbd_set_device_attrs(atkbd);
@@ -1548,7 +1562,7 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion ZV6100"),
 		},
 		.callback = atkbd_setup_forced_release,
-		.driver_data = atkbd_hp_zv6100_forced_release_keys,
+		.driver_data = atkbd_volume_forced_release_keys,
 	},
 	{
 		.ident = "HP Presario R4000",
@@ -1557,7 +1571,7 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4000"),
 		},
 		.callback = atkbd_setup_forced_release,
-		.driver_data = atkbd_hp_r4000_forced_release_keys,
+		.driver_data = atkbd_volume_forced_release_keys,
 	},
 	{
 		.ident = "HP Presario R4100",
@@ -1566,7 +1580,7 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4100"),
 		},
 		.callback = atkbd_setup_forced_release,
-		.driver_data = atkbd_hp_r4000_forced_release_keys,
+		.driver_data = atkbd_volume_forced_release_keys,
 	},
 	{
 		.ident = "HP Presario R4200",
@@ -1575,7 +1589,7 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4200"),
 		},
 		.callback = atkbd_setup_forced_release,
-		.driver_data = atkbd_hp_r4000_forced_release_keys,
+		.driver_data = atkbd_volume_forced_release_keys,
 	},
 	{
 		.ident = "Inventec Symphony",
@@ -1584,7 +1598,7 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "SYMPHONY 6.0/7.0"),
 		},
 		.callback = atkbd_setup_forced_release,
-		.driver_data = atkbd_inventec_forced_release_keys,
+		.driver_data = atkbd_volume_forced_release_keys,
 	},
 	{
 		.ident = "Samsung NC10",
@@ -1620,7 +1634,7 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pa 1510"),
 		},
 		.callback = atkbd_setup_forced_release,
-		.driver_data = atkbd_amilo_pa1510_forced_release_keys,
+		.driver_data = atkbd_volume_forced_release_keys,
 	},
 	{
 		.ident = "Fujitsu Amilo Pi 3525",
diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c
index 216a559f55ea..ea821b546969 100644
--- a/drivers/input/misc/hp_sdc_rtc.c
+++ b/drivers/input/misc/hp_sdc_rtc.c
@@ -209,7 +209,7 @@ static inline int hp_sdc_rtc_read_rt(struct timeval *res) {
 
 /* Read the i8042 fast handshake timer */
 static inline int hp_sdc_rtc_read_fhs(struct timeval *res) {
-	uint64_t raw;
+	int64_t raw;
 	unsigned int tenms;
 
 	raw = hp_sdc_rtc_read_i8042timer(HP_SDC_CMD_LOAD_FHS, 2);
diff --git a/drivers/input/mouse/logips2pp.c b/drivers/input/mouse/logips2pp.c
index de745d751162..ab5dc5f5fd83 100644
--- a/drivers/input/mouse/logips2pp.c
+++ b/drivers/input/mouse/logips2pp.c
@@ -219,7 +219,7 @@ static const struct ps2pp_info *get_model_info(unsigned char model)
 		PS2PP_WHEEL | PS2PP_SIDE_BTN | PS2PP_TASK_BTN |
 		PS2PP_EXTRA_BTN | PS2PP_NAV_BTN | PS2PP_HWHEEL },
 	{ 72,	PS2PP_KIND_TRACKMAN,	0 },			/* T-CH11: TrackMan Marble */
-	{ 73,	0,			PS2PP_SIDE_BTN },
+	{ 73,	PS2PP_KIND_TRACKMAN,	PS2PP_SIDE_BTN },	/* TrackMan FX */
 	{ 75,	PS2PP_KIND_WHEEL,	PS2PP_WHEEL },
 	{ 76,	PS2PP_KIND_WHEEL,	PS2PP_WHEEL },
 	{ 79,	PS2PP_KIND_TRACKMAN,	PS2PP_WHEEL },		/* TrackMan with wheel */
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index b66ff1ac7dea..f4a61252bcc9 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -652,6 +652,16 @@ static const struct dmi_system_id toshiba_dmi_table[] = {
 			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
 			DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M300"),
 		},
+
+	},
+	{
+		.ident = "Toshiba Portege M300",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Portable PC"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "Version 1.0"),
+		},
+
 	},
 	{ }
 };
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index a39bc4eb902b..a537925f7651 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -327,6 +327,17 @@ static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = {
 		},
 	},
 	{
+		/*
+		 * Reset and GET ID commands issued via KBD port are
+		 * sometimes being delivered to AUX3.
+		 */
+		.ident = "Sony Vaio FZ-240E",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ240E"),
+		},
+	},
+	{
 		.ident = "Amoi M636/A737",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."),
@@ -661,7 +672,7 @@ static void i8042_pnp_exit(void)
 static int __init i8042_pnp_init(void)
 {
 	char kbd_irq_str[4] = { 0 }, aux_irq_str[4] = { 0 };
-	int pnp_data_busted = false;
+	bool pnp_data_busted = false;
 	int err;
 
 #ifdef CONFIG_X86
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 556acff3952f..7dbe652efb5a 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -138,16 +138,6 @@ int dm_exception_store_type_unregister(struct dm_exception_store_type *type)
 }
 EXPORT_SYMBOL(dm_exception_store_type_unregister);
 
-/*
- * Round a number up to the nearest 'size' boundary.  size must
- * be a power of 2.
- */
-static ulong round_up(ulong n, ulong size)
-{
-	size--;
-	return (n + size) & ~size;
-}
-
 static int set_chunk_size(struct dm_exception_store *store,
			  const char *chunk_size_arg, char **error)
 {
@@ -155,7 +145,8 @@ static int set_chunk_size(struct dm_exception_store *store,
 	char *value;
 
 	chunk_size_ulong = simple_strtoul(chunk_size_arg, &value, 10);
-	if (*chunk_size_arg == '\0' || *value != '\0') {
+	if (*chunk_size_arg == '\0' || *value != '\0' ||
+	    chunk_size_ulong > UINT_MAX) {
 		*error = "Invalid chunk size";
 		return -EINVAL;
 	}
@@ -165,40 +156,35 @@ static int set_chunk_size(struct dm_exception_store *store,
 		return 0;
 	}
 
-	/*
-	 * Chunk size must be multiple of page size.  Silently
-	 * round up if it's not.
-	 */
-	chunk_size_ulong = round_up(chunk_size_ulong, PAGE_SIZE >> 9);
-
-	return dm_exception_store_set_chunk_size(store, chunk_size_ulong,
+	return dm_exception_store_set_chunk_size(store,
+						 (unsigned) chunk_size_ulong,
						 error);
 }
 
 int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
-				      unsigned long chunk_size_ulong,
+				      unsigned chunk_size,
				      char **error)
 {
 	/* Check chunk_size is a power of 2 */
-	if (!is_power_of_2(chunk_size_ulong)) {
+	if (!is_power_of_2(chunk_size)) {
 		*error = "Chunk size is not a power of 2";
 		return -EINVAL;
 	}
 
 	/* Validate the chunk size against the device block size */
-	if (chunk_size_ulong % (bdev_logical_block_size(store->cow->bdev) >> 9)) {
+	if (chunk_size % (bdev_logical_block_size(store->cow->bdev) >> 9)) {
 		*error = "Chunk size is not a multiple of device blocksize";
 		return -EINVAL;
 	}
 
-	if (chunk_size_ulong > INT_MAX >> SECTOR_SHIFT) {
+	if (chunk_size > INT_MAX >> SECTOR_SHIFT) {
 		*error = "Chunk size is too high";
 		return -EINVAL;
 	}
 
-	store->chunk_size = chunk_size_ulong;
-	store->chunk_mask = chunk_size_ulong - 1;
-	store->chunk_shift = ffs(chunk_size_ulong) - 1;
+	store->chunk_size = chunk_size;
+	store->chunk_mask = chunk_size - 1;
+	store->chunk_shift = ffs(chunk_size) - 1;
 
 	return 0;
 }
@@ -251,7 +237,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
 
 	r = set_chunk_size(tmp_store, argv[2], &ti->error);
 	if (r)
-		goto bad_cow;
+		goto bad_ctr;
 
 	r = type->ctr(tmp_store, 0, NULL);
 	if (r) {
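The dm-exception-store change above drops the old silent round-up and instead rejects bad chunk sizes outright: not parseable, larger than UINT_MAX, not a power of two, not a multiple of the device block size, or too large to convert to bytes. A standalone sketch of the same sequence of checks follows; the 512-byte sector shift and the limits are carried over from the patch, while validate_chunk_size() itself is only an illustrative helper, not a device-mapper API.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define SECTOR_SHIFT 9	/* chunk sizes are expressed in 512-byte sectors */

static bool is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

/* Returns NULL when the chunk size is acceptable, otherwise an error string. */
static const char *validate_chunk_size(unsigned long chunk_size,
				       unsigned long block_size_sectors)
{
	if (chunk_size > UINT_MAX)
		return "Invalid chunk size";
	if (!is_power_of_2(chunk_size))
		return "Chunk size is not a power of 2";
	if (chunk_size % block_size_sectors)
		return "Chunk size is not a multiple of device blocksize";
	if (chunk_size > (unsigned long)(INT_MAX >> SECTOR_SHIFT))
		return "Chunk size is too high";
	return NULL;
}

int main(void)
{
	/* 16 sectors (8 KiB) on a device with 512-byte logical blocks. */
	const char *err = validate_chunk_size(16, 1);

	printf("%s\n", err ? err : "chunk size accepted");
	return 0;
}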
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index 812c71872ba0..8a223a48802c 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -101,9 +101,9 @@ struct dm_exception_store {
 	struct dm_dev *cow;
 
 	/* Size of data blocks saved - must be a power of 2 */
-	chunk_t chunk_size;
-	chunk_t chunk_mask;
-	chunk_t chunk_shift;
+	unsigned chunk_size;
+	unsigned chunk_mask;
+	unsigned chunk_shift;
 
 	void *context;
 };
@@ -169,7 +169,7 @@ int dm_exception_store_type_register(struct dm_exception_store_type *type);
 int dm_exception_store_type_unregister(struct dm_exception_store_type *type);
 
 int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
-				      unsigned long chunk_size_ulong,
+				      unsigned chunk_size,
				      char **error);
 
 int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index 652bd33109e3..7ac2c1450d10 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -156,7 +156,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
 	}
 
 	/* The ptr value is sufficient for local unique id */
-	lc->luid = (uint64_t)lc;
+	lc->luid = (unsigned long)lc;
 
 	lc->ti = ti;
 
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index d5b2e08750d5..0c746420c008 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -284,12 +284,13 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 {
 	int r;
 	struct disk_header *dh;
-	chunk_t chunk_size;
+	unsigned chunk_size;
 	int chunk_size_supplied = 1;
 	char *chunk_err;
 
 	/*
-	 * Use default chunk size (or hardsect_size, if larger) if none supplied
+	 * Use default chunk size (or logical_block_size, if larger)
+	 * if none supplied
 	 */
 	if (!ps->store->chunk_size) {
 		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
@@ -334,10 +335,9 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 		return 0;
 
 	if (chunk_size_supplied)
-		DMWARN("chunk size %llu in device metadata overrides "
-		       "table chunk size of %llu.",
-		       (unsigned long long)chunk_size,
-		       (unsigned long long)ps->store->chunk_size);
+		DMWARN("chunk size %u in device metadata overrides "
+		       "table chunk size of %u.",
+		       chunk_size, ps->store->chunk_size);
 
 	/* We had a bogus chunk_size. Fix stuff up. */
 	free_area(ps);
@@ -345,8 +345,8 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 	r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
					      &chunk_err);
 	if (r) {
-		DMERR("invalid on-disk chunk size %llu: %s.",
-		      (unsigned long long)chunk_size, chunk_err);
+		DMERR("invalid on-disk chunk size %u: %s.",
+		      chunk_size, chunk_err);
 		return r;
 	}
 
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 57f1bf7f3b7a..3a3ba46e6d4b 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -296,6 +296,7 @@ static void __insert_origin(struct origin *o)
 */
 static int register_snapshot(struct dm_snapshot *snap)
 {
+	struct dm_snapshot *l;
 	struct origin *o, *new_o;
 	struct block_device *bdev = snap->origin->bdev;
 
@@ -319,7 +320,11 @@ static int register_snapshot(struct dm_snapshot *snap)
 		__insert_origin(o);
 	}
 
-	list_add_tail(&snap->list, &o->snapshots);
+	/* Sort the list according to chunk size, largest-first smallest-last */
+	list_for_each_entry(l, &o->snapshots, list)
+		if (l->store->chunk_size < snap->store->chunk_size)
+			break;
+	list_add_tail(&snap->list, &l->list);
 
 	up_write(&_origins_lock);
 	return 0;
@@ -668,6 +673,11 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	bio_list_init(&s->queued_bios);
 	INIT_WORK(&s->queued_bios_work, flush_queued_bios);
 
+	if (!s->store->chunk_size) {
+		ti->error = "Chunk size not set";
+		goto bad_load_and_register;
+	}
+
 	/* Add snapshot to the list of snapshots for this origin */
 	/* Exceptions aren't triggered till snapshot_resume() is called */
 	if (register_snapshot(s)) {
@@ -951,7 +961,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
 
 	src.bdev = bdev;
 	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
-	src.count = min(s->store->chunk_size, dev_size - src.sector);
+	src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);
 
 	dest.bdev = s->store->cow->bdev;
 	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
@@ -1142,6 +1152,8 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
 	unsigned sz = 0;
 	struct dm_snapshot *snap = ti->private;
 
+	down_write(&snap->lock);
+
 	switch (type) {
 	case STATUSTYPE_INFO:
 		if (!snap->valid)
@@ -1173,6 +1185,8 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
 		break;
 	}
 
+	up_write(&snap->lock);
+
 	return 0;
 }
 
@@ -1388,7 +1402,7 @@ static void origin_resume(struct dm_target *ti)
 	struct dm_dev *dev = ti->private;
 	struct dm_snapshot *snap;
 	struct origin *o;
-	chunk_t chunk_size = 0;
+	unsigned chunk_size = 0;
 
 	down_read(&_origins_lock);
 	o = __lookup_origin(dev->bdev);
@@ -1465,7 +1479,7 @@ static int __init dm_snapshot_init(void)
 	r = dm_register_target(&snapshot_target);
 	if (r) {
 		DMERR("snapshot target register failed %d", r);
-		return r;
+		goto bad_register_snapshot_target;
 	}
 
 	r = dm_register_target(&origin_target);
@@ -1522,6 +1536,9 @@ bad2:
 	dm_unregister_target(&origin_target);
 bad1:
 	dm_unregister_target(&snapshot_target);
+
+bad_register_snapshot_target:
+	dm_exception_store_exit();
 	return r;
 }
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 376f1ab48a24..724efc63904d 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -47,6 +47,7 @@ struct dm_io {
 	atomic_t io_count;
 	struct bio *bio;
 	unsigned long start_time;
+	spinlock_t endio_lock;
 };
 
 /*
@@ -578,8 +579,12 @@ static void dec_pending(struct dm_io *io, int error)
 	struct mapped_device *md = io->md;
 
 	/* Push-back supersedes any I/O errors */
-	if (error && !(io->error > 0 && __noflush_suspending(md)))
-		io->error = error;
+	if (unlikely(error)) {
+		spin_lock_irqsave(&io->endio_lock, flags);
+		if (!(io->error > 0 && __noflush_suspending(md)))
+			io->error = error;
+		spin_unlock_irqrestore(&io->endio_lock, flags);
+	}
 
 	if (atomic_dec_and_test(&io->io_count)) {
 		if (io->error == DM_ENDIO_REQUEUE) {
@@ -1226,6 +1231,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
 	atomic_set(&ci.io->io_count, 1);
 	ci.io->bio = bio;
 	ci.io->md = md;
+	spin_lock_init(&ci.io->endio_lock);
 	ci.sector = bio->bi_sector;
 	ci.sector_count = bio_sectors(bio);
 	if (unlikely(bio_empty_barrier(bio)))
@@ -1822,6 +1828,7 @@ static struct mapped_device *alloc_dev(int minor)
 bad_bdev:
 	destroy_workqueue(md->wq);
 bad_thread:
+	del_gendisk(md->disk);
 	put_disk(md->disk);
 bad_disk:
 	blk_cleanup_queue(md->queue);
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index e556d42cc45a..63924e0c7ea9 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -72,7 +72,6 @@
 #include <asm/irq.h>
 #include <asm/gpio.h>
 
-#include <asm/mach/mmc.h>
 #include <mach/board.h>
 #include <mach/cpu.h>
 #include <mach/at91_mci.h>
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 89876ade5e33..28a0eda92680 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -243,15 +243,26 @@ static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
 
 int be_cmd_POST(struct be_adapter *adapter)
 {
-	u16 stage, error;
+	u16 stage;
+	int status, timeout = 0;
 
-	error = be_POST_stage_get(adapter, &stage);
-	if (error || stage != POST_STAGE_ARMFW_RDY) {
-		dev_err(&adapter->pdev->dev, "POST failed.\n");
-		return -1;
-	}
+	do {
+		status = be_POST_stage_get(adapter, &stage);
+		if (status) {
+			dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
+				stage);
+			return -1;
+		} else if (stage != POST_STAGE_ARMFW_RDY) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule_timeout(2 * HZ);
+			timeout += 2;
+		} else {
+			return 0;
+		}
+	} while (timeout < 20);
 
-	return 0;
+	dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
+	return -1;
 }
 
 static inline void *embedded_payload(struct be_mcc_wrb *wrb)
@@ -729,8 +740,8 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
 /* Create an rx filtering policy configuration on an i/f
 * Uses mbox
 */
-int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac,
-		bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
+int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
+		u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_if_create *req;
@@ -746,8 +757,8 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac,
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
 
-	req->capability_flags = cpu_to_le32(flags);
-	req->enable_flags = cpu_to_le32(flags);
+	req->capability_flags = cpu_to_le32(cap_flags);
+	req->enable_flags = cpu_to_le32(en_flags);
 	req->pmac_invalid = pmac_invalid;
 	if (!pmac_invalid)
 		memcpy(req->mac_addr, mac, ETH_ALEN);
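The reworked be_cmd_POST() above turns a single status check into a bounded poll: re-read the POST stage every couple of seconds, fail immediately if the query itself errors, succeed as soon as the firmware reports ready, and give up after roughly twenty seconds. A minimal standalone sketch of that retry shape is below; check_ready(), wait_for_ready() and the timings are placeholder names for illustration, not the driver's API.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define POLL_INTERVAL_SEC 2
#define POLL_LIMIT_SEC    20

/* Placeholder for a hardware/firmware readiness query. */
static int check_ready(bool *ready)
{
	static int calls;
	*ready = (++calls >= 3);	/* pretend it becomes ready on the 3rd poll */
	return 0;			/* 0 = the query itself succeeded */
}

static int wait_for_ready(void)
{
	int waited = 0;

	do {
		bool ready = false;

		if (check_ready(&ready))
			return -1;	/* query failed: give up at once */
		if (ready)
			return 0;	/* firmware is ready */

		sleep(POLL_INTERVAL_SEC);
		waited += POLL_INTERVAL_SEC;
	} while (waited < POLL_LIMIT_SEC);

	return -1;			/* timed out */
}

int main(void)
{
	printf("wait_for_ready() = %d\n", wait_for_ready());
	return 0;
}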
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index a86f917f85f4..49953787e41c 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -720,8 +720,9 @@ extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
 extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
			u32 if_id, u32 *pmac_id);
 extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id);
-extern int be_cmd_if_create(struct be_adapter *adapter, u32 if_flags, u8 *mac,
-			bool pmac_invalid, u32 *if_handle, u32 *pmac_id);
+extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
+			u32 en_flags, u8 *mac, bool pmac_invalid,
+			u32 *if_handle, u32 *pmac_id);
 extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle);
 extern int be_cmd_eq_create(struct be_adapter *adapter,
		struct be_queue_info *eq, int eq_delay);
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 6d5e81f7046f..1f941f027718 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -1620,19 +1620,22 @@ static int be_open(struct net_device *netdev)
1620static int be_setup(struct be_adapter *adapter) 1620static int be_setup(struct be_adapter *adapter)
1621{ 1621{
1622 struct net_device *netdev = adapter->netdev; 1622 struct net_device *netdev = adapter->netdev;
1623 u32 if_flags; 1623 u32 cap_flags, en_flags;
1624 int status; 1624 int status;
1625 1625
1626 if_flags = BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PROMISCUOUS | 1626 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
1627 BE_IF_FLAGS_MCAST_PROMISCUOUS | BE_IF_FLAGS_UNTAGGED | 1627 BE_IF_FLAGS_MCAST_PROMISCUOUS |
1628 BE_IF_FLAGS_PASS_L3L4_ERRORS; 1628 BE_IF_FLAGS_PROMISCUOUS |
1629 status = be_cmd_if_create(adapter, if_flags, netdev->dev_addr, 1629 BE_IF_FLAGS_PASS_L3L4_ERRORS;
1630 false/* pmac_invalid */, &adapter->if_handle, 1630 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
1631 &adapter->pmac_id); 1631 BE_IF_FLAGS_PASS_L3L4_ERRORS;
1632
1633 status = be_cmd_if_create(adapter, cap_flags, en_flags,
1634 netdev->dev_addr, false/* pmac_invalid */,
1635 &adapter->if_handle, &adapter->pmac_id);
1632 if (status != 0) 1636 if (status != 0)
1633 goto do_none; 1637 goto do_none;
1634 1638
1635
1636 status = be_tx_queues_create(adapter); 1639 status = be_tx_queues_create(adapter);
1637 if (status != 0) 1640 if (status != 0)
1638 goto if_destroy; 1641 goto if_destroy;
@@ -2055,6 +2058,10 @@ static int be_hw_up(struct be_adapter *adapter)
2055 if (status) 2058 if (status)
2056 return status; 2059 return status;
2057 2060
2061 status = be_cmd_reset_function(adapter);
2062 if (status)
2063 return status;
2064
2058 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver); 2065 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2059 if (status) 2066 if (status)
2060 return status; 2067 return status;
@@ -2108,10 +2115,6 @@ static int __devinit be_probe(struct pci_dev *pdev,
2108 if (status) 2115 if (status)
2109 goto free_netdev; 2116 goto free_netdev;
2110 2117
2111 status = be_cmd_reset_function(adapter);
2112 if (status)
2113 goto ctrl_clean;
2114
2115 status = be_stats_init(adapter); 2118 status = be_stats_init(adapter);
2116 if (status) 2119 if (status)
2117 goto ctrl_clean; 2120 goto ctrl_clean;
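The be_main.c change above splits the old if_flags word into cap_flags (everything the interface may support) and en_flags (the subset actually enabled when it is created). Below is a minimal userspace sketch of that split, using hypothetical flag names and an if_create() stand-in; it is not the be2net command API.

#include <stdio.h>

#define IF_UNTAGGED        (1u << 0)   /* hypothetical flag values */
#define IF_BROADCAST       (1u << 1)
#define IF_PROMISCUOUS     (1u << 2)
#define IF_MCAST_PROMISC   (1u << 3)
#define IF_PASS_L3L4_ERRS  (1u << 4)

static int if_create(unsigned int cap_flags, unsigned int en_flags)
{
        /* enabled features should be a subset of the advertised capabilities */
        if (en_flags & ~cap_flags) {
                fprintf(stderr, "en 0x%x not within cap 0x%x\n", en_flags, cap_flags);
                return -1;
        }
        printf("create: cap=0x%x en=0x%x\n", cap_flags, en_flags);
        return 0;
}

int main(void)
{
        unsigned int cap = IF_UNTAGGED | IF_BROADCAST | IF_PROMISCUOUS |
                           IF_MCAST_PROMISC | IF_PASS_L3L4_ERRS;
        unsigned int en  = IF_UNTAGGED | IF_BROADCAST | IF_PASS_L3L4_ERRS;

        return if_create(cap, en);
}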
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index 9c950bb5e90c..f7d9ac8324cb 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -223,24 +223,25 @@ struct ethoc_bd {
223 u32 addr; 223 u32 addr;
224}; 224};
225 225
226static u32 ethoc_read(struct ethoc *dev, loff_t offset) 226static inline u32 ethoc_read(struct ethoc *dev, loff_t offset)
227{ 227{
228 return ioread32(dev->iobase + offset); 228 return ioread32(dev->iobase + offset);
229} 229}
230 230
231static void ethoc_write(struct ethoc *dev, loff_t offset, u32 data) 231static inline void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
232{ 232{
233 iowrite32(data, dev->iobase + offset); 233 iowrite32(data, dev->iobase + offset);
234} 234}
235 235
236static void ethoc_read_bd(struct ethoc *dev, int index, struct ethoc_bd *bd) 236static inline void ethoc_read_bd(struct ethoc *dev, int index,
237 struct ethoc_bd *bd)
237{ 238{
238 loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd)); 239 loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
239 bd->stat = ethoc_read(dev, offset + 0); 240 bd->stat = ethoc_read(dev, offset + 0);
240 bd->addr = ethoc_read(dev, offset + 4); 241 bd->addr = ethoc_read(dev, offset + 4);
241} 242}
242 243
243static void ethoc_write_bd(struct ethoc *dev, int index, 244static inline void ethoc_write_bd(struct ethoc *dev, int index,
244 const struct ethoc_bd *bd) 245 const struct ethoc_bd *bd)
245{ 246{
246 loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd)); 247 loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
@@ -248,33 +249,33 @@ static void ethoc_write_bd(struct ethoc *dev, int index,
248 ethoc_write(dev, offset + 4, bd->addr); 249 ethoc_write(dev, offset + 4, bd->addr);
249} 250}
250 251
251static void ethoc_enable_irq(struct ethoc *dev, u32 mask) 252static inline void ethoc_enable_irq(struct ethoc *dev, u32 mask)
252{ 253{
253 u32 imask = ethoc_read(dev, INT_MASK); 254 u32 imask = ethoc_read(dev, INT_MASK);
254 imask |= mask; 255 imask |= mask;
255 ethoc_write(dev, INT_MASK, imask); 256 ethoc_write(dev, INT_MASK, imask);
256} 257}
257 258
258static void ethoc_disable_irq(struct ethoc *dev, u32 mask) 259static inline void ethoc_disable_irq(struct ethoc *dev, u32 mask)
259{ 260{
260 u32 imask = ethoc_read(dev, INT_MASK); 261 u32 imask = ethoc_read(dev, INT_MASK);
261 imask &= ~mask; 262 imask &= ~mask;
262 ethoc_write(dev, INT_MASK, imask); 263 ethoc_write(dev, INT_MASK, imask);
263} 264}
264 265
265static void ethoc_ack_irq(struct ethoc *dev, u32 mask) 266static inline void ethoc_ack_irq(struct ethoc *dev, u32 mask)
266{ 267{
267 ethoc_write(dev, INT_SOURCE, mask); 268 ethoc_write(dev, INT_SOURCE, mask);
268} 269}
269 270
270static void ethoc_enable_rx_and_tx(struct ethoc *dev) 271static inline void ethoc_enable_rx_and_tx(struct ethoc *dev)
271{ 272{
272 u32 mode = ethoc_read(dev, MODER); 273 u32 mode = ethoc_read(dev, MODER);
273 mode |= MODER_RXEN | MODER_TXEN; 274 mode |= MODER_RXEN | MODER_TXEN;
274 ethoc_write(dev, MODER, mode); 275 ethoc_write(dev, MODER, mode);
275} 276}
276 277
277static void ethoc_disable_rx_and_tx(struct ethoc *dev) 278static inline void ethoc_disable_rx_and_tx(struct ethoc *dev)
278{ 279{
279 u32 mode = ethoc_read(dev, MODER); 280 u32 mode = ethoc_read(dev, MODER);
280 mode &= ~(MODER_RXEN | MODER_TXEN); 281 mode &= ~(MODER_RXEN | MODER_TXEN);
@@ -508,7 +509,7 @@ static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
508 return IRQ_NONE; 509 return IRQ_NONE;
509 } 510 }
510 511
511 ethoc_ack_irq(priv, INT_MASK_ALL); 512 ethoc_ack_irq(priv, pending);
512 513
513 if (pending & INT_MASK_BUSY) { 514 if (pending & INT_MASK_BUSY) {
514 dev_err(&dev->dev, "packet dropped\n"); 515 dev_err(&dev->dev, "packet dropped\n");
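The last ethoc.c hunk makes the interrupt handler acknowledge only the bits it actually read (pending) instead of INT_MASK_ALL. The standalone sketch below models INT_SOURCE as a write-one-to-clear latch to show why a blanket ack can lose an event that arrives between the status read and the ack; the register values are placeholders, not the OpenCores layout.

#include <stdio.h>

static unsigned int int_source;                /* pretend device register */

static void ack_irq(unsigned int mask)
{
        int_source &= ~mask;                   /* write-one-to-clear model */
}

int main(void)
{
        unsigned int pending;

        int_source = 0x1;                      /* RX pending                */
        pending = int_source;                  /* handler reads the status  */
        int_source |= 0x2;                     /* TX completes right after  */
        ack_irq(pending);                      /* new code: ack what we saw */
        printf("selective ack leaves: 0x%x\n", int_source);   /* 0x2, TX kept   */

        int_source |= 0x1;
        pending = int_source;
        int_source |= 0x4;                     /* another event after read  */
        ack_irq(~0u);                          /* old code: ack everything  */
        printf("blanket ack leaves:   0x%x\n", int_source);   /* 0x0, event lost */

        return 0;
}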
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 29234380e6c6..16a1d58419d9 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -1654,7 +1654,7 @@ static const struct net_device_ops fec_netdev_ops = {
1654 * 1654 *
1655 * index is only used in legacy code 1655 * index is only used in legacy code
1656 */ 1656 */
1657int __init fec_enet_init(struct net_device *dev, int index) 1657static int fec_enet_init(struct net_device *dev, int index)
1658{ 1658{
1659 struct fec_enet_private *fep = netdev_priv(dev); 1659 struct fec_enet_private *fep = netdev_priv(dev);
1660 struct bufdesc *cbd_base; 1660 struct bufdesc *cbd_base;
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index 237835864357..a23f739d222f 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -171,6 +171,36 @@ static void ks8851_wrreg16(struct ks8851_net *ks, unsigned reg, unsigned val)
171} 171}
172 172
173/** 173/**
174 * ks8851_wrreg8 - write 8bit register value to chip
175 * @ks: The chip state
176 * @reg: The register address
177 * @val: The value to write
178 *
179 * Issue a write to put the value @val into the register specified in @reg.
180 */
181static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val)
182{
183 struct spi_transfer *xfer = &ks->spi_xfer1;
184 struct spi_message *msg = &ks->spi_msg1;
185 __le16 txb[2];
186 int ret;
187 int bit;
188
189 bit = 1 << (reg & 3);
190
191 txb[0] = cpu_to_le16(MK_OP(bit, reg) | KS_SPIOP_WR);
192 txb[1] = val;
193
194 xfer->tx_buf = txb;
195 xfer->rx_buf = NULL;
196 xfer->len = 3;
197
198 ret = spi_sync(ks->spidev, msg);
199 if (ret < 0)
200 ks_err(ks, "spi_sync() failed\n");
201}
202
203/**
174 * ks8851_rx_1msg - select whether to use one or two messages for spi read 204 * ks8851_rx_1msg - select whether to use one or two messages for spi read
175 * @ks: The device structure 205 * @ks: The device structure
176 * 206 *
@@ -322,13 +352,12 @@ static void ks8851_soft_reset(struct ks8851_net *ks, unsigned op)
322static int ks8851_write_mac_addr(struct net_device *dev) 352static int ks8851_write_mac_addr(struct net_device *dev)
323{ 353{
324 struct ks8851_net *ks = netdev_priv(dev); 354 struct ks8851_net *ks = netdev_priv(dev);
325 u16 *mcp = (u16 *)dev->dev_addr; 355 int i;
326 356
327 mutex_lock(&ks->lock); 357 mutex_lock(&ks->lock);
328 358
329 ks8851_wrreg16(ks, KS_MARL, mcp[0]); 359 for (i = 0; i < ETH_ALEN; i++)
330 ks8851_wrreg16(ks, KS_MARM, mcp[1]); 360 ks8851_wrreg8(ks, KS_MAR(i), dev->dev_addr[i]);
331 ks8851_wrreg16(ks, KS_MARH, mcp[2]);
332 361
333 mutex_unlock(&ks->lock); 362 mutex_unlock(&ks->lock);
334 363
@@ -951,7 +980,7 @@ static void ks8851_set_rx_mode(struct net_device *dev)
951 mcptr = mcptr->next; 980 mcptr = mcptr->next;
952 } 981 }
953 982
954 rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXAE | RXCR1_RXPAFMA; 983 rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXPAFMA;
955 } else { 984 } else {
956 /* just accept broadcast / unicast */ 985 /* just accept broadcast / unicast */
957 rxctrl.rxcr1 = RXCR1_RXPAFMA; 986 rxctrl.rxcr1 = RXCR1_RXPAFMA;
@@ -1239,6 +1268,9 @@ static int __devinit ks8851_probe(struct spi_device *spi)
1239 ndev->netdev_ops = &ks8851_netdev_ops; 1268 ndev->netdev_ops = &ks8851_netdev_ops;
1240 ndev->irq = spi->irq; 1269 ndev->irq = spi->irq;
1241 1270
1271 /* issue a global soft reset to reset the device. */
1272 ks8851_soft_reset(ks, GRR_GSR);
1273
1242 /* simple check for a valid chip being connected to the bus */ 1274 /* simple check for a valid chip being connected to the bus */
1243 1275
1244 if ((ks8851_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) { 1276 if ((ks8851_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
diff --git a/drivers/net/ks8851.h b/drivers/net/ks8851.h
index 85abe147afbf..f52c312cc356 100644
--- a/drivers/net/ks8851.h
+++ b/drivers/net/ks8851.h
@@ -16,6 +16,7 @@
16#define CCR_32PIN (1 << 0) 16#define CCR_32PIN (1 << 0)
17 17
18/* MAC address registers */ 18/* MAC address registers */
19#define KS_MAR(_m) 0x15 - (_m)
19#define KS_MARL 0x10 20#define KS_MARL 0x10
20#define KS_MARM 0x12 21#define KS_MARM 0x12
21#define KS_MARH 0x14 22#define KS_MARH 0x14
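KS_MAR(), added above, gives a per-byte address for the MAC registers so ks8851_write_mac_addr() can program dev_addr one octet at a time with the new 8-bit write. A quick standalone check of the mapping; the KS_MAR()/KS_MARx defines are copied from the header above and ETH_ALEN is the usual 6.

#include <stdio.h>

#define KS_MAR(_m)      0x15 - (_m)
#define KS_MARL         0x10
#define KS_MARM         0x12
#define KS_MARH         0x14
#define ETH_ALEN        6

int main(void)
{
        int i;

        /* dev_addr[0] lands at 0x15 and dev_addr[5] at 0x10, walking down
         * through the old MARH/MARM/MARL word registers */
        for (i = 0; i < ETH_ALEN; i++)
                printf("dev_addr[%d] -> 0x%02x\n", i, KS_MAR(i));
        return 0;
}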
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index f9364d0678f2..d6c7ac68f6ea 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -3545,7 +3545,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
3545 rp->rcr_index = index; 3545 rp->rcr_index = index;
3546 3546
3547 skb_reserve(skb, NET_IP_ALIGN); 3547 skb_reserve(skb, NET_IP_ALIGN);
3548 __pskb_pull_tail(skb, min(len, NIU_RXPULL_MAX)); 3548 __pskb_pull_tail(skb, min(len, VLAN_ETH_HLEN));
3549 3549
3550 rp->rx_packets++; 3550 rp->rx_packets++;
3551 rp->rx_bytes += skb->len; 3551 rp->rx_bytes += skb->len;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 50ac94ce9c16..3709d6af9abf 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -453,7 +453,7 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
453 vi->dev->stats.tx_bytes += skb->len; 453 vi->dev->stats.tx_bytes += skb->len;
454 vi->dev->stats.tx_packets++; 454 vi->dev->stats.tx_packets++;
455 tot_sgs += skb_vnet_hdr(skb)->num_sg; 455 tot_sgs += skb_vnet_hdr(skb)->num_sg;
456 kfree_skb(skb); 456 dev_kfree_skb_any(skb);
457 } 457 }
458 return tot_sgs; 458 return tot_sgs;
459} 459}
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 6a16f76f277e..004353a46af0 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -481,7 +481,8 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
481 } 481 }
482 rq->uncommitted[ring_idx] += num_allocated; 482 rq->uncommitted[ring_idx] += num_allocated;
483 483
484 dprintk(KERN_ERR "alloc_rx_buf: %d allocated, next2fill %u, next2comp " 484 dev_dbg(&adapter->netdev->dev,
485 "alloc_rx_buf: %d allocated, next2fill %u, next2comp "
485 "%u, uncommited %u\n", num_allocated, ring->next2fill, 486 "%u, uncommited %u\n", num_allocated, ring->next2fill,
486 ring->next2comp, rq->uncommitted[ring_idx]); 487 ring->next2comp, rq->uncommitted[ring_idx]);
487 488
@@ -539,7 +540,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
539 tbi = tq->buf_info + tq->tx_ring.next2fill; 540 tbi = tq->buf_info + tq->tx_ring.next2fill;
540 tbi->map_type = VMXNET3_MAP_NONE; 541 tbi->map_type = VMXNET3_MAP_NONE;
541 542
542 dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n", 543 dev_dbg(&adapter->netdev->dev,
544 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
543 tq->tx_ring.next2fill, ctx->sop_txd->txd.addr, 545 tq->tx_ring.next2fill, ctx->sop_txd->txd.addr,
544 ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]); 546 ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
545 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); 547 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
@@ -572,7 +574,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
572 gdesc->dword[2] = dw2 | buf_size; 574 gdesc->dword[2] = dw2 | buf_size;
573 gdesc->dword[3] = 0; 575 gdesc->dword[3] = 0;
574 576
575 dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n", 577 dev_dbg(&adapter->netdev->dev,
578 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
576 tq->tx_ring.next2fill, gdesc->txd.addr, 579 tq->tx_ring.next2fill, gdesc->txd.addr,
577 gdesc->dword[2], gdesc->dword[3]); 580 gdesc->dword[2], gdesc->dword[3]);
578 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); 581 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
@@ -600,7 +603,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
600 gdesc->dword[2] = dw2 | frag->size; 603 gdesc->dword[2] = dw2 | frag->size;
601 gdesc->dword[3] = 0; 604 gdesc->dword[3] = 0;
602 605
603 dprintk(KERN_ERR "txd[%u]: 0x%llu %u %u\n", 606 dev_dbg(&adapter->netdev->dev,
607 "txd[%u]: 0x%llu %u %u\n",
604 tq->tx_ring.next2fill, gdesc->txd.addr, 608 tq->tx_ring.next2fill, gdesc->txd.addr,
605 gdesc->dword[2], gdesc->dword[3]); 609 gdesc->dword[2], gdesc->dword[3]);
606 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); 610 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
@@ -697,7 +701,8 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
697 tdd = tq->data_ring.base + tq->tx_ring.next2fill; 701 tdd = tq->data_ring.base + tq->tx_ring.next2fill;
698 702
699 memcpy(tdd->data, skb->data, ctx->copy_size); 703 memcpy(tdd->data, skb->data, ctx->copy_size);
700 dprintk(KERN_ERR "copy %u bytes to dataRing[%u]\n", 704 dev_dbg(&adapter->netdev->dev,
705 "copy %u bytes to dataRing[%u]\n",
701 ctx->copy_size, tq->tx_ring.next2fill); 706 ctx->copy_size, tq->tx_ring.next2fill);
702 return 1; 707 return 1;
703 708
@@ -808,7 +813,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
808 813
809 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) { 814 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
810 tq->stats.tx_ring_full++; 815 tq->stats.tx_ring_full++;
811 dprintk(KERN_ERR "tx queue stopped on %s, next2comp %u" 816 dev_dbg(&adapter->netdev->dev,
817 "tx queue stopped on %s, next2comp %u"
812 " next2fill %u\n", adapter->netdev->name, 818 " next2fill %u\n", adapter->netdev->name,
813 tq->tx_ring.next2comp, tq->tx_ring.next2fill); 819 tq->tx_ring.next2comp, tq->tx_ring.next2fill);
814 820
@@ -853,7 +859,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
853 859
854 /* finally flips the GEN bit of the SOP desc */ 860 /* finally flips the GEN bit of the SOP desc */
855 gdesc->dword[2] ^= VMXNET3_TXD_GEN; 861 gdesc->dword[2] ^= VMXNET3_TXD_GEN;
856 dprintk(KERN_ERR "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n", 862 dev_dbg(&adapter->netdev->dev,
863 "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
857 (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd - 864 (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
858 tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2], 865 tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2],
859 gdesc->dword[3]); 866 gdesc->dword[3]);
@@ -990,7 +997,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
990 if (unlikely(rcd->len == 0)) { 997 if (unlikely(rcd->len == 0)) {
991 /* Pretend the rx buffer is skipped. */ 998 /* Pretend the rx buffer is skipped. */
992 BUG_ON(!(rcd->sop && rcd->eop)); 999 BUG_ON(!(rcd->sop && rcd->eop));
993 dprintk(KERN_ERR "rxRing[%u][%u] 0 length\n", 1000 dev_dbg(&adapter->netdev->dev,
1001 "rxRing[%u][%u] 0 length\n",
994 ring_idx, idx); 1002 ring_idx, idx);
995 goto rcd_done; 1003 goto rcd_done;
996 } 1004 }
@@ -1683,7 +1691,8 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
1683 int err; 1691 int err;
1684 u32 ret; 1692 u32 ret;
1685 1693
1686 dprintk(KERN_ERR "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes" 1694 dev_dbg(&adapter->netdev->dev,
1695 "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
1687 " %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size, 1696 " %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size,
1688 adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size, 1697 adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size,
1689 adapter->rx_queue.rx_ring[0].size, 1698 adapter->rx_queue.rx_ring[0].size,
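These vmxnet3 hunks replace a driver-private dprintk(KERN_ERR ...) with dev_dbg(), which tags each message with the device and is compiled away unless debugging is enabled. A userspace imitation of that shape (GNU C variadic macro; not the kernel implementation):

#include <stdio.h>

#define DEBUG 1                                 /* drop this to compile the call away */

struct device {
        const char *name;
};

#ifdef DEBUG
#define dev_dbg(dev, fmt, ...) \
        fprintf(stderr, "%s: " fmt, (dev)->name, ##__VA_ARGS__)
#else
#define dev_dbg(dev, fmt, ...) do { } while (0)
#endif

int main(void)
{
        struct device dev = { "vmxnet3-eth0" };

        dev_dbg(&dev, "txd[%u]: 0x%x 0x%x\n", 4u, 0xdeadu, 0xbeefu);
        return 0;
}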
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 6bb91576e999..3c0d70d58111 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -30,6 +30,7 @@
30#include <linux/types.h> 30#include <linux/types.h>
31#include <linux/ethtool.h> 31#include <linux/ethtool.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/device.h>
33#include <linux/netdevice.h> 34#include <linux/netdevice.h>
34#include <linux/pci.h> 35#include <linux/pci.h>
35#include <linux/ethtool.h> 36#include <linux/ethtool.h>
@@ -59,7 +60,6 @@
59#include <linux/if_vlan.h> 60#include <linux/if_vlan.h>
60#include <linux/if_arp.h> 61#include <linux/if_arp.h>
61#include <linux/inetdevice.h> 62#include <linux/inetdevice.h>
62#include <linux/dst.h>
63 63
64#include "vmxnet3_defs.h" 64#include "vmxnet3_defs.h"
65 65
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index a2c18acb8568..90be551b80c1 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1253,6 +1253,7 @@ static int nfs_parse_mount_options(char *raw,
1253 default: 1253 default:
1254 dfprintk(MOUNT, "NFS: unrecognized " 1254 dfprintk(MOUNT, "NFS: unrecognized "
1255 "transport protocol\n"); 1255 "transport protocol\n");
1256 kfree(string);
1256 return 0; 1257 return 0;
1257 } 1258 }
1258 break; 1259 break;
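The single added kfree() above plugs a leak: the duplicated option string was not released on the unrecognized-transport path before returning. A generic sketch of the pattern, with names that are illustrative rather than the nfs_parse_mount_options() internals:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_proto(const char *arg)
{
        char *string = strdup(arg);     /* local copy, must be freed on every path */

        if (!string)
                return -1;

        if (!strcmp(string, "tcp") || !strcmp(string, "udp")) {
                printf("using %s\n", string);
                free(string);
                return 1;               /* parsed OK */
        }

        fprintf(stderr, "unrecognized transport protocol\n");
        free(string);                   /* the path the fix adds */
        return 0;                       /* parse failure, as in the hunk */
}

int main(void)
{
        parse_proto("tcp");
        parse_proto("sctp");
        return 0;
}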
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
index 828a889be909..7e54e52964dd 100644
--- a/fs/notify/dnotify/dnotify.c
+++ b/fs/notify/dnotify/dnotify.c
@@ -91,6 +91,7 @@ static int dnotify_handle_event(struct fsnotify_group *group,
91 struct dnotify_struct *dn; 91 struct dnotify_struct *dn;
92 struct dnotify_struct **prev; 92 struct dnotify_struct **prev;
93 struct fown_struct *fown; 93 struct fown_struct *fown;
94 __u32 test_mask = event->mask & ~FS_EVENT_ON_CHILD;
94 95
95 to_tell = event->to_tell; 96 to_tell = event->to_tell;
96 97
@@ -106,7 +107,7 @@ static int dnotify_handle_event(struct fsnotify_group *group,
106 spin_lock(&entry->lock); 107 spin_lock(&entry->lock);
107 prev = &dnentry->dn; 108 prev = &dnentry->dn;
108 while ((dn = *prev) != NULL) { 109 while ((dn = *prev) != NULL) {
109 if ((dn->dn_mask & event->mask) == 0) { 110 if ((dn->dn_mask & test_mask) == 0) {
110 prev = &dn->dn_next; 111 prev = &dn->dn_next;
111 continue; 112 continue;
112 } 113 }
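Because a dnotify watch's mask can itself carry FS_EVENT_ON_CHILD (so it sees events generated below the watched directory), the raw dn_mask & event->mask test could match on that marker bit alone. Masking it out first, as above, keeps the comparison to the event types the watcher actually asked for. A small sketch with illustrative flag values:

#include <stdio.h>

#define FS_MODIFY               0x00000002u     /* illustrative values */
#define FS_CREATE               0x00000100u
#define FS_EVENT_ON_CHILD       0x08000000u

static int mask_matches(unsigned int dn_mask, unsigned int event_mask)
{
        unsigned int test_mask = event_mask & ~FS_EVENT_ON_CHILD;

        return (dn_mask & test_mask) != 0;
}

int main(void)
{
        unsigned int dn_mask = FS_MODIFY | FS_EVENT_ON_CHILD;  /* wants modifies, incl. children */
        unsigned int event   = FS_CREATE | FS_EVENT_ON_CHILD;  /* a child was created            */

        printf("raw compare:    %d\n", (dn_mask & event) != 0);        /* 1, spurious    */
        printf("masked compare: %d\n", mask_matches(dn_mask, event));  /* 0, as intended */
        return 0;
}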
diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
index c8a07c65482b..3165d85aada2 100644
--- a/fs/notify/inode_mark.c
+++ b/fs/notify/inode_mark.c
@@ -324,11 +324,11 @@ int fsnotify_add_mark(struct fsnotify_mark_entry *entry,
324 spin_lock(&group->mark_lock); 324 spin_lock(&group->mark_lock);
325 spin_lock(&inode->i_lock); 325 spin_lock(&inode->i_lock);
326 326
327 entry->group = group;
328 entry->inode = inode;
329
330 lentry = fsnotify_find_mark_entry(group, inode); 327 lentry = fsnotify_find_mark_entry(group, inode);
331 if (!lentry) { 328 if (!lentry) {
329 entry->group = group;
330 entry->inode = inode;
331
332 hlist_add_head(&entry->i_list, &inode->i_fsnotify_mark_entries); 332 hlist_add_head(&entry->i_list, &inode->i_fsnotify_mark_entries);
333 list_add(&entry->g_list, &group->mark_entries); 333 list_add(&entry->g_list, &group->mark_entries);
334 334
diff --git a/fs/notify/notification.c b/fs/notify/notification.c
index 3816d5750dd5..b8bf53b4c108 100644
--- a/fs/notify/notification.c
+++ b/fs/notify/notification.c
@@ -143,7 +143,7 @@ static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new
143 /* remember, after old was put on the wait_q we aren't 143 /* remember, after old was put on the wait_q we aren't
144 * allowed to look at the inode any more, only thing 144 * allowed to look at the inode any more, only thing
145 * left to check was if the file_name is the same */ 145 * left to check was if the file_name is the same */
146 if (old->name_len && 146 if (!old->name_len ||
147 !strcmp(old->file_name, new->file_name)) 147 !strcmp(old->file_name, new->file_name))
148 return true; 148 return true;
149 break; 149 break;
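The comparison fix above lets two otherwise-identical events that carry no file name coalesce on the notification queue; previously a zero name_len made the compare fail and duplicates were queued. Reduced to a standalone function:

#include <stdio.h>
#include <string.h>

struct ev {
        unsigned int name_len;
        const char *file_name;
};

static int same_name(const struct ev *old, const struct ev *new)
{
        return !old->name_len || !strcmp(old->file_name, new->file_name);
}

int main(void)
{
        struct ev a = { 0, "" }, b = { 0, "" };
        struct ev c = { 3, "foo" }, d = { 3, "bar" };

        printf("nameless events merge:  %d\n", same_name(&a, &b));     /* 1 */
        printf("different names merge:  %d\n", same_name(&c, &d));     /* 0 */
        return 0;
}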
diff --git a/fs/pipe.c b/fs/pipe.c
index 52c415114838..ae17d026aaa3 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -777,36 +777,55 @@ pipe_rdwr_release(struct inode *inode, struct file *filp)
777static int 777static int
778pipe_read_open(struct inode *inode, struct file *filp) 778pipe_read_open(struct inode *inode, struct file *filp)
779{ 779{
780 /* We could have perhaps used atomic_t, but this and friends 780 int ret = -ENOENT;
781 below are the only places. So it doesn't seem worthwhile. */ 781
782 mutex_lock(&inode->i_mutex); 782 mutex_lock(&inode->i_mutex);
783 inode->i_pipe->readers++; 783
784 if (inode->i_pipe) {
785 ret = 0;
786 inode->i_pipe->readers++;
787 }
788
784 mutex_unlock(&inode->i_mutex); 789 mutex_unlock(&inode->i_mutex);
785 790
786 return 0; 791 return ret;
787} 792}
788 793
789static int 794static int
790pipe_write_open(struct inode *inode, struct file *filp) 795pipe_write_open(struct inode *inode, struct file *filp)
791{ 796{
797 int ret = -ENOENT;
798
792 mutex_lock(&inode->i_mutex); 799 mutex_lock(&inode->i_mutex);
793 inode->i_pipe->writers++; 800
801 if (inode->i_pipe) {
802 ret = 0;
803 inode->i_pipe->writers++;
804 }
805
794 mutex_unlock(&inode->i_mutex); 806 mutex_unlock(&inode->i_mutex);
795 807
796 return 0; 808 return ret;
797} 809}
798 810
799static int 811static int
800pipe_rdwr_open(struct inode *inode, struct file *filp) 812pipe_rdwr_open(struct inode *inode, struct file *filp)
801{ 813{
814 int ret = -ENOENT;
815
802 mutex_lock(&inode->i_mutex); 816 mutex_lock(&inode->i_mutex);
803 if (filp->f_mode & FMODE_READ) 817
804 inode->i_pipe->readers++; 818 if (inode->i_pipe) {
805 if (filp->f_mode & FMODE_WRITE) 819 ret = 0;
806 inode->i_pipe->writers++; 820 if (filp->f_mode & FMODE_READ)
821 inode->i_pipe->readers++;
822 if (filp->f_mode & FMODE_WRITE)
823 inode->i_pipe->writers++;
824 }
825
807 mutex_unlock(&inode->i_mutex); 826 mutex_unlock(&inode->i_mutex);
808 827
809 return 0; 828 return ret;
810} 829}
811 830
812/* 831/*
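The three pipe open paths above now re-check inode->i_pipe under i_mutex and return -ENOENT if the pipe has already been torn down, instead of blindly bumping the counters. A pthread sketch of the same check-under-lock shape (stand-in types, not the VFS code):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct pipe_state {
        int readers;
        int writers;
};

struct fake_inode {
        pthread_mutex_t lock;
        struct pipe_state *pipe;        /* may be freed and NULLed concurrently */
};

static int pipe_read_open(struct fake_inode *inode)
{
        int ret = -ENOENT;

        pthread_mutex_lock(&inode->lock);
        if (inode->pipe) {              /* only touch it if it is still there */
                ret = 0;
                inode->pipe->readers++;
        }
        pthread_mutex_unlock(&inode->lock);
        return ret;
}

int main(void)
{
        struct fake_inode inode = { PTHREAD_MUTEX_INITIALIZER, NULL };

        printf("open with no pipe: %d\n", pipe_read_open(&inode));     /* -ENOENT */

        inode.pipe = calloc(1, sizeof(*inode.pipe));
        printf("open with a pipe:  %d\n", pipe_read_open(&inode));     /* 0 */
        printf("readers now:       %d\n", inode.pipe->readers);
        free(inode.pipe);
        return 0;
}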
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index b63b80fac567..f93ad90a601b 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -130,11 +130,11 @@ struct inet_timewait_sock {
130 __u16 tw_num; 130 __u16 tw_num;
131 kmemcheck_bitfield_begin(flags); 131 kmemcheck_bitfield_begin(flags);
132 /* And these are ours. */ 132 /* And these are ours. */
133 __u8 tw_ipv6only:1, 133 unsigned int tw_ipv6only : 1,
134 tw_transparent:1; 134 tw_transparent : 1,
135 /* 14 bits hole, try to pack */ 135 tw_pad : 14, /* 14 bits hole */
136 tw_ipv6_offset : 16;
136 kmemcheck_bitfield_end(flags); 137 kmemcheck_bitfield_end(flags);
137 __u16 tw_ipv6_offset;
138 unsigned long tw_ttd; 138 unsigned long tw_ttd;
139 struct inet_bind_bucket *tw_tb; 139 struct inet_bind_bucket *tw_tb;
140 struct hlist_node tw_death_node; 140 struct hlist_node tw_death_node;
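The inet_timewait_sock change above folds tw_ipv6_offset and the 14 spare bits into the same unsigned int as the two flag bits, so the hole is named and the whole word sits inside the kmemcheck-annotated region. A layout-only sketch showing that the repacking does not grow the structure (field names shortened):

#include <stdio.h>

struct tw_old {
        unsigned char  ipv6only : 1,
                       transparent : 1;
                       /* 14 bits of implicit padding here */
        unsigned short ipv6_offset;
};

struct tw_new {
        unsigned int   ipv6only : 1,
                       transparent : 1,
                       pad : 14,        /* the hole, now spelled out */
                       ipv6_offset : 16;
};

int main(void)
{
        printf("old layout: %zu bytes\n", sizeof(struct tw_old));
        printf("new layout: %zu bytes\n", sizeof(struct tw_new));
        return 0;
}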
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
index 17d8bb1acf9c..25596e450ac7 100644
--- a/kernel/power/suspend_test.c
+++ b/kernel/power/suspend_test.c
@@ -19,7 +19,7 @@
19 * The time it takes is system-specific though, so when we test this 19 * The time it takes is system-specific though, so when we test this
20 * during system bootup we allow a LOT of time. 20 * during system bootup we allow a LOT of time.
21 */ 21 */
22#define TEST_SUSPEND_SECONDS 5 22#define TEST_SUSPEND_SECONDS 10
23 23
24static unsigned long suspend_test_start_time; 24static unsigned long suspend_test_start_time;
25 25
@@ -49,7 +49,8 @@ void suspend_test_finish(const char *label)
49 * has some performance issues. The stack dump of a WARN_ON 49 * has some performance issues. The stack dump of a WARN_ON
50 * is more likely to get the right attention than a printk... 50 * is more likely to get the right attention than a printk...
51 */ 51 */
52 WARN(msec > (TEST_SUSPEND_SECONDS * 1000), "Component: %s\n", label); 52 WARN(msec > (TEST_SUSPEND_SECONDS * 1000),
53 "Component: %s, time: %u\n", label, msec);
53} 54}
54 55
55/* 56/*
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 7f939ce29801..2bc6f6a8de68 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -92,6 +92,8 @@ static void add_conn(struct work_struct *work)
92 92
93 dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle); 93 dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
94 94
95 dev_set_drvdata(&conn->dev, conn);
96
95 if (device_add(&conn->dev) < 0) { 97 if (device_add(&conn->dev) < 0) {
96 BT_ERR("Failed to register connection device"); 98 BT_ERR("Failed to register connection device");
97 return; 99 return;
@@ -144,8 +146,6 @@ void hci_conn_init_sysfs(struct hci_conn *conn)
144 conn->dev.class = bt_class; 146 conn->dev.class = bt_class;
145 conn->dev.parent = &hdev->dev; 147 conn->dev.parent = &hdev->dev;
146 148
147 dev_set_drvdata(&conn->dev, conn);
148
149 device_initialize(&conn->dev); 149 device_initialize(&conn->dev);
150 150
151 INIT_WORK(&conn->work_add, add_conn); 151 INIT_WORK(&conn->work_add, add_conn);
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 555d9da1869b..77e9fb130adb 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -555,12 +555,12 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
555 555
556 conn->feat_mask = 0; 556 conn->feat_mask = 0;
557 557
558 setup_timer(&conn->info_timer, l2cap_info_timeout,
559 (unsigned long) conn);
560
561 spin_lock_init(&conn->lock); 558 spin_lock_init(&conn->lock);
562 rwlock_init(&conn->chan_list.lock); 559 rwlock_init(&conn->chan_list.lock);
563 560
561 setup_timer(&conn->info_timer, l2cap_info_timeout,
562 (unsigned long) conn);
563
564 conn->disc_reason = 0x13; 564 conn->disc_reason = 0x13;
565 565
566 return conn; 566 return conn;
@@ -783,6 +783,9 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
783 /* Default config options */ 783 /* Default config options */
784 pi->conf_len = 0; 784 pi->conf_len = 0;
785 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO; 785 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
786 skb_queue_head_init(TX_QUEUE(sk));
787 skb_queue_head_init(SREJ_QUEUE(sk));
788 INIT_LIST_HEAD(SREJ_LIST(sk));
786} 789}
787 790
788static struct proto l2cap_proto = { 791static struct proto l2cap_proto = {
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 4351ca2cf0b8..537731b3bcb3 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -446,6 +446,28 @@ extern int sysctl_tcp_synack_retries;
446 446
447EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add); 447EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
448 448
449/* Decide when to expire the request and when to resend SYN-ACK */
450static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
451 const int max_retries,
452 const u8 rskq_defer_accept,
453 int *expire, int *resend)
454{
455 if (!rskq_defer_accept) {
456 *expire = req->retrans >= thresh;
457 *resend = 1;
458 return;
459 }
460 *expire = req->retrans >= thresh &&
461 (!inet_rsk(req)->acked || req->retrans >= max_retries);
462 /*
463 * Do not resend while waiting for data after ACK,
464 * start to resend on end of deferring period to give
465 * last chance for data or ACK to create established socket.
466 */
467 *resend = !inet_rsk(req)->acked ||
468 req->retrans >= rskq_defer_accept - 1;
469}
470
449void inet_csk_reqsk_queue_prune(struct sock *parent, 471void inet_csk_reqsk_queue_prune(struct sock *parent,
450 const unsigned long interval, 472 const unsigned long interval,
451 const unsigned long timeout, 473 const unsigned long timeout,
@@ -501,9 +523,15 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
501 reqp=&lopt->syn_table[i]; 523 reqp=&lopt->syn_table[i];
502 while ((req = *reqp) != NULL) { 524 while ((req = *reqp) != NULL) {
503 if (time_after_eq(now, req->expires)) { 525 if (time_after_eq(now, req->expires)) {
504 if ((req->retrans < thresh || 526 int expire = 0, resend = 0;
505 (inet_rsk(req)->acked && req->retrans < max_retries)) 527
506 && !req->rsk_ops->rtx_syn_ack(parent, req)) { 528 syn_ack_recalc(req, thresh, max_retries,
529 queue->rskq_defer_accept,
530 &expire, &resend);
531 if (!expire &&
532 (!resend ||
533 !req->rsk_ops->rtx_syn_ack(parent, req) ||
534 inet_rsk(req)->acked)) {
507 unsigned long timeo; 535 unsigned long timeo;
508 536
509 if (req->retrans++ == 0) 537 if (req->retrans++ == 0)
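syn_ack_recalc(), added above, concentrates the TCP_DEFER_ACCEPT expiry/retransmit decision in one place. The standalone copy below drives it with a few sample requests; thresh, max_retries and the defer_accept count are passed as plain numbers rather than taken from sysctls.

#include <stdio.h>

struct req {
        int retrans;
        int acked;
};

static void syn_ack_recalc(const struct req *req, int thresh, int max_retries,
                           unsigned char defer_accept, int *expire, int *resend)
{
        if (!defer_accept) {
                *expire = req->retrans >= thresh;
                *resend = 1;
                return;
        }
        *expire = req->retrans >= thresh &&
                  (!req->acked || req->retrans >= max_retries);
        /* do not resend while waiting for data after the bare ACK, except to
         * give one last chance at the end of the deferring period */
        *resend = !req->acked || req->retrans >= defer_accept - 1;
}

int main(void)
{
        struct req cases[] = {
                { 2, 0 },       /* no ACK yet                  */
                { 2, 1 },       /* bare ACK, still deferring   */
                { 7, 1 },       /* past max_retries, give up   */
        };
        int i;

        for (i = 0; i < 3; i++) {
                int expire, resend;

                syn_ack_recalc(&cases[i], 5, 7, 4, &expire, &resend);
                printf("retrans=%d acked=%d -> expire=%d resend=%d\n",
                       cases[i].retrans, cases[i].acked, expire, resend);
        }
        return 0;
}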
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 0c0b6e363a20..e982b5c1ee17 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -634,17 +634,16 @@ static int do_ip_setsockopt(struct sock *sk, int level,
634 break; 634 break;
635 } 635 }
636 dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr); 636 dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
637 if (dev) { 637 if (dev)
638 mreq.imr_ifindex = dev->ifindex; 638 mreq.imr_ifindex = dev->ifindex;
639 dev_put(dev);
640 }
641 } else 639 } else
642 dev = __dev_get_by_index(sock_net(sk), mreq.imr_ifindex); 640 dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
643 641
644 642
645 err = -EADDRNOTAVAIL; 643 err = -EADDRNOTAVAIL;
646 if (!dev) 644 if (!dev)
647 break; 645 break;
646 dev_put(dev);
648 647
649 err = -EINVAL; 648 err = -EINVAL;
650 if (sk->sk_bound_dev_if && 649 if (sk->sk_bound_dev_if &&
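Both lookup branches above now hand back a reference-counted device and drop the reference in one place once its existence has been confirmed. A toy get/put sketch of that shape, with simplified stand-ins rather than the real net_device helpers:

#include <stdio.h>

struct dev {
        const char *name;
        int refcnt;
};

static struct dev eth0 = { "eth0", 1 };

static struct dev *dev_get_by_index_toy(int ifindex)
{
        if (ifindex != 2)
                return NULL;
        eth0.refcnt++;                  /* lookup returns a held reference */
        return &eth0;
}

static void dev_put_toy(struct dev *d)
{
        d->refcnt--;
}

static int resolve_ifindex(int ifindex)
{
        struct dev *d = dev_get_by_index_toy(ifindex);

        if (!d)
                return -1;              /* -EADDRNOTAVAIL in the hunk above */
        printf("found %s (refcnt %d)\n", d->name, d->refcnt);
        dev_put_toy(d);                 /* done with the pointer, drop the reference */

        /* from here on only the ifindex itself is needed, as in the hunk */
        return ifindex;
}

int main(void)
{
        resolve_ifindex(2);
        resolve_ifindex(9);
        printf("final refcnt %d\n", eth0.refcnt);
        return 0;
}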
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 64d0af675823..90b2e0649bfb 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -326,6 +326,43 @@ void tcp_enter_memory_pressure(struct sock *sk)
326 326
327EXPORT_SYMBOL(tcp_enter_memory_pressure); 327EXPORT_SYMBOL(tcp_enter_memory_pressure);
328 328
329/* Convert seconds to retransmits based on initial and max timeout */
330static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
331{
332 u8 res = 0;
333
334 if (seconds > 0) {
335 int period = timeout;
336
337 res = 1;
338 while (seconds > period && res < 255) {
339 res++;
340 timeout <<= 1;
341 if (timeout > rto_max)
342 timeout = rto_max;
343 period += timeout;
344 }
345 }
346 return res;
347}
348
349/* Convert retransmits to seconds based on initial and max timeout */
350static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
351{
352 int period = 0;
353
354 if (retrans > 0) {
355 period = timeout;
356 while (--retrans) {
357 timeout <<= 1;
358 if (timeout > rto_max)
359 timeout = rto_max;
360 period += timeout;
361 }
362 }
363 return period;
364}
365
329/* 366/*
330 * Wait for a TCP event. 367 * Wait for a TCP event.
331 * 368 *
@@ -1405,7 +1442,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1405 goto found_ok_skb; 1442 goto found_ok_skb;
1406 if (tcp_hdr(skb)->fin) 1443 if (tcp_hdr(skb)->fin)
1407 goto found_fin_ok; 1444 goto found_fin_ok;
1408 WARN_ON(!(flags & MSG_PEEK)); 1445 if (WARN_ON(!(flags & MSG_PEEK)))
1446 printk(KERN_INFO "recvmsg bug 2: copied %X "
1447 "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
1409 } 1448 }
1410 1449
1411 /* Well, if we have backlog, try to process it now yet. */ 1450 /* Well, if we have backlog, try to process it now yet. */
@@ -2163,16 +2202,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2163 break; 2202 break;
2164 2203
2165 case TCP_DEFER_ACCEPT: 2204 case TCP_DEFER_ACCEPT:
2166 icsk->icsk_accept_queue.rskq_defer_accept = 0; 2205 /* Translate value in seconds to number of retransmits */
2167 if (val > 0) { 2206 icsk->icsk_accept_queue.rskq_defer_accept =
2168 /* Translate value in seconds to number of 2207 secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
2169 * retransmits */ 2208 TCP_RTO_MAX / HZ);
2170 while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
2171 val > ((TCP_TIMEOUT_INIT / HZ) <<
2172 icsk->icsk_accept_queue.rskq_defer_accept))
2173 icsk->icsk_accept_queue.rskq_defer_accept++;
2174 icsk->icsk_accept_queue.rskq_defer_accept++;
2175 }
2176 break; 2209 break;
2177 2210
2178 case TCP_WINDOW_CLAMP: 2211 case TCP_WINDOW_CLAMP:
@@ -2353,8 +2386,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
2353 val = (val ? : sysctl_tcp_fin_timeout) / HZ; 2386 val = (val ? : sysctl_tcp_fin_timeout) / HZ;
2354 break; 2387 break;
2355 case TCP_DEFER_ACCEPT: 2388 case TCP_DEFER_ACCEPT:
2356 val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 : 2389 val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
2357 ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1)); 2390 TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
2358 break; 2391 break;
2359 case TCP_WINDOW_CLAMP: 2392 case TCP_WINDOW_CLAMP:
2360 val = tp->window_clamp; 2393 val = tp->window_clamp;
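secs_to_retrans() and retrans_to_secs(), added above, replace the open-coded TCP_DEFER_ACCEPT loop in do_tcp_setsockopt(). The standalone copy below round-trips a few values; the 3 s initial timeout and 120 s cap stand in for TCP_TIMEOUT_INIT/HZ and TCP_RTO_MAX/HZ and are illustrative only.

#include <stdio.h>

static unsigned char secs_to_retrans(int seconds, int timeout, int rto_max)
{
        unsigned char res = 0;

        if (seconds > 0) {
                int period = timeout;

                res = 1;
                while (seconds > period && res < 255) {
                        res++;
                        timeout <<= 1;
                        if (timeout > rto_max)
                                timeout = rto_max;
                        period += timeout;
                }
        }
        return res;
}

static int retrans_to_secs(unsigned char retrans, int timeout, int rto_max)
{
        int period = 0;

        if (retrans > 0) {
                period = timeout;
                while (--retrans) {
                        timeout <<= 1;
                        if (timeout > rto_max)
                                timeout = rto_max;
                        period += timeout;
                }
        }
        return period;
}

int main(void)
{
        int secs[] = { 0, 1, 5, 30, 600 };
        int i;

        for (i = 0; i < 5; i++) {
                unsigned char r = secs_to_retrans(secs[i], 3, 120);

                printf("%4d s -> %u retransmits -> covers %d s\n",
                       secs[i], r, retrans_to_secs(r, 3, 120));
        }
        return 0;
}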
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index e320afea07fc..4c03598ed924 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -641,10 +641,9 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
641 if (!(flg & TCP_FLAG_ACK)) 641 if (!(flg & TCP_FLAG_ACK))
642 return NULL; 642 return NULL;
643 643
644 /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */ 644 /* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
645 if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept && 645 if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
646 TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) { 646 TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
647 inet_csk(sk)->icsk_accept_queue.rskq_defer_accept--;
648 inet_rsk(req)->acked = 1; 647 inet_rsk(req)->acked = 1;
649 return NULL; 648 return NULL;
650 } 649 }
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 14f54eb5a7fc..4f7aaf6996a3 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -496,13 +496,17 @@ done:
496 goto e_inval; 496 goto e_inval;
497 497
498 if (val) { 498 if (val) {
499 struct net_device *dev;
500
499 if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val) 501 if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val)
500 goto e_inval; 502 goto e_inval;
501 503
502 if (__dev_get_by_index(net, val) == NULL) { 504 dev = dev_get_by_index(net, val);
505 if (!dev) {
503 retv = -ENODEV; 506 retv = -ENODEV;
504 break; 507 break;
505 } 508 }
509 dev_put(dev);
506 } 510 }
507 np->mcast_oif = val; 511 np->mcast_oif = val;
508 retv = 0; 512 retv = 0;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 51ab497115eb..fc820cd75453 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1074,6 +1074,8 @@ restart:
1074 err = -ECONNREFUSED; 1074 err = -ECONNREFUSED;
1075 if (other->sk_state != TCP_LISTEN) 1075 if (other->sk_state != TCP_LISTEN)
1076 goto out_unlock; 1076 goto out_unlock;
1077 if (other->sk_shutdown & RCV_SHUTDOWN)
1078 goto out_unlock;
1077 1079
1078 if (unix_recvq_full(other)) { 1080 if (unix_recvq_full(other)) {
1079 err = -EAGAIN; 1081 err = -EAGAIN;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b7c78a403dc2..7495ce347344 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2717,8 +2717,6 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
2717 int r; 2717 int r;
2718 int cpu; 2718 int cpu;
2719 2719
2720 kvm_init_debug();
2721
2722 r = kvm_arch_init(opaque); 2720 r = kvm_arch_init(opaque);
2723 if (r) 2721 if (r)
2724 goto out_fail; 2722 goto out_fail;
@@ -2785,6 +2783,8 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
2785 kvm_preempt_ops.sched_in = kvm_sched_in; 2783 kvm_preempt_ops.sched_in = kvm_sched_in;
2786 kvm_preempt_ops.sched_out = kvm_sched_out; 2784 kvm_preempt_ops.sched_out = kvm_sched_out;
2787 2785
2786 kvm_init_debug();
2787
2788 return 0; 2788 return 0;
2789 2789
2790out_free: 2790out_free:
@@ -2807,7 +2807,6 @@ out_free_0:
2807out: 2807out:
2808 kvm_arch_exit(); 2808 kvm_arch_exit();
2809out_fail: 2809out_fail:
2810 kvm_exit_debug();
2811 return r; 2810 return r;
2812} 2811}
2813EXPORT_SYMBOL_GPL(kvm_init); 2812EXPORT_SYMBOL_GPL(kvm_init);
@@ -2815,6 +2814,7 @@ EXPORT_SYMBOL_GPL(kvm_init);
2815void kvm_exit(void) 2814void kvm_exit(void)
2816{ 2815{
2817 tracepoint_synchronize_unregister(); 2816 tracepoint_synchronize_unregister();
2817 kvm_exit_debug();
2818 misc_deregister(&kvm_dev); 2818 misc_deregister(&kvm_dev);
2819 kmem_cache_destroy(kvm_vcpu_cache); 2819 kmem_cache_destroy(kvm_vcpu_cache);
2820 sysdev_unregister(&kvm_sysdev); 2820 sysdev_unregister(&kvm_sysdev);
@@ -2824,7 +2824,6 @@ void kvm_exit(void)
2824 on_each_cpu(hardware_disable, NULL, 1); 2824 on_each_cpu(hardware_disable, NULL, 1);
2825 kvm_arch_hardware_unsetup(); 2825 kvm_arch_hardware_unsetup();
2826 kvm_arch_exit(); 2826 kvm_arch_exit();
2827 kvm_exit_debug();
2828 free_cpumask_var(cpus_hardware_enabled); 2827 free_cpumask_var(cpus_hardware_enabled);
2829 __free_page(bad_page); 2828 __free_page(bad_page);
2830} 2829}
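The kvm_main.c hunks move debugfs setup to the very end of kvm_init() and its teardown to the front of kvm_exit(), so the init error path never has to unwind it. A generic sketch of that init-last, tear-down-first shape:

#include <stdio.h>

static int setup_hardware(int fail)
{
        if (fail) {
                puts("hw setup failed");
                return -1;
        }
        puts("hw up");
        return 0;
}

static void teardown_hardware(void) { puts("hw down"); }
static void init_debug(void)        { puts("debugfs up"); }
static void exit_debug(void)        { puts("debugfs down"); }

static int module_init_sketch(int fail_hw)
{
        int r = setup_hardware(fail_hw);

        if (r)
                goto out_fail;  /* nothing debug-related to unwind here */

        init_debug();           /* only after everything else succeeded */
        return 0;

out_fail:
        return r;
}

static void module_exit_sketch(void)
{
        exit_debug();           /* last thing set up, first thing torn down */
        teardown_hardware();
}

int main(void)
{
        printf("-- failing init --\n");
        module_init_sketch(1);
        printf("-- working init + exit --\n");
        if (!module_init_sketch(0))
                module_exit_sketch();
        return 0;
}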