-rw-r--r--  arch/arm/plat-s3c64xx/s3c6400-clock.c | 10
-rw-r--r--  arch/ia64/include/asm/ftrace.h | 1
-rw-r--r--  arch/ia64/include/asm/kprobes.h | 5
-rw-r--r--  arch/ia64/include/asm/tlb.h | 2
-rw-r--r--  arch/ia64/include/asm/topology.h | 4
-rw-r--r--  arch/ia64/include/asm/types.h | 5
-rw-r--r--  arch/ia64/kernel/mca.c | 5
-rw-r--r--  arch/ia64/kernel/perfmon.c | 2
-rw-r--r--  arch/ia64/mm/init.c | 2
-rw-r--r--  arch/ia64/mm/tlb.c | 32
-rw-r--r--  arch/x86/include/asm/uaccess_32.h | 5
-rw-r--r--  arch/x86/include/asm/uaccess_64.h | 5
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 7
-rw-r--r--  arch/x86/kernel/e820.c | 4
-rw-r--r--  arch/x86/pci/intel_bus.c | 4
-rw-r--r--  drivers/net/sky2.c | 1
-rw-r--r--  drivers/pci/pci-sysfs.c | 6
-rw-r--r--  drivers/pci/pci.c | 19
-rw-r--r--  drivers/pci/pcie/aer/aer_inject.c | 28
-rw-r--r--  drivers/pci/pcie/portdrv_core.c | 16
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c | 17
-rw-r--r--  drivers/platform/x86/hp-wmi.c | 2
-rw-r--r--  fs/nfs/dir.c | 1
-rw-r--r--  fs/reiserfs/inode.c | 19
-rw-r--r--  fs/reiserfs/ioctl.c | 3
-rw-r--r--  fs/reiserfs/xattr.c | 18
-rw-r--r--  fs/reiserfs/xattr_acl.c | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_trace.h | 8
-rw-r--r--  include/linux/pci.h | 1
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c | 17
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_mech.c | 4
-rw-r--r--  net/sunrpc/auth_gss/gss_mech_switch.c | 2
32 files changed, 172 insertions(+), 85 deletions(-)
diff --git a/arch/arm/plat-s3c64xx/s3c6400-clock.c b/arch/arm/plat-s3c64xx/s3c6400-clock.c
index 6ffa21eb1b91..ffd56deb9e81 100644
--- a/arch/arm/plat-s3c64xx/s3c6400-clock.c
+++ b/arch/arm/plat-s3c64xx/s3c6400-clock.c
@@ -46,6 +46,7 @@ static struct clk clk_ext_xtal_mux = {
 #define clk_fin_epll clk_ext_xtal_mux
 
 #define clk_fout_mpll clk_mpll
+#define clk_fout_epll clk_epll
 
 struct clk_sources {
 	unsigned int nr_sources;
@@ -88,11 +89,6 @@ static struct clksrc_clk clk_mout_apll = {
 	.sources = &clk_src_apll,
 };
 
-static struct clk clk_fout_epll = {
-	.name = "fout_epll",
-	.id = -1,
-};
-
 static struct clk *clk_src_epll_list[] = {
 	[0] = &clk_fin_epll,
 	[1] = &clk_fout_epll,
@@ -715,7 +711,6 @@ static struct clk *clks[] __initdata = {
 	&clk_iis_cd1,
 	&clk_pcm_cd,
 	&clk_mout_epll.clk,
-	&clk_fout_epll,
 	&clk_mout_mpll.clk,
 	&clk_dout_mpll,
 	&clk_mmc0.clk,
@@ -760,7 +755,4 @@ void __init s3c6400_register_clocks(unsigned armclk_divlimit)
 			       clkp->name, ret);
 		}
 	}
-
-	clk_mpll.parent = &clk_mout_mpll.clk;
-	clk_epll.parent = &clk_mout_epll.clk;
 }
diff --git a/arch/ia64/include/asm/ftrace.h b/arch/ia64/include/asm/ftrace.h
index d20db3c2a656..fbd1a2470cae 100644
--- a/arch/ia64/include/asm/ftrace.h
+++ b/arch/ia64/include/asm/ftrace.h
@@ -8,7 +8,6 @@
 extern void _mcount(unsigned long pfs, unsigned long r1, unsigned long b0, unsigned long r0);
 #define mcount _mcount
 
-#include <asm/kprobes.h>
 /* In IA64, MCOUNT_ADDR is set in link time, so it's not a constant at compile time */
 #define MCOUNT_ADDR (((struct fnptr *)mcount)->ip)
 #define FTRACE_ADDR (((struct fnptr *)ftrace_caller)->ip)
diff --git a/arch/ia64/include/asm/kprobes.h b/arch/ia64/include/asm/kprobes.h
index dbf83fb28db3..d5505d6f2382 100644
--- a/arch/ia64/include/asm/kprobes.h
+++ b/arch/ia64/include/asm/kprobes.h
@@ -103,11 +103,6 @@ typedef struct kprobe_opcode {
 	bundle_t bundle;
 } kprobe_opcode_t;
 
-struct fnptr {
-	unsigned long ip;
-	unsigned long gp;
-};
-
 /* Architecture specific copy of original instruction*/
 struct arch_specific_insn {
 	/* copy of the instruction to be emulated */
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 85d965cb19a0..23cce999eb1c 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -74,7 +74,7 @@ struct ia64_tr_entry {
 extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
 extern void ia64_ptr_entry(u64 target_mask, int slot);
 
-extern struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];
+extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 
 /*
  region register macros
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index 3ddb4e709dba..d323071d0f91 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -33,7 +33,9 @@
 /*
  * Returns a bitmask of CPUs on Node 'node'.
  */
-#define cpumask_of_node(node) (&node_to_cpu_mask[node])
+#define cpumask_of_node(node) ((node) == -1 ?		\
+			       cpu_all_mask :		\
+			       &node_to_cpu_mask[node])
 
 /*
  * Returns the number of the node containing Node 'nid'.
diff --git a/arch/ia64/include/asm/types.h b/arch/ia64/include/asm/types.h
index bcd260e597de..93773fd37be0 100644
--- a/arch/ia64/include/asm/types.h
+++ b/arch/ia64/include/asm/types.h
@@ -35,6 +35,11 @@ typedef unsigned int umode_t;
  */
 # ifdef __KERNEL__
 
+struct fnptr {
+	unsigned long ip;
+	unsigned long gp;
+};
+
 /* DMA addresses are 64-bits wide, in general. */
 typedef u64 dma_addr_t;
 
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 32f2639e9b0a..378b4833024f 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1225,9 +1225,12 @@ static void mca_insert_tr(u64 iord)
 	unsigned long psr;
 	int cpu = smp_processor_id();
 
+	if (!ia64_idtrs[cpu])
+		return;
+
 	psr = ia64_clear_ic();
 	for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) {
-		p = &__per_cpu_idtrs[cpu][iord-1][i];
+		p = ia64_idtrs[cpu] + (iord - 1) * IA64_TR_ALLOC_MAX;
 		if (p->pte & 0x1) {
 			old_rr = ia64_get_rr(p->ifa);
 			if (old_rr != p->rr) {
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 5246285a95fb..6bcbe215b9a4 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2293,7 +2293,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
 	 * if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur)
 	 * return -ENOMEM;
 	 */
-	if (size > task->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
+	if (size > task_rlimit(task, RLIMIT_MEMLOCK))
 		return -ENOMEM;
 
 	/*
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index b9609c69343a..7c0d4814a68d 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -91,7 +91,7 @@ dma_mark_clean(void *addr, size_t size)
 inline void
 ia64_set_rbs_bot (void)
 {
-	unsigned long stack_size = current->signal->rlim[RLIMIT_STACK].rlim_max & -16;
+	unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;
 
 	if (stack_size > MAX_USER_STACK_SIZE)
 		stack_size = MAX_USER_STACK_SIZE;
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index ee09d261f2e6..f3de9d7a98b4 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -48,7 +48,7 @@ DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
 DEFINE_PER_CPU(u8, ia64_tr_num);  /*Number of TR slots in current processor*/
 DEFINE_PER_CPU(u8, ia64_tr_used); /*Max Slot number used by kernel*/
 
-struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];
+struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 
 /*
  * Initializes the ia64_ctx.bitmap array based on max_ctx+1.
@@ -429,10 +429,16 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
 	struct ia64_tr_entry *p;
 	int cpu = smp_processor_id();
 
+	if (!ia64_idtrs[cpu]) {
+		ia64_idtrs[cpu] = kmalloc(2 * IA64_TR_ALLOC_MAX *
+				sizeof (struct ia64_tr_entry), GFP_KERNEL);
+		if (!ia64_idtrs[cpu])
+			return -ENOMEM;
+	}
 	r = -EINVAL;
 	/*Check overlap with existing TR entries*/
 	if (target_mask & 0x1) {
-		p = &__per_cpu_idtrs[cpu][0][0];
+		p = ia64_idtrs[cpu];
 		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
 								i++, p++) {
 			if (p->pte & 0x1)
@@ -444,7 +450,7 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
 		}
 	}
 	if (target_mask & 0x2) {
-		p = &__per_cpu_idtrs[cpu][1][0];
+		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX;
 		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
 								i++, p++) {
 			if (p->pte & 0x1)
@@ -459,16 +465,16 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
 	for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {
 		switch (target_mask & 0x3) {
 		case 1:
-			if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1))
+			if (!((ia64_idtrs[cpu] + i)->pte & 0x1))
 				goto found;
 			continue;
 		case 2:
-			if (!(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+			if (!((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
 				goto found;
 			continue;
 		case 3:
-			if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1) &&
-			    !(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+			if (!((ia64_idtrs[cpu] + i)->pte & 0x1) &&
+			    !((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
 				goto found;
 			continue;
 		default:
@@ -488,7 +494,7 @@ found:
 	if (target_mask & 0x1) {
 		ia64_itr(0x1, i, va, pte, log_size);
 		ia64_srlz_i();
-		p = &__per_cpu_idtrs[cpu][0][i];
+		p = ia64_idtrs[cpu] + i;
 		p->ifa = va;
 		p->pte = pte;
 		p->itir = log_size << 2;
@@ -497,7 +503,7 @@ found:
 	if (target_mask & 0x2) {
 		ia64_itr(0x2, i, va, pte, log_size);
 		ia64_srlz_i();
-		p = &__per_cpu_idtrs[cpu][1][i];
+		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i;
 		p->ifa = va;
 		p->pte = pte;
 		p->itir = log_size << 2;
@@ -528,7 +534,7 @@ void ia64_ptr_entry(u64 target_mask, int slot)
 		return;
 
 	if (target_mask & 0x1) {
-		p = &__per_cpu_idtrs[cpu][0][slot];
+		p = ia64_idtrs[cpu] + slot;
 		if ((p->pte&0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
 			p->pte = 0;
 			ia64_ptr(0x1, p->ifa, p->itir>>2);
@@ -537,7 +543,7 @@ void ia64_ptr_entry(u64 target_mask, int slot)
 	}
 
 	if (target_mask & 0x2) {
-		p = &__per_cpu_idtrs[cpu][1][slot];
+		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + slot;
 		if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
 			p->pte = 0;
 			ia64_ptr(0x2, p->ifa, p->itir>>2);
@@ -546,8 +552,8 @@ void ia64_ptr_entry(u64 target_mask, int slot)
 	}
 
 	for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) {
-		if ((__per_cpu_idtrs[cpu][0][i].pte & 0x1) ||
-		    (__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+		if (((ia64_idtrs[cpu] + i)->pte & 0x1) ||
+		    ((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
 			break;
 	}
 	per_cpu(ia64_tr_used, cpu) = i;
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 0c9825e97f36..088d09fb1615 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -205,14 +205,13 @@ static inline unsigned long __must_check copy_from_user(void *to,
 					  unsigned long n)
 {
 	int sz = __compiletime_object_size(to);
-	int ret = -EFAULT;
 
 	if (likely(sz == -1 || sz >= n))
-		ret = _copy_from_user(to, from, n);
+		n = _copy_from_user(to, from, n);
 	else
 		copy_from_user_overflow();
 
-	return ret;
+	return n;
 }
 
 long __must_check strncpy_from_user(char *dst, const char __user *src,
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 46324c6a4f6e..535e421498f6 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -30,16 +30,15 @@ static inline unsigned long __must_check copy_from_user(void *to,
 					  unsigned long n)
 {
 	int sz = __compiletime_object_size(to);
-	int ret = -EFAULT;
 
 	might_fault();
 	if (likely(sz == -1 || sz >= n))
-		ret = _copy_from_user(to, from, n);
+		n = _copy_from_user(to, from, n);
 #ifdef CONFIG_DEBUG_VM
 	else
 		WARN(1, "Buffer overflow detected!\n");
 #endif
-	return ret;
+	return n;
 }
 
 static __always_inline __must_check
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index de00c4619a55..53243ca7816d 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2434,6 +2434,13 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
 		cfg = irq_cfg(irq);
 		raw_spin_lock(&desc->lock);
 
+		/*
+		 * Check if the irq migration is in progress. If so, we
+		 * haven't received the cleanup request yet for this irq.
+		 */
+		if (cfg->move_in_progress)
+			goto unlock;
+
 		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
 			goto unlock;
 
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 05ed7ab2ca48..a1a7876cadcb 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -733,13 +733,13 @@ struct early_res {
 };
 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
 	{ 0, PAGE_SIZE, "BIOS data page", 1 }, /* BIOS data page */
-#ifdef CONFIG_X86_32
+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_TRAMPOLINE)
 	/*
 	 * But first pinch a few for the stack/trampoline stuff
 	 * FIXME: Don't need the extra page at 4K, but need to fix
 	 * trampoline before removing it. (see the GDT stuff)
 	 */
-	{ PAGE_SIZE, PAGE_SIZE, "EX TRAMPOLINE", 1 },
+	{ PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE", 1 },
 #endif
 
 	{}
diff --git a/arch/x86/pci/intel_bus.c b/arch/x86/pci/intel_bus.c
index b7a55dc55d13..f81a2fa8fe25 100644
--- a/arch/x86/pci/intel_bus.c
+++ b/arch/x86/pci/intel_bus.c
@@ -49,6 +49,10 @@ static void __devinit pci_root_bus_res(struct pci_dev *dev)
 	u64 mmioh_base, mmioh_end;
 	int bus_base, bus_end;
 
+	/* some sys doesn't get mmconf enabled */
+	if (dev->cfg_size < 0x120)
+		return;
+
 	if (pci_root_num >= PCI_ROOT_NR) {
 		printk(KERN_DEBUG "intel_bus.c: PCI_ROOT_NR is too small\n");
 		return;
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 1c01b96c9611..2d28d58200d0 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -4684,6 +4684,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
 	INIT_WORK(&hw->restart_work, sky2_restart);
 
 	pci_set_drvdata(pdev, hw);
+	pdev->d3_delay = 150;
 
 	return 0;
 
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index c5df94e86678..807224ec8351 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -75,7 +75,8 @@ static ssize_t local_cpus_show(struct device *dev,
 	int len;
 
 #ifdef CONFIG_NUMA
-	mask = cpumask_of_node(dev_to_node(dev));
+	mask = (dev_to_node(dev) == -1) ? cpu_online_mask :
+					cpumask_of_node(dev_to_node(dev));
 #else
 	mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
 #endif
@@ -93,7 +94,8 @@ static ssize_t local_cpulist_show(struct device *dev,
 	int len;
 
 #ifdef CONFIG_NUMA
-	mask = cpumask_of_node(dev_to_node(dev));
+	mask = (dev_to_node(dev) == -1) ? cpu_online_mask :
+					cpumask_of_node(dev_to_node(dev));
 #else
 	mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
 #endif
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 0906599ebfde..315fea47e784 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -29,7 +29,17 @@ const char *pci_power_names[] = {
 };
 EXPORT_SYMBOL_GPL(pci_power_names);
 
-unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT;
+unsigned int pci_pm_d3_delay;
+
+static void pci_dev_d3_sleep(struct pci_dev *dev)
+{
+	unsigned int delay = dev->d3_delay;
+
+	if (delay < pci_pm_d3_delay)
+		delay = pci_pm_d3_delay;
+
+	msleep(delay);
+}
 
 #ifdef CONFIG_PCI_DOMAINS
 int pci_domains_supported = 1;
@@ -522,7 +532,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
 	/* Mandatory power management transition delays */
 	/* see PCI PM 1.1 5.6.1 table 18 */
 	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
-		msleep(pci_pm_d3_delay);
+		pci_dev_d3_sleep(dev);
 	else if (state == PCI_D2 || dev->current_state == PCI_D2)
 		udelay(PCI_PM_D2_DELAY);
 
@@ -1409,6 +1419,7 @@ void pci_pm_init(struct pci_dev *dev)
 	}
 
 	dev->pm_cap = pm;
+	dev->d3_delay = PCI_PM_D3_WAIT;
 
 	dev->d1_support = false;
 	dev->d2_support = false;
@@ -2247,12 +2258,12 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
 	csr &= ~PCI_PM_CTRL_STATE_MASK;
 	csr |= PCI_D3hot;
 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
-	msleep(pci_pm_d3_delay);
+	pci_dev_d3_sleep(dev);
 
 	csr &= ~PCI_PM_CTRL_STATE_MASK;
 	csr |= PCI_D0;
 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
-	msleep(pci_pm_d3_delay);
+	pci_dev_d3_sleep(dev);
 
 	return 0;
 }
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index 797d47809f7a..8c30a9544d61 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -321,7 +321,7 @@ static int aer_inject(struct aer_error_inj *einj)
 	unsigned long flags;
 	unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn);
 	int pos_cap_err, rp_pos_cap_err;
-	u32 sever;
+	u32 sever, mask;
 	int ret = 0;
 
 	dev = pci_get_domain_bus_and_slot((int)einj->domain, einj->bus, devfn);
@@ -374,6 +374,24 @@ static int aer_inject(struct aer_error_inj *einj)
 	err->header_log2 = einj->header_log2;
 	err->header_log3 = einj->header_log3;
 
+	pci_read_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK, &mask);
+	if (einj->cor_status && !(einj->cor_status & ~mask)) {
+		ret = -EINVAL;
+		printk(KERN_WARNING "The correctable error(s) is masked "
+				"by device\n");
+		spin_unlock_irqrestore(&inject_lock, flags);
+		goto out_put;
+	}
+
+	pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK, &mask);
+	if (einj->uncor_status && !(einj->uncor_status & ~mask)) {
+		ret = -EINVAL;
+		printk(KERN_WARNING "The uncorrectable error(s) is masked "
+				"by device\n");
+		spin_unlock_irqrestore(&inject_lock, flags);
+		goto out_put;
+	}
+
 	rperr = __find_aer_error_by_dev(rpdev);
 	if (!rperr) {
 		rperr = rperr_alloc;
@@ -413,8 +431,14 @@ static int aer_inject(struct aer_error_inj *einj)
 	if (ret)
 		goto out_put;
 
-	if (find_aer_device(rpdev, &edev))
+	if (find_aer_device(rpdev, &edev)) {
+		if (!get_service_data(edev)) {
+			printk(KERN_WARNING "AER service is not initialized\n");
+			ret = -EINVAL;
+			goto out_put;
+		}
 		aer_irq(-1, edev);
+	}
 	else
 		ret = -EINVAL;
 out_put:
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 413262eb95b7..b174188ac121 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -27,7 +27,7 @@
  */
 static void release_pcie_device(struct device *dev)
 {
-	kfree(to_pcie_device(dev));
+	kfree(to_pcie_device(dev));
 }
 
 /**
@@ -346,12 +346,11 @@ static int suspend_iter(struct device *dev, void *data)
 {
 	struct pcie_port_service_driver *service_driver;
 
-	if ((dev->bus == &pcie_port_bus_type) &&
-	    (dev->driver)) {
-		service_driver = to_service_driver(dev->driver);
-		if (service_driver->suspend)
-			service_driver->suspend(to_pcie_device(dev));
-	}
+	if ((dev->bus == &pcie_port_bus_type) && dev->driver) {
+		service_driver = to_service_driver(dev->driver);
+		if (service_driver->suspend)
+			service_driver->suspend(to_pcie_device(dev));
+	}
 	return 0;
 }
 
@@ -494,6 +493,7 @@ int pcie_port_service_register(struct pcie_port_service_driver *new)
 
 	return driver_register(&new->driver);
 }
+EXPORT_SYMBOL(pcie_port_service_register);
 
 /**
  * pcie_port_service_unregister - unregister PCI Express port service driver
@@ -503,6 +503,4 @@ void pcie_port_service_unregister(struct pcie_port_service_driver *drv)
 {
 	driver_unregister(&drv->driver);
 }
-
-EXPORT_SYMBOL(pcie_port_service_register);
 EXPORT_SYMBOL(pcie_port_service_unregister);
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 34d65172a4d7..13c8972886e6 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -63,7 +63,7 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = {
  * pcie_portdrv_probe - Probe PCI-Express port devices
  * @dev: PCI-Express port device being probed
  *
- * If detected invokes the pcie_port_device_register() method for
+ * If detected invokes the pcie_port_device_register() method for
  * this port device.
  *
  */
@@ -78,7 +78,7 @@ static int __devinit pcie_portdrv_probe(struct pci_dev *dev,
 	     (dev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)))
 		return -ENODEV;
 
-	if (!dev->irq && dev->pin) {
+	if (!dev->irq && dev->pin) {
 		dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; "
 			 "check vendor BIOS\n", dev->vendor, dev->device);
 	}
@@ -91,7 +91,7 @@ static int __devinit pcie_portdrv_probe(struct pci_dev *dev,
 	return 0;
 }
 
-static void pcie_portdrv_remove (struct pci_dev *dev)
+static void pcie_portdrv_remove(struct pci_dev *dev)
 {
 	pcie_port_device_remove(dev);
 	pci_disable_device(dev);
@@ -129,14 +129,13 @@ static int error_detected_iter(struct device *device, void *data)
 static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev,
 					enum pci_channel_state error)
 {
-	struct aer_broadcast_data result_data =
-			{error, PCI_ERS_RESULT_CAN_RECOVER};
-	int retval;
+	struct aer_broadcast_data data = {error, PCI_ERS_RESULT_CAN_RECOVER};
+	int ret;
 
 	/* can not fail */
-	retval = device_for_each_child(&dev->dev, &result_data, error_detected_iter);
+	ret = device_for_each_child(&dev->dev, &data, error_detected_iter);
 
-	return result_data.result;
+	return data.result;
 }
 
 static int mmio_enabled_iter(struct device *device, void *data)
@@ -290,7 +289,7 @@ static int __init pcie_portdrv_init(void)
 	return retval;
 }
 
-static void __exit pcie_portdrv_exit(void)
+static void __exit pcie_portdrv_exit(void)
 {
 	pci_unregister_driver(&pcie_portdriver);
 	pcie_port_bus_unregister();
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 5b648f0c6075..ad4c414dbfbc 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -393,8 +393,6 @@ static void hp_wmi_notify(u32 value, void *context)
 	} else
 		printk(KERN_INFO "HP WMI: Unknown key pressed - %x\n",
 			eventcode);
-
-	kfree(obj);
 }
 
 static int __init hp_wmi_input_setup(void)
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 2c5ace4f00a7..3c7f03b669fb 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1615,6 +1615,7 @@ static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 			goto out;
 
 			new_dentry = dentry;
+			rehash = NULL;
 			new_inode = NULL;
 		}
 	}
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 1150ebb2536f..9087b10209e6 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -3062,13 +3062,14 @@ static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb,
 int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
 {
 	struct inode *inode = dentry->d_inode;
-	int error;
 	unsigned int ia_valid;
+	int depth;
+	int error;
 
 	/* must be turned off for recursive notify_change calls */
 	ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID);
 
-	reiserfs_write_lock(inode->i_sb);
+	depth = reiserfs_write_lock_once(inode->i_sb);
 	if (attr->ia_valid & ATTR_SIZE) {
 		/* version 2 items will be caught by the s_maxbytes check
 		** done for us in vmtruncate
@@ -3149,8 +3150,17 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
 				journal_end(&th, inode->i_sb, jbegin_count);
 			}
 		}
-		if (!error)
+		if (!error) {
+			/*
+			 * Relax the lock here, as it might truncate the
+			 * inode pages and wait for inode pages locks.
+			 * To release such page lock, the owner needs the
+			 * reiserfs lock
+			 */
+			reiserfs_write_unlock_once(inode->i_sb, depth);
 			error = inode_setattr(inode, attr);
+			depth = reiserfs_write_lock_once(inode->i_sb);
+		}
 	}
 
 	if (!error && reiserfs_posixacl(inode->i_sb)) {
@@ -3159,7 +3169,8 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
 	}
 
 out:
-	reiserfs_write_unlock(inode->i_sb);
+	reiserfs_write_unlock_once(inode->i_sb, depth);
+
 	return error;
 }
 
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index ace77451ceb1..f53505de0712 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -104,9 +104,10 @@ setflags_out:
 		err = put_user(inode->i_generation, (int __user *)arg);
 		break;
 	case REISERFS_IOC_SETVERSION:
-		if (!is_owner_or_cap(inode))
+		if (!is_owner_or_cap(inode)) {
 			err = -EPERM;
 			break;
+		}
 		err = mnt_want_write(filp->f_path.mnt);
 		if (err)
 			break;
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index c3b004ee627b..81f09fab8ae4 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -452,7 +452,9 @@ static int lookup_and_delete_xattr(struct inode *inode, const char *name)
 	}
 
 	if (dentry->d_inode) {
+		reiserfs_write_lock(inode->i_sb);
 		err = xattr_unlink(xadir->d_inode, dentry);
+		reiserfs_write_unlock(inode->i_sb);
 		update_ctime(inode);
 	}
 
@@ -486,17 +488,21 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
 	if (get_inode_sd_version(inode) == STAT_DATA_V1)
 		return -EOPNOTSUPP;
 
-	if (!buffer)
-		return lookup_and_delete_xattr(inode, name);
-
 	reiserfs_write_unlock(inode->i_sb);
+
+	if (!buffer) {
+		err = lookup_and_delete_xattr(inode, name);
+		reiserfs_write_lock(inode->i_sb);
+		return err;
+	}
+
 	dentry = xattr_lookup(inode, name, flags);
 	if (IS_ERR(dentry)) {
 		reiserfs_write_lock(inode->i_sb);
 		return PTR_ERR(dentry);
 	}
 
-	down_read(&REISERFS_I(inode)->i_xattr_sem);
+	down_write(&REISERFS_I(inode)->i_xattr_sem);
 
 	reiserfs_write_lock(inode->i_sb);
 
@@ -554,8 +560,12 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
 			.ia_size = buffer_size,
 			.ia_valid = ATTR_SIZE | ATTR_CTIME,
 		};
+
+		reiserfs_write_unlock(inode->i_sb);
 		mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_XATTR);
 		down_write(&dentry->d_inode->i_alloc_sem);
+		reiserfs_write_lock(inode->i_sb);
+
 		err = reiserfs_setattr(dentry, &newattrs);
 		up_write(&dentry->d_inode->i_alloc_sem);
 		mutex_unlock(&dentry->d_inode->i_mutex);
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index cc32e6ada67b..dd20a7883f0f 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -455,7 +455,9 @@ int reiserfs_acl_chmod(struct inode *inode)
 		return 0;
 	}
 
+	reiserfs_write_unlock(inode->i_sb);
 	acl = reiserfs_get_acl(inode, ACL_TYPE_ACCESS);
+	reiserfs_write_lock(inode->i_sb);
 	if (!acl)
 		return 0;
 	if (IS_ERR(acl))
diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h
index c40834bdee58..d4ded59d2ffe 100644
--- a/fs/xfs/linux-2.6/xfs_trace.h
+++ b/fs/xfs/linux-2.6/xfs_trace.h
@@ -815,7 +815,7 @@ TRACE_EVENT(name, \
 	), \
 	TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " \
 		  "offset 0x%llx count %zd flags %s " \
-		  "startoff 0x%llx startblock 0x%llx blockcount 0x%llx", \
+		  "startoff 0x%llx startblock %s blockcount 0x%llx", \
 		  MAJOR(__entry->dev), MINOR(__entry->dev), \
 		  __entry->ino, \
 		  __entry->size, \
@@ -824,7 +824,7 @@ TRACE_EVENT(name, \
 		  __entry->count, \
 		  __print_flags(__entry->flags, "|", BMAPI_FLAGS), \
 		  __entry->startoff, \
-		  __entry->startblock, \
+		  xfs_fmtfsblock(__entry->startblock), \
 		  __entry->blockcount) \
 )
 DEFINE_IOMAP_EVENT(xfs_iomap_enter);
@@ -1201,7 +1201,7 @@ TRACE_EVENT(name, \
 	TP_printk("dev %d:%d agno %u agbno %u minlen %u maxlen %u mod %u " \
 		  "prod %u minleft %u total %u alignment %u minalignslop %u " \
 		  "len %u type %s otype %s wasdel %d wasfromfl %d isfl %d " \
-		  "userdata %d firstblock 0x%llx", \
+		  "userdata %d firstblock %s", \
 		  MAJOR(__entry->dev), MINOR(__entry->dev), \
 		  __entry->agno, \
 		  __entry->agbno, \
@@ -1220,7 +1220,7 @@ TRACE_EVENT(name, \
 		  __entry->wasfromfl, \
 		  __entry->isfl, \
 		  __entry->userdata, \
-		  __entry->firstblock) \
+		  xfs_fmtfsblock(__entry->firstblock)) \
 )
 
 DEFINE_ALLOC_EVENT(xfs_alloc_exact_done);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 5da0690d9cee..174e5392e51e 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -243,6 +243,7 @@ struct pci_dev {
 	unsigned int	d2_support:1;	/* Low power state D2 is supported */
 	unsigned int	no_d1d2:1;	/* Only allow D0 and D3 */
 	unsigned int	wakeup_prepared:1;
+	unsigned int	d3_delay;	/* D3->D0 transition time in ms */
 
 #ifdef CONFIG_PCIEASPM
 	struct pcie_link_state *link_state;	/* ASPM link state. */
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 3c3c50f38a1c..f7a7f8380e38 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -644,7 +644,22 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
 	p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
 	if (IS_ERR(p)) {
 		err = PTR_ERR(p);
-		gss_msg->msg.errno = (err == -EAGAIN) ? -EAGAIN : -EACCES;
+		switch (err) {
+		case -EACCES:
+			gss_msg->msg.errno = err;
+			err = mlen;
+			break;
+		case -EFAULT:
+		case -ENOMEM:
+		case -EINVAL:
+		case -ENOSYS:
+			gss_msg->msg.errno = -EAGAIN;
+			break;
+		default:
+			printk(KERN_CRIT "%s: bad return from "
+				"gss_fill_context: %zd\n", __func__, err);
+			BUG();
+		}
 		goto err_release_msg;
 	}
 	gss_msg->ctx = gss_get_ctx(ctx);
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index ef45eba22485..2deb0ed72ff4 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -131,8 +131,10 @@ gss_import_sec_context_kerberos(const void *p,
 	struct krb5_ctx *ctx;
 	int tmp;
 
-	if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS)))
+	if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS))) {
+		p = ERR_PTR(-ENOMEM);
 		goto out_err;
+	}
 
 	p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));
 	if (IS_ERR(p))
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index 6efbb0cd3c7c..76e4c6f4ac3c 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -252,7 +252,7 @@ gss_import_sec_context(const void *input_token, size_t bufsize,
 		struct gss_ctx **ctx_id)
 {
 	if (!(*ctx_id = kzalloc(sizeof(**ctx_id), GFP_KERNEL)))
-		return GSS_S_FAILURE;
+		return -ENOMEM;
 	(*ctx_id)->mech_type = gss_mech_get(mech);
 
 	return mech->gm_ops