aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/acpi/method-customizing.txt5
-rw-r--r--Documentation/feature-removal-schedule.txt36
-rw-r--r--arch/x86/include/asm/acpi.h2
-rw-r--r--arch/x86/include/asm/idle.h2
-rw-r--r--arch/x86/include/asm/processor.h4
-rw-r--r--arch/x86/kernel/apm_32.c2
-rw-r--r--arch/x86/kernel/cpu/bugs.c1
-rw-r--r--arch/x86/kernel/cpu/common.c2
-rw-r--r--arch/x86/kernel/process.c43
-rw-r--r--arch/x86/kernel/smpboot.c2
-rw-r--r--drivers/acpi/Kconfig15
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/acpica/Makefile2
-rw-r--r--drivers/acpi/acpica/acconfig.h1
-rw-r--r--drivers/acpi/acpica/acevents.h17
-rw-r--r--drivers/acpi/acpica/acglobal.h13
-rw-r--r--drivers/acpi/acpica/amlcode.h15
-rw-r--r--drivers/acpi/acpica/dswload.c2
-rw-r--r--drivers/acpi/acpica/dswload2.c2
-rw-r--r--drivers/acpi/acpica/evglock.c335
-rw-r--r--drivers/acpi/acpica/evmisc.c303
-rw-r--r--drivers/acpi/acpica/evregion.c121
-rw-r--r--drivers/acpi/acpica/evrgnini.c2
-rw-r--r--drivers/acpi/acpica/evxfregn.c13
-rw-r--r--drivers/acpi/acpica/excreate.c3
-rw-r--r--drivers/acpi/acpica/nsrepair.c13
-rw-r--r--drivers/acpi/acpica/utdecode.c5
-rw-r--r--drivers/acpi/acpica/utmutex.c12
-rw-r--r--drivers/acpi/bus.c2
-rw-r--r--drivers/acpi/custom_method.c100
-rw-r--r--drivers/acpi/debugfs.c92
-rw-r--r--drivers/acpi/ec.c19
-rw-r--r--drivers/acpi/internal.h3
-rw-r--r--drivers/acpi/osl.c33
-rw-r--r--drivers/acpi/processor_core.c12
-rw-r--r--drivers/acpi/processor_idle.c2
-rw-r--r--drivers/acpi/sysfs.c8
-rw-r--r--drivers/block/floppy.c1
-rw-r--r--drivers/cpuidle/governors/menu.c4
-rw-r--r--drivers/pci/pci-acpi.c2
-rw-r--r--drivers/platform/x86/compal-laptop.c14
-rw-r--r--drivers/platform/x86/msi-laptop.c12
-rw-r--r--drivers/thermal/thermal_sys.c10
-rw-r--r--fs/cifs/cifsacl.c3
-rw-r--r--fs/nfs/delegation.c14
-rw-r--r--fs/nfs/inode.c8
-rw-r--r--fs/nfs/nfs4proc.c9
-rw-r--r--fs/nfs/nfs4state.c6
-rw-r--r--fs/nfs/nfsroot.c2
-rw-r--r--fs/nfs/pnfs.c2
-rw-r--r--fs/squashfs/export.c2
-rw-r--r--fs/squashfs/fragment.c2
-rw-r--r--fs/squashfs/id.c2
-rw-r--r--fs/squashfs/super.c6
-rw-r--r--fs/ubifs/shrinker.c3
-rw-r--r--fs/ubifs/ubifs.h2
-rw-r--r--include/acpi/acpiosxf.h3
-rw-r--r--include/acpi/acpixf.h2
-rw-r--r--include/acpi/actypes.h25
-rw-r--r--include/acpi/processor.h7
-rw-r--r--include/linux/acpi.h3
-rw-r--r--include/linux/pm_qos_params.h4
-rw-r--r--include/linux/sunrpc/msg_prot.h1
-rw-r--r--include/linux/sunrpc/xprt.h3
-rw-r--r--kernel/pm_qos_params.c37
-rw-r--r--mm/rmap.c7
-rw-r--r--net/sunrpc/clnt.c29
-rw-r--r--net/sunrpc/rpcb_clnt.c97
-rw-r--r--net/sunrpc/svc.c2
-rw-r--r--net/sunrpc/xprtsock.c435
70 files changed, 1390 insertions, 614 deletions
diff --git a/Documentation/acpi/method-customizing.txt b/Documentation/acpi/method-customizing.txt
index 3e1d25aee3fb..5f55373dd53b 100644
--- a/Documentation/acpi/method-customizing.txt
+++ b/Documentation/acpi/method-customizing.txt
@@ -66,3 +66,8 @@ Note: We can use a kernel with multiple custom ACPI method running,
66 But each individual write to debugfs can implement a SINGLE 66 But each individual write to debugfs can implement a SINGLE
67 method override. i.e. if we want to insert/override multiple 67 method override. i.e. if we want to insert/override multiple
68 ACPI methods, we need to redo step c) ~ g) for multiple times. 68 ACPI methods, we need to redo step c) ~ g) for multiple times.
69
70Note: Be aware that root can mis-use this driver to modify arbitrary
71 memory and gain additional rights, if root's privileges got
72 restricted (for example if root is not allowed to load additional
73 modules after boot).
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index ff31b1cc50aa..1a9446b59153 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -6,6 +6,42 @@ be removed from this file.
6 6
7--------------------------- 7---------------------------
8 8
9What: x86 floppy disable_hlt
10When: 2012
11Why: ancient workaround of dubious utility clutters the
12 code used by everybody else.
13Who: Len Brown <len.brown@intel.com>
14
15---------------------------
16
17What: CONFIG_APM_CPU_IDLE, and its ability to call APM BIOS in idle
18When: 2012
19Why: This optional sub-feature of APM is of dubious reliability,
20 and ancient APM laptops are likely better served by calling HLT.
21 Deleting CONFIG_APM_CPU_IDLE allows x86 to stop exporting
22 the pm_idle function pointer to modules.
23Who: Len Brown <len.brown@intel.com>
24
25----------------------------
26
27What: x86_32 "no-hlt" cmdline param
28When: 2012
29Why: remove a branch from idle path, simplify code used by everybody.
30 This option disabled the use of HLT in idle and machine_halt()
31 for hardware that was flakey 15-years ago. Today we have
32 "idle=poll" that removed HLT from idle, and so if such a machine
33 is still running the upstream kernel, "idle=poll" is likely sufficient.
34Who: Len Brown <len.brown@intel.com>
35
36----------------------------
37
38What: x86 "idle=mwait" cmdline param
39When: 2012
40Why: simplify x86 idle code
41Who: Len Brown <len.brown@intel.com>
42
43----------------------------
44
9What: PRISM54 45What: PRISM54
10When: 2.6.34 46When: 2.6.34
11 47
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 416d865eae39..610001d385dd 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -139,7 +139,7 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
139 boot_cpu_data.x86_model <= 0x05 && 139 boot_cpu_data.x86_model <= 0x05 &&
140 boot_cpu_data.x86_mask < 0x0A) 140 boot_cpu_data.x86_mask < 0x0A)
141 return 1; 141 return 1;
142 else if (c1e_detected) 142 else if (amd_e400_c1e_detected)
143 return 1; 143 return 1;
144 else 144 else
145 return max_cstate; 145 return max_cstate;
diff --git a/arch/x86/include/asm/idle.h b/arch/x86/include/asm/idle.h
index 38d87379e270..f49253d75710 100644
--- a/arch/x86/include/asm/idle.h
+++ b/arch/x86/include/asm/idle.h
@@ -16,6 +16,6 @@ static inline void enter_idle(void) { }
16static inline void exit_idle(void) { } 16static inline void exit_idle(void) { }
17#endif /* CONFIG_X86_64 */ 17#endif /* CONFIG_X86_64 */
18 18
19void c1e_remove_cpu(int cpu); 19void amd_e400_remove_cpu(int cpu);
20 20
21#endif /* _ASM_X86_IDLE_H */ 21#endif /* _ASM_X86_IDLE_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 4c25ab48257b..219371546afd 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -754,10 +754,10 @@ static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
754extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); 754extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
755 755
756extern void select_idle_routine(const struct cpuinfo_x86 *c); 756extern void select_idle_routine(const struct cpuinfo_x86 *c);
757extern void init_c1e_mask(void); 757extern void init_amd_e400_c1e_mask(void);
758 758
759extern unsigned long boot_option_idle_override; 759extern unsigned long boot_option_idle_override;
760extern bool c1e_detected; 760extern bool amd_e400_c1e_detected;
761 761
762enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT, 762enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
763 IDLE_POLL, IDLE_FORCE_MWAIT}; 763 IDLE_POLL, IDLE_FORCE_MWAIT};
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 3bfa02235965..965a7666c283 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -361,6 +361,7 @@ struct apm_user {
361 * idle percentage above which bios idle calls are done 361 * idle percentage above which bios idle calls are done
362 */ 362 */
363#ifdef CONFIG_APM_CPU_IDLE 363#ifdef CONFIG_APM_CPU_IDLE
364#warning deprecated CONFIG_APM_CPU_IDLE will be deleted in 2012
364#define DEFAULT_IDLE_THRESHOLD 95 365#define DEFAULT_IDLE_THRESHOLD 95
365#else 366#else
366#define DEFAULT_IDLE_THRESHOLD 100 367#define DEFAULT_IDLE_THRESHOLD 100
@@ -904,6 +905,7 @@ static void apm_cpu_idle(void)
904 unsigned int jiffies_since_last_check = jiffies - last_jiffies; 905 unsigned int jiffies_since_last_check = jiffies - last_jiffies;
905 unsigned int bucket; 906 unsigned int bucket;
906 907
908 WARN_ONCE(1, "deprecated apm_cpu_idle will be deleted in 2012");
907recalc: 909recalc:
908 if (jiffies_since_last_check > IDLE_CALC_LIMIT) { 910 if (jiffies_since_last_check > IDLE_CALC_LIMIT) {
909 use_apm_idle = 0; 911 use_apm_idle = 0;
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index c39576cb3018..525514cf33c3 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -19,6 +19,7 @@
19 19
20static int __init no_halt(char *s) 20static int __init no_halt(char *s)
21{ 21{
22 WARN_ONCE(1, "\"no-hlt\" is deprecated, please use \"idle=poll\"\n");
22 boot_cpu_data.hlt_works_ok = 0; 23 boot_cpu_data.hlt_works_ok = 0;
23 return 1; 24 return 1;
24} 25}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 53f02f5d8cce..22a073d7fbff 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -902,7 +902,7 @@ static void vgetcpu_set_mode(void)
902void __init identify_boot_cpu(void) 902void __init identify_boot_cpu(void)
903{ 903{
904 identify_cpu(&boot_cpu_data); 904 identify_cpu(&boot_cpu_data);
905 init_c1e_mask(); 905 init_amd_e400_c1e_mask();
906#ifdef CONFIG_X86_32 906#ifdef CONFIG_X86_32
907 sysenter_setup(); 907 sysenter_setup();
908 enable_sep_cpu(); 908 enable_sep_cpu();
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 88a90a977f8e..426a5b66f7e4 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -337,7 +337,9 @@ EXPORT_SYMBOL(boot_option_idle_override);
337 * Powermanagement idle function, if any.. 337 * Powermanagement idle function, if any..
338 */ 338 */
339void (*pm_idle)(void); 339void (*pm_idle)(void);
340#if defined(CONFIG_APM_MODULE) && defined(CONFIG_APM_CPU_IDLE)
340EXPORT_SYMBOL(pm_idle); 341EXPORT_SYMBOL(pm_idle);
342#endif
341 343
342#ifdef CONFIG_X86_32 344#ifdef CONFIG_X86_32
343/* 345/*
@@ -397,7 +399,7 @@ void default_idle(void)
397 cpu_relax(); 399 cpu_relax();
398 } 400 }
399} 401}
400#ifdef CONFIG_APM_MODULE 402#if defined(CONFIG_APM_MODULE) && defined(CONFIG_APM_CPU_IDLE)
401EXPORT_SYMBOL(default_idle); 403EXPORT_SYMBOL(default_idle);
402#endif 404#endif
403 405
@@ -535,45 +537,45 @@ int mwait_usable(const struct cpuinfo_x86 *c)
535 return (edx & MWAIT_EDX_C1); 537 return (edx & MWAIT_EDX_C1);
536} 538}
537 539
538bool c1e_detected; 540bool amd_e400_c1e_detected;
539EXPORT_SYMBOL(c1e_detected); 541EXPORT_SYMBOL(amd_e400_c1e_detected);
540 542
541static cpumask_var_t c1e_mask; 543static cpumask_var_t amd_e400_c1e_mask;
542 544
543void c1e_remove_cpu(int cpu) 545void amd_e400_remove_cpu(int cpu)
544{ 546{
545 if (c1e_mask != NULL) 547 if (amd_e400_c1e_mask != NULL)
546 cpumask_clear_cpu(cpu, c1e_mask); 548 cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
547} 549}
548 550
549/* 551/*
550 * C1E aware idle routine. We check for C1E active in the interrupt 552 * AMD Erratum 400 aware idle routine. We check for C1E active in the interrupt
551 * pending message MSR. If we detect C1E, then we handle it the same 553 * pending message MSR. If we detect C1E, then we handle it the same
552 * way as C3 power states (local apic timer and TSC stop) 554 * way as C3 power states (local apic timer and TSC stop)
553 */ 555 */
554static void c1e_idle(void) 556static void amd_e400_idle(void)
555{ 557{
556 if (need_resched()) 558 if (need_resched())
557 return; 559 return;
558 560
559 if (!c1e_detected) { 561 if (!amd_e400_c1e_detected) {
560 u32 lo, hi; 562 u32 lo, hi;
561 563
562 rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi); 564 rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
563 565
564 if (lo & K8_INTP_C1E_ACTIVE_MASK) { 566 if (lo & K8_INTP_C1E_ACTIVE_MASK) {
565 c1e_detected = true; 567 amd_e400_c1e_detected = true;
566 if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) 568 if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
567 mark_tsc_unstable("TSC halt in AMD C1E"); 569 mark_tsc_unstable("TSC halt in AMD C1E");
568 printk(KERN_INFO "System has AMD C1E enabled\n"); 570 printk(KERN_INFO "System has AMD C1E enabled\n");
569 } 571 }
570 } 572 }
571 573
572 if (c1e_detected) { 574 if (amd_e400_c1e_detected) {
573 int cpu = smp_processor_id(); 575 int cpu = smp_processor_id();
574 576
575 if (!cpumask_test_cpu(cpu, c1e_mask)) { 577 if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
576 cpumask_set_cpu(cpu, c1e_mask); 578 cpumask_set_cpu(cpu, amd_e400_c1e_mask);
577 /* 579 /*
578 * Force broadcast so ACPI can not interfere. 580 * Force broadcast so ACPI can not interfere.
579 */ 581 */
@@ -616,17 +618,17 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
616 pm_idle = mwait_idle; 618 pm_idle = mwait_idle;
617 } else if (cpu_has_amd_erratum(amd_erratum_400)) { 619 } else if (cpu_has_amd_erratum(amd_erratum_400)) {
618 /* E400: APIC timer interrupt does not wake up CPU from C1e */ 620 /* E400: APIC timer interrupt does not wake up CPU from C1e */
619 printk(KERN_INFO "using C1E aware idle routine\n"); 621 printk(KERN_INFO "using AMD E400 aware idle routine\n");
620 pm_idle = c1e_idle; 622 pm_idle = amd_e400_idle;
621 } else 623 } else
622 pm_idle = default_idle; 624 pm_idle = default_idle;
623} 625}
624 626
625void __init init_c1e_mask(void) 627void __init init_amd_e400_c1e_mask(void)
626{ 628{
627 /* If we're using c1e_idle, we need to allocate c1e_mask. */ 629 /* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
628 if (pm_idle == c1e_idle) 630 if (pm_idle == amd_e400_idle)
629 zalloc_cpumask_var(&c1e_mask, GFP_KERNEL); 631 zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
630} 632}
631 633
632static int __init idle_setup(char *str) 634static int __init idle_setup(char *str)
@@ -640,6 +642,7 @@ static int __init idle_setup(char *str)
640 boot_option_idle_override = IDLE_POLL; 642 boot_option_idle_override = IDLE_POLL;
641 } else if (!strcmp(str, "mwait")) { 643 } else if (!strcmp(str, "mwait")) {
642 boot_option_idle_override = IDLE_FORCE_MWAIT; 644 boot_option_idle_override = IDLE_FORCE_MWAIT;
 645 WARN_ONCE(1, "\"idle=mwait\" will be removed in 2012\n");
643 } else if (!strcmp(str, "halt")) { 646 } else if (!strcmp(str, "halt")) {
644 /* 647 /*
645 * When the boot option of idle=halt is added, halt is 648 * When the boot option of idle=halt is added, halt is
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index a3c430bdfb60..eefd96765e79 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1307,7 +1307,7 @@ void play_dead_common(void)
1307{ 1307{
1308 idle_task_exit(); 1308 idle_task_exit();
1309 reset_lazy_tlbstate(); 1309 reset_lazy_tlbstate();
1310 c1e_remove_cpu(raw_smp_processor_id()); 1310 amd_e400_remove_cpu(raw_smp_processor_id());
1311 1311
1312 mb(); 1312 mb();
1313 /* Ack it */ 1313 /* Ack it */
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index bc2218db5ba9..de0e3df76776 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -369,6 +369,21 @@ config ACPI_HED
369 which is used to report some hardware errors notified via 369 which is used to report some hardware errors notified via
370 SCI, mainly the corrected errors. 370 SCI, mainly the corrected errors.
371 371
372config ACPI_CUSTOM_METHOD
373 tristate "Allow ACPI methods to be inserted/replaced at run time"
374 depends on DEBUG_FS
375 default n
376 help
 377 This debug facility allows ACPI AML methods to be inserted and/or
378 replaced without rebooting the system. For details refer to:
379 Documentation/acpi/method-customizing.txt.
380
381 NOTE: This option is security sensitive, because it allows arbitrary
382 kernel memory to be written to by root (uid=0) users, allowing them
383 to bypass certain security measures (e.g. if root is not allowed to
384 load additional kernel modules after boot, this feature may be used
385 to override that restriction).
386
372source "drivers/acpi/apei/Kconfig" 387source "drivers/acpi/apei/Kconfig"
373 388
374endif # ACPI 389endif # ACPI
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index b66fbb2fc85f..ecb26b4f29a0 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_ACPI_SBS) += sbshc.o
61obj-$(CONFIG_ACPI_SBS) += sbs.o 61obj-$(CONFIG_ACPI_SBS) += sbs.o
62obj-$(CONFIG_ACPI_HED) += hed.o 62obj-$(CONFIG_ACPI_HED) += hed.o
63obj-$(CONFIG_ACPI_EC_DEBUGFS) += ec_sys.o 63obj-$(CONFIG_ACPI_EC_DEBUGFS) += ec_sys.o
64obj-$(CONFIG_ACPI_CUSTOM_METHOD)+= custom_method.o
64 65
65# processor has its own "processor." module_param namespace 66# processor has its own "processor." module_param namespace
66processor-y := processor_driver.o processor_throttling.o 67processor-y := processor_driver.o processor_throttling.o
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index a1224712fd0c..301bd2d388ad 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -14,7 +14,7 @@ acpi-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \
14 14
15acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \ 15acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \
16 evmisc.o evrgnini.o evxface.o evxfregn.o \ 16 evmisc.o evrgnini.o evxface.o evxfregn.o \
17 evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o evxfgpe.o 17 evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o evxfgpe.o evglock.o
18 18
19acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\ 19acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\
20 exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\ 20 exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\
diff --git a/drivers/acpi/acpica/acconfig.h b/drivers/acpi/acpica/acconfig.h
index ab87396c2c07..bc533dde16c4 100644
--- a/drivers/acpi/acpica/acconfig.h
+++ b/drivers/acpi/acpica/acconfig.h
@@ -187,7 +187,6 @@
187 187
188/* Operation regions */ 188/* Operation regions */
189 189
190#define ACPI_NUM_PREDEFINED_REGIONS 9
191#define ACPI_USER_REGION_BEGIN 0x80 190#define ACPI_USER_REGION_BEGIN 0x80
192 191
193/* Maximum space_ids for Operation Regions */ 192/* Maximum space_ids for Operation Regions */
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 41d247daf461..bea3b4899183 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -58,12 +58,6 @@ u32 acpi_ev_fixed_event_detect(void);
58 */ 58 */
59u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node); 59u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node);
60 60
61acpi_status acpi_ev_acquire_global_lock(u16 timeout);
62
63acpi_status acpi_ev_release_global_lock(void);
64
65acpi_status acpi_ev_init_global_lock_handler(void);
66
67u32 acpi_ev_get_gpe_number_index(u32 gpe_number); 61u32 acpi_ev_get_gpe_number_index(u32 gpe_number);
68 62
69acpi_status 63acpi_status
@@ -71,6 +65,17 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node *node,
71 u32 notify_value); 65 u32 notify_value);
72 66
73/* 67/*
68 * evglock - Global Lock support
69 */
70acpi_status acpi_ev_init_global_lock_handler(void);
71
72acpi_status acpi_ev_acquire_global_lock(u16 timeout);
73
74acpi_status acpi_ev_release_global_lock(void);
75
76acpi_status acpi_ev_remove_global_lock_handler(void);
77
78/*
74 * evgpe - Low-level GPE support 79 * evgpe - Low-level GPE support
75 */ 80 */
76u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list); 81u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index d69750b83b36..73863d86f022 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -214,24 +214,23 @@ ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[ACPI_NUM_MUTEX];
214 214
215/* 215/*
216 * Global lock mutex is an actual AML mutex object 216 * Global lock mutex is an actual AML mutex object
217 * Global lock semaphore works in conjunction with the HW global lock 217 * Global lock semaphore works in conjunction with the actual global lock
218 * Global lock spinlock is used for "pending" handshake
218 */ 219 */
219ACPI_EXTERN union acpi_operand_object *acpi_gbl_global_lock_mutex; 220ACPI_EXTERN union acpi_operand_object *acpi_gbl_global_lock_mutex;
220ACPI_EXTERN acpi_semaphore acpi_gbl_global_lock_semaphore; 221ACPI_EXTERN acpi_semaphore acpi_gbl_global_lock_semaphore;
222ACPI_EXTERN acpi_spinlock acpi_gbl_global_lock_pending_lock;
221ACPI_EXTERN u16 acpi_gbl_global_lock_handle; 223ACPI_EXTERN u16 acpi_gbl_global_lock_handle;
222ACPI_EXTERN u8 acpi_gbl_global_lock_acquired; 224ACPI_EXTERN u8 acpi_gbl_global_lock_acquired;
223ACPI_EXTERN u8 acpi_gbl_global_lock_present; 225ACPI_EXTERN u8 acpi_gbl_global_lock_present;
226ACPI_EXTERN u8 acpi_gbl_global_lock_pending;
224 227
225/* 228/*
226 * Spinlocks are used for interfaces that can be possibly called at 229 * Spinlocks are used for interfaces that can be possibly called at
227 * interrupt level 230 * interrupt level
228 */ 231 */
229ACPI_EXTERN spinlock_t _acpi_gbl_gpe_lock; /* For GPE data structs and registers */ 232ACPI_EXTERN acpi_spinlock acpi_gbl_gpe_lock; /* For GPE data structs and registers */
230ACPI_EXTERN spinlock_t _acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */ 233ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */
231ACPI_EXTERN spinlock_t _acpi_ev_global_lock_pending_lock; /* For global lock */
232#define acpi_gbl_gpe_lock &_acpi_gbl_gpe_lock
233#define acpi_gbl_hardware_lock &_acpi_gbl_hardware_lock
234#define acpi_ev_global_lock_pending_lock &_acpi_ev_global_lock_pending_lock
235 234
236/***************************************************************************** 235/*****************************************************************************
237 * 236 *
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index f4f0998d3967..1077f17859ed 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -394,21 +394,6 @@
394#define AML_CLASS_METHOD_CALL 0x09 394#define AML_CLASS_METHOD_CALL 0x09
395#define AML_CLASS_UNKNOWN 0x0A 395#define AML_CLASS_UNKNOWN 0x0A
396 396
397/* Predefined Operation Region space_iDs */
398
399typedef enum {
400 REGION_MEMORY = 0,
401 REGION_IO,
402 REGION_PCI_CONFIG,
403 REGION_EC,
404 REGION_SMBUS,
405 REGION_CMOS,
406 REGION_PCI_BAR,
407 REGION_IPMI,
408 REGION_DATA_TABLE, /* Internal use only */
409 REGION_FIXED_HW = 0x7F
410} AML_REGION_TYPES;
411
412/* Comparison operation codes for match_op operator */ 397/* Comparison operation codes for match_op operator */
413 398
414typedef enum { 399typedef enum {
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 23a3b1ab20c1..324acec1179a 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -450,7 +450,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
450 status = 450 status =
451 acpi_ex_create_region(op->named.data, 451 acpi_ex_create_region(op->named.data,
452 op->named.length, 452 op->named.length,
453 REGION_DATA_TABLE, 453 ACPI_ADR_SPACE_DATA_TABLE,
454 walk_state); 454 walk_state);
455 if (ACPI_FAILURE(status)) { 455 if (ACPI_FAILURE(status)) {
456 return_ACPI_STATUS(status); 456 return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 4be4e921dfe1..976318138c56 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -562,7 +562,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
562 ((op->common.value.arg)->common.value. 562 ((op->common.value.arg)->common.value.
563 integer); 563 integer);
564 } else { 564 } else {
565 region_space = REGION_DATA_TABLE; 565 region_space = ACPI_ADR_SPACE_DATA_TABLE;
566 } 566 }
567 567
568 /* 568 /*
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
new file mode 100644
index 000000000000..56a562a1e5d7
--- /dev/null
+++ b/drivers/acpi/acpica/evglock.c
@@ -0,0 +1,335 @@
1/******************************************************************************
2 *
3 * Module Name: evglock - Global Lock support
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include "accommon.h"
46#include "acevents.h"
47#include "acinterp.h"
48
49#define _COMPONENT ACPI_EVENTS
50ACPI_MODULE_NAME("evglock")
51
52/* Local prototypes */
53static u32 acpi_ev_global_lock_handler(void *context);
54
55/*******************************************************************************
56 *
57 * FUNCTION: acpi_ev_init_global_lock_handler
58 *
59 * PARAMETERS: None
60 *
61 * RETURN: Status
62 *
63 * DESCRIPTION: Install a handler for the global lock release event
64 *
65 ******************************************************************************/
66
67acpi_status acpi_ev_init_global_lock_handler(void)
68{
69 acpi_status status;
70
71 ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
72
73 /* Attempt installation of the global lock handler */
74
75 status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
76 acpi_ev_global_lock_handler,
77 NULL);
78
79 /*
80 * If the global lock does not exist on this platform, the attempt to
81 * enable GBL_STATUS will fail (the GBL_ENABLE bit will not stick).
82 * Map to AE_OK, but mark global lock as not present. Any attempt to
83 * actually use the global lock will be flagged with an error.
84 */
85 acpi_gbl_global_lock_present = FALSE;
86 if (status == AE_NO_HARDWARE_RESPONSE) {
87 ACPI_ERROR((AE_INFO,
88 "No response from Global Lock hardware, disabling lock"));
89
90 return_ACPI_STATUS(AE_OK);
91 }
92
93 status = acpi_os_create_lock(&acpi_gbl_global_lock_pending_lock);
94 if (ACPI_FAILURE(status)) {
95 return_ACPI_STATUS(status);
96 }
97
98 acpi_gbl_global_lock_pending = FALSE;
99 acpi_gbl_global_lock_present = TRUE;
100 return_ACPI_STATUS(status);
101}
102
103/*******************************************************************************
104 *
105 * FUNCTION: acpi_ev_remove_global_lock_handler
106 *
107 * PARAMETERS: None
108 *
109 * RETURN: Status
110 *
111 * DESCRIPTION: Remove the handler for the Global Lock
112 *
113 ******************************************************************************/
114
115acpi_status acpi_ev_remove_global_lock_handler(void)
116{
117 acpi_status status;
118
119 ACPI_FUNCTION_TRACE(ev_remove_global_lock_handler);
120
121 acpi_gbl_global_lock_present = FALSE;
122 status = acpi_remove_fixed_event_handler(ACPI_EVENT_GLOBAL,
123 acpi_ev_global_lock_handler);
124
125 return_ACPI_STATUS(status);
126}
127
128/*******************************************************************************
129 *
130 * FUNCTION: acpi_ev_global_lock_handler
131 *
132 * PARAMETERS: Context - From thread interface, not used
133 *
134 * RETURN: ACPI_INTERRUPT_HANDLED
135 *
136 * DESCRIPTION: Invoked directly from the SCI handler when a global lock
137 * release interrupt occurs. If there is actually a pending
138 * request for the lock, signal the waiting thread.
139 *
140 ******************************************************************************/
141
142static u32 acpi_ev_global_lock_handler(void *context)
143{
144 acpi_status status;
145 acpi_cpu_flags flags;
146
147 flags = acpi_os_acquire_lock(acpi_gbl_global_lock_pending_lock);
148
149 /*
150 * If a request for the global lock is not actually pending,
151 * we are done. This handles "spurious" global lock interrupts
152 * which are possible (and have been seen) with bad BIOSs.
153 */
154 if (!acpi_gbl_global_lock_pending) {
155 goto cleanup_and_exit;
156 }
157
158 /*
159 * Send a unit to the global lock semaphore. The actual acquisition
160 * of the global lock will be performed by the waiting thread.
161 */
162 status = acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore, 1);
163 if (ACPI_FAILURE(status)) {
164 ACPI_ERROR((AE_INFO, "Could not signal Global Lock semaphore"));
165 }
166
167 acpi_gbl_global_lock_pending = FALSE;
168
169 cleanup_and_exit:
170
171 acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags);
172 return (ACPI_INTERRUPT_HANDLED);
173}
174
175/******************************************************************************
176 *
177 * FUNCTION: acpi_ev_acquire_global_lock
178 *
179 * PARAMETERS: Timeout - Max time to wait for the lock, in millisec.
180 *
181 * RETURN: Status
182 *
183 * DESCRIPTION: Attempt to gain ownership of the Global Lock.
184 *
185 * MUTEX: Interpreter must be locked
186 *
187 * Note: The original implementation allowed multiple threads to "acquire" the
188 * Global Lock, and the OS would hold the lock until the last thread had
189 * released it. However, this could potentially starve the BIOS out of the
190 * lock, especially in the case where there is a tight handshake between the
191 * Embedded Controller driver and the BIOS. Therefore, this implementation
192 * allows only one thread to acquire the HW Global Lock at a time, and makes
193 * the global lock appear as a standard mutex on the OS side.
194 *
195 *****************************************************************************/
196
197acpi_status acpi_ev_acquire_global_lock(u16 timeout)
198{
199 acpi_cpu_flags flags;
200 acpi_status status;
201 u8 acquired = FALSE;
202
203 ACPI_FUNCTION_TRACE(ev_acquire_global_lock);
204
205 /*
206 * Only one thread can acquire the GL at a time, the global_lock_mutex
207 * enforces this. This interface releases the interpreter if we must wait.
208 */
209 status =
210 acpi_ex_system_wait_mutex(acpi_gbl_global_lock_mutex->mutex.
211 os_mutex, timeout);
212 if (ACPI_FAILURE(status)) {
213 return_ACPI_STATUS(status);
214 }
215
216 /*
217 * Update the global lock handle and check for wraparound. The handle is
218 * only used for the external global lock interfaces, but it is updated
219 * here to properly handle the case where a single thread may acquire the
220 * lock via both the AML and the acpi_acquire_global_lock interfaces. The
221 * handle is therefore updated on the first acquire from a given thread
222 * regardless of where the acquisition request originated.
223 */
224 acpi_gbl_global_lock_handle++;
225 if (acpi_gbl_global_lock_handle == 0) {
226 acpi_gbl_global_lock_handle = 1;
227 }
228
229 /*
230 * Make sure that a global lock actually exists. If not, just
231 * treat the lock as a standard mutex.
232 */
233 if (!acpi_gbl_global_lock_present) {
234 acpi_gbl_global_lock_acquired = TRUE;
235 return_ACPI_STATUS(AE_OK);
236 }
237
238 flags = acpi_os_acquire_lock(acpi_gbl_global_lock_pending_lock);
239
240 do {
241
242 /* Attempt to acquire the actual hardware lock */
243
244 ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
245 if (acquired) {
246 acpi_gbl_global_lock_acquired = TRUE;
247 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
248 "Acquired hardware Global Lock\n"));
249 break;
250 }
251
252 /*
253 * Did not get the lock. The pending bit was set above, and
254 * we must now wait until we receive the global lock
255 * released interrupt.
256 */
257 acpi_gbl_global_lock_pending = TRUE;
258 acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags);
259
260 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
261 "Waiting for hardware Global Lock\n"));
262
263 /*
264 * Wait for handshake with the global lock interrupt handler.
265 * This interface releases the interpreter if we must wait.
266 */
267 status =
268 acpi_ex_system_wait_semaphore
269 (acpi_gbl_global_lock_semaphore, ACPI_WAIT_FOREVER);
270
271 flags = acpi_os_acquire_lock(acpi_gbl_global_lock_pending_lock);
272
273 } while (ACPI_SUCCESS(status));
274
275 acpi_gbl_global_lock_pending = FALSE;
276 acpi_os_release_lock(acpi_gbl_global_lock_pending_lock, flags);
277
278 return_ACPI_STATUS(status);
279}
280
281/*******************************************************************************
282 *
283 * FUNCTION: acpi_ev_release_global_lock
284 *
285 * PARAMETERS: None
286 *
287 * RETURN: Status
288 *
289 * DESCRIPTION: Releases ownership of the Global Lock.
290 *
291 ******************************************************************************/
292
293acpi_status acpi_ev_release_global_lock(void)
294{
295 u8 pending = FALSE;
296 acpi_status status = AE_OK;
297
298 ACPI_FUNCTION_TRACE(ev_release_global_lock);
299
300 /* Lock must be already acquired */
301
302 if (!acpi_gbl_global_lock_acquired) {
303 ACPI_WARNING((AE_INFO,
304 "Cannot release the ACPI Global Lock, it has not been acquired"));
305 return_ACPI_STATUS(AE_NOT_ACQUIRED);
306 }
307
308 if (acpi_gbl_global_lock_present) {
309
310 /* Allow any thread to release the lock */
311
312 ACPI_RELEASE_GLOBAL_LOCK(acpi_gbl_FACS, pending);
313
314 /*
315 * If the pending bit was set, we must write GBL_RLS to the control
316 * register
317 */
318 if (pending) {
319 status =
320 acpi_write_bit_register
321 (ACPI_BITREG_GLOBAL_LOCK_RELEASE,
322 ACPI_ENABLE_EVENT);
323 }
324
325 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
326 "Released hardware Global Lock\n"));
327 }
328
329 acpi_gbl_global_lock_acquired = FALSE;
330
331 /* Release the local GL mutex */
332
333 acpi_os_release_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex);
334 return_ACPI_STATUS(status);
335}
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 7dc80946f7bd..d0b331844427 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -45,7 +45,6 @@
45#include "accommon.h" 45#include "accommon.h"
46#include "acevents.h" 46#include "acevents.h"
47#include "acnamesp.h" 47#include "acnamesp.h"
48#include "acinterp.h"
49 48
50#define _COMPONENT ACPI_EVENTS 49#define _COMPONENT ACPI_EVENTS
51ACPI_MODULE_NAME("evmisc") 50ACPI_MODULE_NAME("evmisc")
@@ -53,10 +52,6 @@ ACPI_MODULE_NAME("evmisc")
53/* Local prototypes */ 52/* Local prototypes */
54static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context); 53static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context);
55 54
56static u32 acpi_ev_global_lock_handler(void *context);
57
58static acpi_status acpi_ev_remove_global_lock_handler(void);
59
60/******************************************************************************* 55/*******************************************************************************
61 * 56 *
62 * FUNCTION: acpi_ev_is_notify_object 57 * FUNCTION: acpi_ev_is_notify_object
@@ -275,304 +270,6 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
275 acpi_ut_delete_generic_state(notify_info); 270 acpi_ut_delete_generic_state(notify_info);
276} 271}
277 272
278/*******************************************************************************
279 *
280 * FUNCTION: acpi_ev_global_lock_handler
281 *
282 * PARAMETERS: Context - From thread interface, not used
283 *
284 * RETURN: ACPI_INTERRUPT_HANDLED
285 *
286 * DESCRIPTION: Invoked directly from the SCI handler when a global lock
287 * release interrupt occurs. If there's a thread waiting for
288 * the global lock, signal it.
289 *
290 * NOTE: Assumes that the semaphore can be signaled from interrupt level. If
291 * this is not possible for some reason, a separate thread will have to be
292 * scheduled to do this.
293 *
294 ******************************************************************************/
295static u8 acpi_ev_global_lock_pending;
296
297static u32 acpi_ev_global_lock_handler(void *context)
298{
299 acpi_status status;
300 acpi_cpu_flags flags;
301
302 flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
303
304 if (!acpi_ev_global_lock_pending) {
305 goto out;
306 }
307
308 /* Send a unit to the semaphore */
309
310 status = acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore, 1);
311 if (ACPI_FAILURE(status)) {
312 ACPI_ERROR((AE_INFO, "Could not signal Global Lock semaphore"));
313 }
314
315 acpi_ev_global_lock_pending = FALSE;
316
317 out:
318 acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
319
320 return (ACPI_INTERRUPT_HANDLED);
321}
322
323/*******************************************************************************
324 *
325 * FUNCTION: acpi_ev_init_global_lock_handler
326 *
327 * PARAMETERS: None
328 *
329 * RETURN: Status
330 *
331 * DESCRIPTION: Install a handler for the global lock release event
332 *
333 ******************************************************************************/
334
335acpi_status acpi_ev_init_global_lock_handler(void)
336{
337 acpi_status status;
338
339 ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
340
341 /* Attempt installation of the global lock handler */
342
343 status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
344 acpi_ev_global_lock_handler,
345 NULL);
346
347 /*
348 * If the global lock does not exist on this platform, the attempt to
349 * enable GBL_STATUS will fail (the GBL_ENABLE bit will not stick).
350 * Map to AE_OK, but mark global lock as not present. Any attempt to
351 * actually use the global lock will be flagged with an error.
352 */
353 if (status == AE_NO_HARDWARE_RESPONSE) {
354 ACPI_ERROR((AE_INFO,
355 "No response from Global Lock hardware, disabling lock"));
356
357 acpi_gbl_global_lock_present = FALSE;
358 return_ACPI_STATUS(AE_OK);
359 }
360
361 acpi_gbl_global_lock_present = TRUE;
362 return_ACPI_STATUS(status);
363}
364
365/*******************************************************************************
366 *
367 * FUNCTION: acpi_ev_remove_global_lock_handler
368 *
369 * PARAMETERS: None
370 *
371 * RETURN: Status
372 *
373 * DESCRIPTION: Remove the handler for the Global Lock
374 *
375 ******************************************************************************/
376
377static acpi_status acpi_ev_remove_global_lock_handler(void)
378{
379 acpi_status status;
380
381 ACPI_FUNCTION_TRACE(ev_remove_global_lock_handler);
382
383 acpi_gbl_global_lock_present = FALSE;
384 status = acpi_remove_fixed_event_handler(ACPI_EVENT_GLOBAL,
385 acpi_ev_global_lock_handler);
386
387 return_ACPI_STATUS(status);
388}
389
390/******************************************************************************
391 *
392 * FUNCTION: acpi_ev_acquire_global_lock
393 *
394 * PARAMETERS: Timeout - Max time to wait for the lock, in millisec.
395 *
396 * RETURN: Status
397 *
398 * DESCRIPTION: Attempt to gain ownership of the Global Lock.
399 *
400 * MUTEX: Interpreter must be locked
401 *
402 * Note: The original implementation allowed multiple threads to "acquire" the
403 * Global Lock, and the OS would hold the lock until the last thread had
404 * released it. However, this could potentially starve the BIOS out of the
405 * lock, especially in the case where there is a tight handshake between the
406 * Embedded Controller driver and the BIOS. Therefore, this implementation
407 * allows only one thread to acquire the HW Global Lock at a time, and makes
408 * the global lock appear as a standard mutex on the OS side.
409 *
410 *****************************************************************************/
411static acpi_thread_id acpi_ev_global_lock_thread_id;
412static int acpi_ev_global_lock_acquired;
413
414acpi_status acpi_ev_acquire_global_lock(u16 timeout)
415{
416 acpi_cpu_flags flags;
417 acpi_status status = AE_OK;
418 u8 acquired = FALSE;
419
420 ACPI_FUNCTION_TRACE(ev_acquire_global_lock);
421
422 /*
423 * Only one thread can acquire the GL at a time, the global_lock_mutex
424 * enforces this. This interface releases the interpreter if we must wait.
425 */
426 status = acpi_ex_system_wait_mutex(
427 acpi_gbl_global_lock_mutex->mutex.os_mutex, 0);
428 if (status == AE_TIME) {
429 if (acpi_ev_global_lock_thread_id == acpi_os_get_thread_id()) {
430 acpi_ev_global_lock_acquired++;
431 return AE_OK;
432 }
433 }
434
435 if (ACPI_FAILURE(status)) {
436 status = acpi_ex_system_wait_mutex(
437 acpi_gbl_global_lock_mutex->mutex.os_mutex,
438 timeout);
439 }
440 if (ACPI_FAILURE(status)) {
441 return_ACPI_STATUS(status);
442 }
443
444 acpi_ev_global_lock_thread_id = acpi_os_get_thread_id();
445 acpi_ev_global_lock_acquired++;
446
447 /*
448 * Update the global lock handle and check for wraparound. The handle is
449 * only used for the external global lock interfaces, but it is updated
450 * here to properly handle the case where a single thread may acquire the
451 * lock via both the AML and the acpi_acquire_global_lock interfaces. The
452 * handle is therefore updated on the first acquire from a given thread
453 * regardless of where the acquisition request originated.
454 */
455 acpi_gbl_global_lock_handle++;
456 if (acpi_gbl_global_lock_handle == 0) {
457 acpi_gbl_global_lock_handle = 1;
458 }
459
460 /*
461 * Make sure that a global lock actually exists. If not, just treat the
462 * lock as a standard mutex.
463 */
464 if (!acpi_gbl_global_lock_present) {
465 acpi_gbl_global_lock_acquired = TRUE;
466 return_ACPI_STATUS(AE_OK);
467 }
468
469 flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
470
471 do {
472
473 /* Attempt to acquire the actual hardware lock */
474
475 ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired);
476 if (acquired) {
477 acpi_gbl_global_lock_acquired = TRUE;
478
479 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
480 "Acquired hardware Global Lock\n"));
481 break;
482 }
483
484 acpi_ev_global_lock_pending = TRUE;
485
486 acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
487
488 /*
489 * Did not get the lock. The pending bit was set above, and we
490 * must wait until we get the global lock released interrupt.
491 */
492 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
493 "Waiting for hardware Global Lock\n"));
494
495 /*
496 * Wait for handshake with the global lock interrupt handler.
497 * This interface releases the interpreter if we must wait.
498 */
499 status = acpi_ex_system_wait_semaphore(
500 acpi_gbl_global_lock_semaphore,
501 ACPI_WAIT_FOREVER);
502
503 flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock);
504
505 } while (ACPI_SUCCESS(status));
506
507 acpi_ev_global_lock_pending = FALSE;
508
509 acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags);
510
511 return_ACPI_STATUS(status);
512}
513
514/*******************************************************************************
515 *
516 * FUNCTION: acpi_ev_release_global_lock
517 *
518 * PARAMETERS: None
519 *
520 * RETURN: Status
521 *
522 * DESCRIPTION: Releases ownership of the Global Lock.
523 *
524 ******************************************************************************/
525
526acpi_status acpi_ev_release_global_lock(void)
527{
528 u8 pending = FALSE;
529 acpi_status status = AE_OK;
530
531 ACPI_FUNCTION_TRACE(ev_release_global_lock);
532
533 /* Lock must be already acquired */
534
535 if (!acpi_gbl_global_lock_acquired) {
536 ACPI_WARNING((AE_INFO,
537 "Cannot release the ACPI Global Lock, it has not been acquired"));
538 return_ACPI_STATUS(AE_NOT_ACQUIRED);
539 }
540
541 acpi_ev_global_lock_acquired--;
542 if (acpi_ev_global_lock_acquired > 0) {
543 return AE_OK;
544 }
545
546 if (acpi_gbl_global_lock_present) {
547
548 /* Allow any thread to release the lock */
549
550 ACPI_RELEASE_GLOBAL_LOCK(acpi_gbl_FACS, pending);
551
552 /*
553 * If the pending bit was set, we must write GBL_RLS to the control
554 * register
555 */
556 if (pending) {
557 status =
558 acpi_write_bit_register
559 (ACPI_BITREG_GLOBAL_LOCK_RELEASE,
560 ACPI_ENABLE_EVENT);
561 }
562
563 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
564 "Released hardware Global Lock\n"));
565 }
566
567 acpi_gbl_global_lock_acquired = FALSE;
568
569 /* Release the local GL mutex */
570 acpi_ev_global_lock_thread_id = 0;
571 acpi_ev_global_lock_acquired = 0;
572 acpi_os_release_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex);
573 return_ACPI_STATUS(status);
574}
575
576/****************************************************************************** 273/******************************************************************************
577 * 274 *
578 * FUNCTION: acpi_ev_terminate 275 * FUNCTION: acpi_ev_terminate
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index bea7223d7a71..f0edf5c43c03 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -55,6 +55,8 @@ static u8
55acpi_ev_has_default_handler(struct acpi_namespace_node *node, 55acpi_ev_has_default_handler(struct acpi_namespace_node *node,
56 acpi_adr_space_type space_id); 56 acpi_adr_space_type space_id);
57 57
58static void acpi_ev_orphan_ec_reg_method(void);
59
58static acpi_status 60static acpi_status
59acpi_ev_reg_run(acpi_handle obj_handle, 61acpi_ev_reg_run(acpi_handle obj_handle,
60 u32 level, void *context, void **return_value); 62 u32 level, void *context, void **return_value);
@@ -561,7 +563,9 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
561 563
562 /* Now stop region accesses by executing the _REG method */ 564 /* Now stop region accesses by executing the _REG method */
563 565
564 status = acpi_ev_execute_reg_method(region_obj, 0); 566 status =
567 acpi_ev_execute_reg_method(region_obj,
568 ACPI_REG_DISCONNECT);
565 if (ACPI_FAILURE(status)) { 569 if (ACPI_FAILURE(status)) {
566 ACPI_EXCEPTION((AE_INFO, status, 570 ACPI_EXCEPTION((AE_INFO, status,
567 "from region _REG, [%s]", 571 "from region _REG, [%s]",
@@ -1062,6 +1066,12 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
1062 ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run, 1066 ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run,
1063 NULL, &space_id, NULL); 1067 NULL, &space_id, NULL);
1064 1068
1069 /* Special case for EC: handle "orphan" _REG methods with no region */
1070
1071 if (space_id == ACPI_ADR_SPACE_EC) {
1072 acpi_ev_orphan_ec_reg_method();
1073 }
1074
1065 return_ACPI_STATUS(status); 1075 return_ACPI_STATUS(status);
1066} 1076}
1067 1077
@@ -1120,6 +1130,113 @@ acpi_ev_reg_run(acpi_handle obj_handle,
1120 return (AE_OK); 1130 return (AE_OK);
1121 } 1131 }
1122 1132
1123 status = acpi_ev_execute_reg_method(obj_desc, 1); 1133 status = acpi_ev_execute_reg_method(obj_desc, ACPI_REG_CONNECT);
1124 return (status); 1134 return (status);
1125} 1135}
1136
1137/*******************************************************************************
1138 *
1139 * FUNCTION: acpi_ev_orphan_ec_reg_method
1140 *
1141 * PARAMETERS: None
1142 *
1143 * RETURN: None
1144 *
1145 * DESCRIPTION: Execute an "orphan" _REG method that appears under the EC
1146 * device. This is a _REG method that has no corresponding region
1147 * within the EC device scope. The orphan _REG method appears to
1148 * have been enabled by the description of the ECDT in the ACPI
1149 * specification: "The availability of the region space can be
1150 * detected by providing a _REG method object underneath the
1151 * Embedded Controller device."
1152 *
1153 * To quickly access the EC device, we use the EC_ID that appears
1154 * within the ECDT. Otherwise, we would need to perform a time-
1155 * consuming namespace walk, executing _HID methods to find the
1156 * EC device.
1157 *
1158 ******************************************************************************/
1159
1160static void acpi_ev_orphan_ec_reg_method(void)
1161{
1162 struct acpi_table_ecdt *table;
1163 acpi_status status;
1164 struct acpi_object_list args;
1165 union acpi_object objects[2];
1166 struct acpi_namespace_node *ec_device_node;
1167 struct acpi_namespace_node *reg_method;
1168 struct acpi_namespace_node *next_node;
1169
1170 ACPI_FUNCTION_TRACE(ev_orphan_ec_reg_method);
1171
1172 /* Get the ECDT (if present in system) */
1173
1174 status = acpi_get_table(ACPI_SIG_ECDT, 0,
1175 ACPI_CAST_INDIRECT_PTR(struct acpi_table_header,
1176 &table));
1177 if (ACPI_FAILURE(status)) {
1178 return_VOID;
1179 }
1180
1181 /* We need a valid EC_ID string */
1182
1183 if (!(*table->id)) {
1184 return_VOID;
1185 }
1186
1187 /* Namespace is currently locked, must release */
1188
1189 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
1190
1191 /* Get a handle to the EC device referenced in the ECDT */
1192
1193 status = acpi_get_handle(NULL,
1194 ACPI_CAST_PTR(char, table->id),
1195 ACPI_CAST_PTR(acpi_handle, &ec_device_node));
1196 if (ACPI_FAILURE(status)) {
1197 goto exit;
1198 }
1199
1200 /* Get a handle to a _REG method immediately under the EC device */
1201
1202 status = acpi_get_handle(ec_device_node,
1203 METHOD_NAME__REG, ACPI_CAST_PTR(acpi_handle,
1204 &reg_method));
1205 if (ACPI_FAILURE(status)) {
1206 goto exit;
1207 }
1208
1209 /*
1210 * Execute the _REG method only if there is no Operation Region in
1211 * this scope with the Embedded Controller space ID. Otherwise, it
1212 * will already have been executed. Note, this allows for Regions
1213 * with other space IDs to be present; but the code below will then
1214 * execute the _REG method with the EC space ID argument.
1215 */
1216 next_node = acpi_ns_get_next_node(ec_device_node, NULL);
1217 while (next_node) {
1218 if ((next_node->type == ACPI_TYPE_REGION) &&
1219 (next_node->object) &&
1220 (next_node->object->region.space_id == ACPI_ADR_SPACE_EC)) {
1221 goto exit; /* Do not execute _REG */
1222 }
1223 next_node = acpi_ns_get_next_node(ec_device_node, next_node);
1224 }
1225
1226 /* Evaluate the _REG(EC,Connect) method */
1227
1228 args.count = 2;
1229 args.pointer = objects;
1230 objects[0].type = ACPI_TYPE_INTEGER;
1231 objects[0].integer.value = ACPI_ADR_SPACE_EC;
1232 objects[1].type = ACPI_TYPE_INTEGER;
1233 objects[1].integer.value = ACPI_REG_CONNECT;
1234
1235 status = acpi_evaluate_object(reg_method, NULL, &args, NULL);
1236
1237 exit:
1238 /* We ignore all errors from above, don't care */
1239
1240 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
1241 return_VOID;
1242}
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 9659cee6093e..55a5d35ef34a 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -637,7 +637,7 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
637 637
638 status = 638 status =
639 acpi_ev_execute_reg_method 639 acpi_ev_execute_reg_method
640 (region_obj, 1); 640 (region_obj, ACPI_REG_CONNECT);
641 641
642 if (acpi_ns_locked) { 642 if (acpi_ns_locked) {
643 status = 643 status =
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index c85c8c45599d..00cd95692a91 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -130,20 +130,21 @@ acpi_install_address_space_handler(acpi_handle device,
130 case ACPI_ADR_SPACE_PCI_CONFIG: 130 case ACPI_ADR_SPACE_PCI_CONFIG:
131 case ACPI_ADR_SPACE_DATA_TABLE: 131 case ACPI_ADR_SPACE_DATA_TABLE:
132 132
133 if (acpi_gbl_reg_methods_executed) { 133 if (!acpi_gbl_reg_methods_executed) {
134 134
135 /* Run all _REG methods for this address space */ 135 /* We will defer execution of the _REG methods for this space */
136 136 goto unlock_and_exit;
137 status = acpi_ev_execute_reg_methods(node, space_id);
138 } 137 }
139 break; 138 break;
140 139
141 default: 140 default:
142
143 status = acpi_ev_execute_reg_methods(node, space_id);
144 break; 141 break;
145 } 142 }
146 143
144 /* Run all _REG methods for this address space */
145
146 status = acpi_ev_execute_reg_methods(node, space_id);
147
147 unlock_and_exit: 148 unlock_and_exit:
148 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); 149 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
149 return_ACPI_STATUS(status); 150 return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index e7b372d17667..110711afada8 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -305,7 +305,8 @@ acpi_ex_create_region(u8 * aml_start,
305 * range 305 * range
306 */ 306 */
307 if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) && 307 if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) &&
308 (region_space < ACPI_USER_REGION_BEGIN)) { 308 (region_space < ACPI_USER_REGION_BEGIN) &&
309 (region_space != ACPI_ADR_SPACE_DATA_TABLE)) {
309 ACPI_ERROR((AE_INFO, "Invalid AddressSpace type 0x%X", 310 ACPI_ERROR((AE_INFO, "Invalid AddressSpace type 0x%X",
310 region_space)); 311 region_space));
311 return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID); 312 return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 1d76ac85b5e7..ac7b854b0bd7 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -74,7 +74,6 @@ ACPI_MODULE_NAME("nsrepair")
74 * 74 *
75 * Additional possible repairs: 75 * Additional possible repairs:
76 * 76 *
77 * Optional/unnecessary NULL package elements removed
78 * Required package elements that are NULL replaced by Integer/String/Buffer 77 * Required package elements that are NULL replaced by Integer/String/Buffer
79 * Incorrect standalone package wrapped with required outer package 78 * Incorrect standalone package wrapped with required outer package
80 * 79 *
@@ -623,16 +622,12 @@ acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
623 ACPI_FUNCTION_NAME(ns_remove_null_elements); 622 ACPI_FUNCTION_NAME(ns_remove_null_elements);
624 623
625 /* 624 /*
626 * PTYPE1 packages contain no subpackages. 625 * We can safely remove all NULL elements from these package types:
627 * PTYPE2 packages contain a variable number of sub-packages. We can 626 * PTYPE1_VAR packages contain a variable number of simple data types.
628 * safely remove all NULL elements from the PTYPE2 packages. 627 * PTYPE2 packages contain a variable number of sub-packages.
629 */ 628 */
630 switch (package_type) { 629 switch (package_type) {
631 case ACPI_PTYPE1_FIXED:
632 case ACPI_PTYPE1_VAR: 630 case ACPI_PTYPE1_VAR:
633 case ACPI_PTYPE1_OPTION:
634 return;
635
636 case ACPI_PTYPE2: 631 case ACPI_PTYPE2:
637 case ACPI_PTYPE2_COUNT: 632 case ACPI_PTYPE2_COUNT:
638 case ACPI_PTYPE2_PKG_COUNT: 633 case ACPI_PTYPE2_PKG_COUNT:
@@ -642,6 +637,8 @@ acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
642 break; 637 break;
643 638
644 default: 639 default:
640 case ACPI_PTYPE1_FIXED:
641 case ACPI_PTYPE1_OPTION:
645 return; 642 return;
646 } 643 }
647 644
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 136a814cec69..97cb36f85ce9 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -170,8 +170,7 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
170 "SMBus", 170 "SMBus",
171 "SystemCMOS", 171 "SystemCMOS",
172 "PCIBARTarget", 172 "PCIBARTarget",
173 "IPMI", 173 "IPMI"
174 "DataTable"
175}; 174};
176 175
177char *acpi_ut_get_region_name(u8 space_id) 176char *acpi_ut_get_region_name(u8 space_id)
@@ -179,6 +178,8 @@ char *acpi_ut_get_region_name(u8 space_id)
179 178
180 if (space_id >= ACPI_USER_REGION_BEGIN) { 179 if (space_id >= ACPI_USER_REGION_BEGIN) {
181 return ("UserDefinedRegion"); 180 return ("UserDefinedRegion");
181 } else if (space_id == ACPI_ADR_SPACE_DATA_TABLE) {
182 return ("DataTable");
182 } else if (space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) { 183 } else if (space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
183 return ("FunctionalFixedHW"); 184 return ("FunctionalFixedHW");
184 } else if (space_id >= ACPI_NUM_PREDEFINED_REGIONS) { 185 } else if (space_id >= ACPI_NUM_PREDEFINED_REGIONS) {
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index a946c689f03b..7d797e2baecd 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -83,9 +83,15 @@ acpi_status acpi_ut_mutex_initialize(void)
83 83
84 /* Create the spinlocks for use at interrupt level */ 84 /* Create the spinlocks for use at interrupt level */
85 85
86 spin_lock_init(acpi_gbl_gpe_lock); 86 status = acpi_os_create_lock (&acpi_gbl_gpe_lock);
87 spin_lock_init(acpi_gbl_hardware_lock); 87 if (ACPI_FAILURE (status)) {
88 spin_lock_init(acpi_ev_global_lock_pending_lock); 88 return_ACPI_STATUS (status);
89 }
90
91 status = acpi_os_create_lock (&acpi_gbl_hardware_lock);
92 if (ACPI_FAILURE (status)) {
93 return_ACPI_STATUS (status);
94 }
89 95
90 /* Mutex for _OSI support */ 96 /* Mutex for _OSI support */
91 status = acpi_os_create_mutex(&acpi_gbl_osi_mutex); 97 status = acpi_os_create_mutex(&acpi_gbl_osi_mutex);
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 9749980ca6ca..d1e06c182cdb 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -227,7 +227,7 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
227 acpi_status status = AE_OK; 227 acpi_status status = AE_OK;
228 char object_name[5] = { '_', 'P', 'S', '0' + state, '\0' }; 228 char object_name[5] = { '_', 'P', 'S', '0' + state, '\0' };
229 229
230 if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3)) 230 if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
231 return -EINVAL; 231 return -EINVAL;
232 232
233 /* Make sure this is a valid target state */ 233 /* Make sure this is a valid target state */
diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
new file mode 100644
index 000000000000..5d42c2414ae5
--- /dev/null
+++ b/drivers/acpi/custom_method.c
@@ -0,0 +1,100 @@
1/*
2 * debugfs.c - ACPI debugfs interface to userspace.
3 */
4
5#include <linux/init.h>
6#include <linux/module.h>
7#include <linux/kernel.h>
8#include <linux/uaccess.h>
9#include <linux/debugfs.h>
10#include <acpi/acpi_drivers.h>
11
12#include "internal.h"
13
14#define _COMPONENT ACPI_SYSTEM_COMPONENT
15ACPI_MODULE_NAME("custom_method");
16MODULE_LICENSE("GPL");
17
18static struct dentry *cm_dentry;
19
20/* /sys/kernel/debug/acpi/custom_method */
21
22static ssize_t cm_write(struct file *file, const char __user * user_buf,
23 size_t count, loff_t *ppos)
24{
25 static char *buf;
26 static u32 max_size;
27 static u32 uncopied_bytes;
28
29 struct acpi_table_header table;
30 acpi_status status;
31
32 if (!(*ppos)) {
33 /* parse the table header to get the table length */
34 if (count <= sizeof(struct acpi_table_header))
35 return -EINVAL;
36 if (copy_from_user(&table, user_buf,
37 sizeof(struct acpi_table_header)))
38 return -EFAULT;
39 uncopied_bytes = max_size = table.length;
40 buf = kzalloc(max_size, GFP_KERNEL);
41 if (!buf)
42 return -ENOMEM;
43 }
44
45 if (buf == NULL)
46 return -EINVAL;
47
48 if ((*ppos > max_size) ||
49 (*ppos + count > max_size) ||
50 (*ppos + count < count) ||
51 (count > uncopied_bytes))
52 return -EINVAL;
53
54 if (copy_from_user(buf + (*ppos), user_buf, count)) {
55 kfree(buf);
56 buf = NULL;
57 return -EFAULT;
58 }
59
60 uncopied_bytes -= count;
61 *ppos += count;
62
63 if (!uncopied_bytes) {
64 status = acpi_install_method(buf);
65 kfree(buf);
66 buf = NULL;
67 if (ACPI_FAILURE(status))
68 return -EINVAL;
69 add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
70 }
71
72 return count;
73}
74
75static const struct file_operations cm_fops = {
76 .write = cm_write,
77 .llseek = default_llseek,
78};
79
80static int __init acpi_custom_method_init(void)
81{
82 if (acpi_debugfs_dir == NULL)
83 return -ENOENT;
84
85 cm_dentry = debugfs_create_file("custom_method", S_IWUSR,
86 acpi_debugfs_dir, NULL, &cm_fops);
87 if (cm_dentry == NULL)
88 return -ENODEV;
89
90 return 0;
91}
92
93static void __exit acpi_custom_method_exit(void)
94{
95 if (cm_dentry)
96 debugfs_remove(cm_dentry);
97 }
98
99module_init(acpi_custom_method_init);
100module_exit(acpi_custom_method_exit);
diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c
index 384f7abcff77..182a9fc36355 100644
--- a/drivers/acpi/debugfs.c
+++ b/drivers/acpi/debugfs.c
@@ -3,100 +3,16 @@
3 */ 3 */
4 4
5#include <linux/init.h> 5#include <linux/init.h>
6#include <linux/module.h>
7#include <linux/kernel.h>
8#include <linux/uaccess.h>
9#include <linux/debugfs.h> 6#include <linux/debugfs.h>
10#include <acpi/acpi_drivers.h> 7#include <acpi/acpi_drivers.h>
11 8
12#define _COMPONENT ACPI_SYSTEM_COMPONENT 9#define _COMPONENT ACPI_SYSTEM_COMPONENT
13ACPI_MODULE_NAME("debugfs"); 10ACPI_MODULE_NAME("debugfs");
14 11
12struct dentry *acpi_debugfs_dir;
13EXPORT_SYMBOL_GPL(acpi_debugfs_dir);
15 14
16/* /sys/modules/acpi/parameters/aml_debug_output */ 15void __init acpi_debugfs_init(void)
17
18module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
19 bool, 0644);
20MODULE_PARM_DESC(aml_debug_output,
21 "To enable/disable the ACPI Debug Object output.");
22
23/* /sys/kernel/debug/acpi/custom_method */
24
25static ssize_t cm_write(struct file *file, const char __user * user_buf,
26 size_t count, loff_t *ppos)
27{ 16{
28 static char *buf; 17 acpi_debugfs_dir = debugfs_create_dir("acpi", NULL);
29 static u32 max_size;
30 static u32 uncopied_bytes;
31
32 struct acpi_table_header table;
33 acpi_status status;
34
35 if (!(*ppos)) {
36 /* parse the table header to get the table length */
37 if (count <= sizeof(struct acpi_table_header))
38 return -EINVAL;
39 if (copy_from_user(&table, user_buf,
40 sizeof(struct acpi_table_header)))
41 return -EFAULT;
42 uncopied_bytes = max_size = table.length;
43 buf = kzalloc(max_size, GFP_KERNEL);
44 if (!buf)
45 return -ENOMEM;
46 }
47
48 if (buf == NULL)
49 return -EINVAL;
50
51 if ((*ppos > max_size) ||
52 (*ppos + count > max_size) ||
53 (*ppos + count < count) ||
54 (count > uncopied_bytes))
55 return -EINVAL;
56
57 if (copy_from_user(buf + (*ppos), user_buf, count)) {
58 kfree(buf);
59 buf = NULL;
60 return -EFAULT;
61 }
62
63 uncopied_bytes -= count;
64 *ppos += count;
65
66 if (!uncopied_bytes) {
67 status = acpi_install_method(buf);
68 kfree(buf);
69 buf = NULL;
70 if (ACPI_FAILURE(status))
71 return -EINVAL;
72 add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
73 }
74
75 return count;
76}
77
78static const struct file_operations cm_fops = {
79 .write = cm_write,
80 .llseek = default_llseek,
81};
82
83int __init acpi_debugfs_init(void)
84{
85 struct dentry *acpi_dir, *cm_dentry;
86
87 acpi_dir = debugfs_create_dir("acpi", NULL);
88 if (!acpi_dir)
89 goto err;
90
91 cm_dentry = debugfs_create_file("custom_method", S_IWUSR,
92 acpi_dir, NULL, &cm_fops);
93 if (!cm_dentry)
94 goto err;
95
96 return 0;
97
98err:
99 if (acpi_dir)
100 debugfs_remove(acpi_dir);
101 return -EINVAL;
102} 18}
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index fa848c4116a8..b19a18dd994f 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -69,7 +69,6 @@ enum ec_command {
69 69
70#define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */ 70#define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
71#define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */ 71#define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
72#define ACPI_EC_CDELAY 10 /* Wait 10us before polling EC */
73#define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */ 72#define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */
74 73
75#define ACPI_EC_STORM_THRESHOLD 8 /* number of false interrupts 74#define ACPI_EC_STORM_THRESHOLD 8 /* number of false interrupts
@@ -433,8 +432,7 @@ EXPORT_SYMBOL(ec_write);
433 432
434int ec_transaction(u8 command, 433int ec_transaction(u8 command,
435 const u8 * wdata, unsigned wdata_len, 434 const u8 * wdata, unsigned wdata_len,
436 u8 * rdata, unsigned rdata_len, 435 u8 * rdata, unsigned rdata_len)
437 int force_poll)
438{ 436{
439 struct transaction t = {.command = command, 437 struct transaction t = {.command = command,
440 .wdata = wdata, .rdata = rdata, 438 .wdata = wdata, .rdata = rdata,
@@ -592,8 +590,6 @@ static void acpi_ec_gpe_query(void *ec_cxt)
592 mutex_unlock(&ec->lock); 590 mutex_unlock(&ec->lock);
593} 591}
594 592
595static void acpi_ec_gpe_query(void *ec_cxt);
596
597static int ec_check_sci(struct acpi_ec *ec, u8 state) 593static int ec_check_sci(struct acpi_ec *ec, u8 state)
598{ 594{
599 if (state & ACPI_EC_FLAG_SCI) { 595 if (state & ACPI_EC_FLAG_SCI) {
@@ -808,8 +804,6 @@ static int acpi_ec_add(struct acpi_device *device)
808 return -EINVAL; 804 return -EINVAL;
809 } 805 }
810 806
811 ec->handle = device->handle;
812
813 /* Find and register all query methods */ 807 /* Find and register all query methods */
814 acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1, 808 acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
815 acpi_ec_register_query_methods, NULL, ec, NULL); 809 acpi_ec_register_query_methods, NULL, ec, NULL);
@@ -938,8 +932,19 @@ static struct dmi_system_id __initdata ec_dmi_table[] = {
938 ec_flag_msi, "MSI hardware", { 932 ec_flag_msi, "MSI hardware", {
939 DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR")}, NULL}, 933 DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR")}, NULL},
940 { 934 {
935 ec_flag_msi, "Quanta hardware", {
936 DMI_MATCH(DMI_SYS_VENDOR, "Quanta"),
937 DMI_MATCH(DMI_PRODUCT_NAME, "TW8/SW8/DW8"),}, NULL},
938 {
939 ec_flag_msi, "Quanta hardware", {
940 DMI_MATCH(DMI_SYS_VENDOR, "Quanta"),
941 DMI_MATCH(DMI_PRODUCT_NAME, "TW9/SW9"),}, NULL},
942 {
941 ec_validate_ecdt, "ASUS hardware", { 943 ec_validate_ecdt, "ASUS hardware", {
942 DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL}, 944 DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
945 {
946 ec_validate_ecdt, "ASUS hardware", {
947 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL},
943 {}, 948 {},
944}; 949};
945 950
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 4bfb759deb10..ca75b9ce0489 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -28,9 +28,10 @@ int acpi_scan_init(void);
28int acpi_sysfs_init(void); 28int acpi_sysfs_init(void);
29 29
30#ifdef CONFIG_DEBUG_FS 30#ifdef CONFIG_DEBUG_FS
31extern struct dentry *acpi_debugfs_dir;
31int acpi_debugfs_init(void); 32int acpi_debugfs_init(void);
32#else 33#else
33static inline int acpi_debugfs_init(void) { return 0; } 34static inline void acpi_debugfs_init(void) { return; }
34#endif 35#endif
35 36
36/* -------------------------------------------------------------------------- 37/* --------------------------------------------------------------------------
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 45ad4ffef533..52ca9649d769 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -902,14 +902,6 @@ void acpi_os_wait_events_complete(void *context)
902 902
903EXPORT_SYMBOL(acpi_os_wait_events_complete); 903EXPORT_SYMBOL(acpi_os_wait_events_complete);
904 904
905/*
906 * Deallocate the memory for a spinlock.
907 */
908void acpi_os_delete_lock(acpi_spinlock handle)
909{
910 return;
911}
912
913acpi_status 905acpi_status
914acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle) 906acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
915{ 907{
@@ -1341,6 +1333,31 @@ int acpi_resources_are_enforced(void)
1341EXPORT_SYMBOL(acpi_resources_are_enforced); 1333EXPORT_SYMBOL(acpi_resources_are_enforced);
1342 1334
1343/* 1335/*
1336 * Create and initialize a spinlock.
1337 */
1338acpi_status
1339acpi_os_create_lock(acpi_spinlock *out_handle)
1340{
1341 spinlock_t *lock;
1342
1343 lock = ACPI_ALLOCATE(sizeof(spinlock_t));
1344 if (!lock)
1345 return AE_NO_MEMORY;
1346 spin_lock_init(lock);
1347 *out_handle = lock;
1348
1349 return AE_OK;
1350}
1351
1352/*
1353 * Deallocate the memory for a spinlock.
1354 */
1355void acpi_os_delete_lock(acpi_spinlock handle)
1356{
1357 ACPI_FREE(handle);
1358}
1359
1360/*
1344 * Acquire a spinlock. 1361 * Acquire a spinlock.
1345 * 1362 *
1346 * handle is a pointer to the spinlock_t. 1363 * handle is a pointer to the spinlock_t.
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 25bf17da69fd..02d2a4c9084d 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -37,7 +37,6 @@ static struct dmi_system_id __initdata processor_idle_dmi_table[] = {
37 {}, 37 {},
38}; 38};
39 39
40#ifdef CONFIG_SMP
41static int map_lapic_id(struct acpi_subtable_header *entry, 40static int map_lapic_id(struct acpi_subtable_header *entry,
42 u32 acpi_id, int *apic_id) 41 u32 acpi_id, int *apic_id)
43{ 42{
@@ -165,7 +164,9 @@ exit:
165 164
166int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id) 165int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
167{ 166{
167#ifdef CONFIG_SMP
168 int i; 168 int i;
169#endif
169 int apic_id = -1; 170 int apic_id = -1;
170 171
171 apic_id = map_mat_entry(handle, type, acpi_id); 172 apic_id = map_mat_entry(handle, type, acpi_id);
@@ -174,14 +175,19 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
174 if (apic_id == -1) 175 if (apic_id == -1)
175 return apic_id; 176 return apic_id;
176 177
178#ifdef CONFIG_SMP
177 for_each_possible_cpu(i) { 179 for_each_possible_cpu(i) {
178 if (cpu_physical_id(i) == apic_id) 180 if (cpu_physical_id(i) == apic_id)
179 return i; 181 return i;
180 } 182 }
183#else
184 /* In UP kernel, only processor 0 is valid */
185 if (apic_id == 0)
186 return apic_id;
187#endif
181 return -1; 188 return -1;
182} 189}
183EXPORT_SYMBOL_GPL(acpi_get_cpuid); 190EXPORT_SYMBOL_GPL(acpi_get_cpuid);
184#endif
185 191
186static bool __init processor_physically_present(acpi_handle handle) 192static bool __init processor_physically_present(acpi_handle handle)
187{ 193{
@@ -217,7 +223,7 @@ static bool __init processor_physically_present(acpi_handle handle)
217 type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0; 223 type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
218 cpuid = acpi_get_cpuid(handle, type, acpi_id); 224 cpuid = acpi_get_cpuid(handle, type, acpi_id);
219 225
220 if ((cpuid == -1) && (num_possible_cpus() > 1)) 226 if (cpuid == -1)
221 return false; 227 return false;
222 228
223 return true; 229 return true;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index d615b7d69bca..431ab11c8c1b 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -161,7 +161,7 @@ static void lapic_timer_check_state(int state, struct acpi_processor *pr,
161 if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT)) 161 if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
162 return; 162 return;
163 163
164 if (c1e_detected) 164 if (amd_e400_c1e_detected)
165 type = ACPI_STATE_C1; 165 type = ACPI_STATE_C1;
166 166
167 /* 167 /*
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 61891e75583d..77255f250dbb 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -220,6 +220,14 @@ module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
220 NULL, 0644); 220 NULL, 0644);
221#endif /* CONFIG_ACPI_DEBUG */ 221#endif /* CONFIG_ACPI_DEBUG */
222 222
223
224/* /sys/modules/acpi/parameters/aml_debug_output */
225
226module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
227 bool, 0644);
228MODULE_PARM_DESC(aml_debug_output,
229 "To enable/disable the ACPI Debug Object output.");
230
223/* /sys/module/acpi/parameters/acpica_version */ 231/* /sys/module/acpi/parameters/acpica_version */
224static int param_get_acpica_version(char *buffer, struct kernel_param *kp) 232static int param_get_acpica_version(char *buffer, struct kernel_param *kp)
225{ 233{
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index db8f88586c8d..98de8f418676 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -1038,6 +1038,7 @@ static void floppy_disable_hlt(void)
1038{ 1038{
1039 unsigned long flags; 1039 unsigned long flags;
1040 1040
1041 WARN_ONCE(1, "floppy_disable_hlt() scheduled for removal in 2012");
1041 spin_lock_irqsave(&floppy_hlt_lock, flags); 1042 spin_lock_irqsave(&floppy_hlt_lock, flags);
1042 if (!hlt_disabled) { 1043 if (!hlt_disabled) {
1043 hlt_disabled = 1; 1044 hlt_disabled = 1;
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index f508690eb958..c47f3d09c1ee 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -237,6 +237,7 @@ static int menu_select(struct cpuidle_device *dev)
237 unsigned int power_usage = -1; 237 unsigned int power_usage = -1;
238 int i; 238 int i;
239 int multiplier; 239 int multiplier;
240 struct timespec t;
240 241
241 if (data->needs_update) { 242 if (data->needs_update) {
242 menu_update(dev); 243 menu_update(dev);
@@ -251,8 +252,9 @@ static int menu_select(struct cpuidle_device *dev)
251 return 0; 252 return 0;
252 253
253 /* determine the expected residency time, round up */ 254 /* determine the expected residency time, round up */
255 t = ktime_to_timespec(tick_nohz_get_sleep_length());
254 data->expected_us = 256 data->expected_us =
255 DIV_ROUND_UP((u32)ktime_to_ns(tick_nohz_get_sleep_length()), 1000); 257 t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;
256 258
257 259
258 data->bucket = which_bucket(data->expected_us); 260 data->bucket = which_bucket(data->expected_us);
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 7c3b18e78cee..d36f41ea8cbf 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -195,6 +195,8 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
195 return PCI_D2; 195 return PCI_D2;
196 case ACPI_STATE_D3: 196 case ACPI_STATE_D3:
197 return PCI_D3hot; 197 return PCI_D3hot;
198 case ACPI_STATE_D3_COLD:
199 return PCI_D3cold;
198 } 200 }
199 return PCI_POWER_ERROR; 201 return PCI_POWER_ERROR;
200} 202}
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index c16a27641ced..9b3afb6f0b13 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -200,8 +200,8 @@ static bool extra_features;
200 * watching the output of address 0x4F (do an ec_transaction writing 0x33 200 * watching the output of address 0x4F (do an ec_transaction writing 0x33
201 * into 0x4F and read a few bytes from the output, like so: 201 * into 0x4F and read a few bytes from the output, like so:
202 * u8 writeData = 0x33; 202 * u8 writeData = 0x33;
203 * ec_transaction(0x4F, &writeData, 1, buffer, 32, 0); 203 * ec_transaction(0x4F, &writeData, 1, buffer, 32);
204 * That address is labelled "fan1 table information" in the service manual. 204 * That address is labeled "fan1 table information" in the service manual.
205 * It should be clear which value in 'buffer' changes). This seems to be 205 * It should be clear which value in 'buffer' changes). This seems to be
206 * related to fan speed. It isn't a proper 'realtime' fan speed value 206 * related to fan speed. It isn't a proper 'realtime' fan speed value
207 * though, because physically stopping or speeding up the fan doesn't 207 * though, because physically stopping or speeding up the fan doesn't
@@ -286,7 +286,7 @@ static int get_backlight_level(void)
286static void set_backlight_state(bool on) 286static void set_backlight_state(bool on)
287{ 287{
288 u8 data = on ? BACKLIGHT_STATE_ON_DATA : BACKLIGHT_STATE_OFF_DATA; 288 u8 data = on ? BACKLIGHT_STATE_ON_DATA : BACKLIGHT_STATE_OFF_DATA;
289 ec_transaction(BACKLIGHT_STATE_ADDR, &data, 1, NULL, 0, 0); 289 ec_transaction(BACKLIGHT_STATE_ADDR, &data, 1, NULL, 0);
290} 290}
291 291
292 292
@@ -294,24 +294,24 @@ static void set_backlight_state(bool on)
294static void pwm_enable_control(void) 294static void pwm_enable_control(void)
295{ 295{
296 unsigned char writeData = PWM_ENABLE_DATA; 296 unsigned char writeData = PWM_ENABLE_DATA;
297 ec_transaction(PWM_ENABLE_ADDR, &writeData, 1, NULL, 0, 0); 297 ec_transaction(PWM_ENABLE_ADDR, &writeData, 1, NULL, 0);
298} 298}
299 299
300static void pwm_disable_control(void) 300static void pwm_disable_control(void)
301{ 301{
302 unsigned char writeData = PWM_DISABLE_DATA; 302 unsigned char writeData = PWM_DISABLE_DATA;
303 ec_transaction(PWM_DISABLE_ADDR, &writeData, 1, NULL, 0, 0); 303 ec_transaction(PWM_DISABLE_ADDR, &writeData, 1, NULL, 0);
304} 304}
305 305
306static void set_pwm(int pwm) 306static void set_pwm(int pwm)
307{ 307{
308 ec_transaction(PWM_ADDRESS, &pwm_lookup_table[pwm], 1, NULL, 0, 0); 308 ec_transaction(PWM_ADDRESS, &pwm_lookup_table[pwm], 1, NULL, 0);
309} 309}
310 310
311static int get_fan_rpm(void) 311static int get_fan_rpm(void)
312{ 312{
313 u8 value, data = FAN_DATA; 313 u8 value, data = FAN_DATA;
314 ec_transaction(FAN_ADDRESS, &data, 1, &value, 1, 0); 314 ec_transaction(FAN_ADDRESS, &data, 1, &value, 1);
315 return 100 * (int)value; 315 return 100 * (int)value;
316} 316}
317 317
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index 23fb2afda00b..821ad7bc1e26 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -135,7 +135,7 @@ static int set_lcd_level(int level)
135 buf[1] = (u8) (level*31); 135 buf[1] = (u8) (level*31);
136 136
137 return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, buf, sizeof(buf), 137 return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, buf, sizeof(buf),
138 NULL, 0, 1); 138 NULL, 0);
139} 139}
140 140
141static int get_lcd_level(void) 141static int get_lcd_level(void)
@@ -144,7 +144,7 @@ static int get_lcd_level(void)
144 int result; 144 int result;
145 145
146 result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1, 146 result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1,
147 &rdata, 1, 1); 147 &rdata, 1);
148 if (result < 0) 148 if (result < 0)
149 return result; 149 return result;
150 150
@@ -157,7 +157,7 @@ static int get_auto_brightness(void)
157 int result; 157 int result;
158 158
159 result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1, 159 result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1,
160 &rdata, 1, 1); 160 &rdata, 1);
161 if (result < 0) 161 if (result < 0)
162 return result; 162 return result;
163 163
@@ -172,7 +172,7 @@ static int set_auto_brightness(int enable)
172 wdata[0] = 4; 172 wdata[0] = 4;
173 173
174 result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 1, 174 result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 1,
175 &rdata, 1, 1); 175 &rdata, 1);
176 if (result < 0) 176 if (result < 0)
177 return result; 177 return result;
178 178
@@ -180,7 +180,7 @@ static int set_auto_brightness(int enable)
180 wdata[1] = (rdata & 0xF7) | (enable ? 8 : 0); 180 wdata[1] = (rdata & 0xF7) | (enable ? 8 : 0);
181 181
182 return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 2, 182 return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 2,
183 NULL, 0, 1); 183 NULL, 0);
184} 184}
185 185
186static ssize_t set_device_state(const char *buf, size_t count, u8 mask) 186static ssize_t set_device_state(const char *buf, size_t count, u8 mask)
@@ -217,7 +217,7 @@ static int get_wireless_state(int *wlan, int *bluetooth)
217 u8 wdata = 0, rdata; 217 u8 wdata = 0, rdata;
218 int result; 218 int result;
219 219
220 result = ec_transaction(MSI_EC_COMMAND_WIRELESS, &wdata, 1, &rdata, 1, 1); 220 result = ec_transaction(MSI_EC_COMMAND_WIRELESS, &wdata, 1, &rdata, 1);
221 if (result < 0) 221 if (result < 0)
222 return -1; 222 return -1;
223 223
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index fc6f2a5bde01..0b1c82ad6805 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -499,7 +499,7 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
499 dev_set_drvdata(hwmon->device, hwmon); 499 dev_set_drvdata(hwmon->device, hwmon);
500 result = device_create_file(hwmon->device, &dev_attr_name); 500 result = device_create_file(hwmon->device, &dev_attr_name);
501 if (result) 501 if (result)
502 goto unregister_hwmon_device; 502 goto free_mem;
503 503
504 register_sys_interface: 504 register_sys_interface:
505 tz->hwmon = hwmon; 505 tz->hwmon = hwmon;
@@ -513,7 +513,7 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
513 sysfs_attr_init(&tz->temp_input.attr.attr); 513 sysfs_attr_init(&tz->temp_input.attr.attr);
514 result = device_create_file(hwmon->device, &tz->temp_input.attr); 514 result = device_create_file(hwmon->device, &tz->temp_input.attr);
515 if (result) 515 if (result)
516 goto unregister_hwmon_device; 516 goto unregister_name;
517 517
518 if (tz->ops->get_crit_temp) { 518 if (tz->ops->get_crit_temp) {
519 unsigned long temperature; 519 unsigned long temperature;
@@ -527,7 +527,7 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
527 result = device_create_file(hwmon->device, 527 result = device_create_file(hwmon->device,
528 &tz->temp_crit.attr); 528 &tz->temp_crit.attr);
529 if (result) 529 if (result)
530 goto unregister_hwmon_device; 530 goto unregister_input;
531 } 531 }
532 } 532 }
533 533
@@ -539,9 +539,9 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
539 539
540 return 0; 540 return 0;
541 541
542 unregister_hwmon_device: 542 unregister_input:
543 device_remove_file(hwmon->device, &tz->temp_crit.attr);
544 device_remove_file(hwmon->device, &tz->temp_input.attr); 543 device_remove_file(hwmon->device, &tz->temp_input.attr);
544 unregister_name:
545 if (new_hwmon_device) { 545 if (new_hwmon_device) {
546 device_remove_file(hwmon->device, &dev_attr_name); 546 device_remove_file(hwmon->device, &dev_attr_name);
547 hwmon_device_unregister(hwmon->device); 547 hwmon_device_unregister(hwmon->device);
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 8f1700623b41..21de1d6d5849 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -74,8 +74,9 @@ shrink_idmap_tree(struct rb_root *root, int nr_to_scan, int *nr_rem,
74 * Run idmap cache shrinker. 74 * Run idmap cache shrinker.
75 */ 75 */
76static int 76static int
77cifs_idmap_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) 77cifs_idmap_shrinker(struct shrinker *shrink, struct shrink_control *sc)
78{ 78{
79 int nr_to_scan = sc->nr_to_scan;
79 int nr_del = 0; 80 int nr_del = 0;
80 int nr_rem = 0; 81 int nr_rem = 0;
81 struct rb_root *root; 82 struct rb_root *root;
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index bbbc6bf5cb2e..dd25c2aec375 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -21,25 +21,13 @@
21#include "delegation.h" 21#include "delegation.h"
22#include "internal.h" 22#include "internal.h"
23 23
24static void nfs_do_free_delegation(struct nfs_delegation *delegation)
25{
26 kfree(delegation);
27}
28
29static void nfs_free_delegation_callback(struct rcu_head *head)
30{
31 struct nfs_delegation *delegation = container_of(head, struct nfs_delegation, rcu);
32
33 nfs_do_free_delegation(delegation);
34}
35
36static void nfs_free_delegation(struct nfs_delegation *delegation) 24static void nfs_free_delegation(struct nfs_delegation *delegation)
37{ 25{
38 if (delegation->cred) { 26 if (delegation->cred) {
39 put_rpccred(delegation->cred); 27 put_rpccred(delegation->cred);
40 delegation->cred = NULL; 28 delegation->cred = NULL;
41 } 29 }
42 call_rcu(&delegation->rcu, nfs_free_delegation_callback); 30 kfree_rcu(delegation, rcu);
43} 31}
44 32
45/** 33/**
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 57bb31ad7a5e..873c6fa8bc3b 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1298,8 +1298,12 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1298 i_size_write(inode, new_isize); 1298 i_size_write(inode, new_isize);
1299 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; 1299 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
1300 } 1300 }
1301 dprintk("NFS: isize change on server for file %s/%ld\n", 1301 dprintk("NFS: isize change on server for file %s/%ld "
1302 inode->i_sb->s_id, inode->i_ino); 1302 "(%Ld to %Ld)\n",
1303 inode->i_sb->s_id,
1304 inode->i_ino,
1305 (long long)cur_isize,
1306 (long long)new_isize);
1303 } 1307 }
1304 } else 1308 } else
1305 invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR 1309 invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index cf1b339c3937..d0e15dba7a5a 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -267,9 +267,11 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc
267 break; 267 break;
268 nfs4_schedule_stateid_recovery(server, state); 268 nfs4_schedule_stateid_recovery(server, state);
269 goto wait_on_recovery; 269 goto wait_on_recovery;
270 case -NFS4ERR_EXPIRED:
271 if (state != NULL)
272 nfs4_schedule_stateid_recovery(server, state);
270 case -NFS4ERR_STALE_STATEID: 273 case -NFS4ERR_STALE_STATEID:
271 case -NFS4ERR_STALE_CLIENTID: 274 case -NFS4ERR_STALE_CLIENTID:
272 case -NFS4ERR_EXPIRED:
273 nfs4_schedule_lease_recovery(clp); 275 nfs4_schedule_lease_recovery(clp);
274 goto wait_on_recovery; 276 goto wait_on_recovery;
275#if defined(CONFIG_NFS_V4_1) 277#if defined(CONFIG_NFS_V4_1)
@@ -3670,9 +3672,11 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
3670 break; 3672 break;
3671 nfs4_schedule_stateid_recovery(server, state); 3673 nfs4_schedule_stateid_recovery(server, state);
3672 goto wait_on_recovery; 3674 goto wait_on_recovery;
3675 case -NFS4ERR_EXPIRED:
3676 if (state != NULL)
3677 nfs4_schedule_stateid_recovery(server, state);
3673 case -NFS4ERR_STALE_STATEID: 3678 case -NFS4ERR_STALE_STATEID:
3674 case -NFS4ERR_STALE_CLIENTID: 3679 case -NFS4ERR_STALE_CLIENTID:
3675 case -NFS4ERR_EXPIRED:
3676 nfs4_schedule_lease_recovery(clp); 3680 nfs4_schedule_lease_recovery(clp);
3677 goto wait_on_recovery; 3681 goto wait_on_recovery;
3678#if defined(CONFIG_NFS_V4_1) 3682#if defined(CONFIG_NFS_V4_1)
@@ -4543,6 +4547,7 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
4543 case -ESTALE: 4547 case -ESTALE:
4544 goto out; 4548 goto out;
4545 case -NFS4ERR_EXPIRED: 4549 case -NFS4ERR_EXPIRED:
4550 nfs4_schedule_stateid_recovery(server, state);
4546 case -NFS4ERR_STALE_CLIENTID: 4551 case -NFS4ERR_STALE_CLIENTID:
4547 case -NFS4ERR_STALE_STATEID: 4552 case -NFS4ERR_STALE_STATEID:
4548 nfs4_schedule_lease_recovery(server->nfs_client); 4553 nfs4_schedule_lease_recovery(server->nfs_client);
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 036f5adc9e1f..e97dd219f84f 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1466,7 +1466,10 @@ static int nfs4_reclaim_lease(struct nfs_client *clp)
1466#ifdef CONFIG_NFS_V4_1 1466#ifdef CONFIG_NFS_V4_1
1467void nfs4_schedule_session_recovery(struct nfs4_session *session) 1467void nfs4_schedule_session_recovery(struct nfs4_session *session)
1468{ 1468{
1469 nfs4_schedule_lease_recovery(session->clp); 1469 struct nfs_client *clp = session->clp;
1470
1471 set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
1472 nfs4_schedule_lease_recovery(clp);
1470} 1473}
1471EXPORT_SYMBOL_GPL(nfs4_schedule_session_recovery); 1474EXPORT_SYMBOL_GPL(nfs4_schedule_session_recovery);
1472 1475
@@ -1549,6 +1552,7 @@ static int nfs4_reset_session(struct nfs_client *clp)
1549 status = nfs4_recovery_handle_error(clp, status); 1552 status = nfs4_recovery_handle_error(clp, status);
1550 goto out; 1553 goto out;
1551 } 1554 }
1555 clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
1552 /* create_session negotiated new slot table */ 1556 /* create_session negotiated new slot table */
1553 clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state); 1557 clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
1554 1558
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
index c541093a5bf2..c4744e1d513c 100644
--- a/fs/nfs/nfsroot.c
+++ b/fs/nfs/nfsroot.c
@@ -87,7 +87,7 @@
87#define NFS_ROOT "/tftpboot/%s" 87#define NFS_ROOT "/tftpboot/%s"
88 88
89/* Default NFSROOT mount options. */ 89/* Default NFSROOT mount options. */
90#define NFS_DEF_OPTIONS "udp" 90#define NFS_DEF_OPTIONS "vers=2,udp,rsize=4096,wsize=4096"
91 91
92/* Parameters passed from the kernel command line */ 92/* Parameters passed from the kernel command line */
93static char nfs_root_parms[256] __initdata = ""; 93static char nfs_root_parms[256] __initdata = "";
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index f57f5281a520..101c85a3644e 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1009,7 +1009,7 @@ void
1009pnfs_set_layoutcommit(struct nfs_write_data *wdata) 1009pnfs_set_layoutcommit(struct nfs_write_data *wdata)
1010{ 1010{
1011 struct nfs_inode *nfsi = NFS_I(wdata->inode); 1011 struct nfs_inode *nfsi = NFS_I(wdata->inode);
1012 loff_t end_pos = wdata->args.offset + wdata->res.count; 1012 loff_t end_pos = wdata->mds_offset + wdata->res.count;
1013 bool mark_as_dirty = false; 1013 bool mark_as_dirty = false;
1014 1014
1015 spin_lock(&nfsi->vfs_inode.i_lock); 1015 spin_lock(&nfsi->vfs_inode.i_lock);
diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c
index 730c56248c9b..5e1101ff276f 100644
--- a/fs/squashfs/export.c
+++ b/fs/squashfs/export.c
@@ -147,7 +147,7 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
147 * table[0] points to the first inode lookup table metadata block, 147 * table[0] points to the first inode lookup table metadata block,
148 * this should be less than lookup_table_start 148 * this should be less than lookup_table_start
149 */ 149 */
150 if (!IS_ERR(table) && table[0] >= lookup_table_start) { 150 if (!IS_ERR(table) && le64_to_cpu(table[0]) >= lookup_table_start) {
151 kfree(table); 151 kfree(table);
152 return ERR_PTR(-EINVAL); 152 return ERR_PTR(-EINVAL);
153 } 153 }
diff --git a/fs/squashfs/fragment.c b/fs/squashfs/fragment.c
index 1516a6490bfb..0ed6edbc5c71 100644
--- a/fs/squashfs/fragment.c
+++ b/fs/squashfs/fragment.c
@@ -90,7 +90,7 @@ __le64 *squashfs_read_fragment_index_table(struct super_block *sb,
90 * table[0] points to the first fragment table metadata block, this 90 * table[0] points to the first fragment table metadata block, this
91 * should be less than fragment_table_start 91 * should be less than fragment_table_start
92 */ 92 */
93 if (!IS_ERR(table) && table[0] >= fragment_table_start) { 93 if (!IS_ERR(table) && le64_to_cpu(table[0]) >= fragment_table_start) {
94 kfree(table); 94 kfree(table);
95 return ERR_PTR(-EINVAL); 95 return ERR_PTR(-EINVAL);
96 } 96 }
diff --git a/fs/squashfs/id.c b/fs/squashfs/id.c
index a70858e0fb44..d38ea3dab951 100644
--- a/fs/squashfs/id.c
+++ b/fs/squashfs/id.c
@@ -93,7 +93,7 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb,
93 * table[0] points to the first id lookup table metadata block, this 93 * table[0] points to the first id lookup table metadata block, this
94 * should be less than id_table_start 94 * should be less than id_table_start
95 */ 95 */
96 if (!IS_ERR(table) && table[0] >= id_table_start) { 96 if (!IS_ERR(table) && le64_to_cpu(table[0]) >= id_table_start) {
97 kfree(table); 97 kfree(table);
98 return ERR_PTR(-EINVAL); 98 return ERR_PTR(-EINVAL);
99 } 99 }
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 6f26abee3597..7438850c62d0 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -245,7 +245,7 @@ allocate_id_index_table:
245 msblk->id_table = NULL; 245 msblk->id_table = NULL;
246 goto failed_mount; 246 goto failed_mount;
247 } 247 }
248 next_table = msblk->id_table[0]; 248 next_table = le64_to_cpu(msblk->id_table[0]);
249 249
250 /* Handle inode lookup table */ 250 /* Handle inode lookup table */
251 lookup_table_start = le64_to_cpu(sblk->lookup_table_start); 251 lookup_table_start = le64_to_cpu(sblk->lookup_table_start);
@@ -261,7 +261,7 @@ allocate_id_index_table:
261 msblk->inode_lookup_table = NULL; 261 msblk->inode_lookup_table = NULL;
262 goto failed_mount; 262 goto failed_mount;
263 } 263 }
264 next_table = msblk->inode_lookup_table[0]; 264 next_table = le64_to_cpu(msblk->inode_lookup_table[0]);
265 265
266 sb->s_export_op = &squashfs_export_ops; 266 sb->s_export_op = &squashfs_export_ops;
267 267
@@ -286,7 +286,7 @@ handle_fragments:
286 msblk->fragment_index = NULL; 286 msblk->fragment_index = NULL;
287 goto failed_mount; 287 goto failed_mount;
288 } 288 }
289 next_table = msblk->fragment_index[0]; 289 next_table = le64_to_cpu(msblk->fragment_index[0]);
290 290
291check_directory_table: 291check_directory_table:
292 /* Sanity check directory_table */ 292 /* Sanity check directory_table */
diff --git a/fs/ubifs/shrinker.c b/fs/ubifs/shrinker.c
index 46961c003236..ca953a945029 100644
--- a/fs/ubifs/shrinker.c
+++ b/fs/ubifs/shrinker.c
@@ -277,8 +277,9 @@ static int kick_a_thread(void)
277 return 0; 277 return 0;
278} 278}
279 279
280int ubifs_shrinker(struct shrinker *shrink, int nr, gfp_t gfp_mask) 280int ubifs_shrinker(struct shrinker *shrink, struct shrink_control *sc)
281{ 281{
282 int nr = sc->nr_to_scan;
282 int freed, contention = 0; 283 int freed, contention = 0;
283 long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt); 284 long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt);
284 285
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 93d1412a06f0..a70d7b4ffb25 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -1614,7 +1614,7 @@ int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot);
1614int ubifs_tnc_end_commit(struct ubifs_info *c); 1614int ubifs_tnc_end_commit(struct ubifs_info *c);
1615 1615
1616/* shrinker.c */ 1616/* shrinker.c */
1617int ubifs_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask); 1617int ubifs_shrinker(struct shrinker *shrink, struct shrink_control *sc);
1618 1618
1619/* commit.c */ 1619/* commit.c */
1620int ubifs_bg_thread(void *info); 1620int ubifs_bg_thread(void *info);
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index a3252a5ead66..a756bc8d866d 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -98,6 +98,9 @@ acpi_os_table_override(struct acpi_table_header *existing_table,
98/* 98/*
99 * Spinlock primitives 99 * Spinlock primitives
100 */ 100 */
101acpi_status
102acpi_os_create_lock(acpi_spinlock *out_handle);
103
101void acpi_os_delete_lock(acpi_spinlock handle); 104void acpi_os_delete_lock(acpi_spinlock handle);
102 105
103acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock handle); 106acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock handle);
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index f6ad63d25b73..2ed0a8486c19 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -47,7 +47,7 @@
47 47
48/* Current ACPICA subsystem version in YYYYMMDD format */ 48/* Current ACPICA subsystem version in YYYYMMDD format */
49 49
50#define ACPI_CA_VERSION 0x20110316 50#define ACPI_CA_VERSION 0x20110413
51 51
52#include "actypes.h" 52#include "actypes.h"
53#include "actbl.h" 53#include "actbl.h"
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 64f838beaabf..b67231bef632 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -501,8 +501,9 @@ typedef u64 acpi_integer;
501#define ACPI_STATE_D1 (u8) 1 501#define ACPI_STATE_D1 (u8) 1
502#define ACPI_STATE_D2 (u8) 2 502#define ACPI_STATE_D2 (u8) 2
503#define ACPI_STATE_D3 (u8) 3 503#define ACPI_STATE_D3 (u8) 3
504#define ACPI_D_STATES_MAX ACPI_STATE_D3 504#define ACPI_STATE_D3_COLD (u8) 4
505#define ACPI_D_STATE_COUNT 4 505#define ACPI_D_STATES_MAX ACPI_STATE_D3_COLD
506#define ACPI_D_STATE_COUNT 5
506 507
507#define ACPI_STATE_C0 (u8) 0 508#define ACPI_STATE_C0 (u8) 0
508#define ACPI_STATE_C1 (u8) 1 509#define ACPI_STATE_C1 (u8) 1
@@ -712,8 +713,24 @@ typedef u8 acpi_adr_space_type;
712#define ACPI_ADR_SPACE_CMOS (acpi_adr_space_type) 5 713#define ACPI_ADR_SPACE_CMOS (acpi_adr_space_type) 5
713#define ACPI_ADR_SPACE_PCI_BAR_TARGET (acpi_adr_space_type) 6 714#define ACPI_ADR_SPACE_PCI_BAR_TARGET (acpi_adr_space_type) 6
714#define ACPI_ADR_SPACE_IPMI (acpi_adr_space_type) 7 715#define ACPI_ADR_SPACE_IPMI (acpi_adr_space_type) 7
715#define ACPI_ADR_SPACE_DATA_TABLE (acpi_adr_space_type) 8 716
716#define ACPI_ADR_SPACE_FIXED_HARDWARE (acpi_adr_space_type) 127 717#define ACPI_NUM_PREDEFINED_REGIONS 8
718
719/*
720 * Special Address Spaces
721 *
722 * Note: A Data Table region is a special type of operation region
723 * that has its own AML opcode. However, internally, the AML
724 * interpreter simply creates an operation region with an an address
725 * space type of ACPI_ADR_SPACE_DATA_TABLE.
726 */
727#define ACPI_ADR_SPACE_DATA_TABLE (acpi_adr_space_type) 0x7E /* Internal to ACPICA only */
728#define ACPI_ADR_SPACE_FIXED_HARDWARE (acpi_adr_space_type) 0x7F
729
730/* Values for _REG connection code */
731
732#define ACPI_REG_DISCONNECT 0
733#define ACPI_REG_CONNECT 1
717 734
718/* 735/*
719 * bit_register IDs 736 * bit_register IDs
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 55192ac0cede..ba4928cae473 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -310,14 +310,7 @@ static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
310 310
311/* in processor_core.c */ 311/* in processor_core.c */
312void acpi_processor_set_pdc(acpi_handle handle); 312void acpi_processor_set_pdc(acpi_handle handle);
313#ifdef CONFIG_SMP
314int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id); 313int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id);
315#else
316static inline int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
317{
318 return -1;
319}
320#endif
321 314
322/* in processor_throttling.c */ 315/* in processor_throttling.c */
323int acpi_processor_tstate_has_changed(struct acpi_processor *pr); 316int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index a2e910e01293..1deb2a73c2da 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -150,8 +150,7 @@ extern int ec_read(u8 addr, u8 *val);
150extern int ec_write(u8 addr, u8 val); 150extern int ec_write(u8 addr, u8 val);
151extern int ec_transaction(u8 command, 151extern int ec_transaction(u8 command,
152 const u8 *wdata, unsigned wdata_len, 152 const u8 *wdata, unsigned wdata_len,
153 u8 *rdata, unsigned rdata_len, 153 u8 *rdata, unsigned rdata_len);
154 int force_poll);
155 154
156#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE) 155#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
157 156
diff --git a/include/linux/pm_qos_params.h b/include/linux/pm_qos_params.h
index 77cbddb3784c..a7d87f911cab 100644
--- a/include/linux/pm_qos_params.h
+++ b/include/linux/pm_qos_params.h
@@ -16,6 +16,10 @@
16#define PM_QOS_NUM_CLASSES 4 16#define PM_QOS_NUM_CLASSES 4
17#define PM_QOS_DEFAULT_VALUE -1 17#define PM_QOS_DEFAULT_VALUE -1
18 18
19#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
20#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
21#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0
22
19struct pm_qos_request_list { 23struct pm_qos_request_list {
20 struct plist_node list; 24 struct plist_node list;
21 int pm_qos_class; 25 int pm_qos_class;
diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h
index 77e624883393..c68a147939a6 100644
--- a/include/linux/sunrpc/msg_prot.h
+++ b/include/linux/sunrpc/msg_prot.h
@@ -145,6 +145,7 @@ typedef __be32 rpc_fraghdr;
145#define RPCBIND_NETID_TCP "tcp" 145#define RPCBIND_NETID_TCP "tcp"
146#define RPCBIND_NETID_UDP6 "udp6" 146#define RPCBIND_NETID_UDP6 "udp6"
147#define RPCBIND_NETID_TCP6 "tcp6" 147#define RPCBIND_NETID_TCP6 "tcp6"
148#define RPCBIND_NETID_LOCAL "local"
148 149
149/* 150/*
150 * Note that RFC 1833 does not put any size restrictions on the 151 * Note that RFC 1833 does not put any size restrictions on the
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index a0f998c07c65..81cce3b3ee66 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -141,7 +141,8 @@ enum xprt_transports {
141 XPRT_TRANSPORT_UDP = IPPROTO_UDP, 141 XPRT_TRANSPORT_UDP = IPPROTO_UDP,
142 XPRT_TRANSPORT_TCP = IPPROTO_TCP, 142 XPRT_TRANSPORT_TCP = IPPROTO_TCP,
143 XPRT_TRANSPORT_BC_TCP = IPPROTO_TCP | XPRT_TRANSPORT_BC, 143 XPRT_TRANSPORT_BC_TCP = IPPROTO_TCP | XPRT_TRANSPORT_BC,
144 XPRT_TRANSPORT_RDMA = 256 144 XPRT_TRANSPORT_RDMA = 256,
145 XPRT_TRANSPORT_LOCAL = 257,
145}; 146};
146 147
147struct rpc_xprt { 148struct rpc_xprt {
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index fd8d1e035df9..6824ca7d4d0c 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -54,11 +54,17 @@ enum pm_qos_type {
54 PM_QOS_MIN /* return the smallest value */ 54 PM_QOS_MIN /* return the smallest value */
55}; 55};
56 56
57/*
58 * Note: The lockless read path depends on the CPU accessing
59 * target_value atomically. Atomic access is only guaranteed on all CPU
60 * types linux supports for 32 bit quantites
61 */
57struct pm_qos_object { 62struct pm_qos_object {
58 struct plist_head requests; 63 struct plist_head requests;
59 struct blocking_notifier_head *notifiers; 64 struct blocking_notifier_head *notifiers;
60 struct miscdevice pm_qos_power_miscdev; 65 struct miscdevice pm_qos_power_miscdev;
61 char *name; 66 char *name;
67 s32 target_value; /* Do not change to 64 bit */
62 s32 default_value; 68 s32 default_value;
63 enum pm_qos_type type; 69 enum pm_qos_type type;
64}; 70};
@@ -71,7 +77,8 @@ static struct pm_qos_object cpu_dma_pm_qos = {
71 .requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests, pm_qos_lock), 77 .requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests, pm_qos_lock),
72 .notifiers = &cpu_dma_lat_notifier, 78 .notifiers = &cpu_dma_lat_notifier,
73 .name = "cpu_dma_latency", 79 .name = "cpu_dma_latency",
74 .default_value = 2000 * USEC_PER_SEC, 80 .target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
81 .default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
75 .type = PM_QOS_MIN, 82 .type = PM_QOS_MIN,
76}; 83};
77 84
@@ -80,7 +87,8 @@ static struct pm_qos_object network_lat_pm_qos = {
80 .requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests, pm_qos_lock), 87 .requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests, pm_qos_lock),
81 .notifiers = &network_lat_notifier, 88 .notifiers = &network_lat_notifier,
82 .name = "network_latency", 89 .name = "network_latency",
83 .default_value = 2000 * USEC_PER_SEC, 90 .target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
91 .default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
84 .type = PM_QOS_MIN 92 .type = PM_QOS_MIN
85}; 93};
86 94
@@ -90,7 +98,8 @@ static struct pm_qos_object network_throughput_pm_qos = {
90 .requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests, pm_qos_lock), 98 .requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests, pm_qos_lock),
91 .notifiers = &network_throughput_notifier, 99 .notifiers = &network_throughput_notifier,
92 .name = "network_throughput", 100 .name = "network_throughput",
93 .default_value = 0, 101 .target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
102 .default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
94 .type = PM_QOS_MAX, 103 .type = PM_QOS_MAX,
95}; 104};
96 105
@@ -136,6 +145,16 @@ static inline int pm_qos_get_value(struct pm_qos_object *o)
136 } 145 }
137} 146}
138 147
148static inline s32 pm_qos_read_value(struct pm_qos_object *o)
149{
150 return o->target_value;
151}
152
153static inline void pm_qos_set_value(struct pm_qos_object *o, s32 value)
154{
155 o->target_value = value;
156}
157
139static void update_target(struct pm_qos_object *o, struct plist_node *node, 158static void update_target(struct pm_qos_object *o, struct plist_node *node,
140 int del, int value) 159 int del, int value)
141{ 160{
@@ -160,6 +179,7 @@ static void update_target(struct pm_qos_object *o, struct plist_node *node,
160 plist_add(node, &o->requests); 179 plist_add(node, &o->requests);
161 } 180 }
162 curr_value = pm_qos_get_value(o); 181 curr_value = pm_qos_get_value(o);
182 pm_qos_set_value(o, curr_value);
163 spin_unlock_irqrestore(&pm_qos_lock, flags); 183 spin_unlock_irqrestore(&pm_qos_lock, flags);
164 184
165 if (prev_value != curr_value) 185 if (prev_value != curr_value)
@@ -194,18 +214,11 @@ static int find_pm_qos_object_by_minor(int minor)
194 * pm_qos_request - returns current system wide qos expectation 214 * pm_qos_request - returns current system wide qos expectation
195 * @pm_qos_class: identification of which qos value is requested 215 * @pm_qos_class: identification of which qos value is requested
196 * 216 *
197 * This function returns the current target value in an atomic manner. 217 * This function returns the current target value.
198 */ 218 */
199int pm_qos_request(int pm_qos_class) 219int pm_qos_request(int pm_qos_class)
200{ 220{
201 unsigned long flags; 221 return pm_qos_read_value(pm_qos_array[pm_qos_class]);
202 int value;
203
204 spin_lock_irqsave(&pm_qos_lock, flags);
205 value = pm_qos_get_value(pm_qos_array[pm_qos_class]);
206 spin_unlock_irqrestore(&pm_qos_lock, flags);
207
208 return value;
209} 222}
210EXPORT_SYMBOL_GPL(pm_qos_request); 223EXPORT_SYMBOL_GPL(pm_qos_request);
211 224
diff --git a/mm/rmap.c b/mm/rmap.c
index 6bada99cd61c..0eb463ea88dd 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -352,6 +352,11 @@ void __init anon_vma_init(void)
352 * The page might have been remapped to a different anon_vma or the anon_vma 352 * The page might have been remapped to a different anon_vma or the anon_vma
353 * returned may already be freed (and even reused). 353 * returned may already be freed (and even reused).
354 * 354 *
355 * In case it was remapped to a different anon_vma, the new anon_vma will be a
356 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
357 * ensure that any anon_vma obtained from the page will still be valid for as
358 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
359 *
355 * All users of this function must be very careful when walking the anon_vma 360 * All users of this function must be very careful when walking the anon_vma
356 * chain and verify that the page in question is indeed mapped in it 361 * chain and verify that the page in question is indeed mapped in it
357 * [ something equivalent to page_mapped_in_vma() ]. 362 * [ something equivalent to page_mapped_in_vma() ].
@@ -421,7 +426,7 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
421 /* 426 /*
422 * If the page is still mapped, then this anon_vma is still 427 * If the page is still mapped, then this anon_vma is still
423 * its anon_vma, and holding the mutex ensures that it will 428 * its anon_vma, and holding the mutex ensures that it will
424 * not go away, see __put_anon_vma(). 429 * not go away, see anon_vma_free().
425 */ 430 */
426 if (!page_mapped(page)) { 431 if (!page_mapped(page)) {
427 mutex_unlock(&root_anon_vma->mutex); 432 mutex_unlock(&root_anon_vma->mutex);
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 8d83f9d48713..b84d7395535e 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -13,10 +13,6 @@
13 * and need to be refreshed, or when a packet was damaged in transit. 13 * and need to be refreshed, or when a packet was damaged in transit.
14 * This may be have to be moved to the VFS layer. 14 * This may be have to be moved to the VFS layer.
15 * 15 *
16 * NB: BSD uses a more intelligent approach to guessing when a request
17 * or reply has been lost by keeping the RTO estimate for each procedure.
18 * We currently make do with a constant timeout value.
19 *
20 * Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com> 16 * Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
21 * Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de> 17 * Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
22 */ 18 */
@@ -32,7 +28,9 @@
32#include <linux/slab.h> 28#include <linux/slab.h>
33#include <linux/utsname.h> 29#include <linux/utsname.h>
34#include <linux/workqueue.h> 30#include <linux/workqueue.h>
31#include <linux/in.h>
35#include <linux/in6.h> 32#include <linux/in6.h>
33#include <linux/un.h>
36 34
37#include <linux/sunrpc/clnt.h> 35#include <linux/sunrpc/clnt.h>
38#include <linux/sunrpc/rpc_pipe_fs.h> 36#include <linux/sunrpc/rpc_pipe_fs.h>
@@ -298,22 +296,27 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
298 * up a string representation of the passed-in address. 296 * up a string representation of the passed-in address.
299 */ 297 */
300 if (args->servername == NULL) { 298 if (args->servername == NULL) {
299 struct sockaddr_un *sun =
300 (struct sockaddr_un *)args->address;
301 struct sockaddr_in *sin =
302 (struct sockaddr_in *)args->address;
303 struct sockaddr_in6 *sin6 =
304 (struct sockaddr_in6 *)args->address;
305
301 servername[0] = '\0'; 306 servername[0] = '\0';
302 switch (args->address->sa_family) { 307 switch (args->address->sa_family) {
303 case AF_INET: { 308 case AF_LOCAL:
304 struct sockaddr_in *sin = 309 snprintf(servername, sizeof(servername), "%s",
305 (struct sockaddr_in *)args->address; 310 sun->sun_path);
311 break;
312 case AF_INET:
306 snprintf(servername, sizeof(servername), "%pI4", 313 snprintf(servername, sizeof(servername), "%pI4",
307 &sin->sin_addr.s_addr); 314 &sin->sin_addr.s_addr);
308 break; 315 break;
309 } 316 case AF_INET6:
310 case AF_INET6: {
311 struct sockaddr_in6 *sin =
312 (struct sockaddr_in6 *)args->address;
313 snprintf(servername, sizeof(servername), "%pI6", 317 snprintf(servername, sizeof(servername), "%pI6",
314 &sin->sin6_addr); 318 &sin6->sin6_addr);
315 break; 319 break;
316 }
317 default: 320 default:
318 /* caller wants default server name, but 321 /* caller wants default server name, but
319 * address family isn't recognized. */ 322 * address family isn't recognized. */
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index c652e4cc9fe9..9a80a922c527 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/types.h> 17#include <linux/types.h>
18#include <linux/socket.h> 18#include <linux/socket.h>
19#include <linux/un.h>
19#include <linux/in.h> 20#include <linux/in.h>
20#include <linux/in6.h> 21#include <linux/in6.h>
21#include <linux/kernel.h> 22#include <linux/kernel.h>
@@ -32,6 +33,8 @@
32# define RPCDBG_FACILITY RPCDBG_BIND 33# define RPCDBG_FACILITY RPCDBG_BIND
33#endif 34#endif
34 35
36#define RPCBIND_SOCK_PATHNAME "/var/run/rpcbind.sock"
37
35#define RPCBIND_PROGRAM (100000u) 38#define RPCBIND_PROGRAM (100000u)
36#define RPCBIND_PORT (111u) 39#define RPCBIND_PORT (111u)
37 40
@@ -158,20 +161,69 @@ static void rpcb_map_release(void *data)
158 kfree(map); 161 kfree(map);
159} 162}
160 163
161static const struct sockaddr_in rpcb_inaddr_loopback = { 164/*
162 .sin_family = AF_INET, 165 * Returns zero on success, otherwise a negative errno value
163 .sin_addr.s_addr = htonl(INADDR_LOOPBACK), 166 * is returned.
164 .sin_port = htons(RPCBIND_PORT), 167 */
165}; 168static int rpcb_create_local_unix(void)
169{
170 static const struct sockaddr_un rpcb_localaddr_rpcbind = {
171 .sun_family = AF_LOCAL,
172 .sun_path = RPCBIND_SOCK_PATHNAME,
173 };
174 struct rpc_create_args args = {
175 .net = &init_net,
176 .protocol = XPRT_TRANSPORT_LOCAL,
177 .address = (struct sockaddr *)&rpcb_localaddr_rpcbind,
178 .addrsize = sizeof(rpcb_localaddr_rpcbind),
179 .servername = "localhost",
180 .program = &rpcb_program,
181 .version = RPCBVERS_2,
182 .authflavor = RPC_AUTH_NULL,
183 };
184 struct rpc_clnt *clnt, *clnt4;
185 int result = 0;
186
187 /*
188 * Because we requested an RPC PING at transport creation time,
189 * this works only if the user space portmapper is rpcbind, and
190 * it's listening on AF_LOCAL on the named socket.
191 */
192 clnt = rpc_create(&args);
193 if (IS_ERR(clnt)) {
194 dprintk("RPC: failed to create AF_LOCAL rpcbind "
195 "client (errno %ld).\n", PTR_ERR(clnt));
196 result = -PTR_ERR(clnt);
197 goto out;
198 }
199
200 clnt4 = rpc_bind_new_program(clnt, &rpcb_program, RPCBVERS_4);
201 if (IS_ERR(clnt4)) {
202 dprintk("RPC: failed to bind second program to "
203 "rpcbind v4 client (errno %ld).\n",
204 PTR_ERR(clnt4));
205 clnt4 = NULL;
206 }
207
208 /* Protected by rpcb_create_local_mutex */
209 rpcb_local_clnt = clnt;
210 rpcb_local_clnt4 = clnt4;
166 211
167static DEFINE_MUTEX(rpcb_create_local_mutex); 212out:
213 return result;
214}
168 215
169/* 216/*
170 * Returns zero on success, otherwise a negative errno value 217 * Returns zero on success, otherwise a negative errno value
171 * is returned. 218 * is returned.
172 */ 219 */
173static int rpcb_create_local(void) 220static int rpcb_create_local_net(void)
174{ 221{
222 static const struct sockaddr_in rpcb_inaddr_loopback = {
223 .sin_family = AF_INET,
224 .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
225 .sin_port = htons(RPCBIND_PORT),
226 };
175 struct rpc_create_args args = { 227 struct rpc_create_args args = {
176 .net = &init_net, 228 .net = &init_net,
177 .protocol = XPRT_TRANSPORT_TCP, 229 .protocol = XPRT_TRANSPORT_TCP,
@@ -186,13 +238,6 @@ static int rpcb_create_local(void)
186 struct rpc_clnt *clnt, *clnt4; 238 struct rpc_clnt *clnt, *clnt4;
187 int result = 0; 239 int result = 0;
188 240
189 if (rpcb_local_clnt)
190 return result;
191
192 mutex_lock(&rpcb_create_local_mutex);
193 if (rpcb_local_clnt)
194 goto out;
195
196 clnt = rpc_create(&args); 241 clnt = rpc_create(&args);
197 if (IS_ERR(clnt)) { 242 if (IS_ERR(clnt)) {
198 dprintk("RPC: failed to create local rpcbind " 243 dprintk("RPC: failed to create local rpcbind "
@@ -214,10 +259,34 @@ static int rpcb_create_local(void)
214 clnt4 = NULL; 259 clnt4 = NULL;
215 } 260 }
216 261
262 /* Protected by rpcb_create_local_mutex */
217 rpcb_local_clnt = clnt; 263 rpcb_local_clnt = clnt;
218 rpcb_local_clnt4 = clnt4; 264 rpcb_local_clnt4 = clnt4;
219 265
220out: 266out:
267 return result;
268}
269
270/*
271 * Returns zero on success, otherwise a negative errno value
272 * is returned.
273 */
274static int rpcb_create_local(void)
275{
276 static DEFINE_MUTEX(rpcb_create_local_mutex);
277 int result = 0;
278
279 if (rpcb_local_clnt)
280 return result;
281
282 mutex_lock(&rpcb_create_local_mutex);
283 if (rpcb_local_clnt)
284 goto out;
285
286 if (rpcb_create_local_unix() != 0)
287 result = rpcb_create_local_net();
288
289out:
221 mutex_unlock(&rpcb_create_local_mutex); 290 mutex_unlock(&rpcb_create_local_mutex);
222 return result; 291 return result;
223} 292}
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 08e05a8ce025..2b90292e9505 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -942,6 +942,8 @@ static void svc_unregister(const struct svc_serv *serv)
942 if (progp->pg_vers[i]->vs_hidden) 942 if (progp->pg_vers[i]->vs_hidden)
943 continue; 943 continue;
944 944
945 dprintk("svc: attempting to unregister %sv%u\n",
946 progp->pg_name, i);
945 __svc_unregister(progp->pg_prog, i, progp->pg_name); 947 __svc_unregister(progp->pg_prog, i, progp->pg_name);
946 } 948 }
947 } 949 }
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index bf005d3c65ef..72abb7358933 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -19,6 +19,7 @@
19 */ 19 */
20 20
21#include <linux/types.h> 21#include <linux/types.h>
22#include <linux/string.h>
22#include <linux/slab.h> 23#include <linux/slab.h>
23#include <linux/module.h> 24#include <linux/module.h>
24#include <linux/capability.h> 25#include <linux/capability.h>
@@ -28,6 +29,7 @@
28#include <linux/in.h> 29#include <linux/in.h>
29#include <linux/net.h> 30#include <linux/net.h>
30#include <linux/mm.h> 31#include <linux/mm.h>
32#include <linux/un.h>
31#include <linux/udp.h> 33#include <linux/udp.h>
32#include <linux/tcp.h> 34#include <linux/tcp.h>
33#include <linux/sunrpc/clnt.h> 35#include <linux/sunrpc/clnt.h>
@@ -45,6 +47,9 @@
45#include <net/tcp.h> 47#include <net/tcp.h>
46 48
47#include "sunrpc.h" 49#include "sunrpc.h"
50
51static void xs_close(struct rpc_xprt *xprt);
52
48/* 53/*
49 * xprtsock tunables 54 * xprtsock tunables
50 */ 55 */
@@ -261,6 +266,11 @@ static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
261 return (struct sockaddr *) &xprt->addr; 266 return (struct sockaddr *) &xprt->addr;
262} 267}
263 268
269static inline struct sockaddr_un *xs_addr_un(struct rpc_xprt *xprt)
270{
271 return (struct sockaddr_un *) &xprt->addr;
272}
273
264static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt) 274static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
265{ 275{
266 return (struct sockaddr_in *) &xprt->addr; 276 return (struct sockaddr_in *) &xprt->addr;
@@ -276,23 +286,34 @@ static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
276 struct sockaddr *sap = xs_addr(xprt); 286 struct sockaddr *sap = xs_addr(xprt);
277 struct sockaddr_in6 *sin6; 287 struct sockaddr_in6 *sin6;
278 struct sockaddr_in *sin; 288 struct sockaddr_in *sin;
289 struct sockaddr_un *sun;
279 char buf[128]; 290 char buf[128];
280 291
281 (void)rpc_ntop(sap, buf, sizeof(buf));
282 xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);
283
284 switch (sap->sa_family) { 292 switch (sap->sa_family) {
293 case AF_LOCAL:
294 sun = xs_addr_un(xprt);
295 strlcpy(buf, sun->sun_path, sizeof(buf));
296 xprt->address_strings[RPC_DISPLAY_ADDR] =
297 kstrdup(buf, GFP_KERNEL);
298 break;
285 case AF_INET: 299 case AF_INET:
300 (void)rpc_ntop(sap, buf, sizeof(buf));
301 xprt->address_strings[RPC_DISPLAY_ADDR] =
302 kstrdup(buf, GFP_KERNEL);
286 sin = xs_addr_in(xprt); 303 sin = xs_addr_in(xprt);
287 snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr)); 304 snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
288 break; 305 break;
289 case AF_INET6: 306 case AF_INET6:
307 (void)rpc_ntop(sap, buf, sizeof(buf));
308 xprt->address_strings[RPC_DISPLAY_ADDR] =
309 kstrdup(buf, GFP_KERNEL);
290 sin6 = xs_addr_in6(xprt); 310 sin6 = xs_addr_in6(xprt);
291 snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr); 311 snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
292 break; 312 break;
293 default: 313 default:
294 BUG(); 314 BUG();
295 } 315 }
316
296 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL); 317 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
297} 318}
298 319
@@ -495,6 +516,70 @@ static int xs_nospace(struct rpc_task *task)
495 return ret; 516 return ret;
496} 517}
497 518
519/*
520 * Construct a stream transport record marker in @buf.
521 */
522static inline void xs_encode_stream_record_marker(struct xdr_buf *buf)
523{
524 u32 reclen = buf->len - sizeof(rpc_fraghdr);
525 rpc_fraghdr *base = buf->head[0].iov_base;
526 *base = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | reclen);
527}
528
529/**
530 * xs_local_send_request - write an RPC request to an AF_LOCAL socket
531 * @task: RPC task that manages the state of an RPC request
532 *
533 * Return values:
534 * 0: The request has been sent
535 * EAGAIN: The socket was blocked, please call again later to
536 * complete the request
537 * ENOTCONN: Caller needs to invoke connect logic then call again
538 * other: Some other error occured, the request was not sent
539 */
540static int xs_local_send_request(struct rpc_task *task)
541{
542 struct rpc_rqst *req = task->tk_rqstp;
543 struct rpc_xprt *xprt = req->rq_xprt;
544 struct sock_xprt *transport =
545 container_of(xprt, struct sock_xprt, xprt);
546 struct xdr_buf *xdr = &req->rq_snd_buf;
547 int status;
548
549 xs_encode_stream_record_marker(&req->rq_snd_buf);
550
551 xs_pktdump("packet data:",
552 req->rq_svec->iov_base, req->rq_svec->iov_len);
553
554 status = xs_sendpages(transport->sock, NULL, 0,
555 xdr, req->rq_bytes_sent);
556 dprintk("RPC: %s(%u) = %d\n",
557 __func__, xdr->len - req->rq_bytes_sent, status);
558 if (likely(status >= 0)) {
559 req->rq_bytes_sent += status;
560 req->rq_xmit_bytes_sent += status;
561 if (likely(req->rq_bytes_sent >= req->rq_slen)) {
562 req->rq_bytes_sent = 0;
563 return 0;
564 }
565 status = -EAGAIN;
566 }
567
568 switch (status) {
569 case -EAGAIN:
570 status = xs_nospace(task);
571 break;
572 default:
573 dprintk("RPC: sendmsg returned unrecognized error %d\n",
574 -status);
575 case -EPIPE:
576 xs_close(xprt);
577 status = -ENOTCONN;
578 }
579
580 return status;
581}
582
498/** 583/**
499 * xs_udp_send_request - write an RPC request to a UDP socket 584 * xs_udp_send_request - write an RPC request to a UDP socket
500 * @task: address of RPC task that manages the state of an RPC request 585 * @task: address of RPC task that manages the state of an RPC request
@@ -574,13 +659,6 @@ static void xs_tcp_shutdown(struct rpc_xprt *xprt)
574 kernel_sock_shutdown(sock, SHUT_WR); 659 kernel_sock_shutdown(sock, SHUT_WR);
575} 660}
576 661
577static inline void xs_encode_tcp_record_marker(struct xdr_buf *buf)
578{
579 u32 reclen = buf->len - sizeof(rpc_fraghdr);
580 rpc_fraghdr *base = buf->head[0].iov_base;
581 *base = htonl(RPC_LAST_STREAM_FRAGMENT | reclen);
582}
583
584/** 662/**
585 * xs_tcp_send_request - write an RPC request to a TCP socket 663 * xs_tcp_send_request - write an RPC request to a TCP socket
586 * @task: address of RPC task that manages the state of an RPC request 664 * @task: address of RPC task that manages the state of an RPC request
@@ -603,7 +681,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
603 struct xdr_buf *xdr = &req->rq_snd_buf; 681 struct xdr_buf *xdr = &req->rq_snd_buf;
604 int status; 682 int status;
605 683
606 xs_encode_tcp_record_marker(&req->rq_snd_buf); 684 xs_encode_stream_record_marker(&req->rq_snd_buf);
607 685
608 xs_pktdump("packet data:", 686 xs_pktdump("packet data:",
609 req->rq_svec->iov_base, 687 req->rq_svec->iov_base,
@@ -785,6 +863,88 @@ static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
785 return (struct rpc_xprt *) sk->sk_user_data; 863 return (struct rpc_xprt *) sk->sk_user_data;
786} 864}
787 865
866static int xs_local_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
867{
868 struct xdr_skb_reader desc = {
869 .skb = skb,
870 .offset = sizeof(rpc_fraghdr),
871 .count = skb->len - sizeof(rpc_fraghdr),
872 };
873
874 if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
875 return -1;
876 if (desc.count)
877 return -1;
878 return 0;
879}
880
881/**
882 * xs_local_data_ready - "data ready" callback for AF_LOCAL sockets
883 * @sk: socket with data to read
884 * @len: how much data to read
885 *
886 * Currently this assumes we can read the whole reply in a single gulp.
887 */
888static void xs_local_data_ready(struct sock *sk, int len)
889{
890 struct rpc_task *task;
891 struct rpc_xprt *xprt;
892 struct rpc_rqst *rovr;
893 struct sk_buff *skb;
894 int err, repsize, copied;
895 u32 _xid;
896 __be32 *xp;
897
898 read_lock_bh(&sk->sk_callback_lock);
899 dprintk("RPC: %s...\n", __func__);
900 xprt = xprt_from_sock(sk);
901 if (xprt == NULL)
902 goto out;
903
904 skb = skb_recv_datagram(sk, 0, 1, &err);
905 if (skb == NULL)
906 goto out;
907
908 if (xprt->shutdown)
909 goto dropit;
910
911 repsize = skb->len - sizeof(rpc_fraghdr);
912 if (repsize < 4) {
913 dprintk("RPC: impossible RPC reply size %d\n", repsize);
914 goto dropit;
915 }
916
917 /* Copy the XID from the skb... */
918 xp = skb_header_pointer(skb, sizeof(rpc_fraghdr), sizeof(_xid), &_xid);
919 if (xp == NULL)
920 goto dropit;
921
922 /* Look up and lock the request corresponding to the given XID */
923 spin_lock(&xprt->transport_lock);
924 rovr = xprt_lookup_rqst(xprt, *xp);
925 if (!rovr)
926 goto out_unlock;
927 task = rovr->rq_task;
928
929 copied = rovr->rq_private_buf.buflen;
930 if (copied > repsize)
931 copied = repsize;
932
933 if (xs_local_copy_to_xdr(&rovr->rq_private_buf, skb)) {
934 dprintk("RPC: sk_buff copy failed\n");
935 goto out_unlock;
936 }
937
938 xprt_complete_rqst(task, copied);
939
940 out_unlock:
941 spin_unlock(&xprt->transport_lock);
942 dropit:
943 skb_free_datagram(sk, skb);
944 out:
945 read_unlock_bh(&sk->sk_callback_lock);
946}
947
788/** 948/**
789 * xs_udp_data_ready - "data ready" callback for UDP sockets 949 * xs_udp_data_ready - "data ready" callback for UDP sockets
790 * @sk: socket with data to read 950 * @sk: socket with data to read
@@ -1344,7 +1504,6 @@ static void xs_tcp_state_change(struct sock *sk)
1344 case TCP_CLOSE_WAIT: 1504 case TCP_CLOSE_WAIT:
1345 /* The server initiated a shutdown of the socket */ 1505 /* The server initiated a shutdown of the socket */
1346 xprt_force_disconnect(xprt); 1506 xprt_force_disconnect(xprt);
1347 case TCP_SYN_SENT:
1348 xprt->connect_cookie++; 1507 xprt->connect_cookie++;
1349 case TCP_CLOSING: 1508 case TCP_CLOSING:
1350 /* 1509 /*
@@ -1571,11 +1730,31 @@ static int xs_bind(struct sock_xprt *transport, struct socket *sock)
1571 return err; 1730 return err;
1572} 1731}
1573 1732
1733/*
1734 * We don't support autobind on AF_LOCAL sockets
1735 */
1736static void xs_local_rpcbind(struct rpc_task *task)
1737{
1738 xprt_set_bound(task->tk_xprt);
1739}
1740
1741static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port)
1742{
1743}
1574 1744
1575#ifdef CONFIG_DEBUG_LOCK_ALLOC 1745#ifdef CONFIG_DEBUG_LOCK_ALLOC
1576static struct lock_class_key xs_key[2]; 1746static struct lock_class_key xs_key[2];
1577static struct lock_class_key xs_slock_key[2]; 1747static struct lock_class_key xs_slock_key[2];
1578 1748
1749static inline void xs_reclassify_socketu(struct socket *sock)
1750{
1751 struct sock *sk = sock->sk;
1752
1753 BUG_ON(sock_owned_by_user(sk));
1754 sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC",
1755 &xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]);
1756}
1757
1579static inline void xs_reclassify_socket4(struct socket *sock) 1758static inline void xs_reclassify_socket4(struct socket *sock)
1580{ 1759{
1581 struct sock *sk = sock->sk; 1760 struct sock *sk = sock->sk;
@@ -1597,6 +1776,9 @@ static inline void xs_reclassify_socket6(struct socket *sock)
1597static inline void xs_reclassify_socket(int family, struct socket *sock) 1776static inline void xs_reclassify_socket(int family, struct socket *sock)
1598{ 1777{
1599 switch (family) { 1778 switch (family) {
1779 case AF_LOCAL:
1780 xs_reclassify_socketu(sock);
1781 break;
1600 case AF_INET: 1782 case AF_INET:
1601 xs_reclassify_socket4(sock); 1783 xs_reclassify_socket4(sock);
1602 break; 1784 break;
@@ -1606,6 +1788,10 @@ static inline void xs_reclassify_socket(int family, struct socket *sock)
1606 } 1788 }
1607} 1789}
1608#else 1790#else
1791static inline void xs_reclassify_socketu(struct socket *sock)
1792{
1793}
1794
1609static inline void xs_reclassify_socket4(struct socket *sock) 1795static inline void xs_reclassify_socket4(struct socket *sock)
1610{ 1796{
1611} 1797}
@@ -1644,6 +1830,94 @@ out:
1644 return ERR_PTR(err); 1830 return ERR_PTR(err);
1645} 1831}
1646 1832
1833static int xs_local_finish_connecting(struct rpc_xprt *xprt,
1834 struct socket *sock)
1835{
1836 struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
1837 xprt);
1838
1839 if (!transport->inet) {
1840 struct sock *sk = sock->sk;
1841
1842 write_lock_bh(&sk->sk_callback_lock);
1843
1844 xs_save_old_callbacks(transport, sk);
1845
1846 sk->sk_user_data = xprt;
1847 sk->sk_data_ready = xs_local_data_ready;
1848 sk->sk_write_space = xs_udp_write_space;
1849 sk->sk_error_report = xs_error_report;
1850 sk->sk_allocation = GFP_ATOMIC;
1851
1852 xprt_clear_connected(xprt);
1853
1854 /* Reset to new socket */
1855 transport->sock = sock;
1856 transport->inet = sk;
1857
1858 write_unlock_bh(&sk->sk_callback_lock);
1859 }
1860
1861 /* Tell the socket layer to start connecting... */
1862 xprt->stat.connect_count++;
1863 xprt->stat.connect_start = jiffies;
1864 return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0);
1865}
1866
1867/**
1868 * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint
1869 * @xprt: RPC transport to connect
1870 * @transport: socket transport to connect
1871 * @create_sock: function to create a socket of the correct type
1872 *
1873 * Invoked by a work queue tasklet.
1874 */
1875static void xs_local_setup_socket(struct work_struct *work)
1876{
1877 struct sock_xprt *transport =
1878 container_of(work, struct sock_xprt, connect_worker.work);
1879 struct rpc_xprt *xprt = &transport->xprt;
1880 struct socket *sock;
1881 int status = -EIO;
1882
1883 if (xprt->shutdown)
1884 goto out;
1885
1886 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
1887 status = __sock_create(xprt->xprt_net, AF_LOCAL,
1888 SOCK_STREAM, 0, &sock, 1);
1889 if (status < 0) {
1890 dprintk("RPC: can't create AF_LOCAL "
1891 "transport socket (%d).\n", -status);
1892 goto out;
1893 }
1894 xs_reclassify_socketu(sock);
1895
1896 dprintk("RPC: worker connecting xprt %p via AF_LOCAL to %s\n",
1897 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1898
1899 status = xs_local_finish_connecting(xprt, sock);
1900 switch (status) {
1901 case 0:
1902 dprintk("RPC: xprt %p connected to %s\n",
1903 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1904 xprt_set_connected(xprt);
1905 break;
1906 case -ENOENT:
1907 dprintk("RPC: xprt %p: socket %s does not exist\n",
1908 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1909 break;
1910 default:
1911 printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n",
1912 __func__, -status,
1913 xprt->address_strings[RPC_DISPLAY_ADDR]);
1914 }
1915
1916out:
1917 xprt_clear_connecting(xprt);
1918 xprt_wake_pending_tasks(xprt, status);
1919}
1920
1647static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) 1921static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
1648{ 1922{
1649 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 1923 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
@@ -1758,6 +2032,7 @@ static void xs_tcp_reuse_connection(struct sock_xprt *transport)
1758static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) 2032static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
1759{ 2033{
1760 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2034 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2035 int ret = -ENOTCONN;
1761 2036
1762 if (!transport->inet) { 2037 if (!transport->inet) {
1763 struct sock *sk = sock->sk; 2038 struct sock *sk = sock->sk;
@@ -1789,12 +2064,22 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
1789 } 2064 }
1790 2065
1791 if (!xprt_bound(xprt)) 2066 if (!xprt_bound(xprt))
1792 return -ENOTCONN; 2067 goto out;
1793 2068
1794 /* Tell the socket layer to start connecting... */ 2069 /* Tell the socket layer to start connecting... */
1795 xprt->stat.connect_count++; 2070 xprt->stat.connect_count++;
1796 xprt->stat.connect_start = jiffies; 2071 xprt->stat.connect_start = jiffies;
1797 return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK); 2072 ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
2073 switch (ret) {
2074 case 0:
2075 case -EINPROGRESS:
2076 /* SYN_SENT! */
2077 xprt->connect_cookie++;
2078 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2079 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2080 }
2081out:
2082 return ret;
1798} 2083}
1799 2084
1800/** 2085/**
@@ -1917,6 +2202,32 @@ static void xs_connect(struct rpc_task *task)
1917} 2202}
1918 2203
1919/** 2204/**
/**
 * xs_local_print_stats - display AF_LOCAL socket-specific stats
 * @xprt: rpc_xprt struct containing statistics
 * @seq: output file
 *
 */
static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
{
	long idle_time = 0;

	/* Idle time is meaningful only while connected; report 0 otherwise. */
	if (xprt_connected(xprt))
		idle_time = (long)(jiffies - xprt->last_used) / HZ;

	/* NOTE(review): field order appears to mirror the UDP/TCP
	 * print_stats handlers in this file so stats consumers see a
	 * uniform layout — confirm against xs_udp_print_stats. */
	seq_printf(seq, "\txprt:\tlocal %lu %lu %lu %ld %lu %lu %lu "
			"%llu %llu\n",
			xprt->stat.bind_count,
			xprt->stat.connect_count,
			xprt->stat.connect_time,
			idle_time,
			xprt->stat.sends,
			xprt->stat.recvs,
			xprt->stat.bad_xids,
			xprt->stat.req_u,
			xprt->stat.bklog_u);
}
2229
2230/**
1920 * xs_udp_print_stats - display UDP socket-specific stats 2231 * xs_udp_print_stats - display UDP socket-specific stats
1921 * @xprt: rpc_xprt struct containing statistics 2232 * @xprt: rpc_xprt struct containing statistics
1922 * @seq: output file 2233 * @seq: output file
@@ -2014,10 +2325,7 @@ static int bc_sendto(struct rpc_rqst *req)
2014 unsigned long headoff; 2325 unsigned long headoff;
2015 unsigned long tailoff; 2326 unsigned long tailoff;
2016 2327
2017 /* 2328 xs_encode_stream_record_marker(xbufp);
2018 * Set up the rpc header and record marker stuff
2019 */
2020 xs_encode_tcp_record_marker(xbufp);
2021 2329
2022 tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK; 2330 tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK;
2023 headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK; 2331 headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK;
@@ -2089,6 +2397,21 @@ static void bc_destroy(struct rpc_xprt *xprt)
2089{ 2397{
2090} 2398}
2091 2399
/*
 * RPC transport operations for AF_LOCAL (named UNIX socket) transports.
 * AF_LOCAL is a stream transport, so several handlers are shared with
 * the TCP implementation (e.g. xs_tcp_release_xprt); the bind, port,
 * send and stats handlers are AF_LOCAL-specific.
 */
static struct rpc_xprt_ops xs_local_ops = {
	.reserve_xprt = xprt_reserve_xprt,
	.release_xprt = xs_tcp_release_xprt,
	.rpcbind = xs_local_rpcbind,
	.set_port = xs_local_set_port,
	.connect = xs_connect,
	.buf_alloc = rpc_malloc,
	.buf_free = rpc_free,
	.send_request = xs_local_send_request,
	.set_retrans_timeout = xprt_set_retrans_timeout_def,
	.close = xs_close,
	.destroy = xs_destroy,
	.print_stats = xs_local_print_stats,
};
2414
2092static struct rpc_xprt_ops xs_udp_ops = { 2415static struct rpc_xprt_ops xs_udp_ops = {
2093 .set_buffer_size = xs_udp_set_buffer_size, 2416 .set_buffer_size = xs_udp_set_buffer_size,
2094 .reserve_xprt = xprt_reserve_xprt_cong, 2417 .reserve_xprt = xprt_reserve_xprt_cong,
@@ -2150,6 +2473,8 @@ static int xs_init_anyaddr(const int family, struct sockaddr *sap)
2150 }; 2473 };
2151 2474
2152 switch (family) { 2475 switch (family) {
2476 case AF_LOCAL:
2477 break;
2153 case AF_INET: 2478 case AF_INET:
2154 memcpy(sap, &sin, sizeof(sin)); 2479 memcpy(sap, &sin, sizeof(sin));
2155 break; 2480 break;
@@ -2197,6 +2522,70 @@ static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
2197 return xprt; 2522 return xprt;
2198} 2523}
2199 2524
/*
 * Default RPC timeout parameters for AF_LOCAL transports: a fixed 10s
 * timeout (to_initval == to_maxval, so presumably no exponential
 * backoff) with two retries.
 */
static const struct rpc_timeout xs_local_default_timeout = {
	.to_initval = 10 * HZ,
	.to_maxval = 10 * HZ,
	.to_retries = 2,
};
2530
/**
 * xs_setup_local - Set up transport to use an AF_LOCAL socket
 * @args: rpc transport creation arguments
 *
 * AF_LOCAL is a "tpi_cots_ord" transport, just like TCP
 *
 * Return: a new rpc_xprt on success; ERR_PTR(-EINVAL) for a non-absolute
 * socket path, ERR_PTR(-EAFNOSUPPORT) for a non-AF_LOCAL address family,
 * or the error propagated from xs_setup_xprt().
 */
static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
{
	struct sockaddr_un *sun = (struct sockaddr_un *)args->dstaddr;
	struct sock_xprt *transport;
	struct rpc_xprt *xprt;
	struct rpc_xprt *ret;

	/* Stream transport: use the TCP-sized slot table. */
	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
	if (IS_ERR(xprt))
		return xprt;
	transport = container_of(xprt, struct sock_xprt, xprt);

	xprt->prot = 0;	/* no IP protocol number for AF_LOCAL */
	/* Stream record marking header, expressed in 32-bit words. */
	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;

	/* Reuse the TCP connect/idle timeout defaults. */
	xprt->bind_timeout = XS_BIND_TO;
	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
	xprt->idle_timeout = XS_IDLE_DISC_TO;

	xprt->ops = &xs_local_ops;
	xprt->timeout = &xs_local_default_timeout;

	switch (sun->sun_family) {
	case AF_LOCAL:
		/* Only absolute filesystem pathnames are accepted. */
		if (sun->sun_path[0] != '/') {
			dprintk("RPC: bad AF_LOCAL address: %s\n",
					sun->sun_path);
			ret = ERR_PTR(-EINVAL);
			goto out_err;
		}
		/* The pathname is the complete address, so mark the
		 * transport bound up front — no rpcbind query needed. */
		xprt_set_bound(xprt);
		INIT_DELAYED_WORK(&transport->connect_worker,
				xs_local_setup_socket);
		xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
		break;
	default:
		ret = ERR_PTR(-EAFNOSUPPORT);
		goto out_err;
	}

	dprintk("RPC: set up xprt to %s via AF_LOCAL\n",
			xprt->address_strings[RPC_DISPLAY_ADDR]);

	/* Pin the module while the transport exists. */
	if (try_module_get(THIS_MODULE))
		return xprt;
	ret = ERR_PTR(-EINVAL);
out_err:
	xprt_free(xprt);
	return ret;
}
2588
2200static const struct rpc_timeout xs_udp_default_timeout = { 2589static const struct rpc_timeout xs_udp_default_timeout = {
2201 .to_initval = 5 * HZ, 2590 .to_initval = 5 * HZ,
2202 .to_maxval = 30 * HZ, 2591 .to_maxval = 30 * HZ,
@@ -2438,6 +2827,14 @@ out_err:
2438 return ret; 2827 return ret;
2439} 2828}
2440 2829
/*
 * Transport class descriptor for AF_LOCAL (named UNIX socket) RPC
 * transports; registered with the RPC core in init_socket_xprt() and
 * matched by the XPRT_TRANSPORT_LOCAL ident.
 */
static struct xprt_class xs_local_transport = {
	.list = LIST_HEAD_INIT(xs_local_transport.list),
	.name = "named UNIX socket",
	.owner = THIS_MODULE,
	.ident = XPRT_TRANSPORT_LOCAL,
	.setup = xs_setup_local,
};
2837
2441static struct xprt_class xs_udp_transport = { 2838static struct xprt_class xs_udp_transport = {
2442 .list = LIST_HEAD_INIT(xs_udp_transport.list), 2839 .list = LIST_HEAD_INIT(xs_udp_transport.list),
2443 .name = "udp", 2840 .name = "udp",
@@ -2473,6 +2870,7 @@ int init_socket_xprt(void)
2473 sunrpc_table_header = register_sysctl_table(sunrpc_table); 2870 sunrpc_table_header = register_sysctl_table(sunrpc_table);
2474#endif 2871#endif
2475 2872
2873 xprt_register_transport(&xs_local_transport);
2476 xprt_register_transport(&xs_udp_transport); 2874 xprt_register_transport(&xs_udp_transport);
2477 xprt_register_transport(&xs_tcp_transport); 2875 xprt_register_transport(&xs_tcp_transport);
2478 xprt_register_transport(&xs_bc_tcp_transport); 2876 xprt_register_transport(&xs_bc_tcp_transport);
@@ -2493,6 +2891,7 @@ void cleanup_socket_xprt(void)
2493 } 2891 }
2494#endif 2892#endif
2495 2893
2894 xprt_unregister_transport(&xs_local_transport);
2496 xprt_unregister_transport(&xs_udp_transport); 2895 xprt_unregister_transport(&xs_udp_transport);
2497 xprt_unregister_transport(&xs_tcp_transport); 2896 xprt_unregister_transport(&xs_tcp_transport);
2498 xprt_unregister_transport(&xs_bc_tcp_transport); 2897 xprt_unregister_transport(&xs_bc_tcp_transport);