295 files changed, 2422 insertions, 2650 deletions
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 274b32d12532..492e81df2968 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -387,26 +387,6 @@ Who: Tejun Heo <tj@kernel.org>
 
 ----------------------------
 
-What:	Support for lcd_switch and display_get in asus-laptop driver
-When:	March 2010
-Why:	These two features use non-standard interfaces. There are the
-	only features that really need multiple path to guess what's
-	the right method name on a specific laptop.
-
-	Removing them will allow to remove a lot of code an significantly
-	clean the drivers.
-
-	This will affect the backlight code which won't be able to know
-	if the backlight is on or off. The platform display file will also be
-	write only (like the one in eeepc-laptop).
-
-	This should'nt affect a lot of user because they usually know
-	when their display is on or off.
-
-Who:	Corentin Chary <corentin.chary@gmail.com>
-
-----------------------------
-
 What:	sysfs-class-rfkill state file
 When:	Feb 2014
 Files:	net/rfkill/core.c
diff --git a/MAINTAINERS b/MAINTAINERS
index 6b4b9cdec370..ec3600306289 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -184,10 +184,9 @@ F:	Documentation/filesystems/9p.txt
 F:	fs/9p/
 
 A2232 SERIAL BOARD DRIVER
-M:	Enver Haase <A2232@gmx.net>
 L:	linux-m68k@lists.linux-m68k.org
-S:	Maintained
-F:	drivers/char/ser_a2232*
+S:	Orphan
+F:	drivers/staging/generic_serial/ser_a2232*
 
 AACRAID SCSI RAID DRIVER
 M:	Adaptec OEM Raid Solutions <aacraid@adaptec.com>
@@ -877,6 +876,13 @@ F:	arch/arm/mach-mv78xx0/
 F:	arch/arm/mach-orion5x/
 F:	arch/arm/plat-orion/
 
+ARM/Orion SoC/Technologic Systems TS-78xx platform support
+M:	Alexander Clouter <alex@digriz.org.uk>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+W:	http://www.digriz.org.uk/ts78xx/kernel
+S:	Maintained
+F:	arch/arm/mach-orion5x/ts78xx-*
+
 ARM/MIOA701 MACHINE SUPPORT
 M:	Robert Jarzmik <robert.jarzmik@free.fr>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1063,7 +1069,7 @@ F:	arch/arm/mach-shmobile/
 F:	drivers/sh/
 
 ARM/TELECHIPS ARM ARCHITECTURE
-M:	"Hans J. Koch" <hjk@linutronix.de>
+M:	"Hans J. Koch" <hjk@hansjkoch.de>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/plat-tcc/
@@ -1823,11 +1829,10 @@ S:	Maintained
 F:	drivers/platform/x86/compal-laptop.c
 
 COMPUTONE INTELLIPORT MULTIPORT CARD
-M:	"Michael H. Warfield" <mhw@wittsend.com>
 W:	http://www.wittsend.com/computone.html
-S:	Maintained
+S:	Orphan
 F:	Documentation/serial/computone.txt
-F:	drivers/char/ip2/
+F:	drivers/staging/tty/ip2/
 
 CONEXANT ACCESSRUNNER USB DRIVER
 M:	Simon Arlott <cxacru@fire.lp0.eu>
@@ -2010,7 +2015,7 @@ F:	drivers/net/wan/cycx*
 CYCLADES ASYNC MUX DRIVER
 W:	http://www.cyclades.com/
 S:	Orphan
-F:	drivers/char/cyclades.c
+F:	drivers/tty/cyclades.c
 F:	include/linux/cyclades.h
 
 CYCLADES PC300 DRIVER
@@ -2124,8 +2129,8 @@ L:	Eng.Linux@digi.com
 W:	http://www.digi.com
 S:	Orphan
 F:	Documentation/serial/digiepca.txt
-F:	drivers/char/epca*
-F:	drivers/char/digi*
+F:	drivers/staging/tty/epca*
+F:	drivers/staging/tty/digi*
 
 DIOLAN U2C-12 I2C DRIVER
 M:	Guenter Roeck <guenter.roeck@ericsson.com>
@@ -4077,7 +4082,7 @@ F:	drivers/video/matrox/matroxfb_*
 F:	include/linux/matroxfb.h
 
 MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER
-M:	"Hans J. Koch" <hjk@linutronix.de>
+M:	"Hans J. Koch" <hjk@hansjkoch.de>
 L:	lm-sensors@lm-sensors.org
 S:	Maintained
 F:	Documentation/hwmon/max6650
@@ -4192,7 +4197,7 @@ MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD
 M:	Jiri Slaby <jirislaby@gmail.com>
 S:	Maintained
 F:	Documentation/serial/moxa-smartio
-F:	drivers/char/mxser.*
+F:	drivers/tty/mxser.*
 
 MSI LAPTOP SUPPORT
 M:	"Lee, Chun-Yi" <jlee@novell.com>
@@ -4234,7 +4239,7 @@ F:	sound/oss/msnd*
 
 MULTITECH MULTIPORT CARD (ISICOM)
 S:	Orphan
-F:	drivers/char/isicom.c
+F:	drivers/tty/isicom.c
 F:	include/linux/isicom.h
 
 MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
@@ -5273,14 +5278,14 @@ F:	drivers/memstick/host/r592.*
 RISCOM8 DRIVER
 S:	Orphan
 F:	Documentation/serial/riscom8.txt
-F:	drivers/char/riscom8*
+F:	drivers/staging/tty/riscom8*
 
 ROCKETPORT DRIVER
 P:	Comtrol Corp.
 W:	http://www.comtrol.com
 S:	Maintained
 F:	Documentation/serial/rocket.txt
-F:	drivers/char/rocket*
+F:	drivers/tty/rocket*
 
 ROSE NETWORK LAYER
 M:	Ralf Baechle <ralf@linux-mips.org>
@@ -5916,10 +5921,9 @@ F:	arch/arm/mach-spear6xx/spear600.c
 F:	arch/arm/mach-spear6xx/spear600_evb.c
 
 SPECIALIX IO8+ MULTIPORT SERIAL CARD DRIVER
-M:	Roger Wolff <R.E.Wolff@BitWizard.nl>
-S:	Supported
+S:	Orphan
 F:	Documentation/serial/specialix.txt
-F:	drivers/char/specialix*
+F:	drivers/staging/tty/specialix*
 
 SPI SUBSYSTEM
 M:	David Brownell <dbrownell@users.sourceforge.net>
@@ -5964,7 +5968,6 @@ F:	arch/alpha/kernel/srm_env.c
 
 STABLE BRANCH
 M:	Greg Kroah-Hartman <greg@kroah.com>
-M:	Chris Wright <chrisw@sous-sol.org>
 L:	stable@kernel.org
 S:	Maintained
 
@@ -6248,7 +6251,8 @@ M:	Greg Ungerer <gerg@uclinux.org>
 W:	http://www.uclinux.org/
 L:	uclinux-dev@uclinux.org (subscribers-only)
 S:	Maintained
-F:	arch/m68knommu/
+F:	arch/m68k/*/*_no.*
+F:	arch/m68k/include/asm/*_no.*
 
 UCLINUX FOR RENESAS H8/300 (H8300)
 M:	Yoshinori Sato <ysato@users.sourceforge.jp>
@@ -6618,7 +6622,7 @@ F:	fs/hostfs/
 F:	fs/hppfs/
 
 USERSPACE I/O (UIO)
-M:	"Hans J. Koch" <hjk@linutronix.de>
+M:	"Hans J. Koch" <hjk@hansjkoch.de>
 M:	Greg Kroah-Hartman <gregkh@suse.de>
 S:	Maintained
 F:	Documentation/DocBook/uio-howto.tmpl
@@ -6916,6 +6920,13 @@ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.
 S:	Maintained
 F:	drivers/platform/x86
 
+XEN NETWORK BACKEND DRIVER
+M:	Ian Campbell <ian.campbell@citrix.com>
+L:	xen-devel@lists.xensource.com (moderated for non-subscribers)
+L:	netdev@vger.kernel.org
+S:	Supported
+F:	drivers/net/xen-netback/*
+
 XEN PCI SUBSYSTEM
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 L:	xen-devel@lists.xensource.com (moderated for non-subscribers)
| @@ -1,7 +1,7 @@ | |||
| 1 | VERSION = 2 | 1 | VERSION = 2 |
| 2 | PATCHLEVEL = 6 | 2 | PATCHLEVEL = 6 |
| 3 | SUBLEVEL = 39 | 3 | SUBLEVEL = 39 |
| 4 | EXTRAVERSION = -rc2 | 4 | EXTRAVERSION = -rc3 |
| 5 | NAME = Flesh-Eating Bats with Fangs | 5 | NAME = Flesh-Eating Bats with Fangs |
| 6 | 6 | ||
| 7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index fdc9d4dbf85b..377a7a595b08 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1540,7 +1540,6 @@ config HIGHMEM
 config HIGHPTE
 	bool "Allocate 2nd-level pagetables from highmem"
 	depends on HIGHMEM
-	depends on !OUTER_CACHE
 
 config HW_PERF_EVENTS
 	bool "Enable hardware performance counter support for perf events"
@@ -2012,6 +2011,8 @@ source "kernel/power/Kconfig"
 
 config ARCH_SUSPEND_POSSIBLE
 	depends on !ARCH_S5P64X0 && !ARCH_S5P6442
+	depends on CPU_ARM920T || CPU_ARM926T || CPU_SA1100 || \
+		CPU_V6 || CPU_V6K || CPU_V7 || CPU_XSC3 || CPU_XSCALE
 	def_bool y
 
 endmenu
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 494224a9b459..03d01d783e3b 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -63,17 +63,6 @@ config DEBUG_USER
 	      8 - SIGSEGV faults
 	     16 - SIGBUS faults
 
-config DEBUG_ERRORS
-	bool "Verbose kernel error messages"
-	depends on DEBUG_KERNEL
-	help
-	  This option controls verbose debugging information which can be
-	  printed when the kernel detects an internal error. This debugging
-	  information is useful to kernel hackers when tracking down problems,
-	  but mostly meaningless to other people. It's safe to say Y unless
-	  you are concerned with the code size or don't want to see these
-	  messages.
-
 config DEBUG_STACK_USAGE
 	bool "Enable stack utilization instrumentation"
 	depends on DEBUG_KERNEL
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index e7521bca2c35..6ea9b6f3607a 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -16,5 +16,4 @@ obj-$(CONFIG_SHARP_SCOOP)	+= scoop.o
 obj-$(CONFIG_ARCH_IXP2000)	+= uengine.o
 obj-$(CONFIG_ARCH_IXP23XX)	+= uengine.o
 obj-$(CONFIG_PCI_HOST_ITE8152)	+= it8152.o
-obj-$(CONFIG_COMMON_CLKDEV)	+= clkdev.o
 obj-$(CONFIG_ARM_TIMER_SP804)	+= timer-sp.o
diff --git a/arch/arm/include/asm/thread_notify.h b/arch/arm/include/asm/thread_notify.h
index c4391ba20350..1dc980675894 100644
--- a/arch/arm/include/asm/thread_notify.h
+++ b/arch/arm/include/asm/thread_notify.h
@@ -43,6 +43,7 @@ static inline void thread_notify(unsigned long rc, struct thread_info *thread)
 #define THREAD_NOTIFY_FLUSH	0
 #define THREAD_NOTIFY_EXIT	1
 #define THREAD_NOTIFY_SWITCH	2
+#define THREAD_NOTIFY_COPY	3
 
 #endif
 #endif
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 74554f1742d7..8d95446150a3 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -29,7 +29,7 @@ obj-$(CONFIG_MODULES)	+= armksyms.o module.o
 obj-$(CONFIG_ARTHUR)		+= arthur.o
 obj-$(CONFIG_ISA_DMA)		+= dma-isa.o
 obj-$(CONFIG_PCI)		+= bios32.o isa.o
-obj-$(CONFIG_PM)		+= sleep.o
+obj-$(CONFIG_PM_SLEEP)		+= sleep.o
 obj-$(CONFIG_HAVE_SCHED_CLOCK)	+= sched_clock.o
 obj-$(CONFIG_SMP)		+= smp.o smp_tlb.o
 obj-$(CONFIG_HAVE_ARM_SCU)	+= smp_scu.o
diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c
index d4a0da1e48f4..9b05c6a0dcea 100644
--- a/arch/arm/kernel/elf.c
+++ b/arch/arm/kernel/elf.c
@@ -40,15 +40,22 @@ EXPORT_SYMBOL(elf_check_arch);
 void elf_set_personality(const struct elf32_hdr *x)
 {
 	unsigned int eflags = x->e_flags;
-	unsigned int personality = PER_LINUX_32BIT;
+	unsigned int personality = current->personality & ~PER_MASK;
+
+	/*
+	 * We only support Linux ELF executables, so always set the
+	 * personality to LINUX.
+	 */
+	personality |= PER_LINUX;
 
 	/*
 	 * APCS-26 is only valid for OABI executables
 	 */
-	if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN) {
-		if (eflags & EF_ARM_APCS_26)
-			personality = PER_LINUX;
-	}
+	if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN &&
+	    (eflags & EF_ARM_APCS_26))
+		personality &= ~ADDR_LIMIT_32BIT;
+	else
+		personality |= ADDR_LIMIT_32BIT;
 
 	set_personality(personality);
 
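The elf.c hunk above stops hard-coding the personality to PER_LINUX_32BIT and instead preserves the flag bits above PER_MASK (for example ADDR_NO_RANDOMIZE) across exec, forcing only the base personality and the ADDR_LIMIT_32BIT flag. A minimal sketch of that bit handling, for illustration only; the helper name and its second parameter are invented here and are not part of the patch:

```c
#include <linux/personality.h>

/* Illustrative only -- mirrors the logic of the reworked elf_set_personality(). */
static unsigned int sketch_personality(unsigned int old, int is_oabi_apcs26)
{
	unsigned int p = old & ~PER_MASK;	/* keep flag bits, drop the base personality */

	p |= PER_LINUX;				/* base personality is always Linux */

	if (is_oabi_apcs26)
		p &= ~ADDR_LIMIT_32BIT;		/* OABI/APCS-26: 26-bit address limit */
	else
		p |= ADDR_LIMIT_32BIT;

	return p;
}
```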
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 8dbc126f7152..87acc25d7a3e 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -868,6 +868,13 @@ static void reset_ctrl_regs(void *info)
 		 */
 		asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
 		isb();
+
+		/*
+		 * Clear any configured vector-catch events before
+		 * enabling monitor mode.
+		 */
+		asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0));
+		isb();
 	}
 
 	if (enable_monitor_mode())
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 69cfee0fe00f..979da3947f42 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -221,7 +221,7 @@ again:
 	prev_raw_count &= armpmu->max_period;
 
 	if (overflow)
-		delta = armpmu->max_period - prev_raw_count + new_raw_count;
+		delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
 	else
 		delta = new_raw_count - prev_raw_count;
 
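The perf_event.c change above fixes an off-by-one in the overflow path: the wrap from max_period to 0 is itself one counted event. A small stand-alone illustration of the arithmetic, using made-up values and an assumed 8-bit counter width:

```c
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	/* Assumed toy values: an 8-bit counter that overflowed from 0xfe and landed on 0x03. */
	uint64_t max_period = 0xff, prev = 0xfe, cur = 0x03;

	uint64_t old_delta   = max_period - prev + cur;		/* 4: loses the 0xff -> 0x00 wrap */
	uint64_t fixed_delta = max_period - prev + cur + 1;	/* 5: 0xfe->0xff, 0xff->0x00, 0x00->0x03 */

	printf("old=%" PRIu64 " fixed=%" PRIu64 "\n", old_delta, fixed_delta);
	return 0;
}
```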
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 94bbedbed639..5e1e54197227 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -372,6 +372,8 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
 	if (clone_flags & CLONE_SETTLS)
 		thread->tp_value = regs->ARM_r3;
 
+	thread_notify(THREAD_NOTIFY_COPY, thread);
+
 	return 0;
 }
 
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index f0000e188c8c..3b54ad19d489 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -410,8 +410,7 @@ static int bad_syscall(int n, struct pt_regs *regs)
 	struct thread_info *thread = current_thread_info();
 	siginfo_t info;
 
-	if (current->personality != PER_LINUX &&
-	    current->personality != PER_LINUX_32BIT &&
+	if ((current->personality & PER_MASK) != PER_LINUX &&
 	    thread->exec_domain->handler) {
 		thread->exec_domain->handler(n, regs);
 		return regs->ARM_r0;
diff --git a/arch/arm/mach-mmp/include/mach/gpio.h b/arch/arm/mach-mmp/include/mach/gpio.h
index ee8b02ed8011..7bfb827f3fe3 100644
--- a/arch/arm/mach-mmp/include/mach/gpio.h
+++ b/arch/arm/mach-mmp/include/mach/gpio.h
@@ -10,7 +10,7 @@
 #define BANK_OFF(n)	(((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
 #define GPIO_REG(x)	(*((volatile u32 *)(GPIO_REGS_VIRT + (x))))
 
-#define NR_BUILTIN_GPIO	(192)
+#define NR_BUILTIN_GPIO	IRQ_GPIO_NUM
 
 #define gpio_to_bank(gpio)	((gpio) >> 5)
 #define gpio_to_irq(gpio)	(IRQ_GPIO_START + (gpio))
diff --git a/arch/arm/mach-mmp/include/mach/mfp-pxa168.h b/arch/arm/mach-mmp/include/mach/mfp-pxa168.h
index 4621067c7720..713be155a44d 100644
--- a/arch/arm/mach-mmp/include/mach/mfp-pxa168.h
+++ b/arch/arm/mach-mmp/include/mach/mfp-pxa168.h
@@ -8,6 +8,15 @@
 #define MFP_DRIVE_MEDIUM	(0x2 << 13)
 #define MFP_DRIVE_FAST		(0x3 << 13)
 
+#undef MFP_CFG
+#undef MFP_CFG_DRV
+
+#define MFP_CFG(pin, af)		\
+	(MFP_LPM_INPUT | MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_DRIVE_MEDIUM)
+
+#define MFP_CFG_DRV(pin, af, drv)	\
+	(MFP_LPM_INPUT | MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_DRIVE_##drv)
+
 /* GPIO */
 #define GPIO0_GPIO		MFP_CFG(GPIO0, AF5)
 #define GPIO1_GPIO		MFP_CFG(GPIO1, AF5)
diff --git a/arch/arm/mach-pxa/include/mach/gpio.h b/arch/arm/mach-pxa/include/mach/gpio.h
index b024a8b37439..c4639502efca 100644
--- a/arch/arm/mach-pxa/include/mach/gpio.h
+++ b/arch/arm/mach-pxa/include/mach/gpio.h
@@ -99,11 +99,24 @@
 #define GAFR(x)	GPIO_REG(0x54 + (((x) & 0x70) >> 2))
 
 
-#define NR_BUILTIN_GPIO 128
+#define NR_BUILTIN_GPIO		PXA_GPIO_IRQ_NUM
 
 #define gpio_to_bank(gpio)	((gpio) >> 5)
 #define gpio_to_irq(gpio)	IRQ_GPIO(gpio)
-#define irq_to_gpio(irq)	IRQ_TO_GPIO(irq)
+
+static inline int irq_to_gpio(unsigned int irq)
+{
+	int gpio;
+
+	if (irq == IRQ_GPIO0 || irq == IRQ_GPIO1)
+		return irq - IRQ_GPIO0;
+
+	gpio = irq - PXA_GPIO_IRQ_BASE;
+	if (gpio >= 2 && gpio < NR_BUILTIN_GPIO)
+		return gpio;
+
+	return -1;
+}
 
 #ifdef CONFIG_CPU_PXA26x
 /* GPIO86/87/88/89 on PXA26x have their direction bits in GPDR2 inverted,
diff --git a/arch/arm/mach-pxa/include/mach/irqs.h b/arch/arm/mach-pxa/include/mach/irqs.h
index a4285fc00878..038402404e39 100644
--- a/arch/arm/mach-pxa/include/mach/irqs.h
+++ b/arch/arm/mach-pxa/include/mach/irqs.h
@@ -93,9 +93,6 @@
 #define GPIO_2_x_TO_IRQ(x)	(PXA_GPIO_IRQ_BASE + (x))
 #define IRQ_GPIO(x)	(((x) < 2) ? (IRQ_GPIO0 + (x)) : GPIO_2_x_TO_IRQ(x))
 
-#define IRQ_TO_GPIO_2_x(i)	((i) - PXA_GPIO_IRQ_BASE)
-#define IRQ_TO_GPIO(i)	(((i) < IRQ_GPIO(2)) ? ((i) - IRQ_GPIO0) : IRQ_TO_GPIO_2_x(i))
-
 /*
  * The following interrupts are for board specific purposes. Since
  * the kernel can only run on one machine at a time, we can re-use
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c
index 6bde5956358d..a4af8c52d7ee 100644
--- a/arch/arm/mach-pxa/pxa25x.c
+++ b/arch/arm/mach-pxa/pxa25x.c
@@ -285,7 +285,7 @@ static inline void pxa25x_init_pm(void) {}
 
 static int pxa25x_set_wake(struct irq_data *d, unsigned int on)
 {
-	int gpio = IRQ_TO_GPIO(d->irq);
+	int gpio = irq_to_gpio(d->irq);
 	uint32_t mask = 0;
 
 	if (gpio >= 0 && gpio < 85)
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index 1cb5d0f9723f..909756eaf4b7 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -345,7 +345,7 @@ static inline void pxa27x_init_pm(void) {}
  */
 static int pxa27x_set_wake(struct irq_data *d, unsigned int on)
 {
-	int gpio = IRQ_TO_GPIO(d->irq);
+	int gpio = irq_to_gpio(d->irq);
 	uint32_t mask;
 
 	if (gpio >= 0 && gpio < 128)
diff --git a/arch/arm/mach-tegra/gpio.c b/arch/arm/mach-tegra/gpio.c
index 76a3f654220f..65a1aba6823d 100644
--- a/arch/arm/mach-tegra/gpio.c
+++ b/arch/arm/mach-tegra/gpio.c
@@ -257,7 +257,8 @@ static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
 void tegra_gpio_resume(void)
 {
 	unsigned long flags;
-	int b, p, i;
+	int b;
+	int p;
 
 	local_irq_save(flags);
 
@@ -280,7 +281,8 @@ void tegra_gpio_resume(void)
 void tegra_gpio_suspend(void)
 {
 	unsigned long flags;
-	int b, p, i;
+	int b;
+	int p;
 
 	local_irq_save(flags);
 	for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) {
diff --git a/arch/arm/mach-tegra/tegra2_clocks.c b/arch/arm/mach-tegra/tegra2_clocks.c
index 6d7c4eea4dcb..4459470c052d 100644
--- a/arch/arm/mach-tegra/tegra2_clocks.c
+++ b/arch/arm/mach-tegra/tegra2_clocks.c
@@ -1362,14 +1362,15 @@ static int tegra_clk_shared_bus_set_rate(struct clk *c, unsigned long rate)
 {
 	unsigned long flags;
 	int ret;
+	long new_rate = rate;
 
-	rate = clk_round_rate(c->parent, rate);
-	if (rate < 0)
-		return rate;
+	new_rate = clk_round_rate(c->parent, new_rate);
+	if (new_rate < 0)
+		return new_rate;
 
 	spin_lock_irqsave(&c->parent->spinlock, flags);
 
-	c->u.shared_bus_user.rate = rate;
+	c->u.shared_bus_user.rate = new_rate;
 	ret = tegra_clk_shared_bus_update(c->parent);
 
 	spin_unlock_irqrestore(&c->parent->spinlock, flags);
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index afe209e1e1f8..74be05f3e03a 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -7,6 +7,7 @@
 #include <linux/shm.h>
 #include <linux/sched.h>
 #include <linux/io.h>
+#include <linux/personality.h>
 #include <linux/random.h>
 #include <asm/cputype.h>
 #include <asm/system.h>
@@ -82,7 +83,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 			mm->cached_hole_size = 0;
 		}
 		/* 8 bits of randomness in 20 address space bits */
-		if (current->flags & PF_RANDOMIZE)
+		if ((current->flags & PF_RANDOMIZE) &&
+		    !(current->personality & ADDR_NO_RANDOMIZE))
 			addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT;
 
 full_search:
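For context on the mmap.c hunk: the randomization adds at most 255 pages to the search base, i.e. just under 1 MiB with 4 KiB pages, which is the "8 bits of randomness in 20 address space bits" in the comment; the fix additionally honours ADDR_NO_RANDOMIZE in the task personality (set, for example, by `setarch -R`). A rough stand-alone sketch of the arithmetic, assuming a PAGE_SHIFT of 12:

```c
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

int main(void)
{
	unsigned int max_r = (1 << 8) - 1;	/* largest value of get_random_int() % (1 << 8) */
	unsigned long max_off = (unsigned long)max_r << PAGE_SHIFT;

	printf("max mmap base offset: %lu bytes (within a 1 MiB window)\n", max_off);
	return 0;
}
```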
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index b46eb21f05c7..bf8a1d1cccb6 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S | |||
| @@ -390,7 +390,7 @@ ENTRY(cpu_arm920_set_pte_ext) | |||
| 390 | /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ | 390 | /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ |
| 391 | .globl cpu_arm920_suspend_size | 391 | .globl cpu_arm920_suspend_size |
| 392 | .equ cpu_arm920_suspend_size, 4 * 3 | 392 | .equ cpu_arm920_suspend_size, 4 * 3 |
| 393 | #ifdef CONFIG_PM | 393 | #ifdef CONFIG_PM_SLEEP |
| 394 | ENTRY(cpu_arm920_do_suspend) | 394 | ENTRY(cpu_arm920_do_suspend) |
| 395 | stmfd sp!, {r4 - r7, lr} | 395 | stmfd sp!, {r4 - r7, lr} |
| 396 | mrc p15, 0, r4, c13, c0, 0 @ PID | 396 | mrc p15, 0, r4, c13, c0, 0 @ PID |
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 6a4bdb2c94a7..0ed85d930c09 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S | |||
| @@ -404,7 +404,7 @@ ENTRY(cpu_arm926_set_pte_ext) | |||
| 404 | /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ | 404 | /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ |
| 405 | .globl cpu_arm926_suspend_size | 405 | .globl cpu_arm926_suspend_size |
| 406 | .equ cpu_arm926_suspend_size, 4 * 3 | 406 | .equ cpu_arm926_suspend_size, 4 * 3 |
| 407 | #ifdef CONFIG_PM | 407 | #ifdef CONFIG_PM_SLEEP |
| 408 | ENTRY(cpu_arm926_do_suspend) | 408 | ENTRY(cpu_arm926_do_suspend) |
| 409 | stmfd sp!, {r4 - r7, lr} | 409 | stmfd sp!, {r4 - r7, lr} |
| 410 | mrc p15, 0, r4, c13, c0, 0 @ PID | 410 | mrc p15, 0, r4, c13, c0, 0 @ PID |
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index 74483d1977fe..184a9c997e36 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S | |||
| @@ -171,7 +171,7 @@ ENTRY(cpu_sa1100_set_pte_ext) | |||
| 171 | 171 | ||
| 172 | .globl cpu_sa1100_suspend_size | 172 | .globl cpu_sa1100_suspend_size |
| 173 | .equ cpu_sa1100_suspend_size, 4*4 | 173 | .equ cpu_sa1100_suspend_size, 4*4 |
| 174 | #ifdef CONFIG_PM | 174 | #ifdef CONFIG_PM_SLEEP |
| 175 | ENTRY(cpu_sa1100_do_suspend) | 175 | ENTRY(cpu_sa1100_do_suspend) |
| 176 | stmfd sp!, {r4 - r7, lr} | 176 | stmfd sp!, {r4 - r7, lr} |
| 177 | mrc p15, 0, r4, c3, c0, 0 @ domain ID | 177 | mrc p15, 0, r4, c3, c0, 0 @ domain ID |
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index bfa0c9f611c5..7c99cb4c8e4f 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S | |||
| @@ -124,7 +124,7 @@ ENTRY(cpu_v6_set_pte_ext) | |||
| 124 | /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */ | 124 | /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */ |
| 125 | .globl cpu_v6_suspend_size | 125 | .globl cpu_v6_suspend_size |
| 126 | .equ cpu_v6_suspend_size, 4 * 8 | 126 | .equ cpu_v6_suspend_size, 4 * 8 |
| 127 | #ifdef CONFIG_PM | 127 | #ifdef CONFIG_PM_SLEEP |
| 128 | ENTRY(cpu_v6_do_suspend) | 128 | ENTRY(cpu_v6_do_suspend) |
| 129 | stmfd sp!, {r4 - r11, lr} | 129 | stmfd sp!, {r4 - r11, lr} |
| 130 | mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID | 130 | mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID |
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index c35618e42f6f..babfba09c89f 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
| @@ -211,7 +211,7 @@ cpu_v7_name: | |||
| 211 | /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */ | 211 | /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */ |
| 212 | .globl cpu_v7_suspend_size | 212 | .globl cpu_v7_suspend_size |
| 213 | .equ cpu_v7_suspend_size, 4 * 8 | 213 | .equ cpu_v7_suspend_size, 4 * 8 |
| 214 | #ifdef CONFIG_PM | 214 | #ifdef CONFIG_PM_SLEEP |
| 215 | ENTRY(cpu_v7_do_suspend) | 215 | ENTRY(cpu_v7_do_suspend) |
| 216 | stmfd sp!, {r4 - r11, lr} | 216 | stmfd sp!, {r4 - r11, lr} |
| 217 | mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID | 217 | mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID |
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index 63d8b2044e84..596213699f37 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S | |||
| @@ -417,7 +417,7 @@ ENTRY(cpu_xsc3_set_pte_ext) | |||
| 417 | 417 | ||
| 418 | .globl cpu_xsc3_suspend_size | 418 | .globl cpu_xsc3_suspend_size |
| 419 | .equ cpu_xsc3_suspend_size, 4 * 8 | 419 | .equ cpu_xsc3_suspend_size, 4 * 8 |
| 420 | #ifdef CONFIG_PM | 420 | #ifdef CONFIG_PM_SLEEP |
| 421 | ENTRY(cpu_xsc3_do_suspend) | 421 | ENTRY(cpu_xsc3_do_suspend) |
| 422 | stmfd sp!, {r4 - r10, lr} | 422 | stmfd sp!, {r4 - r10, lr} |
| 423 | mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode | 423 | mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode |
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 086038cd86ab..ce233bcbf506 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S | |||
| @@ -518,7 +518,7 @@ ENTRY(cpu_xscale_set_pte_ext) | |||
| 518 | 518 | ||
| 519 | .globl cpu_xscale_suspend_size | 519 | .globl cpu_xscale_suspend_size |
| 520 | .equ cpu_xscale_suspend_size, 4 * 7 | 520 | .equ cpu_xscale_suspend_size, 4 * 7 |
| 521 | #ifdef CONFIG_PM | 521 | #ifdef CONFIG_PM_SLEEP |
| 522 | ENTRY(cpu_xscale_do_suspend) | 522 | ENTRY(cpu_xscale_do_suspend) |
| 523 | stmfd sp!, {r4 - r10, lr} | 523 | stmfd sp!, {r4 - r10, lr} |
| 524 | mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode | 524 | mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode |
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index bbf3da012afd..f74695075e64 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c | |||
| @@ -78,6 +78,14 @@ static void vfp_thread_exit(struct thread_info *thread) | |||
| 78 | put_cpu(); | 78 | put_cpu(); |
| 79 | } | 79 | } |
| 80 | 80 | ||
| 81 | static void vfp_thread_copy(struct thread_info *thread) | ||
| 82 | { | ||
| 83 | struct thread_info *parent = current_thread_info(); | ||
| 84 | |||
| 85 | vfp_sync_hwstate(parent); | ||
| 86 | thread->vfpstate = parent->vfpstate; | ||
| 87 | } | ||
| 88 | |||
| 81 | /* | 89 | /* |
| 82 | * When this function is called with the following 'cmd's, the following | 90 | * When this function is called with the following 'cmd's, the following |
| 83 | * is true while this function is being run: | 91 | * is true while this function is being run: |
| @@ -104,12 +112,17 @@ static void vfp_thread_exit(struct thread_info *thread) | |||
| 104 | static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v) | 112 | static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v) |
| 105 | { | 113 | { |
| 106 | struct thread_info *thread = v; | 114 | struct thread_info *thread = v; |
| 115 | u32 fpexc; | ||
| 116 | #ifdef CONFIG_SMP | ||
| 117 | unsigned int cpu; | ||
| 118 | #endif | ||
| 107 | 119 | ||
| 108 | if (likely(cmd == THREAD_NOTIFY_SWITCH)) { | 120 | switch (cmd) { |
| 109 | u32 fpexc = fmrx(FPEXC); | 121 | case THREAD_NOTIFY_SWITCH: |
| 122 | fpexc = fmrx(FPEXC); | ||
| 110 | 123 | ||
| 111 | #ifdef CONFIG_SMP | 124 | #ifdef CONFIG_SMP |
| 112 | unsigned int cpu = thread->cpu; | 125 | cpu = thread->cpu; |
| 113 | 126 | ||
| 114 | /* | 127 | /* |
| 115 | * On SMP, if VFP is enabled, save the old state in | 128 | * On SMP, if VFP is enabled, save the old state in |
| @@ -134,13 +147,20 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v) | |||
| 134 | * old state. | 147 | * old state. |
| 135 | */ | 148 | */ |
| 136 | fmxr(FPEXC, fpexc & ~FPEXC_EN); | 149 | fmxr(FPEXC, fpexc & ~FPEXC_EN); |
| 137 | return NOTIFY_DONE; | 150 | break; |
| 138 | } | ||
| 139 | 151 | ||
| 140 | if (cmd == THREAD_NOTIFY_FLUSH) | 152 | case THREAD_NOTIFY_FLUSH: |
| 141 | vfp_thread_flush(thread); | 153 | vfp_thread_flush(thread); |
| 142 | else | 154 | break; |
| 155 | |||
| 156 | case THREAD_NOTIFY_EXIT: | ||
| 143 | vfp_thread_exit(thread); | 157 | vfp_thread_exit(thread); |
| 158 | break; | ||
| 159 | |||
| 160 | case THREAD_NOTIFY_COPY: | ||
| 161 | vfp_thread_copy(thread); | ||
| 162 | break; | ||
| 163 | } | ||
| 144 | 164 | ||
| 145 | return NOTIFY_DONE; | 165 | return NOTIFY_DONE; |
| 146 | } | 166 | } |
diff --git a/arch/avr32/include/asm/setup.h b/arch/avr32/include/asm/setup.h index ff5b7cf6be4d..160543dbec7e 100644 --- a/arch/avr32/include/asm/setup.h +++ b/arch/avr32/include/asm/setup.h | |||
| @@ -94,6 +94,13 @@ struct tag_ethernet { | |||
| 94 | 94 | ||
| 95 | #define ETH_INVALID_PHY 0xff | 95 | #define ETH_INVALID_PHY 0xff |
| 96 | 96 | ||
| 97 | /* board information */ | ||
| 98 | #define ATAG_BOARDINFO 0x54410008 | ||
| 99 | |||
| 100 | struct tag_boardinfo { | ||
| 101 | u32 board_number; | ||
| 102 | }; | ||
| 103 | |||
| 97 | struct tag { | 104 | struct tag { |
| 98 | struct tag_header hdr; | 105 | struct tag_header hdr; |
| 99 | union { | 106 | union { |
| @@ -102,6 +109,7 @@ struct tag { | |||
| 102 | struct tag_cmdline cmdline; | 109 | struct tag_cmdline cmdline; |
| 103 | struct tag_clock clock; | 110 | struct tag_clock clock; |
| 104 | struct tag_ethernet ethernet; | 111 | struct tag_ethernet ethernet; |
| 112 | struct tag_boardinfo boardinfo; | ||
| 105 | } u; | 113 | } u; |
| 106 | }; | 114 | }; |
| 107 | 115 | ||
| @@ -128,6 +136,7 @@ extern struct tag *bootloader_tags; | |||
| 128 | 136 | ||
| 129 | extern resource_size_t fbmem_start; | 137 | extern resource_size_t fbmem_start; |
| 130 | extern resource_size_t fbmem_size; | 138 | extern resource_size_t fbmem_size; |
| 139 | extern u32 board_number; | ||
| 131 | 140 | ||
| 132 | void setup_processor(void); | 141 | void setup_processor(void); |
| 133 | 142 | ||
diff --git a/arch/avr32/kernel/setup.c b/arch/avr32/kernel/setup.c index 5c7083916c33..bb0974cce4ac 100644 --- a/arch/avr32/kernel/setup.c +++ b/arch/avr32/kernel/setup.c | |||
| @@ -391,6 +391,21 @@ static int __init parse_tag_clock(struct tag *tag) | |||
| 391 | __tagtable(ATAG_CLOCK, parse_tag_clock); | 391 | __tagtable(ATAG_CLOCK, parse_tag_clock); |
| 392 | 392 | ||
| 393 | /* | 393 | /* |
| 394 | * The board_number correspond to the bd->bi_board_number in U-Boot. This | ||
| 395 | * parameter is only available during initialisation and can be used in some | ||
| 396 | * kind of board identification. | ||
| 397 | */ | ||
| 398 | u32 __initdata board_number; | ||
| 399 | |||
| 400 | static int __init parse_tag_boardinfo(struct tag *tag) | ||
| 401 | { | ||
| 402 | board_number = tag->u.boardinfo.board_number; | ||
| 403 | |||
| 404 | return 0; | ||
| 405 | } | ||
| 406 | __tagtable(ATAG_BOARDINFO, parse_tag_boardinfo); | ||
| 407 | |||
| 408 | /* | ||
| 394 | * Scan the tag table for this tag, and call its parse function. The | 409 | * Scan the tag table for this tag, and call its parse function. The |
| 395 | * tag table is built by the linker from all the __tagtable | 410 | * tag table is built by the linker from all the __tagtable |
| 396 | * declarations. | 411 | * declarations. |
diff --git a/arch/avr32/kernel/traps.c b/arch/avr32/kernel/traps.c index b91b2044af9c..7aa25756412f 100644 --- a/arch/avr32/kernel/traps.c +++ b/arch/avr32/kernel/traps.c | |||
| @@ -95,28 +95,6 @@ void _exception(long signr, struct pt_regs *regs, int code, | |||
| 95 | info.si_code = code; | 95 | info.si_code = code; |
| 96 | info.si_addr = (void __user *)addr; | 96 | info.si_addr = (void __user *)addr; |
| 97 | force_sig_info(signr, &info, current); | 97 | force_sig_info(signr, &info, current); |
| 98 | |||
| 99 | /* | ||
| 100 | * Init gets no signals that it doesn't have a handler for. | ||
| 101 | * That's all very well, but if it has caused a synchronous | ||
| 102 | * exception and we ignore the resulting signal, it will just | ||
| 103 | * generate the same exception over and over again and we get | ||
| 104 | * nowhere. Better to kill it and let the kernel panic. | ||
| 105 | */ | ||
| 106 | if (is_global_init(current)) { | ||
| 107 | __sighandler_t handler; | ||
| 108 | |||
| 109 | spin_lock_irq(¤t->sighand->siglock); | ||
| 110 | handler = current->sighand->action[signr-1].sa.sa_handler; | ||
| 111 | spin_unlock_irq(¤t->sighand->siglock); | ||
| 112 | if (handler == SIG_DFL) { | ||
| 113 | /* init has generated a synchronous exception | ||
| 114 | and it doesn't have a handler for the signal */ | ||
| 115 | printk(KERN_CRIT "init has generated signal %ld " | ||
| 116 | "but has no handler for it\n", signr); | ||
| 117 | do_exit(signr); | ||
| 118 | } | ||
| 119 | } | ||
| 120 | } | 98 | } |
| 121 | 99 | ||
| 122 | asmlinkage void do_nmi(unsigned long ecr, struct pt_regs *regs) | 100 | asmlinkage void do_nmi(unsigned long ecr, struct pt_regs *regs) |
diff --git a/arch/avr32/mach-at32ap/clock.c b/arch/avr32/mach-at32ap/clock.c index 442f08c5e641..86925fd6ea5b 100644 --- a/arch/avr32/mach-at32ap/clock.c +++ b/arch/avr32/mach-at32ap/clock.c | |||
| @@ -35,22 +35,30 @@ void at32_clk_register(struct clk *clk) | |||
| 35 | spin_unlock(&clk_list_lock); | 35 | spin_unlock(&clk_list_lock); |
| 36 | } | 36 | } |
| 37 | 37 | ||
| 38 | struct clk *clk_get(struct device *dev, const char *id) | 38 | static struct clk *__clk_get(struct device *dev, const char *id) |
| 39 | { | 39 | { |
| 40 | struct clk *clk; | 40 | struct clk *clk; |
| 41 | 41 | ||
| 42 | spin_lock(&clk_list_lock); | ||
| 43 | |||
| 44 | list_for_each_entry(clk, &at32_clock_list, list) { | 42 | list_for_each_entry(clk, &at32_clock_list, list) { |
| 45 | if (clk->dev == dev && strcmp(id, clk->name) == 0) { | 43 | if (clk->dev == dev && strcmp(id, clk->name) == 0) { |
| 46 | spin_unlock(&clk_list_lock); | ||
| 47 | return clk; | 44 | return clk; |
| 48 | } | 45 | } |
| 49 | } | 46 | } |
| 50 | 47 | ||
| 51 | spin_unlock(&clk_list_lock); | ||
| 52 | return ERR_PTR(-ENOENT); | 48 | return ERR_PTR(-ENOENT); |
| 53 | } | 49 | } |
| 50 | |||
| 51 | struct clk *clk_get(struct device *dev, const char *id) | ||
| 52 | { | ||
| 53 | struct clk *clk; | ||
| 54 | |||
| 55 | spin_lock(&clk_list_lock); | ||
| 56 | clk = __clk_get(dev, id); | ||
| 57 | spin_unlock(&clk_list_lock); | ||
| 58 | |||
| 59 | return clk; | ||
| 60 | } | ||
| 61 | |||
| 54 | EXPORT_SYMBOL(clk_get); | 62 | EXPORT_SYMBOL(clk_get); |
| 55 | 63 | ||
| 56 | void clk_put(struct clk *clk) | 64 | void clk_put(struct clk *clk) |
| @@ -257,15 +265,15 @@ static int clk_show(struct seq_file *s, void *unused) | |||
| 257 | spin_lock(&clk_list_lock); | 265 | spin_lock(&clk_list_lock); |
| 258 | 266 | ||
| 259 | /* show clock tree as derived from the three oscillators */ | 267 | /* show clock tree as derived from the three oscillators */ |
| 260 | clk = clk_get(NULL, "osc32k"); | 268 | clk = __clk_get(NULL, "osc32k"); |
| 261 | dump_clock(clk, &r); | 269 | dump_clock(clk, &r); |
| 262 | clk_put(clk); | 270 | clk_put(clk); |
| 263 | 271 | ||
| 264 | clk = clk_get(NULL, "osc0"); | 272 | clk = __clk_get(NULL, "osc0"); |
| 265 | dump_clock(clk, &r); | 273 | dump_clock(clk, &r); |
| 266 | clk_put(clk); | 274 | clk_put(clk); |
| 267 | 275 | ||
| 268 | clk = clk_get(NULL, "osc1"); | 276 | clk = __clk_get(NULL, "osc1"); |
| 269 | dump_clock(clk, &r); | 277 | dump_clock(clk, &r); |
| 270 | clk_put(clk); | 278 | clk_put(clk); |
| 271 | 279 | ||
diff --git a/arch/avr32/mach-at32ap/extint.c b/arch/avr32/mach-at32ap/extint.c index 47ba4b9b6db1..fbc2aeaebddb 100644 --- a/arch/avr32/mach-at32ap/extint.c +++ b/arch/avr32/mach-at32ap/extint.c | |||
| @@ -61,34 +61,34 @@ struct eic { | |||
| 61 | static struct eic *nmi_eic; | 61 | static struct eic *nmi_eic; |
| 62 | static bool nmi_enabled; | 62 | static bool nmi_enabled; |
| 63 | 63 | ||
| 64 | static void eic_ack_irq(struct irq_chip *d) | 64 | static void eic_ack_irq(struct irq_data *d) |
| 65 | { | 65 | { |
| 66 | struct eic *eic = irq_data_get_irq_chip_data(data); | 66 | struct eic *eic = irq_data_get_irq_chip_data(d); |
| 67 | eic_writel(eic, ICR, 1 << (d->irq - eic->first_irq)); | 67 | eic_writel(eic, ICR, 1 << (d->irq - eic->first_irq)); |
| 68 | } | 68 | } |
| 69 | 69 | ||
| 70 | static void eic_mask_irq(struct irq_chip *d) | 70 | static void eic_mask_irq(struct irq_data *d) |
| 71 | { | 71 | { |
| 72 | struct eic *eic = irq_data_get_irq_chip_data(data); | 72 | struct eic *eic = irq_data_get_irq_chip_data(d); |
| 73 | eic_writel(eic, IDR, 1 << (d->irq - eic->first_irq)); | 73 | eic_writel(eic, IDR, 1 << (d->irq - eic->first_irq)); |
| 74 | } | 74 | } |
| 75 | 75 | ||
| 76 | static void eic_mask_ack_irq(struct irq_chip *d) | 76 | static void eic_mask_ack_irq(struct irq_data *d) |
| 77 | { | 77 | { |
| 78 | struct eic *eic = irq_data_get_irq_chip_data(data); | 78 | struct eic *eic = irq_data_get_irq_chip_data(d); |
| 79 | eic_writel(eic, ICR, 1 << (d->irq - eic->first_irq)); | 79 | eic_writel(eic, ICR, 1 << (d->irq - eic->first_irq)); |
| 80 | eic_writel(eic, IDR, 1 << (d->irq - eic->first_irq)); | 80 | eic_writel(eic, IDR, 1 << (d->irq - eic->first_irq)); |
| 81 | } | 81 | } |
| 82 | 82 | ||
| 83 | static void eic_unmask_irq(struct irq_chip *d) | 83 | static void eic_unmask_irq(struct irq_data *d) |
| 84 | { | 84 | { |
| 85 | struct eic *eic = irq_data_get_irq_chip_data(data); | 85 | struct eic *eic = irq_data_get_irq_chip_data(d); |
| 86 | eic_writel(eic, IER, 1 << (d->irq - eic->first_irq)); | 86 | eic_writel(eic, IER, 1 << (d->irq - eic->first_irq)); |
| 87 | } | 87 | } |
| 88 | 88 | ||
| 89 | static int eic_set_irq_type(struct irq_chip *d, unsigned int flow_type) | 89 | static int eic_set_irq_type(struct irq_data *d, unsigned int flow_type) |
| 90 | { | 90 | { |
| 91 | struct eic *eic = irq_data_get_irq_chip_data(data); | 91 | struct eic *eic = irq_data_get_irq_chip_data(d); |
| 92 | unsigned int irq = d->irq; | 92 | unsigned int irq = d->irq; |
| 93 | unsigned int i = irq - eic->first_irq; | 93 | unsigned int i = irq - eic->first_irq; |
| 94 | u32 mode, edge, level; | 94 | u32 mode, edge, level; |
| @@ -191,7 +191,7 @@ static int __init eic_probe(struct platform_device *pdev) | |||
| 191 | 191 | ||
| 192 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 192 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 193 | int_irq = platform_get_irq(pdev, 0); | 193 | int_irq = platform_get_irq(pdev, 0); |
| 194 | if (!regs || !int_irq) { | 194 | if (!regs || (int)int_irq <= 0) { |
| 195 | dev_dbg(&pdev->dev, "missing regs and/or irq resource\n"); | 195 | dev_dbg(&pdev->dev, "missing regs and/or irq resource\n"); |
| 196 | return -ENXIO; | 196 | return -ENXIO; |
| 197 | } | 197 | } |
diff --git a/arch/avr32/mach-at32ap/pio.c b/arch/avr32/mach-at32ap/pio.c index f308e1ddc629..2e0aa853a4bc 100644 --- a/arch/avr32/mach-at32ap/pio.c +++ b/arch/avr32/mach-at32ap/pio.c | |||
| @@ -257,7 +257,7 @@ static void gpio_irq_mask(struct irq_data *d) | |||
| 257 | pio_writel(pio, IDR, 1 << (gpio & 0x1f)); | 257 | pio_writel(pio, IDR, 1 << (gpio & 0x1f)); |
| 258 | } | 258 | } |
| 259 | 259 | ||
| 260 | static void gpio_irq_unmask(struct irq_data *d)) | 260 | static void gpio_irq_unmask(struct irq_data *d) |
| 261 | { | 261 | { |
| 262 | unsigned gpio = irq_to_gpio(d->irq); | 262 | unsigned gpio = irq_to_gpio(d->irq); |
| 263 | struct pio_device *pio = &pio_dev[gpio >> 5]; | 263 | struct pio_device *pio = &pio_dev[gpio >> 5]; |
diff --git a/arch/avr32/mach-at32ap/pm-at32ap700x.S b/arch/avr32/mach-at32ap/pm-at32ap700x.S index 17503b0ed6c9..f868f4ce761b 100644 --- a/arch/avr32/mach-at32ap/pm-at32ap700x.S +++ b/arch/avr32/mach-at32ap/pm-at32ap700x.S | |||
| @@ -53,7 +53,7 @@ cpu_enter_idle: | |||
| 53 | st.w r8[TI_flags], r9 | 53 | st.w r8[TI_flags], r9 |
| 54 | unmask_interrupts | 54 | unmask_interrupts |
| 55 | sleep CPU_SLEEP_IDLE | 55 | sleep CPU_SLEEP_IDLE |
| 56 | .size cpu_idle_sleep, . - cpu_idle_sleep | 56 | .size cpu_enter_idle, . - cpu_enter_idle |
| 57 | 57 | ||
| 58 | /* | 58 | /* |
| 59 | * Common return path for PM functions that don't run from | 59 | * Common return path for PM functions that don't run from |
diff --git a/arch/blackfin/include/asm/system.h b/arch/blackfin/include/asm/system.h index 19e2c7c3e63a..44bd0cced725 100644 --- a/arch/blackfin/include/asm/system.h +++ b/arch/blackfin/include/asm/system.h | |||
| @@ -19,11 +19,11 @@ | |||
| 19 | * Force strict CPU ordering. | 19 | * Force strict CPU ordering. |
| 20 | */ | 20 | */ |
| 21 | #define nop() __asm__ __volatile__ ("nop;\n\t" : : ) | 21 | #define nop() __asm__ __volatile__ ("nop;\n\t" : : ) |
| 22 | #define mb() __asm__ __volatile__ ("" : : : "memory") | 22 | #define smp_mb() mb() |
| 23 | #define rmb() __asm__ __volatile__ ("" : : : "memory") | 23 | #define smp_rmb() rmb() |
| 24 | #define wmb() __asm__ __volatile__ ("" : : : "memory") | 24 | #define smp_wmb() wmb() |
| 25 | #define set_mb(var, value) do { (void) xchg(&var, value); } while (0) | 25 | #define set_mb(var, value) do { var = value; mb(); } while (0) |
| 26 | #define read_barrier_depends() do { } while(0) | 26 | #define smp_read_barrier_depends() read_barrier_depends() |
| 27 | 27 | ||
| 28 | #ifdef CONFIG_SMP | 28 | #ifdef CONFIG_SMP |
| 29 | asmlinkage unsigned long __raw_xchg_1_asm(volatile void *ptr, unsigned long value); | 29 | asmlinkage unsigned long __raw_xchg_1_asm(volatile void *ptr, unsigned long value); |
| @@ -37,16 +37,16 @@ asmlinkage unsigned long __raw_cmpxchg_4_asm(volatile void *ptr, | |||
| 37 | unsigned long new, unsigned long old); | 37 | unsigned long new, unsigned long old); |
| 38 | 38 | ||
| 39 | #ifdef __ARCH_SYNC_CORE_DCACHE | 39 | #ifdef __ARCH_SYNC_CORE_DCACHE |
| 40 | # define smp_mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0) | 40 | /* Force Core data cache coherence */ |
| 41 | # define smp_rmb() do { barrier(); smp_check_barrier(); } while (0) | 41 | # define mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0) |
| 42 | # define smp_wmb() do { barrier(); smp_mark_barrier(); } while (0) | 42 | # define rmb() do { barrier(); smp_check_barrier(); } while (0) |
| 43 | #define smp_read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0) | 43 | # define wmb() do { barrier(); smp_mark_barrier(); } while (0) |
| 44 | 44 | # define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0) | |
| 45 | #else | 45 | #else |
| 46 | # define smp_mb() barrier() | 46 | # define mb() barrier() |
| 47 | # define smp_rmb() barrier() | 47 | # define rmb() barrier() |
| 48 | # define smp_wmb() barrier() | 48 | # define wmb() barrier() |
| 49 | #define smp_read_barrier_depends() barrier() | 49 | # define read_barrier_depends() do { } while (0) |
| 50 | #endif | 50 | #endif |
| 51 | 51 | ||
| 52 | static inline unsigned long __xchg(unsigned long x, volatile void *ptr, | 52 | static inline unsigned long __xchg(unsigned long x, volatile void *ptr, |
| @@ -99,10 +99,10 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | |||
| 99 | 99 | ||
| 100 | #else /* !CONFIG_SMP */ | 100 | #else /* !CONFIG_SMP */ |
| 101 | 101 | ||
| 102 | #define smp_mb() barrier() | 102 | #define mb() barrier() |
| 103 | #define smp_rmb() barrier() | 103 | #define rmb() barrier() |
| 104 | #define smp_wmb() barrier() | 104 | #define wmb() barrier() |
| 105 | #define smp_read_barrier_depends() do { } while(0) | 105 | #define read_barrier_depends() do { } while (0) |
| 106 | 106 | ||
| 107 | struct __xchg_dummy { | 107 | struct __xchg_dummy { |
| 108 | unsigned long a[100]; | 108 | unsigned long a[100]; |
diff --git a/arch/blackfin/kernel/gptimers.c b/arch/blackfin/kernel/gptimers.c index cdbe075de1dc..8b81dc04488a 100644 --- a/arch/blackfin/kernel/gptimers.c +++ b/arch/blackfin/kernel/gptimers.c | |||
| @@ -268,7 +268,7 @@ void disable_gptimers(uint16_t mask) | |||
| 268 | _disable_gptimers(mask); | 268 | _disable_gptimers(mask); |
| 269 | for (i = 0; i < MAX_BLACKFIN_GPTIMERS; ++i) | 269 | for (i = 0; i < MAX_BLACKFIN_GPTIMERS; ++i) |
| 270 | if (mask & (1 << i)) | 270 | if (mask & (1 << i)) |
| 271 | group_regs[BFIN_TIMER_OCTET(i)]->status |= trun_mask[i]; | 271 | group_regs[BFIN_TIMER_OCTET(i)]->status = trun_mask[i]; |
| 272 | SSYNC(); | 272 | SSYNC(); |
| 273 | } | 273 | } |
| 274 | EXPORT_SYMBOL(disable_gptimers); | 274 | EXPORT_SYMBOL(disable_gptimers); |
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c index 8c9a43daf80f..cdb4beb6bc8f 100644 --- a/arch/blackfin/kernel/time-ts.c +++ b/arch/blackfin/kernel/time-ts.c | |||
| @@ -206,8 +206,14 @@ irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id) | |||
| 206 | { | 206 | { |
| 207 | struct clock_event_device *evt = dev_id; | 207 | struct clock_event_device *evt = dev_id; |
| 208 | smp_mb(); | 208 | smp_mb(); |
| 209 | evt->event_handler(evt); | 209 | /* |
| 210 | * We want to ACK before we handle so that we can handle smaller timer | ||
| 211 | * intervals. This way if the timer expires again while we're handling | ||
| 212 | * things, we're more likely to see that 2nd int rather than swallowing | ||
| 213 | * it by ACKing the int at the end of this handler. | ||
| 214 | */ | ||
| 210 | bfin_gptmr0_ack(); | 215 | bfin_gptmr0_ack(); |
| 216 | evt->event_handler(evt); | ||
| 211 | return IRQ_HANDLED; | 217 | return IRQ_HANDLED; |
| 212 | } | 218 | } |
| 213 | 219 | ||
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c index 6e17a265c4d3..8bce5ed031e4 100644 --- a/arch/blackfin/mach-common/smp.c +++ b/arch/blackfin/mach-common/smp.c | |||
| @@ -109,10 +109,23 @@ static void ipi_flush_icache(void *info) | |||
| 109 | struct blackfin_flush_data *fdata = info; | 109 | struct blackfin_flush_data *fdata = info; |
| 110 | 110 | ||
| 111 | /* Invalidate the memory holding the bounds of the flushed region. */ | 111 | /* Invalidate the memory holding the bounds of the flushed region. */ |
| 112 | invalidate_dcache_range((unsigned long)fdata, | 112 | blackfin_dcache_invalidate_range((unsigned long)fdata, |
| 113 | (unsigned long)fdata + sizeof(*fdata)); | 113 | (unsigned long)fdata + sizeof(*fdata)); |
| 114 | |||
| 115 | /* Make sure all write buffers in the data side of the core | ||
| 116 | * are flushed before trying to invalidate the icache. This | ||
| 117 | * needs to be after the data flush and before the icache | ||
| 118 | * flush so that the SSYNC does the right thing in preventing | ||
| 119 | * the instruction prefetcher from hitting things in cached | ||
| 120 | * memory at the wrong time -- it runs much further ahead than | ||
| 121 | * the pipeline. | ||
| 122 | */ | ||
| 123 | SSYNC(); | ||
| 114 | 124 | ||
| 115 | flush_icache_range(fdata->start, fdata->end); | 125 | /* ipi_flaush_icache is invoked by generic flush_icache_range, |
| 126 | * so call blackfin arch icache flush directly here. | ||
| 127 | */ | ||
| 128 | blackfin_icache_flush_range(fdata->start, fdata->end); | ||
| 116 | } | 129 | } |
| 117 | 130 | ||
| 118 | static void ipi_call_function(unsigned int cpu, struct ipi_message *msg) | 131 | static void ipi_call_function(unsigned int cpu, struct ipi_message *msg) |
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 26d851d385bb..29e17907d9f2 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h | |||
| @@ -343,10 +343,14 @@ | |||
| 343 | #define __NR_fanotify_init 337 | 343 | #define __NR_fanotify_init 337 |
| 344 | #define __NR_fanotify_mark 338 | 344 | #define __NR_fanotify_mark 338 |
| 345 | #define __NR_prlimit64 339 | 345 | #define __NR_prlimit64 339 |
| 346 | #define __NR_name_to_handle_at 340 | ||
| 347 | #define __NR_open_by_handle_at 341 | ||
| 348 | #define __NR_clock_adjtime 342 | ||
| 349 | #define __NR_syncfs 343 | ||
| 346 | 350 | ||
| 347 | #ifdef __KERNEL__ | 351 | #ifdef __KERNEL__ |
| 348 | 352 | ||
| 349 | #define NR_syscalls 340 | 353 | #define NR_syscalls 344 |
| 350 | 354 | ||
| 351 | #define __ARCH_WANT_IPC_PARSE_VERSION | 355 | #define __ARCH_WANT_IPC_PARSE_VERSION |
| 352 | #define __ARCH_WANT_OLD_READDIR | 356 | #define __ARCH_WANT_OLD_READDIR |
diff --git a/arch/m68k/kernel/entry_mm.S b/arch/m68k/kernel/entry_mm.S index 1559dea36e55..1359ee659574 100644 --- a/arch/m68k/kernel/entry_mm.S +++ b/arch/m68k/kernel/entry_mm.S | |||
| @@ -750,4 +750,8 @@ sys_call_table: | |||
| 750 | .long sys_fanotify_init | 750 | .long sys_fanotify_init |
| 751 | .long sys_fanotify_mark | 751 | .long sys_fanotify_mark |
| 752 | .long sys_prlimit64 | 752 | .long sys_prlimit64 |
| 753 | .long sys_name_to_handle_at /* 340 */ | ||
| 754 | .long sys_open_by_handle_at | ||
| 755 | .long sys_clock_adjtime | ||
| 756 | .long sys_syncfs | ||
| 753 | 757 | ||
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index 79b1ed198c07..9b8393d8adb8 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S | |||
| @@ -358,6 +358,10 @@ ENTRY(sys_call_table) | |||
| 358 | .long sys_fanotify_init | 358 | .long sys_fanotify_init |
| 359 | .long sys_fanotify_mark | 359 | .long sys_fanotify_mark |
| 360 | .long sys_prlimit64 | 360 | .long sys_prlimit64 |
| 361 | .long sys_name_to_handle_at /* 340 */ | ||
| 362 | .long sys_open_by_handle_at | ||
| 363 | .long sys_clock_adjtime | ||
| 364 | .long sys_syncfs | ||
| 361 | 365 | ||
| 362 | .rept NR_syscalls-(.-sys_call_table)/4 | 366 | .rept NR_syscalls-(.-sys_call_table)/4 |
| 363 | .long sys_ni_syscall | 367 | .long sys_ni_syscall |
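Both tables grow by the same four entries and NR_syscalls moves from 340 to 344, so the .rept padding still starts right after the last real entry. Until libc grows wrappers, userspace can reach a newly wired call by number; a hypothetical example using the m68k number for syncfs from the table above (other architectures use different numbers):

#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	int fd = open("/", O_RDONLY);
	long ret = syscall(343 /* __NR_syncfs on m68k, per the table above */, fd);

	printf("syncfs via raw syscall(): %ld\n", ret);
	close(fd);
	return ret ? 1 : 0;
}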
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index c00d4ca1ee15..28581f1ad2c0 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c | |||
| @@ -527,7 +527,7 @@ static int ibmebus_bus_pm_resume_noirq(struct device *dev) | |||
| 527 | 527 | ||
| 528 | #endif /* !CONFIG_SUSPEND */ | 528 | #endif /* !CONFIG_SUSPEND */ |
| 529 | 529 | ||
| 530 | #ifdef CONFIG_HIBERNATION | 530 | #ifdef CONFIG_HIBERNATE_CALLBACKS |
| 531 | 531 | ||
| 532 | static int ibmebus_bus_pm_freeze(struct device *dev) | 532 | static int ibmebus_bus_pm_freeze(struct device *dev) |
| 533 | { | 533 | { |
| @@ -665,7 +665,7 @@ static int ibmebus_bus_pm_restore_noirq(struct device *dev) | |||
| 665 | return ret; | 665 | return ret; |
| 666 | } | 666 | } |
| 667 | 667 | ||
| 668 | #else /* !CONFIG_HIBERNATION */ | 668 | #else /* !CONFIG_HIBERNATE_CALLBACKS */ |
| 669 | 669 | ||
| 670 | #define ibmebus_bus_pm_freeze NULL | 670 | #define ibmebus_bus_pm_freeze NULL |
| 671 | #define ibmebus_bus_pm_thaw NULL | 671 | #define ibmebus_bus_pm_thaw NULL |
| @@ -676,7 +676,7 @@ static int ibmebus_bus_pm_restore_noirq(struct device *dev) | |||
| 676 | #define ibmebus_bus_pm_poweroff_noirq NULL | 676 | #define ibmebus_bus_pm_poweroff_noirq NULL |
| 677 | #define ibmebus_bus_pm_restore_noirq NULL | 677 | #define ibmebus_bus_pm_restore_noirq NULL |
| 678 | 678 | ||
| 679 | #endif /* !CONFIG_HIBERNATION */ | 679 | #endif /* !CONFIG_HIBERNATE_CALLBACKS */ |
| 680 | 680 | ||
| 681 | static struct dev_pm_ops ibmebus_bus_dev_pm_ops = { | 681 | static struct dev_pm_ops ibmebus_bus_dev_pm_ops = { |
| 682 | .prepare = ibmebus_bus_pm_prepare, | 682 | .prepare = ibmebus_bus_pm_prepare, |
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index 14232d57369c..49798532b477 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c | |||
| @@ -1457,7 +1457,6 @@ int fsl_rio_setup(struct platform_device *dev) | |||
| 1457 | port->ops = ops; | 1457 | port->ops = ops; |
| 1458 | port->priv = priv; | 1458 | port->priv = priv; |
| 1459 | port->phys_efptr = 0x100; | 1459 | port->phys_efptr = 0x100; |
| 1460 | rio_register_mport(port); | ||
| 1461 | 1460 | ||
| 1462 | priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1); | 1461 | priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1); |
| 1463 | rio_regs_win = priv->regs_win; | 1462 | rio_regs_win = priv->regs_win; |
| @@ -1504,6 +1503,9 @@ int fsl_rio_setup(struct platform_device *dev) | |||
| 1504 | dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n", | 1503 | dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n", |
| 1505 | port->sys_size ? 65536 : 256); | 1504 | port->sys_size ? 65536 : 256); |
| 1506 | 1505 | ||
| 1506 | if (rio_register_mport(port)) | ||
| 1507 | goto err; | ||
| 1508 | |||
| 1507 | if (port->host_deviceid >= 0) | 1509 | if (port->host_deviceid >= 0) |
| 1508 | out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST | | 1510 | out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST | |
| 1509 | RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED); | 1511 | RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED); |
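The mport is now registered only after setup has produced something usable, and the return value is checked, so a failed registration unwinds through the error path instead of leaving a half-initialized port visible. The general shape of that fix, with hypothetical helpers:

struct port;
extern int port_setup_resources(struct port *p);	/* ioremap, IRQs, ... */
extern int port_register(struct port *p);		/* makes it globally visible */
extern void port_teardown(struct port *p);

static int port_probe(struct port *p)
{
	int err = port_setup_resources(p);
	if (err)
		return err;

	err = port_register(p);		/* checked, unlike the old early call */
	if (err) {
		port_teardown(p);
		return err;
	}
	return 0;
}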
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index 7061398341d5..fb317bf2c378 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S | |||
| @@ -460,7 +460,7 @@ startup: | |||
| 460 | #ifndef CONFIG_MARCH_G5 | 460 | #ifndef CONFIG_MARCH_G5 |
| 461 | # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10} | 461 | # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10} |
| 462 | xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST | 462 | xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST |
| 463 | stfl __LC_STFL_FAC_LIST # store facility list | 463 | .insn s,0xb2b10000,__LC_STFL_FAC_LIST # store facility list |
| 464 | tm __LC_STFL_FAC_LIST,0x01 # stfle available ? | 464 | tm __LC_STFL_FAC_LIST,0x01 # stfle available ? |
| 465 | jz 0f | 465 | jz 0f |
| 466 | la %r0,0 | 466 | la %r0,0 |
diff --git a/arch/s390/kernel/switch_cpu.S b/arch/s390/kernel/switch_cpu.S index 469f11b574fa..20530dd2eab1 100644 --- a/arch/s390/kernel/switch_cpu.S +++ b/arch/s390/kernel/switch_cpu.S | |||
| @@ -46,7 +46,9 @@ smp_restart_cpu: | |||
| 46 | ltr %r4,%r4 /* New stack ? */ | 46 | ltr %r4,%r4 /* New stack ? */ |
| 47 | jz 1f | 47 | jz 1f |
| 48 | lr %r15,%r4 | 48 | lr %r15,%r4 |
| 49 | 1: basr %r14,%r2 | 49 | 1: lr %r14,%r2 /* r14: Function to call */ |
| 50 | lr %r2,%r3 /* r2 : Parameter for function*/ | ||
| 51 | basr %r14,%r14 /* Call function */ | ||
| 50 | 52 | ||
| 51 | .gprregs_addr: | 53 | .gprregs_addr: |
| 52 | .long .gprregs | 54 | .long .gprregs |
diff --git a/arch/s390/kernel/switch_cpu64.S b/arch/s390/kernel/switch_cpu64.S index d94aacc898cb..5be3f43898f9 100644 --- a/arch/s390/kernel/switch_cpu64.S +++ b/arch/s390/kernel/switch_cpu64.S | |||
| @@ -42,7 +42,9 @@ smp_restart_cpu: | |||
| 42 | ltgr %r4,%r4 /* New stack ? */ | 42 | ltgr %r4,%r4 /* New stack ? */ |
| 43 | jz 1f | 43 | jz 1f |
| 44 | lgr %r15,%r4 | 44 | lgr %r15,%r4 |
| 45 | 1: basr %r14,%r2 | 45 | 1: lgr %r14,%r2 /* r14: Function to call */ |
| 46 | lgr %r2,%r3 /* r2 : Parameter for function*/ | ||
| 47 | basr %r14,%r14 /* Call function */ | ||
| 46 | 48 | ||
| 47 | .section .data,"aw",@progbits | 49 | .section .data,"aw",@progbits |
| 48 | .gprregs: | 50 | .gprregs: |
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c index 3d48f4db246d..4952872d6f0a 100644 --- a/arch/s390/oprofile/hwsampler.c +++ b/arch/s390/oprofile/hwsampler.c | |||
| @@ -517,12 +517,8 @@ stop_exit: | |||
| 517 | 517 | ||
| 518 | static int check_hardware_prerequisites(void) | 518 | static int check_hardware_prerequisites(void) |
| 519 | { | 519 | { |
| 520 | unsigned long long facility_bits[2]; | 520 | if (!test_facility(68)) |
| 521 | |||
| 522 | memcpy(facility_bits, S390_lowcore.stfle_fac_list, 32); | ||
| 523 | if (!(facility_bits[1] & (1ULL << 59))) | ||
| 524 | return -EOPNOTSUPP; | 521 | return -EOPNOTSUPP; |
| 525 | |||
| 526 | return 0; | 522 | return 0; |
| 527 | } | 523 | } |
| 528 | /* | 524 | /* |
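test_facility(68) hides the word/bit arithmetic that the old open-coded check did by hand. The two agree because s390 facility bits are numbered MSB-first within 64-bit words: facility 68 lands in word 1 at bit 63 - (68 % 64) = 59, exactly the 1ULL << 59 test that was removed. An illustrative stand-alone version of that arithmetic:

#include <stdio.h>
#include <stdint.h>

static int facility_set(const uint64_t *fac_list, unsigned int nr)
{
	return (fac_list[nr / 64] >> (63 - (nr % 64))) & 1;
}

int main(void)
{
	uint64_t fac[2] = { 0, 1ULL << 59 };	/* pretend facility 68 is installed */

	printf("facility 68 installed: %d\n", facility_set(fac, 68));
	return 0;
}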
diff --git a/arch/um/Kconfig.x86 b/arch/um/Kconfig.x86 index 02fb017fed47..a9da516a5274 100644 --- a/arch/um/Kconfig.x86 +++ b/arch/um/Kconfig.x86 | |||
| @@ -4,6 +4,10 @@ menu "UML-specific options" | |||
| 4 | 4 | ||
| 5 | menu "Host processor type and features" | 5 | menu "Host processor type and features" |
| 6 | 6 | ||
| 7 | config CMPXCHG_LOCAL | ||
| 8 | bool | ||
| 9 | default n | ||
| 10 | |||
| 7 | source "arch/x86/Kconfig.cpu" | 11 | source "arch/x86/Kconfig.cpu" |
| 8 | 12 | ||
| 9 | endmenu | 13 | endmenu |
diff --git a/arch/um/include/asm/bug.h b/arch/um/include/asm/bug.h new file mode 100644 index 000000000000..9e33b864c359 --- /dev/null +++ b/arch/um/include/asm/bug.h | |||
| @@ -0,0 +1,6 @@ | |||
| 1 | #ifndef __UM_BUG_H | ||
| 2 | #define __UM_BUG_H | ||
| 3 | |||
| 4 | #include <asm-generic/bug.h> | ||
| 5 | |||
| 6 | #endif | ||
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index fd5a1f365c95..3cce71413d0b 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h | |||
| @@ -96,11 +96,15 @@ | |||
| 96 | #define MSR_IA32_MC0_ADDR 0x00000402 | 96 | #define MSR_IA32_MC0_ADDR 0x00000402 |
| 97 | #define MSR_IA32_MC0_MISC 0x00000403 | 97 | #define MSR_IA32_MC0_MISC 0x00000403 |
| 98 | 98 | ||
| 99 | #define MSR_AMD64_MC0_MASK 0xc0010044 | ||
| 100 | |||
| 99 | #define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x)) | 101 | #define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x)) |
| 100 | #define MSR_IA32_MCx_STATUS(x) (MSR_IA32_MC0_STATUS + 4*(x)) | 102 | #define MSR_IA32_MCx_STATUS(x) (MSR_IA32_MC0_STATUS + 4*(x)) |
| 101 | #define MSR_IA32_MCx_ADDR(x) (MSR_IA32_MC0_ADDR + 4*(x)) | 103 | #define MSR_IA32_MCx_ADDR(x) (MSR_IA32_MC0_ADDR + 4*(x)) |
| 102 | #define MSR_IA32_MCx_MISC(x) (MSR_IA32_MC0_MISC + 4*(x)) | 104 | #define MSR_IA32_MCx_MISC(x) (MSR_IA32_MC0_MISC + 4*(x)) |
| 103 | 105 | ||
| 106 | #define MSR_AMD64_MCx_MASK(x) (MSR_AMD64_MC0_MASK + (x)) | ||
| 107 | |||
| 104 | /* These are consecutive and not in the normal 4er MCE bank block */ | 108 | /* These are consecutive and not in the normal 4er MCE bank block */ |
| 105 | #define MSR_IA32_MC0_CTL2 0x00000280 | 109 | #define MSR_IA32_MC0_CTL2 0x00000280 |
| 106 | #define MSR_IA32_MCx_CTL2(x) (MSR_IA32_MC0_CTL2 + (x)) | 110 | #define MSR_IA32_MCx_CTL2(x) (MSR_IA32_MC0_CTL2 + (x)) |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 3ecece0217ef..3532d3bf8105 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
| @@ -615,6 +615,25 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
| 615 | /* As a rule processors have APIC timer running in deep C states */ | 615 | /* As a rule processors have APIC timer running in deep C states */ |
| 616 | if (c->x86 >= 0xf && !cpu_has_amd_erratum(amd_erratum_400)) | 616 | if (c->x86 >= 0xf && !cpu_has_amd_erratum(amd_erratum_400)) |
| 617 | set_cpu_cap(c, X86_FEATURE_ARAT); | 617 | set_cpu_cap(c, X86_FEATURE_ARAT); |
| 618 | |||
| 619 | /* | ||
| 620 | * Disable GART TLB Walk Errors on Fam10h. We do this here | ||
| 621 | * because this is always needed when GART is enabled, even in a | ||
| 622 | * kernel which has no MCE support built in. | ||
| 623 | */ | ||
| 624 | if (c->x86 == 0x10) { | ||
| 625 | /* | ||
| 626 | * The BIOS should disable GartTlbWlk errors itself. If | ||
| 627 | * it doesn't, do it here as suggested by the BKDG. | ||
| 628 | * | ||
| 629 | * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012 | ||
| 630 | */ | ||
| 631 | u64 mask; | ||
| 632 | |||
| 633 | rdmsrl(MSR_AMD64_MCx_MASK(4), mask); | ||
| 634 | mask |= (1 << 10); | ||
| 635 | wrmsrl(MSR_AMD64_MCx_MASK(4), mask); | ||
| 636 | } | ||
| 618 | } | 637 | } |
| 619 | 638 | ||
| 620 | #ifdef CONFIG_X86_32 | 639 | #ifdef CONFIG_X86_32 |
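Bit 10 of the MC4 mask MSR (MSR_AMD64_MCx_MASK(4), i.e. 0xc0010044 + 4) is set with a read-modify-write so the other mask bits survive. A sketch of that pattern; rdmsr64()/wrmsr64() are hypothetical stand-ins for the kernel's rdmsrl/wrmsrl:

#include <stdint.h>

#define MC0_MASK_MSR	0xc0010044u
#define MCx_MASK_MSR(x)	(MC0_MASK_MSR + (x))	/* bank 4 -> 0xc0010048 */

extern uint64_t rdmsr64(uint32_t msr);		/* hypothetical accessors */
extern void wrmsr64(uint32_t msr, uint64_t val);

static void disable_gart_tlb_walk_errors(void)
{
	uint64_t mask = rdmsr64(MCx_MASK_MSR(4));

	mask |= 1ull << 10;			/* GartTlbWlkErr mask bit */
	wrmsr64(MCx_MASK_MSR(4), mask);		/* keep the other mask bits */
}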
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index c2871d3c71b6..8ed8908cc9f7 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
| @@ -312,6 +312,26 @@ void __cpuinit smp_store_cpu_info(int id) | |||
| 312 | identify_secondary_cpu(c); | 312 | identify_secondary_cpu(c); |
| 313 | } | 313 | } |
| 314 | 314 | ||
| 315 | static void __cpuinit check_cpu_siblings_on_same_node(int cpu1, int cpu2) | ||
| 316 | { | ||
| 317 | int node1 = early_cpu_to_node(cpu1); | ||
| 318 | int node2 = early_cpu_to_node(cpu2); | ||
| 319 | |||
| 320 | /* | ||
| 321 | * Our CPU scheduler assumes all logical cpus in the same physical cpu | ||
| 322 | * share the same node. But buggy ACPI or NUMA emulation might assign | ||
| 323 | * them to a different node. Fix it. | ||
| 324 | */ | ||
| 325 | if (node1 != node2) { | ||
| 326 | pr_warning("CPU %d in node %d and CPU %d in node %d are in the same physical CPU. forcing same node %d\n", | ||
| 327 | cpu1, node1, cpu2, node2, node2); | ||
| 328 | |||
| 329 | numa_remove_cpu(cpu1); | ||
| 330 | numa_set_node(cpu1, node2); | ||
| 331 | numa_add_cpu(cpu1); | ||
| 332 | } | ||
| 333 | } | ||
| 334 | |||
| 315 | static void __cpuinit link_thread_siblings(int cpu1, int cpu2) | 335 | static void __cpuinit link_thread_siblings(int cpu1, int cpu2) |
| 316 | { | 336 | { |
| 317 | cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2)); | 337 | cpumask_set_cpu(cpu1, cpu_sibling_mask(cpu2)); |
| @@ -320,6 +340,7 @@ static void __cpuinit link_thread_siblings(int cpu1, int cpu2) | |||
| 320 | cpumask_set_cpu(cpu2, cpu_core_mask(cpu1)); | 340 | cpumask_set_cpu(cpu2, cpu_core_mask(cpu1)); |
| 321 | cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2)); | 341 | cpumask_set_cpu(cpu1, cpu_llc_shared_mask(cpu2)); |
| 322 | cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1)); | 342 | cpumask_set_cpu(cpu2, cpu_llc_shared_mask(cpu1)); |
| 343 | check_cpu_siblings_on_same_node(cpu1, cpu2); | ||
| 323 | } | 344 | } |
| 324 | 345 | ||
| 325 | 346 | ||
| @@ -361,10 +382,12 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
| 361 | per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { | 382 | per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { |
| 362 | cpumask_set_cpu(i, cpu_llc_shared_mask(cpu)); | 383 | cpumask_set_cpu(i, cpu_llc_shared_mask(cpu)); |
| 363 | cpumask_set_cpu(cpu, cpu_llc_shared_mask(i)); | 384 | cpumask_set_cpu(cpu, cpu_llc_shared_mask(i)); |
| 385 | check_cpu_siblings_on_same_node(cpu, i); | ||
| 364 | } | 386 | } |
| 365 | if (c->phys_proc_id == cpu_data(i).phys_proc_id) { | 387 | if (c->phys_proc_id == cpu_data(i).phys_proc_id) { |
| 366 | cpumask_set_cpu(i, cpu_core_mask(cpu)); | 388 | cpumask_set_cpu(i, cpu_core_mask(cpu)); |
| 367 | cpumask_set_cpu(cpu, cpu_core_mask(i)); | 389 | cpumask_set_cpu(cpu, cpu_core_mask(i)); |
| 390 | check_cpu_siblings_on_same_node(cpu, i); | ||
| 368 | /* | 391 | /* |
| 369 | * Does this new cpu bringup a new core? | 392 | * Does this new cpu bringup a new core? |
| 370 | */ | 393 | */ |
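When firmware reports two SMT siblings on different NUMA nodes, the new helper moves the second CPU: drop it from the per-node masks, rewrite its node, then re-add it, so the scheduler's assumption holds again. The same remove/set/add pattern, with hypothetical node-map helpers:

extern int cpu_to_node_id(int cpu);
extern void node_map_remove_cpu(int cpu);
extern void node_map_set_node(int cpu, int node);
extern void node_map_add_cpu(int cpu);

static void align_sibling_nodes(int cpu1, int cpu2)
{
	int node1 = cpu_to_node_id(cpu1);
	int node2 = cpu_to_node_id(cpu2);

	if (node1 == node2)
		return;				/* firmware got it right */

	node_map_remove_cpu(cpu1);		/* drop stale per-node bookkeeping */
	node_map_set_node(cpu1, node2);		/* adopt the sibling's node */
	node_map_add_cpu(cpu1);			/* re-register under the new node */
}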
diff --git a/arch/x86/platform/ce4100/falconfalls.dts b/arch/x86/platform/ce4100/falconfalls.dts index dc701ea58546..2d6d226f2b10 100644 --- a/arch/x86/platform/ce4100/falconfalls.dts +++ b/arch/x86/platform/ce4100/falconfalls.dts | |||
| @@ -74,6 +74,7 @@ | |||
| 74 | compatible = "intel,ce4100-pci", "pci"; | 74 | compatible = "intel,ce4100-pci", "pci"; |
| 75 | device_type = "pci"; | 75 | device_type = "pci"; |
| 76 | bus-range = <1 1>; | 76 | bus-range = <1 1>; |
| 77 | reg = <0x0800 0x0 0x0 0x0 0x0>; | ||
| 77 | ranges = <0x2000000 0 0xdffe0000 0x2000000 0 0xdffe0000 0 0x1000>; | 78 | ranges = <0x2000000 0 0xdffe0000 0x2000000 0 0xdffe0000 0 0x1000>; |
| 78 | 79 | ||
| 79 | interrupt-parent = <&ioapic2>; | 80 | interrupt-parent = <&ioapic2>; |
| @@ -412,6 +413,7 @@ | |||
| 412 | #address-cells = <2>; | 413 | #address-cells = <2>; |
| 413 | #size-cells = <1>; | 414 | #size-cells = <1>; |
| 414 | compatible = "isa"; | 415 | compatible = "isa"; |
| 416 | reg = <0xf800 0x0 0x0 0x0 0x0>; | ||
| 415 | ranges = <1 0 0 0 0 0x100>; | 417 | ranges = <1 0 0 0 0 0x100>; |
| 416 | 418 | ||
| 417 | rtc@70 { | 419 | rtc@70 { |
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c index 5c0207bf959b..275dbc19e2cf 100644 --- a/arch/x86/platform/mrst/mrst.c +++ b/arch/x86/platform/mrst/mrst.c | |||
| @@ -97,11 +97,11 @@ static int __init sfi_parse_mtmr(struct sfi_table_header *table) | |||
| 97 | pentry->freq_hz, pentry->irq); | 97 | pentry->freq_hz, pentry->irq); |
| 98 | if (!pentry->irq) | 98 | if (!pentry->irq) |
| 99 | continue; | 99 | continue; |
| 100 | mp_irq.type = MP_IOAPIC; | 100 | mp_irq.type = MP_INTSRC; |
| 101 | mp_irq.irqtype = mp_INT; | 101 | mp_irq.irqtype = mp_INT; |
| 102 | /* triggering mode edge bit 2-3, active high polarity bit 0-1 */ | 102 | /* triggering mode edge bit 2-3, active high polarity bit 0-1 */ |
| 103 | mp_irq.irqflag = 5; | 103 | mp_irq.irqflag = 5; |
| 104 | mp_irq.srcbus = 0; | 104 | mp_irq.srcbus = MP_BUS_ISA; |
| 105 | mp_irq.srcbusirq = pentry->irq; /* IRQ */ | 105 | mp_irq.srcbusirq = pentry->irq; /* IRQ */ |
| 106 | mp_irq.dstapic = MP_APIC_ALL; | 106 | mp_irq.dstapic = MP_APIC_ALL; |
| 107 | mp_irq.dstirq = pentry->irq; | 107 | mp_irq.dstirq = pentry->irq; |
| @@ -168,10 +168,10 @@ int __init sfi_parse_mrtc(struct sfi_table_header *table) | |||
| 168 | for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) { | 168 | for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) { |
| 169 | pr_debug("RTC[%d]: paddr = 0x%08x, irq = %d\n", | 169 | pr_debug("RTC[%d]: paddr = 0x%08x, irq = %d\n", |
| 170 | totallen, (u32)pentry->phys_addr, pentry->irq); | 170 | totallen, (u32)pentry->phys_addr, pentry->irq); |
| 171 | mp_irq.type = MP_IOAPIC; | 171 | mp_irq.type = MP_INTSRC; |
| 172 | mp_irq.irqtype = mp_INT; | 172 | mp_irq.irqtype = mp_INT; |
| 173 | mp_irq.irqflag = 0xf; /* level trigger and active low */ | 173 | mp_irq.irqflag = 0xf; /* level trigger and active low */ |
| 174 | mp_irq.srcbus = 0; | 174 | mp_irq.srcbus = MP_BUS_ISA; |
| 175 | mp_irq.srcbusirq = pentry->irq; /* IRQ */ | 175 | mp_irq.srcbusirq = pentry->irq; /* IRQ */ |
| 176 | mp_irq.dstapic = MP_APIC_ALL; | 176 | mp_irq.dstapic = MP_APIC_ALL; |
| 177 | mp_irq.dstirq = pentry->irq; | 177 | mp_irq.dstirq = pentry->irq; |
| @@ -282,7 +282,7 @@ void __init x86_mrst_early_setup(void) | |||
| 282 | /* Avoid searching for BIOS MP tables */ | 282 | /* Avoid searching for BIOS MP tables */ |
| 283 | x86_init.mpparse.find_smp_config = x86_init_noop; | 283 | x86_init.mpparse.find_smp_config = x86_init_noop; |
| 284 | x86_init.mpparse.get_smp_config = x86_init_uint_noop; | 284 | x86_init.mpparse.get_smp_config = x86_init_uint_noop; |
| 285 | 285 | set_bit(MP_BUS_ISA, mp_bus_not_pci); | |
| 286 | } | 286 | } |
| 287 | 287 | ||
| 288 | /* | 288 | /* |
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig index 1c7121ba18ff..5cc821cb2e09 100644 --- a/arch/x86/xen/Kconfig +++ b/arch/x86/xen/Kconfig | |||
| @@ -39,6 +39,7 @@ config XEN_MAX_DOMAIN_MEMORY | |||
| 39 | config XEN_SAVE_RESTORE | 39 | config XEN_SAVE_RESTORE |
| 40 | bool | 40 | bool |
| 41 | depends on XEN | 41 | depends on XEN |
| 42 | select HIBERNATE_CALLBACKS | ||
| 42 | default y | 43 | default y |
| 43 | 44 | ||
| 44 | config XEN_DEBUG_FS | 45 | config XEN_DEBUG_FS |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 49dbd78ec3cb..e3c6a06cf725 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
| @@ -238,6 +238,7 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx, | |||
| 238 | static __init void xen_init_cpuid_mask(void) | 238 | static __init void xen_init_cpuid_mask(void) |
| 239 | { | 239 | { |
| 240 | unsigned int ax, bx, cx, dx; | 240 | unsigned int ax, bx, cx, dx; |
| 241 | unsigned int xsave_mask; | ||
| 241 | 242 | ||
| 242 | cpuid_leaf1_edx_mask = | 243 | cpuid_leaf1_edx_mask = |
| 243 | ~((1 << X86_FEATURE_MCE) | /* disable MCE */ | 244 | ~((1 << X86_FEATURE_MCE) | /* disable MCE */ |
| @@ -249,24 +250,16 @@ static __init void xen_init_cpuid_mask(void) | |||
| 249 | cpuid_leaf1_edx_mask &= | 250 | cpuid_leaf1_edx_mask &= |
| 250 | ~((1 << X86_FEATURE_APIC) | /* disable local APIC */ | 251 | ~((1 << X86_FEATURE_APIC) | /* disable local APIC */ |
| 251 | (1 << X86_FEATURE_ACPI)); /* disable ACPI */ | 252 | (1 << X86_FEATURE_ACPI)); /* disable ACPI */ |
| 252 | |||
| 253 | ax = 1; | 253 | ax = 1; |
| 254 | cx = 0; | ||
| 255 | xen_cpuid(&ax, &bx, &cx, &dx); | 254 | xen_cpuid(&ax, &bx, &cx, &dx); |
| 256 | 255 | ||
| 257 | /* cpuid claims we support xsave; try enabling it to see what happens */ | 256 | xsave_mask = |
| 258 | if (cx & (1 << (X86_FEATURE_XSAVE % 32))) { | 257 | (1 << (X86_FEATURE_XSAVE % 32)) | |
| 259 | unsigned long cr4; | 258 | (1 << (X86_FEATURE_OSXSAVE % 32)); |
| 260 | |||
| 261 | set_in_cr4(X86_CR4_OSXSAVE); | ||
| 262 | |||
| 263 | cr4 = read_cr4(); | ||
| 264 | 259 | ||
| 265 | if ((cr4 & X86_CR4_OSXSAVE) == 0) | 260 | /* Xen will set CR4.OSXSAVE if supported and not disabled by force */ |
| 266 | cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_XSAVE % 32)); | 261 | if ((cx & xsave_mask) != xsave_mask) |
| 267 | 262 | cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */ | |
| 268 | clear_in_cr4(X86_CR4_OSXSAVE); | ||
| 269 | } | ||
| 270 | } | 263 | } |
| 271 | 264 | ||
| 272 | static void xen_set_debugreg(int reg, unsigned long val) | 265 | static void xen_set_debugreg(int reg, unsigned long val) |
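Rather than toggling CR4.OSXSAVE to probe what the hypervisor really allows, the guest now trusts the CPUID bits it is shown: if leaf 1 ECX does not report both XSAVE and OSXSAVE, both are masked out. A plain-C sketch of that all-or-nothing feature masking (bit positions as in CPUID.1:ECX):

#include <stdint.h>
#include <stdio.h>

#define FEAT_XSAVE	(1u << 26)
#define FEAT_OSXSAVE	(1u << 27)

static uint32_t mask_ecx(uint32_t ecx_from_hypervisor)
{
	uint32_t want = FEAT_XSAVE | FEAT_OSXSAVE;
	uint32_t mask = ~0u;

	if ((ecx_from_hypervisor & want) != want)
		mask &= ~want;		/* advertise neither if either is missing */

	return mask;
}

int main(void)
{
	printf("mask when only XSAVE is set: 0x%08x\n", mask_ecx(FEAT_XSAVE));
	return 0;
}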
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index c82df6c9c0f0..a991b57f91fe 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
| @@ -565,13 +565,13 @@ pte_t xen_make_pte_debug(pteval_t pte) | |||
| 565 | if (io_page && | 565 | if (io_page && |
| 566 | (xen_initial_domain() || addr >= ISA_END_ADDRESS)) { | 566 | (xen_initial_domain() || addr >= ISA_END_ADDRESS)) { |
| 567 | other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT; | 567 | other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT; |
| 568 | WARN(addr != other_addr, | 568 | WARN_ONCE(addr != other_addr, |
| 569 | "0x%lx is using VM_IO, but it is 0x%lx!\n", | 569 | "0x%lx is using VM_IO, but it is 0x%lx!\n", |
| 570 | (unsigned long)addr, (unsigned long)other_addr); | 570 | (unsigned long)addr, (unsigned long)other_addr); |
| 571 | } else { | 571 | } else { |
| 572 | pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP; | 572 | pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP; |
| 573 | other_addr = (_pte.pte & PTE_PFN_MASK); | 573 | other_addr = (_pte.pte & PTE_PFN_MASK); |
| 574 | WARN((addr == other_addr) && (!io_page) && (!iomap_set), | 574 | WARN_ONCE((addr == other_addr) && (!io_page) && (!iomap_set), |
| 575 | "0x%lx is missing VM_IO (and wasn't fixed)!\n", | 575 | "0x%lx is missing VM_IO (and wasn't fixed)!\n", |
| 576 | (unsigned long)addr); | 576 | (unsigned long)addr); |
| 577 | } | 577 | } |
diff --git a/block/blk-core.c b/block/blk-core.c index 90f22cc30799..3c8121072507 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
| @@ -198,19 +198,6 @@ void blk_dump_rq_flags(struct request *rq, char *msg) | |||
| 198 | } | 198 | } |
| 199 | EXPORT_SYMBOL(blk_dump_rq_flags); | 199 | EXPORT_SYMBOL(blk_dump_rq_flags); |
| 200 | 200 | ||
| 201 | /* | ||
| 202 | * Make sure that plugs that were pending when this function was entered, | ||
| 203 | * are now complete and requests pushed to the queue. | ||
| 204 | */ | ||
| 205 | static inline void queue_sync_plugs(struct request_queue *q) | ||
| 206 | { | ||
| 207 | /* | ||
| 208 | * If the current process is plugged and has barriers submitted, | ||
| 209 | * we will livelock if we don't unplug first. | ||
| 210 | */ | ||
| 211 | blk_flush_plug(current); | ||
| 212 | } | ||
| 213 | |||
| 214 | static void blk_delay_work(struct work_struct *work) | 201 | static void blk_delay_work(struct work_struct *work) |
| 215 | { | 202 | { |
| 216 | struct request_queue *q; | 203 | struct request_queue *q; |
| @@ -298,7 +285,6 @@ void blk_sync_queue(struct request_queue *q) | |||
| 298 | { | 285 | { |
| 299 | del_timer_sync(&q->timeout); | 286 | del_timer_sync(&q->timeout); |
| 300 | cancel_delayed_work_sync(&q->delay_work); | 287 | cancel_delayed_work_sync(&q->delay_work); |
| 301 | queue_sync_plugs(q); | ||
| 302 | } | 288 | } |
| 303 | EXPORT_SYMBOL(blk_sync_queue); | 289 | EXPORT_SYMBOL(blk_sync_queue); |
| 304 | 290 | ||
| @@ -1311,7 +1297,15 @@ get_rq: | |||
| 1311 | 1297 | ||
| 1312 | plug = current->plug; | 1298 | plug = current->plug; |
| 1313 | if (plug) { | 1299 | if (plug) { |
| 1314 | if (!plug->should_sort && !list_empty(&plug->list)) { | 1300 | /* |
| 1301 | * If this is the first request added after a plug, fire | ||
| 1302 | * of a plug trace. If others have been added before, check | ||
| 1303 | * if we have multiple devices in this plug. If so, make a | ||
| 1304 | * note to sort the list before dispatch. | ||
| 1305 | */ | ||
| 1306 | if (list_empty(&plug->list)) | ||
| 1307 | trace_block_plug(q); | ||
| 1308 | else if (!plug->should_sort) { | ||
| 1315 | struct request *__rq; | 1309 | struct request *__rq; |
| 1316 | 1310 | ||
| 1317 | __rq = list_entry_rq(plug->list.prev); | 1311 | __rq = list_entry_rq(plug->list.prev); |
| @@ -2668,33 +2662,56 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b) | |||
| 2668 | return !(rqa->q <= rqb->q); | 2662 | return !(rqa->q <= rqb->q); |
| 2669 | } | 2663 | } |
| 2670 | 2664 | ||
| 2671 | static void flush_plug_list(struct blk_plug *plug) | 2665 | static void queue_unplugged(struct request_queue *q, unsigned int depth, |
| 2666 | bool force_kblockd) | ||
| 2667 | { | ||
| 2668 | trace_block_unplug_io(q, depth); | ||
| 2669 | __blk_run_queue(q, force_kblockd); | ||
| 2670 | |||
| 2671 | if (q->unplugged_fn) | ||
| 2672 | q->unplugged_fn(q); | ||
| 2673 | } | ||
| 2674 | |||
| 2675 | void blk_flush_plug_list(struct blk_plug *plug, bool force_kblockd) | ||
| 2672 | { | 2676 | { |
| 2673 | struct request_queue *q; | 2677 | struct request_queue *q; |
| 2674 | unsigned long flags; | 2678 | unsigned long flags; |
| 2675 | struct request *rq; | 2679 | struct request *rq; |
| 2680 | LIST_HEAD(list); | ||
| 2681 | unsigned int depth; | ||
| 2676 | 2682 | ||
| 2677 | BUG_ON(plug->magic != PLUG_MAGIC); | 2683 | BUG_ON(plug->magic != PLUG_MAGIC); |
| 2678 | 2684 | ||
| 2679 | if (list_empty(&plug->list)) | 2685 | if (list_empty(&plug->list)) |
| 2680 | return; | 2686 | return; |
| 2681 | 2687 | ||
| 2682 | if (plug->should_sort) | 2688 | list_splice_init(&plug->list, &list); |
| 2683 | list_sort(NULL, &plug->list, plug_rq_cmp); | 2689 | |
| 2690 | if (plug->should_sort) { | ||
| 2691 | list_sort(NULL, &list, plug_rq_cmp); | ||
| 2692 | plug->should_sort = 0; | ||
| 2693 | } | ||
| 2684 | 2694 | ||
| 2685 | q = NULL; | 2695 | q = NULL; |
| 2696 | depth = 0; | ||
| 2697 | |||
| 2698 | /* | ||
| 2699 | * Save and disable interrupts here, to avoid doing it for every | ||
| 2700 | * queue lock we have to take. | ||
| 2701 | */ | ||
| 2686 | local_irq_save(flags); | 2702 | local_irq_save(flags); |
| 2687 | while (!list_empty(&plug->list)) { | 2703 | while (!list_empty(&list)) { |
| 2688 | rq = list_entry_rq(plug->list.next); | 2704 | rq = list_entry_rq(list.next); |
| 2689 | list_del_init(&rq->queuelist); | 2705 | list_del_init(&rq->queuelist); |
| 2690 | BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG)); | 2706 | BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG)); |
| 2691 | BUG_ON(!rq->q); | 2707 | BUG_ON(!rq->q); |
| 2692 | if (rq->q != q) { | 2708 | if (rq->q != q) { |
| 2693 | if (q) { | 2709 | if (q) { |
| 2694 | __blk_run_queue(q, false); | 2710 | queue_unplugged(q, depth, force_kblockd); |
| 2695 | spin_unlock(q->queue_lock); | 2711 | spin_unlock(q->queue_lock); |
| 2696 | } | 2712 | } |
| 2697 | q = rq->q; | 2713 | q = rq->q; |
| 2714 | depth = 0; | ||
| 2698 | spin_lock(q->queue_lock); | 2715 | spin_lock(q->queue_lock); |
| 2699 | } | 2716 | } |
| 2700 | rq->cmd_flags &= ~REQ_ON_PLUG; | 2717 | rq->cmd_flags &= ~REQ_ON_PLUG; |
| @@ -2706,38 +2723,27 @@ static void flush_plug_list(struct blk_plug *plug) | |||
| 2706 | __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH); | 2723 | __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH); |
| 2707 | else | 2724 | else |
| 2708 | __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE); | 2725 | __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE); |
| 2726 | |||
| 2727 | depth++; | ||
| 2709 | } | 2728 | } |
| 2710 | 2729 | ||
| 2711 | if (q) { | 2730 | if (q) { |
| 2712 | __blk_run_queue(q, false); | 2731 | queue_unplugged(q, depth, force_kblockd); |
| 2713 | spin_unlock(q->queue_lock); | 2732 | spin_unlock(q->queue_lock); |
| 2714 | } | 2733 | } |
| 2715 | 2734 | ||
| 2716 | BUG_ON(!list_empty(&plug->list)); | ||
| 2717 | local_irq_restore(flags); | 2735 | local_irq_restore(flags); |
| 2718 | } | 2736 | } |
| 2719 | 2737 | EXPORT_SYMBOL(blk_flush_plug_list); | |
| 2720 | static void __blk_finish_plug(struct task_struct *tsk, struct blk_plug *plug) | ||
| 2721 | { | ||
| 2722 | flush_plug_list(plug); | ||
| 2723 | |||
| 2724 | if (plug == tsk->plug) | ||
| 2725 | tsk->plug = NULL; | ||
| 2726 | } | ||
| 2727 | 2738 | ||
| 2728 | void blk_finish_plug(struct blk_plug *plug) | 2739 | void blk_finish_plug(struct blk_plug *plug) |
| 2729 | { | 2740 | { |
| 2730 | if (plug) | 2741 | blk_flush_plug_list(plug, false); |
| 2731 | __blk_finish_plug(current, plug); | ||
| 2732 | } | ||
| 2733 | EXPORT_SYMBOL(blk_finish_plug); | ||
| 2734 | 2742 | ||
| 2735 | void __blk_flush_plug(struct task_struct *tsk, struct blk_plug *plug) | 2743 | if (plug == current->plug) |
| 2736 | { | 2744 | current->plug = NULL; |
| 2737 | __blk_finish_plug(tsk, plug); | ||
| 2738 | tsk->plug = plug; | ||
| 2739 | } | 2745 | } |
| 2740 | EXPORT_SYMBOL(__blk_flush_plug); | 2746 | EXPORT_SYMBOL(blk_finish_plug); |
| 2741 | 2747 | ||
| 2742 | int __init blk_dev_init(void) | 2748 | int __init blk_dev_init(void) |
| 2743 | { | 2749 | { |
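blk_flush_plug_list() now splices the plugged requests onto a private list first, so the task's plug is immediately empty and reusable, then sorts once and dispatches the batch per queue while counting how many requests each queue received (the depth passed to the unplug trace and callback). A toy sketch of that splice-then-drain flow, with sorting elided and hypothetical dispatch hooks:

#include <stddef.h>

struct req {
	struct req *next;
	int queue_id;
};

struct plug {
	struct req *head;	/* shared; may be refilled while we drain */
};

extern void dispatch(int queue_id, struct req *r);
extern void unplugged(int queue_id, unsigned int depth);

static void flush_plug(struct plug *p)
{
	struct req *batch = p->head;	/* "splice": take the whole list... */
	int cur = -1;
	unsigned int depth = 0;

	p->head = NULL;			/* ...and leave the plug empty */

	while (batch) {			/* assume batch is already sorted by queue */
		struct req *r = batch;

		batch = r->next;
		if (r->queue_id != cur) {
			if (cur >= 0)
				unplugged(cur, depth);	/* finish previous queue */
			cur = r->queue_id;
			depth = 0;
		}
		dispatch(cur, r);
		depth++;
	}
	if (cur >= 0)
		unplugged(cur, depth);
}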
diff --git a/block/blk-settings.c b/block/blk-settings.c index 1fa769293597..eb949045bb12 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c | |||
| @@ -790,6 +790,22 @@ void blk_queue_flush(struct request_queue *q, unsigned int flush) | |||
| 790 | } | 790 | } |
| 791 | EXPORT_SYMBOL_GPL(blk_queue_flush); | 791 | EXPORT_SYMBOL_GPL(blk_queue_flush); |
| 792 | 792 | ||
| 793 | /** | ||
| 794 | * blk_queue_unplugged - register a callback for an unplug event | ||
| 795 | * @q: the request queue for the device | ||
| 796 | * @fn: the function to call | ||
| 797 | * | ||
| 798 | * Some stacked drivers may need to know when IO is dispatched on an | ||
| 799 | * unplug event. By registering a callback here, they will be notified | ||
| 800 | * when someone flushes their on-stack queue plug. The function will be | ||
| 801 | * called with the queue lock held. | ||
| 802 | */ | ||
| 803 | void blk_queue_unplugged(struct request_queue *q, unplugged_fn *fn) | ||
| 804 | { | ||
| 805 | q->unplugged_fn = fn; | ||
| 806 | } | ||
| 807 | EXPORT_SYMBOL(blk_queue_unplugged); | ||
| 808 | |||
| 793 | static int __init blk_settings_init(void) | 809 | static int __init blk_settings_init(void) |
| 794 | { | 810 | { |
| 795 | blk_max_low_pfn = max_low_pfn - 1; | 811 | blk_max_low_pfn = max_low_pfn - 1; |
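A stacked driver would hook this once at queue setup; since the callback runs under the queue lock, it should only do lightweight work such as kicking its own dispatch. A hypothetical sketch, assuming unplugged_fn takes the queue pointer, as the q->unplugged_fn(q) call in queue_unplugged() above suggests:

#include <linux/kernel.h>
#include <linux/blkdev.h>

static void my_stacked_unplugged(struct request_queue *q)
{
	/* Called with q->queue_lock held: keep it short, e.g. wake a worker. */
	pr_debug("unplug event on queue %p\n", q);
}

static void my_stacked_init_queue(struct request_queue *q)
{
	blk_queue_unplugged(q, my_stacked_unplugged);	/* register once at init */
}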
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 261c75c665ae..6d735122bc59 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c | |||
| @@ -498,7 +498,6 @@ int blk_register_queue(struct gendisk *disk) | |||
| 498 | { | 498 | { |
| 499 | int ret; | 499 | int ret; |
| 500 | struct device *dev = disk_to_dev(disk); | 500 | struct device *dev = disk_to_dev(disk); |
| 501 | |||
| 502 | struct request_queue *q = disk->queue; | 501 | struct request_queue *q = disk->queue; |
| 503 | 502 | ||
| 504 | if (WARN_ON(!q)) | 503 | if (WARN_ON(!q)) |
| @@ -521,7 +520,7 @@ int blk_register_queue(struct gendisk *disk) | |||
| 521 | if (ret) { | 520 | if (ret) { |
| 522 | kobject_uevent(&q->kobj, KOBJ_REMOVE); | 521 | kobject_uevent(&q->kobj, KOBJ_REMOVE); |
| 523 | kobject_del(&q->kobj); | 522 | kobject_del(&q->kobj); |
| 524 | blk_trace_remove_sysfs(disk_to_dev(disk)); | 523 | blk_trace_remove_sysfs(dev); |
| 525 | kobject_put(&dev->kobj); | 524 | kobject_put(&dev->kobj); |
| 526 | return ret; | 525 | return ret; |
| 527 | } | 526 | } |
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index 821040503154..7025593a58c8 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c | |||
| @@ -214,7 +214,7 @@ static int amba_pm_resume_noirq(struct device *dev) | |||
| 214 | 214 | ||
| 215 | #endif /* !CONFIG_SUSPEND */ | 215 | #endif /* !CONFIG_SUSPEND */ |
| 216 | 216 | ||
| 217 | #ifdef CONFIG_HIBERNATION | 217 | #ifdef CONFIG_HIBERNATE_CALLBACKS |
| 218 | 218 | ||
| 219 | static int amba_pm_freeze(struct device *dev) | 219 | static int amba_pm_freeze(struct device *dev) |
| 220 | { | 220 | { |
| @@ -352,7 +352,7 @@ static int amba_pm_restore_noirq(struct device *dev) | |||
| 352 | return ret; | 352 | return ret; |
| 353 | } | 353 | } |
| 354 | 354 | ||
| 355 | #else /* !CONFIG_HIBERNATION */ | 355 | #else /* !CONFIG_HIBERNATE_CALLBACKS */ |
| 356 | 356 | ||
| 357 | #define amba_pm_freeze NULL | 357 | #define amba_pm_freeze NULL |
| 358 | #define amba_pm_thaw NULL | 358 | #define amba_pm_thaw NULL |
| @@ -363,7 +363,7 @@ static int amba_pm_restore_noirq(struct device *dev) | |||
| 363 | #define amba_pm_poweroff_noirq NULL | 363 | #define amba_pm_poweroff_noirq NULL |
| 364 | #define amba_pm_restore_noirq NULL | 364 | #define amba_pm_restore_noirq NULL |
| 365 | 365 | ||
| 366 | #endif /* !CONFIG_HIBERNATION */ | 366 | #endif /* !CONFIG_HIBERNATE_CALLBACKS */ |
| 367 | 367 | ||
| 368 | #ifdef CONFIG_PM | 368 | #ifdef CONFIG_PM |
| 369 | 369 | ||
diff --git a/drivers/base/platform.c b/drivers/base/platform.c index f051cfff18af..9e0e4fc24c46 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c | |||
| @@ -149,6 +149,7 @@ static void platform_device_release(struct device *dev) | |||
| 149 | 149 | ||
| 150 | of_device_node_put(&pa->pdev.dev); | 150 | of_device_node_put(&pa->pdev.dev); |
| 151 | kfree(pa->pdev.dev.platform_data); | 151 | kfree(pa->pdev.dev.platform_data); |
| 152 | kfree(pa->pdev.mfd_cell); | ||
| 152 | kfree(pa->pdev.resource); | 153 | kfree(pa->pdev.resource); |
| 153 | kfree(pa); | 154 | kfree(pa); |
| 154 | } | 155 | } |
| @@ -771,7 +772,7 @@ int __weak platform_pm_resume_noirq(struct device *dev) | |||
| 771 | 772 | ||
| 772 | #endif /* !CONFIG_SUSPEND */ | 773 | #endif /* !CONFIG_SUSPEND */ |
| 773 | 774 | ||
| 774 | #ifdef CONFIG_HIBERNATION | 775 | #ifdef CONFIG_HIBERNATE_CALLBACKS |
| 775 | 776 | ||
| 776 | static int platform_pm_freeze(struct device *dev) | 777 | static int platform_pm_freeze(struct device *dev) |
| 777 | { | 778 | { |
| @@ -909,7 +910,7 @@ static int platform_pm_restore_noirq(struct device *dev) | |||
| 909 | return ret; | 910 | return ret; |
| 910 | } | 911 | } |
| 911 | 912 | ||
| 912 | #else /* !CONFIG_HIBERNATION */ | 913 | #else /* !CONFIG_HIBERNATE_CALLBACKS */ |
| 913 | 914 | ||
| 914 | #define platform_pm_freeze NULL | 915 | #define platform_pm_freeze NULL |
| 915 | #define platform_pm_thaw NULL | 916 | #define platform_pm_thaw NULL |
| @@ -920,7 +921,7 @@ static int platform_pm_restore_noirq(struct device *dev) | |||
| 920 | #define platform_pm_poweroff_noirq NULL | 921 | #define platform_pm_poweroff_noirq NULL |
| 921 | #define platform_pm_restore_noirq NULL | 922 | #define platform_pm_restore_noirq NULL |
| 922 | 923 | ||
| 923 | #endif /* !CONFIG_HIBERNATION */ | 924 | #endif /* !CONFIG_HIBERNATE_CALLBACKS */ |
| 924 | 925 | ||
| 925 | #ifdef CONFIG_PM_RUNTIME | 926 | #ifdef CONFIG_PM_RUNTIME |
| 926 | 927 | ||
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 052dc53eef38..fbc5b6e7c591 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c | |||
| @@ -233,7 +233,7 @@ static int pm_op(struct device *dev, | |||
| 233 | } | 233 | } |
| 234 | break; | 234 | break; |
| 235 | #endif /* CONFIG_SUSPEND */ | 235 | #endif /* CONFIG_SUSPEND */ |
| 236 | #ifdef CONFIG_HIBERNATION | 236 | #ifdef CONFIG_HIBERNATE_CALLBACKS |
| 237 | case PM_EVENT_FREEZE: | 237 | case PM_EVENT_FREEZE: |
| 238 | case PM_EVENT_QUIESCE: | 238 | case PM_EVENT_QUIESCE: |
| 239 | if (ops->freeze) { | 239 | if (ops->freeze) { |
| @@ -260,7 +260,7 @@ static int pm_op(struct device *dev, | |||
| 260 | suspend_report_result(ops->restore, error); | 260 | suspend_report_result(ops->restore, error); |
| 261 | } | 261 | } |
| 262 | break; | 262 | break; |
| 263 | #endif /* CONFIG_HIBERNATION */ | 263 | #endif /* CONFIG_HIBERNATE_CALLBACKS */ |
| 264 | default: | 264 | default: |
| 265 | error = -EINVAL; | 265 | error = -EINVAL; |
| 266 | } | 266 | } |
| @@ -308,7 +308,7 @@ static int pm_noirq_op(struct device *dev, | |||
| 308 | } | 308 | } |
| 309 | break; | 309 | break; |
| 310 | #endif /* CONFIG_SUSPEND */ | 310 | #endif /* CONFIG_SUSPEND */ |
| 311 | #ifdef CONFIG_HIBERNATION | 311 | #ifdef CONFIG_HIBERNATE_CALLBACKS |
| 312 | case PM_EVENT_FREEZE: | 312 | case PM_EVENT_FREEZE: |
| 313 | case PM_EVENT_QUIESCE: | 313 | case PM_EVENT_QUIESCE: |
| 314 | if (ops->freeze_noirq) { | 314 | if (ops->freeze_noirq) { |
| @@ -335,7 +335,7 @@ static int pm_noirq_op(struct device *dev, | |||
| 335 | suspend_report_result(ops->restore_noirq, error); | 335 | suspend_report_result(ops->restore_noirq, error); |
| 336 | } | 336 | } |
| 337 | break; | 337 | break; |
| 338 | #endif /* CONFIG_HIBERNATION */ | 338 | #endif /* CONFIG_HIBERNATE_CALLBACKS */ |
| 339 | default: | 339 | default: |
| 340 | error = -EINVAL; | 340 | error = -EINVAL; |
| 341 | } | 341 | } |
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 6b396759e7f5..8a781540590c 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c | |||
| @@ -1448,7 +1448,7 @@ static const struct of_device_id fsldma_of_ids[] = { | |||
| 1448 | {} | 1448 | {} |
| 1449 | }; | 1449 | }; |
| 1450 | 1450 | ||
| 1451 | static struct of_platform_driver fsldma_of_driver = { | 1451 | static struct platform_driver fsldma_of_driver = { |
| 1452 | .driver = { | 1452 | .driver = { |
| 1453 | .name = "fsl-elo-dma", | 1453 | .name = "fsl-elo-dma", |
| 1454 | .owner = THIS_MODULE, | 1454 | .owner = THIS_MODULE, |
diff --git a/drivers/gpio/ml_ioh_gpio.c b/drivers/gpio/ml_ioh_gpio.c index 7f6f01a4b145..0a775f7987c2 100644 --- a/drivers/gpio/ml_ioh_gpio.c +++ b/drivers/gpio/ml_ioh_gpio.c | |||
| @@ -116,6 +116,7 @@ static int ioh_gpio_direction_output(struct gpio_chip *gpio, unsigned nr, | |||
| 116 | reg_val |= (1 << nr); | 116 | reg_val |= (1 << nr); |
| 117 | else | 117 | else |
| 118 | reg_val &= ~(1 << nr); | 118 | reg_val &= ~(1 << nr); |
| 119 | iowrite32(reg_val, &chip->reg->regs[chip->ch].po); | ||
| 119 | 120 | ||
| 120 | mutex_unlock(&chip->lock); | 121 | mutex_unlock(&chip->lock); |
| 121 | 122 | ||
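Both this fix and the pch_gpio one below are the same one-liner: the direction_output path computed the new output word but never wrote it back. The general read/modify/write-back shape, with hypothetical MMIO accessors:

#include <stdint.h>

extern uint32_t mmio_read32(volatile void *addr);
extern void mmio_write32(uint32_t val, volatile void *addr);

static int gpio_dir_out(volatile void *po_reg, unsigned int nr, int value)
{
	uint32_t val = mmio_read32(po_reg);

	if (value)
		val |= 1u << nr;
	else
		val &= ~(1u << nr);

	mmio_write32(val, po_reg);	/* the write-back the original code forgot */
	return 0;
}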
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c index 583e92592073..7630ab7b9bec 100644 --- a/drivers/gpio/pca953x.c +++ b/drivers/gpio/pca953x.c | |||
| @@ -558,7 +558,7 @@ static int __devinit pca953x_probe(struct i2c_client *client, | |||
| 558 | 558 | ||
| 559 | ret = gpiochip_add(&chip->gpio_chip); | 559 | ret = gpiochip_add(&chip->gpio_chip); |
| 560 | if (ret) | 560 | if (ret) |
| 561 | goto out_failed; | 561 | goto out_failed_irq; |
| 562 | 562 | ||
| 563 | if (pdata->setup) { | 563 | if (pdata->setup) { |
| 564 | ret = pdata->setup(client, chip->gpio_chip.base, | 564 | ret = pdata->setup(client, chip->gpio_chip.base, |
| @@ -570,8 +570,9 @@ static int __devinit pca953x_probe(struct i2c_client *client, | |||
| 570 | i2c_set_clientdata(client, chip); | 570 | i2c_set_clientdata(client, chip); |
| 571 | return 0; | 571 | return 0; |
| 572 | 572 | ||
| 573 | out_failed: | 573 | out_failed_irq: |
| 574 | pca953x_irq_teardown(chip); | 574 | pca953x_irq_teardown(chip); |
| 575 | out_failed: | ||
| 575 | kfree(chip->dyn_pdata); | 576 | kfree(chip->dyn_pdata); |
| 576 | kfree(chip); | 577 | kfree(chip); |
| 577 | return ret; | 578 | return ret; |
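The extra label makes the unwind symmetric with the setup order: once IRQ setup has succeeded, any later failure (here gpiochip_add) has to tear the IRQ state down as well before freeing the rest. A generic sketch of that layered goto unwind, with hypothetical names:

extern int setup_a(void);	/* e.g. allocate driver data */
extern int setup_b(void);	/* e.g. IRQ setup */
extern int setup_c(void);	/* e.g. gpiochip_add */
extern void undo_a(void);
extern void undo_b(void);

static int probe(void)
{
	int err;

	err = setup_a();
	if (err)
		return err;
	err = setup_b();
	if (err)
		goto out_a;
	err = setup_c();
	if (err)
		goto out_b;	/* must unwind b (the IRQ state) as well as a */
	return 0;

out_b:
	undo_b();
out_a:
	undo_a();
	return err;
}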
diff --git a/drivers/gpio/pch_gpio.c b/drivers/gpio/pch_gpio.c index 2c6af8705103..f970a5f3585e 100644 --- a/drivers/gpio/pch_gpio.c +++ b/drivers/gpio/pch_gpio.c | |||
| @@ -105,6 +105,7 @@ static int pch_gpio_direction_output(struct gpio_chip *gpio, unsigned nr, | |||
| 105 | reg_val |= (1 << nr); | 105 | reg_val |= (1 << nr); |
| 106 | else | 106 | else |
| 107 | reg_val &= ~(1 << nr); | 107 | reg_val &= ~(1 << nr); |
| 108 | iowrite32(reg_val, &chip->reg->po); | ||
| 108 | 109 | ||
| 109 | mutex_unlock(&chip->lock); | 110 | mutex_unlock(&chip->lock); |
| 110 | 111 | ||
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index a6feb78c404c..c58f691ec3ce 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig | |||
| @@ -96,6 +96,7 @@ config DRM_I915 | |||
| 96 | # i915 depends on ACPI_VIDEO when ACPI is enabled | 96 | # i915 depends on ACPI_VIDEO when ACPI is enabled |
| 97 | # but for select to work, need to select ACPI_VIDEO's dependencies, ick | 97 | # but for select to work, need to select ACPI_VIDEO's dependencies, ick |
| 98 | select BACKLIGHT_CLASS_DEVICE if ACPI | 98 | select BACKLIGHT_CLASS_DEVICE if ACPI |
| 99 | select VIDEO_OUTPUT_CONTROL if ACPI | ||
| 99 | select INPUT if ACPI | 100 | select INPUT if ACPI |
| 100 | select ACPI_VIDEO if ACPI | 101 | select ACPI_VIDEO if ACPI |
| 101 | select ACPI_BUTTON if ACPI | 102 | select ACPI_BUTTON if ACPI |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 8314a49b6b9a..90aef64b76f2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c | |||
| @@ -269,7 +269,7 @@ struct init_tbl_entry { | |||
| 269 | int (*handler)(struct nvbios *, uint16_t, struct init_exec *); | 269 | int (*handler)(struct nvbios *, uint16_t, struct init_exec *); |
| 270 | }; | 270 | }; |
| 271 | 271 | ||
| 272 | static int parse_init_table(struct nvbios *, unsigned int, struct init_exec *); | 272 | static int parse_init_table(struct nvbios *, uint16_t, struct init_exec *); |
| 273 | 273 | ||
| 274 | #define MACRO_INDEX_SIZE 2 | 274 | #define MACRO_INDEX_SIZE 2 |
| 275 | #define MACRO_SIZE 8 | 275 | #define MACRO_SIZE 8 |
| @@ -2011,6 +2011,27 @@ init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2011 | } | 2011 | } |
| 2012 | 2012 | ||
| 2013 | static int | 2013 | static int |
| 2014 | init_jump(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | ||
| 2015 | { | ||
| 2016 | /* | ||
| 2017 | * INIT_JUMP opcode: 0x5C ('\') | ||
| 2018 | * | ||
| 2019 | * offset (8 bit): opcode | ||
| 2020 | * offset + 1 (16 bit): offset (in bios) | ||
| 2021 | * | ||
| 2022 | * Continue execution of init table from 'offset' | ||
| 2023 | */ | ||
| 2024 | |||
| 2025 | uint16_t jmp_offset = ROM16(bios->data[offset + 1]); | ||
| 2026 | |||
| 2027 | if (!iexec->execute) | ||
| 2028 | return 3; | ||
| 2029 | |||
| 2030 | BIOSLOG(bios, "0x%04X: Jump to 0x%04X\n", offset, jmp_offset); | ||
| 2031 | return jmp_offset - offset; | ||
| 2032 | } | ||
| 2033 | |||
| 2034 | static int | ||
| 2014 | init_i2c_if(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 2035 | init_i2c_if(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 2015 | { | 2036 | { |
| 2016 | /* | 2037 | /* |
| @@ -3659,6 +3680,7 @@ static struct init_tbl_entry itbl_entry[] = { | |||
| 3659 | { "INIT_ZM_REG_SEQUENCE" , 0x58, init_zm_reg_sequence }, | 3680 | { "INIT_ZM_REG_SEQUENCE" , 0x58, init_zm_reg_sequence }, |
| 3660 | /* INIT_INDIRECT_REG (0x5A, 7, 0, 0) removed due to no example of use */ | 3681 | /* INIT_INDIRECT_REG (0x5A, 7, 0, 0) removed due to no example of use */ |
| 3661 | { "INIT_SUB_DIRECT" , 0x5B, init_sub_direct }, | 3682 | { "INIT_SUB_DIRECT" , 0x5B, init_sub_direct }, |
| 3683 | { "INIT_JUMP" , 0x5C, init_jump }, | ||
| 3662 | { "INIT_I2C_IF" , 0x5E, init_i2c_if }, | 3684 | { "INIT_I2C_IF" , 0x5E, init_i2c_if }, |
| 3663 | { "INIT_COPY_NV_REG" , 0x5F, init_copy_nv_reg }, | 3685 | { "INIT_COPY_NV_REG" , 0x5F, init_copy_nv_reg }, |
| 3664 | { "INIT_ZM_INDEX_IO" , 0x62, init_zm_index_io }, | 3686 | { "INIT_ZM_INDEX_IO" , 0x62, init_zm_index_io }, |
| @@ -3700,8 +3722,7 @@ static struct init_tbl_entry itbl_entry[] = { | |||
| 3700 | #define MAX_TABLE_OPS 1000 | 3722 | #define MAX_TABLE_OPS 1000 |
| 3701 | 3723 | ||
| 3702 | static int | 3724 | static int |
| 3703 | parse_init_table(struct nvbios *bios, unsigned int offset, | 3725 | parse_init_table(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 3704 | struct init_exec *iexec) | ||
| 3705 | { | 3726 | { |
| 3706 | /* | 3727 | /* |
| 3707 | * Parses all commands in an init table. | 3728 | * Parses all commands in an init table. |
| @@ -6333,6 +6354,32 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf) | |||
| 6333 | } | 6354 | } |
| 6334 | } | 6355 | } |
| 6335 | 6356 | ||
| 6357 | /* XFX GT-240X-YA | ||
| 6358 | * | ||
| 6359 | * So many things are wrong here, so replace the entire encoder table. | ||
| 6360 | */ | ||
| 6361 | if (nv_match_device(dev, 0x0ca3, 0x1682, 0x3003)) { | ||
| 6362 | if (idx == 0) { | ||
| 6363 | *conn = 0x02001300; /* VGA, connector 1 */ | ||
| 6364 | *conf = 0x00000028; | ||
| 6365 | } else | ||
| 6366 | if (idx == 1) { | ||
| 6367 | *conn = 0x01010312; /* DVI, connector 0 */ | ||
| 6368 | *conf = 0x00020030; | ||
| 6369 | } else | ||
| 6370 | if (idx == 2) { | ||
| 6371 | *conn = 0x01010310; /* VGA, connector 0 */ | ||
| 6372 | *conf = 0x00000028; | ||
| 6373 | } else | ||
| 6374 | if (idx == 3) { | ||
| 6375 | *conn = 0x02022362; /* HDMI, connector 2 */ | ||
| 6376 | *conf = 0x00020010; | ||
| 6377 | } else { | ||
| 6378 | *conn = 0x0000000e; /* EOL */ | ||
| 6379 | *conf = 0x00000000; | ||
| 6380 | } | ||
| 6381 | } | ||
| 6382 | |||
| 6336 | return true; | 6383 | return true; |
| 6337 | } | 6384 | } |
| 6338 | 6385 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 57e5302503db..856d56a98d1e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
| @@ -1190,7 +1190,7 @@ extern int nv50_graph_load_context(struct nouveau_channel *); | |||
| 1190 | extern int nv50_graph_unload_context(struct drm_device *); | 1190 | extern int nv50_graph_unload_context(struct drm_device *); |
| 1191 | extern int nv50_grctx_init(struct nouveau_grctx *); | 1191 | extern int nv50_grctx_init(struct nouveau_grctx *); |
| 1192 | extern void nv50_graph_tlb_flush(struct drm_device *dev); | 1192 | extern void nv50_graph_tlb_flush(struct drm_device *dev); |
| 1193 | extern void nv86_graph_tlb_flush(struct drm_device *dev); | 1193 | extern void nv84_graph_tlb_flush(struct drm_device *dev); |
| 1194 | extern struct nouveau_enum nv50_data_error_names[]; | 1194 | extern struct nouveau_enum nv50_data_error_names[]; |
| 1195 | 1195 | ||
| 1196 | /* nvc0_graph.c */ | 1196 | /* nvc0_graph.c */ |
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 2683377f4131..78f467fe30be 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c | |||
| @@ -552,6 +552,7 @@ nouveau_mem_timing_init(struct drm_device *dev) | |||
| 552 | u8 tRC; /* Byte 9 */ | 552 | u8 tRC; /* Byte 9 */ |
| 553 | u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14; | 553 | u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14; |
| 554 | u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21; | 554 | u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21; |
| 555 | u8 magic_number = 0; /* Yeah... sorry */ | ||
| 555 | u8 *mem = NULL, *entry; | 556 | u8 *mem = NULL, *entry; |
| 556 | int i, recordlen, entries; | 557 | int i, recordlen, entries; |
| 557 | 558 | ||
| @@ -596,6 +597,12 @@ nouveau_mem_timing_init(struct drm_device *dev) | |||
| 596 | if (!memtimings->timing) | 597 | if (!memtimings->timing) |
| 597 | return; | 598 | return; |
| 598 | 599 | ||
| 600 | /* Get "some number" from the timing reg for NV_40 | ||
| 601 | * Used in calculations later */ | ||
| 602 | if(dev_priv->card_type == NV_40) { | ||
| 603 | magic_number = (nv_rd32(dev,0x100228) & 0x0f000000) >> 24; | ||
| 604 | } | ||
| 605 | |||
| 599 | entry = mem + mem[1]; | 606 | entry = mem + mem[1]; |
| 600 | for (i = 0; i < entries; i++, entry += recordlen) { | 607 | for (i = 0; i < entries; i++, entry += recordlen) { |
| 601 | struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i]; | 608 | struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i]; |
| @@ -635,36 +642,51 @@ nouveau_mem_timing_init(struct drm_device *dev) | |||
| 635 | 642 | ||
| 636 | /* XXX: I don't trust the -1's and +1's... they must come | 643 | /* XXX: I don't trust the -1's and +1's... they must come |
| 637 | * from somewhere! */ | 644 | * from somewhere! */ |
| 638 | timing->reg_100224 = ((tUNK_0 + tUNK_19 + 1) << 24 | | 645 | timing->reg_100224 = (tUNK_0 + tUNK_19 + 1 + magic_number) << 24 | |
| 639 | tUNK_18 << 16 | | 646 | tUNK_18 << 16 | |
| 640 | (tUNK_1 + tUNK_19 + 1) << 8 | | 647 | (tUNK_1 + tUNK_19 + 1 + magic_number) << 8; |
| 641 | (tUNK_2 - 1)); | 648 | if(dev_priv->chipset == 0xa8) { |
| 649 | timing->reg_100224 |= (tUNK_2 - 1); | ||
| 650 | } else { | ||
| 651 | timing->reg_100224 |= (tUNK_2 + 2 - magic_number); | ||
| 652 | } | ||
| 642 | 653 | ||
| 643 | timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10); | 654 | timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10); |
| 644 | if(recordlen > 19) { | 655 | if(dev_priv->chipset >= 0xa3 && dev_priv->chipset < 0xaa) { |
| 645 | timing->reg_100228 += (tUNK_19 - 1) << 24; | 656 | timing->reg_100228 |= (tUNK_19 - 1) << 24; |
| 646 | }/* I cannot back-up this else-statement right now | 657 | } |
| 647 | else { | 658 | |
| 648 | timing->reg_100228 += tUNK_12 << 24; | 659 | if(dev_priv->card_type == NV_40) { |
| 649 | }*/ | 660 | /* NV40: don't know what the rest of the regs are.. |
| 650 | 661 | * And don't need to know either */ | |
| 651 | /* XXX: reg_10022c */ | 662 | timing->reg_100228 |= 0x20200000 | magic_number << 24; |
| 652 | timing->reg_10022c = tUNK_2 - 1; | 663 | } else if(dev_priv->card_type >= NV_50) { |
| 653 | 664 | /* XXX: reg_10022c */ | |
| 654 | timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 | | 665 | timing->reg_10022c = tUNK_2 - 1; |
| 655 | tUNK_13 << 8 | tUNK_13); | 666 | |
| 656 | 667 | timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 | | |
| 657 | /* XXX: +6? */ | 668 | tUNK_13 << 8 | tUNK_13); |
| 658 | timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC); | 669 | |
| 659 | timing->reg_100234 += max(tUNK_10,tUNK_11) << 16; | 670 | timing->reg_100234 = (tRAS << 24 | tRC); |
| 660 | 671 | timing->reg_100234 += max(tUNK_10,tUNK_11) << 16; | |
| 661 | /* XXX; reg_100238, reg_10023c | 672 | |
| 662 | * reg: 0x00?????? | 673 | if(dev_priv->chipset < 0xa3) { |
| 663 | * reg_10023c: | 674 | timing->reg_100234 |= (tUNK_2 + 2) << 8; |
| 664 | * 0 for pre-NV50 cards | 675 | } else { |
| 665 | * 0x????0202 for NV50+ cards (empirical evidence) */ | 676 | /* XXX: +6? */ |
| 666 | if(dev_priv->card_type >= NV_50) { | 677 | timing->reg_100234 |= (tUNK_19 + 6) << 8; |
| 678 | } | ||
| 679 | |||
| 680 | /* XXX; reg_100238, reg_10023c | ||
| 681 | * reg_100238: 0x00?????? | ||
| 682 | * reg_10023c: 0x!!??0202 for NV50+ cards (empirical evidence) */ | ||
| 667 | timing->reg_10023c = 0x202; | 683 | timing->reg_10023c = 0x202; |
| 684 | if(dev_priv->chipset < 0xa3) { | ||
| 685 | timing->reg_10023c |= 0x4000000 | (tUNK_2 - 1) << 16; | ||
| 686 | } else { | ||
| 687 | /* currently unknown | ||
| 688 | * 10023c seen as 06xxxxxx, 0bxxxxxx or 0fxxxxxx */ | ||
| 689 | } | ||
| 668 | } | 690 | } |
| 669 | 691 | ||
| 670 | NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i, | 692 | NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i, |
| @@ -675,7 +697,7 @@ nouveau_mem_timing_init(struct drm_device *dev) | |||
| 675 | timing->reg_100238, timing->reg_10023c); | 697 | timing->reg_100238, timing->reg_10023c); |
| 676 | } | 698 | } |
| 677 | 699 | ||
| 678 | memtimings->nr_timing = entries; | 700 | memtimings->nr_timing = entries; |
| 679 | memtimings->supported = true; | 701 | memtimings->supported = true; |
| 680 | } | 702 | } |
| 681 | 703 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c index ac62a1b8c4fc..670e3cb697ec 100644 --- a/drivers/gpu/drm/nouveau/nouveau_perf.c +++ b/drivers/gpu/drm/nouveau/nouveau_perf.c | |||
| @@ -134,7 +134,7 @@ nouveau_perf_init(struct drm_device *dev) | |||
| 134 | case 0x13: | 134 | case 0x13: |
| 135 | case 0x15: | 135 | case 0x15: |
| 136 | perflvl->fanspeed = entry[55]; | 136 | perflvl->fanspeed = entry[55]; |
| 137 | perflvl->voltage = entry[56]; | 137 | perflvl->voltage = (recordlen > 56) ? entry[56] : 0; |
| 138 | perflvl->core = ROM32(entry[1]) * 10; | 138 | perflvl->core = ROM32(entry[1]) * 10; |
| 139 | perflvl->memory = ROM32(entry[5]) * 20; | 139 | perflvl->memory = ROM32(entry[5]) * 20; |
| 140 | break; | 140 | break; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 5bb2859001e2..6e2b1a6caa2d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c | |||
| @@ -376,15 +376,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) | |||
| 376 | engine->graph.destroy_context = nv50_graph_destroy_context; | 376 | engine->graph.destroy_context = nv50_graph_destroy_context; |
| 377 | engine->graph.load_context = nv50_graph_load_context; | 377 | engine->graph.load_context = nv50_graph_load_context; |
| 378 | engine->graph.unload_context = nv50_graph_unload_context; | 378 | engine->graph.unload_context = nv50_graph_unload_context; |
| 379 | if (dev_priv->chipset != 0x86) | 379 | if (dev_priv->chipset == 0x50 || |
| 380 | dev_priv->chipset == 0xac) | ||
| 380 | engine->graph.tlb_flush = nv50_graph_tlb_flush; | 381 | engine->graph.tlb_flush = nv50_graph_tlb_flush; |
| 381 | else { | 382 | else |
| 382 | /* from what i can see nvidia do this on every | 383 | engine->graph.tlb_flush = nv84_graph_tlb_flush; |
| 383 | * pre-NVA3 board except NVAC, but, we've only | ||
| 384 | * ever seen problems on NV86 | ||
| 385 | */ | ||
| 386 | engine->graph.tlb_flush = nv86_graph_tlb_flush; | ||
| 387 | } | ||
| 388 | engine->fifo.channels = 128; | 384 | engine->fifo.channels = 128; |
| 389 | engine->fifo.init = nv50_fifo_init; | 385 | engine->fifo.init = nv50_fifo_init; |
| 390 | engine->fifo.takedown = nv50_fifo_takedown; | 386 | engine->fifo.takedown = nv50_fifo_takedown; |
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c index c82db37d9f41..12098bf839c4 100644 --- a/drivers/gpu/drm/nouveau/nv04_dfp.c +++ b/drivers/gpu/drm/nouveau/nv04_dfp.c | |||
| @@ -581,12 +581,13 @@ static void nv04_dfp_restore(struct drm_encoder *encoder) | |||
| 581 | int head = nv_encoder->restore.head; | 581 | int head = nv_encoder->restore.head; |
| 582 | 582 | ||
| 583 | if (nv_encoder->dcb->type == OUTPUT_LVDS) { | 583 | if (nv_encoder->dcb->type == OUTPUT_LVDS) { |
| 584 | struct drm_display_mode *native_mode = nouveau_encoder_connector_get(nv_encoder)->native_mode; | 584 | struct nouveau_connector *connector = |
| 585 | if (native_mode) | 585 | nouveau_encoder_connector_get(nv_encoder); |
| 586 | call_lvds_script(dev, nv_encoder->dcb, head, LVDS_PANEL_ON, | 586 | |
| 587 | native_mode->clock); | 587 | if (connector && connector->native_mode) |
| 588 | else | 588 | call_lvds_script(dev, nv_encoder->dcb, head, |
| 589 | NV_ERROR(dev, "Not restoring LVDS without native mode\n"); | 589 | LVDS_PANEL_ON, |
| 590 | connector->native_mode->clock); | ||
| 590 | 591 | ||
| 591 | } else if (nv_encoder->dcb->type == OUTPUT_TMDS) { | 592 | } else if (nv_encoder->dcb->type == OUTPUT_TMDS) { |
| 592 | int clock = nouveau_hw_pllvals_to_clk | 593 | int clock = nouveau_hw_pllvals_to_clk |
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index 2b9984027f41..a19ccaa025b3 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c | |||
| @@ -469,9 +469,6 @@ nv50_crtc_wait_complete(struct drm_crtc *crtc) | |||
| 469 | 469 | ||
| 470 | start = ptimer->read(dev); | 470 | start = ptimer->read(dev); |
| 471 | do { | 471 | do { |
| 472 | nv_wr32(dev, 0x61002c, 0x370); | ||
| 473 | nv_wr32(dev, 0x000140, 1); | ||
| 474 | |||
| 475 | if (nv_ro32(disp->ntfy, 0x000)) | 472 | if (nv_ro32(disp->ntfy, 0x000)) |
| 476 | return 0; | 473 | return 0; |
| 477 | } while (ptimer->read(dev) - start < 2000000000ULL); | 474 | } while (ptimer->read(dev) - start < 2000000000ULL); |
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c index a2cfaa691e9b..c8e83c1a4de8 100644 --- a/drivers/gpu/drm/nouveau/nv50_evo.c +++ b/drivers/gpu/drm/nouveau/nv50_evo.c | |||
| @@ -186,6 +186,7 @@ nv50_evo_channel_init(struct nouveau_channel *evo) | |||
| 186 | nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id); | 186 | nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id); |
| 187 | 187 | ||
| 188 | evo->dma.max = (4096/4) - 2; | 188 | evo->dma.max = (4096/4) - 2; |
| 189 | evo->dma.max &= ~7; | ||
| 189 | evo->dma.put = 0; | 190 | evo->dma.put = 0; |
| 190 | evo->dma.cur = evo->dma.put; | 191 | evo->dma.cur = evo->dma.put; |
| 191 | evo->dma.free = evo->dma.max - evo->dma.cur; | 192 | evo->dma.free = evo->dma.max - evo->dma.cur; |
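The new "evo->dma.max &= ~7" line rounds the usable EVO push buffer size down to a multiple of eight 32-bit words. A standalone arithmetic check of what the mask does to the value computed just above it (plain C, illustrative only, not driver code):

    #include <stdio.h>

    int main(void)
    {
        unsigned int max = (4096 / 4) - 2;  /* 1022, as computed in the hunk above */

        max &= ~7u;                         /* clear the low three bits: round down to a multiple of 8 */
        printf("dma.max = %u\n", max);      /* prints 1016 */
        return 0;
    }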
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index 8675b00caf18..b02a5b1e7d37 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c | |||
| @@ -503,7 +503,7 @@ nv50_graph_tlb_flush(struct drm_device *dev) | |||
| 503 | } | 503 | } |
| 504 | 504 | ||
| 505 | void | 505 | void |
| 506 | nv86_graph_tlb_flush(struct drm_device *dev) | 506 | nv84_graph_tlb_flush(struct drm_device *dev) |
| 507 | { | 507 | { |
| 508 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 508 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 509 | struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; | 509 | struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; |
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c index 69af0ba7edd3..a0a2a0277f73 100644 --- a/drivers/gpu/drm/nouveau/nvc0_vm.c +++ b/drivers/gpu/drm/nouveau/nvc0_vm.c | |||
| @@ -104,20 +104,26 @@ nvc0_vm_flush(struct nouveau_vm *vm) | |||
| 104 | struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; | 104 | struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; |
| 105 | struct drm_device *dev = vm->dev; | 105 | struct drm_device *dev = vm->dev; |
| 106 | struct nouveau_vm_pgd *vpgd; | 106 | struct nouveau_vm_pgd *vpgd; |
| 107 | u32 r100c80, engine; | 107 | u32 engine = (dev_priv->chan_vm == vm) ? 1 : 5; |
| 108 | 108 | ||
| 109 | pinstmem->flush(vm->dev); | 109 | pinstmem->flush(vm->dev); |
| 110 | 110 | ||
| 111 | if (vm == dev_priv->chan_vm) | 111 | spin_lock(&dev_priv->ramin_lock); |
| 112 | engine = 1; | ||
| 113 | else | ||
| 114 | engine = 5; | ||
| 115 | |||
| 116 | list_for_each_entry(vpgd, &vm->pgd_list, head) { | 112 | list_for_each_entry(vpgd, &vm->pgd_list, head) { |
| 117 | r100c80 = nv_rd32(dev, 0x100c80); | 113 | /* looks like maybe a "free flush slots" counter, the |
| 114 | * faster you write to 0x100cbc the more it decreases | ||
| 115 | */ | ||
| 116 | if (!nv_wait_ne(dev, 0x100c80, 0x00ff0000, 0x00000000)) { | ||
| 117 | NV_ERROR(dev, "vm timeout 0: 0x%08x %d\n", | ||
| 118 | nv_rd32(dev, 0x100c80), engine); | ||
| 119 | } | ||
| 118 | nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8); | 120 | nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8); |
| 119 | nv_wr32(dev, 0x100cbc, 0x80000000 | engine); | 121 | nv_wr32(dev, 0x100cbc, 0x80000000 | engine); |
| 120 | if (!nv_wait(dev, 0x100c80, 0xffffffff, r100c80)) | 122 | /* wait for flush to be queued? */ |
| 121 | NV_ERROR(dev, "vm flush timeout eng %d\n", engine); | 123 | if (!nv_wait(dev, 0x100c80, 0x00008000, 0x00008000)) { |
| 124 | NV_ERROR(dev, "vm timeout 1: 0x%08x %d\n", | ||
| 125 | nv_rd32(dev, 0x100c80), engine); | ||
| 126 | } | ||
| 122 | } | 127 | } |
| 128 | spin_unlock(&dev_priv->ramin_lock); | ||
| 123 | } | 129 | } |
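The rewritten nvc0_vm_flush() serialises flushes with ramin_lock and replaces the old "wait until 0x100c80 returns to its previous value" heuristic with two explicit waits: nv_wait_ne() on the 0x00ff0000 field before kicking a flush (a free slot, going by the comment in the hunk) and nv_wait() on bit 0x00008000 afterwards. A small self-contained model of what such masked register waits mean (plain C; mmio_read32() is a made-up stand-in, not a nouveau helper, and the canned value is arbitrary):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for an MMIO read; returns a canned value here. */
    static uint32_t mmio_read32(uint32_t reg)
    {
        (void)reg;
        return 0x00018000;  /* pretend: non-zero slot count (bits 23:16) and bit 15 set */
    }

    /* Poll until (reg & mask) == val, the behaviour implied by nv_wait(). */
    static bool wait_eq(uint32_t reg, uint32_t mask, uint32_t val, unsigned int tries)
    {
        while (tries--)
            if ((mmio_read32(reg) & mask) == val)
                return true;
        return false;
    }

    /* Poll until (reg & mask) != val, the behaviour implied by nv_wait_ne(). */
    static bool wait_ne(uint32_t reg, uint32_t mask, uint32_t val, unsigned int tries)
    {
        while (tries--)
            if ((mmio_read32(reg) & mask) != val)
                return true;
        return false;
    }

    int main(void)
    {
        printf("slot free:    %d\n", wait_ne(0x100c80, 0x00ff0000, 0x00000000, 1000));
        printf("flush queued: %d\n", wait_eq(0x100c80, 0x00008000, 0x00008000, 1000));
        return 0;
    }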
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index 258fa5e7a2d9..d71d375149f8 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | #include "atom.h" | 32 | #include "atom.h" |
| 33 | #include "atom-names.h" | 33 | #include "atom-names.h" |
| 34 | #include "atom-bits.h" | 34 | #include "atom-bits.h" |
| 35 | #include "radeon.h" | ||
| 35 | 36 | ||
| 36 | #define ATOM_COND_ABOVE 0 | 37 | #define ATOM_COND_ABOVE 0 |
| 37 | #define ATOM_COND_ABOVEOREQUAL 1 | 38 | #define ATOM_COND_ABOVEOREQUAL 1 |
| @@ -101,7 +102,9 @@ static void debug_print_spaces(int n) | |||
| 101 | static uint32_t atom_iio_execute(struct atom_context *ctx, int base, | 102 | static uint32_t atom_iio_execute(struct atom_context *ctx, int base, |
| 102 | uint32_t index, uint32_t data) | 103 | uint32_t index, uint32_t data) |
| 103 | { | 104 | { |
| 105 | struct radeon_device *rdev = ctx->card->dev->dev_private; | ||
| 104 | uint32_t temp = 0xCDCDCDCD; | 106 | uint32_t temp = 0xCDCDCDCD; |
| 107 | |||
| 105 | while (1) | 108 | while (1) |
| 106 | switch (CU8(base)) { | 109 | switch (CU8(base)) { |
| 107 | case ATOM_IIO_NOP: | 110 | case ATOM_IIO_NOP: |
| @@ -112,7 +115,8 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base, | |||
| 112 | base += 3; | 115 | base += 3; |
| 113 | break; | 116 | break; |
| 114 | case ATOM_IIO_WRITE: | 117 | case ATOM_IIO_WRITE: |
| 115 | (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1)); | 118 | if (rdev->family == CHIP_RV515) |
| 119 | (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1)); | ||
| 116 | ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp); | 120 | ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp); |
| 117 | base += 3; | 121 | base += 3; |
| 118 | break; | 122 | break; |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index b41ec59c7100..9d516a8c4dfa 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
| @@ -531,6 +531,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
| 531 | pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; | 531 | pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; |
| 532 | else | 532 | else |
| 533 | pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; | 533 | pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; |
| 534 | |||
| 535 | if ((rdev->family == CHIP_R600) || | ||
| 536 | (rdev->family == CHIP_RV610) || | ||
| 537 | (rdev->family == CHIP_RV630) || | ||
| 538 | (rdev->family == CHIP_RV670)) | ||
| 539 | pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; | ||
| 534 | } else { | 540 | } else { |
| 535 | pll->flags |= RADEON_PLL_LEGACY; | 541 | pll->flags |= RADEON_PLL_LEGACY; |
| 536 | 542 | ||
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 0b0cc74c08c0..3453910ee0f3 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
| @@ -120,11 +120,16 @@ void evergreen_pm_misc(struct radeon_device *rdev) | |||
| 120 | struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx]; | 120 | struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx]; |
| 121 | struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; | 121 | struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; |
| 122 | 122 | ||
| 123 | if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { | 123 | if (voltage->type == VOLTAGE_SW) { |
| 124 | if (voltage->voltage != rdev->pm.current_vddc) { | 124 | if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) { |
| 125 | radeon_atom_set_voltage(rdev, voltage->voltage); | 125 | radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC); |
| 126 | rdev->pm.current_vddc = voltage->voltage; | 126 | rdev->pm.current_vddc = voltage->voltage; |
| 127 | DRM_DEBUG("Setting: v: %d\n", voltage->voltage); | 127 | DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage); |
| 128 | } | ||
| 129 | if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) { | ||
| 130 | radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI); | ||
| 131 | rdev->pm.current_vddci = voltage->vddci; | ||
| 132 | DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci); | ||
| 128 | } | 133 | } |
| 129 | } | 134 | } |
| 130 | } | 135 | } |
| @@ -3036,9 +3041,6 @@ int evergreen_init(struct radeon_device *rdev) | |||
| 3036 | { | 3041 | { |
| 3037 | int r; | 3042 | int r; |
| 3038 | 3043 | ||
| 3039 | r = radeon_dummy_page_init(rdev); | ||
| 3040 | if (r) | ||
| 3041 | return r; | ||
| 3042 | /* This don't do much */ | 3044 | /* This don't do much */ |
| 3043 | r = radeon_gem_init(rdev); | 3045 | r = radeon_gem_init(rdev); |
| 3044 | if (r) | 3046 | if (r) |
| @@ -3150,7 +3152,6 @@ void evergreen_fini(struct radeon_device *rdev) | |||
| 3150 | radeon_atombios_fini(rdev); | 3152 | radeon_atombios_fini(rdev); |
| 3151 | kfree(rdev->bios); | 3153 | kfree(rdev->bios); |
| 3152 | rdev->bios = NULL; | 3154 | rdev->bios = NULL; |
| 3153 | radeon_dummy_page_fini(rdev); | ||
| 3154 | } | 3155 | } |
| 3155 | 3156 | ||
| 3156 | static void evergreen_pcie_gen2_enable(struct radeon_device *rdev) | 3157 | static void evergreen_pcie_gen2_enable(struct radeon_device *rdev) |
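evergreen_pm_misc() now programs VDDC and VDDCI as independent rails, and in both cases only touches the hardware when the requested value is non-zero and differs from the cached current value. A hedged sketch of that guard with the repetition factored out (set_rail() is a hypothetical helper; radeon_atom_set_voltage() and the SET_VOLTAGE_TYPE_* constants are the ones used in the hunks above):

    /* Hypothetical helper: cache the last programmed level per rail and skip
     * redundant or zero requests. */
    static void set_rail(struct radeon_device *rdev, u16 requested,
                         u16 *current_level, u8 voltage_type)
    {
        if (requested && requested != *current_level) {
            radeon_atom_set_voltage(rdev, requested, voltage_type);
            *current_level = requested;
        }
    }

    /* Usage mirroring the evergreen path:
     *   set_rail(rdev, voltage->voltage, &rdev->pm.current_vddc,  SET_VOLTAGE_TYPE_ASIC_VDDC);
     *   set_rail(rdev, voltage->vddci,   &rdev->pm.current_vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
     */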
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index be271c42de4d..15d58292677a 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
| @@ -587,7 +587,7 @@ void r600_pm_misc(struct radeon_device *rdev) | |||
| 587 | 587 | ||
| 588 | if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { | 588 | if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { |
| 589 | if (voltage->voltage != rdev->pm.current_vddc) { | 589 | if (voltage->voltage != rdev->pm.current_vddc) { |
| 590 | radeon_atom_set_voltage(rdev, voltage->voltage); | 590 | radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC); |
| 591 | rdev->pm.current_vddc = voltage->voltage; | 591 | rdev->pm.current_vddc = voltage->voltage; |
| 592 | DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage); | 592 | DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage); |
| 593 | } | 593 | } |
| @@ -2509,9 +2509,6 @@ int r600_init(struct radeon_device *rdev) | |||
| 2509 | { | 2509 | { |
| 2510 | int r; | 2510 | int r; |
| 2511 | 2511 | ||
| 2512 | r = radeon_dummy_page_init(rdev); | ||
| 2513 | if (r) | ||
| 2514 | return r; | ||
| 2515 | if (r600_debugfs_mc_info_init(rdev)) { | 2512 | if (r600_debugfs_mc_info_init(rdev)) { |
| 2516 | DRM_ERROR("Failed to register debugfs file for mc !\n"); | 2513 | DRM_ERROR("Failed to register debugfs file for mc !\n"); |
| 2517 | } | 2514 | } |
| @@ -2625,7 +2622,6 @@ void r600_fini(struct radeon_device *rdev) | |||
| 2625 | radeon_atombios_fini(rdev); | 2622 | radeon_atombios_fini(rdev); |
| 2626 | kfree(rdev->bios); | 2623 | kfree(rdev->bios); |
| 2627 | rdev->bios = NULL; | 2624 | rdev->bios = NULL; |
| 2628 | radeon_dummy_page_fini(rdev); | ||
| 2629 | } | 2625 | } |
| 2630 | 2626 | ||
| 2631 | 2627 | ||
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 93f536594c73..ba643b576054 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
| @@ -177,7 +177,7 @@ void radeon_pm_suspend(struct radeon_device *rdev); | |||
| 177 | void radeon_pm_resume(struct radeon_device *rdev); | 177 | void radeon_pm_resume(struct radeon_device *rdev); |
| 178 | void radeon_combios_get_power_modes(struct radeon_device *rdev); | 178 | void radeon_combios_get_power_modes(struct radeon_device *rdev); |
| 179 | void radeon_atombios_get_power_modes(struct radeon_device *rdev); | 179 | void radeon_atombios_get_power_modes(struct radeon_device *rdev); |
| 180 | void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level); | 180 | void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type); |
| 181 | void rs690_pm_info(struct radeon_device *rdev); | 181 | void rs690_pm_info(struct radeon_device *rdev); |
| 182 | extern int rv6xx_get_temp(struct radeon_device *rdev); | 182 | extern int rv6xx_get_temp(struct radeon_device *rdev); |
| 183 | extern int rv770_get_temp(struct radeon_device *rdev); | 183 | extern int rv770_get_temp(struct radeon_device *rdev); |
| @@ -767,7 +767,9 @@ struct radeon_voltage { | |||
| 767 | u8 vddci_id; /* index into vddci voltage table */ | 767 | u8 vddci_id; /* index into vddci voltage table */ |
| 768 | bool vddci_enabled; | 768 | bool vddci_enabled; |
| 769 | /* r6xx+ sw */ | 769 | /* r6xx+ sw */ |
| 770 | u32 voltage; | 770 | u16 voltage; |
| 771 | /* evergreen+ vddci */ | ||
| 772 | u16 vddci; | ||
| 771 | }; | 773 | }; |
| 772 | 774 | ||
| 773 | /* clock mode flags */ | 775 | /* clock mode flags */ |
| @@ -835,10 +837,12 @@ struct radeon_pm { | |||
| 835 | int default_power_state_index; | 837 | int default_power_state_index; |
| 836 | u32 current_sclk; | 838 | u32 current_sclk; |
| 837 | u32 current_mclk; | 839 | u32 current_mclk; |
| 838 | u32 current_vddc; | 840 | u16 current_vddc; |
| 841 | u16 current_vddci; | ||
| 839 | u32 default_sclk; | 842 | u32 default_sclk; |
| 840 | u32 default_mclk; | 843 | u32 default_mclk; |
| 841 | u32 default_vddc; | 844 | u16 default_vddc; |
| 845 | u16 default_vddci; | ||
| 842 | struct radeon_i2c_chan *i2c_bus; | 846 | struct radeon_i2c_chan *i2c_bus; |
| 843 | /* selected pm method */ | 847 | /* selected pm method */ |
| 844 | enum radeon_pm_method pm_method; | 848 | enum radeon_pm_method pm_method; |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index eb888ee5f674..ca576191d058 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
| @@ -94,7 +94,7 @@ static void radeon_register_accessor_init(struct radeon_device *rdev) | |||
| 94 | rdev->mc_rreg = &rs600_mc_rreg; | 94 | rdev->mc_rreg = &rs600_mc_rreg; |
| 95 | rdev->mc_wreg = &rs600_mc_wreg; | 95 | rdev->mc_wreg = &rs600_mc_wreg; |
| 96 | } | 96 | } |
| 97 | if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_HEMLOCK)) { | 97 | if (rdev->family >= CHIP_R600) { |
| 98 | rdev->pciep_rreg = &r600_pciep_rreg; | 98 | rdev->pciep_rreg = &r600_pciep_rreg; |
| 99 | rdev->pciep_wreg = &r600_pciep_wreg; | 99 | rdev->pciep_wreg = &r600_pciep_wreg; |
| 100 | } | 100 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 99768d9d91da..f5d12fb103fa 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
| @@ -2176,24 +2176,27 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r | |||
| 2176 | } | 2176 | } |
| 2177 | } | 2177 | } |
| 2178 | 2178 | ||
| 2179 | static u16 radeon_atombios_get_default_vddc(struct radeon_device *rdev) | 2179 | static void radeon_atombios_get_default_voltages(struct radeon_device *rdev, |
| 2180 | u16 *vddc, u16 *vddci) | ||
| 2180 | { | 2181 | { |
| 2181 | struct radeon_mode_info *mode_info = &rdev->mode_info; | 2182 | struct radeon_mode_info *mode_info = &rdev->mode_info; |
| 2182 | int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); | 2183 | int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); |
| 2183 | u8 frev, crev; | 2184 | u8 frev, crev; |
| 2184 | u16 data_offset; | 2185 | u16 data_offset; |
| 2185 | union firmware_info *firmware_info; | 2186 | union firmware_info *firmware_info; |
| 2186 | u16 vddc = 0; | 2187 | |
| 2188 | *vddc = 0; | ||
| 2189 | *vddci = 0; | ||
| 2187 | 2190 | ||
| 2188 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, | 2191 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, |
| 2189 | &frev, &crev, &data_offset)) { | 2192 | &frev, &crev, &data_offset)) { |
| 2190 | firmware_info = | 2193 | firmware_info = |
| 2191 | (union firmware_info *)(mode_info->atom_context->bios + | 2194 | (union firmware_info *)(mode_info->atom_context->bios + |
| 2192 | data_offset); | 2195 | data_offset); |
| 2193 | vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage); | 2196 | *vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage); |
| 2197 | if ((frev == 2) && (crev >= 2)) | ||
| 2198 | *vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage); | ||
| 2194 | } | 2199 | } |
| 2195 | |||
| 2196 | return vddc; | ||
| 2197 | } | 2200 | } |
| 2198 | 2201 | ||
| 2199 | static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev, | 2202 | static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev, |
| @@ -2203,7 +2206,9 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde | |||
| 2203 | int j; | 2206 | int j; |
| 2204 | u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings); | 2207 | u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings); |
| 2205 | u32 misc2 = le16_to_cpu(non_clock_info->usClassification); | 2208 | u32 misc2 = le16_to_cpu(non_clock_info->usClassification); |
| 2206 | u16 vddc = radeon_atombios_get_default_vddc(rdev); | 2209 | u16 vddc, vddci; |
| 2210 | |||
| 2211 | radeon_atombios_get_default_voltages(rdev, &vddc, &vddci); | ||
| 2207 | 2212 | ||
| 2208 | rdev->pm.power_state[state_index].misc = misc; | 2213 | rdev->pm.power_state[state_index].misc = misc; |
| 2209 | rdev->pm.power_state[state_index].misc2 = misc2; | 2214 | rdev->pm.power_state[state_index].misc2 = misc2; |
| @@ -2244,6 +2249,7 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde | |||
| 2244 | rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk; | 2249 | rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk; |
| 2245 | rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk; | 2250 | rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk; |
| 2246 | rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage; | 2251 | rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage; |
| 2252 | rdev->pm.default_vddci = rdev->pm.power_state[state_index].clock_info[0].voltage.vddci; | ||
| 2247 | } else { | 2253 | } else { |
| 2248 | /* patch the table values with the default slck/mclk from firmware info */ | 2254 | /* patch the table values with the default slck/mclk from firmware info */ |
| 2249 | for (j = 0; j < mode_index; j++) { | 2255 | for (j = 0; j < mode_index; j++) { |
| @@ -2286,6 +2292,8 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev, | |||
| 2286 | VOLTAGE_SW; | 2292 | VOLTAGE_SW; |
| 2287 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = | 2293 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = |
| 2288 | le16_to_cpu(clock_info->evergreen.usVDDC); | 2294 | le16_to_cpu(clock_info->evergreen.usVDDC); |
| 2295 | rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci = | ||
| 2296 | le16_to_cpu(clock_info->evergreen.usVDDCI); | ||
| 2289 | } else { | 2297 | } else { |
| 2290 | sclk = le16_to_cpu(clock_info->r600.usEngineClockLow); | 2298 | sclk = le16_to_cpu(clock_info->r600.usEngineClockLow); |
| 2291 | sclk |= clock_info->r600.ucEngineClockHigh << 16; | 2299 | sclk |= clock_info->r600.ucEngineClockHigh << 16; |
| @@ -2577,25 +2585,25 @@ union set_voltage { | |||
| 2577 | struct _SET_VOLTAGE_PARAMETERS_V2 v2; | 2585 | struct _SET_VOLTAGE_PARAMETERS_V2 v2; |
| 2578 | }; | 2586 | }; |
| 2579 | 2587 | ||
| 2580 | void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level) | 2588 | void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type) |
| 2581 | { | 2589 | { |
| 2582 | union set_voltage args; | 2590 | union set_voltage args; |
| 2583 | int index = GetIndexIntoMasterTable(COMMAND, SetVoltage); | 2591 | int index = GetIndexIntoMasterTable(COMMAND, SetVoltage); |
| 2584 | u8 frev, crev, volt_index = level; | 2592 | u8 frev, crev, volt_index = voltage_level; |
| 2585 | 2593 | ||
| 2586 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) | 2594 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) |
| 2587 | return; | 2595 | return; |
| 2588 | 2596 | ||
| 2589 | switch (crev) { | 2597 | switch (crev) { |
| 2590 | case 1: | 2598 | case 1: |
| 2591 | args.v1.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC; | 2599 | args.v1.ucVoltageType = voltage_type; |
| 2592 | args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE; | 2600 | args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE; |
| 2593 | args.v1.ucVoltageIndex = volt_index; | 2601 | args.v1.ucVoltageIndex = volt_index; |
| 2594 | break; | 2602 | break; |
| 2595 | case 2: | 2603 | case 2: |
| 2596 | args.v2.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC; | 2604 | args.v2.ucVoltageType = voltage_type; |
| 2597 | args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE; | 2605 | args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE; |
| 2598 | args.v2.usVoltageLevel = cpu_to_le16(level); | 2606 | args.v2.usVoltageLevel = cpu_to_le16(voltage_level); |
| 2599 | break; | 2607 | break; |
| 2600 | default: | 2608 | default: |
| 2601 | DRM_ERROR("Unknown table version %d, %d\n", frev, crev); | 2609 | DRM_ERROR("Unknown table version %d, %d\n", frev, crev); |
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 9e59868d354e..bbcd1dd7bac0 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c | |||
| @@ -79,7 +79,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev) | |||
| 79 | scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; | 79 | scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; |
| 80 | else | 80 | else |
| 81 | scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; | 81 | scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base; |
| 82 | seq = rdev->wb.wb[scratch_index/4]; | 82 | seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]); |
| 83 | } else | 83 | } else |
| 84 | seq = RREG32(rdev->fence_drv.scratch_reg); | 84 | seq = RREG32(rdev->fence_drv.scratch_reg); |
| 85 | if (seq != rdev->fence_drv.last_seq) { | 85 | if (seq != rdev->fence_drv.last_seq) { |
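This writeback read, like the ring read-pointer change later in the series, is wrapped in le32_to_cpu() because the hardware writes those buffer slots in little-endian byte order; the conversion is a no-op on little-endian hosts and a byte swap on big-endian ones, so sequence numbers and read pointers come out right either way. A tiny standalone illustration of the same idea in plain C (the kernel's __le32/le32_to_cpu are the real interface; this only shows the byte-order math):

    #include <stdint.h>
    #include <stdio.h>

    /* Decode a 4-byte little-endian slot regardless of host byte order. */
    static uint32_t le32_to_host(const uint8_t *p)
    {
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    int main(void)
    {
        uint8_t wb_slot[4] = { 0x2a, 0x00, 0x00, 0x00 };  /* sequence 42 as the device writes it */

        printf("seq = %u\n", le32_to_host(wb_slot));      /* 42 on any host */
        return 0;
    }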
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index f0534ef2f331..8a955bbdb608 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c | |||
| @@ -285,4 +285,6 @@ void radeon_gart_fini(struct radeon_device *rdev) | |||
| 285 | rdev->gart.pages = NULL; | 285 | rdev->gart.pages = NULL; |
| 286 | rdev->gart.pages_addr = NULL; | 286 | rdev->gart.pages_addr = NULL; |
| 287 | rdev->gart.ttm_alloced = NULL; | 287 | rdev->gart.ttm_alloced = NULL; |
| 288 | |||
| 289 | radeon_dummy_page_fini(rdev); | ||
| 288 | } | 290 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index ded2a45bc95c..ccbabf734a61 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c | |||
| @@ -1062,7 +1062,7 @@ void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus, | |||
| 1062 | *val = in_buf[0]; | 1062 | *val = in_buf[0]; |
| 1063 | DRM_DEBUG("val = 0x%02x\n", *val); | 1063 | DRM_DEBUG("val = 0x%02x\n", *val); |
| 1064 | } else { | 1064 | } else { |
| 1065 | DRM_ERROR("i2c 0x%02x 0x%02x read failed\n", | 1065 | DRM_DEBUG("i2c 0x%02x 0x%02x read failed\n", |
| 1066 | addr, *val); | 1066 | addr, *val); |
| 1067 | } | 1067 | } |
| 1068 | } | 1068 | } |
| @@ -1084,7 +1084,7 @@ void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus, | |||
| 1084 | out_buf[1] = val; | 1084 | out_buf[1] = val; |
| 1085 | 1085 | ||
| 1086 | if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1) | 1086 | if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1) |
| 1087 | DRM_ERROR("i2c 0x%02x 0x%02x write failed\n", | 1087 | DRM_DEBUG("i2c 0x%02x 0x%02x write failed\n", |
| 1088 | addr, val); | 1088 | addr, val); |
| 1089 | } | 1089 | } |
| 1090 | 1090 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 5b54268ed6b2..2f46e0c8df53 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c | |||
| @@ -269,7 +269,7 @@ static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = { | |||
| 269 | .disable = radeon_legacy_encoder_disable, | 269 | .disable = radeon_legacy_encoder_disable, |
| 270 | }; | 270 | }; |
| 271 | 271 | ||
| 272 | #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE | 272 | #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) |
| 273 | 273 | ||
| 274 | #define MAX_RADEON_LEVEL 0xFF | 274 | #define MAX_RADEON_LEVEL 0xFF |
| 275 | 275 | ||
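The backlight guard now also covers a modular backlight class: Kconfig defines CONFIG_FOO for a built-in (=y) option but CONFIG_FOO_MODULE for a modular (=m) one, so testing only the first macro silently compiled the backlight support out of =m configurations. A hedged sketch of the idiom (FOO is a placeholder option, not a real Kconfig symbol):

    /* Compiled in when the option is =y (CONFIG_FOO) or =m (CONFIG_FOO_MODULE). */
    #if defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)
    /* feature-dependent code */
    #endif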
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 08de669e025a..86eda1ea94df 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
| @@ -23,6 +23,7 @@ | |||
| 23 | #include "drmP.h" | 23 | #include "drmP.h" |
| 24 | #include "radeon.h" | 24 | #include "radeon.h" |
| 25 | #include "avivod.h" | 25 | #include "avivod.h" |
| 26 | #include "atom.h" | ||
| 26 | #ifdef CONFIG_ACPI | 27 | #ifdef CONFIG_ACPI |
| 27 | #include <linux/acpi.h> | 28 | #include <linux/acpi.h> |
| 28 | #endif | 29 | #endif |
| @@ -535,7 +536,11 @@ void radeon_pm_resume(struct radeon_device *rdev) | |||
| 535 | /* set up the default clocks if the MC ucode is loaded */ | 536 | /* set up the default clocks if the MC ucode is loaded */ |
| 536 | if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) { | 537 | if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) { |
| 537 | if (rdev->pm.default_vddc) | 538 | if (rdev->pm.default_vddc) |
| 538 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddc); | 539 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, |
| 540 | SET_VOLTAGE_TYPE_ASIC_VDDC); | ||
| 541 | if (rdev->pm.default_vddci) | ||
| 542 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, | ||
| 543 | SET_VOLTAGE_TYPE_ASIC_VDDCI); | ||
| 539 | if (rdev->pm.default_sclk) | 544 | if (rdev->pm.default_sclk) |
| 540 | radeon_set_engine_clock(rdev, rdev->pm.default_sclk); | 545 | radeon_set_engine_clock(rdev, rdev->pm.default_sclk); |
| 541 | if (rdev->pm.default_mclk) | 546 | if (rdev->pm.default_mclk) |
| @@ -548,6 +553,7 @@ void radeon_pm_resume(struct radeon_device *rdev) | |||
| 548 | rdev->pm.current_sclk = rdev->pm.default_sclk; | 553 | rdev->pm.current_sclk = rdev->pm.default_sclk; |
| 549 | rdev->pm.current_mclk = rdev->pm.default_mclk; | 554 | rdev->pm.current_mclk = rdev->pm.default_mclk; |
| 550 | rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; | 555 | rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; |
| 556 | rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci; | ||
| 551 | if (rdev->pm.pm_method == PM_METHOD_DYNPM | 557 | if (rdev->pm.pm_method == PM_METHOD_DYNPM |
| 552 | && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { | 558 | && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { |
| 553 | rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; | 559 | rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; |
| @@ -585,7 +591,8 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
| 585 | /* set up the default clocks if the MC ucode is loaded */ | 591 | /* set up the default clocks if the MC ucode is loaded */ |
| 586 | if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) { | 592 | if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) { |
| 587 | if (rdev->pm.default_vddc) | 593 | if (rdev->pm.default_vddc) |
| 588 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddc); | 594 | radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, |
| 595 | SET_VOLTAGE_TYPE_ASIC_VDDC); | ||
| 589 | if (rdev->pm.default_sclk) | 596 | if (rdev->pm.default_sclk) |
| 590 | radeon_set_engine_clock(rdev, rdev->pm.default_sclk); | 597 | radeon_set_engine_clock(rdev, rdev->pm.default_sclk); |
| 591 | if (rdev->pm.default_mclk) | 598 | if (rdev->pm.default_mclk) |
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index bbc9cd823334..c6776e48fdde 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c | |||
| @@ -248,7 +248,7 @@ void radeon_ib_pool_fini(struct radeon_device *rdev) | |||
| 248 | void radeon_ring_free_size(struct radeon_device *rdev) | 248 | void radeon_ring_free_size(struct radeon_device *rdev) |
| 249 | { | 249 | { |
| 250 | if (rdev->wb.enabled) | 250 | if (rdev->wb.enabled) |
| 251 | rdev->cp.rptr = rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]; | 251 | rdev->cp.rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]); |
| 252 | else { | 252 | else { |
| 253 | if (rdev->family >= CHIP_R600) | 253 | if (rdev->family >= CHIP_R600) |
| 254 | rdev->cp.rptr = RREG32(R600_CP_RB_RPTR); | 254 | rdev->cp.rptr = RREG32(R600_CP_RB_RPTR); |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 876cebc4b8ba..6e3b11e5abbe 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
| @@ -114,7 +114,7 @@ void rs600_pm_misc(struct radeon_device *rdev) | |||
| 114 | udelay(voltage->delay); | 114 | udelay(voltage->delay); |
| 115 | } | 115 | } |
| 116 | } else if (voltage->type == VOLTAGE_VDDC) | 116 | } else if (voltage->type == VOLTAGE_VDDC) |
| 117 | radeon_atom_set_voltage(rdev, voltage->vddc_id); | 117 | radeon_atom_set_voltage(rdev, voltage->vddc_id, SET_VOLTAGE_TYPE_ASIC_VDDC); |
| 118 | 118 | ||
| 119 | dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH); | 119 | dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH); |
| 120 | dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf); | 120 | dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf); |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index b974ac7df8df..ef8a5babe9f7 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
| @@ -106,7 +106,7 @@ void rv770_pm_misc(struct radeon_device *rdev) | |||
| 106 | 106 | ||
| 107 | if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { | 107 | if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { |
| 108 | if (voltage->voltage != rdev->pm.current_vddc) { | 108 | if (voltage->voltage != rdev->pm.current_vddc) { |
| 109 | radeon_atom_set_voltage(rdev, voltage->voltage); | 109 | radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC); |
| 110 | rdev->pm.current_vddc = voltage->voltage; | 110 | rdev->pm.current_vddc = voltage->voltage; |
| 111 | DRM_DEBUG("Setting: v: %d\n", voltage->voltage); | 111 | DRM_DEBUG("Setting: v: %d\n", voltage->voltage); |
| 112 | } | 112 | } |
| @@ -1255,9 +1255,6 @@ int rv770_init(struct radeon_device *rdev) | |||
| 1255 | { | 1255 | { |
| 1256 | int r; | 1256 | int r; |
| 1257 | 1257 | ||
| 1258 | r = radeon_dummy_page_init(rdev); | ||
| 1259 | if (r) | ||
| 1260 | return r; | ||
| 1261 | /* This don't do much */ | 1258 | /* This don't do much */ |
| 1262 | r = radeon_gem_init(rdev); | 1259 | r = radeon_gem_init(rdev); |
| 1263 | if (r) | 1260 | if (r) |
| @@ -1372,7 +1369,6 @@ void rv770_fini(struct radeon_device *rdev) | |||
| 1372 | radeon_atombios_fini(rdev); | 1369 | radeon_atombios_fini(rdev); |
| 1373 | kfree(rdev->bios); | 1370 | kfree(rdev->bios); |
| 1374 | rdev->bios = NULL; | 1371 | rdev->bios = NULL; |
| 1375 | radeon_dummy_page_fini(rdev); | ||
| 1376 | } | 1372 | } |
| 1377 | 1373 | ||
| 1378 | static void rv770_pcie_gen2_enable(struct radeon_device *rdev) | 1374 | static void rv770_pcie_gen2_enable(struct radeon_device *rdev) |
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 737a2a2e46a5..9d9d92945f8c 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c | |||
| @@ -683,22 +683,14 @@ int ttm_get_pages(struct list_head *pages, int flags, | |||
| 683 | gfp_flags |= GFP_HIGHUSER; | 683 | gfp_flags |= GFP_HIGHUSER; |
| 684 | 684 | ||
| 685 | for (r = 0; r < count; ++r) { | 685 | for (r = 0; r < count; ++r) { |
| 686 | if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) { | 686 | p = alloc_page(gfp_flags); |
| 687 | void *addr; | ||
| 688 | addr = dma_alloc_coherent(NULL, PAGE_SIZE, | ||
| 689 | &dma_address[r], | ||
| 690 | gfp_flags); | ||
| 691 | if (addr == NULL) | ||
| 692 | return -ENOMEM; | ||
| 693 | p = virt_to_page(addr); | ||
| 694 | } else | ||
| 695 | p = alloc_page(gfp_flags); | ||
| 696 | if (!p) { | 687 | if (!p) { |
| 697 | 688 | ||
| 698 | printk(KERN_ERR TTM_PFX | 689 | printk(KERN_ERR TTM_PFX |
| 699 | "Unable to allocate page."); | 690 | "Unable to allocate page."); |
| 700 | return -ENOMEM; | 691 | return -ENOMEM; |
| 701 | } | 692 | } |
| 693 | |||
| 702 | list_add(&p->lru, pages); | 694 | list_add(&p->lru, pages); |
| 703 | } | 695 | } |
| 704 | return 0; | 696 | return 0; |
| @@ -746,24 +738,12 @@ void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags, | |||
| 746 | unsigned long irq_flags; | 738 | unsigned long irq_flags; |
| 747 | struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); | 739 | struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); |
| 748 | struct page *p, *tmp; | 740 | struct page *p, *tmp; |
| 749 | unsigned r; | ||
| 750 | 741 | ||
| 751 | if (pool == NULL) { | 742 | if (pool == NULL) { |
| 752 | /* No pool for this memory type so free the pages */ | 743 | /* No pool for this memory type so free the pages */ |
| 753 | 744 | ||
| 754 | r = page_count-1; | ||
| 755 | list_for_each_entry_safe(p, tmp, pages, lru) { | 745 | list_for_each_entry_safe(p, tmp, pages, lru) { |
| 756 | if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) { | 746 | __free_page(p); |
| 757 | void *addr = page_address(p); | ||
| 758 | WARN_ON(!addr || !dma_address[r]); | ||
| 759 | if (addr) | ||
| 760 | dma_free_coherent(NULL, PAGE_SIZE, | ||
| 761 | addr, | ||
| 762 | dma_address[r]); | ||
| 763 | dma_address[r] = 0; | ||
| 764 | } else | ||
| 765 | __free_page(p); | ||
| 766 | r--; | ||
| 767 | } | 747 | } |
| 768 | /* Make the pages list empty */ | 748 | /* Make the pages list empty */ |
| 769 | INIT_LIST_HEAD(pages); | 749 | INIT_LIST_HEAD(pages); |
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig index 70e60a4bb678..419917955bf6 100644 --- a/drivers/gpu/stub/Kconfig +++ b/drivers/gpu/stub/Kconfig | |||
| @@ -5,6 +5,7 @@ config STUB_POULSBO | |||
| 5 | # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled | 5 | # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled |
| 6 | # but for select to work, need to select ACPI_VIDEO's dependencies, ick | 6 | # but for select to work, need to select ACPI_VIDEO's dependencies, ick |
| 7 | select BACKLIGHT_CLASS_DEVICE if ACPI | 7 | select BACKLIGHT_CLASS_DEVICE if ACPI |
| 8 | select VIDEO_OUTPUT_CONTROL if ACPI | ||
| 8 | select INPUT if ACPI | 9 | select INPUT if ACPI |
| 9 | select ACPI_VIDEO if ACPI | 10 | select ACPI_VIDEO if ACPI |
| 10 | select THERMAL if ACPI | 11 | select THERMAL if ACPI |
diff --git a/drivers/leds/leds-regulator.c b/drivers/leds/leds-regulator.c index 3790816643be..8497f56f8e46 100644 --- a/drivers/leds/leds-regulator.c +++ b/drivers/leds/leds-regulator.c | |||
| @@ -178,6 +178,10 @@ static int __devinit regulator_led_probe(struct platform_device *pdev) | |||
| 178 | led->cdev.flags |= LED_CORE_SUSPENDRESUME; | 178 | led->cdev.flags |= LED_CORE_SUSPENDRESUME; |
| 179 | led->vcc = vcc; | 179 | led->vcc = vcc; |
| 180 | 180 | ||
| 181 | /* to handle correctly an already enabled regulator */ | ||
| 182 | if (regulator_is_enabled(led->vcc)) | ||
| 183 | led->enabled = 1; | ||
| 184 | |||
| 181 | mutex_init(&led->mutex); | 185 | mutex_init(&led->mutex); |
| 182 | INIT_WORK(&led->work, led_work); | 186 | INIT_WORK(&led->work, led_work); |
| 183 | 187 | ||
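The leds-regulator fix seeds the driver's cached enabled flag from the real hardware state at probe time, so the first brightness update does not call the regulator enable/disable API out of balance with a supply the bootloader or regulator core already left on. A minimal sketch of the probe-time check (field names follow the hunk; the hunk treats any non-zero return as "on", while checking for a value greater than zero would additionally ignore error codes):

    /* regulator_is_enabled() returns > 0 if the supply is on, 0 if it is off,
     * or a negative errno on failure. */
    if (regulator_is_enabled(led->vcc) > 0)
        led->enabled = 1;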
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index d01574d98870..f4c8c844b913 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c | |||
| @@ -55,6 +55,19 @@ int mfd_cell_disable(struct platform_device *pdev) | |||
| 55 | } | 55 | } |
| 56 | EXPORT_SYMBOL(mfd_cell_disable); | 56 | EXPORT_SYMBOL(mfd_cell_disable); |
| 57 | 57 | ||
| 58 | static int mfd_platform_add_cell(struct platform_device *pdev, | ||
| 59 | const struct mfd_cell *cell) | ||
| 60 | { | ||
| 61 | if (!cell) | ||
| 62 | return 0; | ||
| 63 | |||
| 64 | pdev->mfd_cell = kmemdup(cell, sizeof(*cell), GFP_KERNEL); | ||
| 65 | if (!pdev->mfd_cell) | ||
| 66 | return -ENOMEM; | ||
| 67 | |||
| 68 | return 0; | ||
| 69 | } | ||
| 70 | |||
| 58 | static int mfd_add_device(struct device *parent, int id, | 71 | static int mfd_add_device(struct device *parent, int id, |
| 59 | const struct mfd_cell *cell, | 72 | const struct mfd_cell *cell, |
| 60 | struct resource *mem_base, | 73 | struct resource *mem_base, |
| @@ -75,7 +88,7 @@ static int mfd_add_device(struct device *parent, int id, | |||
| 75 | 88 | ||
| 76 | pdev->dev.parent = parent; | 89 | pdev->dev.parent = parent; |
| 77 | 90 | ||
| 78 | ret = platform_device_add_data(pdev, cell, sizeof(*cell)); | 91 | ret = mfd_platform_add_cell(pdev, cell); |
| 79 | if (ret) | 92 | if (ret) |
| 80 | goto fail_res; | 93 | goto fail_res; |
| 81 | 94 | ||
| @@ -123,7 +136,6 @@ static int mfd_add_device(struct device *parent, int id, | |||
| 123 | 136 | ||
| 124 | return 0; | 137 | return 0; |
| 125 | 138 | ||
| 126 | /* platform_device_del(pdev); */ | ||
| 127 | fail_res: | 139 | fail_res: |
| 128 | kfree(res); | 140 | kfree(res); |
| 129 | fail_device: | 141 | fail_device: |
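mfd_platform_add_cell() hands each sub-device its own heap copy of the mfd_cell descriptor through kmemdup() instead of routing the cell through platform_device_add_data(), which would have placed it in dev.platform_data and collided with cell drivers that expect their own platform data there. A hedged sketch of the duplicate-a-template pattern (add_cell_copy() is hypothetical; pdev->mfd_cell is the field used by the hunk above):

    #include <linux/mfd/core.h>
    #include <linux/platform_device.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    /* Hypothetical helper: give the device a private copy so the caller's
     * cell array can live anywhere without lifetime problems. */
    static int add_cell_copy(struct platform_device *pdev, const struct mfd_cell *cell)
    {
        if (!cell)
            return 0;

        pdev->mfd_cell = kmemdup(cell, sizeof(*cell), GFP_KERNEL);
        return pdev->mfd_cell ? 0 : -ENOMEM;
    }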
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c index 20e4e9395b61..ecafa4ba238b 100644 --- a/drivers/misc/sgi-gru/grufile.c +++ b/drivers/misc/sgi-gru/grufile.c | |||
| @@ -348,15 +348,15 @@ static unsigned long gru_chiplet_cpu_to_mmr(int chiplet, int cpu, int *corep) | |||
| 348 | 348 | ||
| 349 | static int gru_irq_count[GRU_CHIPLETS_PER_BLADE]; | 349 | static int gru_irq_count[GRU_CHIPLETS_PER_BLADE]; |
| 350 | 350 | ||
| 351 | static void gru_noop(unsigned int irq) | 351 | static void gru_noop(struct irq_data *d) |
| 352 | { | 352 | { |
| 353 | } | 353 | } |
| 354 | 354 | ||
| 355 | static struct irq_chip gru_chip[GRU_CHIPLETS_PER_BLADE] = { | 355 | static struct irq_chip gru_chip[GRU_CHIPLETS_PER_BLADE] = { |
| 356 | [0 ... GRU_CHIPLETS_PER_BLADE - 1] { | 356 | [0 ... GRU_CHIPLETS_PER_BLADE - 1] { |
| 357 | .mask = gru_noop, | 357 | .irq_mask = gru_noop, |
| 358 | .unmask = gru_noop, | 358 | .irq_unmask = gru_noop, |
| 359 | .ack = gru_noop | 359 | .irq_ack = gru_noop |
| 360 | } | 360 | } |
| 361 | }; | 361 | }; |
| 362 | 362 | ||
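The GRU stub follows the genirq rework in which irq_chip callbacks were renamed (mask becomes irq_mask, and so on) and now receive a struct irq_data pointer rather than a bare IRQ number. A hedged sketch of a no-op chip in the new style (all names are placeholders; only the callback signature and field names matter):

    #include <linux/irq.h>

    /* New-style callback: the IRQ number, if needed, is d->irq. */
    static void example_noop(struct irq_data *d)
    {
    }

    static struct irq_chip example_chip = {
        .name       = "example",
        .irq_mask   = example_noop,
        .irq_unmask = example_noop,
        .irq_ack    = example_noop,
    };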
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c index 237913c5c92c..fed215c4cfa1 100644 --- a/drivers/mtd/mtdswap.c +++ b/drivers/mtd/mtdswap.c | |||
| @@ -1452,7 +1452,7 @@ static void mtdswap_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) | |||
| 1452 | oinfo = mtd->ecclayout; | 1452 | oinfo = mtd->ecclayout; |
| 1453 | if (!mtd->oobsize || !oinfo || oinfo->oobavail < MTDSWAP_OOBSIZE) { | 1453 | if (!mtd->oobsize || !oinfo || oinfo->oobavail < MTDSWAP_OOBSIZE) { |
| 1454 | printk(KERN_ERR "%s: Not enough free bytes in OOB, " | 1454 | printk(KERN_ERR "%s: Not enough free bytes in OOB, " |
| 1455 | "%d available, %lu needed.\n", | 1455 | "%d available, %zu needed.\n", |
| 1456 | MTDSWAP_PREFIX, oinfo->oobavail, MTDSWAP_OOBSIZE); | 1456 | MTDSWAP_PREFIX, oinfo->oobavail, MTDSWAP_OOBSIZE); |
| 1457 | return; | 1457 | return; |
| 1458 | } | 1458 | } |
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c index 6fae04b3fc6d..950646aa4c4b 100644 --- a/drivers/mtd/nand/atmel_nand.c +++ b/drivers/mtd/nand/atmel_nand.c | |||
| @@ -209,22 +209,8 @@ static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len, | |||
| 209 | int err = -EIO; | 209 | int err = -EIO; |
| 210 | enum dma_data_direction dir = is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE; | 210 | enum dma_data_direction dir = is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE; |
| 211 | 211 | ||
| 212 | if (buf >= high_memory) { | 212 | if (buf >= high_memory) |
| 213 | struct page *pg; | 213 | goto err_buf; |
| 214 | |||
| 215 | if (((size_t)buf & PAGE_MASK) != | ||
| 216 | ((size_t)(buf + len - 1) & PAGE_MASK)) { | ||
| 217 | dev_warn(host->dev, "Buffer not fit in one page\n"); | ||
| 218 | goto err_buf; | ||
| 219 | } | ||
| 220 | |||
| 221 | pg = vmalloc_to_page(buf); | ||
| 222 | if (pg == 0) { | ||
| 223 | dev_err(host->dev, "Failed to vmalloc_to_page\n"); | ||
| 224 | goto err_buf; | ||
| 225 | } | ||
| 226 | p = page_address(pg) + ((size_t)buf & ~PAGE_MASK); | ||
| 227 | } | ||
| 228 | 214 | ||
| 229 | dma_dev = host->dma_chan->device; | 215 | dma_dev = host->dma_chan->device; |
| 230 | 216 | ||
| @@ -280,7 +266,8 @@ static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len) | |||
| 280 | struct nand_chip *chip = mtd->priv; | 266 | struct nand_chip *chip = mtd->priv; |
| 281 | struct atmel_nand_host *host = chip->priv; | 267 | struct atmel_nand_host *host = chip->priv; |
| 282 | 268 | ||
| 283 | if (use_dma && len >= mtd->oobsize) | 269 | if (use_dma && len > mtd->oobsize) |
| 270 | /* only use DMA for transfers bigger than oob size: better performance */ | ||
| 284 | if (atmel_nand_dma_op(mtd, buf, len, 1) == 0) | 271 | if (atmel_nand_dma_op(mtd, buf, len, 1) == 0) |
| 285 | return; | 272 | return; |
| 286 | 273 | ||
| @@ -295,7 +282,8 @@ static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len) | |||
| 295 | struct nand_chip *chip = mtd->priv; | 282 | struct nand_chip *chip = mtd->priv; |
| 296 | struct atmel_nand_host *host = chip->priv; | 283 | struct atmel_nand_host *host = chip->priv; |
| 297 | 284 | ||
| 298 | if (use_dma && len >= mtd->oobsize) | 285 | if (use_dma && len > mtd->oobsize) |
| 286 | /* only use DMA for transfers bigger than oob size: better performance */ | ||
| 299 | if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0) | 287 | if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0) |
| 300 | return; | 288 | return; |
| 301 | 289 | ||
| @@ -599,7 +587,10 @@ static int __init atmel_nand_probe(struct platform_device *pdev) | |||
| 599 | nand_chip->options |= NAND_USE_FLASH_BBT; | 587 | nand_chip->options |= NAND_USE_FLASH_BBT; |
| 600 | } | 588 | } |
| 601 | 589 | ||
| 602 | if (cpu_has_dma() && use_dma) { | 590 | if (!cpu_has_dma()) |
| 591 | use_dma = 0; | ||
| 592 | |||
| 593 | if (use_dma) { | ||
| 603 | dma_cap_mask_t mask; | 594 | dma_cap_mask_t mask; |
| 604 | 595 | ||
| 605 | dma_cap_zero(mask); | 596 | dma_cap_zero(mask); |
| @@ -611,7 +602,8 @@ static int __init atmel_nand_probe(struct platform_device *pdev) | |||
| 611 | } | 602 | } |
| 612 | } | 603 | } |
| 613 | if (use_dma) | 604 | if (use_dma) |
| 614 | dev_info(host->dev, "Using DMA for NAND access.\n"); | 605 | dev_info(host->dev, "Using %s for DMA transfers.\n", |
| 606 | dma_chan_name(host->dma_chan)); | ||
| 615 | else | 607 | else |
| 616 | dev_info(host->dev, "No DMA support for NAND access.\n"); | 608 | dev_info(host->dev, "No DMA support for NAND access.\n"); |
| 617 | 609 | ||
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h index f803c58b941d..66823eded7a3 100644 --- a/drivers/net/benet/be.h +++ b/drivers/net/benet/be.h | |||
| @@ -154,7 +154,7 @@ struct be_eq_obj { | |||
| 154 | u16 min_eqd; /* in usecs */ | 154 | u16 min_eqd; /* in usecs */ |
| 155 | u16 max_eqd; /* in usecs */ | 155 | u16 max_eqd; /* in usecs */ |
| 156 | u16 cur_eqd; /* in usecs */ | 156 | u16 cur_eqd; /* in usecs */ |
| 157 | u8 msix_vec_idx; | 157 | u8 eq_idx; |
| 158 | 158 | ||
| 159 | struct napi_struct napi; | 159 | struct napi_struct napi; |
| 160 | }; | 160 | }; |
| @@ -291,7 +291,7 @@ struct be_adapter { | |||
| 291 | u32 num_rx_qs; | 291 | u32 num_rx_qs; |
| 292 | u32 big_page_size; /* Compounded page size shared by rx wrbs */ | 292 | u32 big_page_size; /* Compounded page size shared by rx wrbs */ |
| 293 | 293 | ||
| 294 | u8 msix_vec_next_idx; | 294 | u8 eq_next_idx; |
| 295 | struct be_drv_stats drv_stats; | 295 | struct be_drv_stats drv_stats; |
| 296 | 296 | ||
| 297 | struct vlan_group *vlan_grp; | 297 | struct vlan_group *vlan_grp; |
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 9a54c8b24ff9..7cb5a114c733 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c | |||
| @@ -1497,7 +1497,7 @@ static int be_tx_queues_create(struct be_adapter *adapter) | |||
| 1497 | if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd)) | 1497 | if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd)) |
| 1498 | goto tx_eq_free; | 1498 | goto tx_eq_free; |
| 1499 | 1499 | ||
| 1500 | adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++; | 1500 | adapter->tx_eq.eq_idx = adapter->eq_next_idx++; |
| 1501 | 1501 | ||
| 1502 | 1502 | ||
| 1503 | /* Alloc TX eth compl queue */ | 1503 | /* Alloc TX eth compl queue */ |
| @@ -1590,7 +1590,7 @@ static int be_rx_queues_create(struct be_adapter *adapter) | |||
| 1590 | if (rc) | 1590 | if (rc) |
| 1591 | goto err; | 1591 | goto err; |
| 1592 | 1592 | ||
| 1593 | rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++; | 1593 | rxo->rx_eq.eq_idx = adapter->eq_next_idx++; |
| 1594 | 1594 | ||
| 1595 | /* CQ */ | 1595 | /* CQ */ |
| 1596 | cq = &rxo->cq; | 1596 | cq = &rxo->cq; |
| @@ -1666,11 +1666,11 @@ static irqreturn_t be_intx(int irq, void *dev) | |||
| 1666 | if (!isr) | 1666 | if (!isr) |
| 1667 | return IRQ_NONE; | 1667 | return IRQ_NONE; |
| 1668 | 1668 | ||
| 1669 | if ((1 << adapter->tx_eq.msix_vec_idx & isr)) | 1669 | if ((1 << adapter->tx_eq.eq_idx & isr)) |
| 1670 | event_handle(adapter, &adapter->tx_eq); | 1670 | event_handle(adapter, &adapter->tx_eq); |
| 1671 | 1671 | ||
| 1672 | for_all_rx_queues(adapter, rxo, i) { | 1672 | for_all_rx_queues(adapter, rxo, i) { |
| 1673 | if ((1 << rxo->rx_eq.msix_vec_idx & isr)) | 1673 | if ((1 << rxo->rx_eq.eq_idx & isr)) |
| 1674 | event_handle(adapter, &rxo->rx_eq); | 1674 | event_handle(adapter, &rxo->rx_eq); |
| 1675 | } | 1675 | } |
| 1676 | } | 1676 | } |
| @@ -1951,7 +1951,7 @@ static void be_sriov_disable(struct be_adapter *adapter) | |||
| 1951 | static inline int be_msix_vec_get(struct be_adapter *adapter, | 1951 | static inline int be_msix_vec_get(struct be_adapter *adapter, |
| 1952 | struct be_eq_obj *eq_obj) | 1952 | struct be_eq_obj *eq_obj) |
| 1953 | { | 1953 | { |
| 1954 | return adapter->msix_entries[eq_obj->msix_vec_idx].vector; | 1954 | return adapter->msix_entries[eq_obj->eq_idx].vector; |
| 1955 | } | 1955 | } |
| 1956 | 1956 | ||
| 1957 | static int be_request_irq(struct be_adapter *adapter, | 1957 | static int be_request_irq(struct be_adapter *adapter, |
| @@ -2345,6 +2345,7 @@ static int be_clear(struct be_adapter *adapter) | |||
| 2345 | be_mcc_queues_destroy(adapter); | 2345 | be_mcc_queues_destroy(adapter); |
| 2346 | be_rx_queues_destroy(adapter); | 2346 | be_rx_queues_destroy(adapter); |
| 2347 | be_tx_queues_destroy(adapter); | 2347 | be_tx_queues_destroy(adapter); |
| 2348 | adapter->eq_next_idx = 0; | ||
| 2348 | 2349 | ||
| 2349 | if (be_physfn(adapter) && adapter->sriov_enabled) | 2350 | if (be_physfn(adapter) && adapter->sriov_enabled) |
| 2350 | for (vf = 0; vf < num_vfs; vf++) | 2351 | for (vf = 0; vf < num_vfs; vf++) |
| @@ -3141,12 +3142,14 @@ static int be_resume(struct pci_dev *pdev) | |||
| 3141 | static void be_shutdown(struct pci_dev *pdev) | 3142 | static void be_shutdown(struct pci_dev *pdev) |
| 3142 | { | 3143 | { |
| 3143 | struct be_adapter *adapter = pci_get_drvdata(pdev); | 3144 | struct be_adapter *adapter = pci_get_drvdata(pdev); |
| 3144 | struct net_device *netdev = adapter->netdev; | ||
| 3145 | 3145 | ||
| 3146 | if (netif_running(netdev)) | 3146 | if (!adapter) |
| 3147 | return; | ||
| 3148 | |||
| 3149 | if (netif_running(adapter->netdev)) | ||
| 3147 | cancel_delayed_work_sync(&adapter->work); | 3150 | cancel_delayed_work_sync(&adapter->work); |
| 3148 | 3151 | ||
| 3149 | netif_device_detach(netdev); | 3152 | netif_device_detach(adapter->netdev); |
| 3150 | 3153 | ||
| 3151 | be_cmd_reset_function(adapter); | 3154 | be_cmd_reset_function(adapter); |
| 3152 | 3155 | ||
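be_shutdown() now bails out when pci_get_drvdata() returns NULL, which can happen when probe failed or never completed, instead of dereferencing adapter->netdev unconditionally. A minimal sketch of the defensive shutdown pattern (example_shutdown() is hypothetical; the struct and helpers are the ones used above):

    static void example_shutdown(struct pci_dev *pdev)
    {
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        /* Probe may have failed, so driver data can legitimately be NULL. */
        if (!adapter)
            return;

        /* ... quiesce the device through 'adapter' from here on ... */
    }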
diff --git a/drivers/net/bna/bfa_ioc.c b/drivers/net/bna/bfa_ioc.c index 34933cb9569f..e3de0b8625cd 100644 --- a/drivers/net/bna/bfa_ioc.c +++ b/drivers/net/bna/bfa_ioc.c | |||
| @@ -2219,13 +2219,9 @@ bfa_nw_ioc_get_mac(struct bfa_ioc *ioc) | |||
| 2219 | static void | 2219 | static void |
| 2220 | bfa_ioc_recover(struct bfa_ioc *ioc) | 2220 | bfa_ioc_recover(struct bfa_ioc *ioc) |
| 2221 | { | 2221 | { |
| 2222 | u16 bdf; | 2222 | pr_crit("Heart Beat of IOC has failed\n"); |
| 2223 | 2223 | bfa_ioc_stats(ioc, ioc_hbfails); | |
| 2224 | bdf = (ioc->pcidev.pci_slot << 8 | ioc->pcidev.pci_func << 3 | | 2224 | bfa_fsm_send_event(ioc, IOC_E_HBFAIL); |
| 2225 | ioc->pcidev.device_id); | ||
| 2226 | |||
| 2227 | pr_crit("Firmware heartbeat failure at %d", bdf); | ||
| 2228 | BUG_ON(1); | ||
| 2229 | } | 2225 | } |
| 2230 | 2226 | ||
| 2231 | static void | 2227 | static void |
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c index 7513c4523ac4..330140ee266d 100644 --- a/drivers/net/can/mcp251x.c +++ b/drivers/net/can/mcp251x.c | |||
| @@ -931,7 +931,8 @@ static int mcp251x_open(struct net_device *net) | |||
| 931 | priv->tx_len = 0; | 931 | priv->tx_len = 0; |
| 932 | 932 | ||
| 933 | ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist, | 933 | ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist, |
| 934 | IRQF_TRIGGER_FALLING, DEVICE_NAME, priv); | 934 | pdata->irq_flags ? pdata->irq_flags : IRQF_TRIGGER_FALLING, |
| 935 | DEVICE_NAME, priv); | ||
| 935 | if (ret) { | 936 | if (ret) { |
| 936 | dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq); | 937 | dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq); |
| 937 | if (pdata->transceiver_enable) | 938 | if (pdata->transceiver_enable) |
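The mcp251x change lets board code choose the interrupt trigger through platform data and falls back to the old falling-edge default when no flags are supplied. A hedged sketch of the same request with the fallback spelled out (mcp251x_can_ist, DEVICE_NAME and the pdata field are the ones used above; irq_flags is a local introduced only for clarity):

    unsigned long irq_flags = pdata->irq_flags ? pdata->irq_flags
                                               : IRQF_TRIGGER_FALLING;

    ret = request_threaded_irq(spi->irq,
                               NULL,             /* no hard-IRQ handler, threaded only */
                               mcp251x_can_ist,  /* threaded handler */
                               irq_flags,
                               DEVICE_NAME, priv);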
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c index cfd50bc49169..62dd21b06df4 100644 --- a/drivers/net/mlx4/en_rx.c +++ b/drivers/net/mlx4/en_rx.c | |||
| @@ -345,6 +345,8 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv) | |||
| 345 | err = mlx4_en_init_allocator(priv, ring); | 345 | err = mlx4_en_init_allocator(priv, ring); |
| 346 | if (err) { | 346 | if (err) { |
| 347 | en_err(priv, "Failed initializing ring allocator\n"); | 347 | en_err(priv, "Failed initializing ring allocator\n"); |
| 348 | if (ring->stride <= TXBB_SIZE) | ||
| 349 | ring->buf -= TXBB_SIZE; | ||
| 348 | ring_ind--; | 350 | ring_ind--; |
| 349 | goto err_allocator; | 351 | goto err_allocator; |
| 350 | } | 352 | } |
| @@ -369,6 +371,8 @@ err_buffers: | |||
| 369 | ring_ind = priv->rx_ring_num - 1; | 371 | ring_ind = priv->rx_ring_num - 1; |
| 370 | err_allocator: | 372 | err_allocator: |
| 371 | while (ring_ind >= 0) { | 373 | while (ring_ind >= 0) { |
| 374 | if (priv->rx_ring[ring_ind].stride <= TXBB_SIZE) | ||
| 375 | priv->rx_ring[ring_ind].buf -= TXBB_SIZE; | ||
| 372 | mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]); | 376 | mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]); |
| 373 | ring_ind--; | 377 | ring_ind--; |
| 374 | } | 378 | } |
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index 62fa7eec5f0c..3814fc9b1145 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c | |||
| @@ -944,6 +944,10 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) | |||
| 944 | } | 944 | } |
| 945 | 945 | ||
| 946 | for (port = 1; port <= dev->caps.num_ports; port++) { | 946 | for (port = 1; port <= dev->caps.num_ports; port++) { |
| 947 | enum mlx4_port_type port_type = 0; | ||
| 948 | mlx4_SENSE_PORT(dev, port, &port_type); | ||
| 949 | if (port_type) | ||
| 950 | dev->caps.port_type[port] = port_type; | ||
| 947 | ib_port_default_caps = 0; | 951 | ib_port_default_caps = 0; |
| 948 | err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps); | 952 | err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps); |
| 949 | if (err) | 953 | if (err) |
| @@ -958,6 +962,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) | |||
| 958 | goto err_mcg_table_free; | 962 | goto err_mcg_table_free; |
| 959 | } | 963 | } |
| 960 | } | 964 | } |
| 965 | mlx4_set_port_mask(dev); | ||
| 961 | 966 | ||
| 962 | return 0; | 967 | return 0; |
| 963 | 968 | ||
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h index c1e0e5f1bcdb..dd7d745fbab4 100644 --- a/drivers/net/mlx4/mlx4.h +++ b/drivers/net/mlx4/mlx4.h | |||
| @@ -431,6 +431,8 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type); | |||
| 431 | 431 | ||
| 432 | void mlx4_handle_catas_err(struct mlx4_dev *dev); | 432 | void mlx4_handle_catas_err(struct mlx4_dev *dev); |
| 433 | 433 | ||
| 434 | int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port, | ||
| 435 | enum mlx4_port_type *type); | ||
| 434 | void mlx4_do_sense_ports(struct mlx4_dev *dev, | 436 | void mlx4_do_sense_ports(struct mlx4_dev *dev, |
| 435 | enum mlx4_port_type *stype, | 437 | enum mlx4_port_type *stype, |
| 436 | enum mlx4_port_type *defaults); | 438 | enum mlx4_port_type *defaults); |
diff --git a/drivers/net/mlx4/sense.c b/drivers/net/mlx4/sense.c index 015fbe785c13..e2337a7411d9 100644 --- a/drivers/net/mlx4/sense.c +++ b/drivers/net/mlx4/sense.c | |||
| @@ -38,8 +38,8 @@ | |||
| 38 | 38 | ||
| 39 | #include "mlx4.h" | 39 | #include "mlx4.h" |
| 40 | 40 | ||
| 41 | static int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port, | 41 | int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port, |
| 42 | enum mlx4_port_type *type) | 42 | enum mlx4_port_type *type) |
| 43 | { | 43 | { |
| 44 | u64 out_param; | 44 | u64 out_param; |
| 45 | int err = 0; | 45 | int err = 0; |
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index 693aaef4e3ce..718879b35b7d 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c | |||
| @@ -317,7 +317,7 @@ static void pppoe_flush_dev(struct net_device *dev) | |||
| 317 | lock_sock(sk); | 317 | lock_sock(sk); |
| 318 | 318 | ||
| 319 | if (po->pppoe_dev == dev && | 319 | if (po->pppoe_dev == dev && |
| 320 | sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { | 320 | sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) { |
| 321 | pppox_unbind_sock(sk); | 321 | pppox_unbind_sock(sk); |
| 322 | sk->sk_state = PPPOX_ZOMBIE; | 322 | sk->sk_state = PPPOX_ZOMBIE; |
| 323 | sk->sk_state_change(sk); | 323 | sk->sk_state_change(sk); |
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index c498b720b532..4b42ecc63dcf 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c | |||
| @@ -1818,6 +1818,7 @@ static int __devinit smsc911x_init(struct net_device *dev) | |||
| 1818 | SMSC_TRACE(PROBE, "PHY will be autodetected."); | 1818 | SMSC_TRACE(PROBE, "PHY will be autodetected."); |
| 1819 | 1819 | ||
| 1820 | spin_lock_init(&pdata->dev_lock); | 1820 | spin_lock_init(&pdata->dev_lock); |
| 1821 | spin_lock_init(&pdata->mac_lock); | ||
| 1821 | 1822 | ||
| 1822 | if (pdata->ioaddr == 0) { | 1823 | if (pdata->ioaddr == 0) { |
| 1823 | SMSC_WARNING(PROBE, "pdata->ioaddr: 0x00000000"); | 1824 | SMSC_WARNING(PROBE, "pdata->ioaddr: 0x00000000"); |
| @@ -1895,8 +1896,11 @@ static int __devinit smsc911x_init(struct net_device *dev) | |||
| 1895 | /* workaround for platforms without an eeprom, where the mac address | 1896 | /* workaround for platforms without an eeprom, where the mac address |
| 1896 | * is stored elsewhere and set by the bootloader. This saves the | 1897 | * is stored elsewhere and set by the bootloader. This saves the |
| 1897 | * mac address before resetting the device */ | 1898 | * mac address before resetting the device */ |
| 1898 | if (pdata->config.flags & SMSC911X_SAVE_MAC_ADDRESS) | 1899 | if (pdata->config.flags & SMSC911X_SAVE_MAC_ADDRESS) { |
| 1900 | spin_lock_irq(&pdata->mac_lock); | ||
| 1899 | smsc911x_read_mac_address(dev); | 1901 | smsc911x_read_mac_address(dev); |
| 1902 | spin_unlock_irq(&pdata->mac_lock); | ||
| 1903 | } | ||
| 1900 | 1904 | ||
| 1901 | /* Reset the LAN911x */ | 1905 | /* Reset the LAN911x */ |
| 1902 | if (smsc911x_soft_reset(pdata)) | 1906 | if (smsc911x_soft_reset(pdata)) |
| @@ -2059,8 +2063,6 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) | |||
| 2059 | SMSC_TRACE(PROBE, "Network interface: \"%s\"", dev->name); | 2063 | SMSC_TRACE(PROBE, "Network interface: \"%s\"", dev->name); |
| 2060 | } | 2064 | } |
| 2061 | 2065 | ||
| 2062 | spin_lock_init(&pdata->mac_lock); | ||
| 2063 | |||
| 2064 | retval = smsc911x_mii_init(pdev, dev); | 2066 | retval = smsc911x_mii_init(pdev, dev); |
| 2065 | if (retval) { | 2067 | if (retval) { |
| 2066 | SMSC_WARNING(PROBE, | 2068 | SMSC_WARNING(PROBE, |
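[Editor's note] The smsc911x hunks are a lock-ordering fix: mac_lock must be initialised before smsc911x_init() first takes it to save the bootloader-provided MAC address, rather than later in the probe path. A minimal sketch of the pattern follows; it uses generic names (my_priv, my_init, the commented my_read_mac) rather than the driver's own, and only assumes a spinlock guarding MAC register access.

#include <linux/spinlock.h>

struct my_priv {
        spinlock_t mac_lock;            /* guards MAC address registers */
        unsigned char mac[6];
};

/* Illustrative only: initialise the lock before any code path can take it. */
static void my_init(struct my_priv *pd)
{
        spin_lock_init(&pd->mac_lock);  /* must happen before first use */

        /* ...then an early reader in the same init path can safely take it,
         * with interrupts disabled, just as the driver now does around
         * its MAC-address read. */
        spin_lock_irq(&pd->mac_lock);
        /* my_read_mac(pd);  -- hypothetical register read under the lock */
        spin_unlock_irq(&pd->mac_lock);
}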
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 727874d9deb6..47a6c870b51f 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c | |||
| @@ -1313,6 +1313,21 @@ static const struct usb_device_id products[] = { | |||
| 1313 | USB_DEVICE(0x0424, 0x9909), | 1313 | USB_DEVICE(0x0424, 0x9909), |
| 1314 | .driver_info = (unsigned long) &smsc95xx_info, | 1314 | .driver_info = (unsigned long) &smsc95xx_info, |
| 1315 | }, | 1315 | }, |
| 1316 | { | ||
| 1317 | /* SMSC LAN9530 USB Ethernet Device */ | ||
| 1318 | USB_DEVICE(0x0424, 0x9530), | ||
| 1319 | .driver_info = (unsigned long) &smsc95xx_info, | ||
| 1320 | }, | ||
| 1321 | { | ||
| 1322 | /* SMSC LAN9730 USB Ethernet Device */ | ||
| 1323 | USB_DEVICE(0x0424, 0x9730), | ||
| 1324 | .driver_info = (unsigned long) &smsc95xx_info, | ||
| 1325 | }, | ||
| 1326 | { | ||
| 1327 | /* SMSC LAN89530 USB Ethernet Device */ | ||
| 1328 | USB_DEVICE(0x0424, 0x9E08), | ||
| 1329 | .driver_info = (unsigned long) &smsc95xx_info, | ||
| 1330 | }, | ||
| 1316 | { }, /* END */ | 1331 | { }, /* END */ |
| 1317 | }; | 1332 | }; |
| 1318 | MODULE_DEVICE_TABLE(usb, products); | 1333 | MODULE_DEVICE_TABLE(usb, products); |
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 338b07502f1a..1ec9bcd6b281 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c | |||
| @@ -2546,6 +2546,7 @@ static struct { | |||
| 2546 | { AR_SREV_VERSION_9287, "9287" }, | 2546 | { AR_SREV_VERSION_9287, "9287" }, |
| 2547 | { AR_SREV_VERSION_9271, "9271" }, | 2547 | { AR_SREV_VERSION_9271, "9271" }, |
| 2548 | { AR_SREV_VERSION_9300, "9300" }, | 2548 | { AR_SREV_VERSION_9300, "9300" }, |
| 2549 | { AR_SREV_VERSION_9485, "9485" }, | ||
| 2549 | }; | 2550 | }; |
| 2550 | 2551 | ||
| 2551 | /* For devices with external radios */ | 2552 | /* For devices with external radios */ |
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c index 3d5566e7af0a..ff0f5ba14b2c 100644 --- a/drivers/net/wireless/b43/dma.c +++ b/drivers/net/wireless/b43/dma.c | |||
| @@ -1536,7 +1536,7 @@ static void dma_rx(struct b43_dmaring *ring, int *slot) | |||
| 1536 | dmaaddr = meta->dmaaddr; | 1536 | dmaaddr = meta->dmaaddr; |
| 1537 | goto drop_recycle_buffer; | 1537 | goto drop_recycle_buffer; |
| 1538 | } | 1538 | } |
| 1539 | if (unlikely(len > ring->rx_buffersize)) { | 1539 | if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) { |
| 1540 | /* The data did not fit into one descriptor buffer | 1540 | /* The data did not fit into one descriptor buffer |
| 1541 | * and is split over multiple buffers. | 1541 | * and is split over multiple buffers. |
| 1542 | * This should never happen, as we try to allocate buffers | 1542 | * This should never happen, as we try to allocate buffers |
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h index a01c2100f166..e8a80a1251bf 100644 --- a/drivers/net/wireless/b43/dma.h +++ b/drivers/net/wireless/b43/dma.h | |||
| @@ -163,7 +163,7 @@ struct b43_dmadesc_generic { | |||
| 163 | /* DMA engine tuning knobs */ | 163 | /* DMA engine tuning knobs */ |
| 164 | #define B43_TXRING_SLOTS 256 | 164 | #define B43_TXRING_SLOTS 256 |
| 165 | #define B43_RXRING_SLOTS 64 | 165 | #define B43_RXRING_SLOTS 64 |
| 166 | #define B43_DMA0_RX_BUFFERSIZE IEEE80211_MAX_FRAME_LEN | 166 | #define B43_DMA0_RX_BUFFERSIZE (B43_DMA0_RX_FRAMEOFFSET + IEEE80211_MAX_FRAME_LEN) |
| 167 | 167 | ||
| 168 | /* Pointer poison */ | 168 | /* Pointer poison */ |
| 169 | #define B43_DMA_PTR_POISON ((void *)ERR_PTR(-ENOMEM)) | 169 | #define B43_DMA_PTR_POISON ((void *)ERR_PTR(-ENOMEM)) |
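[Editor's note] The b43 pair of hunks sizes the RX buffer to hold the hardware's frame offset plus the largest 802.11 frame, and makes the "did it fit in one descriptor" test compare len + frameoffset against that size. A worked sketch of the arithmetic, with stand-in constants replacing IEEE80211_MAX_FRAME_LEN and the DMA0 frame offset:

/* Illustrative sizing check, not the driver code. */
#define MY_FRAMEOFFSET   30      /* bytes of hardware header before the frame */
#define MY_MAX_FRAME_LEN 2352    /* stand-in for IEEE80211_MAX_FRAME_LEN */
#define MY_RX_BUFFERSIZE (MY_FRAMEOFFSET + MY_MAX_FRAME_LEN)

static int frame_fits(unsigned int len)
{
        /* The descriptor holds the offset plus the frame, so both the
         * buffer size and the overflow test must include the offset;
         * otherwise a maximum-size frame is wrongly treated as split
         * across multiple buffers. */
        return (len + MY_FRAMEOFFSET) <= MY_RX_BUFFERSIZE;
}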
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h index 98aa8af01192..20b66469d68f 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h | |||
| @@ -241,7 +241,7 @@ struct iwl_eeprom_enhanced_txpwr { | |||
| 241 | 241 | ||
| 242 | /* 6x00 Specific */ | 242 | /* 6x00 Specific */ |
| 243 | #define EEPROM_6000_TX_POWER_VERSION (4) | 243 | #define EEPROM_6000_TX_POWER_VERSION (4) |
| 244 | #define EEPROM_6000_EEPROM_VERSION (0x434) | 244 | #define EEPROM_6000_EEPROM_VERSION (0x423) |
| 245 | 245 | ||
| 246 | /* 6x50 Specific */ | 246 | /* 6x50 Specific */ |
| 247 | #define EEPROM_6050_TX_POWER_VERSION (4) | 247 | #define EEPROM_6050_TX_POWER_VERSION (4) |
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index 9b344a921e74..e18358725b69 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c | |||
| @@ -56,6 +56,7 @@ static struct usb_device_id p54u_table[] __devinitdata = { | |||
| 56 | {USB_DEVICE(0x0846, 0x4210)}, /* Netgear WG121 the second ? */ | 56 | {USB_DEVICE(0x0846, 0x4210)}, /* Netgear WG121 the second ? */ |
| 57 | {USB_DEVICE(0x0846, 0x4220)}, /* Netgear WG111 */ | 57 | {USB_DEVICE(0x0846, 0x4220)}, /* Netgear WG111 */ |
| 58 | {USB_DEVICE(0x09aa, 0x1000)}, /* Spinnaker Proto board */ | 58 | {USB_DEVICE(0x09aa, 0x1000)}, /* Spinnaker Proto board */ |
| 59 | {USB_DEVICE(0x0bf8, 0x1007)}, /* Fujitsu E-5400 USB */ | ||
| 59 | {USB_DEVICE(0x0cde, 0x0006)}, /* Medion 40900, Roper Europe */ | 60 | {USB_DEVICE(0x0cde, 0x0006)}, /* Medion 40900, Roper Europe */ |
| 60 | {USB_DEVICE(0x0db0, 0x6826)}, /* MSI UB54G (MS-6826) */ | 61 | {USB_DEVICE(0x0db0, 0x6826)}, /* MSI UB54G (MS-6826) */ |
| 61 | {USB_DEVICE(0x107b, 0x55f2)}, /* Gateway WGU-210 (Gemtek) */ | 62 | {USB_DEVICE(0x107b, 0x55f2)}, /* Gateway WGU-210 (Gemtek) */ |
| @@ -68,6 +69,7 @@ static struct usb_device_id p54u_table[] __devinitdata = { | |||
| 68 | {USB_DEVICE(0x1915, 0x2235)}, /* Linksys WUSB54G Portable OEM */ | 69 | {USB_DEVICE(0x1915, 0x2235)}, /* Linksys WUSB54G Portable OEM */ |
| 69 | {USB_DEVICE(0x2001, 0x3701)}, /* DLink DWL-G120 Spinnaker */ | 70 | {USB_DEVICE(0x2001, 0x3701)}, /* DLink DWL-G120 Spinnaker */ |
| 70 | {USB_DEVICE(0x2001, 0x3703)}, /* DLink DWL-G122 */ | 71 | {USB_DEVICE(0x2001, 0x3703)}, /* DLink DWL-G122 */ |
| 72 | {USB_DEVICE(0x2001, 0x3762)}, /* Conceptronic C54U */ | ||
| 71 | {USB_DEVICE(0x5041, 0x2234)}, /* Linksys WUSB54G */ | 73 | {USB_DEVICE(0x5041, 0x2234)}, /* Linksys WUSB54G */ |
| 72 | {USB_DEVICE(0x5041, 0x2235)}, /* Linksys WUSB54G Portable */ | 74 | {USB_DEVICE(0x5041, 0x2235)}, /* Linksys WUSB54G Portable */ |
| 73 | 75 | ||
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c index 9de9dbe94399..84eb6ad36377 100644 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c | |||
| @@ -1062,8 +1062,10 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev) | |||
| 1062 | * Stop all work. | 1062 | * Stop all work. |
| 1063 | */ | 1063 | */ |
| 1064 | cancel_work_sync(&rt2x00dev->intf_work); | 1064 | cancel_work_sync(&rt2x00dev->intf_work); |
| 1065 | cancel_work_sync(&rt2x00dev->rxdone_work); | 1065 | if (rt2x00_is_usb(rt2x00dev)) { |
| 1066 | cancel_work_sync(&rt2x00dev->txdone_work); | 1066 | cancel_work_sync(&rt2x00dev->rxdone_work); |
| 1067 | cancel_work_sync(&rt2x00dev->txdone_work); | ||
| 1068 | } | ||
| 1067 | destroy_workqueue(rt2x00dev->workqueue); | 1069 | destroy_workqueue(rt2x00dev->workqueue); |
| 1068 | 1070 | ||
| 1069 | /* | 1071 | /* |
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c index f74a8701c67d..590f14f45a89 100644 --- a/drivers/net/wireless/rtlwifi/efuse.c +++ b/drivers/net/wireless/rtlwifi/efuse.c | |||
| @@ -685,7 +685,7 @@ static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data) | |||
| 685 | 685 | ||
| 686 | u8 efuse_data, word_cnts = 0; | 686 | u8 efuse_data, word_cnts = 0; |
| 687 | u16 efuse_addr = 0; | 687 | u16 efuse_addr = 0; |
| 688 | u8 hworden; | 688 | u8 hworden = 0; |
| 689 | u8 tmpdata[8]; | 689 | u8 tmpdata[8]; |
| 690 | 690 | ||
| 691 | if (data == NULL) | 691 | if (data == NULL) |
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c index 5ef91374b230..28a6ce3bc239 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c +++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c | |||
| @@ -303,7 +303,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw, | |||
| 303 | u16 box_reg, box_extreg; | 303 | u16 box_reg, box_extreg; |
| 304 | u8 u1b_tmp; | 304 | u8 u1b_tmp; |
| 305 | bool isfw_read = false; | 305 | bool isfw_read = false; |
| 306 | u8 buf_index; | 306 | u8 buf_index = 0; |
| 307 | bool bwrite_sucess = false; | 307 | bool bwrite_sucess = false; |
| 308 | u8 wait_h2c_limmit = 100; | 308 | u8 wait_h2c_limmit = 100; |
| 309 | u8 wait_writeh2c_limmit = 100; | 309 | u8 wait_writeh2c_limmit = 100; |
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c index a4b2613d6a8c..f5d85735d642 100644 --- a/drivers/net/wireless/rtlwifi/usb.c +++ b/drivers/net/wireless/rtlwifi/usb.c | |||
| @@ -246,7 +246,7 @@ static void _rtl_usb_io_handler_init(struct device *dev, | |||
| 246 | 246 | ||
| 247 | static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw) | 247 | static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw) |
| 248 | { | 248 | { |
| 249 | struct rtl_priv *rtlpriv = rtl_priv(hw); | 249 | struct rtl_priv __maybe_unused *rtlpriv = rtl_priv(hw); |
| 250 | 250 | ||
| 251 | mutex_destroy(&rtlpriv->io.bb_mutex); | 251 | mutex_destroy(&rtlpriv->io.bb_mutex); |
| 252 | } | 252 | } |
diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/wl12xx/sdio.c index 5b9dbeafec06..b1c7d031c391 100644 --- a/drivers/net/wireless/wl12xx/sdio.c +++ b/drivers/net/wireless/wl12xx/sdio.c | |||
| @@ -340,7 +340,7 @@ module_init(wl1271_init); | |||
| 340 | module_exit(wl1271_exit); | 340 | module_exit(wl1271_exit); |
| 341 | 341 | ||
| 342 | MODULE_LICENSE("GPL"); | 342 | MODULE_LICENSE("GPL"); |
| 343 | MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>"); | 343 | MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); |
| 344 | MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); | 344 | MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); |
| 345 | MODULE_FIRMWARE(WL1271_FW_NAME); | 345 | MODULE_FIRMWARE(WL1271_FW_NAME); |
| 346 | MODULE_FIRMWARE(WL1271_AP_FW_NAME); | 346 | MODULE_FIRMWARE(WL1271_AP_FW_NAME); |
diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c index 18cf01719ae0..ffc745b17f4d 100644 --- a/drivers/net/wireless/wl12xx/spi.c +++ b/drivers/net/wireless/wl12xx/spi.c | |||
| @@ -487,7 +487,7 @@ module_init(wl1271_init); | |||
| 487 | module_exit(wl1271_exit); | 487 | module_exit(wl1271_exit); |
| 488 | 488 | ||
| 489 | MODULE_LICENSE("GPL"); | 489 | MODULE_LICENSE("GPL"); |
| 490 | MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>"); | 490 | MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); |
| 491 | MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); | 491 | MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); |
| 492 | MODULE_FIRMWARE(WL1271_FW_NAME); | 492 | MODULE_FIRMWARE(WL1271_FW_NAME); |
| 493 | MODULE_FIRMWARE(WL1271_AP_FW_NAME); | 493 | MODULE_FIRMWARE(WL1271_AP_FW_NAME); |
diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/wl12xx/testmode.c index e64403b6896d..6ec06a4a4c6d 100644 --- a/drivers/net/wireless/wl12xx/testmode.c +++ b/drivers/net/wireless/wl12xx/testmode.c | |||
| @@ -204,7 +204,10 @@ static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[]) | |||
| 204 | 204 | ||
| 205 | kfree(wl->nvs); | 205 | kfree(wl->nvs); |
| 206 | 206 | ||
| 207 | wl->nvs = kzalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL); | 207 | if (len != sizeof(struct wl1271_nvs_file)) |
| 208 | return -EINVAL; | ||
| 209 | |||
| 210 | wl->nvs = kzalloc(len, GFP_KERNEL); | ||
| 208 | if (!wl->nvs) { | 211 | if (!wl->nvs) { |
| 209 | wl1271_error("could not allocate memory for the nvs file"); | 212 | wl1271_error("could not allocate memory for the nvs file"); |
| 210 | ret = -ENOMEM; | 213 | ret = -ENOMEM; |
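[Editor's note] The wl12xx testmode fix validates the userspace-supplied NVS length against sizeof(struct wl1271_nvs_file) before allocating, so a short or oversized blob cannot corrupt the parsed structure (the copy itself happens later in the function, outside this hunk). A hedged sketch of the check-then-allocate pattern; wl1271_nvs_file is the real structure name from the diff, while load_blob() and its callers are invented for illustration.

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Illustrative only: reject a blob whose size does not match the expected
 * layout before touching the allocator. */
static void *load_blob(const void *buf, size_t len, size_t expected)
{
        void *copy;

        if (len != expected)            /* validate the untrusted length first */
                return ERR_PTR(-EINVAL);

        copy = kmemdup(buf, len, GFP_KERNEL);
        if (!copy)
                return ERR_PTR(-ENOMEM);

        return copy;
}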
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c index 58236e6d0921..ab607bbd6291 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zd1211rw/zd_usb.c | |||
| @@ -643,7 +643,7 @@ static void rx_urb_complete(struct urb *urb) | |||
| 643 | usb = urb->context; | 643 | usb = urb->context; |
| 644 | rx = &usb->rx; | 644 | rx = &usb->rx; |
| 645 | 645 | ||
| 646 | zd_usb_reset_rx_idle_timer(usb); | 646 | tasklet_schedule(&rx->reset_timer_tasklet); |
| 647 | 647 | ||
| 648 | if (length%rx->usb_packet_size > rx->usb_packet_size-4) { | 648 | if (length%rx->usb_packet_size > rx->usb_packet_size-4) { |
| 649 | /* If there is an old first fragment, we don't care. */ | 649 | /* If there is an old first fragment, we don't care. */ |
| @@ -812,6 +812,7 @@ void zd_usb_disable_rx(struct zd_usb *usb) | |||
| 812 | __zd_usb_disable_rx(usb); | 812 | __zd_usb_disable_rx(usb); |
| 813 | mutex_unlock(&rx->setup_mutex); | 813 | mutex_unlock(&rx->setup_mutex); |
| 814 | 814 | ||
| 815 | tasklet_kill(&rx->reset_timer_tasklet); | ||
| 815 | cancel_delayed_work_sync(&rx->idle_work); | 816 | cancel_delayed_work_sync(&rx->idle_work); |
| 816 | } | 817 | } |
| 817 | 818 | ||
| @@ -1106,6 +1107,13 @@ static void zd_rx_idle_timer_handler(struct work_struct *work) | |||
| 1106 | zd_usb_reset_rx(usb); | 1107 | zd_usb_reset_rx(usb); |
| 1107 | } | 1108 | } |
| 1108 | 1109 | ||
| 1110 | static void zd_usb_reset_rx_idle_timer_tasklet(unsigned long param) | ||
| 1111 | { | ||
| 1112 | struct zd_usb *usb = (struct zd_usb *)param; | ||
| 1113 | |||
| 1114 | zd_usb_reset_rx_idle_timer(usb); | ||
| 1115 | } | ||
| 1116 | |||
| 1109 | void zd_usb_reset_rx_idle_timer(struct zd_usb *usb) | 1117 | void zd_usb_reset_rx_idle_timer(struct zd_usb *usb) |
| 1110 | { | 1118 | { |
| 1111 | struct zd_usb_rx *rx = &usb->rx; | 1119 | struct zd_usb_rx *rx = &usb->rx; |
| @@ -1127,6 +1135,7 @@ static inline void init_usb_interrupt(struct zd_usb *usb) | |||
| 1127 | static inline void init_usb_rx(struct zd_usb *usb) | 1135 | static inline void init_usb_rx(struct zd_usb *usb) |
| 1128 | { | 1136 | { |
| 1129 | struct zd_usb_rx *rx = &usb->rx; | 1137 | struct zd_usb_rx *rx = &usb->rx; |
| 1138 | |||
| 1130 | spin_lock_init(&rx->lock); | 1139 | spin_lock_init(&rx->lock); |
| 1131 | mutex_init(&rx->setup_mutex); | 1140 | mutex_init(&rx->setup_mutex); |
| 1132 | if (interface_to_usbdev(usb->intf)->speed == USB_SPEED_HIGH) { | 1141 | if (interface_to_usbdev(usb->intf)->speed == USB_SPEED_HIGH) { |
| @@ -1136,11 +1145,14 @@ static inline void init_usb_rx(struct zd_usb *usb) | |||
| 1136 | } | 1145 | } |
| 1137 | ZD_ASSERT(rx->fragment_length == 0); | 1146 | ZD_ASSERT(rx->fragment_length == 0); |
| 1138 | INIT_DELAYED_WORK(&rx->idle_work, zd_rx_idle_timer_handler); | 1147 | INIT_DELAYED_WORK(&rx->idle_work, zd_rx_idle_timer_handler); |
| 1148 | rx->reset_timer_tasklet.func = zd_usb_reset_rx_idle_timer_tasklet; | ||
| 1149 | rx->reset_timer_tasklet.data = (unsigned long)usb; | ||
| 1139 | } | 1150 | } |
| 1140 | 1151 | ||
| 1141 | static inline void init_usb_tx(struct zd_usb *usb) | 1152 | static inline void init_usb_tx(struct zd_usb *usb) |
| 1142 | { | 1153 | { |
| 1143 | struct zd_usb_tx *tx = &usb->tx; | 1154 | struct zd_usb_tx *tx = &usb->tx; |
| 1155 | |||
| 1144 | spin_lock_init(&tx->lock); | 1156 | spin_lock_init(&tx->lock); |
| 1145 | atomic_set(&tx->enabled, 0); | 1157 | atomic_set(&tx->enabled, 0); |
| 1146 | tx->stopped = 0; | 1158 | tx->stopped = 0; |
| @@ -1671,6 +1683,10 @@ static void iowrite16v_urb_complete(struct urb *urb) | |||
| 1671 | 1683 | ||
| 1672 | if (urb->status && !usb->cmd_error) | 1684 | if (urb->status && !usb->cmd_error) |
| 1673 | usb->cmd_error = urb->status; | 1685 | usb->cmd_error = urb->status; |
| 1686 | |||
| 1687 | if (!usb->cmd_error && | ||
| 1688 | urb->actual_length != urb->transfer_buffer_length) | ||
| 1689 | usb->cmd_error = -EIO; | ||
| 1674 | } | 1690 | } |
| 1675 | 1691 | ||
| 1676 | static int zd_submit_waiting_urb(struct zd_usb *usb, bool last) | 1692 | static int zd_submit_waiting_urb(struct zd_usb *usb, bool last) |
| @@ -1805,7 +1821,7 @@ int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs, | |||
| 1805 | usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT), | 1821 | usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT), |
| 1806 | req, req_len, iowrite16v_urb_complete, usb, | 1822 | req, req_len, iowrite16v_urb_complete, usb, |
| 1807 | ep->desc.bInterval); | 1823 | ep->desc.bInterval); |
| 1808 | urb->transfer_flags |= URB_FREE_BUFFER | URB_SHORT_NOT_OK; | 1824 | urb->transfer_flags |= URB_FREE_BUFFER; |
| 1809 | 1825 | ||
| 1810 | /* Submit previous URB */ | 1826 | /* Submit previous URB */ |
| 1811 | r = zd_submit_waiting_urb(usb, false); | 1827 | r = zd_submit_waiting_urb(usb, false); |
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h index b3df2c8116cc..325d0f989257 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.h +++ b/drivers/net/wireless/zd1211rw/zd_usb.h | |||
| @@ -183,6 +183,7 @@ struct zd_usb_rx { | |||
| 183 | spinlock_t lock; | 183 | spinlock_t lock; |
| 184 | struct mutex setup_mutex; | 184 | struct mutex setup_mutex; |
| 185 | struct delayed_work idle_work; | 185 | struct delayed_work idle_work; |
| 186 | struct tasklet_struct reset_timer_tasklet; | ||
| 186 | u8 fragment[2 * USB_MAX_RX_SIZE]; | 187 | u8 fragment[2 * USB_MAX_RX_SIZE]; |
| 187 | unsigned int fragment_length; | 188 | unsigned int fragment_length; |
| 188 | unsigned int usb_packet_size; | 189 | unsigned int usb_packet_size; |
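[Editor's note] The zd1211rw changes stop resetting the RX idle timer directly from the URB completion handler; the handler now only schedules a tasklet that performs the reset, and zd_usb_disable_rx() kills that tasklet before cancelling the delayed work. A minimal sketch of the defer-to-tasklet pattern, assuming the completion path is too constrained for the heavier reset work; every name except the tasklet API itself is invented.

#include <linux/interrupt.h>

struct my_rx {
        struct tasklet_struct reset_tasklet;
};

static void my_reset_work(unsigned long param)
{
        /* struct my_rx *rx = (struct my_rx *)param; */
        /* ...do the actual (heavier) reset here, outside the URB
         * completion path... */
}

static void my_rx_init(struct my_rx *rx)
{
        tasklet_init(&rx->reset_tasklet, my_reset_work, (unsigned long)rx);
}

static void my_urb_complete(struct my_rx *rx)
{
        tasklet_schedule(&rx->reset_tasklet);   /* cheap and safe in atomic context */
}

static void my_rx_disable(struct my_rx *rx)
{
        tasklet_kill(&rx->reset_tasklet);       /* wait for any pending run to finish */
}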
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index d86ea8b01137..135df164a4c1 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c | |||
| @@ -781,7 +781,7 @@ static int pci_pm_resume(struct device *dev) | |||
| 781 | 781 | ||
| 782 | #endif /* !CONFIG_SUSPEND */ | 782 | #endif /* !CONFIG_SUSPEND */ |
| 783 | 783 | ||
| 784 | #ifdef CONFIG_HIBERNATION | 784 | #ifdef CONFIG_HIBERNATE_CALLBACKS |
| 785 | 785 | ||
| 786 | static int pci_pm_freeze(struct device *dev) | 786 | static int pci_pm_freeze(struct device *dev) |
| 787 | { | 787 | { |
| @@ -970,7 +970,7 @@ static int pci_pm_restore(struct device *dev) | |||
| 970 | return error; | 970 | return error; |
| 971 | } | 971 | } |
| 972 | 972 | ||
| 973 | #else /* !CONFIG_HIBERNATION */ | 973 | #else /* !CONFIG_HIBERNATE_CALLBACKS */ |
| 974 | 974 | ||
| 975 | #define pci_pm_freeze NULL | 975 | #define pci_pm_freeze NULL |
| 976 | #define pci_pm_freeze_noirq NULL | 976 | #define pci_pm_freeze_noirq NULL |
| @@ -981,7 +981,7 @@ static int pci_pm_restore(struct device *dev) | |||
| 981 | #define pci_pm_restore NULL | 981 | #define pci_pm_restore NULL |
| 982 | #define pci_pm_restore_noirq NULL | 982 | #define pci_pm_restore_noirq NULL |
| 983 | 983 | ||
| 984 | #endif /* !CONFIG_HIBERNATION */ | 984 | #endif /* !CONFIG_HIBERNATE_CALLBACKS */ |
| 985 | 985 | ||
| 986 | #ifdef CONFIG_PM_RUNTIME | 986 | #ifdef CONFIG_PM_RUNTIME |
| 987 | 987 | ||
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 89d0a6a88df7..ebf51ad1b714 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c | |||
| @@ -676,10 +676,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | |||
| 676 | min_align = align1 >> 1; | 676 | min_align = align1 >> 1; |
| 677 | align += aligns[order]; | 677 | align += aligns[order]; |
| 678 | } | 678 | } |
| 679 | size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), align); | 679 | size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align); |
| 680 | size1 = !add_size ? size : | 680 | size1 = !add_size ? size : |
| 681 | calculate_memsize(size, min_size+add_size, 0, | 681 | calculate_memsize(size, min_size+add_size, 0, |
| 682 | resource_size(b_res), align); | 682 | resource_size(b_res), min_align); |
| 683 | if (!size0 && !size1) { | 683 | if (!size0 && !size1) { |
| 684 | if (b_res->start || b_res->end) | 684 | if (b_res->start || b_res->end) |
| 685 | dev_info(&bus->self->dev, "disabling bridge window " | 685 | dev_info(&bus->self->dev, "disabling bridge window " |
diff --git a/drivers/pcmcia/pxa2xx_balloon3.c b/drivers/pcmcia/pxa2xx_balloon3.c index 453c54c97612..4c3e94c0ae85 100644 --- a/drivers/pcmcia/pxa2xx_balloon3.c +++ b/drivers/pcmcia/pxa2xx_balloon3.c | |||
| @@ -25,6 +25,8 @@ | |||
| 25 | 25 | ||
| 26 | #include <mach/balloon3.h> | 26 | #include <mach/balloon3.h> |
| 27 | 27 | ||
| 28 | #include <asm/mach-types.h> | ||
| 29 | |||
| 28 | #include "soc_common.h" | 30 | #include "soc_common.h" |
| 29 | 31 | ||
| 30 | /* | 32 | /* |
| @@ -127,6 +129,9 @@ static int __init balloon3_pcmcia_init(void) | |||
| 127 | { | 129 | { |
| 128 | int ret; | 130 | int ret; |
| 129 | 131 | ||
| 132 | if (!machine_is_balloon3()) | ||
| 133 | return -ENODEV; | ||
| 134 | |||
| 130 | balloon3_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); | 135 | balloon3_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); |
| 131 | if (!balloon3_pcmcia_device) | 136 | if (!balloon3_pcmcia_device) |
| 132 | return -ENOMEM; | 137 | return -ENOMEM; |
diff --git a/drivers/pcmcia/pxa2xx_trizeps4.c b/drivers/pcmcia/pxa2xx_trizeps4.c index b7e596620db1..b829e655457b 100644 --- a/drivers/pcmcia/pxa2xx_trizeps4.c +++ b/drivers/pcmcia/pxa2xx_trizeps4.c | |||
| @@ -69,15 +69,15 @@ static int trizeps_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | |||
| 69 | for (i = 0; i < ARRAY_SIZE(irqs); i++) { | 69 | for (i = 0; i < ARRAY_SIZE(irqs); i++) { |
| 70 | if (irqs[i].sock != skt->nr) | 70 | if (irqs[i].sock != skt->nr) |
| 71 | continue; | 71 | continue; |
| 72 | if (gpio_request(IRQ_TO_GPIO(irqs[i].irq), irqs[i].str) < 0) { | 72 | if (gpio_request(irq_to_gpio(irqs[i].irq), irqs[i].str) < 0) { |
| 73 | pr_err("%s: sock %d unable to request gpio %d\n", | 73 | pr_err("%s: sock %d unable to request gpio %d\n", |
| 74 | __func__, skt->nr, IRQ_TO_GPIO(irqs[i].irq)); | 74 | __func__, skt->nr, irq_to_gpio(irqs[i].irq)); |
| 75 | ret = -EBUSY; | 75 | ret = -EBUSY; |
| 76 | goto error; | 76 | goto error; |
| 77 | } | 77 | } |
| 78 | if (gpio_direction_input(IRQ_TO_GPIO(irqs[i].irq)) < 0) { | 78 | if (gpio_direction_input(irq_to_gpio(irqs[i].irq)) < 0) { |
| 79 | pr_err("%s: sock %d unable to set input gpio %d\n", | 79 | pr_err("%s: sock %d unable to set input gpio %d\n", |
| 80 | __func__, skt->nr, IRQ_TO_GPIO(irqs[i].irq)); | 80 | __func__, skt->nr, irq_to_gpio(irqs[i].irq)); |
| 81 | ret = -EINVAL; | 81 | ret = -EINVAL; |
| 82 | goto error; | 82 | goto error; |
| 83 | } | 83 | } |
| @@ -86,7 +86,7 @@ static int trizeps_pcmcia_hw_init(struct soc_pcmcia_socket *skt) | |||
| 86 | 86 | ||
| 87 | error: | 87 | error: |
| 88 | for (; i >= 0; i--) { | 88 | for (; i >= 0; i--) { |
| 89 | gpio_free(IRQ_TO_GPIO(irqs[i].irq)); | 89 | gpio_free(irq_to_gpio(irqs[i].irq)); |
| 90 | } | 90 | } |
| 91 | return (ret); | 91 | return (ret); |
| 92 | } | 92 | } |
| @@ -97,7 +97,7 @@ static void trizeps_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) | |||
| 97 | /* free allocated gpio's */ | 97 | /* free allocated gpio's */ |
| 98 | gpio_free(GPIO_PRDY); | 98 | gpio_free(GPIO_PRDY); |
| 99 | for (i = 0; i < ARRAY_SIZE(irqs); i++) | 99 | for (i = 0; i < ARRAY_SIZE(irqs); i++) |
| 100 | gpio_free(IRQ_TO_GPIO(irqs[i].irq)); | 100 | gpio_free(irq_to_gpio(irqs[i].irq)); |
| 101 | } | 101 | } |
| 102 | 102 | ||
| 103 | static unsigned long trizeps_pcmcia_status[2]; | 103 | static unsigned long trizeps_pcmcia_status[2]; |
| @@ -226,6 +226,9 @@ static int __init trizeps_pcmcia_init(void) | |||
| 226 | { | 226 | { |
| 227 | int ret; | 227 | int ret; |
| 228 | 228 | ||
| 229 | if (!machine_is_trizeps4() && !machine_is_trizeps4wl()) | ||
| 230 | return -ENODEV; | ||
| 231 | |||
| 229 | trizeps_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); | 232 | trizeps_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); |
| 230 | if (!trizeps_pcmcia_device) | 233 | if (!trizeps_pcmcia_device) |
| 231 | return -ENOMEM; | 234 | return -ENOMEM; |
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 2ee442c2a5db..0485e394712a 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
| @@ -187,7 +187,8 @@ config MSI_LAPTOP | |||
| 187 | depends on ACPI | 187 | depends on ACPI |
| 188 | depends on BACKLIGHT_CLASS_DEVICE | 188 | depends on BACKLIGHT_CLASS_DEVICE |
| 189 | depends on RFKILL | 189 | depends on RFKILL |
| 190 | depends on SERIO_I8042 | 190 | depends on INPUT && SERIO_I8042 |
| 191 | select INPUT_SPARSEKMAP | ||
| 191 | ---help--- | 192 | ---help--- |
| 192 | This is a driver for laptops built by MSI (MICRO-STAR | 193 | This is a driver for laptops built by MSI (MICRO-STAR |
| 193 | INTERNATIONAL): | 194 | INTERNATIONAL): |
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index 5ea6c3477d17..ac4e7f83ce6c 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c | |||
| @@ -89,7 +89,7 @@ MODULE_LICENSE("GPL"); | |||
| 89 | #define ACERWMID_EVENT_GUID "676AA15E-6A47-4D9F-A2CC-1E6D18D14026" | 89 | #define ACERWMID_EVENT_GUID "676AA15E-6A47-4D9F-A2CC-1E6D18D14026" |
| 90 | 90 | ||
| 91 | MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB"); | 91 | MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB"); |
| 92 | MODULE_ALIAS("wmi:6AF4F258-B401-42Fd-BE91-3D4AC2D7C0D3"); | 92 | MODULE_ALIAS("wmi:6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3"); |
| 93 | MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026"); | 93 | MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026"); |
| 94 | 94 | ||
| 95 | enum acer_wmi_event_ids { | 95 | enum acer_wmi_event_ids { |
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index efc776cb0c66..832a3fd7c1c8 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c | |||
| @@ -201,8 +201,8 @@ static int asus_wmi_input_init(struct asus_wmi *asus) | |||
| 201 | if (!asus->inputdev) | 201 | if (!asus->inputdev) |
| 202 | return -ENOMEM; | 202 | return -ENOMEM; |
| 203 | 203 | ||
| 204 | asus->inputdev->name = asus->driver->input_phys; | 204 | asus->inputdev->name = asus->driver->input_name; |
| 205 | asus->inputdev->phys = asus->driver->input_name; | 205 | asus->inputdev->phys = asus->driver->input_phys; |
| 206 | asus->inputdev->id.bustype = BUS_HOST; | 206 | asus->inputdev->id.bustype = BUS_HOST; |
| 207 | asus->inputdev->dev.parent = &asus->platform_device->dev; | 207 | asus->inputdev->dev.parent = &asus->platform_device->dev; |
| 208 | 208 | ||
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c index 0ddc434fb93b..649dcadd8ea3 100644 --- a/drivers/platform/x86/eeepc-wmi.c +++ b/drivers/platform/x86/eeepc-wmi.c | |||
| @@ -67,9 +67,11 @@ static const struct key_entry eeepc_wmi_keymap[] = { | |||
| 67 | { KE_KEY, 0x82, { KEY_CAMERA } }, | 67 | { KE_KEY, 0x82, { KEY_CAMERA } }, |
| 68 | { KE_KEY, 0x83, { KEY_CAMERA_ZOOMIN } }, | 68 | { KE_KEY, 0x83, { KEY_CAMERA_ZOOMIN } }, |
| 69 | { KE_KEY, 0x88, { KEY_WLAN } }, | 69 | { KE_KEY, 0x88, { KEY_WLAN } }, |
| 70 | { KE_KEY, 0xbd, { KEY_CAMERA } }, | ||
| 70 | { KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } }, | 71 | { KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } }, |
| 71 | { KE_KEY, 0xe0, { KEY_PROG1 } }, /* Task Manager */ | 72 | { KE_KEY, 0xe0, { KEY_PROG1 } }, /* Task Manager */ |
| 72 | { KE_KEY, 0xe1, { KEY_F14 } }, /* Change Resolution */ | 73 | { KE_KEY, 0xe1, { KEY_F14 } }, /* Change Resolution */ |
| 74 | { KE_KEY, 0xe8, { KEY_SCREENLOCK } }, | ||
| 73 | { KE_KEY, 0xe9, { KEY_BRIGHTNESS_ZERO } }, | 75 | { KE_KEY, 0xe9, { KEY_BRIGHTNESS_ZERO } }, |
| 74 | { KE_KEY, 0xeb, { KEY_CAMERA_ZOOMOUT } }, | 76 | { KE_KEY, 0xeb, { KEY_CAMERA_ZOOMOUT } }, |
| 75 | { KE_KEY, 0xec, { KEY_CAMERA_UP } }, | 77 | { KE_KEY, 0xec, { KEY_CAMERA_UP } }, |
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c index d653104b59cb..464bb3fc4d88 100644 --- a/drivers/platform/x86/intel_pmic_gpio.c +++ b/drivers/platform/x86/intel_pmic_gpio.c | |||
| @@ -74,6 +74,19 @@ struct pmic_gpio { | |||
| 74 | u32 trigger_type; | 74 | u32 trigger_type; |
| 75 | }; | 75 | }; |
| 76 | 76 | ||
| 77 | static void pmic_program_irqtype(int gpio, int type) | ||
| 78 | { | ||
| 79 | if (type & IRQ_TYPE_EDGE_RISING) | ||
| 80 | intel_scu_ipc_update_register(GPIO0 + gpio, 0x20, 0x20); | ||
| 81 | else | ||
| 82 | intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x20); | ||
| 83 | |||
| 84 | if (type & IRQ_TYPE_EDGE_FALLING) | ||
| 85 | intel_scu_ipc_update_register(GPIO0 + gpio, 0x10, 0x10); | ||
| 86 | else | ||
| 87 | intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x10); | ||
| 88 | }; | ||
| 89 | |||
| 77 | static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset) | 90 | static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset) |
| 78 | { | 91 | { |
| 79 | if (offset > 8) { | 92 | if (offset > 8) { |
| @@ -166,16 +179,38 @@ static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset) | |||
| 166 | return pg->irq_base + offset; | 179 | return pg->irq_base + offset; |
| 167 | } | 180 | } |
| 168 | 181 | ||
| 182 | static void pmic_bus_lock(struct irq_data *data) | ||
| 183 | { | ||
| 184 | struct pmic_gpio *pg = irq_data_get_irq_chip_data(data); | ||
| 185 | |||
| 186 | mutex_lock(&pg->buslock); | ||
| 187 | } | ||
| 188 | |||
| 189 | static void pmic_bus_sync_unlock(struct irq_data *data) | ||
| 190 | { | ||
| 191 | struct pmic_gpio *pg = irq_data_get_irq_chip_data(data); | ||
| 192 | |||
| 193 | if (pg->update_type) { | ||
| 194 | unsigned int gpio = pg->update_type & ~GPIO_UPDATE_TYPE; | ||
| 195 | |||
| 196 | pmic_program_irqtype(gpio, pg->trigger_type); | ||
| 197 | pg->update_type = 0; | ||
| 198 | } | ||
| 199 | mutex_unlock(&pg->buslock); | ||
| 200 | } | ||
| 201 | |||
| 169 | /* the gpiointr register is read-clear, so just do nothing. */ | 202 | /* the gpiointr register is read-clear, so just do nothing. */ |
| 170 | static void pmic_irq_unmask(struct irq_data *data) { } | 203 | static void pmic_irq_unmask(struct irq_data *data) { } |
| 171 | 204 | ||
| 172 | static void pmic_irq_mask(struct irq_data *data) { } | 205 | static void pmic_irq_mask(struct irq_data *data) { } |
| 173 | 206 | ||
| 174 | static struct irq_chip pmic_irqchip = { | 207 | static struct irq_chip pmic_irqchip = { |
| 175 | .name = "PMIC-GPIO", | 208 | .name = "PMIC-GPIO", |
| 176 | .irq_mask = pmic_irq_mask, | 209 | .irq_mask = pmic_irq_mask, |
| 177 | .irq_unmask = pmic_irq_unmask, | 210 | .irq_unmask = pmic_irq_unmask, |
| 178 | .irq_set_type = pmic_irq_type, | 211 | .irq_set_type = pmic_irq_type, |
| 212 | .irq_bus_lock = pmic_bus_lock, | ||
| 213 | .irq_bus_sync_unlock = pmic_bus_sync_unlock, | ||
| 179 | }; | 214 | }; |
| 180 | 215 | ||
| 181 | static irqreturn_t pmic_irq_handler(int irq, void *data) | 216 | static irqreturn_t pmic_irq_handler(int irq, void *data) |
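[Editor's note] The intel_pmic_gpio hunks add the standard slow-bus irq_chip split: .irq_set_type only records the requested trigger, and the real register writes happen in .irq_bus_sync_unlock(), serialised by a mutex taken in .irq_bus_lock(), because the bus access may sleep. A sketch of that split with a hypothetical chip structure and a commented write_hw() placeholder; the irq_chip callbacks and irq_data_get_irq_chip_data() are the real kernel interfaces.

#include <linux/irq.h>
#include <linux/mutex.h>

struct my_chip {
        struct mutex buslock;
        unsigned int pending_type;      /* recorded by the .irq_set_type hook */
        bool dirty;
};

static void my_bus_lock(struct irq_data *data)
{
        struct my_chip *c = irq_data_get_irq_chip_data(data);

        mutex_lock(&c->buslock);        /* may sleep: not called from hard irq context */
}

static void my_bus_sync_unlock(struct irq_data *data)
{
        struct my_chip *c = irq_data_get_irq_chip_data(data);

        if (c->dirty) {
                /* write_hw(c->pending_type);  -- hypothetical slow-bus write */
                c->dirty = false;
        }
        mutex_unlock(&c->buslock);
}

static struct irq_chip my_irqchip = {
        .name                = "my-slow-bus",
        .irq_bus_lock        = my_bus_lock,
        .irq_bus_sync_unlock = my_bus_sync_unlock,
};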
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c index de434c6dc2d6..d347116d150e 100644 --- a/drivers/platform/x86/samsung-laptop.c +++ b/drivers/platform/x86/samsung-laptop.c | |||
| @@ -571,6 +571,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = { | |||
| 571 | .callback = dmi_check_cb, | 571 | .callback = dmi_check_cb, |
| 572 | }, | 572 | }, |
| 573 | { | 573 | { |
| 574 | .ident = "R410 Plus", | ||
| 575 | .matches = { | ||
| 576 | DMI_MATCH(DMI_SYS_VENDOR, | ||
| 577 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 578 | DMI_MATCH(DMI_PRODUCT_NAME, "R410P"), | ||
| 579 | DMI_MATCH(DMI_BOARD_NAME, "R460"), | ||
| 580 | }, | ||
| 581 | .callback = dmi_check_cb, | ||
| 582 | }, | ||
| 583 | { | ||
| 574 | .ident = "R518", | 584 | .ident = "R518", |
| 575 | .matches = { | 585 | .matches = { |
| 576 | DMI_MATCH(DMI_SYS_VENDOR, | 586 | DMI_MATCH(DMI_SYS_VENDOR, |
| @@ -591,12 +601,12 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = { | |||
| 591 | .callback = dmi_check_cb, | 601 | .callback = dmi_check_cb, |
| 592 | }, | 602 | }, |
| 593 | { | 603 | { |
| 594 | .ident = "N150/N210/N220", | 604 | .ident = "N150/N210/N220/N230", |
| 595 | .matches = { | 605 | .matches = { |
| 596 | DMI_MATCH(DMI_SYS_VENDOR, | 606 | DMI_MATCH(DMI_SYS_VENDOR, |
| 597 | "SAMSUNG ELECTRONICS CO., LTD."), | 607 | "SAMSUNG ELECTRONICS CO., LTD."), |
| 598 | DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"), | 608 | DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220/N230"), |
| 599 | DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"), | 609 | DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220/N230"), |
| 600 | }, | 610 | }, |
| 601 | .callback = dmi_check_cb, | 611 | .callback = dmi_check_cb, |
| 602 | }, | 612 | }, |
| @@ -771,6 +781,7 @@ static int __init samsung_init(void) | |||
| 771 | 781 | ||
| 772 | /* create a backlight device to talk to this one */ | 782 | /* create a backlight device to talk to this one */ |
| 773 | memset(&props, 0, sizeof(struct backlight_properties)); | 783 | memset(&props, 0, sizeof(struct backlight_properties)); |
| 784 | props.type = BACKLIGHT_PLATFORM; | ||
| 774 | props.max_brightness = sabi_config->max_brightness; | 785 | props.max_brightness = sabi_config->max_brightness; |
| 775 | backlight_device = backlight_device_register("samsung", &sdev->dev, | 786 | backlight_device = backlight_device_register("samsung", &sdev->dev, |
| 776 | NULL, &backlight_ops, | 787 | NULL, &backlight_ops, |
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index e642f5f29504..8f709aec4da0 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c | |||
| @@ -138,6 +138,8 @@ MODULE_PARM_DESC(kbd_backlight_timeout, | |||
| 138 | "1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout " | 138 | "1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout " |
| 139 | "(default: 0)"); | 139 | "(default: 0)"); |
| 140 | 140 | ||
| 141 | static void sony_nc_kbd_backlight_resume(void); | ||
| 142 | |||
| 141 | enum sony_nc_rfkill { | 143 | enum sony_nc_rfkill { |
| 142 | SONY_WIFI, | 144 | SONY_WIFI, |
| 143 | SONY_BLUETOOTH, | 145 | SONY_BLUETOOTH, |
| @@ -771,11 +773,6 @@ static int sony_nc_handles_setup(struct platform_device *pd) | |||
| 771 | if (!handles) | 773 | if (!handles) |
| 772 | return -ENOMEM; | 774 | return -ENOMEM; |
| 773 | 775 | ||
| 774 | sysfs_attr_init(&handles->devattr.attr); | ||
| 775 | handles->devattr.attr.name = "handles"; | ||
| 776 | handles->devattr.attr.mode = S_IRUGO; | ||
| 777 | handles->devattr.show = sony_nc_handles_show; | ||
| 778 | |||
| 779 | for (i = 0; i < ARRAY_SIZE(handles->cap); i++) { | 776 | for (i = 0; i < ARRAY_SIZE(handles->cap); i++) { |
| 780 | if (!acpi_callsetfunc(sony_nc_acpi_handle, | 777 | if (!acpi_callsetfunc(sony_nc_acpi_handle, |
| 781 | "SN00", i + 0x20, &result)) { | 778 | "SN00", i + 0x20, &result)) { |
| @@ -785,11 +782,18 @@ static int sony_nc_handles_setup(struct platform_device *pd) | |||
| 785 | } | 782 | } |
| 786 | } | 783 | } |
| 787 | 784 | ||
| 788 | /* allow reading capabilities via sysfs */ | 785 | if (debug) { |
| 789 | if (device_create_file(&pd->dev, &handles->devattr)) { | 786 | sysfs_attr_init(&handles->devattr.attr); |
| 790 | kfree(handles); | 787 | handles->devattr.attr.name = "handles"; |
| 791 | handles = NULL; | 788 | handles->devattr.attr.mode = S_IRUGO; |
| 792 | return -1; | 789 | handles->devattr.show = sony_nc_handles_show; |
| 790 | |||
| 791 | /* allow reading capabilities via sysfs */ | ||
| 792 | if (device_create_file(&pd->dev, &handles->devattr)) { | ||
| 793 | kfree(handles); | ||
| 794 | handles = NULL; | ||
| 795 | return -1; | ||
| 796 | } | ||
| 793 | } | 797 | } |
| 794 | 798 | ||
| 795 | return 0; | 799 | return 0; |
| @@ -798,7 +802,8 @@ static int sony_nc_handles_setup(struct platform_device *pd) | |||
| 798 | static int sony_nc_handles_cleanup(struct platform_device *pd) | 802 | static int sony_nc_handles_cleanup(struct platform_device *pd) |
| 799 | { | 803 | { |
| 800 | if (handles) { | 804 | if (handles) { |
| 801 | device_remove_file(&pd->dev, &handles->devattr); | 805 | if (debug) |
| 806 | device_remove_file(&pd->dev, &handles->devattr); | ||
| 802 | kfree(handles); | 807 | kfree(handles); |
| 803 | handles = NULL; | 808 | handles = NULL; |
| 804 | } | 809 | } |
| @@ -808,6 +813,11 @@ static int sony_nc_handles_cleanup(struct platform_device *pd) | |||
| 808 | static int sony_find_snc_handle(int handle) | 813 | static int sony_find_snc_handle(int handle) |
| 809 | { | 814 | { |
| 810 | int i; | 815 | int i; |
| 816 | |||
| 817 | /* not initialized yet, return early */ | ||
| 818 | if (!handles) | ||
| 819 | return -1; | ||
| 820 | |||
| 811 | for (i = 0; i < 0x10; i++) { | 821 | for (i = 0; i < 0x10; i++) { |
| 812 | if (handles->cap[i] == handle) { | 822 | if (handles->cap[i] == handle) { |
| 813 | dprintk("found handle 0x%.4x (offset: 0x%.2x)\n", | 823 | dprintk("found handle 0x%.4x (offset: 0x%.2x)\n", |
| @@ -1168,6 +1178,9 @@ static int sony_nc_resume(struct acpi_device *device) | |||
| 1168 | /* re-read rfkill state */ | 1178 | /* re-read rfkill state */ |
| 1169 | sony_nc_rfkill_update(); | 1179 | sony_nc_rfkill_update(); |
| 1170 | 1180 | ||
| 1181 | /* restore kbd backlight states */ | ||
| 1182 | sony_nc_kbd_backlight_resume(); | ||
| 1183 | |||
| 1171 | return 0; | 1184 | return 0; |
| 1172 | } | 1185 | } |
| 1173 | 1186 | ||
| @@ -1355,6 +1368,7 @@ out_no_enum: | |||
| 1355 | #define KBDBL_HANDLER 0x137 | 1368 | #define KBDBL_HANDLER 0x137 |
| 1356 | #define KBDBL_PRESENT 0xB00 | 1369 | #define KBDBL_PRESENT 0xB00 |
| 1357 | #define SET_MODE 0xC00 | 1370 | #define SET_MODE 0xC00 |
| 1371 | #define SET_STATE 0xD00 | ||
| 1358 | #define SET_TIMEOUT 0xE00 | 1372 | #define SET_TIMEOUT 0xE00 |
| 1359 | 1373 | ||
| 1360 | struct kbd_backlight { | 1374 | struct kbd_backlight { |
| @@ -1377,6 +1391,10 @@ static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value) | |||
| 1377 | (value << 0x10) | SET_MODE, &result)) | 1391 | (value << 0x10) | SET_MODE, &result)) |
| 1378 | return -EIO; | 1392 | return -EIO; |
| 1379 | 1393 | ||
| 1394 | /* Try to turn the light on/off immediately */ | ||
| 1395 | sony_call_snc_handle(KBDBL_HANDLER, (value << 0x10) | SET_STATE, | ||
| 1396 | &result); | ||
| 1397 | |||
| 1380 | kbdbl_handle->mode = value; | 1398 | kbdbl_handle->mode = value; |
| 1381 | 1399 | ||
| 1382 | return 0; | 1400 | return 0; |
| @@ -1458,7 +1476,7 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd) | |||
| 1458 | { | 1476 | { |
| 1459 | int result; | 1477 | int result; |
| 1460 | 1478 | ||
| 1461 | if (sony_call_snc_handle(0x137, KBDBL_PRESENT, &result)) | 1479 | if (sony_call_snc_handle(KBDBL_HANDLER, KBDBL_PRESENT, &result)) |
| 1462 | return 0; | 1480 | return 0; |
| 1463 | if (!(result & 0x02)) | 1481 | if (!(result & 0x02)) |
| 1464 | return 0; | 1482 | return 0; |
| @@ -1501,13 +1519,36 @@ outkzalloc: | |||
| 1501 | static int sony_nc_kbd_backlight_cleanup(struct platform_device *pd) | 1519 | static int sony_nc_kbd_backlight_cleanup(struct platform_device *pd) |
| 1502 | { | 1520 | { |
| 1503 | if (kbdbl_handle) { | 1521 | if (kbdbl_handle) { |
| 1522 | int result; | ||
| 1523 | |||
| 1504 | device_remove_file(&pd->dev, &kbdbl_handle->mode_attr); | 1524 | device_remove_file(&pd->dev, &kbdbl_handle->mode_attr); |
| 1505 | device_remove_file(&pd->dev, &kbdbl_handle->timeout_attr); | 1525 | device_remove_file(&pd->dev, &kbdbl_handle->timeout_attr); |
| 1526 | |||
| 1527 | /* restore the default hw behaviour */ | ||
| 1528 | sony_call_snc_handle(KBDBL_HANDLER, 0x1000 | SET_MODE, &result); | ||
| 1529 | sony_call_snc_handle(KBDBL_HANDLER, SET_TIMEOUT, &result); | ||
| 1530 | |||
| 1506 | kfree(kbdbl_handle); | 1531 | kfree(kbdbl_handle); |
| 1507 | } | 1532 | } |
| 1508 | return 0; | 1533 | return 0; |
| 1509 | } | 1534 | } |
| 1510 | 1535 | ||
| 1536 | static void sony_nc_kbd_backlight_resume(void) | ||
| 1537 | { | ||
| 1538 | int ignore = 0; | ||
| 1539 | |||
| 1540 | if (!kbdbl_handle) | ||
| 1541 | return; | ||
| 1542 | |||
| 1543 | if (kbdbl_handle->mode == 0) | ||
| 1544 | sony_call_snc_handle(KBDBL_HANDLER, SET_MODE, &ignore); | ||
| 1545 | |||
| 1546 | if (kbdbl_handle->timeout != 0) | ||
| 1547 | sony_call_snc_handle(KBDBL_HANDLER, | ||
| 1548 | (kbdbl_handle->timeout << 0x10) | SET_TIMEOUT, | ||
| 1549 | &ignore); | ||
| 1550 | } | ||
| 1551 | |||
| 1511 | static void sony_nc_backlight_setup(void) | 1552 | static void sony_nc_backlight_setup(void) |
| 1512 | { | 1553 | { |
| 1513 | acpi_handle unused; | 1554 | acpi_handle unused; |
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index a08561f5349e..efb3b6b9bcdb 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c | |||
| @@ -8618,8 +8618,7 @@ static bool __pure __init tpacpi_is_valid_fw_id(const char* const s, | |||
| 8618 | tpacpi_is_fw_digit(s[1]) && | 8618 | tpacpi_is_fw_digit(s[1]) && |
| 8619 | s[2] == t && s[3] == 'T' && | 8619 | s[2] == t && s[3] == 'T' && |
| 8620 | tpacpi_is_fw_digit(s[4]) && | 8620 | tpacpi_is_fw_digit(s[4]) && |
| 8621 | tpacpi_is_fw_digit(s[5]) && | 8621 | tpacpi_is_fw_digit(s[5]); |
| 8622 | s[6] == 'W' && s[7] == 'W'; | ||
| 8623 | } | 8622 | } |
| 8624 | 8623 | ||
| 8625 | /* returns 0 - probe ok, or < 0 - probe error. | 8624 | /* returns 0 - probe ok, or < 0 - probe error. |
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c index c29719cacbca..86c9a091a2ff 100644 --- a/drivers/rapidio/rio.c +++ b/drivers/rapidio/rio.c | |||
| @@ -1171,16 +1171,17 @@ static int rio_hdid_setup(char *str) | |||
| 1171 | 1171 | ||
| 1172 | __setup("riohdid=", rio_hdid_setup); | 1172 | __setup("riohdid=", rio_hdid_setup); |
| 1173 | 1173 | ||
| 1174 | void rio_register_mport(struct rio_mport *port) | 1174 | int rio_register_mport(struct rio_mport *port) |
| 1175 | { | 1175 | { |
| 1176 | if (next_portid >= RIO_MAX_MPORTS) { | 1176 | if (next_portid >= RIO_MAX_MPORTS) { |
| 1177 | pr_err("RIO: reached specified max number of mports\n"); | 1177 | pr_err("RIO: reached specified max number of mports\n"); |
| 1178 | return; | 1178 | return 1; |
| 1179 | } | 1179 | } |
| 1180 | 1180 | ||
| 1181 | port->id = next_portid++; | 1181 | port->id = next_portid++; |
| 1182 | port->host_deviceid = rio_get_hdid(port->id); | 1182 | port->host_deviceid = rio_get_hdid(port->id); |
| 1183 | list_add_tail(&port->node, &rio_mports); | 1183 | list_add_tail(&port->node, &rio_mports); |
| 1184 | return 0; | ||
| 1184 | } | 1185 | } |
| 1185 | 1186 | ||
| 1186 | EXPORT_SYMBOL_GPL(rio_local_get_device_id); | 1187 | EXPORT_SYMBOL_GPL(rio_local_get_device_id); |
diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c index 095016a9dec1..ac2701b22e71 100644 --- a/drivers/rapidio/switches/idt_gen2.c +++ b/drivers/rapidio/switches/idt_gen2.c | |||
| @@ -418,3 +418,4 @@ DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1848, idtg2_switch_init); | |||
| 418 | DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1616, idtg2_switch_init); | 418 | DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1616, idtg2_switch_init); |
| 419 | DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTVPS1616, idtg2_switch_init); | 419 | DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTVPS1616, idtg2_switch_init); |
| 420 | DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTSPS1616, idtg2_switch_init); | 420 | DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTSPS1616, idtg2_switch_init); |
| 421 | DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1432, idtg2_switch_init); | ||
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c index 09b4437b3e61..39013867cbd6 100644 --- a/drivers/rtc/class.c +++ b/drivers/rtc/class.c | |||
| @@ -171,7 +171,7 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev, | |||
| 171 | err = __rtc_read_alarm(rtc, &alrm); | 171 | err = __rtc_read_alarm(rtc, &alrm); |
| 172 | 172 | ||
| 173 | if (!err && !rtc_valid_tm(&alrm.time)) | 173 | if (!err && !rtc_valid_tm(&alrm.time)) |
| 174 | rtc_set_alarm(rtc, &alrm); | 174 | rtc_initialize_alarm(rtc, &alrm); |
| 175 | 175 | ||
| 176 | strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE); | 176 | strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE); |
| 177 | dev_set_name(&rtc->dev, "rtc%d", id); | 177 | dev_set_name(&rtc->dev, "rtc%d", id); |
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 23719f0acbf6..ef6316acec43 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c | |||
| @@ -375,6 +375,32 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) | |||
| 375 | } | 375 | } |
| 376 | EXPORT_SYMBOL_GPL(rtc_set_alarm); | 376 | EXPORT_SYMBOL_GPL(rtc_set_alarm); |
| 377 | 377 | ||
| 378 | /* Called once per device from rtc_device_register */ | ||
| 379 | int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) | ||
| 380 | { | ||
| 381 | int err; | ||
| 382 | |||
| 383 | err = rtc_valid_tm(&alarm->time); | ||
| 384 | if (err != 0) | ||
| 385 | return err; | ||
| 386 | |||
| 387 | err = mutex_lock_interruptible(&rtc->ops_lock); | ||
| 388 | if (err) | ||
| 389 | return err; | ||
| 390 | |||
| 391 | rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time); | ||
| 392 | rtc->aie_timer.period = ktime_set(0, 0); | ||
| 393 | if (alarm->enabled) { | ||
| 394 | rtc->aie_timer.enabled = 1; | ||
| 395 | timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node); | ||
| 396 | } | ||
| 397 | mutex_unlock(&rtc->ops_lock); | ||
| 398 | return err; | ||
| 399 | } | ||
| 400 | EXPORT_SYMBOL_GPL(rtc_initialize_alarm); | ||
| 401 | |||
| 402 | |||
| 403 | |||
| 378 | int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled) | 404 | int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled) |
| 379 | { | 405 | { |
| 380 | int err = mutex_lock_interruptible(&rtc->ops_lock); | 406 | int err = mutex_lock_interruptible(&rtc->ops_lock); |
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c index a0fc4cf42abf..90d866272c8e 100644 --- a/drivers/rtc/rtc-bfin.c +++ b/drivers/rtc/rtc-bfin.c | |||
| @@ -250,6 +250,8 @@ static int bfin_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) | |||
| 250 | bfin_rtc_int_set_alarm(rtc); | 250 | bfin_rtc_int_set_alarm(rtc); |
| 251 | else | 251 | else |
| 252 | bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)); | 252 | bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)); |
| 253 | |||
| 254 | return 0; | ||
| 253 | } | 255 | } |
| 254 | 256 | ||
| 255 | static int bfin_rtc_read_time(struct device *dev, struct rtc_time *tm) | 257 | static int bfin_rtc_read_time(struct device *dev, struct rtc_time *tm) |
diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c index c42006469559..c5ac03793e79 100644 --- a/drivers/rtc/rtc-mc13xxx.c +++ b/drivers/rtc/rtc-mc13xxx.c | |||
| @@ -401,6 +401,7 @@ const struct platform_device_id mc13xxx_rtc_idtable[] = { | |||
| 401 | }, { | 401 | }, { |
| 402 | .name = "mc13892-rtc", | 402 | .name = "mc13892-rtc", |
| 403 | }, | 403 | }, |
| 404 | { } | ||
| 404 | }; | 405 | }; |
| 405 | 406 | ||
| 406 | static struct platform_driver mc13xxx_rtc_driver = { | 407 | static struct platform_driver mc13xxx_rtc_driver = { |
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 714964913e5e..b3466c491cd3 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c | |||
| @@ -336,7 +336,6 @@ static void s3c_rtc_release(struct device *dev) | |||
| 336 | 336 | ||
| 337 | /* do not clear AIE here, it may be needed for wake */ | 337 | /* do not clear AIE here, it may be needed for wake */ |
| 338 | 338 | ||
| 339 | s3c_rtc_setpie(dev, 0); | ||
| 340 | free_irq(s3c_rtc_alarmno, rtc_dev); | 339 | free_irq(s3c_rtc_alarmno, rtc_dev); |
| 341 | free_irq(s3c_rtc_tickno, rtc_dev); | 340 | free_irq(s3c_rtc_tickno, rtc_dev); |
| 342 | } | 341 | } |
| @@ -408,7 +407,6 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev) | |||
| 408 | platform_set_drvdata(dev, NULL); | 407 | platform_set_drvdata(dev, NULL); |
| 409 | rtc_device_unregister(rtc); | 408 | rtc_device_unregister(rtc); |
| 410 | 409 | ||
| 411 | s3c_rtc_setpie(&dev->dev, 0); | ||
| 412 | s3c_rtc_setaie(&dev->dev, 0); | 410 | s3c_rtc_setaie(&dev->dev, 0); |
| 413 | 411 | ||
| 414 | clk_disable(rtc_clk); | 412 | clk_disable(rtc_clk); |
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index df14c51f6532..8e04c00cf0ad 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c | |||
| @@ -541,15 +541,24 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr, | |||
| 541 | int force, ret; | 541 | int force, ret; |
| 542 | unsigned long i; | 542 | unsigned long i; |
| 543 | 543 | ||
| 544 | if (!dev_fsm_final_state(cdev) && | 544 | /* Prevent conflict between multiple on-/offline processing requests. */ |
| 545 | cdev->private->state != DEV_STATE_DISCONNECTED) | ||
| 546 | return -EAGAIN; | ||
| 547 | if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0) | 545 | if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0) |
| 548 | return -EAGAIN; | 546 | return -EAGAIN; |
| 547 | /* Prevent conflict between internal I/Os and on-/offline processing. */ | ||
| 548 | if (!dev_fsm_final_state(cdev) && | ||
| 549 | cdev->private->state != DEV_STATE_DISCONNECTED) { | ||
| 550 | ret = -EAGAIN; | ||
| 551 | goto out_onoff; | ||
| 552 | } | ||
| 553 | /* Prevent conflict between pending work and on-/offline processing.*/ | ||
| 554 | if (work_pending(&cdev->private->todo_work)) { | ||
| 555 | ret = -EAGAIN; | ||
| 556 | goto out_onoff; | ||
| 557 | } | ||
| 549 | 558 | ||
| 550 | if (cdev->drv && !try_module_get(cdev->drv->driver.owner)) { | 559 | if (cdev->drv && !try_module_get(cdev->drv->driver.owner)) { |
| 551 | atomic_set(&cdev->private->onoff, 0); | 560 | ret = -EINVAL; |
| 552 | return -EINVAL; | 561 | goto out_onoff; |
| 553 | } | 562 | } |
| 554 | if (!strncmp(buf, "force\n", count)) { | 563 | if (!strncmp(buf, "force\n", count)) { |
| 555 | force = 1; | 564 | force = 1; |
| @@ -574,6 +583,7 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr, | |||
| 574 | out: | 583 | out: |
| 575 | if (cdev->drv) | 584 | if (cdev->drv) |
| 576 | module_put(cdev->drv->driver.owner); | 585 | module_put(cdev->drv->driver.owner); |
| 586 | out_onoff: | ||
| 577 | atomic_set(&cdev->private->onoff, 0); | 587 | atomic_set(&cdev->private->onoff, 0); |
| 578 | return (ret < 0) ? ret : count; | 588 | return (ret < 0) ? ret : count; |
| 579 | } | 589 | } |
| @@ -1311,10 +1321,12 @@ static int purge_fn(struct device *dev, void *data) | |||
| 1311 | 1321 | ||
| 1312 | spin_lock_irq(cdev->ccwlock); | 1322 | spin_lock_irq(cdev->ccwlock); |
| 1313 | if (is_blacklisted(id->ssid, id->devno) && | 1323 | if (is_blacklisted(id->ssid, id->devno) && |
| 1314 | (cdev->private->state == DEV_STATE_OFFLINE)) { | 1324 | (cdev->private->state == DEV_STATE_OFFLINE) && |
| 1325 | (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) { | ||
| 1315 | CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid, | 1326 | CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid, |
| 1316 | id->devno); | 1327 | id->devno); |
| 1317 | ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); | 1328 | ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); |
| 1329 | atomic_set(&cdev->private->onoff, 0); | ||
| 1318 | } | 1330 | } |
| 1319 | spin_unlock_irq(cdev->ccwlock); | 1331 | spin_unlock_irq(cdev->ccwlock); |
| 1320 | /* Abort loop in case of pending signal. */ | 1332 | /* Abort loop in case of pending signal. */ |
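[Editor's note] Both cio hunks hinge on the same guard: atomic_cmpxchg() on a 0/1 "onoff" flag is taken before anything else, every failure path releases it, and purge_fn() now also refuses to unregister a device whose flag is held by a concurrent online/offline request. A generic sketch of that try-lock-like idiom; do_transition() is a hypothetical stand-in for the guarded work.

#include <linux/atomic.h>
#include <linux/errno.h>

static atomic_t onoff = ATOMIC_INIT(0);

static int my_online_store(void)
{
        int ret;

        /* Only one state transition at a time; -EAGAIN asks the caller to retry. */
        if (atomic_cmpxchg(&onoff, 0, 1) != 0)
                return -EAGAIN;

        ret = 0;
        /* ret = do_transition();  -- hypothetical work done while holding the flag */

        atomic_set(&onoff, 0);          /* every exit path must drop the flag */
        return ret;
}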
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 479c665e9e7c..c532ba929ccd 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c | |||
| @@ -1649,26 +1649,26 @@ static int __init init_QDIO(void) | |||
| 1649 | { | 1649 | { |
| 1650 | int rc; | 1650 | int rc; |
| 1651 | 1651 | ||
| 1652 | rc = qdio_setup_init(); | 1652 | rc = qdio_debug_init(); |
| 1653 | if (rc) | 1653 | if (rc) |
| 1654 | return rc; | 1654 | return rc; |
| 1655 | rc = qdio_setup_init(); | ||
| 1656 | if (rc) | ||
| 1657 | goto out_debug; | ||
| 1655 | rc = tiqdio_allocate_memory(); | 1658 | rc = tiqdio_allocate_memory(); |
| 1656 | if (rc) | 1659 | if (rc) |
| 1657 | goto out_cache; | 1660 | goto out_cache; |
| 1658 | rc = qdio_debug_init(); | ||
| 1659 | if (rc) | ||
| 1660 | goto out_ti; | ||
| 1661 | rc = tiqdio_register_thinints(); | 1661 | rc = tiqdio_register_thinints(); |
| 1662 | if (rc) | 1662 | if (rc) |
| 1663 | goto out_debug; | 1663 | goto out_ti; |
| 1664 | return 0; | 1664 | return 0; |
| 1665 | 1665 | ||
| 1666 | out_debug: | ||
| 1667 | qdio_debug_exit(); | ||
| 1668 | out_ti: | 1666 | out_ti: |
| 1669 | tiqdio_free_memory(); | 1667 | tiqdio_free_memory(); |
| 1670 | out_cache: | 1668 | out_cache: |
| 1671 | qdio_setup_exit(); | 1669 | qdio_setup_exit(); |
| 1670 | out_debug: | ||
| 1671 | qdio_debug_exit(); | ||
| 1672 | return rc; | 1672 | return rc; |
| 1673 | } | 1673 | } |
| 1674 | 1674 | ||
| @@ -1676,8 +1676,8 @@ static void __exit exit_QDIO(void) | |||
| 1676 | { | 1676 | { |
| 1677 | tiqdio_unregister_thinints(); | 1677 | tiqdio_unregister_thinints(); |
| 1678 | tiqdio_free_memory(); | 1678 | tiqdio_free_memory(); |
| 1679 | qdio_debug_exit(); | ||
| 1680 | qdio_setup_exit(); | 1679 | qdio_setup_exit(); |
| 1680 | qdio_debug_exit(); | ||
| 1681 | } | 1681 | } |
| 1682 | 1682 | ||
| 1683 | module_init(init_QDIO); | 1683 | module_init(init_QDIO); |
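[Editor's note] The qdio reorder brings the debug facility up first and makes both the error unwind in init_QDIO() and exit_QDIO() tear things down strictly in reverse order of setup. A compact sketch of the goto-unwind convention the hunk follows, with the setup/teardown pairs reduced to trivial a/b/c stubs standing in for qdio_debug_init(), qdio_setup_init(), tiqdio_allocate_memory() and their exit counterparts.

/* Illustrative stubs only. */
static int a_init(void) { return 0; }
static int b_init(void) { return 0; }
static int c_init(void) { return 0; }
static void a_exit(void) { }
static void b_exit(void) { }
static void c_exit(void) { }

static int my_init(void)
{
        int rc;

        rc = a_init();
        if (rc)
                return rc;
        rc = b_init();
        if (rc)
                goto out_a;
        rc = c_init();
        if (rc)
                goto out_b;
        return 0;

out_b:
        b_exit();
out_a:
        a_exit();
        return rc;
}

static void my_exit(void)
{
        c_exit();
        b_exit();
        a_exit();       /* strictly the reverse of the setup order */
}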
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c index 5825370bad25..08de58e7f59f 100644 --- a/drivers/spi/amba-pl022.c +++ b/drivers/spi/amba-pl022.c | |||
| @@ -1555,7 +1555,7 @@ static int stop_queue(struct pl022 *pl022) | |||
| 1555 | * A wait_queue on the pl022->busy could be used, but then the common | 1555 | * A wait_queue on the pl022->busy could be used, but then the common |
| 1556 | * execution path (pump_messages) would be required to call wake_up or | 1556 | * execution path (pump_messages) would be required to call wake_up or |
| 1557 | * friends on every SPI message. Do this instead */ | 1557 | * friends on every SPI message. Do this instead */ |
| 1558 | while (!list_empty(&pl022->queue) && pl022->busy && limit--) { | 1558 | while ((!list_empty(&pl022->queue) || pl022->busy) && limit--) { |
| 1559 | spin_unlock_irqrestore(&pl022->queue_lock, flags); | 1559 | spin_unlock_irqrestore(&pl022->queue_lock, flags); |
| 1560 | msleep(10); | 1560 | msleep(10); |
| 1561 | spin_lock_irqsave(&pl022->queue_lock, flags); | 1561 | spin_lock_irqsave(&pl022->queue_lock, flags); |
diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c index b1a4b9f503ae..871e337c917f 100644 --- a/drivers/spi/dw_spi.c +++ b/drivers/spi/dw_spi.c | |||
| @@ -821,7 +821,7 @@ static int stop_queue(struct dw_spi *dws) | |||
| 821 | 821 | ||
| 822 | spin_lock_irqsave(&dws->lock, flags); | 822 | spin_lock_irqsave(&dws->lock, flags); |
| 823 | dws->run = QUEUE_STOPPED; | 823 | dws->run = QUEUE_STOPPED; |
| 824 | while (!list_empty(&dws->queue) && dws->busy && limit--) { | 824 | while ((!list_empty(&dws->queue) || dws->busy) && limit--) { |
| 825 | spin_unlock_irqrestore(&dws->lock, flags); | 825 | spin_unlock_irqrestore(&dws->lock, flags); |
| 826 | msleep(10); | 826 | msleep(10); |
| 827 | spin_lock_irqsave(&dws->lock, flags); | 827 | spin_lock_irqsave(&dws->lock, flags); |
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c index 9c74aad6be93..dc25bee8d33f 100644 --- a/drivers/spi/pxa2xx_spi.c +++ b/drivers/spi/pxa2xx_spi.c | |||
| @@ -1493,7 +1493,7 @@ static int stop_queue(struct driver_data *drv_data) | |||
| 1493 | * execution path (pump_messages) would be required to call wake_up or | 1493 | * execution path (pump_messages) would be required to call wake_up or |
| 1494 | * friends on every SPI message. Do this instead */ | 1494 | * friends on every SPI message. Do this instead */ |
| 1495 | drv_data->run = QUEUE_STOPPED; | 1495 | drv_data->run = QUEUE_STOPPED; |
| 1496 | while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) { | 1496 | while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) { |
| 1497 | spin_unlock_irqrestore(&drv_data->lock, flags); | 1497 | spin_unlock_irqrestore(&drv_data->lock, flags); |
| 1498 | msleep(10); | 1498 | msleep(10); |
| 1499 | spin_lock_irqsave(&drv_data->lock, flags); | 1499 | spin_lock_irqsave(&drv_data->lock, flags); |
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c index bdb7289a1d22..f706dba165cf 100644 --- a/drivers/spi/spi_bfin5xx.c +++ b/drivers/spi/spi_bfin5xx.c | |||
| @@ -1284,7 +1284,7 @@ static inline int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data) | |||
| 1284 | * friends on every SPI message. Do this instead | 1284 | * friends on every SPI message. Do this instead |
| 1285 | */ | 1285 | */ |
| 1286 | drv_data->running = false; | 1286 | drv_data->running = false; |
| 1287 | while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) { | 1287 | while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) { |
| 1288 | spin_unlock_irqrestore(&drv_data->lock, flags); | 1288 | spin_unlock_irqrestore(&drv_data->lock, flags); |
| 1289 | msleep(10); | 1289 | msleep(10); |
| 1290 | spin_lock_irqsave(&drv_data->lock, flags); | 1290 | spin_lock_irqsave(&drv_data->lock, flags); |
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index dca4a0bb6ca9..e3786f161bc3 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig | |||
| @@ -131,8 +131,6 @@ source "drivers/staging/wlags49_h2/Kconfig" | |||
| 131 | 131 | ||
| 132 | source "drivers/staging/wlags49_h25/Kconfig" | 132 | source "drivers/staging/wlags49_h25/Kconfig" |
| 133 | 133 | ||
| 134 | source "drivers/staging/samsung-laptop/Kconfig" | ||
| 135 | |||
| 136 | source "drivers/staging/sm7xx/Kconfig" | 134 | source "drivers/staging/sm7xx/Kconfig" |
| 137 | 135 | ||
| 138 | source "drivers/staging/dt3155v4l/Kconfig" | 136 | source "drivers/staging/dt3155v4l/Kconfig" |
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index eb93012b6f59..f0d5c5315612 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile | |||
| @@ -48,7 +48,6 @@ obj-$(CONFIG_XVMALLOC) += zram/ | |||
| 48 | obj-$(CONFIG_ZCACHE) += zcache/ | 48 | obj-$(CONFIG_ZCACHE) += zcache/ |
| 49 | obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/ | 49 | obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/ |
| 50 | obj-$(CONFIG_WLAGS49_H25) += wlags49_h25/ | 50 | obj-$(CONFIG_WLAGS49_H25) += wlags49_h25/ |
| 51 | obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop/ | ||
| 52 | obj-$(CONFIG_FB_SM7XX) += sm7xx/ | 51 | obj-$(CONFIG_FB_SM7XX) += sm7xx/ |
| 53 | obj-$(CONFIG_VIDEO_DT3155) += dt3155v4l/ | 52 | obj-$(CONFIG_VIDEO_DT3155) += dt3155v4l/ |
| 54 | obj-$(CONFIG_CRYSTALHD) += crystalhd/ | 53 | obj-$(CONFIG_CRYSTALHD) += crystalhd/ |
diff --git a/drivers/staging/samsung-laptop/Kconfig b/drivers/staging/samsung-laptop/Kconfig deleted file mode 100644 index f27c60864c26..000000000000 --- a/drivers/staging/samsung-laptop/Kconfig +++ /dev/null | |||
| @@ -1,10 +0,0 @@ | |||
| 1 | config SAMSUNG_LAPTOP | ||
| 2 | tristate "Samsung Laptop driver" | ||
| 3 | default n | ||
| 4 | depends on RFKILL && BACKLIGHT_CLASS_DEVICE && X86 | ||
| 5 | help | ||
| 6 | This module implements a driver for the N128 Samsung Laptop | ||
| 7 | providing control over the Wireless LED and the LCD backlight | ||
| 8 | |||
| 9 | To compile this driver as a module, choose | ||
| 10 | M here: the module will be called samsung-laptop. | ||
diff --git a/drivers/staging/samsung-laptop/Makefile b/drivers/staging/samsung-laptop/Makefile deleted file mode 100644 index 3c6f42045211..000000000000 --- a/drivers/staging/samsung-laptop/Makefile +++ /dev/null | |||
| @@ -1 +0,0 @@ | |||
| 1 | obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o | ||
diff --git a/drivers/staging/samsung-laptop/TODO b/drivers/staging/samsung-laptop/TODO deleted file mode 100644 index f7a6d589916e..000000000000 --- a/drivers/staging/samsung-laptop/TODO +++ /dev/null | |||
| @@ -1,5 +0,0 @@ | |||
| 1 | TODO: | ||
| 2 | - review from other developers | ||
| 3 | - figure out ACPI video issues | ||
| 4 | |||
| 5 | Please send patches to Greg Kroah-Hartman <gregkh@suse.de> | ||
diff --git a/drivers/staging/samsung-laptop/samsung-laptop.c b/drivers/staging/samsung-laptop/samsung-laptop.c deleted file mode 100644 index 25294462b8b6..000000000000 --- a/drivers/staging/samsung-laptop/samsung-laptop.c +++ /dev/null | |||
| @@ -1,843 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Samsung Laptop driver | ||
| 3 | * | ||
| 4 | * Copyright (C) 2009,2011 Greg Kroah-Hartman (gregkh@suse.de) | ||
| 5 | * Copyright (C) 2009,2011 Novell Inc. | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify it | ||
| 8 | * under the terms of the GNU General Public License version 2 as published by | ||
| 9 | * the Free Software Foundation. | ||
| 10 | * | ||
| 11 | */ | ||
| 12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 13 | |||
| 14 | #include <linux/kernel.h> | ||
| 15 | #include <linux/init.h> | ||
| 16 | #include <linux/module.h> | ||
| 17 | #include <linux/delay.h> | ||
| 18 | #include <linux/pci.h> | ||
| 19 | #include <linux/backlight.h> | ||
| 20 | #include <linux/fb.h> | ||
| 21 | #include <linux/dmi.h> | ||
| 22 | #include <linux/platform_device.h> | ||
| 23 | #include <linux/rfkill.h> | ||
| 24 | |||
| 25 | /* | ||
| 26 | * This driver is needed because a number of Samsung laptops do not hook | ||
| 27 | * their control settings through ACPI. So we have to poke around in the | ||
| 28 | * BIOS to do things like brightness values, and "special" key controls. | ||
| 29 | */ | ||
| 30 | |||
| 31 | /* | ||
| 32 | * We have 0 - 8 as valid brightness levels. The specs say that level 0 should | ||
| 33 | * be reserved by the BIOS (which really doesn't make much sense), we tell | ||
| 34 | * userspace that the value is 0 - 7 and then just tell the hardware 1 - 8 | ||
| 35 | */ | ||
| 36 | #define MAX_BRIGHT 0x07 | ||
| 37 | |||
| 38 | |||
| 39 | #define SABI_IFACE_MAIN 0x00 | ||
| 40 | #define SABI_IFACE_SUB 0x02 | ||
| 41 | #define SABI_IFACE_COMPLETE 0x04 | ||
| 42 | #define SABI_IFACE_DATA 0x05 | ||
| 43 | |||
| 44 | /* Structure to get data back to the calling function */ | ||
| 45 | struct sabi_retval { | ||
| 46 | u8 retval[20]; | ||
| 47 | }; | ||
| 48 | |||
| 49 | struct sabi_header_offsets { | ||
| 50 | u8 port; | ||
| 51 | u8 re_mem; | ||
| 52 | u8 iface_func; | ||
| 53 | u8 en_mem; | ||
| 54 | u8 data_offset; | ||
| 55 | u8 data_segment; | ||
| 56 | }; | ||
| 57 | |||
| 58 | struct sabi_commands { | ||
| 59 | /* | ||
| 60 | * Brightness is 0 - 8, as described above. | ||
| 61 | * Value 0 is for the BIOS to use | ||
| 62 | */ | ||
| 63 | u8 get_brightness; | ||
| 64 | u8 set_brightness; | ||
| 65 | |||
| 66 | /* | ||
| 67 | * first byte: | ||
| 68 | * 0x00 - wireless is off | ||
| 69 | * 0x01 - wireless is on | ||
| 70 | * second byte: | ||
| 71 | * 0x02 - 3G is off | ||
| 72 | * 0x03 - 3G is on | ||
| 73 | * TODO, verify 3G is correct, that doesn't seem right... | ||
| 74 | */ | ||
| 75 | u8 get_wireless_button; | ||
| 76 | u8 set_wireless_button; | ||
| 77 | |||
| 78 | /* 0 is off, 1 is on */ | ||
| 79 | u8 get_backlight; | ||
| 80 | u8 set_backlight; | ||
| 81 | |||
| 82 | /* | ||
| 83 | * 0x80 or 0x00 - no action | ||
| 84 | * 0x81 - recovery key pressed | ||
| 85 | */ | ||
| 86 | u8 get_recovery_mode; | ||
| 87 | u8 set_recovery_mode; | ||
| 88 | |||
| 89 | /* | ||
| 90 | * on seclinux: 0 is low, 1 is high, | ||
| 91 | * on swsmi: 0 is normal, 1 is silent, 2 is turbo | ||
| 92 | */ | ||
| 93 | u8 get_performance_level; | ||
| 94 | u8 set_performance_level; | ||
| 95 | |||
| 96 | /* | ||
| 97 | * Tell the BIOS that Linux is running on this machine. | ||
| 98 | * 81 is on, 80 is off | ||
| 99 | */ | ||
| 100 | u8 set_linux; | ||
| 101 | }; | ||
| 102 | |||
| 103 | struct sabi_performance_level { | ||
| 104 | const char *name; | ||
| 105 | u8 value; | ||
| 106 | }; | ||
| 107 | |||
| 108 | struct sabi_config { | ||
| 109 | const char *test_string; | ||
| 110 | u16 main_function; | ||
| 111 | const struct sabi_header_offsets header_offsets; | ||
| 112 | const struct sabi_commands commands; | ||
| 113 | const struct sabi_performance_level performance_levels[4]; | ||
| 114 | u8 min_brightness; | ||
| 115 | u8 max_brightness; | ||
| 116 | }; | ||
| 117 | |||
| 118 | static const struct sabi_config sabi_configs[] = { | ||
| 119 | { | ||
| 120 | .test_string = "SECLINUX", | ||
| 121 | |||
| 122 | .main_function = 0x4c49, | ||
| 123 | |||
| 124 | .header_offsets = { | ||
| 125 | .port = 0x00, | ||
| 126 | .re_mem = 0x02, | ||
| 127 | .iface_func = 0x03, | ||
| 128 | .en_mem = 0x04, | ||
| 129 | .data_offset = 0x05, | ||
| 130 | .data_segment = 0x07, | ||
| 131 | }, | ||
| 132 | |||
| 133 | .commands = { | ||
| 134 | .get_brightness = 0x00, | ||
| 135 | .set_brightness = 0x01, | ||
| 136 | |||
| 137 | .get_wireless_button = 0x02, | ||
| 138 | .set_wireless_button = 0x03, | ||
| 139 | |||
| 140 | .get_backlight = 0x04, | ||
| 141 | .set_backlight = 0x05, | ||
| 142 | |||
| 143 | .get_recovery_mode = 0x06, | ||
| 144 | .set_recovery_mode = 0x07, | ||
| 145 | |||
| 146 | .get_performance_level = 0x08, | ||
| 147 | .set_performance_level = 0x09, | ||
| 148 | |||
| 149 | .set_linux = 0x0a, | ||
| 150 | }, | ||
| 151 | |||
| 152 | .performance_levels = { | ||
| 153 | { | ||
| 154 | .name = "silent", | ||
| 155 | .value = 0, | ||
| 156 | }, | ||
| 157 | { | ||
| 158 | .name = "normal", | ||
| 159 | .value = 1, | ||
| 160 | }, | ||
| 161 | { }, | ||
| 162 | }, | ||
| 163 | .min_brightness = 1, | ||
| 164 | .max_brightness = 8, | ||
| 165 | }, | ||
| 166 | { | ||
| 167 | .test_string = "SwSmi@", | ||
| 168 | |||
| 169 | .main_function = 0x5843, | ||
| 170 | |||
| 171 | .header_offsets = { | ||
| 172 | .port = 0x00, | ||
| 173 | .re_mem = 0x04, | ||
| 174 | .iface_func = 0x02, | ||
| 175 | .en_mem = 0x03, | ||
| 176 | .data_offset = 0x05, | ||
| 177 | .data_segment = 0x07, | ||
| 178 | }, | ||
| 179 | |||
| 180 | .commands = { | ||
| 181 | .get_brightness = 0x10, | ||
| 182 | .set_brightness = 0x11, | ||
| 183 | |||
| 184 | .get_wireless_button = 0x12, | ||
| 185 | .set_wireless_button = 0x13, | ||
| 186 | |||
| 187 | .get_backlight = 0x2d, | ||
| 188 | .set_backlight = 0x2e, | ||
| 189 | |||
| 190 | .get_recovery_mode = 0xff, | ||
| 191 | .set_recovery_mode = 0xff, | ||
| 192 | |||
| 193 | .get_performance_level = 0x31, | ||
| 194 | .set_performance_level = 0x32, | ||
| 195 | |||
| 196 | .set_linux = 0xff, | ||
| 197 | }, | ||
| 198 | |||
| 199 | .performance_levels = { | ||
| 200 | { | ||
| 201 | .name = "normal", | ||
| 202 | .value = 0, | ||
| 203 | }, | ||
| 204 | { | ||
| 205 | .name = "silent", | ||
| 206 | .value = 1, | ||
| 207 | }, | ||
| 208 | { | ||
| 209 | .name = "overclock", | ||
| 210 | .value = 2, | ||
| 211 | }, | ||
| 212 | { }, | ||
| 213 | }, | ||
| 214 | .min_brightness = 0, | ||
| 215 | .max_brightness = 8, | ||
| 216 | }, | ||
| 217 | { }, | ||
| 218 | }; | ||
| 219 | |||
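The two entries above differ only in data (signature string, SMI function number, header layout, command opcodes), with an empty sentinel closing the array. A small standalone sketch of how such a sentinel-terminated table is scanned to pick the matching variant, much as samsung_init() does further down; the strstr() search over a string is a simplification of the ioremapped-memory scan, while the signature strings and main_function values are copied from the table above:

    #include <stdio.h>
    #include <string.h>

    struct variant {
        const char *test_string;
        int main_function;
    };

    static const struct variant variants[] = {
        { "SECLINUX", 0x4c49 },
        { "SwSmi@",   0x5843 },
        { NULL,       0 },          /* sentinel terminates the walk */
    };

    static const struct variant *pick_variant(const char *bios_blob)
    {
        int i;

        for (i = 0; variants[i].test_string != NULL; ++i)
            if (strstr(bios_blob, variants[i].test_string))
                return &variants[i];
        return NULL;                /* nothing matched: bail out like -ENODEV */
    }

    int main(void)
    {
        const struct variant *v = pick_variant("....SwSmi@....");

        if (v)
            printf("matched %s, main_function 0x%x\n",
                   v->test_string, v->main_function);
        return 0;
    }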
| 220 | static const struct sabi_config *sabi_config; | ||
| 221 | |||
| 222 | static void __iomem *sabi; | ||
| 223 | static void __iomem *sabi_iface; | ||
| 224 | static void __iomem *f0000_segment; | ||
| 225 | static struct backlight_device *backlight_device; | ||
| 226 | static struct mutex sabi_mutex; | ||
| 227 | static struct platform_device *sdev; | ||
| 228 | static struct rfkill *rfk; | ||
| 229 | |||
| 230 | static int force; | ||
| 231 | module_param(force, bool, 0); | ||
| 232 | MODULE_PARM_DESC(force, | ||
| 233 | "Disable the DMI check and forces the driver to be loaded"); | ||
| 234 | |||
| 235 | static int debug; | ||
| 236 | module_param(debug, bool, S_IRUGO | S_IWUSR); | ||
| 237 | MODULE_PARM_DESC(debug, "Debug enabled or not"); | ||
| 238 | |||
| 239 | static int sabi_get_command(u8 command, struct sabi_retval *sretval) | ||
| 240 | { | ||
| 241 | int retval = 0; | ||
| 242 | u16 port = readw(sabi + sabi_config->header_offsets.port); | ||
| 243 | u8 complete, iface_data; | ||
| 244 | |||
| 245 | mutex_lock(&sabi_mutex); | ||
| 246 | |||
| 247 | /* enable memory to be able to write to it */ | ||
| 248 | outb(readb(sabi + sabi_config->header_offsets.en_mem), port); | ||
| 249 | |||
| 250 | /* write out the command */ | ||
| 251 | writew(sabi_config->main_function, sabi_iface + SABI_IFACE_MAIN); | ||
| 252 | writew(command, sabi_iface + SABI_IFACE_SUB); | ||
| 253 | writeb(0, sabi_iface + SABI_IFACE_COMPLETE); | ||
| 254 | outb(readb(sabi + sabi_config->header_offsets.iface_func), port); | ||
| 255 | |||
| 256 | /* write protect memory to make it safe */ | ||
| 257 | outb(readb(sabi + sabi_config->header_offsets.re_mem), port); | ||
| 258 | |||
| 259 | /* see if the command actually succeeded */ | ||
| 260 | complete = readb(sabi_iface + SABI_IFACE_COMPLETE); | ||
| 261 | iface_data = readb(sabi_iface + SABI_IFACE_DATA); | ||
| 262 | if (complete != 0xaa || iface_data == 0xff) { | ||
| 263 | pr_warn("SABI get command 0x%02x failed with completion flag 0x%02x and data 0x%02x\n", | ||
| 264 | command, complete, iface_data); | ||
| 265 | retval = -EINVAL; | ||
| 266 | goto exit; | ||
| 267 | } | ||
| 268 | /* | ||
| 269 | * Save off the data into a structure so the caller use it. | ||
| 270 | * Right now we only want the first 4 bytes, | ||
| 271 | * There are commands that need more, but not for the ones we | ||
| 272 | * currently care about. | ||
| 273 | */ | ||
| 274 | sretval->retval[0] = readb(sabi_iface + SABI_IFACE_DATA); | ||
| 275 | sretval->retval[1] = readb(sabi_iface + SABI_IFACE_DATA + 1); | ||
| 276 | sretval->retval[2] = readb(sabi_iface + SABI_IFACE_DATA + 2); | ||
| 277 | sretval->retval[3] = readb(sabi_iface + SABI_IFACE_DATA + 3); | ||
| 278 | |||
| 279 | exit: | ||
| 280 | mutex_unlock(&sabi_mutex); | ||
| 281 | return retval; | ||
| 282 | |||
| 283 | } | ||
| 284 | |||
| 285 | static int sabi_set_command(u8 command, u8 data) | ||
| 286 | { | ||
| 287 | int retval = 0; | ||
| 288 | u16 port = readw(sabi + sabi_config->header_offsets.port); | ||
| 289 | u8 complete, iface_data; | ||
| 290 | |||
| 291 | mutex_lock(&sabi_mutex); | ||
| 292 | |||
| 293 | /* enable memory to be able to write to it */ | ||
| 294 | outb(readb(sabi + sabi_config->header_offsets.en_mem), port); | ||
| 295 | |||
| 296 | /* write out the command */ | ||
| 297 | writew(sabi_config->main_function, sabi_iface + SABI_IFACE_MAIN); | ||
| 298 | writew(command, sabi_iface + SABI_IFACE_SUB); | ||
| 299 | writeb(0, sabi_iface + SABI_IFACE_COMPLETE); | ||
| 300 | writeb(data, sabi_iface + SABI_IFACE_DATA); | ||
| 301 | outb(readb(sabi + sabi_config->header_offsets.iface_func), port); | ||
| 302 | |||
| 303 | /* write protect memory to make it safe */ | ||
| 304 | outb(readb(sabi + sabi_config->header_offsets.re_mem), port); | ||
| 305 | |||
| 306 | /* see if the command actually succeeded */ | ||
| 307 | complete = readb(sabi_iface + SABI_IFACE_COMPLETE); | ||
| 308 | iface_data = readb(sabi_iface + SABI_IFACE_DATA); | ||
| 309 | if (complete != 0xaa || iface_data == 0xff) { | ||
| 310 | pr_warn("SABI set command 0x%02x failed with completion flag 0x%02x and data 0x%02x\n", | ||
| 311 | command, complete, iface_data); | ||
| 312 | retval = -EINVAL; | ||
| 313 | } | ||
| 314 | |||
| 315 | mutex_unlock(&sabi_mutex); | ||
| 316 | return retval; | ||
| 317 | } | ||
| 318 | |||
| 319 | static void test_backlight(void) | ||
| 320 | { | ||
| 321 | struct sabi_retval sretval; | ||
| 322 | |||
| 323 | sabi_get_command(sabi_config->commands.get_backlight, &sretval); | ||
| 324 | printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]); | ||
| 325 | |||
| 326 | sabi_set_command(sabi_config->commands.set_backlight, 0); | ||
| 327 | printk(KERN_DEBUG "backlight should be off\n"); | ||
| 328 | |||
| 329 | sabi_get_command(sabi_config->commands.get_backlight, &sretval); | ||
| 330 | printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]); | ||
| 331 | |||
| 332 | msleep(1000); | ||
| 333 | |||
| 334 | sabi_set_command(sabi_config->commands.set_backlight, 1); | ||
| 335 | printk(KERN_DEBUG "backlight should be on\n"); | ||
| 336 | |||
| 337 | sabi_get_command(sabi_config->commands.get_backlight, &sretval); | ||
| 338 | printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]); | ||
| 339 | } | ||
| 340 | |||
| 341 | static void test_wireless(void) | ||
| 342 | { | ||
| 343 | struct sabi_retval sretval; | ||
| 344 | |||
| 345 | sabi_get_command(sabi_config->commands.get_wireless_button, &sretval); | ||
| 346 | printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]); | ||
| 347 | |||
| 348 | sabi_set_command(sabi_config->commands.set_wireless_button, 0); | ||
| 349 | printk(KERN_DEBUG "wireless led should be off\n"); | ||
| 350 | |||
| 351 | sabi_get_command(sabi_config->commands.get_wireless_button, &sretval); | ||
| 352 | printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]); | ||
| 353 | |||
| 354 | msleep(1000); | ||
| 355 | |||
| 356 | sabi_set_command(sabi_config->commands.set_wireless_button, 1); | ||
| 357 | printk(KERN_DEBUG "wireless led should be on\n"); | ||
| 358 | |||
| 359 | sabi_get_command(sabi_config->commands.get_wireless_button, &sretval); | ||
| 360 | printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]); | ||
| 361 | } | ||
| 362 | |||
| 363 | static u8 read_brightness(void) | ||
| 364 | { | ||
| 365 | struct sabi_retval sretval; | ||
| 366 | int user_brightness = 0; | ||
| 367 | int retval; | ||
| 368 | |||
| 369 | retval = sabi_get_command(sabi_config->commands.get_brightness, | ||
| 370 | &sretval); | ||
| 371 | if (!retval) { | ||
| 372 | user_brightness = sretval.retval[0]; | ||
| 373 | if (user_brightness != 0) | ||
| 374 | user_brightness -= sabi_config->min_brightness; | ||
| 375 | } | ||
| 376 | return user_brightness; | ||
| 377 | } | ||
| 378 | |||
| 379 | static void set_brightness(u8 user_brightness) | ||
| 380 | { | ||
| 381 | u8 user_level = user_brightness - sabi_config->min_brightness; | ||
| 382 | |||
| 383 | sabi_set_command(sabi_config->commands.set_brightness, user_level); | ||
| 384 | } | ||
| 385 | |||
| 386 | static int get_brightness(struct backlight_device *bd) | ||
| 387 | { | ||
| 388 | return (int)read_brightness(); | ||
| 389 | } | ||
| 390 | |||
| 391 | static int update_status(struct backlight_device *bd) | ||
| 392 | { | ||
| 393 | set_brightness(bd->props.brightness); | ||
| 394 | |||
| 395 | if (bd->props.power == FB_BLANK_UNBLANK) | ||
| 396 | sabi_set_command(sabi_config->commands.set_backlight, 1); | ||
| 397 | else | ||
| 398 | sabi_set_command(sabi_config->commands.set_backlight, 0); | ||
| 399 | return 0; | ||
| 400 | } | ||
| 401 | |||
| 402 | static const struct backlight_ops backlight_ops = { | ||
| 403 | .get_brightness = get_brightness, | ||
| 404 | .update_status = update_status, | ||
| 405 | }; | ||
| 406 | |||
| 407 | static int rfkill_set(void *data, bool blocked) | ||
| 408 | { | ||
| 409 | /* Do something with blocked...*/ | ||
| 410 | /* | ||
| 411 | * blocked == false is on | ||
| 412 | * blocked == true is off | ||
| 413 | */ | ||
| 414 | if (blocked) | ||
| 415 | sabi_set_command(sabi_config->commands.set_wireless_button, 0); | ||
| 416 | else | ||
| 417 | sabi_set_command(sabi_config->commands.set_wireless_button, 1); | ||
| 418 | |||
| 419 | return 0; | ||
| 420 | } | ||
| 421 | |||
| 422 | static struct rfkill_ops rfkill_ops = { | ||
| 423 | .set_block = rfkill_set, | ||
| 424 | }; | ||
| 425 | |||
| 426 | static int init_wireless(struct platform_device *sdev) | ||
| 427 | { | ||
| 428 | int retval; | ||
| 429 | |||
| 430 | rfk = rfkill_alloc("samsung-wifi", &sdev->dev, RFKILL_TYPE_WLAN, | ||
| 431 | &rfkill_ops, NULL); | ||
| 432 | if (!rfk) | ||
| 433 | return -ENOMEM; | ||
| 434 | |||
| 435 | retval = rfkill_register(rfk); | ||
| 436 | if (retval) { | ||
| 437 | rfkill_destroy(rfk); | ||
| 438 | return -ENODEV; | ||
| 439 | } | ||
| 440 | |||
| 441 | return 0; | ||
| 442 | } | ||
| 443 | |||
| 444 | static void destroy_wireless(void) | ||
| 445 | { | ||
| 446 | rfkill_unregister(rfk); | ||
| 447 | rfkill_destroy(rfk); | ||
| 448 | } | ||
| 449 | |||
| 450 | static ssize_t get_performance_level(struct device *dev, | ||
| 451 | struct device_attribute *attr, char *buf) | ||
| 452 | { | ||
| 453 | struct sabi_retval sretval; | ||
| 454 | int retval; | ||
| 455 | int i; | ||
| 456 | |||
| 457 | /* Read the state */ | ||
| 458 | retval = sabi_get_command(sabi_config->commands.get_performance_level, | ||
| 459 | &sretval); | ||
| 460 | if (retval) | ||
| 461 | return retval; | ||
| 462 | |||
| 463 | /* The logic is backwards, yeah, lots of fun... */ | ||
| 464 | for (i = 0; sabi_config->performance_levels[i].name; ++i) { | ||
| 465 | if (sretval.retval[0] == sabi_config->performance_levels[i].value) | ||
| 466 | return sprintf(buf, "%s\n", sabi_config->performance_levels[i].name); | ||
| 467 | } | ||
| 468 | return sprintf(buf, "%s\n", "unknown"); | ||
| 469 | } | ||
| 470 | |||
| 471 | static ssize_t set_performance_level(struct device *dev, | ||
| 472 | struct device_attribute *attr, const char *buf, | ||
| 473 | size_t count) | ||
| 474 | { | ||
| 475 | if (count >= 1) { | ||
| 476 | int i; | ||
| 477 | for (i = 0; sabi_config->performance_levels[i].name; ++i) { | ||
| 478 | const struct sabi_performance_level *level = | ||
| 479 | &sabi_config->performance_levels[i]; | ||
| 480 | if (!strncasecmp(level->name, buf, strlen(level->name))) { | ||
| 481 | sabi_set_command(sabi_config->commands.set_performance_level, | ||
| 482 | level->value); | ||
| 483 | break; | ||
| 484 | } | ||
| 485 | } | ||
| 486 | if (!sabi_config->performance_levels[i].name) | ||
| 487 | return -EINVAL; | ||
| 488 | } | ||
| 489 | return count; | ||
| 490 | } | ||
| 491 | static DEVICE_ATTR(performance_level, S_IWUSR | S_IRUGO, | ||
| 492 | get_performance_level, set_performance_level); | ||
| 493 | |||
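The attribute above exposes the SABI performance level through sysfs as a readable, writable text file. A hedged userspace example of driving it; the /sys/devices/platform/samsung/ path is an assumption inferred from the platform_device_register_simple("samsung", -1, ...) call later in this file, and the accepted strings come from the performance_levels tables:

    #include <stdio.h>

    #define ATTR "/sys/devices/platform/samsung/performance_level"   /* assumed path */

    int main(void)
    {
        char level[32] = "";
        FILE *f = fopen(ATTR, "r");

        if (f && fgets(level, sizeof(level), f))
            printf("current level: %s", level);
        if (f)
            fclose(f);

        f = fopen(ATTR, "w");
        if (f) {
            /* valid names come from sabi_config->performance_levels[] */
            fputs("silent\n", f);
            fclose(f);
        }
        return 0;
    }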
| 494 | |||
| 495 | static int __init dmi_check_cb(const struct dmi_system_id *id) | ||
| 496 | { | ||
| 497 | pr_info("found laptop model '%s'\n", | ||
| 498 | id->ident); | ||
| 499 | return 0; | ||
| 500 | } | ||
| 501 | |||
| 502 | static struct dmi_system_id __initdata samsung_dmi_table[] = { | ||
| 503 | { | ||
| 504 | .ident = "N128", | ||
| 505 | .matches = { | ||
| 506 | DMI_MATCH(DMI_SYS_VENDOR, | ||
| 507 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 508 | DMI_MATCH(DMI_PRODUCT_NAME, "N128"), | ||
| 509 | DMI_MATCH(DMI_BOARD_NAME, "N128"), | ||
| 510 | }, | ||
| 511 | .callback = dmi_check_cb, | ||
| 512 | }, | ||
| 513 | { | ||
| 514 | .ident = "N130", | ||
| 515 | .matches = { | ||
| 516 | DMI_MATCH(DMI_SYS_VENDOR, | ||
| 517 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 518 | DMI_MATCH(DMI_PRODUCT_NAME, "N130"), | ||
| 519 | DMI_MATCH(DMI_BOARD_NAME, "N130"), | ||
| 520 | }, | ||
| 521 | .callback = dmi_check_cb, | ||
| 522 | }, | ||
| 523 | { | ||
| 524 | .ident = "X125", | ||
| 525 | .matches = { | ||
| 526 | DMI_MATCH(DMI_SYS_VENDOR, | ||
| 527 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 528 | DMI_MATCH(DMI_PRODUCT_NAME, "X125"), | ||
| 529 | DMI_MATCH(DMI_BOARD_NAME, "X125"), | ||
| 530 | }, | ||
| 531 | .callback = dmi_check_cb, | ||
| 532 | }, | ||
| 533 | { | ||
| 534 | .ident = "X120/X170", | ||
| 535 | .matches = { | ||
| 536 | DMI_MATCH(DMI_SYS_VENDOR, | ||
| 537 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 538 | DMI_MATCH(DMI_PRODUCT_NAME, "X120/X170"), | ||
| 539 | DMI_MATCH(DMI_BOARD_NAME, "X120/X170"), | ||
| 540 | }, | ||
| 541 | .callback = dmi_check_cb, | ||
| 542 | }, | ||
| 543 | { | ||
| 544 | .ident = "NC10", | ||
| 545 | .matches = { | ||
| 546 | DMI_MATCH(DMI_SYS_VENDOR, | ||
| 547 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 548 | DMI_MATCH(DMI_PRODUCT_NAME, "NC10"), | ||
| 549 | DMI_MATCH(DMI_BOARD_NAME, "NC10"), | ||
| 550 | }, | ||
| 551 | .callback = dmi_check_cb, | ||
| 552 | }, | ||
| 553 | { | ||
| 554 | .ident = "NP-Q45", | ||
| 555 | .matches = { | ||
| 556 | DMI_MATCH(DMI_SYS_VENDOR, | ||
| 557 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 558 | DMI_MATCH(DMI_PRODUCT_NAME, "SQ45S70S"), | ||
| 559 | DMI_MATCH(DMI_BOARD_NAME, "SQ45S70S"), | ||
| 560 | }, | ||
| 561 | .callback = dmi_check_cb, | ||
| 562 | }, | ||
| 563 | { | ||
| 564 | .ident = "X360", | ||
| 565 | .matches = { | ||
| 566 | DMI_MATCH(DMI_SYS_VENDOR, | ||
| 567 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 568 | DMI_MATCH(DMI_PRODUCT_NAME, "X360"), | ||
| 569 | DMI_MATCH(DMI_BOARD_NAME, "X360"), | ||
| 570 | }, | ||
| 571 | .callback = dmi_check_cb, | ||
| 572 | }, | ||
| 573 | { | ||
| 574 | .ident = "R410 Plus", | ||
| 575 | .matches = { | ||
| 576 | DMI_MATCH(DMI_SYS_VENDOR, | ||
| 577 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 578 | DMI_MATCH(DMI_PRODUCT_NAME, "R410P"), | ||
| 579 | DMI_MATCH(DMI_BOARD_NAME, "R460"), | ||
| 580 | }, | ||
| 581 | .callback = dmi_check_cb, | ||
| 582 | }, | ||
| 583 | { | ||
| 584 | .ident = "R518", | ||
| 585 | .matches = { | ||
| 586 | DMI_MATCH(DMI_SYS_VENDOR, | ||
| 587 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 588 | DMI_MATCH(DMI_PRODUCT_NAME, "R518"), | ||
| 589 | DMI_MATCH(DMI_BOARD_NAME, "R518"), | ||
| 590 | }, | ||
| 591 | .callback = dmi_check_cb, | ||
| 592 | }, | ||
| 593 | { | ||
| 594 | .ident = "R519/R719", | ||
| 595 | .matches = { | ||
| 596 | DMI_MATCH(DMI_SYS_VENDOR, | ||
| 597 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 598 | DMI_MATCH(DMI_PRODUCT_NAME, "R519/R719"), | ||
| 599 | DMI_MATCH(DMI_BOARD_NAME, "R519/R719"), | ||
| 600 | }, | ||
| 601 | .callback = dmi_check_cb, | ||
| 602 | }, | ||
| 603 | { | ||
| 604 | .ident = "N150/N210/N220/N230", | ||
| 605 | .matches = { | ||
| 606 | DMI_MATCH(DMI_SYS_VENDOR, | ||
| 607 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 608 | DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220/N230"), | ||
| 609 | DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220/N230"), | ||
| 610 | }, | ||
| 611 | .callback = dmi_check_cb, | ||
| 612 | }, | ||
| 613 | { | ||
| 614 | .ident = "N150P/N210P/N220P", | ||
| 615 | .matches = { | ||
| 616 | DMI_MATCH(DMI_SYS_VENDOR, | ||
| 617 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 618 | DMI_MATCH(DMI_PRODUCT_NAME, "N150P/N210P/N220P"), | ||
| 619 | DMI_MATCH(DMI_BOARD_NAME, "N150P/N210P/N220P"), | ||
| 620 | }, | ||
| 621 | .callback = dmi_check_cb, | ||
| 622 | }, | ||
| 623 | { | ||
| 624 | .ident = "R530/R730", | ||
| 625 | .matches = { | ||
| 626 | DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 627 | DMI_MATCH(DMI_PRODUCT_NAME, "R530/R730"), | ||
| 628 | DMI_MATCH(DMI_BOARD_NAME, "R530/R730"), | ||
| 629 | }, | ||
| 630 | .callback = dmi_check_cb, | ||
| 631 | }, | ||
| 632 | { | ||
| 633 | .ident = "NF110/NF210/NF310", | ||
| 634 | .matches = { | ||
| 635 | DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 636 | DMI_MATCH(DMI_PRODUCT_NAME, "NF110/NF210/NF310"), | ||
| 637 | DMI_MATCH(DMI_BOARD_NAME, "NF110/NF210/NF310"), | ||
| 638 | }, | ||
| 639 | .callback = dmi_check_cb, | ||
| 640 | }, | ||
| 641 | { | ||
| 642 | .ident = "N145P/N250P/N260P", | ||
| 643 | .matches = { | ||
| 644 | DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 645 | DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"), | ||
| 646 | DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"), | ||
| 647 | }, | ||
| 648 | .callback = dmi_check_cb, | ||
| 649 | }, | ||
| 650 | { | ||
| 651 | .ident = "R70/R71", | ||
| 652 | .matches = { | ||
| 653 | DMI_MATCH(DMI_SYS_VENDOR, | ||
| 654 | "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 655 | DMI_MATCH(DMI_PRODUCT_NAME, "R70/R71"), | ||
| 656 | DMI_MATCH(DMI_BOARD_NAME, "R70/R71"), | ||
| 657 | }, | ||
| 658 | .callback = dmi_check_cb, | ||
| 659 | }, | ||
| 660 | { | ||
| 661 | .ident = "P460", | ||
| 662 | .matches = { | ||
| 663 | DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), | ||
| 664 | DMI_MATCH(DMI_PRODUCT_NAME, "P460"), | ||
| 665 | DMI_MATCH(DMI_BOARD_NAME, "P460"), | ||
| 666 | }, | ||
| 667 | .callback = dmi_check_cb, | ||
| 668 | }, | ||
| 669 | { }, | ||
| 670 | }; | ||
| 671 | MODULE_DEVICE_TABLE(dmi, samsung_dmi_table); | ||
| 672 | |||
| 673 | static int find_signature(void __iomem *memcheck, const char *testStr) | ||
| 674 | { | ||
| 675 | int i = 0; | ||
| 676 | int loca; | ||
| 677 | |||
| 678 | for (loca = 0; loca < 0xffff; loca++) { | ||
| 679 | char temp = readb(memcheck + loca); | ||
| 680 | |||
| 681 | if (temp == testStr[i]) { | ||
| 682 | if (i == strlen(testStr)-1) | ||
| 683 | break; | ||
| 684 | ++i; | ||
| 685 | } else { | ||
| 686 | i = 0; | ||
| 687 | } | ||
| 688 | } | ||
| 689 | return loca; | ||
| 690 | } | ||
| 691 | |||
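find_signature() does a byte-at-a-time match over the ioremapped BIOS segment and returns the offset of the signature's last byte, or 0xffff when nothing matched, which is exactly what samsung_init() tests below. The same logic over an ordinary buffer, as a runnable sketch (buffer contents and the 0x1234 placement are made up):

    #include <stdio.h>
    #include <string.h>

    /* Mirror of find_signature(): returns the offset of the signature's
     * LAST byte, or 0xffff when it is not found. */
    static int find_signature_buf(const unsigned char *mem, const char *sig)
    {
        size_t i = 0, loca, len = strlen(sig);

        for (loca = 0; loca < 0xffff; loca++) {
            if (mem[loca] == (unsigned char)sig[i]) {
                if (i == len - 1)
                    break;           /* loca is the last byte of the match */
                ++i;
            } else {
                i = 0;               /* mismatch: restart the pattern */
            }
        }
        return (int)loca;
    }

    int main(void)
    {
        static unsigned char bios[0x10000];

        memcpy(&bios[0x1234], "SECLINUX", 8);
        printf("0x%04x\n", find_signature_buf(bios, "SECLINUX"));  /* 0x123b */
        return 0;
    }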
| 692 | static int __init samsung_init(void) | ||
| 693 | { | ||
| 694 | struct backlight_properties props; | ||
| 695 | struct sabi_retval sretval; | ||
| 696 | unsigned int ifaceP; | ||
| 697 | int i; | ||
| 698 | int loca; | ||
| 699 | int retval; | ||
| 700 | |||
| 701 | mutex_init(&sabi_mutex); | ||
| 702 | |||
| 703 | if (!force && !dmi_check_system(samsung_dmi_table)) | ||
| 704 | return -ENODEV; | ||
| 705 | |||
| 706 | f0000_segment = ioremap_nocache(0xf0000, 0xffff); | ||
| 707 | if (!f0000_segment) { | ||
| 708 | pr_err("Can't map the segment at 0xf0000\n"); | ||
| 709 | return -EINVAL; | ||
| 710 | } | ||
| 711 | |||
| 712 | /* Try to find one of the signatures in memory to find the header */ | ||
| 713 | for (i = 0; sabi_configs[i].test_string != 0; ++i) { | ||
| 714 | sabi_config = &sabi_configs[i]; | ||
| 715 | loca = find_signature(f0000_segment, sabi_config->test_string); | ||
| 716 | if (loca != 0xffff) | ||
| 717 | break; | ||
| 718 | } | ||
| 719 | |||
| 720 | if (loca == 0xffff) { | ||
| 721 | pr_err("This computer does not support SABI\n"); | ||
| 722 | goto error_no_signature; | ||
| 723 | } | ||
| 724 | |||
| 725 | /* point to the SMI port Number */ | ||
| 726 | loca += 1; | ||
| 727 | sabi = (f0000_segment + loca); | ||
| 728 | |||
| 729 | if (debug) { | ||
| 730 | printk(KERN_DEBUG "This computer supports SABI==%x\n", | ||
| 731 | loca + 0xf0000 - 6); | ||
| 732 | printk(KERN_DEBUG "SABI header:\n"); | ||
| 733 | printk(KERN_DEBUG " SMI Port Number = 0x%04x\n", | ||
| 734 | readw(sabi + sabi_config->header_offsets.port)); | ||
| 735 | printk(KERN_DEBUG " SMI Interface Function = 0x%02x\n", | ||
| 736 | readb(sabi + sabi_config->header_offsets.iface_func)); | ||
| 737 | printk(KERN_DEBUG " SMI enable memory buffer = 0x%02x\n", | ||
| 738 | readb(sabi + sabi_config->header_offsets.en_mem)); | ||
| 739 | printk(KERN_DEBUG " SMI restore memory buffer = 0x%02x\n", | ||
| 740 | readb(sabi + sabi_config->header_offsets.re_mem)); | ||
| 741 | printk(KERN_DEBUG " SABI data offset = 0x%04x\n", | ||
| 742 | readw(sabi + sabi_config->header_offsets.data_offset)); | ||
| 743 | printk(KERN_DEBUG " SABI data segment = 0x%04x\n", | ||
| 744 | readw(sabi + sabi_config->header_offsets.data_segment)); | ||
| 745 | } | ||
| 746 | |||
| 747 | /* Get a pointer to the SABI Interface */ | ||
| 748 | ifaceP = (readw(sabi + sabi_config->header_offsets.data_segment) & 0x0ffff) << 4; | ||
| 749 | ifaceP += readw(sabi + sabi_config->header_offsets.data_offset) & 0x0ffff; | ||
| 750 | sabi_iface = ioremap_nocache(ifaceP, 16); | ||
| 751 | if (!sabi_iface) { | ||
| 752 | pr_err("Can't remap %x\n", ifaceP); | ||
| 753 | goto exit; | ||
| 754 | } | ||
| 755 | if (debug) { | ||
| 756 | printk(KERN_DEBUG "ifaceP = 0x%08x\n", ifaceP); | ||
| 757 | printk(KERN_DEBUG "sabi_iface = %p\n", sabi_iface); | ||
| 758 | |||
| 759 | test_backlight(); | ||
| 760 | test_wireless(); | ||
| 761 | |||
| 762 | retval = sabi_get_command(sabi_config->commands.get_brightness, | ||
| 763 | &sretval); | ||
| 764 | printk(KERN_DEBUG "brightness = 0x%02x\n", sretval.retval[0]); | ||
| 765 | } | ||
| 766 | |||
| 767 | /* Turn on "Linux" mode in the BIOS */ | ||
| 768 | if (sabi_config->commands.set_linux != 0xff) { | ||
| 769 | retval = sabi_set_command(sabi_config->commands.set_linux, | ||
| 770 | 0x81); | ||
| 771 | if (retval) { | ||
| 772 | pr_warn("Linux mode was not set!\n"); | ||
| 773 | goto error_no_platform; | ||
| 774 | } | ||
| 775 | } | ||
| 776 | |||
| 777 | /* knock up a platform device to hang stuff off of */ | ||
| 778 | sdev = platform_device_register_simple("samsung", -1, NULL, 0); | ||
| 779 | if (IS_ERR(sdev)) | ||
| 780 | goto error_no_platform; | ||
| 781 | |||
| 782 | /* create a backlight device to talk to this one */ | ||
| 783 | memset(&props, 0, sizeof(struct backlight_properties)); | ||
| 784 | props.type = BACKLIGHT_PLATFORM; | ||
| 785 | props.max_brightness = sabi_config->max_brightness; | ||
| 786 | backlight_device = backlight_device_register("samsung", &sdev->dev, | ||
| 787 | NULL, &backlight_ops, | ||
| 788 | &props); | ||
| 789 | if (IS_ERR(backlight_device)) | ||
| 790 | goto error_no_backlight; | ||
| 791 | |||
| 792 | backlight_device->props.brightness = read_brightness(); | ||
| 793 | backlight_device->props.power = FB_BLANK_UNBLANK; | ||
| 794 | backlight_update_status(backlight_device); | ||
| 795 | |||
| 796 | retval = init_wireless(sdev); | ||
| 797 | if (retval) | ||
| 798 | goto error_no_rfk; | ||
| 799 | |||
| 800 | retval = device_create_file(&sdev->dev, &dev_attr_performance_level); | ||
| 801 | if (retval) | ||
| 802 | goto error_file_create; | ||
| 803 | |||
| 804 | exit: | ||
| 805 | return 0; | ||
| 806 | |||
| 807 | error_file_create: | ||
| 808 | destroy_wireless(); | ||
| 809 | |||
| 810 | error_no_rfk: | ||
| 811 | backlight_device_unregister(backlight_device); | ||
| 812 | |||
| 813 | error_no_backlight: | ||
| 814 | platform_device_unregister(sdev); | ||
| 815 | |||
| 816 | error_no_platform: | ||
| 817 | iounmap(sabi_iface); | ||
| 818 | |||
| 819 | error_no_signature: | ||
| 820 | iounmap(f0000_segment); | ||
| 821 | return -EINVAL; | ||
| 822 | } | ||
| 823 | |||
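The sabi_iface pointer above is derived real-mode style: the 16-bit data segment from the SABI header is shifted left by four and the 16-bit data offset is added to form a physical address, which is then ioremapped. A tiny worked example of that computation with made-up header values:

    #include <stdio.h>

    int main(void)
    {
        unsigned int segment = 0xf000;              /* from header data_segment */
        unsigned int offset  = 0x1234;              /* from header data_offset  */
        unsigned int phys = ((segment & 0xffff) << 4) + (offset & 0xffff);

        printf("iface at 0x%08x\n", phys);          /* prints 0x000f1234 */
        return 0;
    }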
| 824 | static void __exit samsung_exit(void) | ||
| 825 | { | ||
| 826 | /* Turn off "Linux" mode in the BIOS */ | ||
| 827 | if (sabi_config->commands.set_linux != 0xff) | ||
| 828 | sabi_set_command(sabi_config->commands.set_linux, 0x80); | ||
| 829 | |||
| 830 | device_remove_file(&sdev->dev, &dev_attr_performance_level); | ||
| 831 | backlight_device_unregister(backlight_device); | ||
| 832 | destroy_wireless(); | ||
| 833 | iounmap(sabi_iface); | ||
| 834 | iounmap(f0000_segment); | ||
| 835 | platform_device_unregister(sdev); | ||
| 836 | } | ||
| 837 | |||
| 838 | module_init(samsung_init); | ||
| 839 | module_exit(samsung_exit); | ||
| 840 | |||
| 841 | MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@suse.de>"); | ||
| 842 | MODULE_DESCRIPTION("Samsung Backlight driver"); | ||
| 843 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c index a2e5b5100ab4..0f4e8c942f9e 100644 --- a/drivers/video/pxafb.c +++ b/drivers/video/pxafb.c | |||
| @@ -1648,7 +1648,9 @@ pxafb_freq_transition(struct notifier_block *nb, unsigned long val, void *data) | |||
| 1648 | 1648 | ||
| 1649 | switch (val) { | 1649 | switch (val) { |
| 1650 | case CPUFREQ_PRECHANGE: | 1650 | case CPUFREQ_PRECHANGE: |
| 1651 | if (!fbi->overlay[0].usage && !fbi->overlay[1].usage) | 1651 | #ifdef CONFIG_FB_PXA_OVERLAY |
| 1652 | if (!(fbi->overlay[0].usage || fbi->overlay[1].usage)) | ||
| 1653 | #endif | ||
| 1652 | set_ctrlr_state(fbi, C_DISABLE_CLKCHANGE); | 1654 | set_ctrlr_state(fbi, C_DISABLE_CLKCHANGE); |
| 1653 | break; | 1655 | break; |
| 1654 | 1656 | ||
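The pxafb hunk guards the overlay-usage test with CONFIG_FB_PXA_OVERLAY, since the overlay state only exists in overlay-enabled builds, and leaves the clock-change disable unconditional otherwise. A self-contained sketch of that dangling-if-under-ifdef pattern (struct, field and option names are illustrative, not the pxafb ones):

    #include <stdio.h>
    #include <stdbool.h>

    struct fb_info_ex {
        bool clk_change_blocked;
    #ifdef CONFIG_EXAMPLE_OVERLAY
        int overlay_usage[2];   /* only present when overlays are built in */
    #endif
    };

    static void prepare_clk_change(struct fb_info_ex *fbi)
    {
    #ifdef CONFIG_EXAMPLE_OVERLAY
        /* skip the disable while either overlay is in use */
        if (!(fbi->overlay_usage[0] || fbi->overlay_usage[1]))
    #endif
            fbi->clk_change_blocked = true;
    }

    int main(void)
    {
        struct fb_info_ex fbi = { .clk_change_blocked = false };

        prepare_clk_change(&fbi);
        printf("blocked: %d\n", fbi.clk_change_blocked);
        return 0;
    }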
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 42d6c930cc87..33167b43ac7e 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
| @@ -912,8 +912,7 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn, | |||
| 912 | unsigned long irqflags, | 912 | unsigned long irqflags, |
| 913 | const char *devname, void *dev_id) | 913 | const char *devname, void *dev_id) |
| 914 | { | 914 | { |
| 915 | unsigned int irq; | 915 | int irq, retval; |
| 916 | int retval; | ||
| 917 | 916 | ||
| 918 | irq = bind_evtchn_to_irq(evtchn); | 917 | irq = bind_evtchn_to_irq(evtchn); |
| 919 | if (irq < 0) | 918 | if (irq < 0) |
| @@ -955,8 +954,7 @@ int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, | |||
| 955 | irq_handler_t handler, | 954 | irq_handler_t handler, |
| 956 | unsigned long irqflags, const char *devname, void *dev_id) | 955 | unsigned long irqflags, const char *devname, void *dev_id) |
| 957 | { | 956 | { |
| 958 | unsigned int irq; | 957 | int irq, retval; |
| 959 | int retval; | ||
| 960 | 958 | ||
| 961 | irq = bind_virq_to_irq(virq, cpu); | 959 | irq = bind_virq_to_irq(virq, cpu); |
| 962 | if (irq < 0) | 960 | if (irq < 0) |
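The xen/events.c change replaces "unsigned int irq" with "int irq" because bind_evtchn_to_irq() and bind_virq_to_irq() report failure as a negative errno, and an unsigned variable can never be less than zero, so the error checks were dead (many compilers warn that the comparison is always false). A short demonstration of the trap; fake_bind() is an illustrative stand-in:

    #include <stdio.h>

    static int fake_bind(void)      /* stands in for bind_evtchn_to_irq() */
    {
        return -22;                 /* -EINVAL style failure */
    }

    int main(void)
    {
        unsigned int irq_bad = fake_bind();   /* wraps to a huge positive value */
        int irq_ok = fake_bind();

        printf("irq_bad < 0: %d (value %u)\n", irq_bad < 0, irq_bad);
        printf("irq_ok  < 0: %d (value %d)\n", irq_ok < 0, irq_ok);
        return 0;
    }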
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index 95143dd6904d..1ac94125bf93 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c | |||
| @@ -61,7 +61,7 @@ static void xen_post_suspend(int cancelled) | |||
| 61 | xen_mm_unpin_all(); | 61 | xen_mm_unpin_all(); |
| 62 | } | 62 | } |
| 63 | 63 | ||
| 64 | #ifdef CONFIG_HIBERNATION | 64 | #ifdef CONFIG_HIBERNATE_CALLBACKS |
| 65 | static int xen_suspend(void *data) | 65 | static int xen_suspend(void *data) |
| 66 | { | 66 | { |
| 67 | struct suspend_info *si = data; | 67 | struct suspend_info *si = data; |
| @@ -173,7 +173,7 @@ out: | |||
| 173 | #endif | 173 | #endif |
| 174 | shutting_down = SHUTDOWN_INVALID; | 174 | shutting_down = SHUTDOWN_INVALID; |
| 175 | } | 175 | } |
| 176 | #endif /* CONFIG_HIBERNATION */ | 176 | #endif /* CONFIG_HIBERNATE_CALLBACKS */ |
| 177 | 177 | ||
| 178 | struct shutdown_handler { | 178 | struct shutdown_handler { |
| 179 | const char *command; | 179 | const char *command; |
| @@ -202,7 +202,7 @@ static void shutdown_handler(struct xenbus_watch *watch, | |||
| 202 | { "poweroff", do_poweroff }, | 202 | { "poweroff", do_poweroff }, |
| 203 | { "halt", do_poweroff }, | 203 | { "halt", do_poweroff }, |
| 204 | { "reboot", do_reboot }, | 204 | { "reboot", do_reboot }, |
| 205 | #ifdef CONFIG_HIBERNATION | 205 | #ifdef CONFIG_HIBERNATE_CALLBACKS |
| 206 | { "suspend", do_suspend }, | 206 | { "suspend", do_suspend }, |
| 207 | #endif | 207 | #endif |
| 208 | {NULL, NULL}, | 208 | {NULL, NULL}, |
diff --git a/fs/9p/fid.c b/fs/9p/fid.c index 0ee594569dcc..85b67ffa2a43 100644 --- a/fs/9p/fid.c +++ b/fs/9p/fid.c | |||
| @@ -286,11 +286,9 @@ static struct p9_fid *v9fs_fid_clone_with_uid(struct dentry *dentry, uid_t uid) | |||
| 286 | 286 | ||
| 287 | struct p9_fid *v9fs_writeback_fid(struct dentry *dentry) | 287 | struct p9_fid *v9fs_writeback_fid(struct dentry *dentry) |
| 288 | { | 288 | { |
| 289 | int err, flags; | 289 | int err; |
| 290 | struct p9_fid *fid; | 290 | struct p9_fid *fid; |
| 291 | struct v9fs_session_info *v9ses; | ||
| 292 | 291 | ||
| 293 | v9ses = v9fs_dentry2v9ses(dentry); | ||
| 294 | fid = v9fs_fid_clone_with_uid(dentry, 0); | 292 | fid = v9fs_fid_clone_with_uid(dentry, 0); |
| 295 | if (IS_ERR(fid)) | 293 | if (IS_ERR(fid)) |
| 296 | goto error_out; | 294 | goto error_out; |
| @@ -299,17 +297,8 @@ struct p9_fid *v9fs_writeback_fid(struct dentry *dentry) | |||
| 299 | * dirty pages. We always request for the open fid in read-write | 297 | * dirty pages. We always request for the open fid in read-write |
| 300 | * mode so that a partial page write which result in page | 298 | * mode so that a partial page write which result in page |
| 301 | * read can work. | 299 | * read can work. |
| 302 | * | ||
| 303 | * we don't have a tsyncfs operation for older version | ||
| 304 | * of protocol. So make sure the write back fid is | ||
| 305 | * opened in O_SYNC mode. | ||
| 306 | */ | 300 | */ |
| 307 | if (!v9fs_proto_dotl(v9ses)) | 301 | err = p9_client_open(fid, O_RDWR); |
| 308 | flags = O_RDWR | O_SYNC; | ||
| 309 | else | ||
| 310 | flags = O_RDWR; | ||
| 311 | |||
| 312 | err = p9_client_open(fid, flags); | ||
| 313 | if (err < 0) { | 302 | if (err < 0) { |
| 314 | p9_client_clunk(fid); | 303 | p9_client_clunk(fid); |
| 315 | fid = ERR_PTR(err); | 304 | fid = ERR_PTR(err); |
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h index 9665c2b840e6..e5ebedfc5ed8 100644 --- a/fs/9p/v9fs.h +++ b/fs/9p/v9fs.h | |||
| @@ -116,7 +116,6 @@ struct v9fs_session_info { | |||
| 116 | struct list_head slist; /* list of sessions registered with v9fs */ | 116 | struct list_head slist; /* list of sessions registered with v9fs */ |
| 117 | struct backing_dev_info bdi; | 117 | struct backing_dev_info bdi; |
| 118 | struct rw_semaphore rename_sem; | 118 | struct rw_semaphore rename_sem; |
| 119 | struct p9_fid *root_fid; /* Used for file system sync */ | ||
| 120 | }; | 119 | }; |
| 121 | 120 | ||
| 122 | /* cache_validity flags */ | 121 | /* cache_validity flags */ |
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c index b6a3b9f7fe4d..e022890c6f40 100644 --- a/fs/9p/vfs_dentry.c +++ b/fs/9p/vfs_dentry.c | |||
| @@ -126,7 +126,9 @@ static int v9fs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd) | |||
| 126 | retval = v9fs_refresh_inode_dotl(fid, inode); | 126 | retval = v9fs_refresh_inode_dotl(fid, inode); |
| 127 | else | 127 | else |
| 128 | retval = v9fs_refresh_inode(fid, inode); | 128 | retval = v9fs_refresh_inode(fid, inode); |
| 129 | if (retval <= 0) | 129 | if (retval == -ENOENT) |
| 130 | return 0; | ||
| 131 | if (retval < 0) | ||
| 130 | return retval; | 132 | return retval; |
| 131 | } | 133 | } |
| 132 | out_valid: | 134 | out_valid: |
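The revalidate hunk separates "the object is gone" from "the refresh failed": ->d_revalidate is expected to return 0 so the dcache drops a stale dentry, a negative errno for a genuine error, and non-zero when the dentry is still good. A standalone sketch of that contract; refresh() stands in for the v9fs refresh helpers, and the real callback of course takes a dentry and nameidata:

    #include <stdio.h>
    #include <errno.h>

    static int refresh(int simulate)         /* returns 0, -ENOENT or -EIO */
    {
        return simulate;
    }

    static int example_revalidate(int simulate)
    {
        int rc = refresh(simulate);

        if (rc == -ENOENT)
            return 0;        /* object is gone: let the dcache drop it  */
        if (rc < 0)
            return rc;       /* transient failure: propagate the error  */
        return 1;            /* refreshed fine: dentry is still valid   */
    }

    int main(void)
    {
        printf("%d %d %d\n",
               example_revalidate(0),
               example_revalidate(-ENOENT),
               example_revalidate(-EIO));    /* prints: 1 0 -5 */
        return 0;
    }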
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c index ffbb113d5f33..82a7c38ddad0 100644 --- a/fs/9p/vfs_inode_dotl.c +++ b/fs/9p/vfs_inode_dotl.c | |||
| @@ -811,7 +811,7 @@ v9fs_vfs_follow_link_dotl(struct dentry *dentry, struct nameidata *nd) | |||
| 811 | fid = v9fs_fid_lookup(dentry); | 811 | fid = v9fs_fid_lookup(dentry); |
| 812 | if (IS_ERR(fid)) { | 812 | if (IS_ERR(fid)) { |
| 813 | __putname(link); | 813 | __putname(link); |
| 814 | link = ERR_PTR(PTR_ERR(fid)); | 814 | link = ERR_CAST(fid); |
| 815 | goto ndset; | 815 | goto ndset; |
| 816 | } | 816 | } |
| 817 | retval = p9_client_readlink(fid, &target); | 817 | retval = p9_client_readlink(fid, &target); |
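ERR_PTR(PTR_ERR(fid)) and ERR_CAST(fid) encode the same errno; ERR_CAST simply re-types an error-carrying pointer without decoding and re-encoding it. A self-contained sketch of the idiom with the helpers reimplemented locally so it builds outside the kernel (the real ones live in <linux/err.h>):

    #include <stdio.h>
    #include <errno.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long err)      { return (void *)err; }
    static long  PTR_ERR(const void *p) { return (long)p; }
    static int   IS_ERR(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }
    /* ERR_CAST: reuse the encoded errno, only the pointer type changes */
    static void *ERR_CAST(void *p)      { return p; }

    int main(void)
    {
        char *fid  = ERR_PTR(-ENOENT);       /* a failed lookup result */
        int  *link = ERR_CAST(fid);          /* same errno, new type    */

        if (IS_ERR(link))
            printf("error %ld\n", PTR_ERR(link));
        return 0;
    }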
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c index f3eed3383e4f..feef6cdc1fd2 100644 --- a/fs/9p/vfs_super.c +++ b/fs/9p/vfs_super.c | |||
| @@ -154,6 +154,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags, | |||
| 154 | retval = PTR_ERR(inode); | 154 | retval = PTR_ERR(inode); |
| 155 | goto release_sb; | 155 | goto release_sb; |
| 156 | } | 156 | } |
| 157 | |||
| 157 | root = d_alloc_root(inode); | 158 | root = d_alloc_root(inode); |
| 158 | if (!root) { | 159 | if (!root) { |
| 159 | iput(inode); | 160 | iput(inode); |
| @@ -185,21 +186,10 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags, | |||
| 185 | p9stat_free(st); | 186 | p9stat_free(st); |
| 186 | kfree(st); | 187 | kfree(st); |
| 187 | } | 188 | } |
| 188 | v9fs_fid_add(root, fid); | ||
| 189 | retval = v9fs_get_acl(inode, fid); | 189 | retval = v9fs_get_acl(inode, fid); |
| 190 | if (retval) | 190 | if (retval) |
| 191 | goto release_sb; | 191 | goto release_sb; |
| 192 | /* | 192 | v9fs_fid_add(root, fid); |
| 193 | * Add the root fid to session info. This is used | ||
| 194 | * for file system sync. We want a cloned fid here | ||
| 195 | * so that we can do a sync_filesystem after a | ||
| 196 | * shrink_dcache_for_umount | ||
| 197 | */ | ||
| 198 | v9ses->root_fid = v9fs_fid_clone(root); | ||
| 199 | if (IS_ERR(v9ses->root_fid)) { | ||
| 200 | retval = PTR_ERR(v9ses->root_fid); | ||
| 201 | goto release_sb; | ||
| 202 | } | ||
| 203 | 193 | ||
| 204 | P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n"); | 194 | P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n"); |
| 205 | return dget(sb->s_root); | 195 | return dget(sb->s_root); |
| @@ -210,11 +200,15 @@ close_session: | |||
| 210 | v9fs_session_close(v9ses); | 200 | v9fs_session_close(v9ses); |
| 211 | kfree(v9ses); | 201 | kfree(v9ses); |
| 212 | return ERR_PTR(retval); | 202 | return ERR_PTR(retval); |
| 203 | |||
| 213 | release_sb: | 204 | release_sb: |
| 214 | /* | 205 | /* |
| 215 | * we will do the session_close and root dentry | 206 | * we will do the session_close and root dentry release |
| 216 | * release in the below call. | 207 | * in the below call. But we need to clunk fid, because we haven't |
| 208 | * attached the fid to dentry so it won't get clunked | ||
| 209 | * automatically. | ||
| 217 | */ | 210 | */ |
| 211 | p9_client_clunk(fid); | ||
| 218 | deactivate_locked_super(sb); | 212 | deactivate_locked_super(sb); |
| 219 | return ERR_PTR(retval); | 213 | return ERR_PTR(retval); |
| 220 | } | 214 | } |
| @@ -232,7 +226,7 @@ static void v9fs_kill_super(struct super_block *s) | |||
| 232 | P9_DPRINTK(P9_DEBUG_VFS, " %p\n", s); | 226 | P9_DPRINTK(P9_DEBUG_VFS, " %p\n", s); |
| 233 | 227 | ||
| 234 | kill_anon_super(s); | 228 | kill_anon_super(s); |
| 235 | p9_client_clunk(v9ses->root_fid); | 229 | |
| 236 | v9fs_session_cancel(v9ses); | 230 | v9fs_session_cancel(v9ses); |
| 237 | v9fs_session_close(v9ses); | 231 | v9fs_session_close(v9ses); |
| 238 | kfree(v9ses); | 232 | kfree(v9ses); |
| @@ -285,14 +279,6 @@ done: | |||
| 285 | return res; | 279 | return res; |
| 286 | } | 280 | } |
| 287 | 281 | ||
| 288 | static int v9fs_sync_fs(struct super_block *sb, int wait) | ||
| 289 | { | ||
| 290 | struct v9fs_session_info *v9ses = sb->s_fs_info; | ||
| 291 | |||
| 292 | P9_DPRINTK(P9_DEBUG_VFS, "v9fs_sync_fs: super_block %p\n", sb); | ||
| 293 | return p9_client_sync_fs(v9ses->root_fid); | ||
| 294 | } | ||
| 295 | |||
| 296 | static int v9fs_drop_inode(struct inode *inode) | 282 | static int v9fs_drop_inode(struct inode *inode) |
| 297 | { | 283 | { |
| 298 | struct v9fs_session_info *v9ses; | 284 | struct v9fs_session_info *v9ses; |
| @@ -307,6 +293,51 @@ static int v9fs_drop_inode(struct inode *inode) | |||
| 307 | return 1; | 293 | return 1; |
| 308 | } | 294 | } |
| 309 | 295 | ||
| 296 | static int v9fs_write_inode(struct inode *inode, | ||
| 297 | struct writeback_control *wbc) | ||
| 298 | { | ||
| 299 | int ret; | ||
| 300 | struct p9_wstat wstat; | ||
| 301 | struct v9fs_inode *v9inode; | ||
| 302 | /* | ||
| 303 | * send an fsync request to server irrespective of | ||
| 304 | * wbc->sync_mode. | ||
| 305 | */ | ||
| 306 | P9_DPRINTK(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode); | ||
| 307 | v9inode = V9FS_I(inode); | ||
| 308 | if (!v9inode->writeback_fid) | ||
| 309 | return 0; | ||
| 310 | v9fs_blank_wstat(&wstat); | ||
| 311 | |||
| 312 | ret = p9_client_wstat(v9inode->writeback_fid, &wstat); | ||
| 313 | if (ret < 0) { | ||
| 314 | __mark_inode_dirty(inode, I_DIRTY_DATASYNC); | ||
| 315 | return ret; | ||
| 316 | } | ||
| 317 | return 0; | ||
| 318 | } | ||
| 319 | |||
| 320 | static int v9fs_write_inode_dotl(struct inode *inode, | ||
| 321 | struct writeback_control *wbc) | ||
| 322 | { | ||
| 323 | int ret; | ||
| 324 | struct v9fs_inode *v9inode; | ||
| 325 | /* | ||
| 326 | * send an fsync request to server irrespective of | ||
| 327 | * wbc->sync_mode. | ||
| 328 | */ | ||
| 329 | P9_DPRINTK(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode); | ||
| 330 | v9inode = V9FS_I(inode); | ||
| 331 | if (!v9inode->writeback_fid) | ||
| 332 | return 0; | ||
| 333 | ret = p9_client_fsync(v9inode->writeback_fid, 0); | ||
| 334 | if (ret < 0) { | ||
| 335 | __mark_inode_dirty(inode, I_DIRTY_DATASYNC); | ||
| 336 | return ret; | ||
| 337 | } | ||
| 338 | return 0; | ||
| 339 | } | ||
| 340 | |||
| 310 | static const struct super_operations v9fs_super_ops = { | 341 | static const struct super_operations v9fs_super_ops = { |
| 311 | .alloc_inode = v9fs_alloc_inode, | 342 | .alloc_inode = v9fs_alloc_inode, |
| 312 | .destroy_inode = v9fs_destroy_inode, | 343 | .destroy_inode = v9fs_destroy_inode, |
| @@ -314,17 +345,18 @@ static const struct super_operations v9fs_super_ops = { | |||
| 314 | .evict_inode = v9fs_evict_inode, | 345 | .evict_inode = v9fs_evict_inode, |
| 315 | .show_options = generic_show_options, | 346 | .show_options = generic_show_options, |
| 316 | .umount_begin = v9fs_umount_begin, | 347 | .umount_begin = v9fs_umount_begin, |
| 348 | .write_inode = v9fs_write_inode, | ||
| 317 | }; | 349 | }; |
| 318 | 350 | ||
| 319 | static const struct super_operations v9fs_super_ops_dotl = { | 351 | static const struct super_operations v9fs_super_ops_dotl = { |
| 320 | .alloc_inode = v9fs_alloc_inode, | 352 | .alloc_inode = v9fs_alloc_inode, |
| 321 | .destroy_inode = v9fs_destroy_inode, | 353 | .destroy_inode = v9fs_destroy_inode, |
| 322 | .sync_fs = v9fs_sync_fs, | ||
| 323 | .statfs = v9fs_statfs, | 354 | .statfs = v9fs_statfs, |
| 324 | .drop_inode = v9fs_drop_inode, | 355 | .drop_inode = v9fs_drop_inode, |
| 325 | .evict_inode = v9fs_evict_inode, | 356 | .evict_inode = v9fs_evict_inode, |
| 326 | .show_options = generic_show_options, | 357 | .show_options = generic_show_options, |
| 327 | .umount_begin = v9fs_umount_begin, | 358 | .umount_begin = v9fs_umount_begin, |
| 359 | .write_inode = v9fs_write_inode_dotl, | ||
| 328 | }; | 360 | }; |
| 329 | 361 | ||
| 330 | struct file_system_type v9fs_fs_type = { | 362 | struct file_system_type v9fs_fs_type = { |
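Both new ->write_inode callbacks added above follow the same shape: do nothing if no writeback fid was ever created, flush metadata through it (p9_client_wstat for legacy 9P, p9_client_fsync for 9P2000.L), and re-mark the inode dirty on failure so a later writeback pass retries. A runnable sketch of that shape with stand-in helpers and a simulated server failure:

    #include <stdio.h>

    struct inode_sketch {
        int has_writeback_fid;
        int dirty;
    };

    static int flush_to_server(void) { return -5; }  /* pretend wstat/fsync failed */

    static int write_inode_sketch(struct inode_sketch *inode)
    {
        int ret;

        if (!inode->has_writeback_fid)
            return 0;                 /* nothing was ever written back */

        ret = flush_to_server();
        if (ret < 0) {
            inode->dirty = 1;         /* like __mark_inode_dirty(I_DIRTY_DATASYNC) */
            return ret;
        }
        return 0;
    }

    int main(void)
    {
        struct inode_sketch ino = { .has_writeback_fid = 1, .dirty = 0 };
        int rc = write_inode_sketch(&ino);

        printf("rc=%d dirty=%d\n", rc, ino.dirty);   /* rc=-5 dirty=1 */
        return 0;
    }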
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index f34078d702d3..303983fabfd6 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c | |||
| @@ -941,9 +941,13 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) | |||
| 941 | current->mm->start_stack = bprm->p; | 941 | current->mm->start_stack = bprm->p; |
| 942 | 942 | ||
| 943 | #ifdef arch_randomize_brk | 943 | #ifdef arch_randomize_brk |
| 944 | if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) | 944 | if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) { |
| 945 | current->mm->brk = current->mm->start_brk = | 945 | current->mm->brk = current->mm->start_brk = |
| 946 | arch_randomize_brk(current->mm); | 946 | arch_randomize_brk(current->mm); |
| 947 | #ifdef CONFIG_COMPAT_BRK | ||
| 948 | current->brk_randomized = 1; | ||
| 949 | #endif | ||
| 950 | } | ||
| 947 | #endif | 951 | #endif |
| 948 | 952 | ||
| 949 | if (current->personality & MMAP_PAGE_ZERO) { | 953 | if (current->personality & MMAP_PAGE_ZERO) { |
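The binfmt_elf hunk braces the randomization branch so that, under CONFIG_COMPAT_BRK, the new brk_randomized flag is set only when the heap start was actually randomized; heap-placement code can then tell a randomized brk apart from the legacy fixed one. A hedged sketch of recording that state (the structure, the flag's placement and the randomize() helper here are illustrative, not the kernel's own layout):

    #include <stdio.h>
    #include <stdlib.h>

    struct mm_sketch {
        unsigned long start_brk, brk;
        int brk_randomized;
    };

    static unsigned long randomize(unsigned long base)
    {
        return base + ((unsigned long)rand() % 32) * 4096;   /* page-aligned jitter */
    }

    int main(void)
    {
        struct mm_sketch mm = { .start_brk = 0x800000, .brk = 0x800000 };
        int randomize_va_space = 2;

        if (randomize_va_space > 1) {
            mm.brk = mm.start_brk = randomize(mm.start_brk);
            mm.brk_randomized = 1;    /* both statements must share the braces */
        }
        printf("brk=0x%lx randomized=%d\n", mm.brk, mm.brk_randomized);
        return 0;
    }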
diff --git a/fs/cifs/README b/fs/cifs/README index fe1683590828..74ab165fc646 100644 --- a/fs/cifs/README +++ b/fs/cifs/README | |||
| @@ -685,22 +685,6 @@ LinuxExtensionsEnabled If set to one then the client will attempt to | |||
| 685 | support and want to map the uid and gid fields | 685 | support and want to map the uid and gid fields |
| 686 | to values supplied at mount (rather than the | 686 | to values supplied at mount (rather than the |
| 687 | actual values, then set this to zero. (default 1) | 687 | actual values, then set this to zero. (default 1) |
| 688 | Experimental When set to 1 used to enable certain experimental | ||
| 689 | features (currently enables multipage writes | ||
| 690 | when signing is enabled, the multipage write | ||
| 691 | performance enhancement was disabled when | ||
| 692 | signing turned on in case buffer was modified | ||
| 693 | just before it was sent, also this flag will | ||
| 694 | be used to use the new experimental directory change | ||
| 695 | notification code). When set to 2 enables | ||
| 696 | an additional experimental feature, "raw ntlmssp" | ||
| 697 | session establishment support (which allows | ||
| 698 | specifying "sec=ntlmssp" on mount). The Linux cifs | ||
| 699 | module will use ntlmv2 authentication encapsulated | ||
| 700 | in "raw ntlmssp" (not using SPNEGO) when | ||
| 701 | "sec=ntlmssp" is specified on mount. | ||
| 702 | This support also requires building cifs with | ||
| 703 | the CONFIG_CIFS_EXPERIMENTAL configuration flag. | ||
| 704 | 688 | ||
| 705 | These experimental features and tracing can be enabled by changing flags in | 689 | These experimental features and tracing can be enabled by changing flags in |
| 706 | /proc/fs/cifs (after the cifs module has been installed or built into the | 690 | /proc/fs/cifs (after the cifs module has been installed or built into the |
diff --git a/fs/cifs/cache.c b/fs/cifs/cache.c index e654dfd092c3..53d57a3fe427 100644 --- a/fs/cifs/cache.c +++ b/fs/cifs/cache.c | |||
| @@ -50,7 +50,7 @@ void cifs_fscache_unregister(void) | |||
| 50 | */ | 50 | */ |
| 51 | struct cifs_server_key { | 51 | struct cifs_server_key { |
| 52 | uint16_t family; /* address family */ | 52 | uint16_t family; /* address family */ |
| 53 | uint16_t port; /* IP port */ | 53 | __be16 port; /* IP port */ |
| 54 | union { | 54 | union { |
| 55 | struct in_addr ipv4_addr; | 55 | struct in_addr ipv4_addr; |
| 56 | struct in6_addr ipv6_addr; | 56 | struct in6_addr ipv6_addr; |
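Changing the port field from uint16_t to __be16 documents, for sparse endianness checking, that the value stored in the fscache key is already in network byte order. A small userspace analogue of keeping a port big-endian, with htons() playing the role of the cpu_to_be16 conversion:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
        uint16_t port_host = 445;             /* CIFS well-known port        */
        uint16_t port_be = htons(port_host);  /* stored big-endian, like __be16 */

        printf("host 0x%04x  wire 0x%04x\n", port_host, port_be);
        return 0;
    }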
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index 65829d32128c..30d01bc90855 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c | |||
| @@ -423,7 +423,6 @@ static const struct file_operations cifs_lookup_cache_proc_fops; | |||
| 423 | static const struct file_operations traceSMB_proc_fops; | 423 | static const struct file_operations traceSMB_proc_fops; |
| 424 | static const struct file_operations cifs_multiuser_mount_proc_fops; | 424 | static const struct file_operations cifs_multiuser_mount_proc_fops; |
| 425 | static const struct file_operations cifs_security_flags_proc_fops; | 425 | static const struct file_operations cifs_security_flags_proc_fops; |
| 426 | static const struct file_operations cifs_experimental_proc_fops; | ||
| 427 | static const struct file_operations cifs_linux_ext_proc_fops; | 426 | static const struct file_operations cifs_linux_ext_proc_fops; |
| 428 | 427 | ||
| 429 | void | 428 | void |
| @@ -441,8 +440,6 @@ cifs_proc_init(void) | |||
| 441 | proc_create("cifsFYI", 0, proc_fs_cifs, &cifsFYI_proc_fops); | 440 | proc_create("cifsFYI", 0, proc_fs_cifs, &cifsFYI_proc_fops); |
| 442 | proc_create("traceSMB", 0, proc_fs_cifs, &traceSMB_proc_fops); | 441 | proc_create("traceSMB", 0, proc_fs_cifs, &traceSMB_proc_fops); |
| 443 | proc_create("OplockEnabled", 0, proc_fs_cifs, &cifs_oplock_proc_fops); | 442 | proc_create("OplockEnabled", 0, proc_fs_cifs, &cifs_oplock_proc_fops); |
| 444 | proc_create("Experimental", 0, proc_fs_cifs, | ||
| 445 | &cifs_experimental_proc_fops); | ||
| 446 | proc_create("LinuxExtensionsEnabled", 0, proc_fs_cifs, | 443 | proc_create("LinuxExtensionsEnabled", 0, proc_fs_cifs, |
| 447 | &cifs_linux_ext_proc_fops); | 444 | &cifs_linux_ext_proc_fops); |
| 448 | proc_create("MultiuserMount", 0, proc_fs_cifs, | 445 | proc_create("MultiuserMount", 0, proc_fs_cifs, |
| @@ -469,7 +466,6 @@ cifs_proc_clean(void) | |||
| 469 | remove_proc_entry("OplockEnabled", proc_fs_cifs); | 466 | remove_proc_entry("OplockEnabled", proc_fs_cifs); |
| 470 | remove_proc_entry("SecurityFlags", proc_fs_cifs); | 467 | remove_proc_entry("SecurityFlags", proc_fs_cifs); |
| 471 | remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs); | 468 | remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs); |
| 472 | remove_proc_entry("Experimental", proc_fs_cifs); | ||
| 473 | remove_proc_entry("LookupCacheEnabled", proc_fs_cifs); | 469 | remove_proc_entry("LookupCacheEnabled", proc_fs_cifs); |
| 474 | remove_proc_entry("fs/cifs", NULL); | 470 | remove_proc_entry("fs/cifs", NULL); |
| 475 | } | 471 | } |
| @@ -550,45 +546,6 @@ static const struct file_operations cifs_oplock_proc_fops = { | |||
| 550 | .write = cifs_oplock_proc_write, | 546 | .write = cifs_oplock_proc_write, |
| 551 | }; | 547 | }; |
| 552 | 548 | ||
| 553 | static int cifs_experimental_proc_show(struct seq_file *m, void *v) | ||
| 554 | { | ||
| 555 | seq_printf(m, "%d\n", experimEnabled); | ||
| 556 | return 0; | ||
| 557 | } | ||
| 558 | |||
| 559 | static int cifs_experimental_proc_open(struct inode *inode, struct file *file) | ||
| 560 | { | ||
| 561 | return single_open(file, cifs_experimental_proc_show, NULL); | ||
| 562 | } | ||
| 563 | |||
| 564 | static ssize_t cifs_experimental_proc_write(struct file *file, | ||
| 565 | const char __user *buffer, size_t count, loff_t *ppos) | ||
| 566 | { | ||
| 567 | char c; | ||
| 568 | int rc; | ||
| 569 | |||
| 570 | rc = get_user(c, buffer); | ||
| 571 | if (rc) | ||
| 572 | return rc; | ||
| 573 | if (c == '0' || c == 'n' || c == 'N') | ||
| 574 | experimEnabled = 0; | ||
| 575 | else if (c == '1' || c == 'y' || c == 'Y') | ||
| 576 | experimEnabled = 1; | ||
| 577 | else if (c == '2') | ||
| 578 | experimEnabled = 2; | ||
| 579 | |||
| 580 | return count; | ||
| 581 | } | ||
| 582 | |||
| 583 | static const struct file_operations cifs_experimental_proc_fops = { | ||
| 584 | .owner = THIS_MODULE, | ||
| 585 | .open = cifs_experimental_proc_open, | ||
| 586 | .read = seq_read, | ||
| 587 | .llseek = seq_lseek, | ||
| 588 | .release = single_release, | ||
| 589 | .write = cifs_experimental_proc_write, | ||
| 590 | }; | ||
| 591 | |||
| 592 | static int cifs_linux_ext_proc_show(struct seq_file *m, void *v) | 549 | static int cifs_linux_ext_proc_show(struct seq_file *m, void *v) |
| 593 | { | 550 | { |
| 594 | seq_printf(m, "%d\n", linuxExtEnabled); | 551 | seq_printf(m, "%d\n", linuxExtEnabled); |
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c index 4dfba8283165..33d221394aca 100644 --- a/fs/cifs/cifs_spnego.c +++ b/fs/cifs/cifs_spnego.c | |||
| @@ -113,7 +113,7 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo) | |||
| 113 | MAX_MECH_STR_LEN + | 113 | MAX_MECH_STR_LEN + |
| 114 | UID_KEY_LEN + (sizeof(uid_t) * 2) + | 114 | UID_KEY_LEN + (sizeof(uid_t) * 2) + |
| 115 | CREDUID_KEY_LEN + (sizeof(uid_t) * 2) + | 115 | CREDUID_KEY_LEN + (sizeof(uid_t) * 2) + |
| 116 | USER_KEY_LEN + strlen(sesInfo->userName) + | 116 | USER_KEY_LEN + strlen(sesInfo->user_name) + |
| 117 | PID_KEY_LEN + (sizeof(pid_t) * 2) + 1; | 117 | PID_KEY_LEN + (sizeof(pid_t) * 2) + 1; |
| 118 | 118 | ||
| 119 | spnego_key = ERR_PTR(-ENOMEM); | 119 | spnego_key = ERR_PTR(-ENOMEM); |
| @@ -153,7 +153,7 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo) | |||
| 153 | sprintf(dp, ";creduid=0x%x", sesInfo->cred_uid); | 153 | sprintf(dp, ";creduid=0x%x", sesInfo->cred_uid); |
| 154 | 154 | ||
| 155 | dp = description + strlen(description); | 155 | dp = description + strlen(description); |
| 156 | sprintf(dp, ";user=%s", sesInfo->userName); | 156 | sprintf(dp, ";user=%s", sesInfo->user_name); |
| 157 | 157 | ||
| 158 | dp = description + strlen(description); | 158 | dp = description + strlen(description); |
| 159 | sprintf(dp, ";pid=0x%x", current->pid); | 159 | sprintf(dp, ";pid=0x%x", current->pid); |
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c index fc0fd4fde306..23d43cde4306 100644 --- a/fs/cifs/cifs_unicode.c +++ b/fs/cifs/cifs_unicode.c | |||
| @@ -90,7 +90,7 @@ cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp, | |||
| 90 | case UNI_COLON: | 90 | case UNI_COLON: |
| 91 | *target = ':'; | 91 | *target = ':'; |
| 92 | break; | 92 | break; |
| 93 | case UNI_ASTERIK: | 93 | case UNI_ASTERISK: |
| 94 | *target = '*'; | 94 | *target = '*'; |
| 95 | break; | 95 | break; |
| 96 | case UNI_QUESTION: | 96 | case UNI_QUESTION: |
| @@ -264,40 +264,40 @@ cifs_strndup_from_ucs(const char *src, const int maxlen, const bool is_unicode, | |||
| 264 | * names are little endian 16 bit Unicode on the wire | 264 | * names are little endian 16 bit Unicode on the wire |
| 265 | */ | 265 | */ |
| 266 | int | 266 | int |
| 267 | cifsConvertToUCS(__le16 *target, const char *source, int maxlen, | 267 | cifsConvertToUCS(__le16 *target, const char *source, int srclen, |
| 268 | const struct nls_table *cp, int mapChars) | 268 | const struct nls_table *cp, int mapChars) |
| 269 | { | 269 | { |
| 270 | int i, j, charlen; | 270 | int i, j, charlen; |
| 271 | int len_remaining = maxlen; | ||
| 272 | char src_char; | 271 | char src_char; |
| 273 | __u16 temp; | 272 | __le16 dst_char; |
| 273 | wchar_t tmp; | ||
| 274 | 274 | ||
| 275 | if (!mapChars) | 275 | if (!mapChars) |
| 276 | return cifs_strtoUCS(target, source, PATH_MAX, cp); | 276 | return cifs_strtoUCS(target, source, PATH_MAX, cp); |
| 277 | 277 | ||
| 278 | for (i = 0, j = 0; i < maxlen; j++) { | 278 | for (i = 0, j = 0; i < srclen; j++) { |
| 279 | src_char = source[i]; | 279 | src_char = source[i]; |
| 280 | switch (src_char) { | 280 | switch (src_char) { |
| 281 | case 0: | 281 | case 0: |
| 282 | put_unaligned_le16(0, &target[j]); | 282 | put_unaligned(0, &target[j]); |
| 283 | goto ctoUCS_out; | 283 | goto ctoUCS_out; |
| 284 | case ':': | 284 | case ':': |
| 285 | temp = UNI_COLON; | 285 | dst_char = cpu_to_le16(UNI_COLON); |
| 286 | break; | 286 | break; |
| 287 | case '*': | 287 | case '*': |
| 288 | temp = UNI_ASTERIK; | 288 | dst_char = cpu_to_le16(UNI_ASTERISK); |
| 289 | break; | 289 | break; |
| 290 | case '?': | 290 | case '?': |
| 291 | temp = UNI_QUESTION; | 291 | dst_char = cpu_to_le16(UNI_QUESTION); |
| 292 | break; | 292 | break; |
| 293 | case '<': | 293 | case '<': |
| 294 | temp = UNI_LESSTHAN; | 294 | dst_char = cpu_to_le16(UNI_LESSTHAN); |
| 295 | break; | 295 | break; |
| 296 | case '>': | 296 | case '>': |
| 297 | temp = UNI_GRTRTHAN; | 297 | dst_char = cpu_to_le16(UNI_GRTRTHAN); |
| 298 | break; | 298 | break; |
| 299 | case '|': | 299 | case '|': |
| 300 | temp = UNI_PIPE; | 300 | dst_char = cpu_to_le16(UNI_PIPE); |
| 301 | break; | 301 | break; |
| 302 | /* | 302 | /* |
| 303 | * FIXME: We can not handle remapping backslash (UNI_SLASH) | 303 | * FIXME: We can not handle remapping backslash (UNI_SLASH) |
| @@ -305,17 +305,17 @@ cifsConvertToUCS(__le16 *target, const char *source, int maxlen, | |||
| 305 | * as they use backslash as separator. | 305 | * as they use backslash as separator. |
| 306 | */ | 306 | */ |
| 307 | default: | 307 | default: |
| 308 | charlen = cp->char2uni(source+i, len_remaining, | 308 | charlen = cp->char2uni(source + i, srclen - i, &tmp); |
| 309 | &temp); | 309 | dst_char = cpu_to_le16(tmp); |
| 310 | |||
| 310 | /* | 311 | /* |
| 311 | * if no match, use question mark, which at least in | 312 | * if no match, use question mark, which at least in |
| 312 | * some cases serves as wild card | 313 | * some cases serves as wild card |
| 313 | */ | 314 | */ |
| 314 | if (charlen < 1) { | 315 | if (charlen < 1) { |
| 315 | temp = 0x003f; | 316 | dst_char = cpu_to_le16(0x003f); |
| 316 | charlen = 1; | 317 | charlen = 1; |
| 317 | } | 318 | } |
| 318 | len_remaining -= charlen; | ||
| 319 | /* | 319 | /* |
| 320 | * character may take more than one byte in the source | 320 | * character may take more than one byte in the source |
| 321 | * string, but will take exactly two bytes in the | 321 | * string, but will take exactly two bytes in the |
| @@ -324,9 +324,8 @@ cifsConvertToUCS(__le16 *target, const char *source, int maxlen, | |||
| 324 | i += charlen; | 324 | i += charlen; |
| 325 | continue; | 325 | continue; |
| 326 | } | 326 | } |
| 327 | put_unaligned_le16(temp, &target[j]); | 327 | put_unaligned(dst_char, &target[j]); |
| 328 | i++; /* move to next char in source string */ | 328 | i++; /* move to next char in source string */ |
| 329 | len_remaining--; | ||
| 330 | } | 329 | } |
| 331 | 330 | ||
| 332 | ctoUCS_out: | 331 | ctoUCS_out: |
diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h index 7fe6b52df507..644dd882a560 100644 --- a/fs/cifs/cifs_unicode.h +++ b/fs/cifs/cifs_unicode.h | |||
| @@ -44,7 +44,7 @@ | |||
| 44 | * reserved symbols (along with \ and /), otherwise illegal to store | 44 | * reserved symbols (along with \ and /), otherwise illegal to store |
| 45 | * in filenames in NTFS | 45 | * in filenames in NTFS |
| 46 | */ | 46 | */ |
| 47 | #define UNI_ASTERIK (__u16) ('*' + 0xF000) | 47 | #define UNI_ASTERISK (__u16) ('*' + 0xF000) |
| 48 | #define UNI_QUESTION (__u16) ('?' + 0xF000) | 48 | #define UNI_QUESTION (__u16) ('?' + 0xF000) |
| 49 | #define UNI_COLON (__u16) (':' + 0xF000) | 49 | #define UNI_COLON (__u16) (':' + 0xF000) |
| 50 | #define UNI_GRTRTHAN (__u16) ('>' + 0xF000) | 50 | #define UNI_GRTRTHAN (__u16) ('>' + 0xF000) |
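The cifs_unicode changes above convert each remapped character to __le16 with cpu_to_le16 before it is stored, because SMB strings are little-endian on the wire regardless of host byte order (they also fix the UNI_ASTERIK spelling). A minimal userspace sketch of the same store-as-little-endian idea, with hand-rolled helpers standing in for the kernel's cpu_to_le16/put_unaligned:

    #include <stdint.h>
    #include <stdio.h>

    /* Store a host-order 16-bit value as little-endian bytes, byte by byte,
     * so the result is identical on big- and little-endian hosts. */
    static void put_le16(uint8_t *dst, uint16_t v)
    {
        dst[0] = (uint8_t)(v & 0xff);
        dst[1] = (uint8_t)(v >> 8);
    }

    /* Read a little-endian 16-bit value back into host order. */
    static uint16_t get_le16(const uint8_t *src)
    {
        return (uint16_t)(src[0] | ((uint16_t)src[1] << 8));
    }

    int main(void)
    {
        /* Same remapping idea as UNI_ASTERISK: '*' + 0xF000. */
        uint16_t asterisk = (uint16_t)('*' + 0xF000);
        uint8_t wire[2];

        put_le16(wire, asterisk);          /* what goes on the wire */
        printf("wire bytes: %02x %02x\n", wire[0], wire[1]);
        printf("round trip ok: %d\n", get_le16(wire) == asterisk);
        return 0;
    }

The byte-by-byte store is what keeps the on-wire layout the same whether the host is little- or big-endian, which is exactly what the cpu_to_le16 conversions guarantee above.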
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index a51585f9852b..d1a016be73ba 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c | |||
| @@ -30,12 +30,13 @@ | |||
| 30 | #include <linux/ctype.h> | 30 | #include <linux/ctype.h> |
| 31 | #include <linux/random.h> | 31 | #include <linux/random.h> |
| 32 | 32 | ||
| 33 | /* Calculate and return the CIFS signature based on the mac key and SMB PDU */ | 33 | /* |
| 34 | /* the 16 byte signature must be allocated by the caller */ | 34 | * Calculate and return the CIFS signature based on the mac key and SMB PDU. |
| 35 | /* Note we only use the 1st eight bytes */ | 35 | * The 16 byte signature must be allocated by the caller. Note we only use the |
| 36 | /* Note that the smb header signature field on input contains the | 36 | * 1st eight bytes and that the smb header signature field on input contains |
| 37 | sequence number before this function is called */ | 37 | * the sequence number before this function is called. Also, this function |
| 38 | 38 | * should be called with the server->srv_mutex held. | |
| 39 | */ | ||
| 39 | static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu, | 40 | static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu, |
| 40 | struct TCP_Server_Info *server, char *signature) | 41 | struct TCP_Server_Info *server, char *signature) |
| 41 | { | 42 | { |
| @@ -209,8 +210,10 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu, | |||
| 209 | cpu_to_le32(expected_sequence_number); | 210 | cpu_to_le32(expected_sequence_number); |
| 210 | cifs_pdu->Signature.Sequence.Reserved = 0; | 211 | cifs_pdu->Signature.Sequence.Reserved = 0; |
| 211 | 212 | ||
| 213 | mutex_lock(&server->srv_mutex); | ||
| 212 | rc = cifs_calculate_signature(cifs_pdu, server, | 214 | rc = cifs_calculate_signature(cifs_pdu, server, |
| 213 | what_we_think_sig_should_be); | 215 | what_we_think_sig_should_be); |
| 216 | mutex_unlock(&server->srv_mutex); | ||
| 214 | 217 | ||
| 215 | if (rc) | 218 | if (rc) |
| 216 | return rc; | 219 | return rc; |
| @@ -469,15 +472,15 @@ static int calc_ntlmv2_hash(struct cifsSesInfo *ses, char *ntlmv2_hash, | |||
| 469 | return rc; | 472 | return rc; |
| 470 | } | 473 | } |
| 471 | 474 | ||
| 472 | /* convert ses->userName to unicode and uppercase */ | 475 | /* convert ses->user_name to unicode and uppercase */ |
| 473 | len = strlen(ses->userName); | 476 | len = strlen(ses->user_name); |
| 474 | user = kmalloc(2 + (len * 2), GFP_KERNEL); | 477 | user = kmalloc(2 + (len * 2), GFP_KERNEL); |
| 475 | if (user == NULL) { | 478 | if (user == NULL) { |
| 476 | cERROR(1, "calc_ntlmv2_hash: user mem alloc failure\n"); | 479 | cERROR(1, "calc_ntlmv2_hash: user mem alloc failure\n"); |
| 477 | rc = -ENOMEM; | 480 | rc = -ENOMEM; |
| 478 | goto calc_exit_2; | 481 | goto calc_exit_2; |
| 479 | } | 482 | } |
| 480 | len = cifs_strtoUCS((__le16 *)user, ses->userName, len, nls_cp); | 483 | len = cifs_strtoUCS((__le16 *)user, ses->user_name, len, nls_cp); |
| 481 | UniStrupr(user); | 484 | UniStrupr(user); |
| 482 | 485 | ||
| 483 | crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash, | 486 | crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash, |
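The new comment and the mutex_lock/mutex_unlock pair above serialize signature work on server->srv_mutex, since the signature covers a per-server sequence number that must be consumed exactly once per PDU. A userspace sketch of that serialization idea (struct server and sign_packet are made up for illustration, not the kernel signing code; build with -pthread):

    #include <pthread.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Toy "server" with a shared signing sequence number. The real code HMACs
     * the SMB PDU; a trivial checksum stands in here, to show why consuming the
     * sequence number and computing the signature must be one atomic step. */
    struct server {
        pthread_mutex_t srv_mutex;
        unsigned int sequence;
    };

    static unsigned int sign_packet(struct server *srv, const char *pdu, size_t len)
    {
        unsigned int sig;

        pthread_mutex_lock(&srv->srv_mutex);   /* same role as server->srv_mutex */
        sig = srv->sequence++;                 /* sequence consumed exactly once */
        for (size_t i = 0; i < len; i++)
            sig = sig * 31 + (unsigned char)pdu[i];
        pthread_mutex_unlock(&srv->srv_mutex);

        return sig;
    }

    int main(void)
    {
        struct server srv = { PTHREAD_MUTEX_INITIALIZER, 1 };

        printf("sig1=%08x sig2=%08x\n",
               sign_packet(&srv, "hello", 5), sign_packet(&srv, "world", 5));
        return 0;
    }

Without the lock, two concurrent callers could interleave the sequence update and the digest, producing signatures the peer would reject.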
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index f2970136d17d..5c412b33cd7c 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
| @@ -53,7 +53,6 @@ int cifsFYI = 0; | |||
| 53 | int cifsERROR = 1; | 53 | int cifsERROR = 1; |
| 54 | int traceSMB = 0; | 54 | int traceSMB = 0; |
| 55 | unsigned int oplockEnabled = 1; | 55 | unsigned int oplockEnabled = 1; |
| 56 | unsigned int experimEnabled = 0; | ||
| 57 | unsigned int linuxExtEnabled = 1; | 56 | unsigned int linuxExtEnabled = 1; |
| 58 | unsigned int lookupCacheEnabled = 1; | 57 | unsigned int lookupCacheEnabled = 1; |
| 59 | unsigned int multiuser_mount = 0; | 58 | unsigned int multiuser_mount = 0; |
| @@ -127,6 +126,7 @@ cifs_read_super(struct super_block *sb, void *data, | |||
| 127 | kfree(cifs_sb); | 126 | kfree(cifs_sb); |
| 128 | return rc; | 127 | return rc; |
| 129 | } | 128 | } |
| 129 | cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages; | ||
| 130 | 130 | ||
| 131 | #ifdef CONFIG_CIFS_DFS_UPCALL | 131 | #ifdef CONFIG_CIFS_DFS_UPCALL |
| 132 | /* copy mount params to sb for use in submounts */ | 132 | /* copy mount params to sb for use in submounts */ |
| @@ -409,8 +409,8 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m) | |||
| 409 | 409 | ||
| 410 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER) | 410 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER) |
| 411 | seq_printf(s, ",multiuser"); | 411 | seq_printf(s, ",multiuser"); |
| 412 | else if (tcon->ses->userName) | 412 | else if (tcon->ses->user_name) |
| 413 | seq_printf(s, ",username=%s", tcon->ses->userName); | 413 | seq_printf(s, ",username=%s", tcon->ses->user_name); |
| 414 | 414 | ||
| 415 | if (tcon->ses->domainName) | 415 | if (tcon->ses->domainName) |
| 416 | seq_printf(s, ",domain=%s", tcon->ses->domainName); | 416 | seq_printf(s, ",domain=%s", tcon->ses->domainName); |
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 17afb0fbcaed..a5d1106fcbde 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
| @@ -37,10 +37,9 @@ | |||
| 37 | 37 | ||
| 38 | #define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1) | 38 | #define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1) |
| 39 | #define MAX_SERVER_SIZE 15 | 39 | #define MAX_SERVER_SIZE 15 |
| 40 | #define MAX_SHARE_SIZE 64 /* used to be 20, this should still be enough */ | 40 | #define MAX_SHARE_SIZE 80 |
| 41 | #define MAX_USERNAME_SIZE 32 /* 32 is to allow for 15 char names + null | 41 | #define MAX_USERNAME_SIZE 256 /* reasonable maximum for current servers */ |
| 42 | termination then *2 for unicode versions */ | 42 | #define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */ |
| 43 | #define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */ | ||
| 44 | 43 | ||
| 45 | #define CIFS_MIN_RCV_POOL 4 | 44 | #define CIFS_MIN_RCV_POOL 4 |
| 46 | 45 | ||
| @@ -92,7 +91,8 @@ enum statusEnum { | |||
| 92 | CifsNew = 0, | 91 | CifsNew = 0, |
| 93 | CifsGood, | 92 | CifsGood, |
| 94 | CifsExiting, | 93 | CifsExiting, |
| 95 | CifsNeedReconnect | 94 | CifsNeedReconnect, |
| 95 | CifsNeedNegotiate | ||
| 96 | }; | 96 | }; |
| 97 | 97 | ||
| 98 | enum securityEnum { | 98 | enum securityEnum { |
| @@ -274,7 +274,7 @@ struct cifsSesInfo { | |||
| 274 | int capabilities; | 274 | int capabilities; |
| 275 | char serverName[SERVER_NAME_LEN_WITH_NULL * 2]; /* BB make bigger for | 275 | char serverName[SERVER_NAME_LEN_WITH_NULL * 2]; /* BB make bigger for |
| 276 | TCP names - will ipv6 and sctp addresses fit? */ | 276 | TCP names - will ipv6 and sctp addresses fit? */ |
| 277 | char userName[MAX_USERNAME_SIZE + 1]; | 277 | char *user_name; |
| 278 | char *domainName; | 278 | char *domainName; |
| 279 | char *password; | 279 | char *password; |
| 280 | struct session_key auth_key; | 280 | struct session_key auth_key; |
| @@ -817,7 +817,6 @@ GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions | |||
| 817 | have the uid/password or Kerberos credential | 817 | have the uid/password or Kerberos credential |
| 818 | or equivalent for current user */ | 818 | or equivalent for current user */ |
| 819 | GLOBAL_EXTERN unsigned int oplockEnabled; | 819 | GLOBAL_EXTERN unsigned int oplockEnabled; |
| 820 | GLOBAL_EXTERN unsigned int experimEnabled; | ||
| 821 | GLOBAL_EXTERN unsigned int lookupCacheEnabled; | 820 | GLOBAL_EXTERN unsigned int lookupCacheEnabled; |
| 822 | GLOBAL_EXTERN unsigned int global_secflags; /* if on, session setup sent | 821 | GLOBAL_EXTERN unsigned int global_secflags; /* if on, session setup sent |
| 823 | with more secure ntlmssp2 challenge/resp */ | 822 | with more secure ntlmssp2 challenge/resp */ |
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 2644a5d6cc67..df959bae6728 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c | |||
| @@ -142,9 +142,9 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command) | |||
| 142 | */ | 142 | */ |
| 143 | while (server->tcpStatus == CifsNeedReconnect) { | 143 | while (server->tcpStatus == CifsNeedReconnect) { |
| 144 | wait_event_interruptible_timeout(server->response_q, | 144 | wait_event_interruptible_timeout(server->response_q, |
| 145 | (server->tcpStatus == CifsGood), 10 * HZ); | 145 | (server->tcpStatus != CifsNeedReconnect), 10 * HZ); |
| 146 | 146 | ||
| 147 | /* is TCP session is reestablished now ?*/ | 147 | /* are we still trying to reconnect? */ |
| 148 | if (server->tcpStatus != CifsNeedReconnect) | 148 | if (server->tcpStatus != CifsNeedReconnect) |
| 149 | break; | 149 | break; |
| 150 | 150 | ||
| @@ -729,7 +729,7 @@ CIFSSMBEcho(struct TCP_Server_Info *server) | |||
| 729 | return rc; | 729 | return rc; |
| 730 | 730 | ||
| 731 | /* set up echo request */ | 731 | /* set up echo request */ |
| 732 | smb->hdr.Tid = cpu_to_le16(0xffff); | 732 | smb->hdr.Tid = 0xffff; |
| 733 | smb->hdr.WordCount = 1; | 733 | smb->hdr.WordCount = 1; |
| 734 | put_unaligned_le16(1, &smb->EchoCount); | 734 | put_unaligned_le16(1, &smb->EchoCount); |
| 735 | put_bcc_le(1, &smb->hdr); | 735 | put_bcc_le(1, &smb->hdr); |
| @@ -1884,10 +1884,10 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, | |||
| 1884 | __constant_cpu_to_le16(CIFS_WRLCK)) | 1884 | __constant_cpu_to_le16(CIFS_WRLCK)) |
| 1885 | pLockData->fl_type = F_WRLCK; | 1885 | pLockData->fl_type = F_WRLCK; |
| 1886 | 1886 | ||
| 1887 | pLockData->fl_start = parm_data->start; | 1887 | pLockData->fl_start = le64_to_cpu(parm_data->start); |
| 1888 | pLockData->fl_end = parm_data->start + | 1888 | pLockData->fl_end = pLockData->fl_start + |
| 1889 | parm_data->length - 1; | 1889 | le64_to_cpu(parm_data->length) - 1; |
| 1890 | pLockData->fl_pid = parm_data->pid; | 1890 | pLockData->fl_pid = le32_to_cpu(parm_data->pid); |
| 1891 | } | 1891 | } |
| 1892 | } | 1892 | } |
| 1893 | 1893 | ||
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 6e2b2addfc78..db9d55b507d0 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
| @@ -199,8 +199,7 @@ cifs_reconnect(struct TCP_Server_Info *server) | |||
| 199 | } | 199 | } |
| 200 | spin_unlock(&GlobalMid_Lock); | 200 | spin_unlock(&GlobalMid_Lock); |
| 201 | 201 | ||
| 202 | while ((server->tcpStatus != CifsExiting) && | 202 | while (server->tcpStatus == CifsNeedReconnect) { |
| 203 | (server->tcpStatus != CifsGood)) { | ||
| 204 | try_to_freeze(); | 203 | try_to_freeze(); |
| 205 | 204 | ||
| 206 | /* we should try only the port we connected to before */ | 205 | /* we should try only the port we connected to before */ |
| @@ -212,7 +211,7 @@ cifs_reconnect(struct TCP_Server_Info *server) | |||
| 212 | atomic_inc(&tcpSesReconnectCount); | 211 | atomic_inc(&tcpSesReconnectCount); |
| 213 | spin_lock(&GlobalMid_Lock); | 212 | spin_lock(&GlobalMid_Lock); |
| 214 | if (server->tcpStatus != CifsExiting) | 213 | if (server->tcpStatus != CifsExiting) |
| 215 | server->tcpStatus = CifsGood; | 214 | server->tcpStatus = CifsNeedNegotiate; |
| 216 | spin_unlock(&GlobalMid_Lock); | 215 | spin_unlock(&GlobalMid_Lock); |
| 217 | } | 216 | } |
| 218 | } | 217 | } |
| @@ -248,24 +247,24 @@ static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize) | |||
| 248 | total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount); | 247 | total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount); |
| 249 | data_in_this_rsp = get_unaligned_le16(&pSMBt->t2_rsp.DataCount); | 248 | data_in_this_rsp = get_unaligned_le16(&pSMBt->t2_rsp.DataCount); |
| 250 | 249 | ||
| 251 | remaining = total_data_size - data_in_this_rsp; | 250 | if (total_data_size == data_in_this_rsp) |
| 252 | |||
| 253 | if (remaining == 0) | ||
| 254 | return 0; | 251 | return 0; |
| 255 | else if (remaining < 0) { | 252 | else if (total_data_size < data_in_this_rsp) { |
| 256 | cFYI(1, "total data %d smaller than data in frame %d", | 253 | cFYI(1, "total data %d smaller than data in frame %d", |
| 257 | total_data_size, data_in_this_rsp); | 254 | total_data_size, data_in_this_rsp); |
| 258 | return -EINVAL; | 255 | return -EINVAL; |
| 259 | } else { | ||
| 260 | cFYI(1, "missing %d bytes from transact2, check next response", | ||
| 261 | remaining); | ||
| 262 | if (total_data_size > maxBufSize) { | ||
| 263 | cERROR(1, "TotalDataSize %d is over maximum buffer %d", | ||
| 264 | total_data_size, maxBufSize); | ||
| 265 | return -EINVAL; | ||
| 266 | } | ||
| 267 | return remaining; | ||
| 268 | } | 256 | } |
| 257 | |||
| 258 | remaining = total_data_size - data_in_this_rsp; | ||
| 259 | |||
| 260 | cFYI(1, "missing %d bytes from transact2, check next response", | ||
| 261 | remaining); | ||
| 262 | if (total_data_size > maxBufSize) { | ||
| 263 | cERROR(1, "TotalDataSize %d is over maximum buffer %d", | ||
| 264 | total_data_size, maxBufSize); | ||
| 265 | return -EINVAL; | ||
| 266 | } | ||
| 267 | return remaining; | ||
| 269 | } | 268 | } |
| 270 | 269 | ||
| 271 | static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB) | 270 | static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB) |
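The check2ndT2 rework compares total_data_size against data_in_this_rsp before ever forming a remainder. With unsigned counts, "total minus data-in-this-frame" can never test as negative, it simply wraps, which is presumably why the compare-first form is safer. A small standalone demo of the pitfall:

    #include <stdio.h>

    int main(void)
    {
        unsigned int total = 100, got = 120;

        /* Subtracting first: the "negative" result wraps to a huge value. */
        unsigned int remaining = total - got;
        printf("total - got = %u (never < 0)\n", remaining);

        /* Comparing first, as the reworked check2ndT2 does, keeps the intent. */
        if (total < got)
            printf("frame carries more data than the total: reject\n");
        else
            printf("missing %u bytes\n", total - got);
        return 0;
    }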
| @@ -421,7 +420,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server) | |||
| 421 | pdu_length = 4; /* enough to get RFC1001 header */ | 420 | pdu_length = 4; /* enough to get RFC1001 header */ |
| 422 | 421 | ||
| 423 | incomplete_rcv: | 422 | incomplete_rcv: |
| 424 | if (echo_retries > 0 && | 423 | if (echo_retries > 0 && server->tcpStatus == CifsGood && |
| 425 | time_after(jiffies, server->lstrp + | 424 | time_after(jiffies, server->lstrp + |
| 426 | (echo_retries * SMB_ECHO_INTERVAL))) { | 425 | (echo_retries * SMB_ECHO_INTERVAL))) { |
| 427 | cERROR(1, "Server %s has not responded in %d seconds. " | 426 | cERROR(1, "Server %s has not responded in %d seconds. " |
| @@ -881,7 +880,8 @@ cifs_parse_mount_options(char *options, const char *devname, | |||
| 881 | /* null user, ie anonymous, authentication */ | 880 | /* null user, ie anonymous, authentication */ |
| 882 | vol->nullauth = 1; | 881 | vol->nullauth = 1; |
| 883 | } | 882 | } |
| 884 | if (strnlen(value, 200) < 200) { | 883 | if (strnlen(value, MAX_USERNAME_SIZE) < |
| 884 | MAX_USERNAME_SIZE) { | ||
| 885 | vol->username = value; | 885 | vol->username = value; |
| 886 | } else { | 886 | } else { |
| 887 | printk(KERN_WARNING "CIFS: username too long\n"); | 887 | printk(KERN_WARNING "CIFS: username too long\n"); |
| @@ -1472,7 +1472,7 @@ srcip_matches(struct sockaddr *srcaddr, struct sockaddr *rhs) | |||
| 1472 | static bool | 1472 | static bool |
| 1473 | match_port(struct TCP_Server_Info *server, struct sockaddr *addr) | 1473 | match_port(struct TCP_Server_Info *server, struct sockaddr *addr) |
| 1474 | { | 1474 | { |
| 1475 | unsigned short int port, *sport; | 1475 | __be16 port, *sport; |
| 1476 | 1476 | ||
| 1477 | switch (addr->sa_family) { | 1477 | switch (addr->sa_family) { |
| 1478 | case AF_INET: | 1478 | case AF_INET: |
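This hunk (and the generic_ip_connect/ip_connect hunks further down) retype the port as __be16, telling sparse explicitly that sockaddr ports are stored in network byte order and must be compared in that order. The same rule in a userspace sketch using the BSD socket API:

    #include <arpa/inet.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        struct sockaddr_in sin;
        const unsigned short CIFS_PORT = 445;

        memset(&sin, 0, sizeof(sin));
        sin.sin_family = AF_INET;
        sin.sin_port = htons(CIFS_PORT);      /* stored in network byte order */

        /* Comparing against the raw host value only "works" on big-endian
         * hosts; the comparison must be done in one consistent byte order. */
        printf("raw compare:   %s\n", sin.sin_port == CIFS_PORT ? "match" : "no match");
        printf("htons compare: %s\n", sin.sin_port == htons(CIFS_PORT) ? "match" : "no match");
        return 0;
    }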
| @@ -1765,6 +1765,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info) | |||
| 1765 | module_put(THIS_MODULE); | 1765 | module_put(THIS_MODULE); |
| 1766 | goto out_err_crypto_release; | 1766 | goto out_err_crypto_release; |
| 1767 | } | 1767 | } |
| 1768 | tcp_ses->tcpStatus = CifsNeedNegotiate; | ||
| 1768 | 1769 | ||
| 1769 | /* thread spawned, put it on the list */ | 1770 | /* thread spawned, put it on the list */ |
| 1770 | spin_lock(&cifs_tcp_ses_lock); | 1771 | spin_lock(&cifs_tcp_ses_lock); |
| @@ -1808,7 +1809,9 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol) | |||
| 1808 | break; | 1809 | break; |
| 1809 | default: | 1810 | default: |
| 1810 | /* anything else takes username/password */ | 1811 | /* anything else takes username/password */ |
| 1811 | if (strncmp(ses->userName, vol->username, | 1812 | if (ses->user_name == NULL) |
| 1813 | continue; | ||
| 1814 | if (strncmp(ses->user_name, vol->username, | ||
| 1812 | MAX_USERNAME_SIZE)) | 1815 | MAX_USERNAME_SIZE)) |
| 1813 | continue; | 1816 | continue; |
| 1814 | if (strlen(vol->username) != 0 && | 1817 | if (strlen(vol->username) != 0 && |
| @@ -1851,6 +1854,8 @@ cifs_put_smb_ses(struct cifsSesInfo *ses) | |||
| 1851 | cifs_put_tcp_session(server); | 1854 | cifs_put_tcp_session(server); |
| 1852 | } | 1855 | } |
| 1853 | 1856 | ||
| 1857 | static bool warned_on_ntlm; /* globals init to false automatically */ | ||
| 1858 | |||
| 1854 | static struct cifsSesInfo * | 1859 | static struct cifsSesInfo * |
| 1855 | cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) | 1860 | cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) |
| 1856 | { | 1861 | { |
| @@ -1906,9 +1911,11 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) | |||
| 1906 | else | 1911 | else |
| 1907 | sprintf(ses->serverName, "%pI4", &addr->sin_addr); | 1912 | sprintf(ses->serverName, "%pI4", &addr->sin_addr); |
| 1908 | 1913 | ||
| 1909 | if (volume_info->username) | 1914 | if (volume_info->username) { |
| 1910 | strncpy(ses->userName, volume_info->username, | 1915 | ses->user_name = kstrdup(volume_info->username, GFP_KERNEL); |
| 1911 | MAX_USERNAME_SIZE); | 1916 | if (!ses->user_name) |
| 1917 | goto get_ses_fail; | ||
| 1918 | } | ||
| 1912 | 1919 | ||
| 1913 | /* volume_info->password freed at unmount */ | 1920 | /* volume_info->password freed at unmount */ |
| 1914 | if (volume_info->password) { | 1921 | if (volume_info->password) { |
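Here the fixed userName[] array gives way to a kstrdup()'d ses->user_name (freed later in sesInfoFree), so long usernames are no longer silently truncated and a NULL pointer cleanly represents a null-user mount. A userspace sketch of the same own-a-heap-copy pattern; struct session and its helpers are illustrative, not CIFS code:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct session {
        char *user_name;   /* heap copy, owned by the session */
    };

    static struct session *session_create(const char *username)
    {
        struct session *ses = calloc(1, sizeof(*ses));

        if (!ses)
            return NULL;
        if (username) {
            ses->user_name = strdup(username);   /* analogue of kstrdup() */
            if (!ses->user_name) {
                free(ses);
                return NULL;
            }
        }
        return ses;   /* user_name stays NULL for anonymous ("null user") mounts */
    }

    static void session_free(struct session *ses)
    {
        if (!ses)
            return;
        free(ses->user_name);   /* free(NULL) is a no-op, like kfree(NULL) */
        free(ses);
    }

    int main(void)
    {
        struct session *ses = session_create("someuser");

        if (ses)
            printf("user=%s\n", ses->user_name ? ses->user_name : "(null user)");
        session_free(ses);
        return 0;
    }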
| @@ -1923,6 +1930,15 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) | |||
| 1923 | } | 1930 | } |
| 1924 | ses->cred_uid = volume_info->cred_uid; | 1931 | ses->cred_uid = volume_info->cred_uid; |
| 1925 | ses->linux_uid = volume_info->linux_uid; | 1932 | ses->linux_uid = volume_info->linux_uid; |
| 1933 | |||
| 1934 | /* ntlmv2 is much stronger than ntlm security, and has been broadly | ||
| 1935 | supported for many years, time to update default security mechanism */ | ||
| 1936 | if ((volume_info->secFlg == 0) && warned_on_ntlm == false) { | ||
| 1937 | warned_on_ntlm = true; | ||
| 1938 | cERROR(1, "default security mechanism requested. The default " | ||
| 1939 | "security mechanism will be upgraded from ntlm to " | ||
| 1940 | "ntlmv2 in kernel release 2.6.41"); | ||
| 1941 | } | ||
| 1926 | ses->overrideSecFlg = volume_info->secFlg; | 1942 | ses->overrideSecFlg = volume_info->secFlg; |
| 1927 | 1943 | ||
| 1928 | mutex_lock(&ses->session_mutex); | 1944 | mutex_lock(&ses->session_mutex); |
| @@ -2276,7 +2292,7 @@ static int | |||
| 2276 | generic_ip_connect(struct TCP_Server_Info *server) | 2292 | generic_ip_connect(struct TCP_Server_Info *server) |
| 2277 | { | 2293 | { |
| 2278 | int rc = 0; | 2294 | int rc = 0; |
| 2279 | unsigned short int sport; | 2295 | __be16 sport; |
| 2280 | int slen, sfamily; | 2296 | int slen, sfamily; |
| 2281 | struct socket *socket = server->ssocket; | 2297 | struct socket *socket = server->ssocket; |
| 2282 | struct sockaddr *saddr; | 2298 | struct sockaddr *saddr; |
| @@ -2361,7 +2377,7 @@ generic_ip_connect(struct TCP_Server_Info *server) | |||
| 2361 | static int | 2377 | static int |
| 2362 | ip_connect(struct TCP_Server_Info *server) | 2378 | ip_connect(struct TCP_Server_Info *server) |
| 2363 | { | 2379 | { |
| 2364 | unsigned short int *sport; | 2380 | __be16 *sport; |
| 2365 | struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr; | 2381 | struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr; |
| 2366 | struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; | 2382 | struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; |
| 2367 | 2383 | ||
| @@ -2826,7 +2842,7 @@ try_mount_again: | |||
| 2826 | 2842 | ||
| 2827 | remote_path_check: | 2843 | remote_path_check: |
| 2828 | /* check if a whole path (including prepath) is not remote */ | 2844 | /* check if a whole path (including prepath) is not remote */ |
| 2829 | if (!rc && cifs_sb->prepathlen && tcon) { | 2845 | if (!rc && tcon) { |
| 2830 | /* build_path_to_root works only when we have a valid tcon */ | 2846 | /* build_path_to_root works only when we have a valid tcon */ |
| 2831 | full_path = cifs_build_path_to_root(cifs_sb, tcon); | 2847 | full_path = cifs_build_path_to_root(cifs_sb, tcon); |
| 2832 | if (full_path == NULL) { | 2848 | if (full_path == NULL) { |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index c27d236738fc..faf59529e847 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
| @@ -575,8 +575,10 @@ reopen_error_exit: | |||
| 575 | 575 | ||
| 576 | int cifs_close(struct inode *inode, struct file *file) | 576 | int cifs_close(struct inode *inode, struct file *file) |
| 577 | { | 577 | { |
| 578 | cifsFileInfo_put(file->private_data); | 578 | if (file->private_data != NULL) { |
| 579 | file->private_data = NULL; | 579 | cifsFileInfo_put(file->private_data); |
| 580 | file->private_data = NULL; | ||
| 581 | } | ||
| 580 | 582 | ||
| 581 | /* return code from the ->release op is always ignored */ | 583 | /* return code from the ->release op is always ignored */ |
| 582 | return 0; | 584 | return 0; |
| @@ -970,6 +972,9 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file, | |||
| 970 | total_written += bytes_written) { | 972 | total_written += bytes_written) { |
| 971 | rc = -EAGAIN; | 973 | rc = -EAGAIN; |
| 972 | while (rc == -EAGAIN) { | 974 | while (rc == -EAGAIN) { |
| 975 | struct kvec iov[2]; | ||
| 976 | unsigned int len; | ||
| 977 | |||
| 973 | if (open_file->invalidHandle) { | 978 | if (open_file->invalidHandle) { |
| 974 | /* we could deadlock if we called | 979 | /* we could deadlock if we called |
| 975 | filemap_fdatawait from here so tell | 980 | filemap_fdatawait from here so tell |
| @@ -979,31 +984,14 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file, | |||
| 979 | if (rc != 0) | 984 | if (rc != 0) |
| 980 | break; | 985 | break; |
| 981 | } | 986 | } |
| 982 | if (experimEnabled || (pTcon->ses->server && | 987 | |
| 983 | ((pTcon->ses->server->secMode & | 988 | len = min((size_t)cifs_sb->wsize, |
| 984 | (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) | 989 | write_size - total_written); |
| 985 | == 0))) { | 990 | /* iov[0] is reserved for smb header */ |
| 986 | struct kvec iov[2]; | 991 | iov[1].iov_base = (char *)write_data + total_written; |
| 987 | unsigned int len; | 992 | iov[1].iov_len = len; |
| 988 | 993 | rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid, len, | |
| 989 | len = min((size_t)cifs_sb->wsize, | 994 | *poffset, &bytes_written, iov, 1, 0); |
| 990 | write_size - total_written); | ||
| 991 | /* iov[0] is reserved for smb header */ | ||
| 992 | iov[1].iov_base = (char *)write_data + | ||
| 993 | total_written; | ||
| 994 | iov[1].iov_len = len; | ||
| 995 | rc = CIFSSMBWrite2(xid, pTcon, | ||
| 996 | open_file->netfid, len, | ||
| 997 | *poffset, &bytes_written, | ||
| 998 | iov, 1, 0); | ||
| 999 | } else | ||
| 1000 | rc = CIFSSMBWrite(xid, pTcon, | ||
| 1001 | open_file->netfid, | ||
| 1002 | min_t(const int, cifs_sb->wsize, | ||
| 1003 | write_size - total_written), | ||
| 1004 | *poffset, &bytes_written, | ||
| 1005 | write_data + total_written, | ||
| 1006 | NULL, 0); | ||
| 1007 | } | 995 | } |
| 1008 | if (rc || (bytes_written == 0)) { | 996 | if (rc || (bytes_written == 0)) { |
| 1009 | if (total_written) | 997 | if (total_written) |
| @@ -1240,12 +1228,6 @@ static int cifs_writepages(struct address_space *mapping, | |||
| 1240 | } | 1228 | } |
| 1241 | 1229 | ||
| 1242 | tcon = tlink_tcon(open_file->tlink); | 1230 | tcon = tlink_tcon(open_file->tlink); |
| 1243 | if (!experimEnabled && tcon->ses->server->secMode & | ||
| 1244 | (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { | ||
| 1245 | cifsFileInfo_put(open_file); | ||
| 1246 | kfree(iov); | ||
| 1247 | return generic_writepages(mapping, wbc); | ||
| 1248 | } | ||
| 1249 | cifsFileInfo_put(open_file); | 1231 | cifsFileInfo_put(open_file); |
| 1250 | 1232 | ||
| 1251 | xid = GetXid(); | 1233 | xid = GetXid(); |
| @@ -1980,6 +1962,24 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size, | |||
| 1980 | return total_read; | 1962 | return total_read; |
| 1981 | } | 1963 | } |
| 1982 | 1964 | ||
| 1965 | /* | ||
| 1966 | * If the page is mmap'ed into a process' page tables, then we need to make | ||
| 1967 | * sure that it doesn't change while being written back. | ||
| 1968 | */ | ||
| 1969 | static int | ||
| 1970 | cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
| 1971 | { | ||
| 1972 | struct page *page = vmf->page; | ||
| 1973 | |||
| 1974 | lock_page(page); | ||
| 1975 | return VM_FAULT_LOCKED; | ||
| 1976 | } | ||
| 1977 | |||
| 1978 | static struct vm_operations_struct cifs_file_vm_ops = { | ||
| 1979 | .fault = filemap_fault, | ||
| 1980 | .page_mkwrite = cifs_page_mkwrite, | ||
| 1981 | }; | ||
| 1982 | |||
| 1983 | int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma) | 1983 | int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma) |
| 1984 | { | 1984 | { |
| 1985 | int rc, xid; | 1985 | int rc, xid; |
| @@ -1991,6 +1991,8 @@ int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma) | |||
| 1991 | cifs_invalidate_mapping(inode); | 1991 | cifs_invalidate_mapping(inode); |
| 1992 | 1992 | ||
| 1993 | rc = generic_file_mmap(file, vma); | 1993 | rc = generic_file_mmap(file, vma); |
| 1994 | if (rc == 0) | ||
| 1995 | vma->vm_ops = &cifs_file_vm_ops; | ||
| 1994 | FreeXid(xid); | 1996 | FreeXid(xid); |
| 1995 | return rc; | 1997 | return rc; |
| 1996 | } | 1998 | } |
| @@ -2007,6 +2009,8 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma) | |||
| 2007 | return rc; | 2009 | return rc; |
| 2008 | } | 2010 | } |
| 2009 | rc = generic_file_mmap(file, vma); | 2011 | rc = generic_file_mmap(file, vma); |
| 2012 | if (rc == 0) | ||
| 2013 | vma->vm_ops = &cifs_file_vm_ops; | ||
| 2010 | FreeXid(xid); | 2014 | FreeXid(xid); |
| 2011 | return rc; | 2015 | return rc; |
| 2012 | } | 2016 | } |
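The new cifs_page_mkwrite locks the faulting page, and both mmap paths now install cifs_file_vm_ops, so a page cannot change underneath a writeback once a process has mapped the file. For context only, this hedged userspace sketch shows the write-through-a-shared-mapping pattern that ->page_mkwrite intercepts in the kernel (the path is illustrative):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        const char *path = "/tmp/mkwrite-demo";    /* illustrative path */
        int fd = open(path, O_RDWR | O_CREAT, 0600);

        if (fd < 0 || ftruncate(fd, 4096) < 0) {
            perror("setup");
            return 1;
        }

        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED) {
            perror("mmap");
            return 1;
        }

        /* The store into the shared mapping is what dirties the file page
         * and, in the kernel, goes through the ->page_mkwrite hook. */
        strcpy(p, "dirtied through the mapping\n");

        /* Ask for the dirty page to be written back to the file. */
        if (msync(p, 4096, MS_SYNC) < 0)
            perror("msync");

        munmap(p, 4096);
        close(fd);
        return 0;
    }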
diff --git a/fs/cifs/link.c b/fs/cifs/link.c index e8804d373404..ce417a9764a3 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c | |||
| @@ -239,7 +239,7 @@ CIFSQueryMFSymLink(const int xid, struct cifsTconInfo *tcon, | |||
| 239 | if (rc != 0) | 239 | if (rc != 0) |
| 240 | return rc; | 240 | return rc; |
| 241 | 241 | ||
| 242 | if (file_info.EndOfFile != CIFS_MF_SYMLINK_FILE_SIZE) { | 242 | if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) { |
| 243 | CIFSSMBClose(xid, tcon, netfid); | 243 | CIFSSMBClose(xid, tcon, netfid); |
| 244 | /* it's not a symlink */ | 244 | /* it's not a symlink */ |
| 245 | return -EINVAL; | 245 | return -EINVAL; |
| @@ -316,7 +316,7 @@ CIFSCheckMFSymlink(struct cifs_fattr *fattr, | |||
| 316 | if (rc != 0) | 316 | if (rc != 0) |
| 317 | goto out; | 317 | goto out; |
| 318 | 318 | ||
| 319 | if (file_info.EndOfFile != CIFS_MF_SYMLINK_FILE_SIZE) { | 319 | if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) { |
| 320 | CIFSSMBClose(xid, pTcon, netfid); | 320 | CIFSSMBClose(xid, pTcon, netfid); |
| 321 | /* it's not a symlink */ | 321 | /* it's not a symlink */ |
| 322 | goto out; | 322 | goto out; |
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 2a930a752a78..0c684ae4c071 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c | |||
| @@ -100,6 +100,7 @@ sesInfoFree(struct cifsSesInfo *buf_to_free) | |||
| 100 | memset(buf_to_free->password, 0, strlen(buf_to_free->password)); | 100 | memset(buf_to_free->password, 0, strlen(buf_to_free->password)); |
| 101 | kfree(buf_to_free->password); | 101 | kfree(buf_to_free->password); |
| 102 | } | 102 | } |
| 103 | kfree(buf_to_free->user_name); | ||
| 103 | kfree(buf_to_free->domainName); | 104 | kfree(buf_to_free->domainName); |
| 104 | kfree(buf_to_free); | 105 | kfree(buf_to_free); |
| 105 | } | 106 | } |
| @@ -520,7 +521,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv) | |||
| 520 | (struct smb_com_transaction_change_notify_rsp *)buf; | 521 | (struct smb_com_transaction_change_notify_rsp *)buf; |
| 521 | struct file_notify_information *pnotify; | 522 | struct file_notify_information *pnotify; |
| 522 | __u32 data_offset = 0; | 523 | __u32 data_offset = 0; |
| 523 | if (pSMBr->ByteCount > sizeof(struct file_notify_information)) { | 524 | if (get_bcc_le(buf) > sizeof(struct file_notify_information)) { |
| 524 | data_offset = le32_to_cpu(pSMBr->DataOffset); | 525 | data_offset = le32_to_cpu(pSMBr->DataOffset); |
| 525 | 526 | ||
| 526 | pnotify = (struct file_notify_information *) | 527 | pnotify = (struct file_notify_information *) |
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 16765703131b..f6728eb6f4b9 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c | |||
| @@ -219,12 +219,12 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses, | |||
| 219 | bcc_ptr++; | 219 | bcc_ptr++; |
| 220 | } */ | 220 | } */ |
| 221 | /* copy user */ | 221 | /* copy user */ |
| 222 | if (ses->userName == NULL) { | 222 | if (ses->user_name == NULL) { |
| 223 | /* null user mount */ | 223 | /* null user mount */ |
| 224 | *bcc_ptr = 0; | 224 | *bcc_ptr = 0; |
| 225 | *(bcc_ptr+1) = 0; | 225 | *(bcc_ptr+1) = 0; |
| 226 | } else { | 226 | } else { |
| 227 | bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName, | 227 | bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->user_name, |
| 228 | MAX_USERNAME_SIZE, nls_cp); | 228 | MAX_USERNAME_SIZE, nls_cp); |
| 229 | } | 229 | } |
| 230 | bcc_ptr += 2 * bytes_ret; | 230 | bcc_ptr += 2 * bytes_ret; |
| @@ -244,12 +244,11 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses, | |||
| 244 | /* copy user */ | 244 | /* copy user */ |
| 245 | /* BB what about null user mounts - check that we do this BB */ | 245 | /* BB what about null user mounts - check that we do this BB */ |
| 246 | /* copy user */ | 246 | /* copy user */ |
| 247 | if (ses->userName == NULL) { | 247 | if (ses->user_name != NULL) |
| 248 | /* BB what about null user mounts - check that we do this BB */ | 248 | strncpy(bcc_ptr, ses->user_name, MAX_USERNAME_SIZE); |
| 249 | } else { | 249 | /* else null user mount */ |
| 250 | strncpy(bcc_ptr, ses->userName, MAX_USERNAME_SIZE); | 250 | |
| 251 | } | 251 | bcc_ptr += strnlen(ses->user_name, MAX_USERNAME_SIZE); |
| 252 | bcc_ptr += strnlen(ses->userName, MAX_USERNAME_SIZE); | ||
| 253 | *bcc_ptr = 0; | 252 | *bcc_ptr = 0; |
| 254 | bcc_ptr++; /* account for null termination */ | 253 | bcc_ptr++; /* account for null termination */ |
| 255 | 254 | ||
| @@ -405,8 +404,8 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, | |||
| 405 | /* BB spec says that if AvId field of MsvAvTimestamp is populated then | 404 | /* BB spec says that if AvId field of MsvAvTimestamp is populated then |
| 406 | we must set the MIC field of the AUTHENTICATE_MESSAGE */ | 405 | we must set the MIC field of the AUTHENTICATE_MESSAGE */ |
| 407 | ses->ntlmssp->server_flags = le32_to_cpu(pblob->NegotiateFlags); | 406 | ses->ntlmssp->server_flags = le32_to_cpu(pblob->NegotiateFlags); |
| 408 | tioffset = cpu_to_le16(pblob->TargetInfoArray.BufferOffset); | 407 | tioffset = le32_to_cpu(pblob->TargetInfoArray.BufferOffset); |
| 409 | tilen = cpu_to_le16(pblob->TargetInfoArray.Length); | 408 | tilen = le16_to_cpu(pblob->TargetInfoArray.Length); |
| 410 | if (tilen) { | 409 | if (tilen) { |
| 411 | ses->auth_key.response = kmalloc(tilen, GFP_KERNEL); | 410 | ses->auth_key.response = kmalloc(tilen, GFP_KERNEL); |
| 412 | if (!ses->auth_key.response) { | 411 | if (!ses->auth_key.response) { |
| @@ -523,14 +522,14 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer, | |||
| 523 | tmp += len; | 522 | tmp += len; |
| 524 | } | 523 | } |
| 525 | 524 | ||
| 526 | if (ses->userName == NULL) { | 525 | if (ses->user_name == NULL) { |
| 527 | sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer); | 526 | sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer); |
| 528 | sec_blob->UserName.Length = 0; | 527 | sec_blob->UserName.Length = 0; |
| 529 | sec_blob->UserName.MaximumLength = 0; | 528 | sec_blob->UserName.MaximumLength = 0; |
| 530 | tmp += 2; | 529 | tmp += 2; |
| 531 | } else { | 530 | } else { |
| 532 | int len; | 531 | int len; |
| 533 | len = cifs_strtoUCS((__le16 *)tmp, ses->userName, | 532 | len = cifs_strtoUCS((__le16 *)tmp, ses->user_name, |
| 534 | MAX_USERNAME_SIZE, nls_cp); | 533 | MAX_USERNAME_SIZE, nls_cp); |
| 535 | len *= 2; /* unicode is 2 bytes each */ | 534 | len *= 2; /* unicode is 2 bytes each */ |
| 536 | sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer); | 535 | sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer); |
diff --git a/fs/dcache.c b/fs/dcache.c index ad25c4cec7d5..129a35730994 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
| @@ -2131,7 +2131,7 @@ EXPORT_SYMBOL(d_rehash); | |||
| 2131 | */ | 2131 | */ |
| 2132 | void dentry_update_name_case(struct dentry *dentry, struct qstr *name) | 2132 | void dentry_update_name_case(struct dentry *dentry, struct qstr *name) |
| 2133 | { | 2133 | { |
| 2134 | BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex)); | 2134 | BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex)); |
| 2135 | BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */ | 2135 | BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */ |
| 2136 | 2136 | ||
| 2137 | spin_lock(&dentry->d_lock); | 2137 | spin_lock(&dentry->d_lock); |
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index b5c2f3c97d71..68b2e43d7c35 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c | |||
| @@ -3291,7 +3291,7 @@ static int ext3_writepage_trans_blocks(struct inode *inode) | |||
| 3291 | if (ext3_should_journal_data(inode)) | 3291 | if (ext3_should_journal_data(inode)) |
| 3292 | ret = 3 * (bpp + indirects) + 2; | 3292 | ret = 3 * (bpp + indirects) + 2; |
| 3293 | else | 3293 | else |
| 3294 | ret = 2 * (bpp + indirects) + 2; | 3294 | ret = 2 * (bpp + indirects) + indirects + 2; |
| 3295 | 3295 | ||
| 3296 | #ifdef CONFIG_QUOTA | 3296 | #ifdef CONFIG_QUOTA |
| 3297 | /* We know that structure was already allocated during dquot_initialize so | 3297 | /* We know that structure was already allocated during dquot_initialize so |
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h index e25e99bf7ee1..d0f53538a57f 100644 --- a/fs/ext4/ext4_jbd2.h +++ b/fs/ext4/ext4_jbd2.h | |||
| @@ -86,8 +86,8 @@ | |||
| 86 | 86 | ||
| 87 | #ifdef CONFIG_QUOTA | 87 | #ifdef CONFIG_QUOTA |
| 88 | /* Amount of blocks needed for quota update - we know that the structure was | 88 | /* Amount of blocks needed for quota update - we know that the structure was |
| 89 | * allocated so we need to update only inode+data */ | 89 | * allocated so we need to update only data block */ |
| 90 | #define EXT4_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 2 : 0) | 90 | #define EXT4_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 1 : 0) |
| 91 | /* Amount of blocks needed for quota insert/delete - we do some block writes | 91 | /* Amount of blocks needed for quota insert/delete - we do some block writes |
| 92 | * but inode, sb and group updates are done only once */ | 92 | * but inode, sb and group updates are done only once */ |
| 93 | #define EXT4_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\ | 93 | #define EXT4_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\ |
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c index 4673bc05274f..e9473cbe80df 100644 --- a/fs/ext4/fsync.c +++ b/fs/ext4/fsync.c | |||
| @@ -125,9 +125,11 @@ extern int ext4_flush_completed_IO(struct inode *inode) | |||
| 125 | * the parent directory's parent as well, and so on recursively, if | 125 | * the parent directory's parent as well, and so on recursively, if |
| 126 | * they are also freshly created. | 126 | * they are also freshly created. |
| 127 | */ | 127 | */ |
| 128 | static void ext4_sync_parent(struct inode *inode) | 128 | static int ext4_sync_parent(struct inode *inode) |
| 129 | { | 129 | { |
| 130 | struct writeback_control wbc; | ||
| 130 | struct dentry *dentry = NULL; | 131 | struct dentry *dentry = NULL; |
| 132 | int ret = 0; | ||
| 131 | 133 | ||
| 132 | while (inode && ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) { | 134 | while (inode && ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) { |
| 133 | ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY); | 135 | ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY); |
| @@ -136,8 +138,17 @@ static void ext4_sync_parent(struct inode *inode) | |||
| 136 | if (!dentry || !dentry->d_parent || !dentry->d_parent->d_inode) | 138 | if (!dentry || !dentry->d_parent || !dentry->d_parent->d_inode) |
| 137 | break; | 139 | break; |
| 138 | inode = dentry->d_parent->d_inode; | 140 | inode = dentry->d_parent->d_inode; |
| 139 | sync_mapping_buffers(inode->i_mapping); | 141 | ret = sync_mapping_buffers(inode->i_mapping); |
| 142 | if (ret) | ||
| 143 | break; | ||
| 144 | memset(&wbc, 0, sizeof(wbc)); | ||
| 145 | wbc.sync_mode = WB_SYNC_ALL; | ||
| 146 | wbc.nr_to_write = 0; /* only write out the inode */ | ||
| 147 | ret = sync_inode(inode, &wbc); | ||
| 148 | if (ret) | ||
| 149 | break; | ||
| 140 | } | 150 | } |
| 151 | return ret; | ||
| 141 | } | 152 | } |
| 142 | 153 | ||
| 143 | /* | 154 | /* |
| @@ -176,7 +187,7 @@ int ext4_sync_file(struct file *file, int datasync) | |||
| 176 | if (!journal) { | 187 | if (!journal) { |
| 177 | ret = generic_file_fsync(file, datasync); | 188 | ret = generic_file_fsync(file, datasync); |
| 178 | if (!ret && !list_empty(&inode->i_dentry)) | 189 | if (!ret && !list_empty(&inode->i_dentry)) |
| 179 | ext4_sync_parent(inode); | 190 | ret = ext4_sync_parent(inode); |
| 180 | goto out; | 191 | goto out; |
| 181 | } | 192 | } |
| 182 | 193 | ||
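ext4_sync_parent now returns errors and also writes the parent inode itself, via a WB_SYNC_ALL writeback_control with nr_to_write = 0, instead of only syncing the parent's buffers. The durability concern it serves, a freshly created name surviving a crash, is the same one userspace addresses by fsyncing the parent directory, roughly like this sketch (paths are illustrative):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Create a file and make both the data and the directory entry durable. */
    int main(void)
    {
        int fd = open("/tmp/newfile", O_WRONLY | O_CREAT | O_TRUNC, 0644);

        if (fd < 0 || write(fd, "data\n", 5) < 0 || fsync(fd) < 0) {
            perror("file");
            return 1;
        }
        close(fd);

        /* Without this, a crash can leave the file's blocks on disk but the
         * name missing from the directory. */
        int dirfd = open("/tmp", O_RDONLY | O_DIRECTORY);
        if (dirfd < 0 || fsync(dirfd) < 0) {
            perror("dir");
            return 1;
        }
        close(dirfd);
        return 0;
    }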
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index ad8e303c0d29..f2fa5e8a582c 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
| @@ -2502,6 +2502,7 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock, | |||
| 2502 | * for partial write. | 2502 | * for partial write. |
| 2503 | */ | 2503 | */ |
| 2504 | set_buffer_new(bh); | 2504 | set_buffer_new(bh); |
| 2505 | set_buffer_mapped(bh); | ||
| 2505 | } | 2506 | } |
| 2506 | return 0; | 2507 | return 0; |
| 2507 | } | 2508 | } |
| @@ -4429,8 +4430,8 @@ void ext4_truncate(struct inode *inode) | |||
| 4429 | Indirect chain[4]; | 4430 | Indirect chain[4]; |
| 4430 | Indirect *partial; | 4431 | Indirect *partial; |
| 4431 | __le32 nr = 0; | 4432 | __le32 nr = 0; |
| 4432 | int n; | 4433 | int n = 0; |
| 4433 | ext4_lblk_t last_block; | 4434 | ext4_lblk_t last_block, max_block; |
| 4434 | unsigned blocksize = inode->i_sb->s_blocksize; | 4435 | unsigned blocksize = inode->i_sb->s_blocksize; |
| 4435 | 4436 | ||
| 4436 | trace_ext4_truncate_enter(inode); | 4437 | trace_ext4_truncate_enter(inode); |
| @@ -4455,14 +4456,18 @@ void ext4_truncate(struct inode *inode) | |||
| 4455 | 4456 | ||
| 4456 | last_block = (inode->i_size + blocksize-1) | 4457 | last_block = (inode->i_size + blocksize-1) |
| 4457 | >> EXT4_BLOCK_SIZE_BITS(inode->i_sb); | 4458 | >> EXT4_BLOCK_SIZE_BITS(inode->i_sb); |
| 4459 | max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1) | ||
| 4460 | >> EXT4_BLOCK_SIZE_BITS(inode->i_sb); | ||
| 4458 | 4461 | ||
| 4459 | if (inode->i_size & (blocksize - 1)) | 4462 | if (inode->i_size & (blocksize - 1)) |
| 4460 | if (ext4_block_truncate_page(handle, mapping, inode->i_size)) | 4463 | if (ext4_block_truncate_page(handle, mapping, inode->i_size)) |
| 4461 | goto out_stop; | 4464 | goto out_stop; |
| 4462 | 4465 | ||
| 4463 | n = ext4_block_to_path(inode, last_block, offsets, NULL); | 4466 | if (last_block != max_block) { |
| 4464 | if (n == 0) | 4467 | n = ext4_block_to_path(inode, last_block, offsets, NULL); |
| 4465 | goto out_stop; /* error */ | 4468 | if (n == 0) |
| 4469 | goto out_stop; /* error */ | ||
| 4470 | } | ||
| 4466 | 4471 | ||
| 4467 | /* | 4472 | /* |
| 4468 | * OK. This truncate is going to happen. We add the inode to the | 4473 | * OK. This truncate is going to happen. We add the inode to the |
| @@ -4493,7 +4498,13 @@ void ext4_truncate(struct inode *inode) | |||
| 4493 | */ | 4498 | */ |
| 4494 | ei->i_disksize = inode->i_size; | 4499 | ei->i_disksize = inode->i_size; |
| 4495 | 4500 | ||
| 4496 | if (n == 1) { /* direct blocks */ | 4501 | if (last_block == max_block) { |
| 4502 | /* | ||
| 4503 | * It is unnecessary to free any data blocks if last_block is | ||
| 4504 | * equal to the indirect block limit. | ||
| 4505 | */ | ||
| 4506 | goto out_unlock; | ||
| 4507 | } else if (n == 1) { /* direct blocks */ | ||
| 4497 | ext4_free_data(handle, inode, NULL, i_data+offsets[0], | 4508 | ext4_free_data(handle, inode, NULL, i_data+offsets[0], |
| 4498 | i_data + EXT4_NDIR_BLOCKS); | 4509 | i_data + EXT4_NDIR_BLOCKS); |
| 4499 | goto do_indirects; | 4510 | goto do_indirects; |
| @@ -4553,6 +4564,7 @@ do_indirects: | |||
| 4553 | ; | 4564 | ; |
| 4554 | } | 4565 | } |
| 4555 | 4566 | ||
| 4567 | out_unlock: | ||
| 4556 | up_write(&ei->i_data_sem); | 4568 | up_write(&ei->i_data_sem); |
| 4557 | inode->i_mtime = inode->i_ctime = ext4_current_time(inode); | 4569 | inode->i_mtime = inode->i_ctime = ext4_current_time(inode); |
| 4558 | ext4_mark_inode_dirty(handle, inode); | 4570 | ext4_mark_inode_dirty(handle, inode); |
| @@ -5398,13 +5410,12 @@ static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks, | |||
| 5398 | /* if nrblocks are contiguous */ | 5410 | /* if nrblocks are contiguous */ |
| 5399 | if (chunk) { | 5411 | if (chunk) { |
| 5400 | /* | 5412 | /* |
| 5401 | * With N contiguous data blocks, it need at most | 5413 | * With N contiguous data blocks, we need at most |
| 5402 | * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks | 5414 | * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks, |
| 5403 | * 2 dindirect blocks | 5415 | * 2 dindirect blocks, and 1 tindirect block |
| 5404 | * 1 tindirect block | ||
| 5405 | */ | 5416 | */ |
| 5406 | indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb); | 5417 | return DIV_ROUND_UP(nrblocks, |
| 5407 | return indirects + 3; | 5418 | EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4; |
| 5408 | } | 5419 | } |
| 5409 | /* | 5420 | /* |
| 5410 | * if nrblocks are not contiguous, worse case, each block touch | 5421 | * if nrblocks are not contiguous, worse case, each block touch |
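The contiguous case of ext4_indirect_trans_blocks now reserves DIV_ROUND_UP(nrblocks, EXT4_ADDR_PER_BLOCK(sb)) + 4 blocks, i.e. a rounded-up indirect-block count plus the extra indirect, two dindirect and one tindirect block named in the comment. A tiny standalone illustration of that arithmetic (1024 addresses per block assumes 4K blocks holding 4-byte block addresses):

    #include <stdio.h>

    /* Same definition as the kernel's DIV_ROUND_UP in <linux/kernel.h>. */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int addrs_per_block = 1024;
        unsigned int nrblocks[] = { 1, 1024, 1025, 4096 };

        for (unsigned int i = 0; i < 4; i++) {
            unsigned int ind = DIV_ROUND_UP(nrblocks[i], addrs_per_block);
            /* + 1 extra indirect, 2 dindirect, 1 tindirect in the worst case */
            printf("%4u contiguous data blocks -> reserve %u metadata blocks\n",
                   nrblocks[i], ind + 4);
        }
        return 0;
    }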
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 056474b7b8e0..8553dfb310af 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
| @@ -242,27 +242,44 @@ static void ext4_put_nojournal(handle_t *handle) | |||
| 242 | * journal_end calls result in the superblock being marked dirty, so | 242 | * journal_end calls result in the superblock being marked dirty, so |
| 243 | * that sync() will call the filesystem's write_super callback if | 243 | * that sync() will call the filesystem's write_super callback if |
| 244 | * appropriate. | 244 | * appropriate. |
| 245 | * | ||
| 246 | * To avoid j_barrier hold in userspace when a user calls freeze(), | ||
| 247 | * ext4 prevents a new handle from being started by s_frozen, which | ||
| 248 | * is in an upper layer. | ||
| 245 | */ | 249 | */ |
| 246 | handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks) | 250 | handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks) |
| 247 | { | 251 | { |
| 248 | journal_t *journal; | 252 | journal_t *journal; |
| 253 | handle_t *handle; | ||
| 249 | 254 | ||
| 250 | if (sb->s_flags & MS_RDONLY) | 255 | if (sb->s_flags & MS_RDONLY) |
| 251 | return ERR_PTR(-EROFS); | 256 | return ERR_PTR(-EROFS); |
| 252 | 257 | ||
| 253 | vfs_check_frozen(sb, SB_FREEZE_TRANS); | ||
| 254 | /* Special case here: if the journal has aborted behind our | ||
| 255 | * backs (eg. EIO in the commit thread), then we still need to | ||
| 256 | * take the FS itself readonly cleanly. */ | ||
| 257 | journal = EXT4_SB(sb)->s_journal; | 258 | journal = EXT4_SB(sb)->s_journal; |
| 258 | if (journal) { | 259 | handle = ext4_journal_current_handle(); |
| 259 | if (is_journal_aborted(journal)) { | 260 | |
| 260 | ext4_abort(sb, "Detected aborted journal"); | 261 | /* |
| 261 | return ERR_PTR(-EROFS); | 262 | * If a handle has been started, it should be allowed to |
| 262 | } | 263 | * finish, otherwise deadlock could happen between freeze |
| 263 | return jbd2_journal_start(journal, nblocks); | 264 | * and others (e.g. truncate) due to the restart of the |
| 265 | * journal handle if the filesystem is frozen and active | ||
| 266 | * handles are not stopped. | ||
| 267 | */ | ||
| 268 | if (!handle) | ||
| 269 | vfs_check_frozen(sb, SB_FREEZE_TRANS); | ||
| 270 | |||
| 271 | if (!journal) | ||
| 272 | return ext4_get_nojournal(); | ||
| 273 | /* | ||
| 274 | * Special case here: if the journal has aborted behind our | ||
| 275 | * backs (eg. EIO in the commit thread), then we still need to | ||
| 276 | * take the FS itself readonly cleanly. | ||
| 277 | */ | ||
| 278 | if (is_journal_aborted(journal)) { | ||
| 279 | ext4_abort(sb, "Detected aborted journal"); | ||
| 280 | return ERR_PTR(-EROFS); | ||
| 264 | } | 281 | } |
| 265 | return ext4_get_nojournal(); | 282 | return jbd2_journal_start(journal, nblocks); |
| 266 | } | 283 | } |
| 267 | 284 | ||
| 268 | /* | 285 | /* |
| @@ -2975,6 +2992,12 @@ static int ext4_register_li_request(struct super_block *sb, | |||
| 2975 | mutex_unlock(&ext4_li_info->li_list_mtx); | 2992 | mutex_unlock(&ext4_li_info->li_list_mtx); |
| 2976 | 2993 | ||
| 2977 | sbi->s_li_request = elr; | 2994 | sbi->s_li_request = elr; |
| 2995 | /* | ||
| 2996 | * set elr to NULL here since it has been inserted to | ||
| 2997 | * the request_list and the removal and free of it is | ||
| 2998 | * handled by ext4_clear_request_list from now on. | ||
| 2999 | */ | ||
| 3000 | elr = NULL; | ||
| 2978 | 3001 | ||
| 2979 | if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) { | 3002 | if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) { |
| 2980 | ret = ext4_run_lazyinit_thread(); | 3003 | ret = ext4_run_lazyinit_thread(); |
| @@ -3385,6 +3408,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
| 3385 | get_random_bytes(&sbi->s_next_generation, sizeof(u32)); | 3408 | get_random_bytes(&sbi->s_next_generation, sizeof(u32)); |
| 3386 | spin_lock_init(&sbi->s_next_gen_lock); | 3409 | spin_lock_init(&sbi->s_next_gen_lock); |
| 3387 | 3410 | ||
| 3411 | init_timer(&sbi->s_err_report); | ||
| 3412 | sbi->s_err_report.function = print_daily_error_info; | ||
| 3413 | sbi->s_err_report.data = (unsigned long) sb; | ||
| 3414 | |||
| 3388 | err = percpu_counter_init(&sbi->s_freeblocks_counter, | 3415 | err = percpu_counter_init(&sbi->s_freeblocks_counter, |
| 3389 | ext4_count_free_blocks(sb)); | 3416 | ext4_count_free_blocks(sb)); |
| 3390 | if (!err) { | 3417 | if (!err) { |
| @@ -3646,9 +3673,6 @@ no_journal: | |||
| 3646 | "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts, | 3673 | "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts, |
| 3647 | *sbi->s_es->s_mount_opts ? "; " : "", orig_data); | 3674 | *sbi->s_es->s_mount_opts ? "; " : "", orig_data); |
| 3648 | 3675 | ||
| 3649 | init_timer(&sbi->s_err_report); | ||
| 3650 | sbi->s_err_report.function = print_daily_error_info; | ||
| 3651 | sbi->s_err_report.data = (unsigned long) sb; | ||
| 3652 | if (es->s_error_count) | 3676 | if (es->s_error_count) |
| 3653 | mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */ | 3677 | mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */ |
| 3654 | 3678 | ||
| @@ -3672,6 +3696,7 @@ failed_mount_wq: | |||
| 3672 | sbi->s_journal = NULL; | 3696 | sbi->s_journal = NULL; |
| 3673 | } | 3697 | } |
| 3674 | failed_mount3: | 3698 | failed_mount3: |
| 3699 | del_timer(&sbi->s_err_report); | ||
| 3675 | if (sbi->s_flex_groups) { | 3700 | if (sbi->s_flex_groups) { |
| 3676 | if (is_vmalloc_addr(sbi->s_flex_groups)) | 3701 | if (is_vmalloc_addr(sbi->s_flex_groups)) |
| 3677 | vfree(sbi->s_flex_groups); | 3702 | vfree(sbi->s_flex_groups); |
| @@ -4138,6 +4163,11 @@ static int ext4_sync_fs(struct super_block *sb, int wait) | |||
| 4138 | /* | 4163 | /* |
| 4139 | * LVM calls this function before a (read-only) snapshot is created. This | 4164 | * LVM calls this function before a (read-only) snapshot is created. This |
| 4140 | * gives us a chance to flush the journal completely and mark the fs clean. | 4165 | * gives us a chance to flush the journal completely and mark the fs clean. |
| 4166 | * | ||
| 4167 | * Note that only this function cannot bring a filesystem to be in a clean | ||
| 4168 | * state independently, because ext4 prevents a new handle from being started | ||
| 4169 | * by @sb->s_frozen, which stays in an upper layer. It thus needs help from | ||
| 4170 | * the upper layer. | ||
| 4141 | */ | 4171 | */ |
| 4142 | static int ext4_freeze(struct super_block *sb) | 4172 | static int ext4_freeze(struct super_block *sb) |
| 4143 | { | 4173 | { |
| @@ -4614,11 +4644,24 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id, | |||
| 4614 | 4644 | ||
| 4615 | static int ext4_quota_off(struct super_block *sb, int type) | 4645 | static int ext4_quota_off(struct super_block *sb, int type) |
| 4616 | { | 4646 | { |
| 4647 | struct inode *inode = sb_dqopt(sb)->files[type]; | ||
| 4648 | handle_t *handle; | ||
| 4649 | |||
| 4617 | /* Force all delayed allocation blocks to be allocated. | 4650 | /* Force all delayed allocation blocks to be allocated. |
| 4618 | * Caller already holds s_umount sem */ | 4651 | * Caller already holds s_umount sem */ |
| 4619 | if (test_opt(sb, DELALLOC)) | 4652 | if (test_opt(sb, DELALLOC)) |
| 4620 | sync_filesystem(sb); | 4653 | sync_filesystem(sb); |
| 4621 | 4654 | ||
| 4655 | /* Update modification times of quota files when userspace can | ||
| 4656 | * start looking at them */ | ||
| 4657 | handle = ext4_journal_start(inode, 1); | ||
| 4658 | if (IS_ERR(handle)) | ||
| 4659 | goto out; | ||
| 4660 | inode->i_mtime = inode->i_ctime = CURRENT_TIME; | ||
| 4661 | ext4_mark_inode_dirty(handle, inode); | ||
| 4662 | ext4_journal_stop(handle); | ||
| 4663 | |||
| 4664 | out: | ||
| 4622 | return dquot_quota_off(sb, type); | 4665 | return dquot_quota_off(sb, type); |
| 4623 | } | 4666 | } |
| 4624 | 4667 | ||
| @@ -4714,9 +4757,8 @@ out: | |||
| 4714 | if (inode->i_size < off + len) { | 4757 | if (inode->i_size < off + len) { |
| 4715 | i_size_write(inode, off + len); | 4758 | i_size_write(inode, off + len); |
| 4716 | EXT4_I(inode)->i_disksize = inode->i_size; | 4759 | EXT4_I(inode)->i_disksize = inode->i_size; |
| 4760 | ext4_mark_inode_dirty(handle, inode); | ||
| 4717 | } | 4761 | } |
| 4718 | inode->i_mtime = inode->i_ctime = CURRENT_TIME; | ||
| 4719 | ext4_mark_inode_dirty(handle, inode); | ||
| 4720 | mutex_unlock(&inode->i_mutex); | 4762 | mutex_unlock(&inode->i_mutex); |
| 4721 | return len; | 4763 | return len; |
| 4722 | } | 4764 | } |
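A minimal sketch of how the upper layer is expected to drive the ext4_freeze()/ext4_unfreeze() pair described in the comment above, assuming the generic freeze_super()/thaw_super() helpers; the snapshot step is only a placeholder for whatever the caller (e.g. LVM) does while writes are blocked.

    #include <linux/fs.h>

    /* freeze_super() invokes ->freeze_fs (ext4_freeze), which flushes the
     * journal, while sb->s_frozen, maintained by the VFS, blocks new handles
     * until thaw_super() invokes ->unfreeze_fs. */
    static int snapshot_filesystem(struct super_block *sb)
    {
            int err;

            err = freeze_super(sb);
            if (err)
                    return err;

            /* ... block-level snapshot would be taken here ... */

            return thaw_super(sb);
    }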
diff --git a/fs/fhandle.c b/fs/fhandle.c index bf93ad2bee07..6b088641f5bf 100644 --- a/fs/fhandle.c +++ b/fs/fhandle.c | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | #include <linux/exportfs.h> | 7 | #include <linux/exportfs.h> |
| 8 | #include <linux/fs_struct.h> | 8 | #include <linux/fs_struct.h> |
| 9 | #include <linux/fsnotify.h> | 9 | #include <linux/fsnotify.h> |
| 10 | #include <linux/personality.h> | ||
| 10 | #include <asm/uaccess.h> | 11 | #include <asm/uaccess.h> |
| 11 | #include "internal.h" | 12 | #include "internal.h" |
| 12 | 13 | ||
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 20af62f4304b..6e28000a4b21 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c | |||
| @@ -105,6 +105,8 @@ static int journal_submit_commit_record(journal_t *journal, | |||
| 105 | int ret; | 105 | int ret; |
| 106 | struct timespec now = current_kernel_time(); | 106 | struct timespec now = current_kernel_time(); |
| 107 | 107 | ||
| 108 | *cbh = NULL; | ||
| 109 | |||
| 108 | if (is_journal_aborted(journal)) | 110 | if (is_journal_aborted(journal)) |
| 109 | return 0; | 111 | return 0; |
| 110 | 112 | ||
| @@ -806,7 +808,7 @@ wait_for_iobuf: | |||
| 806 | if (err) | 808 | if (err) |
| 807 | __jbd2_journal_abort_hard(journal); | 809 | __jbd2_journal_abort_hard(journal); |
| 808 | } | 810 | } |
| 809 | if (!err && !is_journal_aborted(journal)) | 811 | if (cbh) |
| 810 | err = journal_wait_on_commit_record(journal, cbh); | 812 | err = journal_wait_on_commit_record(journal, cbh); |
| 811 | if (JBD2_HAS_INCOMPAT_FEATURE(journal, | 813 | if (JBD2_HAS_INCOMPAT_FEATURE(journal, |
| 812 | JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) && | 814 | JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) && |
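A hedged sketch of the out-parameter convention behind the journal_submit_commit_record() change above; the names (submit_record, struct commit_buf) are illustrative, not jbd2's. The point is that *out is cleared before any early return, so the caller's NULL test is always meaningful.

    #include <stddef.h>

    struct commit_buf { int id; };

    static int submit_record(int journal_aborted, struct commit_buf **out)
    {
            static struct commit_buf buf = { .id = 1 };

            *out = NULL;                    /* set before any early return */
            if (journal_aborted)
                    return 0;               /* nothing submitted, *out stays NULL */
            *out = &buf;
            return 0;
    }

    /* Caller side: because *out was initialized, a test like "if (rec)" is
     * safe even when the journal was aborted and nothing was submitted,
     * mirroring the "if (cbh)" check in the commit path. */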
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index aba8ebaec25c..e0ec3db1c395 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c | |||
| @@ -2413,10 +2413,12 @@ const char *jbd2_dev_to_name(dev_t device) | |||
| 2413 | new_dev = kmalloc(sizeof(struct devname_cache), GFP_KERNEL); | 2413 | new_dev = kmalloc(sizeof(struct devname_cache), GFP_KERNEL); |
| 2414 | if (!new_dev) | 2414 | if (!new_dev) |
| 2415 | return "NODEV-ALLOCFAILURE"; /* Something non-NULL */ | 2415 | return "NODEV-ALLOCFAILURE"; /* Something non-NULL */ |
| 2416 | bd = bdget(device); | ||
| 2416 | spin_lock(&devname_cache_lock); | 2417 | spin_lock(&devname_cache_lock); |
| 2417 | if (devcache[i]) { | 2418 | if (devcache[i]) { |
| 2418 | if (devcache[i]->device == device) { | 2419 | if (devcache[i]->device == device) { |
| 2419 | kfree(new_dev); | 2420 | kfree(new_dev); |
| 2421 | bdput(bd); | ||
| 2420 | ret = devcache[i]->devname; | 2422 | ret = devcache[i]->devname; |
| 2421 | spin_unlock(&devname_cache_lock); | 2423 | spin_unlock(&devname_cache_lock); |
| 2422 | return ret; | 2424 | return ret; |
| @@ -2425,7 +2427,6 @@ const char *jbd2_dev_to_name(dev_t device) | |||
| 2425 | } | 2427 | } |
| 2426 | devcache[i] = new_dev; | 2428 | devcache[i] = new_dev; |
| 2427 | devcache[i]->device = device; | 2429 | devcache[i]->device = device; |
| 2428 | bd = bdget(device); | ||
| 2429 | if (bd) { | 2430 | if (bd) { |
| 2430 | bdevname(bd, devcache[i]->devname); | 2431 | bdevname(bd, devcache[i]->devname); |
| 2431 | bdput(bd); | 2432 | bdput(bd); |
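A hedged sketch of the ordering rule behind the jbd2_dev_to_name() change above: bdget() may sleep, so it is called before the spinlock is taken, and the reference is dropped again on the cache-hit path. The single-slot cache below is illustrative only.

    #include <linux/fs.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(name_lock);
    static dev_t cached_dev;
    static char cached_name[BDEVNAME_SIZE];

    static void cache_devname(dev_t device)
    {
            struct block_device *bd = bdget(device);    /* may sleep: done unlocked */

            spin_lock(&name_lock);
            if (cached_dev == device) {                 /* cache hit: undo the work */
                    spin_unlock(&name_lock);
                    if (bd)
                            bdput(bd);
                    return;
            }
            if (bd)
                    bdevname(bd, cached_name);          /* does not sleep */
            cached_dev = device;
            spin_unlock(&name_lock);
            if (bd)
                    bdput(bd);
    }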
diff --git a/fs/namei.c b/fs/namei.c index e6cd6113872c..54fc993e3027 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
| @@ -697,6 +697,7 @@ static __always_inline void set_root_rcu(struct nameidata *nd) | |||
| 697 | do { | 697 | do { |
| 698 | seq = read_seqcount_begin(&fs->seq); | 698 | seq = read_seqcount_begin(&fs->seq); |
| 699 | nd->root = fs->root; | 699 | nd->root = fs->root; |
| 700 | nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq); | ||
| 700 | } while (read_seqcount_retry(&fs->seq, seq)); | 701 | } while (read_seqcount_retry(&fs->seq, seq)); |
| 701 | } | 702 | } |
| 702 | } | 703 | } |
diff --git a/fs/namespace.c b/fs/namespace.c index 7dba2ed03429..d99bcf59e4c2 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
| @@ -1030,18 +1030,6 @@ const struct seq_operations mounts_op = { | |||
| 1030 | .show = show_vfsmnt | 1030 | .show = show_vfsmnt |
| 1031 | }; | 1031 | }; |
| 1032 | 1032 | ||
| 1033 | static int uuid_is_nil(u8 *uuid) | ||
| 1034 | { | ||
| 1035 | int i; | ||
| 1036 | u8 *cp = (u8 *)uuid; | ||
| 1037 | |||
| 1038 | for (i = 0; i < 16; i++) { | ||
| 1039 | if (*cp++) | ||
| 1040 | return 0; | ||
| 1041 | } | ||
| 1042 | return 1; | ||
| 1043 | } | ||
| 1044 | |||
| 1045 | static int show_mountinfo(struct seq_file *m, void *v) | 1033 | static int show_mountinfo(struct seq_file *m, void *v) |
| 1046 | { | 1034 | { |
| 1047 | struct proc_mounts *p = m->private; | 1035 | struct proc_mounts *p = m->private; |
| @@ -1085,10 +1073,6 @@ static int show_mountinfo(struct seq_file *m, void *v) | |||
| 1085 | if (IS_MNT_UNBINDABLE(mnt)) | 1073 | if (IS_MNT_UNBINDABLE(mnt)) |
| 1086 | seq_puts(m, " unbindable"); | 1074 | seq_puts(m, " unbindable"); |
| 1087 | 1075 | ||
| 1088 | if (!uuid_is_nil(mnt->mnt_sb->s_uuid)) | ||
| 1089 | /* print the uuid */ | ||
| 1090 | seq_printf(m, " uuid:%pU", mnt->mnt_sb->s_uuid); | ||
| 1091 | |||
| 1092 | /* Filesystem specific data */ | 1076 | /* Filesystem specific data */ |
| 1093 | seq_puts(m, " - "); | 1077 | seq_puts(m, " - "); |
| 1094 | show_type(m, sb); | 1078 | show_type(m, sb); |
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index 9166fcb66da2..89fc160fd5b0 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c | |||
| @@ -148,67 +148,64 @@ static rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *flavors, | |||
| 148 | return pseudoflavor; | 148 | return pseudoflavor; |
| 149 | } | 149 | } |
| 150 | 150 | ||
| 151 | static rpc_authflavor_t nfs_negotiate_security(const struct dentry *parent, const struct dentry *dentry) | 151 | static int nfs_negotiate_security(const struct dentry *parent, |
| 152 | const struct dentry *dentry, | ||
| 153 | rpc_authflavor_t *flavor) | ||
| 152 | { | 154 | { |
| 153 | int status = 0; | ||
| 154 | struct page *page; | 155 | struct page *page; |
| 155 | struct nfs4_secinfo_flavors *flavors; | 156 | struct nfs4_secinfo_flavors *flavors; |
| 156 | int (*secinfo)(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *); | 157 | int (*secinfo)(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *); |
| 157 | rpc_authflavor_t flavor = RPC_AUTH_UNIX; | 158 | int ret = -EPERM; |
| 158 | 159 | ||
| 159 | secinfo = NFS_PROTO(parent->d_inode)->secinfo; | 160 | secinfo = NFS_PROTO(parent->d_inode)->secinfo; |
| 160 | if (secinfo != NULL) { | 161 | if (secinfo != NULL) { |
| 161 | page = alloc_page(GFP_KERNEL); | 162 | page = alloc_page(GFP_KERNEL); |
| 162 | if (!page) { | 163 | if (!page) { |
| 163 | status = -ENOMEM; | 164 | ret = -ENOMEM; |
| 164 | goto out; | 165 | goto out; |
| 165 | } | 166 | } |
| 166 | flavors = page_address(page); | 167 | flavors = page_address(page); |
| 167 | status = secinfo(parent->d_inode, &dentry->d_name, flavors); | 168 | ret = secinfo(parent->d_inode, &dentry->d_name, flavors); |
| 168 | flavor = nfs_find_best_sec(flavors, dentry->d_inode); | 169 | *flavor = nfs_find_best_sec(flavors, dentry->d_inode); |
| 169 | put_page(page); | 170 | put_page(page); |
| 170 | } | 171 | } |
| 171 | 172 | ||
| 172 | return flavor; | ||
| 173 | |||
| 174 | out: | 173 | out: |
| 175 | status = -ENOMEM; | 174 | return ret; |
| 176 | return status; | ||
| 177 | } | 175 | } |
| 178 | 176 | ||
| 179 | static rpc_authflavor_t nfs_lookup_with_sec(struct nfs_server *server, struct dentry *parent, | 177 | static int nfs_lookup_with_sec(struct nfs_server *server, struct dentry *parent, |
| 180 | struct dentry *dentry, struct path *path, | 178 | struct dentry *dentry, struct path *path, |
| 181 | struct nfs_fh *fh, struct nfs_fattr *fattr) | 179 | struct nfs_fh *fh, struct nfs_fattr *fattr, |
| 180 | rpc_authflavor_t *flavor) | ||
| 182 | { | 181 | { |
| 183 | rpc_authflavor_t flavor; | ||
| 184 | struct rpc_clnt *clone; | 182 | struct rpc_clnt *clone; |
| 185 | struct rpc_auth *auth; | 183 | struct rpc_auth *auth; |
| 186 | int err; | 184 | int err; |
| 187 | 185 | ||
| 188 | flavor = nfs_negotiate_security(parent, path->dentry); | 186 | err = nfs_negotiate_security(parent, path->dentry, flavor); |
| 189 | if (flavor < 0) | 187 | if (err < 0) |
| 190 | goto out; | 188 | goto out; |
| 191 | clone = rpc_clone_client(server->client); | 189 | clone = rpc_clone_client(server->client); |
| 192 | auth = rpcauth_create(flavor, clone); | 190 | auth = rpcauth_create(*flavor, clone); |
| 193 | if (!auth) { | 191 | if (!auth) { |
| 194 | flavor = -EIO; | 192 | err = -EIO; |
| 195 | goto out_shutdown; | 193 | goto out_shutdown; |
| 196 | } | 194 | } |
| 197 | err = server->nfs_client->rpc_ops->lookup(clone, parent->d_inode, | 195 | err = server->nfs_client->rpc_ops->lookup(clone, parent->d_inode, |
| 198 | &path->dentry->d_name, | 196 | &path->dentry->d_name, |
| 199 | fh, fattr); | 197 | fh, fattr); |
| 200 | if (err < 0) | ||
| 201 | flavor = err; | ||
| 202 | out_shutdown: | 198 | out_shutdown: |
| 203 | rpc_shutdown_client(clone); | 199 | rpc_shutdown_client(clone); |
| 204 | out: | 200 | out: |
| 205 | return flavor; | 201 | return err; |
| 206 | } | 202 | } |
| 207 | #else /* CONFIG_NFS_V4 */ | 203 | #else /* CONFIG_NFS_V4 */ |
| 208 | static inline rpc_authflavor_t nfs_lookup_with_sec(struct nfs_server *server, | 204 | static inline int nfs_lookup_with_sec(struct nfs_server *server, |
| 209 | struct dentry *parent, struct dentry *dentry, | 205 | struct dentry *parent, struct dentry *dentry, |
| 210 | struct path *path, struct nfs_fh *fh, | 206 | struct path *path, struct nfs_fh *fh, |
| 211 | struct nfs_fattr *fattr) | 207 | struct nfs_fattr *fattr, |
| 208 | rpc_authflavor_t *flavor) | ||
| 212 | { | 209 | { |
| 213 | return -EPERM; | 210 | return -EPERM; |
| 214 | } | 211 | } |
| @@ -234,7 +231,7 @@ struct vfsmount *nfs_d_automount(struct path *path) | |||
| 234 | struct nfs_fh *fh = NULL; | 231 | struct nfs_fh *fh = NULL; |
| 235 | struct nfs_fattr *fattr = NULL; | 232 | struct nfs_fattr *fattr = NULL; |
| 236 | int err; | 233 | int err; |
| 237 | rpc_authflavor_t flavor = 1; | 234 | rpc_authflavor_t flavor = RPC_AUTH_UNIX; |
| 238 | 235 | ||
| 239 | dprintk("--> nfs_d_automount()\n"); | 236 | dprintk("--> nfs_d_automount()\n"); |
| 240 | 237 | ||
| @@ -255,13 +252,8 @@ struct vfsmount *nfs_d_automount(struct path *path) | |||
| 255 | err = server->nfs_client->rpc_ops->lookup(server->client, parent->d_inode, | 252 | err = server->nfs_client->rpc_ops->lookup(server->client, parent->d_inode, |
| 256 | &path->dentry->d_name, | 253 | &path->dentry->d_name, |
| 257 | fh, fattr); | 254 | fh, fattr); |
| 258 | if (err == -EPERM) { | 255 | if (err == -EPERM && NFS_PROTO(parent->d_inode)->secinfo != NULL) |
| 259 | flavor = nfs_lookup_with_sec(server, parent, path->dentry, path, fh, fattr); | 256 | err = nfs_lookup_with_sec(server, parent, path->dentry, path, fh, fattr, &flavor); |
| 260 | if (flavor < 0) | ||
| 261 | err = flavor; | ||
| 262 | else | ||
| 263 | err = 0; | ||
| 264 | } | ||
| 265 | dput(parent); | 257 | dput(parent); |
| 266 | if (err != 0) { | 258 | if (err != 0) { |
| 267 | mnt = ERR_PTR(err); | 259 | mnt = ERR_PTR(err); |
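A minimal sketch of why the nfs_negotiate_security() refactor above changes the return type: assuming rpc_authflavor_t is an unsigned type (as in sunrpc), a negative errno returned through it would wrap to a large positive value, so the status is returned as an int and the flavor is handed back through a pointer.

    #include <errno.h>

    typedef unsigned int rpc_authflavor_t;      /* assumption for the sketch */

    static int negotiate_security(int have_secinfo, rpc_authflavor_t *flavor)
    {
            if (!have_secinfo)
                    return -EPERM;              /* error travels via the int return */
            *flavor = 1;                        /* e.g. RPC_AUTH_UNIX, via the pointer */
            return 0;
    }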
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index dfd1e6d7e6c3..9bf41eab3e46 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
| @@ -2204,8 +2204,6 @@ static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandl | |||
| 2204 | goto out; | 2204 | goto out; |
| 2205 | } | 2205 | } |
| 2206 | ret = nfs4_lookup_root(server, fhandle, info); | 2206 | ret = nfs4_lookup_root(server, fhandle, info); |
| 2207 | if (ret < 0) | ||
| 2208 | ret = -EAGAIN; | ||
| 2209 | out: | 2207 | out: |
| 2210 | return ret; | 2208 | return ret; |
| 2211 | } | 2209 | } |
| @@ -2226,7 +2224,7 @@ static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle, | |||
| 2226 | 2224 | ||
| 2227 | for (i = 0; i < len; i++) { | 2225 | for (i = 0; i < len; i++) { |
| 2228 | status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]); | 2226 | status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]); |
| 2229 | if (status == 0) | 2227 | if (status != -EPERM) |
| 2230 | break; | 2228 | break; |
| 2231 | } | 2229 | } |
| 2232 | if (status == 0) | 2230 | if (status == 0) |
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index af0c6279a4a7..e4cbc11a74ab 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
| @@ -542,11 +542,15 @@ nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, u | |||
| 542 | if (!nfs_need_commit(nfsi)) | 542 | if (!nfs_need_commit(nfsi)) |
| 543 | return 0; | 543 | return 0; |
| 544 | 544 | ||
| 545 | spin_lock(&inode->i_lock); | ||
| 545 | ret = nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT); | 546 | ret = nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT); |
| 546 | if (ret > 0) | 547 | if (ret > 0) |
| 547 | nfsi->ncommit -= ret; | 548 | nfsi->ncommit -= ret; |
| 549 | spin_unlock(&inode->i_lock); | ||
| 550 | |||
| 548 | if (nfs_need_commit(NFS_I(inode))) | 551 | if (nfs_need_commit(NFS_I(inode))) |
| 549 | __mark_inode_dirty(inode, I_DIRTY_DATASYNC); | 552 | __mark_inode_dirty(inode, I_DIRTY_DATASYNC); |
| 553 | |||
| 550 | return ret; | 554 | return ret; |
| 551 | } | 555 | } |
| 552 | #else | 556 | #else |
| @@ -1483,9 +1487,7 @@ int nfs_commit_inode(struct inode *inode, int how) | |||
| 1483 | res = nfs_commit_set_lock(NFS_I(inode), may_wait); | 1487 | res = nfs_commit_set_lock(NFS_I(inode), may_wait); |
| 1484 | if (res <= 0) | 1488 | if (res <= 0) |
| 1485 | goto out_mark_dirty; | 1489 | goto out_mark_dirty; |
| 1486 | spin_lock(&inode->i_lock); | ||
| 1487 | res = nfs_scan_commit(inode, &head, 0, 0); | 1490 | res = nfs_scan_commit(inode, &head, 0, 0); |
| 1488 | spin_unlock(&inode->i_lock); | ||
| 1489 | if (res) { | 1491 | if (res) { |
| 1490 | int error; | 1492 | int error; |
| 1491 | 1493 | ||
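A hedged sketch (illustrative types, not NFS's) of the locking change above: by taking the lock inside the scan helper, the list walk and the pending-count adjustment always happen together, and callers such as nfs_commit_inode() no longer take i_lock themselves.

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct commit_state {
            spinlock_t       lock;
            long             ncommit;
            struct list_head list;
    };

    static int scan_commit(struct commit_state *cs, struct list_head *dst)
    {
            int moved = 0;

            spin_lock(&cs->lock);
            while (!list_empty(&cs->list)) {
                    list_move_tail(cs->list.next, dst);
                    moved++;
            }
            if (moved > 0)
                    cs->ncommit -= moved;
            spin_unlock(&cs->lock);
            return moved;
    }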
diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c index 0c6d81670137..7c831a2731fa 100644 --- a/fs/nfsd/lockd.c +++ b/fs/nfsd/lockd.c | |||
| @@ -38,7 +38,6 @@ nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp) | |||
| 38 | exp_readlock(); | 38 | exp_readlock(); |
| 39 | nfserr = nfsd_open(rqstp, &fh, S_IFREG, NFSD_MAY_LOCK, filp); | 39 | nfserr = nfsd_open(rqstp, &fh, S_IFREG, NFSD_MAY_LOCK, filp); |
| 40 | fh_put(&fh); | 40 | fh_put(&fh); |
| 41 | rqstp->rq_client = NULL; | ||
| 42 | exp_readunlock(); | 41 | exp_readunlock(); |
| 43 | /* We return nlm error codes as nlm doesn't know | 42 | /* We return nlm error codes as nlm doesn't know |
| 44 | * about nfsd, but nfsd does know about nlm.. | 43 | * about nfsd, but nfsd does know about nlm.. |
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 4b36ec3eb8ea..aa309aa93fe8 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
| @@ -397,10 +397,13 @@ static void unhash_generic_stateid(struct nfs4_stateid *stp) | |||
| 397 | 397 | ||
| 398 | static void free_generic_stateid(struct nfs4_stateid *stp) | 398 | static void free_generic_stateid(struct nfs4_stateid *stp) |
| 399 | { | 399 | { |
| 400 | int oflag = nfs4_access_bmap_to_omode(stp); | 400 | int oflag; |
| 401 | 401 | ||
| 402 | nfs4_file_put_access(stp->st_file, oflag); | 402 | if (stp->st_access_bmap) { |
| 403 | put_nfs4_file(stp->st_file); | 403 | oflag = nfs4_access_bmap_to_omode(stp); |
| 404 | nfs4_file_put_access(stp->st_file, oflag); | ||
| 405 | put_nfs4_file(stp->st_file); | ||
| 406 | } | ||
| 404 | kmem_cache_free(stateid_slab, stp); | 407 | kmem_cache_free(stateid_slab, stp); |
| 405 | } | 408 | } |
| 406 | 409 | ||
diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c index b10e3540d5b7..ce4f62440425 100644 --- a/fs/partitions/ldm.c +++ b/fs/partitions/ldm.c | |||
| @@ -1299,6 +1299,11 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags) | |||
| 1299 | 1299 | ||
| 1300 | BUG_ON (!data || !frags); | 1300 | BUG_ON (!data || !frags); |
| 1301 | 1301 | ||
| 1302 | if (size < 2 * VBLK_SIZE_HEAD) { | ||
| 1303 | ldm_error("Value of size is too small."); | ||
| 1304 | return false; | ||
| 1305 | } | ||
| 1306 | |||
| 1302 | group = get_unaligned_be32(data + 0x08); | 1307 | group = get_unaligned_be32(data + 0x08); |
| 1303 | rec = get_unaligned_be16(data + 0x0C); | 1308 | rec = get_unaligned_be16(data + 0x0C); |
| 1304 | num = get_unaligned_be16(data + 0x0E); | 1309 | num = get_unaligned_be16(data + 0x0E); |
| @@ -1306,6 +1311,10 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags) | |||
| 1306 | ldm_error ("A VBLK claims to have %d parts.", num); | 1311 | ldm_error ("A VBLK claims to have %d parts.", num); |
| 1307 | return false; | 1312 | return false; |
| 1308 | } | 1313 | } |
| 1314 | if (rec >= num) { | ||
| 1315 | ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num); | ||
| 1316 | return false; | ||
| 1317 | } | ||
| 1309 | 1318 | ||
| 1310 | list_for_each (item, frags) { | 1319 | list_for_each (item, frags) { |
| 1311 | f = list_entry (item, struct frag, list); | 1320 | f = list_entry (item, struct frag, list); |
| @@ -1334,10 +1343,9 @@ found: | |||
| 1334 | 1343 | ||
| 1335 | f->map |= (1 << rec); | 1344 | f->map |= (1 << rec); |
| 1336 | 1345 | ||
| 1337 | if (num > 0) { | 1346 | data += VBLK_SIZE_HEAD; |
| 1338 | data += VBLK_SIZE_HEAD; | 1347 | size -= VBLK_SIZE_HEAD; |
| 1339 | size -= VBLK_SIZE_HEAD; | 1348 | |
| 1340 | } | ||
| 1341 | memcpy (f->data+rec*(size-VBLK_SIZE_HEAD)+VBLK_SIZE_HEAD, data, size); | 1349 | memcpy (f->data+rec*(size-VBLK_SIZE_HEAD)+VBLK_SIZE_HEAD, data, size); |
| 1342 | 1350 | ||
| 1343 | return true; | 1351 | return true; |
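A user-space sketch of the two bounds checks added to ldm_frag_add() above; VBLK_SIZE_HEAD is assumed to be the fragment header length, and the final length guard is an extra belt for this sketch rather than part of the original code.

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    #define VBLK_SIZE_HEAD 16                   /* assumed value, for the sketch */

    static bool frag_store(unsigned char *buf, size_t buf_len,
                           const unsigned char *data, int size, int rec, int num)
    {
            size_t body;

            if (size < 2 * VBLK_SIZE_HEAD)      /* fragment too small to carry data */
                    return false;
            if (rec >= num)                     /* record index beyond declared count */
                    return false;

            body = (size_t)size - VBLK_SIZE_HEAD;
            if (VBLK_SIZE_HEAD + (size_t)rec * body + body > buf_len)
                    return false;

            memcpy(buf + VBLK_SIZE_HEAD + (size_t)rec * body,
                   data + VBLK_SIZE_HEAD, body);
            return true;
    }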
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index a925bf205497..d3c032f5fa0a 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c | |||
| @@ -442,7 +442,7 @@ EXPORT_SYMBOL(dquot_acquire); | |||
| 442 | */ | 442 | */ |
| 443 | int dquot_commit(struct dquot *dquot) | 443 | int dquot_commit(struct dquot *dquot) |
| 444 | { | 444 | { |
| 445 | int ret = 0, ret2 = 0; | 445 | int ret = 0; |
| 446 | struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); | 446 | struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); |
| 447 | 447 | ||
| 448 | mutex_lock(&dqopt->dqio_mutex); | 448 | mutex_lock(&dqopt->dqio_mutex); |
| @@ -454,15 +454,10 @@ int dquot_commit(struct dquot *dquot) | |||
| 454 | spin_unlock(&dq_list_lock); | 454 | spin_unlock(&dq_list_lock); |
| 455 | /* Inactive dquot can be only if there was error during read/init | 455 | /* Inactive dquot can be only if there was error during read/init |
| 456 | * => we have better not writing it */ | 456 | * => we have better not writing it */ |
| 457 | if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { | 457 | if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) |
| 458 | ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); | 458 | ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); |
| 459 | if (info_dirty(&dqopt->info[dquot->dq_type])) { | 459 | else |
| 460 | ret2 = dqopt->ops[dquot->dq_type]->write_file_info( | 460 | ret = -EIO; |
| 461 | dquot->dq_sb, dquot->dq_type); | ||
| 462 | } | ||
| 463 | if (ret >= 0) | ||
| 464 | ret = ret2; | ||
| 465 | } | ||
| 466 | out_sem: | 461 | out_sem: |
| 467 | mutex_unlock(&dqopt->dqio_mutex); | 462 | mutex_unlock(&dqopt->dqio_mutex); |
| 468 | return ret; | 463 | return ret; |
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c index 9eead2c796b7..fbb0b478a346 100644 --- a/fs/ramfs/file-nommu.c +++ b/fs/ramfs/file-nommu.c | |||
| @@ -112,6 +112,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize) | |||
| 112 | SetPageDirty(page); | 112 | SetPageDirty(page); |
| 113 | 113 | ||
| 114 | unlock_page(page); | 114 | unlock_page(page); |
| 115 | put_page(page); | ||
| 115 | } | 116 | } |
| 116 | 117 | ||
| 117 | return 0; | 118 | return 0; |
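A minimal sketch of the reference rule behind the ramfs fix above, assuming the page comes from a pagecache lookup such as find_lock_page(): the page is returned locked and with its refcount raised, so both unlock_page() and put_page() are required.

    #include <linux/pagemap.h>

    static void dirty_one_page(struct address_space *mapping, pgoff_t index)
    {
            struct page *page = find_lock_page(mapping, index);

            if (!page)
                    return;
            SetPageDirty(page);
            unlock_page(page);      /* drop the page lock ...        */
            put_page(page);         /* ... and the reference we hold */
    }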
diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index 919f0de29d8f..e6493cac193d 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h | |||
| @@ -23,6 +23,12 @@ | |||
| 23 | #ifndef __UBIFS_DEBUG_H__ | 23 | #ifndef __UBIFS_DEBUG_H__ |
| 24 | #define __UBIFS_DEBUG_H__ | 24 | #define __UBIFS_DEBUG_H__ |
| 25 | 25 | ||
| 26 | /* Checking helper functions */ | ||
| 27 | typedef int (*dbg_leaf_callback)(struct ubifs_info *c, | ||
| 28 | struct ubifs_zbranch *zbr, void *priv); | ||
| 29 | typedef int (*dbg_znode_callback)(struct ubifs_info *c, | ||
| 30 | struct ubifs_znode *znode, void *priv); | ||
| 31 | |||
| 26 | #ifdef CONFIG_UBIFS_FS_DEBUG | 32 | #ifdef CONFIG_UBIFS_FS_DEBUG |
| 27 | 33 | ||
| 28 | /** | 34 | /** |
| @@ -270,11 +276,6 @@ void dbg_dump_tnc(struct ubifs_info *c); | |||
| 270 | void dbg_dump_index(struct ubifs_info *c); | 276 | void dbg_dump_index(struct ubifs_info *c); |
| 271 | void dbg_dump_lpt_lebs(const struct ubifs_info *c); | 277 | void dbg_dump_lpt_lebs(const struct ubifs_info *c); |
| 272 | 278 | ||
| 273 | /* Checking helper functions */ | ||
| 274 | typedef int (*dbg_leaf_callback)(struct ubifs_info *c, | ||
| 275 | struct ubifs_zbranch *zbr, void *priv); | ||
| 276 | typedef int (*dbg_znode_callback)(struct ubifs_info *c, | ||
| 277 | struct ubifs_znode *znode, void *priv); | ||
| 278 | int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb, | 279 | int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb, |
| 279 | dbg_znode_callback znode_cb, void *priv); | 280 | dbg_znode_callback znode_cb, void *priv); |
| 280 | 281 | ||
| @@ -295,7 +296,6 @@ int dbg_check_idx_size(struct ubifs_info *c, long long idx_size); | |||
| 295 | int dbg_check_filesystem(struct ubifs_info *c); | 296 | int dbg_check_filesystem(struct ubifs_info *c); |
| 296 | void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat, | 297 | void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat, |
| 297 | int add_pos); | 298 | int add_pos); |
| 298 | int dbg_check_lprops(struct ubifs_info *c); | ||
| 299 | int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode, | 299 | int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode, |
| 300 | int row, int col); | 300 | int row, int col); |
| 301 | int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode, | 301 | int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode, |
| @@ -401,58 +401,94 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c); | |||
| 401 | #define DBGKEY(key) ((char *)(key)) | 401 | #define DBGKEY(key) ((char *)(key)) |
| 402 | #define DBGKEY1(key) ((char *)(key)) | 402 | #define DBGKEY1(key) ((char *)(key)) |
| 403 | 403 | ||
| 404 | #define ubifs_debugging_init(c) 0 | 404 | static inline int ubifs_debugging_init(struct ubifs_info *c) { return 0; } |
| 405 | #define ubifs_debugging_exit(c) ({}) | 405 | static inline void ubifs_debugging_exit(struct ubifs_info *c) { return; } |
| 406 | 406 | static inline const char *dbg_ntype(int type) { return ""; } | |
| 407 | #define dbg_ntype(type) "" | 407 | static inline const char *dbg_cstate(int cmt_state) { return ""; } |
| 408 | #define dbg_cstate(cmt_state) "" | 408 | static inline const char *dbg_jhead(int jhead) { return ""; } |
| 409 | #define dbg_jhead(jhead) "" | 409 | static inline const char * |
| 410 | #define dbg_get_key_dump(c, key) ({}) | 410 | dbg_get_key_dump(const struct ubifs_info *c, |
| 411 | #define dbg_dump_inode(c, inode) ({}) | 411 | const union ubifs_key *key) { return ""; } |
| 412 | #define dbg_dump_node(c, node) ({}) | 412 | static inline void dbg_dump_inode(const struct ubifs_info *c, |
| 413 | #define dbg_dump_lpt_node(c, node, lnum, offs) ({}) | 413 | const struct inode *inode) { return; } |
| 414 | #define dbg_dump_budget_req(req) ({}) | 414 | static inline void dbg_dump_node(const struct ubifs_info *c, |
| 415 | #define dbg_dump_lstats(lst) ({}) | 415 | const void *node) { return; } |
| 416 | #define dbg_dump_budg(c) ({}) | 416 | static inline void dbg_dump_lpt_node(const struct ubifs_info *c, |
| 417 | #define dbg_dump_lprop(c, lp) ({}) | 417 | void *node, int lnum, |
| 418 | #define dbg_dump_lprops(c) ({}) | 418 | int offs) { return; } |
| 419 | #define dbg_dump_lpt_info(c) ({}) | 419 | static inline void |
| 420 | #define dbg_dump_leb(c, lnum) ({}) | 420 | dbg_dump_budget_req(const struct ubifs_budget_req *req) { return; } |
| 421 | #define dbg_dump_znode(c, znode) ({}) | 421 | static inline void |
| 422 | #define dbg_dump_heap(c, heap, cat) ({}) | 422 | dbg_dump_lstats(const struct ubifs_lp_stats *lst) { return; } |
| 423 | #define dbg_dump_pnode(c, pnode, parent, iip) ({}) | 423 | static inline void dbg_dump_budg(struct ubifs_info *c) { return; } |
| 424 | #define dbg_dump_tnc(c) ({}) | 424 | static inline void dbg_dump_lprop(const struct ubifs_info *c, |
| 425 | #define dbg_dump_index(c) ({}) | 425 | const struct ubifs_lprops *lp) { return; } |
| 426 | #define dbg_dump_lpt_lebs(c) ({}) | 426 | static inline void dbg_dump_lprops(struct ubifs_info *c) { return; } |
| 427 | 427 | static inline void dbg_dump_lpt_info(struct ubifs_info *c) { return; } | |
| 428 | #define dbg_walk_index(c, leaf_cb, znode_cb, priv) 0 | 428 | static inline void dbg_dump_leb(const struct ubifs_info *c, |
| 429 | #define dbg_old_index_check_init(c, zroot) 0 | 429 | int lnum) { return; } |
| 430 | #define dbg_save_space_info(c) ({}) | 430 | static inline void |
| 431 | #define dbg_check_space_info(c) 0 | 431 | dbg_dump_znode(const struct ubifs_info *c, |
| 432 | #define dbg_check_old_index(c, zroot) 0 | 432 | const struct ubifs_znode *znode) { return; } |
| 433 | #define dbg_check_cats(c) 0 | 433 | static inline void dbg_dump_heap(struct ubifs_info *c, |
| 434 | #define dbg_check_ltab(c) 0 | 434 | struct ubifs_lpt_heap *heap, |
| 435 | #define dbg_chk_lpt_free_spc(c) 0 | 435 | int cat) { return; } |
| 436 | #define dbg_chk_lpt_sz(c, action, len) 0 | 436 | static inline void dbg_dump_pnode(struct ubifs_info *c, |
| 437 | #define dbg_check_synced_i_size(inode) 0 | 437 | struct ubifs_pnode *pnode, |
| 438 | #define dbg_check_dir_size(c, dir) 0 | 438 | struct ubifs_nnode *parent, |
| 439 | #define dbg_check_tnc(c, x) 0 | 439 | int iip) { return; } |
| 440 | #define dbg_check_idx_size(c, idx_size) 0 | 440 | static inline void dbg_dump_tnc(struct ubifs_info *c) { return; } |
| 441 | #define dbg_check_filesystem(c) 0 | 441 | static inline void dbg_dump_index(struct ubifs_info *c) { return; } |
| 442 | #define dbg_check_heap(c, heap, cat, add_pos) ({}) | 442 | static inline void dbg_dump_lpt_lebs(const struct ubifs_info *c) { return; } |
| 443 | #define dbg_check_lprops(c) 0 | 443 | |
| 444 | #define dbg_check_lpt_nodes(c, cnode, row, col) 0 | 444 | static inline int dbg_walk_index(struct ubifs_info *c, |
| 445 | #define dbg_check_inode_size(c, inode, size) 0 | 445 | dbg_leaf_callback leaf_cb, |
| 446 | #define dbg_check_data_nodes_order(c, head) 0 | 446 | dbg_znode_callback znode_cb, |
| 447 | #define dbg_check_nondata_nodes_order(c, head) 0 | 447 | void *priv) { return 0; } |
| 448 | #define dbg_force_in_the_gaps_enabled 0 | 448 | static inline void dbg_save_space_info(struct ubifs_info *c) { return; } |
| 449 | #define dbg_force_in_the_gaps() 0 | 449 | static inline int dbg_check_space_info(struct ubifs_info *c) { return 0; } |
| 450 | #define dbg_failure_mode 0 | 450 | static inline int dbg_check_lprops(struct ubifs_info *c) { return 0; } |
| 451 | 451 | static inline int | |
| 452 | #define dbg_debugfs_init() 0 | 452 | dbg_old_index_check_init(struct ubifs_info *c, |
| 453 | #define dbg_debugfs_exit() | 453 | struct ubifs_zbranch *zroot) { return 0; } |
| 454 | #define dbg_debugfs_init_fs(c) 0 | 454 | static inline int |
| 455 | #define dbg_debugfs_exit_fs(c) 0 | 455 | dbg_check_old_index(struct ubifs_info *c, |
| 456 | struct ubifs_zbranch *zroot) { return 0; } | ||
| 457 | static inline int dbg_check_cats(struct ubifs_info *c) { return 0; } | ||
| 458 | static inline int dbg_check_ltab(struct ubifs_info *c) { return 0; } | ||
| 459 | static inline int dbg_chk_lpt_free_spc(struct ubifs_info *c) { return 0; } | ||
| 460 | static inline int dbg_chk_lpt_sz(struct ubifs_info *c, | ||
| 461 | int action, int len) { return 0; } | ||
| 462 | static inline int dbg_check_synced_i_size(struct inode *inode) { return 0; } | ||
| 463 | static inline int dbg_check_dir_size(struct ubifs_info *c, | ||
| 464 | const struct inode *dir) { return 0; } | ||
| 465 | static inline int dbg_check_tnc(struct ubifs_info *c, int extra) { return 0; } | ||
| 466 | static inline int dbg_check_idx_size(struct ubifs_info *c, | ||
| 467 | long long idx_size) { return 0; } | ||
| 468 | static inline int dbg_check_filesystem(struct ubifs_info *c) { return 0; } | ||
| 469 | static inline void dbg_check_heap(struct ubifs_info *c, | ||
| 470 | struct ubifs_lpt_heap *heap, | ||
| 471 | int cat, int add_pos) { return; } | ||
| 472 | static inline int dbg_check_lpt_nodes(struct ubifs_info *c, | ||
| 473 | struct ubifs_cnode *cnode, int row, int col) { return 0; } | ||
| 474 | static inline int dbg_check_inode_size(struct ubifs_info *c, | ||
| 475 | const struct inode *inode, | ||
| 476 | loff_t size) { return 0; } | ||
| 477 | static inline int | ||
| 478 | dbg_check_data_nodes_order(struct ubifs_info *c, | ||
| 479 | struct list_head *head) { return 0; } | ||
| 480 | static inline int | ||
| 481 | dbg_check_nondata_nodes_order(struct ubifs_info *c, | ||
| 482 | struct list_head *head) { return 0; } | ||
| 483 | |||
| 484 | static inline int dbg_force_in_the_gaps(void) { return 0; } | ||
| 485 | #define dbg_force_in_the_gaps_enabled 0 | ||
| 486 | #define dbg_failure_mode 0 | ||
| 487 | |||
| 488 | static inline int dbg_debugfs_init(void) { return 0; } | ||
| 489 | static inline void dbg_debugfs_exit(void) { return; } | ||
| 490 | static inline int dbg_debugfs_init_fs(struct ubifs_info *c) { return 0; } | ||
| 491 | static inline int dbg_debugfs_exit_fs(struct ubifs_info *c) { return 0; } | ||
| 456 | 492 | ||
| 457 | #endif /* !CONFIG_UBIFS_FS_DEBUG */ | 493 | #endif /* !CONFIG_UBIFS_FS_DEBUG */ |
| 458 | #endif /* !__UBIFS_DEBUG_H__ */ | 494 | #endif /* !__UBIFS_DEBUG_H__ */ |
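A minimal sketch of the idiom behind the debug.h conversion above, with hypothetical names: a static inline stub is still type-checked and still evaluates its arguments, but compiles away, whereas an object-like macro such as "#define dbg_check_thing(c) 0" accepts anything and can hide bit-rot in non-debug builds.

    struct fs_info;

    #ifdef CONFIG_MYFS_DEBUG
    int dbg_check_thing(struct fs_info *c);
    #else
    static inline int dbg_check_thing(struct fs_info *c) { return 0; }
    #endif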
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 28be1e6a65e8..b286db79c686 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c | |||
| @@ -1312,6 +1312,9 @@ int ubifs_fsync(struct file *file, int datasync) | |||
| 1312 | 1312 | ||
| 1313 | dbg_gen("syncing inode %lu", inode->i_ino); | 1313 | dbg_gen("syncing inode %lu", inode->i_ino); |
| 1314 | 1314 | ||
| 1315 | if (inode->i_sb->s_flags & MS_RDONLY) | ||
| 1316 | return 0; | ||
| 1317 | |||
| 1315 | /* | 1318 | /* |
| 1316 | * VFS has already synchronized dirty pages for this inode. Synchronize | 1319 | * VFS has already synchronized dirty pages for this inode. Synchronize |
| 1317 | * the inode unless this is a 'datasync()' call. | 1320 | * the inode unless this is a 'datasync()' call. |
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index 5ea402023ebd..9ef9ed2cfe2e 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c | |||
| @@ -293,7 +293,6 @@ xfs_buf_allocate_memory( | |||
| 293 | size_t nbytes, offset; | 293 | size_t nbytes, offset; |
| 294 | gfp_t gfp_mask = xb_to_gfp(flags); | 294 | gfp_t gfp_mask = xb_to_gfp(flags); |
| 295 | unsigned short page_count, i; | 295 | unsigned short page_count, i; |
| 296 | pgoff_t first; | ||
| 297 | xfs_off_t end; | 296 | xfs_off_t end; |
| 298 | int error; | 297 | int error; |
| 299 | 298 | ||
| @@ -333,7 +332,6 @@ use_alloc_page: | |||
| 333 | return error; | 332 | return error; |
| 334 | 333 | ||
| 335 | offset = bp->b_offset; | 334 | offset = bp->b_offset; |
| 336 | first = bp->b_file_offset >> PAGE_SHIFT; | ||
| 337 | bp->b_flags |= _XBF_PAGES; | 335 | bp->b_flags |= _XBF_PAGES; |
| 338 | 336 | ||
| 339 | for (i = 0; i < bp->b_page_count; i++) { | 337 | for (i = 0; i < bp->b_page_count; i++) { |
| @@ -657,8 +655,6 @@ xfs_buf_readahead( | |||
| 657 | xfs_off_t ioff, | 655 | xfs_off_t ioff, |
| 658 | size_t isize) | 656 | size_t isize) |
| 659 | { | 657 | { |
| 660 | struct backing_dev_info *bdi; | ||
| 661 | |||
| 662 | if (bdi_read_congested(target->bt_bdi)) | 658 | if (bdi_read_congested(target->bt_bdi)) |
| 663 | return; | 659 | return; |
| 664 | 660 | ||
| @@ -919,8 +915,6 @@ xfs_buf_lock( | |||
| 919 | 915 | ||
| 920 | if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE)) | 916 | if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE)) |
| 921 | xfs_log_force(bp->b_target->bt_mount, 0); | 917 | xfs_log_force(bp->b_target->bt_mount, 0); |
| 922 | if (atomic_read(&bp->b_io_remaining)) | ||
| 923 | blk_flush_plug(current); | ||
| 924 | down(&bp->b_sema); | 918 | down(&bp->b_sema); |
| 925 | XB_SET_OWNER(bp); | 919 | XB_SET_OWNER(bp); |
| 926 | 920 | ||
| @@ -1309,8 +1303,6 @@ xfs_buf_iowait( | |||
| 1309 | { | 1303 | { |
| 1310 | trace_xfs_buf_iowait(bp, _RET_IP_); | 1304 | trace_xfs_buf_iowait(bp, _RET_IP_); |
| 1311 | 1305 | ||
| 1312 | if (atomic_read(&bp->b_io_remaining)) | ||
| 1313 | blk_flush_plug(current); | ||
| 1314 | wait_for_completion(&bp->b_iowait); | 1306 | wait_for_completion(&bp->b_iowait); |
| 1315 | 1307 | ||
| 1316 | trace_xfs_buf_iowait_done(bp, _RET_IP_); | 1308 | trace_xfs_buf_iowait_done(bp, _RET_IP_); |
| @@ -1747,8 +1739,8 @@ xfsbufd( | |||
| 1747 | do { | 1739 | do { |
| 1748 | long age = xfs_buf_age_centisecs * msecs_to_jiffies(10); | 1740 | long age = xfs_buf_age_centisecs * msecs_to_jiffies(10); |
| 1749 | long tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10); | 1741 | long tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10); |
| 1750 | int count = 0; | ||
| 1751 | struct list_head tmp; | 1742 | struct list_head tmp; |
| 1743 | struct blk_plug plug; | ||
| 1752 | 1744 | ||
| 1753 | if (unlikely(freezing(current))) { | 1745 | if (unlikely(freezing(current))) { |
| 1754 | set_bit(XBT_FORCE_SLEEP, &target->bt_flags); | 1746 | set_bit(XBT_FORCE_SLEEP, &target->bt_flags); |
| @@ -1764,16 +1756,15 @@ xfsbufd( | |||
| 1764 | 1756 | ||
| 1765 | xfs_buf_delwri_split(target, &tmp, age); | 1757 | xfs_buf_delwri_split(target, &tmp, age); |
| 1766 | list_sort(NULL, &tmp, xfs_buf_cmp); | 1758 | list_sort(NULL, &tmp, xfs_buf_cmp); |
| 1759 | |||
| 1760 | blk_start_plug(&plug); | ||
| 1767 | while (!list_empty(&tmp)) { | 1761 | while (!list_empty(&tmp)) { |
| 1768 | struct xfs_buf *bp; | 1762 | struct xfs_buf *bp; |
| 1769 | bp = list_first_entry(&tmp, struct xfs_buf, b_list); | 1763 | bp = list_first_entry(&tmp, struct xfs_buf, b_list); |
| 1770 | list_del_init(&bp->b_list); | 1764 | list_del_init(&bp->b_list); |
| 1771 | xfs_bdstrat_cb(bp); | 1765 | xfs_bdstrat_cb(bp); |
| 1772 | count++; | ||
| 1773 | } | 1766 | } |
| 1774 | if (count) | 1767 | blk_finish_plug(&plug); |
| 1775 | blk_flush_plug(current); | ||
| 1776 | |||
| 1777 | } while (!kthread_should_stop()); | 1768 | } while (!kthread_should_stop()); |
| 1778 | 1769 | ||
| 1779 | return 0; | 1770 | return 0; |
| @@ -1793,6 +1784,7 @@ xfs_flush_buftarg( | |||
| 1793 | int pincount = 0; | 1784 | int pincount = 0; |
| 1794 | LIST_HEAD(tmp_list); | 1785 | LIST_HEAD(tmp_list); |
| 1795 | LIST_HEAD(wait_list); | 1786 | LIST_HEAD(wait_list); |
| 1787 | struct blk_plug plug; | ||
| 1796 | 1788 | ||
| 1797 | xfs_buf_runall_queues(xfsconvertd_workqueue); | 1789 | xfs_buf_runall_queues(xfsconvertd_workqueue); |
| 1798 | xfs_buf_runall_queues(xfsdatad_workqueue); | 1790 | xfs_buf_runall_queues(xfsdatad_workqueue); |
| @@ -1807,6 +1799,8 @@ xfs_flush_buftarg( | |||
| 1807 | * we do that after issuing all the IO. | 1799 | * we do that after issuing all the IO. |
| 1808 | */ | 1800 | */ |
| 1809 | list_sort(NULL, &tmp_list, xfs_buf_cmp); | 1801 | list_sort(NULL, &tmp_list, xfs_buf_cmp); |
| 1802 | |||
| 1803 | blk_start_plug(&plug); | ||
| 1810 | while (!list_empty(&tmp_list)) { | 1804 | while (!list_empty(&tmp_list)) { |
| 1811 | bp = list_first_entry(&tmp_list, struct xfs_buf, b_list); | 1805 | bp = list_first_entry(&tmp_list, struct xfs_buf, b_list); |
| 1812 | ASSERT(target == bp->b_target); | 1806 | ASSERT(target == bp->b_target); |
| @@ -1817,10 +1811,10 @@ xfs_flush_buftarg( | |||
| 1817 | } | 1811 | } |
| 1818 | xfs_bdstrat_cb(bp); | 1812 | xfs_bdstrat_cb(bp); |
| 1819 | } | 1813 | } |
| 1814 | blk_finish_plug(&plug); | ||
| 1820 | 1815 | ||
| 1821 | if (wait) { | 1816 | if (wait) { |
| 1822 | /* Expedite and wait for IO to complete. */ | 1817 | /* Wait for IO to complete. */ |
| 1823 | blk_flush_plug(current); | ||
| 1824 | while (!list_empty(&wait_list)) { | 1818 | while (!list_empty(&wait_list)) { |
| 1825 | bp = list_first_entry(&wait_list, struct xfs_buf, b_list); | 1819 | bp = list_first_entry(&wait_list, struct xfs_buf, b_list); |
| 1826 | 1820 | ||
diff --git a/fs/xfs/linux-2.6/xfs_message.c b/fs/xfs/linux-2.6/xfs_message.c index 508e06fd7d1e..3ca795609113 100644 --- a/fs/xfs/linux-2.6/xfs_message.c +++ b/fs/xfs/linux-2.6/xfs_message.c | |||
| @@ -28,53 +28,47 @@ | |||
| 28 | /* | 28 | /* |
| 29 | * XFS logging functions | 29 | * XFS logging functions |
| 30 | */ | 30 | */ |
| 31 | static int | 31 | static void |
| 32 | __xfs_printk( | 32 | __xfs_printk( |
| 33 | const char *level, | 33 | const char *level, |
| 34 | const struct xfs_mount *mp, | 34 | const struct xfs_mount *mp, |
| 35 | struct va_format *vaf) | 35 | struct va_format *vaf) |
| 36 | { | 36 | { |
| 37 | if (mp && mp->m_fsname) | 37 | if (mp && mp->m_fsname) |
| 38 | return printk("%sXFS (%s): %pV\n", level, mp->m_fsname, vaf); | 38 | printk("%sXFS (%s): %pV\n", level, mp->m_fsname, vaf); |
| 39 | return printk("%sXFS: %pV\n", level, vaf); | 39 | printk("%sXFS: %pV\n", level, vaf); |
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | int xfs_printk( | 42 | void xfs_printk( |
| 43 | const char *level, | 43 | const char *level, |
| 44 | const struct xfs_mount *mp, | 44 | const struct xfs_mount *mp, |
| 45 | const char *fmt, ...) | 45 | const char *fmt, ...) |
| 46 | { | 46 | { |
| 47 | struct va_format vaf; | 47 | struct va_format vaf; |
| 48 | va_list args; | 48 | va_list args; |
| 49 | int r; | ||
| 50 | 49 | ||
| 51 | va_start(args, fmt); | 50 | va_start(args, fmt); |
| 52 | 51 | ||
| 53 | vaf.fmt = fmt; | 52 | vaf.fmt = fmt; |
| 54 | vaf.va = &args; | 53 | vaf.va = &args; |
| 55 | 54 | ||
| 56 | r = __xfs_printk(level, mp, &vaf); | 55 | __xfs_printk(level, mp, &vaf); |
| 57 | va_end(args); | 56 | va_end(args); |
| 58 | |||
| 59 | return r; | ||
| 60 | } | 57 | } |
| 61 | 58 | ||
| 62 | #define define_xfs_printk_level(func, kern_level) \ | 59 | #define define_xfs_printk_level(func, kern_level) \ |
| 63 | int func(const struct xfs_mount *mp, const char *fmt, ...) \ | 60 | void func(const struct xfs_mount *mp, const char *fmt, ...) \ |
| 64 | { \ | 61 | { \ |
| 65 | struct va_format vaf; \ | 62 | struct va_format vaf; \ |
| 66 | va_list args; \ | 63 | va_list args; \ |
| 67 | int r; \ | ||
| 68 | \ | 64 | \ |
| 69 | va_start(args, fmt); \ | 65 | va_start(args, fmt); \ |
| 70 | \ | 66 | \ |
| 71 | vaf.fmt = fmt; \ | 67 | vaf.fmt = fmt; \ |
| 72 | vaf.va = &args; \ | 68 | vaf.va = &args; \ |
| 73 | \ | 69 | \ |
| 74 | r = __xfs_printk(kern_level, mp, &vaf); \ | 70 | __xfs_printk(kern_level, mp, &vaf); \ |
| 75 | va_end(args); \ | 71 | va_end(args); \ |
| 76 | \ | ||
| 77 | return r; \ | ||
| 78 | } \ | 72 | } \ |
| 79 | 73 | ||
| 80 | define_xfs_printk_level(xfs_emerg, KERN_EMERG); | 74 | define_xfs_printk_level(xfs_emerg, KERN_EMERG); |
| @@ -88,7 +82,7 @@ define_xfs_printk_level(xfs_info, KERN_INFO); | |||
| 88 | define_xfs_printk_level(xfs_debug, KERN_DEBUG); | 82 | define_xfs_printk_level(xfs_debug, KERN_DEBUG); |
| 89 | #endif | 83 | #endif |
| 90 | 84 | ||
| 91 | int | 85 | void |
| 92 | xfs_alert_tag( | 86 | xfs_alert_tag( |
| 93 | const struct xfs_mount *mp, | 87 | const struct xfs_mount *mp, |
| 94 | int panic_tag, | 88 | int panic_tag, |
| @@ -97,7 +91,6 @@ xfs_alert_tag( | |||
| 97 | struct va_format vaf; | 91 | struct va_format vaf; |
| 98 | va_list args; | 92 | va_list args; |
| 99 | int do_panic = 0; | 93 | int do_panic = 0; |
| 100 | int r; | ||
| 101 | 94 | ||
| 102 | if (xfs_panic_mask && (xfs_panic_mask & panic_tag)) { | 95 | if (xfs_panic_mask && (xfs_panic_mask & panic_tag)) { |
| 103 | xfs_printk(KERN_ALERT, mp, | 96 | xfs_printk(KERN_ALERT, mp, |
| @@ -110,12 +103,10 @@ xfs_alert_tag( | |||
| 110 | vaf.fmt = fmt; | 103 | vaf.fmt = fmt; |
| 111 | vaf.va = &args; | 104 | vaf.va = &args; |
| 112 | 105 | ||
| 113 | r = __xfs_printk(KERN_ALERT, mp, &vaf); | 106 | __xfs_printk(KERN_ALERT, mp, &vaf); |
| 114 | va_end(args); | 107 | va_end(args); |
| 115 | 108 | ||
| 116 | BUG_ON(do_panic); | 109 | BUG_ON(do_panic); |
| 117 | |||
| 118 | return r; | ||
| 119 | } | 110 | } |
| 120 | 111 | ||
| 121 | void | 112 | void |
diff --git a/fs/xfs/linux-2.6/xfs_message.h b/fs/xfs/linux-2.6/xfs_message.h index e77ffa16745b..f1b3fc1b6c4e 100644 --- a/fs/xfs/linux-2.6/xfs_message.h +++ b/fs/xfs/linux-2.6/xfs_message.h | |||
| @@ -3,32 +3,34 @@ | |||
| 3 | 3 | ||
| 4 | struct xfs_mount; | 4 | struct xfs_mount; |
| 5 | 5 | ||
| 6 | extern int xfs_printk(const char *level, const struct xfs_mount *mp, | 6 | extern void xfs_printk(const char *level, const struct xfs_mount *mp, |
| 7 | const char *fmt, ...) | 7 | const char *fmt, ...) |
| 8 | __attribute__ ((format (printf, 3, 4))); | 8 | __attribute__ ((format (printf, 3, 4))); |
| 9 | extern int xfs_emerg(const struct xfs_mount *mp, const char *fmt, ...) | 9 | extern void xfs_emerg(const struct xfs_mount *mp, const char *fmt, ...) |
| 10 | __attribute__ ((format (printf, 2, 3))); | 10 | __attribute__ ((format (printf, 2, 3))); |
| 11 | extern int xfs_alert(const struct xfs_mount *mp, const char *fmt, ...) | 11 | extern void xfs_alert(const struct xfs_mount *mp, const char *fmt, ...) |
| 12 | __attribute__ ((format (printf, 2, 3))); | 12 | __attribute__ ((format (printf, 2, 3))); |
| 13 | extern int xfs_alert_tag(const struct xfs_mount *mp, int tag, | 13 | extern void xfs_alert_tag(const struct xfs_mount *mp, int tag, |
| 14 | const char *fmt, ...) | 14 | const char *fmt, ...) |
| 15 | __attribute__ ((format (printf, 3, 4))); | 15 | __attribute__ ((format (printf, 3, 4))); |
| 16 | extern int xfs_crit(const struct xfs_mount *mp, const char *fmt, ...) | 16 | extern void xfs_crit(const struct xfs_mount *mp, const char *fmt, ...) |
| 17 | __attribute__ ((format (printf, 2, 3))); | 17 | __attribute__ ((format (printf, 2, 3))); |
| 18 | extern int xfs_err(const struct xfs_mount *mp, const char *fmt, ...) | 18 | extern void xfs_err(const struct xfs_mount *mp, const char *fmt, ...) |
| 19 | __attribute__ ((format (printf, 2, 3))); | 19 | __attribute__ ((format (printf, 2, 3))); |
| 20 | extern int xfs_warn(const struct xfs_mount *mp, const char *fmt, ...) | 20 | extern void xfs_warn(const struct xfs_mount *mp, const char *fmt, ...) |
| 21 | __attribute__ ((format (printf, 2, 3))); | 21 | __attribute__ ((format (printf, 2, 3))); |
| 22 | extern int xfs_notice(const struct xfs_mount *mp, const char *fmt, ...) | 22 | extern void xfs_notice(const struct xfs_mount *mp, const char *fmt, ...) |
| 23 | __attribute__ ((format (printf, 2, 3))); | 23 | __attribute__ ((format (printf, 2, 3))); |
| 24 | extern int xfs_info(const struct xfs_mount *mp, const char *fmt, ...) | 24 | extern void xfs_info(const struct xfs_mount *mp, const char *fmt, ...) |
| 25 | __attribute__ ((format (printf, 2, 3))); | 25 | __attribute__ ((format (printf, 2, 3))); |
| 26 | 26 | ||
| 27 | #ifdef DEBUG | 27 | #ifdef DEBUG |
| 28 | extern int xfs_debug(const struct xfs_mount *mp, const char *fmt, ...) | 28 | extern void xfs_debug(const struct xfs_mount *mp, const char *fmt, ...) |
| 29 | __attribute__ ((format (printf, 2, 3))); | 29 | __attribute__ ((format (printf, 2, 3))); |
| 30 | #else | 30 | #else |
| 31 | #define xfs_debug(mp, fmt, ...) (0) | 31 | static inline void xfs_debug(const struct xfs_mount *mp, const char *fmt, ...) |
| 32 | { | ||
| 33 | } | ||
| 32 | #endif | 34 | #endif |
| 33 | 35 | ||
| 34 | extern void assfail(char *expr, char *f, int l); | 36 | extern void assfail(char *expr, char *f, int l); |
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 1ba5c451da36..b38e58d02299 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c | |||
| @@ -816,75 +816,6 @@ xfs_setup_devices( | |||
| 816 | return 0; | 816 | return 0; |
| 817 | } | 817 | } |
| 818 | 818 | ||
| 819 | /* | ||
| 820 | * XFS AIL push thread support | ||
| 821 | */ | ||
| 822 | void | ||
| 823 | xfsaild_wakeup( | ||
| 824 | struct xfs_ail *ailp, | ||
| 825 | xfs_lsn_t threshold_lsn) | ||
| 826 | { | ||
| 827 | /* only ever move the target forwards */ | ||
| 828 | if (XFS_LSN_CMP(threshold_lsn, ailp->xa_target) > 0) { | ||
| 829 | ailp->xa_target = threshold_lsn; | ||
| 830 | wake_up_process(ailp->xa_task); | ||
| 831 | } | ||
| 832 | } | ||
| 833 | |||
| 834 | STATIC int | ||
| 835 | xfsaild( | ||
| 836 | void *data) | ||
| 837 | { | ||
| 838 | struct xfs_ail *ailp = data; | ||
| 839 | xfs_lsn_t last_pushed_lsn = 0; | ||
| 840 | long tout = 0; /* milliseconds */ | ||
| 841 | |||
| 842 | while (!kthread_should_stop()) { | ||
| 843 | /* | ||
| 844 | * for short sleeps indicating congestion, don't allow us to | ||
| 845 | * get woken early. Otherwise all we do is bang on the AIL lock | ||
| 846 | * without making progress. | ||
| 847 | */ | ||
| 848 | if (tout && tout <= 20) | ||
| 849 | __set_current_state(TASK_KILLABLE); | ||
| 850 | else | ||
| 851 | __set_current_state(TASK_INTERRUPTIBLE); | ||
| 852 | schedule_timeout(tout ? | ||
| 853 | msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT); | ||
| 854 | |||
| 855 | /* swsusp */ | ||
| 856 | try_to_freeze(); | ||
| 857 | |||
| 858 | ASSERT(ailp->xa_mount->m_log); | ||
| 859 | if (XFS_FORCED_SHUTDOWN(ailp->xa_mount)) | ||
| 860 | continue; | ||
| 861 | |||
| 862 | tout = xfsaild_push(ailp, &last_pushed_lsn); | ||
| 863 | } | ||
| 864 | |||
| 865 | return 0; | ||
| 866 | } /* xfsaild */ | ||
| 867 | |||
| 868 | int | ||
| 869 | xfsaild_start( | ||
| 870 | struct xfs_ail *ailp) | ||
| 871 | { | ||
| 872 | ailp->xa_target = 0; | ||
| 873 | ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s", | ||
| 874 | ailp->xa_mount->m_fsname); | ||
| 875 | if (IS_ERR(ailp->xa_task)) | ||
| 876 | return -PTR_ERR(ailp->xa_task); | ||
| 877 | return 0; | ||
| 878 | } | ||
| 879 | |||
| 880 | void | ||
| 881 | xfsaild_stop( | ||
| 882 | struct xfs_ail *ailp) | ||
| 883 | { | ||
| 884 | kthread_stop(ailp->xa_task); | ||
| 885 | } | ||
| 886 | |||
| 887 | |||
| 888 | /* Catch misguided souls that try to use this interface on XFS */ | 819 | /* Catch misguided souls that try to use this interface on XFS */ |
| 889 | STATIC struct inode * | 820 | STATIC struct inode * |
| 890 | xfs_fs_alloc_inode( | 821 | xfs_fs_alloc_inode( |
| @@ -1191,22 +1122,12 @@ xfs_fs_sync_fs( | |||
| 1191 | return -error; | 1122 | return -error; |
| 1192 | 1123 | ||
| 1193 | if (laptop_mode) { | 1124 | if (laptop_mode) { |
| 1194 | int prev_sync_seq = mp->m_sync_seq; | ||
| 1195 | |||
| 1196 | /* | 1125 | /* |
| 1197 | * The disk must be active because we're syncing. | 1126 | * The disk must be active because we're syncing. |
| 1198 | * We schedule xfssyncd now (now that the disk is | 1127 | * We schedule xfssyncd now (now that the disk is |
| 1199 | * active) instead of later (when it might not be). | 1128 | * active) instead of later (when it might not be). |
| 1200 | */ | 1129 | */ |
| 1201 | wake_up_process(mp->m_sync_task); | 1130 | flush_delayed_work_sync(&mp->m_sync_work); |
| 1202 | /* | ||
| 1203 | * We have to wait for the sync iteration to complete. | ||
| 1204 | * If we don't, the disk activity caused by the sync | ||
| 1205 | * will come after the sync is completed, and that | ||
| 1206 | * triggers another sync from laptop mode. | ||
| 1207 | */ | ||
| 1208 | wait_event(mp->m_wait_single_sync_task, | ||
| 1209 | mp->m_sync_seq != prev_sync_seq); | ||
| 1210 | } | 1131 | } |
| 1211 | 1132 | ||
| 1212 | return 0; | 1133 | return 0; |
| @@ -1490,9 +1411,6 @@ xfs_fs_fill_super( | |||
| 1490 | spin_lock_init(&mp->m_sb_lock); | 1411 | spin_lock_init(&mp->m_sb_lock); |
| 1491 | mutex_init(&mp->m_growlock); | 1412 | mutex_init(&mp->m_growlock); |
| 1492 | atomic_set(&mp->m_active_trans, 0); | 1413 | atomic_set(&mp->m_active_trans, 0); |
| 1493 | INIT_LIST_HEAD(&mp->m_sync_list); | ||
| 1494 | spin_lock_init(&mp->m_sync_lock); | ||
| 1495 | init_waitqueue_head(&mp->m_wait_single_sync_task); | ||
| 1496 | 1414 | ||
| 1497 | mp->m_super = sb; | 1415 | mp->m_super = sb; |
| 1498 | sb->s_fs_info = mp; | 1416 | sb->s_fs_info = mp; |
| @@ -1799,6 +1717,38 @@ xfs_destroy_zones(void) | |||
| 1799 | } | 1717 | } |
| 1800 | 1718 | ||
| 1801 | STATIC int __init | 1719 | STATIC int __init |
| 1720 | xfs_init_workqueues(void) | ||
| 1721 | { | ||
| 1722 | /* | ||
| 1723 | * max_active is set to 8 to give enough concurrency to allow | ||
| 1724 | * multiple work operations on each CPU to run. This allows multiple | ||
| 1725 | * filesystems to be running sync work concurrently, and scales with | ||
| 1726 | * the number of CPUs in the system. | ||
| 1727 | */ | ||
| 1728 | xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8); | ||
| 1729 | if (!xfs_syncd_wq) | ||
| 1730 | goto out; | ||
| 1731 | |||
| 1732 | xfs_ail_wq = alloc_workqueue("xfsail", WQ_CPU_INTENSIVE, 8); | ||
| 1733 | if (!xfs_ail_wq) | ||
| 1734 | goto out_destroy_syncd; | ||
| 1735 | |||
| 1736 | return 0; | ||
| 1737 | |||
| 1738 | out_destroy_syncd: | ||
| 1739 | destroy_workqueue(xfs_syncd_wq); | ||
| 1740 | out: | ||
| 1741 | return -ENOMEM; | ||
| 1742 | } | ||
| 1743 | |||
| 1744 | STATIC void | ||
| 1745 | xfs_destroy_workqueues(void) | ||
| 1746 | { | ||
| 1747 | destroy_workqueue(xfs_ail_wq); | ||
| 1748 | destroy_workqueue(xfs_syncd_wq); | ||
| 1749 | } | ||
| 1750 | |||
| 1751 | STATIC int __init | ||
| 1802 | init_xfs_fs(void) | 1752 | init_xfs_fs(void) |
| 1803 | { | 1753 | { |
| 1804 | int error; | 1754 | int error; |
| @@ -1813,10 +1763,14 @@ init_xfs_fs(void) | |||
| 1813 | if (error) | 1763 | if (error) |
| 1814 | goto out; | 1764 | goto out; |
| 1815 | 1765 | ||
| 1816 | error = xfs_mru_cache_init(); | 1766 | error = xfs_init_workqueues(); |
| 1817 | if (error) | 1767 | if (error) |
| 1818 | goto out_destroy_zones; | 1768 | goto out_destroy_zones; |
| 1819 | 1769 | ||
| 1770 | error = xfs_mru_cache_init(); | ||
| 1771 | if (error) | ||
| 1772 | goto out_destroy_wq; | ||
| 1773 | |||
| 1820 | error = xfs_filestream_init(); | 1774 | error = xfs_filestream_init(); |
| 1821 | if (error) | 1775 | if (error) |
| 1822 | goto out_mru_cache_uninit; | 1776 | goto out_mru_cache_uninit; |
| @@ -1833,6 +1787,10 @@ init_xfs_fs(void) | |||
| 1833 | if (error) | 1787 | if (error) |
| 1834 | goto out_cleanup_procfs; | 1788 | goto out_cleanup_procfs; |
| 1835 | 1789 | ||
| 1790 | error = xfs_init_workqueues(); | ||
| 1791 | if (error) | ||
| 1792 | goto out_sysctl_unregister; | ||
| 1793 | |||
| 1836 | vfs_initquota(); | 1794 | vfs_initquota(); |
| 1837 | 1795 | ||
| 1838 | error = register_filesystem(&xfs_fs_type); | 1796 | error = register_filesystem(&xfs_fs_type); |
| @@ -1850,6 +1808,8 @@ init_xfs_fs(void) | |||
| 1850 | xfs_filestream_uninit(); | 1808 | xfs_filestream_uninit(); |
| 1851 | out_mru_cache_uninit: | 1809 | out_mru_cache_uninit: |
| 1852 | xfs_mru_cache_uninit(); | 1810 | xfs_mru_cache_uninit(); |
| 1811 | out_destroy_wq: | ||
| 1812 | xfs_destroy_workqueues(); | ||
| 1853 | out_destroy_zones: | 1813 | out_destroy_zones: |
| 1854 | xfs_destroy_zones(); | 1814 | xfs_destroy_zones(); |
| 1855 | out: | 1815 | out: |
| @@ -1866,6 +1826,7 @@ exit_xfs_fs(void) | |||
| 1866 | xfs_buf_terminate(); | 1826 | xfs_buf_terminate(); |
| 1867 | xfs_filestream_uninit(); | 1827 | xfs_filestream_uninit(); |
| 1868 | xfs_mru_cache_uninit(); | 1828 | xfs_mru_cache_uninit(); |
| 1829 | xfs_destroy_workqueues(); | ||
| 1869 | xfs_destroy_zones(); | 1830 | xfs_destroy_zones(); |
| 1870 | } | 1831 | } |
| 1871 | 1832 | ||
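A minimal sketch of the alloc_workqueue()/destroy_workqueue() pairing introduced by xfs_init_workqueues() above, using the same flags; the workqueue name here is illustrative.

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *example_wq;

    static int __init example_init_wq(void)
    {
            /* WQ_CPU_INTENSIVE with max_active = 8, per the rationale in the
             * xfs_init_workqueues() comment above */
            example_wq = alloc_workqueue("examplewq", WQ_CPU_INTENSIVE, 8);
            if (!example_wq)
                    return -ENOMEM;
            return 0;
    }

    static void example_destroy_wq(void)
    {
            destroy_workqueue(example_wq);
    }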
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index 9cf35a688f53..e4f9c1b0836c 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include "xfs_log.h" | 22 | #include "xfs_log.h" |
| 23 | #include "xfs_inum.h" | 23 | #include "xfs_inum.h" |
| 24 | #include "xfs_trans.h" | 24 | #include "xfs_trans.h" |
| 25 | #include "xfs_trans_priv.h" | ||
| 25 | #include "xfs_sb.h" | 26 | #include "xfs_sb.h" |
| 26 | #include "xfs_ag.h" | 27 | #include "xfs_ag.h" |
| 27 | #include "xfs_mount.h" | 28 | #include "xfs_mount.h" |
| @@ -39,6 +40,8 @@ | |||
| 39 | #include <linux/kthread.h> | 40 | #include <linux/kthread.h> |
| 40 | #include <linux/freezer.h> | 41 | #include <linux/freezer.h> |
| 41 | 42 | ||
| 43 | struct workqueue_struct *xfs_syncd_wq; /* sync workqueue */ | ||
| 44 | |||
| 42 | /* | 45 | /* |
| 43 | * The inode lookup is done in batches to keep the amount of lock traffic and | 46 | * The inode lookup is done in batches to keep the amount of lock traffic and |
| 44 | * radix tree lookups to a minimum. The batch size is a trade off between | 47 | * radix tree lookups to a minimum. The batch size is a trade off between |
| @@ -431,62 +434,12 @@ xfs_quiesce_attr( | |||
| 431 | xfs_unmountfs_writesb(mp); | 434 | xfs_unmountfs_writesb(mp); |
| 432 | } | 435 | } |
| 433 | 436 | ||
| 434 | /* | 437 | static void |
| 435 | * Enqueue a work item to be picked up by the vfs xfssyncd thread. | 438 | xfs_syncd_queue_sync( |
| 436 | * Doing this has two advantages: | 439 | struct xfs_mount *mp) |
| 437 | * - It saves on stack space, which is tight in certain situations | ||
| 438 | * - It can be used (with care) as a mechanism to avoid deadlocks. | ||
| 439 | * Flushing while allocating in a full filesystem requires both. | ||
| 440 | */ | ||
| 441 | STATIC void | ||
| 442 | xfs_syncd_queue_work( | ||
| 443 | struct xfs_mount *mp, | ||
| 444 | void *data, | ||
| 445 | void (*syncer)(struct xfs_mount *, void *), | ||
| 446 | struct completion *completion) | ||
| 447 | { | ||
| 448 | struct xfs_sync_work *work; | ||
| 449 | |||
| 450 | work = kmem_alloc(sizeof(struct xfs_sync_work), KM_SLEEP); | ||
| 451 | INIT_LIST_HEAD(&work->w_list); | ||
| 452 | work->w_syncer = syncer; | ||
| 453 | work->w_data = data; | ||
| 454 | work->w_mount = mp; | ||
| 455 | work->w_completion = completion; | ||
| 456 | spin_lock(&mp->m_sync_lock); | ||
| 457 | list_add_tail(&work->w_list, &mp->m_sync_list); | ||
| 458 | spin_unlock(&mp->m_sync_lock); | ||
| 459 | wake_up_process(mp->m_sync_task); | ||
| 460 | } | ||
| 461 | |||
| 462 | /* | ||
| 463 | * Flush delayed allocate data, attempting to free up reserved space | ||
| 464 | * from existing allocations. At this point a new allocation attempt | ||
| 465 | * has failed with ENOSPC and we are in the process of scratching our | ||
| 466 | * heads, looking about for more room... | ||
| 467 | */ | ||
| 468 | STATIC void | ||
| 469 | xfs_flush_inodes_work( | ||
| 470 | struct xfs_mount *mp, | ||
| 471 | void *arg) | ||
| 472 | { | ||
| 473 | struct inode *inode = arg; | ||
| 474 | xfs_sync_data(mp, SYNC_TRYLOCK); | ||
| 475 | xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT); | ||
| 476 | iput(inode); | ||
| 477 | } | ||
| 478 | |||
| 479 | void | ||
| 480 | xfs_flush_inodes( | ||
| 481 | xfs_inode_t *ip) | ||
| 482 | { | 440 | { |
| 483 | struct inode *inode = VFS_I(ip); | 441 | queue_delayed_work(xfs_syncd_wq, &mp->m_sync_work, |
| 484 | DECLARE_COMPLETION_ONSTACK(completion); | 442 | msecs_to_jiffies(xfs_syncd_centisecs * 10)); |
| 485 | |||
| 486 | igrab(inode); | ||
| 487 | xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work, &completion); | ||
| 488 | wait_for_completion(&completion); | ||
| 489 | xfs_log_force(ip->i_mount, XFS_LOG_SYNC); | ||
| 490 | } | 443 | } |
| 491 | 444 | ||
| 492 | /* | 445 | /* |
| @@ -496,9 +449,10 @@ xfs_flush_inodes( | |||
| 496 | */ | 449 | */ |
| 497 | STATIC void | 450 | STATIC void |
| 498 | xfs_sync_worker( | 451 | xfs_sync_worker( |
| 499 | struct xfs_mount *mp, | 452 | struct work_struct *work) |
| 500 | void *unused) | ||
| 501 | { | 453 | { |
| 454 | struct xfs_mount *mp = container_of(to_delayed_work(work), | ||
| 455 | struct xfs_mount, m_sync_work); | ||
| 502 | int error; | 456 | int error; |
| 503 | 457 | ||
| 504 | if (!(mp->m_flags & XFS_MOUNT_RDONLY)) { | 458 | if (!(mp->m_flags & XFS_MOUNT_RDONLY)) { |
| @@ -508,73 +462,106 @@ xfs_sync_worker( | |||
| 508 | error = xfs_fs_log_dummy(mp); | 462 | error = xfs_fs_log_dummy(mp); |
| 509 | else | 463 | else |
| 510 | xfs_log_force(mp, 0); | 464 | xfs_log_force(mp, 0); |
| 511 | xfs_reclaim_inodes(mp, 0); | ||
| 512 | error = xfs_qm_sync(mp, SYNC_TRYLOCK); | 465 | error = xfs_qm_sync(mp, SYNC_TRYLOCK); |
| 466 | |||
| 467 | /* start pushing all the metadata that is currently dirty */ | ||
| 468 | xfs_ail_push_all(mp->m_ail); | ||
| 513 | } | 469 | } |
| 514 | mp->m_sync_seq++; | 470 | |
| 515 | wake_up(&mp->m_wait_single_sync_task); | 471 | /* queue us up again */ |
| 472 | xfs_syncd_queue_sync(mp); | ||
| 516 | } | 473 | } |
| 517 | 474 | ||
| 518 | STATIC int | 475 | /* |
| 519 | xfssyncd( | 476 | * Queue a new inode reclaim pass if there are reclaimable inodes and there |
| 520 | void *arg) | 477 | * isn't a reclaim pass already in progress. By default it runs every 5s based |
| 478 | * on the xfs syncd work default of 30s. Perhaps this should have its own | ||
| 479 | * tunable, but that can be done if this method proves to be ineffective or too | ||
| 480 | * aggressive. | ||
| 481 | */ | ||
| 482 | static void | ||
| 483 | xfs_syncd_queue_reclaim( | ||
| 484 | struct xfs_mount *mp) | ||
| 521 | { | 485 | { |
| 522 | struct xfs_mount *mp = arg; | ||
| 523 | long timeleft; | ||
| 524 | xfs_sync_work_t *work, *n; | ||
| 525 | LIST_HEAD (tmp); | ||
| 526 | |||
| 527 | set_freezable(); | ||
| 528 | timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10); | ||
| 529 | for (;;) { | ||
| 530 | if (list_empty(&mp->m_sync_list)) | ||
| 531 | timeleft = schedule_timeout_interruptible(timeleft); | ||
| 532 | /* swsusp */ | ||
| 533 | try_to_freeze(); | ||
| 534 | if (kthread_should_stop() && list_empty(&mp->m_sync_list)) | ||
| 535 | break; | ||
| 536 | 486 | ||
| 537 | spin_lock(&mp->m_sync_lock); | 487 | /* |
| 538 | /* | 488 | * We can have inodes enter reclaim after we've shut down the syncd |
| 539 | * We can get woken by laptop mode, to do a sync - | 489 | * workqueue during unmount, so don't allow reclaim work to be queued |
| 540 | * that's the (only!) case where the list would be | 490 | * during unmount. |
| 541 | * empty with time remaining. | 491 | */ |
| 542 | */ | 492 | if (!(mp->m_super->s_flags & MS_ACTIVE)) |
| 543 | if (!timeleft || list_empty(&mp->m_sync_list)) { | 493 | return; |
| 544 | if (!timeleft) | ||
| 545 | timeleft = xfs_syncd_centisecs * | ||
| 546 | msecs_to_jiffies(10); | ||
| 547 | INIT_LIST_HEAD(&mp->m_sync_work.w_list); | ||
| 548 | list_add_tail(&mp->m_sync_work.w_list, | ||
| 549 | &mp->m_sync_list); | ||
| 550 | } | ||
| 551 | list_splice_init(&mp->m_sync_list, &tmp); | ||
| 552 | spin_unlock(&mp->m_sync_lock); | ||
| 553 | 494 | ||
| 554 | list_for_each_entry_safe(work, n, &tmp, w_list) { | 495 | rcu_read_lock(); |
| 555 | (*work->w_syncer)(mp, work->w_data); | 496 | if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) { |
| 556 | list_del(&work->w_list); | 497 | queue_delayed_work(xfs_syncd_wq, &mp->m_reclaim_work, |
| 557 | if (work == &mp->m_sync_work) | 498 | msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10)); |
| 558 | continue; | ||
| 559 | if (work->w_completion) | ||
| 560 | complete(work->w_completion); | ||
| 561 | kmem_free(work); | ||
| 562 | } | ||
| 563 | } | 499 | } |
| 500 | rcu_read_unlock(); | ||
| 501 | } | ||
| 564 | 502 | ||
| 565 | return 0; | 503 | /* |
| 504 | * This is a fast pass over the inode cache to try to get reclaim moving on as | ||
| 505 | * many inodes as possible in a short period of time. It kicks itself every few | ||
| 506 | * seconds, as well as being kicked by the inode cache shrinker when memory | ||
| 507 | * goes low. It scans as quickly as possible avoiding locked inodes or those | ||
| 508 | * already being flushed, and once done schedules a future pass. | ||
| 509 | */ | ||
| 510 | STATIC void | ||
| 511 | xfs_reclaim_worker( | ||
| 512 | struct work_struct *work) | ||
| 513 | { | ||
| 514 | struct xfs_mount *mp = container_of(to_delayed_work(work), | ||
| 515 | struct xfs_mount, m_reclaim_work); | ||
| 516 | |||
| 517 | xfs_reclaim_inodes(mp, SYNC_TRYLOCK); | ||
| 518 | xfs_syncd_queue_reclaim(mp); | ||
| 519 | } | ||
| 520 | |||
| 521 | /* | ||
| 522 | * Flush delayed allocate data, attempting to free up reserved space | ||
| 523 | * from existing allocations. At this point a new allocation attempt | ||
| 524 | * has failed with ENOSPC and we are in the process of scratching our | ||
| 525 | * heads, looking about for more room. | ||
| 526 | * | ||
| 527 | * Queue a new data flush if there isn't one already in progress and | ||
| 528 | * wait for completion of the flush. This means that we only ever have one | ||
| 529 | * inode flush in progress no matter how many ENOSPC events are occurring and | ||
| 530 | * so will prevent the system from bogging down due to every concurrent | ||
| 531 | * ENOSPC event scanning all the active inodes in the system for writeback. | ||
| 532 | */ | ||
| 533 | void | ||
| 534 | xfs_flush_inodes( | ||
| 535 | struct xfs_inode *ip) | ||
| 536 | { | ||
| 537 | struct xfs_mount *mp = ip->i_mount; | ||
| 538 | |||
| 539 | queue_work(xfs_syncd_wq, &mp->m_flush_work); | ||
| 540 | flush_work_sync(&mp->m_flush_work); | ||
| 541 | } | ||
| 542 | |||
| 543 | STATIC void | ||
| 544 | xfs_flush_worker( | ||
| 545 | struct work_struct *work) | ||
| 546 | { | ||
| 547 | struct xfs_mount *mp = container_of(work, | ||
| 548 | struct xfs_mount, m_flush_work); | ||
| 549 | |||
| 550 | xfs_sync_data(mp, SYNC_TRYLOCK); | ||
| 551 | xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT); | ||
| 566 | } | 552 | } |
| 567 | 553 | ||
| 568 | int | 554 | int |
| 569 | xfs_syncd_init( | 555 | xfs_syncd_init( |
| 570 | struct xfs_mount *mp) | 556 | struct xfs_mount *mp) |
| 571 | { | 557 | { |
| 572 | mp->m_sync_work.w_syncer = xfs_sync_worker; | 558 | INIT_WORK(&mp->m_flush_work, xfs_flush_worker); |
| 573 | mp->m_sync_work.w_mount = mp; | 559 | INIT_DELAYED_WORK(&mp->m_sync_work, xfs_sync_worker); |
| 574 | mp->m_sync_work.w_completion = NULL; | 560 | INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker); |
| 575 | mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd/%s", mp->m_fsname); | 561 | |
| 576 | if (IS_ERR(mp->m_sync_task)) | 562 | xfs_syncd_queue_sync(mp); |
| 577 | return -PTR_ERR(mp->m_sync_task); | 563 | xfs_syncd_queue_reclaim(mp); |
| 564 | |||
| 578 | return 0; | 565 | return 0; |
| 579 | } | 566 | } |
| 580 | 567 | ||
| @@ -582,7 +569,9 @@ void | |||
| 582 | xfs_syncd_stop( | 569 | xfs_syncd_stop( |
| 583 | struct xfs_mount *mp) | 570 | struct xfs_mount *mp) |
| 584 | { | 571 | { |
| 585 | kthread_stop(mp->m_sync_task); | 572 | cancel_delayed_work_sync(&mp->m_sync_work); |
| 573 | cancel_delayed_work_sync(&mp->m_reclaim_work); | ||
| 574 | cancel_work_sync(&mp->m_flush_work); | ||
| 586 | } | 575 | } |
| 587 | 576 | ||
| 588 | void | 577 | void |
| @@ -601,6 +590,10 @@ __xfs_inode_set_reclaim_tag( | |||
| 601 | XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino), | 590 | XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino), |
| 602 | XFS_ICI_RECLAIM_TAG); | 591 | XFS_ICI_RECLAIM_TAG); |
| 603 | spin_unlock(&ip->i_mount->m_perag_lock); | 592 | spin_unlock(&ip->i_mount->m_perag_lock); |
| 593 | |||
| 594 | /* schedule periodic background inode reclaim */ | ||
| 595 | xfs_syncd_queue_reclaim(ip->i_mount); | ||
| 596 | |||
| 604 | trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno, | 597 | trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno, |
| 605 | -1, _RET_IP_); | 598 | -1, _RET_IP_); |
| 606 | } | 599 | } |
| @@ -1017,7 +1010,13 @@ xfs_reclaim_inodes( | |||
| 1017 | } | 1010 | } |
| 1018 | 1011 | ||
| 1019 | /* | 1012 | /* |
| 1020 | * Shrinker infrastructure. | 1013 | * Inode cache shrinker. |
| 1014 | * | ||
| 1015 | * When called we make sure that there is a background (fast) inode reclaim in | ||
| 1016 | * progress, while we throttle the speed of reclaim by doing synchronous | ||
| 1017 | * reclaim of inodes. That means if we come across dirty inodes, we wait for | ||
| 1018 | * them to be cleaned, which we hope will not be very long due to the | ||
| 1019 | * background walker having already kicked the IO off on those dirty inodes. | ||
| 1021 | */ | 1020 | */ |
| 1022 | static int | 1021 | static int |
| 1023 | xfs_reclaim_inode_shrink( | 1022 | xfs_reclaim_inode_shrink( |
| @@ -1032,10 +1031,15 @@ xfs_reclaim_inode_shrink( | |||
| 1032 | 1031 | ||
| 1033 | mp = container_of(shrink, struct xfs_mount, m_inode_shrink); | 1032 | mp = container_of(shrink, struct xfs_mount, m_inode_shrink); |
| 1034 | if (nr_to_scan) { | 1033 | if (nr_to_scan) { |
| 1034 | /* kick background reclaimer and push the AIL */ | ||
| 1035 | xfs_syncd_queue_reclaim(mp); | ||
| 1036 | xfs_ail_push_all(mp->m_ail); | ||
| 1037 | |||
| 1035 | if (!(gfp_mask & __GFP_FS)) | 1038 | if (!(gfp_mask & __GFP_FS)) |
| 1036 | return -1; | 1039 | return -1; |
| 1037 | 1040 | ||
| 1038 | xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK, &nr_to_scan); | 1041 | xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, |
| 1042 | &nr_to_scan); | ||
| 1039 | /* terminate if we don't exhaust the scan */ | 1043 | /* terminate if we don't exhaust the scan */ |
| 1040 | if (nr_to_scan > 0) | 1044 | if (nr_to_scan > 0) |
| 1041 | return -1; | 1045 | return -1; |
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h index 32ba6628290c..e3a6ad27415f 100644 --- a/fs/xfs/linux-2.6/xfs_sync.h +++ b/fs/xfs/linux-2.6/xfs_sync.h | |||
| @@ -32,6 +32,8 @@ typedef struct xfs_sync_work { | |||
| 32 | #define SYNC_WAIT 0x0001 /* wait for i/o to complete */ | 32 | #define SYNC_WAIT 0x0001 /* wait for i/o to complete */ |
| 33 | #define SYNC_TRYLOCK 0x0002 /* only try to lock inodes */ | 33 | #define SYNC_TRYLOCK 0x0002 /* only try to lock inodes */ |
| 34 | 34 | ||
| 35 | extern struct workqueue_struct *xfs_syncd_wq; /* sync workqueue */ | ||
| 36 | |||
| 35 | int xfs_syncd_init(struct xfs_mount *mp); | 37 | int xfs_syncd_init(struct xfs_mount *mp); |
| 36 | void xfs_syncd_stop(struct xfs_mount *mp); | 38 | void xfs_syncd_stop(struct xfs_mount *mp); |
| 37 | 39 | ||
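[Editorial sketch] The header above exports the xfs_syncd_wq workqueue that the sync, reclaim and flush work items run on. As a rough illustration of the single-flight ENOSPC flush pattern used by xfs_flush_inodes() earlier in this series (invented demo_* names, only the workqueue calls are real):

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static struct work_struct demo_flush_work;

static void demo_flush_worker(struct work_struct *work)
{
	/* ... write back dirty data to free up reserved space ... */
}

static int __init demo_flush_init(void)
{
	demo_wq = alloc_workqueue("demo_flush", 0, 0);
	if (!demo_wq)
		return -ENOMEM;
	INIT_WORK(&demo_flush_work, demo_flush_worker);
	return 0;
}

/* called on ENOSPC; safe to call concurrently from many tasks */
static void demo_flush_and_wait(void)
{
	queue_work(demo_wq, &demo_flush_work);	/* no-op if the work is already queued */
	flush_work_sync(&demo_flush_work);	/* each caller waits for a full pass */
}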
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c index 254ee062bd7d..69228aa8605a 100644 --- a/fs/xfs/quota/xfs_qm.c +++ b/fs/xfs/quota/xfs_qm.c | |||
| @@ -461,12 +461,10 @@ xfs_qm_dqflush_all( | |||
| 461 | struct xfs_quotainfo *q = mp->m_quotainfo; | 461 | struct xfs_quotainfo *q = mp->m_quotainfo; |
| 462 | int recl; | 462 | int recl; |
| 463 | struct xfs_dquot *dqp; | 463 | struct xfs_dquot *dqp; |
| 464 | int niters; | ||
| 465 | int error; | 464 | int error; |
| 466 | 465 | ||
| 467 | if (!q) | 466 | if (!q) |
| 468 | return 0; | 467 | return 0; |
| 469 | niters = 0; | ||
| 470 | again: | 468 | again: |
| 471 | mutex_lock(&q->qi_dqlist_lock); | 469 | mutex_lock(&q->qi_dqlist_lock); |
| 472 | list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) { | 470 | list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) { |
| @@ -1314,14 +1312,9 @@ xfs_qm_dqiter_bufs( | |||
| 1314 | { | 1312 | { |
| 1315 | xfs_buf_t *bp; | 1313 | xfs_buf_t *bp; |
| 1316 | int error; | 1314 | int error; |
| 1317 | int notcommitted; | ||
| 1318 | int incr; | ||
| 1319 | int type; | 1315 | int type; |
| 1320 | 1316 | ||
| 1321 | ASSERT(blkcnt > 0); | 1317 | ASSERT(blkcnt > 0); |
| 1322 | notcommitted = 0; | ||
| 1323 | incr = (blkcnt > XFS_QM_MAX_DQCLUSTER_LOGSZ) ? | ||
| 1324 | XFS_QM_MAX_DQCLUSTER_LOGSZ : blkcnt; | ||
| 1325 | type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER : | 1318 | type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER : |
| 1326 | (flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP); | 1319 | (flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP); |
| 1327 | error = 0; | 1320 | error = 0; |
diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h index c9446f1c726d..567b29b9f1b3 100644 --- a/fs/xfs/quota/xfs_qm.h +++ b/fs/xfs/quota/xfs_qm.h | |||
| @@ -65,11 +65,6 @@ extern kmem_zone_t *qm_dqtrxzone; | |||
| 65 | * block in the dquot/xqm code. | 65 | * block in the dquot/xqm code. |
| 66 | */ | 66 | */ |
| 67 | #define XFS_DQUOT_CLUSTER_SIZE_FSB (xfs_filblks_t)1 | 67 | #define XFS_DQUOT_CLUSTER_SIZE_FSB (xfs_filblks_t)1 |
| 68 | /* | ||
| 69 | * When doing a quotacheck, we log dquot clusters of this many FSBs at most | ||
| 70 | * in a single transaction. We don't want to ask for too huge a log reservation. | ||
| 71 | */ | ||
| 72 | #define XFS_QM_MAX_DQCLUSTER_LOGSZ 3 | ||
| 73 | 68 | ||
| 74 | typedef xfs_dqhash_t xfs_dqlist_t; | 69 | typedef xfs_dqhash_t xfs_dqlist_t; |
| 75 | 70 | ||
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c index 0d62a07b7fd8..2dadb15d5ca9 100644 --- a/fs/xfs/quota/xfs_qm_syscalls.c +++ b/fs/xfs/quota/xfs_qm_syscalls.c | |||
| @@ -313,14 +313,12 @@ xfs_qm_scall_quotaon( | |||
| 313 | { | 313 | { |
| 314 | int error; | 314 | int error; |
| 315 | uint qf; | 315 | uint qf; |
| 316 | uint accflags; | ||
| 317 | __int64_t sbflags; | 316 | __int64_t sbflags; |
| 318 | 317 | ||
| 319 | flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD); | 318 | flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD); |
| 320 | /* | 319 | /* |
| 321 | * Switching on quota accounting must be done at mount time. | 320 | * Switching on quota accounting must be done at mount time. |
| 322 | */ | 321 | */ |
| 323 | accflags = flags & XFS_ALL_QUOTA_ACCT; | ||
| 324 | flags &= ~(XFS_ALL_QUOTA_ACCT); | 322 | flags &= ~(XFS_ALL_QUOTA_ACCT); |
| 325 | 323 | ||
| 326 | sbflags = 0; | 324 | sbflags = 0; |
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index 4bc3c649aee4..27d64d752eab 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c | |||
| @@ -2395,17 +2395,33 @@ xfs_free_extent( | |||
| 2395 | memset(&args, 0, sizeof(xfs_alloc_arg_t)); | 2395 | memset(&args, 0, sizeof(xfs_alloc_arg_t)); |
| 2396 | args.tp = tp; | 2396 | args.tp = tp; |
| 2397 | args.mp = tp->t_mountp; | 2397 | args.mp = tp->t_mountp; |
| 2398 | |||
| 2399 | /* | ||
| 2400 | * validate that the block number is legal - this enables us to detect | ||
| 2401 | * and handle a silent filesystem corruption rather than crashing. | ||
| 2402 | */ | ||
| 2398 | args.agno = XFS_FSB_TO_AGNO(args.mp, bno); | 2403 | args.agno = XFS_FSB_TO_AGNO(args.mp, bno); |
| 2399 | ASSERT(args.agno < args.mp->m_sb.sb_agcount); | 2404 | if (args.agno >= args.mp->m_sb.sb_agcount) |
| 2405 | return EFSCORRUPTED; | ||
| 2406 | |||
| 2400 | args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno); | 2407 | args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno); |
| 2408 | if (args.agbno >= args.mp->m_sb.sb_agblocks) | ||
| 2409 | return EFSCORRUPTED; | ||
| 2410 | |||
| 2401 | args.pag = xfs_perag_get(args.mp, args.agno); | 2411 | args.pag = xfs_perag_get(args.mp, args.agno); |
| 2402 | if ((error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING))) | 2412 | ASSERT(args.pag); |
| 2413 | |||
| 2414 | error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING); | ||
| 2415 | if (error) | ||
| 2403 | goto error0; | 2416 | goto error0; |
| 2404 | #ifdef DEBUG | 2417 | |
| 2405 | ASSERT(args.agbp != NULL); | 2418 | /* validate the extent size is legal now we have the agf locked */ |
| 2406 | ASSERT((args.agbno + len) <= | 2419 | if (args.agbno + len > |
| 2407 | be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length)); | 2420 | be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length)) { |
| 2408 | #endif | 2421 | error = EFSCORRUPTED; |
| 2422 | goto error0; | ||
| 2423 | } | ||
| 2424 | |||
| 2409 | error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0); | 2425 | error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0); |
| 2410 | error0: | 2426 | error0: |
| 2411 | xfs_perag_put(args.pag); | 2427 | xfs_perag_put(args.pag); |
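[Editorial sketch] xfs_free_extent() above now range-checks the AG number and AG block number and returns EFSCORRUPTED instead of asserting, so a corrupted block number is handled rather than crashing. A simplified sketch of that decomposition and check follows; it is not the real XFS_FSB_TO_AGNO/XFS_FSB_TO_AGBNO macros, and -EINVAL stands in for the filesystem-specific error code.

#include <linux/types.h>
#include <linux/errno.h>

struct demo_geo {
	u32	agcount;	/* number of allocation groups */
	u32	agblocks;	/* blocks per allocation group */
	u32	agblklog;	/* log2 of (rounded-up) blocks per AG */
};

/* returns 0 if the block number looks sane, -EINVAL otherwise */
static int demo_validate_fsb(const struct demo_geo *geo, u64 fsbno)
{
	u32 agno  = (u32)(fsbno >> geo->agblklog);
	u32 agbno = (u32)(fsbno & ((1ULL << geo->agblklog) - 1));

	if (agno >= geo->agcount)
		return -EINVAL;	/* AG number out of range */
	if (agbno >= geo->agblocks)
		return -EINVAL;	/* offset beyond the AG */
	return 0;
}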
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index 46cc40131d4a..576fdfe81d60 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c | |||
| @@ -198,6 +198,41 @@ xfs_inode_item_size( | |||
| 198 | } | 198 | } |
| 199 | 199 | ||
| 200 | /* | 200 | /* |
| 201 | * xfs_inode_item_format_extents - convert in-core extents to on-disk form | ||
| 202 | * | ||
| 203 | * For either the data or attr fork in extent format, we need to endian convert | ||
| 204 | * the in-core extent as we place them into the on-disk inode. In this case, we | ||
| 205 | * need to do this conversion before we write the extents into the log. Because | ||
| 206 | * we don't have the disk inode to write into here, we allocate a buffer and | ||
| 207 | * format the extents into it via xfs_iextents_copy(). We free the buffer in | ||
| 208 | * the unlock routine after the copy for the log has been made. | ||
| 209 | * | ||
| 210 | * In the case of the data fork, the in-core and on-disk fork sizes can be | ||
| 211 | * different due to delayed allocation extents. We only log on-disk extents | ||
| 212 | * here, so always use the physical fork size to determine the size of the | ||
| 213 | * buffer we need to allocate. | ||
| 214 | */ | ||
| 215 | STATIC void | ||
| 216 | xfs_inode_item_format_extents( | ||
| 217 | struct xfs_inode *ip, | ||
| 218 | struct xfs_log_iovec *vecp, | ||
| 219 | int whichfork, | ||
| 220 | int type) | ||
| 221 | { | ||
| 222 | xfs_bmbt_rec_t *ext_buffer; | ||
| 223 | |||
| 224 | ext_buffer = kmem_alloc(XFS_IFORK_SIZE(ip, whichfork), KM_SLEEP); | ||
| 225 | if (whichfork == XFS_DATA_FORK) | ||
| 226 | ip->i_itemp->ili_extents_buf = ext_buffer; | ||
| 227 | else | ||
| 228 | ip->i_itemp->ili_aextents_buf = ext_buffer; | ||
| 229 | |||
| 230 | vecp->i_addr = ext_buffer; | ||
| 231 | vecp->i_len = xfs_iextents_copy(ip, ext_buffer, whichfork); | ||
| 232 | vecp->i_type = type; | ||
| 233 | } | ||
| 234 | |||
| 235 | /* | ||
| 201 | * This is called to fill in the vector of log iovecs for the | 236 | * This is called to fill in the vector of log iovecs for the |
| 202 | * given inode log item. It fills the first item with an inode | 237 | * given inode log item. It fills the first item with an inode |
| 203 | * log format structure, the second with the on-disk inode structure, | 238 | * log format structure, the second with the on-disk inode structure, |
| @@ -213,7 +248,6 @@ xfs_inode_item_format( | |||
| 213 | struct xfs_inode *ip = iip->ili_inode; | 248 | struct xfs_inode *ip = iip->ili_inode; |
| 214 | uint nvecs; | 249 | uint nvecs; |
| 215 | size_t data_bytes; | 250 | size_t data_bytes; |
| 216 | xfs_bmbt_rec_t *ext_buffer; | ||
| 217 | xfs_mount_t *mp; | 251 | xfs_mount_t *mp; |
| 218 | 252 | ||
| 219 | vecp->i_addr = &iip->ili_format; | 253 | vecp->i_addr = &iip->ili_format; |
| @@ -320,22 +354,8 @@ xfs_inode_item_format( | |||
| 320 | } else | 354 | } else |
| 321 | #endif | 355 | #endif |
| 322 | { | 356 | { |
| 323 | /* | 357 | xfs_inode_item_format_extents(ip, vecp, |
| 324 | * There are delayed allocation extents | 358 | XFS_DATA_FORK, XLOG_REG_TYPE_IEXT); |
| 325 | * in the inode, or we need to convert | ||
| 326 | * the extents to on disk format. | ||
| 327 | * Use xfs_iextents_copy() | ||
| 328 | * to copy only the real extents into | ||
| 329 | * a separate buffer. We'll free the | ||
| 330 | * buffer in the unlock routine. | ||
| 331 | */ | ||
| 332 | ext_buffer = kmem_alloc(ip->i_df.if_bytes, | ||
| 333 | KM_SLEEP); | ||
| 334 | iip->ili_extents_buf = ext_buffer; | ||
| 335 | vecp->i_addr = ext_buffer; | ||
| 336 | vecp->i_len = xfs_iextents_copy(ip, ext_buffer, | ||
| 337 | XFS_DATA_FORK); | ||
| 338 | vecp->i_type = XLOG_REG_TYPE_IEXT; | ||
| 339 | } | 359 | } |
| 340 | ASSERT(vecp->i_len <= ip->i_df.if_bytes); | 360 | ASSERT(vecp->i_len <= ip->i_df.if_bytes); |
| 341 | iip->ili_format.ilf_dsize = vecp->i_len; | 361 | iip->ili_format.ilf_dsize = vecp->i_len; |
| @@ -445,19 +465,12 @@ xfs_inode_item_format( | |||
| 445 | */ | 465 | */ |
| 446 | vecp->i_addr = ip->i_afp->if_u1.if_extents; | 466 | vecp->i_addr = ip->i_afp->if_u1.if_extents; |
| 447 | vecp->i_len = ip->i_afp->if_bytes; | 467 | vecp->i_len = ip->i_afp->if_bytes; |
| 468 | vecp->i_type = XLOG_REG_TYPE_IATTR_EXT; | ||
| 448 | #else | 469 | #else |
| 449 | ASSERT(iip->ili_aextents_buf == NULL); | 470 | ASSERT(iip->ili_aextents_buf == NULL); |
| 450 | /* | 471 | xfs_inode_item_format_extents(ip, vecp, |
| 451 | * Need to endian flip before logging | 472 | XFS_ATTR_FORK, XLOG_REG_TYPE_IATTR_EXT); |
| 452 | */ | ||
| 453 | ext_buffer = kmem_alloc(ip->i_afp->if_bytes, | ||
| 454 | KM_SLEEP); | ||
| 455 | iip->ili_aextents_buf = ext_buffer; | ||
| 456 | vecp->i_addr = ext_buffer; | ||
| 457 | vecp->i_len = xfs_iextents_copy(ip, ext_buffer, | ||
| 458 | XFS_ATTR_FORK); | ||
| 459 | #endif | 473 | #endif |
| 460 | vecp->i_type = XLOG_REG_TYPE_IATTR_EXT; | ||
| 461 | iip->ili_format.ilf_asize = vecp->i_len; | 474 | iip->ili_format.ilf_asize = vecp->i_len; |
| 462 | vecp++; | 475 | vecp++; |
| 463 | nvecs++; | 476 | nvecs++; |
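[Editorial sketch] A hedged illustration of the pattern xfs_inode_item_format_extents() captures: allocate a scratch buffer, convert in-core records to their on-disk big-endian layout, and point a log vector at the converted copy. The demo_* types are invented and the two-word record is only loosely modeled on the bmbt extent format; cpu_to_be64() and kmalloc() are the only real kernel helpers used.

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <asm/byteorder.h>

struct demo_rec   { u64 l0; u64 l1; };		/* in-core record, CPU endian */
struct demo_iovec { void *i_addr; int i_len; };	/* simplified log vector */

static int demo_format_recs(const struct demo_rec *recs, int nrecs,
			    struct demo_iovec *vecp, void **bufp)
{
	__be64	*dst;
	int	i;

	dst = kmalloc(nrecs * 2 * sizeof(__be64), GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	for (i = 0; i < nrecs; i++) {
		dst[2 * i]     = cpu_to_be64(recs[i].l0);
		dst[2 * i + 1] = cpu_to_be64(recs[i].l1);
	}

	vecp->i_addr = dst;
	vecp->i_len  = nrecs * 2 * sizeof(__be64);
	*bufp = dst;	/* caller frees this after the log copy, as the "unlock" path does */
	return 0;
}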
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index dc1882adaf54..751e94fe1f77 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c | |||
| @@ -204,7 +204,6 @@ xfs_bulkstat( | |||
| 204 | xfs_agi_t *agi; /* agi header data */ | 204 | xfs_agi_t *agi; /* agi header data */ |
| 205 | xfs_agino_t agino; /* inode # in allocation group */ | 205 | xfs_agino_t agino; /* inode # in allocation group */ |
| 206 | xfs_agnumber_t agno; /* allocation group number */ | 206 | xfs_agnumber_t agno; /* allocation group number */ |
| 207 | xfs_daddr_t bno; /* inode cluster start daddr */ | ||
| 208 | int chunkidx; /* current index into inode chunk */ | 207 | int chunkidx; /* current index into inode chunk */ |
| 209 | int clustidx; /* current index into inode cluster */ | 208 | int clustidx; /* current index into inode cluster */ |
| 210 | xfs_btree_cur_t *cur; /* btree cursor for ialloc btree */ | 209 | xfs_btree_cur_t *cur; /* btree cursor for ialloc btree */ |
| @@ -463,7 +462,6 @@ xfs_bulkstat( | |||
| 463 | mp->m_sb.sb_inopblog); | 462 | mp->m_sb.sb_inopblog); |
| 464 | } | 463 | } |
| 465 | ino = XFS_AGINO_TO_INO(mp, agno, agino); | 464 | ino = XFS_AGINO_TO_INO(mp, agno, agino); |
| 466 | bno = XFS_AGB_TO_DADDR(mp, agno, agbno); | ||
| 467 | /* | 465 | /* |
| 468 | * Skip if this inode is free. | 466 | * Skip if this inode is free. |
| 469 | */ | 467 | */ |
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 25efa9b8a602..b612ce4520ae 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c | |||
| @@ -761,7 +761,7 @@ xfs_log_need_covered(xfs_mount_t *mp) | |||
| 761 | break; | 761 | break; |
| 762 | case XLOG_STATE_COVER_NEED: | 762 | case XLOG_STATE_COVER_NEED: |
| 763 | case XLOG_STATE_COVER_NEED2: | 763 | case XLOG_STATE_COVER_NEED2: |
| 764 | if (!xfs_trans_ail_tail(log->l_ailp) && | 764 | if (!xfs_ail_min_lsn(log->l_ailp) && |
| 765 | xlog_iclogs_empty(log)) { | 765 | xlog_iclogs_empty(log)) { |
| 766 | if (log->l_covered_state == XLOG_STATE_COVER_NEED) | 766 | if (log->l_covered_state == XLOG_STATE_COVER_NEED) |
| 767 | log->l_covered_state = XLOG_STATE_COVER_DONE; | 767 | log->l_covered_state = XLOG_STATE_COVER_DONE; |
| @@ -801,7 +801,7 @@ xlog_assign_tail_lsn( | |||
| 801 | xfs_lsn_t tail_lsn; | 801 | xfs_lsn_t tail_lsn; |
| 802 | struct log *log = mp->m_log; | 802 | struct log *log = mp->m_log; |
| 803 | 803 | ||
| 804 | tail_lsn = xfs_trans_ail_tail(mp->m_ail); | 804 | tail_lsn = xfs_ail_min_lsn(mp->m_ail); |
| 805 | if (!tail_lsn) | 805 | if (!tail_lsn) |
| 806 | tail_lsn = atomic64_read(&log->l_last_sync_lsn); | 806 | tail_lsn = atomic64_read(&log->l_last_sync_lsn); |
| 807 | 807 | ||
| @@ -1239,7 +1239,7 @@ xlog_grant_push_ail( | |||
| 1239 | * the filesystem is shutting down. | 1239 | * the filesystem is shutting down. |
| 1240 | */ | 1240 | */ |
| 1241 | if (!XLOG_FORCED_SHUTDOWN(log)) | 1241 | if (!XLOG_FORCED_SHUTDOWN(log)) |
| 1242 | xfs_trans_ail_push(log->l_ailp, threshold_lsn); | 1242 | xfs_ail_push(log->l_ailp, threshold_lsn); |
| 1243 | } | 1243 | } |
| 1244 | 1244 | ||
| 1245 | /* | 1245 | /* |
| @@ -3407,6 +3407,17 @@ xlog_verify_dest_ptr( | |||
| 3407 | xfs_emerg(log->l_mp, "%s: invalid ptr", __func__); | 3407 | xfs_emerg(log->l_mp, "%s: invalid ptr", __func__); |
| 3408 | } | 3408 | } |
| 3409 | 3409 | ||
| 3410 | /* | ||
| 3411 | * Check to make sure the grant write head didn't just overlap the tail. If | ||
| 3412 | * the cycles are the same, we can't be overlapping. Otherwise, make sure that | ||
| 3413 | * the cycles differ by exactly one and check the byte count. | ||
| 3414 | * | ||
| 3415 | * This check is run unlocked, so can give false positives. Rather than assert | ||
| 3416 | * on failures, use a warn-once flag and a panic tag to allow the admin to | ||
| 3417 | * determine if they want to panic the machine when such an error occurs. For | ||
| 3418 | * debug kernels this will have the same effect as using an assert but, unlike | ||
| 3419 | * an assert, it can be turned off at runtime. | ||
| 3420 | */ | ||
| 3410 | STATIC void | 3421 | STATIC void |
| 3411 | xlog_verify_grant_tail( | 3422 | xlog_verify_grant_tail( |
| 3412 | struct log *log) | 3423 | struct log *log) |
| @@ -3414,17 +3425,22 @@ xlog_verify_grant_tail( | |||
| 3414 | int tail_cycle, tail_blocks; | 3425 | int tail_cycle, tail_blocks; |
| 3415 | int cycle, space; | 3426 | int cycle, space; |
| 3416 | 3427 | ||
| 3417 | /* | ||
| 3418 | * Check to make sure the grant write head didn't just over lap the | ||
| 3419 | * tail. If the cycles are the same, we can't be overlapping. | ||
| 3420 | * Otherwise, make sure that the cycles differ by exactly one and | ||
| 3421 | * check the byte count. | ||
| 3422 | */ | ||
| 3423 | xlog_crack_grant_head(&log->l_grant_write_head, &cycle, &space); | 3428 | xlog_crack_grant_head(&log->l_grant_write_head, &cycle, &space); |
| 3424 | xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks); | 3429 | xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks); |
| 3425 | if (tail_cycle != cycle) { | 3430 | if (tail_cycle != cycle) { |
| 3426 | ASSERT(cycle - 1 == tail_cycle); | 3431 | if (cycle - 1 != tail_cycle && |
| 3427 | ASSERT(space <= BBTOB(tail_blocks)); | 3432 | !(log->l_flags & XLOG_TAIL_WARN)) { |
| 3433 | xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, | ||
| 3434 | "%s: cycle - 1 != tail_cycle", __func__); | ||
| 3435 | log->l_flags |= XLOG_TAIL_WARN; | ||
| 3436 | } | ||
| 3437 | |||
| 3438 | if (space > BBTOB(tail_blocks) && | ||
| 3439 | !(log->l_flags & XLOG_TAIL_WARN)) { | ||
| 3440 | xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, | ||
| 3441 | "%s: space > BBTOB(tail_blocks)", __func__); | ||
| 3442 | log->l_flags |= XLOG_TAIL_WARN; | ||
| 3443 | } | ||
| 3428 | } | 3444 | } |
| 3429 | } | 3445 | } |
| 3430 | 3446 | ||
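[Editorial sketch] xlog_verify_grant_tail() above replaces asserts with a warn-once flag because the check runs unlocked and can raise false positives. A minimal sketch of that latch pattern with invented names; pr_alert() stands in for xfs_alert_tag(), and test_and_set_bit() gives the same warn-once behaviour as the XLOG_TAIL_WARN flag.

#include <linux/kernel.h>
#include <linux/bitops.h>

#define DEMO_TAIL_WARNED	0	/* bit number in demo_flags */

static unsigned long demo_flags;

static void demo_verify_tail(int cycle, int tail_cycle)
{
	/* warn at most once; never assert on a possibly stale, unlocked read */
	if (cycle - 1 != tail_cycle &&
	    !test_and_set_bit(DEMO_TAIL_WARNED, &demo_flags))
		pr_alert("demo: cycle - 1 != tail_cycle\n");
}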
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index ffae692c9832..5864850e9e34 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h | |||
| @@ -144,6 +144,7 @@ static inline uint xlog_get_client_id(__be32 i) | |||
| 144 | #define XLOG_RECOVERY_NEEDED 0x4 /* log was recovered */ | 144 | #define XLOG_RECOVERY_NEEDED 0x4 /* log was recovered */ |
| 145 | #define XLOG_IO_ERROR 0x8 /* log hit an I/O error, and being | 145 | #define XLOG_IO_ERROR 0x8 /* log hit an I/O error, and being |
| 146 | shutdown */ | 146 | shutdown */ |
| 147 | #define XLOG_TAIL_WARN 0x10 /* log tail verify warning issued */ | ||
| 147 | 148 | ||
| 148 | #ifdef __KERNEL__ | 149 | #ifdef __KERNEL__ |
| 149 | /* | 150 | /* |
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index a62e8971539d..19af0ab0d0c6 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h | |||
| @@ -203,12 +203,9 @@ typedef struct xfs_mount { | |||
| 203 | struct mutex m_icsb_mutex; /* balancer sync lock */ | 203 | struct mutex m_icsb_mutex; /* balancer sync lock */ |
| 204 | #endif | 204 | #endif |
| 205 | struct xfs_mru_cache *m_filestream; /* per-mount filestream data */ | 205 | struct xfs_mru_cache *m_filestream; /* per-mount filestream data */ |
| 206 | struct task_struct *m_sync_task; /* generalised sync thread */ | 206 | struct delayed_work m_sync_work; /* background sync work */ |
| 207 | xfs_sync_work_t m_sync_work; /* work item for VFS_SYNC */ | 207 | struct delayed_work m_reclaim_work; /* background inode reclaim */ |
| 208 | struct list_head m_sync_list; /* sync thread work item list */ | 208 | struct work_struct m_flush_work; /* background inode flush */ |
| 209 | spinlock_t m_sync_lock; /* work item list lock */ | ||
| 210 | int m_sync_seq; /* sync thread generation no. */ | ||
| 211 | wait_queue_head_t m_wait_single_sync_task; | ||
| 212 | __int64_t m_update_flags; /* sb flags we need to update | 209 | __int64_t m_update_flags; /* sb flags we need to update |
| 213 | on the next remount,rw */ | 210 | on the next remount,rw */ |
| 214 | struct shrinker m_inode_shrink; /* inode reclaim shrinker */ | 211 | struct shrinker m_inode_shrink; /* inode reclaim shrinker */ |
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index 12aff9584e29..acdb92f14d51 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c | |||
| @@ -28,74 +28,138 @@ | |||
| 28 | #include "xfs_trans_priv.h" | 28 | #include "xfs_trans_priv.h" |
| 29 | #include "xfs_error.h" | 29 | #include "xfs_error.h" |
| 30 | 30 | ||
| 31 | STATIC void xfs_ail_splice(struct xfs_ail *, struct list_head *, xfs_lsn_t); | 31 | struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */ |
| 32 | STATIC void xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *); | ||
| 33 | STATIC xfs_log_item_t * xfs_ail_min(struct xfs_ail *); | ||
| 34 | STATIC xfs_log_item_t * xfs_ail_next(struct xfs_ail *, xfs_log_item_t *); | ||
| 35 | 32 | ||
| 36 | #ifdef DEBUG | 33 | #ifdef DEBUG |
| 37 | STATIC void xfs_ail_check(struct xfs_ail *, xfs_log_item_t *); | 34 | /* |
| 38 | #else | 35 | * Check that the list is sorted as it should be. |
| 36 | */ | ||
| 37 | STATIC void | ||
| 38 | xfs_ail_check( | ||
| 39 | struct xfs_ail *ailp, | ||
| 40 | xfs_log_item_t *lip) | ||
| 41 | { | ||
| 42 | xfs_log_item_t *prev_lip; | ||
| 43 | |||
| 44 | if (list_empty(&ailp->xa_ail)) | ||
| 45 | return; | ||
| 46 | |||
| 47 | /* | ||
| 48 | * Check the next and previous entries are valid. | ||
| 49 | */ | ||
| 50 | ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0); | ||
| 51 | prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail); | ||
| 52 | if (&prev_lip->li_ail != &ailp->xa_ail) | ||
| 53 | ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0); | ||
| 54 | |||
| 55 | prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail); | ||
| 56 | if (&prev_lip->li_ail != &ailp->xa_ail) | ||
| 57 | ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0); | ||
| 58 | |||
| 59 | |||
| 60 | #ifdef XFS_TRANS_DEBUG | ||
| 61 | /* | ||
| 62 | * Walk the list checking lsn ordering, and that every entry has the | ||
| 63 | * XFS_LI_IN_AIL flag set. This is really expensive, so only do it | ||
| 64 | * when specifically debugging the transaction subsystem. | ||
| 65 | */ | ||
| 66 | prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail); | ||
| 67 | list_for_each_entry(lip, &ailp->xa_ail, li_ail) { | ||
| 68 | if (&prev_lip->li_ail != &ailp->xa_ail) | ||
| 69 | ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0); | ||
| 70 | ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0); | ||
| 71 | prev_lip = lip; | ||
| 72 | } | ||
| 73 | #endif /* XFS_TRANS_DEBUG */ | ||
| 74 | } | ||
| 75 | #else /* !DEBUG */ | ||
| 39 | #define xfs_ail_check(a,l) | 76 | #define xfs_ail_check(a,l) |
| 40 | #endif /* DEBUG */ | 77 | #endif /* DEBUG */ |
| 41 | 78 | ||
| 79 | /* | ||
| 80 | * Return a pointer to the first item in the AIL. If the AIL is empty, then | ||
| 81 | * return NULL. | ||
| 82 | */ | ||
| 83 | static xfs_log_item_t * | ||
| 84 | xfs_ail_min( | ||
| 85 | struct xfs_ail *ailp) | ||
| 86 | { | ||
| 87 | if (list_empty(&ailp->xa_ail)) | ||
| 88 | return NULL; | ||
| 89 | |||
| 90 | return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail); | ||
| 91 | } | ||
| 92 | |||
| 93 | /* | ||
| 94 | * Return a pointer to the last item in the AIL. If the AIL is empty, then | ||
| 95 | * return NULL. | ||
| 96 | */ | ||
| 97 | static xfs_log_item_t * | ||
| 98 | xfs_ail_max( | ||
| 99 | struct xfs_ail *ailp) | ||
| 100 | { | ||
| 101 | if (list_empty(&ailp->xa_ail)) | ||
| 102 | return NULL; | ||
| 103 | |||
| 104 | return list_entry(ailp->xa_ail.prev, xfs_log_item_t, li_ail); | ||
| 105 | } | ||
| 106 | |||
| 107 | /* | ||
| 108 | * Return a pointer to the item which follows the given item in the AIL. If | ||
| 109 | * the given item is the last item in the list, then return NULL. | ||
| 110 | */ | ||
| 111 | static xfs_log_item_t * | ||
| 112 | xfs_ail_next( | ||
| 113 | struct xfs_ail *ailp, | ||
| 114 | xfs_log_item_t *lip) | ||
| 115 | { | ||
| 116 | if (lip->li_ail.next == &ailp->xa_ail) | ||
| 117 | return NULL; | ||
| 118 | |||
| 119 | return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail); | ||
| 120 | } | ||
| 42 | 121 | ||
| 43 | /* | 122 | /* |
| 44 | * This is called by the log manager code to determine the LSN | 123 | * This is called by the log manager code to determine the LSN of the tail of |
| 45 | * of the tail of the log. This is exactly the LSN of the first | 124 | * the log. This is exactly the LSN of the first item in the AIL. If the AIL |
| 46 | * item in the AIL. If the AIL is empty, then this function | 125 | * is empty, then this function returns 0. |
| 47 | * returns 0. | ||
| 48 | * | 126 | * |
| 49 | * We need the AIL lock in order to get a coherent read of the | 127 | * We need the AIL lock in order to get a coherent read of the lsn of the last |
| 50 | * lsn of the last item in the AIL. | 128 | * item in the AIL. |
| 51 | */ | 129 | */ |
| 52 | xfs_lsn_t | 130 | xfs_lsn_t |
| 53 | xfs_trans_ail_tail( | 131 | xfs_ail_min_lsn( |
| 54 | struct xfs_ail *ailp) | 132 | struct xfs_ail *ailp) |
| 55 | { | 133 | { |
| 56 | xfs_lsn_t lsn; | 134 | xfs_lsn_t lsn = 0; |
| 57 | xfs_log_item_t *lip; | 135 | xfs_log_item_t *lip; |
| 58 | 136 | ||
| 59 | spin_lock(&ailp->xa_lock); | 137 | spin_lock(&ailp->xa_lock); |
| 60 | lip = xfs_ail_min(ailp); | 138 | lip = xfs_ail_min(ailp); |
| 61 | if (lip == NULL) { | 139 | if (lip) |
| 62 | lsn = (xfs_lsn_t)0; | ||
| 63 | } else { | ||
| 64 | lsn = lip->li_lsn; | 140 | lsn = lip->li_lsn; |
| 65 | } | ||
| 66 | spin_unlock(&ailp->xa_lock); | 141 | spin_unlock(&ailp->xa_lock); |
| 67 | 142 | ||
| 68 | return lsn; | 143 | return lsn; |
| 69 | } | 144 | } |
| 70 | 145 | ||
| 71 | /* | 146 | /* |
| 72 | * xfs_trans_push_ail | 147 | * Return the maximum lsn held in the AIL, or zero if the AIL is empty. |
| 73 | * | ||
| 74 | * This routine is called to move the tail of the AIL forward. It does this by | ||
| 75 | * trying to flush items in the AIL whose lsns are below the given | ||
| 76 | * threshold_lsn. | ||
| 77 | * | ||
| 78 | * the push is run asynchronously in a separate thread, so we return the tail | ||
| 79 | * of the log right now instead of the tail after the push. This means we will | ||
| 80 | * either continue right away, or we will sleep waiting on the async thread to | ||
| 81 | * do its work. | ||
| 82 | * | ||
| 83 | * We do this unlocked - we only need to know whether there is anything in the | ||
| 84 | * AIL at the time we are called. We don't need to access the contents of | ||
| 85 | * any of the objects, so the lock is not needed. | ||
| 86 | */ | 148 | */ |
| 87 | void | 149 | static xfs_lsn_t |
| 88 | xfs_trans_ail_push( | 150 | xfs_ail_max_lsn( |
| 89 | struct xfs_ail *ailp, | 151 | struct xfs_ail *ailp) |
| 90 | xfs_lsn_t threshold_lsn) | ||
| 91 | { | 152 | { |
| 92 | xfs_log_item_t *lip; | 153 | xfs_lsn_t lsn = 0; |
| 154 | xfs_log_item_t *lip; | ||
| 93 | 155 | ||
| 94 | lip = xfs_ail_min(ailp); | 156 | spin_lock(&ailp->xa_lock); |
| 95 | if (lip && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) { | 157 | lip = xfs_ail_max(ailp); |
| 96 | if (XFS_LSN_CMP(threshold_lsn, ailp->xa_target) > 0) | 158 | if (lip) |
| 97 | xfsaild_wakeup(ailp, threshold_lsn); | 159 | lsn = lip->li_lsn; |
| 98 | } | 160 | spin_unlock(&ailp->xa_lock); |
| 161 | |||
| 162 | return lsn; | ||
| 99 | } | 163 | } |
| 100 | 164 | ||
| 101 | /* | 165 | /* |
| @@ -236,16 +300,57 @@ out: | |||
| 236 | } | 300 | } |
| 237 | 301 | ||
| 238 | /* | 302 | /* |
| 239 | * xfsaild_push does the work of pushing on the AIL. Returning a timeout of | 303 | * splice the log item list into the AIL at the given LSN. |
| 240 | * zero indicates that the caller should sleep until woken. | ||
| 241 | */ | 304 | */ |
| 242 | long | 305 | static void |
| 243 | xfsaild_push( | 306 | xfs_ail_splice( |
| 244 | struct xfs_ail *ailp, | 307 | struct xfs_ail *ailp, |
| 245 | xfs_lsn_t *last_lsn) | 308 | struct list_head *list, |
| 309 | xfs_lsn_t lsn) | ||
| 246 | { | 310 | { |
| 247 | long tout = 0; | 311 | xfs_log_item_t *next_lip; |
| 248 | xfs_lsn_t last_pushed_lsn = *last_lsn; | 312 | |
| 313 | /* If the list is empty, just insert the item. */ | ||
| 314 | if (list_empty(&ailp->xa_ail)) { | ||
| 315 | list_splice(list, &ailp->xa_ail); | ||
| 316 | return; | ||
| 317 | } | ||
| 318 | |||
| 319 | list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) { | ||
| 320 | if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0) | ||
| 321 | break; | ||
| 322 | } | ||
| 323 | |||
| 324 | ASSERT(&next_lip->li_ail == &ailp->xa_ail || | ||
| 325 | XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0); | ||
| 326 | |||
| 327 | list_splice_init(list, &next_lip->li_ail); | ||
| 328 | } | ||
| 329 | |||
| 330 | /* | ||
| 331 | * Delete the given item from the AIL. | ||
| 332 | */ | ||
| 333 | static void | ||
| 334 | xfs_ail_delete( | ||
| 335 | struct xfs_ail *ailp, | ||
| 336 | xfs_log_item_t *lip) | ||
| 337 | { | ||
| 338 | xfs_ail_check(ailp, lip); | ||
| 339 | list_del(&lip->li_ail); | ||
| 340 | xfs_trans_ail_cursor_clear(ailp, lip); | ||
| 341 | } | ||
| 342 | |||
| 343 | /* | ||
| 344 | * xfs_ail_worker does the work of pushing on the AIL. It will requeue itself | ||
| 345 | * to run at a later time if there is more work to do to complete the push. | ||
| 346 | */ | ||
| 347 | STATIC void | ||
| 348 | xfs_ail_worker( | ||
| 349 | struct work_struct *work) | ||
| 350 | { | ||
| 351 | struct xfs_ail *ailp = container_of(to_delayed_work(work), | ||
| 352 | struct xfs_ail, xa_work); | ||
| 353 | long tout; | ||
| 249 | xfs_lsn_t target = ailp->xa_target; | 354 | xfs_lsn_t target = ailp->xa_target; |
| 250 | xfs_lsn_t lsn; | 355 | xfs_lsn_t lsn; |
| 251 | xfs_log_item_t *lip; | 356 | xfs_log_item_t *lip; |
| @@ -256,15 +361,15 @@ xfsaild_push( | |||
| 256 | 361 | ||
| 257 | spin_lock(&ailp->xa_lock); | 362 | spin_lock(&ailp->xa_lock); |
| 258 | xfs_trans_ail_cursor_init(ailp, cur); | 363 | xfs_trans_ail_cursor_init(ailp, cur); |
| 259 | lip = xfs_trans_ail_cursor_first(ailp, cur, *last_lsn); | 364 | lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn); |
| 260 | if (!lip || XFS_FORCED_SHUTDOWN(mp)) { | 365 | if (!lip || XFS_FORCED_SHUTDOWN(mp)) { |
| 261 | /* | 366 | /* |
| 262 | * AIL is empty or our push has reached the end. | 367 | * AIL is empty or our push has reached the end. |
| 263 | */ | 368 | */ |
| 264 | xfs_trans_ail_cursor_done(ailp, cur); | 369 | xfs_trans_ail_cursor_done(ailp, cur); |
| 265 | spin_unlock(&ailp->xa_lock); | 370 | spin_unlock(&ailp->xa_lock); |
| 266 | *last_lsn = 0; | 371 | ailp->xa_last_pushed_lsn = 0; |
| 267 | return tout; | 372 | return; |
| 268 | } | 373 | } |
| 269 | 374 | ||
| 270 | XFS_STATS_INC(xs_push_ail); | 375 | XFS_STATS_INC(xs_push_ail); |
| @@ -301,13 +406,13 @@ xfsaild_push( | |||
| 301 | case XFS_ITEM_SUCCESS: | 406 | case XFS_ITEM_SUCCESS: |
| 302 | XFS_STATS_INC(xs_push_ail_success); | 407 | XFS_STATS_INC(xs_push_ail_success); |
| 303 | IOP_PUSH(lip); | 408 | IOP_PUSH(lip); |
| 304 | last_pushed_lsn = lsn; | 409 | ailp->xa_last_pushed_lsn = lsn; |
| 305 | break; | 410 | break; |
| 306 | 411 | ||
| 307 | case XFS_ITEM_PUSHBUF: | 412 | case XFS_ITEM_PUSHBUF: |
| 308 | XFS_STATS_INC(xs_push_ail_pushbuf); | 413 | XFS_STATS_INC(xs_push_ail_pushbuf); |
| 309 | IOP_PUSHBUF(lip); | 414 | IOP_PUSHBUF(lip); |
| 310 | last_pushed_lsn = lsn; | 415 | ailp->xa_last_pushed_lsn = lsn; |
| 311 | push_xfsbufd = 1; | 416 | push_xfsbufd = 1; |
| 312 | break; | 417 | break; |
| 313 | 418 | ||
| @@ -319,7 +424,7 @@ xfsaild_push( | |||
| 319 | 424 | ||
| 320 | case XFS_ITEM_LOCKED: | 425 | case XFS_ITEM_LOCKED: |
| 321 | XFS_STATS_INC(xs_push_ail_locked); | 426 | XFS_STATS_INC(xs_push_ail_locked); |
| 322 | last_pushed_lsn = lsn; | 427 | ailp->xa_last_pushed_lsn = lsn; |
| 323 | stuck++; | 428 | stuck++; |
| 324 | break; | 429 | break; |
| 325 | 430 | ||
| @@ -374,9 +479,23 @@ xfsaild_push( | |||
| 374 | wake_up_process(mp->m_ddev_targp->bt_task); | 479 | wake_up_process(mp->m_ddev_targp->bt_task); |
| 375 | } | 480 | } |
| 376 | 481 | ||
| 482 | /* assume we have more work to do in a short while */ | ||
| 483 | tout = 10; | ||
| 377 | if (!count) { | 484 | if (!count) { |
| 378 | /* We're past our target or empty, so idle */ | 485 | /* We're past our target or empty, so idle */ |
| 379 | last_pushed_lsn = 0; | 486 | ailp->xa_last_pushed_lsn = 0; |
| 487 | |||
| 488 | /* | ||
| 489 | * Check for an updated push target before clearing the | ||
| 490 | * XFS_AIL_PUSHING_BIT. If the target changed, we've got more | ||
| 491 | * work to do. Wait a bit longer before starting that work. | ||
| 492 | */ | ||
| 493 | smp_rmb(); | ||
| 494 | if (ailp->xa_target == target) { | ||
| 495 | clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags); | ||
| 496 | return; | ||
| 497 | } | ||
| 498 | tout = 50; | ||
| 380 | } else if (XFS_LSN_CMP(lsn, target) >= 0) { | 499 | } else if (XFS_LSN_CMP(lsn, target) >= 0) { |
| 381 | /* | 500 | /* |
| 382 | * We reached the target so wait a bit longer for I/O to | 501 | * We reached the target so wait a bit longer for I/O to |
| @@ -384,7 +503,7 @@ xfsaild_push( | |||
| 384 | * start the next scan from the start of the AIL. | 503 | * start the next scan from the start of the AIL. |
| 385 | */ | 504 | */ |
| 386 | tout = 50; | 505 | tout = 50; |
| 387 | last_pushed_lsn = 0; | 506 | ailp->xa_last_pushed_lsn = 0; |
| 388 | } else if ((stuck * 100) / count > 90) { | 507 | } else if ((stuck * 100) / count > 90) { |
| 389 | /* | 508 | /* |
| 390 | * Either there is a lot of contention on the AIL or we | 509 | * Either there is a lot of contention on the AIL or we |
| @@ -396,14 +515,61 @@ xfsaild_push( | |||
| 396 | * continuing from where we were. | 515 | * continuing from where we were. |
| 397 | */ | 516 | */ |
| 398 | tout = 20; | 517 | tout = 20; |
| 399 | } else { | ||
| 400 | /* more to do, but wait a short while before continuing */ | ||
| 401 | tout = 10; | ||
| 402 | } | 518 | } |
| 403 | *last_lsn = last_pushed_lsn; | 519 | |
| 404 | return tout; | 520 | /* There is more to do, requeue us. */ |
| 521 | queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, | ||
| 522 | msecs_to_jiffies(tout)); | ||
| 523 | } | ||
| 524 | |||
| 525 | /* | ||
| 526 | * This routine is called to move the tail of the AIL forward. It does this by | ||
| 527 | * trying to flush items in the AIL whose lsns are below the given | ||
| 528 | * threshold_lsn. | ||
| 529 | * | ||
| 530 | * The push is run asynchronously in a workqueue, which means the caller needs | ||
| 531 | * to handle waiting on the async flush for space to become available. | ||
| 532 | * We don't want to interrupt any push that is in progress, hence we only queue | ||
| 533 | * work if we set the pushing bit appropriately. | ||
| 534 | * | ||
| 535 | * We do this unlocked - we only need to know whether there is anything in the | ||
| 536 | * AIL at the time we are called. We don't need to access the contents of | ||
| 537 | * any of the objects, so the lock is not needed. | ||
| 538 | */ | ||
| 539 | void | ||
| 540 | xfs_ail_push( | ||
| 541 | struct xfs_ail *ailp, | ||
| 542 | xfs_lsn_t threshold_lsn) | ||
| 543 | { | ||
| 544 | xfs_log_item_t *lip; | ||
| 545 | |||
| 546 | lip = xfs_ail_min(ailp); | ||
| 547 | if (!lip || XFS_FORCED_SHUTDOWN(ailp->xa_mount) || | ||
| 548 | XFS_LSN_CMP(threshold_lsn, ailp->xa_target) <= 0) | ||
| 549 | return; | ||
| 550 | |||
| 551 | /* | ||
| 552 | * Ensure that the new target is noticed in push code before it clears | ||
| 553 | * the XFS_AIL_PUSHING_BIT. | ||
| 554 | */ | ||
| 555 | smp_wmb(); | ||
| 556 | ailp->xa_target = threshold_lsn; | ||
| 557 | if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags)) | ||
| 558 | queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0); | ||
| 405 | } | 559 | } |
| 406 | 560 | ||
| 561 | /* | ||
| 562 | * Push out all items in the AIL immediately | ||
| 563 | */ | ||
| 564 | void | ||
| 565 | xfs_ail_push_all( | ||
| 566 | struct xfs_ail *ailp) | ||
| 567 | { | ||
| 568 | xfs_lsn_t threshold_lsn = xfs_ail_max_lsn(ailp); | ||
| 569 | |||
| 570 | if (threshold_lsn) | ||
| 571 | xfs_ail_push(ailp, threshold_lsn); | ||
| 572 | } | ||
| 407 | 573 | ||
| 408 | /* | 574 | /* |
| 409 | * This is to be called when an item is unlocked that may have | 575 | * This is to be called when an item is unlocked that may have |
| @@ -615,7 +781,6 @@ xfs_trans_ail_init( | |||
| 615 | xfs_mount_t *mp) | 781 | xfs_mount_t *mp) |
| 616 | { | 782 | { |
| 617 | struct xfs_ail *ailp; | 783 | struct xfs_ail *ailp; |
| 618 | int error; | ||
| 619 | 784 | ||
| 620 | ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL); | 785 | ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL); |
| 621 | if (!ailp) | 786 | if (!ailp) |
| @@ -624,15 +789,9 @@ xfs_trans_ail_init( | |||
| 624 | ailp->xa_mount = mp; | 789 | ailp->xa_mount = mp; |
| 625 | INIT_LIST_HEAD(&ailp->xa_ail); | 790 | INIT_LIST_HEAD(&ailp->xa_ail); |
| 626 | spin_lock_init(&ailp->xa_lock); | 791 | spin_lock_init(&ailp->xa_lock); |
| 627 | error = xfsaild_start(ailp); | 792 | INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker); |
| 628 | if (error) | ||
| 629 | goto out_free_ailp; | ||
| 630 | mp->m_ail = ailp; | 793 | mp->m_ail = ailp; |
| 631 | return 0; | 794 | return 0; |
| 632 | |||
| 633 | out_free_ailp: | ||
| 634 | kmem_free(ailp); | ||
| 635 | return error; | ||
| 636 | } | 795 | } |
| 637 | 796 | ||
| 638 | void | 797 | void |
| @@ -641,124 +800,6 @@ xfs_trans_ail_destroy( | |||
| 641 | { | 800 | { |
| 642 | struct xfs_ail *ailp = mp->m_ail; | 801 | struct xfs_ail *ailp = mp->m_ail; |
| 643 | 802 | ||
| 644 | xfsaild_stop(ailp); | 803 | cancel_delayed_work_sync(&ailp->xa_work); |
| 645 | kmem_free(ailp); | 804 | kmem_free(ailp); |
| 646 | } | 805 | } |
| 647 | |||
| 648 | /* | ||
| 649 | * splice the log item list into the AIL at the given LSN. | ||
| 650 | */ | ||
| 651 | STATIC void | ||
| 652 | xfs_ail_splice( | ||
| 653 | struct xfs_ail *ailp, | ||
| 654 | struct list_head *list, | ||
| 655 | xfs_lsn_t lsn) | ||
| 656 | { | ||
| 657 | xfs_log_item_t *next_lip; | ||
| 658 | |||
| 659 | /* | ||
| 660 | * If the list is empty, just insert the item. | ||
| 661 | */ | ||
| 662 | if (list_empty(&ailp->xa_ail)) { | ||
| 663 | list_splice(list, &ailp->xa_ail); | ||
| 664 | return; | ||
| 665 | } | ||
| 666 | |||
| 667 | list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) { | ||
| 668 | if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0) | ||
| 669 | break; | ||
| 670 | } | ||
| 671 | |||
| 672 | ASSERT((&next_lip->li_ail == &ailp->xa_ail) || | ||
| 673 | (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)); | ||
| 674 | |||
| 675 | list_splice_init(list, &next_lip->li_ail); | ||
| 676 | return; | ||
| 677 | } | ||
| 678 | |||
| 679 | /* | ||
| 680 | * Delete the given item from the AIL. Return a pointer to the item. | ||
| 681 | */ | ||
| 682 | STATIC void | ||
| 683 | xfs_ail_delete( | ||
| 684 | struct xfs_ail *ailp, | ||
| 685 | xfs_log_item_t *lip) | ||
| 686 | { | ||
| 687 | xfs_ail_check(ailp, lip); | ||
| 688 | list_del(&lip->li_ail); | ||
| 689 | xfs_trans_ail_cursor_clear(ailp, lip); | ||
| 690 | } | ||
| 691 | |||
| 692 | /* | ||
| 693 | * Return a pointer to the first item in the AIL. | ||
| 694 | * If the AIL is empty, then return NULL. | ||
| 695 | */ | ||
| 696 | STATIC xfs_log_item_t * | ||
| 697 | xfs_ail_min( | ||
| 698 | struct xfs_ail *ailp) | ||
| 699 | { | ||
| 700 | if (list_empty(&ailp->xa_ail)) | ||
| 701 | return NULL; | ||
| 702 | |||
| 703 | return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail); | ||
| 704 | } | ||
| 705 | |||
| 706 | /* | ||
| 707 | * Return a pointer to the item which follows | ||
| 708 | * the given item in the AIL. If the given item | ||
| 709 | * is the last item in the list, then return NULL. | ||
| 710 | */ | ||
| 711 | STATIC xfs_log_item_t * | ||
| 712 | xfs_ail_next( | ||
| 713 | struct xfs_ail *ailp, | ||
| 714 | xfs_log_item_t *lip) | ||
| 715 | { | ||
| 716 | if (lip->li_ail.next == &ailp->xa_ail) | ||
| 717 | return NULL; | ||
| 718 | |||
| 719 | return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail); | ||
| 720 | } | ||
| 721 | |||
| 722 | #ifdef DEBUG | ||
| 723 | /* | ||
| 724 | * Check that the list is sorted as it should be. | ||
| 725 | */ | ||
| 726 | STATIC void | ||
| 727 | xfs_ail_check( | ||
| 728 | struct xfs_ail *ailp, | ||
| 729 | xfs_log_item_t *lip) | ||
| 730 | { | ||
| 731 | xfs_log_item_t *prev_lip; | ||
| 732 | |||
| 733 | if (list_empty(&ailp->xa_ail)) | ||
| 734 | return; | ||
| 735 | |||
| 736 | /* | ||
| 737 | * Check the next and previous entries are valid. | ||
| 738 | */ | ||
| 739 | ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0); | ||
| 740 | prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail); | ||
| 741 | if (&prev_lip->li_ail != &ailp->xa_ail) | ||
| 742 | ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0); | ||
| 743 | |||
| 744 | prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail); | ||
| 745 | if (&prev_lip->li_ail != &ailp->xa_ail) | ||
| 746 | ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0); | ||
| 747 | |||
| 748 | |||
| 749 | #ifdef XFS_TRANS_DEBUG | ||
| 750 | /* | ||
| 751 | * Walk the list checking lsn ordering, and that every entry has the | ||
| 752 | * XFS_LI_IN_AIL flag set. This is really expensive, so only do it | ||
| 753 | * when specifically debugging the transaction subsystem. | ||
| 754 | */ | ||
| 755 | prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail); | ||
| 756 | list_for_each_entry(lip, &ailp->xa_ail, li_ail) { | ||
| 757 | if (&prev_lip->li_ail != &ailp->xa_ail) | ||
| 758 | ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0); | ||
| 759 | ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0); | ||
| 760 | prev_lip = lip; | ||
| 761 | } | ||
| 762 | #endif /* XFS_TRANS_DEBUG */ | ||
| 763 | } | ||
| 764 | #endif /* DEBUG */ | ||
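[Editorial sketch] The AIL changes above move pushing from the xfsaild thread into a delayed work item guarded by XFS_AIL_PUSHING_BIT. The sketch below mirrors that shape with invented demo_* names: the producer publishes a new target and only queues work if no push is in flight, and the worker re-checks the target before clearing the bit so that an update racing with completion is not lost. It is an illustration of the structure, not a drop-in replacement.

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>

#define DEMO_PUSHING_BIT	0

struct demo_pusher {
	unsigned long		flags;
	unsigned long		target;		/* goal the worker is pushing towards */
	struct workqueue_struct	*wq;
	struct delayed_work	work;
};

/* producer: raise the target and start a push only if none is in flight */
static void demo_push(struct demo_pusher *p, unsigned long target)
{
	if (target <= p->target)
		return;

	smp_wmb();		/* as in xfs_ail_push(): publish before testing the bit */
	p->target = target;
	if (!test_and_set_bit(DEMO_PUSHING_BIT, &p->flags))
		queue_delayed_work(p->wq, &p->work, 0);
}

/* consumer: one pass, then either requeue or clear the pushing bit */
static void demo_worker(struct work_struct *work)
{
	struct demo_pusher *p = container_of(to_delayed_work(work),
					     struct demo_pusher, work);
	unsigned long target = p->target;

	/* ... push items up to 'target' here ... */

	smp_rmb();		/* notice a target raised while this pass ran */
	if (p->target == target) {
		clear_bit(DEMO_PUSHING_BIT, &p->flags);
		return;
	}
	queue_delayed_work(p->wq, &p->work, msecs_to_jiffies(10));
}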
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h index 35162c238fa3..6b164e9e9a1f 100644 --- a/fs/xfs/xfs_trans_priv.h +++ b/fs/xfs/xfs_trans_priv.h | |||
| @@ -65,16 +65,22 @@ struct xfs_ail_cursor { | |||
| 65 | struct xfs_ail { | 65 | struct xfs_ail { |
| 66 | struct xfs_mount *xa_mount; | 66 | struct xfs_mount *xa_mount; |
| 67 | struct list_head xa_ail; | 67 | struct list_head xa_ail; |
| 68 | uint xa_gen; | ||
| 69 | struct task_struct *xa_task; | ||
| 70 | xfs_lsn_t xa_target; | 68 | xfs_lsn_t xa_target; |
| 71 | struct xfs_ail_cursor xa_cursors; | 69 | struct xfs_ail_cursor xa_cursors; |
| 72 | spinlock_t xa_lock; | 70 | spinlock_t xa_lock; |
| 71 | struct delayed_work xa_work; | ||
| 72 | xfs_lsn_t xa_last_pushed_lsn; | ||
| 73 | unsigned long xa_flags; | ||
| 73 | }; | 74 | }; |
| 74 | 75 | ||
| 76 | #define XFS_AIL_PUSHING_BIT 0 | ||
| 77 | |||
| 75 | /* | 78 | /* |
| 76 | * From xfs_trans_ail.c | 79 | * From xfs_trans_ail.c |
| 77 | */ | 80 | */ |
| 81 | |||
| 82 | extern struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */ | ||
| 83 | |||
| 78 | void xfs_trans_ail_update_bulk(struct xfs_ail *ailp, | 84 | void xfs_trans_ail_update_bulk(struct xfs_ail *ailp, |
| 79 | struct xfs_log_item **log_items, int nr_items, | 85 | struct xfs_log_item **log_items, int nr_items, |
| 80 | xfs_lsn_t lsn) __releases(ailp->xa_lock); | 86 | xfs_lsn_t lsn) __releases(ailp->xa_lock); |
| @@ -98,12 +104,13 @@ xfs_trans_ail_delete( | |||
| 98 | xfs_trans_ail_delete_bulk(ailp, &lip, 1); | 104 | xfs_trans_ail_delete_bulk(ailp, &lip, 1); |
| 99 | } | 105 | } |
| 100 | 106 | ||
| 101 | void xfs_trans_ail_push(struct xfs_ail *, xfs_lsn_t); | 107 | void xfs_ail_push(struct xfs_ail *, xfs_lsn_t); |
| 108 | void xfs_ail_push_all(struct xfs_ail *); | ||
| 109 | xfs_lsn_t xfs_ail_min_lsn(struct xfs_ail *ailp); | ||
| 110 | |||
| 102 | void xfs_trans_unlocked_item(struct xfs_ail *, | 111 | void xfs_trans_unlocked_item(struct xfs_ail *, |
| 103 | xfs_log_item_t *); | 112 | xfs_log_item_t *); |
| 104 | 113 | ||
| 105 | xfs_lsn_t xfs_trans_ail_tail(struct xfs_ail *ailp); | ||
| 106 | |||
| 107 | struct xfs_log_item *xfs_trans_ail_cursor_first(struct xfs_ail *ailp, | 114 | struct xfs_log_item *xfs_trans_ail_cursor_first(struct xfs_ail *ailp, |
| 108 | struct xfs_ail_cursor *cur, | 115 | struct xfs_ail_cursor *cur, |
| 109 | xfs_lsn_t lsn); | 116 | xfs_lsn_t lsn); |
| @@ -112,11 +119,6 @@ struct xfs_log_item *xfs_trans_ail_cursor_next(struct xfs_ail *ailp, | |||
| 112 | void xfs_trans_ail_cursor_done(struct xfs_ail *ailp, | 119 | void xfs_trans_ail_cursor_done(struct xfs_ail *ailp, |
| 113 | struct xfs_ail_cursor *cur); | 120 | struct xfs_ail_cursor *cur); |
| 114 | 121 | ||
| 115 | long xfsaild_push(struct xfs_ail *, xfs_lsn_t *); | ||
| 116 | void xfsaild_wakeup(struct xfs_ail *, xfs_lsn_t); | ||
| 117 | int xfsaild_start(struct xfs_ail *); | ||
| 118 | void xfsaild_stop(struct xfs_ail *); | ||
| 119 | |||
| 120 | #if BITS_PER_LONG != 64 | 122 | #if BITS_PER_LONG != 64 |
| 121 | static inline void | 123 | static inline void |
| 122 | xfs_trans_ail_copy_lsn( | 124 | xfs_trans_ail_copy_lsn( |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 32176cc8e715..1c76506fcf11 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
| @@ -196,6 +196,7 @@ typedef void (request_fn_proc) (struct request_queue *q); | |||
| 196 | typedef int (make_request_fn) (struct request_queue *q, struct bio *bio); | 196 | typedef int (make_request_fn) (struct request_queue *q, struct bio *bio); |
| 197 | typedef int (prep_rq_fn) (struct request_queue *, struct request *); | 197 | typedef int (prep_rq_fn) (struct request_queue *, struct request *); |
| 198 | typedef void (unprep_rq_fn) (struct request_queue *, struct request *); | 198 | typedef void (unprep_rq_fn) (struct request_queue *, struct request *); |
| 199 | typedef void (unplugged_fn) (struct request_queue *); | ||
| 199 | 200 | ||
| 200 | struct bio_vec; | 201 | struct bio_vec; |
| 201 | struct bvec_merge_data { | 202 | struct bvec_merge_data { |
| @@ -283,6 +284,7 @@ struct request_queue | |||
| 283 | rq_timed_out_fn *rq_timed_out_fn; | 284 | rq_timed_out_fn *rq_timed_out_fn; |
| 284 | dma_drain_needed_fn *dma_drain_needed; | 285 | dma_drain_needed_fn *dma_drain_needed; |
| 285 | lld_busy_fn *lld_busy_fn; | 286 | lld_busy_fn *lld_busy_fn; |
| 287 | unplugged_fn *unplugged_fn; | ||
| 286 | 288 | ||
| 287 | /* | 289 | /* |
| 288 | * Dispatch queue sorting | 290 | * Dispatch queue sorting |
| @@ -841,6 +843,7 @@ extern void blk_queue_dma_alignment(struct request_queue *, int); | |||
| 841 | extern void blk_queue_update_dma_alignment(struct request_queue *, int); | 843 | extern void blk_queue_update_dma_alignment(struct request_queue *, int); |
| 842 | extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); | 844 | extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); |
| 843 | extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); | 845 | extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); |
| 846 | extern void blk_queue_unplugged(struct request_queue *, unplugged_fn *); | ||
| 844 | extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); | 847 | extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); |
| 845 | extern void blk_queue_flush(struct request_queue *q, unsigned int flush); | 848 | extern void blk_queue_flush(struct request_queue *q, unsigned int flush); |
| 846 | extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); | 849 | extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); |
| @@ -862,14 +865,14 @@ struct blk_plug { | |||
| 862 | 865 | ||
| 863 | extern void blk_start_plug(struct blk_plug *); | 866 | extern void blk_start_plug(struct blk_plug *); |
| 864 | extern void blk_finish_plug(struct blk_plug *); | 867 | extern void blk_finish_plug(struct blk_plug *); |
| 865 | extern void __blk_flush_plug(struct task_struct *, struct blk_plug *); | 868 | extern void blk_flush_plug_list(struct blk_plug *, bool); |
| 866 | 869 | ||
| 867 | static inline void blk_flush_plug(struct task_struct *tsk) | 870 | static inline void blk_flush_plug(struct task_struct *tsk) |
| 868 | { | 871 | { |
| 869 | struct blk_plug *plug = tsk->plug; | 872 | struct blk_plug *plug = tsk->plug; |
| 870 | 873 | ||
| 871 | if (unlikely(plug)) | 874 | if (plug) |
| 872 | __blk_flush_plug(tsk, plug); | 875 | blk_flush_plug_list(plug, true); |
| 873 | } | 876 | } |
| 874 | 877 | ||
| 875 | static inline bool blk_needs_flush_plug(struct task_struct *tsk) | 878 | static inline bool blk_needs_flush_plug(struct task_struct *tsk) |
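Note on the blkdev.h hunk above: it adds an unplugged_fn hook next to the new per-task plugging API (blk_start_plug()/blk_finish_plug()) and renames __blk_flush_plug() to blk_flush_plug_list(). A minimal, hedged sketch of how a request-based driver might register the new callback -- struct mydrv_dev, mydrv_request_fn() and the pr_debug body are illustrative placeholders, not part of this series:

	#include <linux/blkdev.h>
	#include <linux/spinlock.h>

	struct mydrv_dev {			/* hypothetical driver state */
		struct request_queue *queue;
		spinlock_t lock;
	};

	static void mydrv_request_fn(struct request_queue *q)
	{
		/* A real driver would drain q here with blk_fetch_request(). */
	}

	/* Called by the block layer when a plug covering this queue is released. */
	static void mydrv_unplugged(struct request_queue *q)
	{
		struct mydrv_dev *dev = q->queuedata;

		/* A real driver would kick its hardware to start processing. */
		pr_debug("mydrv: queue %p unplugged\n", dev->queue);
	}

	static int mydrv_init_queue(struct mydrv_dev *dev)
	{
		spin_lock_init(&dev->lock);
		dev->queue = blk_init_queue(mydrv_request_fn, &dev->lock);
		if (!dev->queue)
			return -ENOMEM;

		dev->queue->queuedata = dev;
		blk_queue_unplugged(dev->queue, mydrv_unplugged); /* hook added above */
		return 0;
	}

Submitters batch I/O with a struct blk_plug on the stack between blk_start_plug() and blk_finish_plug(); per the hunk, blk_flush_plug() now forwards to blk_flush_plug_list() when a plugged task is about to sleep.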
diff --git a/include/linux/can/platform/mcp251x.h b/include/linux/can/platform/mcp251x.h index 8e20540043f5..089fe43211a4 100644 --- a/include/linux/can/platform/mcp251x.h +++ b/include/linux/can/platform/mcp251x.h | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | /** | 12 | /** |
| 13 | * struct mcp251x_platform_data - MCP251X SPI CAN controller platform data | 13 | * struct mcp251x_platform_data - MCP251X SPI CAN controller platform data |
| 14 | * @oscillator_frequency: - oscillator frequency in Hz | 14 | * @oscillator_frequency: - oscillator frequency in Hz |
| 15 | * @irq_flags: - IRQF configuration flags | ||
| 15 | * @board_specific_setup: - called before probing the chip (power,reset) | 16 | * @board_specific_setup: - called before probing the chip (power,reset) |
| 16 | * @transceiver_enable: - called to power on/off the transceiver | 17 | * @transceiver_enable: - called to power on/off the transceiver |
| 17 | * @power_enable: - called to power on/off the mcp *and* the | 18 | * @power_enable: - called to power on/off the mcp *and* the |
| @@ -24,6 +25,7 @@ | |||
| 24 | 25 | ||
| 25 | struct mcp251x_platform_data { | 26 | struct mcp251x_platform_data { |
| 26 | unsigned long oscillator_frequency; | 27 | unsigned long oscillator_frequency; |
| 28 | unsigned long irq_flags; | ||
| 27 | int (*board_specific_setup)(struct spi_device *spi); | 29 | int (*board_specific_setup)(struct spi_device *spi); |
| 28 | int (*transceiver_enable)(int enable); | 30 | int (*transceiver_enable)(int enable); |
| 29 | int (*power_enable) (int enable); | 31 | int (*power_enable) (int enable); |
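With the new irq_flags field, board code can hand the MCP251x interrupt trigger configuration to the driver instead of relying on a driver default. A hedged board-file sketch -- the oscillator value, SPI id string, bus/chip-select numbers and trigger type are placeholders chosen for illustration:

	#include <linux/interrupt.h>
	#include <linux/spi/spi.h>
	#include <linux/can/platform/mcp251x.h>

	static struct mcp251x_platform_data myboard_mcp251x_pdata = {
		.oscillator_frequency	= 16000000,		/* 16 MHz crystal */
		.irq_flags		= IRQF_TRIGGER_FALLING,	/* new field from this hunk */
	};

	static struct spi_board_info myboard_spi_devs[] __initdata = {
		{
			.modalias	= "mcp2515",		/* assumed SPI id */
			.platform_data	= &myboard_mcp251x_pdata,
			.max_speed_hz	= 10 * 1000 * 1000,
			.bus_num	= 0,			/* placeholder */
			.chip_select	= 0,			/* placeholder */
		},
	};

The array would be registered with spi_register_board_info(myboard_spi_devs, ARRAY_SIZE(myboard_spi_devs)), with .irq filled in from the board's interrupt line.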
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 5a5ce7055839..5e9840f50980 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
| @@ -216,7 +216,7 @@ static inline void mem_cgroup_del_lru_list(struct page *page, int lru) | |||
| 216 | return ; | 216 | return ; |
| 217 | } | 217 | } |
| 218 | 218 | ||
| 219 | static inline inline void mem_cgroup_rotate_reclaimable_page(struct page *page) | 219 | static inline void mem_cgroup_rotate_reclaimable_page(struct page *page) |
| 220 | { | 220 | { |
| 221 | return ; | 221 | return ; |
| 222 | } | 222 | } |
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index ad1b19aa6508..aef23309a742 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h | |||
| @@ -86,16 +86,25 @@ extern int mfd_clone_cell(const char *cell, const char **clones, | |||
| 86 | */ | 86 | */ |
| 87 | static inline const struct mfd_cell *mfd_get_cell(struct platform_device *pdev) | 87 | static inline const struct mfd_cell *mfd_get_cell(struct platform_device *pdev) |
| 88 | { | 88 | { |
| 89 | return pdev->dev.platform_data; | 89 | return pdev->mfd_cell; |
| 90 | } | 90 | } |
| 91 | 91 | ||
| 92 | /* | 92 | /* |
| 93 | * Given a platform device that's been created by mfd_add_devices(), fetch | 93 | * Given a platform device that's been created by mfd_add_devices(), fetch |
| 94 | * the .mfd_data entry from the mfd_cell that created it. | 94 | * the .mfd_data entry from the mfd_cell that created it. |
| 95 | * Otherwise just return the platform_data pointer. | ||
| 96 | * This maintains compatibility with platform drivers whose devices aren't | ||
| 97 | * created by the mfd layer, and expect platform_data to contain what would've | ||
| 98 | * otherwise been in mfd_data. | ||
| 95 | */ | 99 | */ |
| 96 | static inline void *mfd_get_data(struct platform_device *pdev) | 100 | static inline void *mfd_get_data(struct platform_device *pdev) |
| 97 | { | 101 | { |
| 98 | return mfd_get_cell(pdev)->mfd_data; | 102 | const struct mfd_cell *cell = mfd_get_cell(pdev); |
| 103 | |||
| 104 | if (cell) | ||
| 105 | return cell->mfd_data; | ||
| 106 | else | ||
| 107 | return pdev->dev.platform_data; | ||
| 99 | } | 108 | } |
| 100 | 109 | ||
| 101 | extern int mfd_add_devices(struct device *parent, int id, | 110 | extern int mfd_add_devices(struct device *parent, int id, |
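mfd_get_cell() now reads the cell from the new pdev->mfd_cell pointer (see the platform_device.h hunk further down) rather than from platform_data, and mfd_get_data() falls back to platform_data for devices not created by the MFD core. A sub-device driver can therefore fetch its data the same way in both cases -- a minimal sketch, with struct mydrv_pdata as an assumed per-driver type:

	#include <linux/platform_device.h>
	#include <linux/mfd/core.h>

	struct mydrv_pdata {		/* hypothetical per-device configuration */
		int irq;
	};

	static int __devinit mydrv_probe(struct platform_device *pdev)
	{
		/* Either the parent mfd_cell's .mfd_data or, for a plain
		 * platform device, dev.platform_data. */
		struct mydrv_pdata *pdata = mfd_get_data(pdev);

		if (!pdata)
			return -EINVAL;

		dev_info(&pdev->dev, "configured with irq %d\n", pdata->irq);
		return 0;
	}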
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index eeec00abb664..7fa95df60146 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h | |||
| @@ -270,7 +270,8 @@ struct nf_afinfo { | |||
| 270 | unsigned int dataoff, | 270 | unsigned int dataoff, |
| 271 | unsigned int len, | 271 | unsigned int len, |
| 272 | u_int8_t protocol); | 272 | u_int8_t protocol); |
| 273 | int (*route)(struct dst_entry **dst, struct flowi *fl); | 273 | int (*route)(struct net *net, struct dst_entry **dst, |
| 274 | struct flowi *fl, bool strict); | ||
| 274 | void (*saveroute)(const struct sk_buff *skb, | 275 | void (*saveroute)(const struct sk_buff *skb, |
| 275 | struct nf_queue_entry *entry); | 276 | struct nf_queue_entry *entry); |
| 276 | int (*reroute)(struct sk_buff *skb, | 277 | int (*reroute)(struct sk_buff *skb, |
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index ec333d83f3b4..5a262e3ae715 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h | |||
| @@ -293,7 +293,7 @@ struct ip_set { | |||
| 293 | /* Lock protecting the set data */ | 293 | /* Lock protecting the set data */ |
| 294 | rwlock_t lock; | 294 | rwlock_t lock; |
| 295 | /* References to the set */ | 295 | /* References to the set */ |
| 296 | atomic_t ref; | 296 | u32 ref; |
| 297 | /* The core set type */ | 297 | /* The core set type */ |
| 298 | struct ip_set_type *type; | 298 | struct ip_set_type *type; |
| 299 | /* The type variant doing the real job */ | 299 | /* The type variant doing the real job */ |
diff --git a/include/linux/netfilter/ipset/ip_set_ahash.h b/include/linux/netfilter/ipset/ip_set_ahash.h index ec9d9bea1e37..a0196ac79051 100644 --- a/include/linux/netfilter/ipset/ip_set_ahash.h +++ b/include/linux/netfilter/ipset/ip_set_ahash.h | |||
| @@ -515,8 +515,7 @@ type_pf_head(struct ip_set *set, struct sk_buff *skb) | |||
| 515 | if (h->netmask != HOST_MASK) | 515 | if (h->netmask != HOST_MASK) |
| 516 | NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, h->netmask); | 516 | NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, h->netmask); |
| 517 | #endif | 517 | #endif |
| 518 | NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, | 518 | NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)); |
| 519 | htonl(atomic_read(&set->ref) - 1)); | ||
| 520 | NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)); | 519 | NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)); |
| 521 | if (with_timeout(h->timeout)) | 520 | if (with_timeout(h->timeout)) |
| 522 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout)); | 521 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout)); |
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index d96db9825708..744942c95fec 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h | |||
| @@ -14,6 +14,8 @@ | |||
| 14 | #include <linux/device.h> | 14 | #include <linux/device.h> |
| 15 | #include <linux/mod_devicetable.h> | 15 | #include <linux/mod_devicetable.h> |
| 16 | 16 | ||
| 17 | struct mfd_cell; | ||
| 18 | |||
| 17 | struct platform_device { | 19 | struct platform_device { |
| 18 | const char * name; | 20 | const char * name; |
| 19 | int id; | 21 | int id; |
| @@ -23,6 +25,9 @@ struct platform_device { | |||
| 23 | 25 | ||
| 24 | const struct platform_device_id *id_entry; | 26 | const struct platform_device_id *id_entry; |
| 25 | 27 | ||
| 28 | /* MFD cell pointer */ | ||
| 29 | struct mfd_cell *mfd_cell; | ||
| 30 | |||
| 26 | /* arch specific additions */ | 31 | /* arch specific additions */ |
| 27 | struct pdev_archdata archdata; | 32 | struct pdev_archdata archdata; |
| 28 | }; | 33 | }; |
diff --git a/include/linux/rio.h b/include/linux/rio.h index 4e37a7cfa726..4d50611112ba 100644 --- a/include/linux/rio.h +++ b/include/linux/rio.h | |||
| @@ -396,7 +396,7 @@ union rio_pw_msg { | |||
| 396 | }; | 396 | }; |
| 397 | 397 | ||
| 398 | /* Architecture and hardware-specific functions */ | 398 | /* Architecture and hardware-specific functions */ |
| 399 | extern void rio_register_mport(struct rio_mport *); | 399 | extern int rio_register_mport(struct rio_mport *); |
| 400 | extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int); | 400 | extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int); |
| 401 | extern void rio_close_inb_mbox(struct rio_mport *, int); | 401 | extern void rio_close_inb_mbox(struct rio_mport *, int); |
| 402 | extern int rio_open_outb_mbox(struct rio_mport *, void *, int, int); | 402 | extern int rio_open_outb_mbox(struct rio_mport *, void *, int, int); |
diff --git a/include/linux/rio_ids.h b/include/linux/rio_ids.h index 7410d3365e2a..0cee0152aca9 100644 --- a/include/linux/rio_ids.h +++ b/include/linux/rio_ids.h | |||
| @@ -35,6 +35,7 @@ | |||
| 35 | #define RIO_DID_IDTCPS6Q 0x035f | 35 | #define RIO_DID_IDTCPS6Q 0x035f |
| 36 | #define RIO_DID_IDTCPS10Q 0x035e | 36 | #define RIO_DID_IDTCPS10Q 0x035e |
| 37 | #define RIO_DID_IDTCPS1848 0x0374 | 37 | #define RIO_DID_IDTCPS1848 0x0374 |
| 38 | #define RIO_DID_IDTCPS1432 0x0375 | ||
| 38 | #define RIO_DID_IDTCPS1616 0x0379 | 39 | #define RIO_DID_IDTCPS1616 0x0379 |
| 39 | #define RIO_DID_IDTVPS1616 0x0377 | 40 | #define RIO_DID_IDTVPS1616 0x0377 |
| 40 | #define RIO_DID_IDTSPS1616 0x0378 | 41 | #define RIO_DID_IDTSPS1616 0x0378 |
diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 2ca7e8a78060..877ece45426f 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h | |||
| @@ -228,6 +228,8 @@ extern int rtc_read_alarm(struct rtc_device *rtc, | |||
| 228 | struct rtc_wkalrm *alrm); | 228 | struct rtc_wkalrm *alrm); |
| 229 | extern int rtc_set_alarm(struct rtc_device *rtc, | 229 | extern int rtc_set_alarm(struct rtc_device *rtc, |
| 230 | struct rtc_wkalrm *alrm); | 230 | struct rtc_wkalrm *alrm); |
| 231 | extern int rtc_initialize_alarm(struct rtc_device *rtc, | ||
| 232 | struct rtc_wkalrm *alrm); | ||
| 231 | extern void rtc_update_irq(struct rtc_device *rtc, | 233 | extern void rtc_update_irq(struct rtc_device *rtc, |
| 232 | unsigned long num, unsigned long events); | 234 | unsigned long num, unsigned long events); |
| 233 | 235 | ||
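rtc_initialize_alarm() lets a driver seed the RTC core with an alarm that is already programmed in hardware, without the "alarm lies in the past" rejection applied by rtc_set_alarm(). A hedged probe-time sketch, assuming the caller has already read the alarm time back from the device:

	#include <linux/rtc.h>
	#include <linux/string.h>

	static int mydrv_restore_alarm(struct rtc_device *rtc,
				       struct rtc_time *hw_time)
	{
		struct rtc_wkalrm alrm;

		/* hw_time is the alarm previously read from the hardware. */
		memset(&alrm, 0, sizeof(alrm));
		alrm.time = *hw_time;
		alrm.enabled = 1;

		return rtc_initialize_alarm(rtc, &alrm);	/* new API from this hunk */
	}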
diff --git a/include/linux/sched.h b/include/linux/sched.h index 4ec2c027e92c..18d63cea2848 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -1254,6 +1254,9 @@ struct task_struct { | |||
| 1254 | #endif | 1254 | #endif |
| 1255 | 1255 | ||
| 1256 | struct mm_struct *mm, *active_mm; | 1256 | struct mm_struct *mm, *active_mm; |
| 1257 | #ifdef CONFIG_COMPAT_BRK | ||
| 1258 | unsigned brk_randomized:1; | ||
| 1259 | #endif | ||
| 1257 | #if defined(SPLIT_RSS_COUNTING) | 1260 | #if defined(SPLIT_RSS_COUNTING) |
| 1258 | struct task_rss_stat rss_stat; | 1261 | struct task_rss_stat rss_stat; |
| 1259 | #endif | 1262 | #endif |
diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 5a89e3612875..083ffea7ba18 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h | |||
| @@ -249,6 +249,8 @@ extern void hibernation_set_ops(const struct platform_hibernation_ops *ops); | |||
| 249 | extern int hibernate(void); | 249 | extern int hibernate(void); |
| 250 | extern bool system_entering_hibernation(void); | 250 | extern bool system_entering_hibernation(void); |
| 251 | #else /* CONFIG_HIBERNATION */ | 251 | #else /* CONFIG_HIBERNATION */ |
| 252 | static inline void register_nosave_region(unsigned long b, unsigned long e) {} | ||
| 253 | static inline void register_nosave_region_late(unsigned long b, unsigned long e) {} | ||
| 252 | static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } | 254 | static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } |
| 253 | static inline void swsusp_set_page_free(struct page *p) {} | 255 | static inline void swsusp_set_page_free(struct page *p) {} |
| 254 | static inline void swsusp_unset_page_free(struct page *p) {} | 256 | static inline void swsusp_unset_page_free(struct page *p) {} |
| @@ -297,14 +299,7 @@ static inline bool pm_wakeup_pending(void) { return false; } | |||
| 297 | 299 | ||
| 298 | extern struct mutex pm_mutex; | 300 | extern struct mutex pm_mutex; |
| 299 | 301 | ||
| 300 | #ifndef CONFIG_HIBERNATION | 302 | #ifndef CONFIG_HIBERNATE_CALLBACKS |
| 301 | static inline void register_nosave_region(unsigned long b, unsigned long e) | ||
| 302 | { | ||
| 303 | } | ||
| 304 | static inline void register_nosave_region_late(unsigned long b, unsigned long e) | ||
| 305 | { | ||
| 306 | } | ||
| 307 | |||
| 308 | static inline void lock_system_sleep(void) {} | 303 | static inline void lock_system_sleep(void) {} |
| 309 | static inline void unlock_system_sleep(void) {} | 304 | static inline void unlock_system_sleep(void) {} |
| 310 | 305 | ||
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 461c0119664f..2b3831b58aa4 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h | |||
| @@ -58,6 +58,13 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, | |||
| 58 | UNEVICTABLE_PGCLEARED, /* on COW, page truncate */ | 58 | UNEVICTABLE_PGCLEARED, /* on COW, page truncate */ |
| 59 | UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */ | 59 | UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */ |
| 60 | UNEVICTABLE_MLOCKFREED, | 60 | UNEVICTABLE_MLOCKFREED, |
| 61 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
| 62 | THP_FAULT_ALLOC, | ||
| 63 | THP_FAULT_FALLBACK, | ||
| 64 | THP_COLLAPSE_ALLOC, | ||
| 65 | THP_COLLAPSE_ALLOC_FAILED, | ||
| 66 | THP_SPLIT, | ||
| 67 | #endif | ||
| 61 | NR_VM_EVENT_ITEMS | 68 | NR_VM_EVENT_ITEMS |
| 62 | }; | 69 | }; |
| 63 | 70 | ||
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h index cdf2e8ac4309..d2df55b0c213 100644 --- a/include/net/9p/9p.h +++ b/include/net/9p/9p.h | |||
| @@ -139,8 +139,6 @@ do { \ | |||
| 139 | */ | 139 | */ |
| 140 | 140 | ||
| 141 | enum p9_msg_t { | 141 | enum p9_msg_t { |
| 142 | P9_TSYNCFS = 0, | ||
| 143 | P9_RSYNCFS, | ||
| 144 | P9_TLERROR = 6, | 142 | P9_TLERROR = 6, |
| 145 | P9_RLERROR, | 143 | P9_RLERROR, |
| 146 | P9_TSTATFS = 8, | 144 | P9_TSTATFS = 8, |
diff --git a/include/net/9p/client.h b/include/net/9p/client.h index 85c1413f054d..051a99f79769 100644 --- a/include/net/9p/client.h +++ b/include/net/9p/client.h | |||
| @@ -218,8 +218,8 @@ void p9_client_disconnect(struct p9_client *clnt); | |||
| 218 | void p9_client_begin_disconnect(struct p9_client *clnt); | 218 | void p9_client_begin_disconnect(struct p9_client *clnt); |
| 219 | struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid, | 219 | struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid, |
| 220 | char *uname, u32 n_uname, char *aname); | 220 | char *uname, u32 n_uname, char *aname); |
| 221 | struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames, | 221 | struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname, |
| 222 | int clone); | 222 | char **wnames, int clone); |
| 223 | int p9_client_open(struct p9_fid *fid, int mode); | 223 | int p9_client_open(struct p9_fid *fid, int mode); |
| 224 | int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode, | 224 | int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode, |
| 225 | char *extension); | 225 | char *extension); |
| @@ -230,7 +230,6 @@ int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode, | |||
| 230 | gid_t gid, struct p9_qid *qid); | 230 | gid_t gid, struct p9_qid *qid); |
| 231 | int p9_client_clunk(struct p9_fid *fid); | 231 | int p9_client_clunk(struct p9_fid *fid); |
| 232 | int p9_client_fsync(struct p9_fid *fid, int datasync); | 232 | int p9_client_fsync(struct p9_fid *fid, int datasync); |
| 233 | int p9_client_sync_fs(struct p9_fid *fid); | ||
| 234 | int p9_client_remove(struct p9_fid *fid); | 233 | int p9_client_remove(struct p9_fid *fid); |
| 235 | int p9_client_read(struct p9_fid *fid, char *data, char __user *udata, | 234 | int p9_client_read(struct p9_fid *fid, char *data, char __user *udata, |
| 236 | u64 offset, u32 count); | 235 | u64 offset, u32 count); |
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 814b434db749..d516f00c8e0f 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h | |||
| @@ -52,7 +52,7 @@ static inline struct net *skb_net(const struct sk_buff *skb) | |||
| 52 | */ | 52 | */ |
| 53 | if (likely(skb->dev && skb->dev->nd_net)) | 53 | if (likely(skb->dev && skb->dev->nd_net)) |
| 54 | return dev_net(skb->dev); | 54 | return dev_net(skb->dev); |
| 55 | if (skb_dst(skb)->dev) | 55 | if (skb_dst(skb) && skb_dst(skb)->dev) |
| 56 | return dev_net(skb_dst(skb)->dev); | 56 | return dev_net(skb_dst(skb)->dev); |
| 57 | WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n", | 57 | WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n", |
| 58 | __func__, __LINE__); | 58 | __func__, __LINE__); |
diff --git a/include/net/mac80211.h b/include/net/mac80211.h index cb13239fe8e3..025d4cc7bbf8 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h | |||
| @@ -1753,8 +1753,19 @@ enum ieee80211_ampdu_mlme_action { | |||
| 1753 | * that TX/RX_STOP can pass NULL for this parameter. | 1753 | * that TX/RX_STOP can pass NULL for this parameter. |
| 1754 | * The @buf_size parameter is only valid when the action is set to | 1754 | * The @buf_size parameter is only valid when the action is set to |
| 1755 | * %IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's reorder | 1755 | * %IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's reorder |
| 1756 | * buffer size (number of subframes) for this session -- aggregates | 1756 | * buffer size (number of subframes) for this session -- the driver |
| 1757 | * containing more subframes than this may not be transmitted to the peer. | 1757 | * may neither send aggregates containing more subframes than this |
| 1758 | * nor send aggregates in a way that lost frames would exceed the | ||
| 1759 | * buffer size. If just limiting the aggregate size, this would be | ||
| 1760 | * possible with a buf_size of 8: | ||
| 1761 | * - TX: 1.....7 | ||
| 1762 | * - RX: 2....7 (lost frame #1) | ||
| 1763 | * - TX: 8..1... | ||
| 1764 | * which is invalid since #1 was now re-transmitted well past the | ||
| 1765 | * buffer size of 8. Correct ways to retransmit #1 would be: | ||
| 1766 | * - TX: 1 or 18 or 81 | ||
| 1767 | * Even "189" would be wrong since 1 could be lost again. | ||
| 1768 | * | ||
| 1758 | * Returns a negative error code on failure. | 1769 | * Returns a negative error code on failure. |
| 1759 | * The callback can sleep. | 1770 | * The callback can sleep. |
| 1760 | * | 1771 | * |
diff --git a/include/net/route.h b/include/net/route.h index f88429cad52a..8fce0621cad1 100644 --- a/include/net/route.h +++ b/include/net/route.h | |||
| @@ -64,6 +64,7 @@ struct rtable { | |||
| 64 | 64 | ||
| 65 | __be32 rt_dst; /* Path destination */ | 65 | __be32 rt_dst; /* Path destination */ |
| 66 | __be32 rt_src; /* Path source */ | 66 | __be32 rt_src; /* Path source */ |
| 67 | int rt_route_iif; | ||
| 67 | int rt_iif; | 68 | int rt_iif; |
| 68 | int rt_oif; | 69 | int rt_oif; |
| 69 | __u32 rt_mark; | 70 | __u32 rt_mark; |
| @@ -80,12 +81,12 @@ struct rtable { | |||
| 80 | 81 | ||
| 81 | static inline bool rt_is_input_route(struct rtable *rt) | 82 | static inline bool rt_is_input_route(struct rtable *rt) |
| 82 | { | 83 | { |
| 83 | return rt->rt_iif != 0; | 84 | return rt->rt_route_iif != 0; |
| 84 | } | 85 | } |
| 85 | 86 | ||
| 86 | static inline bool rt_is_output_route(struct rtable *rt) | 87 | static inline bool rt_is_output_route(struct rtable *rt) |
| 87 | { | 88 | { |
| 88 | return rt->rt_iif == 0; | 89 | return rt->rt_route_iif == 0; |
| 89 | } | 90 | } |
| 90 | 91 | ||
| 91 | struct ip_rt_acct { | 92 | struct ip_rt_acct { |
diff --git a/include/trace/events/block.h b/include/trace/events/block.h index 78f18adb49c8..006e60b58306 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h | |||
| @@ -401,9 +401,9 @@ TRACE_EVENT(block_plug, | |||
| 401 | 401 | ||
| 402 | DECLARE_EVENT_CLASS(block_unplug, | 402 | DECLARE_EVENT_CLASS(block_unplug, |
| 403 | 403 | ||
| 404 | TP_PROTO(struct request_queue *q), | 404 | TP_PROTO(struct request_queue *q, unsigned int depth), |
| 405 | 405 | ||
| 406 | TP_ARGS(q), | 406 | TP_ARGS(q, depth), |
| 407 | 407 | ||
| 408 | TP_STRUCT__entry( | 408 | TP_STRUCT__entry( |
| 409 | __field( int, nr_rq ) | 409 | __field( int, nr_rq ) |
| @@ -411,7 +411,7 @@ DECLARE_EVENT_CLASS(block_unplug, | |||
| 411 | ), | 411 | ), |
| 412 | 412 | ||
| 413 | TP_fast_assign( | 413 | TP_fast_assign( |
| 414 | __entry->nr_rq = q->rq.count[READ] + q->rq.count[WRITE]; | 414 | __entry->nr_rq = depth; |
| 415 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | 415 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); |
| 416 | ), | 416 | ), |
| 417 | 417 | ||
| @@ -419,31 +419,18 @@ DECLARE_EVENT_CLASS(block_unplug, | |||
| 419 | ); | 419 | ); |
| 420 | 420 | ||
| 421 | /** | 421 | /** |
| 422 | * block_unplug_timer - timed release of operations requests in queue to device driver | ||
| 423 | * @q: request queue to unplug | ||
| 424 | * | ||
| 425 | * Unplug the request queue @q because a timer expired and allow block | ||
| 426 | * operation requests to be sent to the device driver. | ||
| 427 | */ | ||
| 428 | DEFINE_EVENT(block_unplug, block_unplug_timer, | ||
| 429 | |||
| 430 | TP_PROTO(struct request_queue *q), | ||
| 431 | |||
| 432 | TP_ARGS(q) | ||
| 433 | ); | ||
| 434 | |||
| 435 | /** | ||
| 436 | * block_unplug_io - release of operations requests in request queue | 422 | * block_unplug_io - release of operations requests in request queue |
| 437 | * @q: request queue to unplug | 423 | * @q: request queue to unplug |
| 424 | * @depth: number of requests just added to the queue | ||
| 438 | * | 425 | * |
| 439 | * Unplug request queue @q because device driver is scheduled to work | 426 | * Unplug request queue @q because device driver is scheduled to work |
| 440 | * on elements in the request queue. | 427 | * on elements in the request queue. |
| 441 | */ | 428 | */ |
| 442 | DEFINE_EVENT(block_unplug, block_unplug_io, | 429 | DEFINE_EVENT(block_unplug, block_unplug_io, |
| 443 | 430 | ||
| 444 | TP_PROTO(struct request_queue *q), | 431 | TP_PROTO(struct request_queue *q, unsigned int depth), |
| 445 | 432 | ||
| 446 | TP_ARGS(q) | 433 | TP_ARGS(q, depth) |
| 447 | ); | 434 | ); |
| 448 | 435 | ||
| 449 | /** | 436 | /** |
diff --git a/kernel/futex.c b/kernel/futex.c index dfb924ffe65b..fe28dc282eae 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
| @@ -1886,7 +1886,7 @@ retry: | |||
| 1886 | restart->futex.val = val; | 1886 | restart->futex.val = val; |
| 1887 | restart->futex.time = abs_time->tv64; | 1887 | restart->futex.time = abs_time->tv64; |
| 1888 | restart->futex.bitset = bitset; | 1888 | restart->futex.bitset = bitset; |
| 1889 | restart->futex.flags = flags; | 1889 | restart->futex.flags = flags | FLAGS_HAS_TIMEOUT; |
| 1890 | 1890 | ||
| 1891 | ret = -ERESTART_RESTARTBLOCK; | 1891 | ret = -ERESTART_RESTARTBLOCK; |
| 1892 | 1892 | ||
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index 4603f08dc47b..6de9a8fc3417 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig | |||
| @@ -18,9 +18,13 @@ config SUSPEND_FREEZER | |||
| 18 | 18 | ||
| 19 | Turning OFF this setting is NOT recommended! If in doubt, say Y. | 19 | Turning OFF this setting is NOT recommended! If in doubt, say Y. |
| 20 | 20 | ||
| 21 | config HIBERNATE_CALLBACKS | ||
| 22 | bool | ||
| 23 | |||
| 21 | config HIBERNATION | 24 | config HIBERNATION |
| 22 | bool "Hibernation (aka 'suspend to disk')" | 25 | bool "Hibernation (aka 'suspend to disk')" |
| 23 | depends on SWAP && ARCH_HIBERNATION_POSSIBLE | 26 | depends on SWAP && ARCH_HIBERNATION_POSSIBLE |
| 27 | select HIBERNATE_CALLBACKS | ||
| 24 | select LZO_COMPRESS | 28 | select LZO_COMPRESS |
| 25 | select LZO_DECOMPRESS | 29 | select LZO_DECOMPRESS |
| 26 | ---help--- | 30 | ---help--- |
| @@ -85,7 +89,7 @@ config PM_STD_PARTITION | |||
| 85 | 89 | ||
| 86 | config PM_SLEEP | 90 | config PM_SLEEP |
| 87 | def_bool y | 91 | def_bool y |
| 88 | depends on SUSPEND || HIBERNATION || XEN_SAVE_RESTORE | 92 | depends on SUSPEND || HIBERNATE_CALLBACKS |
| 89 | 93 | ||
| 90 | config PM_SLEEP_SMP | 94 | config PM_SLEEP_SMP |
| 91 | def_bool y | 95 | def_bool y |
diff --git a/kernel/sched.c b/kernel/sched.c index 48013633d792..a187c3fe027b 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
| @@ -4111,20 +4111,20 @@ need_resched: | |||
| 4111 | try_to_wake_up_local(to_wakeup); | 4111 | try_to_wake_up_local(to_wakeup); |
| 4112 | } | 4112 | } |
| 4113 | deactivate_task(rq, prev, DEQUEUE_SLEEP); | 4113 | deactivate_task(rq, prev, DEQUEUE_SLEEP); |
| 4114 | |||
| 4115 | /* | ||
| 4116 | * If we are going to sleep and we have plugged IO queued, make | ||
| 4117 | * sure to submit it to avoid deadlocks. | ||
| 4118 | */ | ||
| 4119 | if (blk_needs_flush_plug(prev)) { | ||
| 4120 | raw_spin_unlock(&rq->lock); | ||
| 4121 | blk_flush_plug(prev); | ||
| 4122 | raw_spin_lock(&rq->lock); | ||
| 4123 | } | ||
| 4114 | } | 4124 | } |
| 4115 | switch_count = &prev->nvcsw; | 4125 | switch_count = &prev->nvcsw; |
| 4116 | } | 4126 | } |
| 4117 | 4127 | ||
| 4118 | /* | ||
| 4119 | * If we are going to sleep and we have plugged IO queued, make | ||
| 4120 | * sure to submit it to avoid deadlocks. | ||
| 4121 | */ | ||
| 4122 | if (prev->state != TASK_RUNNING && blk_needs_flush_plug(prev)) { | ||
| 4123 | raw_spin_unlock(&rq->lock); | ||
| 4124 | blk_flush_plug(prev); | ||
| 4125 | raw_spin_lock(&rq->lock); | ||
| 4126 | } | ||
| 4127 | |||
| 4128 | pre_schedule(rq, prev); | 4128 | pre_schedule(rq, prev); |
| 4129 | 4129 | ||
| 4130 | if (unlikely(!rq->nr_running)) | 4130 | if (unlikely(!rq->nr_running)) |
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 7f00772e57c9..6fa833ab2cb8 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
| @@ -2104,21 +2104,20 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
| 2104 | enum cpu_idle_type idle, int *all_pinned, | 2104 | enum cpu_idle_type idle, int *all_pinned, |
| 2105 | int *this_best_prio, struct cfs_rq *busiest_cfs_rq) | 2105 | int *this_best_prio, struct cfs_rq *busiest_cfs_rq) |
| 2106 | { | 2106 | { |
| 2107 | int loops = 0, pulled = 0, pinned = 0; | 2107 | int loops = 0, pulled = 0; |
| 2108 | long rem_load_move = max_load_move; | 2108 | long rem_load_move = max_load_move; |
| 2109 | struct task_struct *p, *n; | 2109 | struct task_struct *p, *n; |
| 2110 | 2110 | ||
| 2111 | if (max_load_move == 0) | 2111 | if (max_load_move == 0) |
| 2112 | goto out; | 2112 | goto out; |
| 2113 | 2113 | ||
| 2114 | pinned = 1; | ||
| 2115 | |||
| 2116 | list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) { | 2114 | list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) { |
| 2117 | if (loops++ > sysctl_sched_nr_migrate) | 2115 | if (loops++ > sysctl_sched_nr_migrate) |
| 2118 | break; | 2116 | break; |
| 2119 | 2117 | ||
| 2120 | if ((p->se.load.weight >> 1) > rem_load_move || | 2118 | if ((p->se.load.weight >> 1) > rem_load_move || |
| 2121 | !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) | 2119 | !can_migrate_task(p, busiest, this_cpu, sd, idle, |
| 2120 | all_pinned)) | ||
| 2122 | continue; | 2121 | continue; |
| 2123 | 2122 | ||
| 2124 | pull_task(busiest, p, this_rq, this_cpu); | 2123 | pull_task(busiest, p, this_rq, this_cpu); |
| @@ -2153,9 +2152,6 @@ out: | |||
| 2153 | */ | 2152 | */ |
| 2154 | schedstat_add(sd, lb_gained[idle], pulled); | 2153 | schedstat_add(sd, lb_gained[idle], pulled); |
| 2155 | 2154 | ||
| 2156 | if (all_pinned) | ||
| 2157 | *all_pinned = pinned; | ||
| 2158 | |||
| 2159 | return max_load_move - rem_load_move; | 2155 | return max_load_move - rem_load_move; |
| 2160 | } | 2156 | } |
| 2161 | 2157 | ||
| @@ -3127,6 +3123,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
| 3127 | if (!sds.busiest || sds.busiest_nr_running == 0) | 3123 | if (!sds.busiest || sds.busiest_nr_running == 0) |
| 3128 | goto out_balanced; | 3124 | goto out_balanced; |
| 3129 | 3125 | ||
| 3126 | sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr; | ||
| 3127 | |||
| 3130 | /* | 3128 | /* |
| 3131 | * If the busiest group is imbalanced the below checks don't | 3129 | * If the busiest group is imbalanced the below checks don't |
| 3132 | * work because they assumes all things are equal, which typically | 3130 | * work because they assumes all things are equal, which typically |
| @@ -3151,7 +3149,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
| 3151 | * Don't pull any tasks if this group is already above the domain | 3149 | * Don't pull any tasks if this group is already above the domain |
| 3152 | * average load. | 3150 | * average load. |
| 3153 | */ | 3151 | */ |
| 3154 | sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr; | ||
| 3155 | if (sds.this_load >= sds.avg_load) | 3152 | if (sds.this_load >= sds.avg_load) |
| 3156 | goto out_balanced; | 3153 | goto out_balanced; |
| 3157 | 3154 | ||
| @@ -3340,6 +3337,7 @@ redo: | |||
| 3340 | * still unbalanced. ld_moved simply stays zero, so it is | 3337 | * still unbalanced. ld_moved simply stays zero, so it is |
| 3341 | * correctly treated as an imbalance. | 3338 | * correctly treated as an imbalance. |
| 3342 | */ | 3339 | */ |
| 3340 | all_pinned = 1; | ||
| 3343 | local_irq_save(flags); | 3341 | local_irq_save(flags); |
| 3344 | double_rq_lock(this_rq, busiest); | 3342 | double_rq_lock(this_rq, busiest); |
| 3345 | ld_moved = move_tasks(this_rq, this_cpu, busiest, | 3343 | ld_moved = move_tasks(this_rq, this_cpu, busiest, |
diff --git a/kernel/signal.c b/kernel/signal.c index 29e233fd7a0f..7165af5f1b11 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
| @@ -2711,8 +2711,8 @@ out: | |||
| 2711 | /** | 2711 | /** |
| 2712 | * sys_rt_sigaction - alter an action taken by a process | 2712 | * sys_rt_sigaction - alter an action taken by a process |
| 2713 | * @sig: signal to be sent | 2713 | * @sig: signal to be sent |
| 2714 | * @act: the thread group ID of the thread | 2714 | * @act: new sigaction |
| 2715 | * @oact: the PID of the thread | 2715 | * @oact: used to save the previous sigaction |
| 2716 | * @sigsetsize: size of sigset_t type | 2716 | * @sigsetsize: size of sigset_t type |
| 2717 | */ | 2717 | */ |
| 2718 | SYSCALL_DEFINE4(rt_sigaction, int, sig, | 2718 | SYSCALL_DEFINE4(rt_sigaction, int, sig, |
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 7aa40f8e182d..3e3970d53d14 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c | |||
| @@ -850,32 +850,19 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q) | |||
| 850 | __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL); | 850 | __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL); |
| 851 | } | 851 | } |
| 852 | 852 | ||
| 853 | static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q) | 853 | static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q, |
| 854 | unsigned int depth) | ||
| 854 | { | 855 | { |
| 855 | struct blk_trace *bt = q->blk_trace; | 856 | struct blk_trace *bt = q->blk_trace; |
| 856 | 857 | ||
| 857 | if (bt) { | 858 | if (bt) { |
| 858 | unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE]; | 859 | __be64 rpdu = cpu_to_be64(depth); |
| 859 | __be64 rpdu = cpu_to_be64(pdu); | ||
| 860 | 860 | ||
| 861 | __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0, | 861 | __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0, |
| 862 | sizeof(rpdu), &rpdu); | 862 | sizeof(rpdu), &rpdu); |
| 863 | } | 863 | } |
| 864 | } | 864 | } |
| 865 | 865 | ||
| 866 | static void blk_add_trace_unplug_timer(void *ignore, struct request_queue *q) | ||
| 867 | { | ||
| 868 | struct blk_trace *bt = q->blk_trace; | ||
| 869 | |||
| 870 | if (bt) { | ||
| 871 | unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE]; | ||
| 872 | __be64 rpdu = cpu_to_be64(pdu); | ||
| 873 | |||
| 874 | __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0, | ||
| 875 | sizeof(rpdu), &rpdu); | ||
| 876 | } | ||
| 877 | } | ||
| 878 | |||
| 879 | static void blk_add_trace_split(void *ignore, | 866 | static void blk_add_trace_split(void *ignore, |
| 880 | struct request_queue *q, struct bio *bio, | 867 | struct request_queue *q, struct bio *bio, |
| 881 | unsigned int pdu) | 868 | unsigned int pdu) |
| @@ -1015,8 +1002,6 @@ static void blk_register_tracepoints(void) | |||
| 1015 | WARN_ON(ret); | 1002 | WARN_ON(ret); |
| 1016 | ret = register_trace_block_plug(blk_add_trace_plug, NULL); | 1003 | ret = register_trace_block_plug(blk_add_trace_plug, NULL); |
| 1017 | WARN_ON(ret); | 1004 | WARN_ON(ret); |
| 1018 | ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL); | ||
| 1019 | WARN_ON(ret); | ||
| 1020 | ret = register_trace_block_unplug_io(blk_add_trace_unplug_io, NULL); | 1005 | ret = register_trace_block_unplug_io(blk_add_trace_unplug_io, NULL); |
| 1021 | WARN_ON(ret); | 1006 | WARN_ON(ret); |
| 1022 | ret = register_trace_block_split(blk_add_trace_split, NULL); | 1007 | ret = register_trace_block_split(blk_add_trace_split, NULL); |
| @@ -1033,7 +1018,6 @@ static void blk_unregister_tracepoints(void) | |||
| 1033 | unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL); | 1018 | unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL); |
| 1034 | unregister_trace_block_split(blk_add_trace_split, NULL); | 1019 | unregister_trace_block_split(blk_add_trace_split, NULL); |
| 1035 | unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL); | 1020 | unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL); |
| 1036 | unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL); | ||
| 1037 | unregister_trace_block_plug(blk_add_trace_plug, NULL); | 1021 | unregister_trace_block_plug(blk_add_trace_plug, NULL); |
| 1038 | unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL); | 1022 | unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL); |
| 1039 | unregister_trace_block_getrq(blk_add_trace_getrq, NULL); | 1023 | unregister_trace_block_getrq(blk_add_trace_getrq, NULL); |
| @@ -1348,7 +1332,6 @@ static const struct { | |||
| 1348 | [__BLK_TA_COMPLETE] = {{ "C", "complete" }, blk_log_with_error }, | 1332 | [__BLK_TA_COMPLETE] = {{ "C", "complete" }, blk_log_with_error }, |
| 1349 | [__BLK_TA_PLUG] = {{ "P", "plug" }, blk_log_plug }, | 1333 | [__BLK_TA_PLUG] = {{ "P", "plug" }, blk_log_plug }, |
| 1350 | [__BLK_TA_UNPLUG_IO] = {{ "U", "unplug_io" }, blk_log_unplug }, | 1334 | [__BLK_TA_UNPLUG_IO] = {{ "U", "unplug_io" }, blk_log_unplug }, |
| 1351 | [__BLK_TA_UNPLUG_TIMER] = {{ "UT", "unplug_timer" }, blk_log_unplug }, | ||
| 1352 | [__BLK_TA_INSERT] = {{ "I", "insert" }, blk_log_generic }, | 1335 | [__BLK_TA_INSERT] = {{ "I", "insert" }, blk_log_generic }, |
| 1353 | [__BLK_TA_SPLIT] = {{ "X", "split" }, blk_log_split }, | 1336 | [__BLK_TA_SPLIT] = {{ "X", "split" }, blk_log_split }, |
| 1354 | [__BLK_TA_BOUNCE] = {{ "B", "bounce" }, blk_log_generic }, | 1337 | [__BLK_TA_BOUNCE] = {{ "B", "bounce" }, blk_log_generic }, |
diff --git a/lib/kstrtox.c b/lib/kstrtox.c index 05672e819f8c..a235f3cc471c 100644 --- a/lib/kstrtox.c +++ b/lib/kstrtox.c | |||
| @@ -49,12 +49,9 @@ static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res) | |||
| 49 | val = *s - '0'; | 49 | val = *s - '0'; |
| 50 | else if ('a' <= _tolower(*s) && _tolower(*s) <= 'f') | 50 | else if ('a' <= _tolower(*s) && _tolower(*s) <= 'f') |
| 51 | val = _tolower(*s) - 'a' + 10; | 51 | val = _tolower(*s) - 'a' + 10; |
| 52 | else if (*s == '\n') { | 52 | else if (*s == '\n' && *(s + 1) == '\0') |
| 53 | if (*(s + 1) == '\0') | 53 | break; |
| 54 | break; | 54 | else |
| 55 | else | ||
| 56 | return -EINVAL; | ||
| 57 | } else | ||
| 58 | return -EINVAL; | 55 | return -EINVAL; |
| 59 | 56 | ||
| 60 | if (val >= base) | 57 | if (val >= base) |
diff --git a/lib/test-kstrtox.c b/lib/test-kstrtox.c index 325c2f9ecebd..d55769d63cb8 100644 --- a/lib/test-kstrtox.c +++ b/lib/test-kstrtox.c | |||
| @@ -315,12 +315,12 @@ static void __init test_kstrtou64_ok(void) | |||
| 315 | {"65537", 10, 65537}, | 315 | {"65537", 10, 65537}, |
| 316 | {"2147483646", 10, 2147483646}, | 316 | {"2147483646", 10, 2147483646}, |
| 317 | {"2147483647", 10, 2147483647}, | 317 | {"2147483647", 10, 2147483647}, |
| 318 | {"2147483648", 10, 2147483648}, | 318 | {"2147483648", 10, 2147483648ULL}, |
| 319 | {"2147483649", 10, 2147483649}, | 319 | {"2147483649", 10, 2147483649ULL}, |
| 320 | {"4294967294", 10, 4294967294}, | 320 | {"4294967294", 10, 4294967294ULL}, |
| 321 | {"4294967295", 10, 4294967295}, | 321 | {"4294967295", 10, 4294967295ULL}, |
| 322 | {"4294967296", 10, 4294967296}, | 322 | {"4294967296", 10, 4294967296ULL}, |
| 323 | {"4294967297", 10, 4294967297}, | 323 | {"4294967297", 10, 4294967297ULL}, |
| 324 | {"9223372036854775806", 10, 9223372036854775806ULL}, | 324 | {"9223372036854775806", 10, 9223372036854775806ULL}, |
| 325 | {"9223372036854775807", 10, 9223372036854775807ULL}, | 325 | {"9223372036854775807", 10, 9223372036854775807ULL}, |
| 326 | {"9223372036854775808", 10, 9223372036854775808ULL}, | 326 | {"9223372036854775808", 10, 9223372036854775808ULL}, |
| @@ -369,12 +369,12 @@ static void __init test_kstrtos64_ok(void) | |||
| 369 | {"65537", 10, 65537}, | 369 | {"65537", 10, 65537}, |
| 370 | {"2147483646", 10, 2147483646}, | 370 | {"2147483646", 10, 2147483646}, |
| 371 | {"2147483647", 10, 2147483647}, | 371 | {"2147483647", 10, 2147483647}, |
| 372 | {"2147483648", 10, 2147483648}, | 372 | {"2147483648", 10, 2147483648LL}, |
| 373 | {"2147483649", 10, 2147483649}, | 373 | {"2147483649", 10, 2147483649LL}, |
| 374 | {"4294967294", 10, 4294967294}, | 374 | {"4294967294", 10, 4294967294LL}, |
| 375 | {"4294967295", 10, 4294967295}, | 375 | {"4294967295", 10, 4294967295LL}, |
| 376 | {"4294967296", 10, 4294967296}, | 376 | {"4294967296", 10, 4294967296LL}, |
| 377 | {"4294967297", 10, 4294967297}, | 377 | {"4294967297", 10, 4294967297LL}, |
| 378 | {"9223372036854775806", 10, 9223372036854775806LL}, | 378 | {"9223372036854775806", 10, 9223372036854775806LL}, |
| 379 | {"9223372036854775807", 10, 9223372036854775807LL}, | 379 | {"9223372036854775807", 10, 9223372036854775807LL}, |
| 380 | }; | 380 | }; |
| @@ -418,10 +418,10 @@ static void __init test_kstrtou32_ok(void) | |||
| 418 | {"65537", 10, 65537}, | 418 | {"65537", 10, 65537}, |
| 419 | {"2147483646", 10, 2147483646}, | 419 | {"2147483646", 10, 2147483646}, |
| 420 | {"2147483647", 10, 2147483647}, | 420 | {"2147483647", 10, 2147483647}, |
| 421 | {"2147483648", 10, 2147483648}, | 421 | {"2147483648", 10, 2147483648U}, |
| 422 | {"2147483649", 10, 2147483649}, | 422 | {"2147483649", 10, 2147483649U}, |
| 423 | {"4294967294", 10, 4294967294}, | 423 | {"4294967294", 10, 4294967294U}, |
| 424 | {"4294967295", 10, 4294967295}, | 424 | {"4294967295", 10, 4294967295U}, |
| 425 | }; | 425 | }; |
| 426 | TEST_OK(kstrtou32, u32, "%u", test_u32_ok); | 426 | TEST_OK(kstrtou32, u32, "%u", test_u32_ok); |
| 427 | } | 427 | } |
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 0a619e0e2e0b..470dcda10add 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
| @@ -244,24 +244,28 @@ static ssize_t single_flag_show(struct kobject *kobj, | |||
| 244 | struct kobj_attribute *attr, char *buf, | 244 | struct kobj_attribute *attr, char *buf, |
| 245 | enum transparent_hugepage_flag flag) | 245 | enum transparent_hugepage_flag flag) |
| 246 | { | 246 | { |
| 247 | if (test_bit(flag, &transparent_hugepage_flags)) | 247 | return sprintf(buf, "%d\n", |
| 248 | return sprintf(buf, "[yes] no\n"); | 248 | !!test_bit(flag, &transparent_hugepage_flags)); |
| 249 | else | ||
| 250 | return sprintf(buf, "yes [no]\n"); | ||
| 251 | } | 249 | } |
| 250 | |||
| 252 | static ssize_t single_flag_store(struct kobject *kobj, | 251 | static ssize_t single_flag_store(struct kobject *kobj, |
| 253 | struct kobj_attribute *attr, | 252 | struct kobj_attribute *attr, |
| 254 | const char *buf, size_t count, | 253 | const char *buf, size_t count, |
| 255 | enum transparent_hugepage_flag flag) | 254 | enum transparent_hugepage_flag flag) |
| 256 | { | 255 | { |
| 257 | if (!memcmp("yes", buf, | 256 | unsigned long value; |
| 258 | min(sizeof("yes")-1, count))) { | 257 | int ret; |
| 258 | |||
| 259 | ret = kstrtoul(buf, 10, &value); | ||
| 260 | if (ret < 0) | ||
| 261 | return ret; | ||
| 262 | if (value > 1) | ||
| 263 | return -EINVAL; | ||
| 264 | |||
| 265 | if (value) | ||
| 259 | set_bit(flag, &transparent_hugepage_flags); | 266 | set_bit(flag, &transparent_hugepage_flags); |
| 260 | } else if (!memcmp("no", buf, | 267 | else |
| 261 | min(sizeof("no")-1, count))) { | ||
| 262 | clear_bit(flag, &transparent_hugepage_flags); | 268 | clear_bit(flag, &transparent_hugepage_flags); |
| 263 | } else | ||
| 264 | return -EINVAL; | ||
| 265 | 269 | ||
| 266 | return count; | 270 | return count; |
| 267 | } | 271 | } |
| @@ -680,8 +684,11 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 680 | return VM_FAULT_OOM; | 684 | return VM_FAULT_OOM; |
| 681 | page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), | 685 | page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), |
| 682 | vma, haddr, numa_node_id(), 0); | 686 | vma, haddr, numa_node_id(), 0); |
| 683 | if (unlikely(!page)) | 687 | if (unlikely(!page)) { |
| 688 | count_vm_event(THP_FAULT_FALLBACK); | ||
| 684 | goto out; | 689 | goto out; |
| 690 | } | ||
| 691 | count_vm_event(THP_FAULT_ALLOC); | ||
| 685 | if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) { | 692 | if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) { |
| 686 | put_page(page); | 693 | put_page(page); |
| 687 | goto out; | 694 | goto out; |
| @@ -909,11 +916,13 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 909 | new_page = NULL; | 916 | new_page = NULL; |
| 910 | 917 | ||
| 911 | if (unlikely(!new_page)) { | 918 | if (unlikely(!new_page)) { |
| 919 | count_vm_event(THP_FAULT_FALLBACK); | ||
| 912 | ret = do_huge_pmd_wp_page_fallback(mm, vma, address, | 920 | ret = do_huge_pmd_wp_page_fallback(mm, vma, address, |
| 913 | pmd, orig_pmd, page, haddr); | 921 | pmd, orig_pmd, page, haddr); |
| 914 | put_page(page); | 922 | put_page(page); |
| 915 | goto out; | 923 | goto out; |
| 916 | } | 924 | } |
| 925 | count_vm_event(THP_FAULT_ALLOC); | ||
| 917 | 926 | ||
| 918 | if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { | 927 | if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { |
| 919 | put_page(new_page); | 928 | put_page(new_page); |
| @@ -1390,6 +1399,7 @@ int split_huge_page(struct page *page) | |||
| 1390 | 1399 | ||
| 1391 | BUG_ON(!PageSwapBacked(page)); | 1400 | BUG_ON(!PageSwapBacked(page)); |
| 1392 | __split_huge_page(page, anon_vma); | 1401 | __split_huge_page(page, anon_vma); |
| 1402 | count_vm_event(THP_SPLIT); | ||
| 1393 | 1403 | ||
| 1394 | BUG_ON(PageCompound(page)); | 1404 | BUG_ON(PageCompound(page)); |
| 1395 | out_unlock: | 1405 | out_unlock: |
| @@ -1784,9 +1794,11 @@ static void collapse_huge_page(struct mm_struct *mm, | |||
| 1784 | node, __GFP_OTHER_NODE); | 1794 | node, __GFP_OTHER_NODE); |
| 1785 | if (unlikely(!new_page)) { | 1795 | if (unlikely(!new_page)) { |
| 1786 | up_read(&mm->mmap_sem); | 1796 | up_read(&mm->mmap_sem); |
| 1797 | count_vm_event(THP_COLLAPSE_ALLOC_FAILED); | ||
| 1787 | *hpage = ERR_PTR(-ENOMEM); | 1798 | *hpage = ERR_PTR(-ENOMEM); |
| 1788 | return; | 1799 | return; |
| 1789 | } | 1800 | } |
| 1801 | count_vm_event(THP_COLLAPSE_ALLOC); | ||
| 1790 | if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { | 1802 | if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { |
| 1791 | up_read(&mm->mmap_sem); | 1803 | up_read(&mm->mmap_sem); |
| 1792 | put_page(new_page); | 1804 | put_page(new_page); |
| @@ -2151,8 +2163,11 @@ static void khugepaged_do_scan(struct page **hpage) | |||
| 2151 | #ifndef CONFIG_NUMA | 2163 | #ifndef CONFIG_NUMA |
| 2152 | if (!*hpage) { | 2164 | if (!*hpage) { |
| 2153 | *hpage = alloc_hugepage(khugepaged_defrag()); | 2165 | *hpage = alloc_hugepage(khugepaged_defrag()); |
| 2154 | if (unlikely(!*hpage)) | 2166 | if (unlikely(!*hpage)) { |
| 2167 | count_vm_event(THP_COLLAPSE_ALLOC_FAILED); | ||
| 2155 | break; | 2168 | break; |
| 2169 | } | ||
| 2170 | count_vm_event(THP_COLLAPSE_ALLOC); | ||
| 2156 | } | 2171 | } |
| 2157 | #else | 2172 | #else |
| 2158 | if (IS_ERR(*hpage)) | 2173 | if (IS_ERR(*hpage)) |
| @@ -2192,8 +2207,11 @@ static struct page *khugepaged_alloc_hugepage(void) | |||
| 2192 | 2207 | ||
| 2193 | do { | 2208 | do { |
| 2194 | hpage = alloc_hugepage(khugepaged_defrag()); | 2209 | hpage = alloc_hugepage(khugepaged_defrag()); |
| 2195 | if (!hpage) | 2210 | if (!hpage) { |
| 2211 | count_vm_event(THP_COLLAPSE_ALLOC_FAILED); | ||
| 2196 | khugepaged_alloc_sleep(); | 2212 | khugepaged_alloc_sleep(); |
| 2213 | } else | ||
| 2214 | count_vm_event(THP_COLLAPSE_ALLOC); | ||
| 2197 | } while (unlikely(!hpage) && | 2215 | } while (unlikely(!hpage) && |
| 2198 | likely(khugepaged_enabled())); | 2216 | likely(khugepaged_enabled())); |
| 2199 | return hpage; | 2217 | return hpage; |
| @@ -2210,8 +2228,11 @@ static void khugepaged_loop(void) | |||
| 2210 | while (likely(khugepaged_enabled())) { | 2228 | while (likely(khugepaged_enabled())) { |
| 2211 | #ifndef CONFIG_NUMA | 2229 | #ifndef CONFIG_NUMA |
| 2212 | hpage = khugepaged_alloc_hugepage(); | 2230 | hpage = khugepaged_alloc_hugepage(); |
| 2213 | if (unlikely(!hpage)) | 2231 | if (unlikely(!hpage)) { |
| 2232 | count_vm_event(THP_COLLAPSE_ALLOC_FAILED); | ||
| 2214 | break; | 2233 | break; |
| 2234 | } | ||
| 2235 | count_vm_event(THP_COLLAPSE_ALLOC); | ||
| 2215 | #else | 2236 | #else |
| 2216 | if (IS_ERR(hpage)) { | 2237 | if (IS_ERR(hpage)) { |
| 2217 | khugepaged_alloc_sleep(); | 2238 | khugepaged_alloc_sleep(); |
diff --git a/mm/memory.c b/mm/memory.c index 9da8cab1b1b0..ce22a250926f 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
| @@ -1410,6 +1410,13 @@ no_page_table: | |||
| 1410 | return page; | 1410 | return page; |
| 1411 | } | 1411 | } |
| 1412 | 1412 | ||
| 1413 | static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) | ||
| 1414 | { | ||
| 1415 | return (vma->vm_flags & VM_GROWSDOWN) && | ||
| 1416 | (vma->vm_start == addr) && | ||
| 1417 | !vma_stack_continue(vma->vm_prev, addr); | ||
| 1418 | } | ||
| 1419 | |||
| 1413 | /** | 1420 | /** |
| 1414 | * __get_user_pages() - pin user pages in memory | 1421 | * __get_user_pages() - pin user pages in memory |
| 1415 | * @tsk: task_struct of target task | 1422 | * @tsk: task_struct of target task |
| @@ -1488,7 +1495,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | |||
| 1488 | vma = find_extend_vma(mm, start); | 1495 | vma = find_extend_vma(mm, start); |
| 1489 | if (!vma && in_gate_area(mm, start)) { | 1496 | if (!vma && in_gate_area(mm, start)) { |
| 1490 | unsigned long pg = start & PAGE_MASK; | 1497 | unsigned long pg = start & PAGE_MASK; |
| 1491 | struct vm_area_struct *gate_vma = get_gate_vma(mm); | ||
| 1492 | pgd_t *pgd; | 1498 | pgd_t *pgd; |
| 1493 | pud_t *pud; | 1499 | pud_t *pud; |
| 1494 | pmd_t *pmd; | 1500 | pmd_t *pmd; |
| @@ -1513,10 +1519,11 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | |||
| 1513 | pte_unmap(pte); | 1519 | pte_unmap(pte); |
| 1514 | return i ? : -EFAULT; | 1520 | return i ? : -EFAULT; |
| 1515 | } | 1521 | } |
| 1522 | vma = get_gate_vma(mm); | ||
| 1516 | if (pages) { | 1523 | if (pages) { |
| 1517 | struct page *page; | 1524 | struct page *page; |
| 1518 | 1525 | ||
| 1519 | page = vm_normal_page(gate_vma, start, *pte); | 1526 | page = vm_normal_page(vma, start, *pte); |
| 1520 | if (!page) { | 1527 | if (!page) { |
| 1521 | if (!(gup_flags & FOLL_DUMP) && | 1528 | if (!(gup_flags & FOLL_DUMP) && |
| 1522 | is_zero_pfn(pte_pfn(*pte))) | 1529 | is_zero_pfn(pte_pfn(*pte))) |
| @@ -1530,12 +1537,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | |||
| 1530 | get_page(page); | 1537 | get_page(page); |
| 1531 | } | 1538 | } |
| 1532 | pte_unmap(pte); | 1539 | pte_unmap(pte); |
| 1533 | if (vmas) | 1540 | goto next_page; |
| 1534 | vmas[i] = gate_vma; | ||
| 1535 | i++; | ||
| 1536 | start += PAGE_SIZE; | ||
| 1537 | nr_pages--; | ||
| 1538 | continue; | ||
| 1539 | } | 1541 | } |
| 1540 | 1542 | ||
| 1541 | if (!vma || | 1543 | if (!vma || |
| @@ -1549,6 +1551,13 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | |||
| 1549 | continue; | 1551 | continue; |
| 1550 | } | 1552 | } |
| 1551 | 1553 | ||
| 1554 | /* | ||
| 1555 | * If we don't actually want the page itself, | ||
| 1556 | * and it's the stack guard page, just skip it. | ||
| 1557 | */ | ||
| 1558 | if (!pages && stack_guard_page(vma, start)) | ||
| 1559 | goto next_page; | ||
| 1560 | |||
| 1552 | do { | 1561 | do { |
| 1553 | struct page *page; | 1562 | struct page *page; |
| 1554 | unsigned int foll_flags = gup_flags; | 1563 | unsigned int foll_flags = gup_flags; |
| @@ -1631,6 +1640,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | |||
| 1631 | flush_anon_page(vma, page, start); | 1640 | flush_anon_page(vma, page, start); |
| 1632 | flush_dcache_page(page); | 1641 | flush_dcache_page(page); |
| 1633 | } | 1642 | } |
| 1643 | next_page: | ||
| 1634 | if (vmas) | 1644 | if (vmas) |
| 1635 | vmas[i] = vma; | 1645 | vmas[i] = vma; |
| 1636 | i++; | 1646 | i++; |
| @@ -3678,7 +3688,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, | |||
| 3678 | */ | 3688 | */ |
| 3679 | #ifdef CONFIG_HAVE_IOREMAP_PROT | 3689 | #ifdef CONFIG_HAVE_IOREMAP_PROT |
| 3680 | vma = find_vma(mm, addr); | 3690 | vma = find_vma(mm, addr); |
| 3681 | if (!vma) | 3691 | if (!vma || vma->vm_start > addr) |
| 3682 | break; | 3692 | break; |
| 3683 | if (vma->vm_ops && vma->vm_ops->access) | 3693 | if (vma->vm_ops && vma->vm_ops->access) |
| 3684 | ret = vma->vm_ops->access(vma, addr, buf, | 3694 | ret = vma->vm_ops->access(vma, addr, buf, |
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index a2acaf820fe5..9ca1d604f7cd 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
| @@ -375,7 +375,7 @@ void online_page(struct page *page) | |||
| 375 | #endif | 375 | #endif |
| 376 | 376 | ||
| 377 | #ifdef CONFIG_FLATMEM | 377 | #ifdef CONFIG_FLATMEM |
| 378 | max_mapnr = max(page_to_pfn(page), max_mapnr); | 378 | max_mapnr = max(pfn, max_mapnr); |
| 379 | #endif | 379 | #endif |
| 380 | 380 | ||
| 381 | ClearPageReserved(page); | 381 | ClearPageReserved(page); |
diff --git a/mm/mlock.c b/mm/mlock.c index 2689a08c79af..6b55e3efe0df 100644 --- a/mm/mlock.c +++ b/mm/mlock.c | |||
| @@ -135,13 +135,6 @@ void munlock_vma_page(struct page *page) | |||
| 135 | } | 135 | } |
| 136 | } | 136 | } |
| 137 | 137 | ||
| 138 | static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) | ||
| 139 | { | ||
| 140 | return (vma->vm_flags & VM_GROWSDOWN) && | ||
| 141 | (vma->vm_start == addr) && | ||
| 142 | !vma_stack_continue(vma->vm_prev, addr); | ||
| 143 | } | ||
| 144 | |||
| 145 | /** | 138 | /** |
| 146 | * __mlock_vma_pages_range() - mlock a range of pages in the vma. | 139 | * __mlock_vma_pages_range() - mlock a range of pages in the vma. |
| 147 | * @vma: target vma | 140 | * @vma: target vma |
| @@ -188,12 +181,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma, | |||
| 188 | if (vma->vm_flags & VM_LOCKED) | 181 | if (vma->vm_flags & VM_LOCKED) |
| 189 | gup_flags |= FOLL_MLOCK; | 182 | gup_flags |= FOLL_MLOCK; |
| 190 | 183 | ||
| 191 | /* We don't try to access the guard page of a stack vma */ | ||
| 192 | if (stack_guard_page(vma, start)) { | ||
| 193 | addr += PAGE_SIZE; | ||
| 194 | nr_pages--; | ||
| 195 | } | ||
| 196 | |||
| 197 | return __get_user_pages(current, mm, addr, nr_pages, gup_flags, | 184 | return __get_user_pages(current, mm, addr, nr_pages, gup_flags, |
| 198 | NULL, NULL, nonblocking); | 185 | NULL, NULL, nonblocking); |
| 199 | } | 186 | } |
diff --git a/mm/mmap.c b/mm/mmap.c --- a/mm/mmap.c +++ b/mm/mmap.c | |||
| @@ -259,7 +259,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) | |||
| 259 | * randomize_va_space to 2, which will still cause mm->start_brk | 259 | * randomize_va_space to 2, which will still cause mm->start_brk |
| 260 | * to be arbitrarily shifted | 260 | * to be arbitrarily shifted |
| 261 | */ | 261 | */ |
| 262 | if (mm->start_brk > PAGE_ALIGN(mm->end_data)) | 262 | if (current->brk_randomized) |
| 263 | min_brk = mm->start_brk; | 263 | min_brk = mm->start_brk; |
| 264 | else | 264 | else |
| 265 | min_brk = mm->end_data; | 265 | min_brk = mm->end_data; |
| @@ -1814,11 +1814,14 @@ static int expand_downwards(struct vm_area_struct *vma, | |||
| 1814 | size = vma->vm_end - address; | 1814 | size = vma->vm_end - address; |
| 1815 | grow = (vma->vm_start - address) >> PAGE_SHIFT; | 1815 | grow = (vma->vm_start - address) >> PAGE_SHIFT; |
| 1816 | 1816 | ||
| 1817 | error = acct_stack_growth(vma, size, grow); | 1817 | error = -ENOMEM; |
| 1818 | if (!error) { | 1818 | if (grow <= vma->vm_pgoff) { |
| 1819 | vma->vm_start = address; | 1819 | error = acct_stack_growth(vma, size, grow); |
| 1820 | vma->vm_pgoff -= grow; | 1820 | if (!error) { |
| 1821 | perf_event_mmap(vma); | 1821 | vma->vm_start = address; |
| 1822 | vma->vm_pgoff -= grow; | ||
| 1823 | perf_event_mmap(vma); | ||
| 1824 | } | ||
| 1822 | } | 1825 | } |
| 1823 | } | 1826 | } |
| 1824 | vma_unlock_anon_vma(vma); | 1827 | vma_unlock_anon_vma(vma); |
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 6a819d1b2c7d..83fb72c108b7 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
| @@ -84,24 +84,6 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk, | |||
| 84 | #endif /* CONFIG_NUMA */ | 84 | #endif /* CONFIG_NUMA */ |
| 85 | 85 | ||
| 86 | /* | 86 | /* |
| 87 | * If this is a system OOM (not a memcg OOM) and the task selected to be | ||
| 88 | * killed is not already running at high (RT) priorities, speed up the | ||
| 89 | * recovery by boosting the dying task to the lowest FIFO priority. | ||
| 90 | * That helps with the recovery and avoids interfering with RT tasks. | ||
| 91 | */ | ||
| 92 | static void boost_dying_task_prio(struct task_struct *p, | ||
| 93 | struct mem_cgroup *mem) | ||
| 94 | { | ||
| 95 | struct sched_param param = { .sched_priority = 1 }; | ||
| 96 | |||
| 97 | if (mem) | ||
| 98 | return; | ||
| 99 | |||
| 100 | if (!rt_task(p)) | ||
| 101 | sched_setscheduler_nocheck(p, SCHED_FIFO, ¶m); | ||
| 102 | } | ||
| 103 | |||
| 104 | /* | ||
| 105 | * The process p may have detached its own ->mm while exiting or through | 87 | * The process p may have detached its own ->mm while exiting or through |
| 106 | * use_mm(), but one or more of its subthreads may still have a valid | 88 | * use_mm(), but one or more of its subthreads may still have a valid |
| 107 | * pointer. Return p, or any of its subthreads with a valid ->mm, with | 89 | * pointer. Return p, or any of its subthreads with a valid ->mm, with |
| @@ -452,13 +434,6 @@ static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem) | |||
| 452 | set_tsk_thread_flag(p, TIF_MEMDIE); | 434 | set_tsk_thread_flag(p, TIF_MEMDIE); |
| 453 | force_sig(SIGKILL, p); | 435 | force_sig(SIGKILL, p); |
| 454 | 436 | ||
| 455 | /* | ||
| 456 | * We give our sacrificial lamb high priority and access to | ||
| 457 | * all the memory it needs. That way it should be able to | ||
| 458 | * exit() and clear out its resources quickly... | ||
| 459 | */ | ||
| 460 | boost_dying_task_prio(p, mem); | ||
| 461 | |||
| 462 | return 0; | 437 | return 0; |
| 463 | } | 438 | } |
| 464 | #undef K | 439 | #undef K |
| @@ -482,7 +457,6 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, | |||
| 482 | */ | 457 | */ |
| 483 | if (p->flags & PF_EXITING) { | 458 | if (p->flags & PF_EXITING) { |
| 484 | set_tsk_thread_flag(p, TIF_MEMDIE); | 459 | set_tsk_thread_flag(p, TIF_MEMDIE); |
| 485 | boost_dying_task_prio(p, mem); | ||
| 486 | return 0; | 460 | return 0; |
| 487 | } | 461 | } |
| 488 | 462 | ||
| @@ -556,7 +530,6 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask) | |||
| 556 | */ | 530 | */ |
| 557 | if (fatal_signal_pending(current)) { | 531 | if (fatal_signal_pending(current)) { |
| 558 | set_thread_flag(TIF_MEMDIE); | 532 | set_thread_flag(TIF_MEMDIE); |
| 559 | boost_dying_task_prio(current, NULL); | ||
| 560 | return; | 533 | return; |
| 561 | } | 534 | } |
| 562 | 535 | ||
| @@ -712,7 +685,6 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, | |||
| 712 | */ | 685 | */ |
| 713 | if (fatal_signal_pending(current)) { | 686 | if (fatal_signal_pending(current)) { |
| 714 | set_thread_flag(TIF_MEMDIE); | 687 | set_thread_flag(TIF_MEMDIE); |
| 715 | boost_dying_task_prio(current, NULL); | ||
| 716 | return; | 688 | return; |
| 717 | } | 689 | } |
| 718 | 690 | ||
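For context on the helper these oom_kill.c hunks delete: boost_dying_task_prio() bumped the chosen OOM victim to the lowest SCHED_FIFO priority so it could release its memory sooner. A rough user-space analogue of that mechanism is sketched below, using the POSIX scheduler API instead of the in-kernel sched_setscheduler_nocheck(); the pid handling and capability requirements are illustrative only.

/*
 * Sketch of the removed priority boost: promote a process to the lowest
 * real-time FIFO priority unless it is already a real-time task.
 */
#include <sys/types.h>
#include <sched.h>
#include <stdio.h>

static int boost_to_fifo(pid_t pid)
{
        struct sched_param param = { .sched_priority = 1 };
        int policy = sched_getscheduler(pid);

        if (policy == SCHED_FIFO || policy == SCHED_RR)
                return 0;               /* already real-time: leave it alone */

        return sched_setscheduler(pid, SCHED_FIFO, &param);
}

int main(void)
{
        if (boost_to_fifo(0) != 0)      /* 0 = calling process; needs CAP_SYS_NICE */
                perror("sched_setscheduler");
        return 0;
}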
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 2747f5e5abc1..9f8a97b9a350 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
| @@ -3176,7 +3176,7 @@ static __init_refok int __build_all_zonelists(void *data) | |||
| 3176 | * Called with zonelists_mutex held always | 3176 | * Called with zonelists_mutex held always |
| 3177 | * unless system_state == SYSTEM_BOOTING. | 3177 | * unless system_state == SYSTEM_BOOTING. |
| 3178 | */ | 3178 | */ |
| 3179 | void build_all_zonelists(void *data) | 3179 | void __ref build_all_zonelists(void *data) |
| 3180 | { | 3180 | { |
| 3181 | set_zonelist_order(); | 3181 | set_zonelist_order(); |
| 3182 | 3182 | ||
diff --git a/mm/shmem.c b/mm/shmem.c index 58da7c150ba6..8fa27e4e582a 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
| @@ -421,7 +421,8 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long | |||
| 421 | * a waste to allocate index if we cannot allocate data. | 421 | * a waste to allocate index if we cannot allocate data. |
| 422 | */ | 422 | */ |
| 423 | if (sbinfo->max_blocks) { | 423 | if (sbinfo->max_blocks) { |
| 424 | if (percpu_counter_compare(&sbinfo->used_blocks, (sbinfo->max_blocks - 1)) > 0) | 424 | if (percpu_counter_compare(&sbinfo->used_blocks, |
| 425 | sbinfo->max_blocks - 1) >= 0) | ||
| 425 | return ERR_PTR(-ENOSPC); | 426 | return ERR_PTR(-ENOSPC); |
| 426 | percpu_counter_inc(&sbinfo->used_blocks); | 427 | percpu_counter_inc(&sbinfo->used_blocks); |
| 427 | spin_lock(&inode->i_lock); | 428 | spin_lock(&inode->i_lock); |
| @@ -1397,7 +1398,8 @@ repeat: | |||
| 1397 | shmem_swp_unmap(entry); | 1398 | shmem_swp_unmap(entry); |
| 1398 | sbinfo = SHMEM_SB(inode->i_sb); | 1399 | sbinfo = SHMEM_SB(inode->i_sb); |
| 1399 | if (sbinfo->max_blocks) { | 1400 | if (sbinfo->max_blocks) { |
| 1400 | if ((percpu_counter_compare(&sbinfo->used_blocks, sbinfo->max_blocks) > 0) || | 1401 | if (percpu_counter_compare(&sbinfo->used_blocks, |
| 1402 | sbinfo->max_blocks) >= 0 || | ||
| 1401 | shmem_acct_block(info->flags)) { | 1403 | shmem_acct_block(info->flags)) { |
| 1402 | spin_unlock(&info->lock); | 1404 | spin_unlock(&info->lock); |
| 1403 | error = -ENOSPC; | 1405 | error = -ENOSPC; |
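The two shmem hunks tighten the block-limit comparison from '>' to '>='; with the old test, usage could be accounted one block past max_blocks before ENOSPC fired. A stripped-down sketch of the boundary follows, with a plain counter standing in for the percpu_counter and the first hunk's max_blocks - 1 adjustment omitted.

/*
 * Sketch of the boundary: refuse the allocation once usage has reached
 * the limit ('>=') instead of once it has exceeded it ('>').
 */
#include <stdio.h>

static int alloc_block(long *used, long max_blocks)
{
        if (*used >= max_blocks)        /* old test was: *used > max_blocks */
                return -1;              /* ENOSPC */
        (*used)++;
        return 0;
}

int main(void)
{
        long used = 0;
        int i, granted = 0;

        for (i = 0; i < 5; i++)
                if (alloc_block(&used, 3) == 0)
                        granted++;

        printf("granted %d of 5 requests with max_blocks=3\n", granted);  /* 3 */
        return 0;
}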
diff --git a/mm/vmscan.c b/mm/vmscan.c index c7f5a6d4b75b..f6b435c80079 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
| @@ -41,6 +41,7 @@ | |||
| 41 | #include <linux/memcontrol.h> | 41 | #include <linux/memcontrol.h> |
| 42 | #include <linux/delayacct.h> | 42 | #include <linux/delayacct.h> |
| 43 | #include <linux/sysctl.h> | 43 | #include <linux/sysctl.h> |
| 44 | #include <linux/oom.h> | ||
| 44 | 45 | ||
| 45 | #include <asm/tlbflush.h> | 46 | #include <asm/tlbflush.h> |
| 46 | #include <asm/div64.h> | 47 | #include <asm/div64.h> |
| @@ -1988,17 +1989,12 @@ static bool zone_reclaimable(struct zone *zone) | |||
| 1988 | return zone->pages_scanned < zone_reclaimable_pages(zone) * 6; | 1989 | return zone->pages_scanned < zone_reclaimable_pages(zone) * 6; |
| 1989 | } | 1990 | } |
| 1990 | 1991 | ||
| 1991 | /* | 1992 | /* All zones in zonelist are unreclaimable? */ |
| 1992 | * As hibernation is going on, kswapd is freezed so that it can't mark | ||
| 1993 | * the zone into all_unreclaimable. It can't handle OOM during hibernation. | ||
| 1994 | * So let's check zone's unreclaimable in direct reclaim as well as kswapd. | ||
| 1995 | */ | ||
| 1996 | static bool all_unreclaimable(struct zonelist *zonelist, | 1993 | static bool all_unreclaimable(struct zonelist *zonelist, |
| 1997 | struct scan_control *sc) | 1994 | struct scan_control *sc) |
| 1998 | { | 1995 | { |
| 1999 | struct zoneref *z; | 1996 | struct zoneref *z; |
| 2000 | struct zone *zone; | 1997 | struct zone *zone; |
| 2001 | bool all_unreclaimable = true; | ||
| 2002 | 1998 | ||
| 2003 | for_each_zone_zonelist_nodemask(zone, z, zonelist, | 1999 | for_each_zone_zonelist_nodemask(zone, z, zonelist, |
| 2004 | gfp_zone(sc->gfp_mask), sc->nodemask) { | 2000 | gfp_zone(sc->gfp_mask), sc->nodemask) { |
| @@ -2006,13 +2002,11 @@ static bool all_unreclaimable(struct zonelist *zonelist, | |||
| 2006 | continue; | 2002 | continue; |
| 2007 | if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) | 2003 | if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) |
| 2008 | continue; | 2004 | continue; |
| 2009 | if (zone_reclaimable(zone)) { | 2005 | if (!zone->all_unreclaimable) |
| 2010 | all_unreclaimable = false; | 2006 | return false; |
| 2011 | break; | ||
| 2012 | } | ||
| 2013 | } | 2007 | } |
| 2014 | 2008 | ||
| 2015 | return all_unreclaimable; | 2009 | return true; |
| 2016 | } | 2010 | } |
| 2017 | 2011 | ||
| 2018 | /* | 2012 | /* |
| @@ -2108,6 +2102,14 @@ out: | |||
| 2108 | if (sc->nr_reclaimed) | 2102 | if (sc->nr_reclaimed) |
| 2109 | return sc->nr_reclaimed; | 2103 | return sc->nr_reclaimed; |
| 2110 | 2104 | ||
| 2105 | /* | ||
| 2106 | * As hibernation is going on, kswapd is freezed so that it can't mark | ||
| 2107 | * the zone into all_unreclaimable. Thus bypassing all_unreclaimable | ||
| 2108 | * check. | ||
| 2109 | */ | ||
| 2110 | if (oom_killer_disabled) | ||
| 2111 | return 0; | ||
| 2112 | |||
| 2111 | /* top priority shrink_zones still had more to do? don't OOM, then */ | 2113 | /* top priority shrink_zones still had more to do? don't OOM, then */ |
| 2112 | if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc)) | 2114 | if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc)) |
| 2113 | return 1; | 2115 | return 1; |
diff --git a/mm/vmstat.c b/mm/vmstat.c index 772b39b87d95..897ea9e88238 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c | |||
| @@ -321,9 +321,12 @@ static inline void mod_state(struct zone *zone, | |||
| 321 | /* | 321 | /* |
| 322 | * The fetching of the stat_threshold is racy. We may apply | 322 | * The fetching of the stat_threshold is racy. We may apply |
| 323 | * a counter threshold to the wrong the cpu if we get | 323 | * a counter threshold to the wrong the cpu if we get |
| 324 | * rescheduled while executing here. However, the following | 324 | * rescheduled while executing here. However, the next |
| 325 | * will apply the threshold again and therefore bring the | 325 | * counter update will apply the threshold again and |
| 326 | * counter under the threshold. | 326 | * therefore bring the counter under the threshold again. |
| 327 | * | ||
| 328 | * Most of the time the thresholds are the same anyways | ||
| 329 | * for all cpus in a zone. | ||
| 327 | */ | 330 | */ |
| 328 | t = this_cpu_read(pcp->stat_threshold); | 331 | t = this_cpu_read(pcp->stat_threshold); |
| 329 | 332 | ||
| @@ -945,7 +948,16 @@ static const char * const vmstat_text[] = { | |||
| 945 | "unevictable_pgs_cleared", | 948 | "unevictable_pgs_cleared", |
| 946 | "unevictable_pgs_stranded", | 949 | "unevictable_pgs_stranded", |
| 947 | "unevictable_pgs_mlockfreed", | 950 | "unevictable_pgs_mlockfreed", |
| 951 | |||
| 952 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
| 953 | "thp_fault_alloc", | ||
| 954 | "thp_fault_fallback", | ||
| 955 | "thp_collapse_alloc", | ||
| 956 | "thp_collapse_alloc_failed", | ||
| 957 | "thp_split", | ||
| 948 | #endif | 958 | #endif |
| 959 | |||
| 960 | #endif /* CONFIG_VM_EVENTS_COUNTERS */ | ||
| 949 | }; | 961 | }; |
| 950 | 962 | ||
| 951 | static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, | 963 | static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, |
diff --git a/net/9p/client.c b/net/9p/client.c index 48b8e084e710..77367745be9b 100644 --- a/net/9p/client.c +++ b/net/9p/client.c | |||
| @@ -929,15 +929,15 @@ error: | |||
| 929 | } | 929 | } |
| 930 | EXPORT_SYMBOL(p9_client_attach); | 930 | EXPORT_SYMBOL(p9_client_attach); |
| 931 | 931 | ||
| 932 | struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames, | 932 | struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname, |
| 933 | int clone) | 933 | char **wnames, int clone) |
| 934 | { | 934 | { |
| 935 | int err; | 935 | int err; |
| 936 | struct p9_client *clnt; | 936 | struct p9_client *clnt; |
| 937 | struct p9_fid *fid; | 937 | struct p9_fid *fid; |
| 938 | struct p9_qid *wqids; | 938 | struct p9_qid *wqids; |
| 939 | struct p9_req_t *req; | 939 | struct p9_req_t *req; |
| 940 | int16_t nwqids, count; | 940 | uint16_t nwqids, count; |
| 941 | 941 | ||
| 942 | err = 0; | 942 | err = 0; |
| 943 | wqids = NULL; | 943 | wqids = NULL; |
| @@ -955,7 +955,7 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames, | |||
| 955 | fid = oldfid; | 955 | fid = oldfid; |
| 956 | 956 | ||
| 957 | 957 | ||
| 958 | P9_DPRINTK(P9_DEBUG_9P, ">>> TWALK fids %d,%d nwname %d wname[0] %s\n", | 958 | P9_DPRINTK(P9_DEBUG_9P, ">>> TWALK fids %d,%d nwname %ud wname[0] %s\n", |
| 959 | oldfid->fid, fid->fid, nwname, wnames ? wnames[0] : NULL); | 959 | oldfid->fid, fid->fid, nwname, wnames ? wnames[0] : NULL); |
| 960 | 960 | ||
| 961 | req = p9_client_rpc(clnt, P9_TWALK, "ddT", oldfid->fid, fid->fid, | 961 | req = p9_client_rpc(clnt, P9_TWALK, "ddT", oldfid->fid, fid->fid, |
| @@ -1220,27 +1220,6 @@ error: | |||
| 1220 | } | 1220 | } |
| 1221 | EXPORT_SYMBOL(p9_client_fsync); | 1221 | EXPORT_SYMBOL(p9_client_fsync); |
| 1222 | 1222 | ||
| 1223 | int p9_client_sync_fs(struct p9_fid *fid) | ||
| 1224 | { | ||
| 1225 | int err = 0; | ||
| 1226 | struct p9_req_t *req; | ||
| 1227 | struct p9_client *clnt; | ||
| 1228 | |||
| 1229 | P9_DPRINTK(P9_DEBUG_9P, ">>> TSYNC_FS fid %d\n", fid->fid); | ||
| 1230 | |||
| 1231 | clnt = fid->clnt; | ||
| 1232 | req = p9_client_rpc(clnt, P9_TSYNCFS, "d", fid->fid); | ||
| 1233 | if (IS_ERR(req)) { | ||
| 1234 | err = PTR_ERR(req); | ||
| 1235 | goto error; | ||
| 1236 | } | ||
| 1237 | P9_DPRINTK(P9_DEBUG_9P, "<<< RSYNCFS fid %d\n", fid->fid); | ||
| 1238 | p9_free_req(clnt, req); | ||
| 1239 | error: | ||
| 1240 | return err; | ||
| 1241 | } | ||
| 1242 | EXPORT_SYMBOL(p9_client_sync_fs); | ||
| 1243 | |||
| 1244 | int p9_client_clunk(struct p9_fid *fid) | 1223 | int p9_client_clunk(struct p9_fid *fid) |
| 1245 | { | 1224 | { |
| 1246 | int err; | 1225 | int err; |
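The p9_client_walk() change makes the element count unsigned end to end (nwname and nwqids become uint16_t). The practical difference is that a large 16-bit wire value can no longer appear negative and slip past range checks; a standalone illustration, with an arbitrary limit in place of any real 9P constant:

/*
 * A 16-bit wire count stored in a signed int16_t turns negative for
 * values of 0x8000 and above, so a "count > limit" sanity test never
 * fires; the unsigned type keeps the comparison meaningful. The limit
 * of 16 below is an arbitrary stand-in, not a 9P constant.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t raw = 0x8000;          /* 32768: a legal 16-bit wire value */
        int16_t  s = (int16_t)raw;      /* -32768 on two's-complement targets */
        uint16_t u = raw;

        printf("as int16_t:  %d  -> flagged as too large? %s\n",
               s, s > 16 ? "yes" : "no");
        printf("as uint16_t: %u  -> flagged as too large? %s\n",
               u, u > 16 ? "yes" : "no");
        return 0;
}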
diff --git a/net/9p/protocol.c b/net/9p/protocol.c index 8a4084fa8b5a..b58a501cf3d1 100644 --- a/net/9p/protocol.c +++ b/net/9p/protocol.c | |||
| @@ -265,7 +265,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt, | |||
| 265 | } | 265 | } |
| 266 | break; | 266 | break; |
| 267 | case 'T':{ | 267 | case 'T':{ |
| 268 | int16_t *nwname = va_arg(ap, int16_t *); | 268 | uint16_t *nwname = va_arg(ap, uint16_t *); |
| 269 | char ***wnames = va_arg(ap, char ***); | 269 | char ***wnames = va_arg(ap, char ***); |
| 270 | 270 | ||
| 271 | errcode = p9pdu_readf(pdu, proto_version, | 271 | errcode = p9pdu_readf(pdu, proto_version, |
| @@ -468,7 +468,8 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt, | |||
| 468 | case 'E':{ | 468 | case 'E':{ |
| 469 | int32_t cnt = va_arg(ap, int32_t); | 469 | int32_t cnt = va_arg(ap, int32_t); |
| 470 | const char *k = va_arg(ap, const void *); | 470 | const char *k = va_arg(ap, const void *); |
| 471 | const char *u = va_arg(ap, const void *); | 471 | const char __user *u = va_arg(ap, |
| 472 | const void __user *); | ||
| 472 | errcode = p9pdu_writef(pdu, proto_version, "d", | 473 | errcode = p9pdu_writef(pdu, proto_version, "d", |
| 473 | cnt); | 474 | cnt); |
| 474 | if (!errcode && pdu_write_urw(pdu, k, u, cnt)) | 475 | if (!errcode && pdu_write_urw(pdu, k, u, cnt)) |
| @@ -495,7 +496,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt, | |||
| 495 | } | 496 | } |
| 496 | break; | 497 | break; |
| 497 | case 'T':{ | 498 | case 'T':{ |
| 498 | int16_t nwname = va_arg(ap, int); | 499 | uint16_t nwname = va_arg(ap, int); |
| 499 | const char **wnames = va_arg(ap, const char **); | 500 | const char **wnames = va_arg(ap, const char **); |
| 500 | 501 | ||
| 501 | errcode = p9pdu_writef(pdu, proto_version, "w", | 502 | errcode = p9pdu_writef(pdu, proto_version, "w", |
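In the writer path the count is still fetched with va_arg(ap, int) even though the variable is now uint16_t; that is deliberate, since default argument promotions turn any uint16_t passed through "..." into an int. A minimal standalone reminder of that rule, unrelated to 9p itself:

/*
 * Variadic arguments narrower than int are promoted to int at the call
 * site, so they must be fetched as int and then narrowed back.
 */
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>

static void take_count(int dummy, ...)
{
        va_list ap;
        uint16_t nwname;

        va_start(ap, dummy);
        nwname = (uint16_t)va_arg(ap, int);   /* fetch as int, then narrow */
        va_end(ap);

        printf("nwname = %u\n", nwname);
}

int main(void)
{
        uint16_t n = 7;

        take_count(0, n);    /* n is promoted to int at the call site */
        return 0;
}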
diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c index d47880e971dd..e883172f9aa2 100644 --- a/net/9p/trans_common.c +++ b/net/9p/trans_common.c | |||
| @@ -66,7 +66,7 @@ p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len, | |||
| 66 | uint32_t pdata_mapped_pages; | 66 | uint32_t pdata_mapped_pages; |
| 67 | struct trans_rpage_info *rpinfo; | 67 | struct trans_rpage_info *rpinfo; |
| 68 | 68 | ||
| 69 | *pdata_off = (size_t)req->tc->pubuf & (PAGE_SIZE-1); | 69 | *pdata_off = (__force size_t)req->tc->pubuf & (PAGE_SIZE-1); |
| 70 | 70 | ||
| 71 | if (*pdata_off) | 71 | if (*pdata_off) |
| 72 | first_page_bytes = min(((size_t)PAGE_SIZE - *pdata_off), | 72 | first_page_bytes = min(((size_t)PAGE_SIZE - *pdata_off), |
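The trans_common.c hunk only adds a __force annotation to quiet sparse about the address-space cast, but the line it touches is the usual page-offset arithmetic: the low bits of the user buffer address give its offset into the first page, and the remainder of that page bounds the first chunk. A self-contained recap with a hypothetical address:

/*
 * Page-offset arithmetic: offset into the first page, usable bytes in
 * that page, and the number of pages a buffer spans.
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL

int main(void)
{
        unsigned long ubuf = 0x7f0000001f00UL;   /* hypothetical user address */
        size_t len = 10000;

        size_t off = ubuf & (PAGE_SIZE - 1);
        size_t first = PAGE_SIZE - off;
        if (first > len)
                first = len;
        size_t pages = (off + len + PAGE_SIZE - 1) / PAGE_SIZE;

        printf("offset in first page: %zu\n", off);     /* 3840 */
        printf("bytes in first page:  %zu\n", first);   /* 256 */
        printf("pages spanned:        %zu\n", pages);   /* 4 */
        return 0;
}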
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index e8f046b07182..244e70742183 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c | |||
| @@ -326,8 +326,11 @@ req_retry_pinned: | |||
| 326 | outp = pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM, | 326 | outp = pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM, |
| 327 | pdata_off, rpinfo->rp_data, pdata_len); | 327 | pdata_off, rpinfo->rp_data, pdata_len); |
| 328 | } else { | 328 | } else { |
| 329 | char *pbuf = req->tc->pubuf ? req->tc->pubuf : | 329 | char *pbuf; |
| 330 | req->tc->pkbuf; | 330 | if (req->tc->pubuf) |
| 331 | pbuf = (__force char *) req->tc->pubuf; | ||
| 332 | else | ||
| 333 | pbuf = req->tc->pkbuf; | ||
| 331 | outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf, | 334 | outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf, |
| 332 | req->tc->pbuf_size); | 335 | req->tc->pbuf_size); |
| 333 | } | 336 | } |
| @@ -352,8 +355,12 @@ req_retry_pinned: | |||
| 352 | in = pack_sg_list_p(chan->sg, out+inp, VIRTQUEUE_NUM, | 355 | in = pack_sg_list_p(chan->sg, out+inp, VIRTQUEUE_NUM, |
| 353 | pdata_off, rpinfo->rp_data, pdata_len); | 356 | pdata_off, rpinfo->rp_data, pdata_len); |
| 354 | } else { | 357 | } else { |
| 355 | char *pbuf = req->tc->pubuf ? req->tc->pubuf : | 358 | char *pbuf; |
| 356 | req->tc->pkbuf; | 359 | if (req->tc->pubuf) |
| 360 | pbuf = (__force char *) req->tc->pubuf; | ||
| 361 | else | ||
| 362 | pbuf = req->tc->pkbuf; | ||
| 363 | |||
| 357 | in = pack_sg_list(chan->sg, out+inp, VIRTQUEUE_NUM, | 364 | in = pack_sg_list(chan->sg, out+inp, VIRTQUEUE_NUM, |
| 358 | pbuf, req->tc->pbuf_size); | 365 | pbuf, req->tc->pbuf_size); |
| 359 | } | 366 | } |
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 50af02737a3d..5a80f41c0cba 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c | |||
| @@ -579,9 +579,15 @@ static void __kick_osd_requests(struct ceph_osd_client *osdc, | |||
| 579 | 579 | ||
| 580 | list_for_each_entry_safe(req, nreq, &osd->o_linger_requests, | 580 | list_for_each_entry_safe(req, nreq, &osd->o_linger_requests, |
| 581 | r_linger_osd) { | 581 | r_linger_osd) { |
| 582 | __unregister_linger_request(osdc, req); | 582 | /* |
| 583 | * reregister request prior to unregistering linger so | ||
| 584 | * that r_osd is preserved. | ||
| 585 | */ | ||
| 586 | BUG_ON(!list_empty(&req->r_req_lru_item)); | ||
| 583 | __register_request(osdc, req); | 587 | __register_request(osdc, req); |
| 584 | list_move(&req->r_req_lru_item, &osdc->req_unsent); | 588 | list_add(&req->r_req_lru_item, &osdc->req_unsent); |
| 589 | list_add(&req->r_osd_item, &req->r_osd->o_requests); | ||
| 590 | __unregister_linger_request(osdc, req); | ||
| 585 | dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid, | 591 | dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid, |
| 586 | osd->o_osd); | 592 | osd->o_osd); |
| 587 | } | 593 | } |
| @@ -798,7 +804,7 @@ static void __register_request(struct ceph_osd_client *osdc, | |||
| 798 | req->r_request->hdr.tid = cpu_to_le64(req->r_tid); | 804 | req->r_request->hdr.tid = cpu_to_le64(req->r_tid); |
| 799 | INIT_LIST_HEAD(&req->r_req_lru_item); | 805 | INIT_LIST_HEAD(&req->r_req_lru_item); |
| 800 | 806 | ||
| 801 | dout("register_request %p tid %lld\n", req, req->r_tid); | 807 | dout("__register_request %p tid %lld\n", req, req->r_tid); |
| 802 | __insert_request(osdc, req); | 808 | __insert_request(osdc, req); |
| 803 | ceph_osdc_get_request(req); | 809 | ceph_osdc_get_request(req); |
| 804 | osdc->num_requests++; | 810 | osdc->num_requests++; |
diff --git a/net/dsa/mv88e6131.c b/net/dsa/mv88e6131.c index d951f93644bf..3da418894efc 100644 --- a/net/dsa/mv88e6131.c +++ b/net/dsa/mv88e6131.c | |||
| @@ -14,6 +14,13 @@ | |||
| 14 | #include "dsa_priv.h" | 14 | #include "dsa_priv.h" |
| 15 | #include "mv88e6xxx.h" | 15 | #include "mv88e6xxx.h" |
| 16 | 16 | ||
| 17 | /* | ||
| 18 | * Switch product IDs | ||
| 19 | */ | ||
| 20 | #define ID_6085 0x04a0 | ||
| 21 | #define ID_6095 0x0950 | ||
| 22 | #define ID_6131 0x1060 | ||
| 23 | |||
| 17 | static char *mv88e6131_probe(struct mii_bus *bus, int sw_addr) | 24 | static char *mv88e6131_probe(struct mii_bus *bus, int sw_addr) |
| 18 | { | 25 | { |
| 19 | int ret; | 26 | int ret; |
| @@ -21,9 +28,11 @@ static char *mv88e6131_probe(struct mii_bus *bus, int sw_addr) | |||
| 21 | ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03); | 28 | ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03); |
| 22 | if (ret >= 0) { | 29 | if (ret >= 0) { |
| 23 | ret &= 0xfff0; | 30 | ret &= 0xfff0; |
| 24 | if (ret == 0x0950) | 31 | if (ret == ID_6085) |
| 32 | return "Marvell 88E6085"; | ||
| 33 | if (ret == ID_6095) | ||
| 25 | return "Marvell 88E6095/88E6095F"; | 34 | return "Marvell 88E6095/88E6095F"; |
| 26 | if (ret == 0x1060) | 35 | if (ret == ID_6131) |
| 27 | return "Marvell 88E6131"; | 36 | return "Marvell 88E6131"; |
| 28 | } | 37 | } |
| 29 | 38 | ||
| @@ -164,6 +173,7 @@ static int mv88e6131_setup_global(struct dsa_switch *ds) | |||
| 164 | 173 | ||
| 165 | static int mv88e6131_setup_port(struct dsa_switch *ds, int p) | 174 | static int mv88e6131_setup_port(struct dsa_switch *ds, int p) |
| 166 | { | 175 | { |
| 176 | struct mv88e6xxx_priv_state *ps = (void *)(ds + 1); | ||
| 167 | int addr = REG_PORT(p); | 177 | int addr = REG_PORT(p); |
| 168 | u16 val; | 178 | u16 val; |
| 169 | 179 | ||
| @@ -171,10 +181,13 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p) | |||
| 171 | * MAC Forcing register: don't force link, speed, duplex | 181 | * MAC Forcing register: don't force link, speed, duplex |
| 172 | * or flow control state to any particular values on physical | 182 | * or flow control state to any particular values on physical |
| 173 | * ports, but force the CPU port and all DSA ports to 1000 Mb/s | 183 | * ports, but force the CPU port and all DSA ports to 1000 Mb/s |
| 174 | * full duplex. | 184 | * (100 Mb/s on 6085) full duplex. |
| 175 | */ | 185 | */ |
| 176 | if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p)) | 186 | if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p)) |
| 177 | REG_WRITE(addr, 0x01, 0x003e); | 187 | if (ps->id == ID_6085) |
| 188 | REG_WRITE(addr, 0x01, 0x003d); /* 100 Mb/s */ | ||
| 189 | else | ||
| 190 | REG_WRITE(addr, 0x01, 0x003e); /* 1000 Mb/s */ | ||
| 178 | else | 191 | else |
| 179 | REG_WRITE(addr, 0x01, 0x0003); | 192 | REG_WRITE(addr, 0x01, 0x0003); |
| 180 | 193 | ||
| @@ -286,6 +299,8 @@ static int mv88e6131_setup(struct dsa_switch *ds) | |||
| 286 | mv88e6xxx_ppu_state_init(ds); | 299 | mv88e6xxx_ppu_state_init(ds); |
| 287 | mutex_init(&ps->stats_mutex); | 300 | mutex_init(&ps->stats_mutex); |
| 288 | 301 | ||
| 302 | ps->id = REG_READ(REG_PORT(0), 0x03) & 0xfff0; | ||
| 303 | |||
| 289 | ret = mv88e6131_switch_reset(ds); | 304 | ret = mv88e6131_switch_reset(ds); |
| 290 | if (ret < 0) | 305 | if (ret < 0) |
| 291 | return ret; | 306 | return ret; |
diff --git a/net/dsa/mv88e6xxx.h b/net/dsa/mv88e6xxx.h index eb0e0aaa9f1b..61156ca26a0d 100644 --- a/net/dsa/mv88e6xxx.h +++ b/net/dsa/mv88e6xxx.h | |||
| @@ -39,6 +39,8 @@ struct mv88e6xxx_priv_state { | |||
| 39 | * Hold this mutex over snapshot + dump sequences. | 39 | * Hold this mutex over snapshot + dump sequences. |
| 40 | */ | 40 | */ |
| 41 | struct mutex stats_mutex; | 41 | struct mutex stats_mutex; |
| 42 | |||
| 43 | int id; /* switch product id */ | ||
| 42 | }; | 44 | }; |
| 43 | 45 | ||
| 44 | struct mv88e6xxx_hw_stat { | 46 | struct mv88e6xxx_hw_stat { |
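The mv88e6131 changes replace magic numbers in the probe path with named product IDs and stash the masked ID in the new ps->id field so setup_port() can choose 100 Mb/s for the 6085 and 1000 Mb/s otherwise. A standalone sketch of the ID decoding; the register value used here is hypothetical:

/*
 * Decode the switch product ID from the upper bits of port-0 register
 * 0x03 and map it to a name, mirroring the probe logic above.
 */
#include <stdio.h>
#include <stdint.h>

#define ID_6085 0x04a0
#define ID_6095 0x0950
#define ID_6131 0x1060

static const char *name_switch(uint16_t reg03)
{
        switch (reg03 & 0xfff0) {
        case ID_6085: return "Marvell 88E6085";
        case ID_6095: return "Marvell 88E6095/88E6095F";
        case ID_6131: return "Marvell 88E6131";
        default:      return NULL;
        }
}

int main(void)
{
        uint16_t sample = 0x04a3;    /* hypothetical register value, rev 3 */
        const char *name = name_switch(sample);

        printf("%s\n", name ? name : "unknown switch");
        return 0;
}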
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index f3c0b549b8e1..4614babdc45f 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c | |||
| @@ -221,9 +221,10 @@ static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook, | |||
| 221 | return csum; | 221 | return csum; |
| 222 | } | 222 | } |
| 223 | 223 | ||
| 224 | static int nf_ip_route(struct dst_entry **dst, struct flowi *fl) | 224 | static int nf_ip_route(struct net *net, struct dst_entry **dst, |
| 225 | struct flowi *fl, bool strict __always_unused) | ||
| 225 | { | 226 | { |
| 226 | struct rtable *rt = ip_route_output_key(&init_net, &fl->u.ip4); | 227 | struct rtable *rt = ip_route_output_key(net, &fl->u.ip4); |
| 227 | if (IS_ERR(rt)) | 228 | if (IS_ERR(rt)) |
| 228 | return PTR_ERR(rt); | 229 | return PTR_ERR(rt); |
| 229 | *dst = &rt->dst; | 230 | *dst = &rt->dst; |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index ea107515c53e..c1acf69858fd 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
| @@ -1891,6 +1891,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
| 1891 | #ifdef CONFIG_IP_ROUTE_CLASSID | 1891 | #ifdef CONFIG_IP_ROUTE_CLASSID |
| 1892 | rth->dst.tclassid = itag; | 1892 | rth->dst.tclassid = itag; |
| 1893 | #endif | 1893 | #endif |
| 1894 | rth->rt_route_iif = dev->ifindex; | ||
| 1894 | rth->rt_iif = dev->ifindex; | 1895 | rth->rt_iif = dev->ifindex; |
| 1895 | rth->dst.dev = init_net.loopback_dev; | 1896 | rth->dst.dev = init_net.loopback_dev; |
| 1896 | dev_hold(rth->dst.dev); | 1897 | dev_hold(rth->dst.dev); |
| @@ -2026,6 +2027,7 @@ static int __mkroute_input(struct sk_buff *skb, | |||
| 2026 | rth->rt_key_src = saddr; | 2027 | rth->rt_key_src = saddr; |
| 2027 | rth->rt_src = saddr; | 2028 | rth->rt_src = saddr; |
| 2028 | rth->rt_gateway = daddr; | 2029 | rth->rt_gateway = daddr; |
| 2030 | rth->rt_route_iif = in_dev->dev->ifindex; | ||
| 2029 | rth->rt_iif = in_dev->dev->ifindex; | 2031 | rth->rt_iif = in_dev->dev->ifindex; |
| 2030 | rth->dst.dev = (out_dev)->dev; | 2032 | rth->dst.dev = (out_dev)->dev; |
| 2031 | dev_hold(rth->dst.dev); | 2033 | dev_hold(rth->dst.dev); |
| @@ -2202,6 +2204,7 @@ local_input: | |||
| 2202 | #ifdef CONFIG_IP_ROUTE_CLASSID | 2204 | #ifdef CONFIG_IP_ROUTE_CLASSID |
| 2203 | rth->dst.tclassid = itag; | 2205 | rth->dst.tclassid = itag; |
| 2204 | #endif | 2206 | #endif |
| 2207 | rth->rt_route_iif = dev->ifindex; | ||
| 2205 | rth->rt_iif = dev->ifindex; | 2208 | rth->rt_iif = dev->ifindex; |
| 2206 | rth->dst.dev = net->loopback_dev; | 2209 | rth->dst.dev = net->loopback_dev; |
| 2207 | dev_hold(rth->dst.dev); | 2210 | dev_hold(rth->dst.dev); |
| @@ -2401,7 +2404,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res, | |||
| 2401 | rth->rt_mark = oldflp4->flowi4_mark; | 2404 | rth->rt_mark = oldflp4->flowi4_mark; |
| 2402 | rth->rt_dst = fl4->daddr; | 2405 | rth->rt_dst = fl4->daddr; |
| 2403 | rth->rt_src = fl4->saddr; | 2406 | rth->rt_src = fl4->saddr; |
| 2404 | rth->rt_iif = 0; | 2407 | rth->rt_route_iif = 0; |
| 2408 | rth->rt_iif = oldflp4->flowi4_oif ? : dev_out->ifindex; | ||
| 2405 | /* get references to the devices that are to be hold by the routing | 2409 | /* get references to the devices that are to be hold by the routing |
| 2406 | cache entry */ | 2410 | cache entry */ |
| 2407 | rth->dst.dev = dev_out; | 2411 | rth->dst.dev = dev_out; |
| @@ -2716,6 +2720,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or | |||
| 2716 | rt->rt_key_dst = ort->rt_key_dst; | 2720 | rt->rt_key_dst = ort->rt_key_dst; |
| 2717 | rt->rt_key_src = ort->rt_key_src; | 2721 | rt->rt_key_src = ort->rt_key_src; |
| 2718 | rt->rt_tos = ort->rt_tos; | 2722 | rt->rt_tos = ort->rt_tos; |
| 2723 | rt->rt_route_iif = ort->rt_route_iif; | ||
| 2719 | rt->rt_iif = ort->rt_iif; | 2724 | rt->rt_iif = ort->rt_iif; |
| 2720 | rt->rt_oif = ort->rt_oif; | 2725 | rt->rt_oif = ort->rt_oif; |
| 2721 | rt->rt_mark = ort->rt_mark; | 2726 | rt->rt_mark = ort->rt_mark; |
| @@ -2725,7 +2730,6 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or | |||
| 2725 | rt->rt_type = ort->rt_type; | 2730 | rt->rt_type = ort->rt_type; |
| 2726 | rt->rt_dst = ort->rt_dst; | 2731 | rt->rt_dst = ort->rt_dst; |
| 2727 | rt->rt_src = ort->rt_src; | 2732 | rt->rt_src = ort->rt_src; |
| 2728 | rt->rt_iif = ort->rt_iif; | ||
| 2729 | rt->rt_gateway = ort->rt_gateway; | 2733 | rt->rt_gateway = ort->rt_gateway; |
| 2730 | rt->rt_spec_dst = ort->rt_spec_dst; | 2734 | rt->rt_spec_dst = ort->rt_spec_dst; |
| 2731 | rt->peer = ort->peer; | 2735 | rt->peer = ort->peer; |
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 13e0e7f659ff..d20a05e970d8 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c | |||
| @@ -74,6 +74,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, | |||
| 74 | rt->rt_key_dst = fl4->daddr; | 74 | rt->rt_key_dst = fl4->daddr; |
| 75 | rt->rt_key_src = fl4->saddr; | 75 | rt->rt_key_src = fl4->saddr; |
| 76 | rt->rt_tos = fl4->flowi4_tos; | 76 | rt->rt_tos = fl4->flowi4_tos; |
| 77 | rt->rt_route_iif = fl4->flowi4_iif; | ||
| 77 | rt->rt_iif = fl4->flowi4_iif; | 78 | rt->rt_iif = fl4->flowi4_iif; |
| 78 | rt->rt_oif = fl4->flowi4_oif; | 79 | rt->rt_oif = fl4->flowi4_oif; |
| 79 | rt->rt_mark = fl4->flowi4_mark; | 80 | rt->rt_mark = fl4->flowi4_mark; |
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index 39aaca2b4fd2..28bc1f644b7b 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c | |||
| @@ -90,9 +90,18 @@ static int nf_ip6_reroute(struct sk_buff *skb, | |||
| 90 | return 0; | 90 | return 0; |
| 91 | } | 91 | } |
| 92 | 92 | ||
| 93 | static int nf_ip6_route(struct dst_entry **dst, struct flowi *fl) | 93 | static int nf_ip6_route(struct net *net, struct dst_entry **dst, |
| 94 | struct flowi *fl, bool strict) | ||
| 94 | { | 95 | { |
| 95 | *dst = ip6_route_output(&init_net, NULL, &fl->u.ip6); | 96 | static const struct ipv6_pinfo fake_pinfo; |
| 97 | static const struct inet_sock fake_sk = { | ||
| 98 | /* makes ip6_route_output set RT6_LOOKUP_F_IFACE: */ | ||
| 99 | .sk.sk_bound_dev_if = 1, | ||
| 100 | .pinet6 = (struct ipv6_pinfo *) &fake_pinfo, | ||
| 101 | }; | ||
| 102 | const void *sk = strict ? &fake_sk : NULL; | ||
| 103 | |||
| 104 | *dst = ip6_route_output(net, sk, &fl->u.ip6); | ||
| 96 | return (*dst)->error; | 105 | return (*dst)->error; |
| 97 | } | 106 | } |
| 98 | 107 | ||
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 56fa12538d45..4f49e5dd41bb 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
| @@ -1622,6 +1622,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
| 1622 | opt_skb = skb_clone(skb, GFP_ATOMIC); | 1622 | opt_skb = skb_clone(skb, GFP_ATOMIC); |
| 1623 | 1623 | ||
| 1624 | if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ | 1624 | if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ |
| 1625 | sock_rps_save_rxhash(sk, skb->rxhash); | ||
| 1625 | if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) | 1626 | if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) |
| 1626 | goto reset; | 1627 | goto reset; |
| 1627 | if (opt_skb) | 1628 | if (opt_skb) |
| @@ -1649,7 +1650,8 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
| 1649 | __kfree_skb(opt_skb); | 1650 | __kfree_skb(opt_skb); |
| 1650 | return 0; | 1651 | return 0; |
| 1651 | } | 1652 | } |
| 1652 | } | 1653 | } else |
| 1654 | sock_rps_save_rxhash(sk, skb->rxhash); | ||
| 1653 | 1655 | ||
| 1654 | if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) | 1656 | if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) |
| 1655 | goto reset; | 1657 | goto reset; |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index d7037c006e13..15c37746845e 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
| @@ -505,6 +505,9 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) | |||
| 505 | int rc; | 505 | int rc; |
| 506 | int is_udplite = IS_UDPLITE(sk); | 506 | int is_udplite = IS_UDPLITE(sk); |
| 507 | 507 | ||
| 508 | if (!ipv6_addr_any(&inet6_sk(sk)->daddr)) | ||
| 509 | sock_rps_save_rxhash(sk, skb->rxhash); | ||
| 510 | |||
| 508 | if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) | 511 | if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) |
| 509 | goto drop; | 512 | goto drop; |
| 510 | 513 | ||
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 9d192d665ff5..c5d4530d8284 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
| @@ -2541,7 +2541,6 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx) | |||
| 2541 | * same TID from the same station | 2541 | * same TID from the same station |
| 2542 | */ | 2542 | */ |
| 2543 | rx->skb = skb; | 2543 | rx->skb = skb; |
| 2544 | rx->flags = 0; | ||
| 2545 | 2544 | ||
| 2546 | CALL_RXH(ieee80211_rx_h_decrypt) | 2545 | CALL_RXH(ieee80211_rx_h_decrypt) |
| 2547 | CALL_RXH(ieee80211_rx_h_check_more_data) | 2546 | CALL_RXH(ieee80211_rx_h_check_more_data) |
| @@ -2612,6 +2611,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) | |||
| 2612 | .sdata = sta->sdata, | 2611 | .sdata = sta->sdata, |
| 2613 | .local = sta->local, | 2612 | .local = sta->local, |
| 2614 | .queue = tid, | 2613 | .queue = tid, |
| 2614 | .flags = 0, | ||
| 2615 | }; | 2615 | }; |
| 2616 | struct tid_ampdu_rx *tid_agg_rx; | 2616 | struct tid_ampdu_rx *tid_agg_rx; |
| 2617 | 2617 | ||
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index c3f988aa1152..32bff6d86cb2 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig | |||
| @@ -652,7 +652,6 @@ comment "Xtables matches" | |||
| 652 | config NETFILTER_XT_MATCH_ADDRTYPE | 652 | config NETFILTER_XT_MATCH_ADDRTYPE |
| 653 | tristate '"addrtype" address type match support' | 653 | tristate '"addrtype" address type match support' |
| 654 | depends on NETFILTER_ADVANCED | 654 | depends on NETFILTER_ADVANCED |
| 655 | depends on (IPV6 || IPV6=n) | ||
| 656 | ---help--- | 655 | ---help--- |
| 657 | This option allows you to match what routing thinks of an address, | 656 | This option allows you to match what routing thinks of an address, |
| 658 | eg. UNICAST, LOCAL, BROADCAST, ... | 657 | eg. UNICAST, LOCAL, BROADCAST, ... |
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c index bca96990218d..a113ff066928 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ip.c +++ b/net/netfilter/ipset/ip_set_bitmap_ip.c | |||
| @@ -338,8 +338,7 @@ bitmap_ip_head(struct ip_set *set, struct sk_buff *skb) | |||
| 338 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)); | 338 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)); |
| 339 | if (map->netmask != 32) | 339 | if (map->netmask != 32) |
| 340 | NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, map->netmask); | 340 | NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, map->netmask); |
| 341 | NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, | 341 | NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)); |
| 342 | htonl(atomic_read(&set->ref) - 1)); | ||
| 343 | NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, | 342 | NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, |
| 344 | htonl(sizeof(*map) + map->memsize)); | 343 | htonl(sizeof(*map) + map->memsize)); |
| 345 | if (with_timeout(map->timeout)) | 344 | if (with_timeout(map->timeout)) |
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c index 5e790172deff..00a33242e90c 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c +++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c | |||
| @@ -434,8 +434,7 @@ bitmap_ipmac_head(struct ip_set *set, struct sk_buff *skb) | |||
| 434 | goto nla_put_failure; | 434 | goto nla_put_failure; |
| 435 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip)); | 435 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip)); |
| 436 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)); | 436 | NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)); |
| 437 | NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, | 437 | NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)); |
| 438 | htonl(atomic_read(&set->ref) - 1)); | ||
| 439 | NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, | 438 | NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, |
| 440 | htonl(sizeof(*map) | 439 | htonl(sizeof(*map) |
| 441 | + (map->last_ip - map->first_ip + 1) * map->dsize)); | 440 | + (map->last_ip - map->first_ip + 1) * map->dsize)); |
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c index 165f09b1a9cb..6b38eb8f6ed8 100644 --- a/net/netfilter/ipset/ip_set_bitmap_port.c +++ b/net/netfilter/ipset/ip_set_bitmap_port.c | |||
| @@ -320,8 +320,7 @@ bitmap_port_head(struct ip_set *set, struct sk_buff *skb) | |||
| 320 | goto nla_put_failure; | 320 | goto nla_put_failure; |
| 321 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, htons(map->first_port)); | 321 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT, htons(map->first_port)); |
| 322 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port)); | 322 | NLA_PUT_NET16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port)); |
| 323 | NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, | 323 | NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)); |
| 324 | htonl(atomic_read(&set->ref) - 1)); | ||
| 325 | NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, | 324 | NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, |
| 326 | htonl(sizeof(*map) + map->memsize)); | 325 | htonl(sizeof(*map) + map->memsize)); |
| 327 | if (with_timeout(map->timeout)) | 326 | if (with_timeout(map->timeout)) |
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 253326e8d990..9152e69a162d 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | 26 | ||
| 27 | static LIST_HEAD(ip_set_type_list); /* all registered set types */ | 27 | static LIST_HEAD(ip_set_type_list); /* all registered set types */ |
| 28 | static DEFINE_MUTEX(ip_set_type_mutex); /* protects ip_set_type_list */ | 28 | static DEFINE_MUTEX(ip_set_type_mutex); /* protects ip_set_type_list */ |
| 29 | static DEFINE_RWLOCK(ip_set_ref_lock); /* protects the set refs */ | ||
| 29 | 30 | ||
| 30 | static struct ip_set **ip_set_list; /* all individual sets */ | 31 | static struct ip_set **ip_set_list; /* all individual sets */ |
| 31 | static ip_set_id_t ip_set_max = CONFIG_IP_SET_MAX; /* max number of sets */ | 32 | static ip_set_id_t ip_set_max = CONFIG_IP_SET_MAX; /* max number of sets */ |
| @@ -301,13 +302,18 @@ EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6); | |||
| 301 | static inline void | 302 | static inline void |
| 302 | __ip_set_get(ip_set_id_t index) | 303 | __ip_set_get(ip_set_id_t index) |
| 303 | { | 304 | { |
| 304 | atomic_inc(&ip_set_list[index]->ref); | 305 | write_lock_bh(&ip_set_ref_lock); |
| 306 | ip_set_list[index]->ref++; | ||
| 307 | write_unlock_bh(&ip_set_ref_lock); | ||
| 305 | } | 308 | } |
| 306 | 309 | ||
| 307 | static inline void | 310 | static inline void |
| 308 | __ip_set_put(ip_set_id_t index) | 311 | __ip_set_put(ip_set_id_t index) |
| 309 | { | 312 | { |
| 310 | atomic_dec(&ip_set_list[index]->ref); | 313 | write_lock_bh(&ip_set_ref_lock); |
| 314 | BUG_ON(ip_set_list[index]->ref == 0); | ||
| 315 | ip_set_list[index]->ref--; | ||
| 316 | write_unlock_bh(&ip_set_ref_lock); | ||
| 311 | } | 317 | } |
| 312 | 318 | ||
| 313 | /* | 319 | /* |
| @@ -324,7 +330,7 @@ ip_set_test(ip_set_id_t index, const struct sk_buff *skb, | |||
| 324 | struct ip_set *set = ip_set_list[index]; | 330 | struct ip_set *set = ip_set_list[index]; |
| 325 | int ret = 0; | 331 | int ret = 0; |
| 326 | 332 | ||
| 327 | BUG_ON(set == NULL || atomic_read(&set->ref) == 0); | 333 | BUG_ON(set == NULL); |
| 328 | pr_debug("set %s, index %u\n", set->name, index); | 334 | pr_debug("set %s, index %u\n", set->name, index); |
| 329 | 335 | ||
| 330 | if (dim < set->type->dimension || | 336 | if (dim < set->type->dimension || |
| @@ -356,7 +362,7 @@ ip_set_add(ip_set_id_t index, const struct sk_buff *skb, | |||
| 356 | struct ip_set *set = ip_set_list[index]; | 362 | struct ip_set *set = ip_set_list[index]; |
| 357 | int ret; | 363 | int ret; |
| 358 | 364 | ||
| 359 | BUG_ON(set == NULL || atomic_read(&set->ref) == 0); | 365 | BUG_ON(set == NULL); |
| 360 | pr_debug("set %s, index %u\n", set->name, index); | 366 | pr_debug("set %s, index %u\n", set->name, index); |
| 361 | 367 | ||
| 362 | if (dim < set->type->dimension || | 368 | if (dim < set->type->dimension || |
| @@ -378,7 +384,7 @@ ip_set_del(ip_set_id_t index, const struct sk_buff *skb, | |||
| 378 | struct ip_set *set = ip_set_list[index]; | 384 | struct ip_set *set = ip_set_list[index]; |
| 379 | int ret = 0; | 385 | int ret = 0; |
| 380 | 386 | ||
| 381 | BUG_ON(set == NULL || atomic_read(&set->ref) == 0); | 387 | BUG_ON(set == NULL); |
| 382 | pr_debug("set %s, index %u\n", set->name, index); | 388 | pr_debug("set %s, index %u\n", set->name, index); |
| 383 | 389 | ||
| 384 | if (dim < set->type->dimension || | 390 | if (dim < set->type->dimension || |
| @@ -397,7 +403,6 @@ EXPORT_SYMBOL_GPL(ip_set_del); | |||
| 397 | * Find set by name, reference it once. The reference makes sure the | 403 | * Find set by name, reference it once. The reference makes sure the |
| 398 | * thing pointed to, does not go away under our feet. | 404 | * thing pointed to, does not go away under our feet. |
| 399 | * | 405 | * |
| 400 | * The nfnl mutex must already be activated. | ||
| 401 | */ | 406 | */ |
| 402 | ip_set_id_t | 407 | ip_set_id_t |
| 403 | ip_set_get_byname(const char *name, struct ip_set **set) | 408 | ip_set_get_byname(const char *name, struct ip_set **set) |
| @@ -423,15 +428,12 @@ EXPORT_SYMBOL_GPL(ip_set_get_byname); | |||
| 423 | * reference count by 1. The caller shall not assume the index | 428 | * reference count by 1. The caller shall not assume the index |
| 424 | * to be valid, after calling this function. | 429 | * to be valid, after calling this function. |
| 425 | * | 430 | * |
| 426 | * The nfnl mutex must already be activated. | ||
| 427 | */ | 431 | */ |
| 428 | void | 432 | void |
| 429 | ip_set_put_byindex(ip_set_id_t index) | 433 | ip_set_put_byindex(ip_set_id_t index) |
| 430 | { | 434 | { |
| 431 | if (ip_set_list[index] != NULL) { | 435 | if (ip_set_list[index] != NULL) |
| 432 | BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0); | ||
| 433 | __ip_set_put(index); | 436 | __ip_set_put(index); |
| 434 | } | ||
| 435 | } | 437 | } |
| 436 | EXPORT_SYMBOL_GPL(ip_set_put_byindex); | 438 | EXPORT_SYMBOL_GPL(ip_set_put_byindex); |
| 437 | 439 | ||
| @@ -441,7 +443,6 @@ EXPORT_SYMBOL_GPL(ip_set_put_byindex); | |||
| 441 | * can't be destroyed. The set cannot be renamed due to | 443 | * can't be destroyed. The set cannot be renamed due to |
| 442 | * the referencing either. | 444 | * the referencing either. |
| 443 | * | 445 | * |
| 444 | * The nfnl mutex must already be activated. | ||
| 445 | */ | 446 | */ |
| 446 | const char * | 447 | const char * |
| 447 | ip_set_name_byindex(ip_set_id_t index) | 448 | ip_set_name_byindex(ip_set_id_t index) |
| @@ -449,7 +450,7 @@ ip_set_name_byindex(ip_set_id_t index) | |||
| 449 | const struct ip_set *set = ip_set_list[index]; | 450 | const struct ip_set *set = ip_set_list[index]; |
| 450 | 451 | ||
| 451 | BUG_ON(set == NULL); | 452 | BUG_ON(set == NULL); |
| 452 | BUG_ON(atomic_read(&set->ref) == 0); | 453 | BUG_ON(set->ref == 0); |
| 453 | 454 | ||
| 454 | /* Referenced, so it's safe */ | 455 | /* Referenced, so it's safe */ |
| 455 | return set->name; | 456 | return set->name; |
| @@ -515,10 +516,7 @@ void | |||
| 515 | ip_set_nfnl_put(ip_set_id_t index) | 516 | ip_set_nfnl_put(ip_set_id_t index) |
| 516 | { | 517 | { |
| 517 | nfnl_lock(); | 518 | nfnl_lock(); |
| 518 | if (ip_set_list[index] != NULL) { | 519 | ip_set_put_byindex(index); |
| 519 | BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0); | ||
| 520 | __ip_set_put(index); | ||
| 521 | } | ||
| 522 | nfnl_unlock(); | 520 | nfnl_unlock(); |
| 523 | } | 521 | } |
| 524 | EXPORT_SYMBOL_GPL(ip_set_nfnl_put); | 522 | EXPORT_SYMBOL_GPL(ip_set_nfnl_put); |
| @@ -526,7 +524,7 @@ EXPORT_SYMBOL_GPL(ip_set_nfnl_put); | |||
| 526 | /* | 524 | /* |
| 527 | * Communication protocol with userspace over netlink. | 525 | * Communication protocol with userspace over netlink. |
| 528 | * | 526 | * |
| 529 | * We already locked by nfnl_lock. | 527 | * The commands are serialized by the nfnl mutex. |
| 530 | */ | 528 | */ |
| 531 | 529 | ||
| 532 | static inline bool | 530 | static inline bool |
| @@ -657,7 +655,6 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb, | |||
| 657 | return -ENOMEM; | 655 | return -ENOMEM; |
| 658 | rwlock_init(&set->lock); | 656 | rwlock_init(&set->lock); |
| 659 | strlcpy(set->name, name, IPSET_MAXNAMELEN); | 657 | strlcpy(set->name, name, IPSET_MAXNAMELEN); |
| 660 | atomic_set(&set->ref, 0); | ||
| 661 | set->family = family; | 658 | set->family = family; |
| 662 | 659 | ||
| 663 | /* | 660 | /* |
| @@ -690,8 +687,8 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb, | |||
| 690 | 687 | ||
| 691 | /* | 688 | /* |
| 692 | * Here, we have a valid, constructed set and we are protected | 689 | * Here, we have a valid, constructed set and we are protected |
| 693 | * by nfnl_lock. Find the first free index in ip_set_list and | 690 | * by the nfnl mutex. Find the first free index in ip_set_list |
| 694 | * check clashing. | 691 | * and check clashing. |
| 695 | */ | 692 | */ |
| 696 | if ((ret = find_free_id(set->name, &index, &clash)) != 0) { | 693 | if ((ret = find_free_id(set->name, &index, &clash)) != 0) { |
| 697 | /* If this is the same set and requested, ignore error */ | 694 | /* If this is the same set and requested, ignore error */ |
| @@ -751,31 +748,51 @@ ip_set_destroy(struct sock *ctnl, struct sk_buff *skb, | |||
| 751 | const struct nlattr * const attr[]) | 748 | const struct nlattr * const attr[]) |
| 752 | { | 749 | { |
| 753 | ip_set_id_t i; | 750 | ip_set_id_t i; |
| 751 | int ret = 0; | ||
| 754 | 752 | ||
| 755 | if (unlikely(protocol_failed(attr))) | 753 | if (unlikely(protocol_failed(attr))) |
| 756 | return -IPSET_ERR_PROTOCOL; | 754 | return -IPSET_ERR_PROTOCOL; |
| 757 | 755 | ||
| 758 | /* References are protected by the nfnl mutex */ | 756 | /* Commands are serialized and references are |
| 757 | * protected by the ip_set_ref_lock. | ||
| 758 | * External systems (i.e. xt_set) must call | ||
| 759 | * ip_set_put|get_nfnl_* functions, that way we | ||
| 760 | * can safely check references here. | ||
| 761 | * | ||
| 762 | * list:set timer can only decrement the reference | ||
| 763 | * counter, so if it's already zero, we can proceed | ||
| 764 | * without holding the lock. | ||
| 765 | */ | ||
| 766 | read_lock_bh(&ip_set_ref_lock); | ||
| 759 | if (!attr[IPSET_ATTR_SETNAME]) { | 767 | if (!attr[IPSET_ATTR_SETNAME]) { |
| 760 | for (i = 0; i < ip_set_max; i++) { | 768 | for (i = 0; i < ip_set_max; i++) { |
| 761 | if (ip_set_list[i] != NULL && | 769 | if (ip_set_list[i] != NULL && ip_set_list[i]->ref) { |
| 762 | (atomic_read(&ip_set_list[i]->ref))) | 770 | ret = IPSET_ERR_BUSY; |
| 763 | return -IPSET_ERR_BUSY; | 771 | goto out; |
| 772 | } | ||
| 764 | } | 773 | } |
| 774 | read_unlock_bh(&ip_set_ref_lock); | ||
| 765 | for (i = 0; i < ip_set_max; i++) { | 775 | for (i = 0; i < ip_set_max; i++) { |
| 766 | if (ip_set_list[i] != NULL) | 776 | if (ip_set_list[i] != NULL) |
| 767 | ip_set_destroy_set(i); | 777 | ip_set_destroy_set(i); |
| 768 | } | 778 | } |
| 769 | } else { | 779 | } else { |
| 770 | i = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME])); | 780 | i = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME])); |
| 771 | if (i == IPSET_INVALID_ID) | 781 | if (i == IPSET_INVALID_ID) { |
| 772 | return -ENOENT; | 782 | ret = -ENOENT; |
| 773 | else if (atomic_read(&ip_set_list[i]->ref)) | 783 | goto out; |
| 774 | return -IPSET_ERR_BUSY; | 784 | } else if (ip_set_list[i]->ref) { |
| 785 | ret = -IPSET_ERR_BUSY; | ||
| 786 | goto out; | ||
| 787 | } | ||
| 788 | read_unlock_bh(&ip_set_ref_lock); | ||
| 775 | 789 | ||
| 776 | ip_set_destroy_set(i); | 790 | ip_set_destroy_set(i); |
| 777 | } | 791 | } |
| 778 | return 0; | 792 | return 0; |
| 793 | out: | ||
| 794 | read_unlock_bh(&ip_set_ref_lock); | ||
| 795 | return ret; | ||
| 779 | } | 796 | } |
| 780 | 797 | ||
| 781 | /* Flush sets */ | 798 | /* Flush sets */ |
| @@ -834,6 +851,7 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb, | |||
| 834 | struct ip_set *set; | 851 | struct ip_set *set; |
| 835 | const char *name2; | 852 | const char *name2; |
| 836 | ip_set_id_t i; | 853 | ip_set_id_t i; |
| 854 | int ret = 0; | ||
| 837 | 855 | ||
| 838 | if (unlikely(protocol_failed(attr) || | 856 | if (unlikely(protocol_failed(attr) || |
| 839 | attr[IPSET_ATTR_SETNAME] == NULL || | 857 | attr[IPSET_ATTR_SETNAME] == NULL || |
| @@ -843,25 +861,33 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb, | |||
| 843 | set = find_set(nla_data(attr[IPSET_ATTR_SETNAME])); | 861 | set = find_set(nla_data(attr[IPSET_ATTR_SETNAME])); |
| 844 | if (set == NULL) | 862 | if (set == NULL) |
| 845 | return -ENOENT; | 863 | return -ENOENT; |
| 846 | if (atomic_read(&set->ref) != 0) | 864 | |
| 847 | return -IPSET_ERR_REFERENCED; | 865 | read_lock_bh(&ip_set_ref_lock); |
| 866 | if (set->ref != 0) { | ||
| 867 | ret = -IPSET_ERR_REFERENCED; | ||
| 868 | goto out; | ||
| 869 | } | ||
| 848 | 870 | ||
| 849 | name2 = nla_data(attr[IPSET_ATTR_SETNAME2]); | 871 | name2 = nla_data(attr[IPSET_ATTR_SETNAME2]); |
| 850 | for (i = 0; i < ip_set_max; i++) { | 872 | for (i = 0; i < ip_set_max; i++) { |
| 851 | if (ip_set_list[i] != NULL && | 873 | if (ip_set_list[i] != NULL && |
| 852 | STREQ(ip_set_list[i]->name, name2)) | 874 | STREQ(ip_set_list[i]->name, name2)) { |
| 853 | return -IPSET_ERR_EXIST_SETNAME2; | 875 | ret = -IPSET_ERR_EXIST_SETNAME2; |
| 876 | goto out; | ||
| 877 | } | ||
| 854 | } | 878 | } |
| 855 | strncpy(set->name, name2, IPSET_MAXNAMELEN); | 879 | strncpy(set->name, name2, IPSET_MAXNAMELEN); |
| 856 | 880 | ||
| 857 | return 0; | 881 | out: |
| 882 | read_unlock_bh(&ip_set_ref_lock); | ||
| 883 | return ret; | ||
| 858 | } | 884 | } |
| 859 | 885 | ||
| 860 | /* Swap two sets so that name/index points to the other. | 886 | /* Swap two sets so that name/index points to the other. |
| 861 | * References and set names are also swapped. | 887 | * References and set names are also swapped. |
| 862 | * | 888 | * |
| 863 | * We are protected by the nfnl mutex and references are | 889 | * The commands are serialized by the nfnl mutex and references are |
| 864 | * manipulated only by holding the mutex. The kernel interfaces | 890 | * protected by the ip_set_ref_lock. The kernel interfaces |
| 865 | * do not hold the mutex but the pointer settings are atomic | 891 | * do not hold the mutex but the pointer settings are atomic |
| 866 | * so the ip_set_list always contains valid pointers to the sets. | 892 | * so the ip_set_list always contains valid pointers to the sets. |
| 867 | */ | 893 | */ |
| @@ -874,7 +900,6 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb, | |||
| 874 | struct ip_set *from, *to; | 900 | struct ip_set *from, *to; |
| 875 | ip_set_id_t from_id, to_id; | 901 | ip_set_id_t from_id, to_id; |
| 876 | char from_name[IPSET_MAXNAMELEN]; | 902 | char from_name[IPSET_MAXNAMELEN]; |
| 877 | u32 from_ref; | ||
| 878 | 903 | ||
| 879 | if (unlikely(protocol_failed(attr) || | 904 | if (unlikely(protocol_failed(attr) || |
| 880 | attr[IPSET_ATTR_SETNAME] == NULL || | 905 | attr[IPSET_ATTR_SETNAME] == NULL || |
| @@ -899,17 +924,15 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb, | |||
| 899 | from->type->family == to->type->family)) | 924 | from->type->family == to->type->family)) |
| 900 | return -IPSET_ERR_TYPE_MISMATCH; | 925 | return -IPSET_ERR_TYPE_MISMATCH; |
| 901 | 926 | ||
| 902 | /* No magic here: ref munging protected by the nfnl_lock */ | ||
| 903 | strncpy(from_name, from->name, IPSET_MAXNAMELEN); | 927 | strncpy(from_name, from->name, IPSET_MAXNAMELEN); |
| 904 | from_ref = atomic_read(&from->ref); | ||
| 905 | |||
| 906 | strncpy(from->name, to->name, IPSET_MAXNAMELEN); | 928 | strncpy(from->name, to->name, IPSET_MAXNAMELEN); |
| 907 | atomic_set(&from->ref, atomic_read(&to->ref)); | ||
| 908 | strncpy(to->name, from_name, IPSET_MAXNAMELEN); | 929 | strncpy(to->name, from_name, IPSET_MAXNAMELEN); |
| 909 | atomic_set(&to->ref, from_ref); | ||
| 910 | 930 | ||
| 931 | write_lock_bh(&ip_set_ref_lock); | ||
| 932 | swap(from->ref, to->ref); | ||
| 911 | ip_set_list[from_id] = to; | 933 | ip_set_list[from_id] = to; |
| 912 | ip_set_list[to_id] = from; | 934 | ip_set_list[to_id] = from; |
| 935 | write_unlock_bh(&ip_set_ref_lock); | ||
| 913 | 936 | ||
| 914 | return 0; | 937 | return 0; |
| 915 | } | 938 | } |
| @@ -926,7 +949,7 @@ ip_set_dump_done(struct netlink_callback *cb) | |||
| 926 | { | 949 | { |
| 927 | if (cb->args[2]) { | 950 | if (cb->args[2]) { |
| 928 | pr_debug("release set %s\n", ip_set_list[cb->args[1]]->name); | 951 | pr_debug("release set %s\n", ip_set_list[cb->args[1]]->name); |
| 929 | __ip_set_put((ip_set_id_t) cb->args[1]); | 952 | ip_set_put_byindex((ip_set_id_t) cb->args[1]); |
| 930 | } | 953 | } |
| 931 | return 0; | 954 | return 0; |
| 932 | } | 955 | } |
| @@ -1068,7 +1091,7 @@ release_refcount: | |||
| 1068 | /* If there was an error or set is done, release set */ | 1091 | /* If there was an error or set is done, release set */ |
| 1069 | if (ret || !cb->args[2]) { | 1092 | if (ret || !cb->args[2]) { |
| 1070 | pr_debug("release set %s\n", ip_set_list[index]->name); | 1093 | pr_debug("release set %s\n", ip_set_list[index]->name); |
| 1071 | __ip_set_put(index); | 1094 | ip_set_put_byindex(index); |
| 1072 | } | 1095 | } |
| 1073 | 1096 | ||
| 1074 | /* If we dump all sets, continue with dumping last ones */ | 1097 | /* If we dump all sets, continue with dumping last ones */ |
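The core change in ip_set_core.c is that set->ref stops being an atomic_t manipulated only under the nfnl mutex and becomes a plain integer guarded by the new ip_set_ref_lock rwlock, so DESTROY and RENAME can check whether a set is still referenced without relying on nfnl serialization alone. A user-space analogue of that locking scheme, using pthreads in place of the kernel rwlock_t; the function names are made up:

/*
 * Reference count as a plain integer: writers (get/put) take the write
 * lock, while "is it busy?" checks take the read lock.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t ref_lock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned int set_ref;

static void set_get(void)
{
        pthread_rwlock_wrlock(&ref_lock);
        set_ref++;
        pthread_rwlock_unlock(&ref_lock);
}

static void set_put(void)
{
        pthread_rwlock_wrlock(&ref_lock);
        if (set_ref > 0)
                set_ref--;
        pthread_rwlock_unlock(&ref_lock);
}

/* Returns 0 when the set may be destroyed, -1 when it is still in use. */
static int set_try_destroy(void)
{
        int busy;

        pthread_rwlock_rdlock(&ref_lock);
        busy = set_ref != 0;
        pthread_rwlock_unlock(&ref_lock);

        return busy ? -1 : 0;
}

int main(void)
{
        set_get();
        printf("destroy while referenced: %d\n", set_try_destroy()); /* -1 */
        set_put();
        printf("destroy after release:    %d\n", set_try_destroy()); /* 0 */
        return 0;
}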
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c index a47c32982f06..e9159e99fc4b 100644 --- a/net/netfilter/ipset/ip_set_list_set.c +++ b/net/netfilter/ipset/ip_set_list_set.c | |||
| @@ -43,14 +43,19 @@ struct list_set { | |||
| 43 | static inline struct set_elem * | 43 | static inline struct set_elem * |
| 44 | list_set_elem(const struct list_set *map, u32 id) | 44 | list_set_elem(const struct list_set *map, u32 id) |
| 45 | { | 45 | { |
| 46 | return (struct set_elem *)((char *)map->members + id * map->dsize); | 46 | return (struct set_elem *)((void *)map->members + id * map->dsize); |
| 47 | } | ||
| 48 | |||
| 49 | static inline struct set_telem * | ||
| 50 | list_set_telem(const struct list_set *map, u32 id) | ||
| 51 | { | ||
| 52 | return (struct set_telem *)((void *)map->members + id * map->dsize); | ||
| 47 | } | 53 | } |
| 48 | 54 | ||
| 49 | static inline bool | 55 | static inline bool |
| 50 | list_set_timeout(const struct list_set *map, u32 id) | 56 | list_set_timeout(const struct list_set *map, u32 id) |
| 51 | { | 57 | { |
| 52 | const struct set_telem *elem = | 58 | const struct set_telem *elem = list_set_telem(map, id); |
| 53 | (const struct set_telem *) list_set_elem(map, id); | ||
| 54 | 59 | ||
| 55 | return ip_set_timeout_test(elem->timeout); | 60 | return ip_set_timeout_test(elem->timeout); |
| 56 | } | 61 | } |
| @@ -58,19 +63,11 @@ list_set_timeout(const struct list_set *map, u32 id) | |||
| 58 | static inline bool | 63 | static inline bool |
| 59 | list_set_expired(const struct list_set *map, u32 id) | 64 | list_set_expired(const struct list_set *map, u32 id) |
| 60 | { | 65 | { |
| 61 | const struct set_telem *elem = | 66 | const struct set_telem *elem = list_set_telem(map, id); |
| 62 | (const struct set_telem *) list_set_elem(map, id); | ||
| 63 | 67 | ||
| 64 | return ip_set_timeout_expired(elem->timeout); | 68 | return ip_set_timeout_expired(elem->timeout); |
| 65 | } | 69 | } |
| 66 | 70 | ||
| 67 | static inline int | ||
| 68 | list_set_exist(const struct set_telem *elem) | ||
| 69 | { | ||
| 70 | return elem->id != IPSET_INVALID_ID && | ||
| 71 | !ip_set_timeout_expired(elem->timeout); | ||
| 72 | } | ||
| 73 | |||
| 74 | /* Set list without and with timeout */ | 71 | /* Set list without and with timeout */ |
| 75 | 72 | ||
| 76 | static int | 73 | static int |
| @@ -146,11 +143,11 @@ list_elem_tadd(struct list_set *map, u32 i, ip_set_id_t id, | |||
| 146 | struct set_telem *e; | 143 | struct set_telem *e; |
| 147 | 144 | ||
| 148 | for (; i < map->size; i++) { | 145 | for (; i < map->size; i++) { |
| 149 | e = (struct set_telem *)list_set_elem(map, i); | 146 | e = list_set_telem(map, i); |
| 150 | swap(e->id, id); | 147 | swap(e->id, id); |
| 148 | swap(e->timeout, timeout); | ||
| 151 | if (e->id == IPSET_INVALID_ID) | 149 | if (e->id == IPSET_INVALID_ID) |
| 152 | break; | 150 | break; |
| 153 | swap(e->timeout, timeout); | ||
| 154 | } | 151 | } |
| 155 | } | 152 | } |
| 156 | 153 | ||
| @@ -164,7 +161,7 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id, | |||
| 164 | /* Last element replaced: e.g. add new,before,last */ | 161 | /* Last element replaced: e.g. add new,before,last */ |
| 165 | ip_set_put_byindex(e->id); | 162 | ip_set_put_byindex(e->id); |
| 166 | if (with_timeout(map->timeout)) | 163 | if (with_timeout(map->timeout)) |
| 167 | list_elem_tadd(map, i, id, timeout); | 164 | list_elem_tadd(map, i, id, ip_set_timeout_set(timeout)); |
| 168 | else | 165 | else |
| 169 | list_elem_add(map, i, id); | 166 | list_elem_add(map, i, id); |
| 170 | 167 | ||
| @@ -172,11 +169,11 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id, | |||
| 172 | } | 169 | } |
| 173 | 170 | ||
| 174 | static int | 171 | static int |
| 175 | list_set_del(struct list_set *map, ip_set_id_t id, u32 i) | 172 | list_set_del(struct list_set *map, u32 i) |
| 176 | { | 173 | { |
| 177 | struct set_elem *a = list_set_elem(map, i), *b; | 174 | struct set_elem *a = list_set_elem(map, i), *b; |
| 178 | 175 | ||
| 179 | ip_set_put_byindex(id); | 176 | ip_set_put_byindex(a->id); |
| 180 | 177 | ||
| 181 | for (; i < map->size - 1; i++) { | 178 | for (; i < map->size - 1; i++) { |
| 182 | b = list_set_elem(map, i + 1); | 179 | b = list_set_elem(map, i + 1); |
| @@ -308,11 +305,11 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[], | |||
| 308 | (before == 0 || | 305 | (before == 0 || |
| 309 | (before > 0 && | 306 | (before > 0 && |
| 310 | next_id_eq(map, i, refid)))) | 307 | next_id_eq(map, i, refid)))) |
| 311 | ret = list_set_del(map, id, i); | 308 | ret = list_set_del(map, i); |
| 312 | else if (before < 0 && | 309 | else if (before < 0 && |
| 313 | elem->id == refid && | 310 | elem->id == refid && |
| 314 | next_id_eq(map, i, id)) | 311 | next_id_eq(map, i, id)) |
| 315 | ret = list_set_del(map, id, i + 1); | 312 | ret = list_set_del(map, i + 1); |
| 316 | } | 313 | } |
| 317 | break; | 314 | break; |
| 318 | default: | 315 | default: |
| @@ -369,8 +366,7 @@ list_set_head(struct ip_set *set, struct sk_buff *skb) | |||
| 369 | NLA_PUT_NET32(skb, IPSET_ATTR_SIZE, htonl(map->size)); | 366 | NLA_PUT_NET32(skb, IPSET_ATTR_SIZE, htonl(map->size)); |
| 370 | if (with_timeout(map->timeout)) | 367 | if (with_timeout(map->timeout)) |
| 371 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout)); | 368 | NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout)); |
| 372 | NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, | 369 | NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)); |
| 373 | htonl(atomic_read(&set->ref) - 1)); | ||
| 374 | NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, | 370 | NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, |
| 375 | htonl(sizeof(*map) + map->size * map->dsize)); | 371 | htonl(sizeof(*map) + map->size * map->dsize)); |
| 376 | ipset_nest_end(skb, nested); | 372 | ipset_nest_end(skb, nested); |
| @@ -461,16 +457,13 @@ list_set_gc(unsigned long ul_set) | |||
| 461 | struct set_telem *e; | 457 | struct set_telem *e; |
| 462 | u32 i; | 458 | u32 i; |
| 463 | 459 | ||
| 464 | /* We run parallel with other readers (test element) | 460 | write_lock_bh(&set->lock); |
| 465 | * but adding/deleting new entries is locked out */ | 461 | for (i = 0; i < map->size; i++) { |
| 466 | read_lock_bh(&set->lock); | 462 | e = list_set_telem(map, i); |
| 467 | for (i = map->size - 1; i >= 0; i--) { | 463 | if (e->id != IPSET_INVALID_ID && list_set_expired(map, i)) |
| 468 | e = (struct set_telem *) list_set_elem(map, i); | 464 | list_set_del(map, i); |
| 469 | if (e->id != IPSET_INVALID_ID && | ||
| 470 | list_set_expired(map, i)) | ||
| 471 | list_set_del(map, e->id, i); | ||
| 472 | } | 465 | } |
| 473 | read_unlock_bh(&set->lock); | 466 | write_unlock_bh(&set->lock); |
| 474 | 467 | ||
| 475 | map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ; | 468 | map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ; |
| 476 | add_timer(&map->gc); | 469 | add_timer(&map->gc); |
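
Note: the ipset hunks above do two related things: timeouts are now stored via ip_set_timeout_set() at insertion time, and the garbage-collection sweep moves from read_lock_bh() to write_lock_bh(). The write lock is needed because list_set_del() compacts the array in place, shifting every later element down one slot, which a concurrent reader must not observe half-done; the old reverse loop also could not terminate normally, since its u32 counter can never go below zero. Below is a minimal userspace sketch of the compacting delete, with hypothetical types standing in for struct set_elem and the map; it illustrates the shifting behaviour, not the kernel code itself.

#include <stdio.h>

#define INVALID_ID 0xFFFFu   /* stand-in for IPSET_INVALID_ID */

struct elem {                 /* stand-in for struct set_elem  */
	unsigned int id;
};

struct list_map {             /* stand-in for struct list_set  */
	unsigned int size;
	struct elem elems[8];
};

/* Delete slot i by shifting every later element down one position
 * and invalidating the last slot. This is why the kernel sweep needs
 * the write lock: a reader walking the array while the shift runs
 * could see an element twice or miss one entirely.                  */
static void compacting_del(struct list_map *map, unsigned int i)
{
	for (; i < map->size - 1; i++)
		map->elems[i] = map->elems[i + 1];
	map->elems[map->size - 1].id = INVALID_ID;
}

int main(void)
{
	struct list_map map = { .size = 5,
		.elems = { {10}, {11}, {12}, {13}, {INVALID_ID} } };
	unsigned int i;

	compacting_del(&map, 1);          /* drop the element with id 11 */
	for (i = 0; i < map.size; i++)
		printf("slot %u: %u\n", i, map.elems[i].id);
	return 0;
}
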
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 33733c8872e7..ae47090bf45f 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c | |||
| @@ -3120,7 +3120,7 @@ nla_put_failure: | |||
| 3120 | static int ip_vs_genl_dump_daemons(struct sk_buff *skb, | 3120 | static int ip_vs_genl_dump_daemons(struct sk_buff *skb, |
| 3121 | struct netlink_callback *cb) | 3121 | struct netlink_callback *cb) |
| 3122 | { | 3122 | { |
| 3123 | struct net *net = skb_net(skb); | 3123 | struct net *net = skb_sknet(skb); |
| 3124 | struct netns_ipvs *ipvs = net_ipvs(net); | 3124 | struct netns_ipvs *ipvs = net_ipvs(net); |
| 3125 | 3125 | ||
| 3126 | mutex_lock(&__ip_vs_mutex); | 3126 | mutex_lock(&__ip_vs_mutex); |
diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c index 867882313e49..bcd5ed6b7130 100644 --- a/net/netfilter/nf_conntrack_h323_asn1.c +++ b/net/netfilter/nf_conntrack_h323_asn1.c | |||
| @@ -631,7 +631,7 @@ static int decode_seqof(bitstr_t *bs, const struct field_t *f, | |||
| 631 | CHECK_BOUND(bs, 2); | 631 | CHECK_BOUND(bs, 2); |
| 632 | count = *bs->cur++; | 632 | count = *bs->cur++; |
| 633 | count <<= 8; | 633 | count <<= 8; |
| 634 | count = *bs->cur++; | 634 | count += *bs->cur++; |
| 635 | break; | 635 | break; |
| 636 | case SEMI: | 636 | case SEMI: |
| 637 | BYTE_ALIGN(bs); | 637 | BYTE_ALIGN(bs); |
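
Note: the one-character fix above changes `count = *bs->cur++` to `count += *bs->cur++` in the two-byte branch of decode_seqof(), so the low byte is added to the already-shifted high byte instead of overwriting it; the SEQUENCE-OF count is a big-endian 16-bit value. A standalone before/after sketch with a hypothetical input buffer:

#include <stdio.h>

int main(void)
{
	const unsigned char buf[] = { 0x01, 0x2c };   /* 0x012c = 300 */
	const unsigned char *cur;
	unsigned int count;

	/* Buggy variant: the low byte overwrites the shifted high byte. */
	cur = buf;
	count = *cur++;
	count <<= 8;
	count = *cur++;
	printf("buggy : %u\n", count);    /* prints 44  */

	/* Fixed variant: high byte shifted, low byte added.             */
	cur = buf;
	count = *cur++;
	count <<= 8;
	count += *cur++;
	printf("fixed : %u\n", count);    /* prints 300 */

	return 0;
}
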
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index 533a183e6661..18b2ce5c8ced 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c | |||
| @@ -731,10 +731,10 @@ static int callforward_do_filter(const union nf_inet_addr *src, | |||
| 731 | 731 | ||
| 732 | memset(&fl2, 0, sizeof(fl2)); | 732 | memset(&fl2, 0, sizeof(fl2)); |
| 733 | fl2.daddr = dst->ip; | 733 | fl2.daddr = dst->ip; |
| 734 | if (!afinfo->route((struct dst_entry **)&rt1, | 734 | if (!afinfo->route(&init_net, (struct dst_entry **)&rt1, |
| 735 | flowi4_to_flowi(&fl1))) { | 735 | flowi4_to_flowi(&fl1), false)) { |
| 736 | if (!afinfo->route((struct dst_entry **)&rt2, | 736 | if (!afinfo->route(&init_net, (struct dst_entry **)&rt2, |
| 737 | flowi4_to_flowi(&fl2))) { | 737 | flowi4_to_flowi(&fl2), false)) { |
| 738 | if (rt1->rt_gateway == rt2->rt_gateway && | 738 | if (rt1->rt_gateway == rt2->rt_gateway && |
| 739 | rt1->dst.dev == rt2->dst.dev) | 739 | rt1->dst.dev == rt2->dst.dev) |
| 740 | ret = 1; | 740 | ret = 1; |
| @@ -755,10 +755,10 @@ static int callforward_do_filter(const union nf_inet_addr *src, | |||
| 755 | 755 | ||
| 756 | memset(&fl2, 0, sizeof(fl2)); | 756 | memset(&fl2, 0, sizeof(fl2)); |
| 757 | ipv6_addr_copy(&fl2.daddr, &dst->in6); | 757 | ipv6_addr_copy(&fl2.daddr, &dst->in6); |
| 758 | if (!afinfo->route((struct dst_entry **)&rt1, | 758 | if (!afinfo->route(&init_net, (struct dst_entry **)&rt1, |
| 759 | flowi6_to_flowi(&fl1))) { | 759 | flowi6_to_flowi(&fl1), false)) { |
| 760 | if (!afinfo->route((struct dst_entry **)&rt2, | 760 | if (!afinfo->route(&init_net, (struct dst_entry **)&rt2, |
| 761 | flowi6_to_flowi(&fl2))) { | 761 | flowi6_to_flowi(&fl2), false)) { |
| 762 | if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway, | 762 | if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway, |
| 763 | sizeof(rt1->rt6i_gateway)) && | 763 | sizeof(rt1->rt6i_gateway)) && |
| 764 | rt1->dst.dev == rt2->dst.dev) | 764 | rt1->dst.dev == rt2->dst.dev) |
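
Note: both callforward_do_filter() hunks keep the existing decision rule while adapting to the extended afinfo->route() signature: a route is resolved for the source and for the forwarding target, and the result is only accepted when both lookups leave through the same gateway on the same device. The sketch below shows just that comparison predicate with made-up route fields; the real code compares rt_gateway (or rt6i_gateway) and dst.dev on live routing entries.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Hypothetical, flattened view of the fields the filter compares. */
struct fake_route {
	uint32_t gateway;     /* stands in for rt_gateway  */
	int      ifindex;     /* stands in for rt->dst.dev */
};

/* Accept only when both lookups use the same next hop on the same
 * interface, mirroring the rt1/rt2 comparison in the hunks above.  */
static bool same_path(const struct fake_route *rt1,
		      const struct fake_route *rt2)
{
	return rt1->gateway == rt2->gateway &&
	       rt1->ifindex == rt2->ifindex;
}

int main(void)
{
	struct fake_route src_rt = { 0x0a000001, 2 };
	struct fake_route dst_rt = { 0x0a000001, 2 };
	struct fake_route far_rt = { 0xc0a80001, 3 };

	printf("forward allowed: %d\n", same_path(&src_rt, &dst_rt)); /* 1 */
	printf("forward allowed: %d\n", same_path(&src_rt, &far_rt)); /* 0 */
	return 0;
}
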
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index 6e6b46cb1db9..9e63b43faeed 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c | |||
| @@ -166,7 +166,7 @@ static u_int32_t tcpmss_reverse_mtu(const struct sk_buff *skb, | |||
| 166 | rcu_read_lock(); | 166 | rcu_read_lock(); |
| 167 | ai = nf_get_afinfo(family); | 167 | ai = nf_get_afinfo(family); |
| 168 | if (ai != NULL) | 168 | if (ai != NULL) |
| 169 | ai->route((struct dst_entry **)&rt, &fl); | 169 | ai->route(&init_net, (struct dst_entry **)&rt, &fl, false); |
| 170 | rcu_read_unlock(); | 170 | rcu_read_unlock(); |
| 171 | 171 | ||
| 172 | if (rt != NULL) { | 172 | if (rt != NULL) { |
diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c index 2220b85e9519..b77d383cec78 100644 --- a/net/netfilter/xt_addrtype.c +++ b/net/netfilter/xt_addrtype.c | |||
| @@ -32,11 +32,32 @@ MODULE_ALIAS("ipt_addrtype"); | |||
| 32 | MODULE_ALIAS("ip6t_addrtype"); | 32 | MODULE_ALIAS("ip6t_addrtype"); |
| 33 | 33 | ||
| 34 | #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) | 34 | #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) |
| 35 | static u32 xt_addrtype_rt6_to_type(const struct rt6_info *rt) | 35 | static u32 match_lookup_rt6(struct net *net, const struct net_device *dev, |
| 36 | const struct in6_addr *addr) | ||
| 36 | { | 37 | { |
| 38 | const struct nf_afinfo *afinfo; | ||
| 39 | struct flowi6 flow; | ||
| 40 | struct rt6_info *rt; | ||
| 37 | u32 ret; | 41 | u32 ret; |
| 42 | int route_err; | ||
| 38 | 43 | ||
| 39 | if (!rt) | 44 | memset(&flow, 0, sizeof(flow)); |
| 45 | ipv6_addr_copy(&flow.daddr, addr); | ||
| 46 | if (dev) | ||
| 47 | flow.flowi6_oif = dev->ifindex; | ||
| 48 | |||
| 49 | rcu_read_lock(); | ||
| 50 | |||
| 51 | afinfo = nf_get_afinfo(NFPROTO_IPV6); | ||
| 52 | if (afinfo != NULL) | ||
| 53 | route_err = afinfo->route(net, (struct dst_entry **)&rt, | ||
| 54 | flowi6_to_flowi(&flow), !!dev); | ||
| 55 | else | ||
| 56 | route_err = 1; | ||
| 57 | |||
| 58 | rcu_read_unlock(); | ||
| 59 | |||
| 60 | if (route_err) | ||
| 40 | return XT_ADDRTYPE_UNREACHABLE; | 61 | return XT_ADDRTYPE_UNREACHABLE; |
| 41 | 62 | ||
| 42 | if (rt->rt6i_flags & RTF_REJECT) | 63 | if (rt->rt6i_flags & RTF_REJECT) |
| @@ -48,6 +69,9 @@ static u32 xt_addrtype_rt6_to_type(const struct rt6_info *rt) | |||
| 48 | ret |= XT_ADDRTYPE_LOCAL; | 69 | ret |= XT_ADDRTYPE_LOCAL; |
| 49 | if (rt->rt6i_flags & RTF_ANYCAST) | 70 | if (rt->rt6i_flags & RTF_ANYCAST) |
| 50 | ret |= XT_ADDRTYPE_ANYCAST; | 71 | ret |= XT_ADDRTYPE_ANYCAST; |
| 72 | |||
| 73 | |||
| 74 | dst_release(&rt->dst); | ||
| 51 | return ret; | 75 | return ret; |
| 52 | } | 76 | } |
| 53 | 77 | ||
| @@ -65,18 +89,8 @@ static bool match_type6(struct net *net, const struct net_device *dev, | |||
| 65 | return false; | 89 | return false; |
| 66 | 90 | ||
| 67 | if ((XT_ADDRTYPE_LOCAL | XT_ADDRTYPE_ANYCAST | | 91 | if ((XT_ADDRTYPE_LOCAL | XT_ADDRTYPE_ANYCAST | |
| 68 | XT_ADDRTYPE_UNREACHABLE) & mask) { | 92 | XT_ADDRTYPE_UNREACHABLE) & mask) |
| 69 | struct rt6_info *rt; | 93 | return !!(mask & match_lookup_rt6(net, dev, addr)); |
| 70 | u32 type; | ||
| 71 | int ifindex = dev ? dev->ifindex : 0; | ||
| 72 | |||
| 73 | rt = rt6_lookup(net, addr, NULL, ifindex, !!dev); | ||
| 74 | |||
| 75 | type = xt_addrtype_rt6_to_type(rt); | ||
| 76 | |||
| 77 | dst_release(&rt->dst); | ||
| 78 | return !!(mask & type); | ||
| 79 | } | ||
| 80 | return true; | 94 | return true; |
| 81 | } | 95 | } |
| 82 | 96 | ||
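
Note: the new match_lookup_rt6() above folds the old lookup-then-classify pair into one helper: build a flowi6 for the address, resolve it through the AF-specific route hook, translate rt6i_flags into XT_ADDRTYPE_* bits, release the dst, and let match_type6() test the result with `!!(mask & type)`. A loose userspace sketch of the flag classification and the mask test follows; the flag values are placeholders and the default/return details of the real helper are simplified.

#include <stdio.h>
#include <stdint.h>

/* Placeholder flag bits standing in for the RTF_* route flags. */
#define RTF_REJECT   0x1
#define RTF_LOCAL    0x2
#define RTF_ANYCAST  0x4

/* Address-type bits named after the XT_ADDRTYPE_* values in the hunk
 * (the numeric values here are placeholders).                        */
#define XT_ADDRTYPE_LOCAL        (1 << 1)
#define XT_ADDRTYPE_ANYCAST      (1 << 2)
#define XT_ADDRTYPE_UNREACHABLE  (1 << 3)

/* Loose mirror of the classification step: a rejected (or failed)
 * lookup is UNREACHABLE, otherwise LOCAL/ANYCAST bits are OR-ed in
 * from the route flags.                                              */
static uint32_t rt_flags_to_type(uint32_t rt_flags)
{
	uint32_t ret = 0;

	if (rt_flags & RTF_REJECT)
		return XT_ADDRTYPE_UNREACHABLE;
	if (rt_flags & RTF_LOCAL)
		ret |= XT_ADDRTYPE_LOCAL;
	if (rt_flags & RTF_ANYCAST)
		ret |= XT_ADDRTYPE_ANYCAST;
	return ret;
}

int main(void)
{
	uint32_t mask = XT_ADDRTYPE_LOCAL | XT_ADDRTYPE_UNREACHABLE;

	/* Local route: LOCAL is in the mask, so the match fires.   */
	printf("%d\n", !!(mask & rt_flags_to_type(RTF_LOCAL)));   /* 1 */
	/* Ordinary unicast route: none of the masked bits are set. */
	printf("%d\n", !!(mask & rt_flags_to_type(0)));           /* 0 */
	return 0;
}
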
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c index 2c0086a4751e..481a86fdc409 100644 --- a/net/netfilter/xt_conntrack.c +++ b/net/netfilter/xt_conntrack.c | |||
| @@ -195,7 +195,7 @@ conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par, | |||
| 195 | return info->match_flags & XT_CONNTRACK_STATE; | 195 | return info->match_flags & XT_CONNTRACK_STATE; |
| 196 | if ((info->match_flags & XT_CONNTRACK_DIRECTION) && | 196 | if ((info->match_flags & XT_CONNTRACK_DIRECTION) && |
| 197 | (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) ^ | 197 | (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) ^ |
| 198 | !!(info->invert_flags & XT_CONNTRACK_DIRECTION)) | 198 | !(info->invert_flags & XT_CONNTRACK_DIRECTION)) |
| 199 | return false; | 199 | return false; |
| 200 | 200 | ||
| 201 | if (info->match_flags & XT_CONNTRACK_ORIGSRC) | 201 | if (info->match_flags & XT_CONNTRACK_ORIGSRC) |
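
Note: the `!!` to `!` change above flips the sense of the direction test: the match now bails out when "packet is in the original direction" XOR "the direction flag is not inverted" holds, so an uninverted rule accepts ORIGINAL traffic and an inverted rule accepts REPLY traffic, assuming that is the intended semantics of XT_CONNTRACK_DIRECTION. A small truth-table program comparing the old and new expressions:

#include <stdio.h>
#include <stdbool.h>

/* Reject condition as written in the fixed hunk: drop the packet
 * when is_original XOR (flag not inverted) is true.               */
static bool rejected_fixed(bool is_original, bool inverted)
{
	return is_original ^ !inverted;
}

/* The pre-patch variant used !!inverted, flipping every outcome.  */
static bool rejected_buggy(bool is_original, bool inverted)
{
	return is_original ^ !!inverted;
}

int main(void)
{
	int orig, inv;

	printf("orig inverted | buggy-match fixed-match\n");
	for (inv = 0; inv <= 1; inv++)
		for (orig = 0; orig <= 1; orig++)
			printf("  %d      %d    |      %d          %d\n",
			       orig, inv,
			       !rejected_buggy(orig, inv),
			       !rejected_fixed(orig, inv));
	return 0;
}
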
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 9022f0a6503e..0a9a2ec2e469 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c | |||
| @@ -427,7 +427,7 @@ static int | |||
| 427 | context_derive_keys_rc4(struct krb5_ctx *ctx) | 427 | context_derive_keys_rc4(struct krb5_ctx *ctx) |
| 428 | { | 428 | { |
| 429 | struct crypto_hash *hmac; | 429 | struct crypto_hash *hmac; |
| 430 | static const char sigkeyconstant[] = "signaturekey"; | 430 | char sigkeyconstant[] = "signaturekey"; |
| 431 | int slen = strlen(sigkeyconstant) + 1; /* include null terminator */ | 431 | int slen = strlen(sigkeyconstant) + 1; /* include null terminator */ |
| 432 | struct hash_desc desc; | 432 | struct hash_desc desc; |
| 433 | struct scatterlist sg[1]; | 433 | struct scatterlist sg[1]; |
diff --git a/sound/arm/pxa2xx-pcm-lib.c b/sound/arm/pxa2xx-pcm-lib.c index 8808b82311b1..76e0d5695075 100644 --- a/sound/arm/pxa2xx-pcm-lib.c +++ b/sound/arm/pxa2xx-pcm-lib.c | |||
| @@ -140,6 +140,9 @@ int __pxa2xx_pcm_prepare(struct snd_pcm_substream *substream) | |||
| 140 | if (!prtd || !prtd->params) | 140 | if (!prtd || !prtd->params) |
| 141 | return 0; | 141 | return 0; |
| 142 | 142 | ||
| 143 | if (prtd->dma_ch == -1) | ||
| 144 | return -EINVAL; | ||
| 145 | |||
| 143 | DCSR(prtd->dma_ch) &= ~DCSR_RUN; | 146 | DCSR(prtd->dma_ch) &= ~DCSR_RUN; |
| 144 | DCSR(prtd->dma_ch) = 0; | 147 | DCSR(prtd->dma_ch) = 0; |
| 145 | DCMD(prtd->dma_ch) = 0; | 148 | DCMD(prtd->dma_ch) = 0; |
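
Note: the added guard treats prtd->dma_ch == -1 as "no DMA channel allocated" and refuses to touch the channel registers; the companion hunk in sound/soc/pxa/pxa2xx-pcm.c further down also clears prtd->params when the channel is released, which keeps the sentinel and the pointer consistent. A generic sketch of this resource-sentinel pattern, with hypothetical names and a plain -1 in place of -EINVAL:

#include <stdio.h>

struct runtime {
	int dma_ch;            /* -1 means "no channel allocated" */
	const void *params;    /* only valid while dma_ch is held */
};

static int prepare(struct runtime *rt)
{
	/* Mirror of the added guard: bail out before poking registers
	 * that belong to a channel we do not own.                     */
	if (rt->dma_ch == -1)
		return -1;
	printf("programming DMA channel %d\n", rt->dma_ch);
	return 0;
}

static void hw_free(struct runtime *rt)
{
	if (rt->dma_ch >= 0) {
		printf("releasing DMA channel %d\n", rt->dma_ch);
		rt->dma_ch = -1;     /* reset the sentinel ...         */
		rt->params = NULL;   /* ... and drop the stale pointer */
	}
}

int main(void)
{
	struct runtime rt = { .dma_ch = 3, .params = "cfg" };

	prepare(&rt);                /* ok: channel 3 is held          */
	hw_free(&rt);
	printf("prepare after free: %d\n", prepare(&rt));   /* -1      */
	return 0;
}
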
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 69e33869a53e..ad97d937d3a8 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
| @@ -3035,6 +3035,7 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = { | |||
| 3035 | SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS), | 3035 | SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS), |
| 3036 | SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD), | 3036 | SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD), |
| 3037 | SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD), | 3037 | SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD), |
| 3038 | SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD), | ||
| 3038 | SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS), | 3039 | SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS), |
| 3039 | SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo", CXT5066_IDEAPAD), /* Fallback for Lenovos without dock mic */ | 3040 | SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo", CXT5066_IDEAPAD), /* Fallback for Lenovos without dock mic */ |
| 3040 | {} | 3041 | {} |
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 251773e45f61..715615a88a8d 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c | |||
| @@ -1280,6 +1280,39 @@ static int simple_playback_pcm_prepare(struct hda_pcm_stream *hinfo, | |||
| 1280 | stream_tag, format, substream); | 1280 | stream_tag, format, substream); |
| 1281 | } | 1281 | } |
| 1282 | 1282 | ||
| 1283 | static void nvhdmi_8ch_7x_set_info_frame_parameters(struct hda_codec *codec, | ||
| 1284 | int channels) | ||
| 1285 | { | ||
| 1286 | unsigned int chanmask; | ||
| 1287 | int chan = channels ? (channels - 1) : 1; | ||
| 1288 | |||
| 1289 | switch (channels) { | ||
| 1290 | default: | ||
| 1291 | case 0: | ||
| 1292 | case 2: | ||
| 1293 | chanmask = 0x00; | ||
| 1294 | break; | ||
| 1295 | case 4: | ||
| 1296 | chanmask = 0x08; | ||
| 1297 | break; | ||
| 1298 | case 6: | ||
| 1299 | chanmask = 0x0b; | ||
| 1300 | break; | ||
| 1301 | case 8: | ||
| 1302 | chanmask = 0x13; | ||
| 1303 | break; | ||
| 1304 | } | ||
| 1305 | |||
| 1306 | /* Set the audio infoframe channel allocation and checksum fields. The | ||
| 1307 | * channel count is computed implicitly by the hardware. */ | ||
| 1308 | snd_hda_codec_write(codec, 0x1, 0, | ||
| 1309 | Nv_VERB_SET_Channel_Allocation, chanmask); | ||
| 1310 | |||
| 1311 | snd_hda_codec_write(codec, 0x1, 0, | ||
| 1312 | Nv_VERB_SET_Info_Frame_Checksum, | ||
| 1313 | (0x71 - chan - chanmask)); | ||
| 1314 | } | ||
| 1315 | |||
| 1283 | static int nvhdmi_8ch_7x_pcm_close(struct hda_pcm_stream *hinfo, | 1316 | static int nvhdmi_8ch_7x_pcm_close(struct hda_pcm_stream *hinfo, |
| 1284 | struct hda_codec *codec, | 1317 | struct hda_codec *codec, |
| 1285 | struct snd_pcm_substream *substream) | 1318 | struct snd_pcm_substream *substream) |
| @@ -1298,6 +1331,10 @@ static int nvhdmi_8ch_7x_pcm_close(struct hda_pcm_stream *hinfo, | |||
| 1298 | AC_VERB_SET_STREAM_FORMAT, 0); | 1331 | AC_VERB_SET_STREAM_FORMAT, 0); |
| 1299 | } | 1332 | } |
| 1300 | 1333 | ||
| 1334 | /* The audio hardware sends a channel count of 0x7 (8ch) when all the | ||
| 1335 | * streams are disabled. */ | ||
| 1336 | nvhdmi_8ch_7x_set_info_frame_parameters(codec, 8); | ||
| 1337 | |||
| 1301 | return snd_hda_multi_out_dig_close(codec, &spec->multiout); | 1338 | return snd_hda_multi_out_dig_close(codec, &spec->multiout); |
| 1302 | } | 1339 | } |
| 1303 | 1340 | ||
| @@ -1308,37 +1345,16 @@ static int nvhdmi_8ch_7x_pcm_prepare(struct hda_pcm_stream *hinfo, | |||
| 1308 | struct snd_pcm_substream *substream) | 1345 | struct snd_pcm_substream *substream) |
| 1309 | { | 1346 | { |
| 1310 | int chs; | 1347 | int chs; |
| 1311 | unsigned int dataDCC1, dataDCC2, chan, chanmask, channel_id; | 1348 | unsigned int dataDCC1, dataDCC2, channel_id; |
| 1312 | int i; | 1349 | int i; |
| 1313 | 1350 | ||
| 1314 | mutex_lock(&codec->spdif_mutex); | 1351 | mutex_lock(&codec->spdif_mutex); |
| 1315 | 1352 | ||
| 1316 | chs = substream->runtime->channels; | 1353 | chs = substream->runtime->channels; |
| 1317 | chan = chs ? (chs - 1) : 1; | ||
| 1318 | 1354 | ||
| 1319 | switch (chs) { | ||
| 1320 | default: | ||
| 1321 | case 0: | ||
| 1322 | case 2: | ||
| 1323 | chanmask = 0x00; | ||
| 1324 | break; | ||
| 1325 | case 4: | ||
| 1326 | chanmask = 0x08; | ||
| 1327 | break; | ||
| 1328 | case 6: | ||
| 1329 | chanmask = 0x0b; | ||
| 1330 | break; | ||
| 1331 | case 8: | ||
| 1332 | chanmask = 0x13; | ||
| 1333 | break; | ||
| 1334 | } | ||
| 1335 | dataDCC1 = AC_DIG1_ENABLE | AC_DIG1_COPYRIGHT; | 1355 | dataDCC1 = AC_DIG1_ENABLE | AC_DIG1_COPYRIGHT; |
| 1336 | dataDCC2 = 0x2; | 1356 | dataDCC2 = 0x2; |
| 1337 | 1357 | ||
| 1338 | /* set the Audio InforFrame Channel Allocation */ | ||
| 1339 | snd_hda_codec_write(codec, 0x1, 0, | ||
| 1340 | Nv_VERB_SET_Channel_Allocation, chanmask); | ||
| 1341 | |||
| 1342 | /* turn off SPDIF once; otherwise the IEC958 bits won't be updated */ | 1358 | /* turn off SPDIF once; otherwise the IEC958 bits won't be updated */ |
| 1343 | if (codec->spdif_status_reset && (codec->spdif_ctls & AC_DIG1_ENABLE)) | 1359 | if (codec->spdif_status_reset && (codec->spdif_ctls & AC_DIG1_ENABLE)) |
| 1344 | snd_hda_codec_write(codec, | 1360 | snd_hda_codec_write(codec, |
| @@ -1413,10 +1429,7 @@ static int nvhdmi_8ch_7x_pcm_prepare(struct hda_pcm_stream *hinfo, | |||
| 1413 | } | 1429 | } |
| 1414 | } | 1430 | } |
| 1415 | 1431 | ||
| 1416 | /* set the Audio Info Frame Checksum */ | 1432 | nvhdmi_8ch_7x_set_info_frame_parameters(codec, chs); |
| 1417 | snd_hda_codec_write(codec, 0x1, 0, | ||
| 1418 | Nv_VERB_SET_Info_Frame_Checksum, | ||
| 1419 | (0x71 - chan - chanmask)); | ||
| 1420 | 1433 | ||
| 1421 | mutex_unlock(&codec->spdif_mutex); | 1434 | mutex_unlock(&codec->spdif_mutex); |
| 1422 | return 0; | 1435 | return 0; |
| @@ -1512,6 +1525,11 @@ static int patch_nvhdmi_8ch_7x(struct hda_codec *codec) | |||
| 1512 | spec->multiout.max_channels = 8; | 1525 | spec->multiout.max_channels = 8; |
| 1513 | spec->pcm_playback = &nvhdmi_pcm_playback_8ch_7x; | 1526 | spec->pcm_playback = &nvhdmi_pcm_playback_8ch_7x; |
| 1514 | codec->patch_ops = nvhdmi_patch_ops_8ch_7x; | 1527 | codec->patch_ops = nvhdmi_patch_ops_8ch_7x; |
| 1528 | |||
| 1529 | /* Initialize the audio infoframe channel mask and checksum to something | ||
| 1530 | * valid */ | ||
| 1531 | nvhdmi_8ch_7x_set_info_frame_parameters(codec, 8); | ||
| 1532 | |||
| 1515 | return 0; | 1533 | return 0; |
| 1516 | } | 1534 | } |
| 1517 | 1535 | ||
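
Note: the new nvhdmi_8ch_7x_set_info_frame_parameters() centralises the channel-allocation mask and the audio-infoframe checksum that the prepare callback used to compute inline, and it is now also called at patch time and from the close path with 8 channels so the hardware is never left with stale values. The checksum written to Nv_VERB_SET_Info_Frame_Checksum is simply 0x71 - chan - chanmask, which can be tabulated directly:

#include <stdio.h>

/* Channel-allocation mask exactly as in the hunk above. */
static unsigned int channels_to_chanmask(int channels)
{
	switch (channels) {
	default:
	case 0:
	case 2:
		return 0x00;
	case 4:
		return 0x08;
	case 6:
		return 0x0b;
	case 8:
		return 0x13;
	}
}

int main(void)
{
	static const int counts[] = { 2, 4, 6, 8 };
	unsigned int i;

	for (i = 0; i < sizeof(counts) / sizeof(counts[0]); i++) {
		int channels = counts[i];
		int chan = channels ? (channels - 1) : 1;
		unsigned int chanmask = channels_to_chanmask(channels);

		/* Checksum as written to Nv_VERB_SET_Info_Frame_Checksum. */
		printf("%d ch: chanmask=0x%02x checksum=0x%02x\n",
		       channels, chanmask, 0x71 - chan - chanmask);
	}
	return 0;
}
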
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 7e28a64884f6..52928d9a72da 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
| @@ -14124,7 +14124,7 @@ static hda_nid_t alc269vb_capsrc_nids[1] = { | |||
| 14124 | }; | 14124 | }; |
| 14125 | 14125 | ||
| 14126 | static hda_nid_t alc269_adc_candidates[] = { | 14126 | static hda_nid_t alc269_adc_candidates[] = { |
| 14127 | 0x08, 0x09, 0x07, | 14127 | 0x08, 0x09, 0x07, 0x11, |
| 14128 | }; | 14128 | }; |
| 14129 | 14129 | ||
| 14130 | #define alc269_modes alc260_modes | 14130 | #define alc269_modes alc260_modes |
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 1395991c39f2..94d19c03a7f4 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c | |||
| @@ -3408,6 +3408,9 @@ static int get_connection_index(struct hda_codec *codec, hda_nid_t mux, | |||
| 3408 | hda_nid_t conn[HDA_MAX_NUM_INPUTS]; | 3408 | hda_nid_t conn[HDA_MAX_NUM_INPUTS]; |
| 3409 | int i, nums; | 3409 | int i, nums; |
| 3410 | 3410 | ||
| 3411 | if (!(get_wcaps(codec, mux) & AC_WCAP_CONN_LIST)) | ||
| 3412 | return -1; | ||
| 3413 | |||
| 3411 | nums = snd_hda_get_connections(codec, mux, conn, ARRAY_SIZE(conn)); | 3414 | nums = snd_hda_get_connections(codec, mux, conn, ARRAY_SIZE(conn)); |
| 3412 | for (i = 0; i < nums; i++) | 3415 | for (i = 0; i < nums; i++) |
| 3413 | if (conn[i] == nid) | 3416 | if (conn[i] == nid) |
diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c index 02fb66416ddc..2ce0b2d891d5 100644 --- a/sound/soc/pxa/pxa2xx-pcm.c +++ b/sound/soc/pxa/pxa2xx-pcm.c | |||
| @@ -65,6 +65,7 @@ static int pxa2xx_pcm_hw_free(struct snd_pcm_substream *substream) | |||
| 65 | if (prtd->dma_ch >= 0) { | 65 | if (prtd->dma_ch >= 0) { |
| 66 | pxa_free_dma(prtd->dma_ch); | 66 | pxa_free_dma(prtd->dma_ch); |
| 67 | prtd->dma_ch = -1; | 67 | prtd->dma_ch = -1; |
| 68 | prtd->params = NULL; | ||
| 68 | } | 69 | } |
| 69 | 70 | ||
| 70 | return 0; | 71 | return 0; |
diff --git a/sound/soc/pxa/zylonite.c b/sound/soc/pxa/zylonite.c index ac577263b3e3..b6445757fc54 100644 --- a/sound/soc/pxa/zylonite.c +++ b/sound/soc/pxa/zylonite.c | |||
| @@ -167,7 +167,7 @@ static struct snd_soc_dai_link zylonite_dai[] = { | |||
| 167 | .codec_name = "wm9713-codec", | 167 | .codec_name = "wm9713-codec", |
| 168 | .platform_name = "pxa-pcm-audio", | 168 | .platform_name = "pxa-pcm-audio", |
| 169 | .cpu_dai_name = "pxa2xx-ac97", | 169 | .cpu_dai_name = "pxa2xx-ac97", |
| 170 | .codec_name = "wm9713-hifi", | 170 | .codec_dai_name = "wm9713-hifi", |
| 171 | .init = zylonite_wm9713_init, | 171 | .init = zylonite_wm9713_init, |
| 172 | }, | 172 | }, |
| 173 | { | 173 | { |
| @@ -176,7 +176,7 @@ static struct snd_soc_dai_link zylonite_dai[] = { | |||
| 176 | .codec_name = "wm9713-codec", | 176 | .codec_name = "wm9713-codec", |
| 177 | .platform_name = "pxa-pcm-audio", | 177 | .platform_name = "pxa-pcm-audio", |
| 178 | .cpu_dai_name = "pxa2xx-ac97-aux", | 178 | .cpu_dai_name = "pxa2xx-ac97-aux", |
| 179 | .codec_name = "wm9713-aux", | 179 | .codec_dai_name = "wm9713-aux", |
| 180 | }, | 180 | }, |
| 181 | { | 181 | { |
| 182 | .name = "WM9713 Voice", | 182 | .name = "WM9713 Voice", |
| @@ -184,7 +184,7 @@ static struct snd_soc_dai_link zylonite_dai[] = { | |||
| 184 | .codec_name = "wm9713-codec", | 184 | .codec_name = "wm9713-codec", |
| 185 | .platform_name = "pxa-pcm-audio", | 185 | .platform_name = "pxa-pcm-audio", |
| 186 | .cpu_dai_name = "pxa-ssp-dai.2", | 186 | .cpu_dai_name = "pxa-ssp-dai.2", |
| 187 | .codec_name = "wm9713-voice", | 187 | .codec_dai_name = "wm9713-voice", |
| 188 | .ops = &zylonite_voice_ops, | 188 | .ops = &zylonite_voice_ops, |
| 189 | }, | 189 | }, |
| 190 | }; | 190 | }; |
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 4dda58926bc5..b76b74db0968 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c | |||
| @@ -92,8 +92,8 @@ static int min_bytes_needed(unsigned long val) | |||
| 92 | static int format_register_str(struct snd_soc_codec *codec, | 92 | static int format_register_str(struct snd_soc_codec *codec, |
| 93 | unsigned int reg, char *buf, size_t len) | 93 | unsigned int reg, char *buf, size_t len) |
| 94 | { | 94 | { |
| 95 | int wordsize = codec->driver->reg_word_size * 2; | 95 | int wordsize = min_bytes_needed(codec->driver->reg_cache_size) * 2; |
| 96 | int regsize = min_bytes_needed(codec->driver->reg_cache_size) * 2; | 96 | int regsize = codec->driver->reg_word_size * 2; |
| 97 | int ret; | 97 | int ret; |
| 98 | char tmpbuf[len + 1]; | 98 | char tmpbuf[len + 1]; |
| 99 | char regbuf[regsize + 1]; | 99 | char regbuf[regsize + 1]; |
| @@ -132,8 +132,8 @@ static ssize_t soc_codec_reg_show(struct snd_soc_codec *codec, char *buf, | |||
| 132 | size_t total = 0; | 132 | size_t total = 0; |
| 133 | loff_t p = 0; | 133 | loff_t p = 0; |
| 134 | 134 | ||
| 135 | wordsize = codec->driver->reg_word_size * 2; | 135 | wordsize = min_bytes_needed(codec->driver->reg_cache_size) * 2; |
| 136 | regsize = min_bytes_needed(codec->driver->reg_cache_size) * 2; | 136 | regsize = codec->driver->reg_word_size * 2; |
| 137 | 137 | ||
| 138 | len = wordsize + regsize + 2 + 1; | 138 | len = wordsize + regsize + 2 + 1; |
| 139 | 139 | ||
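
Note: the two soc-core hunks above swap which quantity feeds which field width. After the change, wordsize, used to print the register index, is derived from min_bytes_needed(reg_cache_size), while regsize, used for the register value, comes from reg_word_size; previously the two were crossed, so the dump columns were sized from the wrong inputs. A userspace sketch of that "index: value" formatting follows, using a simplified byte-count helper as a stand-in for the driver's min_bytes_needed(), whose exact implementation is not shown in this hunk.

#include <stdio.h>

/* Simplified stand-in for the driver's min_bytes_needed() helper:
 * how many bytes are needed to represent the largest register index. */
static int min_bytes_needed(unsigned long val)
{
	int bytes = 1;

	while (val >>= 8)
		bytes++;
	return bytes;
}

/* Format one "INDEX: VALUE" line the way the codec register dump does:
 * the index column width comes from the register map size, the value
 * column width from the register word size.                           */
static void format_register_line(unsigned int reg_cache_size,
				 unsigned int reg_word_size,
				 unsigned int reg, unsigned int val)
{
	int wordsize = min_bytes_needed(reg_cache_size) * 2; /* index digits */
	int regsize  = reg_word_size * 2;                    /* value digits */

	printf("%.*x: %.*x\n", wordsize, reg, regsize, val);
}

int main(void)
{
	/* A hypothetical codec: 512 one-byte registers. */
	format_register_line(512, 1, 0x1f0, 0x7f);   /* "01f0: 7f" */
	format_register_line(512, 1, 0x004, 0x0a);   /* "0004: 0a" */
	return 0;
}
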
diff --git a/sound/usb/midi.c b/sound/usb/midi.c index b4b39c0b6c9e..f9289102886a 100644 --- a/sound/usb/midi.c +++ b/sound/usb/midi.c | |||
| @@ -1301,6 +1301,7 @@ static int snd_usbmidi_out_endpoint_create(struct snd_usb_midi* umidi, | |||
| 1301 | case USB_ID(0x15ca, 0x0101): /* Textech USB Midi Cable */ | 1301 | case USB_ID(0x15ca, 0x0101): /* Textech USB Midi Cable */ |
| 1302 | case USB_ID(0x15ca, 0x1806): /* Textech USB Midi Cable */ | 1302 | case USB_ID(0x15ca, 0x1806): /* Textech USB Midi Cable */ |
| 1303 | case USB_ID(0x1a86, 0x752d): /* QinHeng CH345 "USB2.0-MIDI" */ | 1303 | case USB_ID(0x1a86, 0x752d): /* QinHeng CH345 "USB2.0-MIDI" */ |
| 1304 | case USB_ID(0xfc08, 0x0101): /* Unknown vendor Cable */ | ||
| 1304 | ep->max_transfer = 4; | 1305 | ep->max_transfer = 4; |
| 1305 | break; | 1306 | break; |
| 1306 | /* | 1307 | /* |
