615 files changed, 6458 insertions, 4050 deletions
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index 7514dbf0a679..c36892c072da 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -227,7 +227,7 @@ X!Isound/sound_firmware.c
      <chapter id="uart16x50">
      <title>16x50 UART Driver</title>
 !Edrivers/tty/serial/serial_core.c
-!Edrivers/tty/serial/8250/8250.c
+!Edrivers/tty/serial/8250/8250_core.c
      </chapter>
 
      <chapter id="fbdev">
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 4609e81dbc37..8ccbf27aead4 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -596,9 +596,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			is selected automatically. Check
 			Documentation/kdump/kdump.txt for further details.
 
-	crashkernel_low=size[KMG]
-			[KNL, x86] parts under 4G.
-
 	crashkernel=range1:size1[,range2:size2,...][@offset]
 			[KNL] Same as above, but depends on the memory
 			in the running system. The syntax of range is
@@ -606,6 +603,26 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			a memory unit (amount[KMG]). See also
 			Documentation/kdump/kdump.txt for an example.
 
+	crashkernel=size[KMG],high
+			[KNL, x86_64] range could be above 4G. Allow kernel
+			to allocate physical memory region from top, so could
+			be above 4G if system have more than 4G ram installed.
+			Otherwise memory region will be allocated below 4G, if
+			available.
+			It will be ignored if crashkernel=X is specified.
+	crashkernel=size[KMG],low
+			[KNL, x86_64] range under 4G. When crashkernel=X,high
+			is passed, kernel could allocate physical memory region
+			above 4G, that cause second kernel crash on system
+			that require some amount of low memory, e.g. swiotlb
+			requires at least 64M+32K low memory. Kernel would
+			try to allocate 72M below 4G automatically.
+			This one let user to specify own low range under 4G
+			for second kernel instead.
+			0: to disable low allocation.
+			It will be ignored when crashkernel=X,high is not used
+			or memory reserved is below 4G.
+
 	cs89x0_dma=	[HW,NET]
 			Format: <dma>
 
@@ -788,6 +805,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 	edd=		[EDD]
 			Format: {"off" | "on" | "skip[mbr]"}
 
+	efi_no_storage_paranoia [EFI; X86]
+			Using this parameter you can use more than 50% of
+			your efi variable storage. Use this parameter only if
+			you are really sure that your UEFI does sane gc and
+			fulfills the spec otherwise your board may brick.
+
 	eisa_irq_edge=	[PARISC,HW]
 			See header of drivers/parisc/eisa.c.
 
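As a quick illustration of how the options documented above combine, a boot line for an x86_64 box might look roughly like the following (sizes are illustrative, not taken from this patch):

	crashkernel=512M,high crashkernel=72M,low efi_no_storage_paranoia

crashkernel=...,high places the crash-kernel reservation above 4G when the system has that much RAM, the ,low variant keeps a small region below 4G for swiotlb/DMA, and efi_no_storage_paranoia should only be added when the firmware's variable-store garbage collection is known to behave.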
diff --git a/Documentation/scsi/LICENSE.qla2xxx b/Documentation/scsi/LICENSE.qla2xxx
index 27a91cf43d6d..5020b7b5a244 100644
--- a/Documentation/scsi/LICENSE.qla2xxx
+++ b/Documentation/scsi/LICENSE.qla2xxx
@@ -1,4 +1,4 @@
-Copyright (c) 2003-2012 QLogic Corporation
+Copyright (c) 2003-2013 QLogic Corporation
 QLogic Linux FC-FCoE Driver
 
 This program includes a device driver for Linux 3.x.
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index 4499bd948860..95731a08f257 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -890,9 +890,8 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
     enable_msi		- Enable Message Signaled Interrupt (MSI) (default = off)
     power_save		- Automatic power-saving timeout (in second, 0 =
 			disable)
-    power_save_controller - Support runtime D3 of HD-audio controller
-			(-1 = on for supported chip (default), false = off,
-			true = force to on even for unsupported hardware)
+    power_save_controller - Reset HD-audio controller in power-saving mode
+			(default = on)
     align_buffer_size	- Force rounding of buffer/period sizes to multiples
 			of 128 bytes. This is more efficient in terms of memory
 			access but isn't required by the HDA spec and prevents
diff --git a/MAINTAINERS b/MAINTAINERS
index e0cd7e53acc0..3a6b36c51083 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4950,6 +4950,12 @@ W:	logfs.org
 S:	Maintained
 F:	fs/logfs/
 
+LPC32XX MACHINE SUPPORT
+M:	Roland Stigge <stigge@antcom.de>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:	Maintained
+F:	arch/arm/mach-lpc32xx/
+
 LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
 M:	Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com>
 M:	Sreekanth Reddy <Sreekanth.Reddy@lsi.com>
@@ -5074,9 +5080,8 @@ S:	Maintained
 F:	drivers/net/ethernet/marvell/sk*
 
 MARVELL LIBERTAS WIRELESS DRIVER
-M:	Dan Williams <dcbw@redhat.com>
 L:	libertas-dev@lists.infradead.org
-S:	Maintained
+S:	Orphan
 F:	drivers/net/wireless/libertas/
 
 MARVELL MV643XX ETHERNET DRIVER
@@ -5578,6 +5583,7 @@ F:	include/uapi/linux/if_*
 F:	include/uapi/linux/netdevice.h
 
 NETXEN (1/10) GbE SUPPORT
+M:	Manish Chopra <manish.chopra@qlogic.com>
 M:	Sony Chacko <sony.chacko@qlogic.com>
 M:	Rajesh Borundia <rajesh.borundia@qlogic.com>
 L:	netdev@vger.kernel.org
@@ -6634,7 +6640,7 @@ S:	Supported
 F:	fs/reiserfs/
 
 REGISTER MAP ABSTRACTION
-M:	Mark Brown <broonie@opensource.wolfsonmicro.com>
+M:	Mark Brown <broonie@kernel.org>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap.git
 S:	Supported
 F:	drivers/base/regmap/
@@ -6960,7 +6966,6 @@ F:	drivers/scsi/st*
 
 SCTP PROTOCOL
 M:	Vlad Yasevich <vyasevich@gmail.com>
-M:	Sridhar Samudrala <sri@us.ibm.com>
 M:	Neil Horman <nhorman@tuxdriver.com>
 L:	linux-sctp@vger.kernel.org
 W:	http://lksctp.sourceforge.net
@@ -7383,7 +7388,7 @@ F:	sound/
 
 SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC)
 M:	Liam Girdwood <lgirdwood@gmail.com>
-M:	Mark Brown <broonie@opensource.wolfsonmicro.com>
+M:	Mark Brown <broonie@kernel.org>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 W:	http://alsa-project.org/main/index.php/ASoC
@@ -7472,7 +7477,7 @@ F:	drivers/clk/spear/
 
 SPI SUBSYSTEM
 M:	Grant Likely <grant.likely@secretlab.ca>
-M:	Mark Brown <broonie@opensource.wolfsonmicro.com>
+M:	Mark Brown <broonie@kernel.org>
 L:	spi-devel-general@lists.sourceforge.net
 Q:	http://patchwork.kernel.org/project/spi-devel-general/list/
 T:	git git://git.secretlab.ca/git/linux-2.6.git
@@ -8717,7 +8722,7 @@ F:	drivers/scsi/vmw_pvscsi.h
 
 VOLTAGE AND CURRENT REGULATOR FRAMEWORK
 M:	Liam Girdwood <lrg@ti.com>
-M:	Mark Brown <broonie@opensource.wolfsonmicro.com>
+M:	Mark Brown <broonie@kernel.org>
 W:	http://opensource.wolfsonmicro.com/node/15
 W:	http://www.slimlogic.co.uk/?p=48
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/lrg/regulator.git
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 9
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION =
 NAME = Unicycling Gorilla
 
 # *DOCUMENTATION*
@@ -513,7 +513,8 @@ ifeq ($(KBUILD_EXTMOD),)
 # Carefully list dependencies so we do not try to build scripts twice
 # in parallel
 PHONY += scripts
-scripts: scripts_basic include/config/auto.conf include/config/tristate.conf
+scripts: scripts_basic include/config/auto.conf include/config/tristate.conf \
+	 asm-generic
 	$(Q)$(MAKE) $(build)=$(@)
 
 # Objects we will link into vmlinux / subdirs we need to visit
diff --git a/arch/alpha/Makefile b/arch/alpha/Makefile
index 4759fe751aa1..2cc3cc519c54 100644
--- a/arch/alpha/Makefile
+++ b/arch/alpha/Makefile
@@ -12,7 +12,7 @@ NM := $(NM) -B
 
 LDFLAGS_vmlinux	:= -static -N #-relax
 CHECKFLAGS	+= -D__alpha__ -m64
-cflags-y	:= -pipe -mno-fp-regs -ffixed-8 -msmall-data
+cflags-y	:= -pipe -mno-fp-regs -ffixed-8
 cflags-y	+= $(call cc-option, -fno-jump-tables)
 
 cpuflags-$(CONFIG_ALPHA_EV4)		:= -mcpu=ev4
diff --git a/arch/alpha/include/asm/floppy.h b/arch/alpha/include/asm/floppy.h
index 46cefbd50e73..bae97eb19d26 100644
--- a/arch/alpha/include/asm/floppy.h
+++ b/arch/alpha/include/asm/floppy.h
@@ -26,7 +26,7 @@
 #define fd_disable_irq()        disable_irq(FLOPPY_IRQ)
 #define fd_cacheflush(addr,size) /* nothing */
 #define fd_request_irq()        request_irq(FLOPPY_IRQ, floppy_interrupt,\
-					    IRQF_DISABLED, "floppy", NULL)
+					    0, "floppy", NULL)
 #define fd_free_irq()           free_irq(FLOPPY_IRQ, NULL)
 
 #ifdef CONFIG_PCI
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index 2872accd2215..7b2be251c30f 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -117,13 +117,6 @@ handle_irq(int irq)
 		return;
 	}
 
-	/*
-	 * From here we must proceed with IPL_MAX. Note that we do not
-	 * explicitly enable interrupts afterwards - some MILO PALcode
-	 * (namely LX164 one) seems to have severe problems with RTI
-	 * at IPL 0.
-	 */
-	local_irq_disable();
 	irq_enter();
 	generic_handle_irq_desc(irq, desc);
 	irq_exit();
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index 772ddfdb71a8..f433fc11877a 100644
--- a/arch/alpha/kernel/irq_alpha.c
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -45,6 +45,14 @@ do_entInt(unsigned long type, unsigned long vector,
 	  unsigned long la_ptr, struct pt_regs *regs)
 {
 	struct pt_regs *old_regs;
+
+	/*
+	 * Disable interrupts during IRQ handling.
+	 * Note that there is no matching local_irq_enable() due to
+	 * severe problems with RTI at IPL0 and some MILO PALcode
+	 * (namely LX164).
+	 */
+	local_irq_disable();
 	switch (type) {
 	case 0:
 #ifdef CONFIG_SMP
@@ -62,7 +70,6 @@ do_entInt(unsigned long type, unsigned long vector,
 	  {
 		long cpu;
 
-		local_irq_disable();
 		smp_percpu_timer_interrupt(regs);
 		cpu = smp_processor_id();
 		if (cpu != boot_cpuid) {
@@ -222,7 +229,6 @@ process_mcheck_info(unsigned long vector, unsigned long la_ptr,
 
 struct irqaction timer_irqaction = {
 	.handler	= timer_interrupt,
-	.flags		= IRQF_DISABLED,
 	.name		= "timer",
 };
 
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
index 4d4c046f708d..1383f8601a93 100644
--- a/arch/alpha/kernel/sys_nautilus.c
+++ b/arch/alpha/kernel/sys_nautilus.c
@@ -188,6 +188,10 @@ nautilus_machine_check(unsigned long vector, unsigned long la_ptr)
 extern void free_reserved_mem(void *, void *);
 extern void pcibios_claim_one_bus(struct pci_bus *);
 
+static struct resource irongate_io = {
+	.name	= "Irongate PCI IO",
+	.flags	= IORESOURCE_IO,
+};
 static struct resource irongate_mem = {
 	.name	= "Irongate PCI MEM",
 	.flags	= IORESOURCE_MEM,
@@ -209,6 +213,7 @@ nautilus_init_pci(void)
 
 	irongate = pci_get_bus_and_slot(0, 0);
 	bus->self = irongate;
+	bus->resource[0] = &irongate_io;
 	bus->resource[1] = &irongate_mem;
 
 	pci_bus_size_bridges(bus);
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index 5cf4a481b8c5..a53cf03f49d5 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
@@ -280,15 +280,15 @@ titan_late_init(void)
 	 * all reported to the kernel as machine checks, so the handler
 	 * is a nop so it can be called to count the individual events.
 	 */
-	titan_request_irq(63+16, titan_intr_nop, IRQF_DISABLED,
+	titan_request_irq(63+16, titan_intr_nop, 0,
 		    "CChip Error", NULL);
-	titan_request_irq(62+16, titan_intr_nop, IRQF_DISABLED,
+	titan_request_irq(62+16, titan_intr_nop, 0,
 		    "PChip 0 H_Error", NULL);
-	titan_request_irq(61+16, titan_intr_nop, IRQF_DISABLED,
+	titan_request_irq(61+16, titan_intr_nop, 0,
 		    "PChip 1 H_Error", NULL);
-	titan_request_irq(60+16, titan_intr_nop, IRQF_DISABLED,
+	titan_request_irq(60+16, titan_intr_nop, 0,
 		    "PChip 0 C_Error", NULL);
-	titan_request_irq(59+16, titan_intr_nop, IRQF_DISABLED,
+	titan_request_irq(59+16, titan_intr_nop, 0,
 		    "PChip 1 C_Error", NULL);
 
 	/*
@@ -348,9 +348,9 @@ privateer_init_pci(void)
 	 * Hook a couple of extra err interrupts that the
 	 * common titan code won't.
 	 */
-	titan_request_irq(53+16, titan_intr_nop, IRQF_DISABLED,
+	titan_request_irq(53+16, titan_intr_nop, 0,
 		    "NMI", NULL);
-	titan_request_irq(50+16, titan_intr_nop, IRQF_DISABLED,
+	titan_request_irq(50+16, titan_intr_nop, 0,
 		    "Temperature Warning", NULL);
 
 	/*
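The IRQF_DISABLED removals in the Alpha hunks above (floppy.h, irq_alpha.c, sys_titan.c) are safe because the flag has long been a no-op: interrupt handlers already run with local interrupts disabled. A minimal sketch of the resulting request_irq() usage, with a hypothetical handler name:

	#include <linux/interrupt.h>

	/* hypothetical handler, for illustration only */
	static irqreturn_t example_handler(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	static int example_setup(unsigned int irq)
	{
		/* flags = 0: handlers already run with local interrupts off,
		 * so the old IRQF_DISABLED bit adds nothing. */
		return request_irq(irq, example_handler, 0, "example", NULL);
	}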
diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h
index ccd84806b62f..eac071668201 100644
--- a/arch/arc/include/asm/irqflags.h
+++ b/arch/arc/include/asm/irqflags.h
@@ -39,7 +39,7 @@ static inline long arch_local_irq_save(void)
 	"	flag.nz %0	\n"
 	: "=r"(temp), "=r"(flags)
 	: "n"((STATUS_E1_MASK | STATUS_E2_MASK))
-	: "cc");
+	: "memory", "cc");
 
 	return flags;
 }
@@ -53,7 +53,8 @@ static inline void arch_local_irq_restore(unsigned long flags)
 	__asm__ __volatile__(
 	"	flag %0			\n"
 	:
-	: "r"(flags));
+	: "r"(flags)
+	: "memory");
 }
 
 /*
@@ -73,7 +74,8 @@ static inline void arch_local_irq_disable(void)
 	"	and %0, %0, %1	\n"
 	"	flag %0		\n"
 	: "=&r"(temp)
-	: "n"(~(STATUS_E1_MASK | STATUS_E2_MASK)));
+	: "n"(~(STATUS_E1_MASK | STATUS_E2_MASK))
+	: "memory");
 }
 
 /*
@@ -85,7 +87,9 @@ static inline long arch_local_save_flags(void)
 
 	__asm__ __volatile__(
 	"	lr  %0, [status32]	\n"
-	: "=&r"(temp));
+	: "=&r"(temp)
+	:
+	: "memory");
 
 	return temp;
 }
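The extra "memory" clobbers added above keep the compiler from caching values in registers or reordering loads and stores across the interrupt-state changes. A toy sketch (generic GCC inline asm, not the ARC code itself) of the effect such a compiler barrier has:

	/* illustration only: a bare asm with a "memory" clobber acts as a
	 * compiler barrier, like the clobbers added to the ARC helpers above */
	static inline void toy_barrier(void)
	{
		asm volatile("" ::: "memory");
	}

	extern int shared_flag;

	int read_after_barrier(void)
	{
		toy_barrier();		/* 'shared_flag' may not be cached across this */
		return shared_flag;	/* forced to be re-read from memory */
	}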
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 13b739469c51..1cacda426a0e 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1183,9 +1183,9 @@ config ARM_NR_BANKS
 	default 8
 
 config IWMMXT
-	bool "Enable iWMMXt support"
+	bool "Enable iWMMXt support" if !CPU_PJ4
 	depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4
-	default y if PXA27x || PXA3xx || ARCH_MMP
+	default y if PXA27x || PXA3xx || ARCH_MMP || CPU_PJ4
 	help
 	  Enable support for iWMMXt context switching at run time if
 	  running on a CPU that supports it.
@@ -1439,6 +1439,16 @@ config ARM_ERRATA_775420
 	  to deadlock. This workaround puts DSB before executing ISB if
 	  an abort may occur on cache maintenance.
 
+config ARM_ERRATA_798181
+	bool "ARM errata: TLBI/DSB failure on Cortex-A15"
+	depends on CPU_V7 && SMP
+	help
+	  On Cortex-A15 (r0p0..r3p2) the TLBI*IS/DSB operations are not
+	  adequately shooting down all use of the old entries. This
+	  option enables the Linux kernel workaround for this erratum
+	  which sends an IPI to the CPUs that are running the same ASID
+	  as the one being invalidated.
+
 endmenu
 
 source "arch/arm/common/Kconfig"
diff --git a/arch/arm/boot/dts/armada-370-mirabox.dts b/arch/arm/boot/dts/armada-370-mirabox.dts
index dd0c57dd9f30..3234875824dc 100644
--- a/arch/arm/boot/dts/armada-370-mirabox.dts
+++ b/arch/arm/boot/dts/armada-370-mirabox.dts
@@ -54,7 +54,7 @@
 		};
 
 		mvsdio@d00d4000 {
-			pinctrl-0 = <&sdio_pins2>;
+			pinctrl-0 = <&sdio_pins3>;
 			pinctrl-names = "default";
 			status = "okay";
 			/*
diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi
index 8188d138020e..a195debb67d3 100644
--- a/arch/arm/boot/dts/armada-370.dtsi
+++ b/arch/arm/boot/dts/armada-370.dtsi
@@ -59,6 +59,12 @@
 				       "mpp50", "mpp51", "mpp52";
 			marvell,function = "sd0";
 		};
+
+		sdio_pins3: sdio-pins3 {
+			marvell,pins = "mpp48", "mpp49", "mpp50",
+				       "mpp51", "mpp52", "mpp53";
+			marvell,function = "sd0";
+		};
 	};
 
 	gpio0: gpio@d0018100 {
diff --git a/arch/arm/boot/dts/dbx5x0.dtsi b/arch/arm/boot/dts/dbx5x0.dtsi
index 9de93096601a..aaa63d0a8096 100644
--- a/arch/arm/boot/dts/dbx5x0.dtsi
+++ b/arch/arm/boot/dts/dbx5x0.dtsi
@@ -191,8 +191,8 @@
 
 	prcmu: prcmu@80157000 {
 		compatible = "stericsson,db8500-prcmu";
-		reg = <0x80157000 0x1000>;
-		reg-names = "prcmu";
+		reg = <0x80157000 0x1000>, <0x801b0000 0x8000>, <0x801b8000 0x1000>;
+		reg-names = "prcmu", "prcmu-tcpm", "prcmu-tcdm";
 		interrupts = <0 47 0x4>;
 		#address-cells = <1>;
 		#size-cells = <1>;
diff --git a/arch/arm/boot/dts/imx28-m28evk.dts b/arch/arm/boot/dts/imx28-m28evk.dts
index 6ce3d17c3a29..fd36e1cca104 100644
--- a/arch/arm/boot/dts/imx28-m28evk.dts
+++ b/arch/arm/boot/dts/imx28-m28evk.dts
@@ -152,7 +152,6 @@
 			i2c0: i2c@80058000 {
 				pinctrl-names = "default";
 				pinctrl-0 = <&i2c0_pins_a>;
-				clock-frequency = <400000>;
 				status = "okay";
 
 				sgtl5000: codec@0a {
diff --git a/arch/arm/boot/dts/imx28-sps1.dts b/arch/arm/boot/dts/imx28-sps1.dts
index e6cde8aa7fff..6c6a5442800a 100644
--- a/arch/arm/boot/dts/imx28-sps1.dts
+++ b/arch/arm/boot/dts/imx28-sps1.dts
@@ -70,7 +70,6 @@
 			i2c0: i2c@80058000 {
 				pinctrl-names = "default";
 				pinctrl-0 = <&i2c0_pins_a>;
-				clock-frequency = <400000>;
 				status = "okay";
 
 				rtc: rtc@51 {
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index 06ec460b4581..281a223591ff 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -91,6 +91,7 @@
 			compatible = "arm,cortex-a9-twd-timer";
 			reg = <0x00a00600 0x20>;
 			interrupts = <1 13 0xf01>;
+			clocks = <&clks 15>;
 		};
 
 		L2: l2-cache@00a02000 {
diff --git a/arch/arm/boot/dts/kirkwood-goflexnet.dts b/arch/arm/boot/dts/kirkwood-goflexnet.dts
index bd83b8fc7c83..c3573be7b92c 100644
--- a/arch/arm/boot/dts/kirkwood-goflexnet.dts
+++ b/arch/arm/boot/dts/kirkwood-goflexnet.dts
@@ -77,6 +77,7 @@
 		};
 
 		nand@3000000 {
+			chip-delay = <40>;
 			status = "okay";
 
 			partition@0 {
diff --git a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
index 93c3afbef9ee..3694e94f6e99 100644
--- a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
+++ b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
@@ -96,11 +96,11 @@
 				marvell,function = "gpio";
 			};
 			pmx_led_rebuild_brt_ctrl_1: pmx-led-rebuild-brt-ctrl-1 {
-				marvell,pins = "mpp44";
+				marvell,pins = "mpp46";
 				marvell,function = "gpio";
 			};
 			pmx_led_rebuild_brt_ctrl_2: pmx-led-rebuild-brt-ctrl-2 {
-				marvell,pins = "mpp45";
+				marvell,pins = "mpp47";
 				marvell,function = "gpio";
 			};
 
@@ -157,14 +157,14 @@
 			gpios = <&gpio0 16 0>;
 			linux,default-trigger = "default-on";
 		};
-		health_led1 {
+		rebuild_led {
+			label = "status:white:rebuild_led";
+			gpios = <&gpio1 4 0>;
+		};
+		health_led {
 			label = "status:red:health_led";
 			gpios = <&gpio1 5 0>;
 		};
-		health_led2 {
-			label = "status:white:health_led";
-			gpios = <&gpio1 4 0>;
-		};
 		backup_led {
 			label = "status:blue:backup_led";
 			gpios = <&gpio0 15 0>;
diff --git a/arch/arm/boot/dts/orion5x.dtsi b/arch/arm/boot/dts/orion5x.dtsi
index 8aad00f81ed9..f7bec3b1ba32 100644
--- a/arch/arm/boot/dts/orion5x.dtsi
+++ b/arch/arm/boot/dts/orion5x.dtsi
@@ -13,6 +13,9 @@
 	compatible = "marvell,orion5x";
 	interrupt-parent = <&intc>;
 
+	aliases {
+		gpio0 = &gpio0;
+	};
 	intc: interrupt-controller {
 		compatible = "marvell,orion-intc", "marvell,intc";
 		interrupt-controller;
@@ -32,7 +35,9 @@
 			#gpio-cells = <2>;
 			gpio-controller;
 			reg = <0x10100 0x40>;
-			ngpio = <32>;
+			ngpios = <32>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
 			interrupts = <6>, <7>, <8>, <9>;
 		};
 
@@ -91,7 +96,7 @@
 			reg = <0x90000 0x10000>,
 			      <0xf2200000 0x800>;
 			reg-names = "regs", "sram";
-			interrupts = <22>;
+			interrupts = <28>;
 			status = "okay";
 		};
 	};
diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
index 720799fd3a81..dff714d886d5 100644
--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
@@ -24,7 +24,7 @@ extern struct arm_delay_ops {
 	void (*delay)(unsigned long);
 	void (*const_udelay)(unsigned long);
 	void (*udelay)(unsigned long);
-	bool const_clock;
+	unsigned long ticks_per_jiffy;
 } arm_delay_ops;
 
 #define __delay(n)		arm_delay_ops.delay(n)
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index cca9f15704ed..ea289e1435e7 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -19,14 +19,6 @@
 #undef _CACHE
 #undef MULTI_CACHE
 
-#if defined(CONFIG_CPU_CACHE_V3)
-# ifdef _CACHE
-#  define MULTI_CACHE 1
-# else
-#  define _CACHE v3
-# endif
-#endif
-
 #if defined(CONFIG_CPU_CACHE_V4)
 # ifdef _CACHE
 #  define MULTI_CACHE 1
diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h
index 02fe2fbe2477..ed94b1a366ae 100644
--- a/arch/arm/include/asm/hardware/iop3xx.h
+++ b/arch/arm/include/asm/hardware/iop3xx.h
@@ -37,7 +37,7 @@ extern int iop3xx_get_init_atu(void);
  * IOP3XX processor registers
  */
 #define IOP3XX_PERIPHERAL_PHYS_BASE	0xffffe000
-#define IOP3XX_PERIPHERAL_VIRT_BASE	0xfeffe000
+#define IOP3XX_PERIPHERAL_VIRT_BASE	0xfedfe000
 #define IOP3XX_PERIPHERAL_SIZE		0x00002000
 #define IOP3XX_PERIPHERAL_UPPER_PA	(IOP3XX_PERIPHERAL_PHYS_BASE +\
 					IOP3XX_PERIPHERAL_SIZE - 1)
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 8c5e828f484d..91b99abe7a95 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -41,6 +41,13 @@ extern void kunmap_high(struct page *page);
 #endif
 #endif
 
+/*
+ * Needed to be able to broadcast the TLB invalidation for kmap.
+ */
+#ifdef CONFIG_ARM_ERRATA_798181
+#undef ARCH_NEEDS_KMAP_HIGH_GET
+#endif
+
 #ifdef ARCH_NEEDS_KMAP_HIGH_GET
 extern void *kmap_high_get(struct page *page);
 #else
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 863a6611323c..a7b85e0d0cc1 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -27,6 +27,8 @@ void __check_vmalloc_seq(struct mm_struct *mm);
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
 #define init_new_context(tsk,mm)	({ atomic64_set(&mm->context.id, 0); 0; })
 
+DECLARE_PER_CPU(atomic64_t, active_asids);
+
 #else	/* !CONFIG_CPU_HAS_ASID */
 
 #ifdef CONFIG_MMU
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 6ef8afd1b64c..86b8fe398b95 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -111,7 +111,7 @@
 #define L_PTE_S2_MT_WRITETHROUGH	(_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
 #define L_PTE_S2_MT_WRITEBACK		(_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
 #define L_PTE_S2_RDONLY			(_AT(pteval_t, 1) << 6)   /* HAP[1]   */
-#define L_PTE_S2_RDWR			(_AT(pteval_t, 2) << 6)   /* HAP[2:1] */
+#define L_PTE_S2_RDWR			(_AT(pteval_t, 3) << 6)   /* HAP[2:1] */
 
 /*
  * Hyp-mode PL2 PTE definitions for LPAE.
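The one-character change above matters because, in the LPAE stage-2 HAP[2:1] permission field, the low bit grants read access and the high bit grants write access: a read/write mapping therefore needs the value 3, while the old value 2 described a write-only mapping. A small illustrative encoding (macro names invented for the sketch, not the kernel headers):

	#include <stdint.h>

	/* invented names; HAP[2:1] sits at bits 7:6 of the stage-2 descriptor */
	#define S2_HAP_SHIFT	6
	#define S2_HAP_READ	(UINT64_C(1) << S2_HAP_SHIFT)	/* HAP[1]: read allowed */
	#define S2_HAP_WRITE	(UINT64_C(2) << S2_HAP_SHIFT)	/* HAP[2]: write allowed */
	#define S2_HAP_RDWR	(S2_HAP_READ | S2_HAP_WRITE)	/* == 3 << 6, as in the fix */

	int main(void)
	{
		/* 2 << 6 (the old value) lacks the read bit; 3 << 6 grants both */
		return (S2_HAP_RDWR >> S2_HAP_SHIFT) == 3 ? 0 : 1;
	}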
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 4db8c8820f0d..ab865e65a84c 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -14,7 +14,6 @@
 
 #include <asm/glue.h>
 
-#define TLB_V3_PAGE	(1 << 0)
 #define TLB_V4_U_PAGE	(1 << 1)
 #define TLB_V4_D_PAGE	(1 << 2)
 #define TLB_V4_I_PAGE	(1 << 3)
@@ -22,7 +21,6 @@
 #define TLB_V6_D_PAGE	(1 << 5)
 #define TLB_V6_I_PAGE	(1 << 6)
 
-#define TLB_V3_FULL	(1 << 8)
 #define TLB_V4_U_FULL	(1 << 9)
 #define TLB_V4_D_FULL	(1 << 10)
 #define TLB_V4_I_FULL	(1 << 11)
@@ -52,7 +50,6 @@
  * =============
  *
  * We have the following to choose from:
- *	  v3    - ARMv3
  *	  v4    - ARMv4 without write buffer
  *	  v4wb  - ARMv4 with write buffer without I TLB flush entry instruction
  *	  v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
@@ -330,7 +327,6 @@ static inline void local_flush_tlb_all(void)
 	if (tlb_flag(TLB_WB))
 		dsb();
 
-	tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
 	tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);
 	tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
 	tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);
@@ -351,9 +347,8 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
 	if (tlb_flag(TLB_WB))
 		dsb();
 
-	if (possible_tlb_flags & (TLB_V3_FULL|TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
+	if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
 		if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
-			tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
 			tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
 			tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);
 			tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero);
@@ -385,9 +380,8 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 	if (tlb_flag(TLB_WB))
 		dsb();
 
-	if (possible_tlb_flags & (TLB_V3_PAGE|TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
+	if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
 	    cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-		tlb_op(TLB_V3_PAGE, "c6, c0, 0", uaddr);
 		tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr);
 		tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr);
 		tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr);
@@ -418,7 +412,6 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
 	if (tlb_flag(TLB_WB))
 		dsb();
 
-	tlb_op(TLB_V3_PAGE, "c6, c0, 0", kaddr);
 	tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);
 	tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);
 	tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr);
@@ -450,6 +443,21 @@ static inline void local_flush_bp_all(void)
 		isb();
 }
 
+#ifdef CONFIG_ARM_ERRATA_798181
+static inline void dummy_flush_tlb_a15_erratum(void)
+{
+	/*
+	 * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
+	 */
+	asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
+	dsb();
+}
+#else
+static inline void dummy_flush_tlb_a15_erratum(void)
+{
+}
+#endif
+
 /*
  *	flush_pmd_entry
  *
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 3248cde504ed..fefd7f971437 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -276,7 +276,13 @@ ENDPROC(ftrace_graph_caller_old)
 */
 
 .macro mcount_enter
+/*
+ * This pad compensates for the push {lr} at the call site. Note that we are
+ * unable to unwind through a function which does not otherwise save its lr.
+ */
+ UNWIND(.pad #4)
 	stmdb	sp!, {r0-r3, lr}
+ UNWIND(.save {r0-r3, lr})
 .endm
 
 .macro mcount_get_lr reg
@@ -289,6 +295,7 @@ ENDPROC(ftrace_graph_caller_old)
 .endm
 
 ENTRY(__gnu_mcount_nc)
+UNWIND(.fnstart)
 #ifdef CONFIG_DYNAMIC_FTRACE
 	mov	ip, lr
 	ldmia	sp!, {lr}
@@ -296,17 +303,22 @@ ENTRY(__gnu_mcount_nc)
 #else
 	__mcount
 #endif
+UNWIND(.fnend)
 ENDPROC(__gnu_mcount_nc)
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 ENTRY(ftrace_caller)
+UNWIND(.fnstart)
 	__ftrace_caller
+UNWIND(.fnend)
 ENDPROC(ftrace_caller)
 #endif
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
+UNWIND(.fnstart)
 	__ftrace_graph_caller
+UNWIND(.fnend)
 ENDPROC(ftrace_graph_caller)
 #endif
 
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index e0eb9a1cae77..8bac553fe213 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -267,7 +267,7 @@ __create_page_tables:
 	addne	r6, r6, #1 << SECTION_SHIFT
 	strne	r6, [r3]
 
-#if defined(CONFIG_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
+#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
 	sub	r4, r4, #4			@ Fixup page table pointer
 						@ for 64-bit descriptors
 #endif
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 96093b75ab90..1fd749ee4a1b 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -966,7 +966,7 @@ static void reset_ctrl_regs(void *unused)
 	}
 
 	if (err) {
-		pr_warning("CPU %d debug is powered down!\n", cpu);
+		pr_warn_once("CPU %d debug is powered down!\n", cpu);
 		cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
 		return;
 	}
@@ -987,7 +987,7 @@ clear_vcr:
 	isb();
 
 	if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
-		pr_warning("CPU %d failed to disable vector catch\n", cpu);
+		pr_warn_once("CPU %d failed to disable vector catch\n", cpu);
 		return;
 	}
 
@@ -1007,7 +1007,7 @@ clear_vcr:
 	}
 
 	if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
-		pr_warning("CPU %d failed to clear debug register pairs\n", cpu);
+		pr_warn_once("CPU %d failed to clear debug register pairs\n", cpu);
 		return;
 	}
 
@@ -1043,7 +1043,7 @@ static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata dbg_cpu_pm_nb = {
+static struct notifier_block dbg_cpu_pm_nb = {
 	.notifier_call = dbg_cpu_pm_notify,
 };
 
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 146157dfe27c..8c3094d0f7b7 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -253,7 +253,10 @@ validate_event(struct pmu_hw_events *hw_events,
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct pmu *leader_pmu = event->group_leader->pmu;
 
-	if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
+	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
+		return 1;
+
+	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
 		return 1;
 
 	return armpmu->get_event_idx(hw_events, event) >= 0;
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index bd6f56b9ec21..59d2adb764a9 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -45,12 +45,12 @@ static u32 notrace jiffy_sched_clock_read(void)
 
 static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
 
-static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
+static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
 {
 	return (cyc * mult) >> shift;
 }
 
-static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)
+static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask)
 {
 	u64 epoch_ns;
 	u32 epoch_cyc;
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 3f6cbb2e3eda..234e339196c0 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -56,7 +56,6 @@
 #include <asm/virt.h>
 
 #include "atags.h"
-#include "tcm.h"
 
 
 #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
@@ -353,6 +352,23 @@ void __init early_print(const char *str, ...)
 	printk("%s", buf);
 }
 
+static void __init cpuid_init_hwcaps(void)
+{
+	unsigned int divide_instrs;
+
+	if (cpu_architecture() < CPU_ARCH_ARMv7)
+		return;
+
+	divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;
+
+	switch (divide_instrs) {
+	case 2:
+		elf_hwcap |= HWCAP_IDIVA;
+	case 1:
+		elf_hwcap |= HWCAP_IDIVT;
+	}
+}
+
 static void __init feat_v6_fixup(void)
 {
 	int id = read_cpuid_id();
@@ -483,8 +499,11 @@ static void __init setup_processor(void)
 	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
 		 list->elf_name, ENDIANNESS);
 	elf_hwcap = list->elf_hwcap;
+
+	cpuid_init_hwcaps();
+
 #ifndef CONFIG_ARM_THUMB
-	elf_hwcap &= ~HWCAP_THUMB;
+	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
 #endif
 
 	feat_v6_fixup();
@@ -524,7 +543,7 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
 	size -= start & ~PAGE_MASK;
 	bank->start = PAGE_ALIGN(start);
 
-#ifndef CONFIG_LPAE
+#ifndef CONFIG_ARM_LPAE
 	if (bank->start + size < bank->start) {
 		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
 			"32-bit physical address space\n", (long long)start);
@@ -778,8 +797,6 @@ void __init setup_arch(char **cmdline_p)
 
 	reserve_crashkernel();
 
-	tcm_init();
-
 #ifdef CONFIG_MULTI_IRQ_HANDLER
 	handle_arch_irq = mdesc->handle_irq;
 #endif
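The cpuid_init_hwcaps() hunk above derives HWCAP_IDIVA/HWCAP_IDIVT from the ID_ISAR0 divide field; the switch intentionally falls through, since cores with ARM-state divide also provide the Thumb form. User space can key off the same bits via the auxiliary vector; a hedged sketch, assuming glibc's getauxval() and the ARM asm/hwcap.h constant names:

	#include <stdio.h>
	#include <sys/auxv.h>
	#include <asm/hwcap.h>		/* HWCAP_IDIVA / HWCAP_IDIVT on ARM */

	int main(void)
	{
		unsigned long hwcap = getauxval(AT_HWCAP);

		/* illustrative only: decide at run time whether sdiv/udiv are usable */
		printf("ARM-state integer divide:   %s\n",
		       (hwcap & HWCAP_IDIVA) ? "yes" : "no");
		printf("Thumb-state integer divide: %s\n",
		       (hwcap & HWCAP_IDIVT) ? "yes" : "no");
		return 0;
	}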
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 79078edbb9bc..1f2ccccaf009 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -673,9 +673,6 @@ static int cpufreq_callback(struct notifier_block *nb,
 	if (freq->flags & CPUFREQ_CONST_LOOPS)
 		return NOTIFY_OK;
 
-	if (arm_delay_ops.const_clock)
-		return NOTIFY_OK;
-
 	if (!per_cpu(l_p_j_ref, cpu)) {
 		per_cpu(l_p_j_ref, cpu) =
 			per_cpu(cpu_data, cpu).loops_per_jiffy;
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index bd0300531399..e82e1d248772 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -12,6 +12,7 @@
 
 #include <asm/smp_plat.h>
 #include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
 
 /**********************************************************************/
 
@@ -69,12 +70,72 @@ static inline void ipi_flush_bp_all(void *ignored)
 	local_flush_bp_all();
 }
 
+#ifdef CONFIG_ARM_ERRATA_798181
+static int erratum_a15_798181(void)
+{
+	unsigned int midr = read_cpuid_id();
+
+	/* Cortex-A15 r0p0..r3p2 affected */
+	if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
+		return 0;
+	return 1;
+}
+#else
+static int erratum_a15_798181(void)
+{
+	return 0;
+}
+#endif
+
+static void ipi_flush_tlb_a15_erratum(void *arg)
+{
+	dmb();
+}
+
+static void broadcast_tlb_a15_erratum(void)
+{
+	if (!erratum_a15_798181())
+		return;
+
+	dummy_flush_tlb_a15_erratum();
+	smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum,
+			       NULL, 1);
+}
+
+static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
+{
+	int cpu;
+	cpumask_t mask = { CPU_BITS_NONE };
+
+	if (!erratum_a15_798181())
+		return;
+
+	dummy_flush_tlb_a15_erratum();
+	for_each_online_cpu(cpu) {
+		if (cpu == smp_processor_id())
+			continue;
+		/*
+		 * We only need to send an IPI if the other CPUs are running
+		 * the same ASID as the one being invalidated. There is no
+		 * need for locking around the active_asids check since the
+		 * switch_mm() function has at least one dmb() (as required by
+		 * this workaround) in case a context switch happens on
+		 * another CPU after the condition below.
+		 */
+		if (atomic64_read(&mm->context.id) ==
+		    atomic64_read(&per_cpu(active_asids, cpu)))
+			cpumask_set_cpu(cpu, &mask);
+	}
+	smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
+}
+
 void flush_tlb_all(void)
 {
 	if (tlb_ops_need_broadcast())
 		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
 	else
 		local_flush_tlb_all();
+	broadcast_tlb_a15_erratum();
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
@@ -83,6 +144,7 @@ void flush_tlb_mm(struct mm_struct *mm)
 		on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
 	else
 		local_flush_tlb_mm(mm);
+	broadcast_tlb_mm_a15_erratum(mm);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
@@ -95,6 +157,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 					&ta, 1);
 	} else
 		local_flush_tlb_page(vma, uaddr);
+	broadcast_tlb_mm_a15_erratum(vma->vm_mm);
 }
 
 void flush_tlb_kernel_page(unsigned long kaddr)
@@ -105,6 +168,7 @@ void flush_tlb_kernel_page(unsigned long kaddr)
 		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
 	} else
 		local_flush_tlb_kernel_page(kaddr);
+	broadcast_tlb_a15_erratum();
 }
 
 void flush_tlb_range(struct vm_area_struct *vma,
@@ -119,6 +183,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
 					&ta, 1);
 	} else
 		local_flush_tlb_range(vma, start, end);
+	broadcast_tlb_mm_a15_erratum(vma->vm_mm);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
@@ -130,6 +195,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
 	} else
 		local_flush_tlb_kernel_range(start, end);
+	broadcast_tlb_a15_erratum();
 }
 
 void flush_bp_all(void)
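In erratum_a15_798181() above, the mask 0xff0ffff0 keeps the MIDR implementer, architecture and primary part number fields (0x410fc0f0 is an ARM Cortex-A15) while letting the variant/revision fields through for the r3p2 upper bound. A standalone sketch of that decoding, with invented helper names, for readability:

	#include <stdio.h>

	/* illustrative decode of a MIDR value, mirroring the kernel check above */
	static void decode_midr(unsigned int midr)
	{
		unsigned int implementer = (midr >> 24) & 0xff;  /* 0x41 == 'A' (ARM) */
		unsigned int variant     = (midr >> 20) & 0xf;   /* the 'rX' in rXpY */
		unsigned int partnum     = (midr >>  4) & 0xfff; /* 0xc0f == Cortex-A15 */
		unsigned int revision    =  midr        & 0xf;   /* the 'pY' in rXpY */

		int affected = ((midr & 0xff0ffff0) == 0x410fc0f0) && midr <= 0x413fc0f2;

		printf("impl 0x%x part 0x%x r%up%u -> %saffected by 798181\n",
		       implementer, partnum, variant, revision, affected ? "" : "not ");
	}

	int main(void)
	{
		decode_midr(0x412fc0f2);	/* e.g. a Cortex-A15 r2p2 */
		return 0;
	}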
diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
index 30ae6bb4a310..f50f19e5c138 100644
--- a/arch/arm/kernel/tcm.c
+++ b/arch/arm/kernel/tcm.c
@@ -17,7 +17,6 @@
 #include <asm/mach/map.h>
 #include <asm/memory.h>
 #include <asm/system_info.h>
-#include "tcm.h"
 
 static struct gen_pool *tcm_pool;
 static bool dtcm_present;
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 5a936988eb24..c1fe498983ac 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c | |||
@@ -201,6 +201,7 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
201 | break; | 201 | break; |
202 | case KVM_CAP_ARM_SET_DEVICE_ADDR: | 202 | case KVM_CAP_ARM_SET_DEVICE_ADDR: |
203 | r = 1; | 203 | r = 1; |
204 | break; | ||
204 | case KVM_CAP_NR_VCPUS: | 205 | case KVM_CAP_NR_VCPUS: |
205 | r = num_online_cpus(); | 206 | r = num_online_cpus(); |
206 | break; | 207 | break; |
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 4ea9a982269c..7bed7556077a 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c | |||
@@ -79,11 +79,11 @@ static bool access_dcsw(struct kvm_vcpu *vcpu, | |||
79 | u32 val; | 79 | u32 val; |
80 | int cpu; | 80 | int cpu; |
81 | 81 | ||
82 | cpu = get_cpu(); | ||
83 | |||
84 | if (!p->is_write) | 82 | if (!p->is_write) |
85 | return read_from_write_only(vcpu, p); | 83 | return read_from_write_only(vcpu, p); |
86 | 84 | ||
85 | cpu = get_cpu(); | ||
86 | |||
87 | cpumask_setall(&vcpu->arch.require_dcache_flush); | 87 | cpumask_setall(&vcpu->arch.require_dcache_flush); |
88 | cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush); | 88 | cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush); |
89 | 89 | ||
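The reordering above is about preemption accounting: get_cpu() disables preemption and must be paired with put_cpu(), so taking the early return for a read of this write-only register while preemption was already disabled would leak a preempt count. A minimal sketch of the safe shape; the real access_dcsw() takes more parameters, and this reduced signature is only illustrative:

static bool access_dcsw_sketch(struct kvm_vcpu *vcpu,
                               const struct coproc_params *p)
{
        int cpu;

        if (!p->is_write)
                return read_from_write_only(vcpu, p);   /* nothing held yet */

        cpu = get_cpu();        /* preemption disabled from here on */
        cpumask_setall(&vcpu->arch.require_dcache_flush);
        cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
        put_cpu();              /* always reached on the write path */

        return true;
}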
diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c index c9a17316e9fe..0e4cfe123b38 100644 --- a/arch/arm/kvm/vgic.c +++ b/arch/arm/kvm/vgic.c | |||
@@ -883,8 +883,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) | |||
883 | lr, irq, vgic_cpu->vgic_lr[lr]); | 883 | lr, irq, vgic_cpu->vgic_lr[lr]); |
884 | BUG_ON(!test_bit(lr, vgic_cpu->lr_used)); | 884 | BUG_ON(!test_bit(lr, vgic_cpu->lr_used)); |
885 | vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT; | 885 | vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT; |
886 | 886 | return true; | |
887 | goto out; | ||
888 | } | 887 | } |
889 | 888 | ||
890 | /* Try to use another LR for this interrupt */ | 889 | /* Try to use another LR for this interrupt */ |
@@ -898,7 +897,6 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) | |||
898 | vgic_cpu->vgic_irq_lr_map[irq] = lr; | 897 | vgic_cpu->vgic_irq_lr_map[irq] = lr; |
899 | set_bit(lr, vgic_cpu->lr_used); | 898 | set_bit(lr, vgic_cpu->lr_used); |
900 | 899 | ||
901 | out: | ||
902 | if (!vgic_irq_is_edge(vcpu, irq)) | 900 | if (!vgic_irq_is_edge(vcpu, irq)) |
903 | vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI; | 901 | vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI; |
904 | 902 | ||
@@ -1018,21 +1016,6 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) | |||
1018 | 1016 | ||
1019 | kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr); | 1017 | kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr); |
1020 | 1018 | ||
1021 | /* | ||
1022 | * We do not need to take the distributor lock here, since the only | ||
1023 | * action we perform is clearing the irq_active_bit for an EOIed | ||
1024 | * level interrupt. There is a potential race with | ||
1025 | * the queuing of an interrupt in __kvm_vgic_flush_hwstate(), where we | ||
1026 | * check if the interrupt is already active. Two possibilities: | ||
1027 | * | ||
1028 | * - The queuing is occurring on the same vcpu: cannot happen, | ||
1029 | * as we're already in the context of this vcpu, and | ||
1030 | * executing the handler | ||
1031 | * - The interrupt has been migrated to another vcpu, and we | ||
1032 | * ignore this interrupt for this run. Big deal. It is still | ||
1033 | * pending though, and will get considered when this vcpu | ||
1034 | * exits. | ||
1035 | */ | ||
1036 | if (vgic_cpu->vgic_misr & GICH_MISR_EOI) { | 1019 | if (vgic_cpu->vgic_misr & GICH_MISR_EOI) { |
1037 | /* | 1020 | /* |
1038 | * Some level interrupts have been EOIed. Clear their | 1021 | * Some level interrupts have been EOIed. Clear their |
@@ -1054,6 +1037,13 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) | |||
1054 | } else { | 1037 | } else { |
1055 | vgic_cpu_irq_clear(vcpu, irq); | 1038 | vgic_cpu_irq_clear(vcpu, irq); |
1056 | } | 1039 | } |
1040 | |||
1041 | /* | ||
1042 | * Despite being EOIed, the LR may not have | ||
1043 | * been marked as empty. | ||
1044 | */ | ||
1045 | set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr); | ||
1046 | vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT; | ||
1057 | } | 1047 | } |
1058 | } | 1048 | } |
1059 | 1049 | ||
@@ -1064,9 +1054,8 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) | |||
1064 | } | 1054 | } |
1065 | 1055 | ||
1066 | /* | 1056 | /* |
1067 | * Sync back the VGIC state after a guest run. We do not really touch | 1057 | * Sync back the VGIC state after a guest run. The distributor lock is |
1068 | * the distributor here (the irq_pending_on_cpu bit is safe to set), | 1058 | * needed so we don't get preempted in the middle of the state processing. |
1069 | * so there is no need for taking its lock. | ||
1070 | */ | 1059 | */ |
1071 | static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) | 1060 | static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) |
1072 | { | 1061 | { |
@@ -1112,10 +1101,14 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) | |||
1112 | 1101 | ||
1113 | void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) | 1102 | void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) |
1114 | { | 1103 | { |
1104 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
1105 | |||
1115 | if (!irqchip_in_kernel(vcpu->kvm)) | 1106 | if (!irqchip_in_kernel(vcpu->kvm)) |
1116 | return; | 1107 | return; |
1117 | 1108 | ||
1109 | spin_lock(&dist->lock); | ||
1118 | __kvm_vgic_sync_hwstate(vcpu); | 1110 | __kvm_vgic_sync_hwstate(vcpu); |
1111 | spin_unlock(&dist->lock); | ||
1119 | } | 1112 | } |
1120 | 1113 | ||
1121 | int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) | 1114 | int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) |
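The functional change in this file is that the sync-back path now runs under the distributor lock, so clearing the active bit and marking list registers empty cannot race with an interrupt being flushed for the same vcpu state from elsewhere. The resulting entry point, condensed from the hunks:

void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

        if (!irqchip_in_kernel(vcpu->kvm))
                return;

        spin_lock(&dist->lock);
        __kvm_vgic_sync_hwstate(vcpu);  /* LR/ELRSR bookkeeping under the lock */
        spin_unlock(&dist->lock);
}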
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c index 6b93f6a1a3c7..64dbfa57204a 100644 --- a/arch/arm/lib/delay.c +++ b/arch/arm/lib/delay.c | |||
@@ -58,7 +58,7 @@ static void __timer_delay(unsigned long cycles) | |||
58 | static void __timer_const_udelay(unsigned long xloops) | 58 | static void __timer_const_udelay(unsigned long xloops) |
59 | { | 59 | { |
60 | unsigned long long loops = xloops; | 60 | unsigned long long loops = xloops; |
61 | loops *= loops_per_jiffy; | 61 | loops *= arm_delay_ops.ticks_per_jiffy; |
62 | __timer_delay(loops >> UDELAY_SHIFT); | 62 | __timer_delay(loops >> UDELAY_SHIFT); |
63 | } | 63 | } |
64 | 64 | ||
@@ -73,11 +73,13 @@ void __init register_current_timer_delay(const struct delay_timer *timer) | |||
73 | pr_info("Switching to timer-based delay loop\n"); | 73 | pr_info("Switching to timer-based delay loop\n"); |
74 | delay_timer = timer; | 74 | delay_timer = timer; |
75 | lpj_fine = timer->freq / HZ; | 75 | lpj_fine = timer->freq / HZ; |
76 | loops_per_jiffy = lpj_fine; | 76 | |
77 | /* cpufreq may scale loops_per_jiffy, so keep a private copy */ | ||
78 | arm_delay_ops.ticks_per_jiffy = lpj_fine; | ||
77 | arm_delay_ops.delay = __timer_delay; | 79 | arm_delay_ops.delay = __timer_delay; |
78 | arm_delay_ops.const_udelay = __timer_const_udelay; | 80 | arm_delay_ops.const_udelay = __timer_const_udelay; |
79 | arm_delay_ops.udelay = __timer_udelay; | 81 | arm_delay_ops.udelay = __timer_udelay; |
80 | arm_delay_ops.const_clock = true; | 82 | |
81 | delay_calibrated = true; | 83 | delay_calibrated = true; |
82 | } else { | 84 | } else { |
83 | pr_info("Ignoring duplicate/late registration of read_current_timer delay\n"); | 85 | pr_info("Ignoring duplicate/late registration of read_current_timer delay\n"); |
diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c index e698f26cc0cb..52e4bb5cf12d 100644 --- a/arch/arm/mach-cns3xxx/core.c +++ b/arch/arm/mach-cns3xxx/core.c | |||
@@ -22,19 +22,9 @@ | |||
22 | 22 | ||
23 | static struct map_desc cns3xxx_io_desc[] __initdata = { | 23 | static struct map_desc cns3xxx_io_desc[] __initdata = { |
24 | { | 24 | { |
25 | .virtual = CNS3XXX_TC11MP_TWD_BASE_VIRT, | 25 | .virtual = CNS3XXX_TC11MP_SCU_BASE_VIRT, |
26 | .pfn = __phys_to_pfn(CNS3XXX_TC11MP_TWD_BASE), | 26 | .pfn = __phys_to_pfn(CNS3XXX_TC11MP_SCU_BASE), |
27 | .length = SZ_4K, | 27 | .length = SZ_8K, |
28 | .type = MT_DEVICE, | ||
29 | }, { | ||
30 | .virtual = CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT, | ||
31 | .pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_CPU_BASE), | ||
32 | .length = SZ_4K, | ||
33 | .type = MT_DEVICE, | ||
34 | }, { | ||
35 | .virtual = CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT, | ||
36 | .pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_DIST_BASE), | ||
37 | .length = SZ_4K, | ||
38 | .type = MT_DEVICE, | 28 | .type = MT_DEVICE, |
39 | }, { | 29 | }, { |
40 | .virtual = CNS3XXX_TIMER1_2_3_BASE_VIRT, | 30 | .virtual = CNS3XXX_TIMER1_2_3_BASE_VIRT, |
diff --git a/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h b/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h index 191c8e57f289..b1021aafa481 100644 --- a/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h +++ b/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h | |||
@@ -94,10 +94,10 @@ | |||
94 | #define RTC_INTR_STS_OFFSET 0x34 | 94 | #define RTC_INTR_STS_OFFSET 0x34 |
95 | 95 | ||
96 | #define CNS3XXX_MISC_BASE 0x76000000 /* Misc Control */ | 96 | #define CNS3XXX_MISC_BASE 0x76000000 /* Misc Control */ |
97 | #define CNS3XXX_MISC_BASE_VIRT 0xFFF07000 /* Misc Control */ | 97 | #define CNS3XXX_MISC_BASE_VIRT 0xFB000000 /* Misc Control */ |
98 | 98 | ||
99 | #define CNS3XXX_PM_BASE 0x77000000 /* Power Management Control */ | 99 | #define CNS3XXX_PM_BASE 0x77000000 /* Power Management Control */ |
100 | #define CNS3XXX_PM_BASE_VIRT 0xFFF08000 | 100 | #define CNS3XXX_PM_BASE_VIRT 0xFB001000 |
101 | 101 | ||
102 | #define PM_CLK_GATE_OFFSET 0x00 | 102 | #define PM_CLK_GATE_OFFSET 0x00 |
103 | #define PM_SOFT_RST_OFFSET 0x04 | 103 | #define PM_SOFT_RST_OFFSET 0x04 |
@@ -109,7 +109,7 @@ | |||
109 | #define PM_PLL_HM_PD_OFFSET 0x1C | 109 | #define PM_PLL_HM_PD_OFFSET 0x1C |
110 | 110 | ||
111 | #define CNS3XXX_UART0_BASE 0x78000000 /* UART 0 */ | 111 | #define CNS3XXX_UART0_BASE 0x78000000 /* UART 0 */ |
112 | #define CNS3XXX_UART0_BASE_VIRT 0xFFF09000 | 112 | #define CNS3XXX_UART0_BASE_VIRT 0xFB002000 |
113 | 113 | ||
114 | #define CNS3XXX_UART1_BASE 0x78400000 /* UART 1 */ | 114 | #define CNS3XXX_UART1_BASE 0x78400000 /* UART 1 */ |
115 | #define CNS3XXX_UART1_BASE_VIRT 0xFFF0A000 | 115 | #define CNS3XXX_UART1_BASE_VIRT 0xFFF0A000 |
@@ -130,7 +130,7 @@ | |||
130 | #define CNS3XXX_I2S_BASE_VIRT 0xFFF10000 | 130 | #define CNS3XXX_I2S_BASE_VIRT 0xFFF10000 |
131 | 131 | ||
132 | #define CNS3XXX_TIMER1_2_3_BASE 0x7C800000 /* Timer */ | 132 | #define CNS3XXX_TIMER1_2_3_BASE 0x7C800000 /* Timer */ |
133 | #define CNS3XXX_TIMER1_2_3_BASE_VIRT 0xFFF10800 | 133 | #define CNS3XXX_TIMER1_2_3_BASE_VIRT 0xFB003000 |
134 | 134 | ||
135 | #define TIMER1_COUNTER_OFFSET 0x00 | 135 | #define TIMER1_COUNTER_OFFSET 0x00 |
136 | #define TIMER1_AUTO_RELOAD_OFFSET 0x04 | 136 | #define TIMER1_AUTO_RELOAD_OFFSET 0x04 |
@@ -227,16 +227,16 @@ | |||
227 | * Testchip peripheral and fpga gic regions | 227 | * Testchip peripheral and fpga gic regions |
228 | */ | 228 | */ |
229 | #define CNS3XXX_TC11MP_SCU_BASE 0x90000000 /* IRQ, Test chip */ | 229 | #define CNS3XXX_TC11MP_SCU_BASE 0x90000000 /* IRQ, Test chip */ |
230 | #define CNS3XXX_TC11MP_SCU_BASE_VIRT 0xFF000000 | 230 | #define CNS3XXX_TC11MP_SCU_BASE_VIRT 0xFB004000 |
231 | 231 | ||
232 | #define CNS3XXX_TC11MP_GIC_CPU_BASE 0x90000100 /* Test chip interrupt controller CPU interface */ | 232 | #define CNS3XXX_TC11MP_GIC_CPU_BASE 0x90000100 /* Test chip interrupt controller CPU interface */ |
233 | #define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT 0xFF000100 | 233 | #define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x100) |
234 | 234 | ||
235 | #define CNS3XXX_TC11MP_TWD_BASE 0x90000600 | 235 | #define CNS3XXX_TC11MP_TWD_BASE 0x90000600 |
236 | #define CNS3XXX_TC11MP_TWD_BASE_VIRT 0xFF000600 | 236 | #define CNS3XXX_TC11MP_TWD_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x600) |
237 | 237 | ||
238 | #define CNS3XXX_TC11MP_GIC_DIST_BASE 0x90001000 /* Test chip interrupt controller distributor */ | 238 | #define CNS3XXX_TC11MP_GIC_DIST_BASE 0x90001000 /* Test chip interrupt controller distributor */ |
239 | #define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT 0xFF001000 | 239 | #define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x1000) |
240 | 240 | ||
241 | #define CNS3XXX_TC11MP_L220_BASE 0x92002000 /* L220 registers */ | 241 | #define CNS3XXX_TC11MP_L220_BASE 0x92002000 /* L220 registers */ |
242 | #define CNS3XXX_TC11MP_L220_BASE_VIRT 0xFF002000 | 242 | #define CNS3XXX_TC11MP_L220_BASE_VIRT 0xFF002000 |
diff --git a/arch/arm/mach-ep93xx/include/mach/uncompress.h b/arch/arm/mach-ep93xx/include/mach/uncompress.h index d2afb4dd82ab..b5cc77d2380b 100644 --- a/arch/arm/mach-ep93xx/include/mach/uncompress.h +++ b/arch/arm/mach-ep93xx/include/mach/uncompress.h | |||
@@ -47,9 +47,13 @@ static void __raw_writel(unsigned int value, unsigned int ptr) | |||
47 | 47 | ||
48 | static inline void putc(int c) | 48 | static inline void putc(int c) |
49 | { | 49 | { |
50 | /* Transmit fifo not full? */ | 50 | int i; |
51 | while (__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF) | 51 | |
52 | ; | 52 | for (i = 0; i < 10000; i++) { |
53 | /* Transmit fifo not full? */ | ||
54 | if (!(__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF)) | ||
55 | break; | ||
56 | } | ||
53 | 57 | ||
54 | __raw_writeb(c, PHYS_UART_DATA); | 58 | __raw_writeb(c, PHYS_UART_DATA); |
55 | } | 59 | } |
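The rewritten putc() above trades a potentially unbounded busy-wait for a bounded one: if the UART transmit FIFO never reports space, for example because the boot loader left the port in a bad state, the decompressor now gives up after a fixed number of polls and writes the character anyway instead of hanging silently. A generic form of that pattern, as a hypothetical helper that is not part of the patch:

/* Poll 'tries' times for 'bit' to clear in the byte register at 'reg';
 * return 0 on success, -1 if the bit never cleared. Relies on the
 * __raw_readb() accessor already defined in this header. */
static inline int poll_bit_clear(unsigned int reg, unsigned int bit, int tries)
{
        while (tries--) {
                if (!(__raw_readb(reg) & bit))
                        return 0;
        }
        return -1;      /* timed out; the caller may proceed regardless */
}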
diff --git a/arch/arm/mach-highbank/hotplug.c b/arch/arm/mach-highbank/hotplug.c index f30c52843396..890cae23c12a 100644 --- a/arch/arm/mach-highbank/hotplug.c +++ b/arch/arm/mach-highbank/hotplug.c | |||
@@ -28,13 +28,11 @@ extern void secondary_startup(void); | |||
28 | */ | 28 | */ |
29 | void __ref highbank_cpu_die(unsigned int cpu) | 29 | void __ref highbank_cpu_die(unsigned int cpu) |
30 | { | 30 | { |
31 | flush_cache_all(); | ||
32 | |||
33 | highbank_set_cpu_jump(cpu, phys_to_virt(0)); | 31 | highbank_set_cpu_jump(cpu, phys_to_virt(0)); |
34 | highbank_set_core_pwr(); | ||
35 | 32 | ||
36 | cpu_do_idle(); | 33 | flush_cache_louis(); |
34 | highbank_set_core_pwr(); | ||
37 | 35 | ||
38 | /* We should never return from idle */ | 36 | while (1) |
39 | panic("highbank: cpu %d unexpectedly exit from shutdown\n", cpu); | 37 | cpu_do_idle(); |
40 | } | 38 | } |
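The reordering in highbank_cpu_die() matters for two reasons: the cache flush now happens after the last write the dying CPU makes (the jump address), and it uses flush_cache_louis() so only this CPU's levels of cache are cleaned rather than the whole hierarchy. The WFI is also retried in a loop because the power-down completes asynchronously and the first wake-up may not be the final one. The function as it reads after the patch:

void __ref highbank_cpu_die(unsigned int cpu)
{
        highbank_set_cpu_jump(cpu, phys_to_virt(0));

        flush_cache_louis();            /* clean only this CPU's cache levels */
        highbank_set_core_pwr();

        while (1)
                cpu_do_idle();          /* stay in WFI until power is cut */
}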
diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c index e13a8fa5e62c..2193c834f55c 100644 --- a/arch/arm/mach-imx/clk-imx35.c +++ b/arch/arm/mach-imx/clk-imx35.c | |||
@@ -257,6 +257,7 @@ int __init mx35_clocks_init(void) | |||
257 | clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0"); | 257 | clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0"); |
258 | clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0"); | 258 | clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0"); |
259 | clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0"); | 259 | clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0"); |
260 | clk_register_clkdev(clk[admux_gate], "audmux", NULL); | ||
260 | 261 | ||
261 | clk_prepare_enable(clk[spba_gate]); | 262 | clk_prepare_enable(clk[spba_gate]); |
262 | clk_prepare_enable(clk[gpio1_gate]); | 263 | clk_prepare_enable(clk[gpio1_gate]); |
@@ -265,6 +266,7 @@ int __init mx35_clocks_init(void) | |||
265 | clk_prepare_enable(clk[iim_gate]); | 266 | clk_prepare_enable(clk[iim_gate]); |
266 | clk_prepare_enable(clk[emi_gate]); | 267 | clk_prepare_enable(clk[emi_gate]); |
267 | clk_prepare_enable(clk[max_gate]); | 268 | clk_prepare_enable(clk[max_gate]); |
269 | clk_prepare_enable(clk[iomuxc_gate]); | ||
268 | 270 | ||
269 | /* | 271 | /* |
270 | * SCC is needed to boot via mmc after a watchdog reset. The clock code | 272 | * SCC is needed to boot via mmc after a watchdog reset. The clock code |
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c index 2f9ff93a4e61..d38e54f5b6d7 100644 --- a/arch/arm/mach-imx/clk-imx6q.c +++ b/arch/arm/mach-imx/clk-imx6q.c | |||
@@ -115,7 +115,7 @@ static const char *gpu2d_core_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd0_352m" | |||
115 | static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", }; | 115 | static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", }; |
116 | static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", }; | 116 | static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", }; |
117 | static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", }; | 117 | static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", }; |
118 | static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_pfd1_540m", }; | 118 | static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", }; |
119 | static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", }; | 119 | static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", }; |
120 | static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; | 120 | static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; |
121 | static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; | 121 | static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; |
@@ -443,7 +443,6 @@ int __init mx6q_clocks_init(void) | |||
443 | 443 | ||
444 | clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0"); | 444 | clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0"); |
445 | clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0"); | 445 | clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0"); |
446 | clk_register_clkdev(clk[twd], NULL, "smp_twd"); | ||
447 | clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL); | 446 | clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL); |
448 | clk_register_clkdev(clk[ahb], "ahb", NULL); | 447 | clk_register_clkdev(clk[ahb], "ahb", NULL); |
449 | clk_register_clkdev(clk[cko1], "cko1", NULL); | 448 | clk_register_clkdev(clk[cko1], "cko1", NULL); |
diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h index 5a800bfcec5b..5bf4a97ab241 100644 --- a/arch/arm/mach-imx/common.h +++ b/arch/arm/mach-imx/common.h | |||
@@ -110,6 +110,8 @@ void tzic_handle_irq(struct pt_regs *); | |||
110 | 110 | ||
111 | extern void imx_enable_cpu(int cpu, bool enable); | 111 | extern void imx_enable_cpu(int cpu, bool enable); |
112 | extern void imx_set_cpu_jump(int cpu, void *jump_addr); | 112 | extern void imx_set_cpu_jump(int cpu, void *jump_addr); |
113 | extern u32 imx_get_cpu_arg(int cpu); | ||
114 | extern void imx_set_cpu_arg(int cpu, u32 arg); | ||
113 | extern void v7_cpu_resume(void); | 115 | extern void v7_cpu_resume(void); |
114 | extern u32 *pl310_get_save_ptr(void); | 116 | extern u32 *pl310_get_save_ptr(void); |
115 | #ifdef CONFIG_SMP | 117 | #ifdef CONFIG_SMP |
diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c index 7bc5fe15dda2..361a253e2b63 100644 --- a/arch/arm/mach-imx/hotplug.c +++ b/arch/arm/mach-imx/hotplug.c | |||
@@ -46,11 +46,23 @@ static inline void cpu_enter_lowpower(void) | |||
46 | void imx_cpu_die(unsigned int cpu) | 46 | void imx_cpu_die(unsigned int cpu) |
47 | { | 47 | { |
48 | cpu_enter_lowpower(); | 48 | cpu_enter_lowpower(); |
49 | /* | ||
50 | * We use the cpu jumping argument register to sync with | ||
51 | * imx_cpu_kill(), which runs on cpu0 and waits for this | ||
52 | * register to be set before it kills the cpu. | ||
53 | */ | ||
54 | imx_set_cpu_arg(cpu, ~0); | ||
49 | cpu_do_idle(); | 55 | cpu_do_idle(); |
50 | } | 56 | } |
51 | 57 | ||
52 | int imx_cpu_kill(unsigned int cpu) | 58 | int imx_cpu_kill(unsigned int cpu) |
53 | { | 59 | { |
60 | unsigned long timeout = jiffies + msecs_to_jiffies(50); | ||
61 | |||
62 | while (imx_get_cpu_arg(cpu) == 0) | ||
63 | if (time_after(jiffies, timeout)) | ||
64 | return 0; | ||
54 | imx_enable_cpu(cpu, false); | 65 | imx_enable_cpu(cpu, false); |
66 | imx_set_cpu_arg(cpu, 0); | ||
55 | return 1; | 67 | return 1; |
56 | } | 68 | } |
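Both imx hunks implement a small handshake over the per-CPU SRC "jump argument" scratch register: the dying CPU flags itself once it has entered low-power state, and cpu0 waits, with a 50 ms bail-out, for that flag before gating the core and re-arming the register. The kill side as it reads after the patch:

int imx_cpu_kill(unsigned int cpu)
{
        unsigned long timeout = jiffies + msecs_to_jiffies(50);

        /* wait until imx_cpu_die() has written ~0 into the arg register */
        while (imx_get_cpu_arg(cpu) == 0)
                if (time_after(jiffies, timeout))
                        return 0;       /* give up; the CPU was not parked */
        imx_enable_cpu(cpu, false);     /* now safe to power-gate the core */
        imx_set_cpu_arg(cpu, 0);        /* clear for the next hotplug cycle */
        return 1;
}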
diff --git a/arch/arm/mach-imx/src.c b/arch/arm/mach-imx/src.c index e15f1555c59b..09a742f8c7ab 100644 --- a/arch/arm/mach-imx/src.c +++ b/arch/arm/mach-imx/src.c | |||
@@ -43,6 +43,18 @@ void imx_set_cpu_jump(int cpu, void *jump_addr) | |||
43 | src_base + SRC_GPR1 + cpu * 8); | 43 | src_base + SRC_GPR1 + cpu * 8); |
44 | } | 44 | } |
45 | 45 | ||
46 | u32 imx_get_cpu_arg(int cpu) | ||
47 | { | ||
48 | cpu = cpu_logical_map(cpu); | ||
49 | return readl_relaxed(src_base + SRC_GPR1 + cpu * 8 + 4); | ||
50 | } | ||
51 | |||
52 | void imx_set_cpu_arg(int cpu, u32 arg) | ||
53 | { | ||
54 | cpu = cpu_logical_map(cpu); | ||
55 | writel_relaxed(arg, src_base + SRC_GPR1 + cpu * 8 + 4); | ||
56 | } | ||
57 | |||
46 | void imx_src_prepare_restart(void) | 58 | void imx_src_prepare_restart(void) |
47 | { | 59 | { |
48 | u32 val; | 60 | u32 val; |
diff --git a/arch/arm/mach-kirkwood/board-iomega_ix2_200.c b/arch/arm/mach-kirkwood/board-iomega_ix2_200.c index f655b2637b0e..e5f70415905a 100644 --- a/arch/arm/mach-kirkwood/board-iomega_ix2_200.c +++ b/arch/arm/mach-kirkwood/board-iomega_ix2_200.c | |||
@@ -20,10 +20,15 @@ static struct mv643xx_eth_platform_data iomega_ix2_200_ge00_data = { | |||
20 | .duplex = DUPLEX_FULL, | 20 | .duplex = DUPLEX_FULL, |
21 | }; | 21 | }; |
22 | 22 | ||
23 | static struct mv643xx_eth_platform_data iomega_ix2_200_ge01_data = { | ||
24 | .phy_addr = MV643XX_ETH_PHY_ADDR(11), | ||
25 | }; | ||
26 | |||
23 | void __init iomega_ix2_200_init(void) | 27 | void __init iomega_ix2_200_init(void) |
24 | { | 28 | { |
25 | /* | 29 | /* |
26 | * Basic setup. Needs to be called early. | 30 | * Basic setup. Needs to be called early. |
27 | */ | 31 | */ |
28 | kirkwood_ge01_init(&iomega_ix2_200_ge00_data); | 32 | kirkwood_ge00_init(&iomega_ix2_200_ge00_data); |
33 | kirkwood_ge01_init(&iomega_ix2_200_ge01_data); | ||
29 | } | 34 | } |
diff --git a/arch/arm/mach-kirkwood/guruplug-setup.c b/arch/arm/mach-kirkwood/guruplug-setup.c index 1c6e736cbbf8..08dd739aa709 100644 --- a/arch/arm/mach-kirkwood/guruplug-setup.c +++ b/arch/arm/mach-kirkwood/guruplug-setup.c | |||
@@ -53,6 +53,8 @@ static struct mv_sata_platform_data guruplug_sata_data = { | |||
53 | 53 | ||
54 | static struct mvsdio_platform_data guruplug_mvsdio_data = { | 54 | static struct mvsdio_platform_data guruplug_mvsdio_data = { |
55 | /* unfortunately the CD signal has not been connected */ | 55 | /* unfortunately the CD signal has not been connected */ |
56 | .gpio_card_detect = -1, | ||
57 | .gpio_write_protect = -1, | ||
56 | }; | 58 | }; |
57 | 59 | ||
58 | static struct gpio_led guruplug_led_pins[] = { | 60 | static struct gpio_led guruplug_led_pins[] = { |
diff --git a/arch/arm/mach-kirkwood/openrd-setup.c b/arch/arm/mach-kirkwood/openrd-setup.c index 8ddd69fdc937..6a6eb548307d 100644 --- a/arch/arm/mach-kirkwood/openrd-setup.c +++ b/arch/arm/mach-kirkwood/openrd-setup.c | |||
@@ -55,6 +55,7 @@ static struct mv_sata_platform_data openrd_sata_data = { | |||
55 | 55 | ||
56 | static struct mvsdio_platform_data openrd_mvsdio_data = { | 56 | static struct mvsdio_platform_data openrd_mvsdio_data = { |
57 | .gpio_card_detect = 29, /* MPP29 used as SD card detect */ | 57 | .gpio_card_detect = 29, /* MPP29 used as SD card detect */ |
58 | .gpio_write_protect = -1, | ||
58 | }; | 59 | }; |
59 | 60 | ||
60 | static unsigned int openrd_mpp_config[] __initdata = { | 61 | static unsigned int openrd_mpp_config[] __initdata = { |
diff --git a/arch/arm/mach-kirkwood/rd88f6281-setup.c b/arch/arm/mach-kirkwood/rd88f6281-setup.c index c7d93b48926b..d24223166e06 100644 --- a/arch/arm/mach-kirkwood/rd88f6281-setup.c +++ b/arch/arm/mach-kirkwood/rd88f6281-setup.c | |||
@@ -69,6 +69,7 @@ static struct mv_sata_platform_data rd88f6281_sata_data = { | |||
69 | 69 | ||
70 | static struct mvsdio_platform_data rd88f6281_mvsdio_data = { | 70 | static struct mvsdio_platform_data rd88f6281_mvsdio_data = { |
71 | .gpio_card_detect = 28, | 71 | .gpio_card_detect = 28, |
72 | .gpio_write_protect = -1, | ||
72 | }; | 73 | }; |
73 | 74 | ||
74 | static unsigned int rd88f6281_mpp_config[] __initdata = { | 75 | static unsigned int rd88f6281_mpp_config[] __initdata = { |
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c index 2969027f02fa..f9fd77e8f1f5 100644 --- a/arch/arm/mach-msm/timer.c +++ b/arch/arm/mach-msm/timer.c | |||
@@ -62,7 +62,10 @@ static int msm_timer_set_next_event(unsigned long cycles, | |||
62 | { | 62 | { |
63 | u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE); | 63 | u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE); |
64 | 64 | ||
65 | writel_relaxed(0, event_base + TIMER_CLEAR); | 65 | ctrl &= ~TIMER_ENABLE_EN; |
66 | writel_relaxed(ctrl, event_base + TIMER_ENABLE); | ||
67 | |||
68 | writel_relaxed(ctrl, event_base + TIMER_CLEAR); | ||
66 | writel_relaxed(cycles, event_base + TIMER_MATCH_VAL); | 69 | writel_relaxed(cycles, event_base + TIMER_MATCH_VAL); |
67 | writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE); | 70 | writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE); |
68 | return 0; | 71 | return 0; |
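The ordering above stops the timer before reprogramming it: clearing TIMER_ENABLE_EN first means the CLEAR and MATCH_VAL writes land while the counter is halted, so a stale match cannot fire in the window between them. The resulting function, condensed; the second parameter is not shown in the hunk and is assumed to be the usual clock_event_device pointer:

static int msm_timer_set_next_event(unsigned long cycles,
                                    struct clock_event_device *evt)
{
        u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);

        ctrl &= ~TIMER_ENABLE_EN;
        writel_relaxed(ctrl, event_base + TIMER_ENABLE);        /* halt */

        writel_relaxed(ctrl, event_base + TIMER_CLEAR);
        writel_relaxed(cycles, event_base + TIMER_MATCH_VAL);
        writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE);
        return 0;
}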
diff --git a/arch/arm/mach-mvebu/irq-armada-370-xp.c b/arch/arm/mach-mvebu/irq-armada-370-xp.c index 274ff58271de..d5970f5a1e8d 100644 --- a/arch/arm/mach-mvebu/irq-armada-370-xp.c +++ b/arch/arm/mach-mvebu/irq-armada-370-xp.c | |||
@@ -44,6 +44,8 @@ | |||
44 | 44 | ||
45 | #define ARMADA_370_XP_MAX_PER_CPU_IRQS (28) | 45 | #define ARMADA_370_XP_MAX_PER_CPU_IRQS (28) |
46 | 46 | ||
47 | #define ARMADA_370_XP_TIMER0_PER_CPU_IRQ (5) | ||
48 | |||
47 | #define ACTIVE_DOORBELLS (8) | 49 | #define ACTIVE_DOORBELLS (8) |
48 | 50 | ||
49 | static DEFINE_RAW_SPINLOCK(irq_controller_lock); | 51 | static DEFINE_RAW_SPINLOCK(irq_controller_lock); |
@@ -59,36 +61,26 @@ static struct irq_domain *armada_370_xp_mpic_domain; | |||
59 | */ | 61 | */ |
60 | static void armada_370_xp_irq_mask(struct irq_data *d) | 62 | static void armada_370_xp_irq_mask(struct irq_data *d) |
61 | { | 63 | { |
62 | #ifdef CONFIG_SMP | ||
63 | irq_hw_number_t hwirq = irqd_to_hwirq(d); | 64 | irq_hw_number_t hwirq = irqd_to_hwirq(d); |
64 | 65 | ||
65 | if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS) | 66 | if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) |
66 | writel(hwirq, main_int_base + | 67 | writel(hwirq, main_int_base + |
67 | ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS); | 68 | ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS); |
68 | else | 69 | else |
69 | writel(hwirq, per_cpu_int_base + | 70 | writel(hwirq, per_cpu_int_base + |
70 | ARMADA_370_XP_INT_SET_MASK_OFFS); | 71 | ARMADA_370_XP_INT_SET_MASK_OFFS); |
71 | #else | ||
72 | writel(irqd_to_hwirq(d), | ||
73 | per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS); | ||
74 | #endif | ||
75 | } | 72 | } |
76 | 73 | ||
77 | static void armada_370_xp_irq_unmask(struct irq_data *d) | 74 | static void armada_370_xp_irq_unmask(struct irq_data *d) |
78 | { | 75 | { |
79 | #ifdef CONFIG_SMP | ||
80 | irq_hw_number_t hwirq = irqd_to_hwirq(d); | 76 | irq_hw_number_t hwirq = irqd_to_hwirq(d); |
81 | 77 | ||
82 | if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS) | 78 | if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) |
83 | writel(hwirq, main_int_base + | 79 | writel(hwirq, main_int_base + |
84 | ARMADA_370_XP_INT_SET_ENABLE_OFFS); | 80 | ARMADA_370_XP_INT_SET_ENABLE_OFFS); |
85 | else | 81 | else |
86 | writel(hwirq, per_cpu_int_base + | 82 | writel(hwirq, per_cpu_int_base + |
87 | ARMADA_370_XP_INT_CLEAR_MASK_OFFS); | 83 | ARMADA_370_XP_INT_CLEAR_MASK_OFFS); |
88 | #else | ||
89 | writel(irqd_to_hwirq(d), | ||
90 | per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); | ||
91 | #endif | ||
92 | } | 84 | } |
93 | 85 | ||
94 | #ifdef CONFIG_SMP | 86 | #ifdef CONFIG_SMP |
@@ -144,10 +136,14 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h, | |||
144 | unsigned int virq, irq_hw_number_t hw) | 136 | unsigned int virq, irq_hw_number_t hw) |
145 | { | 137 | { |
146 | armada_370_xp_irq_mask(irq_get_irq_data(virq)); | 138 | armada_370_xp_irq_mask(irq_get_irq_data(virq)); |
147 | writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS); | 139 | if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) |
140 | writel(hw, per_cpu_int_base + | ||
141 | ARMADA_370_XP_INT_CLEAR_MASK_OFFS); | ||
142 | else | ||
143 | writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS); | ||
148 | irq_set_status_flags(virq, IRQ_LEVEL); | 144 | irq_set_status_flags(virq, IRQ_LEVEL); |
149 | 145 | ||
150 | if (hw < ARMADA_370_XP_MAX_PER_CPU_IRQS) { | 146 | if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) { |
151 | irq_set_percpu_devid(virq); | 147 | irq_set_percpu_devid(virq); |
152 | irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip, | 148 | irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip, |
153 | handle_percpu_devid_irq); | 149 | handle_percpu_devid_irq); |
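With the #ifdef CONFIG_SMP blocks gone, the driver decides per interrupt which register bank to use: only the private Timer0 interrupt (hwirq 5) goes through the per-CPU mask/unmask registers, and everything else uses the global set/clear-enable registers, on UP and SMP alike. The mapping hook applies the same test to pick per-CPU handling; condensed from the hunk, with the non-per-CPU branch and the assumed final return elided into comments:

static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
                                      unsigned int virq, irq_hw_number_t hw)
{
        armada_370_xp_irq_mask(irq_get_irq_data(virq));
        if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
                writel(hw, per_cpu_int_base +
                                ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
        else
                writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
        irq_set_status_flags(virq, IRQ_LEVEL);

        if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) {
                irq_set_percpu_devid(virq);
                irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
                                         handle_percpu_devid_irq);
        }
        /* ... other interrupts keep the normal level IRQ handler ... */
        return 0;
}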
diff --git a/arch/arm/mach-omap1/clock_data.c b/arch/arm/mach-omap1/clock_data.c index cb7c6ae2e3fc..6c4f766365a2 100644 --- a/arch/arm/mach-omap1/clock_data.c +++ b/arch/arm/mach-omap1/clock_data.c | |||
@@ -543,15 +543,6 @@ static struct clk usb_dc_ck = { | |||
543 | /* Direct from ULPD, no parent */ | 543 | /* Direct from ULPD, no parent */ |
544 | .rate = 48000000, | 544 | .rate = 48000000, |
545 | .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG), | 545 | .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG), |
546 | .enable_bit = USB_REQ_EN_SHIFT, | ||
547 | }; | ||
548 | |||
549 | static struct clk usb_dc_ck7xx = { | ||
550 | .name = "usb_dc_ck", | ||
551 | .ops = &clkops_generic, | ||
552 | /* Direct from ULPD, no parent */ | ||
553 | .rate = 48000000, | ||
554 | .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG), | ||
555 | .enable_bit = SOFT_USB_OTG_DPLL_REQ_SHIFT, | 546 | .enable_bit = SOFT_USB_OTG_DPLL_REQ_SHIFT, |
556 | }; | 547 | }; |
557 | 548 | ||
@@ -727,8 +718,7 @@ static struct omap_clk omap_clks[] = { | |||
727 | CLK(NULL, "usb_clko", &usb_clko, CK_16XX | CK_1510 | CK_310), | 718 | CLK(NULL, "usb_clko", &usb_clko, CK_16XX | CK_1510 | CK_310), |
728 | CLK(NULL, "usb_hhc_ck", &usb_hhc_ck1510, CK_1510 | CK_310), | 719 | CLK(NULL, "usb_hhc_ck", &usb_hhc_ck1510, CK_1510 | CK_310), |
729 | CLK(NULL, "usb_hhc_ck", &usb_hhc_ck16xx, CK_16XX), | 720 | CLK(NULL, "usb_hhc_ck", &usb_hhc_ck16xx, CK_16XX), |
730 | CLK(NULL, "usb_dc_ck", &usb_dc_ck, CK_16XX), | 721 | CLK(NULL, "usb_dc_ck", &usb_dc_ck, CK_16XX | CK_7XX), |
731 | CLK(NULL, "usb_dc_ck", &usb_dc_ck7xx, CK_7XX), | ||
732 | CLK(NULL, "mclk", &mclk_1510, CK_1510 | CK_310), | 722 | CLK(NULL, "mclk", &mclk_1510, CK_1510 | CK_310), |
733 | CLK(NULL, "mclk", &mclk_16xx, CK_16XX), | 723 | CLK(NULL, "mclk", &mclk_16xx, CK_16XX), |
734 | CLK(NULL, "bclk", &bclk_1510, CK_1510 | CK_310), | 724 | CLK(NULL, "bclk", &bclk_1510, CK_1510 | CK_310), |
diff --git a/arch/arm/mach-omap2/cclock44xx_data.c b/arch/arm/mach-omap2/cclock44xx_data.c index 3d58f335f173..0c6834ae1fc4 100644 --- a/arch/arm/mach-omap2/cclock44xx_data.c +++ b/arch/arm/mach-omap2/cclock44xx_data.c | |||
@@ -52,6 +52,13 @@ | |||
52 | */ | 52 | */ |
53 | #define OMAP4_DPLL_ABE_DEFFREQ 98304000 | 53 | #define OMAP4_DPLL_ABE_DEFFREQ 98304000 |
54 | 54 | ||
55 | /* | ||
56 | * OMAP4 USB DPLL default frequency. In OMAP4430 TRM version V, section | ||
57 | * "3.6.3.9.5 DPLL_USB Preferred Settings" shows that the preferred | ||
58 | * locked frequency for the USB DPLL is 960MHz. | ||
59 | */ | ||
60 | #define OMAP4_DPLL_USB_DEFFREQ 960000000 | ||
61 | |||
55 | /* Root clocks */ | 62 | /* Root clocks */ |
56 | 63 | ||
57 | DEFINE_CLK_FIXED_RATE(extalt_clkin_ck, CLK_IS_ROOT, 59000000, 0x0); | 64 | DEFINE_CLK_FIXED_RATE(extalt_clkin_ck, CLK_IS_ROOT, 59000000, 0x0); |
@@ -1011,6 +1018,10 @@ DEFINE_CLK_OMAP_MUX(hsmmc2_fclk, "l3_init_clkdm", hsmmc1_fclk_sel, | |||
1011 | OMAP4430_CM_L3INIT_MMC2_CLKCTRL, OMAP4430_CLKSEL_MASK, | 1018 | OMAP4430_CM_L3INIT_MMC2_CLKCTRL, OMAP4430_CLKSEL_MASK, |
1012 | hsmmc1_fclk_parents, func_dmic_abe_gfclk_ops); | 1019 | hsmmc1_fclk_parents, func_dmic_abe_gfclk_ops); |
1013 | 1020 | ||
1021 | DEFINE_CLK_GATE(ocp2scp_usb_phy_phy_48m, "func_48m_fclk", &func_48m_fclk, 0x0, | ||
1022 | OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL, | ||
1023 | OMAP4430_OPTFCLKEN_PHY_48M_SHIFT, 0x0, NULL); | ||
1024 | |||
1014 | DEFINE_CLK_GATE(sha2md5_fck, "l3_div_ck", &l3_div_ck, 0x0, | 1025 | DEFINE_CLK_GATE(sha2md5_fck, "l3_div_ck", &l3_div_ck, 0x0, |
1015 | OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL, | 1026 | OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL, |
1016 | OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); | 1027 | OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); |
@@ -1538,6 +1549,7 @@ static struct omap_clk omap44xx_clks[] = { | |||
1538 | CLK(NULL, "per_mcbsp4_gfclk", &per_mcbsp4_gfclk, CK_443X), | 1549 | CLK(NULL, "per_mcbsp4_gfclk", &per_mcbsp4_gfclk, CK_443X), |
1539 | CLK(NULL, "hsmmc1_fclk", &hsmmc1_fclk, CK_443X), | 1550 | CLK(NULL, "hsmmc1_fclk", &hsmmc1_fclk, CK_443X), |
1540 | CLK(NULL, "hsmmc2_fclk", &hsmmc2_fclk, CK_443X), | 1551 | CLK(NULL, "hsmmc2_fclk", &hsmmc2_fclk, CK_443X), |
1552 | CLK(NULL, "ocp2scp_usb_phy_phy_48m", &ocp2scp_usb_phy_phy_48m, CK_443X), | ||
1541 | CLK(NULL, "sha2md5_fck", &sha2md5_fck, CK_443X), | 1553 | CLK(NULL, "sha2md5_fck", &sha2md5_fck, CK_443X), |
1542 | CLK(NULL, "slimbus1_fclk_1", &slimbus1_fclk_1, CK_443X), | 1554 | CLK(NULL, "slimbus1_fclk_1", &slimbus1_fclk_1, CK_443X), |
1543 | CLK(NULL, "slimbus1_fclk_0", &slimbus1_fclk_0, CK_443X), | 1555 | CLK(NULL, "slimbus1_fclk_0", &slimbus1_fclk_0, CK_443X), |
@@ -1705,5 +1717,13 @@ int __init omap4xxx_clk_init(void) | |||
1705 | if (rc) | 1717 | if (rc) |
1706 | pr_err("%s: failed to configure ABE DPLL!\n", __func__); | 1718 | pr_err("%s: failed to configure ABE DPLL!\n", __func__); |
1707 | 1719 | ||
1720 | /* | ||
1721 | * Lock USB DPLL on OMAP4 devices so that the L3INIT power | ||
1722 | * domain can transition to retention state when not in use. | ||
1723 | */ | ||
1724 | rc = clk_set_rate(&dpll_usb_ck, OMAP4_DPLL_USB_DEFFREQ); | ||
1725 | if (rc) | ||
1726 | pr_err("%s: failed to configure USB DPLL!\n", __func__); | ||
1727 | |||
1708 | return 0; | 1728 | return 0; |
1709 | } | 1729 | } |
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h index 40f4a03d728f..d6ba13e1c540 100644 --- a/arch/arm/mach-omap2/common.h +++ b/arch/arm/mach-omap2/common.h | |||
@@ -293,5 +293,8 @@ extern void omap_reserve(void); | |||
293 | struct omap_hwmod; | 293 | struct omap_hwmod; |
294 | extern int omap_dss_reset(struct omap_hwmod *); | 294 | extern int omap_dss_reset(struct omap_hwmod *); |
295 | 295 | ||
296 | /* SoC specific clock initializer */ | ||
297 | extern int (*omap_clk_init)(void); | ||
298 | |||
296 | #endif /* __ASSEMBLER__ */ | 299 | #endif /* __ASSEMBLER__ */ |
297 | #endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */ | 300 | #endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */ |
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index 2c3fdd65387b..5c445ca1e271 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c | |||
@@ -55,6 +55,12 @@ | |||
55 | #include "prm44xx.h" | 55 | #include "prm44xx.h" |
56 | 56 | ||
57 | /* | 57 | /* |
58 | * omap_clk_init: points to a function that does the SoC-specific | ||
59 | * clock initializations | ||
60 | */ | ||
61 | int (*omap_clk_init)(void); | ||
62 | |||
63 | /* | ||
58 | * The machine specific code may provide the extra mapping besides the | 64 | * The machine specific code may provide the extra mapping besides the |
59 | * default mapping provided here. | 65 | * default mapping provided here. |
60 | */ | 66 | */ |
@@ -397,7 +403,7 @@ void __init omap2420_init_early(void) | |||
397 | omap242x_clockdomains_init(); | 403 | omap242x_clockdomains_init(); |
398 | omap2420_hwmod_init(); | 404 | omap2420_hwmod_init(); |
399 | omap_hwmod_init_postsetup(); | 405 | omap_hwmod_init_postsetup(); |
400 | omap2420_clk_init(); | 406 | omap_clk_init = omap2420_clk_init; |
401 | } | 407 | } |
402 | 408 | ||
403 | void __init omap2420_init_late(void) | 409 | void __init omap2420_init_late(void) |
@@ -427,7 +433,7 @@ void __init omap2430_init_early(void) | |||
427 | omap243x_clockdomains_init(); | 433 | omap243x_clockdomains_init(); |
428 | omap2430_hwmod_init(); | 434 | omap2430_hwmod_init(); |
429 | omap_hwmod_init_postsetup(); | 435 | omap_hwmod_init_postsetup(); |
430 | omap2430_clk_init(); | 436 | omap_clk_init = omap2430_clk_init; |
431 | } | 437 | } |
432 | 438 | ||
433 | void __init omap2430_init_late(void) | 439 | void __init omap2430_init_late(void) |
@@ -462,7 +468,7 @@ void __init omap3_init_early(void) | |||
462 | omap3xxx_clockdomains_init(); | 468 | omap3xxx_clockdomains_init(); |
463 | omap3xxx_hwmod_init(); | 469 | omap3xxx_hwmod_init(); |
464 | omap_hwmod_init_postsetup(); | 470 | omap_hwmod_init_postsetup(); |
465 | omap3xxx_clk_init(); | 471 | omap_clk_init = omap3xxx_clk_init; |
466 | } | 472 | } |
467 | 473 | ||
468 | void __init omap3430_init_early(void) | 474 | void __init omap3430_init_early(void) |
@@ -500,7 +506,7 @@ void __init ti81xx_init_early(void) | |||
500 | omap3xxx_clockdomains_init(); | 506 | omap3xxx_clockdomains_init(); |
501 | omap3xxx_hwmod_init(); | 507 | omap3xxx_hwmod_init(); |
502 | omap_hwmod_init_postsetup(); | 508 | omap_hwmod_init_postsetup(); |
503 | omap3xxx_clk_init(); | 509 | omap_clk_init = omap3xxx_clk_init; |
504 | } | 510 | } |
505 | 511 | ||
506 | void __init omap3_init_late(void) | 512 | void __init omap3_init_late(void) |
@@ -568,7 +574,7 @@ void __init am33xx_init_early(void) | |||
568 | am33xx_clockdomains_init(); | 574 | am33xx_clockdomains_init(); |
569 | am33xx_hwmod_init(); | 575 | am33xx_hwmod_init(); |
570 | omap_hwmod_init_postsetup(); | 576 | omap_hwmod_init_postsetup(); |
571 | am33xx_clk_init(); | 577 | omap_clk_init = am33xx_clk_init; |
572 | } | 578 | } |
573 | #endif | 579 | #endif |
574 | 580 | ||
@@ -593,7 +599,7 @@ void __init omap4430_init_early(void) | |||
593 | omap44xx_clockdomains_init(); | 599 | omap44xx_clockdomains_init(); |
594 | omap44xx_hwmod_init(); | 600 | omap44xx_hwmod_init(); |
595 | omap_hwmod_init_postsetup(); | 601 | omap_hwmod_init_postsetup(); |
596 | omap4xxx_clk_init(); | 602 | omap_clk_init = omap4xxx_clk_init; |
597 | } | 603 | } |
598 | 604 | ||
599 | void __init omap4430_init_late(void) | 605 | void __init omap4430_init_late(void) |
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index c2c798c08c2b..a202a4785104 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c | |||
@@ -1368,7 +1368,9 @@ static void _enable_sysc(struct omap_hwmod *oh) | |||
1368 | } | 1368 | } |
1369 | 1369 | ||
1370 | if (sf & SYSC_HAS_MIDLEMODE) { | 1370 | if (sf & SYSC_HAS_MIDLEMODE) { |
1371 | if (oh->flags & HWMOD_SWSUP_MSTANDBY) { | 1371 | if (oh->flags & HWMOD_FORCE_MSTANDBY) { |
1372 | idlemode = HWMOD_IDLEMODE_FORCE; | ||
1373 | } else if (oh->flags & HWMOD_SWSUP_MSTANDBY) { | ||
1372 | idlemode = HWMOD_IDLEMODE_NO; | 1374 | idlemode = HWMOD_IDLEMODE_NO; |
1373 | } else { | 1375 | } else { |
1374 | if (sf & SYSC_HAS_ENAWAKEUP) | 1376 | if (sf & SYSC_HAS_ENAWAKEUP) |
@@ -1440,7 +1442,8 @@ static void _idle_sysc(struct omap_hwmod *oh) | |||
1440 | } | 1442 | } |
1441 | 1443 | ||
1442 | if (sf & SYSC_HAS_MIDLEMODE) { | 1444 | if (sf & SYSC_HAS_MIDLEMODE) { |
1443 | if (oh->flags & HWMOD_SWSUP_MSTANDBY) { | 1445 | if ((oh->flags & HWMOD_SWSUP_MSTANDBY) || |
1446 | (oh->flags & HWMOD_FORCE_MSTANDBY)) { | ||
1444 | idlemode = HWMOD_IDLEMODE_FORCE; | 1447 | idlemode = HWMOD_IDLEMODE_FORCE; |
1445 | } else { | 1448 | } else { |
1446 | if (sf & SYSC_HAS_ENAWAKEUP) | 1449 | if (sf & SYSC_HAS_ENAWAKEUP) |
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h index d43d9b608eda..d5dc935f6060 100644 --- a/arch/arm/mach-omap2/omap_hwmod.h +++ b/arch/arm/mach-omap2/omap_hwmod.h | |||
@@ -427,8 +427,8 @@ struct omap_hwmod_omap4_prcm { | |||
427 | * | 427 | * |
428 | * HWMOD_SWSUP_SIDLE: omap_hwmod code should manually bring module in and out | 428 | * HWMOD_SWSUP_SIDLE: omap_hwmod code should manually bring module in and out |
429 | * of idle, rather than relying on module smart-idle | 429 | * of idle, rather than relying on module smart-idle |
430 | * HWMOD_SWSUP_MSTDBY: omap_hwmod code should manually bring module in and out | 430 | * HWMOD_SWSUP_MSTANDBY: omap_hwmod code should manually bring module in and |
431 | * of standby, rather than relying on module smart-standby | 431 | * out of standby, rather than relying on module smart-standby |
432 | * HWMOD_INIT_NO_RESET: don't reset this module at boot - important for | 432 | * HWMOD_INIT_NO_RESET: don't reset this module at boot - important for |
433 | * SDRAM controller, etc. XXX probably belongs outside the main hwmod file | 433 | * SDRAM controller, etc. XXX probably belongs outside the main hwmod file |
434 | * XXX Should be HWMOD_SETUP_NO_RESET | 434 | * XXX Should be HWMOD_SETUP_NO_RESET |
@@ -459,6 +459,10 @@ struct omap_hwmod_omap4_prcm { | |||
459 | * correctly, or this is being abused to deal with some PM latency | 459 | * correctly, or this is being abused to deal with some PM latency |
460 | * issues -- but we're currently suffering from a shortage of | 460 | * issues -- but we're currently suffering from a shortage of |
461 | * folks who are able to track these issues down properly. | 461 | * folks who are able to track these issues down properly. |
462 | * HWMOD_FORCE_MSTANDBY: Always keep MIDLEMODE bits cleared so that device | ||
463 | * is kept in force-standby mode. Failing to do so causes PM problems | ||
464 | * with musb on OMAP3630 at least. Note that musb has a dedicated register | ||
465 | * to control MSTANDBY signal when MIDLEMODE is set to force-standby. | ||
462 | */ | 466 | */ |
463 | #define HWMOD_SWSUP_SIDLE (1 << 0) | 467 | #define HWMOD_SWSUP_SIDLE (1 << 0) |
464 | #define HWMOD_SWSUP_MSTANDBY (1 << 1) | 468 | #define HWMOD_SWSUP_MSTANDBY (1 << 1) |
@@ -471,6 +475,7 @@ struct omap_hwmod_omap4_prcm { | |||
471 | #define HWMOD_16BIT_REG (1 << 8) | 475 | #define HWMOD_16BIT_REG (1 << 8) |
472 | #define HWMOD_EXT_OPT_MAIN_CLK (1 << 9) | 476 | #define HWMOD_EXT_OPT_MAIN_CLK (1 << 9) |
473 | #define HWMOD_BLOCK_WFI (1 << 10) | 477 | #define HWMOD_BLOCK_WFI (1 << 10) |
478 | #define HWMOD_FORCE_MSTANDBY (1 << 11) | ||
474 | 479 | ||
475 | /* | 480 | /* |
476 | * omap_hwmod._int_flags definitions | 481 | * omap_hwmod._int_flags definitions |
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index ac7e03ec952f..5112d04e7b79 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | |||
@@ -1707,9 +1707,14 @@ static struct omap_hwmod omap3xxx_usbhsotg_hwmod = { | |||
1707 | * Erratum ID: i479 idle_req / idle_ack mechanism potentially | 1707 | * Erratum ID: i479 idle_req / idle_ack mechanism potentially |
1708 | * broken when autoidle is enabled | 1708 | * broken when autoidle is enabled |
1709 | * workaround is to disable the autoidle bit at module level. | 1709 | * workaround is to disable the autoidle bit at module level. |
1710 | * | ||
1711 | * Enabling the device in any other MIDLEMODE setting but force-idle | ||
1712 | * causes core_pwrdm not enter idle states at least on OMAP3630. | ||
1712 | * causes core_pwrdm to not enter idle states, at least on OMAP3630. | ||
1713 | * Note that musb has OTG_FORCESTDBY register that controls MSTANDBY | ||
1714 | * signal when MIDLEMODE is set to force-idle. | ||
1710 | */ | 1715 | */ |
1711 | .flags = HWMOD_NO_OCP_AUTOIDLE | HWMOD_SWSUP_SIDLE | 1716 | .flags = HWMOD_NO_OCP_AUTOIDLE | HWMOD_SWSUP_SIDLE |
1712 | | HWMOD_SWSUP_MSTANDBY, | 1717 | | HWMOD_FORCE_MSTANDBY, |
1713 | }; | 1718 | }; |
1714 | 1719 | ||
1715 | /* usb_otg_hs */ | 1720 | /* usb_otg_hs */ |
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index 0e47d2e1687c..eaba9dc91a0d 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c | |||
@@ -2719,7 +2719,17 @@ static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = { | |||
2719 | .name = "ocp2scp_usb_phy", | 2719 | .name = "ocp2scp_usb_phy", |
2720 | .class = &omap44xx_ocp2scp_hwmod_class, | 2720 | .class = &omap44xx_ocp2scp_hwmod_class, |
2721 | .clkdm_name = "l3_init_clkdm", | 2721 | .clkdm_name = "l3_init_clkdm", |
2722 | .main_clk = "func_48m_fclk", | 2722 | /* |
2723 | * ocp2scp_usb_phy_phy_48m is provided by the OMAP4 PRCM IP | ||
2724 | * block as an "optional clock," and normally should never be | ||
2725 | * specified as the main_clk for an OMAP IP block. However it | ||
2726 | * turns out that this clock is actually the main clock for | ||
2727 | * the ocp2scp_usb_phy IP block: | ||
2728 | * http://lists.infradead.org/pipermail/linux-arm-kernel/2012-September/119943.html | ||
2729 | * So listing ocp2scp_usb_phy_phy_48m as a main_clk here seems | ||
2730 | * to be the best workaround. | ||
2731 | */ | ||
2732 | .main_clk = "ocp2scp_usb_phy_phy_48m", | ||
2723 | .prcm = { | 2733 | .prcm = { |
2724 | .omap4 = { | 2734 | .omap4 = { |
2725 | .clkctrl_offs = OMAP4_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL_OFFSET, | 2735 | .clkctrl_offs = OMAP4_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL_OFFSET, |
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index 2bdd4cf17a8f..f62b509ed08d 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c | |||
@@ -547,6 +547,8 @@ static inline void __init realtime_counter_init(void) | |||
547 | clksrc_nr, clksrc_src) \ | 547 | clksrc_nr, clksrc_src) \ |
548 | void __init omap##name##_gptimer_timer_init(void) \ | 548 | void __init omap##name##_gptimer_timer_init(void) \ |
549 | { \ | 549 | { \ |
550 | if (omap_clk_init) \ | ||
551 | omap_clk_init(); \ | ||
550 | omap_dmtimer_init(); \ | 552 | omap_dmtimer_init(); \ |
551 | omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \ | 553 | omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \ |
552 | omap2_gptimer_clocksource_init((clksrc_nr), clksrc_src); \ | 554 | omap2_gptimer_clocksource_init((clksrc_nr), clksrc_src); \ |
@@ -556,6 +558,8 @@ void __init omap##name##_gptimer_timer_init(void) \ | |||
556 | clksrc_nr, clksrc_src) \ | 558 | clksrc_nr, clksrc_src) \ |
557 | void __init omap##name##_sync32k_timer_init(void) \ | 559 | void __init omap##name##_sync32k_timer_init(void) \ |
558 | { \ | 560 | { \ |
561 | if (omap_clk_init) \ | ||
562 | omap_clk_init(); \ | ||
559 | omap_dmtimer_init(); \ | 563 | omap_dmtimer_init(); \ |
560 | omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \ | 564 | omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \ |
561 | /* Enable the use of clocksource="gp_timer" kernel parameter */ \ | 565 | /* Enable the use of clocksource="gp_timer" kernel parameter */ \ |
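The io.c and timer.c hunks together implement a deferral: each SoC's init_early no longer calls its clock initializer directly but records it in the omap_clk_init function pointer, and the timer init code, which runs later in boot, invokes it before setting up the clockevent and clocksource. A sketch of the two halves; the timer function name here is illustrative, since the real code generates it through the timer init macros shown above:

/* SoC-specific clock initializer, filled in by the init_early hooks */
int (*omap_clk_init)(void);

void __init omap4430_init_early(void)
{
        /* ... powerdomain / clockdomain / hwmod setup ... */
        omap_clk_init = omap4xxx_clk_init;      /* record, do not call yet */
}

static void __init omap_timer_init_sketch(void)
{
        if (omap_clk_init)
                omap_clk_init();                /* clocks come up just in time */
        omap_dmtimer_init();
        /* ... omap2_gp_clockevent_init() and clocksource setup ... */
}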
diff --git a/arch/arm/mach-s3c24xx/include/mach/irqs.h b/arch/arm/mach-s3c24xx/include/mach/irqs.h index b7a9f4d469e8..1e73f5fa8659 100644 --- a/arch/arm/mach-s3c24xx/include/mach/irqs.h +++ b/arch/arm/mach-s3c24xx/include/mach/irqs.h | |||
@@ -188,10 +188,8 @@ | |||
188 | 188 | ||
189 | #if defined(CONFIG_CPU_S3C2416) | 189 | #if defined(CONFIG_CPU_S3C2416) |
190 | #define NR_IRQS (IRQ_S3C2416_I2S1 + 1) | 190 | #define NR_IRQS (IRQ_S3C2416_I2S1 + 1) |
191 | #elif defined(CONFIG_CPU_S3C2443) | ||
192 | #define NR_IRQS (IRQ_S3C2443_AC97+1) | ||
193 | #else | 191 | #else |
194 | #define NR_IRQS (IRQ_S3C2440_AC97+1) | 192 | #define NR_IRQS (IRQ_S3C2443_AC97 + 1) |
195 | #endif | 193 | #endif |
196 | 194 | ||
197 | /* compatibility define. */ | 195 | /* compatibility define. */ |
diff --git a/arch/arm/mach-s3c24xx/irq.c b/arch/arm/mach-s3c24xx/irq.c index cb9f5e011e73..d8ba9bee4c7e 100644 --- a/arch/arm/mach-s3c24xx/irq.c +++ b/arch/arm/mach-s3c24xx/irq.c | |||
@@ -500,7 +500,7 @@ struct s3c_irq_intc *s3c24xx_init_intc(struct device_node *np, | |||
500 | base = (void *)0xfd000000; | 500 | base = (void *)0xfd000000; |
501 | 501 | ||
502 | intc->reg_mask = base + 0xa4; | 502 | intc->reg_mask = base + 0xa4; |
503 | intc->reg_pending = base + 0x08; | 503 | intc->reg_pending = base + 0xa8; |
504 | irq_num = 20; | 504 | irq_num = 20; |
505 | irq_start = S3C2410_IRQ(32); | 505 | irq_start = S3C2410_IRQ(32); |
506 | irq_offset = 4; | 506 | irq_offset = 4; |
diff --git a/arch/arm/mach-ux500/board-mop500-sdi.c b/arch/arm/mach-ux500/board-mop500-sdi.c index 051b62c27102..7f2cb6c5e2c1 100644 --- a/arch/arm/mach-ux500/board-mop500-sdi.c +++ b/arch/arm/mach-ux500/board-mop500-sdi.c | |||
@@ -81,7 +81,6 @@ static struct stedma40_chan_cfg mop500_sdi0_dma_cfg_tx = { | |||
81 | #endif | 81 | #endif |
82 | 82 | ||
83 | struct mmci_platform_data mop500_sdi0_data = { | 83 | struct mmci_platform_data mop500_sdi0_data = { |
84 | .ios_handler = mop500_sdi0_ios_handler, | ||
85 | .ocr_mask = MMC_VDD_29_30, | 84 | .ocr_mask = MMC_VDD_29_30, |
86 | .f_max = 50000000, | 85 | .f_max = 50000000, |
87 | .capabilities = MMC_CAP_4_BIT_DATA | | 86 | .capabilities = MMC_CAP_4_BIT_DATA | |
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c index b03457881c4b..87d2d7b38ce9 100644 --- a/arch/arm/mach-ux500/board-mop500.c +++ b/arch/arm/mach-ux500/board-mop500.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/interrupt.h> | 13 | #include <linux/interrupt.h> |
14 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
15 | #include <linux/clk.h> | ||
15 | #include <linux/io.h> | 16 | #include <linux/io.h> |
16 | #include <linux/i2c.h> | 17 | #include <linux/i2c.h> |
17 | #include <linux/platform_data/i2c-nomadik.h> | 18 | #include <linux/platform_data/i2c-nomadik.h> |
@@ -439,6 +440,15 @@ static void mop500_prox_deactivate(struct device *dev) | |||
439 | regulator_put(prox_regulator); | 440 | regulator_put(prox_regulator); |
440 | } | 441 | } |
441 | 442 | ||
443 | void mop500_snowball_ethernet_clock_enable(void) | ||
444 | { | ||
445 | struct clk *clk; | ||
446 | |||
447 | clk = clk_get_sys("fsmc", NULL); | ||
448 | if (!IS_ERR(clk)) | ||
449 | clk_prepare_enable(clk); | ||
450 | } | ||
451 | |||
442 | static struct cryp_platform_data u8500_cryp1_platform_data = { | 452 | static struct cryp_platform_data u8500_cryp1_platform_data = { |
443 | .mem_to_engine = { | 453 | .mem_to_engine = { |
444 | .dir = STEDMA40_MEM_TO_PERIPH, | 454 | .dir = STEDMA40_MEM_TO_PERIPH, |
@@ -683,6 +693,8 @@ static void __init snowball_init_machine(void) | |||
683 | mop500_audio_init(parent); | 693 | mop500_audio_init(parent); |
684 | mop500_uart_init(parent); | 694 | mop500_uart_init(parent); |
685 | 695 | ||
696 | mop500_snowball_ethernet_clock_enable(); | ||
697 | |||
686 | /* This board has full regulator constraints */ | 698 | /* This board has full regulator constraints */ |
687 | regulator_has_full_constraints(); | 699 | regulator_has_full_constraints(); |
688 | } | 700 | } |
diff --git a/arch/arm/mach-ux500/board-mop500.h b/arch/arm/mach-ux500/board-mop500.h index eaa605f5d90d..d38951be70df 100644 --- a/arch/arm/mach-ux500/board-mop500.h +++ b/arch/arm/mach-ux500/board-mop500.h | |||
@@ -104,6 +104,7 @@ void __init mop500_pinmaps_init(void); | |||
104 | void __init snowball_pinmaps_init(void); | 104 | void __init snowball_pinmaps_init(void); |
105 | void __init hrefv60_pinmaps_init(void); | 105 | void __init hrefv60_pinmaps_init(void); |
106 | void mop500_audio_init(struct device *parent); | 106 | void mop500_audio_init(struct device *parent); |
107 | void mop500_snowball_ethernet_clock_enable(void); | ||
107 | 108 | ||
108 | int __init mop500_uib_init(void); | 109 | int __init mop500_uib_init(void); |
109 | void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info, | 110 | void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info, |
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c index 19235cf7bbe3..f1a581844372 100644 --- a/arch/arm/mach-ux500/cpu-db8500.c +++ b/arch/arm/mach-ux500/cpu-db8500.c | |||
@@ -312,9 +312,10 @@ static void __init u8500_init_machine(void) | |||
312 | /* Pinmaps must be in place before devices register */ | 312 | /* Pinmaps must be in place before devices register */ |
313 | if (of_machine_is_compatible("st-ericsson,mop500")) | 313 | if (of_machine_is_compatible("st-ericsson,mop500")) |
314 | mop500_pinmaps_init(); | 314 | mop500_pinmaps_init(); |
315 | else if (of_machine_is_compatible("calaosystems,snowball-a9500")) | 315 | else if (of_machine_is_compatible("calaosystems,snowball-a9500")) { |
316 | snowball_pinmaps_init(); | 316 | snowball_pinmaps_init(); |
317 | else if (of_machine_is_compatible("st-ericsson,hrefv60+")) | 317 | mop500_snowball_ethernet_clock_enable(); |
318 | } else if (of_machine_is_compatible("st-ericsson,hrefv60+")) | ||
318 | hrefv60_pinmaps_init(); | 319 | hrefv60_pinmaps_init(); |
319 | else if (of_machine_is_compatible("st-ericsson,ccu9540")) {} | 320 | else if (of_machine_is_compatible("st-ericsson,ccu9540")) {} |
320 | /* TODO: Add pinmaps for ccu9540 board. */ | 321 | /* TODO: Add pinmaps for ccu9540 board. */ |
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 025d17328730..4045c4931a30 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig | |||
@@ -43,7 +43,7 @@ config CPU_ARM740T | |||
43 | depends on !MMU | 43 | depends on !MMU |
44 | select CPU_32v4T | 44 | select CPU_32v4T |
45 | select CPU_ABRT_LV4T | 45 | select CPU_ABRT_LV4T |
46 | select CPU_CACHE_V3 # although the core is v4t | 46 | select CPU_CACHE_V4 |
47 | select CPU_CP15_MPU | 47 | select CPU_CP15_MPU |
48 | select CPU_PABRT_LEGACY | 48 | select CPU_PABRT_LEGACY |
49 | help | 49 | help |
@@ -469,9 +469,6 @@ config CPU_PABRT_V7 | |||
469 | bool | 469 | bool |
470 | 470 | ||
471 | # The cache model | 471 | # The cache model |
472 | config CPU_CACHE_V3 | ||
473 | bool | ||
474 | |||
475 | config CPU_CACHE_V4 | 472 | config CPU_CACHE_V4 |
476 | bool | 473 | bool |
477 | 474 | ||
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index 4e333fa2756f..9e51be96f635 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile | |||
@@ -33,7 +33,6 @@ obj-$(CONFIG_CPU_PABRT_LEGACY) += pabort-legacy.o | |||
33 | obj-$(CONFIG_CPU_PABRT_V6) += pabort-v6.o | 33 | obj-$(CONFIG_CPU_PABRT_V6) += pabort-v6.o |
34 | obj-$(CONFIG_CPU_PABRT_V7) += pabort-v7.o | 34 | obj-$(CONFIG_CPU_PABRT_V7) += pabort-v7.o |
35 | 35 | ||
36 | obj-$(CONFIG_CPU_CACHE_V3) += cache-v3.o | ||
37 | obj-$(CONFIG_CPU_CACHE_V4) += cache-v4.o | 36 | obj-$(CONFIG_CPU_CACHE_V4) += cache-v4.o |
38 | obj-$(CONFIG_CPU_CACHE_V4WT) += cache-v4wt.o | 37 | obj-$(CONFIG_CPU_CACHE_V4WT) += cache-v4wt.o |
39 | obj-$(CONFIG_CPU_CACHE_V4WB) += cache-v4wb.o | 38 | obj-$(CONFIG_CPU_CACHE_V4WB) += cache-v4wb.o |
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c index dd3d59122cc3..48bc3c0a87ce 100644 --- a/arch/arm/mm/cache-feroceon-l2.c +++ b/arch/arm/mm/cache-feroceon-l2.c | |||
@@ -343,6 +343,7 @@ void __init feroceon_l2_init(int __l2_wt_override) | |||
343 | outer_cache.inv_range = feroceon_l2_inv_range; | 343 | outer_cache.inv_range = feroceon_l2_inv_range; |
344 | outer_cache.clean_range = feroceon_l2_clean_range; | 344 | outer_cache.clean_range = feroceon_l2_clean_range; |
345 | outer_cache.flush_range = feroceon_l2_flush_range; | 345 | outer_cache.flush_range = feroceon_l2_flush_range; |
346 | outer_cache.inv_all = l2_inv_all; | ||
346 | 347 | ||
347 | enable_l2(); | 348 | enable_l2(); |
348 | 349 | ||
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index c2f37390308a..c465faca51b0 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c | |||
@@ -299,7 +299,7 @@ static void l2x0_unlock(u32 cache_id) | |||
299 | int lockregs; | 299 | int lockregs; |
300 | int i; | 300 | int i; |
301 | 301 | ||
302 | switch (cache_id) { | 302 | switch (cache_id & L2X0_CACHE_ID_PART_MASK) { |
303 | case L2X0_CACHE_ID_PART_L310: | 303 | case L2X0_CACHE_ID_PART_L310: |
304 | lockregs = 8; | 304 | lockregs = 8; |
305 | break; | 305 | break; |
@@ -333,15 +333,14 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) | |||
333 | if (cache_id_part_number_from_dt) | 333 | if (cache_id_part_number_from_dt) |
334 | cache_id = cache_id_part_number_from_dt; | 334 | cache_id = cache_id_part_number_from_dt; |
335 | else | 335 | else |
336 | cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID) | 336 | cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID); |
337 | & L2X0_CACHE_ID_PART_MASK; | ||
338 | aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); | 337 | aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); |
339 | 338 | ||
340 | aux &= aux_mask; | 339 | aux &= aux_mask; |
341 | aux |= aux_val; | 340 | aux |= aux_val; |
342 | 341 | ||
343 | /* Determine the number of ways */ | 342 | /* Determine the number of ways */ |
344 | switch (cache_id) { | 343 | switch (cache_id & L2X0_CACHE_ID_PART_MASK) { |
345 | case L2X0_CACHE_ID_PART_L310: | 344 | case L2X0_CACHE_ID_PART_L310: |
346 | if (aux & (1 << 16)) | 345 | if (aux & (1 << 16)) |
347 | ways = 16; | 346 | ways = 16; |
@@ -725,7 +724,6 @@ static const struct l2x0_of_data pl310_data = { | |||
725 | .flush_all = l2x0_flush_all, | 724 | .flush_all = l2x0_flush_all, |
726 | .inv_all = l2x0_inv_all, | 725 | .inv_all = l2x0_inv_all, |
727 | .disable = l2x0_disable, | 726 | .disable = l2x0_disable, |
728 | .set_debug = pl310_set_debug, | ||
729 | }, | 727 | }, |
730 | }; | 728 | }; |
731 | 729 | ||
@@ -814,9 +812,8 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask) | |||
814 | data->save(); | 812 | data->save(); |
815 | 813 | ||
816 | of_init = true; | 814 | of_init = true; |
817 | l2x0_init(l2x0_base, aux_val, aux_mask); | ||
818 | |||
819 | memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache)); | 815 | memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache)); |
816 | l2x0_init(l2x0_base, aux_val, aux_mask); | ||
820 | 817 | ||
821 | return 0; | 818 | return 0; |
822 | } | 819 | } |
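The cache-l2x0.c hunk above keeps the raw CACHE_ID register value and applies L2X0_CACHE_ID_PART_MASK only where the part number is actually compared, and it now installs the DT-provided outer_cache ops before calling l2x0_init(). A small userspace sketch of the part-number test follows; the mask and part values are illustrative assumptions, the real definitions live in the l2x0 header.

#include <stdio.h>
#include <stdint.h>

/* Illustrative values; assumed to match the PL310 part-field layout. */
#define L2X0_CACHE_ID_PART_MASK		(0xf << 6)
#define L2X0_CACHE_ID_PART_L310		(3 << 6)

static int lock_regs_for(uint32_t cache_id)
{
	/* Mask at the comparison site, as in the hunk above. */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		return 8;		/* PL310 has eight lockdown registers */
	default:
		return 1;
	}
}

int main(void)
{
	uint32_t cache_id = 0x410000c8;	/* made-up register value with part field == 3 */

	printf("lock registers: %d\n", lock_regs_for(cache_id));
	return 0;
}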
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S deleted file mode 100644 index 8a3fadece8d3..000000000000 --- a/arch/arm/mm/cache-v3.S +++ /dev/null | |||
@@ -1,137 +0,0 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/mm/cache-v3.S | ||
3 | * | ||
4 | * Copyright (C) 1997-2002 Russell king | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #include <linux/linkage.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <asm/page.h> | ||
13 | #include "proc-macros.S" | ||
14 | |||
15 | /* | ||
16 | * flush_icache_all() | ||
17 | * | ||
18 | * Unconditionally clean and invalidate the entire icache. | ||
19 | */ | ||
20 | ENTRY(v3_flush_icache_all) | ||
21 | mov pc, lr | ||
22 | ENDPROC(v3_flush_icache_all) | ||
23 | |||
24 | /* | ||
25 | * flush_user_cache_all() | ||
26 | * | ||
27 | * Invalidate all cache entries in a particular address | ||
28 | * space. | ||
29 | * | ||
30 | * - mm - mm_struct describing address space | ||
31 | */ | ||
32 | ENTRY(v3_flush_user_cache_all) | ||
33 | /* FALLTHROUGH */ | ||
34 | /* | ||
35 | * flush_kern_cache_all() | ||
36 | * | ||
37 | * Clean and invalidate the entire cache. | ||
38 | */ | ||
39 | ENTRY(v3_flush_kern_cache_all) | ||
40 | /* FALLTHROUGH */ | ||
41 | |||
42 | /* | ||
43 | * flush_user_cache_range(start, end, flags) | ||
44 | * | ||
45 | * Invalidate a range of cache entries in the specified | ||
46 | * address space. | ||
47 | * | ||
48 | * - start - start address (may not be aligned) | ||
49 | * - end - end address (exclusive, may not be aligned) | ||
50 | * - flags - vma_area_struct flags describing address space | ||
51 | */ | ||
52 | ENTRY(v3_flush_user_cache_range) | ||
53 | mov ip, #0 | ||
54 | mcreq p15, 0, ip, c7, c0, 0 @ flush ID cache | ||
55 | mov pc, lr | ||
56 | |||
57 | /* | ||
58 | * coherent_kern_range(start, end) | ||
59 | * | ||
60 | * Ensure coherency between the Icache and the Dcache in the | ||
61 | * region described by start. If you have non-snooping | ||
62 | * Harvard caches, you need to implement this function. | ||
63 | * | ||
64 | * - start - virtual start address | ||
65 | * - end - virtual end address | ||
66 | */ | ||
67 | ENTRY(v3_coherent_kern_range) | ||
68 | /* FALLTHROUGH */ | ||
69 | |||
70 | /* | ||
71 | * coherent_user_range(start, end) | ||
72 | * | ||
73 | * Ensure coherency between the Icache and the Dcache in the | ||
74 | * region described by start. If you have non-snooping | ||
75 | * Harvard caches, you need to implement this function. | ||
76 | * | ||
77 | * - start - virtual start address | ||
78 | * - end - virtual end address | ||
79 | */ | ||
80 | ENTRY(v3_coherent_user_range) | ||
81 | mov r0, #0 | ||
82 | mov pc, lr | ||
83 | |||
84 | /* | ||
85 | * flush_kern_dcache_area(void *page, size_t size) | ||
86 | * | ||
87 | * Ensure no D cache aliasing occurs, either with itself or | ||
88 | * the I cache | ||
89 | * | ||
90 | * - addr - kernel address | ||
91 | * - size - region size | ||
92 | */ | ||
93 | ENTRY(v3_flush_kern_dcache_area) | ||
94 | /* FALLTHROUGH */ | ||
95 | |||
96 | /* | ||
97 | * dma_flush_range(start, end) | ||
98 | * | ||
99 | * Clean and invalidate the specified virtual address range. | ||
100 | * | ||
101 | * - start - virtual start address | ||
102 | * - end - virtual end address | ||
103 | */ | ||
104 | ENTRY(v3_dma_flush_range) | ||
105 | mov r0, #0 | ||
106 | mcr p15, 0, r0, c7, c0, 0 @ flush ID cache | ||
107 | mov pc, lr | ||
108 | |||
109 | /* | ||
110 | * dma_unmap_area(start, size, dir) | ||
111 | * - start - kernel virtual start address | ||
112 | * - size - size of region | ||
113 | * - dir - DMA direction | ||
114 | */ | ||
115 | ENTRY(v3_dma_unmap_area) | ||
116 | teq r2, #DMA_TO_DEVICE | ||
117 | bne v3_dma_flush_range | ||
118 | /* FALLTHROUGH */ | ||
119 | |||
120 | /* | ||
121 | * dma_map_area(start, size, dir) | ||
122 | * - start - kernel virtual start address | ||
123 | * - size - size of region | ||
124 | * - dir - DMA direction | ||
125 | */ | ||
126 | ENTRY(v3_dma_map_area) | ||
127 | mov pc, lr | ||
128 | ENDPROC(v3_dma_unmap_area) | ||
129 | ENDPROC(v3_dma_map_area) | ||
130 | |||
131 | .globl v3_flush_kern_cache_louis | ||
132 | .equ v3_flush_kern_cache_louis, v3_flush_kern_cache_all | ||
133 | |||
134 | __INITDATA | ||
135 | |||
136 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) | ||
137 | define_cache_functions v3 | ||
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S index 43e5d77be677..a7ba68f59f0c 100644 --- a/arch/arm/mm/cache-v4.S +++ b/arch/arm/mm/cache-v4.S | |||
@@ -58,7 +58,7 @@ ENTRY(v4_flush_kern_cache_all) | |||
58 | ENTRY(v4_flush_user_cache_range) | 58 | ENTRY(v4_flush_user_cache_range) |
59 | #ifdef CONFIG_CPU_CP15 | 59 | #ifdef CONFIG_CPU_CP15 |
60 | mov ip, #0 | 60 | mov ip, #0 |
61 | mcreq p15, 0, ip, c7, c7, 0 @ flush ID cache | 61 | mcr p15, 0, ip, c7, c7, 0 @ flush ID cache |
62 | mov pc, lr | 62 | mov pc, lr |
63 | #else | 63 | #else |
64 | /* FALLTHROUGH */ | 64 | /* FALLTHROUGH */ |
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index a5a4b2bc42ba..2ac37372ef52 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c | |||
@@ -48,7 +48,7 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock); | |||
48 | static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); | 48 | static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); |
49 | static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS); | 49 | static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS); |
50 | 50 | ||
51 | static DEFINE_PER_CPU(atomic64_t, active_asids); | 51 | DEFINE_PER_CPU(atomic64_t, active_asids); |
52 | static DEFINE_PER_CPU(u64, reserved_asids); | 52 | static DEFINE_PER_CPU(u64, reserved_asids); |
53 | static cpumask_t tlb_flush_pending; | 53 | static cpumask_t tlb_flush_pending; |
54 | 54 | ||
@@ -215,6 +215,7 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) | |||
215 | if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) { | 215 | if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) { |
216 | local_flush_bp_all(); | 216 | local_flush_bp_all(); |
217 | local_flush_tlb_all(); | 217 | local_flush_tlb_all(); |
218 | dummy_flush_tlb_a15_erratum(); | ||
218 | } | 219 | } |
219 | 220 | ||
220 | atomic64_set(&per_cpu(active_asids, cpu), asid); | 221 | atomic64_set(&per_cpu(active_asids, cpu), asid); |
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index e95a996ab78f..a84ff763ac39 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <asm/mach/pci.h> | 34 | #include <asm/mach/pci.h> |
35 | 35 | ||
36 | #include "mm.h" | 36 | #include "mm.h" |
37 | #include "tcm.h" | ||
37 | 38 | ||
38 | /* | 39 | /* |
39 | * empty_zero_page is a special page that is used for | 40 | * empty_zero_page is a special page that is used for |
@@ -598,39 +599,60 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, | |||
598 | } while (pte++, addr += PAGE_SIZE, addr != end); | 599 | } while (pte++, addr += PAGE_SIZE, addr != end); |
599 | } | 600 | } |
600 | 601 | ||
601 | static void __init alloc_init_section(pud_t *pud, unsigned long addr, | 602 | static void __init map_init_section(pmd_t *pmd, unsigned long addr, |
602 | unsigned long end, phys_addr_t phys, | 603 | unsigned long end, phys_addr_t phys, |
603 | const struct mem_type *type) | 604 | const struct mem_type *type) |
604 | { | 605 | { |
605 | pmd_t *pmd = pmd_offset(pud, addr); | 606 | #ifndef CONFIG_ARM_LPAE |
606 | |||
607 | /* | 607 | /* |
608 | * Try a section mapping - end, addr and phys must all be aligned | 608 | * In classic MMU format, puds and pmds are folded into |
609 | * to a section boundary. Note that PMDs refer to the individual | 609 | * the pgds. pmd_offset gives the PGD entry. PGDs refer to a |
610 | * L1 entries, whereas PGDs refer to a group of L1 entries making | 610 | * group of L1 entries making up one logical pointer to |
611 | * up one logical pointer to an L2 table. | 611 | * an L2 table (2MB), whereas PMDs refer to the individual |
612 | * L1 entries (1MB). Hence increment to get the correct | ||
613 | * offset for odd 1MB sections. | ||
614 | * (See arch/arm/include/asm/pgtable-2level.h) | ||
612 | */ | 615 | */ |
613 | if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) { | 616 | if (addr & SECTION_SIZE) |
614 | pmd_t *p = pmd; | 617 | pmd++; |
615 | |||
616 | #ifndef CONFIG_ARM_LPAE | ||
617 | if (addr & SECTION_SIZE) | ||
618 | pmd++; | ||
619 | #endif | 618 | #endif |
619 | do { | ||
620 | *pmd = __pmd(phys | type->prot_sect); | ||
621 | phys += SECTION_SIZE; | ||
622 | } while (pmd++, addr += SECTION_SIZE, addr != end); | ||
620 | 623 | ||
621 | do { | 624 | flush_pmd_entry(pmd); |
622 | *pmd = __pmd(phys | type->prot_sect); | 625 | } |
623 | phys += SECTION_SIZE; | ||
624 | } while (pmd++, addr += SECTION_SIZE, addr != end); | ||
625 | 626 | ||
626 | flush_pmd_entry(p); | 627 | static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, |
627 | } else { | 628 | unsigned long end, phys_addr_t phys, |
629 | const struct mem_type *type) | ||
630 | { | ||
631 | pmd_t *pmd = pmd_offset(pud, addr); | ||
632 | unsigned long next; | ||
633 | |||
634 | do { | ||
628 | /* | 635 | /* |
629 | * No need to loop; pte's aren't interested in the | 636 | * With LPAE, we must loop over to map |
630 | * individual L1 entries. | 637 | * all the pmds for the given range. |
631 | */ | 638 | */ |
632 | alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type); | 639 | next = pmd_addr_end(addr, end); |
633 | } | 640 | |
641 | /* | ||
642 | * Try a section mapping - addr, next and phys must all be | ||
643 | * aligned to a section boundary. | ||
644 | */ | ||
645 | if (type->prot_sect && | ||
646 | ((addr | next | phys) & ~SECTION_MASK) == 0) { | ||
647 | map_init_section(pmd, addr, next, phys, type); | ||
648 | } else { | ||
649 | alloc_init_pte(pmd, addr, next, | ||
650 | __phys_to_pfn(phys), type); | ||
651 | } | ||
652 | |||
653 | phys += next - addr; | ||
654 | |||
655 | } while (pmd++, addr = next, addr != end); | ||
634 | } | 656 | } |
635 | 657 | ||
636 | static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, | 658 | static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, |
@@ -641,7 +663,7 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, | |||
641 | 663 | ||
642 | do { | 664 | do { |
643 | next = pud_addr_end(addr, end); | 665 | next = pud_addr_end(addr, end); |
644 | alloc_init_section(pud, addr, next, phys, type); | 666 | alloc_init_pmd(pud, addr, next, phys, type); |
645 | phys += next - addr; | 667 | phys += next - addr; |
646 | } while (pud++, addr = next, addr != end); | 668 | } while (pud++, addr = next, addr != end); |
647 | } | 669 | } |
@@ -1256,6 +1278,7 @@ void __init paging_init(struct machine_desc *mdesc) | |||
1256 | dma_contiguous_remap(); | 1278 | dma_contiguous_remap(); |
1257 | devicemaps_init(mdesc); | 1279 | devicemaps_init(mdesc); |
1258 | kmap_init(); | 1280 | kmap_init(); |
1281 | tcm_init(); | ||
1259 | 1282 | ||
1260 | top_pmd = pmd_off_k(0xffff0000); | 1283 | top_pmd = pmd_off_k(0xffff0000); |
1261 | 1284 | ||
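The mmu.c rework above splits the old alloc_init_section() into map_init_section() plus an alloc_init_pmd() loop that steps with pmd_addr_end(), so with LPAE every pmd in the range gets its own section-versus-pte decision. Below is a minimal userspace sketch of that loop shape, assuming 1MB sections and 2MB pmd coverage; the helpers suffixed _stub are stand-ins for illustration, not kernel APIs.

#include <stdio.h>
#include <stdint.h>

#define SECTION_SIZE	(1UL << 20)		/* 1MB section */
#define SECTION_MASK	(~(SECTION_SIZE - 1))
#define PMD_SIZE	(2UL << 20)		/* one pmd spans 2MB */

/* Clamp the next pmd boundary to the end of the requested range. */
static uint64_t pmd_addr_end_stub(uint64_t addr, uint64_t end)
{
	uint64_t next = (addr + PMD_SIZE) & ~(PMD_SIZE - 1);
	return next < end ? next : end;
}

static void map_range_stub(uint64_t addr, uint64_t end, uint64_t phys)
{
	do {
		uint64_t next = pmd_addr_end_stub(addr, end);

		/* Section mapping only if addr, next and phys are all aligned. */
		if (((addr | next | phys) & ~SECTION_MASK) == 0)
			printf("section: %#llx-%#llx -> %#llx\n",
			       (unsigned long long)addr,
			       (unsigned long long)next,
			       (unsigned long long)phys);
		else
			printf("ptes:    %#llx-%#llx -> %#llx\n",
			       (unsigned long long)addr,
			       (unsigned long long)next,
			       (unsigned long long)phys);

		phys += next - addr;
		addr = next;
	} while (addr != end);
}

int main(void)
{
	/* Range from 1MB into a pmd up to an unaligned end: section steps, then a pte tail. */
	map_range_stub(0x40100000ULL, 0x40580000ULL, 0x80100000ULL);
	return 0;
}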
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S index dc5de5d53f20..fde2d2a794cf 100644 --- a/arch/arm/mm/proc-arm740.S +++ b/arch/arm/mm/proc-arm740.S | |||
@@ -77,24 +77,27 @@ __arm740_setup: | |||
77 | mcr p15, 0, r0, c6, c0 @ set area 0, default | 77 | mcr p15, 0, r0, c6, c0 @ set area 0, default |
78 | 78 | ||
79 | ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM | 79 | ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM |
80 | ldr r1, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB) | 80 | ldr r3, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB) |
81 | mov r2, #10 @ 11 is the minimum (4KB) | 81 | mov r4, #10 @ 11 is the minimum (4KB) |
82 | 1: add r2, r2, #1 @ area size *= 2 | 82 | 1: add r4, r4, #1 @ area size *= 2 |
83 | mov r1, r1, lsr #1 | 83 | movs r3, r3, lsr #1 |
84 | bne 1b @ count not zero r-shift | 84 | bne 1b @ count not zero r-shift |
85 | orr r0, r0, r2, lsl #1 @ the area register value | 85 | orr r0, r0, r4, lsl #1 @ the area register value |
86 | orr r0, r0, #1 @ set enable bit | 86 | orr r0, r0, #1 @ set enable bit |
87 | mcr p15, 0, r0, c6, c1 @ set area 1, RAM | 87 | mcr p15, 0, r0, c6, c1 @ set area 1, RAM |
88 | 88 | ||
89 | ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH | 89 | ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH |
90 | ldr r1, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB) | 90 | ldr r3, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB) |
91 | mov r2, #10 @ 11 is the minimum (4KB) | 91 | cmp r3, #0 |
92 | 1: add r2, r2, #1 @ area size *= 2 | 92 | moveq r0, #0 |
93 | mov r1, r1, lsr #1 | 93 | beq 2f |
94 | mov r4, #10 @ 11 is the minimum (4KB) | ||
95 | 1: add r4, r4, #1 @ area size *= 2 | ||
96 | movs r3, r3, lsr #1 | ||
94 | bne 1b @ count not zero r-shift | 97 | bne 1b @ count not zero r-shift |
95 | orr r0, r0, r2, lsl #1 @ the area register value | 98 | orr r0, r0, r4, lsl #1 @ the area register value |
96 | orr r0, r0, #1 @ set enable bit | 99 | orr r0, r0, #1 @ set enable bit |
97 | mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH | 100 | 2: mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH |
98 | 101 | ||
99 | mov r0, #0x06 | 102 | mov r0, #0x06 |
100 | mcr p15, 0, r0, c2, c0 @ Region 1&2 cacheable | 103 | mcr p15, 0, r0, c2, c0 @ Region 1&2 cacheable |
@@ -137,13 +140,14 @@ __arm740_proc_info: | |||
137 | .long 0x41807400 | 140 | .long 0x41807400 |
138 | .long 0xfffffff0 | 141 | .long 0xfffffff0 |
139 | .long 0 | 142 | .long 0 |
143 | .long 0 | ||
140 | b __arm740_setup | 144 | b __arm740_setup |
141 | .long cpu_arch_name | 145 | .long cpu_arch_name |
142 | .long cpu_elf_name | 146 | .long cpu_elf_name |
143 | .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | 147 | .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_26BIT |
144 | .long cpu_arm740_name | 148 | .long cpu_arm740_name |
145 | .long arm740_processor_functions | 149 | .long arm740_processor_functions |
146 | .long 0 | 150 | .long 0 |
147 | .long 0 | 151 | .long 0 |
148 | .long v3_cache_fns @ cache model | 152 | .long v4_cache_fns @ cache model |
149 | .size __arm740_proc_info, . - __arm740_proc_info | 153 | .size __arm740_proc_info, . - __arm740_proc_info |
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 2c3b9421ab5e..2556cf1c2da1 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S | |||
@@ -387,7 +387,7 @@ ENTRY(cpu_arm920_set_pte_ext) | |||
387 | /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ | 387 | /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ |
388 | .globl cpu_arm920_suspend_size | 388 | .globl cpu_arm920_suspend_size |
389 | .equ cpu_arm920_suspend_size, 4 * 3 | 389 | .equ cpu_arm920_suspend_size, 4 * 3 |
390 | #ifdef CONFIG_PM_SLEEP | 390 | #ifdef CONFIG_ARM_CPU_SUSPEND |
391 | ENTRY(cpu_arm920_do_suspend) | 391 | ENTRY(cpu_arm920_do_suspend) |
392 | stmfd sp!, {r4 - r6, lr} | 392 | stmfd sp!, {r4 - r6, lr} |
393 | mrc p15, 0, r4, c13, c0, 0 @ PID | 393 | mrc p15, 0, r4, c13, c0, 0 @ PID |
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index f1803f7e2972..344c8a548cc0 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S | |||
@@ -402,7 +402,7 @@ ENTRY(cpu_arm926_set_pte_ext) | |||
402 | /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ | 402 | /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ |
403 | .globl cpu_arm926_suspend_size | 403 | .globl cpu_arm926_suspend_size |
404 | .equ cpu_arm926_suspend_size, 4 * 3 | 404 | .equ cpu_arm926_suspend_size, 4 * 3 |
405 | #ifdef CONFIG_PM_SLEEP | 405 | #ifdef CONFIG_ARM_CPU_SUSPEND |
406 | ENTRY(cpu_arm926_do_suspend) | 406 | ENTRY(cpu_arm926_do_suspend) |
407 | stmfd sp!, {r4 - r6, lr} | 407 | stmfd sp!, {r4 - r6, lr} |
408 | mrc p15, 0, r4, c13, c0, 0 @ PID | 408 | mrc p15, 0, r4, c13, c0, 0 @ PID |
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index 82f9cdc751d6..0b60dd3d742a 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S | |||
@@ -350,7 +350,7 @@ ENTRY(cpu_mohawk_set_pte_ext) | |||
350 | 350 | ||
351 | .globl cpu_mohawk_suspend_size | 351 | .globl cpu_mohawk_suspend_size |
352 | .equ cpu_mohawk_suspend_size, 4 * 6 | 352 | .equ cpu_mohawk_suspend_size, 4 * 6 |
353 | #ifdef CONFIG_PM_SLEEP | 353 | #ifdef CONFIG_ARM_CPU_SUSPEND |
354 | ENTRY(cpu_mohawk_do_suspend) | 354 | ENTRY(cpu_mohawk_do_suspend) |
355 | stmfd sp!, {r4 - r9, lr} | 355 | stmfd sp!, {r4 - r9, lr} |
356 | mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode | 356 | mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode |
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index 3aa0da11fd84..d92dfd081429 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S | |||
@@ -172,7 +172,7 @@ ENTRY(cpu_sa1100_set_pte_ext) | |||
172 | 172 | ||
173 | .globl cpu_sa1100_suspend_size | 173 | .globl cpu_sa1100_suspend_size |
174 | .equ cpu_sa1100_suspend_size, 4 * 3 | 174 | .equ cpu_sa1100_suspend_size, 4 * 3 |
175 | #ifdef CONFIG_PM_SLEEP | 175 | #ifdef CONFIG_ARM_CPU_SUSPEND |
176 | ENTRY(cpu_sa1100_do_suspend) | 176 | ENTRY(cpu_sa1100_do_suspend) |
177 | stmfd sp!, {r4 - r6, lr} | 177 | stmfd sp!, {r4 - r6, lr} |
178 | mrc p15, 0, r4, c3, c0, 0 @ domain ID | 178 | mrc p15, 0, r4, c3, c0, 0 @ domain ID |
diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c index 3e6210b4d6d4..054b491ff764 100644 --- a/arch/arm/mm/proc-syms.c +++ b/arch/arm/mm/proc-syms.c | |||
@@ -17,7 +17,9 @@ | |||
17 | 17 | ||
18 | #ifndef MULTI_CPU | 18 | #ifndef MULTI_CPU |
19 | EXPORT_SYMBOL(cpu_dcache_clean_area); | 19 | EXPORT_SYMBOL(cpu_dcache_clean_area); |
20 | #ifdef CONFIG_MMU | ||
20 | EXPORT_SYMBOL(cpu_set_pte_ext); | 21 | EXPORT_SYMBOL(cpu_set_pte_ext); |
22 | #endif | ||
21 | #else | 23 | #else |
22 | EXPORT_SYMBOL(processor); | 24 | EXPORT_SYMBOL(processor); |
23 | #endif | 25 | #endif |
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index bcaaa8de9325..5c07ee4fe3eb 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S | |||
@@ -138,7 +138,7 @@ ENTRY(cpu_v6_set_pte_ext) | |||
138 | /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */ | 138 | /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */ |
139 | .globl cpu_v6_suspend_size | 139 | .globl cpu_v6_suspend_size |
140 | .equ cpu_v6_suspend_size, 4 * 6 | 140 | .equ cpu_v6_suspend_size, 4 * 6 |
141 | #ifdef CONFIG_PM_SLEEP | 141 | #ifdef CONFIG_ARM_CPU_SUSPEND |
142 | ENTRY(cpu_v6_do_suspend) | 142 | ENTRY(cpu_v6_do_suspend) |
143 | stmfd sp!, {r4 - r9, lr} | 143 | stmfd sp!, {r4 - r9, lr} |
144 | mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID | 144 | mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID |
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 3a3c015f8d5c..f584d3f5b37c 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
@@ -420,7 +420,7 @@ __v7_pj4b_proc_info: | |||
420 | __v7_ca7mp_proc_info: | 420 | __v7_ca7mp_proc_info: |
421 | .long 0x410fc070 | 421 | .long 0x410fc070 |
422 | .long 0xff0ffff0 | 422 | .long 0xff0ffff0 |
423 | __v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV | 423 | __v7_proc __v7_ca7mp_setup |
424 | .size __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info | 424 | .size __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info |
425 | 425 | ||
426 | /* | 426 | /* |
@@ -430,10 +430,25 @@ __v7_ca7mp_proc_info: | |||
430 | __v7_ca15mp_proc_info: | 430 | __v7_ca15mp_proc_info: |
431 | .long 0x410fc0f0 | 431 | .long 0x410fc0f0 |
432 | .long 0xff0ffff0 | 432 | .long 0xff0ffff0 |
433 | __v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV | 433 | __v7_proc __v7_ca15mp_setup |
434 | .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info | 434 | .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info |
435 | 435 | ||
436 | /* | 436 | /* |
437 | * Qualcomm Inc. Krait processors. | ||
438 | */ | ||
439 | .type __krait_proc_info, #object | ||
440 | __krait_proc_info: | ||
441 | .long 0x510f0400 @ Required ID value | ||
442 | .long 0xff0ffc00 @ Mask for ID | ||
443 | /* | ||
444 | * Some Krait processors don't indicate support for SDIV and UDIV | ||
445 | * instructions in the ARM instruction set, even though they actually | ||
446 | * do support them. | ||
447 | */ | ||
448 | __v7_proc __v7_setup, hwcaps = HWCAP_IDIV | ||
449 | .size __krait_proc_info, . - __krait_proc_info | ||
450 | |||
451 | /* | ||
437 | * Match any ARMv7 processor core. | 452 | * Match any ARMv7 processor core. |
438 | */ | 453 | */ |
439 | .type __v7_proc_info, #object | 454 | .type __v7_proc_info, #object |
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index eb93d6487f35..e8efd83b6f25 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S | |||
@@ -413,7 +413,7 @@ ENTRY(cpu_xsc3_set_pte_ext) | |||
413 | 413 | ||
414 | .globl cpu_xsc3_suspend_size | 414 | .globl cpu_xsc3_suspend_size |
415 | .equ cpu_xsc3_suspend_size, 4 * 6 | 415 | .equ cpu_xsc3_suspend_size, 4 * 6 |
416 | #ifdef CONFIG_PM_SLEEP | 416 | #ifdef CONFIG_ARM_CPU_SUSPEND |
417 | ENTRY(cpu_xsc3_do_suspend) | 417 | ENTRY(cpu_xsc3_do_suspend) |
418 | stmfd sp!, {r4 - r9, lr} | 418 | stmfd sp!, {r4 - r9, lr} |
419 | mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode | 419 | mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode |
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 25510361aa18..e766f889bfd6 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S | |||
@@ -528,7 +528,7 @@ ENTRY(cpu_xscale_set_pte_ext) | |||
528 | 528 | ||
529 | .globl cpu_xscale_suspend_size | 529 | .globl cpu_xscale_suspend_size |
530 | .equ cpu_xscale_suspend_size, 4 * 6 | 530 | .equ cpu_xscale_suspend_size, 4 * 6 |
531 | #ifdef CONFIG_PM_SLEEP | 531 | #ifdef CONFIG_ARM_CPU_SUSPEND |
532 | ENTRY(cpu_xscale_do_suspend) | 532 | ENTRY(cpu_xscale_do_suspend) |
533 | stmfd sp!, {r4 - r9, lr} | 533 | stmfd sp!, {r4 - r9, lr} |
534 | mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode | 534 | mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode |
diff --git a/arch/arm/kernel/tcm.h b/arch/arm/mm/tcm.h index 8015ad434a40..8015ad434a40 100644 --- a/arch/arm/kernel/tcm.h +++ b/arch/arm/mm/tcm.h | |||
diff --git a/arch/avr32/include/asm/io.h b/arch/avr32/include/asm/io.h index cf60d0a9f176..fc6483f83ccc 100644 --- a/arch/avr32/include/asm/io.h +++ b/arch/avr32/include/asm/io.h | |||
@@ -165,6 +165,10 @@ BUILDIO_IOPORT(l, u32) | |||
165 | #define readw_be __raw_readw | 165 | #define readw_be __raw_readw |
166 | #define readl_be __raw_readl | 166 | #define readl_be __raw_readl |
167 | 167 | ||
168 | #define writeb_relaxed writeb | ||
169 | #define writew_relaxed writew | ||
170 | #define writel_relaxed writel | ||
171 | |||
168 | #define writeb_be __raw_writeb | 172 | #define writeb_be __raw_writeb |
169 | #define writew_be __raw_writew | 173 | #define writew_be __raw_writew |
170 | #define writel_be __raw_writel | 174 | #define writel_be __raw_writel |
diff --git a/arch/c6x/include/asm/irqflags.h b/arch/c6x/include/asm/irqflags.h index cf78e09e18c3..2c71d5634ec2 100644 --- a/arch/c6x/include/asm/irqflags.h +++ b/arch/c6x/include/asm/irqflags.h | |||
@@ -27,7 +27,7 @@ static inline unsigned long arch_local_save_flags(void) | |||
27 | /* set interrupt enabled status */ | 27 | /* set interrupt enabled status */ |
28 | static inline void arch_local_irq_restore(unsigned long flags) | 28 | static inline void arch_local_irq_restore(unsigned long flags) |
29 | { | 29 | { |
30 | asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags)); | 30 | asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags) : "memory"); |
31 | } | 31 | } |
32 | 32 | ||
33 | /* unconditionally enable interrupts */ | 33 | /* unconditionally enable interrupts */ |
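The c6x change above adds a "memory" clobber to arch_local_irq_restore(), so the CSR write also acts as a compiler barrier: the compiler may not cache memory values or reorder memory accesses across the point where interrupts are restored. A minimal, compiler-generic sketch of the same idiom follows, using empty GCC inline asm rather than the C6x mvc instruction; the function and variable names are illustrative.

#include <stdio.h>

static int shared_flag;

static inline void compiler_barrier(void)
{
	/*
	 * No instruction is emitted; the "memory" clobber alone tells the
	 * compiler that memory may have changed, so cached values must be
	 * discarded and pending stores completed before this point.
	 */
	asm volatile("" ::: "memory");
}

int main(void)
{
	shared_flag = 1;
	compiler_barrier();	/* the store above cannot be moved past this */
	printf("flag=%d\n", shared_flag);
	return 0;
}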
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 9a02f71c6b1f..da18c8e05217 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig | |||
@@ -110,6 +110,7 @@ config DMI | |||
110 | 110 | ||
111 | config EFI | 111 | config EFI |
112 | bool | 112 | bool |
113 | select UCS2_STRING | ||
113 | default y | 114 | default y |
114 | 115 | ||
115 | config SCHED_OMIT_FRAME_POINTER | 116 | config SCHED_OMIT_FRAME_POINTER |
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c index 77597e5ea60a..79521d5499f9 100644 --- a/arch/ia64/kernel/palinfo.c +++ b/arch/ia64/kernel/palinfo.c | |||
@@ -849,17 +849,6 @@ static palinfo_entry_t palinfo_entries[]={ | |||
849 | 849 | ||
850 | #define NR_PALINFO_ENTRIES (int) ARRAY_SIZE(palinfo_entries) | 850 | #define NR_PALINFO_ENTRIES (int) ARRAY_SIZE(palinfo_entries) |
851 | 851 | ||
852 | /* | ||
853 | * this array is used to keep track of the proc entries we create. This is | ||
854 | * required in the module mode when we need to remove all entries. The procfs code | ||
855 | * does not do recursion of deletion | ||
856 | * | ||
857 | * Notes: | ||
858 | * - +1 accounts for the cpuN directory entry in /proc/pal | ||
859 | */ | ||
860 | #define NR_PALINFO_PROC_ENTRIES (NR_CPUS*(NR_PALINFO_ENTRIES+1)) | ||
861 | |||
862 | static struct proc_dir_entry *palinfo_proc_entries[NR_PALINFO_PROC_ENTRIES]; | ||
863 | static struct proc_dir_entry *palinfo_dir; | 852 | static struct proc_dir_entry *palinfo_dir; |
864 | 853 | ||
865 | /* | 854 | /* |
@@ -971,60 +960,32 @@ palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, voi | |||
971 | static void __cpuinit | 960 | static void __cpuinit |
972 | create_palinfo_proc_entries(unsigned int cpu) | 961 | create_palinfo_proc_entries(unsigned int cpu) |
973 | { | 962 | { |
974 | # define CPUSTR "cpu%d" | ||
975 | |||
976 | pal_func_cpu_u_t f; | 963 | pal_func_cpu_u_t f; |
977 | struct proc_dir_entry **pdir; | ||
978 | struct proc_dir_entry *cpu_dir; | 964 | struct proc_dir_entry *cpu_dir; |
979 | int j; | 965 | int j; |
980 | char cpustr[sizeof(CPUSTR)]; | 966 | char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */ |
981 | 967 | sprintf(cpustr, "cpu%d", cpu); | |
982 | |||
983 | /* | ||
984 | * we keep track of created entries in a depth-first order for | ||
985 | * cleanup purposes. Each entry is stored into palinfo_proc_entries | ||
986 | */ | ||
987 | sprintf(cpustr,CPUSTR, cpu); | ||
988 | 968 | ||
989 | cpu_dir = proc_mkdir(cpustr, palinfo_dir); | 969 | cpu_dir = proc_mkdir(cpustr, palinfo_dir); |
970 | if (!cpu_dir) | ||
971 | return; | ||
990 | 972 | ||
991 | f.req_cpu = cpu; | 973 | f.req_cpu = cpu; |
992 | 974 | ||
993 | /* | ||
994 | * Compute the location to store per cpu entries | ||
995 | * We dont store the top level entry in this list, but | ||
996 | * remove it finally after removing all cpu entries. | ||
997 | */ | ||
998 | pdir = &palinfo_proc_entries[cpu*(NR_PALINFO_ENTRIES+1)]; | ||
999 | *pdir++ = cpu_dir; | ||
1000 | for (j=0; j < NR_PALINFO_ENTRIES; j++) { | 975 | for (j=0; j < NR_PALINFO_ENTRIES; j++) { |
1001 | f.func_id = j; | 976 | f.func_id = j; |
1002 | *pdir = create_proc_read_entry( | 977 | create_proc_read_entry( |
1003 | palinfo_entries[j].name, 0, cpu_dir, | 978 | palinfo_entries[j].name, 0, cpu_dir, |
1004 | palinfo_read_entry, (void *)f.value); | 979 | palinfo_read_entry, (void *)f.value); |
1005 | pdir++; | ||
1006 | } | 980 | } |
1007 | } | 981 | } |
1008 | 982 | ||
1009 | static void | 983 | static void |
1010 | remove_palinfo_proc_entries(unsigned int hcpu) | 984 | remove_palinfo_proc_entries(unsigned int hcpu) |
1011 | { | 985 | { |
1012 | int j; | 986 | char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */ |
1013 | struct proc_dir_entry *cpu_dir, **pdir; | 987 | sprintf(cpustr, "cpu%d", hcpu); |
1014 | 988 | remove_proc_subtree(cpustr, palinfo_dir); | |
1015 | pdir = &palinfo_proc_entries[hcpu*(NR_PALINFO_ENTRIES+1)]; | ||
1016 | cpu_dir = *pdir; | ||
1017 | *pdir++=NULL; | ||
1018 | for (j=0; j < (NR_PALINFO_ENTRIES); j++) { | ||
1019 | if ((*pdir)) { | ||
1020 | remove_proc_entry ((*pdir)->name, cpu_dir); | ||
1021 | *pdir ++= NULL; | ||
1022 | } | ||
1023 | } | ||
1024 | |||
1025 | if (cpu_dir) { | ||
1026 | remove_proc_entry(cpu_dir->name, palinfo_dir); | ||
1027 | } | ||
1028 | } | 989 | } |
1029 | 990 | ||
1030 | static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb, | 991 | static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb, |
@@ -1058,6 +1019,8 @@ palinfo_init(void) | |||
1058 | 1019 | ||
1059 | printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION); | 1020 | printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION); |
1060 | palinfo_dir = proc_mkdir("pal", NULL); | 1021 | palinfo_dir = proc_mkdir("pal", NULL); |
1022 | if (!palinfo_dir) | ||
1023 | return -ENOMEM; | ||
1061 | 1024 | ||
1062 | /* Create palinfo dirs in /proc for all online cpus */ | 1025 | /* Create palinfo dirs in /proc for all online cpus */ |
1063 | for_each_online_cpu(i) { | 1026 | for_each_online_cpu(i) { |
@@ -1073,22 +1036,8 @@ palinfo_init(void) | |||
1073 | static void __exit | 1036 | static void __exit |
1074 | palinfo_exit(void) | 1037 | palinfo_exit(void) |
1075 | { | 1038 | { |
1076 | int i = 0; | ||
1077 | |||
1078 | /* remove all nodes: depth first pass. Could optimize this */ | ||
1079 | for_each_online_cpu(i) { | ||
1080 | remove_palinfo_proc_entries(i); | ||
1081 | } | ||
1082 | |||
1083 | /* | ||
1084 | * Remove the top level entry finally | ||
1085 | */ | ||
1086 | remove_proc_entry(palinfo_dir->name, NULL); | ||
1087 | |||
1088 | /* | ||
1089 | * Unregister from cpu notifier callbacks | ||
1090 | */ | ||
1091 | unregister_hotcpu_notifier(&palinfo_cpu_notifier); | 1039 | unregister_hotcpu_notifier(&palinfo_cpu_notifier); |
1040 | remove_proc_subtree("pal", NULL); | ||
1092 | } | 1041 | } |
1093 | 1042 | ||
1094 | module_init(palinfo_init); | 1043 | module_init(palinfo_init); |
diff --git a/arch/m68k/include/asm/gpio.h b/arch/m68k/include/asm/gpio.h index 4395ffc51fdb..8cc83431805b 100644 --- a/arch/m68k/include/asm/gpio.h +++ b/arch/m68k/include/asm/gpio.h | |||
@@ -86,4 +86,24 @@ static inline int gpio_cansleep(unsigned gpio) | |||
86 | return gpio < MCFGPIO_PIN_MAX ? 0 : __gpio_cansleep(gpio); | 86 | return gpio < MCFGPIO_PIN_MAX ? 0 : __gpio_cansleep(gpio); |
87 | } | 87 | } |
88 | 88 | ||
89 | static inline int gpio_request_one(unsigned gpio, unsigned long flags, const char *label) | ||
90 | { | ||
91 | int err; | ||
92 | |||
93 | err = gpio_request(gpio, label); | ||
94 | if (err) | ||
95 | return err; | ||
96 | |||
97 | if (flags & GPIOF_DIR_IN) | ||
98 | err = gpio_direction_input(gpio); | ||
99 | else | ||
100 | err = gpio_direction_output(gpio, | ||
101 | (flags & GPIOF_INIT_HIGH) ? 1 : 0); | ||
102 | |||
103 | if (err) | ||
104 | gpio_free(gpio); | ||
105 | |||
106 | return err; | ||
107 | } | ||
108 | |||
89 | #endif | 109 | #endif |
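The m68k hunk above adds a local gpio_request_one() helper with the usual gpiolib semantics: request the line, apply the direction encoded in the flags, and free the line again if configuration fails. A hedged kernel-style usage sketch follows; GPIO number 42 and the "card-detect" label are invented for illustration, not real board data.

#include <linux/gpio.h>

/* Hypothetical caller showing request + configure in one call. */
static int example_read_card_detect(void)
{
	int err, level;

	err = gpio_request_one(42, GPIOF_DIR_IN, "card-detect");
	if (err)
		return err;		/* the helper already freed the line on failure */

	level = gpio_get_value(42);	/* sample the input */
	gpio_free(42);

	return level;
}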
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index cd2e21ff562a..51244bf97271 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig | |||
@@ -18,7 +18,7 @@ config MIPS | |||
18 | select HAVE_KRETPROBES | 18 | select HAVE_KRETPROBES |
19 | select HAVE_DEBUG_KMEMLEAK | 19 | select HAVE_DEBUG_KMEMLEAK |
20 | select ARCH_BINFMT_ELF_RANDOMIZE_PIE | 20 | select ARCH_BINFMT_ELF_RANDOMIZE_PIE |
21 | select HAVE_ARCH_TRANSPARENT_HUGEPAGE | 21 | select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT |
22 | select RTC_LIB if !MACH_LOONGSON | 22 | select RTC_LIB if !MACH_LOONGSON |
23 | select GENERIC_ATOMIC64 if !64BIT | 23 | select GENERIC_ATOMIC64 if !64BIT |
24 | select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE | 24 | select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE |
@@ -657,7 +657,7 @@ config SNI_RM | |||
657 | bool "SNI RM200/300/400" | 657 | bool "SNI RM200/300/400" |
658 | select FW_ARC if CPU_LITTLE_ENDIAN | 658 | select FW_ARC if CPU_LITTLE_ENDIAN |
659 | select FW_ARC32 if CPU_LITTLE_ENDIAN | 659 | select FW_ARC32 if CPU_LITTLE_ENDIAN |
660 | select SNIPROM if CPU_BIG_ENDIAN | 660 | select FW_SNIPROM if CPU_BIG_ENDIAN |
661 | select ARCH_MAY_HAVE_PC_FDC | 661 | select ARCH_MAY_HAVE_PC_FDC |
662 | select BOOT_ELF32 | 662 | select BOOT_ELF32 |
663 | select CEVT_R4K | 663 | select CEVT_R4K |
@@ -1144,7 +1144,7 @@ config DEFAULT_SGI_PARTITION | |||
1144 | config FW_ARC32 | 1144 | config FW_ARC32 |
1145 | bool | 1145 | bool |
1146 | 1146 | ||
1147 | config SNIPROM | 1147 | config FW_SNIPROM |
1148 | bool | 1148 | bool |
1149 | 1149 | ||
1150 | config BOOT_ELF32 | 1150 | config BOOT_ELF32 |
@@ -1493,7 +1493,6 @@ config CPU_XLP | |||
1493 | select CPU_SUPPORTS_32BIT_KERNEL | 1493 | select CPU_SUPPORTS_32BIT_KERNEL |
1494 | select CPU_SUPPORTS_64BIT_KERNEL | 1494 | select CPU_SUPPORTS_64BIT_KERNEL |
1495 | select CPU_SUPPORTS_HIGHMEM | 1495 | select CPU_SUPPORTS_HIGHMEM |
1496 | select CPU_HAS_LLSC | ||
1497 | select WEAK_ORDERING | 1496 | select WEAK_ORDERING |
1498 | select WEAK_REORDERING_BEYOND_LLSC | 1497 | select WEAK_REORDERING_BEYOND_LLSC |
1499 | select CPU_HAS_PREFETCH | 1498 | select CPU_HAS_PREFETCH |
diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c index ed1949c29508..9aa7d44898ed 100644 --- a/arch/mips/bcm63xx/boards/board_bcm963xx.c +++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c | |||
@@ -745,10 +745,7 @@ void __init board_prom_init(void) | |||
745 | strcpy(cfe_version, "unknown"); | 745 | strcpy(cfe_version, "unknown"); |
746 | printk(KERN_INFO PFX "CFE version: %s\n", cfe_version); | 746 | printk(KERN_INFO PFX "CFE version: %s\n", cfe_version); |
747 | 747 | ||
748 | if (bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET)) { | 748 | bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET); |
749 | printk(KERN_ERR PFX "invalid nvram checksum\n"); | ||
750 | return; | ||
751 | } | ||
752 | 749 | ||
753 | board_name = bcm63xx_nvram_get_name(); | 750 | board_name = bcm63xx_nvram_get_name(); |
754 | /* find board by name */ | 751 | /* find board by name */ |
diff --git a/arch/mips/bcm63xx/nvram.c b/arch/mips/bcm63xx/nvram.c index 620611680839..a4b8864f9307 100644 --- a/arch/mips/bcm63xx/nvram.c +++ b/arch/mips/bcm63xx/nvram.c | |||
@@ -38,7 +38,7 @@ struct bcm963xx_nvram { | |||
38 | static struct bcm963xx_nvram nvram; | 38 | static struct bcm963xx_nvram nvram; |
39 | static int mac_addr_used; | 39 | static int mac_addr_used; |
40 | 40 | ||
41 | int __init bcm63xx_nvram_init(void *addr) | 41 | void __init bcm63xx_nvram_init(void *addr) |
42 | { | 42 | { |
43 | unsigned int check_len; | 43 | unsigned int check_len; |
44 | u32 crc, expected_crc; | 44 | u32 crc, expected_crc; |
@@ -60,9 +60,8 @@ int __init bcm63xx_nvram_init(void *addr) | |||
60 | crc = crc32_le(~0, (u8 *)&nvram, check_len); | 60 | crc = crc32_le(~0, (u8 *)&nvram, check_len); |
61 | 61 | ||
62 | if (crc != expected_crc) | 62 | if (crc != expected_crc) |
63 | return -EINVAL; | 63 | pr_warn("nvram checksum failed, contents may be invalid (expected %08x, got %08x)\n", |
64 | 64 | expected_crc, crc); | |
65 | return 0; | ||
66 | } | 65 | } |
67 | 66 | ||
68 | u8 *bcm63xx_nvram_get_name(void) | 67 | u8 *bcm63xx_nvram_get_name(void) |
diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c index 314231be788c..35e18e98beb9 100644 --- a/arch/mips/bcm63xx/setup.c +++ b/arch/mips/bcm63xx/setup.c | |||
@@ -157,4 +157,4 @@ int __init bcm63xx_register_devices(void) | |||
157 | return board_register_devices(); | 157 | return board_register_devices(); |
158 | } | 158 | } |
159 | 159 | ||
160 | device_initcall(bcm63xx_register_devices); | 160 | arch_initcall(bcm63xx_register_devices); |
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index c594a3d4f743..b0baa299f899 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c | |||
@@ -174,7 +174,10 @@ static int octeon_kexec_prepare(struct kimage *image) | |||
174 | 174 | ||
175 | static void octeon_generic_shutdown(void) | 175 | static void octeon_generic_shutdown(void) |
176 | { | 176 | { |
177 | int cpu, i; | 177 | int i; |
178 | #ifdef CONFIG_SMP | ||
179 | int cpu; | ||
180 | #endif | ||
178 | struct cvmx_bootmem_desc *bootmem_desc; | 181 | struct cvmx_bootmem_desc *bootmem_desc; |
179 | void *named_block_array_ptr; | 182 | void *named_block_array_ptr; |
180 | 183 | ||
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h index 62d6a3b4d3b7..4e0b6bc1165e 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h | |||
@@ -9,10 +9,8 @@ | |||
9 | * | 9 | * |
10 | * Initialized the local nvram copy from the target address and checks | 10 | * Initialized the local nvram copy from the target address and checks |
11 | * its checksum. | 11 | * its checksum. |
12 | * | ||
13 | * Returns 0 on success. | ||
14 | */ | 12 | */ |
15 | int __init bcm63xx_nvram_init(void *nvram); | 13 | void bcm63xx_nvram_init(void *nvram); |
16 | 14 | ||
17 | /** | 15 | /** |
18 | * bcm63xx_nvram_get_name() - returns the board name according to nvram | 16 | * bcm63xx_nvram_get_name() - returns the board name according to nvram |
diff --git a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h index d9c828419037..193c0912d38e 100644 --- a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h | |||
@@ -28,11 +28,7 @@ | |||
28 | /* #define cpu_has_prefetch ? */ | 28 | /* #define cpu_has_prefetch ? */ |
29 | #define cpu_has_mcheck 1 | 29 | #define cpu_has_mcheck 1 |
30 | /* #define cpu_has_ejtag ? */ | 30 | /* #define cpu_has_ejtag ? */ |
31 | #ifdef CONFIG_CPU_HAS_LLSC | ||
32 | #define cpu_has_llsc 1 | 31 | #define cpu_has_llsc 1 |
33 | #else | ||
34 | #define cpu_has_llsc 0 | ||
35 | #endif | ||
36 | /* #define cpu_has_vtag_icache ? */ | 32 | /* #define cpu_has_vtag_icache ? */ |
37 | /* #define cpu_has_dc_aliases ? */ | 33 | /* #define cpu_has_dc_aliases ? */ |
38 | /* #define cpu_has_ic_fills_f_dc ? */ | 34 | /* #define cpu_has_ic_fills_f_dc ? */ |
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 12b70c25906a..0da44d422f5b 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h | |||
@@ -1166,7 +1166,10 @@ do { \ | |||
1166 | unsigned int __dspctl; \ | 1166 | unsigned int __dspctl; \ |
1167 | \ | 1167 | \ |
1168 | __asm__ __volatile__( \ | 1168 | __asm__ __volatile__( \ |
1169 | " .set push \n" \ | ||
1170 | " .set dsp \n" \ | ||
1169 | " rddsp %0, %x1 \n" \ | 1171 | " rddsp %0, %x1 \n" \ |
1172 | " .set pop \n" \ | ||
1170 | : "=r" (__dspctl) \ | 1173 | : "=r" (__dspctl) \ |
1171 | : "i" (mask)); \ | 1174 | : "i" (mask)); \ |
1172 | __dspctl; \ | 1175 | __dspctl; \ |
@@ -1175,30 +1178,198 @@ do { \ | |||
1175 | #define wrdsp(val, mask) \ | 1178 | #define wrdsp(val, mask) \ |
1176 | do { \ | 1179 | do { \ |
1177 | __asm__ __volatile__( \ | 1180 | __asm__ __volatile__( \ |
1181 | " .set push \n" \ | ||
1182 | " .set dsp \n" \ | ||
1178 | " wrdsp %0, %x1 \n" \ | 1183 | " wrdsp %0, %x1 \n" \ |
1184 | " .set pop \n" \ | ||
1179 | : \ | 1185 | : \ |
1180 | : "r" (val), "i" (mask)); \ | 1186 | : "r" (val), "i" (mask)); \ |
1181 | } while (0) | 1187 | } while (0) |
1182 | 1188 | ||
1183 | #define mflo0() ({ long mflo0; __asm__("mflo %0, $ac0" : "=r" (mflo0)); mflo0;}) | 1189 | #define mflo0() \ |
1184 | #define mflo1() ({ long mflo1; __asm__("mflo %0, $ac1" : "=r" (mflo1)); mflo1;}) | 1190 | ({ \ |
1185 | #define mflo2() ({ long mflo2; __asm__("mflo %0, $ac2" : "=r" (mflo2)); mflo2;}) | 1191 | long mflo0; \ |
1186 | #define mflo3() ({ long mflo3; __asm__("mflo %0, $ac3" : "=r" (mflo3)); mflo3;}) | 1192 | __asm__( \ |
1187 | 1193 | " .set push \n" \ | |
1188 | #define mfhi0() ({ long mfhi0; __asm__("mfhi %0, $ac0" : "=r" (mfhi0)); mfhi0;}) | 1194 | " .set dsp \n" \ |
1189 | #define mfhi1() ({ long mfhi1; __asm__("mfhi %0, $ac1" : "=r" (mfhi1)); mfhi1;}) | 1195 | " mflo %0, $ac0 \n" \ |
1190 | #define mfhi2() ({ long mfhi2; __asm__("mfhi %0, $ac2" : "=r" (mfhi2)); mfhi2;}) | 1196 | " .set pop \n" \ |
1191 | #define mfhi3() ({ long mfhi3; __asm__("mfhi %0, $ac3" : "=r" (mfhi3)); mfhi3;}) | 1197 | : "=r" (mflo0)); \ |
1192 | 1198 | mflo0; \ | |
1193 | #define mtlo0(x) __asm__("mtlo %0, $ac0" ::"r" (x)) | 1199 | }) |
1194 | #define mtlo1(x) __asm__("mtlo %0, $ac1" ::"r" (x)) | 1200 | |
1195 | #define mtlo2(x) __asm__("mtlo %0, $ac2" ::"r" (x)) | 1201 | #define mflo1() \ |
1196 | #define mtlo3(x) __asm__("mtlo %0, $ac3" ::"r" (x)) | 1202 | ({ \ |
1197 | 1203 | long mflo1; \ | |
1198 | #define mthi0(x) __asm__("mthi %0, $ac0" ::"r" (x)) | 1204 | __asm__( \ |
1199 | #define mthi1(x) __asm__("mthi %0, $ac1" ::"r" (x)) | 1205 | " .set push \n" \ |
1200 | #define mthi2(x) __asm__("mthi %0, $ac2" ::"r" (x)) | 1206 | " .set dsp \n" \ |
1201 | #define mthi3(x) __asm__("mthi %0, $ac3" ::"r" (x)) | 1207 | " mflo %0, $ac1 \n" \ |
1208 | " .set pop \n" \ | ||
1209 | : "=r" (mflo1)); \ | ||
1210 | mflo1; \ | ||
1211 | }) | ||
1212 | |||
1213 | #define mflo2() \ | ||
1214 | ({ \ | ||
1215 | long mflo2; \ | ||
1216 | __asm__( \ | ||
1217 | " .set push \n" \ | ||
1218 | " .set dsp \n" \ | ||
1219 | " mflo %0, $ac2 \n" \ | ||
1220 | " .set pop \n" \ | ||
1221 | : "=r" (mflo2)); \ | ||
1222 | mflo2; \ | ||
1223 | }) | ||
1224 | |||
1225 | #define mflo3() \ | ||
1226 | ({ \ | ||
1227 | long mflo3; \ | ||
1228 | __asm__( \ | ||
1229 | " .set push \n" \ | ||
1230 | " .set dsp \n" \ | ||
1231 | " mflo %0, $ac3 \n" \ | ||
1232 | " .set pop \n" \ | ||
1233 | : "=r" (mflo3)); \ | ||
1234 | mflo3; \ | ||
1235 | }) | ||
1236 | |||
1237 | #define mfhi0() \ | ||
1238 | ({ \ | ||
1239 | long mfhi0; \ | ||
1240 | __asm__( \ | ||
1241 | " .set push \n" \ | ||
1242 | " .set dsp \n" \ | ||
1243 | " mfhi %0, $ac0 \n" \ | ||
1244 | " .set pop \n" \ | ||
1245 | : "=r" (mfhi0)); \ | ||
1246 | mfhi0; \ | ||
1247 | }) | ||
1248 | |||
1249 | #define mfhi1() \ | ||
1250 | ({ \ | ||
1251 | long mfhi1; \ | ||
1252 | __asm__( \ | ||
1253 | " .set push \n" \ | ||
1254 | " .set dsp \n" \ | ||
1255 | " mfhi %0, $ac1 \n" \ | ||
1256 | " .set pop \n" \ | ||
1257 | : "=r" (mfhi1)); \ | ||
1258 | mfhi1; \ | ||
1259 | }) | ||
1260 | |||
1261 | #define mfhi2() \ | ||
1262 | ({ \ | ||
1263 | long mfhi2; \ | ||
1264 | __asm__( \ | ||
1265 | " .set push \n" \ | ||
1266 | " .set dsp \n" \ | ||
1267 | " mfhi %0, $ac2 \n" \ | ||
1268 | " .set pop \n" \ | ||
1269 | : "=r" (mfhi2)); \ | ||
1270 | mfhi2; \ | ||
1271 | }) | ||
1272 | |||
1273 | #define mfhi3() \ | ||
1274 | ({ \ | ||
1275 | long mfhi3; \ | ||
1276 | __asm__( \ | ||
1277 | " .set push \n" \ | ||
1278 | " .set dsp \n" \ | ||
1279 | " mfhi %0, $ac3 \n" \ | ||
1280 | " .set pop \n" \ | ||
1281 | : "=r" (mfhi3)); \ | ||
1282 | mfhi3; \ | ||
1283 | }) | ||
1284 | |||
1285 | |||
1286 | #define mtlo0(x) \ | ||
1287 | ({ \ | ||
1288 | __asm__( \ | ||
1289 | " .set push \n" \ | ||
1290 | " .set dsp \n" \ | ||
1291 | " mtlo %0, $ac0 \n" \ | ||
1292 | " .set pop \n" \ | ||
1293 | : \ | ||
1294 | : "r" (x)); \ | ||
1295 | }) | ||
1296 | |||
1297 | #define mtlo1(x) \ | ||
1298 | ({ \ | ||
1299 | __asm__( \ | ||
1300 | " .set push \n" \ | ||
1301 | " .set dsp \n" \ | ||
1302 | " mtlo %0, $ac1 \n" \ | ||
1303 | " .set pop \n" \ | ||
1304 | : \ | ||
1305 | : "r" (x)); \ | ||
1306 | }) | ||
1307 | |||
1308 | #define mtlo2(x) \ | ||
1309 | ({ \ | ||
1310 | __asm__( \ | ||
1311 | " .set push \n" \ | ||
1312 | " .set dsp \n" \ | ||
1313 | " mtlo %0, $ac2 \n" \ | ||
1314 | " .set pop \n" \ | ||
1315 | : \ | ||
1316 | : "r" (x)); \ | ||
1317 | }) | ||
1318 | |||
1319 | #define mtlo3(x) \ | ||
1320 | ({ \ | ||
1321 | __asm__( \ | ||
1322 | " .set push \n" \ | ||
1323 | " .set dsp \n" \ | ||
1324 | " mtlo %0, $ac3 \n" \ | ||
1325 | " .set pop \n" \ | ||
1326 | : \ | ||
1327 | : "r" (x)); \ | ||
1328 | }) | ||
1329 | |||
1330 | #define mthi0(x) \ | ||
1331 | ({ \ | ||
1332 | __asm__( \ | ||
1333 | " .set push \n" \ | ||
1334 | " .set dsp \n" \ | ||
1335 | " mthi %0, $ac0 \n" \ | ||
1336 | " .set pop \n" \ | ||
1337 | : \ | ||
1338 | : "r" (x)); \ | ||
1339 | }) | ||
1340 | |||
1341 | #define mthi1(x) \ | ||
1342 | ({ \ | ||
1343 | __asm__( \ | ||
1344 | " .set push \n" \ | ||
1345 | " .set dsp \n" \ | ||
1346 | " mthi %0, $ac1 \n" \ | ||
1347 | " .set pop \n" \ | ||
1348 | : \ | ||
1349 | : "r" (x)); \ | ||
1350 | }) | ||
1351 | |||
1352 | #define mthi2(x) \ | ||
1353 | ({ \ | ||
1354 | __asm__( \ | ||
1355 | " .set push \n" \ | ||
1356 | " .set dsp \n" \ | ||
1357 | " mthi %0, $ac2 \n" \ | ||
1358 | " .set pop \n" \ | ||
1359 | : \ | ||
1360 | : "r" (x)); \ | ||
1361 | }) | ||
1362 | |||
1363 | #define mthi3(x) \ | ||
1364 | ({ \ | ||
1365 | __asm__( \ | ||
1366 | " .set push \n" \ | ||
1367 | " .set dsp \n" \ | ||
1368 | " mthi %0, $ac3 \n" \ | ||
1369 | " .set pop \n" \ | ||
1370 | : \ | ||
1371 | : "r" (x)); \ | ||
1372 | }) | ||
1202 | 1373 | ||
1203 | #else | 1374 | #else |
1204 | 1375 | ||
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index 99fc547af9d3..eab99e536b5c 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h | |||
@@ -31,7 +31,7 @@ | |||
31 | #define PAGE_SHIFT 16 | 31 | #define PAGE_SHIFT 16 |
32 | #endif | 32 | #endif |
33 | #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) | 33 | #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) |
34 | #define PAGE_MASK (~(PAGE_SIZE - 1)) | 34 | #define PAGE_MASK (~((1 << PAGE_SHIFT) - 1)) |
35 | 35 | ||
36 | #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT | 36 | #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT |
37 | #define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3) | 37 | #define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3) |
diff --git a/arch/mips/include/asm/signal.h b/arch/mips/include/asm/signal.h index 197f6367c201..8efe5a9e2c3e 100644 --- a/arch/mips/include/asm/signal.h +++ b/arch/mips/include/asm/signal.h | |||
@@ -21,6 +21,6 @@ | |||
21 | #include <asm/sigcontext.h> | 21 | #include <asm/sigcontext.h> |
22 | #include <asm/siginfo.h> | 22 | #include <asm/siginfo.h> |
23 | 23 | ||
24 | #define __ARCH_HAS_ODD_SIGACTION | 24 | #define __ARCH_HAS_IRIX_SIGACTION |
25 | 25 | ||
26 | #endif /* _ASM_SIGNAL_H */ | 26 | #endif /* _ASM_SIGNAL_H */ |
diff --git a/arch/mips/include/uapi/asm/signal.h b/arch/mips/include/uapi/asm/signal.h index d6b18b4d0f3a..addb9f556b71 100644 --- a/arch/mips/include/uapi/asm/signal.h +++ b/arch/mips/include/uapi/asm/signal.h | |||
@@ -72,6 +72,12 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */ | |||
72 | * | 72 | * |
73 | * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single | 73 | * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single |
74 | * Unix names RESETHAND and NODEFER respectively. | 74 | * Unix names RESETHAND and NODEFER respectively. |
75 | * | ||
76 | * SA_RESTORER used to be defined as 0x04000000 but only the O32 ABI ever | ||
77 | * supported its use and no libc was using it, so the entire sa-restorer | ||
78 | * functionality was removed with lmo commit 39bffc12c3580ab for 2.5.48 | ||
79 | * retaining only the SA_RESTORER definition as a reminder to avoid | ||
80 | * accidental reuse of the mask bit. | ||
75 | */ | 81 | */ |
76 | #define SA_ONSTACK 0x08000000 | 82 | #define SA_ONSTACK 0x08000000 |
77 | #define SA_RESETHAND 0x80000000 | 83 | #define SA_RESETHAND 0x80000000 |
@@ -84,8 +90,6 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */ | |||
84 | #define SA_NOMASK SA_NODEFER | 90 | #define SA_NOMASK SA_NODEFER |
85 | #define SA_ONESHOT SA_RESETHAND | 91 | #define SA_ONESHOT SA_RESETHAND |
86 | 92 | ||
87 | #define SA_RESTORER 0x04000000 /* Only for o32 */ | ||
88 | |||
89 | #define MINSIGSTKSZ 2048 | 93 | #define MINSIGSTKSZ 2048 |
90 | #define SIGSTKSZ 8192 | 94 | #define SIGSTKSZ 8192 |
91 | 95 | ||
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index f81d98f6184c..de75fb50562b 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile | |||
@@ -100,29 +100,16 @@ obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o | |||
100 | obj-$(CONFIG_JUMP_LABEL) += jump_label.o | 100 | obj-$(CONFIG_JUMP_LABEL) += jump_label.o |
101 | 101 | ||
102 | # | 102 | # |
103 | # DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is safe | 103 | # DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not |
104 | # to enable DSP assembler support here even if the MIPS Release 2 CPU we | 104 | # safe to unconditionally use the assembler -mdsp / -mdspr2 switches |
105 | # are targetting does not support DSP because all code-paths making use of | 105 | # here because the compiler may use DSP ASE instructions (such as lwx) in |
106 | # it properly check that the running CPU *actually does* support these | 106 | # code paths where we cannot check that the CPU we are running on supports it. |
107 | # instructions. | 107 | # Proper abstraction using HAVE_AS_DSP and macros is done in |
108 | # arch/mips/include/asm/mipsregs.h. | ||
108 | # | 109 | # |
109 | ifeq ($(CONFIG_CPU_MIPSR2), y) | 110 | ifeq ($(CONFIG_CPU_MIPSR2), y) |
110 | CFLAGS_DSP = -DHAVE_AS_DSP | 111 | CFLAGS_DSP = -DHAVE_AS_DSP |
111 | 112 | ||
112 | # | ||
113 | # Check if assembler supports DSP ASE | ||
114 | # | ||
115 | ifeq ($(call cc-option-yn,-mdsp), y) | ||
116 | CFLAGS_DSP += -mdsp | ||
117 | endif | ||
118 | |||
119 | # | ||
120 | # Check if assembler supports DSP ASE Rev2 | ||
121 | # | ||
122 | ifeq ($(call cc-option-yn,-mdspr2), y) | ||
123 | CFLAGS_DSP += -mdspr2 | ||
124 | endif | ||
125 | |||
126 | CFLAGS_signal.o = $(CFLAGS_DSP) | 113 | CFLAGS_signal.o = $(CFLAGS_DSP) |
127 | CFLAGS_signal32.o = $(CFLAGS_DSP) | 114 | CFLAGS_signal32.o = $(CFLAGS_DSP) |
128 | CFLAGS_process.o = $(CFLAGS_DSP) | 115 | CFLAGS_process.o = $(CFLAGS_DSP) |
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 6bfccc227a95..5fe66a0c3224 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c | |||
@@ -580,6 +580,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) | |||
580 | c->tlbsize = 48; | 580 | c->tlbsize = 48; |
581 | break; | 581 | break; |
582 | case PRID_IMP_VR41XX: | 582 | case PRID_IMP_VR41XX: |
583 | set_isa(c, MIPS_CPU_ISA_III); | ||
584 | c->options = R4K_OPTS; | ||
585 | c->tlbsize = 32; | ||
583 | switch (c->processor_id & 0xf0) { | 586 | switch (c->processor_id & 0xf0) { |
584 | case PRID_REV_VR4111: | 587 | case PRID_REV_VR4111: |
585 | c->cputype = CPU_VR4111; | 588 | c->cputype = CPU_VR4111; |
@@ -604,6 +607,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) | |||
604 | __cpu_name[cpu] = "NEC VR4131"; | 607 | __cpu_name[cpu] = "NEC VR4131"; |
605 | } else { | 608 | } else { |
606 | c->cputype = CPU_VR4133; | 609 | c->cputype = CPU_VR4133; |
610 | c->options |= MIPS_CPU_LLSC; | ||
607 | __cpu_name[cpu] = "NEC VR4133"; | 611 | __cpu_name[cpu] = "NEC VR4133"; |
608 | } | 612 | } |
609 | break; | 613 | break; |
@@ -613,9 +617,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) | |||
613 | __cpu_name[cpu] = "NEC Vr41xx"; | 617 | __cpu_name[cpu] = "NEC Vr41xx"; |
614 | break; | 618 | break; |
615 | } | 619 | } |
616 | set_isa(c, MIPS_CPU_ISA_III); | ||
617 | c->options = R4K_OPTS; | ||
618 | c->tlbsize = 32; | ||
619 | break; | 620 | break; |
620 | case PRID_IMP_R4300: | 621 | case PRID_IMP_R4300: |
621 | c->cputype = CPU_R4300; | 622 | c->cputype = CPU_R4300; |
@@ -1226,10 +1227,8 @@ __cpuinit void cpu_probe(void) | |||
1226 | if (c->options & MIPS_CPU_FPU) { | 1227 | if (c->options & MIPS_CPU_FPU) { |
1227 | c->fpu_id = cpu_get_fpu_id(); | 1228 | c->fpu_id = cpu_get_fpu_id(); |
1228 | 1229 | ||
1229 | if (c->isa_level == MIPS_CPU_ISA_M32R1 || | 1230 | if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | |
1230 | c->isa_level == MIPS_CPU_ISA_M32R2 || | 1231 | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { |
1231 | c->isa_level == MIPS_CPU_ISA_M64R1 || | ||
1232 | c->isa_level == MIPS_CPU_ISA_M64R2) { | ||
1233 | if (c->fpu_id & MIPS_FPIR_3D) | 1232 | if (c->fpu_id & MIPS_FPIR_3D) |
1234 | c->ases |= MIPS_ASE_MIPS3D; | 1233 | c->ases |= MIPS_ASE_MIPS3D; |
1235 | } | 1234 | } |
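The cpu-probe.c hunk above replaces a chain of equality tests on isa_level with one bitwise membership test; that only works if each MIPS_CPU_ISA_* value is a distinct flag bit, which is assumed in the sketch below (the flag values here are invented for illustration).

#include <stdio.h>

/* Invented single-bit flags standing in for the MIPS_CPU_ISA_* values. */
#define ISA_M32R1	0x01
#define ISA_M32R2	0x02
#define ISA_M64R1	0x04
#define ISA_M64R2	0x08

int main(void)
{
	unsigned int isa_level = ISA_M64R2;	/* example CPU */

	/* One mask test instead of four equality comparisons. */
	if (isa_level & (ISA_M32R1 | ISA_M32R2 | ISA_M64R1 | ISA_M64R2))
		printf("release 1/2 core: check the FPU 3D ASE bit\n");

	return 0;
}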
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index 8eeee1c860c0..db9655f08892 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c | |||
@@ -171,7 +171,7 @@ SYSCALL_DEFINE6(32_ipc, u32, call, long, first, long, second, long, third, | |||
171 | err = compat_sys_shmctl(first, second, compat_ptr(ptr)); | 171 | err = compat_sys_shmctl(first, second, compat_ptr(ptr)); |
172 | break; | 172 | break; |
173 | default: | 173 | default: |
174 | err = -EINVAL; | 174 | err = -ENOSYS; |
175 | break; | 175 | break; |
176 | } | 176 | } |
177 | 177 | ||
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S index 165867673357..33d067148e61 100644 --- a/arch/mips/kernel/mcount.S +++ b/arch/mips/kernel/mcount.S | |||
@@ -46,10 +46,9 @@ | |||
46 | PTR_L a5, PT_R9(sp) | 46 | PTR_L a5, PT_R9(sp) |
47 | PTR_L a6, PT_R10(sp) | 47 | PTR_L a6, PT_R10(sp) |
48 | PTR_L a7, PT_R11(sp) | 48 | PTR_L a7, PT_R11(sp) |
49 | #else | ||
50 | PTR_ADDIU sp, PT_SIZE | ||
51 | #endif | 49 | #endif |
52 | .endm | 50 | PTR_ADDIU sp, PT_SIZE |
51 | .endm | ||
53 | 52 | ||
54 | .macro RETURN_BACK | 53 | .macro RETURN_BACK |
55 | jr ra | 54 | jr ra |
@@ -68,7 +67,11 @@ NESTED(ftrace_caller, PT_SIZE, ra) | |||
68 | .globl _mcount | 67 | .globl _mcount |
69 | _mcount: | 68 | _mcount: |
70 | b ftrace_stub | 69 | b ftrace_stub |
71 | addiu sp,sp,8 | 70 | #ifdef CONFIG_32BIT |
71 | addiu sp,sp,8 | ||
72 | #else | ||
73 | nop | ||
74 | #endif | ||
72 | 75 | ||
73 | /* When tracing is activated, it calls ftrace_caller+8 (aka here) */ | 76 | /* When tracing is activated, it calls ftrace_caller+8 (aka here) */ |
74 | lw t1, function_trace_stop | 77 | lw t1, function_trace_stop |
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 135c4aadccbe..7a54f74b7818 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c | |||
@@ -67,7 +67,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
67 | if (cpu_has_mips_r) { | 67 | if (cpu_has_mips_r) { |
68 | seq_printf(m, "isa\t\t\t:"); | 68 | seq_printf(m, "isa\t\t\t:"); |
69 | if (cpu_has_mips_1) | 69 | if (cpu_has_mips_1) |
70 | seq_printf(m, "%s", "mips1"); | 70 | seq_printf(m, "%s", " mips1"); |
71 | if (cpu_has_mips_2) | 71 | if (cpu_has_mips_2) |
72 | seq_printf(m, "%s", " mips2"); | 72 | seq_printf(m, "%s", " mips2"); |
73 | if (cpu_has_mips_3) | 73 | if (cpu_has_mips_3) |
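The proc.c fix only adds a leading space, but without it the first ISA name is glued to the colon while the later fragments each carry their own space. A quick userspace illustration of why every piecewise-emitted fragment needs the same leading space; the format strings are only illustrative of the /proc/cpuinfo line:

#include <stdio.h>

int main(void)
{
	/* Before: first fragment lacks the space the later ones carry. */
	printf("isa\t\t\t:");
	printf("%s", "mips1");
	printf("%s", " mips2");
	printf("\n");

	/* After: every fragment starts with a space, so the list is uniform. */
	printf("isa\t\t\t:");
	printf("%s", " mips1");
	printf("%s", " mips2");
	printf("\n");
	return 0;
}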
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index a200b5bdbb87..c3abb88170fc 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
@@ -1571,7 +1571,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu) | |||
1571 | #ifdef CONFIG_64BIT | 1571 | #ifdef CONFIG_64BIT |
1572 | status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; | 1572 | status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; |
1573 | #endif | 1573 | #endif |
1574 | if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV) | 1574 | if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV) |
1575 | status_set |= ST0_XX; | 1575 | status_set |= ST0_XX; |
1576 | if (cpu_has_dsp) | 1576 | if (cpu_has_dsp) |
1577 | status_set |= ST0_MX; | 1577 | status_set |= ST0_MX; |
diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c index 81f1dcfdcab8..a64daee740ee 100644 --- a/arch/mips/lib/bitops.c +++ b/arch/mips/lib/bitops.c | |||
@@ -90,12 +90,12 @@ int __mips_test_and_set_bit(unsigned long nr, | |||
90 | unsigned bit = nr & SZLONG_MASK; | 90 | unsigned bit = nr & SZLONG_MASK; |
91 | unsigned long mask; | 91 | unsigned long mask; |
92 | unsigned long flags; | 92 | unsigned long flags; |
93 | unsigned long res; | 93 | int res; |
94 | 94 | ||
95 | a += nr >> SZLONG_LOG; | 95 | a += nr >> SZLONG_LOG; |
96 | mask = 1UL << bit; | 96 | mask = 1UL << bit; |
97 | raw_local_irq_save(flags); | 97 | raw_local_irq_save(flags); |
98 | res = (mask & *a); | 98 | res = (mask & *a) != 0; |
99 | *a |= mask; | 99 | *a |= mask; |
100 | raw_local_irq_restore(flags); | 100 | raw_local_irq_restore(flags); |
101 | return res; | 101 | return res; |
@@ -116,12 +116,12 @@ int __mips_test_and_set_bit_lock(unsigned long nr, | |||
116 | unsigned bit = nr & SZLONG_MASK; | 116 | unsigned bit = nr & SZLONG_MASK; |
117 | unsigned long mask; | 117 | unsigned long mask; |
118 | unsigned long flags; | 118 | unsigned long flags; |
119 | unsigned long res; | 119 | int res; |
120 | 120 | ||
121 | a += nr >> SZLONG_LOG; | 121 | a += nr >> SZLONG_LOG; |
122 | mask = 1UL << bit; | 122 | mask = 1UL << bit; |
123 | raw_local_irq_save(flags); | 123 | raw_local_irq_save(flags); |
124 | res = (mask & *a); | 124 | res = (mask & *a) != 0; |
125 | *a |= mask; | 125 | *a |= mask; |
126 | raw_local_irq_restore(flags); | 126 | raw_local_irq_restore(flags); |
127 | return res; | 127 | return res; |
@@ -141,12 +141,12 @@ int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) | |||
141 | unsigned bit = nr & SZLONG_MASK; | 141 | unsigned bit = nr & SZLONG_MASK; |
142 | unsigned long mask; | 142 | unsigned long mask; |
143 | unsigned long flags; | 143 | unsigned long flags; |
144 | unsigned long res; | 144 | int res; |
145 | 145 | ||
146 | a += nr >> SZLONG_LOG; | 146 | a += nr >> SZLONG_LOG; |
147 | mask = 1UL << bit; | 147 | mask = 1UL << bit; |
148 | raw_local_irq_save(flags); | 148 | raw_local_irq_save(flags); |
149 | res = (mask & *a); | 149 | res = (mask & *a) != 0; |
150 | *a &= ~mask; | 150 | *a &= ~mask; |
151 | raw_local_irq_restore(flags); | 151 | raw_local_irq_restore(flags); |
152 | return res; | 152 | return res; |
@@ -166,12 +166,12 @@ int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr) | |||
166 | unsigned bit = nr & SZLONG_MASK; | 166 | unsigned bit = nr & SZLONG_MASK; |
167 | unsigned long mask; | 167 | unsigned long mask; |
168 | unsigned long flags; | 168 | unsigned long flags; |
169 | unsigned long res; | 169 | int res; |
170 | 170 | ||
171 | a += nr >> SZLONG_LOG; | 171 | a += nr >> SZLONG_LOG; |
172 | mask = 1UL << bit; | 172 | mask = 1UL << bit; |
173 | raw_local_irq_save(flags); | 173 | raw_local_irq_save(flags); |
174 | res = (mask & *a); | 174 | res = (mask & *a) != 0; |
175 | *a ^= mask; | 175 | *a ^= mask; |
176 | raw_local_irq_restore(flags); | 176 | raw_local_irq_restore(flags); |
177 | return res; | 177 | return res; |
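The bitops.c hunks all make the same repair: the __mips_test_and_* functions return int, so handing back the raw (mask & *a) value loses the answer whenever the tested bit sits in the upper half of a 64-bit word, because the non-zero unsigned long truncates to 0 when narrowed to int. Normalising with != 0 keeps the result in {0, 1}. A standalone sketch of the failure mode on an LP64 host:

#include <stdio.h>

/* Buggy: returns the raw masked word, then narrows it to int. */
static int test_bit_buggy(unsigned long word, unsigned bit)
{
	unsigned long mask = 1UL << bit;
	return word & mask;		/* a bit in positions 32..63 truncates to 0 */
}

/* Fixed: normalise to 0/1 before the implicit narrowing. */
static int test_bit_fixed(unsigned long word, unsigned bit)
{
	unsigned long mask = 1UL << bit;
	return (word & mask) != 0;
}

int main(void)
{
	unsigned long word = 1UL << 40;		/* only a high bit is set */

	printf("buggy: %d\n", test_bit_buggy(word, 40));	/* 0 on LP64 */
	printf("fixed: %d\n", test_bit_fixed(word, 40));	/* 1 */
	return 0;
}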
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S index 507147aebd41..a6adffbb4e5f 100644 --- a/arch/mips/lib/csum_partial.S +++ b/arch/mips/lib/csum_partial.S | |||
@@ -270,7 +270,7 @@ LEAF(csum_partial) | |||
270 | #endif | 270 | #endif |
271 | 271 | ||
272 | /* odd buffer alignment? */ | 272 | /* odd buffer alignment? */ |
273 | #ifdef CPU_MIPSR2 | 273 | #ifdef CONFIG_CPU_MIPSR2 |
274 | wsbh v1, sum | 274 | wsbh v1, sum |
275 | movn sum, v1, t7 | 275 | movn sum, v1, t7 |
276 | #else | 276 | #else |
@@ -670,7 +670,7 @@ EXC( sb t0, NBYTES-2(dst), .Ls_exc) | |||
670 | addu sum, v1 | 670 | addu sum, v1 |
671 | #endif | 671 | #endif |
672 | 672 | ||
673 | #ifdef CPU_MIPSR2 | 673 | #ifdef CONFIG_CPU_MIPSR2 |
674 | wsbh v1, sum | 674 | wsbh v1, sum |
675 | movn sum, v1, odd | 675 | movn sum, v1, odd |
676 | #else | 676 | #else |
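The csum_partial.S change is a one-token fix with a real effect: Kconfig options reach the preprocessor as CONFIG_-prefixed macros, so a bare #ifdef CPU_MIPSR2 tests a symbol that is never defined and the wsbh-based byte-swap path evidently never got assembled. A trivial preprocessor sketch of the pitfall; the symbol names are invented:

#include <stdio.h>

#define CONFIG_FEATURE_X 1	/* how a Kconfig "FEATURE_X=y" shows up to cpp */

int main(void)
{
#ifdef FEATURE_X		/* wrong: the unprefixed symbol is never defined */
	printf("fast path (never reached)\n");
#endif
#ifdef CONFIG_FEATURE_X		/* right: test the prefixed macro */
	printf("fast path\n");
#endif
	return 0;
}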
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index ecca559b8d7b..2078915eacb9 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c | |||
@@ -1247,10 +1247,8 @@ static void __cpuinit setup_scache(void) | |||
1247 | return; | 1247 | return; |
1248 | 1248 | ||
1249 | default: | 1249 | default: |
1250 | if (c->isa_level == MIPS_CPU_ISA_M32R1 || | 1250 | if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | |
1251 | c->isa_level == MIPS_CPU_ISA_M32R2 || | 1251 | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { |
1252 | c->isa_level == MIPS_CPU_ISA_M64R1 || | ||
1253 | c->isa_level == MIPS_CPU_ISA_M64R2) { | ||
1254 | #ifdef CONFIG_MIPS_CPU_SCACHE | 1252 | #ifdef CONFIG_MIPS_CPU_SCACHE |
1255 | if (mips_sc_init ()) { | 1253 | if (mips_sc_init ()) { |
1256 | scache_size = c->scache.ways * c->scache.sets * c->scache.linesz; | 1254 | scache_size = c->scache.ways * c->scache.sets * c->scache.linesz; |
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c index 93d937b4b1ba..df96da7e939b 100644 --- a/arch/mips/mm/sc-mips.c +++ b/arch/mips/mm/sc-mips.c | |||
@@ -98,10 +98,8 @@ static inline int __init mips_sc_probe(void) | |||
98 | c->scache.flags |= MIPS_CACHE_NOT_PRESENT; | 98 | c->scache.flags |= MIPS_CACHE_NOT_PRESENT; |
99 | 99 | ||
100 | /* Ignore anything but MIPSxx processors */ | 100 | /* Ignore anything but MIPSxx processors */ |
101 | if (c->isa_level != MIPS_CPU_ISA_M32R1 && | 101 | if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | |
102 | c->isa_level != MIPS_CPU_ISA_M32R2 && | 102 | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2))) |
103 | c->isa_level != MIPS_CPU_ISA_M64R1 && | ||
104 | c->isa_level != MIPS_CPU_ISA_M64R2) | ||
105 | return 0; | 103 | return 0; |
106 | 104 | ||
107 | /* Does this MIPS32/MIPS64 CPU have a config2 register? */ | 105 | /* Does this MIPS32/MIPS64 CPU have a config2 register? */ |
diff --git a/arch/mips/pci/pci-alchemy.c b/arch/mips/pci/pci-alchemy.c index 38a80c83fd67..d1faece21b6a 100644 --- a/arch/mips/pci/pci-alchemy.c +++ b/arch/mips/pci/pci-alchemy.c | |||
@@ -19,7 +19,7 @@ | |||
19 | #include <asm/mach-au1x00/au1000.h> | 19 | #include <asm/mach-au1x00/au1000.h> |
20 | #include <asm/tlbmisc.h> | 20 | #include <asm/tlbmisc.h> |
21 | 21 | ||
22 | #ifdef CONFIG_DEBUG_PCI | 22 | #ifdef CONFIG_PCI_DEBUG |
23 | #define DBG(x...) printk(KERN_DEBUG x) | 23 | #define DBG(x...) printk(KERN_DEBUG x) |
24 | #else | 24 | #else |
25 | #define DBG(x...) do {} while (0) | 25 | #define DBG(x...) do {} while (0) |
@@ -162,7 +162,7 @@ static int config_access(unsigned char access_type, struct pci_bus *bus, | |||
162 | if (status & (1 << 29)) { | 162 | if (status & (1 << 29)) { |
163 | *data = 0xffffffff; | 163 | *data = 0xffffffff; |
164 | error = -1; | 164 | error = -1; |
165 | DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d", | 165 | DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d\n", |
166 | access_type, bus->number, device); | 166 | access_type, bus->number, device); |
167 | } else if ((status >> 28) & 0xf) { | 167 | } else if ((status >> 28) & 0xf) { |
168 | DBG("alchemy-pci: PCI ERR detected: dev %d, status %lx\n", | 168 | DBG("alchemy-pci: PCI ERR detected: dev %d, status %lx\n", |
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 01d95e2f0581..113e28206503 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile | |||
@@ -65,8 +65,10 @@ ifndef CONFIG_FUNCTION_TRACER | |||
65 | endif | 65 | endif |
66 | 66 | ||
67 | # Use long jumps instead of long branches (needed if your linker fails to | 67 | # Use long jumps instead of long branches (needed if your linker fails to |
68 | # link a too big vmlinux executable) | 68 | # link a too big vmlinux executable). Not enabled for building modules. |
69 | cflags-$(CONFIG_MLONGCALLS) += -mlong-calls | 69 | ifdef CONFIG_MLONGCALLS |
70 | KBUILD_CFLAGS_KERNEL += -mlong-calls | ||
71 | endif | ||
70 | 72 | ||
71 | # select which processor to optimise for | 73 | # select which processor to optimise for |
72 | cflags-$(CONFIG_PA7100) += -march=1.1 -mschedule=7100 | 74 | cflags-$(CONFIG_PA7100) += -march=1.1 -mschedule=7100 |
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h index 79f694f3ad9b..f0e2784e7cca 100644 --- a/arch/parisc/include/asm/cacheflush.h +++ b/arch/parisc/include/asm/cacheflush.h | |||
@@ -140,7 +140,10 @@ static inline void *kmap(struct page *page) | |||
140 | return page_address(page); | 140 | return page_address(page); |
141 | } | 141 | } |
142 | 142 | ||
143 | #define kunmap(page) kunmap_parisc(page_address(page)) | 143 | static inline void kunmap(struct page *page) |
144 | { | ||
145 | kunmap_parisc(page_address(page)); | ||
146 | } | ||
144 | 147 | ||
145 | static inline void *kmap_atomic(struct page *page) | 148 | static inline void *kmap_atomic(struct page *page) |
146 | { | 149 | { |
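Turning the kunmap() macro into a static inline gives the compiler a real prototype: the argument is type-checked as struct page * and evaluated exactly once. A small sketch of the general macro-versus-inline trade-off; the names below are illustrative, not the parisc API:

#include <stdio.h>

struct page { void *vaddr; };

static void unmap_address(void *addr)
{
	printf("unmapping %p\n", addr);
}

/* Macro version: no prototype, argument pasted textually. */
#define unmap_page_macro(page) unmap_address((page)->vaddr)

/* Inline version: the argument must really be a struct page *. */
static inline void unmap_page_inline(struct page *page)
{
	unmap_address(page->vaddr);
}

int main(void)
{
	struct page p = { .vaddr = &p };

	unmap_page_macro(&p);
	unmap_page_inline(&p);
	/* With a wrong argument type, the inline version is diagnosed at the
	 * call site; the macro version only errors inside its expansion. */
	return 0;
}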
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index 7df49fad29f9..1e40d7f86be3 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h | |||
@@ -16,6 +16,8 @@ | |||
16 | #include <asm/processor.h> | 16 | #include <asm/processor.h> |
17 | #include <asm/cache.h> | 17 | #include <asm/cache.h> |
18 | 18 | ||
19 | extern spinlock_t pa_dbit_lock; | ||
20 | |||
19 | /* | 21 | /* |
20 | * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel | 22 | * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel |
21 | * memory. For the return value to be meaningful, ADDR must be >= | 23 | * memory. For the return value to be meaningful, ADDR must be >= |
@@ -44,8 +46,11 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long); | |||
44 | 46 | ||
45 | #define set_pte_at(mm, addr, ptep, pteval) \ | 47 | #define set_pte_at(mm, addr, ptep, pteval) \ |
46 | do { \ | 48 | do { \ |
49 | unsigned long flags; \ | ||
50 | spin_lock_irqsave(&pa_dbit_lock, flags); \ | ||
47 | set_pte(ptep, pteval); \ | 51 | set_pte(ptep, pteval); \ |
48 | purge_tlb_entries(mm, addr); \ | 52 | purge_tlb_entries(mm, addr); \ |
53 | spin_unlock_irqrestore(&pa_dbit_lock, flags); \ | ||
49 | } while (0) | 54 | } while (0) |
50 | 55 | ||
51 | #endif /* !__ASSEMBLY__ */ | 56 | #endif /* !__ASSEMBLY__ */ |
@@ -435,48 +440,46 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *); | |||
435 | 440 | ||
436 | static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) | 441 | static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) |
437 | { | 442 | { |
438 | #ifdef CONFIG_SMP | 443 | pte_t pte; |
444 | unsigned long flags; | ||
445 | |||
439 | if (!pte_young(*ptep)) | 446 | if (!pte_young(*ptep)) |
440 | return 0; | 447 | return 0; |
441 | return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), &pte_val(*ptep)); | 448 | |
442 | #else | 449 | spin_lock_irqsave(&pa_dbit_lock, flags); |
443 | pte_t pte = *ptep; | 450 | pte = *ptep; |
444 | if (!pte_young(pte)) | 451 | if (!pte_young(pte)) { |
452 | spin_unlock_irqrestore(&pa_dbit_lock, flags); | ||
445 | return 0; | 453 | return 0; |
446 | set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte)); | 454 | } |
455 | set_pte(ptep, pte_mkold(pte)); | ||
456 | purge_tlb_entries(vma->vm_mm, addr); | ||
457 | spin_unlock_irqrestore(&pa_dbit_lock, flags); | ||
447 | return 1; | 458 | return 1; |
448 | #endif | ||
449 | } | 459 | } |
450 | 460 | ||
451 | extern spinlock_t pa_dbit_lock; | ||
452 | |||
453 | struct mm_struct; | 461 | struct mm_struct; |
454 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | 462 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) |
455 | { | 463 | { |
456 | pte_t old_pte; | 464 | pte_t old_pte; |
465 | unsigned long flags; | ||
457 | 466 | ||
458 | spin_lock(&pa_dbit_lock); | 467 | spin_lock_irqsave(&pa_dbit_lock, flags); |
459 | old_pte = *ptep; | 468 | old_pte = *ptep; |
460 | pte_clear(mm,addr,ptep); | 469 | pte_clear(mm,addr,ptep); |
461 | spin_unlock(&pa_dbit_lock); | 470 | purge_tlb_entries(mm, addr); |
471 | spin_unlock_irqrestore(&pa_dbit_lock, flags); | ||
462 | 472 | ||
463 | return old_pte; | 473 | return old_pte; |
464 | } | 474 | } |
465 | 475 | ||
466 | static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | 476 | static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) |
467 | { | 477 | { |
468 | #ifdef CONFIG_SMP | 478 | unsigned long flags; |
469 | unsigned long new, old; | 479 | spin_lock_irqsave(&pa_dbit_lock, flags); |
470 | 480 | set_pte(ptep, pte_wrprotect(*ptep)); | |
471 | do { | ||
472 | old = pte_val(*ptep); | ||
473 | new = pte_val(pte_wrprotect(__pte (old))); | ||
474 | } while (cmpxchg((unsigned long *) ptep, old, new) != old); | ||
475 | purge_tlb_entries(mm, addr); | 481 | purge_tlb_entries(mm, addr); |
476 | #else | 482 | spin_unlock_irqrestore(&pa_dbit_lock, flags); |
477 | pte_t old_pte = *ptep; | ||
478 | set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); | ||
479 | #endif | ||
480 | } | 483 | } |
481 | 484 | ||
482 | #define pte_same(A,B) (pte_val(A) == pte_val(B)) | 485 | #define pte_same(A,B) (pte_val(A) == pte_val(B)) |
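The pgtable.h rework replaces the per-configuration #ifdef CONFIG_SMP tricks with one pattern: take pa_dbit_lock with interrupts disabled, update the PTE, purge the TLB entry, then drop the lock, so the PTE write and the purge are never separated with respect to other holders of that lock. A rough userspace model of the "modify then flush under one lock" shape, with a pthread mutex standing in for the spinlock; nothing here is real parisc code:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pte_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long pte;			/* stand-in for a page-table entry */

static void purge_tlb_entry(unsigned long addr)
{
	printf("purge TLB for %#lx\n", addr);	/* stand-in for pdtlb/pitlb */
}

/* Update the PTE and purge the stale translation as one critical section. */
static void set_pte_and_purge(unsigned long addr, unsigned long val)
{
	pthread_mutex_lock(&pte_lock);
	pte = val;
	purge_tlb_entry(addr);
	pthread_mutex_unlock(&pte_lock);
}

int main(void)
{
	set_pte_and_purge(0x1000, 0xabc);
	return 0;
}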
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index 4ba2c93770f1..e0a82358517e 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h | |||
@@ -181,30 +181,24 @@ struct exception_data { | |||
181 | #if !defined(CONFIG_64BIT) | 181 | #if !defined(CONFIG_64BIT) |
182 | 182 | ||
183 | #define __put_kernel_asm64(__val,ptr) do { \ | 183 | #define __put_kernel_asm64(__val,ptr) do { \ |
184 | u64 __val64 = (u64)(__val); \ | ||
185 | u32 hi = (__val64) >> 32; \ | ||
186 | u32 lo = (__val64) & 0xffffffff; \ | ||
187 | __asm__ __volatile__ ( \ | 184 | __asm__ __volatile__ ( \ |
188 | "\n1:\tstw %2,0(%1)" \ | 185 | "\n1:\tstw %2,0(%1)" \ |
189 | "\n2:\tstw %3,4(%1)\n\t" \ | 186 | "\n2:\tstw %R2,4(%1)\n\t" \ |
190 | ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\ | 187 | ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\ |
191 | ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\ | 188 | ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\ |
192 | : "=r"(__pu_err) \ | 189 | : "=r"(__pu_err) \ |
193 | : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \ | 190 | : "r"(ptr), "r"(__val), "0"(__pu_err) \ |
194 | : "r1"); \ | 191 | : "r1"); \ |
195 | } while (0) | 192 | } while (0) |
196 | 193 | ||
197 | #define __put_user_asm64(__val,ptr) do { \ | 194 | #define __put_user_asm64(__val,ptr) do { \ |
198 | u64 __val64 = (u64)(__val); \ | ||
199 | u32 hi = (__val64) >> 32; \ | ||
200 | u32 lo = (__val64) & 0xffffffff; \ | ||
201 | __asm__ __volatile__ ( \ | 195 | __asm__ __volatile__ ( \ |
202 | "\n1:\tstw %2,0(%%sr3,%1)" \ | 196 | "\n1:\tstw %2,0(%%sr3,%1)" \ |
203 | "\n2:\tstw %3,4(%%sr3,%1)\n\t" \ | 197 | "\n2:\tstw %R2,4(%%sr3,%1)\n\t" \ |
204 | ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\ | 198 | ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\ |
205 | ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\ | 199 | ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\ |
206 | : "=r"(__pu_err) \ | 200 | : "=r"(__pu_err) \ |
207 | : "r"(ptr), "r"(hi), "r"(lo), "0"(__pu_err) \ | 201 | : "r"(ptr), "r"(__val), "0"(__pu_err) \ |
208 | : "r1"); \ | 202 | : "r1"); \ |
209 | } while (0) | 203 | } while (0) |
210 | 204 | ||
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 4b12890642eb..83ded26cad06 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c | |||
@@ -421,14 +421,11 @@ void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) | |||
421 | /* Note: purge_tlb_entries can be called at startup with | 421 | /* Note: purge_tlb_entries can be called at startup with |
422 | no context. */ | 422 | no context. */ |
423 | 423 | ||
424 | /* Disable preemption while we play with %sr1. */ | ||
425 | preempt_disable(); | ||
426 | mtsp(mm->context, 1); | ||
427 | purge_tlb_start(flags); | 424 | purge_tlb_start(flags); |
425 | mtsp(mm->context, 1); | ||
428 | pdtlb(addr); | 426 | pdtlb(addr); |
429 | pitlb(addr); | 427 | pitlb(addr); |
430 | purge_tlb_end(flags); | 428 | purge_tlb_end(flags); |
431 | preempt_enable(); | ||
432 | } | 429 | } |
433 | EXPORT_SYMBOL(purge_tlb_entries); | 430 | EXPORT_SYMBOL(purge_tlb_entries); |
434 | 431 | ||
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c index 6795dc6c995f..568b2c61ea02 100644 --- a/arch/parisc/kernel/parisc_ksyms.c +++ b/arch/parisc/kernel/parisc_ksyms.c | |||
@@ -120,11 +120,13 @@ extern void __ashrdi3(void); | |||
120 | extern void __ashldi3(void); | 120 | extern void __ashldi3(void); |
121 | extern void __lshrdi3(void); | 121 | extern void __lshrdi3(void); |
122 | extern void __muldi3(void); | 122 | extern void __muldi3(void); |
123 | extern void __ucmpdi2(void); | ||
123 | 124 | ||
124 | EXPORT_SYMBOL(__ashrdi3); | 125 | EXPORT_SYMBOL(__ashrdi3); |
125 | EXPORT_SYMBOL(__ashldi3); | 126 | EXPORT_SYMBOL(__ashldi3); |
126 | EXPORT_SYMBOL(__lshrdi3); | 127 | EXPORT_SYMBOL(__lshrdi3); |
127 | EXPORT_SYMBOL(__muldi3); | 128 | EXPORT_SYMBOL(__muldi3); |
129 | EXPORT_SYMBOL(__ucmpdi2); | ||
128 | 130 | ||
129 | asmlinkage void * __canonicalize_funcptr_for_compare(void *); | 131 | asmlinkage void * __canonicalize_funcptr_for_compare(void *); |
130 | EXPORT_SYMBOL(__canonicalize_funcptr_for_compare); | 132 | EXPORT_SYMBOL(__canonicalize_funcptr_for_compare); |
diff --git a/arch/parisc/lib/Makefile b/arch/parisc/lib/Makefile index 5f2e6904d14a..5651536ac733 100644 --- a/arch/parisc/lib/Makefile +++ b/arch/parisc/lib/Makefile | |||
@@ -2,6 +2,7 @@ | |||
2 | # Makefile for parisc-specific library files | 2 | # Makefile for parisc-specific library files |
3 | # | 3 | # |
4 | 4 | ||
5 | lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o | 5 | lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \ |
6 | ucmpdi2.o | ||
6 | 7 | ||
7 | obj-y := iomap.o | 8 | obj-y := iomap.o |
diff --git a/arch/parisc/lib/ucmpdi2.c b/arch/parisc/lib/ucmpdi2.c new file mode 100644 index 000000000000..149c016f32c5 --- /dev/null +++ b/arch/parisc/lib/ucmpdi2.c | |||
@@ -0,0 +1,25 @@ | |||
1 | #include <linux/module.h> | ||
2 | |||
3 | union ull_union { | ||
4 | unsigned long long ull; | ||
5 | struct { | ||
6 | unsigned int high; | ||
7 | unsigned int low; | ||
8 | } ui; | ||
9 | }; | ||
10 | |||
11 | int __ucmpdi2(unsigned long long a, unsigned long long b) | ||
12 | { | ||
13 | union ull_union au = {.ull = a}; | ||
14 | union ull_union bu = {.ull = b}; | ||
15 | |||
16 | if (au.ui.high < bu.ui.high) | ||
17 | return 0; | ||
18 | else if (au.ui.high > bu.ui.high) | ||
19 | return 2; | ||
20 | if (au.ui.low < bu.ui.low) | ||
21 | return 0; | ||
22 | else if (au.ui.low > bu.ui.low) | ||
23 | return 2; | ||
24 | return 1; | ||
25 | } | ||
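__ucmpdi2 is the libgcc-style helper the compiler emits for unsigned 64-bit comparisons on a 32-bit target; by convention it returns 0, 1 or 2 for less-than, equal and greater-than, which is what the new parisc file implements word by word (high word first, as befits a big-endian machine). A portable userspace check of that convention against direct comparison:

#include <assert.h>
#include <stdio.h>

/* Reference implementation of the 0/1/2 convention. */
static int ucmpdi2_ref(unsigned long long a, unsigned long long b)
{
	if (a < b)
		return 0;
	if (a > b)
		return 2;
	return 1;
}

int main(void)
{
	assert(ucmpdi2_ref(1, 2) == 0);
	assert(ucmpdi2_ref(2, 2) == 1);
	assert(ucmpdi2_ref(1ULL << 40, 1) == 2);	/* decided by the high word */
	puts("ok");
	return 0;
}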
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 256c5bf0adb7..04d69c4a5ac2 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S | |||
@@ -304,7 +304,7 @@ syscall_exit_work: | |||
304 | subi r12,r12,TI_FLAGS | 304 | subi r12,r12,TI_FLAGS |
305 | 305 | ||
306 | 4: /* Anything else left to do? */ | 306 | 4: /* Anything else left to do? */ |
307 | SET_DEFAULT_THREAD_PPR(r3, r9) /* Set thread.ppr = 3 */ | 307 | SET_DEFAULT_THREAD_PPR(r3, r10) /* Set thread.ppr = 3 */ |
308 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) | 308 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) |
309 | beq .ret_from_except_lite | 309 | beq .ret_from_except_lite |
310 | 310 | ||
@@ -657,7 +657,7 @@ resume_kernel: | |||
657 | /* Clear _TIF_EMULATE_STACK_STORE flag */ | 657 | /* Clear _TIF_EMULATE_STACK_STORE flag */ |
658 | lis r11,_TIF_EMULATE_STACK_STORE@h | 658 | lis r11,_TIF_EMULATE_STACK_STORE@h |
659 | addi r5,r9,TI_FLAGS | 659 | addi r5,r9,TI_FLAGS |
660 | ldarx r4,0,r5 | 660 | 0: ldarx r4,0,r5 |
661 | andc r4,r4,r11 | 661 | andc r4,r4,r11 |
662 | stdcx. r4,0,r5 | 662 | stdcx. r4,0,r5 |
663 | bne- 0b | 663 | bne- 0b |
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 59dd545fdde1..16e77a81ab4f 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -555,10 +555,12 @@ static inline void tm_recheckpoint_new_task(struct task_struct *new) | |||
555 | new->thread.regs->msr |= | 555 | new->thread.regs->msr |= |
556 | (MSR_FP | new->thread.fpexc_mode); | 556 | (MSR_FP | new->thread.fpexc_mode); |
557 | } | 557 | } |
558 | #ifdef CONFIG_ALTIVEC | ||
558 | if (msr & MSR_VEC) { | 559 | if (msr & MSR_VEC) { |
559 | do_load_up_transact_altivec(&new->thread); | 560 | do_load_up_transact_altivec(&new->thread); |
560 | new->thread.regs->msr |= MSR_VEC; | 561 | new->thread.regs->msr |= MSR_VEC; |
561 | } | 562 | } |
563 | #endif | ||
562 | /* We may as well turn on VSX too since all the state is restored now */ | 564 | /* We may as well turn on VSX too since all the state is restored now */ |
563 | if (msr & MSR_VSX) | 565 | if (msr & MSR_VSX) |
564 | new->thread.regs->msr |= MSR_VSX; | 566 | new->thread.regs->msr |= MSR_VSX; |
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 3acb28e245b4..95068bf569ad 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c | |||
@@ -866,10 +866,12 @@ static long restore_tm_user_regs(struct pt_regs *regs, | |||
866 | do_load_up_transact_fpu(¤t->thread); | 866 | do_load_up_transact_fpu(¤t->thread); |
867 | regs->msr |= (MSR_FP | current->thread.fpexc_mode); | 867 | regs->msr |= (MSR_FP | current->thread.fpexc_mode); |
868 | } | 868 | } |
869 | #ifdef CONFIG_ALTIVEC | ||
869 | if (msr & MSR_VEC) { | 870 | if (msr & MSR_VEC) { |
870 | do_load_up_transact_altivec(¤t->thread); | 871 | do_load_up_transact_altivec(¤t->thread); |
871 | regs->msr |= MSR_VEC; | 872 | regs->msr |= MSR_VEC; |
872 | } | 873 | } |
874 | #endif | ||
873 | 875 | ||
874 | return 0; | 876 | return 0; |
875 | } | 877 | } |
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 995f8543cb57..c1794286098c 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c | |||
@@ -522,10 +522,12 @@ static long restore_tm_sigcontexts(struct pt_regs *regs, | |||
522 | do_load_up_transact_fpu(¤t->thread); | 522 | do_load_up_transact_fpu(¤t->thread); |
523 | regs->msr |= (MSR_FP | current->thread.fpexc_mode); | 523 | regs->msr |= (MSR_FP | current->thread.fpexc_mode); |
524 | } | 524 | } |
525 | #ifdef CONFIG_ALTIVEC | ||
525 | if (msr & MSR_VEC) { | 526 | if (msr & MSR_VEC) { |
526 | do_load_up_transact_altivec(¤t->thread); | 527 | do_load_up_transact_altivec(¤t->thread); |
527 | regs->msr |= MSR_VEC; | 528 | regs->msr |= MSR_VEC; |
528 | } | 529 | } |
530 | #endif | ||
529 | 531 | ||
530 | return err; | 532 | return err; |
531 | } | 533 | } |
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S index 84dbace657ce..2da67e7a16d5 100644 --- a/arch/powerpc/kernel/tm.S +++ b/arch/powerpc/kernel/tm.S | |||
@@ -309,6 +309,7 @@ _GLOBAL(tm_recheckpoint) | |||
309 | or r5, r6, r5 /* Set MSR.FP+.VSX/.VEC */ | 309 | or r5, r6, r5 /* Set MSR.FP+.VSX/.VEC */ |
310 | mtmsr r5 | 310 | mtmsr r5 |
311 | 311 | ||
312 | #ifdef CONFIG_ALTIVEC | ||
312 | /* FP and VEC registers: These are recheckpointed from thread.fpr[] | 313 | /* FP and VEC registers: These are recheckpointed from thread.fpr[] |
313 | * and thread.vr[] respectively. The thread.transact_fpr[] version | 314 | * and thread.vr[] respectively. The thread.transact_fpr[] version |
314 | * is more modern, and will be loaded subsequently by any FPUnavailable | 315 | * is more modern, and will be loaded subsequently by any FPUnavailable |
@@ -323,6 +324,7 @@ _GLOBAL(tm_recheckpoint) | |||
323 | REST_32VRS(0, r5, r3) /* r5 scratch, r3 THREAD ptr */ | 324 | REST_32VRS(0, r5, r3) /* r5 scratch, r3 THREAD ptr */ |
324 | ld r5, THREAD_VRSAVE(r3) | 325 | ld r5, THREAD_VRSAVE(r3) |
325 | mtspr SPRN_VRSAVE, r5 | 326 | mtspr SPRN_VRSAVE, r5 |
327 | #endif | ||
326 | 328 | ||
327 | dont_restore_vec: | 329 | dont_restore_vec: |
328 | andi. r0, r4, MSR_FP | 330 | andi. r0, r4, MSR_FP |
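The process.c, signal_32.c, signal_64.c and tm.S hunks above all apply the same guard: the transactional-memory restore paths call Altivec helpers that only exist when CONFIG_ALTIVEC is built in, so the calls have to sit inside #ifdef CONFIG_ALTIVEC for non-Altivec configurations to keep building. A generic sketch of guarding an optional-feature call this way; the names are invented:

#include <stdio.h>

#define CONFIG_FEATURE_VEC 1	/* comment out to model a non-vector build */

#ifdef CONFIG_FEATURE_VEC
static void restore_vector_state(void)
{
	printf("vector state restored\n");
}
#endif

static void restore_thread(int has_vec_state)
{
	printf("scalar state restored\n");
#ifdef CONFIG_FEATURE_VEC
	if (has_vec_state)
		restore_vector_state();	/* only referenced when it exists */
#endif
}

int main(void)
{
	restore_thread(1);
	return 0;
}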
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h index 41cefd43655f..33db48a8ce24 100644 --- a/arch/powerpc/kvm/e500.h +++ b/arch/powerpc/kvm/e500.h | |||
@@ -26,17 +26,20 @@ | |||
26 | #define E500_PID_NUM 3 | 26 | #define E500_PID_NUM 3 |
27 | #define E500_TLB_NUM 2 | 27 | #define E500_TLB_NUM 2 |
28 | 28 | ||
29 | #define E500_TLB_VALID 1 | 29 | /* entry is mapped somewhere in host TLB */ |
30 | #define E500_TLB_BITMAP 2 | 30 | #define E500_TLB_VALID (1 << 0) |
31 | /* TLB1 entry is mapped by host TLB1, tracked by bitmaps */ | ||
32 | #define E500_TLB_BITMAP (1 << 1) | ||
33 | /* TLB1 entry is mapped by host TLB0 */ | ||
31 | #define E500_TLB_TLB0 (1 << 2) | 34 | #define E500_TLB_TLB0 (1 << 2) |
32 | 35 | ||
33 | struct tlbe_ref { | 36 | struct tlbe_ref { |
34 | pfn_t pfn; | 37 | pfn_t pfn; /* valid only for TLB0, except briefly */ |
35 | unsigned int flags; /* E500_TLB_* */ | 38 | unsigned int flags; /* E500_TLB_* */ |
36 | }; | 39 | }; |
37 | 40 | ||
38 | struct tlbe_priv { | 41 | struct tlbe_priv { |
39 | struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */ | 42 | struct tlbe_ref ref; |
40 | }; | 43 | }; |
41 | 44 | ||
42 | #ifdef CONFIG_KVM_E500V2 | 45 | #ifdef CONFIG_KVM_E500V2 |
@@ -63,17 +66,6 @@ struct kvmppc_vcpu_e500 { | |||
63 | 66 | ||
64 | unsigned int gtlb_nv[E500_TLB_NUM]; | 67 | unsigned int gtlb_nv[E500_TLB_NUM]; |
65 | 68 | ||
66 | /* | ||
67 | * information associated with each host TLB entry -- | ||
68 | * TLB1 only for now. If/when guest TLB1 entries can be | ||
69 | * mapped with host TLB0, this will be used for that too. | ||
70 | * | ||
71 | * We don't want to use this for guest TLB0 because then we'd | ||
72 | * have the overhead of doing the translation again even if | ||
73 | * the entry is still in the guest TLB (e.g. we swapped out | ||
74 | * and back, and our host TLB entries got evicted). | ||
75 | */ | ||
76 | struct tlbe_ref *tlb_refs[E500_TLB_NUM]; | ||
77 | unsigned int host_tlb1_nv; | 69 | unsigned int host_tlb1_nv; |
78 | 70 | ||
79 | u32 svr; | 71 | u32 svr; |
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index a222edfb9a9b..1c6a9d729df4 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c | |||
@@ -193,8 +193,11 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, | |||
193 | struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref; | 193 | struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref; |
194 | 194 | ||
195 | /* Don't bother with unmapped entries */ | 195 | /* Don't bother with unmapped entries */ |
196 | if (!(ref->flags & E500_TLB_VALID)) | 196 | if (!(ref->flags & E500_TLB_VALID)) { |
197 | return; | 197 | WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0), |
198 | "%s: flags %x\n", __func__, ref->flags); | ||
199 | WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]); | ||
200 | } | ||
198 | 201 | ||
199 | if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) { | 202 | if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) { |
200 | u64 tmp = vcpu_e500->g2h_tlb1_map[esel]; | 203 | u64 tmp = vcpu_e500->g2h_tlb1_map[esel]; |
@@ -248,7 +251,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref, | |||
248 | pfn_t pfn) | 251 | pfn_t pfn) |
249 | { | 252 | { |
250 | ref->pfn = pfn; | 253 | ref->pfn = pfn; |
251 | ref->flags = E500_TLB_VALID; | 254 | ref->flags |= E500_TLB_VALID; |
252 | 255 | ||
253 | if (tlbe_is_writable(gtlbe)) | 256 | if (tlbe_is_writable(gtlbe)) |
254 | kvm_set_pfn_dirty(pfn); | 257 | kvm_set_pfn_dirty(pfn); |
@@ -257,6 +260,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref, | |||
257 | static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref) | 260 | static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref) |
258 | { | 261 | { |
259 | if (ref->flags & E500_TLB_VALID) { | 262 | if (ref->flags & E500_TLB_VALID) { |
263 | /* FIXME: don't log bogus pfn for TLB1 */ | ||
260 | trace_kvm_booke206_ref_release(ref->pfn, ref->flags); | 264 | trace_kvm_booke206_ref_release(ref->pfn, ref->flags); |
261 | ref->flags = 0; | 265 | ref->flags = 0; |
262 | } | 266 | } |
@@ -274,36 +278,23 @@ static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500) | |||
274 | 278 | ||
275 | static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500) | 279 | static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500) |
276 | { | 280 | { |
277 | int tlbsel = 0; | 281 | int tlbsel; |
278 | int i; | ||
279 | |||
280 | for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) { | ||
281 | struct tlbe_ref *ref = | ||
282 | &vcpu_e500->gtlb_priv[tlbsel][i].ref; | ||
283 | kvmppc_e500_ref_release(ref); | ||
284 | } | ||
285 | } | ||
286 | |||
287 | static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
288 | { | ||
289 | int stlbsel = 1; | ||
290 | int i; | 282 | int i; |
291 | 283 | ||
292 | kvmppc_e500_tlbil_all(vcpu_e500); | 284 | for (tlbsel = 0; tlbsel <= 1; tlbsel++) { |
293 | 285 | for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) { | |
294 | for (i = 0; i < host_tlb_params[stlbsel].entries; i++) { | 286 | struct tlbe_ref *ref = |
295 | struct tlbe_ref *ref = | 287 | &vcpu_e500->gtlb_priv[tlbsel][i].ref; |
296 | &vcpu_e500->tlb_refs[stlbsel][i]; | 288 | kvmppc_e500_ref_release(ref); |
297 | kvmppc_e500_ref_release(ref); | 289 | } |
298 | } | 290 | } |
299 | |||
300 | clear_tlb_privs(vcpu_e500); | ||
301 | } | 291 | } |
302 | 292 | ||
303 | void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu) | 293 | void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu) |
304 | { | 294 | { |
305 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 295 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
306 | clear_tlb_refs(vcpu_e500); | 296 | kvmppc_e500_tlbil_all(vcpu_e500); |
297 | clear_tlb_privs(vcpu_e500); | ||
307 | clear_tlb1_bitmap(vcpu_e500); | 298 | clear_tlb1_bitmap(vcpu_e500); |
308 | } | 299 | } |
309 | 300 | ||
@@ -458,8 +449,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
458 | gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); | 449 | gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1); |
459 | } | 450 | } |
460 | 451 | ||
461 | /* Drop old ref and setup new one. */ | ||
462 | kvmppc_e500_ref_release(ref); | ||
463 | kvmppc_e500_ref_setup(ref, gtlbe, pfn); | 452 | kvmppc_e500_ref_setup(ref, gtlbe, pfn); |
464 | 453 | ||
465 | kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, | 454 | kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, |
@@ -507,14 +496,15 @@ static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
507 | if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size())) | 496 | if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size())) |
508 | vcpu_e500->host_tlb1_nv = 0; | 497 | vcpu_e500->host_tlb1_nv = 0; |
509 | 498 | ||
510 | vcpu_e500->tlb_refs[1][sesel] = *ref; | ||
511 | vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel; | ||
512 | vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP; | ||
513 | if (vcpu_e500->h2g_tlb1_rmap[sesel]) { | 499 | if (vcpu_e500->h2g_tlb1_rmap[sesel]) { |
514 | unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel]; | 500 | unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1; |
515 | vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel); | 501 | vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel); |
516 | } | 502 | } |
517 | vcpu_e500->h2g_tlb1_rmap[sesel] = esel; | 503 | |
504 | vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP; | ||
505 | vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel; | ||
506 | vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1; | ||
507 | WARN_ON(!(ref->flags & E500_TLB_VALID)); | ||
518 | 508 | ||
519 | return sesel; | 509 | return sesel; |
520 | } | 510 | } |
@@ -526,13 +516,12 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
526 | u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, | 516 | u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, |
527 | struct kvm_book3e_206_tlb_entry *stlbe, int esel) | 517 | struct kvm_book3e_206_tlb_entry *stlbe, int esel) |
528 | { | 518 | { |
529 | struct tlbe_ref ref; | 519 | struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref; |
530 | int sesel; | 520 | int sesel; |
531 | int r; | 521 | int r; |
532 | 522 | ||
533 | ref.flags = 0; | ||
534 | r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, | 523 | r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, |
535 | &ref); | 524 | ref); |
536 | if (r) | 525 | if (r) |
537 | return r; | 526 | return r; |
538 | 527 | ||
@@ -544,7 +533,7 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
544 | } | 533 | } |
545 | 534 | ||
546 | /* Otherwise map into TLB1 */ | 535 | /* Otherwise map into TLB1 */ |
547 | sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, &ref, esel); | 536 | sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel); |
548 | write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel); | 537 | write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel); |
549 | 538 | ||
550 | return 0; | 539 | return 0; |
@@ -565,7 +554,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, | |||
565 | case 0: | 554 | case 0: |
566 | priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; | 555 | priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; |
567 | 556 | ||
568 | /* Triggers after clear_tlb_refs or on initial mapping */ | 557 | /* Triggers after clear_tlb_privs or on initial mapping */ |
569 | if (!(priv->ref.flags & E500_TLB_VALID)) { | 558 | if (!(priv->ref.flags & E500_TLB_VALID)) { |
570 | kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe); | 559 | kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe); |
571 | } else { | 560 | } else { |
@@ -665,35 +654,16 @@ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500) | |||
665 | host_tlb_params[0].entries / host_tlb_params[0].ways; | 654 | host_tlb_params[0].entries / host_tlb_params[0].ways; |
666 | host_tlb_params[1].sets = 1; | 655 | host_tlb_params[1].sets = 1; |
667 | 656 | ||
668 | vcpu_e500->tlb_refs[0] = | ||
669 | kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries, | ||
670 | GFP_KERNEL); | ||
671 | if (!vcpu_e500->tlb_refs[0]) | ||
672 | goto err; | ||
673 | |||
674 | vcpu_e500->tlb_refs[1] = | ||
675 | kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries, | ||
676 | GFP_KERNEL); | ||
677 | if (!vcpu_e500->tlb_refs[1]) | ||
678 | goto err; | ||
679 | |||
680 | vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) * | 657 | vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) * |
681 | host_tlb_params[1].entries, | 658 | host_tlb_params[1].entries, |
682 | GFP_KERNEL); | 659 | GFP_KERNEL); |
683 | if (!vcpu_e500->h2g_tlb1_rmap) | 660 | if (!vcpu_e500->h2g_tlb1_rmap) |
684 | goto err; | 661 | return -EINVAL; |
685 | 662 | ||
686 | return 0; | 663 | return 0; |
687 | |||
688 | err: | ||
689 | kfree(vcpu_e500->tlb_refs[0]); | ||
690 | kfree(vcpu_e500->tlb_refs[1]); | ||
691 | return -EINVAL; | ||
692 | } | 664 | } |
693 | 665 | ||
694 | void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500) | 666 | void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500) |
695 | { | 667 | { |
696 | kfree(vcpu_e500->h2g_tlb1_rmap); | 668 | kfree(vcpu_e500->h2g_tlb1_rmap); |
697 | kfree(vcpu_e500->tlb_refs[0]); | ||
698 | kfree(vcpu_e500->tlb_refs[1]); | ||
699 | } | 669 | } |
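One subtle detail in the e500_mmu_host.c hunks is the reverse-map encoding: h2g_tlb1_rmap now stores esel + 1, so that 0 can keep meaning "no guest entry maps this host slot", and readers subtract 1 before use. A small sketch of that "bias by one so zero stays a sentinel" idiom; the array size is invented:

#include <stdio.h>

#define HOST_SLOTS 4

/* 0 means "unused"; a value n means "guest entry n - 1". */
static unsigned int rmap[HOST_SLOTS];

static void map_slot(unsigned int host_slot, unsigned int guest_entry)
{
	rmap[host_slot] = guest_entry + 1;
}

static void show_slot(unsigned int host_slot)
{
	if (rmap[host_slot])
		printf("slot %u -> guest entry %u\n", host_slot, rmap[host_slot] - 1);
	else
		printf("slot %u unused\n", host_slot);
}

int main(void)
{
	map_slot(2, 0);		/* guest entry 0 maps into host slot 2 */
	show_slot(2);
	show_slot(3);
	return 0;
}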
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c index 1f89d26e65fb..2f4baa074b2e 100644 --- a/arch/powerpc/kvm/e500mc.c +++ b/arch/powerpc/kvm/e500mc.c | |||
@@ -108,6 +108,8 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr) | |||
108 | { | 108 | { |
109 | } | 109 | } |
110 | 110 | ||
111 | static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu); | ||
112 | |||
111 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | 113 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
112 | { | 114 | { |
113 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 115 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
@@ -136,8 +138,11 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
136 | mtspr(SPRN_GDEAR, vcpu->arch.shared->dar); | 138 | mtspr(SPRN_GDEAR, vcpu->arch.shared->dar); |
137 | mtspr(SPRN_GESR, vcpu->arch.shared->esr); | 139 | mtspr(SPRN_GESR, vcpu->arch.shared->esr); |
138 | 140 | ||
139 | if (vcpu->arch.oldpir != mfspr(SPRN_PIR)) | 141 | if (vcpu->arch.oldpir != mfspr(SPRN_PIR) || |
142 | __get_cpu_var(last_vcpu_on_cpu) != vcpu) { | ||
140 | kvmppc_e500_tlbil_all(vcpu_e500); | 143 | kvmppc_e500_tlbil_all(vcpu_e500); |
144 | __get_cpu_var(last_vcpu_on_cpu) = vcpu; | ||
145 | } | ||
141 | 146 | ||
142 | kvmppc_load_guest_fp(vcpu); | 147 | kvmppc_load_guest_fp(vcpu); |
143 | } | 148 | } |
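The e500mc.c hunk widens the "do we need to flush?" test: even when the saved PIR matches, the guest TLB must still be thrown away if a different vcpu was the last one to run on this physical CPU, so each CPU now remembers the last vcpu it hosted. A simplified userspace model of that bookkeeping, with a plain array standing in for the per-CPU variable; names are made up:

#include <stdio.h>

#define NR_CPUS 4

struct vcpu { int id; };

/* Stand-in for the per-CPU last_vcpu_on_cpu variable. */
static struct vcpu *last_vcpu_on_cpu[NR_CPUS];

static void vcpu_load(struct vcpu *v, int cpu)
{
	if (last_vcpu_on_cpu[cpu] != v) {
		printf("cpu %d: flush stale guest TLB before running vcpu %d\n",
		       cpu, v->id);
		last_vcpu_on_cpu[cpu] = v;
	} else {
		printf("cpu %d: vcpu %d ran here last, no flush\n", cpu, v->id);
	}
}

int main(void)
{
	struct vcpu a = { .id = 0 }, b = { .id = 1 };

	vcpu_load(&a, 0);	/* flush */
	vcpu_load(&a, 0);	/* no flush */
	vcpu_load(&b, 0);	/* flush: a different vcpu used this CPU */
	return 0;
}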
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 0da39fed355a..299731e9036b 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c | |||
@@ -186,7 +186,13 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group) | |||
186 | (0x1UL << 4), &dummy1, &dummy2); | 186 | (0x1UL << 4), &dummy1, &dummy2); |
187 | if (lpar_rc == H_SUCCESS) | 187 | if (lpar_rc == H_SUCCESS) |
188 | return i; | 188 | return i; |
189 | BUG_ON(lpar_rc != H_NOT_FOUND); | 189 | |
190 | /* | ||
191 | * The test for adjunct partition is performed before the | ||
192 | * ANDCOND test. H_RESOURCE may be returned, so we need to | ||
193 | * check for that as well. | ||
194 | */ | ||
195 | BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE); | ||
190 | 196 | ||
191 | slot_offset++; | 197 | slot_offset++; |
192 | slot_offset &= 0x7; | 198 | slot_offset &= 0x7; |
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h index 27cb32185ce1..379d96e2105e 100644 --- a/arch/s390/include/asm/io.h +++ b/arch/s390/include/asm/io.h | |||
@@ -50,10 +50,6 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr); | |||
50 | #define ioremap_nocache(addr, size) ioremap(addr, size) | 50 | #define ioremap_nocache(addr, size) ioremap(addr, size) |
51 | #define ioremap_wc ioremap_nocache | 51 | #define ioremap_wc ioremap_nocache |
52 | 52 | ||
53 | /* TODO: s390 cannot support io_remap_pfn_range... */ | ||
54 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ | ||
55 | remap_pfn_range(vma, vaddr, pfn, size, prot) | ||
56 | |||
57 | static inline void __iomem *ioremap(unsigned long offset, unsigned long size) | 53 | static inline void __iomem *ioremap(unsigned long offset, unsigned long size) |
58 | { | 54 | { |
59 | return (void __iomem *) offset; | 55 | return (void __iomem *) offset; |
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 4a2930844d43..3cb47cf02530 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h | |||
@@ -57,6 +57,10 @@ extern unsigned long zero_page_mask; | |||
57 | (((unsigned long)(vaddr)) &zero_page_mask)))) | 57 | (((unsigned long)(vaddr)) &zero_page_mask)))) |
58 | #define __HAVE_COLOR_ZERO_PAGE | 58 | #define __HAVE_COLOR_ZERO_PAGE |
59 | 59 | ||
60 | /* TODO: s390 cannot support io_remap_pfn_range... */ | ||
61 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ | ||
62 | remap_pfn_range(vma, vaddr, pfn, size, prot) | ||
63 | |||
60 | #endif /* !__ASSEMBLY__ */ | 64 | #endif /* !__ASSEMBLY__ */ |
61 | 65 | ||
62 | /* | 66 | /* |
@@ -344,6 +348,7 @@ extern unsigned long MODULES_END; | |||
344 | #define _REGION3_ENTRY_CO 0x100 /* change-recording override */ | 348 | #define _REGION3_ENTRY_CO 0x100 /* change-recording override */ |
345 | 349 | ||
346 | /* Bits in the segment table entry */ | 350 | /* Bits in the segment table entry */ |
351 | #define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */ | ||
347 | #define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */ | 352 | #define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */ |
348 | #define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */ | 353 | #define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */ |
349 | #define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */ | 354 | #define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */ |
@@ -1531,7 +1536,8 @@ extern int s390_enable_sie(void); | |||
1531 | /* | 1536 | /* |
1532 | * No page table caches to initialise | 1537 | * No page table caches to initialise |
1533 | */ | 1538 | */ |
1534 | #define pgtable_cache_init() do { } while (0) | 1539 | static inline void pgtable_cache_init(void) { } |
1540 | static inline void check_pgt_cache(void) { } | ||
1535 | 1541 | ||
1536 | #include <asm-generic/pgtable.h> | 1542 | #include <asm-generic/pgtable.h> |
1537 | 1543 | ||
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index dff631d34b45..466fb3383960 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c | |||
@@ -77,42 +77,69 @@ static size_t copy_in_kernel(size_t count, void __user *to, | |||
77 | * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the address | 77 | * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the address |
78 | * contains the (negative) exception code. | 78 | * contains the (negative) exception code. |
79 | */ | 79 | */ |
80 | static __always_inline unsigned long follow_table(struct mm_struct *mm, | 80 | #ifdef CONFIG_64BIT |
81 | unsigned long addr, int write) | 81 | static unsigned long follow_table(struct mm_struct *mm, |
82 | unsigned long address, int write) | ||
82 | { | 83 | { |
83 | pgd_t *pgd; | 84 | unsigned long *table = (unsigned long *)__pa(mm->pgd); |
84 | pud_t *pud; | 85 | |
85 | pmd_t *pmd; | 86 | switch (mm->context.asce_bits & _ASCE_TYPE_MASK) { |
86 | pte_t *ptep; | 87 | case _ASCE_TYPE_REGION1: |
88 | table = table + ((address >> 53) & 0x7ff); | ||
89 | if (unlikely(*table & _REGION_ENTRY_INV)) | ||
90 | return -0x39UL; | ||
91 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | ||
92 | case _ASCE_TYPE_REGION2: | ||
93 | table = table + ((address >> 42) & 0x7ff); | ||
94 | if (unlikely(*table & _REGION_ENTRY_INV)) | ||
95 | return -0x3aUL; | ||
96 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | ||
97 | case _ASCE_TYPE_REGION3: | ||
98 | table = table + ((address >> 31) & 0x7ff); | ||
99 | if (unlikely(*table & _REGION_ENTRY_INV)) | ||
100 | return -0x3bUL; | ||
101 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | ||
102 | case _ASCE_TYPE_SEGMENT: | ||
103 | table = table + ((address >> 20) & 0x7ff); | ||
104 | if (unlikely(*table & _SEGMENT_ENTRY_INV)) | ||
105 | return -0x10UL; | ||
106 | if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) { | ||
107 | if (write && (*table & _SEGMENT_ENTRY_RO)) | ||
108 | return -0x04UL; | ||
109 | return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) + | ||
110 | (address & ~_SEGMENT_ENTRY_ORIGIN_LARGE); | ||
111 | } | ||
112 | table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); | ||
113 | } | ||
114 | table = table + ((address >> 12) & 0xff); | ||
115 | if (unlikely(*table & _PAGE_INVALID)) | ||
116 | return -0x11UL; | ||
117 | if (write && (*table & _PAGE_RO)) | ||
118 | return -0x04UL; | ||
119 | return (*table & PAGE_MASK) + (address & ~PAGE_MASK); | ||
120 | } | ||
87 | 121 | ||
88 | pgd = pgd_offset(mm, addr); | 122 | #else /* CONFIG_64BIT */ |
89 | if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) | ||
90 | return -0x3aUL; | ||
91 | 123 | ||
92 | pud = pud_offset(pgd, addr); | 124 | static unsigned long follow_table(struct mm_struct *mm, |
93 | if (pud_none(*pud) || unlikely(pud_bad(*pud))) | 125 | unsigned long address, int write) |
94 | return -0x3bUL; | 126 | { |
127 | unsigned long *table = (unsigned long *)__pa(mm->pgd); | ||
95 | 128 | ||
96 | pmd = pmd_offset(pud, addr); | 129 | table = table + ((address >> 20) & 0x7ff); |
97 | if (pmd_none(*pmd)) | 130 | if (unlikely(*table & _SEGMENT_ENTRY_INV)) |
98 | return -0x10UL; | 131 | return -0x10UL; |
99 | if (pmd_large(*pmd)) { | 132 | table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); |
100 | if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO)) | 133 | table = table + ((address >> 12) & 0xff); |
101 | return -0x04UL; | 134 | if (unlikely(*table & _PAGE_INVALID)) |
102 | return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK); | ||
103 | } | ||
104 | if (unlikely(pmd_bad(*pmd))) | ||
105 | return -0x10UL; | ||
106 | |||
107 | ptep = pte_offset_map(pmd, addr); | ||
108 | if (!pte_present(*ptep)) | ||
109 | return -0x11UL; | 135 | return -0x11UL; |
110 | if (write && (!pte_write(*ptep) || !pte_dirty(*ptep))) | 136 | if (write && (*table & _PAGE_RO)) |
111 | return -0x04UL; | 137 | return -0x04UL; |
112 | 138 | return (*table & PAGE_MASK) + (address & ~PAGE_MASK); | |
113 | return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK); | ||
114 | } | 139 | } |
115 | 140 | ||
141 | #endif /* CONFIG_64BIT */ | ||
142 | |||
116 | static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr, | 143 | static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr, |
117 | size_t n, int write_user) | 144 | size_t n, int write_user) |
118 | { | 145 | { |
@@ -197,7 +224,7 @@ size_t copy_to_user_pt(size_t n, void __user *to, const void *from) | |||
197 | 224 | ||
198 | static size_t clear_user_pt(size_t n, void __user *to) | 225 | static size_t clear_user_pt(size_t n, void __user *to) |
199 | { | 226 | { |
200 | void *zpage = &empty_zero_page; | 227 | void *zpage = (void *) empty_zero_page; |
201 | long done, size, ret; | 228 | long done, size, ret; |
202 | 229 | ||
203 | done = 0; | 230 | done = 0; |
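The rewritten follow_table() walks the s390 address-space tables by hand: it starts at the level named by the ASCE type bits and falls through the switch on purpose, peeling one table level per case until it hits a large-page segment entry or the final page table, checking the invalid and read-only bits at each step. A much-reduced userspace model of a two-level walk with the same fault codes; the constants and layout are invented for illustration only:

#include <stdio.h>

#define ENTRY_INVALID	0x1UL
#define ENTRY_RO	0x2UL

/* Two invented levels: a "segment" table pointing at a "page" table. */
static unsigned long page_table[4];
static unsigned long segment_table[2];

/* Returns a fake physical address, or a negative fault code. */
static long follow(unsigned long addr, int write)
{
	unsigned long seg = segment_table[(addr >> 14) & 1];
	unsigned long *pt, pte;

	if (seg & ENTRY_INVALID)
		return -0x10;			/* segment translation fault */
	pt = (unsigned long *)(seg & ~3UL);
	pte = pt[(addr >> 12) & 3];
	if (pte & ENTRY_INVALID)
		return -0x11;			/* page translation fault */
	if (write && (pte & ENTRY_RO))
		return -0x04;			/* protection fault */
	return (long)((pte & ~0xfffUL) | (addr & 0xfffUL));
}

int main(void)
{
	page_table[0] = 0x10000;		/* writable page frame */
	page_table[1] = 0x20000 | ENTRY_RO;	/* read-only page frame */
	page_table[2] = ENTRY_INVALID;
	page_table[3] = ENTRY_INVALID;
	segment_table[0] = (unsigned long)page_table;
	segment_table[1] = ENTRY_INVALID;

	printf("%#lx\n", (unsigned long)follow(0x0123, 0));	/* 0x10123 */
	printf("%ld\n", follow(0x1123, 1));			/* -4: read-only */
	printf("%ld\n", follow(0x4123, 0));			/* -16: invalid */
	return 0;
}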
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index e26d430ce2fd..ff18e3cfb6b1 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild | |||
@@ -2,11 +2,16 @@ | |||
2 | 2 | ||
3 | 3 | ||
4 | generic-y += clkdev.h | 4 | generic-y += clkdev.h |
5 | generic-y += cputime.h | ||
5 | generic-y += div64.h | 6 | generic-y += div64.h |
7 | generic-y += emergency-restart.h | ||
6 | generic-y += exec.h | 8 | generic-y += exec.h |
7 | generic-y += local64.h | 9 | generic-y += local64.h |
10 | generic-y += mutex.h | ||
8 | generic-y += irq_regs.h | 11 | generic-y += irq_regs.h |
9 | generic-y += local.h | 12 | generic-y += local.h |
10 | generic-y += module.h | 13 | generic-y += module.h |
14 | generic-y += serial.h | ||
11 | generic-y += trace_clock.h | 15 | generic-y += trace_clock.h |
16 | generic-y += types.h | ||
12 | generic-y += word-at-a-time.h | 17 | generic-y += word-at-a-time.h |
diff --git a/arch/sparc/include/asm/cputime.h b/arch/sparc/include/asm/cputime.h deleted file mode 100644 index 1a642b81e019..000000000000 --- a/arch/sparc/include/asm/cputime.h +++ /dev/null | |||
@@ -1,6 +0,0 @@ | |||
1 | #ifndef __SPARC_CPUTIME_H | ||
2 | #define __SPARC_CPUTIME_H | ||
3 | |||
4 | #include <asm-generic/cputime.h> | ||
5 | |||
6 | #endif /* __SPARC_CPUTIME_H */ | ||
diff --git a/arch/sparc/include/asm/emergency-restart.h b/arch/sparc/include/asm/emergency-restart.h deleted file mode 100644 index 108d8c48e42e..000000000000 --- a/arch/sparc/include/asm/emergency-restart.h +++ /dev/null | |||
@@ -1,6 +0,0 @@ | |||
1 | #ifndef _ASM_EMERGENCY_RESTART_H | ||
2 | #define _ASM_EMERGENCY_RESTART_H | ||
3 | |||
4 | #include <asm-generic/emergency-restart.h> | ||
5 | |||
6 | #endif /* _ASM_EMERGENCY_RESTART_H */ | ||
diff --git a/arch/sparc/include/asm/mutex.h b/arch/sparc/include/asm/mutex.h deleted file mode 100644 index 458c1f7fbc18..000000000000 --- a/arch/sparc/include/asm/mutex.h +++ /dev/null | |||
@@ -1,9 +0,0 @@ | |||
1 | /* | ||
2 | * Pull in the generic implementation for the mutex fastpath. | ||
3 | * | ||
4 | * TODO: implement optimized primitives instead, or leave the generic | ||
5 | * implementation in place, or pick the atomic_xchg() based generic | ||
6 | * implementation. (see asm-generic/mutex-xchg.h for details) | ||
7 | */ | ||
8 | |||
9 | #include <asm-generic/mutex-dec.h> | ||
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 08fcce90316b..7619f2f792af 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h | |||
@@ -915,6 +915,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma, | |||
915 | return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot); | 915 | return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot); |
916 | } | 916 | } |
917 | 917 | ||
918 | #include <asm/tlbflush.h> | ||
918 | #include <asm-generic/pgtable.h> | 919 | #include <asm-generic/pgtable.h> |
919 | 920 | ||
920 | /* We provide our own get_unmapped_area to cope with VA holes and | 921 | /* We provide our own get_unmapped_area to cope with VA holes and |
diff --git a/arch/sparc/include/asm/serial.h b/arch/sparc/include/asm/serial.h deleted file mode 100644 index f90d61c28059..000000000000 --- a/arch/sparc/include/asm/serial.h +++ /dev/null | |||
@@ -1,6 +0,0 @@ | |||
1 | #ifndef __SPARC_SERIAL_H | ||
2 | #define __SPARC_SERIAL_H | ||
3 | |||
4 | #define BASE_BAUD ( 1843200 / 16 ) | ||
5 | |||
6 | #endif /* __SPARC_SERIAL_H */ | ||
diff --git a/arch/sparc/include/asm/smp_32.h b/arch/sparc/include/asm/smp_32.h index b73da3c5f10a..3c8917f054de 100644 --- a/arch/sparc/include/asm/smp_32.h +++ b/arch/sparc/include/asm/smp_32.h | |||
@@ -36,7 +36,6 @@ typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long, | |||
36 | unsigned long, unsigned long); | 36 | unsigned long, unsigned long); |
37 | 37 | ||
38 | void cpu_panic(void); | 38 | void cpu_panic(void); |
39 | extern void smp4m_irq_rotate(int cpu); | ||
40 | 39 | ||
41 | /* | 40 | /* |
42 | * General functions that each host system must provide. | 41 | * General functions that each host system must provide. |
@@ -46,7 +45,6 @@ void sun4m_init_smp(void); | |||
46 | void sun4d_init_smp(void); | 45 | void sun4d_init_smp(void); |
47 | 46 | ||
48 | void smp_callin(void); | 47 | void smp_callin(void); |
49 | void smp_boot_cpus(void); | ||
50 | void smp_store_cpu_info(int); | 48 | void smp_store_cpu_info(int); |
51 | 49 | ||
52 | void smp_resched_interrupt(void); | 50 | void smp_resched_interrupt(void); |
@@ -107,9 +105,6 @@ extern int hard_smp_processor_id(void); | |||
107 | 105 | ||
108 | #define raw_smp_processor_id() (current_thread_info()->cpu) | 106 | #define raw_smp_processor_id() (current_thread_info()->cpu) |
109 | 107 | ||
110 | #define prof_multiplier(__cpu) cpu_data(__cpu).multiplier | ||
111 | #define prof_counter(__cpu) cpu_data(__cpu).counter | ||
112 | |||
113 | void smp_setup_cpu_possible_map(void); | 108 | void smp_setup_cpu_possible_map(void); |
114 | 109 | ||
115 | #endif /* !(__ASSEMBLY__) */ | 110 | #endif /* !(__ASSEMBLY__) */ |
diff --git a/arch/sparc/include/asm/switch_to_64.h b/arch/sparc/include/asm/switch_to_64.h index cad36f56fa03..c7de3323819c 100644 --- a/arch/sparc/include/asm/switch_to_64.h +++ b/arch/sparc/include/asm/switch_to_64.h | |||
@@ -18,8 +18,7 @@ do { \ | |||
18 | * and 2 stores in this critical code path. -DaveM | 18 | * and 2 stores in this critical code path. -DaveM |
19 | */ | 19 | */ |
20 | #define switch_to(prev, next, last) \ | 20 | #define switch_to(prev, next, last) \ |
21 | do { flush_tlb_pending(); \ | 21 | do { save_and_clear_fpu(); \ |
22 | save_and_clear_fpu(); \ | ||
23 | /* If you are tempted to conditionalize the following */ \ | 22 | /* If you are tempted to conditionalize the following */ \ |
24 | /* so that ASI is only written if it changes, think again. */ \ | 23 | /* so that ASI is only written if it changes, think again. */ \ |
25 | __asm__ __volatile__("wr %%g0, %0, %%asi" \ | 24 | __asm__ __volatile__("wr %%g0, %0, %%asi" \ |
diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h index 2ef463494153..f0d6a9700f4c 100644 --- a/arch/sparc/include/asm/tlbflush_64.h +++ b/arch/sparc/include/asm/tlbflush_64.h | |||
@@ -11,24 +11,40 @@ | |||
11 | struct tlb_batch { | 11 | struct tlb_batch { |
12 | struct mm_struct *mm; | 12 | struct mm_struct *mm; |
13 | unsigned long tlb_nr; | 13 | unsigned long tlb_nr; |
14 | unsigned long active; | ||
14 | unsigned long vaddrs[TLB_BATCH_NR]; | 15 | unsigned long vaddrs[TLB_BATCH_NR]; |
15 | }; | 16 | }; |
16 | 17 | ||
17 | extern void flush_tsb_kernel_range(unsigned long start, unsigned long end); | 18 | extern void flush_tsb_kernel_range(unsigned long start, unsigned long end); |
18 | extern void flush_tsb_user(struct tlb_batch *tb); | 19 | extern void flush_tsb_user(struct tlb_batch *tb); |
20 | extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr); | ||
19 | 21 | ||
20 | /* TLB flush operations. */ | 22 | /* TLB flush operations. */ |
21 | 23 | ||
22 | extern void flush_tlb_pending(void); | 24 | static inline void flush_tlb_mm(struct mm_struct *mm) |
25 | { | ||
26 | } | ||
27 | |||
28 | static inline void flush_tlb_page(struct vm_area_struct *vma, | ||
29 | unsigned long vmaddr) | ||
30 | { | ||
31 | } | ||
32 | |||
33 | static inline void flush_tlb_range(struct vm_area_struct *vma, | ||
34 | unsigned long start, unsigned long end) | ||
35 | { | ||
36 | } | ||
37 | |||
38 | #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE | ||
23 | 39 | ||
24 | #define flush_tlb_range(vma,start,end) \ | 40 | extern void flush_tlb_pending(void); |
25 | do { (void)(start); flush_tlb_pending(); } while (0) | 41 | extern void arch_enter_lazy_mmu_mode(void); |
26 | #define flush_tlb_page(vma,addr) flush_tlb_pending() | 42 | extern void arch_leave_lazy_mmu_mode(void); |
27 | #define flush_tlb_mm(mm) flush_tlb_pending() | 43 | #define arch_flush_lazy_mmu_mode() do {} while (0) |
28 | 44 | ||
29 | /* Local cpu only. */ | 45 | /* Local cpu only. */ |
30 | extern void __flush_tlb_all(void); | 46 | extern void __flush_tlb_all(void); |
31 | 47 | extern void __flush_tlb_page(unsigned long context, unsigned long vaddr); | |
32 | extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end); | 48 | extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end); |
33 | 49 | ||
34 | #ifndef CONFIG_SMP | 50 | #ifndef CONFIG_SMP |
@@ -38,15 +54,24 @@ do { flush_tsb_kernel_range(start,end); \ | |||
38 | __flush_tlb_kernel_range(start,end); \ | 54 | __flush_tlb_kernel_range(start,end); \ |
39 | } while (0) | 55 | } while (0) |
40 | 56 | ||
57 | static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) | ||
58 | { | ||
59 | __flush_tlb_page(CTX_HWBITS(mm->context), vaddr); | ||
60 | } | ||
61 | |||
41 | #else /* CONFIG_SMP */ | 62 | #else /* CONFIG_SMP */ |
42 | 63 | ||
43 | extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end); | 64 | extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end); |
65 | extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr); | ||
44 | 66 | ||
45 | #define flush_tlb_kernel_range(start, end) \ | 67 | #define flush_tlb_kernel_range(start, end) \ |
46 | do { flush_tsb_kernel_range(start,end); \ | 68 | do { flush_tsb_kernel_range(start,end); \ |
47 | smp_flush_tlb_kernel_range(start, end); \ | 69 | smp_flush_tlb_kernel_range(start, end); \ |
48 | } while (0) | 70 | } while (0) |
49 | 71 | ||
72 | #define global_flush_tlb_page(mm, vaddr) \ | ||
73 | smp_flush_tlb_page(mm, vaddr) | ||
74 | |||
50 | #endif /* ! CONFIG_SMP */ | 75 | #endif /* ! CONFIG_SMP */ |
51 | 76 | ||
52 | #endif /* _SPARC64_TLBFLUSH_H */ | 77 | #endif /* _SPARC64_TLBFLUSH_H */ |
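The tlbflush_64.h hunks turn flush_tlb_mm/page/range into no-ops; the surrounding changes suggest sparc64 now defers user TLB flushing to the batching done under lazy MMU mode, collecting addresses in the tlb_batch and flushing them in one go when the batch is full or the lazy region ends. A tiny userspace sketch of that batch-then-flush idea; sizes and names are illustrative only:

#include <stdio.h>

#define BATCH_NR 8

struct tlb_batch {
	unsigned long nr;
	unsigned long vaddrs[BATCH_NR];
};

static struct tlb_batch batch;

static void flush_pending(void)
{
	unsigned long i;

	for (i = 0; i < batch.nr; i++)
		printf("flush TLB entry for %#lx\n", batch.vaddrs[i]);
	batch.nr = 0;
}

/* Queue one address; flush early only if the batch is full. */
static void queue_flush(unsigned long vaddr)
{
	if (batch.nr == BATCH_NR)
		flush_pending();
	batch.vaddrs[batch.nr++] = vaddr;
}

int main(void)
{
	queue_flush(0x1000);		/* inside the "lazy MMU" region */
	queue_flush(0x2000);
	flush_pending();		/* leaving the region flushes the batch */
	return 0;
}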
diff --git a/arch/sparc/include/uapi/asm/Kbuild b/arch/sparc/include/uapi/asm/Kbuild index ce175aff71b7..b5843ee09fb5 100644 --- a/arch/sparc/include/uapi/asm/Kbuild +++ b/arch/sparc/include/uapi/asm/Kbuild | |||
@@ -44,7 +44,6 @@ header-y += swab.h | |||
44 | header-y += termbits.h | 44 | header-y += termbits.h |
45 | header-y += termios.h | 45 | header-y += termios.h |
46 | header-y += traps.h | 46 | header-y += traps.h |
47 | header-y += types.h | ||
48 | header-y += uctx.h | 47 | header-y += uctx.h |
49 | header-y += unistd.h | 48 | header-y += unistd.h |
50 | header-y += utrap.h | 49 | header-y += utrap.h |
diff --git a/arch/sparc/include/uapi/asm/types.h b/arch/sparc/include/uapi/asm/types.h deleted file mode 100644 index 383d156cde9c..000000000000 --- a/arch/sparc/include/uapi/asm/types.h +++ /dev/null | |||
@@ -1,17 +0,0 @@ | |||
1 | #ifndef _SPARC_TYPES_H | ||
2 | #define _SPARC_TYPES_H | ||
3 | /* | ||
4 | * This file is never included by application software unless | ||
5 | * explicitly requested (e.g., via linux/types.h) in which case the | ||
6 | * application is Linux specific so (user-) name space pollution is | ||
7 | * not a major issue. However, for interoperability, libraries still | ||
8 | * need to be careful to avoid a name clashes. | ||
9 | */ | ||
10 | |||
11 | #if defined(__sparc__) | ||
12 | |||
13 | #include <asm-generic/int-ll64.h> | ||
14 | |||
15 | #endif /* defined(__sparc__) */ | ||
16 | |||
17 | #endif /* defined(_SPARC_TYPES_H) */ | ||
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 537eb66abd06..ca64d2a86ec0 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c | |||
@@ -849,7 +849,7 @@ void smp_tsb_sync(struct mm_struct *mm) | |||
849 | } | 849 | } |
850 | 850 | ||
851 | extern unsigned long xcall_flush_tlb_mm; | 851 | extern unsigned long xcall_flush_tlb_mm; |
852 | extern unsigned long xcall_flush_tlb_pending; | 852 | extern unsigned long xcall_flush_tlb_page; |
853 | extern unsigned long xcall_flush_tlb_kernel_range; | 853 | extern unsigned long xcall_flush_tlb_kernel_range; |
854 | extern unsigned long xcall_fetch_glob_regs; | 854 | extern unsigned long xcall_fetch_glob_regs; |
855 | extern unsigned long xcall_fetch_glob_pmu; | 855 | extern unsigned long xcall_fetch_glob_pmu; |
@@ -1074,23 +1074,56 @@ local_flush_and_out: | |||
1074 | put_cpu(); | 1074 | put_cpu(); |
1075 | } | 1075 | } |
1076 | 1076 | ||
1077 | struct tlb_pending_info { | ||
1078 | unsigned long ctx; | ||
1079 | unsigned long nr; | ||
1080 | unsigned long *vaddrs; | ||
1081 | }; | ||
1082 | |||
1083 | static void tlb_pending_func(void *info) | ||
1084 | { | ||
1085 | struct tlb_pending_info *t = info; | ||
1086 | |||
1087 | __flush_tlb_pending(t->ctx, t->nr, t->vaddrs); | ||
1088 | } | ||
1089 | |||
1077 | void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs) | 1090 | void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs) |
1078 | { | 1091 | { |
1079 | u32 ctx = CTX_HWBITS(mm->context); | 1092 | u32 ctx = CTX_HWBITS(mm->context); |
1093 | struct tlb_pending_info info; | ||
1080 | int cpu = get_cpu(); | 1094 | int cpu = get_cpu(); |
1081 | 1095 | ||
1096 | info.ctx = ctx; | ||
1097 | info.nr = nr; | ||
1098 | info.vaddrs = vaddrs; | ||
1099 | |||
1082 | if (mm == current->mm && atomic_read(&mm->mm_users) == 1) | 1100 | if (mm == current->mm && atomic_read(&mm->mm_users) == 1) |
1083 | cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); | 1101 | cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); |
1084 | else | 1102 | else |
1085 | smp_cross_call_masked(&xcall_flush_tlb_pending, | 1103 | smp_call_function_many(mm_cpumask(mm), tlb_pending_func, |
1086 | ctx, nr, (unsigned long) vaddrs, | 1104 | &info, 1); |
1087 | mm_cpumask(mm)); | ||
1088 | 1105 | ||
1089 | __flush_tlb_pending(ctx, nr, vaddrs); | 1106 | __flush_tlb_pending(ctx, nr, vaddrs); |
1090 | 1107 | ||
1091 | put_cpu(); | 1108 | put_cpu(); |
1092 | } | 1109 | } |
1093 | 1110 | ||
1111 | void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) | ||
1112 | { | ||
1113 | unsigned long context = CTX_HWBITS(mm->context); | ||
1114 | int cpu = get_cpu(); | ||
1115 | |||
1116 | if (mm == current->mm && atomic_read(&mm->mm_users) == 1) | ||
1117 | cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); | ||
1118 | else | ||
1119 | smp_cross_call_masked(&xcall_flush_tlb_page, | ||
1120 | context, vaddr, 0, | ||
1121 | mm_cpumask(mm)); | ||
1122 | __flush_tlb_page(context, vaddr); | ||
1123 | |||
1124 | put_cpu(); | ||
1125 | } | ||
1126 | |||
1094 | void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end) | 1127 | void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end) |
1095 | { | 1128 | { |
1096 | start &= PAGE_MASK; | 1129 | start &= PAGE_MASK; |
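
Reviewer note on the smp_64.c hunk above: smp_flush_tlb_pending() now packs (ctx, nr, vaddrs) into a tlb_pending_info and broadcasts it with the generic smp_call_function_many() instead of the hand-rolled xcall, while the new smp_flush_tlb_page() keeps a lightweight cross-call for the single-address case. A minimal sketch of the smp_call_function_many() pattern, with made-up demo_* names:

#include <linux/smp.h>
#include <linux/mm_types.h>

/* wait=1 keeps the old synchronous semantics: the caller blocks until
 * every CPU in the mask has run the callback.  The callback never runs
 * on the calling CPU, which is why smp_flush_tlb_pending() still calls
 * __flush_tlb_pending() locally afterwards, and preemption must be off
 * around the call (get_cpu() in the hunk). */
struct demo_info {
	unsigned long ctx;
};

static void demo_func(void *arg)
{
	struct demo_info *info = arg;

	(void)info->ctx;	/* per-CPU flush work would go here */
}

static void demo_broadcast(struct mm_struct *mm, unsigned long ctx)
{
	struct demo_info info = { .ctx = ctx };

	smp_call_function_many(mm_cpumask(mm), demo_func, &info, 1);
}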
diff --git a/arch/sparc/lib/bitext.c b/arch/sparc/lib/bitext.c index 48d00e72ce15..8ec4e9c0251a 100644 --- a/arch/sparc/lib/bitext.c +++ b/arch/sparc/lib/bitext.c | |||
@@ -119,11 +119,7 @@ void bit_map_clear(struct bit_map *t, int offset, int len) | |||
119 | 119 | ||
120 | void bit_map_init(struct bit_map *t, unsigned long *map, int size) | 120 | void bit_map_init(struct bit_map *t, unsigned long *map, int size) |
121 | { | 121 | { |
122 | 122 | bitmap_zero(map, size); | |
123 | if ((size & 07) != 0) | ||
124 | BUG(); | ||
125 | memset(map, 0, size>>3); | ||
126 | |||
127 | memset(t, 0, sizeof *t); | 123 | memset(t, 0, sizeof *t); |
128 | spin_lock_init(&t->lock); | 124 | spin_lock_init(&t->lock); |
129 | t->map = map; | 125 | t->map = map; |
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c index 0f4f7191fbba..28f96f27c768 100644 --- a/arch/sparc/mm/iommu.c +++ b/arch/sparc/mm/iommu.c | |||
@@ -34,7 +34,7 @@ | |||
34 | #define IOMMU_RNGE IOMMU_RNGE_256MB | 34 | #define IOMMU_RNGE IOMMU_RNGE_256MB |
35 | #define IOMMU_START 0xF0000000 | 35 | #define IOMMU_START 0xF0000000 |
36 | #define IOMMU_WINSIZE (256*1024*1024U) | 36 | #define IOMMU_WINSIZE (256*1024*1024U) |
37 | #define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE) /* 64K PTEs, 265KB */ | 37 | #define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE) /* 64K PTEs, 256KB */ |
38 | #define IOMMU_ORDER 6 /* 4096 * (1<<6) */ | 38 | #define IOMMU_ORDER 6 /* 4096 * (1<<6) */ |
39 | 39 | ||
40 | /* srmmu.c */ | 40 | /* srmmu.c */ |
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index c38bb72e3e80..036c2797dece 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c | |||
@@ -280,7 +280,9 @@ static void __init srmmu_nocache_init(void) | |||
280 | SRMMU_NOCACHE_ALIGN_MAX, 0UL); | 280 | SRMMU_NOCACHE_ALIGN_MAX, 0UL); |
281 | memset(srmmu_nocache_pool, 0, srmmu_nocache_size); | 281 | memset(srmmu_nocache_pool, 0, srmmu_nocache_size); |
282 | 282 | ||
283 | srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL); | 283 | srmmu_nocache_bitmap = |
284 | __alloc_bootmem(BITS_TO_LONGS(bitmap_bits) * sizeof(long), | ||
285 | SMP_CACHE_BYTES, 0UL); | ||
284 | bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits); | 286 | bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits); |
285 | 287 | ||
286 | srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); | 288 | srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); |
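
Reviewer note on the bitext.c and srmmu.c hunks above: bit_map_init() now uses bitmap_zero(), which always writes whole longs, so the srmmu caller must size the allocation with BITS_TO_LONGS() rather than bits>>3; the BUG_ON(size & 7) also goes away, so bit counts no longer need to be byte multiples. A small illustration with an arbitrary bit count (DEMO_BITS and demo_* are made up):

#include <linux/bitmap.h>
#include <linux/bitops.h>

/* With 100 bits, the old code would BUG (100 & 7 != 0) and would have
 * zeroed only 100 >> 3 = 12 bytes, while bitmap_zero() touches
 * BITS_TO_LONGS(100) whole longs, i.e. 16 bytes on both 32- and 64-bit.
 * Sizing the buffer with BITS_TO_LONGS() keeps allocation and zeroing
 * in sync. */
#define DEMO_BITS 100
static unsigned long demo_map[BITS_TO_LONGS(DEMO_BITS)];

static void demo_init(void)
{
	bitmap_zero(demo_map, DEMO_BITS);
}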
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index ba6ae7ffdc2c..83d89bcb44af 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c | |||
@@ -24,11 +24,17 @@ static DEFINE_PER_CPU(struct tlb_batch, tlb_batch); | |||
24 | void flush_tlb_pending(void) | 24 | void flush_tlb_pending(void) |
25 | { | 25 | { |
26 | struct tlb_batch *tb = &get_cpu_var(tlb_batch); | 26 | struct tlb_batch *tb = &get_cpu_var(tlb_batch); |
27 | struct mm_struct *mm = tb->mm; | ||
27 | 28 | ||
28 | if (tb->tlb_nr) { | 29 | if (!tb->tlb_nr) |
29 | flush_tsb_user(tb); | 30 | goto out; |
30 | 31 | ||
31 | if (CTX_VALID(tb->mm->context)) { | 32 | flush_tsb_user(tb); |
33 | |||
34 | if (CTX_VALID(mm->context)) { | ||
35 | if (tb->tlb_nr == 1) { | ||
36 | global_flush_tlb_page(mm, tb->vaddrs[0]); | ||
37 | } else { | ||
32 | #ifdef CONFIG_SMP | 38 | #ifdef CONFIG_SMP |
33 | smp_flush_tlb_pending(tb->mm, tb->tlb_nr, | 39 | smp_flush_tlb_pending(tb->mm, tb->tlb_nr, |
34 | &tb->vaddrs[0]); | 40 | &tb->vaddrs[0]); |
@@ -37,12 +43,30 @@ void flush_tlb_pending(void) | |||
37 | tb->tlb_nr, &tb->vaddrs[0]); | 43 | tb->tlb_nr, &tb->vaddrs[0]); |
38 | #endif | 44 | #endif |
39 | } | 45 | } |
40 | tb->tlb_nr = 0; | ||
41 | } | 46 | } |
42 | 47 | ||
48 | tb->tlb_nr = 0; | ||
49 | |||
50 | out: | ||
43 | put_cpu_var(tlb_batch); | 51 | put_cpu_var(tlb_batch); |
44 | } | 52 | } |
45 | 53 | ||
54 | void arch_enter_lazy_mmu_mode(void) | ||
55 | { | ||
56 | struct tlb_batch *tb = &__get_cpu_var(tlb_batch); | ||
57 | |||
58 | tb->active = 1; | ||
59 | } | ||
60 | |||
61 | void arch_leave_lazy_mmu_mode(void) | ||
62 | { | ||
63 | struct tlb_batch *tb = &__get_cpu_var(tlb_batch); | ||
64 | |||
65 | if (tb->tlb_nr) | ||
66 | flush_tlb_pending(); | ||
67 | tb->active = 0; | ||
68 | } | ||
69 | |||
46 | static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, | 70 | static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, |
47 | bool exec) | 71 | bool exec) |
48 | { | 72 | { |
@@ -60,6 +84,12 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, | |||
60 | nr = 0; | 84 | nr = 0; |
61 | } | 85 | } |
62 | 86 | ||
87 | if (!tb->active) { | ||
88 | global_flush_tlb_page(mm, vaddr); | ||
89 | flush_tsb_user_page(mm, vaddr); | ||
90 | goto out; | ||
91 | } | ||
92 | |||
63 | if (nr == 0) | 93 | if (nr == 0) |
64 | tb->mm = mm; | 94 | tb->mm = mm; |
65 | 95 | ||
@@ -68,6 +98,7 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, | |||
68 | if (nr >= TLB_BATCH_NR) | 98 | if (nr >= TLB_BATCH_NR) |
69 | flush_tlb_pending(); | 99 | flush_tlb_pending(); |
70 | 100 | ||
101 | out: | ||
71 | put_cpu_var(tlb_batch); | 102 | put_cpu_var(tlb_batch); |
72 | } | 103 | } |
73 | 104 | ||
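
Reviewer note on the tlb.c hunk above: lazy-MMU mode now gates the batching. arch_enter_lazy_mmu_mode() marks the per-cpu batch active, tlb_batch_add_one() queues addresses only while active and otherwise flushes the page (TLB and TSB) immediately, and arch_leave_lazy_mmu_mode() drains anything still pending. flush_tlb_pending() also special-cases a one-entry batch through global_flush_tlb_page(). A sketch of the intended calling pattern; demo_batched_updates is a hypothetical caller:

#include <linux/mm.h>
#include <asm/tlbflush.h>

/* PTE updates made between the lazy-MMU hooks are queued on the per-cpu
 * batch and flushed once on leave; updates made outside such a bracket
 * are now flushed immediately by tlb_batch_add_one() because tb->active
 * is 0. */
static void demo_batched_updates(unsigned long start, unsigned long end)
{
	unsigned long addr;

	arch_enter_lazy_mmu_mode();
	for (addr = start; addr < end; addr += PAGE_SIZE)
		;	/* set_pte_at()/ptep_get_and_clear() would queue addr here */
	arch_leave_lazy_mmu_mode();	/* drains the batch via flush_tlb_pending() */
}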
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index 428982b9becf..2cc3bce5ee91 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c | |||
@@ -7,11 +7,10 @@ | |||
7 | #include <linux/preempt.h> | 7 | #include <linux/preempt.h> |
8 | #include <linux/slab.h> | 8 | #include <linux/slab.h> |
9 | #include <asm/page.h> | 9 | #include <asm/page.h> |
10 | #include <asm/tlbflush.h> | ||
11 | #include <asm/tlb.h> | ||
12 | #include <asm/mmu_context.h> | ||
13 | #include <asm/pgtable.h> | 10 | #include <asm/pgtable.h> |
11 | #include <asm/mmu_context.h> | ||
14 | #include <asm/tsb.h> | 12 | #include <asm/tsb.h> |
13 | #include <asm/tlb.h> | ||
15 | #include <asm/oplib.h> | 14 | #include <asm/oplib.h> |
16 | 15 | ||
17 | extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; | 16 | extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; |
@@ -46,23 +45,27 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end) | |||
46 | } | 45 | } |
47 | } | 46 | } |
48 | 47 | ||
49 | static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift, | 48 | static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v, |
50 | unsigned long tsb, unsigned long nentries) | 49 | unsigned long hash_shift, |
50 | unsigned long nentries) | ||
51 | { | 51 | { |
52 | unsigned long i; | 52 | unsigned long tag, ent, hash; |
53 | 53 | ||
54 | for (i = 0; i < tb->tlb_nr; i++) { | 54 | v &= ~0x1UL; |
55 | unsigned long v = tb->vaddrs[i]; | 55 | hash = tsb_hash(v, hash_shift, nentries); |
56 | unsigned long tag, ent, hash; | 56 | ent = tsb + (hash * sizeof(struct tsb)); |
57 | tag = (v >> 22UL); | ||
57 | 58 | ||
58 | v &= ~0x1UL; | 59 | tsb_flush(ent, tag); |
60 | } | ||
59 | 61 | ||
60 | hash = tsb_hash(v, hash_shift, nentries); | 62 | static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift, |
61 | ent = tsb + (hash * sizeof(struct tsb)); | 63 | unsigned long tsb, unsigned long nentries) |
62 | tag = (v >> 22UL); | 64 | { |
65 | unsigned long i; | ||
63 | 66 | ||
64 | tsb_flush(ent, tag); | 67 | for (i = 0; i < tb->tlb_nr; i++) |
65 | } | 68 | __flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries); |
66 | } | 69 | } |
67 | 70 | ||
68 | void flush_tsb_user(struct tlb_batch *tb) | 71 | void flush_tsb_user(struct tlb_batch *tb) |
@@ -90,6 +93,30 @@ void flush_tsb_user(struct tlb_batch *tb) | |||
90 | spin_unlock_irqrestore(&mm->context.lock, flags); | 93 | spin_unlock_irqrestore(&mm->context.lock, flags); |
91 | } | 94 | } |
92 | 95 | ||
96 | void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr) | ||
97 | { | ||
98 | unsigned long nentries, base, flags; | ||
99 | |||
100 | spin_lock_irqsave(&mm->context.lock, flags); | ||
101 | |||
102 | base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; | ||
103 | nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; | ||
104 | if (tlb_type == cheetah_plus || tlb_type == hypervisor) | ||
105 | base = __pa(base); | ||
106 | __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries); | ||
107 | |||
108 | #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) | ||
109 | if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { | ||
110 | base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; | ||
111 | nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; | ||
112 | if (tlb_type == cheetah_plus || tlb_type == hypervisor) | ||
113 | base = __pa(base); | ||
114 | __flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries); | ||
115 | } | ||
116 | #endif | ||
117 | spin_unlock_irqrestore(&mm->context.lock, flags); | ||
118 | } | ||
119 | |||
93 | #define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K | 120 | #define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K |
94 | #define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_8K | 121 | #define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_8K |
95 | 122 | ||
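
Reviewer note on the tsb.c hunk above: the per-entry work of __flush_tsb_one() is factored into __flush_tsb_one_entry(), and the new flush_tsb_user_page() uses it to invalidate the TSB slot for a single address in the base TSB (and the huge-page TSB when present). A rough C model of the per-entry step; demo_* names are made up, and the real code may operate on a physical TSB base and uses tsb_flush() to store the invalid tag with the correct memory ordering:

/* tsb_hash() is effectively (vaddr >> hash_shift) & (nentries - 1), so a
 * given address selects exactly one slot per TSB. */
struct demo_tsb_ent {
	unsigned long tag;
	unsigned long pte;
};

static void demo_flush_one_entry(struct demo_tsb_ent *tsb, unsigned long vaddr,
				 unsigned long hash_shift, unsigned long nentries)
{
	unsigned long slot = (vaddr >> hash_shift) & (nentries - 1);

	tsb[slot].tag = ~0UL;	/* stand-in for tsb_flush(): mark the slot invalid */
}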
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S index f8e13d421fcb..432aa0cb1b38 100644 --- a/arch/sparc/mm/ultra.S +++ b/arch/sparc/mm/ultra.S | |||
@@ -53,6 +53,33 @@ __flush_tlb_mm: /* 18 insns */ | |||
53 | nop | 53 | nop |
54 | 54 | ||
55 | .align 32 | 55 | .align 32 |
56 | .globl __flush_tlb_page | ||
57 | __flush_tlb_page: /* 22 insns */ | ||
58 | /* %o0 = context, %o1 = vaddr */ | ||
59 | rdpr %pstate, %g7 | ||
60 | andn %g7, PSTATE_IE, %g2 | ||
61 | wrpr %g2, %pstate | ||
62 | mov SECONDARY_CONTEXT, %o4 | ||
63 | ldxa [%o4] ASI_DMMU, %g2 | ||
64 | stxa %o0, [%o4] ASI_DMMU | ||
65 | andcc %o1, 1, %g0 | ||
66 | andn %o1, 1, %o3 | ||
67 | be,pn %icc, 1f | ||
68 | or %o3, 0x10, %o3 | ||
69 | stxa %g0, [%o3] ASI_IMMU_DEMAP | ||
70 | 1: stxa %g0, [%o3] ASI_DMMU_DEMAP | ||
71 | membar #Sync | ||
72 | stxa %g2, [%o4] ASI_DMMU | ||
73 | sethi %hi(KERNBASE), %o4 | ||
74 | flush %o4 | ||
75 | retl | ||
76 | wrpr %g7, 0x0, %pstate | ||
77 | nop | ||
78 | nop | ||
79 | nop | ||
80 | nop | ||
81 | |||
82 | .align 32 | ||
56 | .globl __flush_tlb_pending | 83 | .globl __flush_tlb_pending |
57 | __flush_tlb_pending: /* 26 insns */ | 84 | __flush_tlb_pending: /* 26 insns */ |
58 | /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ | 85 | /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ |
@@ -203,6 +230,31 @@ __cheetah_flush_tlb_mm: /* 19 insns */ | |||
203 | retl | 230 | retl |
204 | wrpr %g7, 0x0, %pstate | 231 | wrpr %g7, 0x0, %pstate |
205 | 232 | ||
233 | __cheetah_flush_tlb_page: /* 22 insns */ | ||
234 | /* %o0 = context, %o1 = vaddr */ | ||
235 | rdpr %pstate, %g7 | ||
236 | andn %g7, PSTATE_IE, %g2 | ||
237 | wrpr %g2, 0x0, %pstate | ||
238 | wrpr %g0, 1, %tl | ||
239 | mov PRIMARY_CONTEXT, %o4 | ||
240 | ldxa [%o4] ASI_DMMU, %g2 | ||
241 | srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3 | ||
242 | sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3 | ||
243 | or %o0, %o3, %o0 /* Preserve nucleus page size fields */ | ||
244 | stxa %o0, [%o4] ASI_DMMU | ||
245 | andcc %o1, 1, %g0 | ||
246 | be,pn %icc, 1f | ||
247 | andn %o1, 1, %o3 | ||
248 | stxa %g0, [%o3] ASI_IMMU_DEMAP | ||
249 | 1: stxa %g0, [%o3] ASI_DMMU_DEMAP | ||
250 | membar #Sync | ||
251 | stxa %g2, [%o4] ASI_DMMU | ||
252 | sethi %hi(KERNBASE), %o4 | ||
253 | flush %o4 | ||
254 | wrpr %g0, 0, %tl | ||
255 | retl | ||
256 | wrpr %g7, 0x0, %pstate | ||
257 | |||
206 | __cheetah_flush_tlb_pending: /* 27 insns */ | 258 | __cheetah_flush_tlb_pending: /* 27 insns */ |
207 | /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ | 259 | /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ |
208 | rdpr %pstate, %g7 | 260 | rdpr %pstate, %g7 |
@@ -269,6 +321,20 @@ __hypervisor_flush_tlb_mm: /* 10 insns */ | |||
269 | retl | 321 | retl |
270 | nop | 322 | nop |
271 | 323 | ||
324 | __hypervisor_flush_tlb_page: /* 11 insns */ | ||
325 | /* %o0 = context, %o1 = vaddr */ | ||
326 | mov %o0, %g2 | ||
327 | mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */ | ||
328 | mov %g2, %o1 /* ARG1: mmu context */ | ||
329 | mov HV_MMU_ALL, %o2 /* ARG2: flags */ | ||
330 | srlx %o0, PAGE_SHIFT, %o0 | ||
331 | sllx %o0, PAGE_SHIFT, %o0 | ||
332 | ta HV_MMU_UNMAP_ADDR_TRAP | ||
333 | brnz,pn %o0, __hypervisor_tlb_tl0_error | ||
334 | mov HV_MMU_UNMAP_ADDR_TRAP, %o1 | ||
335 | retl | ||
336 | nop | ||
337 | |||
272 | __hypervisor_flush_tlb_pending: /* 16 insns */ | 338 | __hypervisor_flush_tlb_pending: /* 16 insns */ |
273 | /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ | 339 | /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ |
274 | sllx %o1, 3, %g1 | 340 | sllx %o1, 3, %g1 |
@@ -339,6 +405,13 @@ cheetah_patch_cachetlbops: | |||
339 | call tlb_patch_one | 405 | call tlb_patch_one |
340 | mov 19, %o2 | 406 | mov 19, %o2 |
341 | 407 | ||
408 | sethi %hi(__flush_tlb_page), %o0 | ||
409 | or %o0, %lo(__flush_tlb_page), %o0 | ||
410 | sethi %hi(__cheetah_flush_tlb_page), %o1 | ||
411 | or %o1, %lo(__cheetah_flush_tlb_page), %o1 | ||
412 | call tlb_patch_one | ||
413 | mov 22, %o2 | ||
414 | |||
342 | sethi %hi(__flush_tlb_pending), %o0 | 415 | sethi %hi(__flush_tlb_pending), %o0 |
343 | or %o0, %lo(__flush_tlb_pending), %o0 | 416 | or %o0, %lo(__flush_tlb_pending), %o0 |
344 | sethi %hi(__cheetah_flush_tlb_pending), %o1 | 417 | sethi %hi(__cheetah_flush_tlb_pending), %o1 |
@@ -397,10 +470,9 @@ xcall_flush_tlb_mm: /* 21 insns */ | |||
397 | nop | 470 | nop |
398 | nop | 471 | nop |
399 | 472 | ||
400 | .globl xcall_flush_tlb_pending | 473 | .globl xcall_flush_tlb_page |
401 | xcall_flush_tlb_pending: /* 21 insns */ | 474 | xcall_flush_tlb_page: /* 17 insns */ |
402 | /* %g5=context, %g1=nr, %g7=vaddrs[] */ | 475 | /* %g5=context, %g1=vaddr */ |
403 | sllx %g1, 3, %g1 | ||
404 | mov PRIMARY_CONTEXT, %g4 | 476 | mov PRIMARY_CONTEXT, %g4 |
405 | ldxa [%g4] ASI_DMMU, %g2 | 477 | ldxa [%g4] ASI_DMMU, %g2 |
406 | srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4 | 478 | srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4 |
@@ -408,20 +480,16 @@ xcall_flush_tlb_pending: /* 21 insns */ | |||
408 | or %g5, %g4, %g5 | 480 | or %g5, %g4, %g5 |
409 | mov PRIMARY_CONTEXT, %g4 | 481 | mov PRIMARY_CONTEXT, %g4 |
410 | stxa %g5, [%g4] ASI_DMMU | 482 | stxa %g5, [%g4] ASI_DMMU |
411 | 1: sub %g1, (1 << 3), %g1 | 483 | andcc %g1, 0x1, %g0 |
412 | ldx [%g7 + %g1], %g5 | ||
413 | andcc %g5, 0x1, %g0 | ||
414 | be,pn %icc, 2f | 484 | be,pn %icc, 2f |
415 | 485 | andn %g1, 0x1, %g5 | |
416 | andn %g5, 0x1, %g5 | ||
417 | stxa %g0, [%g5] ASI_IMMU_DEMAP | 486 | stxa %g0, [%g5] ASI_IMMU_DEMAP |
418 | 2: stxa %g0, [%g5] ASI_DMMU_DEMAP | 487 | 2: stxa %g0, [%g5] ASI_DMMU_DEMAP |
419 | membar #Sync | 488 | membar #Sync |
420 | brnz,pt %g1, 1b | ||
421 | nop | ||
422 | stxa %g2, [%g4] ASI_DMMU | 489 | stxa %g2, [%g4] ASI_DMMU |
423 | retry | 490 | retry |
424 | nop | 491 | nop |
492 | nop | ||
425 | 493 | ||
426 | .globl xcall_flush_tlb_kernel_range | 494 | .globl xcall_flush_tlb_kernel_range |
427 | xcall_flush_tlb_kernel_range: /* 25 insns */ | 495 | xcall_flush_tlb_kernel_range: /* 25 insns */ |
@@ -656,15 +724,13 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */ | |||
656 | membar #Sync | 724 | membar #Sync |
657 | retry | 725 | retry |
658 | 726 | ||
659 | .globl __hypervisor_xcall_flush_tlb_pending | 727 | .globl __hypervisor_xcall_flush_tlb_page |
660 | __hypervisor_xcall_flush_tlb_pending: /* 21 insns */ | 728 | __hypervisor_xcall_flush_tlb_page: /* 17 insns */ |
661 | /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */ | 729 | /* %g5=ctx, %g1=vaddr */ |
662 | sllx %g1, 3, %g1 | ||
663 | mov %o0, %g2 | 730 | mov %o0, %g2 |
664 | mov %o1, %g3 | 731 | mov %o1, %g3 |
665 | mov %o2, %g4 | 732 | mov %o2, %g4 |
666 | 1: sub %g1, (1 << 3), %g1 | 733 | mov %g1, %o0 /* ARG0: virtual address */ |
667 | ldx [%g7 + %g1], %o0 /* ARG0: virtual address */ | ||
668 | mov %g5, %o1 /* ARG1: mmu context */ | 734 | mov %g5, %o1 /* ARG1: mmu context */ |
669 | mov HV_MMU_ALL, %o2 /* ARG2: flags */ | 735 | mov HV_MMU_ALL, %o2 /* ARG2: flags */ |
670 | srlx %o0, PAGE_SHIFT, %o0 | 736 | srlx %o0, PAGE_SHIFT, %o0 |
@@ -673,8 +739,6 @@ __hypervisor_xcall_flush_tlb_pending: /* 21 insns */ | |||
673 | mov HV_MMU_UNMAP_ADDR_TRAP, %g6 | 739 | mov HV_MMU_UNMAP_ADDR_TRAP, %g6 |
674 | brnz,a,pn %o0, __hypervisor_tlb_xcall_error | 740 | brnz,a,pn %o0, __hypervisor_tlb_xcall_error |
675 | mov %o0, %g5 | 741 | mov %o0, %g5 |
676 | brnz,pt %g1, 1b | ||
677 | nop | ||
678 | mov %g2, %o0 | 742 | mov %g2, %o0 |
679 | mov %g3, %o1 | 743 | mov %g3, %o1 |
680 | mov %g4, %o2 | 744 | mov %g4, %o2 |
@@ -757,6 +821,13 @@ hypervisor_patch_cachetlbops: | |||
757 | call tlb_patch_one | 821 | call tlb_patch_one |
758 | mov 10, %o2 | 822 | mov 10, %o2 |
759 | 823 | ||
824 | sethi %hi(__flush_tlb_page), %o0 | ||
825 | or %o0, %lo(__flush_tlb_page), %o0 | ||
826 | sethi %hi(__hypervisor_flush_tlb_page), %o1 | ||
827 | or %o1, %lo(__hypervisor_flush_tlb_page), %o1 | ||
828 | call tlb_patch_one | ||
829 | mov 11, %o2 | ||
830 | |||
760 | sethi %hi(__flush_tlb_pending), %o0 | 831 | sethi %hi(__flush_tlb_pending), %o0 |
761 | or %o0, %lo(__flush_tlb_pending), %o0 | 832 | or %o0, %lo(__flush_tlb_pending), %o0 |
762 | sethi %hi(__hypervisor_flush_tlb_pending), %o1 | 833 | sethi %hi(__hypervisor_flush_tlb_pending), %o1 |
@@ -788,12 +859,12 @@ hypervisor_patch_cachetlbops: | |||
788 | call tlb_patch_one | 859 | call tlb_patch_one |
789 | mov 21, %o2 | 860 | mov 21, %o2 |
790 | 861 | ||
791 | sethi %hi(xcall_flush_tlb_pending), %o0 | 862 | sethi %hi(xcall_flush_tlb_page), %o0 |
792 | or %o0, %lo(xcall_flush_tlb_pending), %o0 | 863 | or %o0, %lo(xcall_flush_tlb_page), %o0 |
793 | sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1 | 864 | sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1 |
794 | or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1 | 865 | or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1 |
795 | call tlb_patch_one | 866 | call tlb_patch_one |
796 | mov 21, %o2 | 867 | mov 17, %o2 |
797 | 868 | ||
798 | sethi %hi(xcall_flush_tlb_kernel_range), %o0 | 869 | sethi %hi(xcall_flush_tlb_kernel_range), %o0 |
799 | or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 | 870 | or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 |
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h index 241c0bb60b12..c96f9bbb760d 100644 --- a/arch/tile/include/asm/irqflags.h +++ b/arch/tile/include/asm/irqflags.h | |||
@@ -40,7 +40,15 @@ | |||
40 | #include <asm/percpu.h> | 40 | #include <asm/percpu.h> |
41 | #include <arch/spr_def.h> | 41 | #include <arch/spr_def.h> |
42 | 42 | ||
43 | /* Set and clear kernel interrupt masks. */ | 43 | /* |
44 | * Set and clear kernel interrupt masks. | ||
45 | * | ||
46 | * NOTE: __insn_mtspr() is a compiler builtin marked as a memory | ||
47 | * clobber. We rely on it being equivalent to a compiler barrier in | ||
48 | * this code since arch_local_irq_save() and friends must act as | ||
49 | * compiler barriers. This compiler semantic is baked into enough | ||
50 | * places that the compiler will maintain it going forward. | ||
51 | */ | ||
44 | #if CHIP_HAS_SPLIT_INTR_MASK() | 52 | #if CHIP_HAS_SPLIT_INTR_MASK() |
45 | #if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32 | 53 | #if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32 |
46 | # error Fix assumptions about which word various interrupts are in | 54 | # error Fix assumptions about which word various interrupts are in |
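
Reviewer note on the tile irqflags.h hunk above: the expanded comment records that __insn_mtspr() carries a memory clobber and therefore acts as a compiler barrier, which arch_local_irq_save() and friends rely on. A generic, non-tile illustration of the property (demo_* names are made up):

/* Because the mask write is a compiler barrier, the access to shared
 * state cannot be reordered by the compiler to before interrupts are
 * masked or after they are re-enabled. */
static int demo_shared;

static int demo_read_with_irqs_masked(void)
{
	int val;

	asm volatile("" ::: "memory");	/* stand-in for the mask write   */
	val = demo_shared;		/* stays between the two barriers */
	asm volatile("" ::: "memory");	/* stand-in for the unmask write */

	return val;
}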
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index d1e15f7b59c6..7a5aa1a7864e 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c | |||
@@ -1004,15 +1004,8 @@ void __cpuinit setup_cpu(int boot) | |||
1004 | 1004 | ||
1005 | #ifdef CONFIG_BLK_DEV_INITRD | 1005 | #ifdef CONFIG_BLK_DEV_INITRD |
1006 | 1006 | ||
1007 | /* | ||
1008 | * Note that the kernel can potentially support other compression | ||
1009 | * techniques than gz, though we don't do so by default. If we ever | ||
1010 | * decide to do so we can either look for other filename extensions, | ||
1011 | * or just allow a file with this name to be compressed with an | ||
1012 | * arbitrary compressor (somewhat counterintuitively). | ||
1013 | */ | ||
1014 | static int __initdata set_initramfs_file; | 1007 | static int __initdata set_initramfs_file; |
1015 | static char __initdata initramfs_file[128] = "initramfs.cpio.gz"; | 1008 | static char __initdata initramfs_file[128] = "initramfs"; |
1016 | 1009 | ||
1017 | static int __init setup_initramfs_file(char *str) | 1010 | static int __init setup_initramfs_file(char *str) |
1018 | { | 1011 | { |
@@ -1026,9 +1019,9 @@ static int __init setup_initramfs_file(char *str) | |||
1026 | early_param("initramfs_file", setup_initramfs_file); | 1019 | early_param("initramfs_file", setup_initramfs_file); |
1027 | 1020 | ||
1028 | /* | 1021 | /* |
1029 | * We look for an "initramfs.cpio.gz" file in the hvfs. | 1022 | * We look for a file called "initramfs" in the hvfs. If there is one, we |
1030 | * If there is one, we allocate some memory for it and it will be | 1023 | * allocate some memory for it and it will be unpacked to the initramfs. |
1031 | * unpacked to the initramfs. | 1024 | * If it's compressed, the initd code will uncompress it first. |
1032 | */ | 1025 | */ |
1033 | static void __init load_hv_initrd(void) | 1026 | static void __init load_hv_initrd(void) |
1034 | { | 1027 | { |
@@ -1038,10 +1031,16 @@ static void __init load_hv_initrd(void) | |||
1038 | 1031 | ||
1039 | fd = hv_fs_findfile((HV_VirtAddr) initramfs_file); | 1032 | fd = hv_fs_findfile((HV_VirtAddr) initramfs_file); |
1040 | if (fd == HV_ENOENT) { | 1033 | if (fd == HV_ENOENT) { |
1041 | if (set_initramfs_file) | 1034 | if (set_initramfs_file) { |
1042 | pr_warning("No such hvfs initramfs file '%s'\n", | 1035 | pr_warning("No such hvfs initramfs file '%s'\n", |
1043 | initramfs_file); | 1036 | initramfs_file); |
1044 | return; | 1037 | return; |
1038 | } else { | ||
1039 | /* Try old backwards-compatible name. */ | ||
1040 | fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz"); | ||
1041 | if (fd == HV_ENOENT) | ||
1042 | return; | ||
1043 | } | ||
1045 | } | 1044 | } |
1046 | BUG_ON(fd < 0); | 1045 | BUG_ON(fd < 0); |
1047 | stat = hv_fs_fstat(fd); | 1046 | stat = hv_fs_fstat(fd); |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 70c0f3da0476..15b5cef4aa38 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -1549,6 +1549,7 @@ config X86_SMAP | |||
1549 | config EFI | 1549 | config EFI |
1550 | bool "EFI runtime service support" | 1550 | bool "EFI runtime service support" |
1551 | depends on ACPI | 1551 | depends on ACPI |
1552 | select UCS2_STRING | ||
1552 | ---help--- | 1553 | ---help--- |
1553 | This enables the kernel to use EFI runtime services that are | 1554 | This enables the kernel to use EFI runtime services that are |
1554 | available (such as the EFI variable services). | 1555 | available (such as the EFI variable services). |
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 8a84501acb1b..5ef205c5f37b 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile | |||
@@ -4,7 +4,7 @@ | |||
4 | # create a compressed vmlinux image from the original vmlinux | 4 | # create a compressed vmlinux image from the original vmlinux |
5 | # | 5 | # |
6 | 6 | ||
7 | targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo head_$(BITS).o misc.o string.o cmdline.o early_serial_console.o piggy.o | 7 | targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo |
8 | 8 | ||
9 | KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 | 9 | KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 |
10 | KBUILD_CFLAGS += -fno-strict-aliasing -fPIC | 10 | KBUILD_CFLAGS += -fno-strict-aliasing -fPIC |
@@ -29,7 +29,6 @@ VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \ | |||
29 | $(obj)/piggy.o | 29 | $(obj)/piggy.o |
30 | 30 | ||
31 | $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone | 31 | $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone |
32 | $(obj)/efi_stub_$(BITS).o: KBUILD_CLFAGS += -fshort-wchar -mno-red-zone | ||
33 | 32 | ||
34 | ifeq ($(CONFIG_EFI_STUB), y) | 33 | ifeq ($(CONFIG_EFI_STUB), y) |
35 | VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o | 34 | VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o |
@@ -43,7 +42,7 @@ OBJCOPYFLAGS_vmlinux.bin := -R .comment -S | |||
43 | $(obj)/vmlinux.bin: vmlinux FORCE | 42 | $(obj)/vmlinux.bin: vmlinux FORCE |
44 | $(call if_changed,objcopy) | 43 | $(call if_changed,objcopy) |
45 | 44 | ||
46 | targets += vmlinux.bin.all vmlinux.relocs | 45 | targets += $(patsubst $(obj)/%,%,$(VMLINUX_OBJS)) vmlinux.bin.all vmlinux.relocs |
47 | 46 | ||
48 | CMD_RELOCS = arch/x86/tools/relocs | 47 | CMD_RELOCS = arch/x86/tools/relocs |
49 | quiet_cmd_relocs = RELOCS $@ | 48 | quiet_cmd_relocs = RELOCS $@ |
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index c205035a6b96..35ee62fccf98 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c | |||
@@ -251,6 +251,51 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size) | |||
251 | *size = len; | 251 | *size = len; |
252 | } | 252 | } |
253 | 253 | ||
254 | static efi_status_t setup_efi_vars(struct boot_params *params) | ||
255 | { | ||
256 | struct setup_data *data; | ||
257 | struct efi_var_bootdata *efidata; | ||
258 | u64 store_size, remaining_size, var_size; | ||
259 | efi_status_t status; | ||
260 | |||
261 | if (sys_table->runtime->hdr.revision < EFI_2_00_SYSTEM_TABLE_REVISION) | ||
262 | return EFI_UNSUPPORTED; | ||
263 | |||
264 | data = (struct setup_data *)(unsigned long)params->hdr.setup_data; | ||
265 | |||
266 | while (data && data->next) | ||
267 | data = (struct setup_data *)(unsigned long)data->next; | ||
268 | |||
269 | status = efi_call_phys4((void *)sys_table->runtime->query_variable_info, | ||
270 | EFI_VARIABLE_NON_VOLATILE | | ||
271 | EFI_VARIABLE_BOOTSERVICE_ACCESS | | ||
272 | EFI_VARIABLE_RUNTIME_ACCESS, &store_size, | ||
273 | &remaining_size, &var_size); | ||
274 | |||
275 | if (status != EFI_SUCCESS) | ||
276 | return status; | ||
277 | |||
278 | status = efi_call_phys3(sys_table->boottime->allocate_pool, | ||
279 | EFI_LOADER_DATA, sizeof(*efidata), &efidata); | ||
280 | |||
281 | if (status != EFI_SUCCESS) | ||
282 | return status; | ||
283 | |||
284 | efidata->data.type = SETUP_EFI_VARS; | ||
285 | efidata->data.len = sizeof(struct efi_var_bootdata) - | ||
286 | sizeof(struct setup_data); | ||
287 | efidata->data.next = 0; | ||
288 | efidata->store_size = store_size; | ||
289 | efidata->remaining_size = remaining_size; | ||
290 | efidata->max_var_size = var_size; | ||
291 | |||
292 | if (data) | ||
293 | data->next = (unsigned long)efidata; | ||
294 | else | ||
295 | params->hdr.setup_data = (unsigned long)efidata; | ||
296 | |||
297 | } | ||
298 | |||
254 | static efi_status_t setup_efi_pci(struct boot_params *params) | 299 | static efi_status_t setup_efi_pci(struct boot_params *params) |
255 | { | 300 | { |
256 | efi_pci_io_protocol *pci; | 301 | efi_pci_io_protocol *pci; |
@@ -1157,6 +1202,8 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table, | |||
1157 | 1202 | ||
1158 | setup_graphics(boot_params); | 1203 | setup_graphics(boot_params); |
1159 | 1204 | ||
1205 | setup_efi_vars(boot_params); | ||
1206 | |||
1160 | setup_efi_pci(boot_params); | 1207 | setup_efi_pci(boot_params); |
1161 | 1208 | ||
1162 | status = efi_call_phys3(sys_table->boottime->allocate_pool, | 1209 | status = efi_call_phys3(sys_table->boottime->allocate_pool, |
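
Reviewer note on the eboot.c hunk above: setup_efi_vars() calls QueryVariableInfo() and appends a SETUP_EFI_VARS node to the boot_params setup_data chain so the kernel can later judge how full the variable store is. A sketch of how a consumer would find that node; demo_find_efi_var_data is a hypothetical helper, and struct efi_var_bootdata plus SETUP_EFI_VARS come from the efi.h and bootparam.h hunks later in this series:

#include <linux/types.h>
#include <asm/bootparam.h>
#include <asm/efi.h>

/* setup_data is a singly linked list of physical addresses hanging off
 * boot_params.  Direct casting as done here only works in the boot
 * stub's identity-mapped environment; the kernel proper would
 * early_memremap() each node before touching it. */
static struct efi_var_bootdata *demo_find_efi_var_data(struct boot_params *bp)
{
	u64 pa = bp->hdr.setup_data;

	while (pa) {
		struct setup_data *sd = (struct setup_data *)(unsigned long)pa;

		if (sd->type == SETUP_EFI_VARS)
			return (struct efi_var_bootdata *)sd;
		pa = sd->next;
	}

	return NULL;
}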
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 60c89f30c727..2fb5d5884e23 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h | |||
@@ -102,6 +102,13 @@ extern void efi_call_phys_epilog(void); | |||
102 | extern void efi_unmap_memmap(void); | 102 | extern void efi_unmap_memmap(void); |
103 | extern void efi_memory_uc(u64 addr, unsigned long size); | 103 | extern void efi_memory_uc(u64 addr, unsigned long size); |
104 | 104 | ||
105 | struct efi_var_bootdata { | ||
106 | struct setup_data data; | ||
107 | u64 store_size; | ||
108 | u64 remaining_size; | ||
109 | u64 max_var_size; | ||
110 | }; | ||
111 | |||
105 | #ifdef CONFIG_EFI | 112 | #ifdef CONFIG_EFI |
106 | 113 | ||
107 | static inline bool efi_is_native(void) | 114 | static inline bool efi_is_native(void) |
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 5edd1742cfd0..7361e47db79f 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h | |||
@@ -703,7 +703,10 @@ static inline void arch_leave_lazy_mmu_mode(void) | |||
703 | PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave); | 703 | PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave); |
704 | } | 704 | } |
705 | 705 | ||
706 | void arch_flush_lazy_mmu_mode(void); | 706 | static inline void arch_flush_lazy_mmu_mode(void) |
707 | { | ||
708 | PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush); | ||
709 | } | ||
707 | 710 | ||
708 | static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, | 711 | static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, |
709 | phys_addr_t phys, pgprot_t flags) | 712 | phys_addr_t phys, pgprot_t flags) |
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 142236ed83af..b3b0ec1dac86 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h | |||
@@ -91,6 +91,7 @@ struct pv_lazy_ops { | |||
91 | /* Set deferred update mode, used for batching operations. */ | 91 | /* Set deferred update mode, used for batching operations. */ |
92 | void (*enter)(void); | 92 | void (*enter)(void); |
93 | void (*leave)(void); | 93 | void (*leave)(void); |
94 | void (*flush)(void); | ||
94 | }; | 95 | }; |
95 | 96 | ||
96 | struct pv_time_ops { | 97 | struct pv_time_ops { |
@@ -679,6 +680,7 @@ void paravirt_end_context_switch(struct task_struct *next); | |||
679 | 680 | ||
680 | void paravirt_enter_lazy_mmu(void); | 681 | void paravirt_enter_lazy_mmu(void); |
681 | void paravirt_leave_lazy_mmu(void); | 682 | void paravirt_leave_lazy_mmu(void); |
683 | void paravirt_flush_lazy_mmu(void); | ||
682 | 684 | ||
683 | void _paravirt_nop(void); | 685 | void _paravirt_nop(void); |
684 | u32 _paravirt_ident_32(u32); | 686 | u32 _paravirt_ident_32(u32); |
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h index 1ace47b62592..2e188d68397c 100644 --- a/arch/x86/include/asm/syscall.h +++ b/arch/x86/include/asm/syscall.h | |||
@@ -29,13 +29,13 @@ extern const unsigned long sys_call_table[]; | |||
29 | */ | 29 | */ |
30 | static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) | 30 | static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) |
31 | { | 31 | { |
32 | return regs->orig_ax & __SYSCALL_MASK; | 32 | return regs->orig_ax; |
33 | } | 33 | } |
34 | 34 | ||
35 | static inline void syscall_rollback(struct task_struct *task, | 35 | static inline void syscall_rollback(struct task_struct *task, |
36 | struct pt_regs *regs) | 36 | struct pt_regs *regs) |
37 | { | 37 | { |
38 | regs->ax = regs->orig_ax & __SYSCALL_MASK; | 38 | regs->ax = regs->orig_ax; |
39 | } | 39 | } |
40 | 40 | ||
41 | static inline long syscall_get_error(struct task_struct *task, | 41 | static inline long syscall_get_error(struct task_struct *task, |
diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h index 4fef20773b8f..c7797307fc2b 100644 --- a/arch/x86/include/asm/tlb.h +++ b/arch/x86/include/asm/tlb.h | |||
@@ -7,7 +7,7 @@ | |||
7 | 7 | ||
8 | #define tlb_flush(tlb) \ | 8 | #define tlb_flush(tlb) \ |
9 | { \ | 9 | { \ |
10 | if (tlb->fullmm == 0) \ | 10 | if (!tlb->fullmm && !tlb->need_flush_all) \ |
11 | flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \ | 11 | flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \ |
12 | else \ | 12 | else \ |
13 | flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \ | 13 | flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \ |
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index c15ddaf90710..08744242b8d2 100644 --- a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h | |||
@@ -6,6 +6,7 @@ | |||
6 | #define SETUP_E820_EXT 1 | 6 | #define SETUP_E820_EXT 1 |
7 | #define SETUP_DTB 2 | 7 | #define SETUP_DTB 2 |
8 | #define SETUP_PCI 3 | 8 | #define SETUP_PCI 3 |
9 | #define SETUP_EFI_VARS 4 | ||
9 | 10 | ||
10 | /* ram_size flags */ | 11 | /* ram_size flags */ |
11 | #define RAMDISK_IMAGE_START_MASK 0x07FF | 12 | #define RAMDISK_IMAGE_START_MASK 0x07FF |
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index a7d26d83fb70..8f4be53ea04b 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c | |||
@@ -35,13 +35,6 @@ static bool __init ms_hyperv_platform(void) | |||
35 | if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) | 35 | if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) |
36 | return false; | 36 | return false; |
37 | 37 | ||
38 | /* | ||
39 | * Xen emulates Hyper-V to support enlightened Windows. | ||
40 | * Check to see first if we are on a Xen Hypervisor. | ||
41 | */ | ||
42 | if (xen_cpuid_base()) | ||
43 | return false; | ||
44 | |||
45 | cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS, | 38 | cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS, |
46 | &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]); | 39 | &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]); |
47 | 40 | ||
@@ -82,12 +75,6 @@ static void __init ms_hyperv_init_platform(void) | |||
82 | 75 | ||
83 | if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE) | 76 | if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE) |
84 | clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100); | 77 | clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100); |
85 | #if IS_ENABLED(CONFIG_HYPERV) | ||
86 | /* | ||
87 | * Setup the IDT for hypervisor callback. | ||
88 | */ | ||
89 | alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector); | ||
90 | #endif | ||
91 | } | 78 | } |
92 | 79 | ||
93 | const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = { | 80 | const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = { |
@@ -103,6 +90,11 @@ static irq_handler_t vmbus_isr; | |||
103 | 90 | ||
104 | void hv_register_vmbus_handler(int irq, irq_handler_t handler) | 91 | void hv_register_vmbus_handler(int irq, irq_handler_t handler) |
105 | { | 92 | { |
93 | /* | ||
94 | * Setup the IDT for hypervisor callback. | ||
95 | */ | ||
96 | alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector); | ||
97 | |||
106 | vmbus_irq = irq; | 98 | vmbus_irq = irq; |
107 | vmbus_isr = handler; | 99 | vmbus_isr = handler; |
108 | } | 100 | } |
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index dab7580c47ae..cc45deb791b0 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -153,8 +153,14 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly = | |||
153 | }; | 153 | }; |
154 | 154 | ||
155 | static struct extra_reg intel_snb_extra_regs[] __read_mostly = { | 155 | static struct extra_reg intel_snb_extra_regs[] __read_mostly = { |
156 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0), | 156 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0), |
157 | INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1), | 157 | INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1), |
158 | EVENT_EXTRA_END | ||
159 | }; | ||
160 | |||
161 | static struct extra_reg intel_snbep_extra_regs[] __read_mostly = { | ||
162 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), | ||
163 | INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), | ||
158 | EVENT_EXTRA_END | 164 | EVENT_EXTRA_END |
159 | }; | 165 | }; |
160 | 166 | ||
@@ -2097,7 +2103,10 @@ __init int intel_pmu_init(void) | |||
2097 | x86_pmu.event_constraints = intel_snb_event_constraints; | 2103 | x86_pmu.event_constraints = intel_snb_event_constraints; |
2098 | x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints; | 2104 | x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints; |
2099 | x86_pmu.pebs_aliases = intel_pebs_aliases_snb; | 2105 | x86_pmu.pebs_aliases = intel_pebs_aliases_snb; |
2100 | x86_pmu.extra_regs = intel_snb_extra_regs; | 2106 | if (boot_cpu_data.x86_model == 45) |
2107 | x86_pmu.extra_regs = intel_snbep_extra_regs; | ||
2108 | else | ||
2109 | x86_pmu.extra_regs = intel_snb_extra_regs; | ||
2101 | /* all extra regs are per-cpu when HT is on */ | 2110 | /* all extra regs are per-cpu when HT is on */ |
2102 | x86_pmu.er_flags |= ERF_HAS_RSP_1; | 2111 | x86_pmu.er_flags |= ERF_HAS_RSP_1; |
2103 | x86_pmu.er_flags |= ERF_NO_HT_SHARING; | 2112 | x86_pmu.er_flags |= ERF_NO_HT_SHARING; |
@@ -2123,7 +2132,10 @@ __init int intel_pmu_init(void) | |||
2123 | x86_pmu.event_constraints = intel_ivb_event_constraints; | 2132 | x86_pmu.event_constraints = intel_ivb_event_constraints; |
2124 | x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints; | 2133 | x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints; |
2125 | x86_pmu.pebs_aliases = intel_pebs_aliases_snb; | 2134 | x86_pmu.pebs_aliases = intel_pebs_aliases_snb; |
2126 | x86_pmu.extra_regs = intel_snb_extra_regs; | 2135 | if (boot_cpu_data.x86_model == 62) |
2136 | x86_pmu.extra_regs = intel_snbep_extra_regs; | ||
2137 | else | ||
2138 | x86_pmu.extra_regs = intel_snb_extra_regs; | ||
2127 | /* all extra regs are per-cpu when HT is on */ | 2139 | /* all extra regs are per-cpu when HT is on */ |
2128 | x86_pmu.er_flags |= ERF_HAS_RSP_1; | 2140 | x86_pmu.er_flags |= ERF_HAS_RSP_1; |
2129 | x86_pmu.er_flags |= ERF_NO_HT_SHARING; | 2141 | x86_pmu.er_flags |= ERF_NO_HT_SHARING; |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index b05a575d56f4..26830f3af0df 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c | |||
@@ -314,10 +314,11 @@ int intel_pmu_drain_bts_buffer(void) | |||
314 | if (top <= at) | 314 | if (top <= at) |
315 | return 0; | 315 | return 0; |
316 | 316 | ||
317 | memset(®s, 0, sizeof(regs)); | ||
318 | |||
317 | ds->bts_index = ds->bts_buffer_base; | 319 | ds->bts_index = ds->bts_buffer_base; |
318 | 320 | ||
319 | perf_sample_data_init(&data, 0, event->hw.last_period); | 321 | perf_sample_data_init(&data, 0, event->hw.last_period); |
320 | regs.ip = 0; | ||
321 | 322 | ||
322 | /* | 323 | /* |
323 | * Prepare a generic sample, i.e. fill in the invariant fields. | 324 | * Prepare a generic sample, i.e. fill in the invariant fields. |
diff --git a/arch/x86/kernel/microcode_core_early.c b/arch/x86/kernel/microcode_core_early.c index 577db8417d15..833d51d6ee06 100644 --- a/arch/x86/kernel/microcode_core_early.c +++ b/arch/x86/kernel/microcode_core_early.c | |||
@@ -45,9 +45,6 @@ static int __cpuinit x86_vendor(void) | |||
45 | u32 eax = 0x00000000; | 45 | u32 eax = 0x00000000; |
46 | u32 ebx, ecx = 0, edx; | 46 | u32 ebx, ecx = 0, edx; |
47 | 47 | ||
48 | if (!have_cpuid_p()) | ||
49 | return X86_VENDOR_UNKNOWN; | ||
50 | |||
51 | native_cpuid(&eax, &ebx, &ecx, &edx); | 48 | native_cpuid(&eax, &ebx, &ecx, &edx); |
52 | 49 | ||
53 | if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx)) | 50 | if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx)) |
@@ -59,18 +56,45 @@ static int __cpuinit x86_vendor(void) | |||
59 | return X86_VENDOR_UNKNOWN; | 56 | return X86_VENDOR_UNKNOWN; |
60 | } | 57 | } |
61 | 58 | ||
59 | static int __cpuinit x86_family(void) | ||
60 | { | ||
61 | u32 eax = 0x00000001; | ||
62 | u32 ebx, ecx = 0, edx; | ||
63 | int x86; | ||
64 | |||
65 | native_cpuid(&eax, &ebx, &ecx, &edx); | ||
66 | |||
67 | x86 = (eax >> 8) & 0xf; | ||
68 | if (x86 == 15) | ||
69 | x86 += (eax >> 20) & 0xff; | ||
70 | |||
71 | return x86; | ||
72 | } | ||
73 | |||
62 | void __init load_ucode_bsp(void) | 74 | void __init load_ucode_bsp(void) |
63 | { | 75 | { |
64 | int vendor = x86_vendor(); | 76 | int vendor, x86; |
77 | |||
78 | if (!have_cpuid_p()) | ||
79 | return; | ||
65 | 80 | ||
66 | if (vendor == X86_VENDOR_INTEL) | 81 | vendor = x86_vendor(); |
82 | x86 = x86_family(); | ||
83 | |||
84 | if (vendor == X86_VENDOR_INTEL && x86 >= 6) | ||
67 | load_ucode_intel_bsp(); | 85 | load_ucode_intel_bsp(); |
68 | } | 86 | } |
69 | 87 | ||
70 | void __cpuinit load_ucode_ap(void) | 88 | void __cpuinit load_ucode_ap(void) |
71 | { | 89 | { |
72 | int vendor = x86_vendor(); | 90 | int vendor, x86; |
91 | |||
92 | if (!have_cpuid_p()) | ||
93 | return; | ||
94 | |||
95 | vendor = x86_vendor(); | ||
96 | x86 = x86_family(); | ||
73 | 97 | ||
74 | if (vendor == X86_VENDOR_INTEL) | 98 | if (vendor == X86_VENDOR_INTEL && x86 >= 6) |
75 | load_ucode_intel_ap(); | 99 | load_ucode_intel_ap(); |
76 | } | 100 | } |
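
Reviewer note on the microcode_core_early.c hunk above: the early loader now bails out when CPUID is unavailable and only loads Intel microcode on family 6 and later; x86_family() derives the family from CPUID leaf 1, adding the extended family only when the base family is 15. A worked example of that computation with illustrative CPUID values (demo_x86_family is a made-up name):

#include <linux/types.h>

/*   eax = 0x000306a9  ->  base family 6, no extension    -> x86 = 6
 *   eax = 0x00800f12  ->  base family 15, extended 0x08  -> x86 = 23
 * Only the first passes the new "vendor == Intel && x86 >= 6" check. */
static int demo_x86_family(u32 eax)
{
	int x86 = (eax >> 8) & 0xf;

	if (x86 == 15)
		x86 += (eax >> 20) & 0xff;

	return x86;
}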
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 17fff18a1031..8bfb335f74bb 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c | |||
@@ -263,6 +263,18 @@ void paravirt_leave_lazy_mmu(void) | |||
263 | leave_lazy(PARAVIRT_LAZY_MMU); | 263 | leave_lazy(PARAVIRT_LAZY_MMU); |
264 | } | 264 | } |
265 | 265 | ||
266 | void paravirt_flush_lazy_mmu(void) | ||
267 | { | ||
268 | preempt_disable(); | ||
269 | |||
270 | if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) { | ||
271 | arch_leave_lazy_mmu_mode(); | ||
272 | arch_enter_lazy_mmu_mode(); | ||
273 | } | ||
274 | |||
275 | preempt_enable(); | ||
276 | } | ||
277 | |||
266 | void paravirt_start_context_switch(struct task_struct *prev) | 278 | void paravirt_start_context_switch(struct task_struct *prev) |
267 | { | 279 | { |
268 | BUG_ON(preemptible()); | 280 | BUG_ON(preemptible()); |
@@ -292,18 +304,6 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void) | |||
292 | return this_cpu_read(paravirt_lazy_mode); | 304 | return this_cpu_read(paravirt_lazy_mode); |
293 | } | 305 | } |
294 | 306 | ||
295 | void arch_flush_lazy_mmu_mode(void) | ||
296 | { | ||
297 | preempt_disable(); | ||
298 | |||
299 | if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) { | ||
300 | arch_leave_lazy_mmu_mode(); | ||
301 | arch_enter_lazy_mmu_mode(); | ||
302 | } | ||
303 | |||
304 | preempt_enable(); | ||
305 | } | ||
306 | |||
307 | struct pv_info pv_info = { | 307 | struct pv_info pv_info = { |
308 | .name = "bare hardware", | 308 | .name = "bare hardware", |
309 | .paravirt_enabled = 0, | 309 | .paravirt_enabled = 0, |
@@ -475,6 +475,7 @@ struct pv_mmu_ops pv_mmu_ops = { | |||
475 | .lazy_mode = { | 475 | .lazy_mode = { |
476 | .enter = paravirt_nop, | 476 | .enter = paravirt_nop, |
477 | .leave = paravirt_nop, | 477 | .leave = paravirt_nop, |
478 | .flush = paravirt_nop, | ||
478 | }, | 479 | }, |
479 | 480 | ||
480 | .set_fixmap = native_set_fixmap, | 481 | .set_fixmap = native_set_fixmap, |
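
Reviewer note on the paravirt hunks above: arch_flush_lazy_mmu_mode() becomes a pv_op (lazy_mode.flush) whose default is paravirt_nop, so bare metal pays nothing, while hypervisor backends point it at paravirt_flush_lazy_mmu(), which leaves and re-enters lazy mode to force queued page-table updates out. A schematic of how a backend opts in; demo_backend_setup_lazy_mmu is a made-up function, and the lguest hunk later in this series does the equivalent wiring:

#include <linux/init.h>
#include <asm/paravirt.h>

static void __init demo_backend_setup_lazy_mmu(void)
{
	pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
	pv_mmu_ops.lazy_mode.leave = paravirt_leave_lazy_mmu;	/* a real backend wraps this */
	pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu;
}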
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 90d8cc930f5e..fae9134a2de9 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -507,11 +507,14 @@ static void __init memblock_x86_reserve_range_setup_data(void) | |||
507 | /* | 507 | /* |
508 | * Keep the crash kernel below this limit. On 32 bits earlier kernels | 508 | * Keep the crash kernel below this limit. On 32 bits earlier kernels |
509 | * would limit the kernel to the low 512 MiB due to mapping restrictions. | 509 | * would limit the kernel to the low 512 MiB due to mapping restrictions. |
510 | * On 64bit, old kexec-tools need to under 896MiB. | ||
510 | */ | 511 | */ |
511 | #ifdef CONFIG_X86_32 | 512 | #ifdef CONFIG_X86_32 |
512 | # define CRASH_KERNEL_ADDR_MAX (512 << 20) | 513 | # define CRASH_KERNEL_ADDR_LOW_MAX (512 << 20) |
514 | # define CRASH_KERNEL_ADDR_HIGH_MAX (512 << 20) | ||
513 | #else | 515 | #else |
514 | # define CRASH_KERNEL_ADDR_MAX MAXMEM | 516 | # define CRASH_KERNEL_ADDR_LOW_MAX (896UL<<20) |
517 | # define CRASH_KERNEL_ADDR_HIGH_MAX MAXMEM | ||
515 | #endif | 518 | #endif |
516 | 519 | ||
517 | static void __init reserve_crashkernel_low(void) | 520 | static void __init reserve_crashkernel_low(void) |
@@ -521,19 +524,35 @@ static void __init reserve_crashkernel_low(void) | |||
521 | unsigned long long low_base = 0, low_size = 0; | 524 | unsigned long long low_base = 0, low_size = 0; |
522 | unsigned long total_low_mem; | 525 | unsigned long total_low_mem; |
523 | unsigned long long base; | 526 | unsigned long long base; |
527 | bool auto_set = false; | ||
524 | int ret; | 528 | int ret; |
525 | 529 | ||
526 | total_low_mem = memblock_mem_size(1UL<<(32-PAGE_SHIFT)); | 530 | total_low_mem = memblock_mem_size(1UL<<(32-PAGE_SHIFT)); |
531 | /* crashkernel=Y,low */ | ||
527 | ret = parse_crashkernel_low(boot_command_line, total_low_mem, | 532 | ret = parse_crashkernel_low(boot_command_line, total_low_mem, |
528 | &low_size, &base); | 533 | &low_size, &base); |
529 | if (ret != 0 || low_size <= 0) | 534 | if (ret != 0) { |
530 | return; | 535 | /* |
536 | * two parts from lib/swiotlb.c: | ||
537 | * swiotlb size: user specified with swiotlb= or default. | ||
538 | * swiotlb overflow buffer: now is hardcoded to 32k. | ||
539 | * We round it to 8M for other buffers that | ||
540 | * may need to stay low too. | ||
541 | */ | ||
542 | low_size = swiotlb_size_or_default() + (8UL<<20); | ||
543 | auto_set = true; | ||
544 | } else { | ||
545 | /* passed with crashkernel=0,low ? */ | ||
546 | if (!low_size) | ||
547 | return; | ||
548 | } | ||
531 | 549 | ||
532 | low_base = memblock_find_in_range(low_size, (1ULL<<32), | 550 | low_base = memblock_find_in_range(low_size, (1ULL<<32), |
533 | low_size, alignment); | 551 | low_size, alignment); |
534 | 552 | ||
535 | if (!low_base) { | 553 | if (!low_base) { |
536 | pr_info("crashkernel low reservation failed - No suitable area found.\n"); | 554 | if (!auto_set) |
555 | pr_info("crashkernel low reservation failed - No suitable area found.\n"); | ||
537 | 556 | ||
538 | return; | 557 | return; |
539 | } | 558 | } |
@@ -554,14 +573,22 @@ static void __init reserve_crashkernel(void) | |||
554 | const unsigned long long alignment = 16<<20; /* 16M */ | 573 | const unsigned long long alignment = 16<<20; /* 16M */ |
555 | unsigned long long total_mem; | 574 | unsigned long long total_mem; |
556 | unsigned long long crash_size, crash_base; | 575 | unsigned long long crash_size, crash_base; |
576 | bool high = false; | ||
557 | int ret; | 577 | int ret; |
558 | 578 | ||
559 | total_mem = memblock_phys_mem_size(); | 579 | total_mem = memblock_phys_mem_size(); |
560 | 580 | ||
581 | /* crashkernel=XM */ | ||
561 | ret = parse_crashkernel(boot_command_line, total_mem, | 582 | ret = parse_crashkernel(boot_command_line, total_mem, |
562 | &crash_size, &crash_base); | 583 | &crash_size, &crash_base); |
563 | if (ret != 0 || crash_size <= 0) | 584 | if (ret != 0 || crash_size <= 0) { |
564 | return; | 585 | /* crashkernel=X,high */ |
586 | ret = parse_crashkernel_high(boot_command_line, total_mem, | ||
587 | &crash_size, &crash_base); | ||
588 | if (ret != 0 || crash_size <= 0) | ||
589 | return; | ||
590 | high = true; | ||
591 | } | ||
565 | 592 | ||
566 | /* 0 means: find the address automatically */ | 593 | /* 0 means: find the address automatically */ |
567 | if (crash_base <= 0) { | 594 | if (crash_base <= 0) { |
@@ -569,7 +596,9 @@ static void __init reserve_crashkernel(void) | |||
569 | * kexec want bzImage is below CRASH_KERNEL_ADDR_MAX | 596 | * kexec want bzImage is below CRASH_KERNEL_ADDR_MAX |
570 | */ | 597 | */ |
571 | crash_base = memblock_find_in_range(alignment, | 598 | crash_base = memblock_find_in_range(alignment, |
572 | CRASH_KERNEL_ADDR_MAX, crash_size, alignment); | 599 | high ? CRASH_KERNEL_ADDR_HIGH_MAX : |
600 | CRASH_KERNEL_ADDR_LOW_MAX, | ||
601 | crash_size, alignment); | ||
573 | 602 | ||
574 | if (!crash_base) { | 603 | if (!crash_base) { |
575 | pr_info("crashkernel reservation failed - No suitable area found.\n"); | 604 | pr_info("crashkernel reservation failed - No suitable area found.\n"); |
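
Reviewer note on the setup.c hunk above: reserve_crashkernel() now also honours crashkernel=X,high (searched up to MAXMEM on 64-bit instead of the 896 MiB kexec-tools limit), and when that path is taken reserve_crashkernel_low() reserves a low region automatically unless the user supplied crashkernel=Y,low. A worked example of the automatic size, assuming the stock swiotlb default of 64 MiB (the size can be changed with swiotlb= on the command line):

	low_size = swiotlb_size_or_default() + (8UL << 20)
	         = 64 MiB + 8 MiB
	         = 72 MiB reserved below 4 GiB

Passing crashkernel=0,low disables the automatic low reservation entirely, which the "if (!low_size) return;" branch in the hunk handles.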
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 02b51dd4e4ad..f77df1c5de6e 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -1857,7 +1857,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data) | |||
1857 | if (!pv_eoi_enabled(vcpu)) | 1857 | if (!pv_eoi_enabled(vcpu)) |
1858 | return 0; | 1858 | return 0; |
1859 | return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data, | 1859 | return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data, |
1860 | addr); | 1860 | addr, sizeof(u8)); |
1861 | } | 1861 | } |
1862 | 1862 | ||
1863 | void kvm_lapic_init(void) | 1863 | void kvm_lapic_init(void) |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f19ac0aca60d..e1721324c271 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -1823,7 +1823,8 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) | |||
1823 | return 0; | 1823 | return 0; |
1824 | } | 1824 | } |
1825 | 1825 | ||
1826 | if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa)) | 1826 | if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, |
1827 | sizeof(u32))) | ||
1827 | return 1; | 1828 | return 1; |
1828 | 1829 | ||
1829 | vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); | 1830 | vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); |
@@ -1952,12 +1953,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
1952 | 1953 | ||
1953 | gpa_offset = data & ~(PAGE_MASK | 1); | 1954 | gpa_offset = data & ~(PAGE_MASK | 1); |
1954 | 1955 | ||
1955 | /* Check that the address is 32-byte aligned. */ | ||
1956 | if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1)) | ||
1957 | break; | ||
1958 | |||
1959 | if (kvm_gfn_to_hva_cache_init(vcpu->kvm, | 1956 | if (kvm_gfn_to_hva_cache_init(vcpu->kvm, |
1960 | &vcpu->arch.pv_time, data & ~1ULL)) | 1957 | &vcpu->arch.pv_time, data & ~1ULL, |
1958 | sizeof(struct pvclock_vcpu_time_info))) | ||
1961 | vcpu->arch.pv_time_enabled = false; | 1959 | vcpu->arch.pv_time_enabled = false; |
1962 | else | 1960 | else |
1963 | vcpu->arch.pv_time_enabled = true; | 1961 | vcpu->arch.pv_time_enabled = true; |
@@ -1977,7 +1975,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
1977 | return 1; | 1975 | return 1; |
1978 | 1976 | ||
1979 | if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, | 1977 | if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, |
1980 | data & KVM_STEAL_VALID_BITS)) | 1978 | data & KVM_STEAL_VALID_BITS, |
1979 | sizeof(struct kvm_steal_time))) | ||
1981 | return 1; | 1980 | return 1; |
1982 | 1981 | ||
1983 | vcpu->arch.st.msr_val = data; | 1982 | vcpu->arch.st.msr_val = data; |
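
Reviewer note on the lapic.c and x86.c hunks above: kvm_gfn_to_hva_cache_init() now receives the length of the cached object, which lets it validate the mapping against the object's real size (and is why the ad-hoc 32-byte alignment check for pvclock can be dropped). An illustrative model of the page-crossing arithmetic that passing the length enables; demo_crosses_page is a made-up helper, and the real validation lives inside kvm_gfn_to_hva_cache_init():

#include <linux/kvm_host.h>

/* A cached guest object must not straddle a guest page if it is to be
 * reached through a single cached host virtual address. */
static bool demo_crosses_page(gpa_t gpa, unsigned long len)
{
	return (gpa & ~PAGE_MASK) + len > PAGE_SIZE;
}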
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 1cbd89ca5569..7114c63f047d 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c | |||
@@ -1334,6 +1334,7 @@ __init void lguest_init(void) | |||
1334 | pv_mmu_ops.read_cr3 = lguest_read_cr3; | 1334 | pv_mmu_ops.read_cr3 = lguest_read_cr3; |
1335 | pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu; | 1335 | pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu; |
1336 | pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode; | 1336 | pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode; |
1337 | pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu; | ||
1337 | pv_mmu_ops.pte_update = lguest_pte_update; | 1338 | pv_mmu_ops.pte_update = lguest_pte_update; |
1338 | pv_mmu_ops.pte_update_defer = lguest_pte_update; | 1339 | pv_mmu_ops.pte_update_defer = lguest_pte_update; |
1339 | 1340 | ||
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 2b97525246d4..0e883364abb5 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -378,10 +378,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address) | |||
378 | if (pgd_none(*pgd_ref)) | 378 | if (pgd_none(*pgd_ref)) |
379 | return -1; | 379 | return -1; |
380 | 380 | ||
381 | if (pgd_none(*pgd)) | 381 | if (pgd_none(*pgd)) { |
382 | set_pgd(pgd, *pgd_ref); | 382 | set_pgd(pgd, *pgd_ref); |
383 | else | 383 | arch_flush_lazy_mmu_mode(); |
384 | } else { | ||
384 | BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); | 385 | BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); |
386 | } | ||
385 | 387 | ||
386 | /* | 388 | /* |
387 | * Below here mismatches are bugs because these lower tables | 389 | * Below here mismatches are bugs because these lower tables |
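
The fault.c change (together with the .flush hooks added to lguest above and Xen further down) ensures that batched lazy-mode page-table updates are pushed out before the just-installed PGD entry is relied on. A rough stand-alone sketch of the batching/flush pattern, with made-up set_entry()/flush_queue() helpers:

    #include <stdbool.h>
    #include <stddef.h>

    struct pv_update { unsigned long va; unsigned long pte; };

    static struct pv_update queue[64];
    static size_t queued;
    static bool lazy_active;

    static void flush_queue(void)
    {
            /* Hand the whole batch to the hypervisor in one call (stubbed). */
            queued = 0;
    }

    static void set_entry(unsigned long va, unsigned long pte)
    {
            if (lazy_active && queued < 64) {
                    queue[queued++] = (struct pv_update){ va, pte };
                    return;
            }
            /* immediate update path (stubbed) */
    }

    /* Called before code that depends on the queued updates taking effect. */
    static void arch_flush_lazy_mmu_mode(void)
    {
            if (lazy_active && queued)
                    flush_queue();
    }
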
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c index b0086567271c..0e38951e65eb 100644 --- a/arch/x86/mm/pageattr-test.c +++ b/arch/x86/mm/pageattr-test.c | |||
@@ -68,7 +68,7 @@ static int print_split(struct split_state *s) | |||
68 | s->gpg++; | 68 | s->gpg++; |
69 | i += GPS/PAGE_SIZE; | 69 | i += GPS/PAGE_SIZE; |
70 | } else if (level == PG_LEVEL_2M) { | 70 | } else if (level == PG_LEVEL_2M) { |
71 | if (!(pte_val(*pte) & _PAGE_PSE)) { | 71 | if ((pte_val(*pte) & _PAGE_PRESENT) && !(pte_val(*pte) & _PAGE_PSE)) { |
72 | printk(KERN_ERR | 72 | printk(KERN_ERR |
73 | "%lx level %d but not PSE %Lx\n", | 73 | "%lx level %d but not PSE %Lx\n", |
74 | addr, level, (u64)pte_val(*pte)); | 74 | addr, level, (u64)pte_val(*pte)); |
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 091934e1d0d9..fb4e73ec24d8 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -467,7 +467,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, | |||
467 | * We are safe now. Check whether the new pgprot is the same: | 467 | * We are safe now. Check whether the new pgprot is the same: |
468 | */ | 468 | */ |
469 | old_pte = *kpte; | 469 | old_pte = *kpte; |
470 | old_prot = new_prot = req_prot = pte_pgprot(old_pte); | 470 | old_prot = req_prot = pte_pgprot(old_pte); |
471 | 471 | ||
472 | pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr); | 472 | pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr); |
473 | pgprot_val(req_prot) |= pgprot_val(cpa->mask_set); | 473 | pgprot_val(req_prot) |= pgprot_val(cpa->mask_set); |
@@ -478,12 +478,12 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, | |||
478 | * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL | 478 | * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL |
479 | * for the ancient hardware that doesn't support it. | 479 | * for the ancient hardware that doesn't support it. |
480 | */ | 480 | */ |
481 | if (pgprot_val(new_prot) & _PAGE_PRESENT) | 481 | if (pgprot_val(req_prot) & _PAGE_PRESENT) |
482 | pgprot_val(new_prot) |= _PAGE_PSE | _PAGE_GLOBAL; | 482 | pgprot_val(req_prot) |= _PAGE_PSE | _PAGE_GLOBAL; |
483 | else | 483 | else |
484 | pgprot_val(new_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL); | 484 | pgprot_val(req_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL); |
485 | 485 | ||
486 | new_prot = canon_pgprot(new_prot); | 486 | req_prot = canon_pgprot(req_prot); |
487 | 487 | ||
488 | /* | 488 | /* |
489 | * old_pte points to the large page base address. So we need | 489 | * old_pte points to the large page base address. So we need |
@@ -1413,6 +1413,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable) | |||
1413 | * but that can deadlock->flush only current cpu: | 1413 | * but that can deadlock->flush only current cpu: |
1414 | */ | 1414 | */ |
1415 | __flush_tlb_all(); | 1415 | __flush_tlb_all(); |
1416 | |||
1417 | arch_flush_lazy_mmu_mode(); | ||
1416 | } | 1418 | } |
1417 | 1419 | ||
1418 | #ifdef CONFIG_HIBERNATION | 1420 | #ifdef CONFIG_HIBERNATION |
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 193350b51f90..17fda6a8b3c2 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c | |||
@@ -58,6 +58,13 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte) | |||
58 | void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) | 58 | void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) |
59 | { | 59 | { |
60 | paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT); | 60 | paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT); |
61 | /* | ||
62 | * NOTE! For PAE, any changes to the top page-directory-pointer-table | ||
63 | * entries need a full cr3 reload to flush. | ||
64 | */ | ||
65 | #ifdef CONFIG_X86_PAE | ||
66 | tlb->need_flush_all = 1; | ||
67 | #endif | ||
61 | tlb_remove_page(tlb, virt_to_page(pmd)); | 68 | tlb_remove_page(tlb, virt_to_page(pmd)); |
62 | } | 69 | } |
63 | 70 | ||
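
The comment added to ___pmd_free_tlb() records why a per-page flush is not enough under PAE: the top-level page-directory-pointer entries are only re-read when cr3 is written. A hedged sketch of the same decision, using a simplified tlb_gather type rather than the kernel's struct mmu_gather:

    #include <stdbool.h>

    struct tlb_gather {
            bool need_flush_all;         /* force a full flush (cr3 reload) */
            unsigned long start, end;
    };

    static void free_pmd_page(struct tlb_gather *tlb)
    {
            /* Top-level (PDPT) entries are only reread on a cr3 write. */
            tlb->need_flush_all = true;
    }

    static void tlb_finish(struct tlb_gather *tlb)
    {
            if (tlb->need_flush_all)
                    ;       /* reload cr3: flush everything (stubbed) */
            else
                    ;       /* flush just [start, end) (stubbed) */
    }
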
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index fff986da6239..012b3f6a9bd6 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/io.h> | 41 | #include <linux/io.h> |
42 | #include <linux/reboot.h> | 42 | #include <linux/reboot.h> |
43 | #include <linux/bcd.h> | 43 | #include <linux/bcd.h> |
44 | #include <linux/ucs2_string.h> | ||
44 | 45 | ||
45 | #include <asm/setup.h> | 46 | #include <asm/setup.h> |
46 | #include <asm/efi.h> | 47 | #include <asm/efi.h> |
@@ -51,6 +52,13 @@ | |||
51 | 52 | ||
52 | #define EFI_DEBUG 1 | 53 | #define EFI_DEBUG 1 |
53 | 54 | ||
55 | /* | ||
56 | * There's some additional metadata associated with each | ||
57 | * variable. Intel's reference implementation is 60 bytes - bump that | ||
58 | * to account for potential alignment constraints | ||
59 | */ | ||
60 | #define VAR_METADATA_SIZE 64 | ||
61 | |||
54 | struct efi __read_mostly efi = { | 62 | struct efi __read_mostly efi = { |
55 | .mps = EFI_INVALID_TABLE_ADDR, | 63 | .mps = EFI_INVALID_TABLE_ADDR, |
56 | .acpi = EFI_INVALID_TABLE_ADDR, | 64 | .acpi = EFI_INVALID_TABLE_ADDR, |
@@ -69,6 +77,13 @@ struct efi_memory_map memmap; | |||
69 | static struct efi efi_phys __initdata; | 77 | static struct efi efi_phys __initdata; |
70 | static efi_system_table_t efi_systab __initdata; | 78 | static efi_system_table_t efi_systab __initdata; |
71 | 79 | ||
80 | static u64 efi_var_store_size; | ||
81 | static u64 efi_var_remaining_size; | ||
82 | static u64 efi_var_max_var_size; | ||
83 | static u64 boot_used_size; | ||
84 | static u64 boot_var_size; | ||
85 | static u64 active_size; | ||
86 | |||
72 | unsigned long x86_efi_facility; | 87 | unsigned long x86_efi_facility; |
73 | 88 | ||
74 | /* | 89 | /* |
@@ -98,6 +113,15 @@ static int __init setup_add_efi_memmap(char *arg) | |||
98 | } | 113 | } |
99 | early_param("add_efi_memmap", setup_add_efi_memmap); | 114 | early_param("add_efi_memmap", setup_add_efi_memmap); |
100 | 115 | ||
116 | static bool efi_no_storage_paranoia; | ||
117 | |||
118 | static int __init setup_storage_paranoia(char *arg) | ||
119 | { | ||
120 | efi_no_storage_paranoia = true; | ||
121 | return 0; | ||
122 | } | ||
123 | early_param("efi_no_storage_paranoia", setup_storage_paranoia); | ||
124 | |||
101 | 125 | ||
102 | static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc) | 126 | static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc) |
103 | { | 127 | { |
@@ -162,8 +186,53 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size, | |||
162 | efi_char16_t *name, | 186 | efi_char16_t *name, |
163 | efi_guid_t *vendor) | 187 | efi_guid_t *vendor) |
164 | { | 188 | { |
165 | return efi_call_virt3(get_next_variable, | 189 | efi_status_t status; |
166 | name_size, name, vendor); | 190 | static bool finished = false; |
191 | static u64 var_size; | ||
192 | |||
193 | status = efi_call_virt3(get_next_variable, | ||
194 | name_size, name, vendor); | ||
195 | |||
196 | if (status == EFI_NOT_FOUND) { | ||
197 | finished = true; | ||
198 | if (var_size < boot_used_size) { | ||
199 | boot_var_size = boot_used_size - var_size; | ||
200 | active_size += boot_var_size; | ||
201 | } else { | ||
202 | printk(KERN_WARNING FW_BUG "efi: Inconsistent initial sizes\n"); | ||
203 | } | ||
204 | } | ||
205 | |||
206 | if (boot_used_size && !finished) { | ||
207 | unsigned long size; | ||
208 | u32 attr; | ||
209 | efi_status_t s; | ||
210 | void *tmp; | ||
211 | |||
212 | s = virt_efi_get_variable(name, vendor, &attr, &size, NULL); | ||
213 | |||
214 | if (s != EFI_BUFFER_TOO_SMALL || !size) | ||
215 | return status; | ||
216 | |||
217 | tmp = kmalloc(size, GFP_ATOMIC); | ||
218 | |||
219 | if (!tmp) | ||
220 | return status; | ||
221 | |||
222 | s = virt_efi_get_variable(name, vendor, &attr, &size, tmp); | ||
223 | |||
224 | if (s == EFI_SUCCESS && (attr & EFI_VARIABLE_NON_VOLATILE)) { | ||
225 | var_size += size; | ||
226 | var_size += ucs2_strsize(name, 1024); | ||
227 | active_size += size; | ||
228 | active_size += VAR_METADATA_SIZE; | ||
229 | active_size += ucs2_strsize(name, 1024); | ||
230 | } | ||
231 | |||
232 | kfree(tmp); | ||
233 | } | ||
234 | |||
235 | return status; | ||
167 | } | 236 | } |
168 | 237 | ||
169 | static efi_status_t virt_efi_set_variable(efi_char16_t *name, | 238 | static efi_status_t virt_efi_set_variable(efi_char16_t *name, |
@@ -172,9 +241,34 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name, | |||
172 | unsigned long data_size, | 241 | unsigned long data_size, |
173 | void *data) | 242 | void *data) |
174 | { | 243 | { |
175 | return efi_call_virt5(set_variable, | 244 | efi_status_t status; |
176 | name, vendor, attr, | 245 | u32 orig_attr = 0; |
177 | data_size, data); | 246 | unsigned long orig_size = 0; |
247 | |||
248 | status = virt_efi_get_variable(name, vendor, &orig_attr, &orig_size, | ||
249 | NULL); | ||
250 | |||
251 | if (status != EFI_BUFFER_TOO_SMALL) | ||
252 | orig_size = 0; | ||
253 | |||
254 | status = efi_call_virt5(set_variable, | ||
255 | name, vendor, attr, | ||
256 | data_size, data); | ||
257 | |||
258 | if (status == EFI_SUCCESS) { | ||
259 | if (orig_size) { | ||
260 | active_size -= orig_size; | ||
261 | active_size -= ucs2_strsize(name, 1024); | ||
262 | active_size -= VAR_METADATA_SIZE; | ||
263 | } | ||
264 | if (data_size) { | ||
265 | active_size += data_size; | ||
266 | active_size += ucs2_strsize(name, 1024); | ||
267 | active_size += VAR_METADATA_SIZE; | ||
268 | } | ||
269 | } | ||
270 | |||
271 | return status; | ||
178 | } | 272 | } |
179 | 273 | ||
180 | static efi_status_t virt_efi_query_variable_info(u32 attr, | 274 | static efi_status_t virt_efi_query_variable_info(u32 attr, |
@@ -683,6 +777,9 @@ void __init efi_init(void) | |||
683 | char vendor[100] = "unknown"; | 777 | char vendor[100] = "unknown"; |
684 | int i = 0; | 778 | int i = 0; |
685 | void *tmp; | 779 | void *tmp; |
780 | struct setup_data *data; | ||
781 | struct efi_var_bootdata *efi_var_data; | ||
782 | u64 pa_data; | ||
686 | 783 | ||
687 | #ifdef CONFIG_X86_32 | 784 | #ifdef CONFIG_X86_32 |
688 | if (boot_params.efi_info.efi_systab_hi || | 785 | if (boot_params.efi_info.efi_systab_hi || |
@@ -700,6 +797,22 @@ void __init efi_init(void) | |||
700 | if (efi_systab_init(efi_phys.systab)) | 797 | if (efi_systab_init(efi_phys.systab)) |
701 | return; | 798 | return; |
702 | 799 | ||
800 | pa_data = boot_params.hdr.setup_data; | ||
801 | while (pa_data) { | ||
802 | data = early_ioremap(pa_data, sizeof(*efi_var_data)); | ||
803 | if (data->type == SETUP_EFI_VARS) { | ||
804 | efi_var_data = (struct efi_var_bootdata *)data; | ||
805 | |||
806 | efi_var_store_size = efi_var_data->store_size; | ||
807 | efi_var_remaining_size = efi_var_data->remaining_size; | ||
808 | efi_var_max_var_size = efi_var_data->max_var_size; | ||
809 | } | ||
810 | pa_data = data->next; | ||
811 | early_iounmap(data, sizeof(*efi_var_data)); | ||
812 | } | ||
813 | |||
814 | boot_used_size = efi_var_store_size - efi_var_remaining_size; | ||
815 | |||
703 | set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility); | 816 | set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility); |
704 | 817 | ||
705 | /* | 818 | /* |
@@ -1000,3 +1113,48 @@ u64 efi_mem_attributes(unsigned long phys_addr) | |||
1000 | } | 1113 | } |
1001 | return 0; | 1114 | return 0; |
1002 | } | 1115 | } |
1116 | |||
1117 | /* | ||
1118 | * Some firmware has serious problems when using more than 50% of the EFI | ||
1119 | * variable store, i.e. it triggers bugs that can brick machines. Ensure that | ||
1120 | * we never use more than this safe limit. | ||
1121 | * | ||
1122 | * Return EFI_SUCCESS if it is safe to write 'size' bytes to the variable | ||
1123 | * store. | ||
1124 | */ | ||
1125 | efi_status_t efi_query_variable_store(u32 attributes, unsigned long size) | ||
1126 | { | ||
1127 | efi_status_t status; | ||
1128 | u64 storage_size, remaining_size, max_size; | ||
1129 | |||
1130 | status = efi.query_variable_info(attributes, &storage_size, | ||
1131 | &remaining_size, &max_size); | ||
1132 | if (status != EFI_SUCCESS) | ||
1133 | return status; | ||
1134 | |||
1135 | if (!max_size && remaining_size > size) | ||
1136 | printk_once(KERN_ERR FW_BUG "Broken EFI implementation" | ||
1137 | " is returning MaxVariableSize=0\n"); | ||
1138 | /* | ||
1139 | * Some firmware implementations refuse to boot if there's insufficient | ||
1140 | * space in the variable store. We account for that by refusing the | ||
1141 | * write if permitting it would reduce the available space to under | ||
1142 | * 50%. However, some firmware won't reclaim variable space until | ||
1143 | * after the used (not merely the actively used) space drops below | ||
1144 | * a threshold. We can approximate that case with the value calculated | ||
1145 | * above. If both the firmware and our calculations indicate that the | ||
1146 | * available space would drop below 50%, refuse the write. | ||
1147 | */ | ||
1148 | |||
1149 | if (!storage_size || size > remaining_size || | ||
1150 | (max_size && size > max_size)) | ||
1151 | return EFI_OUT_OF_RESOURCES; | ||
1152 | |||
1153 | if (!efi_no_storage_paranoia && | ||
1154 | ((active_size + size + VAR_METADATA_SIZE > storage_size / 2) && | ||
1155 | (remaining_size - size < storage_size / 2))) | ||
1156 | return EFI_OUT_OF_RESOURCES; | ||
1157 | |||
1158 | return EFI_SUCCESS; | ||
1159 | } | ||
1160 | EXPORT_SYMBOL_GPL(efi_query_variable_store); | ||
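
efi_query_variable_store() above implements the "never fill the variable store past 50%" policy that the new efi_no_storage_paranoia parameter can relax. The arithmetic can be read in isolation; a user-space sketch with the bookkeeping passed in as plain arguments instead of the file-scope counters used above:

    #include <stdint.h>
    #include <stdbool.h>

    #define VAR_METADATA_SIZE 64

    static bool write_is_safe(uint64_t storage_size, uint64_t remaining_size,
                              uint64_t max_size, uint64_t active_size,
                              uint64_t size, bool no_paranoia)
    {
            /* Hard limits reported by the firmware itself. */
            if (!storage_size || size > remaining_size ||
                (max_size && size > max_size))
                    return false;

            /* Refuse only when both our accounting and the firmware's
             * remaining-space figure say the store would end up more
             * than half full. */
            if (!no_paranoia &&
                active_size + size + VAR_METADATA_SIZE > storage_size / 2 &&
                remaining_size - size < storage_size / 2)
                    return false;

            return true;
    }
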
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 6afbb2ca9a0a..e006c18d288a 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -1748,14 +1748,18 @@ static void *m2v(phys_addr_t maddr) | |||
1748 | } | 1748 | } |
1749 | 1749 | ||
1750 | /* Set the page permissions on an identity-mapped pages */ | 1750 | /* Set the page permissions on an identity-mapped pages */ |
1751 | static void set_page_prot(void *addr, pgprot_t prot) | 1751 | static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags) |
1752 | { | 1752 | { |
1753 | unsigned long pfn = __pa(addr) >> PAGE_SHIFT; | 1753 | unsigned long pfn = __pa(addr) >> PAGE_SHIFT; |
1754 | pte_t pte = pfn_pte(pfn, prot); | 1754 | pte_t pte = pfn_pte(pfn, prot); |
1755 | 1755 | ||
1756 | if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0)) | 1756 | if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags)) |
1757 | BUG(); | 1757 | BUG(); |
1758 | } | 1758 | } |
1759 | static void set_page_prot(void *addr, pgprot_t prot) | ||
1760 | { | ||
1761 | return set_page_prot_flags(addr, prot, UVMF_NONE); | ||
1762 | } | ||
1759 | #ifdef CONFIG_X86_32 | 1763 | #ifdef CONFIG_X86_32 |
1760 | static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) | 1764 | static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) |
1761 | { | 1765 | { |
@@ -1839,12 +1843,12 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end, | |||
1839 | unsigned long addr) | 1843 | unsigned long addr) |
1840 | { | 1844 | { |
1841 | if (*pt_base == PFN_DOWN(__pa(addr))) { | 1845 | if (*pt_base == PFN_DOWN(__pa(addr))) { |
1842 | set_page_prot((void *)addr, PAGE_KERNEL); | 1846 | set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG); |
1843 | clear_page((void *)addr); | 1847 | clear_page((void *)addr); |
1844 | (*pt_base)++; | 1848 | (*pt_base)++; |
1845 | } | 1849 | } |
1846 | if (*pt_end == PFN_DOWN(__pa(addr))) { | 1850 | if (*pt_end == PFN_DOWN(__pa(addr))) { |
1847 | set_page_prot((void *)addr, PAGE_KERNEL); | 1851 | set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG); |
1848 | clear_page((void *)addr); | 1852 | clear_page((void *)addr); |
1849 | (*pt_end)--; | 1853 | (*pt_end)--; |
1850 | } | 1854 | } |
@@ -2196,6 +2200,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { | |||
2196 | .lazy_mode = { | 2200 | .lazy_mode = { |
2197 | .enter = paravirt_enter_lazy_mmu, | 2201 | .enter = paravirt_enter_lazy_mmu, |
2198 | .leave = xen_leave_lazy_mmu, | 2202 | .leave = xen_leave_lazy_mmu, |
2203 | .flush = paravirt_flush_lazy_mmu, | ||
2199 | }, | 2204 | }, |
2200 | 2205 | ||
2201 | .set_fixmap = xen_set_fixmap, | 2206 | .set_fixmap = xen_set_fixmap, |
diff --git a/block/blk-core.c b/block/blk-core.c index 074b758efc42..7c288358a745 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -39,6 +39,7 @@ | |||
39 | 39 | ||
40 | EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap); | 40 | EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap); |
41 | EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap); | 41 | EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap); |
42 | EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete); | ||
42 | EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug); | 43 | EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug); |
43 | 44 | ||
44 | DEFINE_IDA(blk_queue_ida); | 45 | DEFINE_IDA(blk_queue_ida); |
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 6206a934eb8c..5efc5a647183 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c | |||
@@ -229,6 +229,8 @@ queue_store_##name(struct request_queue *q, const char *page, size_t count) \ | |||
229 | unsigned long val; \ | 229 | unsigned long val; \ |
230 | ssize_t ret; \ | 230 | ssize_t ret; \ |
231 | ret = queue_var_store(&val, page, count); \ | 231 | ret = queue_var_store(&val, page, count); \ |
232 | if (ret < 0) \ | ||
233 | return ret; \ | ||
232 | if (neg) \ | 234 | if (neg) \ |
233 | val = !val; \ | 235 | val = !val; \ |
234 | \ | 236 | \ |
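
The blk-sysfs hunk adds the missing error check so a failed parse in queue_var_store() is returned to the caller instead of the uninitialized value being applied. A sketch of the same fix with a hypothetical parse_ulong() standing in for queue_var_store():

    #include <errno.h>
    #include <stdlib.h>

    static long parse_ulong(const char *page, unsigned long *val)
    {
            char *end;

            *val = strtoul(page, &end, 10);
            if (end == page)
                    return -EINVAL;
            return 0;
    }

    static long queue_store_flag(const char *page, unsigned long *flag, int neg)
    {
            unsigned long val;
            long ret = parse_ulong(page, &val);

            if (ret < 0)            /* the check added by the hunk above */
                    return ret;

            *flag = neg ? !val : !!val;
            return 0;
    }
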
diff --git a/block/partition-generic.c b/block/partition-generic.c index ae95ee6a58aa..789cdea05893 100644 --- a/block/partition-generic.c +++ b/block/partition-generic.c | |||
@@ -257,7 +257,6 @@ void delete_partition(struct gendisk *disk, int partno) | |||
257 | 257 | ||
258 | hd_struct_put(part); | 258 | hd_struct_put(part); |
259 | } | 259 | } |
260 | EXPORT_SYMBOL(delete_partition); | ||
261 | 260 | ||
262 | static ssize_t whole_disk_show(struct device *dev, | 261 | static ssize_t whole_disk_show(struct device *dev, |
263 | struct device_attribute *attr, char *buf) | 262 | struct device_attribute *attr, char *buf) |
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index ef5356cd280a..0262210cad38 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c | |||
@@ -161,6 +161,8 @@ static int hash_recvmsg(struct kiocb *unused, struct socket *sock, | |||
161 | else if (len < ds) | 161 | else if (len < ds) |
162 | msg->msg_flags |= MSG_TRUNC; | 162 | msg->msg_flags |= MSG_TRUNC; |
163 | 163 | ||
164 | msg->msg_namelen = 0; | ||
165 | |||
164 | lock_sock(sk); | 166 | lock_sock(sk); |
165 | if (ctx->more) { | 167 | if (ctx->more) { |
166 | ctx->more = 0; | 168 | ctx->more = 0; |
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 6a6dfc062d2a..a1c4f0a55583 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c | |||
@@ -432,6 +432,7 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock, | |||
432 | long copied = 0; | 432 | long copied = 0; |
433 | 433 | ||
434 | lock_sock(sk); | 434 | lock_sock(sk); |
435 | msg->msg_namelen = 0; | ||
435 | for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0; | 436 | for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0; |
436 | iovlen--, iov++) { | 437 | iovlen--, iov++) { |
437 | unsigned long seglen = iov->iov_len; | 438 | unsigned long seglen = iov->iov_len; |
diff --git a/crypto/gcm.c b/crypto/gcm.c index 137ad1ec5438..13ccbda34ff9 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c | |||
@@ -44,6 +44,7 @@ struct crypto_rfc4543_ctx { | |||
44 | 44 | ||
45 | struct crypto_rfc4543_req_ctx { | 45 | struct crypto_rfc4543_req_ctx { |
46 | u8 auth_tag[16]; | 46 | u8 auth_tag[16]; |
47 | u8 assocbuf[32]; | ||
47 | struct scatterlist cipher[1]; | 48 | struct scatterlist cipher[1]; |
48 | struct scatterlist payload[2]; | 49 | struct scatterlist payload[2]; |
49 | struct scatterlist assoc[2]; | 50 | struct scatterlist assoc[2]; |
@@ -1133,9 +1134,19 @@ static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req, | |||
1133 | scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2); | 1134 | scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2); |
1134 | assoclen += 8 + req->cryptlen - (enc ? 0 : authsize); | 1135 | assoclen += 8 + req->cryptlen - (enc ? 0 : authsize); |
1135 | 1136 | ||
1136 | sg_init_table(assoc, 2); | 1137 | if (req->assoc->length == req->assoclen) { |
1137 | sg_set_page(assoc, sg_page(req->assoc), req->assoc->length, | 1138 | sg_init_table(assoc, 2); |
1138 | req->assoc->offset); | 1139 | sg_set_page(assoc, sg_page(req->assoc), req->assoc->length, |
1140 | req->assoc->offset); | ||
1141 | } else { | ||
1142 | BUG_ON(req->assoclen > sizeof(rctx->assocbuf)); | ||
1143 | |||
1144 | scatterwalk_map_and_copy(rctx->assocbuf, req->assoc, 0, | ||
1145 | req->assoclen, 0); | ||
1146 | |||
1147 | sg_init_table(assoc, 2); | ||
1148 | sg_set_buf(assoc, rctx->assocbuf, req->assoclen); | ||
1149 | } | ||
1139 | scatterwalk_crypto_chain(assoc, payload, 0, 2); | 1150 | scatterwalk_crypto_chain(assoc, payload, 0, 2); |
1140 | 1151 | ||
1141 | aead_request_set_tfm(subreq, ctx->child); | 1152 | aead_request_set_tfm(subreq, ctx->child); |
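
The gcm.c change copies the associated data into a small bounce buffer when the first scatterlist entry does not cover the full assoclen. A sketch of that decision with plain memory chunks standing in for scatterlists (the 32-byte bound mirrors assocbuf[32]):

    #include <assert.h>
    #include <stddef.h>
    #include <string.h>

    #define ASSOC_BUF_SIZE 32

    struct chunk { const unsigned char *data; size_t len; };

    static const unsigned char *gather_assoc(const struct chunk *sg, size_t nents,
                                             size_t assoclen,
                                             unsigned char bounce[ASSOC_BUF_SIZE])
    {
            size_t copied = 0, i;

            if (nents && sg[0].len == assoclen)
                    return sg[0].data;           /* already contiguous */

            assert(assoclen <= ASSOC_BUF_SIZE);  /* BUG_ON() in the driver */
            for (i = 0; i < nents && copied < assoclen; i++) {
                    size_t n = sg[i].len;

                    if (n > assoclen - copied)
                            n = assoclen - copied;
                    memcpy(bounce + copied, sg[i].data, n);
                    copied += n;
            }
            return bounce;
    }
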
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 92ed9692c47e..4bf68c8d4797 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig | |||
@@ -396,7 +396,7 @@ config ACPI_CUSTOM_METHOD | |||
396 | 396 | ||
397 | config ACPI_BGRT | 397 | config ACPI_BGRT |
398 | bool "Boottime Graphics Resource Table support" | 398 | bool "Boottime Graphics Resource Table support" |
399 | depends on EFI | 399 | depends on EFI && X86 |
400 | help | 400 | help |
401 | This driver adds support for exposing the ACPI Boottime Graphics | 401 | This driver adds support for exposing the ACPI Boottime Graphics |
402 | Resource Table, which allows the operating system to obtain | 402 | Resource Table, which allows the operating system to obtain |
diff --git a/drivers/acpi/acpi_i2c.c b/drivers/acpi/acpi_i2c.c index 82045e3f5cac..a82c7626aa9b 100644 --- a/drivers/acpi/acpi_i2c.c +++ b/drivers/acpi/acpi_i2c.c | |||
@@ -90,7 +90,7 @@ void acpi_i2c_register_devices(struct i2c_adapter *adapter) | |||
90 | acpi_handle handle; | 90 | acpi_handle handle; |
91 | acpi_status status; | 91 | acpi_status status; |
92 | 92 | ||
93 | handle = ACPI_HANDLE(&adapter->dev); | 93 | handle = ACPI_HANDLE(adapter->dev.parent); |
94 | if (!handle) | 94 | if (!handle) |
95 | return; | 95 | return; |
96 | 96 | ||
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 5ff173066127..6ae5e440436e 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c | |||
@@ -415,7 +415,6 @@ static int acpi_pci_root_add(struct acpi_device *device, | |||
415 | struct acpi_pci_root *root; | 415 | struct acpi_pci_root *root; |
416 | struct acpi_pci_driver *driver; | 416 | struct acpi_pci_driver *driver; |
417 | u32 flags, base_flags; | 417 | u32 flags, base_flags; |
418 | bool is_osc_granted = false; | ||
419 | 418 | ||
420 | root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL); | 419 | root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL); |
421 | if (!root) | 420 | if (!root) |
@@ -476,6 +475,30 @@ static int acpi_pci_root_add(struct acpi_device *device, | |||
476 | flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT; | 475 | flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT; |
477 | acpi_pci_osc_support(root, flags); | 476 | acpi_pci_osc_support(root, flags); |
478 | 477 | ||
478 | /* | ||
479 | * TBD: Need PCI interface for enumeration/configuration of roots. | ||
480 | */ | ||
481 | |||
482 | mutex_lock(&acpi_pci_root_lock); | ||
483 | list_add_tail(&root->node, &acpi_pci_roots); | ||
484 | mutex_unlock(&acpi_pci_root_lock); | ||
485 | |||
486 | /* | ||
487 | * Scan the Root Bridge | ||
488 | * -------------------- | ||
489 | * Must do this prior to any attempt to bind the root device, as the | ||
490 | * PCI namespace does not get created until this call is made (and | ||
491 | * thus the root bridge's pci_dev does not exist). | ||
492 | */ | ||
493 | root->bus = pci_acpi_scan_root(root); | ||
494 | if (!root->bus) { | ||
495 | printk(KERN_ERR PREFIX | ||
496 | "Bus %04x:%02x not present in PCI namespace\n", | ||
497 | root->segment, (unsigned int)root->secondary.start); | ||
498 | result = -ENODEV; | ||
499 | goto out_del_root; | ||
500 | } | ||
501 | |||
479 | /* Indicate support for various _OSC capabilities. */ | 502 | /* Indicate support for various _OSC capabilities. */ |
480 | if (pci_ext_cfg_avail()) | 503 | if (pci_ext_cfg_avail()) |
481 | flags |= OSC_EXT_PCI_CONFIG_SUPPORT; | 504 | flags |= OSC_EXT_PCI_CONFIG_SUPPORT; |
@@ -494,6 +517,7 @@ static int acpi_pci_root_add(struct acpi_device *device, | |||
494 | flags = base_flags; | 517 | flags = base_flags; |
495 | } | 518 | } |
496 | } | 519 | } |
520 | |||
497 | if (!pcie_ports_disabled | 521 | if (!pcie_ports_disabled |
498 | && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) { | 522 | && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) { |
499 | flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL | 523 | flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL |
@@ -514,54 +538,28 @@ static int acpi_pci_root_add(struct acpi_device *device, | |||
514 | status = acpi_pci_osc_control_set(device->handle, &flags, | 538 | status = acpi_pci_osc_control_set(device->handle, &flags, |
515 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); | 539 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); |
516 | if (ACPI_SUCCESS(status)) { | 540 | if (ACPI_SUCCESS(status)) { |
517 | is_osc_granted = true; | ||
518 | dev_info(&device->dev, | 541 | dev_info(&device->dev, |
519 | "ACPI _OSC control (0x%02x) granted\n", flags); | 542 | "ACPI _OSC control (0x%02x) granted\n", flags); |
543 | if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) { | ||
544 | /* | ||
545 | * We have ASPM control, but the FADT indicates | ||
546 | * that it's unsupported. Clear it. | ||
547 | */ | ||
548 | pcie_clear_aspm(root->bus); | ||
549 | } | ||
520 | } else { | 550 | } else { |
521 | is_osc_granted = false; | ||
522 | dev_info(&device->dev, | 551 | dev_info(&device->dev, |
523 | "ACPI _OSC request failed (%s), " | 552 | "ACPI _OSC request failed (%s), " |
524 | "returned control mask: 0x%02x\n", | 553 | "returned control mask: 0x%02x\n", |
525 | acpi_format_exception(status), flags); | 554 | acpi_format_exception(status), flags); |
555 | pr_info("ACPI _OSC control for PCIe not granted, " | ||
556 | "disabling ASPM\n"); | ||
557 | pcie_no_aspm(); | ||
526 | } | 558 | } |
527 | } else { | 559 | } else { |
528 | dev_info(&device->dev, | 560 | dev_info(&device->dev, |
529 | "Unable to request _OSC control " | 561 | "Unable to request _OSC control " |
530 | "(_OSC support mask: 0x%02x)\n", flags); | 562 | "(_OSC support mask: 0x%02x)\n", flags); |
531 | } | ||
532 | |||
533 | /* | ||
534 | * TBD: Need PCI interface for enumeration/configuration of roots. | ||
535 | */ | ||
536 | |||
537 | mutex_lock(&acpi_pci_root_lock); | ||
538 | list_add_tail(&root->node, &acpi_pci_roots); | ||
539 | mutex_unlock(&acpi_pci_root_lock); | ||
540 | |||
541 | /* | ||
542 | * Scan the Root Bridge | ||
543 | * -------------------- | ||
544 | * Must do this prior to any attempt to bind the root device, as the | ||
545 | * PCI namespace does not get created until this call is made (and | ||
546 | * thus the root bridge's pci_dev does not exist). | ||
547 | */ | ||
548 | root->bus = pci_acpi_scan_root(root); | ||
549 | if (!root->bus) { | ||
550 | printk(KERN_ERR PREFIX | ||
551 | "Bus %04x:%02x not present in PCI namespace\n", | ||
552 | root->segment, (unsigned int)root->secondary.start); | ||
553 | result = -ENODEV; | ||
554 | goto out_del_root; | ||
555 | } | ||
556 | |||
557 | /* ASPM setting */ | ||
558 | if (is_osc_granted) { | ||
559 | if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) | ||
560 | pcie_clear_aspm(root->bus); | ||
561 | } else { | ||
562 | pr_info("ACPI _OSC control for PCIe not granted, " | ||
563 | "disabling ASPM\n"); | ||
564 | pcie_no_aspm(); | ||
565 | } | 563 | } |
566 | 564 | ||
567 | pci_acpi_add_bus_pm_notifier(device, root->bus); | 565 | pci_acpi_add_bus_pm_notifier(device, root->bus); |
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index fc95308e9a11..ee255c60bdac 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c | |||
@@ -66,7 +66,8 @@ module_param(latency_factor, uint, 0644); | |||
66 | 66 | ||
67 | static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device); | 67 | static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device); |
68 | 68 | ||
69 | static struct acpi_processor_cx *acpi_cstate[CPUIDLE_STATE_MAX]; | 69 | static DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], |
70 | acpi_cstate); | ||
70 | 71 | ||
71 | static int disabled_by_idle_boot_param(void) | 72 | static int disabled_by_idle_boot_param(void) |
72 | { | 73 | { |
@@ -722,7 +723,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, | |||
722 | struct cpuidle_driver *drv, int index) | 723 | struct cpuidle_driver *drv, int index) |
723 | { | 724 | { |
724 | struct acpi_processor *pr; | 725 | struct acpi_processor *pr; |
725 | struct acpi_processor_cx *cx = acpi_cstate[index]; | 726 | struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); |
726 | 727 | ||
727 | pr = __this_cpu_read(processors); | 728 | pr = __this_cpu_read(processors); |
728 | 729 | ||
@@ -745,7 +746,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, | |||
745 | */ | 746 | */ |
746 | static int acpi_idle_play_dead(struct cpuidle_device *dev, int index) | 747 | static int acpi_idle_play_dead(struct cpuidle_device *dev, int index) |
747 | { | 748 | { |
748 | struct acpi_processor_cx *cx = acpi_cstate[index]; | 749 | struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); |
749 | 750 | ||
750 | ACPI_FLUSH_CPU_CACHE(); | 751 | ACPI_FLUSH_CPU_CACHE(); |
751 | 752 | ||
@@ -775,7 +776,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
775 | struct cpuidle_driver *drv, int index) | 776 | struct cpuidle_driver *drv, int index) |
776 | { | 777 | { |
777 | struct acpi_processor *pr; | 778 | struct acpi_processor *pr; |
778 | struct acpi_processor_cx *cx = acpi_cstate[index]; | 779 | struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); |
779 | 780 | ||
780 | pr = __this_cpu_read(processors); | 781 | pr = __this_cpu_read(processors); |
781 | 782 | ||
@@ -833,7 +834,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
833 | struct cpuidle_driver *drv, int index) | 834 | struct cpuidle_driver *drv, int index) |
834 | { | 835 | { |
835 | struct acpi_processor *pr; | 836 | struct acpi_processor *pr; |
836 | struct acpi_processor_cx *cx = acpi_cstate[index]; | 837 | struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); |
837 | 838 | ||
838 | pr = __this_cpu_read(processors); | 839 | pr = __this_cpu_read(processors); |
839 | 840 | ||
@@ -960,7 +961,7 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr, | |||
960 | !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) | 961 | !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) |
961 | continue; | 962 | continue; |
962 | #endif | 963 | #endif |
963 | acpi_cstate[count] = cx; | 964 | per_cpu(acpi_cstate[count], dev->cpu) = cx; |
964 | 965 | ||
965 | count++; | 966 | count++; |
966 | if (count == CPUIDLE_STATE_MAX) | 967 | if (count == CPUIDLE_STATE_MAX) |
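
The processor_idle.c hunks turn the single global C-state pointer table into a per-CPU table, so one CPU registering its _CST entries can no longer clobber the entries another CPU dereferences in its idle path. A sketch of the data-layout change, sized with made-up limits:

    #include <stddef.h>

    #define MAX_CPUS   8
    #define MAX_STATES 8

    struct cstate { int latency; };

    /* before: struct cstate *cstate_tab[MAX_STATES];   (shared by all CPUs) */
    static struct cstate *cstate_tab[MAX_CPUS][MAX_STATES];   /* after */

    static void register_cstate(int cpu, int index, struct cstate *cx)
    {
            cstate_tab[cpu][index] = cx;
    }

    static struct cstate *lookup_cstate(int cpu, int index)
    {
            return cstate_tab[cpu][index];
    }
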
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index ffdd32d22602..2f48123d74c4 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c | |||
@@ -150,6 +150,7 @@ enum piix_controller_ids { | |||
150 | tolapai_sata, | 150 | tolapai_sata, |
151 | piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */ | 151 | piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */ |
152 | ich8_sata_snb, | 152 | ich8_sata_snb, |
153 | ich8_2port_sata_snb, | ||
153 | }; | 154 | }; |
154 | 155 | ||
155 | struct piix_map_db { | 156 | struct piix_map_db { |
@@ -304,7 +305,7 @@ static const struct pci_device_id piix_pci_tbl[] = { | |||
304 | /* SATA Controller IDE (Lynx Point) */ | 305 | /* SATA Controller IDE (Lynx Point) */ |
305 | { 0x8086, 0x8c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, | 306 | { 0x8086, 0x8c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, |
306 | /* SATA Controller IDE (Lynx Point) */ | 307 | /* SATA Controller IDE (Lynx Point) */ |
307 | { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, | 308 | { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb }, |
308 | /* SATA Controller IDE (Lynx Point) */ | 309 | /* SATA Controller IDE (Lynx Point) */ |
309 | { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, | 310 | { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, |
310 | /* SATA Controller IDE (Lynx Point-LP) */ | 311 | /* SATA Controller IDE (Lynx Point-LP) */ |
@@ -439,6 +440,7 @@ static const struct piix_map_db *piix_map_db_table[] = { | |||
439 | [ich8m_apple_sata] = &ich8m_apple_map_db, | 440 | [ich8m_apple_sata] = &ich8m_apple_map_db, |
440 | [tolapai_sata] = &tolapai_map_db, | 441 | [tolapai_sata] = &tolapai_map_db, |
441 | [ich8_sata_snb] = &ich8_map_db, | 442 | [ich8_sata_snb] = &ich8_map_db, |
443 | [ich8_2port_sata_snb] = &ich8_2port_map_db, | ||
442 | }; | 444 | }; |
443 | 445 | ||
444 | static struct pci_bits piix_enable_bits[] = { | 446 | static struct pci_bits piix_enable_bits[] = { |
@@ -1242,6 +1244,16 @@ static struct ata_port_info piix_port_info[] = { | |||
1242 | .udma_mask = ATA_UDMA6, | 1244 | .udma_mask = ATA_UDMA6, |
1243 | .port_ops = &piix_sata_ops, | 1245 | .port_ops = &piix_sata_ops, |
1244 | }, | 1246 | }, |
1247 | |||
1248 | [ich8_2port_sata_snb] = | ||
1249 | { | ||
1250 | .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | ||
1251 | | PIIX_FLAG_PIO16, | ||
1252 | .pio_mask = ATA_PIO4, | ||
1253 | .mwdma_mask = ATA_MWDMA2, | ||
1254 | .udma_mask = ATA_UDMA6, | ||
1255 | .port_ops = &piix_sata_ops, | ||
1256 | }, | ||
1245 | }; | 1257 | }; |
1246 | 1258 | ||
1247 | #define AHCI_PCI_BAR 5 | 1259 | #define AHCI_PCI_BAR 5 |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 497adea1f0d6..63c743baf920 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -2329,7 +2329,7 @@ int ata_dev_configure(struct ata_device *dev) | |||
2329 | * from SATA Settings page of Identify Device Data Log. | 2329 | * from SATA Settings page of Identify Device Data Log. |
2330 | */ | 2330 | */ |
2331 | if (ata_id_has_devslp(dev->id)) { | 2331 | if (ata_id_has_devslp(dev->id)) { |
2332 | u8 sata_setting[ATA_SECT_SIZE]; | 2332 | u8 *sata_setting = ap->sector_buf; |
2333 | int i, j; | 2333 | int i, j; |
2334 | 2334 | ||
2335 | dev->flags |= ATA_DFLAG_DEVSLP; | 2335 | dev->flags |= ATA_DFLAG_DEVSLP; |
@@ -2439,6 +2439,9 @@ int ata_dev_configure(struct ata_device *dev) | |||
2439 | dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, | 2439 | dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, |
2440 | dev->max_sectors); | 2440 | dev->max_sectors); |
2441 | 2441 | ||
2442 | if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48) | ||
2443 | dev->max_sectors = ATA_MAX_SECTORS_LBA48; | ||
2444 | |||
2442 | if (ap->ops->dev_config) | 2445 | if (ap->ops->dev_config) |
2443 | ap->ops->dev_config(dev); | 2446 | ap->ops->dev_config(dev); |
2444 | 2447 | ||
@@ -4100,6 +4103,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
4100 | /* Weird ATAPI devices */ | 4103 | /* Weird ATAPI devices */ |
4101 | { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, | 4104 | { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, |
4102 | { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA }, | 4105 | { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA }, |
4106 | { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, | ||
4103 | 4107 | ||
4104 | /* Devices we expect to fail diagnostics */ | 4108 | /* Devices we expect to fail diagnostics */ |
4105 | 4109 | ||
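
The ata_dev_configure() hunk replaces a full 512-byte sector declared on the kernel stack with the port's preallocated sector buffer. A sketch of that pattern with hypothetical types:

    #include <stdint.h>
    #include <stdlib.h>

    #define SECT_SIZE 512

    struct port {
            uint8_t *sector_buf;    /* allocated once, reused for log reads */
    };

    static int port_init(struct port *ap)
    {
            ap->sector_buf = malloc(SECT_SIZE);
            return ap->sector_buf ? 0 : -1;
    }

    static void read_settings_log(struct port *ap)
    {
            uint8_t *buf = ap->sector_buf;   /* was: uint8_t buf[SECT_SIZE]; */

            /* fill and parse buf here (stubbed) */
            (void)buf;
    }
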
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 318b41358187..ff44787e5a45 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c | |||
@@ -532,8 +532,8 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg) | |||
532 | struct scsi_sense_hdr sshdr; | 532 | struct scsi_sense_hdr sshdr; |
533 | scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, | 533 | scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, |
534 | &sshdr); | 534 | &sshdr); |
535 | if (sshdr.sense_key == 0 && | 535 | if (sshdr.sense_key == RECOVERED_ERROR && |
536 | sshdr.asc == 0 && sshdr.ascq == 0) | 536 | sshdr.asc == 0 && sshdr.ascq == 0x1d) |
537 | cmd_result &= ~SAM_STAT_CHECK_CONDITION; | 537 | cmd_result &= ~SAM_STAT_CHECK_CONDITION; |
538 | } | 538 | } |
539 | 539 | ||
@@ -618,8 +618,8 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg) | |||
618 | struct scsi_sense_hdr sshdr; | 618 | struct scsi_sense_hdr sshdr; |
619 | scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, | 619 | scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, |
620 | &sshdr); | 620 | &sshdr); |
621 | if (sshdr.sense_key == 0 && | 621 | if (sshdr.sense_key == RECOVERED_ERROR && |
622 | sshdr.asc == 0 && sshdr.ascq == 0) | 622 | sshdr.asc == 0 && sshdr.ascq == 0x1d) |
623 | cmd_result &= ~SAM_STAT_CHECK_CONDITION; | 623 | cmd_result &= ~SAM_STAT_CHECK_CONDITION; |
624 | } | 624 | } |
625 | 625 | ||
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 5f74587ef258..71671c42ef45 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include "power.h" | 46 | #include "power.h" |
47 | 47 | ||
48 | static DEFINE_MUTEX(dev_pm_qos_mtx); | 48 | static DEFINE_MUTEX(dev_pm_qos_mtx); |
49 | static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx); | ||
49 | 50 | ||
50 | static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); | 51 | static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); |
51 | 52 | ||
@@ -216,12 +217,17 @@ void dev_pm_qos_constraints_destroy(struct device *dev) | |||
216 | struct pm_qos_constraints *c; | 217 | struct pm_qos_constraints *c; |
217 | struct pm_qos_flags *f; | 218 | struct pm_qos_flags *f; |
218 | 219 | ||
219 | mutex_lock(&dev_pm_qos_mtx); | 220 | mutex_lock(&dev_pm_qos_sysfs_mtx); |
220 | 221 | ||
221 | /* | 222 | /* |
222 | * If the device's PM QoS resume latency limit or PM QoS flags have been | 223 | * If the device's PM QoS resume latency limit or PM QoS flags have been |
223 | * exposed to user space, they have to be hidden at this point. | 224 | * exposed to user space, they have to be hidden at this point. |
224 | */ | 225 | */ |
226 | pm_qos_sysfs_remove_latency(dev); | ||
227 | pm_qos_sysfs_remove_flags(dev); | ||
228 | |||
229 | mutex_lock(&dev_pm_qos_mtx); | ||
230 | |||
225 | __dev_pm_qos_hide_latency_limit(dev); | 231 | __dev_pm_qos_hide_latency_limit(dev); |
226 | __dev_pm_qos_hide_flags(dev); | 232 | __dev_pm_qos_hide_flags(dev); |
227 | 233 | ||
@@ -254,6 +260,8 @@ void dev_pm_qos_constraints_destroy(struct device *dev) | |||
254 | 260 | ||
255 | out: | 261 | out: |
256 | mutex_unlock(&dev_pm_qos_mtx); | 262 | mutex_unlock(&dev_pm_qos_mtx); |
263 | |||
264 | mutex_unlock(&dev_pm_qos_sysfs_mtx); | ||
257 | } | 265 | } |
258 | 266 | ||
259 | /** | 267 | /** |
@@ -558,6 +566,14 @@ static void __dev_pm_qos_drop_user_request(struct device *dev, | |||
558 | kfree(req); | 566 | kfree(req); |
559 | } | 567 | } |
560 | 568 | ||
569 | static void dev_pm_qos_drop_user_request(struct device *dev, | ||
570 | enum dev_pm_qos_req_type type) | ||
571 | { | ||
572 | mutex_lock(&dev_pm_qos_mtx); | ||
573 | __dev_pm_qos_drop_user_request(dev, type); | ||
574 | mutex_unlock(&dev_pm_qos_mtx); | ||
575 | } | ||
576 | |||
561 | /** | 577 | /** |
562 | * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space. | 578 | * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space. |
563 | * @dev: Device whose PM QoS latency limit is to be exposed to user space. | 579 | * @dev: Device whose PM QoS latency limit is to be exposed to user space. |
@@ -581,6 +597,8 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) | |||
581 | return ret; | 597 | return ret; |
582 | } | 598 | } |
583 | 599 | ||
600 | mutex_lock(&dev_pm_qos_sysfs_mtx); | ||
601 | |||
584 | mutex_lock(&dev_pm_qos_mtx); | 602 | mutex_lock(&dev_pm_qos_mtx); |
585 | 603 | ||
586 | if (IS_ERR_OR_NULL(dev->power.qos)) | 604 | if (IS_ERR_OR_NULL(dev->power.qos)) |
@@ -591,26 +609,27 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) | |||
591 | if (ret < 0) { | 609 | if (ret < 0) { |
592 | __dev_pm_qos_remove_request(req); | 610 | __dev_pm_qos_remove_request(req); |
593 | kfree(req); | 611 | kfree(req); |
612 | mutex_unlock(&dev_pm_qos_mtx); | ||
594 | goto out; | 613 | goto out; |
595 | } | 614 | } |
596 | |||
597 | dev->power.qos->latency_req = req; | 615 | dev->power.qos->latency_req = req; |
616 | |||
617 | mutex_unlock(&dev_pm_qos_mtx); | ||
618 | |||
598 | ret = pm_qos_sysfs_add_latency(dev); | 619 | ret = pm_qos_sysfs_add_latency(dev); |
599 | if (ret) | 620 | if (ret) |
600 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); | 621 | dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); |
601 | 622 | ||
602 | out: | 623 | out: |
603 | mutex_unlock(&dev_pm_qos_mtx); | 624 | mutex_unlock(&dev_pm_qos_sysfs_mtx); |
604 | return ret; | 625 | return ret; |
605 | } | 626 | } |
606 | EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); | 627 | EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); |
607 | 628 | ||
608 | static void __dev_pm_qos_hide_latency_limit(struct device *dev) | 629 | static void __dev_pm_qos_hide_latency_limit(struct device *dev) |
609 | { | 630 | { |
610 | if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) { | 631 | if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) |
611 | pm_qos_sysfs_remove_latency(dev); | ||
612 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); | 632 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); |
613 | } | ||
614 | } | 633 | } |
615 | 634 | ||
616 | /** | 635 | /** |
@@ -619,9 +638,15 @@ static void __dev_pm_qos_hide_latency_limit(struct device *dev) | |||
619 | */ | 638 | */ |
620 | void dev_pm_qos_hide_latency_limit(struct device *dev) | 639 | void dev_pm_qos_hide_latency_limit(struct device *dev) |
621 | { | 640 | { |
641 | mutex_lock(&dev_pm_qos_sysfs_mtx); | ||
642 | |||
643 | pm_qos_sysfs_remove_latency(dev); | ||
644 | |||
622 | mutex_lock(&dev_pm_qos_mtx); | 645 | mutex_lock(&dev_pm_qos_mtx); |
623 | __dev_pm_qos_hide_latency_limit(dev); | 646 | __dev_pm_qos_hide_latency_limit(dev); |
624 | mutex_unlock(&dev_pm_qos_mtx); | 647 | mutex_unlock(&dev_pm_qos_mtx); |
648 | |||
649 | mutex_unlock(&dev_pm_qos_sysfs_mtx); | ||
625 | } | 650 | } |
626 | EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit); | 651 | EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit); |
627 | 652 | ||
@@ -649,6 +674,8 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val) | |||
649 | } | 674 | } |
650 | 675 | ||
651 | pm_runtime_get_sync(dev); | 676 | pm_runtime_get_sync(dev); |
677 | mutex_lock(&dev_pm_qos_sysfs_mtx); | ||
678 | |||
652 | mutex_lock(&dev_pm_qos_mtx); | 679 | mutex_lock(&dev_pm_qos_mtx); |
653 | 680 | ||
654 | if (IS_ERR_OR_NULL(dev->power.qos)) | 681 | if (IS_ERR_OR_NULL(dev->power.qos)) |
@@ -659,16 +686,19 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val) | |||
659 | if (ret < 0) { | 686 | if (ret < 0) { |
660 | __dev_pm_qos_remove_request(req); | 687 | __dev_pm_qos_remove_request(req); |
661 | kfree(req); | 688 | kfree(req); |
689 | mutex_unlock(&dev_pm_qos_mtx); | ||
662 | goto out; | 690 | goto out; |
663 | } | 691 | } |
664 | |||
665 | dev->power.qos->flags_req = req; | 692 | dev->power.qos->flags_req = req; |
693 | |||
694 | mutex_unlock(&dev_pm_qos_mtx); | ||
695 | |||
666 | ret = pm_qos_sysfs_add_flags(dev); | 696 | ret = pm_qos_sysfs_add_flags(dev); |
667 | if (ret) | 697 | if (ret) |
668 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); | 698 | dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); |
669 | 699 | ||
670 | out: | 700 | out: |
671 | mutex_unlock(&dev_pm_qos_mtx); | 701 | mutex_unlock(&dev_pm_qos_sysfs_mtx); |
672 | pm_runtime_put(dev); | 702 | pm_runtime_put(dev); |
673 | return ret; | 703 | return ret; |
674 | } | 704 | } |
@@ -676,10 +706,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags); | |||
676 | 706 | ||
677 | static void __dev_pm_qos_hide_flags(struct device *dev) | 707 | static void __dev_pm_qos_hide_flags(struct device *dev) |
678 | { | 708 | { |
679 | if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) { | 709 | if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) |
680 | pm_qos_sysfs_remove_flags(dev); | ||
681 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); | 710 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); |
682 | } | ||
683 | } | 711 | } |
684 | 712 | ||
685 | /** | 713 | /** |
@@ -689,9 +717,15 @@ static void __dev_pm_qos_hide_flags(struct device *dev) | |||
689 | void dev_pm_qos_hide_flags(struct device *dev) | 717 | void dev_pm_qos_hide_flags(struct device *dev) |
690 | { | 718 | { |
691 | pm_runtime_get_sync(dev); | 719 | pm_runtime_get_sync(dev); |
720 | mutex_lock(&dev_pm_qos_sysfs_mtx); | ||
721 | |||
722 | pm_qos_sysfs_remove_flags(dev); | ||
723 | |||
692 | mutex_lock(&dev_pm_qos_mtx); | 724 | mutex_lock(&dev_pm_qos_mtx); |
693 | __dev_pm_qos_hide_flags(dev); | 725 | __dev_pm_qos_hide_flags(dev); |
694 | mutex_unlock(&dev_pm_qos_mtx); | 726 | mutex_unlock(&dev_pm_qos_mtx); |
727 | |||
728 | mutex_unlock(&dev_pm_qos_sysfs_mtx); | ||
695 | pm_runtime_put(dev); | 729 | pm_runtime_put(dev); |
696 | } | 730 | } |
697 | EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags); | 731 | EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags); |
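
The qos.c hunks move the sysfs attribute add/remove calls outside dev_pm_qos_mtx and serialize them with the new dev_pm_qos_sysfs_mtx, since removing an attribute can block on a reader that itself needs dev_pm_qos_mtx. A sketch of the resulting lock ordering, with pthread mutexes standing in for the two kernel mutexes:

    #include <pthread.h>

    static pthread_mutex_t sysfs_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t data_lock  = PTHREAD_MUTEX_INITIALIZER;

    static void remove_attr(void)  { /* may block on an active reader */ }
    static void drop_request(void) { /* touches the shared constraint data */ }

    static void hide_interface(void)
    {
            pthread_mutex_lock(&sysfs_lock);
            remove_attr();                  /* no data_lock held here */

            pthread_mutex_lock(&data_lock);
            drop_request();
            pthread_mutex_unlock(&data_lock);

            pthread_mutex_unlock(&sysfs_lock);
    }
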
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index e6732cf7c06e..79f4fca9877a 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c | |||
@@ -398,7 +398,7 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min, | |||
398 | base = 0; | 398 | base = 0; |
399 | 399 | ||
400 | if (max < rbnode->base_reg + rbnode->blklen) | 400 | if (max < rbnode->base_reg + rbnode->blklen) |
401 | end = rbnode->base_reg + rbnode->blklen - max; | 401 | end = max - rbnode->base_reg + 1; |
402 | else | 402 | else |
403 | end = rbnode->blklen; | 403 | end = rbnode->blklen; |
404 | 404 | ||
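
The regcache-rbtree fix corrects the block-relative end index used when a sync request stops inside a cached block: for a block covering registers [base_reg, base_reg + blklen) and a request capped at register max inclusive, the exclusive end is max - base_reg + 1. A worked check of the arithmetic with illustrative numbers:

    #include <assert.h>

    int main(void)
    {
            unsigned int base_reg = 10, blklen = 20, max = 13;
            unsigned int old_end  = base_reg + blklen - max;   /* 17: wrong */
            unsigned int new_end  = max - base_reg + 1;         /* 4: right */

            /* The old value would sync registers 10..26, well past max. */
            assert(old_end == 17);
            /* The new value stops after register 13, as requested. */
            assert(base_reg + new_end - 1 == max);
            return 0;
    }
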
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 3d2367501fd0..58cfb3232428 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c | |||
@@ -710,12 +710,12 @@ skip_format_initialization: | |||
710 | } | 710 | } |
711 | } | 711 | } |
712 | 712 | ||
713 | regmap_debugfs_init(map, config->name); | ||
714 | |||
713 | ret = regcache_init(map, config); | 715 | ret = regcache_init(map, config); |
714 | if (ret != 0) | 716 | if (ret != 0) |
715 | goto err_range; | 717 | goto err_range; |
716 | 718 | ||
717 | regmap_debugfs_init(map, config->name); | ||
718 | |||
719 | /* Add a devres resource for dev_get_regmap() */ | 719 | /* Add a devres resource for dev_get_regmap() */ |
720 | m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); | 720 | m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); |
721 | if (!m) { | 721 | if (!m) { |
@@ -1036,6 +1036,8 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg, | |||
1036 | kfree(async->work_buf); | 1036 | kfree(async->work_buf); |
1037 | kfree(async); | 1037 | kfree(async); |
1038 | } | 1038 | } |
1039 | |||
1040 | return ret; | ||
1039 | } | 1041 | } |
1040 | 1042 | ||
1041 | trace_regmap_hw_write_start(map->dev, reg, | 1043 | trace_regmap_hw_write_start(map->dev, reg, |
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 25ef5c014fca..92b6d7c51e39 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c | |||
@@ -51,8 +51,9 @@ new_skb(ulong len) | |||
51 | { | 51 | { |
52 | struct sk_buff *skb; | 52 | struct sk_buff *skb; |
53 | 53 | ||
54 | skb = alloc_skb(len, GFP_ATOMIC); | 54 | skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC); |
55 | if (skb) { | 55 | if (skb) { |
56 | skb_reserve(skb, MAX_HEADER); | ||
56 | skb_reset_mac_header(skb); | 57 | skb_reset_mac_header(skb); |
57 | skb_reset_network_header(skb); | 58 | skb_reset_network_header(skb); |
58 | skb->protocol = __constant_htons(ETH_P_AOE); | 59 | skb->protocol = __constant_htons(ETH_P_AOE); |
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index fe5f6403417f..dfe758382eaf 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -922,6 +922,11 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, | |||
922 | lo->lo_flags |= LO_FLAGS_PARTSCAN; | 922 | lo->lo_flags |= LO_FLAGS_PARTSCAN; |
923 | if (lo->lo_flags & LO_FLAGS_PARTSCAN) | 923 | if (lo->lo_flags & LO_FLAGS_PARTSCAN) |
924 | ioctl_by_bdev(bdev, BLKRRPART, 0); | 924 | ioctl_by_bdev(bdev, BLKRRPART, 0); |
925 | |||
926 | /* Grab the block_device to prevent its destruction after we | ||
927 | * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev). | ||
928 | */ | ||
929 | bdgrab(bdev); | ||
925 | return 0; | 930 | return 0; |
926 | 931 | ||
927 | out_clr: | 932 | out_clr: |
@@ -1031,8 +1036,10 @@ static int loop_clr_fd(struct loop_device *lo) | |||
1031 | memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE); | 1036 | memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE); |
1032 | memset(lo->lo_crypt_name, 0, LO_NAME_SIZE); | 1037 | memset(lo->lo_crypt_name, 0, LO_NAME_SIZE); |
1033 | memset(lo->lo_file_name, 0, LO_NAME_SIZE); | 1038 | memset(lo->lo_file_name, 0, LO_NAME_SIZE); |
1034 | if (bdev) | 1039 | if (bdev) { |
1040 | bdput(bdev); | ||
1035 | invalidate_bdev(bdev); | 1041 | invalidate_bdev(bdev); |
1042 | } | ||
1036 | set_capacity(lo->lo_disk, 0); | 1043 | set_capacity(lo->lo_disk, 0); |
1037 | loop_sysfs_exit(lo); | 1044 | loop_sysfs_exit(lo); |
1038 | if (bdev) { | 1045 | if (bdev) { |
@@ -1044,29 +1051,12 @@ static int loop_clr_fd(struct loop_device *lo) | |||
1044 | lo->lo_state = Lo_unbound; | 1051 | lo->lo_state = Lo_unbound; |
1045 | /* This is safe: open() is still holding a reference. */ | 1052 | /* This is safe: open() is still holding a reference. */ |
1046 | module_put(THIS_MODULE); | 1053 | module_put(THIS_MODULE); |
1054 | if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev) | ||
1055 | ioctl_by_bdev(bdev, BLKRRPART, 0); | ||
1047 | lo->lo_flags = 0; | 1056 | lo->lo_flags = 0; |
1048 | if (!part_shift) | 1057 | if (!part_shift) |
1049 | lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN; | 1058 | lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN; |
1050 | mutex_unlock(&lo->lo_ctl_mutex); | 1059 | mutex_unlock(&lo->lo_ctl_mutex); |
1051 | |||
1052 | /* | ||
1053 | * Remove all partitions, since BLKRRPART won't remove user | ||
1054 | * added partitions when max_part=0 | ||
1055 | */ | ||
1056 | if (bdev) { | ||
1057 | struct disk_part_iter piter; | ||
1058 | struct hd_struct *part; | ||
1059 | |||
1060 | mutex_lock_nested(&bdev->bd_mutex, 1); | ||
1061 | invalidate_partition(bdev->bd_disk, 0); | ||
1062 | disk_part_iter_init(&piter, bdev->bd_disk, | ||
1063 | DISK_PITER_INCL_EMPTY); | ||
1064 | while ((part = disk_part_iter_next(&piter))) | ||
1065 | delete_partition(bdev->bd_disk, part->partno); | ||
1066 | disk_part_iter_exit(&piter); | ||
1067 | mutex_unlock(&bdev->bd_mutex); | ||
1068 | } | ||
1069 | |||
1070 | /* | 1060 | /* |
1071 | * Need not hold lo_ctl_mutex to fput backing file. | 1061 | * Need not hold lo_ctl_mutex to fput backing file. |
1072 | * Calling fput holding lo_ctl_mutex triggers a circular | 1062 | * Calling fput holding lo_ctl_mutex triggers a circular |
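
The loop.c hunks pair a bdgrab() in loop_set_fd() with a bdput() in loop_clr_fd(), so the block_device cannot be destroyed while the loop device still points at it. A sketch of the reference pairing with an ordinary counter standing in for the block_device refcount:

    struct bdev { int refcount; };

    static void bd_get(struct bdev *b) { b->refcount++; }   /* bdgrab() */
    static void bd_put(struct bdev *b) { b->refcount--; }   /* bdput()  */

    struct loopdev { struct bdev *bdev; };

    static void loop_attach(struct loopdev *lo, struct bdev *b)
    {
            bd_get(b);              /* reference taken by the hunk above */
            lo->bdev = b;
    }

    static void loop_detach(struct loopdev *lo)
    {
            if (lo->bdev) {
                    bd_put(lo->bdev);
                    lo->bdev = NULL;
            }
    }
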
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 92250af84e7d..32c678028e53 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c | |||
@@ -81,12 +81,17 @@ | |||
81 | /* Device instance number, incremented each time a device is probed. */ | 81 | /* Device instance number, incremented each time a device is probed. */ |
82 | static int instance; | 82 | static int instance; |
83 | 83 | ||
84 | struct list_head online_list; | ||
85 | struct list_head removing_list; | ||
86 | spinlock_t dev_lock; | ||
87 | |||
84 | /* | 88 | /* |
85 | * Global variable used to hold the major block device number | 89 | * Global variable used to hold the major block device number |
86 | * allocated in mtip_init(). | 90 | * allocated in mtip_init(). |
87 | */ | 91 | */ |
88 | static int mtip_major; | 92 | static int mtip_major; |
89 | static struct dentry *dfs_parent; | 93 | static struct dentry *dfs_parent; |
94 | static struct dentry *dfs_device_status; | ||
90 | 95 | ||
91 | static u32 cpu_use[NR_CPUS]; | 96 | static u32 cpu_use[NR_CPUS]; |
92 | 97 | ||
@@ -243,40 +248,31 @@ static inline void release_slot(struct mtip_port *port, int tag) | |||
243 | /* | 248 | /* |
244 | * Reset the HBA (without sleeping) | 249 | * Reset the HBA (without sleeping) |
245 | * | 250 | * |
246 | * Just like hba_reset, except does not call sleep, so can be | ||
247 | * run from interrupt/tasklet context. | ||
248 | * | ||
249 | * @dd Pointer to the driver data structure. | 251 | * @dd Pointer to the driver data structure. |
250 | * | 252 | * |
251 | * return value | 253 | * return value |
252 | * 0 The reset was successful. | 254 | * 0 The reset was successful. |
253 | * -1 The HBA Reset bit did not clear. | 255 | * -1 The HBA Reset bit did not clear. |
254 | */ | 256 | */ |
255 | static int hba_reset_nosleep(struct driver_data *dd) | 257 | static int mtip_hba_reset(struct driver_data *dd) |
256 | { | 258 | { |
257 | unsigned long timeout; | 259 | unsigned long timeout; |
258 | 260 | ||
259 | /* Chip quirk: quiesce any chip function */ | ||
260 | mdelay(10); | ||
261 | |||
262 | /* Set the reset bit */ | 261 | /* Set the reset bit */ |
263 | writel(HOST_RESET, dd->mmio + HOST_CTL); | 262 | writel(HOST_RESET, dd->mmio + HOST_CTL); |
264 | 263 | ||
265 | /* Flush */ | 264 | /* Flush */ |
266 | readl(dd->mmio + HOST_CTL); | 265 | readl(dd->mmio + HOST_CTL); |
267 | 266 | ||
268 | /* | 267 | /* Spin for up to 2 seconds, waiting for reset acknowledgement */ |
269 | * Wait 10ms then spin for up to 1 second | 268 | timeout = jiffies + msecs_to_jiffies(2000); |
270 | * waiting for reset acknowledgement | 269 | do { |
271 | */ | 270 | mdelay(10); |
272 | timeout = jiffies + msecs_to_jiffies(1000); | 271 | if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) |
273 | mdelay(10); | 272 | return -1; |
274 | while ((readl(dd->mmio + HOST_CTL) & HOST_RESET) | ||
275 | && time_before(jiffies, timeout)) | ||
276 | mdelay(1); | ||
277 | 273 | ||
278 | if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) | 274 | } while ((readl(dd->mmio + HOST_CTL) & HOST_RESET) |
279 | return -1; | 275 | && time_before(jiffies, timeout)); |
280 | 276 | ||
281 | if (readl(dd->mmio + HOST_CTL) & HOST_RESET) | 277 | if (readl(dd->mmio + HOST_CTL) & HOST_RESET) |
282 | return -1; | 278 | return -1; |
@@ -481,7 +477,7 @@ static void mtip_restart_port(struct mtip_port *port) | |||
481 | dev_warn(&port->dd->pdev->dev, | 477 | dev_warn(&port->dd->pdev->dev, |
482 | "PxCMD.CR not clear, escalating reset\n"); | 478 | "PxCMD.CR not clear, escalating reset\n"); |
483 | 479 | ||
484 | if (hba_reset_nosleep(port->dd)) | 480 | if (mtip_hba_reset(port->dd)) |
485 | dev_err(&port->dd->pdev->dev, | 481 | dev_err(&port->dd->pdev->dev, |
486 | "HBA reset escalation failed.\n"); | 482 | "HBA reset escalation failed.\n"); |
487 | 483 | ||
@@ -527,6 +523,26 @@ static void mtip_restart_port(struct mtip_port *port) | |||
527 | 523 | ||
528 | } | 524 | } |
529 | 525 | ||
526 | static int mtip_device_reset(struct driver_data *dd) | ||
527 | { | ||
528 | int rv = 0; | ||
529 | |||
530 | if (mtip_check_surprise_removal(dd->pdev)) | ||
531 | return 0; | ||
532 | |||
533 | if (mtip_hba_reset(dd) < 0) | ||
534 | rv = -EFAULT; | ||
535 | |||
536 | mdelay(1); | ||
537 | mtip_init_port(dd->port); | ||
538 | mtip_start_port(dd->port); | ||
539 | |||
540 | /* Enable interrupts on the HBA. */ | ||
541 | writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN, | ||
542 | dd->mmio + HOST_CTL); | ||
543 | return rv; | ||
544 | } | ||
545 | |||
530 | /* | 546 | /* |
531 | * Helper function for tag logging | 547 | * Helper function for tag logging |
532 | */ | 548 | */ |
@@ -632,7 +648,7 @@ static void mtip_timeout_function(unsigned long int data) | |||
632 | if (cmdto_cnt) { | 648 | if (cmdto_cnt) { |
633 | print_tags(port->dd, "timed out", tagaccum, cmdto_cnt); | 649 | print_tags(port->dd, "timed out", tagaccum, cmdto_cnt); |
634 | if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { | 650 | if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { |
635 | mtip_restart_port(port); | 651 | mtip_device_reset(port->dd); |
636 | wake_up_interruptible(&port->svc_wait); | 652 | wake_up_interruptible(&port->svc_wait); |
637 | } | 653 | } |
638 | clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); | 654 | clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); |
@@ -1283,11 +1299,11 @@ static int mtip_exec_internal_command(struct mtip_port *port, | |||
1283 | int rv = 0, ready2go = 1; | 1299 | int rv = 0, ready2go = 1; |
1284 | struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL]; | 1300 | struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL]; |
1285 | unsigned long to; | 1301 | unsigned long to; |
1302 | struct driver_data *dd = port->dd; | ||
1286 | 1303 | ||
1287 | /* Make sure the buffer is 8 byte aligned. This is asic specific. */ | 1304 | /* Make sure the buffer is 8 byte aligned. This is asic specific. */ |
1288 | if (buffer & 0x00000007) { | 1305 | if (buffer & 0x00000007) { |
1289 | dev_err(&port->dd->pdev->dev, | 1306 | dev_err(&dd->pdev->dev, "SG buffer is not 8 byte aligned\n"); |
1290 | "SG buffer is not 8 byte aligned\n"); | ||
1291 | return -EFAULT; | 1307 | return -EFAULT; |
1292 | } | 1308 | } |
1293 | 1309 | ||
@@ -1300,23 +1316,21 @@ static int mtip_exec_internal_command(struct mtip_port *port, | |||
1300 | mdelay(100); | 1316 | mdelay(100); |
1301 | } while (time_before(jiffies, to)); | 1317 | } while (time_before(jiffies, to)); |
1302 | if (!ready2go) { | 1318 | if (!ready2go) { |
1303 | dev_warn(&port->dd->pdev->dev, | 1319 | dev_warn(&dd->pdev->dev, |
1304 | "Internal cmd active. new cmd [%02X]\n", fis->command); | 1320 | "Internal cmd active. new cmd [%02X]\n", fis->command); |
1305 | return -EBUSY; | 1321 | return -EBUSY; |
1306 | } | 1322 | } |
1307 | set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); | 1323 | set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); |
1308 | port->ic_pause_timer = 0; | 1324 | port->ic_pause_timer = 0; |
1309 | 1325 | ||
1310 | if (fis->command == ATA_CMD_SEC_ERASE_UNIT) | 1326 | clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags); |
1311 | clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags); | 1327 | clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags); |
1312 | else if (fis->command == ATA_CMD_DOWNLOAD_MICRO) | ||
1313 | clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags); | ||
1314 | 1328 | ||
1315 | if (atomic == GFP_KERNEL) { | 1329 | if (atomic == GFP_KERNEL) { |
1316 | if (fis->command != ATA_CMD_STANDBYNOW1) { | 1330 | if (fis->command != ATA_CMD_STANDBYNOW1) { |
1317 | /* wait for io to complete if non atomic */ | 1331 | /* wait for io to complete if non atomic */ |
1318 | if (mtip_quiesce_io(port, 5000) < 0) { | 1332 | if (mtip_quiesce_io(port, 5000) < 0) { |
1319 | dev_warn(&port->dd->pdev->dev, | 1333 | dev_warn(&dd->pdev->dev, |
1320 | "Failed to quiesce IO\n"); | 1334 | "Failed to quiesce IO\n"); |
1321 | release_slot(port, MTIP_TAG_INTERNAL); | 1335 | release_slot(port, MTIP_TAG_INTERNAL); |
1322 | clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); | 1336 | clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); |
@@ -1361,58 +1375,84 @@ static int mtip_exec_internal_command(struct mtip_port *port, | |||
1361 | /* Issue the command to the hardware */ | 1375 | /* Issue the command to the hardware */ |
1362 | mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL); | 1376 | mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL); |
1363 | 1377 | ||
1364 | /* Poll if atomic, wait_for_completion otherwise */ | ||
1365 | if (atomic == GFP_KERNEL) { | 1378 | if (atomic == GFP_KERNEL) { |
1366 | /* Wait for the command to complete or timeout. */ | 1379 | /* Wait for the command to complete or timeout. */ |
1367 | if (wait_for_completion_timeout( | 1380 | if (wait_for_completion_interruptible_timeout( |
1368 | &wait, | 1381 | &wait, |
1369 | msecs_to_jiffies(timeout)) == 0) { | 1382 | msecs_to_jiffies(timeout)) <= 0) { |
1370 | dev_err(&port->dd->pdev->dev, | 1383 | if (rv == -ERESTARTSYS) { /* interrupted */ |
1371 | "Internal command did not complete [%d] " | 1384 | dev_err(&dd->pdev->dev, |
1372 | "within timeout of %lu ms\n", | 1385 | "Internal command [%02X] was interrupted after %lu ms\n", |
1373 | atomic, timeout); | 1386 | fis->command, timeout); |
1374 | if (mtip_check_surprise_removal(port->dd->pdev) || | 1387 | rv = -EINTR; |
1388 | goto exec_ic_exit; | ||
1389 | } else if (rv == 0) /* timeout */ | ||
1390 | dev_err(&dd->pdev->dev, | ||
1391 | "Internal command did not complete [%02X] within timeout of %lu ms\n", | ||
1392 | fis->command, timeout); | ||
1393 | else | ||
1394 | dev_err(&dd->pdev->dev, | ||
1395 | "Internal command [%02X] wait returned code [%d] after %lu ms - unhandled\n", | ||
1396 | fis->command, rv, timeout); | ||
1397 | |||
1398 | if (mtip_check_surprise_removal(dd->pdev) || | ||
1375 | test_bit(MTIP_DDF_REMOVE_PENDING_BIT, | 1399 | test_bit(MTIP_DDF_REMOVE_PENDING_BIT, |
1376 | &port->dd->dd_flag)) { | 1400 | &dd->dd_flag)) { |
1401 | dev_err(&dd->pdev->dev, | ||
1402 | "Internal command [%02X] wait returned due to SR\n", | ||
1403 | fis->command); | ||
1377 | rv = -ENXIO; | 1404 | rv = -ENXIO; |
1378 | goto exec_ic_exit; | 1405 | goto exec_ic_exit; |
1379 | } | 1406 | } |
1407 | mtip_device_reset(dd); /* recover from timeout issue */ | ||
1380 | rv = -EAGAIN; | 1408 | rv = -EAGAIN; |
1409 | goto exec_ic_exit; | ||
1381 | } | 1410 | } |
1382 | } else { | 1411 | } else { |
1412 | u32 hba_stat, port_stat; | ||
1413 | |||
1383 | /* Spin for <timeout> checking if command still outstanding */ | 1414 | /* Spin for <timeout> checking if command still outstanding */ |
1384 | timeout = jiffies + msecs_to_jiffies(timeout); | 1415 | timeout = jiffies + msecs_to_jiffies(timeout); |
1385 | while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL]) | 1416 | while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL]) |
1386 | & (1 << MTIP_TAG_INTERNAL)) | 1417 | & (1 << MTIP_TAG_INTERNAL)) |
1387 | && time_before(jiffies, timeout)) { | 1418 | && time_before(jiffies, timeout)) { |
1388 | if (mtip_check_surprise_removal(port->dd->pdev)) { | 1419 | if (mtip_check_surprise_removal(dd->pdev)) { |
1389 | rv = -ENXIO; | 1420 | rv = -ENXIO; |
1390 | goto exec_ic_exit; | 1421 | goto exec_ic_exit; |
1391 | } | 1422 | } |
1392 | if ((fis->command != ATA_CMD_STANDBYNOW1) && | 1423 | if ((fis->command != ATA_CMD_STANDBYNOW1) && |
1393 | test_bit(MTIP_DDF_REMOVE_PENDING_BIT, | 1424 | test_bit(MTIP_DDF_REMOVE_PENDING_BIT, |
1394 | &port->dd->dd_flag)) { | 1425 | &dd->dd_flag)) { |
1395 | rv = -ENXIO; | 1426 | rv = -ENXIO; |
1396 | goto exec_ic_exit; | 1427 | goto exec_ic_exit; |
1397 | } | 1428 | } |
1398 | if (readl(port->mmio + PORT_IRQ_STAT) & PORT_IRQ_ERR) { | 1429 | port_stat = readl(port->mmio + PORT_IRQ_STAT); |
1399 | atomic_inc(&int_cmd->active); /* error */ | 1430 | if (!port_stat) |
1400 | break; | 1431 | continue; |
1432 | |||
1433 | if (port_stat & PORT_IRQ_ERR) { | ||
1434 | dev_err(&dd->pdev->dev, | ||
1435 | "Internal command [%02X] failed\n", | ||
1436 | fis->command); | ||
1437 | mtip_device_reset(dd); | ||
1438 | rv = -EIO; | ||
1439 | goto exec_ic_exit; | ||
1440 | } else { | ||
1441 | writel(port_stat, port->mmio + PORT_IRQ_STAT); | ||
1442 | hba_stat = readl(dd->mmio + HOST_IRQ_STAT); | ||
1443 | if (hba_stat) | ||
1444 | writel(hba_stat, | ||
1445 | dd->mmio + HOST_IRQ_STAT); | ||
1401 | } | 1446 | } |
1447 | break; | ||
1402 | } | 1448 | } |
1403 | } | 1449 | } |
1404 | 1450 | ||
1405 | if (atomic_read(&int_cmd->active) > 1) { | ||
1406 | dev_err(&port->dd->pdev->dev, | ||
1407 | "Internal command [%02X] failed\n", fis->command); | ||
1408 | rv = -EIO; | ||
1409 | } | ||
1410 | if (readl(port->cmd_issue[MTIP_TAG_INTERNAL]) | 1451 | if (readl(port->cmd_issue[MTIP_TAG_INTERNAL]) |
1411 | & (1 << MTIP_TAG_INTERNAL)) { | 1452 | & (1 << MTIP_TAG_INTERNAL)) { |
1412 | rv = -ENXIO; | 1453 | rv = -ENXIO; |
1413 | if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, | 1454 | if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) { |
1414 | &port->dd->dd_flag)) { | 1455 | mtip_device_reset(dd); |
1415 | mtip_restart_port(port); | ||
1416 | rv = -EAGAIN; | 1456 | rv = -EAGAIN; |
1417 | } | 1457 | } |
1418 | } | 1458 | } |
@@ -1724,7 +1764,8 @@ static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id, | |||
1724 | * -EINVAL Invalid parameters passed in, trim not supported | 1764 | * -EINVAL Invalid parameters passed in, trim not supported |
1725 | * -EIO Error submitting trim request to hw | 1765 | * -EIO Error submitting trim request to hw |
1726 | */ | 1766 | */ |
1727 | static int mtip_send_trim(struct driver_data *dd, unsigned int lba, unsigned int len) | 1767 | static int mtip_send_trim(struct driver_data *dd, unsigned int lba, |
1768 | unsigned int len) | ||
1728 | { | 1769 | { |
1729 | int i, rv = 0; | 1770 | int i, rv = 0; |
1730 | u64 tlba, tlen, sect_left; | 1771 | u64 tlba, tlen, sect_left; |
@@ -1811,45 +1852,6 @@ static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors) | |||
1811 | } | 1852 | } |
1812 | 1853 | ||
1813 | /* | 1854 | /* |
1814 | * Reset the HBA. | ||
1815 | * | ||
1816 | * Resets the HBA by setting the HBA Reset bit in the Global | ||
1817 | * HBA Control register. After setting the HBA Reset bit the | ||
1818 | * function waits for 1 second before reading the HBA Reset | ||
1819 | * bit to make sure it has cleared. If HBA Reset is not clear | ||
1820 | * an error is returned. Cannot be used in non-blockable | ||
1821 | * context. | ||
1822 | * | ||
1823 | * @dd Pointer to the driver data structure. | ||
1824 | * | ||
1825 | * return value | ||
1826 | * 0 The reset was successful. | ||
1827 | * -1 The HBA Reset bit did not clear. | ||
1828 | */ | ||
1829 | static int mtip_hba_reset(struct driver_data *dd) | ||
1830 | { | ||
1831 | mtip_deinit_port(dd->port); | ||
1832 | |||
1833 | /* Set the reset bit */ | ||
1834 | writel(HOST_RESET, dd->mmio + HOST_CTL); | ||
1835 | |||
1836 | /* Flush */ | ||
1837 | readl(dd->mmio + HOST_CTL); | ||
1838 | |||
1839 | /* Wait for reset to clear */ | ||
1840 | ssleep(1); | ||
1841 | |||
1842 | /* Check the bit has cleared */ | ||
1843 | if (readl(dd->mmio + HOST_CTL) & HOST_RESET) { | ||
1844 | dev_err(&dd->pdev->dev, | ||
1845 | "Reset bit did not clear.\n"); | ||
1846 | return -1; | ||
1847 | } | ||
1848 | |||
1849 | return 0; | ||
1850 | } | ||
1851 | |||
1852 | /* | ||
1853 | * Display the identify command data. | 1855 | * Display the identify command data. |
1854 | * | 1856 | * |
1855 | * @port Pointer to the port data structure. | 1857 | * @port Pointer to the port data structure. |
@@ -2710,6 +2712,100 @@ static ssize_t mtip_hw_show_status(struct device *dev, | |||
2710 | 2712 | ||
2711 | static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL); | 2713 | static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL); |
2712 | 2714 | ||
2715 | /* debugfs entries */ | ||
2716 | |||
2717 | static ssize_t show_device_status(struct device_driver *drv, char *buf) | ||
2718 | { | ||
2719 | int size = 0; | ||
2720 | struct driver_data *dd, *tmp; | ||
2721 | unsigned long flags; | ||
2722 | char id_buf[42]; | ||
2723 | u16 status = 0; | ||
2724 | |||
2725 | spin_lock_irqsave(&dev_lock, flags); | ||
2726 | size += sprintf(&buf[size], "Devices Present:\n"); | ||
2727 | list_for_each_entry_safe(dd, tmp, &online_list, online_list) { | ||
2728 | if (dd->pdev) { | ||
2729 | if (dd->port && | ||
2730 | dd->port->identify && | ||
2731 | dd->port->identify_valid) { | ||
2732 | strlcpy(id_buf, | ||
2733 | (char *) (dd->port->identify + 10), 21); | ||
2734 | status = *(dd->port->identify + 141); | ||
2735 | } else { | ||
2736 | memset(id_buf, 0, 42); | ||
2737 | status = 0; | ||
2738 | } | ||
2739 | |||
2740 | if (dd->port && | ||
2741 | test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) { | ||
2742 | size += sprintf(&buf[size], | ||
2743 | " device %s %s (ftl rebuild %d %%)\n", | ||
2744 | dev_name(&dd->pdev->dev), | ||
2745 | id_buf, | ||
2746 | status); | ||
2747 | } else { | ||
2748 | size += sprintf(&buf[size], | ||
2749 | " device %s %s\n", | ||
2750 | dev_name(&dd->pdev->dev), | ||
2751 | id_buf); | ||
2752 | } | ||
2753 | } | ||
2754 | } | ||
2755 | |||
2756 | size += sprintf(&buf[size], "Devices Being Removed:\n"); | ||
2757 | list_for_each_entry_safe(dd, tmp, &removing_list, remove_list) { | ||
2758 | if (dd->pdev) { | ||
2759 | if (dd->port && | ||
2760 | dd->port->identify && | ||
2761 | dd->port->identify_valid) { | ||
2762 | strlcpy(id_buf, | ||
2763 | (char *) (dd->port->identify+10), 21); | ||
2764 | status = *(dd->port->identify + 141); | ||
2765 | } else { | ||
2766 | memset(id_buf, 0, 42); | ||
2767 | status = 0; | ||
2768 | } | ||
2769 | |||
2770 | if (dd->port && | ||
2771 | test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) { | ||
2772 | size += sprintf(&buf[size], | ||
2773 | " device %s %s (ftl rebuild %d %%)\n", | ||
2774 | dev_name(&dd->pdev->dev), | ||
2775 | id_buf, | ||
2776 | status); | ||
2777 | } else { | ||
2778 | size += sprintf(&buf[size], | ||
2779 | " device %s %s\n", | ||
2780 | dev_name(&dd->pdev->dev), | ||
2781 | id_buf); | ||
2782 | } | ||
2783 | } | ||
2784 | } | ||
2785 | spin_unlock_irqrestore(&dev_lock, flags); | ||
2786 | |||
2787 | return size; | ||
2788 | } | ||
2789 | |||
2790 | static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf, | ||
2791 | size_t len, loff_t *offset) | ||
2792 | { | ||
2793 | int size = *offset; | ||
2794 | char buf[MTIP_DFS_MAX_BUF_SIZE]; | ||
2795 | |||
2796 | if (!len || *offset) | ||
2797 | return 0; | ||
2798 | |||
2799 | size += show_device_status(NULL, buf); | ||
2800 | |||
2801 | *offset = size <= len ? size : len; | ||
2802 | size = copy_to_user(ubuf, buf, *offset); | ||
2803 | if (size) | ||
2804 | return -EFAULT; | ||
2805 | |||
2806 | return *offset; | ||
2807 | } | ||
2808 | |||
2713 | static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf, | 2809 | static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf, |
2714 | size_t len, loff_t *offset) | 2810 | size_t len, loff_t *offset) |
2715 | { | 2811 | { |
@@ -2804,6 +2900,13 @@ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf, | |||
2804 | return *offset; | 2900 | return *offset; |
2805 | } | 2901 | } |
2806 | 2902 | ||
2903 | static const struct file_operations mtip_device_status_fops = { | ||
2904 | .owner = THIS_MODULE, | ||
2905 | .open = simple_open, | ||
2906 | .read = mtip_hw_read_device_status, | ||
2907 | .llseek = no_llseek, | ||
2908 | }; | ||
2909 | |||
2807 | static const struct file_operations mtip_regs_fops = { | 2910 | static const struct file_operations mtip_regs_fops = { |
2808 | .owner = THIS_MODULE, | 2911 | .owner = THIS_MODULE, |
2809 | .open = simple_open, | 2912 | .open = simple_open, |
@@ -4161,6 +4264,7 @@ static int mtip_pci_probe(struct pci_dev *pdev, | |||
4161 | const struct cpumask *node_mask; | 4264 | const struct cpumask *node_mask; |
4162 | int cpu, i = 0, j = 0; | 4265 | int cpu, i = 0, j = 0; |
4163 | int my_node = NUMA_NO_NODE; | 4266 | int my_node = NUMA_NO_NODE; |
4267 | unsigned long flags; | ||
4164 | 4268 | ||
4165 | /* Allocate memory for this devices private data. */ | 4269 | /* Allocate memory for this devices private data. */ |
4166 | my_node = pcibus_to_node(pdev->bus); | 4270 | my_node = pcibus_to_node(pdev->bus); |
@@ -4218,6 +4322,9 @@ static int mtip_pci_probe(struct pci_dev *pdev, | |||
4218 | dd->pdev = pdev; | 4322 | dd->pdev = pdev; |
4219 | dd->numa_node = my_node; | 4323 | dd->numa_node = my_node; |
4220 | 4324 | ||
4325 | INIT_LIST_HEAD(&dd->online_list); | ||
4326 | INIT_LIST_HEAD(&dd->remove_list); | ||
4327 | |||
4221 | memset(dd->workq_name, 0, 32); | 4328 | memset(dd->workq_name, 0, 32); |
4222 | snprintf(dd->workq_name, 31, "mtipq%d", dd->instance); | 4329 | snprintf(dd->workq_name, 31, "mtipq%d", dd->instance); |
4223 | 4330 | ||
@@ -4305,6 +4412,14 @@ static int mtip_pci_probe(struct pci_dev *pdev, | |||
4305 | instance++; | 4412 | instance++; |
4306 | if (rv != MTIP_FTL_REBUILD_MAGIC) | 4413 | if (rv != MTIP_FTL_REBUILD_MAGIC) |
4307 | set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag); | 4414 | set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag); |
4415 | else | ||
4416 | rv = 0; /* device in rebuild state, return 0 from probe */ | ||
4417 | |||
4418 | /* Add to online list even if in ftl rebuild */ | ||
4419 | spin_lock_irqsave(&dev_lock, flags); | ||
4420 | list_add(&dd->online_list, &online_list); | ||
4421 | spin_unlock_irqrestore(&dev_lock, flags); | ||
4422 | |||
4308 | goto done; | 4423 | goto done; |
4309 | 4424 | ||
4310 | block_initialize_err: | 4425 | block_initialize_err: |
@@ -4338,9 +4453,15 @@ static void mtip_pci_remove(struct pci_dev *pdev) | |||
4338 | { | 4453 | { |
4339 | struct driver_data *dd = pci_get_drvdata(pdev); | 4454 | struct driver_data *dd = pci_get_drvdata(pdev); |
4340 | int counter = 0; | 4455 | int counter = 0; |
4456 | unsigned long flags; | ||
4341 | 4457 | ||
4342 | set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag); | 4458 | set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag); |
4343 | 4459 | ||
4460 | spin_lock_irqsave(&dev_lock, flags); | ||
4461 | list_del_init(&dd->online_list); | ||
4462 | list_add(&dd->remove_list, &removing_list); | ||
4463 | spin_unlock_irqrestore(&dev_lock, flags); | ||
4464 | |||
4344 | if (mtip_check_surprise_removal(pdev)) { | 4465 | if (mtip_check_surprise_removal(pdev)) { |
4345 | while (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) { | 4466 | while (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) { |
4346 | counter++; | 4467 | counter++; |
@@ -4366,6 +4487,10 @@ static void mtip_pci_remove(struct pci_dev *pdev) | |||
4366 | 4487 | ||
4367 | pci_disable_msi(pdev); | 4488 | pci_disable_msi(pdev); |
4368 | 4489 | ||
4490 | spin_lock_irqsave(&dev_lock, flags); | ||
4491 | list_del_init(&dd->remove_list); | ||
4492 | spin_unlock_irqrestore(&dev_lock, flags); | ||
4493 | |||
4369 | kfree(dd); | 4494 | kfree(dd); |
4370 | pcim_iounmap_regions(pdev, 1 << MTIP_ABAR); | 4495 | pcim_iounmap_regions(pdev, 1 << MTIP_ABAR); |
4371 | } | 4496 | } |
@@ -4513,6 +4638,11 @@ static int __init mtip_init(void) | |||
4513 | 4638 | ||
4514 | pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n"); | 4639 | pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n"); |
4515 | 4640 | ||
4641 | spin_lock_init(&dev_lock); | ||
4642 | |||
4643 | INIT_LIST_HEAD(&online_list); | ||
4644 | INIT_LIST_HEAD(&removing_list); | ||
4645 | |||
4516 | /* Allocate a major block device number to use with this driver. */ | 4646 | /* Allocate a major block device number to use with this driver. */ |
4517 | error = register_blkdev(0, MTIP_DRV_NAME); | 4647 | error = register_blkdev(0, MTIP_DRV_NAME); |
4518 | if (error <= 0) { | 4648 | if (error <= 0) { |
@@ -4522,11 +4652,18 @@ static int __init mtip_init(void) | |||
4522 | } | 4652 | } |
4523 | mtip_major = error; | 4653 | mtip_major = error; |
4524 | 4654 | ||
4525 | if (!dfs_parent) { | 4655 | dfs_parent = debugfs_create_dir("rssd", NULL); |
4526 | dfs_parent = debugfs_create_dir("rssd", NULL); | 4656 | if (IS_ERR_OR_NULL(dfs_parent)) { |
4527 | if (IS_ERR_OR_NULL(dfs_parent)) { | 4657 | pr_warn("Error creating debugfs parent\n"); |
4528 | pr_warn("Error creating debugfs parent\n"); | 4658 | dfs_parent = NULL; |
4529 | dfs_parent = NULL; | 4659 | } |
4660 | if (dfs_parent) { | ||
4661 | dfs_device_status = debugfs_create_file("device_status", | ||
4662 | S_IRUGO, dfs_parent, NULL, | ||
4663 | &mtip_device_status_fops); | ||
4664 | if (IS_ERR_OR_NULL(dfs_device_status)) { | ||
4665 | pr_err("Error creating device_status node\n"); | ||
4666 | dfs_device_status = NULL; | ||
4530 | } | 4667 | } |
4531 | } | 4668 | } |
4532 | 4669 | ||
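
The reworked mtip_hba_reset() above replaces the fixed 10 ms wait plus 1 s spin with a single polling loop that retries every 10 ms for up to 2 seconds and bails out early when a surprise removal is pending. A minimal user-space sketch of the same bounded-polling shape, using clock_gettime() in place of jiffies and a hypothetical hw_reset_done() predicate (the names are illustrative, not part of the driver):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical hardware predicate: returns true once the reset bit clears. */
static bool hw_reset_done(void)
{
	static int polls;
	return ++polls > 5;	/* pretend the device acks after ~50 ms */
}

static double now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000.0 + ts.tv_nsec / 1e6;
}

/* Poll every 10 ms, give up after timeout_ms; same shape as the driver loop. */
static int wait_for_reset(double timeout_ms)
{
	double deadline = now_ms() + timeout_ms;

	do {
		struct timespec d = { 0, 10 * 1000 * 1000 };

		nanosleep(&d, NULL);		/* analogous to mdelay(10) */
		if (hw_reset_done())
			return 0;
	} while (now_ms() < deadline);

	return -1;				/* reset bit never cleared */
}

int main(void)
{
	printf("reset %s\n", wait_for_reset(2000) ? "timed out" : "ok");
	return 0;
}
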
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h index 3bffff5f670c..8e8334c9dd0f 100644 --- a/drivers/block/mtip32xx/mtip32xx.h +++ b/drivers/block/mtip32xx/mtip32xx.h | |||
@@ -129,9 +129,9 @@ enum { | |||
129 | MTIP_PF_EH_ACTIVE_BIT = 1, /* error handling */ | 129 | MTIP_PF_EH_ACTIVE_BIT = 1, /* error handling */ |
130 | MTIP_PF_SE_ACTIVE_BIT = 2, /* secure erase */ | 130 | MTIP_PF_SE_ACTIVE_BIT = 2, /* secure erase */ |
131 | MTIP_PF_DM_ACTIVE_BIT = 3, /* download microcode */ | 131 | MTIP_PF_DM_ACTIVE_BIT = 3, /* download microcode */ |
132 | MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) | \ | 132 | MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) | |
133 | (1 << MTIP_PF_EH_ACTIVE_BIT) | \ | 133 | (1 << MTIP_PF_EH_ACTIVE_BIT) | |
134 | (1 << MTIP_PF_SE_ACTIVE_BIT) | \ | 134 | (1 << MTIP_PF_SE_ACTIVE_BIT) | |
135 | (1 << MTIP_PF_DM_ACTIVE_BIT)), | 135 | (1 << MTIP_PF_DM_ACTIVE_BIT)), |
136 | 136 | ||
137 | MTIP_PF_SVC_THD_ACTIVE_BIT = 4, | 137 | MTIP_PF_SVC_THD_ACTIVE_BIT = 4, |
@@ -144,9 +144,9 @@ enum { | |||
144 | MTIP_DDF_REMOVE_PENDING_BIT = 1, | 144 | MTIP_DDF_REMOVE_PENDING_BIT = 1, |
145 | MTIP_DDF_OVER_TEMP_BIT = 2, | 145 | MTIP_DDF_OVER_TEMP_BIT = 2, |
146 | MTIP_DDF_WRITE_PROTECT_BIT = 3, | 146 | MTIP_DDF_WRITE_PROTECT_BIT = 3, |
147 | MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \ | 147 | MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | |
148 | (1 << MTIP_DDF_SEC_LOCK_BIT) | \ | 148 | (1 << MTIP_DDF_SEC_LOCK_BIT) | |
149 | (1 << MTIP_DDF_OVER_TEMP_BIT) | \ | 149 | (1 << MTIP_DDF_OVER_TEMP_BIT) | |
150 | (1 << MTIP_DDF_WRITE_PROTECT_BIT)), | 150 | (1 << MTIP_DDF_WRITE_PROTECT_BIT)), |
151 | 151 | ||
152 | MTIP_DDF_CLEANUP_BIT = 5, | 152 | MTIP_DDF_CLEANUP_BIT = 5, |
@@ -180,7 +180,7 @@ struct mtip_work { | |||
180 | 180 | ||
181 | #define MTIP_TRIM_TIMEOUT_MS 240000 | 181 | #define MTIP_TRIM_TIMEOUT_MS 240000 |
182 | #define MTIP_MAX_TRIM_ENTRIES 8 | 182 | #define MTIP_MAX_TRIM_ENTRIES 8 |
183 | #define MTIP_MAX_TRIM_ENTRY_LEN 0xfff8 | 183 | #define MTIP_MAX_TRIM_ENTRY_LEN 0xfff8 |
184 | 184 | ||
185 | struct mtip_trim_entry { | 185 | struct mtip_trim_entry { |
186 | u32 lba; /* starting lba of region */ | 186 | u32 lba; /* starting lba of region */ |
@@ -501,6 +501,10 @@ struct driver_data { | |||
501 | atomic_t irq_workers_active; | 501 | atomic_t irq_workers_active; |
502 | 502 | ||
503 | int isr_binding; | 503 | int isr_binding; |
504 | |||
505 | struct list_head online_list; /* linkage for online list */ | ||
506 | |||
507 | struct list_head remove_list; /* linkage for removing list */ | ||
504 | }; | 508 | }; |
505 | 509 | ||
506 | #endif | 510 | #endif |
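
MTIP_PF_PAUSE_IO and MTIP_DDF_STOP_IO in the header above are composite masks built from the individual bit numbers, so a single AND answers whether any pause/stop condition is active. A small stand-alone illustration of that pattern (generic flag names, not the driver's):

#include <stdio.h>

enum {
	F_IC_ACTIVE_BIT = 0,			/* internal command running */
	F_EH_ACTIVE_BIT = 1,			/* error handling */
	F_SE_ACTIVE_BIT = 2,			/* secure erase */
	F_DM_ACTIVE_BIT = 3,			/* microcode download */
	/* composite mask: any of the four bits means "pause I/O" */
	F_PAUSE_IO	= (1 << F_IC_ACTIVE_BIT) |
			  (1 << F_EH_ACTIVE_BIT) |
			  (1 << F_SE_ACTIVE_BIT) |
			  (1 << F_DM_ACTIVE_BIT),
};

int main(void)
{
	unsigned long flags = 0;

	flags |= 1UL << F_SE_ACTIVE_BIT;	/* e.g. a secure erase starts */

	if (flags & F_PAUSE_IO)
		printf("I/O paused (flags=0x%lx)\n", flags);
	return 0;
}
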
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index f556f8a8b3f9..b7b7a88d9f68 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c | |||
@@ -1742,9 +1742,10 @@ static int rbd_img_request_submit(struct rbd_img_request *img_request) | |||
1742 | struct rbd_device *rbd_dev = img_request->rbd_dev; | 1742 | struct rbd_device *rbd_dev = img_request->rbd_dev; |
1743 | struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; | 1743 | struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; |
1744 | struct rbd_obj_request *obj_request; | 1744 | struct rbd_obj_request *obj_request; |
1745 | struct rbd_obj_request *next_obj_request; | ||
1745 | 1746 | ||
1746 | dout("%s: img %p\n", __func__, img_request); | 1747 | dout("%s: img %p\n", __func__, img_request); |
1747 | for_each_obj_request(img_request, obj_request) { | 1748 | for_each_obj_request_safe(img_request, obj_request, next_obj_request) { |
1748 | int ret; | 1749 | int ret; |
1749 | 1750 | ||
1750 | obj_request->callback = rbd_img_obj_callback; | 1751 | obj_request->callback = rbd_img_obj_callback; |
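
The rbd change switches to for_each_obj_request_safe() because submitting an object request can end up unlinking it from the image request's list; the safe iterator therefore saves the next element before the current one is handed out. The same idea in plain C over a tiny singly linked list (illustrative types, not the rbd ones):

#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

/* Handling an element may unlink and free it, much as submitting an object
 * request may drop it from the image request in the rbd case. */
static void handle_and_unlink(struct node **head, struct node *n)
{
	struct node **pp = head;

	while (*pp && *pp != n)
		pp = &(*pp)->next;
	if (*pp)
		*pp = n->next;
	printf("handled %d\n", n->val);
	free(n);
}

int main(void)
{
	struct node *head = NULL, *n, *next;
	int i;

	for (i = 3; i > 0; i--) {
		n = malloc(sizeof(*n));
		if (!n)
			return 1;
		n->val = i;
		n->next = head;
		head = n;
	}

	/* "safe" walk: remember next before the current node can vanish */
	for (n = head; n; n = next) {
		next = n->next;
		handle_and_unlink(&head, n);
	}
	return 0;
}
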
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index e3f9a99b8522..d784650d14f0 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c | |||
@@ -373,26 +373,14 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma) | |||
373 | struct hpet_dev *devp; | 373 | struct hpet_dev *devp; |
374 | unsigned long addr; | 374 | unsigned long addr; |
375 | 375 | ||
376 | if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff) | ||
377 | return -EINVAL; | ||
378 | |||
379 | devp = file->private_data; | 376 | devp = file->private_data; |
380 | addr = devp->hd_hpets->hp_hpet_phys; | 377 | addr = devp->hd_hpets->hp_hpet_phys; |
381 | 378 | ||
382 | if (addr & (PAGE_SIZE - 1)) | 379 | if (addr & (PAGE_SIZE - 1)) |
383 | return -ENOSYS; | 380 | return -ENOSYS; |
384 | 381 | ||
385 | vma->vm_flags |= VM_IO; | ||
386 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | 382 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); |
387 | 383 | return vm_iomap_memory(vma, addr, PAGE_SIZE); | |
388 | if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, | ||
389 | PAGE_SIZE, vma->vm_page_prot)) { | ||
390 | printk(KERN_ERR "%s: io_remap_pfn_range failed\n", | ||
391 | __func__); | ||
392 | return -EAGAIN; | ||
393 | } | ||
394 | |||
395 | return 0; | ||
396 | #else | 384 | #else |
397 | return -ENOSYS; | 385 | return -ENOSYS; |
398 | #endif | 386 | #endif |
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 69ae5972713c..a0f7724852eb 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c | |||
@@ -380,6 +380,15 @@ void hwrng_unregister(struct hwrng *rng) | |||
380 | } | 380 | } |
381 | EXPORT_SYMBOL_GPL(hwrng_unregister); | 381 | EXPORT_SYMBOL_GPL(hwrng_unregister); |
382 | 382 | ||
383 | static void __exit hwrng_exit(void) | ||
384 | { | ||
385 | mutex_lock(&rng_mutex); | ||
386 | BUG_ON(current_rng); | ||
387 | kfree(rng_buffer); | ||
388 | mutex_unlock(&rng_mutex); | ||
389 | } | ||
390 | |||
391 | module_exit(hwrng_exit); | ||
383 | 392 | ||
384 | MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver"); | 393 | MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver"); |
385 | MODULE_LICENSE("GPL"); | 394 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index e905d5f53051..ce5f3fc25d6d 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c | |||
@@ -149,7 +149,8 @@ struct ports_device { | |||
149 | spinlock_t ports_lock; | 149 | spinlock_t ports_lock; |
150 | 150 | ||
151 | /* To protect the vq operations for the control channel */ | 151 | /* To protect the vq operations for the control channel */ |
152 | spinlock_t cvq_lock; | 152 | spinlock_t c_ivq_lock; |
153 | spinlock_t c_ovq_lock; | ||
153 | 154 | ||
154 | /* The current config space is stored here */ | 155 | /* The current config space is stored here */ |
155 | struct virtio_console_config config; | 156 | struct virtio_console_config config; |
@@ -569,11 +570,14 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id, | |||
569 | vq = portdev->c_ovq; | 570 | vq = portdev->c_ovq; |
570 | 571 | ||
571 | sg_init_one(sg, &cpkt, sizeof(cpkt)); | 572 | sg_init_one(sg, &cpkt, sizeof(cpkt)); |
573 | |||
574 | spin_lock(&portdev->c_ovq_lock); | ||
572 | if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) == 0) { | 575 | if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) == 0) { |
573 | virtqueue_kick(vq); | 576 | virtqueue_kick(vq); |
574 | while (!virtqueue_get_buf(vq, &len)) | 577 | while (!virtqueue_get_buf(vq, &len)) |
575 | cpu_relax(); | 578 | cpu_relax(); |
576 | } | 579 | } |
580 | spin_unlock(&portdev->c_ovq_lock); | ||
577 | return 0; | 581 | return 0; |
578 | } | 582 | } |
579 | 583 | ||
@@ -1436,7 +1440,7 @@ static int add_port(struct ports_device *portdev, u32 id) | |||
1436 | * rproc_serial does not want the console port, only | 1440 | * rproc_serial does not want the console port, only |
1437 | * the generic port implementation. | 1441 | * the generic port implementation. |
1438 | */ | 1442 | */ |
1439 | port->host_connected = port->guest_connected = true; | 1443 | port->host_connected = true; |
1440 | else if (!use_multiport(port->portdev)) { | 1444 | else if (!use_multiport(port->portdev)) { |
1441 | /* | 1445 | /* |
1442 | * If we're not using multiport support, | 1446 | * If we're not using multiport support, |
@@ -1709,23 +1713,23 @@ static void control_work_handler(struct work_struct *work) | |||
1709 | portdev = container_of(work, struct ports_device, control_work); | 1713 | portdev = container_of(work, struct ports_device, control_work); |
1710 | vq = portdev->c_ivq; | 1714 | vq = portdev->c_ivq; |
1711 | 1715 | ||
1712 | spin_lock(&portdev->cvq_lock); | 1716 | spin_lock(&portdev->c_ivq_lock); |
1713 | while ((buf = virtqueue_get_buf(vq, &len))) { | 1717 | while ((buf = virtqueue_get_buf(vq, &len))) { |
1714 | spin_unlock(&portdev->cvq_lock); | 1718 | spin_unlock(&portdev->c_ivq_lock); |
1715 | 1719 | ||
1716 | buf->len = len; | 1720 | buf->len = len; |
1717 | buf->offset = 0; | 1721 | buf->offset = 0; |
1718 | 1722 | ||
1719 | handle_control_message(portdev, buf); | 1723 | handle_control_message(portdev, buf); |
1720 | 1724 | ||
1721 | spin_lock(&portdev->cvq_lock); | 1725 | spin_lock(&portdev->c_ivq_lock); |
1722 | if (add_inbuf(portdev->c_ivq, buf) < 0) { | 1726 | if (add_inbuf(portdev->c_ivq, buf) < 0) { |
1723 | dev_warn(&portdev->vdev->dev, | 1727 | dev_warn(&portdev->vdev->dev, |
1724 | "Error adding buffer to queue\n"); | 1728 | "Error adding buffer to queue\n"); |
1725 | free_buf(buf, false); | 1729 | free_buf(buf, false); |
1726 | } | 1730 | } |
1727 | } | 1731 | } |
1728 | spin_unlock(&portdev->cvq_lock); | 1732 | spin_unlock(&portdev->c_ivq_lock); |
1729 | } | 1733 | } |
1730 | 1734 | ||
1731 | static void out_intr(struct virtqueue *vq) | 1735 | static void out_intr(struct virtqueue *vq) |
@@ -1752,13 +1756,23 @@ static void in_intr(struct virtqueue *vq) | |||
1752 | port->inbuf = get_inbuf(port); | 1756 | port->inbuf = get_inbuf(port); |
1753 | 1757 | ||
1754 | /* | 1758 | /* |
1755 | * Don't queue up data when port is closed. This condition | 1759 | * Normally the port should not accept data when the port is |
1760 | * closed. For generic serial ports, the host won't (shouldn't) | ||
1761 | * send data till the guest is connected. But this condition | ||
1756 | * can be reached when a console port is not yet connected (no | 1762 | * can be reached when a console port is not yet connected (no |
1757 | * tty is spawned) and the host sends out data to console | 1763 | * tty is spawned) and the other side sends out data over the |
1758 | * ports. For generic serial ports, the host won't | 1764 | * vring, or when a remote device starts sending data before |
1759 | * (shouldn't) send data till the guest is connected. | 1765 | * the ports are opened. |
1766 | * | ||
1767 | * A generic serial port will discard data if not connected, | ||
1768 | * while console ports and rproc-serial ports accept data at | ||
1769 | * any time. rproc-serial is initialized with guest_connected to | ||
1770 | * false because port_fops_open expects this. Console ports are | ||
1771 | * hooked up with an HVC console and are initialized with | ||
1772 | * guest_connected to true. | ||
1760 | */ | 1773 | */ |
1761 | if (!port->guest_connected) | 1774 | |
1775 | if (!port->guest_connected && !is_rproc_serial(port->portdev->vdev)) | ||
1762 | discard_port_data(port); | 1776 | discard_port_data(port); |
1763 | 1777 | ||
1764 | spin_unlock_irqrestore(&port->inbuf_lock, flags); | 1778 | spin_unlock_irqrestore(&port->inbuf_lock, flags); |
@@ -1986,10 +2000,12 @@ static int virtcons_probe(struct virtio_device *vdev) | |||
1986 | if (multiport) { | 2000 | if (multiport) { |
1987 | unsigned int nr_added_bufs; | 2001 | unsigned int nr_added_bufs; |
1988 | 2002 | ||
1989 | spin_lock_init(&portdev->cvq_lock); | 2003 | spin_lock_init(&portdev->c_ivq_lock); |
2004 | spin_lock_init(&portdev->c_ovq_lock); | ||
1990 | INIT_WORK(&portdev->control_work, &control_work_handler); | 2005 | INIT_WORK(&portdev->control_work, &control_work_handler); |
1991 | 2006 | ||
1992 | nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock); | 2007 | nr_added_bufs = fill_queue(portdev->c_ivq, |
2008 | &portdev->c_ivq_lock); | ||
1993 | if (!nr_added_bufs) { | 2009 | if (!nr_added_bufs) { |
1994 | dev_err(&vdev->dev, | 2010 | dev_err(&vdev->dev, |
1995 | "Error allocating buffers for control queue\n"); | 2011 | "Error allocating buffers for control queue\n"); |
@@ -2140,7 +2156,7 @@ static int virtcons_restore(struct virtio_device *vdev) | |||
2140 | return ret; | 2156 | return ret; |
2141 | 2157 | ||
2142 | if (use_multiport(portdev)) | 2158 | if (use_multiport(portdev)) |
2143 | fill_queue(portdev->c_ivq, &portdev->cvq_lock); | 2159 | fill_queue(portdev->c_ivq, &portdev->c_ivq_lock); |
2144 | 2160 | ||
2145 | list_for_each_entry(port, &portdev->ports, list) { | 2161 | list_for_each_entry(port, &portdev->ports, list) { |
2146 | port->in_vq = portdev->in_vqs[port->id]; | 2162 | port->in_vq = portdev->in_vqs[port->id]; |
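
Splitting cvq_lock into c_ivq_lock and c_ovq_lock above means the control-queue input work and __send_control_msg() no longer serialize against each other; each control virtqueue gets its own lock. A user-space analog with two independently locked queues, using pthread mutexes as a stand-in for the kernel spinlocks (the names are made up):

#include <pthread.h>
#include <stdio.h>

struct ctrl_queue {
	pthread_mutex_t lock;		/* one lock per queue, as in the patch */
	int pending;
};

static struct ctrl_queue c_ivq = { PTHREAD_MUTEX_INITIALIZER, 0 };
static struct ctrl_queue c_ovq = { PTHREAD_MUTEX_INITIALIZER, 0 };

/* Sending a control message only takes the outbound queue's lock ... */
static void send_control_msg(int msg)
{
	pthread_mutex_lock(&c_ovq.lock);
	c_ovq.pending += msg;
	pthread_mutex_unlock(&c_ovq.lock);
}

/* ... so draining the inbound queue never has to wait for a sender. */
static void control_work_handler(void)
{
	pthread_mutex_lock(&c_ivq.lock);
	while (c_ivq.pending > 0)
		c_ivq.pending--;
	pthread_mutex_unlock(&c_ivq.lock);
}

int main(void)
{
	send_control_msg(1);
	control_work_handler();
	printf("ovq pending: %d, ivq pending: %d\n",
	       c_ovq.pending, c_ivq.pending);
	return 0;
}
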
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c index 1e2de7305362..f873dcefe0de 100644 --- a/drivers/clk/tegra/clk-tegra20.c +++ b/drivers/clk/tegra/clk-tegra20.c | |||
@@ -703,7 +703,7 @@ static void tegra20_pll_init(void) | |||
703 | clks[pll_a_out0] = clk; | 703 | clks[pll_a_out0] = clk; |
704 | 704 | ||
705 | /* PLLE */ | 705 | /* PLLE */ |
706 | clk = tegra_clk_register_plle("pll_e", "pll_ref", clk_base, NULL, | 706 | clk = tegra_clk_register_plle("pll_e", "pll_ref", clk_base, pmc_base, |
707 | 0, 100000000, &pll_e_params, | 707 | 0, 100000000, &pll_e_params, |
708 | 0, pll_e_freq_table, NULL); | 708 | 0, pll_e_freq_table, NULL); |
709 | clk_register_clkdev(clk, "pll_e", NULL); | 709 | clk_register_clkdev(clk, "pll_e", NULL); |
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c index 4e5b7fb8927c..37d23a0f8c56 100644 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ b/drivers/cpufreq/cpufreq-cpu0.c | |||
@@ -178,10 +178,16 @@ static struct cpufreq_driver cpu0_cpufreq_driver = { | |||
178 | 178 | ||
179 | static int cpu0_cpufreq_probe(struct platform_device *pdev) | 179 | static int cpu0_cpufreq_probe(struct platform_device *pdev) |
180 | { | 180 | { |
181 | struct device_node *np; | 181 | struct device_node *np, *parent; |
182 | int ret; | 182 | int ret; |
183 | 183 | ||
184 | for_each_child_of_node(of_find_node_by_path("/cpus"), np) { | 184 | parent = of_find_node_by_path("/cpus"); |
185 | if (!parent) { | ||
186 | pr_err("failed to find OF /cpus\n"); | ||
187 | return -ENOENT; | ||
188 | } | ||
189 | |||
190 | for_each_child_of_node(parent, np) { | ||
185 | if (of_get_property(np, "operating-points", NULL)) | 191 | if (of_get_property(np, "operating-points", NULL)) |
186 | break; | 192 | break; |
187 | } | 193 | } |
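
The cpufreq-cpu0 fix checks the result of of_find_node_by_path("/cpus") before walking its children, since the lookup can fail and the old loop would then have dereferenced NULL. The shape of the fix in stand-alone C, with a hypothetical tree type and lookup standing in for the OF helpers:

#include <stdio.h>

struct dt_node {
	const char *name;
	struct dt_node *child;
	struct dt_node *sibling;
};

/* Hypothetical lookup that can legitimately return NULL. */
static struct dt_node *find_node_by_path(struct dt_node *root, const char *path)
{
	return (root && path[0] == '/') ? root : NULL;
}

int main(void)
{
	struct dt_node cpu0 = { "cpu@0", NULL, NULL };
	struct dt_node cpus = { "cpus", &cpu0, NULL };
	struct dt_node *parent, *np;

	parent = find_node_by_path(&cpus, "/cpus");
	if (!parent) {			/* the check the old code was missing */
		fprintf(stderr, "failed to find /cpus\n");
		return 1;
	}

	for (np = parent->child; np; np = np->sibling)
		printf("child: %s\n", np->name);
	return 0;
}
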
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index 46bde01eee62..cc4bd2f6838a 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h | |||
@@ -14,8 +14,8 @@ | |||
14 | * published by the Free Software Foundation. | 14 | * published by the Free Software Foundation. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #ifndef _CPUFREQ_GOVERNER_H | 17 | #ifndef _CPUFREQ_GOVERNOR_H |
18 | #define _CPUFREQ_GOVERNER_H | 18 | #define _CPUFREQ_GOVERNOR_H |
19 | 19 | ||
20 | #include <linux/cpufreq.h> | 20 | #include <linux/cpufreq.h> |
21 | #include <linux/kobject.h> | 21 | #include <linux/kobject.h> |
@@ -175,4 +175,4 @@ bool need_load_eval(struct cpu_dbs_common_info *cdbs, | |||
175 | unsigned int sampling_rate); | 175 | unsigned int sampling_rate); |
176 | int cpufreq_governor_dbs(struct dbs_data *dbs_data, | 176 | int cpufreq_governor_dbs(struct dbs_data *dbs_data, |
177 | struct cpufreq_policy *policy, unsigned int event); | 177 | struct cpufreq_policy *policy, unsigned int event); |
178 | #endif /* _CPUFREQ_GOVERNER_H */ | 178 | #endif /* _CPUFREQ_GOVERNOR_H */ |
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index ad72922919ed..6133ef5cf671 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
@@ -502,7 +502,6 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu) | |||
502 | 502 | ||
503 | sample_time = cpu->pstate_policy->sample_rate_ms; | 503 | sample_time = cpu->pstate_policy->sample_rate_ms; |
504 | delay = msecs_to_jiffies(sample_time); | 504 | delay = msecs_to_jiffies(sample_time); |
505 | delay -= jiffies % delay; | ||
506 | mod_timer_pinned(&cpu->timer, jiffies + delay); | 505 | mod_timer_pinned(&cpu->timer, jiffies + delay); |
507 | } | 506 | } |
508 | 507 | ||
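
Dropping the "delay -= jiffies % delay" adjustment above means the sampling timer is re-armed a full sample_time after the current tick rather than being rounded down to the next multiple of the period. The arithmetic difference, worked through with example numbers (the jiffies value is made up):

#include <stdio.h>

int main(void)
{
	unsigned long jiffies = 100037;	/* example current tick count */
	unsigned long delay = 10;	/* sample period, in jiffies */

	/* old behaviour: round the expiry down to a multiple of the period */
	unsigned long aligned = delay - (jiffies % delay);	/* 10 - 7 = 3 */

	/* new behaviour: always a full period from now */
	unsigned long fixed = delay;				/* 10 */

	printf("old expiry: %lu + %lu = %lu (a multiple of %lu)\n",
	       jiffies, aligned, jiffies + aligned, delay);
	printf("new expiry: %lu + %lu = %lu\n",
	       jiffies, fixed, jiffies + fixed);
	return 0;
}
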
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index 8bc5fef07e7a..22c9063e0120 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c | |||
@@ -1750,7 +1750,7 @@ static struct platform_driver cryp_driver = { | |||
1750 | .shutdown = ux500_cryp_shutdown, | 1750 | .shutdown = ux500_cryp_shutdown, |
1751 | .driver = { | 1751 | .driver = { |
1752 | .owner = THIS_MODULE, | 1752 | .owner = THIS_MODULE, |
1753 | .name = "cryp1" | 1753 | .name = "cryp1", |
1754 | .pm = &ux500_cryp_pm, | 1754 | .pm = &ux500_cryp_pm, |
1755 | } | 1755 | } |
1756 | }; | 1756 | }; |
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 80b69971cf28..aeaea32bcfda 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
@@ -83,6 +83,7 @@ config INTEL_IOP_ADMA | |||
83 | 83 | ||
84 | config DW_DMAC | 84 | config DW_DMAC |
85 | tristate "Synopsys DesignWare AHB DMA support" | 85 | tristate "Synopsys DesignWare AHB DMA support" |
86 | depends on GENERIC_HARDIRQS | ||
86 | select DMA_ENGINE | 87 | select DMA_ENGINE |
87 | default y if CPU_AT32AP7000 | 88 | default y if CPU_AT32AP7000 |
88 | help | 89 | help |
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 6e13f262139a..88cfc61329d2 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c | |||
@@ -310,8 +310,6 @@ static void atc_complete_all(struct at_dma_chan *atchan) | |||
310 | 310 | ||
311 | dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n"); | 311 | dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n"); |
312 | 312 | ||
313 | BUG_ON(atc_chan_is_enabled(atchan)); | ||
314 | |||
315 | /* | 313 | /* |
316 | * Submit queued descriptors ASAP, i.e. before we go through | 314 | * Submit queued descriptors ASAP, i.e. before we go through |
317 | * the completed ones. | 315 | * the completed ones. |
@@ -368,6 +366,9 @@ static void atc_advance_work(struct at_dma_chan *atchan) | |||
368 | { | 366 | { |
369 | dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n"); | 367 | dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n"); |
370 | 368 | ||
369 | if (atc_chan_is_enabled(atchan)) | ||
370 | return; | ||
371 | |||
371 | if (list_empty(&atchan->active_list) || | 372 | if (list_empty(&atchan->active_list) || |
372 | list_is_singular(&atchan->active_list)) { | 373 | list_is_singular(&atchan->active_list)) { |
373 | atc_complete_all(atchan); | 374 | atc_complete_all(atchan); |
@@ -1078,9 +1079,7 @@ static void atc_issue_pending(struct dma_chan *chan) | |||
1078 | return; | 1079 | return; |
1079 | 1080 | ||
1080 | spin_lock_irqsave(&atchan->lock, flags); | 1081 | spin_lock_irqsave(&atchan->lock, flags); |
1081 | if (!atc_chan_is_enabled(atchan)) { | 1082 | atc_advance_work(atchan); |
1082 | atc_advance_work(atchan); | ||
1083 | } | ||
1084 | spin_unlock_irqrestore(&atchan->lock, flags); | 1083 | spin_unlock_irqrestore(&atchan->lock, flags); |
1085 | } | 1084 | } |
1086 | 1085 | ||
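
The at_hdmac change moves the channel-enabled test out of atc_issue_pending() and the BUG_ON and into atc_advance_work() itself, which now simply returns early while the hardware is still running. Centralising a precondition as an early return, instead of asserting it in one place and re-checking it in every caller, looks like this in miniature (generic names, not the driver's):

#include <stdbool.h>
#include <stdio.h>

struct chan {
	bool enabled;		/* hardware still owns the active list */
	int completed;
};

/* The guard lives here now, so callers need no check of their own. */
static void advance_work(struct chan *c)
{
	if (c->enabled)
		return;		/* nothing safe to do while HW is running */
	c->completed++;
}

static void issue_pending(struct chan *c)
{
	/* previously: if (!c->enabled) advance_work(c);  now simply: */
	advance_work(c);
}

int main(void)
{
	struct chan c = { .enabled = true, .completed = 0 };

	issue_pending(&c);	/* ignored, channel busy */
	c.enabled = false;
	issue_pending(&c);	/* processed */
	printf("completed: %d\n", c.completed);
	return 0;
}
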
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index c4b4fd2acc42..08b43bf37158 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c | |||
@@ -276,12 +276,20 @@ static void omap_dma_issue_pending(struct dma_chan *chan) | |||
276 | 276 | ||
277 | spin_lock_irqsave(&c->vc.lock, flags); | 277 | spin_lock_irqsave(&c->vc.lock, flags); |
278 | if (vchan_issue_pending(&c->vc) && !c->desc) { | 278 | if (vchan_issue_pending(&c->vc) && !c->desc) { |
279 | struct omap_dmadev *d = to_omap_dma_dev(chan->device); | 279 | /* |
280 | spin_lock(&d->lock); | 280 | * c->cyclic is used only by audio and in this case the DMA need |
281 | if (list_empty(&c->node)) | 281 | * to be started without delay. |
282 | list_add_tail(&c->node, &d->pending); | 282 | */ |
283 | spin_unlock(&d->lock); | 283 | if (!c->cyclic) { |
284 | tasklet_schedule(&d->task); | 284 | struct omap_dmadev *d = to_omap_dma_dev(chan->device); |
285 | spin_lock(&d->lock); | ||
286 | if (list_empty(&c->node)) | ||
287 | list_add_tail(&c->node, &d->pending); | ||
288 | spin_unlock(&d->lock); | ||
289 | tasklet_schedule(&d->task); | ||
290 | } else { | ||
291 | omap_dma_start_desc(c); | ||
292 | } | ||
285 | } | 293 | } |
286 | spin_unlock_irqrestore(&c->vc.lock, flags); | 294 | spin_unlock_irqrestore(&c->vc.lock, flags); |
287 | } | 295 | } |
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 718153122759..5dbc5946c4c3 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -2882,7 +2882,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2882 | { | 2882 | { |
2883 | struct dma_pl330_platdata *pdat; | 2883 | struct dma_pl330_platdata *pdat; |
2884 | struct dma_pl330_dmac *pdmac; | 2884 | struct dma_pl330_dmac *pdmac; |
2885 | struct dma_pl330_chan *pch; | 2885 | struct dma_pl330_chan *pch, *_p; |
2886 | struct pl330_info *pi; | 2886 | struct pl330_info *pi; |
2887 | struct dma_device *pd; | 2887 | struct dma_device *pd; |
2888 | struct resource *res; | 2888 | struct resource *res; |
@@ -2984,7 +2984,16 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2984 | ret = dma_async_device_register(pd); | 2984 | ret = dma_async_device_register(pd); |
2985 | if (ret) { | 2985 | if (ret) { |
2986 | dev_err(&adev->dev, "unable to register DMAC\n"); | 2986 | dev_err(&adev->dev, "unable to register DMAC\n"); |
2987 | goto probe_err2; | 2987 | goto probe_err3; |
2988 | } | ||
2989 | |||
2990 | if (adev->dev.of_node) { | ||
2991 | ret = of_dma_controller_register(adev->dev.of_node, | ||
2992 | of_dma_pl330_xlate, pdmac); | ||
2993 | if (ret) { | ||
2994 | dev_err(&adev->dev, | ||
2995 | "unable to register DMA to the generic DT DMA helpers\n"); | ||
2996 | } | ||
2988 | } | 2997 | } |
2989 | 2998 | ||
2990 | dev_info(&adev->dev, | 2999 | dev_info(&adev->dev, |
@@ -2995,16 +3004,21 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2995 | pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan, | 3004 | pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan, |
2996 | pi->pcfg.num_peri, pi->pcfg.num_events); | 3005 | pi->pcfg.num_peri, pi->pcfg.num_events); |
2997 | 3006 | ||
2998 | ret = of_dma_controller_register(adev->dev.of_node, | ||
2999 | of_dma_pl330_xlate, pdmac); | ||
3000 | if (ret) { | ||
3001 | dev_err(&adev->dev, | ||
3002 | "unable to register DMA to the generic DT DMA helpers\n"); | ||
3003 | goto probe_err2; | ||
3004 | } | ||
3005 | |||
3006 | return 0; | 3007 | return 0; |
3008 | probe_err3: | ||
3009 | amba_set_drvdata(adev, NULL); | ||
3007 | 3010 | ||
3011 | /* Idle the DMAC */ | ||
3012 | list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels, | ||
3013 | chan.device_node) { | ||
3014 | |||
3015 | /* Remove the channel */ | ||
3016 | list_del(&pch->chan.device_node); | ||
3017 | |||
3018 | /* Flush the channel */ | ||
3019 | pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0); | ||
3020 | pl330_free_chan_resources(&pch->chan); | ||
3021 | } | ||
3008 | probe_err2: | 3022 | probe_err2: |
3009 | pl330_del(pi); | 3023 | pl330_del(pi); |
3010 | probe_err1: | 3024 | probe_err1: |
@@ -3023,8 +3037,10 @@ static int pl330_remove(struct amba_device *adev) | |||
3023 | if (!pdmac) | 3037 | if (!pdmac) |
3024 | return 0; | 3038 | return 0; |
3025 | 3039 | ||
3026 | of_dma_controller_free(adev->dev.of_node); | 3040 | if (adev->dev.of_node) |
3041 | of_dma_controller_free(adev->dev.of_node); | ||
3027 | 3042 | ||
3043 | dma_async_device_unregister(&pdmac->ddma); | ||
3028 | amba_set_drvdata(adev, NULL); | 3044 | amba_set_drvdata(adev, NULL); |
3029 | 3045 | ||
3030 | /* Idle the DMAC */ | 3046 | /* Idle the DMAC */ |
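
The new probe_err3 label above tears down the registered channels before falling through to the older cleanup labels, keeping the usual kernel pattern of one goto label per setup stage, unwound in reverse order. A compact stand-alone illustration of that error-unwind shape (the "resources" here are just malloc'd buffers):

#include <stdio.h>
#include <stdlib.h>

static int probe(void)
{
	char *regs, *chans, *dma;

	regs = malloc(64);
	if (!regs)
		goto err_regs;

	chans = malloc(64);
	if (!chans)
		goto err_chans;

	dma = malloc(64);
	if (!dma)
		goto err_dma;	/* stand-in for a late registration failure */

	/* success: a real probe keeps these; freed here only to avoid leaks */
	free(dma);
	free(chans);
	free(regs);
	return 0;

err_dma:	/* newest stage first ... */
	free(chans);
err_chans:	/* ... then the earlier stages, in reverse order */
	free(regs);
err_regs:
	return -1;
}

int main(void)
{
	printf("probe: %d\n", probe());
	return 0;
}
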
diff --git a/drivers/eisa/pci_eisa.c b/drivers/eisa/pci_eisa.c index cdae207028a7..6c3fca97d346 100644 --- a/drivers/eisa/pci_eisa.c +++ b/drivers/eisa/pci_eisa.c | |||
@@ -19,10 +19,10 @@ | |||
19 | /* There is only *one* pci_eisa device per machine, right ? */ | 19 | /* There is only *one* pci_eisa device per machine, right ? */ |
20 | static struct eisa_root_device pci_eisa_root; | 20 | static struct eisa_root_device pci_eisa_root; |
21 | 21 | ||
22 | static int __init pci_eisa_init(struct pci_dev *pdev, | 22 | static int __init pci_eisa_init(struct pci_dev *pdev) |
23 | const struct pci_device_id *ent) | ||
24 | { | 23 | { |
25 | int rc; | 24 | int rc, i; |
25 | struct resource *res, *bus_res = NULL; | ||
26 | 26 | ||
27 | if ((rc = pci_enable_device (pdev))) { | 27 | if ((rc = pci_enable_device (pdev))) { |
28 | printk (KERN_ERR "pci_eisa : Could not enable device %s\n", | 28 | printk (KERN_ERR "pci_eisa : Could not enable device %s\n", |
@@ -30,9 +30,30 @@ static int __init pci_eisa_init(struct pci_dev *pdev, | |||
30 | return rc; | 30 | return rc; |
31 | } | 31 | } |
32 | 32 | ||
33 | /* | ||
34 | * The Intel 82375 PCI-EISA bridge is a subtractive-decode PCI | ||
35 | * device, so the resources available on EISA are the same as those | ||
36 | * available on the 82375 bus. This works the same as a PCI-PCI | ||
37 | * bridge in subtractive-decode mode (see pci_read_bridge_bases()). | ||
38 | * We assume other PCI-EISA bridges are similar. | ||
39 | * | ||
40 | * eisa_root_register() can only deal with a single io port resource, | ||
41 | * so we use the first valid io port resource. | ||
42 | */ | ||
43 | pci_bus_for_each_resource(pdev->bus, res, i) | ||
44 | if (res && (res->flags & IORESOURCE_IO)) { | ||
45 | bus_res = res; | ||
46 | break; | ||
47 | } | ||
48 | |||
49 | if (!bus_res) { | ||
50 | dev_err(&pdev->dev, "No resources available\n"); | ||
51 | return -1; | ||
52 | } | ||
53 | |||
33 | pci_eisa_root.dev = &pdev->dev; | 54 | pci_eisa_root.dev = &pdev->dev; |
34 | pci_eisa_root.res = pdev->bus->resource[0]; | 55 | pci_eisa_root.res = bus_res; |
35 | pci_eisa_root.bus_base_addr = pdev->bus->resource[0]->start; | 56 | pci_eisa_root.bus_base_addr = bus_res->start; |
36 | pci_eisa_root.slots = EISA_MAX_SLOTS; | 57 | pci_eisa_root.slots = EISA_MAX_SLOTS; |
37 | pci_eisa_root.dma_mask = pdev->dma_mask; | 58 | pci_eisa_root.dma_mask = pdev->dma_mask; |
38 | dev_set_drvdata(pci_eisa_root.dev, &pci_eisa_root); | 59 | dev_set_drvdata(pci_eisa_root.dev, &pci_eisa_root); |
@@ -45,22 +66,26 @@ static int __init pci_eisa_init(struct pci_dev *pdev, | |||
45 | return 0; | 66 | return 0; |
46 | } | 67 | } |
47 | 68 | ||
48 | static struct pci_device_id pci_eisa_pci_tbl[] = { | 69 | /* |
49 | { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, | 70 | * We have to call pci_eisa_init_early() before pnpacpi_init()/isapnp_init(). |
50 | PCI_CLASS_BRIDGE_EISA << 8, 0xffff00, 0 }, | 71 | * Otherwise the pnp resources will get enabled early and could prevent eisa |
51 | { 0, } | 72 | * from being initialized. |
52 | }; | 73 | * Also need to make sure pci_eisa_init_early() is called after |
74 | * x86/pci_subsys_init(). | ||
75 | * So we need to use subsys_initcall_sync() for it. | ||
76 | */ | ||
77 | static int __init pci_eisa_init_early(void) | ||
78 | { | ||
79 | struct pci_dev *dev = NULL; | ||
80 | int ret; | ||
53 | 81 | ||
54 | static struct pci_driver __refdata pci_eisa_driver = { | 82 | for_each_pci_dev(dev) |
55 | .name = "pci_eisa", | 83 | if ((dev->class >> 8) == PCI_CLASS_BRIDGE_EISA) { |
56 | .id_table = pci_eisa_pci_tbl, | 84 | ret = pci_eisa_init(dev); |
57 | .probe = pci_eisa_init, | 85 | if (ret) |
58 | }; | 86 | return ret; |
87 | } | ||
59 | 88 | ||
60 | static int __init pci_eisa_init_module (void) | 89 | return 0; |
61 | { | ||
62 | return pci_register_driver (&pci_eisa_driver); | ||
63 | } | 90 | } |
64 | 91 | subsys_initcall_sync(pci_eisa_init_early); | |
65 | device_initcall(pci_eisa_init_module); | ||
66 | MODULE_DEVICE_TABLE(pci, pci_eisa_pci_tbl); | ||
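
pci_eisa_init() now scans every bus resource of the bridge and uses the first I/O port window rather than assuming resource[0] is it. The selection logic is a first-match scan over a flagged array, sketched here with a toy resource type (the flag values are illustrative, not the kernel's IORESOURCE_* constants):

#include <stdio.h>

#define RES_IO	0x0100		/* stand-in for IORESOURCE_IO */
#define RES_MEM	0x0200		/* stand-in for IORESOURCE_MEM */

struct resource {
	unsigned long start;
	unsigned long flags;
};

/* Return the first resource carrying the I/O flag, or NULL. */
static struct resource *first_io_res(struct resource *res, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (res[i].flags & RES_IO)
			return &res[i];
	return NULL;
}

int main(void)
{
	struct resource bus_res[] = {
		{ 0xfed00000UL, RES_MEM },	/* resource[0] is a memory window ... */
		{ 0x1000UL,     RES_IO  },	/* ... the I/O window comes later */
	};
	struct resource *io = first_io_res(bus_res, 2);

	if (!io) {
		fprintf(stderr, "No resources available\n");
		return 1;
	}
	printf("EISA bus base: 0x%lx\n", io->start);
	return 0;
}
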
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c index 47ae712c9504..221ad1bf94de 100644 --- a/drivers/firmware/efi/efi-pstore.c +++ b/drivers/firmware/efi/efi-pstore.c | |||
@@ -1,6 +1,7 @@ | |||
1 | #include <linux/efi.h> | 1 | #include <linux/efi.h> |
2 | #include <linux/module.h> | 2 | #include <linux/module.h> |
3 | #include <linux/pstore.h> | 3 | #include <linux/pstore.h> |
4 | #include <linux/ucs2_string.h> | ||
4 | 5 | ||
5 | #define DUMP_NAME_LEN 52 | 6 | #define DUMP_NAME_LEN 52 |
6 | 7 | ||
@@ -140,15 +141,15 @@ static int efi_pstore_erase_func(struct efivar_entry *entry, void *data) | |||
140 | efi_guid_t vendor = LINUX_EFI_CRASH_GUID; | 141 | efi_guid_t vendor = LINUX_EFI_CRASH_GUID; |
141 | efi_char16_t efi_name_old[DUMP_NAME_LEN]; | 142 | efi_char16_t efi_name_old[DUMP_NAME_LEN]; |
142 | efi_char16_t *efi_name = ed->name; | 143 | efi_char16_t *efi_name = ed->name; |
143 | unsigned long utf16_len = utf16_strlen(ed->name); | 144 | unsigned long ucs2_len = ucs2_strlen(ed->name); |
144 | char name_old[DUMP_NAME_LEN]; | 145 | char name_old[DUMP_NAME_LEN]; |
145 | int i; | 146 | int i; |
146 | 147 | ||
147 | if (efi_guidcmp(entry->var.VendorGuid, vendor)) | 148 | if (efi_guidcmp(entry->var.VendorGuid, vendor)) |
148 | return 0; | 149 | return 0; |
149 | 150 | ||
150 | if (utf16_strncmp(entry->var.VariableName, | 151 | if (ucs2_strncmp(entry->var.VariableName, |
151 | efi_name, (size_t)utf16_len)) { | 152 | efi_name, (size_t)ucs2_len)) { |
152 | /* | 153 | /* |
153 | * Check if an old format, which doesn't support | 154 | * Check if an old format, which doesn't support |
154 | * holding multiple logs, remains. | 155 | * holding multiple logs, remains. |
@@ -159,8 +160,8 @@ static int efi_pstore_erase_func(struct efivar_entry *entry, void *data) | |||
159 | for (i = 0; i < DUMP_NAME_LEN; i++) | 160 | for (i = 0; i < DUMP_NAME_LEN; i++) |
160 | efi_name_old[i] = name_old[i]; | 161 | efi_name_old[i] = name_old[i]; |
161 | 162 | ||
162 | if (utf16_strncmp(entry->var.VariableName, efi_name_old, | 163 | if (ucs2_strncmp(entry->var.VariableName, efi_name_old, |
163 | utf16_strlen(efi_name_old))) | 164 | ucs2_strlen(efi_name_old))) |
164 | return 0; | 165 | return 0; |
165 | } | 166 | } |
166 | 167 | ||
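
The efi-pstore hunks above (and the efivars/vars hunks that follow) replace the private utf16_* helpers with the shared ucs2_string ones: UEFI variable names are NUL-terminated arrays of 16-bit UCS-2 code units, and the helpers are the familiar string routines over that element type. Roughly, as a user-space sketch of their semantics (not the kernel's lib/ucs2_string.c source; ucs2_strsize here excludes the terminating NUL):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint16_t ucs2_char_t;

static size_t ucs2_strnlen(const ucs2_char_t *s, size_t maxlen)
{
	size_t len = 0;

	while (len < maxlen && s[len])
		len++;
	return len;
}

static size_t ucs2_strlen(const ucs2_char_t *s)
{
	return ucs2_strnlen(s, (size_t)-1);
}

/* Size of the string in bytes, capped at maxbytes, excluding the NUL. */
static size_t ucs2_strsize(const ucs2_char_t *s, size_t maxbytes)
{
	return ucs2_strnlen(s, maxbytes / sizeof(ucs2_char_t)) *
	       sizeof(ucs2_char_t);
}

static int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t n)
{
	for (; n; n--, a++, b++) {
		if (*a != *b)
			return *a < *b ? -1 : 1;
		if (!*a)
			return 0;
	}
	return 0;
}

int main(void)
{
	ucs2_char_t name[] = { 'B', 'o', 'o', 't', '0', '0', '0', '0', 0 };

	printf("len=%zu, size=%zu bytes\n",
	       ucs2_strlen(name), ucs2_strsize(name, 1024));
	printf("self-compare: %d\n",
	       ucs2_strncmp(name, name, ucs2_strlen(name)));
	return 0;
}
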
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 32bdf4f8e432..5145fa344ad5 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c | |||
@@ -72,7 +72,7 @@ static int generic_ops_register(void) | |||
72 | generic_ops.get_variable = efi.get_variable; | 72 | generic_ops.get_variable = efi.get_variable; |
73 | generic_ops.set_variable = efi.set_variable; | 73 | generic_ops.set_variable = efi.set_variable; |
74 | generic_ops.get_next_variable = efi.get_next_variable; | 74 | generic_ops.get_next_variable = efi.get_next_variable; |
75 | generic_ops.query_variable_info = efi.query_variable_info; | 75 | generic_ops.query_variable_store = efi_query_variable_store; |
76 | 76 | ||
77 | return efivars_register(&generic_efivars, &generic_ops, efi_kobj); | 77 | return efivars_register(&generic_efivars, &generic_ops, efi_kobj); |
78 | } | 78 | } |
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c index 70635b3b59d3..f8f5e5d9e020 100644 --- a/drivers/firmware/efi/efivars.c +++ b/drivers/firmware/efi/efivars.c | |||
@@ -67,6 +67,7 @@ | |||
67 | 67 | ||
68 | #include <linux/efi.h> | 68 | #include <linux/efi.h> |
69 | #include <linux/module.h> | 69 | #include <linux/module.h> |
70 | #include <linux/ucs2_string.h> | ||
70 | 71 | ||
71 | #define EFIVARS_VERSION "0.08" | 72 | #define EFIVARS_VERSION "0.08" |
72 | #define EFIVARS_DATE "2004-May-17" | 73 | #define EFIVARS_DATE "2004-May-17" |
@@ -407,7 +408,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var) | |||
407 | efi_char16_t *variable_name; | 408 | efi_char16_t *variable_name; |
408 | 409 | ||
409 | variable_name = new_var->var.VariableName; | 410 | variable_name = new_var->var.VariableName; |
410 | variable_name_size = utf16_strlen(variable_name) * sizeof(efi_char16_t); | 411 | variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t); |
411 | 412 | ||
412 | /* | 413 | /* |
413 | * Length of the variable bytes in ASCII, plus the '-' separator, | 414 | * Length of the variable bytes in ASCII, plus the '-' separator, |
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c index dd1c20a426fa..1d80c1ca39c5 100644 --- a/drivers/firmware/efi/vars.c +++ b/drivers/firmware/efi/vars.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/device.h> | 32 | #include <linux/device.h> |
33 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
34 | #include <linux/ctype.h> | 34 | #include <linux/ctype.h> |
35 | #include <linux/ucs2_string.h> | ||
35 | 36 | ||
36 | /* Private pointer to registered efivars */ | 37 | /* Private pointer to registered efivars */ |
37 | static struct efivars *__efivars; | 38 | static struct efivars *__efivars; |
@@ -91,7 +92,7 @@ validate_load_option(struct efi_variable *var, int match, u8 *buffer, | |||
91 | u16 filepathlength; | 92 | u16 filepathlength; |
92 | int i, desclength = 0, namelen; | 93 | int i, desclength = 0, namelen; |
93 | 94 | ||
94 | namelen = utf16_strnlen(var->VariableName, sizeof(var->VariableName)); | 95 | namelen = ucs2_strnlen(var->VariableName, sizeof(var->VariableName)); |
95 | 96 | ||
96 | /* Either "Boot" or "Driver" followed by four digits of hex */ | 97 | /* Either "Boot" or "Driver" followed by four digits of hex */ |
97 | for (i = match; i < match+4; i++) { | 98 | for (i = match; i < match+4; i++) { |
@@ -114,7 +115,7 @@ validate_load_option(struct efi_variable *var, int match, u8 *buffer, | |||
114 | * There's no stored length for the description, so it has to be | 115 | * There's no stored length for the description, so it has to be |
115 | * found by hand | 116 | * found by hand |
116 | */ | 117 | */ |
117 | desclength = utf16_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2; | 118 | desclength = ucs2_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2; |
118 | 119 | ||
119 | /* Each boot entry must have a descriptor */ | 120 | /* Each boot entry must have a descriptor */ |
120 | if (!desclength) | 121 | if (!desclength) |
@@ -228,24 +229,12 @@ EXPORT_SYMBOL_GPL(efivar_validate); | |||
228 | static efi_status_t | 229 | static efi_status_t |
229 | check_var_size(u32 attributes, unsigned long size) | 230 | check_var_size(u32 attributes, unsigned long size) |
230 | { | 231 | { |
231 | u64 storage_size, remaining_size, max_size; | ||
232 | efi_status_t status; | ||
233 | const struct efivar_operations *fops = __efivars->ops; | 232 | const struct efivar_operations *fops = __efivars->ops; |
234 | 233 | ||
235 | if (!fops->query_variable_info) | 234 | if (!fops->query_variable_store) |
236 | return EFI_UNSUPPORTED; | 235 | return EFI_UNSUPPORTED; |
237 | 236 | ||
238 | status = fops->query_variable_info(attributes, &storage_size, | 237 | return fops->query_variable_store(attributes, size); |
239 | &remaining_size, &max_size); | ||
240 | |||
241 | if (status != EFI_SUCCESS) | ||
242 | return status; | ||
243 | |||
244 | if (!storage_size || size > remaining_size || size > max_size || | ||
245 | (remaining_size - size) < (storage_size / 2)) | ||
246 | return EFI_OUT_OF_RESOURCES; | ||
247 | |||
248 | return status; | ||
249 | } | 238 | } |
250 | 239 | ||
251 | static int efi_status_to_err(efi_status_t status) | 240 | static int efi_status_to_err(efi_status_t status) |
@@ -288,9 +277,9 @@ static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor, | |||
288 | unsigned long strsize1, strsize2; | 277 | unsigned long strsize1, strsize2; |
289 | bool found = false; | 278 | bool found = false; |
290 | 279 | ||
291 | strsize1 = utf16_strsize(variable_name, 1024); | 280 | strsize1 = ucs2_strsize(variable_name, 1024); |
292 | list_for_each_entry_safe(entry, n, head, list) { | 281 | list_for_each_entry_safe(entry, n, head, list) { |
293 | strsize2 = utf16_strsize(entry->var.VariableName, 1024); | 282 | strsize2 = ucs2_strsize(entry->var.VariableName, 1024); |
294 | if (strsize1 == strsize2 && | 283 | if (strsize1 == strsize2 && |
295 | !memcmp(variable_name, &(entry->var.VariableName), | 284 | !memcmp(variable_name, &(entry->var.VariableName), |
296 | strsize2) && | 285 | strsize2) && |
@@ -594,7 +583,7 @@ int efivar_entry_set(struct efivar_entry *entry, u32 attributes, | |||
594 | return -EEXIST; | 583 | return -EEXIST; |
595 | } | 584 | } |
596 | 585 | ||
597 | status = check_var_size(attributes, size + utf16_strsize(name, 1024)); | 586 | status = check_var_size(attributes, size + ucs2_strsize(name, 1024)); |
598 | if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED) | 587 | if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED) |
599 | status = ops->set_variable(name, &vendor, | 588 | status = ops->set_variable(name, &vendor, |
600 | attributes, size, data); | 589 | attributes, size, data); |
@@ -630,7 +619,7 @@ int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes, | |||
630 | unsigned long flags; | 619 | unsigned long flags; |
631 | efi_status_t status; | 620 | efi_status_t status; |
632 | 621 | ||
633 | if (!ops->query_variable_info) | 622 | if (!ops->query_variable_store) |
634 | return -ENOSYS; | 623 | return -ENOSYS; |
635 | 624 | ||
636 | if (!block && spin_trylock_irqsave(&__efivars->lock, flags)) | 625 | if (!block && spin_trylock_irqsave(&__efivars->lock, flags)) |
@@ -638,7 +627,7 @@ int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes, | |||
638 | else | 627 | else |
639 | spin_lock_irqsave(&__efivars->lock, flags); | 628 | spin_lock_irqsave(&__efivars->lock, flags); |
640 | 629 | ||
641 | status = check_var_size(attributes, size + utf16_strsize(name, 1024)); | 630 | status = check_var_size(attributes, size + ucs2_strsize(name, 1024)); |
642 | if (status != EFI_SUCCESS) { | 631 | if (status != EFI_SUCCESS) { |
643 | spin_unlock_irqrestore(&__efivars->lock, flags); | 632 | spin_unlock_irqrestore(&__efivars->lock, flags); |
644 | return -ENOSPC; | 633 | return -ENOSPC; |
@@ -679,8 +668,8 @@ struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid, | |||
679 | WARN_ON(!spin_is_locked(&__efivars->lock)); | 668 | WARN_ON(!spin_is_locked(&__efivars->lock)); |
680 | 669 | ||
681 | list_for_each_entry_safe(entry, n, head, list) { | 670 | list_for_each_entry_safe(entry, n, head, list) { |
682 | strsize1 = utf16_strsize(name, 1024); | 671 | strsize1 = ucs2_strsize(name, 1024); |
683 | strsize2 = utf16_strsize(entry->var.VariableName, 1024); | 672 | strsize2 = ucs2_strsize(entry->var.VariableName, 1024); |
684 | if (strsize1 == strsize2 && | 673 | if (strsize1 == strsize2 && |
685 | !memcmp(name, &(entry->var.VariableName), strsize1) && | 674 | !memcmp(name, &(entry->var.VariableName), strsize1) && |
686 | !efi_guidcmp(guid, entry->var.VendorGuid)) { | 675 | !efi_guidcmp(guid, entry->var.VendorGuid)) { |
@@ -818,7 +807,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes, | |||
818 | /* | 807 | /* |
819 | * Ensure that the available space hasn't shrunk below the safe level | 808 | * Ensure that the available space hasn't shrunk below the safe level |
820 | */ | 809 | */ |
821 | status = check_var_size(attributes, *size + utf16_strsize(name, 1024)); | 810 | status = check_var_size(attributes, *size + ucs2_strsize(name, 1024)); |
822 | if (status != EFI_SUCCESS) { | 811 | if (status != EFI_SUCCESS) { |
823 | if (status != EFI_UNSUPPORTED) { | 812 | if (status != EFI_UNSUPPORTED) { |
824 | err = efi_status_to_err(status); | 813 | err = efi_status_to_err(status); |
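Note: the efivars hunks above drop the open-coded free-space heuristic from check_var_size() and delegate it to the new ->query_variable_store() hook. As a hedged, standalone restatement in plain C (illustrative names only, not kernel code), the deleted check amounted to:

#include <stdbool.h>
#include <stdint.h>

/* Would a write of 'request' bytes still leave at least half of the
 * variable store free?  Mirrors the logic removed from check_var_size(). */
static bool efi_var_write_would_fit(uint64_t storage_size,
                                    uint64_t remaining_size,
                                    uint64_t max_size, uint64_t request)
{
        if (!storage_size || request > remaining_size || request > max_size)
                return false;
        return (remaining_size - request) >= (storage_size / 2);
}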
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c index 757b2d92d5b0..acba0b9f4406 100644 --- a/drivers/firmware/google/gsmi.c +++ b/drivers/firmware/google/gsmi.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/reboot.h> | 28 | #include <linux/reboot.h> |
29 | #include <linux/efi.h> | 29 | #include <linux/efi.h> |
30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
31 | #include <linux/ucs2_string.h> | ||
31 | 32 | ||
32 | #define GSMI_SHUTDOWN_CLEAN 0 /* Clean Shutdown */ | 33 | #define GSMI_SHUTDOWN_CLEAN 0 /* Clean Shutdown */ |
33 | /* TODO(mikew@google.com): Tie in HARDLOCKUP_DETECTOR with NMIWDT */ | 34 | /* TODO(mikew@google.com): Tie in HARDLOCKUP_DETECTOR with NMIWDT */ |
@@ -300,7 +301,7 @@ static efi_status_t gsmi_get_variable(efi_char16_t *name, | |||
300 | }; | 301 | }; |
301 | efi_status_t ret = EFI_SUCCESS; | 302 | efi_status_t ret = EFI_SUCCESS; |
302 | unsigned long flags; | 303 | unsigned long flags; |
303 | size_t name_len = utf16_strnlen(name, GSMI_BUF_SIZE / 2); | 304 | size_t name_len = ucs2_strnlen(name, GSMI_BUF_SIZE / 2); |
304 | int rc; | 305 | int rc; |
305 | 306 | ||
306 | if (name_len >= GSMI_BUF_SIZE / 2) | 307 | if (name_len >= GSMI_BUF_SIZE / 2) |
@@ -369,7 +370,7 @@ static efi_status_t gsmi_get_next_variable(unsigned long *name_size, | |||
369 | return EFI_BAD_BUFFER_SIZE; | 370 | return EFI_BAD_BUFFER_SIZE; |
370 | 371 | ||
371 | /* Let's make sure the thing is at least null-terminated */ | 372 | /* Let's make sure the thing is at least null-terminated */ |
372 | if (utf16_strnlen(name, GSMI_BUF_SIZE / 2) == GSMI_BUF_SIZE / 2) | 373 | if (ucs2_strnlen(name, GSMI_BUF_SIZE / 2) == GSMI_BUF_SIZE / 2) |
373 | return EFI_INVALID_PARAMETER; | 374 | return EFI_INVALID_PARAMETER; |
374 | 375 | ||
375 | spin_lock_irqsave(&gsmi_dev.lock, flags); | 376 | spin_lock_irqsave(&gsmi_dev.lock, flags); |
@@ -397,7 +398,7 @@ static efi_status_t gsmi_get_next_variable(unsigned long *name_size, | |||
397 | 398 | ||
398 | /* Copy the name back */ | 399 | /* Copy the name back */ |
399 | memcpy(name, gsmi_dev.name_buf->start, GSMI_BUF_SIZE); | 400 | memcpy(name, gsmi_dev.name_buf->start, GSMI_BUF_SIZE); |
400 | *name_size = utf16_strnlen(name, GSMI_BUF_SIZE / 2) * 2; | 401 | *name_size = ucs2_strnlen(name, GSMI_BUF_SIZE / 2) * 2; |
401 | 402 | ||
402 | /* copy guid to return buffer */ | 403 | /* copy guid to return buffer */ |
403 | memcpy(vendor, &param.guid, sizeof(param.guid)); | 404 | memcpy(vendor, &param.guid, sizeof(param.guid)); |
@@ -423,7 +424,7 @@ static efi_status_t gsmi_set_variable(efi_char16_t *name, | |||
423 | EFI_VARIABLE_BOOTSERVICE_ACCESS | | 424 | EFI_VARIABLE_BOOTSERVICE_ACCESS | |
424 | EFI_VARIABLE_RUNTIME_ACCESS, | 425 | EFI_VARIABLE_RUNTIME_ACCESS, |
425 | }; | 426 | }; |
426 | size_t name_len = utf16_strnlen(name, GSMI_BUF_SIZE / 2); | 427 | size_t name_len = ucs2_strnlen(name, GSMI_BUF_SIZE / 2); |
427 | efi_status_t ret = EFI_SUCCESS; | 428 | efi_status_t ret = EFI_SUCCESS; |
428 | int rc; | 429 | int rc; |
429 | unsigned long flags; | 430 | unsigned long flags; |
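Note: the utf16_*() to ucs2_*() renames in the efivars and gsmi hunks are mechanical; the helpers now come from <linux/ucs2_string.h>. The length functions count 16-bit characters rather than bytes, which is why the code above multiplies by 2 when filling *name_size. A hedged user-space sketch of that counting behaviour (illustrative only, not the library implementation):

#include <stddef.h>
#include <stdint.h>

typedef uint16_t efi_char16_t;

/* Count UCS-2 characters up to maxlen, stopping at a NUL character. */
static size_t ucs2_strnlen_sketch(const efi_char16_t *s, size_t maxlen)
{
        size_t len = 0;
        while (len < maxlen && s[len])
                len++;
        return len;             /* in characters; bytes = len * 2 */
}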
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c index f9dbd503fc40..de3c317bd3e2 100644 --- a/drivers/gpio/gpio-ich.c +++ b/drivers/gpio/gpio-ich.c | |||
@@ -214,7 +214,7 @@ static int ichx_gpio_request(struct gpio_chip *chip, unsigned nr) | |||
214 | * If it can't be trusted, assume that the pin can be used as a GPIO. | 214 | * If it can't be trusted, assume that the pin can be used as a GPIO. |
215 | */ | 215 | */ |
216 | if (ichx_priv.desc->use_sel_ignore[nr / 32] & (1 << (nr & 0x1f))) | 216 | if (ichx_priv.desc->use_sel_ignore[nr / 32] & (1 << (nr & 0x1f))) |
217 | return 1; | 217 | return 0; |
218 | 218 | ||
219 | return ichx_read_bit(GPIO_USE_SEL, nr) ? 0 : -ENODEV; | 219 | return ichx_read_bit(GPIO_USE_SEL, nr) ? 0 : -ENODEV; |
220 | } | 220 | } |
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 24059462c87f..9391cf16e990 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c | |||
@@ -575,7 +575,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, | |||
575 | chip->gpio_chip.ngpio, | 575 | chip->gpio_chip.ngpio, |
576 | irq_base, | 576 | irq_base, |
577 | &pca953x_irq_simple_ops, | 577 | &pca953x_irq_simple_ops, |
578 | NULL); | 578 | chip); |
579 | if (!chip->domain) | 579 | if (!chip->domain) |
580 | return -ENODEV; | 580 | return -ENODEV; |
581 | 581 | ||
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c index 9cc108d2b770..8325f580c0f1 100644 --- a/drivers/gpio/gpio-pxa.c +++ b/drivers/gpio/gpio-pxa.c | |||
@@ -642,7 +642,12 @@ static struct platform_driver pxa_gpio_driver = { | |||
642 | .of_match_table = of_match_ptr(pxa_gpio_dt_ids), | 642 | .of_match_table = of_match_ptr(pxa_gpio_dt_ids), |
643 | }, | 643 | }, |
644 | }; | 644 | }; |
645 | module_platform_driver(pxa_gpio_driver); | 645 | |
646 | static int __init pxa_gpio_init(void) | ||
647 | { | ||
648 | return platform_driver_register(&pxa_gpio_driver); | ||
649 | } | ||
650 | postcore_initcall(pxa_gpio_init); | ||
646 | 651 | ||
647 | #ifdef CONFIG_PM | 652 | #ifdef CONFIG_PM |
648 | static int pxa_gpio_suspend(void) | 653 | static int pxa_gpio_suspend(void) |
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c index 770476a9da87..3ce5bc38ac31 100644 --- a/drivers/gpio/gpio-stmpe.c +++ b/drivers/gpio/gpio-stmpe.c | |||
@@ -307,11 +307,15 @@ static const struct irq_domain_ops stmpe_gpio_irq_simple_ops = { | |||
307 | .xlate = irq_domain_xlate_twocell, | 307 | .xlate = irq_domain_xlate_twocell, |
308 | }; | 308 | }; |
309 | 309 | ||
310 | static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio) | 310 | static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio, |
311 | struct device_node *np) | ||
311 | { | 312 | { |
312 | int base = stmpe_gpio->irq_base; | 313 | int base = 0; |
313 | 314 | ||
314 | stmpe_gpio->domain = irq_domain_add_simple(NULL, | 315 | if (!np) |
316 | base = stmpe_gpio->irq_base; | ||
317 | |||
318 | stmpe_gpio->domain = irq_domain_add_simple(np, | ||
315 | stmpe_gpio->chip.ngpio, base, | 319 | stmpe_gpio->chip.ngpio, base, |
316 | &stmpe_gpio_irq_simple_ops, stmpe_gpio); | 320 | &stmpe_gpio_irq_simple_ops, stmpe_gpio); |
317 | if (!stmpe_gpio->domain) { | 321 | if (!stmpe_gpio->domain) { |
@@ -346,6 +350,9 @@ static int stmpe_gpio_probe(struct platform_device *pdev) | |||
346 | stmpe_gpio->chip = template_chip; | 350 | stmpe_gpio->chip = template_chip; |
347 | stmpe_gpio->chip.ngpio = stmpe->num_gpios; | 351 | stmpe_gpio->chip.ngpio = stmpe->num_gpios; |
348 | stmpe_gpio->chip.dev = &pdev->dev; | 352 | stmpe_gpio->chip.dev = &pdev->dev; |
353 | #ifdef CONFIG_OF | ||
354 | stmpe_gpio->chip.of_node = np; | ||
355 | #endif | ||
349 | stmpe_gpio->chip.base = pdata ? pdata->gpio_base : -1; | 356 | stmpe_gpio->chip.base = pdata ? pdata->gpio_base : -1; |
350 | 357 | ||
351 | if (pdata) | 358 | if (pdata) |
@@ -366,7 +373,7 @@ static int stmpe_gpio_probe(struct platform_device *pdev) | |||
366 | goto out_free; | 373 | goto out_free; |
367 | 374 | ||
368 | if (irq >= 0) { | 375 | if (irq >= 0) { |
369 | ret = stmpe_gpio_irq_init(stmpe_gpio); | 376 | ret = stmpe_gpio_irq_init(stmpe_gpio, np); |
370 | if (ret) | 377 | if (ret) |
371 | goto out_disable; | 378 | goto out_disable; |
372 | 379 | ||
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 792c3e3795ca..dd64a06dc5b4 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
@@ -2326,7 +2326,6 @@ int drm_mode_addfb(struct drm_device *dev, | |||
2326 | fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r); | 2326 | fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r); |
2327 | if (IS_ERR(fb)) { | 2327 | if (IS_ERR(fb)) { |
2328 | DRM_DEBUG_KMS("could not create framebuffer\n"); | 2328 | DRM_DEBUG_KMS("could not create framebuffer\n"); |
2329 | drm_modeset_unlock_all(dev); | ||
2330 | return PTR_ERR(fb); | 2329 | return PTR_ERR(fb); |
2331 | } | 2330 | } |
2332 | 2331 | ||
@@ -2506,7 +2505,6 @@ int drm_mode_addfb2(struct drm_device *dev, | |||
2506 | fb = dev->mode_config.funcs->fb_create(dev, file_priv, r); | 2505 | fb = dev->mode_config.funcs->fb_create(dev, file_priv, r); |
2507 | if (IS_ERR(fb)) { | 2506 | if (IS_ERR(fb)) { |
2508 | DRM_DEBUG_KMS("could not create framebuffer\n"); | 2507 | DRM_DEBUG_KMS("could not create framebuffer\n"); |
2509 | drm_modeset_unlock_all(dev); | ||
2510 | return PTR_ERR(fb); | 2508 | return PTR_ERR(fb); |
2511 | } | 2509 | } |
2512 | 2510 | ||
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 59d6b9bf204b..892ff9f95975 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
@@ -1544,10 +1544,10 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) | |||
1544 | if (!fb_helper->fb) | 1544 | if (!fb_helper->fb) |
1545 | return 0; | 1545 | return 0; |
1546 | 1546 | ||
1547 | drm_modeset_lock_all(dev); | 1547 | mutex_lock(&fb_helper->dev->mode_config.mutex); |
1548 | if (!drm_fb_helper_is_bound(fb_helper)) { | 1548 | if (!drm_fb_helper_is_bound(fb_helper)) { |
1549 | fb_helper->delayed_hotplug = true; | 1549 | fb_helper->delayed_hotplug = true; |
1550 | drm_modeset_unlock_all(dev); | 1550 | mutex_unlock(&fb_helper->dev->mode_config.mutex); |
1551 | return 0; | 1551 | return 0; |
1552 | } | 1552 | } |
1553 | DRM_DEBUG_KMS("\n"); | 1553 | DRM_DEBUG_KMS("\n"); |
@@ -1558,9 +1558,11 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) | |||
1558 | 1558 | ||
1559 | count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, | 1559 | count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, |
1560 | max_height); | 1560 | max_height); |
1561 | mutex_unlock(&fb_helper->dev->mode_config.mutex); | ||
1562 | |||
1563 | drm_modeset_lock_all(dev); | ||
1561 | drm_setup_crtcs(fb_helper); | 1564 | drm_setup_crtcs(fb_helper); |
1562 | drm_modeset_unlock_all(dev); | 1565 | drm_modeset_unlock_all(dev); |
1563 | |||
1564 | drm_fb_helper_set_par(fb_helper->fbdev); | 1566 | drm_fb_helper_set_par(fb_helper->fbdev); |
1565 | 1567 | ||
1566 | return 0; | 1568 | return 0; |
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 13fdcd10a605..429e07d0b0f1 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c | |||
@@ -123,6 +123,7 @@ int drm_open(struct inode *inode, struct file *filp) | |||
123 | int retcode = 0; | 123 | int retcode = 0; |
124 | int need_setup = 0; | 124 | int need_setup = 0; |
125 | struct address_space *old_mapping; | 125 | struct address_space *old_mapping; |
126 | struct address_space *old_imapping; | ||
126 | 127 | ||
127 | minor = idr_find(&drm_minors_idr, minor_id); | 128 | minor = idr_find(&drm_minors_idr, minor_id); |
128 | if (!minor) | 129 | if (!minor) |
@@ -137,6 +138,7 @@ int drm_open(struct inode *inode, struct file *filp) | |||
137 | if (!dev->open_count++) | 138 | if (!dev->open_count++) |
138 | need_setup = 1; | 139 | need_setup = 1; |
139 | mutex_lock(&dev->struct_mutex); | 140 | mutex_lock(&dev->struct_mutex); |
141 | old_imapping = inode->i_mapping; | ||
140 | old_mapping = dev->dev_mapping; | 142 | old_mapping = dev->dev_mapping; |
141 | if (old_mapping == NULL) | 143 | if (old_mapping == NULL) |
142 | dev->dev_mapping = &inode->i_data; | 144 | dev->dev_mapping = &inode->i_data; |
@@ -159,8 +161,8 @@ int drm_open(struct inode *inode, struct file *filp) | |||
159 | 161 | ||
160 | err_undo: | 162 | err_undo: |
161 | mutex_lock(&dev->struct_mutex); | 163 | mutex_lock(&dev->struct_mutex); |
162 | filp->f_mapping = old_mapping; | 164 | filp->f_mapping = old_imapping; |
163 | inode->i_mapping = old_mapping; | 165 | inode->i_mapping = old_imapping; |
164 | iput(container_of(dev->dev_mapping, struct inode, i_data)); | 166 | iput(container_of(dev->dev_mapping, struct inode, i_data)); |
165 | dev->dev_mapping = old_mapping; | 167 | dev->dev_mapping = old_mapping; |
166 | mutex_unlock(&dev->struct_mutex); | 168 | mutex_unlock(&dev->struct_mutex); |
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 3b11ab0fbc96..9a48e1a2d417 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -57,7 +57,7 @@ eb_create(struct drm_i915_gem_execbuffer2 *args) | |||
57 | if (eb == NULL) { | 57 | if (eb == NULL) { |
58 | int size = args->buffer_count; | 58 | int size = args->buffer_count; |
59 | int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; | 59 | int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; |
60 | BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head))); | 60 | BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head)); |
61 | while (count > 2*size) | 61 | while (count > 2*size) |
62 | count >>= 1; | 62 | count >>= 1; |
63 | eb = kzalloc(count*sizeof(struct hlist_head) + | 63 | eb = kzalloc(count*sizeof(struct hlist_head) + |
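Note: BUILD_BUG_ON_NOT_POWER_OF_2() states the same compile-time constraint more directly than the open-coded is_power_of_2() form it replaces. A hedged approximation of the underlying test in plain C11 (the kernel macro is implemented differently):

#include <stddef.h>

#define ASSERT_POWER_OF_2(n) \
        _Static_assert((n) != 0 && ((n) & ((n) - 1)) == 0, \
                       #n " must be a power of two")

/* e.g. a 4096-byte page divided into pointer-sized hash buckets */
ASSERT_POWER_OF_2(4096 / sizeof(void *));

int main(void) { return 0; }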
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 32a3693905ec..1ce45a0a2d3e 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -45,6 +45,9 @@ | |||
45 | 45 | ||
46 | struct intel_crt { | 46 | struct intel_crt { |
47 | struct intel_encoder base; | 47 | struct intel_encoder base; |
48 | /* DPMS state is stored in the connector, which we need in the | ||
49 | * encoder's enable/disable callbacks */ | ||
50 | struct intel_connector *connector; | ||
48 | bool force_hotplug_required; | 51 | bool force_hotplug_required; |
49 | u32 adpa_reg; | 52 | u32 adpa_reg; |
50 | }; | 53 | }; |
@@ -81,29 +84,6 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder, | |||
81 | return true; | 84 | return true; |
82 | } | 85 | } |
83 | 86 | ||
84 | static void intel_disable_crt(struct intel_encoder *encoder) | ||
85 | { | ||
86 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; | ||
87 | struct intel_crt *crt = intel_encoder_to_crt(encoder); | ||
88 | u32 temp; | ||
89 | |||
90 | temp = I915_READ(crt->adpa_reg); | ||
91 | temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE; | ||
92 | temp &= ~ADPA_DAC_ENABLE; | ||
93 | I915_WRITE(crt->adpa_reg, temp); | ||
94 | } | ||
95 | |||
96 | static void intel_enable_crt(struct intel_encoder *encoder) | ||
97 | { | ||
98 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; | ||
99 | struct intel_crt *crt = intel_encoder_to_crt(encoder); | ||
100 | u32 temp; | ||
101 | |||
102 | temp = I915_READ(crt->adpa_reg); | ||
103 | temp |= ADPA_DAC_ENABLE; | ||
104 | I915_WRITE(crt->adpa_reg, temp); | ||
105 | } | ||
106 | |||
107 | /* Note: The caller is required to filter out dpms modes not supported by the | 87 | /* Note: The caller is required to filter out dpms modes not supported by the |
108 | * platform. */ | 88 | * platform. */ |
109 | static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) | 89 | static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) |
@@ -135,6 +115,19 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) | |||
135 | I915_WRITE(crt->adpa_reg, temp); | 115 | I915_WRITE(crt->adpa_reg, temp); |
136 | } | 116 | } |
137 | 117 | ||
118 | static void intel_disable_crt(struct intel_encoder *encoder) | ||
119 | { | ||
120 | intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF); | ||
121 | } | ||
122 | |||
123 | static void intel_enable_crt(struct intel_encoder *encoder) | ||
124 | { | ||
125 | struct intel_crt *crt = intel_encoder_to_crt(encoder); | ||
126 | |||
127 | intel_crt_set_dpms(encoder, crt->connector->base.dpms); | ||
128 | } | ||
129 | |||
130 | |||
138 | static void intel_crt_dpms(struct drm_connector *connector, int mode) | 131 | static void intel_crt_dpms(struct drm_connector *connector, int mode) |
139 | { | 132 | { |
140 | struct drm_device *dev = connector->dev; | 133 | struct drm_device *dev = connector->dev; |
@@ -746,6 +739,7 @@ void intel_crt_init(struct drm_device *dev) | |||
746 | } | 739 | } |
747 | 740 | ||
748 | connector = &intel_connector->base; | 741 | connector = &intel_connector->base; |
742 | crt->connector = intel_connector; | ||
749 | drm_connector_init(dev, &intel_connector->base, | 743 | drm_connector_init(dev, &intel_connector->base, |
750 | &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); | 744 | &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); |
751 | 745 | ||
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index d7d4afe01341..8fc93f90a7cd 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -2559,12 +2559,15 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) | |||
2559 | { | 2559 | { |
2560 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); | 2560 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); |
2561 | struct intel_dp *intel_dp = &intel_dig_port->dp; | 2561 | struct intel_dp *intel_dp = &intel_dig_port->dp; |
2562 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | ||
2562 | 2563 | ||
2563 | i2c_del_adapter(&intel_dp->adapter); | 2564 | i2c_del_adapter(&intel_dp->adapter); |
2564 | drm_encoder_cleanup(encoder); | 2565 | drm_encoder_cleanup(encoder); |
2565 | if (is_edp(intel_dp)) { | 2566 | if (is_edp(intel_dp)) { |
2566 | cancel_delayed_work_sync(&intel_dp->panel_vdd_work); | 2567 | cancel_delayed_work_sync(&intel_dp->panel_vdd_work); |
2568 | mutex_lock(&dev->mode_config.mutex); | ||
2567 | ironlake_panel_vdd_off_sync(intel_dp); | 2569 | ironlake_panel_vdd_off_sync(intel_dp); |
2570 | mutex_unlock(&dev->mode_config.mutex); | ||
2568 | } | 2571 | } |
2569 | kfree(intel_dig_port); | 2572 | kfree(intel_dig_port); |
2570 | } | 2573 | } |
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index fe22bb780e1d..78d8e919509f 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c | |||
@@ -751,8 +751,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, | |||
751 | int i; | 751 | int i; |
752 | unsigned char misc = 0; | 752 | unsigned char misc = 0; |
753 | unsigned char ext_vga[6]; | 753 | unsigned char ext_vga[6]; |
754 | unsigned char ext_vga_index24; | ||
755 | unsigned char dac_index90 = 0; | ||
756 | u8 bppshift; | 754 | u8 bppshift; |
757 | 755 | ||
758 | static unsigned char dacvalue[] = { | 756 | static unsigned char dacvalue[] = { |
@@ -803,7 +801,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, | |||
803 | option2 = 0x0000b000; | 801 | option2 = 0x0000b000; |
804 | break; | 802 | break; |
805 | case G200_ER: | 803 | case G200_ER: |
806 | dac_index90 = 0; | ||
807 | break; | 804 | break; |
808 | } | 805 | } |
809 | 806 | ||
@@ -852,10 +849,8 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, | |||
852 | WREG_DAC(i, dacvalue[i]); | 849 | WREG_DAC(i, dacvalue[i]); |
853 | } | 850 | } |
854 | 851 | ||
855 | if (mdev->type == G200_ER) { | 852 | if (mdev->type == G200_ER) |
856 | WREG_DAC(0x90, dac_index90); | 853 | WREG_DAC(0x90, 0); |
857 | } | ||
858 | |||
859 | 854 | ||
860 | if (option) | 855 | if (option) |
861 | pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option); | 856 | pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option); |
@@ -952,8 +947,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, | |||
952 | if (mdev->type == G200_WB) | 947 | if (mdev->type == G200_WB) |
953 | ext_vga[1] |= 0x88; | 948 | ext_vga[1] |= 0x88; |
954 | 949 | ||
955 | ext_vga_index24 = 0x05; | ||
956 | |||
957 | /* Set pixel clocks */ | 950 | /* Set pixel clocks */ |
958 | misc = 0x2d; | 951 | misc = 0x2d; |
959 | WREG8(MGA_MISC_OUT, misc); | 952 | WREG8(MGA_MISC_OUT, misc); |
@@ -965,7 +958,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, | |||
965 | } | 958 | } |
966 | 959 | ||
967 | if (mdev->type == G200_ER) | 960 | if (mdev->type == G200_ER) |
968 | WREG_ECRT(24, ext_vga_index24); | 961 | WREG_ECRT(0x24, 0x5); |
969 | 962 | ||
970 | if (mdev->type == G200_EV) { | 963 | if (mdev->type == G200_EV) { |
971 | WREG_ECRT(6, 0); | 964 | WREG_ECRT(6, 0); |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c index e816f06637a7..0e2c1a4f1659 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c | |||
@@ -248,6 +248,22 @@ nouveau_bios_shadow_pci(struct nouveau_bios *bios) | |||
248 | } | 248 | } |
249 | } | 249 | } |
250 | 250 | ||
251 | static void | ||
252 | nouveau_bios_shadow_platform(struct nouveau_bios *bios) | ||
253 | { | ||
254 | struct pci_dev *pdev = nv_device(bios)->pdev; | ||
255 | size_t size; | ||
256 | |||
257 | void __iomem *rom = pci_platform_rom(pdev, &size); | ||
258 | if (rom && size) { | ||
259 | bios->data = kmalloc(size, GFP_KERNEL); | ||
260 | if (bios->data) { | ||
261 | memcpy_fromio(bios->data, rom, size); | ||
262 | bios->size = size; | ||
263 | } | ||
264 | } | ||
265 | } | ||
266 | |||
251 | static int | 267 | static int |
252 | nouveau_bios_score(struct nouveau_bios *bios, const bool writeable) | 268 | nouveau_bios_score(struct nouveau_bios *bios, const bool writeable) |
253 | { | 269 | { |
@@ -288,6 +304,7 @@ nouveau_bios_shadow(struct nouveau_bios *bios) | |||
288 | { "PROM", nouveau_bios_shadow_prom, false, 0, 0, NULL }, | 304 | { "PROM", nouveau_bios_shadow_prom, false, 0, 0, NULL }, |
289 | { "ACPI", nouveau_bios_shadow_acpi, true, 0, 0, NULL }, | 305 | { "ACPI", nouveau_bios_shadow_acpi, true, 0, 0, NULL }, |
290 | { "PCIROM", nouveau_bios_shadow_pci, true, 0, 0, NULL }, | 306 | { "PCIROM", nouveau_bios_shadow_pci, true, 0, 0, NULL }, |
307 | { "PLATFORM", nouveau_bios_shadow_platform, true, 0, 0, NULL }, | ||
291 | {} | 308 | {} |
292 | }; | 309 | }; |
293 | struct methods *mthd, *best; | 310 | struct methods *mthd, *best; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index 3b6dc883e150..5eb3e0da7c6e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c | |||
@@ -391,7 +391,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) | |||
391 | struct nouveau_drm *drm = nouveau_drm(dev); | 391 | struct nouveau_drm *drm = nouveau_drm(dev); |
392 | struct nouveau_device *device = nv_device(drm->device); | 392 | struct nouveau_device *device = nv_device(drm->device); |
393 | struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); | 393 | struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); |
394 | struct nouveau_abi16_chan *chan, *temp; | 394 | struct nouveau_abi16_chan *chan = NULL, *temp; |
395 | struct nouveau_abi16_ntfy *ntfy; | 395 | struct nouveau_abi16_ntfy *ntfy; |
396 | struct nouveau_object *object; | 396 | struct nouveau_object *object; |
397 | struct nv_dma_class args = {}; | 397 | struct nv_dma_class args = {}; |
@@ -404,10 +404,11 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) | |||
404 | if (unlikely(nv_device(abi16->device)->card_type >= NV_C0)) | 404 | if (unlikely(nv_device(abi16->device)->card_type >= NV_C0)) |
405 | return nouveau_abi16_put(abi16, -EINVAL); | 405 | return nouveau_abi16_put(abi16, -EINVAL); |
406 | 406 | ||
407 | list_for_each_entry_safe(chan, temp, &abi16->channels, head) { | 407 | list_for_each_entry(temp, &abi16->channels, head) { |
408 | if (chan->chan->handle == (NVDRM_CHAN | info->channel)) | 408 | if (temp->chan->handle == (NVDRM_CHAN | info->channel)) { |
409 | chan = temp; | ||
409 | break; | 410 | break; |
410 | chan = NULL; | 411 | } |
411 | } | 412 | } |
412 | 413 | ||
413 | if (!chan) | 414 | if (!chan) |
@@ -459,17 +460,18 @@ nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS) | |||
459 | { | 460 | { |
460 | struct drm_nouveau_gpuobj_free *fini = data; | 461 | struct drm_nouveau_gpuobj_free *fini = data; |
461 | struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); | 462 | struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); |
462 | struct nouveau_abi16_chan *chan, *temp; | 463 | struct nouveau_abi16_chan *chan = NULL, *temp; |
463 | struct nouveau_abi16_ntfy *ntfy; | 464 | struct nouveau_abi16_ntfy *ntfy; |
464 | int ret; | 465 | int ret; |
465 | 466 | ||
466 | if (unlikely(!abi16)) | 467 | if (unlikely(!abi16)) |
467 | return -ENOMEM; | 468 | return -ENOMEM; |
468 | 469 | ||
469 | list_for_each_entry_safe(chan, temp, &abi16->channels, head) { | 470 | list_for_each_entry(temp, &abi16->channels, head) { |
470 | if (chan->chan->handle == (NVDRM_CHAN | fini->channel)) | 471 | if (temp->chan->handle == (NVDRM_CHAN | fini->channel)) { |
472 | chan = temp; | ||
471 | break; | 473 | break; |
472 | chan = NULL; | 474 | } |
473 | } | 475 | } |
474 | 476 | ||
475 | if (!chan) | 477 | if (!chan) |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index d1099365bfc1..c95decf543e9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c | |||
@@ -72,11 +72,25 @@ module_param_named(modeset, nouveau_modeset, int, 0400); | |||
72 | static struct drm_driver driver; | 72 | static struct drm_driver driver; |
73 | 73 | ||
74 | static int | 74 | static int |
75 | nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head) | ||
76 | { | ||
77 | struct nouveau_drm *drm = | ||
78 | container_of(event, struct nouveau_drm, vblank[head]); | ||
79 | drm_handle_vblank(drm->dev, head); | ||
80 | return NVKM_EVENT_KEEP; | ||
81 | } | ||
82 | |||
83 | static int | ||
75 | nouveau_drm_vblank_enable(struct drm_device *dev, int head) | 84 | nouveau_drm_vblank_enable(struct drm_device *dev, int head) |
76 | { | 85 | { |
77 | struct nouveau_drm *drm = nouveau_drm(dev); | 86 | struct nouveau_drm *drm = nouveau_drm(dev); |
78 | struct nouveau_disp *pdisp = nouveau_disp(drm->device); | 87 | struct nouveau_disp *pdisp = nouveau_disp(drm->device); |
79 | nouveau_event_get(pdisp->vblank, head, &drm->vblank); | 88 | |
89 | if (WARN_ON_ONCE(head > ARRAY_SIZE(drm->vblank))) | ||
90 | return -EIO; | ||
91 | WARN_ON_ONCE(drm->vblank[head].func); | ||
92 | drm->vblank[head].func = nouveau_drm_vblank_handler; | ||
93 | nouveau_event_get(pdisp->vblank, head, &drm->vblank[head]); | ||
80 | return 0; | 94 | return 0; |
81 | } | 95 | } |
82 | 96 | ||
@@ -85,16 +99,11 @@ nouveau_drm_vblank_disable(struct drm_device *dev, int head) | |||
85 | { | 99 | { |
86 | struct nouveau_drm *drm = nouveau_drm(dev); | 100 | struct nouveau_drm *drm = nouveau_drm(dev); |
87 | struct nouveau_disp *pdisp = nouveau_disp(drm->device); | 101 | struct nouveau_disp *pdisp = nouveau_disp(drm->device); |
88 | nouveau_event_put(pdisp->vblank, head, &drm->vblank); | 102 | if (drm->vblank[head].func) |
89 | } | 103 | nouveau_event_put(pdisp->vblank, head, &drm->vblank[head]); |
90 | 104 | else | |
91 | static int | 105 | WARN_ON_ONCE(1); |
92 | nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head) | 106 | drm->vblank[head].func = NULL; |
93 | { | ||
94 | struct nouveau_drm *drm = | ||
95 | container_of(event, struct nouveau_drm, vblank); | ||
96 | drm_handle_vblank(drm->dev, head); | ||
97 | return NVKM_EVENT_KEEP; | ||
98 | } | 107 | } |
99 | 108 | ||
100 | static u64 | 109 | static u64 |
@@ -292,7 +301,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) | |||
292 | 301 | ||
293 | dev->dev_private = drm; | 302 | dev->dev_private = drm; |
294 | drm->dev = dev; | 303 | drm->dev = dev; |
295 | drm->vblank.func = nouveau_drm_vblank_handler; | ||
296 | 304 | ||
297 | INIT_LIST_HEAD(&drm->clients); | 305 | INIT_LIST_HEAD(&drm->clients); |
298 | spin_lock_init(&drm->tile.lock); | 306 | spin_lock_init(&drm->tile.lock); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h index b25df374c901..9c39bafbef2c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.h +++ b/drivers/gpu/drm/nouveau/nouveau_drm.h | |||
@@ -113,7 +113,7 @@ struct nouveau_drm { | |||
113 | struct nvbios vbios; | 113 | struct nvbios vbios; |
114 | struct nouveau_display *display; | 114 | struct nouveau_display *display; |
115 | struct backlight_device *backlight; | 115 | struct backlight_device *backlight; |
116 | struct nouveau_eventh vblank; | 116 | struct nouveau_eventh vblank[4]; |
117 | 117 | ||
118 | /* power management */ | 118 | /* power management */ |
119 | struct nouveau_pm *pm; | 119 | struct nouveau_pm *pm; |
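Note: the nouveau change turns the single vblank event handle into a fixed array of per-head handles, and the handler recovers the owning nouveau_drm with container_of() on the element it was registered with. A hedged, simplified user-space sketch of that pattern (names and the four-head limit are illustrative, not the real nouveau types):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct eventh {
        int (*func)(struct eventh *event, int head);
};

struct drm_sketch {
        const char *name;
        struct eventh vblank[4];        /* one slot per CRTC head */
};

static int vblank_handler(struct eventh *event, int head)
{
        /* 'event' points at vblank[head]; step back to vblank[0] and then
         * to the structure that embeds the array. */
        struct drm_sketch *drm =
                container_of(event - head, struct drm_sketch, vblank);
        printf("%s: vblank interrupt on head %d\n", drm->name, head);
        return 0;
}

int main(void)
{
        struct drm_sketch drm = { .name = "card0" };
        for (int head = 0; head < 4; head++)
                drm.vblank[head].func = vblank_handler;
        return drm.vblank[2].func(&drm.vblank[2], 2);
}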
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 7f0e6c3f37d1..1ddc03e51bf4 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
@@ -479,7 +479,7 @@ nv50_display_flip_wait(void *data) | |||
479 | { | 479 | { |
480 | struct nv50_display_flip *flip = data; | 480 | struct nv50_display_flip *flip = data; |
481 | if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) == | 481 | if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) == |
482 | flip->chan->data); | 482 | flip->chan->data) |
483 | return true; | 483 | return true; |
484 | usleep_range(1, 2); | 484 | usleep_range(1, 2); |
485 | return false; | 485 | return false; |
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index b8015913d382..fa3c56fba294 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c | |||
@@ -99,6 +99,29 @@ static bool radeon_read_bios(struct radeon_device *rdev) | |||
99 | return true; | 99 | return true; |
100 | } | 100 | } |
101 | 101 | ||
102 | static bool radeon_read_platform_bios(struct radeon_device *rdev) | ||
103 | { | ||
104 | uint8_t __iomem *bios; | ||
105 | size_t size; | ||
106 | |||
107 | rdev->bios = NULL; | ||
108 | |||
109 | bios = pci_platform_rom(rdev->pdev, &size); | ||
110 | if (!bios) { | ||
111 | return false; | ||
112 | } | ||
113 | |||
114 | if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { | ||
115 | return false; | ||
116 | } | ||
117 | rdev->bios = kmemdup(bios, size, GFP_KERNEL); | ||
118 | if (rdev->bios == NULL) { | ||
119 | return false; | ||
120 | } | ||
121 | |||
122 | return true; | ||
123 | } | ||
124 | |||
102 | #ifdef CONFIG_ACPI | 125 | #ifdef CONFIG_ACPI |
103 | /* ATRM is used to get the BIOS on the discrete cards in | 126 | /* ATRM is used to get the BIOS on the discrete cards in |
104 | * dual-gpu systems. | 127 | * dual-gpu systems. |
@@ -620,6 +643,9 @@ bool radeon_get_bios(struct radeon_device *rdev) | |||
620 | if (r == false) { | 643 | if (r == false) { |
621 | r = radeon_read_disabled_bios(rdev); | 644 | r = radeon_read_disabled_bios(rdev); |
622 | } | 645 | } |
646 | if (r == false) { | ||
647 | r = radeon_read_platform_bios(rdev); | ||
648 | } | ||
623 | if (r == false || rdev->bios == NULL) { | 649 | if (r == false || rdev->bios == NULL) { |
624 | DRM_ERROR("Unable to locate a BIOS ROM\n"); | 650 | DRM_ERROR("Unable to locate a BIOS ROM\n"); |
625 | rdev->bios = NULL; | 651 | rdev->bios = NULL; |
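Note: the new platform-ROM fallback only copies the image after the standard expansion-ROM signature check. A hedged standalone restatement of that validation (helper name is illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* PCI expansion ROM images begin with the bytes 0x55 0xAA; reject anything
 * missing, empty, or unsigned before copying it. */
static bool platform_rom_looks_valid(const uint8_t *rom, size_t size)
{
        return rom && size >= 2 && rom[0] == 0x55 && rom[1] == 0xaa;
}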
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c index fe5cdbcf2636..b44d548c56f8 100644 --- a/drivers/gpu/drm/udl/udl_connector.c +++ b/drivers/gpu/drm/udl/udl_connector.c | |||
@@ -61,6 +61,10 @@ static int udl_get_modes(struct drm_connector *connector) | |||
61 | int ret; | 61 | int ret; |
62 | 62 | ||
63 | edid = (struct edid *)udl_get_edid(udl); | 63 | edid = (struct edid *)udl_get_edid(udl); |
64 | if (!edid) { | ||
65 | drm_mode_connector_update_edid_property(connector, NULL); | ||
66 | return 0; | ||
67 | } | ||
64 | 68 | ||
65 | /* | 69 | /* |
66 | * We only read the main block, but if the monitor reports extension | 70 | * We only read the main block, but if the monitor reports extension |
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 512b01c04ea7..aa341d135867 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
@@ -2077,7 +2077,6 @@ static const struct hid_device_id hid_ignore_list[] = { | |||
2077 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) }, | 2077 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) }, |
2078 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) }, | 2078 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) }, |
2079 | { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) }, | 2079 | { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) }, |
2080 | { HID_USB_DEVICE(USB_VENDOR_ID_MASTERKIT, USB_DEVICE_ID_MASTERKIT_MA901RADIO) }, | ||
2081 | { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) }, | 2080 | { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) }, |
2082 | { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) }, | 2081 | { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) }, |
2083 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) }, | 2082 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) }, |
@@ -2244,6 +2243,18 @@ bool hid_ignore(struct hid_device *hdev) | |||
2244 | hdev->product <= USB_DEVICE_ID_VELLEMAN_K8061_LAST)) | 2243 | hdev->product <= USB_DEVICE_ID_VELLEMAN_K8061_LAST)) |
2245 | return true; | 2244 | return true; |
2246 | break; | 2245 | break; |
2246 | case USB_VENDOR_ID_ATMEL_V_USB: | ||
2247 | /* Masterkit MA901 usb radio based on Atmel tiny85 chip and | ||
2248 | * it has the same USB ID as many Atmel V-USB devices. This | ||
2249 | * usb radio is handled by radio-ma901.c driver so we want | ||
2250 | * ignore the hid. Check the name, bus, product and ignore | ||
2251 | * if we have MA901 usb radio. | ||
2252 | */ | ||
2253 | if (hdev->product == USB_DEVICE_ID_ATMEL_V_USB && | ||
2254 | hdev->bus == BUS_USB && | ||
2255 | strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0) | ||
2256 | return true; | ||
2257 | break; | ||
2247 | } | 2258 | } |
2248 | 2259 | ||
2249 | if (hdev->type == HID_TYPE_USBMOUSE && | 2260 | if (hdev->type == HID_TYPE_USBMOUSE && |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index c4388776f4e4..5309fd5eb0eb 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
@@ -158,6 +158,8 @@ | |||
158 | #define USB_VENDOR_ID_ATMEL 0x03eb | 158 | #define USB_VENDOR_ID_ATMEL 0x03eb |
159 | #define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c | 159 | #define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c |
160 | #define USB_DEVICE_ID_ATMEL_MXT_DIGITIZER 0x2118 | 160 | #define USB_DEVICE_ID_ATMEL_MXT_DIGITIZER 0x2118 |
161 | #define USB_VENDOR_ID_ATMEL_V_USB 0x16c0 | ||
162 | #define USB_DEVICE_ID_ATMEL_V_USB 0x05df | ||
161 | 163 | ||
162 | #define USB_VENDOR_ID_AUREAL 0x0755 | 164 | #define USB_VENDOR_ID_AUREAL 0x0755 |
163 | #define USB_DEVICE_ID_AUREAL_W01RN 0x2626 | 165 | #define USB_DEVICE_ID_AUREAL_W01RN 0x2626 |
@@ -557,9 +559,6 @@ | |||
557 | #define USB_VENDOR_ID_MADCATZ 0x0738 | 559 | #define USB_VENDOR_ID_MADCATZ 0x0738 |
558 | #define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540 | 560 | #define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540 |
559 | 561 | ||
560 | #define USB_VENDOR_ID_MASTERKIT 0x16c0 | ||
561 | #define USB_DEVICE_ID_MASTERKIT_MA901RADIO 0x05df | ||
562 | |||
563 | #define USB_VENDOR_ID_MCC 0x09db | 562 | #define USB_VENDOR_ID_MCC 0x09db |
564 | #define USB_DEVICE_ID_MCC_PMD1024LS 0x0076 | 563 | #define USB_DEVICE_ID_MCC_PMD1024LS 0x0076 |
565 | #define USB_DEVICE_ID_MCC_PMD1208LS 0x007a | 564 | #define USB_DEVICE_ID_MCC_PMD1208LS 0x007a |
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index f7f113ba083e..a8ce44296cfd 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c | |||
@@ -462,6 +462,21 @@ static int magicmouse_input_mapping(struct hid_device *hdev, | |||
462 | return 0; | 462 | return 0; |
463 | } | 463 | } |
464 | 464 | ||
465 | static void magicmouse_input_configured(struct hid_device *hdev, | ||
466 | struct hid_input *hi) | ||
467 | |||
468 | { | ||
469 | struct magicmouse_sc *msc = hid_get_drvdata(hdev); | ||
470 | |||
471 | int ret = magicmouse_setup_input(msc->input, hdev); | ||
472 | if (ret) { | ||
473 | hid_err(hdev, "magicmouse setup input failed (%d)\n", ret); | ||
474 | /* clean msc->input to notify probe() of the failure */ | ||
475 | msc->input = NULL; | ||
476 | } | ||
477 | } | ||
478 | |||
479 | |||
465 | static int magicmouse_probe(struct hid_device *hdev, | 480 | static int magicmouse_probe(struct hid_device *hdev, |
466 | const struct hid_device_id *id) | 481 | const struct hid_device_id *id) |
467 | { | 482 | { |
@@ -493,15 +508,10 @@ static int magicmouse_probe(struct hid_device *hdev, | |||
493 | goto err_free; | 508 | goto err_free; |
494 | } | 509 | } |
495 | 510 | ||
496 | /* We do this after hid-input is done parsing reports so that | 511 | if (!msc->input) { |
497 | * hid-input uses the most natural button and axis IDs. | 512 | hid_err(hdev, "magicmouse input not registered\n"); |
498 | */ | 513 | ret = -ENOMEM; |
499 | if (msc->input) { | 514 | goto err_stop_hw; |
500 | ret = magicmouse_setup_input(msc->input, hdev); | ||
501 | if (ret) { | ||
502 | hid_err(hdev, "magicmouse setup input failed (%d)\n", ret); | ||
503 | goto err_stop_hw; | ||
504 | } | ||
505 | } | 515 | } |
506 | 516 | ||
507 | if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE) | 517 | if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE) |
@@ -568,6 +578,7 @@ static struct hid_driver magicmouse_driver = { | |||
568 | .remove = magicmouse_remove, | 578 | .remove = magicmouse_remove, |
569 | .raw_event = magicmouse_raw_event, | 579 | .raw_event = magicmouse_raw_event, |
570 | .input_mapping = magicmouse_input_mapping, | 580 | .input_mapping = magicmouse_input_mapping, |
581 | .input_configured = magicmouse_input_configured, | ||
571 | }; | 582 | }; |
572 | module_hid_driver(magicmouse_driver); | 583 | module_hid_driver(magicmouse_driver); |
573 | 584 | ||
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c index db713c0dfba4..461a0d739d75 100644 --- a/drivers/hwspinlock/hwspinlock_core.c +++ b/drivers/hwspinlock/hwspinlock_core.c | |||
@@ -416,6 +416,8 @@ static int __hwspin_lock_request(struct hwspinlock *hwlock) | |||
416 | ret = pm_runtime_get_sync(dev); | 416 | ret = pm_runtime_get_sync(dev); |
417 | if (ret < 0) { | 417 | if (ret < 0) { |
418 | dev_err(dev, "%s: can't power on device\n", __func__); | 418 | dev_err(dev, "%s: can't power on device\n", __func__); |
419 | pm_runtime_put_noidle(dev); | ||
420 | module_put(dev->driver->owner); | ||
419 | return ret; | 421 | return ret; |
420 | } | 422 | } |
421 | 423 | ||
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 0ceb6e1b0f65..e3085c487ace 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c | |||
@@ -182,7 +182,6 @@ static int dw_i2c_probe(struct platform_device *pdev) | |||
182 | adap->algo = &i2c_dw_algo; | 182 | adap->algo = &i2c_dw_algo; |
183 | adap->dev.parent = &pdev->dev; | 183 | adap->dev.parent = &pdev->dev; |
184 | adap->dev.of_node = pdev->dev.of_node; | 184 | adap->dev.of_node = pdev->dev.of_node; |
185 | ACPI_HANDLE_SET(&adap->dev, ACPI_HANDLE(&pdev->dev)); | ||
186 | 185 | ||
187 | r = i2c_add_numbered_adapter(adap); | 186 | r = i2c_add_numbered_adapter(adap); |
188 | if (r) { | 187 | if (r) { |
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 5d6675013864..1a38dd7dfe4e 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c | |||
@@ -465,6 +465,7 @@ static const struct x86_cpu_id intel_idle_ids[] = { | |||
465 | ICPU(0x3c, idle_cpu_hsw), | 465 | ICPU(0x3c, idle_cpu_hsw), |
466 | ICPU(0x3f, idle_cpu_hsw), | 466 | ICPU(0x3f, idle_cpu_hsw), |
467 | ICPU(0x45, idle_cpu_hsw), | 467 | ICPU(0x45, idle_cpu_hsw), |
468 | ICPU(0x46, idle_cpu_hsw), | ||
468 | {} | 469 | {} |
469 | }; | 470 | }; |
470 | MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids); | 471 | MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids); |
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c index 08a6c6d39e56..911205d3d5a0 100644 --- a/drivers/infiniband/hw/qib/qib_sd7220.c +++ b/drivers/infiniband/hw/qib/qib_sd7220.c | |||
@@ -44,7 +44,7 @@ | |||
44 | #include "qib.h" | 44 | #include "qib.h" |
45 | #include "qib_7220.h" | 45 | #include "qib_7220.h" |
46 | 46 | ||
47 | #define SD7220_FW_NAME "intel/sd7220.fw" | 47 | #define SD7220_FW_NAME "qlogic/sd7220.fw" |
48 | MODULE_FIRMWARE(SD7220_FW_NAME); | 48 | MODULE_FIRMWARE(SD7220_FW_NAME); |
49 | 49 | ||
50 | /* | 50 | /* |
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 1daa97913b7d..0bfd8cf25200 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c | |||
@@ -359,7 +359,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) | |||
359 | case 0x802: /* Intuos4 General Pen */ | 359 | case 0x802: /* Intuos4 General Pen */ |
360 | case 0x804: /* Intuos4 Marker Pen */ | 360 | case 0x804: /* Intuos4 Marker Pen */ |
361 | case 0x40802: /* Intuos4 Classic Pen */ | 361 | case 0x40802: /* Intuos4 Classic Pen */ |
362 | case 0x18803: /* DTH2242 Grip Pen */ | 362 | case 0x18802: /* DTH2242 Grip Pen */ |
363 | case 0x022: | 363 | case 0x022: |
364 | wacom->tool[idx] = BTN_TOOL_PEN; | 364 | wacom->tool[idx] = BTN_TOOL_PEN; |
365 | break; | 365 | break; |
@@ -1912,7 +1912,7 @@ static const struct wacom_features wacom_features_0xBB = | |||
1912 | { "Wacom Intuos4 12x19", WACOM_PKGLEN_INTUOS, 97536, 60960, 2047, | 1912 | { "Wacom Intuos4 12x19", WACOM_PKGLEN_INTUOS, 97536, 60960, 2047, |
1913 | 63, INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; | 1913 | 63, INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; |
1914 | static const struct wacom_features wacom_features_0xBC = | 1914 | static const struct wacom_features wacom_features_0xBC = |
1915 | { "Wacom Intuos4 WL", WACOM_PKGLEN_INTUOS, 40840, 25400, 2047, | 1915 | { "Wacom Intuos4 WL", WACOM_PKGLEN_INTUOS, 40640, 25400, 2047, |
1916 | 63, INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; | 1916 | 63, INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; |
1917 | static const struct wacom_features wacom_features_0x26 = | 1917 | static const struct wacom_features wacom_features_0x26 = |
1918 | { "Wacom Intuos5 touch S", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047, | 1918 | { "Wacom Intuos5 touch S", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047, |
@@ -2144,7 +2144,7 @@ const struct usb_device_id wacom_ids[] = { | |||
2144 | { USB_DEVICE_WACOM(0x44) }, | 2144 | { USB_DEVICE_WACOM(0x44) }, |
2145 | { USB_DEVICE_WACOM(0x45) }, | 2145 | { USB_DEVICE_WACOM(0x45) }, |
2146 | { USB_DEVICE_WACOM(0x59) }, | 2146 | { USB_DEVICE_WACOM(0x59) }, |
2147 | { USB_DEVICE_WACOM(0x5D) }, | 2147 | { USB_DEVICE_DETAILED(0x5D, USB_CLASS_HID, 0, 0) }, |
2148 | { USB_DEVICE_WACOM(0xB0) }, | 2148 | { USB_DEVICE_WACOM(0xB0) }, |
2149 | { USB_DEVICE_WACOM(0xB1) }, | 2149 | { USB_DEVICE_WACOM(0xB1) }, |
2150 | { USB_DEVICE_WACOM(0xB2) }, | 2150 | { USB_DEVICE_WACOM(0xB2) }, |
@@ -2209,7 +2209,7 @@ const struct usb_device_id wacom_ids[] = { | |||
2209 | { USB_DEVICE_WACOM(0x47) }, | 2209 | { USB_DEVICE_WACOM(0x47) }, |
2210 | { USB_DEVICE_WACOM(0xF4) }, | 2210 | { USB_DEVICE_WACOM(0xF4) }, |
2211 | { USB_DEVICE_WACOM(0xF8) }, | 2211 | { USB_DEVICE_WACOM(0xF8) }, |
2212 | { USB_DEVICE_WACOM(0xF6) }, | 2212 | { USB_DEVICE_DETAILED(0xF6, USB_CLASS_HID, 0, 0) }, |
2213 | { USB_DEVICE_WACOM(0xFA) }, | 2213 | { USB_DEVICE_WACOM(0xFA) }, |
2214 | { USB_DEVICE_LENOVO(0x6004) }, | 2214 | { USB_DEVICE_LENOVO(0x6004) }, |
2215 | { } | 2215 | { } |
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index a32e0d5aa45f..fc6aebf1e4b2 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c | |||
@@ -236,7 +236,8 @@ static int gic_retrigger(struct irq_data *d) | |||
236 | if (gic_arch_extn.irq_retrigger) | 236 | if (gic_arch_extn.irq_retrigger) |
237 | return gic_arch_extn.irq_retrigger(d); | 237 | return gic_arch_extn.irq_retrigger(d); |
238 | 238 | ||
239 | return -ENXIO; | 239 | /* the genirq layer expects 0 if we can't retrigger in hardware */ |
240 | return 0; | ||
240 | } | 241 | } |
241 | 242 | ||
242 | #ifdef CONFIG_SMP | 243 | #ifdef CONFIG_SMP |
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 66120bd46d15..10744091e6ca 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c | |||
@@ -6,6 +6,7 @@ | |||
6 | 6 | ||
7 | #include "dm.h" | 7 | #include "dm.h" |
8 | #include "dm-bio-prison.h" | 8 | #include "dm-bio-prison.h" |
9 | #include "dm-bio-record.h" | ||
9 | #include "dm-cache-metadata.h" | 10 | #include "dm-cache-metadata.h" |
10 | 11 | ||
11 | #include <linux/dm-io.h> | 12 | #include <linux/dm-io.h> |
@@ -201,10 +202,15 @@ struct per_bio_data { | |||
201 | unsigned req_nr:2; | 202 | unsigned req_nr:2; |
202 | struct dm_deferred_entry *all_io_entry; | 203 | struct dm_deferred_entry *all_io_entry; |
203 | 204 | ||
204 | /* writethrough fields */ | 205 | /* |
206 | * writethrough fields. These MUST remain at the end of this | ||
207 | * structure and the 'cache' member must be the first as it | ||
208 | * is used to determine the offsetof the writethrough fields. | ||
209 | */ | ||
205 | struct cache *cache; | 210 | struct cache *cache; |
206 | dm_cblock_t cblock; | 211 | dm_cblock_t cblock; |
207 | bio_end_io_t *saved_bi_end_io; | 212 | bio_end_io_t *saved_bi_end_io; |
213 | struct dm_bio_details bio_details; | ||
208 | }; | 214 | }; |
209 | 215 | ||
210 | struct dm_cache_migration { | 216 | struct dm_cache_migration { |
@@ -513,16 +519,28 @@ static void save_stats(struct cache *cache) | |||
513 | /*---------------------------------------------------------------- | 519 | /*---------------------------------------------------------------- |
514 | * Per bio data | 520 | * Per bio data |
515 | *--------------------------------------------------------------*/ | 521 | *--------------------------------------------------------------*/ |
516 | static struct per_bio_data *get_per_bio_data(struct bio *bio) | 522 | |
523 | /* | ||
524 | * If using writeback, leave out struct per_bio_data's writethrough fields. | ||
525 | */ | ||
526 | #define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache)) | ||
527 | #define PB_DATA_SIZE_WT (sizeof(struct per_bio_data)) | ||
528 | |||
529 | static size_t get_per_bio_data_size(struct cache *cache) | ||
530 | { | ||
531 | return cache->features.write_through ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB; | ||
532 | } | ||
533 | |||
534 | static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size) | ||
517 | { | 535 | { |
518 | struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); | 536 | struct per_bio_data *pb = dm_per_bio_data(bio, data_size); |
519 | BUG_ON(!pb); | 537 | BUG_ON(!pb); |
520 | return pb; | 538 | return pb; |
521 | } | 539 | } |
522 | 540 | ||
523 | static struct per_bio_data *init_per_bio_data(struct bio *bio) | 541 | static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size) |
524 | { | 542 | { |
525 | struct per_bio_data *pb = get_per_bio_data(bio); | 543 | struct per_bio_data *pb = get_per_bio_data(bio, data_size); |
526 | 544 | ||
527 | pb->tick = false; | 545 | pb->tick = false; |
528 | pb->req_nr = dm_bio_get_target_bio_nr(bio); | 546 | pb->req_nr = dm_bio_get_target_bio_nr(bio); |
@@ -556,7 +574,8 @@ static void remap_to_cache(struct cache *cache, struct bio *bio, | |||
556 | static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) | 574 | static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) |
557 | { | 575 | { |
558 | unsigned long flags; | 576 | unsigned long flags; |
559 | struct per_bio_data *pb = get_per_bio_data(bio); | 577 | size_t pb_data_size = get_per_bio_data_size(cache); |
578 | struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); | ||
560 | 579 | ||
561 | spin_lock_irqsave(&cache->lock, flags); | 580 | spin_lock_irqsave(&cache->lock, flags); |
562 | if (cache->need_tick_bio && | 581 | if (cache->need_tick_bio && |
@@ -635,7 +654,7 @@ static void defer_writethrough_bio(struct cache *cache, struct bio *bio) | |||
635 | 654 | ||
636 | static void writethrough_endio(struct bio *bio, int err) | 655 | static void writethrough_endio(struct bio *bio, int err) |
637 | { | 656 | { |
638 | struct per_bio_data *pb = get_per_bio_data(bio); | 657 | struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); |
639 | bio->bi_end_io = pb->saved_bi_end_io; | 658 | bio->bi_end_io = pb->saved_bi_end_io; |
640 | 659 | ||
641 | if (err) { | 660 | if (err) { |
@@ -643,6 +662,7 @@ static void writethrough_endio(struct bio *bio, int err) | |||
643 | return; | 662 | return; |
644 | } | 663 | } |
645 | 664 | ||
665 | dm_bio_restore(&pb->bio_details, bio); | ||
646 | remap_to_cache(pb->cache, bio, pb->cblock); | 666 | remap_to_cache(pb->cache, bio, pb->cblock); |
647 | 667 | ||
648 | /* | 668 | /* |
@@ -662,11 +682,12 @@ static void writethrough_endio(struct bio *bio, int err) | |||
662 | static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio, | 682 | static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio, |
663 | dm_oblock_t oblock, dm_cblock_t cblock) | 683 | dm_oblock_t oblock, dm_cblock_t cblock) |
664 | { | 684 | { |
665 | struct per_bio_data *pb = get_per_bio_data(bio); | 685 | struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); |
666 | 686 | ||
667 | pb->cache = cache; | 687 | pb->cache = cache; |
668 | pb->cblock = cblock; | 688 | pb->cblock = cblock; |
669 | pb->saved_bi_end_io = bio->bi_end_io; | 689 | pb->saved_bi_end_io = bio->bi_end_io; |
690 | dm_bio_record(&pb->bio_details, bio); | ||
670 | bio->bi_end_io = writethrough_endio; | 691 | bio->bi_end_io = writethrough_endio; |
671 | 692 | ||
672 | remap_to_origin_clear_discard(pb->cache, bio, oblock); | 693 | remap_to_origin_clear_discard(pb->cache, bio, oblock); |
@@ -1035,7 +1056,8 @@ static void defer_bio(struct cache *cache, struct bio *bio) | |||
1035 | 1056 | ||
1036 | static void process_flush_bio(struct cache *cache, struct bio *bio) | 1057 | static void process_flush_bio(struct cache *cache, struct bio *bio) |
1037 | { | 1058 | { |
1038 | struct per_bio_data *pb = get_per_bio_data(bio); | 1059 | size_t pb_data_size = get_per_bio_data_size(cache); |
1060 | struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); | ||
1039 | 1061 | ||
1040 | BUG_ON(bio->bi_size); | 1062 | BUG_ON(bio->bi_size); |
1041 | if (!pb->req_nr) | 1063 | if (!pb->req_nr) |
@@ -1107,7 +1129,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs, | |||
1107 | dm_oblock_t block = get_bio_block(cache, bio); | 1129 | dm_oblock_t block = get_bio_block(cache, bio); |
1108 | struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell; | 1130 | struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell; |
1109 | struct policy_result lookup_result; | 1131 | struct policy_result lookup_result; |
1110 | struct per_bio_data *pb = get_per_bio_data(bio); | 1132 | size_t pb_data_size = get_per_bio_data_size(cache); |
1133 | struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); | ||
1111 | bool discarded_block = is_discarded_oblock(cache, block); | 1134 | bool discarded_block = is_discarded_oblock(cache, block); |
1112 | bool can_migrate = discarded_block || spare_migration_bandwidth(cache); | 1135 | bool can_migrate = discarded_block || spare_migration_bandwidth(cache); |
1113 | 1136 | ||
@@ -1881,7 +1904,6 @@ static int cache_create(struct cache_args *ca, struct cache **result) | |||
1881 | 1904 | ||
1882 | cache->ti = ca->ti; | 1905 | cache->ti = ca->ti; |
1883 | ti->private = cache; | 1906 | ti->private = cache; |
1884 | ti->per_bio_data_size = sizeof(struct per_bio_data); | ||
1885 | ti->num_flush_bios = 2; | 1907 | ti->num_flush_bios = 2; |
1886 | ti->flush_supported = true; | 1908 | ti->flush_supported = true; |
1887 | 1909 | ||
@@ -1890,6 +1912,7 @@ static int cache_create(struct cache_args *ca, struct cache **result) | |||
1890 | ti->discard_zeroes_data_unsupported = true; | 1912 | ti->discard_zeroes_data_unsupported = true; |
1891 | 1913 | ||
1892 | memcpy(&cache->features, &ca->features, sizeof(cache->features)); | 1914 | memcpy(&cache->features, &ca->features, sizeof(cache->features)); |
1915 | ti->per_bio_data_size = get_per_bio_data_size(cache); | ||
1893 | 1916 | ||
1894 | cache->callbacks.congested_fn = cache_is_congested; | 1917 | cache->callbacks.congested_fn = cache_is_congested; |
1895 | dm_table_add_target_callbacks(ti->table, &cache->callbacks); | 1918 | dm_table_add_target_callbacks(ti->table, &cache->callbacks); |
@@ -2092,6 +2115,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio) | |||
2092 | 2115 | ||
2093 | int r; | 2116 | int r; |
2094 | dm_oblock_t block = get_bio_block(cache, bio); | 2117 | dm_oblock_t block = get_bio_block(cache, bio); |
2118 | size_t pb_data_size = get_per_bio_data_size(cache); | ||
2095 | bool can_migrate = false; | 2119 | bool can_migrate = false; |
2096 | bool discarded_block; | 2120 | bool discarded_block; |
2097 | struct dm_bio_prison_cell *cell; | 2121 | struct dm_bio_prison_cell *cell; |
@@ -2108,7 +2132,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio) | |||
2108 | return DM_MAPIO_REMAPPED; | 2132 | return DM_MAPIO_REMAPPED; |
2109 | } | 2133 | } |
2110 | 2134 | ||
2111 | pb = init_per_bio_data(bio); | 2135 | pb = init_per_bio_data(bio, pb_data_size); |
2112 | 2136 | ||
2113 | if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) { | 2137 | if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) { |
2114 | defer_bio(cache, bio); | 2138 | defer_bio(cache, bio); |
@@ -2193,7 +2217,8 @@ static int cache_end_io(struct dm_target *ti, struct bio *bio, int error) | |||
2193 | { | 2217 | { |
2194 | struct cache *cache = ti->private; | 2218 | struct cache *cache = ti->private; |
2195 | unsigned long flags; | 2219 | unsigned long flags; |
2196 | struct per_bio_data *pb = get_per_bio_data(bio); | 2220 | size_t pb_data_size = get_per_bio_data_size(cache); |
2221 | struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); | ||
2197 | 2222 | ||
2198 | if (pb->tick) { | 2223 | if (pb->tick) { |
2199 | policy_tick(cache->policy); | 2224 | policy_tick(cache->policy); |
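Note on the dm-cache hunks above: the fixed `ti->per_bio_data_size = sizeof(struct per_bio_data)` assignment is replaced by a size computed per target, so a writethrough cache can reserve room for the extra bio snapshot while writeback caches keep the smaller footprint. The sketch below is a minimal illustration of that pattern, not the driver's actual layout; the struct names and the writethrough flag are placeholders, only dm_per_bio_data() is the real device-mapper helper.

    #include <linux/device-mapper.h>

    /* Hypothetical per-bio layouts: the writethrough one carries extra state. */
    struct demo_pb_writeback   { bool tick; unsigned req_nr; };
    struct demo_pb_writethrough { bool tick; unsigned req_nr; char saved[64]; };

    static size_t demo_per_bio_data_size(bool writethrough)
    {
        /* Ask dm to reserve only as much per-bio space as this mode needs. */
        return writethrough ? sizeof(struct demo_pb_writethrough)
                            : sizeof(struct demo_pb_writeback);
    }

    static void *demo_get_per_bio_data(struct bio *bio, size_t data_size)
    {
        /* dm hands back the area it reserved at map time for this bio. */
        return dm_per_bio_data(bio, data_size);
    }

Every caller then has to pass the same size it advertised in ti->per_bio_data_size, which is why the hunks thread pb_data_size through process_bio(), cache_map() and cache_end_io().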
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 7e469260fe5e..9a0bdad9ad8f 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -611,6 +611,7 @@ static void dec_pending(struct dm_io *io, int error) | |||
611 | queue_io(md, bio); | 611 | queue_io(md, bio); |
612 | } else { | 612 | } else { |
613 | /* done with normal IO or empty flush */ | 613 | /* done with normal IO or empty flush */ |
614 | trace_block_bio_complete(md->queue, bio, io_error); | ||
614 | bio_endio(bio, io_error); | 615 | bio_endio(bio, io_error); |
615 | } | 616 | } |
616 | } | 617 | } |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 24909eb13fec..f4e87bfc7567 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -184,6 +184,8 @@ static void return_io(struct bio *return_bi) | |||
184 | return_bi = bi->bi_next; | 184 | return_bi = bi->bi_next; |
185 | bi->bi_next = NULL; | 185 | bi->bi_next = NULL; |
186 | bi->bi_size = 0; | 186 | bi->bi_size = 0; |
187 | trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), | ||
188 | bi, 0); | ||
187 | bio_endio(bi, 0); | 189 | bio_endio(bi, 0); |
188 | bi = return_bi; | 190 | bi = return_bi; |
189 | } | 191 | } |
@@ -3914,6 +3916,8 @@ static void raid5_align_endio(struct bio *bi, int error) | |||
3914 | rdev_dec_pending(rdev, conf->mddev); | 3916 | rdev_dec_pending(rdev, conf->mddev); |
3915 | 3917 | ||
3916 | if (!error && uptodate) { | 3918 | if (!error && uptodate) { |
3919 | trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev), | ||
3920 | raid_bi, 0); | ||
3917 | bio_endio(raid_bi, 0); | 3921 | bio_endio(raid_bi, 0); |
3918 | if (atomic_dec_and_test(&conf->active_aligned_reads)) | 3922 | if (atomic_dec_and_test(&conf->active_aligned_reads)) |
3919 | wake_up(&conf->wait_for_stripe); | 3923 | wake_up(&conf->wait_for_stripe); |
@@ -4382,6 +4386,8 @@ static void make_request(struct mddev *mddev, struct bio * bi) | |||
4382 | if ( rw == WRITE ) | 4386 | if ( rw == WRITE ) |
4383 | md_write_end(mddev); | 4387 | md_write_end(mddev); |
4384 | 4388 | ||
4389 | trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), | ||
4390 | bi, 0); | ||
4385 | bio_endio(bi, 0); | 4391 | bio_endio(bi, 0); |
4386 | } | 4392 | } |
4387 | } | 4393 | } |
@@ -4758,8 +4764,11 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) | |||
4758 | handled++; | 4764 | handled++; |
4759 | } | 4765 | } |
4760 | remaining = raid5_dec_bi_active_stripes(raid_bio); | 4766 | remaining = raid5_dec_bi_active_stripes(raid_bio); |
4761 | if (remaining == 0) | 4767 | if (remaining == 0) { |
4768 | trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev), | ||
4769 | raid_bio, 0); | ||
4762 | bio_endio(raid_bio, 0); | 4770 | bio_endio(raid_bio, 0); |
4771 | } | ||
4763 | if (atomic_dec_and_test(&conf->active_aligned_reads)) | 4772 | if (atomic_dec_and_test(&conf->active_aligned_reads)) |
4764 | wake_up(&conf->wait_for_stripe); | 4773 | wake_up(&conf->wait_for_stripe); |
4765 | return handled; | 4774 | return handled; |
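Note on the dm.c and raid5.c hunks above: stacked drivers complete bios directly with bio_endio(), so blktrace never saw a completion event for them; the patches add explicit trace_block_bio_complete() calls just before completion. A hedged sketch of the call pattern as it looks in this kernel generation (the tracepoint took the queue, the bio and an error code; later kernels changed the signature):

    #include <linux/bio.h>
    #include <linux/blkdev.h>
    #include <trace/events/block.h>

    /* Complete a bio in a stacking driver and emit the matching trace event. */
    static void demo_complete_stacked_bio(struct bio *bio, int error)
    {
        trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio, error);
        bio_endio(bio, error);
    }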
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c index f19cd7367040..4faaf8053f26 100644 --- a/drivers/media/dvb-frontends/mb86a20s.c +++ b/drivers/media/dvb-frontends/mb86a20s.c | |||
@@ -610,7 +610,7 @@ static void mb86a20s_layer_bitrate(struct dvb_frontend *fe, u32 layer, | |||
610 | __func__, 'A' + layer, segment * isdbt_rate[m][f][i]/1000, | 610 | __func__, 'A' + layer, segment * isdbt_rate[m][f][i]/1000, |
611 | rate, rate); | 611 | rate, rate); |
612 | 612 | ||
613 | state->estimated_rate[i] = rate; | 613 | state->estimated_rate[layer] = rate; |
614 | } | 614 | } |
615 | 615 | ||
616 | 616 | ||
diff --git a/drivers/media/pci/cx25821/cx25821-video.c b/drivers/media/pci/cx25821/cx25821-video.c index d4de021dc844..31ce7698acb9 100644 --- a/drivers/media/pci/cx25821/cx25821-video.c +++ b/drivers/media/pci/cx25821/cx25821-video.c | |||
@@ -461,7 +461,7 @@ int cx25821_video_register(struct cx25821_dev *dev) | |||
461 | 461 | ||
462 | spin_lock_init(&dev->slock); | 462 | spin_lock_init(&dev->slock); |
463 | 463 | ||
464 | for (i = 0; i < MAX_VID_CHANNEL_NUM - 1; ++i) { | 464 | for (i = 0; i < VID_CHANNEL_NUM; ++i) { |
465 | cx25821_init_controls(dev, i); | 465 | cx25821_init_controls(dev, i); |
466 | 466 | ||
467 | cx25821_risc_stopper(dev->pci, &dev->channels[i].vidq.stopper, | 467 | cx25821_risc_stopper(dev->pci, &dev->channels[i].vidq.stopper, |
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index 05d7b6333461..a0639e779973 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig | |||
@@ -204,7 +204,7 @@ config VIDEO_SAMSUNG_EXYNOS_GSC | |||
204 | 204 | ||
205 | config VIDEO_SH_VEU | 205 | config VIDEO_SH_VEU |
206 | tristate "SuperH VEU mem2mem video processing driver" | 206 | tristate "SuperH VEU mem2mem video processing driver" |
207 | depends on VIDEO_DEV && VIDEO_V4L2 | 207 | depends on VIDEO_DEV && VIDEO_V4L2 && GENERIC_HARDIRQS |
208 | select VIDEOBUF2_DMA_CONTIG | 208 | select VIDEOBUF2_DMA_CONTIG |
209 | select V4L2_MEM2MEM_DEV | 209 | select V4L2_MEM2MEM_DEV |
210 | help | 210 | help |
diff --git a/drivers/media/radio/radio-ma901.c b/drivers/media/radio/radio-ma901.c index c61f590029ad..348dafc0318a 100644 --- a/drivers/media/radio/radio-ma901.c +++ b/drivers/media/radio/radio-ma901.c | |||
@@ -347,9 +347,20 @@ static void usb_ma901radio_release(struct v4l2_device *v4l2_dev) | |||
347 | static int usb_ma901radio_probe(struct usb_interface *intf, | 347 | static int usb_ma901radio_probe(struct usb_interface *intf, |
348 | const struct usb_device_id *id) | 348 | const struct usb_device_id *id) |
349 | { | 349 | { |
350 | struct usb_device *dev = interface_to_usbdev(intf); | ||
350 | struct ma901radio_device *radio; | 351 | struct ma901radio_device *radio; |
351 | int retval = 0; | 352 | int retval = 0; |
352 | 353 | ||
354 | /* Masterkit MA901 USB radio has the same USB ID as many other | ||
355 | * Atmel V-USB devices. Let's make additional checks to be sure | ||
356 | * that this is our device. | ||
357 | */ | ||
358 | |||
359 | if (dev->product && dev->manufacturer && | ||
360 | (strncmp(dev->product, "MA901", 5) != 0 | ||
361 | || strncmp(dev->manufacturer, "www.masterkit.ru", 16) != 0)) | ||
362 | return -ENODEV; | ||
363 | |||
353 | radio = kzalloc(sizeof(struct ma901radio_device), GFP_KERNEL); | 364 | radio = kzalloc(sizeof(struct ma901radio_device), GFP_KERNEL); |
354 | if (!radio) { | 365 | if (!radio) { |
355 | dev_err(&intf->dev, "kzalloc for ma901radio_device failed\n"); | 366 | dev_err(&intf->dev, "kzalloc for ma901radio_device failed\n"); |
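Note on the radio-ma901 hunk above: many Atmel V-USB gadgets reuse the MA901's vendor/product ID, so the probe now compares the USB string descriptors and only rejects the interface when both strings are present and do not match. A minimal sketch of that idiom; the expected strings are parameters here rather than the driver's hard-coded values:

    #include <linux/usb.h>
    #include <linux/string.h>

    /* Accept the device unless its descriptor strings clearly identify
     * something else that merely shares our VID/PID. */
    static bool demo_strings_match(struct usb_interface *intf,
                                   const char *product, const char *vendor)
    {
        struct usb_device *dev = interface_to_usbdev(intf);

        if (!dev->product || !dev->manufacturer)
            return true;    /* strings absent: cannot disambiguate, accept */

        return strncmp(dev->product, product, strlen(product)) == 0 &&
               strncmp(dev->manufacturer, vendor, strlen(vendor)) == 0;
    }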
diff --git a/drivers/misc/vmw_vmci/Kconfig b/drivers/misc/vmw_vmci/Kconfig index 39c2ecadb273..ea98f7e9ccd1 100644 --- a/drivers/misc/vmw_vmci/Kconfig +++ b/drivers/misc/vmw_vmci/Kconfig | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | config VMWARE_VMCI | 5 | config VMWARE_VMCI |
6 | tristate "VMware VMCI Driver" | 6 | tristate "VMware VMCI Driver" |
7 | depends on X86 && PCI | 7 | depends on X86 && PCI && NET |
8 | help | 8 | help |
9 | This is VMware's Virtual Machine Communication Interface. It enables | 9 | This is VMware's Virtual Machine Communication Interface. It enables |
10 | high-speed communication between host and guest in a virtual | 10 | high-speed communication between host and guest in a virtual |
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index 92ab30ab00dc..dc571ebc1aa0 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c | |||
@@ -1123,33 +1123,6 @@ static unsigned long mtdchar_get_unmapped_area(struct file *file, | |||
1123 | } | 1123 | } |
1124 | #endif | 1124 | #endif |
1125 | 1125 | ||
1126 | static inline unsigned long get_vm_size(struct vm_area_struct *vma) | ||
1127 | { | ||
1128 | return vma->vm_end - vma->vm_start; | ||
1129 | } | ||
1130 | |||
1131 | static inline resource_size_t get_vm_offset(struct vm_area_struct *vma) | ||
1132 | { | ||
1133 | return (resource_size_t) vma->vm_pgoff << PAGE_SHIFT; | ||
1134 | } | ||
1135 | |||
1136 | /* | ||
1137 | * Set a new vm offset. | ||
1138 | * | ||
1139 | * Verify that the incoming offset really works as a page offset, | ||
1140 | * and that the offset and size fit in a resource_size_t. | ||
1141 | */ | ||
1142 | static inline int set_vm_offset(struct vm_area_struct *vma, resource_size_t off) | ||
1143 | { | ||
1144 | pgoff_t pgoff = off >> PAGE_SHIFT; | ||
1145 | if (off != (resource_size_t) pgoff << PAGE_SHIFT) | ||
1146 | return -EINVAL; | ||
1147 | if (off + get_vm_size(vma) - 1 < off) | ||
1148 | return -EINVAL; | ||
1149 | vma->vm_pgoff = pgoff; | ||
1150 | return 0; | ||
1151 | } | ||
1152 | |||
1153 | /* | 1126 | /* |
1154 | * set up a mapping for shared memory segments | 1127 | * set up a mapping for shared memory segments |
1155 | */ | 1128 | */ |
@@ -1159,45 +1132,17 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma) | |||
1159 | struct mtd_file_info *mfi = file->private_data; | 1132 | struct mtd_file_info *mfi = file->private_data; |
1160 | struct mtd_info *mtd = mfi->mtd; | 1133 | struct mtd_info *mtd = mfi->mtd; |
1161 | struct map_info *map = mtd->priv; | 1134 | struct map_info *map = mtd->priv; |
1162 | resource_size_t start, off; | ||
1163 | unsigned long len, vma_len; | ||
1164 | 1135 | ||
1165 | /* This is broken because it assumes the MTD device is map-based | 1136 | /* This is broken because it assumes the MTD device is map-based |
1166 | and that mtd->priv is a valid struct map_info. It should be | 1137 | and that mtd->priv is a valid struct map_info. It should be |
1167 | replaced with something that uses the mtd_get_unmapped_area() | 1138 | replaced with something that uses the mtd_get_unmapped_area() |
1168 | operation properly. */ | 1139 | operation properly. */ |
1169 | if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) { | 1140 | if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) { |
1170 | off = get_vm_offset(vma); | ||
1171 | start = map->phys; | ||
1172 | len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size); | ||
1173 | start &= PAGE_MASK; | ||
1174 | vma_len = get_vm_size(vma); | ||
1175 | |||
1176 | /* Overflow in off+len? */ | ||
1177 | if (vma_len + off < off) | ||
1178 | return -EINVAL; | ||
1179 | /* Does it fit in the mapping? */ | ||
1180 | if (vma_len + off > len) | ||
1181 | return -EINVAL; | ||
1182 | |||
1183 | off += start; | ||
1184 | /* Did that overflow? */ | ||
1185 | if (off < start) | ||
1186 | return -EINVAL; | ||
1187 | if (set_vm_offset(vma, off) < 0) | ||
1188 | return -EINVAL; | ||
1189 | vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; | ||
1190 | |||
1191 | #ifdef pgprot_noncached | 1141 | #ifdef pgprot_noncached |
1192 | if (file->f_flags & O_DSYNC || off >= __pa(high_memory)) | 1142 | if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory)) |
1193 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | 1143 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); |
1194 | #endif | 1144 | #endif |
1195 | if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, | 1145 | return vm_iomap_memory(vma, map->phys, map->size); |
1196 | vma->vm_end - vma->vm_start, | ||
1197 | vma->vm_page_prot)) | ||
1198 | return -EAGAIN; | ||
1199 | |||
1200 | return 0; | ||
1201 | } | 1146 | } |
1202 | return -ENOSYS; | 1147 | return -ENOSYS; |
1203 | #else | 1148 | #else |
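Note on the mtdchar hunk above: the hand-rolled offset/overflow checks and the io_remap_pfn_range() call are replaced by vm_iomap_memory(), which performs the same validation of vm_pgoff and the requested length against a physical base and size. A hedged sketch of an mmap handler built on that helper (the function and its callers are placeholders; only vm_iomap_memory() and pgprot_noncached() come from the kernel):

    #include <linux/fs.h>
    #include <linux/mm.h>

    /* Map a device window into userspace; the helper checks that the
     * requested range fits inside [phys, phys + size). */
    static int demo_mmap_window(struct file *file, struct vm_area_struct *vma,
                                phys_addr_t phys, unsigned long size)
    {
    #ifdef pgprot_noncached
        if (file->f_flags & O_DSYNC)
            vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
    #endif
        return vm_iomap_memory(vma, phys, size);
    }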
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 6bbd90e1123c..dbbea0eec134 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -846,8 +846,10 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active, | |||
846 | if (bond->dev->flags & IFF_ALLMULTI) | 846 | if (bond->dev->flags & IFF_ALLMULTI) |
847 | dev_set_allmulti(old_active->dev, -1); | 847 | dev_set_allmulti(old_active->dev, -1); |
848 | 848 | ||
849 | netif_addr_lock_bh(bond->dev); | ||
849 | netdev_for_each_mc_addr(ha, bond->dev) | 850 | netdev_for_each_mc_addr(ha, bond->dev) |
850 | dev_mc_del(old_active->dev, ha->addr); | 851 | dev_mc_del(old_active->dev, ha->addr); |
852 | netif_addr_unlock_bh(bond->dev); | ||
851 | } | 853 | } |
852 | 854 | ||
853 | if (new_active) { | 855 | if (new_active) { |
@@ -858,8 +860,10 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active, | |||
858 | if (bond->dev->flags & IFF_ALLMULTI) | 860 | if (bond->dev->flags & IFF_ALLMULTI) |
859 | dev_set_allmulti(new_active->dev, 1); | 861 | dev_set_allmulti(new_active->dev, 1); |
860 | 862 | ||
863 | netif_addr_lock_bh(bond->dev); | ||
861 | netdev_for_each_mc_addr(ha, bond->dev) | 864 | netdev_for_each_mc_addr(ha, bond->dev) |
862 | dev_mc_add(new_active->dev, ha->addr); | 865 | dev_mc_add(new_active->dev, ha->addr); |
866 | netif_addr_unlock_bh(bond->dev); | ||
863 | } | 867 | } |
864 | } | 868 | } |
865 | 869 | ||
@@ -1901,11 +1905,29 @@ err_dest_symlinks: | |||
1901 | bond_destroy_slave_symlinks(bond_dev, slave_dev); | 1905 | bond_destroy_slave_symlinks(bond_dev, slave_dev); |
1902 | 1906 | ||
1903 | err_detach: | 1907 | err_detach: |
1908 | if (!USES_PRIMARY(bond->params.mode)) { | ||
1909 | netif_addr_lock_bh(bond_dev); | ||
1910 | bond_mc_list_flush(bond_dev, slave_dev); | ||
1911 | netif_addr_unlock_bh(bond_dev); | ||
1912 | } | ||
1913 | bond_del_vlans_from_slave(bond, slave_dev); | ||
1904 | write_lock_bh(&bond->lock); | 1914 | write_lock_bh(&bond->lock); |
1905 | bond_detach_slave(bond, new_slave); | 1915 | bond_detach_slave(bond, new_slave); |
1916 | if (bond->primary_slave == new_slave) | ||
1917 | bond->primary_slave = NULL; | ||
1906 | write_unlock_bh(&bond->lock); | 1918 | write_unlock_bh(&bond->lock); |
1919 | if (bond->curr_active_slave == new_slave) { | ||
1920 | read_lock(&bond->lock); | ||
1921 | write_lock_bh(&bond->curr_slave_lock); | ||
1922 | bond_change_active_slave(bond, NULL); | ||
1923 | bond_select_active_slave(bond); | ||
1924 | write_unlock_bh(&bond->curr_slave_lock); | ||
1925 | read_unlock(&bond->lock); | ||
1926 | } | ||
1927 | slave_disable_netpoll(new_slave); | ||
1907 | 1928 | ||
1908 | err_close: | 1929 | err_close: |
1930 | slave_dev->priv_flags &= ~IFF_BONDING; | ||
1909 | dev_close(slave_dev); | 1931 | dev_close(slave_dev); |
1910 | 1932 | ||
1911 | err_unset_master: | 1933 | err_unset_master: |
@@ -1976,12 +1998,11 @@ static int __bond_release_one(struct net_device *bond_dev, | |||
1976 | return -EINVAL; | 1998 | return -EINVAL; |
1977 | } | 1999 | } |
1978 | 2000 | ||
2001 | write_unlock_bh(&bond->lock); | ||
1979 | /* unregister rx_handler early so bond_handle_frame wouldn't be called | 2002 | /* unregister rx_handler early so bond_handle_frame wouldn't be called |
1980 | * for this slave anymore. | 2003 | * for this slave anymore. |
1981 | */ | 2004 | */ |
1982 | netdev_rx_handler_unregister(slave_dev); | 2005 | netdev_rx_handler_unregister(slave_dev); |
1983 | write_unlock_bh(&bond->lock); | ||
1984 | synchronize_net(); | ||
1985 | write_lock_bh(&bond->lock); | 2006 | write_lock_bh(&bond->lock); |
1986 | 2007 | ||
1987 | if (!all && !bond->params.fail_over_mac) { | 2008 | if (!all && !bond->params.fail_over_mac) { |
@@ -3169,11 +3190,20 @@ static int bond_slave_netdev_event(unsigned long event, | |||
3169 | struct net_device *slave_dev) | 3190 | struct net_device *slave_dev) |
3170 | { | 3191 | { |
3171 | struct slave *slave = bond_slave_get_rtnl(slave_dev); | 3192 | struct slave *slave = bond_slave_get_rtnl(slave_dev); |
3172 | struct bonding *bond = slave->bond; | 3193 | struct bonding *bond; |
3173 | struct net_device *bond_dev = slave->bond->dev; | 3194 | struct net_device *bond_dev; |
3174 | u32 old_speed; | 3195 | u32 old_speed; |
3175 | u8 old_duplex; | 3196 | u8 old_duplex; |
3176 | 3197 | ||
3198 | /* A netdev event can be generated while enslaving a device | ||
3199 | * before netdev_rx_handler_register is called in which case | ||
3200 | * slave will be NULL | ||
3201 | */ | ||
3202 | if (!slave) | ||
3203 | return NOTIFY_DONE; | ||
3204 | bond_dev = slave->bond->dev; | ||
3205 | bond = slave->bond; | ||
3206 | |||
3177 | switch (event) { | 3207 | switch (event) { |
3178 | case NETDEV_UNREGISTER: | 3208 | case NETDEV_UNREGISTER: |
3179 | if (bond->setup_by_slave) | 3209 | if (bond->setup_by_slave) |
@@ -3287,20 +3317,22 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count) | |||
3287 | */ | 3317 | */ |
3288 | static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count) | 3318 | static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count) |
3289 | { | 3319 | { |
3290 | struct ethhdr *data = (struct ethhdr *)skb->data; | 3320 | const struct ethhdr *data; |
3291 | struct iphdr *iph; | 3321 | const struct iphdr *iph; |
3292 | struct ipv6hdr *ipv6h; | 3322 | const struct ipv6hdr *ipv6h; |
3293 | u32 v6hash; | 3323 | u32 v6hash; |
3294 | __be32 *s, *d; | 3324 | const __be32 *s, *d; |
3295 | 3325 | ||
3296 | if (skb->protocol == htons(ETH_P_IP) && | 3326 | if (skb->protocol == htons(ETH_P_IP) && |
3297 | skb_network_header_len(skb) >= sizeof(*iph)) { | 3327 | pskb_network_may_pull(skb, sizeof(*iph))) { |
3298 | iph = ip_hdr(skb); | 3328 | iph = ip_hdr(skb); |
3329 | data = (struct ethhdr *)skb->data; | ||
3299 | return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^ | 3330 | return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^ |
3300 | (data->h_dest[5] ^ data->h_source[5])) % count; | 3331 | (data->h_dest[5] ^ data->h_source[5])) % count; |
3301 | } else if (skb->protocol == htons(ETH_P_IPV6) && | 3332 | } else if (skb->protocol == htons(ETH_P_IPV6) && |
3302 | skb_network_header_len(skb) >= sizeof(*ipv6h)) { | 3333 | pskb_network_may_pull(skb, sizeof(*ipv6h))) { |
3303 | ipv6h = ipv6_hdr(skb); | 3334 | ipv6h = ipv6_hdr(skb); |
3335 | data = (struct ethhdr *)skb->data; | ||
3304 | s = &ipv6h->saddr.s6_addr32[0]; | 3336 | s = &ipv6h->saddr.s6_addr32[0]; |
3305 | d = &ipv6h->daddr.s6_addr32[0]; | 3337 | d = &ipv6h->daddr.s6_addr32[0]; |
3306 | v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]); | 3338 | v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]); |
@@ -3319,33 +3351,36 @@ static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count) | |||
3319 | static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count) | 3351 | static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count) |
3320 | { | 3352 | { |
3321 | u32 layer4_xor = 0; | 3353 | u32 layer4_xor = 0; |
3322 | struct iphdr *iph; | 3354 | const struct iphdr *iph; |
3323 | struct ipv6hdr *ipv6h; | 3355 | const struct ipv6hdr *ipv6h; |
3324 | __be32 *s, *d; | 3356 | const __be32 *s, *d; |
3325 | __be16 *layer4hdr; | 3357 | const __be16 *l4 = NULL; |
3358 | __be16 _l4[2]; | ||
3359 | int noff = skb_network_offset(skb); | ||
3360 | int poff; | ||
3326 | 3361 | ||
3327 | if (skb->protocol == htons(ETH_P_IP) && | 3362 | if (skb->protocol == htons(ETH_P_IP) && |
3328 | skb_network_header_len(skb) >= sizeof(*iph)) { | 3363 | pskb_may_pull(skb, noff + sizeof(*iph))) { |
3329 | iph = ip_hdr(skb); | 3364 | iph = ip_hdr(skb); |
3330 | if (!ip_is_fragment(iph) && | 3365 | poff = proto_ports_offset(iph->protocol); |
3331 | (iph->protocol == IPPROTO_TCP || | 3366 | |
3332 | iph->protocol == IPPROTO_UDP) && | 3367 | if (!ip_is_fragment(iph) && poff >= 0) { |
3333 | (skb_headlen(skb) - skb_network_offset(skb) >= | 3368 | l4 = skb_header_pointer(skb, noff + (iph->ihl << 2) + poff, |
3334 | iph->ihl * sizeof(u32) + sizeof(*layer4hdr) * 2)) { | 3369 | sizeof(_l4), &_l4); |
3335 | layer4hdr = (__be16 *)((u32 *)iph + iph->ihl); | 3370 | if (l4) |
3336 | layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1)); | 3371 | layer4_xor = ntohs(l4[0] ^ l4[1]); |
3337 | } | 3372 | } |
3338 | return (layer4_xor ^ | 3373 | return (layer4_xor ^ |
3339 | ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count; | 3374 | ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count; |
3340 | } else if (skb->protocol == htons(ETH_P_IPV6) && | 3375 | } else if (skb->protocol == htons(ETH_P_IPV6) && |
3341 | skb_network_header_len(skb) >= sizeof(*ipv6h)) { | 3376 | pskb_may_pull(skb, noff + sizeof(*ipv6h))) { |
3342 | ipv6h = ipv6_hdr(skb); | 3377 | ipv6h = ipv6_hdr(skb); |
3343 | if ((ipv6h->nexthdr == IPPROTO_TCP || | 3378 | poff = proto_ports_offset(ipv6h->nexthdr); |
3344 | ipv6h->nexthdr == IPPROTO_UDP) && | 3379 | if (poff >= 0) { |
3345 | (skb_headlen(skb) - skb_network_offset(skb) >= | 3380 | l4 = skb_header_pointer(skb, noff + sizeof(*ipv6h) + poff, |
3346 | sizeof(*ipv6h) + sizeof(*layer4hdr) * 2)) { | 3381 | sizeof(_l4), &_l4); |
3347 | layer4hdr = (__be16 *)(ipv6h + 1); | 3382 | if (l4) |
3348 | layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1)); | 3383 | layer4_xor = ntohs(l4[0] ^ l4[1]); |
3349 | } | 3384 | } |
3350 | s = &ipv6h->saddr.s6_addr32[0]; | 3385 | s = &ipv6h->saddr.s6_addr32[0]; |
3351 | d = &ipv6h->daddr.s6_addr32[0]; | 3386 | d = &ipv6h->daddr.s6_addr32[0]; |
@@ -4847,9 +4882,18 @@ static int __net_init bond_net_init(struct net *net) | |||
4847 | static void __net_exit bond_net_exit(struct net *net) | 4882 | static void __net_exit bond_net_exit(struct net *net) |
4848 | { | 4883 | { |
4849 | struct bond_net *bn = net_generic(net, bond_net_id); | 4884 | struct bond_net *bn = net_generic(net, bond_net_id); |
4885 | struct bonding *bond, *tmp_bond; | ||
4886 | LIST_HEAD(list); | ||
4850 | 4887 | ||
4851 | bond_destroy_sysfs(bn); | 4888 | bond_destroy_sysfs(bn); |
4852 | bond_destroy_proc_dir(bn); | 4889 | bond_destroy_proc_dir(bn); |
4890 | |||
4891 | /* Kill off any bonds created after unregistering bond rtnl ops */ | ||
4892 | rtnl_lock(); | ||
4893 | list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list) | ||
4894 | unregister_netdevice_queue(bond->dev, &list); | ||
4895 | unregister_netdevice_many(&list); | ||
4896 | rtnl_unlock(); | ||
4853 | } | 4897 | } |
4854 | 4898 | ||
4855 | static struct pernet_operations bond_net_ops = { | 4899 | static struct pernet_operations bond_net_ops = { |
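Note on the bonding hash rewrites above: the old code dereferenced transport headers that might still live in paged fragments; the new code uses pskb_may_pull()/pskb_network_may_pull() plus proto_ports_offset() and skb_header_pointer() to copy the two 16-bit port words safely. The sketch below shows the safe-access pattern in isolation, restricted to TCP/UDP where the ports are the first four bytes of the transport header; it is an assumption-laden simplification, not the bonding policy itself.

    #include <linux/in.h>
    #include <linux/ip.h>
    #include <linux/skbuff.h>
    #include <net/ip.h>

    /* XOR the L4 source and destination ports of an IPv4 packet, 0 if they
     * cannot be read safely (fragment, short packet, other protocol). */
    static u32 demo_l4_port_xor(struct sk_buff *skb)
    {
        int noff = skb_network_offset(skb);
        const struct iphdr *iph;
        __be16 _ports[2];
        const __be16 *ports;

        if (!pskb_may_pull(skb, noff + sizeof(*iph)))
            return 0;
        iph = ip_hdr(skb);
        if (ip_is_fragment(iph) ||
            (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP))
            return 0;

        /* Copy the port words even if they sit in a fragment page. */
        ports = skb_header_pointer(skb, noff + (iph->ihl << 2),
                                   sizeof(_ports), _ports);
        return ports ? ntohs(ports[0] ^ ports[1]) : 0;
    }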
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index db103e03ba05..ea7a388f4843 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c | |||
@@ -527,7 +527,7 @@ static ssize_t bonding_store_arp_interval(struct device *d, | |||
527 | goto out; | 527 | goto out; |
528 | } | 528 | } |
529 | if (new_value < 0) { | 529 | if (new_value < 0) { |
530 | pr_err("%s: Invalid arp_interval value %d not in range 1-%d; rejected.\n", | 530 | pr_err("%s: Invalid arp_interval value %d not in range 0-%d; rejected.\n", |
531 | bond->dev->name, new_value, INT_MAX); | 531 | bond->dev->name, new_value, INT_MAX); |
532 | ret = -EINVAL; | 532 | ret = -EINVAL; |
533 | goto out; | 533 | goto out; |
@@ -542,14 +542,15 @@ static ssize_t bonding_store_arp_interval(struct device *d, | |||
542 | pr_info("%s: Setting ARP monitoring interval to %d.\n", | 542 | pr_info("%s: Setting ARP monitoring interval to %d.\n", |
543 | bond->dev->name, new_value); | 543 | bond->dev->name, new_value); |
544 | bond->params.arp_interval = new_value; | 544 | bond->params.arp_interval = new_value; |
545 | if (bond->params.miimon) { | 545 | if (new_value) { |
546 | pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n", | 546 | if (bond->params.miimon) { |
547 | bond->dev->name, bond->dev->name); | 547 | pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n", |
548 | bond->params.miimon = 0; | 548 | bond->dev->name, bond->dev->name); |
549 | } | 549 | bond->params.miimon = 0; |
550 | if (!bond->params.arp_targets[0]) { | 550 | } |
551 | pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n", | 551 | if (!bond->params.arp_targets[0]) |
552 | bond->dev->name); | 552 | pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n", |
553 | bond->dev->name); | ||
553 | } | 554 | } |
554 | if (bond->dev->flags & IFF_UP) { | 555 | if (bond->dev->flags & IFF_UP) { |
555 | /* If the interface is up, we may need to fire off | 556 | /* If the interface is up, we may need to fire off |
@@ -557,10 +558,13 @@ static ssize_t bonding_store_arp_interval(struct device *d, | |||
557 | * timer will get fired off when the open function | 558 | * timer will get fired off when the open function |
558 | * is called. | 559 | * is called. |
559 | */ | 560 | */ |
560 | cancel_delayed_work_sync(&bond->mii_work); | 561 | if (!new_value) { |
561 | queue_delayed_work(bond->wq, &bond->arp_work, 0); | 562 | cancel_delayed_work_sync(&bond->arp_work); |
563 | } else { | ||
564 | cancel_delayed_work_sync(&bond->mii_work); | ||
565 | queue_delayed_work(bond->wq, &bond->arp_work, 0); | ||
566 | } | ||
562 | } | 567 | } |
563 | |||
564 | out: | 568 | out: |
565 | rtnl_unlock(); | 569 | rtnl_unlock(); |
566 | return ret; | 570 | return ret; |
@@ -702,7 +706,7 @@ static ssize_t bonding_store_downdelay(struct device *d, | |||
702 | } | 706 | } |
703 | if (new_value < 0) { | 707 | if (new_value < 0) { |
704 | pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n", | 708 | pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n", |
705 | bond->dev->name, new_value, 1, INT_MAX); | 709 | bond->dev->name, new_value, 0, INT_MAX); |
706 | ret = -EINVAL; | 710 | ret = -EINVAL; |
707 | goto out; | 711 | goto out; |
708 | } else { | 712 | } else { |
@@ -757,8 +761,8 @@ static ssize_t bonding_store_updelay(struct device *d, | |||
757 | goto out; | 761 | goto out; |
758 | } | 762 | } |
759 | if (new_value < 0) { | 763 | if (new_value < 0) { |
760 | pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n", | 764 | pr_err("%s: Invalid up delay value %d not in range %d-%d; rejected.\n", |
761 | bond->dev->name, new_value, 1, INT_MAX); | 765 | bond->dev->name, new_value, 0, INT_MAX); |
762 | ret = -EINVAL; | 766 | ret = -EINVAL; |
763 | goto out; | 767 | goto out; |
764 | } else { | 768 | } else { |
@@ -968,37 +972,37 @@ static ssize_t bonding_store_miimon(struct device *d, | |||
968 | } | 972 | } |
969 | if (new_value < 0) { | 973 | if (new_value < 0) { |
970 | pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n", | 974 | pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n", |
971 | bond->dev->name, new_value, 1, INT_MAX); | 975 | bond->dev->name, new_value, 0, INT_MAX); |
972 | ret = -EINVAL; | 976 | ret = -EINVAL; |
973 | goto out; | 977 | goto out; |
974 | } else { | 978 | } |
975 | pr_info("%s: Setting MII monitoring interval to %d.\n", | 979 | pr_info("%s: Setting MII monitoring interval to %d.\n", |
976 | bond->dev->name, new_value); | 980 | bond->dev->name, new_value); |
977 | bond->params.miimon = new_value; | 981 | bond->params.miimon = new_value; |
978 | if (bond->params.updelay) | 982 | if (bond->params.updelay) |
979 | pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n", | 983 | pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n", |
980 | bond->dev->name, | 984 | bond->dev->name, |
981 | bond->params.updelay * bond->params.miimon); | 985 | bond->params.updelay * bond->params.miimon); |
982 | if (bond->params.downdelay) | 986 | if (bond->params.downdelay) |
983 | pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n", | 987 | pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n", |
984 | bond->dev->name, | 988 | bond->dev->name, |
985 | bond->params.downdelay * bond->params.miimon); | 989 | bond->params.downdelay * bond->params.miimon); |
986 | if (bond->params.arp_interval) { | 990 | if (new_value && bond->params.arp_interval) { |
987 | pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n", | 991 | pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n", |
988 | bond->dev->name); | 992 | bond->dev->name); |
989 | bond->params.arp_interval = 0; | 993 | bond->params.arp_interval = 0; |
990 | if (bond->params.arp_validate) { | 994 | if (bond->params.arp_validate) |
991 | bond->params.arp_validate = | 995 | bond->params.arp_validate = BOND_ARP_VALIDATE_NONE; |
992 | BOND_ARP_VALIDATE_NONE; | 996 | } |
993 | } | 997 | if (bond->dev->flags & IFF_UP) { |
994 | } | 998 | /* If the interface is up, we may need to fire off |
995 | 999 | * the MII timer. If the interface is down, the | |
996 | if (bond->dev->flags & IFF_UP) { | 1000 | * timer will get fired off when the open function |
997 | /* If the interface is up, we may need to fire off | 1001 | * is called. |
998 | * the MII timer. If the interface is down, the | 1002 | */ |
999 | * timer will get fired off when the open function | 1003 | if (!new_value) { |
1000 | * is called. | 1004 | cancel_delayed_work_sync(&bond->mii_work); |
1001 | */ | 1005 | } else { |
1002 | cancel_delayed_work_sync(&bond->arp_work); | 1006 | cancel_delayed_work_sync(&bond->arp_work); |
1003 | queue_delayed_work(bond->wq, &bond->mii_work, 0); | 1007 | queue_delayed_work(bond->wq, &bond->mii_work, 0); |
1004 | } | 1008 | } |
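Note on the bond_sysfs hunks above: writing 0 to arp_interval or miimon now means "monitoring off", so the store handlers cancel the corresponding delayed work instead of re-arming it, and the mutual-exclusion messages only fire for non-zero values. A small sketch of that toggle with a placeholder work item:

    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    /* Re-arm or stop a periodic monitor depending on the new interval. */
    static void demo_update_monitor(struct workqueue_struct *wq,
                                    struct delayed_work *work, int interval_ms)
    {
        if (!interval_ms) {
            cancel_delayed_work_sync(work);   /* 0 disables monitoring */
            return;
        }
        queue_delayed_work(wq, work, msecs_to_jiffies(interval_ms));
    }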
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c index f32b9fc6a983..9aa0c64c33c8 100644 --- a/drivers/net/can/mcp251x.c +++ b/drivers/net/can/mcp251x.c | |||
@@ -929,6 +929,7 @@ static int mcp251x_open(struct net_device *net) | |||
929 | struct mcp251x_priv *priv = netdev_priv(net); | 929 | struct mcp251x_priv *priv = netdev_priv(net); |
930 | struct spi_device *spi = priv->spi; | 930 | struct spi_device *spi = priv->spi; |
931 | struct mcp251x_platform_data *pdata = spi->dev.platform_data; | 931 | struct mcp251x_platform_data *pdata = spi->dev.platform_data; |
932 | unsigned long flags; | ||
932 | int ret; | 933 | int ret; |
933 | 934 | ||
934 | ret = open_candev(net); | 935 | ret = open_candev(net); |
@@ -945,9 +946,14 @@ static int mcp251x_open(struct net_device *net) | |||
945 | priv->tx_skb = NULL; | 946 | priv->tx_skb = NULL; |
946 | priv->tx_len = 0; | 947 | priv->tx_len = 0; |
947 | 948 | ||
949 | flags = IRQF_ONESHOT; | ||
950 | if (pdata->irq_flags) | ||
951 | flags |= pdata->irq_flags; | ||
952 | else | ||
953 | flags |= IRQF_TRIGGER_FALLING; | ||
954 | |||
948 | ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist, | 955 | ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist, |
949 | pdata->irq_flags ? pdata->irq_flags : IRQF_TRIGGER_FALLING, | 956 | flags, DEVICE_NAME, priv); |
950 | DEVICE_NAME, priv); | ||
951 | if (ret) { | 957 | if (ret) { |
952 | dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq); | 958 | dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq); |
953 | if (pdata->transceiver_enable) | 959 | if (pdata->transceiver_enable) |
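Note on the mcp251x hunk above: when a threaded handler is registered with a NULL primary handler, the IRQ core requires IRQF_ONESHOT so the line stays masked until the thread finishes; the patch ORs it in unconditionally and keeps the board-provided trigger flags. A sketch of the registration pattern with placeholder names:

    #include <linux/interrupt.h>

    /* Purely threaded handler: ONESHOT keeps the line masked until
     * thread_fn() returns, avoiding an interrupt storm on level lines. */
    static int demo_request_can_irq(int irq, irq_handler_t thread_fn,
                                    unsigned long board_flags, void *priv)
    {
        unsigned long flags = IRQF_ONESHOT;

        flags |= board_flags ? board_flags : IRQF_TRIGGER_FALLING;

        return request_threaded_irq(irq, NULL, thread_fn, flags,
                                    "demo-can", priv);
    }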
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig index b39ca5b3ea7f..ff2ba86cd4a4 100644 --- a/drivers/net/can/sja1000/Kconfig +++ b/drivers/net/can/sja1000/Kconfig | |||
@@ -46,6 +46,7 @@ config CAN_EMS_PCI | |||
46 | config CAN_PEAK_PCMCIA | 46 | config CAN_PEAK_PCMCIA |
47 | tristate "PEAK PCAN-PC Card" | 47 | tristate "PEAK PCAN-PC Card" |
48 | depends on PCMCIA | 48 | depends on PCMCIA |
49 | depends on HAS_IOPORT | ||
49 | ---help--- | 50 | ---help--- |
50 | This driver is for the PCAN-PC Card PCMCIA adapter (1 or 2 channels) | 51 | This driver is for the PCAN-PC Card PCMCIA adapter (1 or 2 channels) |
51 | from PEAK-System (http://www.peak-system.com). To compile this | 52 | from PEAK-System (http://www.peak-system.com). To compile this |
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c index a042cdc260dc..3c18d7d000ed 100644 --- a/drivers/net/can/sja1000/plx_pci.c +++ b/drivers/net/can/sja1000/plx_pci.c | |||
@@ -348,7 +348,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv) | |||
348 | */ | 348 | */ |
349 | if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) == | 349 | if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) == |
350 | REG_CR_BASICCAN_INITIAL && | 350 | REG_CR_BASICCAN_INITIAL && |
351 | (priv->read_reg(priv, REG_SR) == REG_SR_BASICCAN_INITIAL) && | 351 | (priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_BASICCAN_INITIAL) && |
352 | (priv->read_reg(priv, REG_IR) == REG_IR_BASICCAN_INITIAL)) | 352 | (priv->read_reg(priv, REG_IR) == REG_IR_BASICCAN_INITIAL)) |
353 | flag = 1; | 353 | flag = 1; |
354 | 354 | ||
@@ -360,7 +360,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv) | |||
360 | * See states on p. 23 of the Datasheet. | 360 | * See states on p. 23 of the Datasheet. |
361 | */ | 361 | */ |
362 | if (priv->read_reg(priv, REG_MOD) == REG_MOD_PELICAN_INITIAL && | 362 | if (priv->read_reg(priv, REG_MOD) == REG_MOD_PELICAN_INITIAL && |
363 | priv->read_reg(priv, REG_SR) == REG_SR_PELICAN_INITIAL && | 363 | priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_PELICAN_INITIAL && |
364 | priv->read_reg(priv, REG_IR) == REG_IR_PELICAN_INITIAL) | 364 | priv->read_reg(priv, REG_IR) == REG_IR_PELICAN_INITIAL) |
365 | return flag; | 365 | return flag; |
366 | 366 | ||
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index daf4013a8fc7..e4df307eaa90 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c | |||
@@ -92,7 +92,7 @@ static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val) | |||
92 | */ | 92 | */ |
93 | spin_lock_irqsave(&priv->cmdreg_lock, flags); | 93 | spin_lock_irqsave(&priv->cmdreg_lock, flags); |
94 | priv->write_reg(priv, REG_CMR, val); | 94 | priv->write_reg(priv, REG_CMR, val); |
95 | priv->read_reg(priv, REG_SR); | 95 | priv->read_reg(priv, SJA1000_REG_SR); |
96 | spin_unlock_irqrestore(&priv->cmdreg_lock, flags); | 96 | spin_unlock_irqrestore(&priv->cmdreg_lock, flags); |
97 | } | 97 | } |
98 | 98 | ||
@@ -502,7 +502,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id) | |||
502 | 502 | ||
503 | while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) { | 503 | while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) { |
504 | n++; | 504 | n++; |
505 | status = priv->read_reg(priv, REG_SR); | 505 | status = priv->read_reg(priv, SJA1000_REG_SR); |
506 | /* check for absent controller due to hw unplug */ | 506 | /* check for absent controller due to hw unplug */ |
507 | if (status == 0xFF && sja1000_is_absent(priv)) | 507 | if (status == 0xFF && sja1000_is_absent(priv)) |
508 | return IRQ_NONE; | 508 | return IRQ_NONE; |
@@ -530,7 +530,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id) | |||
530 | /* receive interrupt */ | 530 | /* receive interrupt */ |
531 | while (status & SR_RBS) { | 531 | while (status & SR_RBS) { |
532 | sja1000_rx(dev); | 532 | sja1000_rx(dev); |
533 | status = priv->read_reg(priv, REG_SR); | 533 | status = priv->read_reg(priv, SJA1000_REG_SR); |
534 | /* check for absent controller */ | 534 | /* check for absent controller */ |
535 | if (status == 0xFF && sja1000_is_absent(priv)) | 535 | if (status == 0xFF && sja1000_is_absent(priv)) |
536 | return IRQ_NONE; | 536 | return IRQ_NONE; |
diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h index afa99847a510..aa48e053da27 100644 --- a/drivers/net/can/sja1000/sja1000.h +++ b/drivers/net/can/sja1000/sja1000.h | |||
@@ -56,7 +56,7 @@ | |||
56 | /* SJA1000 registers - manual section 6.4 (Pelican Mode) */ | 56 | /* SJA1000 registers - manual section 6.4 (Pelican Mode) */ |
57 | #define REG_MOD 0x00 | 57 | #define REG_MOD 0x00 |
58 | #define REG_CMR 0x01 | 58 | #define REG_CMR 0x01 |
59 | #define REG_SR 0x02 | 59 | #define SJA1000_REG_SR 0x02 |
60 | #define REG_IR 0x03 | 60 | #define REG_IR 0x03 |
61 | #define REG_IER 0x04 | 61 | #define REG_IER 0x04 |
62 | #define REG_ALC 0x0B | 62 | #define REG_ALC 0x0B |
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c index 6433b81256cd..8e0c4a001939 100644 --- a/drivers/net/can/sja1000/sja1000_of_platform.c +++ b/drivers/net/can/sja1000/sja1000_of_platform.c | |||
@@ -96,8 +96,8 @@ static int sja1000_ofp_probe(struct platform_device *ofdev) | |||
96 | struct net_device *dev; | 96 | struct net_device *dev; |
97 | struct sja1000_priv *priv; | 97 | struct sja1000_priv *priv; |
98 | struct resource res; | 98 | struct resource res; |
99 | const u32 *prop; | 99 | u32 prop; |
100 | int err, irq, res_size, prop_size; | 100 | int err, irq, res_size; |
101 | void __iomem *base; | 101 | void __iomem *base; |
102 | 102 | ||
103 | err = of_address_to_resource(np, 0, &res); | 103 | err = of_address_to_resource(np, 0, &res); |
@@ -138,27 +138,27 @@ static int sja1000_ofp_probe(struct platform_device *ofdev) | |||
138 | priv->read_reg = sja1000_ofp_read_reg; | 138 | priv->read_reg = sja1000_ofp_read_reg; |
139 | priv->write_reg = sja1000_ofp_write_reg; | 139 | priv->write_reg = sja1000_ofp_write_reg; |
140 | 140 | ||
141 | prop = of_get_property(np, "nxp,external-clock-frequency", &prop_size); | 141 | err = of_property_read_u32(np, "nxp,external-clock-frequency", &prop); |
142 | if (prop && (prop_size == sizeof(u32))) | 142 | if (!err) |
143 | priv->can.clock.freq = *prop / 2; | 143 | priv->can.clock.freq = prop / 2; |
144 | else | 144 | else |
145 | priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK; /* default */ | 145 | priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK; /* default */ |
146 | 146 | ||
147 | prop = of_get_property(np, "nxp,tx-output-mode", &prop_size); | 147 | err = of_property_read_u32(np, "nxp,tx-output-mode", &prop); |
148 | if (prop && (prop_size == sizeof(u32))) | 148 | if (!err) |
149 | priv->ocr |= *prop & OCR_MODE_MASK; | 149 | priv->ocr |= prop & OCR_MODE_MASK; |
150 | else | 150 | else |
151 | priv->ocr |= OCR_MODE_NORMAL; /* default */ | 151 | priv->ocr |= OCR_MODE_NORMAL; /* default */ |
152 | 152 | ||
153 | prop = of_get_property(np, "nxp,tx-output-config", &prop_size); | 153 | err = of_property_read_u32(np, "nxp,tx-output-config", &prop); |
154 | if (prop && (prop_size == sizeof(u32))) | 154 | if (!err) |
155 | priv->ocr |= (*prop << OCR_TX_SHIFT) & OCR_TX_MASK; | 155 | priv->ocr |= (prop << OCR_TX_SHIFT) & OCR_TX_MASK; |
156 | else | 156 | else |
157 | priv->ocr |= OCR_TX0_PULLDOWN; /* default */ | 157 | priv->ocr |= OCR_TX0_PULLDOWN; /* default */ |
158 | 158 | ||
159 | prop = of_get_property(np, "nxp,clock-out-frequency", &prop_size); | 159 | err = of_property_read_u32(np, "nxp,clock-out-frequency", &prop); |
160 | if (prop && (prop_size == sizeof(u32)) && *prop) { | 160 | if (!err && prop) { |
161 | u32 divider = priv->can.clock.freq * 2 / *prop; | 161 | u32 divider = priv->can.clock.freq * 2 / prop; |
162 | 162 | ||
163 | if (divider > 1) | 163 | if (divider > 1) |
164 | priv->cdr |= divider / 2 - 1; | 164 | priv->cdr |= divider / 2 - 1; |
@@ -168,8 +168,7 @@ static int sja1000_ofp_probe(struct platform_device *ofdev) | |||
168 | priv->cdr |= CDR_CLK_OFF; /* default */ | 168 | priv->cdr |= CDR_CLK_OFF; /* default */ |
169 | } | 169 | } |
170 | 170 | ||
171 | prop = of_get_property(np, "nxp,no-comparator-bypass", NULL); | 171 | if (!of_property_read_bool(np, "nxp,no-comparator-bypass")) |
172 | if (!prop) | ||
173 | priv->cdr |= CDR_CBP; /* default */ | 172 | priv->cdr |= CDR_CBP; /* default */ |
174 | 173 | ||
175 | priv->irq_flags = IRQF_SHARED; | 174 | priv->irq_flags = IRQF_SHARED; |
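Note on the sja1000_of_platform hunk above: the raw of_get_property() pointer-and-size checks are replaced by of_property_read_u32() and of_property_read_bool(), which handle missing properties and byte order for the caller. A minimal sketch of reading an optional u32 property with a fallback default (the property name and default are placeholders):

    #include <linux/of.h>

    /* Read an optional u32 property, falling back to a default when the
     * node does not carry it or the value is malformed. */
    static u32 demo_read_optional_u32(struct device_node *np,
                                      const char *name, u32 dflt)
    {
        u32 val;

        if (of_property_read_u32(np, name, &val))
            return dflt;

        return val;
    }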
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index cab306a9888e..e1d26433d619 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c | |||
@@ -828,7 +828,7 @@ static int ax_probe(struct platform_device *pdev) | |||
828 | struct ei_device *ei_local; | 828 | struct ei_device *ei_local; |
829 | struct ax_device *ax; | 829 | struct ax_device *ax; |
830 | struct resource *irq, *mem, *mem2; | 830 | struct resource *irq, *mem, *mem2; |
831 | resource_size_t mem_size, mem2_size = 0; | 831 | unsigned long mem_size, mem2_size = 0; |
832 | int ret = 0; | 832 | int ret = 0; |
833 | 833 | ||
834 | dev = ax__alloc_ei_netdev(sizeof(struct ax_device)); | 834 | dev = ax__alloc_ei_netdev(sizeof(struct ax_device)); |
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h index 829b5ad71d0d..b5fd934585e9 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e.h +++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h | |||
@@ -186,7 +186,7 @@ struct atl1e_tpd_desc { | |||
186 | /* how about 0x2000 */ | 186 | /* how about 0x2000 */ |
187 | #define MAX_TX_BUF_LEN 0x2000 | 187 | #define MAX_TX_BUF_LEN 0x2000 |
188 | #define MAX_TX_BUF_SHIFT 13 | 188 | #define MAX_TX_BUF_SHIFT 13 |
189 | /*#define MAX_TX_BUF_LEN 0x3000 */ | 189 | #define MAX_TSO_SEG_SIZE 0x3c00 |
190 | 190 | ||
191 | /* rrs word 1 bit 0:31 */ | 191 | /* rrs word 1 bit 0:31 */ |
192 | #define RRS_RX_CSUM_MASK 0xFFFF | 192 | #define RRS_RX_CSUM_MASK 0xFFFF |
@@ -438,7 +438,6 @@ struct atl1e_adapter { | |||
438 | struct atl1e_hw hw; | 438 | struct atl1e_hw hw; |
439 | struct atl1e_hw_stats hw_stats; | 439 | struct atl1e_hw_stats hw_stats; |
440 | 440 | ||
441 | bool have_msi; | ||
442 | u32 wol; | 441 | u32 wol; |
443 | u16 link_speed; | 442 | u16 link_speed; |
444 | u16 link_duplex; | 443 | u16 link_duplex; |
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 92f4734f860d..ac25f05ff68f 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c | |||
@@ -1849,34 +1849,19 @@ static void atl1e_free_irq(struct atl1e_adapter *adapter) | |||
1849 | struct net_device *netdev = adapter->netdev; | 1849 | struct net_device *netdev = adapter->netdev; |
1850 | 1850 | ||
1851 | free_irq(adapter->pdev->irq, netdev); | 1851 | free_irq(adapter->pdev->irq, netdev); |
1852 | |||
1853 | if (adapter->have_msi) | ||
1854 | pci_disable_msi(adapter->pdev); | ||
1855 | } | 1852 | } |
1856 | 1853 | ||
1857 | static int atl1e_request_irq(struct atl1e_adapter *adapter) | 1854 | static int atl1e_request_irq(struct atl1e_adapter *adapter) |
1858 | { | 1855 | { |
1859 | struct pci_dev *pdev = adapter->pdev; | 1856 | struct pci_dev *pdev = adapter->pdev; |
1860 | struct net_device *netdev = adapter->netdev; | 1857 | struct net_device *netdev = adapter->netdev; |
1861 | int flags = 0; | ||
1862 | int err = 0; | 1858 | int err = 0; |
1863 | 1859 | ||
1864 | adapter->have_msi = true; | 1860 | err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED, netdev->name, |
1865 | err = pci_enable_msi(pdev); | 1861 | netdev); |
1866 | if (err) { | ||
1867 | netdev_dbg(netdev, | ||
1868 | "Unable to allocate MSI interrupt Error: %d\n", err); | ||
1869 | adapter->have_msi = false; | ||
1870 | } | ||
1871 | |||
1872 | if (!adapter->have_msi) | ||
1873 | flags |= IRQF_SHARED; | ||
1874 | err = request_irq(pdev->irq, atl1e_intr, flags, netdev->name, netdev); | ||
1875 | if (err) { | 1862 | if (err) { |
1876 | netdev_dbg(adapter->netdev, | 1863 | netdev_dbg(adapter->netdev, |
1877 | "Unable to allocate interrupt Error: %d\n", err); | 1864 | "Unable to allocate interrupt Error: %d\n", err); |
1878 | if (adapter->have_msi) | ||
1879 | pci_disable_msi(pdev); | ||
1880 | return err; | 1865 | return err; |
1881 | } | 1866 | } |
1882 | netdev_dbg(netdev, "atl1e_request_irq OK\n"); | 1867 | netdev_dbg(netdev, "atl1e_request_irq OK\n"); |
@@ -2344,6 +2329,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2344 | 2329 | ||
2345 | INIT_WORK(&adapter->reset_task, atl1e_reset_task); | 2330 | INIT_WORK(&adapter->reset_task, atl1e_reset_task); |
2346 | INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task); | 2331 | INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task); |
2332 | netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE); | ||
2347 | err = register_netdev(netdev); | 2333 | err = register_netdev(netdev); |
2348 | if (err) { | 2334 | if (err) { |
2349 | netdev_err(netdev, "register netdevice failed\n"); | 2335 | netdev_err(netdev, "register netdevice failed\n"); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 4046f97378c2..57619dd4a92b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
@@ -2614,6 +2614,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
2614 | } | 2614 | } |
2615 | } | 2615 | } |
2616 | 2616 | ||
2617 | /* initialize FW coalescing state machines in RAM */ | ||
2618 | bnx2x_update_coalesce(bp); | ||
2619 | |||
2617 | /* setup the leading queue */ | 2620 | /* setup the leading queue */ |
2618 | rc = bnx2x_setup_leading(bp); | 2621 | rc = bnx2x_setup_leading(bp); |
2619 | if (rc) { | 2622 | if (rc) { |
@@ -4580,11 +4583,11 @@ static void storm_memset_hc_disable(struct bnx2x *bp, u8 port, | |||
4580 | u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); | 4583 | u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); |
4581 | u32 addr = BAR_CSTRORM_INTMEM + | 4584 | u32 addr = BAR_CSTRORM_INTMEM + |
4582 | CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index); | 4585 | CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index); |
4583 | u16 flags = REG_RD16(bp, addr); | 4586 | u8 flags = REG_RD8(bp, addr); |
4584 | /* clear and set */ | 4587 | /* clear and set */ |
4585 | flags &= ~HC_INDEX_DATA_HC_ENABLED; | 4588 | flags &= ~HC_INDEX_DATA_HC_ENABLED; |
4586 | flags |= enable_flag; | 4589 | flags |= enable_flag; |
4587 | REG_WR16(bp, addr, flags); | 4590 | REG_WR8(bp, addr, flags); |
4588 | DP(NETIF_MSG_IFUP, | 4591 | DP(NETIF_MSG_IFUP, |
4589 | "port %x fw_sb_id %d sb_index %d disable %d\n", | 4592 | "port %x fw_sb_id %d sb_index %d disable %d\n", |
4590 | port, fw_sb_id, sb_index, disable); | 4593 | port, fw_sb_id, sb_index, disable); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 77ebae0ac64a..0283f343b0d1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | |||
@@ -13437,13 +13437,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params, | |||
13437 | { | 13437 | { |
13438 | struct bnx2x *bp = params->bp; | 13438 | struct bnx2x *bp = params->bp; |
13439 | u16 base_page, next_page, not_kr2_device, lane; | 13439 | u16 base_page, next_page, not_kr2_device, lane; |
13440 | int sigdet = bnx2x_warpcore_get_sigdet(phy, params); | 13440 | int sigdet; |
13441 | |||
13442 | if (!sigdet) { | ||
13443 | if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) | ||
13444 | bnx2x_kr2_recovery(params, vars, phy); | ||
13445 | return; | ||
13446 | } | ||
13447 | 13441 | ||
13448 | /* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery | 13442 | /* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery |
13449 | * since some switches tend to reinit the AN process and clear the | 13443 | * since some switches tend to reinit the AN process and clear the |
@@ -13454,6 +13448,16 @@ static void bnx2x_check_kr2_wa(struct link_params *params, | |||
13454 | vars->check_kr2_recovery_cnt--; | 13448 | vars->check_kr2_recovery_cnt--; |
13455 | return; | 13449 | return; |
13456 | } | 13450 | } |
13451 | |||
13452 | sigdet = bnx2x_warpcore_get_sigdet(phy, params); | ||
13453 | if (!sigdet) { | ||
13454 | if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { | ||
13455 | bnx2x_kr2_recovery(params, vars, phy); | ||
13456 | DP(NETIF_MSG_LINK, "No sigdet\n"); | ||
13457 | } | ||
13458 | return; | ||
13459 | } | ||
13460 | |||
13457 | lane = bnx2x_get_warpcore_lane(phy, params); | 13461 | lane = bnx2x_get_warpcore_lane(phy, params); |
13458 | CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, | 13462 | CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, |
13459 | MDIO_AER_BLOCK_AER_REG, lane); | 13463 | MDIO_AER_BLOCK_AER_REG, lane); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index e81a747ea8ce..c50696b396f1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -4947,7 +4947,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp) | |||
4947 | q); | 4947 | q); |
4948 | } | 4948 | } |
4949 | 4949 | ||
4950 | if (!NO_FCOE(bp)) { | 4950 | if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) { |
4951 | fp = &bp->fp[FCOE_IDX(bp)]; | 4951 | fp = &bp->fp[FCOE_IDX(bp)]; |
4952 | queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; | 4952 | queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; |
4953 | 4953 | ||
@@ -9878,6 +9878,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) | |||
9878 | REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0); | 9878 | REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0); |
9879 | } | 9879 | } |
9880 | } | 9880 | } |
9881 | if (!CHIP_IS_E1x(bp)) | ||
9882 | /* block FW from writing to host */ | ||
9883 | REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); | ||
9884 | |||
9881 | /* wait until BRB is empty */ | 9885 | /* wait until BRB is empty */ |
9882 | tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS); | 9886 | tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS); |
9883 | while (timer_count) { | 9887 | while (timer_count) { |
@@ -13354,6 +13358,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev) | |||
13354 | RCU_INIT_POINTER(bp->cnic_ops, NULL); | 13358 | RCU_INIT_POINTER(bp->cnic_ops, NULL); |
13355 | mutex_unlock(&bp->cnic_mutex); | 13359 | mutex_unlock(&bp->cnic_mutex); |
13356 | synchronize_rcu(); | 13360 | synchronize_rcu(); |
13361 | bp->cnic_enabled = false; | ||
13357 | kfree(bp->cnic_kwq); | 13362 | kfree(bp->cnic_kwq); |
13358 | bp->cnic_kwq = NULL; | 13363 | bp->cnic_kwq = NULL; |
13359 | 13364 | ||
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 67d2663b3974..17a972734ba7 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
@@ -14604,8 +14604,11 @@ static void tg3_read_vpd(struct tg3 *tp) | |||
14604 | if (j + len > block_end) | 14604 | if (j + len > block_end) |
14605 | goto partno; | 14605 | goto partno; |
14606 | 14606 | ||
14607 | memcpy(tp->fw_ver, &vpd_data[j], len); | 14607 | if (len >= sizeof(tp->fw_ver)) |
14608 | strncat(tp->fw_ver, " bc ", vpdlen - len - 1); | 14608 | len = sizeof(tp->fw_ver) - 1; |
14609 | memset(tp->fw_ver, 0, sizeof(tp->fw_ver)); | ||
14610 | snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, | ||
14611 | &vpd_data[j]); | ||
14609 | } | 14612 | } |
14610 | 14613 | ||
14611 | partno: | 14614 | partno: |
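Note on the tg3 hunk above: the memcpy()+strncat() pair could overrun tp->fw_ver when the VPD string was longer than the buffer; the fix clamps the length and does one bounded snprintf() with a precision specifier. The same idiom in isolation, with placeholder buffer and field names:

    #include <linux/kernel.h>
    #include <linux/string.h>
    #include <linux/types.h>

    /* Copy at most len bytes of an unterminated VPD field into dst and
     * append " bc ", never writing past dst_size. */
    static void demo_copy_fw_ver(char *dst, size_t dst_size,
                                 const u8 *vpd, int len)
    {
        if (len >= (int)dst_size)
            len = dst_size - 1;

        memset(dst, 0, dst_size);
        snprintf(dst, dst_size, "%.*s bc ", len, vpd);
    }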
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index a170065b5973..b0ebc9f6d55e 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c | |||
@@ -163,6 +163,7 @@ | |||
163 | #define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */ | 163 | #define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */ |
164 | 164 | ||
165 | /* XGMAC_INT_STAT reg */ | 165 | /* XGMAC_INT_STAT reg */ |
166 | #define XGMAC_INT_STAT_PMTIM 0x00800000 /* PMT Interrupt Mask */ | ||
166 | #define XGMAC_INT_STAT_PMT 0x0080 /* PMT Interrupt Status */ | 167 | #define XGMAC_INT_STAT_PMT 0x0080 /* PMT Interrupt Status */ |
167 | #define XGMAC_INT_STAT_LPI 0x0040 /* LPI Interrupt Status */ | 168 | #define XGMAC_INT_STAT_LPI 0x0040 /* LPI Interrupt Status */ |
168 | 169 | ||
@@ -960,6 +961,9 @@ static int xgmac_hw_init(struct net_device *dev) | |||
960 | writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS); | 961 | writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS); |
961 | writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA); | 962 | writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA); |
962 | 963 | ||
964 | /* Mask power mgt interrupt */ | ||
965 | writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT); | ||
966 | |||
963 | /* XGMAC requires AXI bus init. This is a 'magic number' for now */ | 967 | /* XGMAC requires AXI bus init. This is a 'magic number' for now */ |
964 | writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS); | 968 | writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS); |
965 | 969 | ||
@@ -1141,6 +1145,9 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit) | |||
1141 | struct sk_buff *skb; | 1145 | struct sk_buff *skb; |
1142 | int frame_len; | 1146 | int frame_len; |
1143 | 1147 | ||
1148 | if (!dma_ring_cnt(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ)) | ||
1149 | break; | ||
1150 | |||
1144 | entry = priv->rx_tail; | 1151 | entry = priv->rx_tail; |
1145 | p = priv->dma_rx + entry; | 1152 | p = priv->dma_rx + entry; |
1146 | if (desc_get_owner(p)) | 1153 | if (desc_get_owner(p)) |
@@ -1825,7 +1832,7 @@ static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode) | |||
1825 | unsigned int pmt = 0; | 1832 | unsigned int pmt = 0; |
1826 | 1833 | ||
1827 | if (mode & WAKE_MAGIC) | 1834 | if (mode & WAKE_MAGIC) |
1828 | pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT; | 1835 | pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT_EN; |
1829 | if (mode & WAKE_UCAST) | 1836 | if (mode & WAKE_UCAST) |
1830 | pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST; | 1837 | pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST; |
1831 | 1838 | ||
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 8cdf02503d13..9eada8e86078 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c | |||
@@ -257,6 +257,107 @@ static void dm9000_dumpblk_32bit(void __iomem *reg, int count) | |||
257 | tmp = readl(reg); | 257 | tmp = readl(reg); |
258 | } | 258 | } |
259 | 259 | ||
260 | /* | ||
261 | * Sleep, either by using msleep() or if we are suspending, then | ||
262 | * use mdelay() to sleep. | ||
263 | */ | ||
264 | static void dm9000_msleep(board_info_t *db, unsigned int ms) | ||
265 | { | ||
266 | if (db->in_suspend) | ||
267 | mdelay(ms); | ||
268 | else | ||
269 | msleep(ms); | ||
270 | } | ||
271 | |||
272 | /* Read a word from phyxcer */ | ||
273 | static int | ||
274 | dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg) | ||
275 | { | ||
276 | board_info_t *db = netdev_priv(dev); | ||
277 | unsigned long flags; | ||
278 | unsigned int reg_save; | ||
279 | int ret; | ||
280 | |||
281 | mutex_lock(&db->addr_lock); | ||
282 | |||
283 | spin_lock_irqsave(&db->lock, flags); | ||
284 | |||
285 | /* Save previous register address */ | ||
286 | reg_save = readb(db->io_addr); | ||
287 | |||
288 | /* Fill the phyxcer register into REG_0C */ | ||
289 | iow(db, DM9000_EPAR, DM9000_PHY | reg); | ||
290 | |||
291 | /* Issue phyxcer read command */ | ||
292 | iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); | ||
293 | |||
294 | writeb(reg_save, db->io_addr); | ||
295 | spin_unlock_irqrestore(&db->lock, flags); | ||
296 | |||
297 | dm9000_msleep(db, 1); /* Wait read complete */ | ||
298 | |||
299 | spin_lock_irqsave(&db->lock, flags); | ||
300 | reg_save = readb(db->io_addr); | ||
301 | |||
302 | iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */ | ||
303 | |||
304 | /* The read data keeps on REG_0D & REG_0E */ | ||
305 | ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL); | ||
306 | |||
307 | /* restore the previous address */ | ||
308 | writeb(reg_save, db->io_addr); | ||
309 | spin_unlock_irqrestore(&db->lock, flags); | ||
310 | |||
311 | mutex_unlock(&db->addr_lock); | ||
312 | |||
313 | dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret); | ||
314 | return ret; | ||
315 | } | ||
316 | |||
317 | /* Write a word to phyxcer */ | ||
318 | static void | ||
319 | dm9000_phy_write(struct net_device *dev, | ||
320 | int phyaddr_unused, int reg, int value) | ||
321 | { | ||
322 | board_info_t *db = netdev_priv(dev); | ||
323 | unsigned long flags; | ||
324 | unsigned long reg_save; | ||
325 | |||
326 | dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value); | ||
327 | mutex_lock(&db->addr_lock); | ||
328 | |||
329 | spin_lock_irqsave(&db->lock, flags); | ||
330 | |||
331 | /* Save previous register address */ | ||
332 | reg_save = readb(db->io_addr); | ||
333 | |||
334 | /* Fill the phyxcer register into REG_0C */ | ||
335 | iow(db, DM9000_EPAR, DM9000_PHY | reg); | ||
336 | |||
337 | /* Fill the written data into REG_0D & REG_0E */ | ||
338 | iow(db, DM9000_EPDRL, value); | ||
339 | iow(db, DM9000_EPDRH, value >> 8); | ||
340 | |||
341 | /* Issue phyxcer write command */ | ||
342 | iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); | ||
343 | |||
344 | writeb(reg_save, db->io_addr); | ||
345 | spin_unlock_irqrestore(&db->lock, flags); | ||
346 | |||
347 | dm9000_msleep(db, 1); /* Wait write complete */ | ||
348 | |||
349 | spin_lock_irqsave(&db->lock, flags); | ||
350 | reg_save = readb(db->io_addr); | ||
351 | |||
352 | iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */ | ||
353 | |||
354 | /* restore the previous address */ | ||
355 | writeb(reg_save, db->io_addr); | ||
356 | |||
357 | spin_unlock_irqrestore(&db->lock, flags); | ||
358 | mutex_unlock(&db->addr_lock); | ||
359 | } | ||
360 | |||
260 | /* dm9000_set_io | 361 | /* dm9000_set_io |
261 | * | 362 | * |
262 | * select the specified set of io routines to use with the | 363 | * select the specified set of io routines to use with the |
@@ -795,6 +896,9 @@ dm9000_init_dm9000(struct net_device *dev) | |||
795 | 896 | ||
796 | iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ | 897 | iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ |
797 | 898 | ||
899 | dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */ | ||
900 | dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */ | ||
901 | |||
798 | ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; | 902 | ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; |
799 | 903 | ||
800 | /* if wol is needed, then always set NCR_WAKEEN otherwise we end | 904 | /* if wol is needed, then always set NCR_WAKEEN otherwise we end |
@@ -1201,109 +1305,6 @@ dm9000_open(struct net_device *dev) | |||
1201 | return 0; | 1305 | return 0; |
1202 | } | 1306 | } |
1203 | 1307 | ||
1204 | /* | ||
1205 | * Sleep, either by using msleep() or if we are suspending, then | ||
1206 | * use mdelay() to sleep. | ||
1207 | */ | ||
1208 | static void dm9000_msleep(board_info_t *db, unsigned int ms) | ||
1209 | { | ||
1210 | if (db->in_suspend) | ||
1211 | mdelay(ms); | ||
1212 | else | ||
1213 | msleep(ms); | ||
1214 | } | ||
1215 | |||
1216 | /* | ||
1217 | * Read a word from phyxcer | ||
1218 | */ | ||
1219 | static int | ||
1220 | dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg) | ||
1221 | { | ||
1222 | board_info_t *db = netdev_priv(dev); | ||
1223 | unsigned long flags; | ||
1224 | unsigned int reg_save; | ||
1225 | int ret; | ||
1226 | |||
1227 | mutex_lock(&db->addr_lock); | ||
1228 | |||
1229 | spin_lock_irqsave(&db->lock,flags); | ||
1230 | |||
1231 | /* Save previous register address */ | ||
1232 | reg_save = readb(db->io_addr); | ||
1233 | |||
1234 | /* Fill the phyxcer register into REG_0C */ | ||
1235 | iow(db, DM9000_EPAR, DM9000_PHY | reg); | ||
1236 | |||
1237 | iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); /* Issue phyxcer read command */ | ||
1238 | |||
1239 | writeb(reg_save, db->io_addr); | ||
1240 | spin_unlock_irqrestore(&db->lock,flags); | ||
1241 | |||
1242 | dm9000_msleep(db, 1); /* Wait read complete */ | ||
1243 | |||
1244 | spin_lock_irqsave(&db->lock,flags); | ||
1245 | reg_save = readb(db->io_addr); | ||
1246 | |||
1247 | iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */ | ||
1248 | |||
1249 | /* The read data keeps on REG_0D & REG_0E */ | ||
1250 | ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL); | ||
1251 | |||
1252 | /* restore the previous address */ | ||
1253 | writeb(reg_save, db->io_addr); | ||
1254 | spin_unlock_irqrestore(&db->lock,flags); | ||
1255 | |||
1256 | mutex_unlock(&db->addr_lock); | ||
1257 | |||
1258 | dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret); | ||
1259 | return ret; | ||
1260 | } | ||
1261 | |||
1262 | /* | ||
1263 | * Write a word to phyxcer | ||
1264 | */ | ||
1265 | static void | ||
1266 | dm9000_phy_write(struct net_device *dev, | ||
1267 | int phyaddr_unused, int reg, int value) | ||
1268 | { | ||
1269 | board_info_t *db = netdev_priv(dev); | ||
1270 | unsigned long flags; | ||
1271 | unsigned long reg_save; | ||
1272 | |||
1273 | dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value); | ||
1274 | mutex_lock(&db->addr_lock); | ||
1275 | |||
1276 | spin_lock_irqsave(&db->lock,flags); | ||
1277 | |||
1278 | /* Save previous register address */ | ||
1279 | reg_save = readb(db->io_addr); | ||
1280 | |||
1281 | /* Fill the phyxcer register into REG_0C */ | ||
1282 | iow(db, DM9000_EPAR, DM9000_PHY | reg); | ||
1283 | |||
1284 | /* Fill the written data into REG_0D & REG_0E */ | ||
1285 | iow(db, DM9000_EPDRL, value); | ||
1286 | iow(db, DM9000_EPDRH, value >> 8); | ||
1287 | |||
1288 | iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); /* Issue phyxcer write command */ | ||
1289 | |||
1290 | writeb(reg_save, db->io_addr); | ||
1291 | spin_unlock_irqrestore(&db->lock, flags); | ||
1292 | |||
1293 | dm9000_msleep(db, 1); /* Wait write complete */ | ||
1294 | |||
1295 | spin_lock_irqsave(&db->lock,flags); | ||
1296 | reg_save = readb(db->io_addr); | ||
1297 | |||
1298 | iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */ | ||
1299 | |||
1300 | /* restore the previous address */ | ||
1301 | writeb(reg_save, db->io_addr); | ||
1302 | |||
1303 | spin_unlock_irqrestore(&db->lock, flags); | ||
1304 | mutex_unlock(&db->addr_lock); | ||
1305 | } | ||
1306 | |||
1307 | static void | 1308 | static void |
1308 | dm9000_shutdown(struct net_device *dev) | 1309 | dm9000_shutdown(struct net_device *dev) |
1309 | { | 1310 | { |
@@ -1502,7 +1503,12 @@ dm9000_probe(struct platform_device *pdev) | |||
1502 | db->flags |= DM9000_PLATF_SIMPLE_PHY; | 1503 | db->flags |= DM9000_PLATF_SIMPLE_PHY; |
1503 | #endif | 1504 | #endif |
1504 | 1505 | ||
1505 | dm9000_reset(db); | 1506 | /* Fix a bug in dm9000_probe: take over dm9000_reset(db) here, | ||
1507 | * since the 'NCR_MAC_LBK' bit is needed to keep the DM9000 FIFO | ||
1508 | * stable during the probe stage. | ||
1509 | */ | ||
1510 | |||
1511 | iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST); | ||
1506 | 1512 | ||
1507 | /* try multiple times, DM9000 sometimes gets the read wrong */ | 1513 | /* try multiple times, DM9000 sometimes gets the read wrong */ |
1508 | for (i = 0; i < 8; i++) { | 1514 | for (i = 0; i < 8; i++) { |
diff --git a/drivers/net/ethernet/davicom/dm9000.h b/drivers/net/ethernet/davicom/dm9000.h index 55688bd1a3ef..9ce058adabab 100644 --- a/drivers/net/ethernet/davicom/dm9000.h +++ b/drivers/net/ethernet/davicom/dm9000.h | |||
@@ -69,7 +69,9 @@ | |||
69 | #define NCR_WAKEEN (1<<6) | 69 | #define NCR_WAKEEN (1<<6) |
70 | #define NCR_FCOL (1<<4) | 70 | #define NCR_FCOL (1<<4) |
71 | #define NCR_FDX (1<<3) | 71 | #define NCR_FDX (1<<3) |
72 | #define NCR_LBK (3<<1) | 72 | |
73 | #define NCR_RESERVED (3<<1) | ||
74 | #define NCR_MAC_LBK (1<<1) | ||
73 | #define NCR_RST (1<<0) | 75 | #define NCR_RST (1<<0) |
74 | 76 | ||
75 | #define NSR_SPEED (1<<7) | 77 | #define NSR_SPEED (1<<7) |
@@ -167,5 +169,12 @@ | |||
167 | #define ISR_LNKCHNG (1<<5) | 169 | #define ISR_LNKCHNG (1<<5) |
168 | #define ISR_UNDERRUN (1<<4) | 170 | #define ISR_UNDERRUN (1<<4) |
169 | 171 | ||
172 | /* Davicom MII registers. | ||
173 | */ | ||
174 | |||
175 | #define MII_DM_DSPCR 0x1b /* DSP Control Register */ | ||
176 | |||
177 | #define DSPCR_INIT_PARAM 0xE100 /* DSP init parameter */ | ||
178 | |||
170 | #endif /* _DM9000X_H_ */ | 179 | #endif /* _DM9000X_H_ */ |
171 | 180 | ||
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 08e54f3d288b..2886c9b63f90 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -759,8 +759,9 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, | |||
759 | 759 | ||
760 | if (vlan_tx_tag_present(skb)) { | 760 | if (vlan_tx_tag_present(skb)) { |
761 | vlan_tag = be_get_tx_vlan_tag(adapter, skb); | 761 | vlan_tag = be_get_tx_vlan_tag(adapter, skb); |
762 | __vlan_put_tag(skb, vlan_tag); | 762 | skb = __vlan_put_tag(skb, vlan_tag); |
763 | skb->vlan_tci = 0; | 763 | if (skb) |
764 | skb->vlan_tci = 0; | ||
764 | } | 765 | } |
765 | 766 | ||
766 | return skb; | 767 | return skb; |
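The be_insert_vlan_in_pkt() fix above is needed because __vlan_put_tag() may free the skb it is handed and return a different pointer, or NULL on allocation failure, so the caller has to adopt the return value and check it before touching skb fields again. The standalone toy below models only that ownership contract; the function name, buffer layout, and sizes are invented for illustration and are not the kernel helper.

#include <stdlib.h>
#include <string.h>

/* May consume 'buf': on failure the buffer is freed and NULL is
 * returned; on success a possibly different buffer is handed back. */
static char *insert_tag(char *buf, size_t len, unsigned short tag)
{
	char *bigger = realloc(buf, len + sizeof(tag));

	if (!bigger) {
		free(buf);
		return NULL;
	}
	memcpy(bigger + len, &tag, sizeof(tag));
	return bigger;
}

int main(void)
{
	char *pkt = calloc(1, 64);

	if (!pkt)
		return 1;
	pkt = insert_tag(pkt, 64, 0x0123);	/* always adopt the result */
	if (!pkt)
		return 1;			/* nothing left to free */
	free(pkt);
	return 0;
}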
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c index 911d0253dbb2..73195f643c9c 100644 --- a/drivers/net/ethernet/freescale/fec.c +++ b/drivers/net/ethernet/freescale/fec.c | |||
@@ -345,6 +345,53 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
345 | return NETDEV_TX_OK; | 345 | return NETDEV_TX_OK; |
346 | } | 346 | } |
347 | 347 | ||
348 | /* Init RX & TX buffer descriptors | ||
349 | */ | ||
350 | static void fec_enet_bd_init(struct net_device *dev) | ||
351 | { | ||
352 | struct fec_enet_private *fep = netdev_priv(dev); | ||
353 | struct bufdesc *bdp; | ||
354 | unsigned int i; | ||
355 | |||
356 | /* Initialize the receive buffer descriptors. */ | ||
357 | bdp = fep->rx_bd_base; | ||
358 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
359 | |||
360 | /* Initialize the BD for every fragment in the page. */ | ||
361 | if (bdp->cbd_bufaddr) | ||
362 | bdp->cbd_sc = BD_ENET_RX_EMPTY; | ||
363 | else | ||
364 | bdp->cbd_sc = 0; | ||
365 | bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); | ||
366 | } | ||
367 | |||
368 | /* Set the last buffer to wrap */ | ||
369 | bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); | ||
370 | bdp->cbd_sc |= BD_SC_WRAP; | ||
371 | |||
372 | fep->cur_rx = fep->rx_bd_base; | ||
373 | |||
374 | /* ...and the same for transmit */ | ||
375 | bdp = fep->tx_bd_base; | ||
376 | fep->cur_tx = bdp; | ||
377 | for (i = 0; i < TX_RING_SIZE; i++) { | ||
378 | |||
379 | /* Initialize the BD for every fragment in the page. */ | ||
380 | bdp->cbd_sc = 0; | ||
381 | if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) { | ||
382 | dev_kfree_skb_any(fep->tx_skbuff[i]); | ||
383 | fep->tx_skbuff[i] = NULL; | ||
384 | } | ||
385 | bdp->cbd_bufaddr = 0; | ||
386 | bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); | ||
387 | } | ||
388 | |||
389 | /* Set the last buffer to wrap */ | ||
390 | bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); | ||
391 | bdp->cbd_sc |= BD_SC_WRAP; | ||
392 | fep->dirty_tx = bdp; | ||
393 | } | ||
394 | |||
348 | /* This function is called to start or restart the FEC during a link | 395 | /* This function is called to start or restart the FEC during a link |
349 | * change. This only happens when switching between half and full | 396 | * change. This only happens when switching between half and full |
350 | * duplex. | 397 | * duplex. |
@@ -388,6 +435,8 @@ fec_restart(struct net_device *ndev, int duplex) | |||
388 | /* Set maximum receive buffer size. */ | 435 | /* Set maximum receive buffer size. */ |
389 | writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE); | 436 | writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE); |
390 | 437 | ||
438 | fec_enet_bd_init(ndev); | ||
439 | |||
391 | /* Set receive and transmit descriptor base. */ | 440 | /* Set receive and transmit descriptor base. */ |
392 | writel(fep->bd_dma, fep->hwp + FEC_R_DES_START); | 441 | writel(fep->bd_dma, fep->hwp + FEC_R_DES_START); |
393 | if (fep->bufdesc_ex) | 442 | if (fep->bufdesc_ex) |
@@ -397,7 +446,6 @@ fec_restart(struct net_device *ndev, int duplex) | |||
397 | writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) | 446 | writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) |
398 | * RX_RING_SIZE, fep->hwp + FEC_X_DES_START); | 447 | * RX_RING_SIZE, fep->hwp + FEC_X_DES_START); |
399 | 448 | ||
400 | fep->cur_rx = fep->rx_bd_base; | ||
401 | 449 | ||
402 | for (i = 0; i <= TX_RING_MOD_MASK; i++) { | 450 | for (i = 0; i <= TX_RING_MOD_MASK; i++) { |
403 | if (fep->tx_skbuff[i]) { | 451 | if (fep->tx_skbuff[i]) { |
@@ -954,6 +1002,7 @@ static void fec_enet_adjust_link(struct net_device *ndev) | |||
954 | } else { | 1002 | } else { |
955 | if (fep->link) { | 1003 | if (fep->link) { |
956 | fec_stop(ndev); | 1004 | fec_stop(ndev); |
1005 | fep->link = phy_dev->link; | ||
957 | status_change = 1; | 1006 | status_change = 1; |
958 | } | 1007 | } |
959 | } | 1008 | } |
@@ -1597,8 +1646,6 @@ static int fec_enet_init(struct net_device *ndev) | |||
1597 | { | 1646 | { |
1598 | struct fec_enet_private *fep = netdev_priv(ndev); | 1647 | struct fec_enet_private *fep = netdev_priv(ndev); |
1599 | struct bufdesc *cbd_base; | 1648 | struct bufdesc *cbd_base; |
1600 | struct bufdesc *bdp; | ||
1601 | unsigned int i; | ||
1602 | 1649 | ||
1603 | /* Allocate memory for buffer descriptors. */ | 1650 | /* Allocate memory for buffer descriptors. */ |
1604 | cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma, | 1651 | cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma, |
@@ -1608,6 +1655,7 @@ static int fec_enet_init(struct net_device *ndev) | |||
1608 | return -ENOMEM; | 1655 | return -ENOMEM; |
1609 | } | 1656 | } |
1610 | 1657 | ||
1658 | memset(cbd_base, 0, PAGE_SIZE); | ||
1611 | spin_lock_init(&fep->hw_lock); | 1659 | spin_lock_init(&fep->hw_lock); |
1612 | 1660 | ||
1613 | fep->netdev = ndev; | 1661 | fep->netdev = ndev; |
@@ -1631,35 +1679,6 @@ static int fec_enet_init(struct net_device *ndev) | |||
1631 | writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); | 1679 | writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); |
1632 | netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT); | 1680 | netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT); |
1633 | 1681 | ||
1634 | /* Initialize the receive buffer descriptors. */ | ||
1635 | bdp = fep->rx_bd_base; | ||
1636 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
1637 | |||
1638 | /* Initialize the BD for every fragment in the page. */ | ||
1639 | bdp->cbd_sc = 0; | ||
1640 | bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); | ||
1641 | } | ||
1642 | |||
1643 | /* Set the last buffer to wrap */ | ||
1644 | bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); | ||
1645 | bdp->cbd_sc |= BD_SC_WRAP; | ||
1646 | |||
1647 | /* ...and the same for transmit */ | ||
1648 | bdp = fep->tx_bd_base; | ||
1649 | fep->cur_tx = bdp; | ||
1650 | for (i = 0; i < TX_RING_SIZE; i++) { | ||
1651 | |||
1652 | /* Initialize the BD for every fragment in the page. */ | ||
1653 | bdp->cbd_sc = 0; | ||
1654 | bdp->cbd_bufaddr = 0; | ||
1655 | bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); | ||
1656 | } | ||
1657 | |||
1658 | /* Set the last buffer to wrap */ | ||
1659 | bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); | ||
1660 | bdp->cbd_sc |= BD_SC_WRAP; | ||
1661 | fep->dirty_tx = bdp; | ||
1662 | |||
1663 | fec_restart(ndev, 0); | 1682 | fec_restart(ndev, 0); |
1664 | 1683 | ||
1665 | return 0; | 1684 | return 0; |
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index ec800b093e7e..d2bea3f07c73 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c | |||
@@ -870,7 +870,7 @@ err_unlock: | |||
870 | } | 870 | } |
871 | 871 | ||
872 | static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, | 872 | static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, |
873 | void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) | 873 | int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) |
874 | { | 874 | { |
875 | struct cb *cb; | 875 | struct cb *cb; |
876 | unsigned long flags; | 876 | unsigned long flags; |
@@ -888,10 +888,13 @@ static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, | |||
888 | nic->cbs_avail--; | 888 | nic->cbs_avail--; |
889 | cb->skb = skb; | 889 | cb->skb = skb; |
890 | 890 | ||
891 | err = cb_prepare(nic, cb, skb); | ||
892 | if (err) | ||
893 | goto err_unlock; | ||
894 | |||
891 | if (unlikely(!nic->cbs_avail)) | 895 | if (unlikely(!nic->cbs_avail)) |
892 | err = -ENOSPC; | 896 | err = -ENOSPC; |
893 | 897 | ||
894 | cb_prepare(nic, cb, skb); | ||
895 | 898 | ||
896 | /* Order is important otherwise we'll be in a race with h/w: | 899 | /* Order is important otherwise we'll be in a race with h/w: |
897 | * set S-bit in current first, then clear S-bit in previous. */ | 900 | * set S-bit in current first, then clear S-bit in previous. */ |
@@ -1091,7 +1094,7 @@ static void e100_get_defaults(struct nic *nic) | |||
1091 | nic->mii.mdio_write = mdio_write; | 1094 | nic->mii.mdio_write = mdio_write; |
1092 | } | 1095 | } |
1093 | 1096 | ||
1094 | static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) | 1097 | static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) |
1095 | { | 1098 | { |
1096 | struct config *config = &cb->u.config; | 1099 | struct config *config = &cb->u.config; |
1097 | u8 *c = (u8 *)config; | 1100 | u8 *c = (u8 *)config; |
@@ -1181,6 +1184,7 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) | |||
1181 | netif_printk(nic, hw, KERN_DEBUG, nic->netdev, | 1184 | netif_printk(nic, hw, KERN_DEBUG, nic->netdev, |
1182 | "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", | 1185 | "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", |
1183 | c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]); | 1186 | c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]); |
1187 | return 0; | ||
1184 | } | 1188 | } |
1185 | 1189 | ||
1186 | /************************************************************************* | 1190 | /************************************************************************* |
@@ -1331,7 +1335,7 @@ static const struct firmware *e100_request_firmware(struct nic *nic) | |||
1331 | return fw; | 1335 | return fw; |
1332 | } | 1336 | } |
1333 | 1337 | ||
1334 | static void e100_setup_ucode(struct nic *nic, struct cb *cb, | 1338 | static int e100_setup_ucode(struct nic *nic, struct cb *cb, |
1335 | struct sk_buff *skb) | 1339 | struct sk_buff *skb) |
1336 | { | 1340 | { |
1337 | const struct firmware *fw = (void *)skb; | 1341 | const struct firmware *fw = (void *)skb; |
@@ -1358,6 +1362,7 @@ static void e100_setup_ucode(struct nic *nic, struct cb *cb, | |||
1358 | cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80); | 1362 | cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80); |
1359 | 1363 | ||
1360 | cb->command = cpu_to_le16(cb_ucode | cb_el); | 1364 | cb->command = cpu_to_le16(cb_ucode | cb_el); |
1365 | return 0; | ||
1361 | } | 1366 | } |
1362 | 1367 | ||
1363 | static inline int e100_load_ucode_wait(struct nic *nic) | 1368 | static inline int e100_load_ucode_wait(struct nic *nic) |
@@ -1400,18 +1405,20 @@ static inline int e100_load_ucode_wait(struct nic *nic) | |||
1400 | return err; | 1405 | return err; |
1401 | } | 1406 | } |
1402 | 1407 | ||
1403 | static void e100_setup_iaaddr(struct nic *nic, struct cb *cb, | 1408 | static int e100_setup_iaaddr(struct nic *nic, struct cb *cb, |
1404 | struct sk_buff *skb) | 1409 | struct sk_buff *skb) |
1405 | { | 1410 | { |
1406 | cb->command = cpu_to_le16(cb_iaaddr); | 1411 | cb->command = cpu_to_le16(cb_iaaddr); |
1407 | memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN); | 1412 | memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN); |
1413 | return 0; | ||
1408 | } | 1414 | } |
1409 | 1415 | ||
1410 | static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb) | 1416 | static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb) |
1411 | { | 1417 | { |
1412 | cb->command = cpu_to_le16(cb_dump); | 1418 | cb->command = cpu_to_le16(cb_dump); |
1413 | cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr + | 1419 | cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr + |
1414 | offsetof(struct mem, dump_buf)); | 1420 | offsetof(struct mem, dump_buf)); |
1421 | return 0; | ||
1415 | } | 1422 | } |
1416 | 1423 | ||
1417 | static int e100_phy_check_without_mii(struct nic *nic) | 1424 | static int e100_phy_check_without_mii(struct nic *nic) |
@@ -1581,7 +1588,7 @@ static int e100_hw_init(struct nic *nic) | |||
1581 | return 0; | 1588 | return 0; |
1582 | } | 1589 | } |
1583 | 1590 | ||
1584 | static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) | 1591 | static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) |
1585 | { | 1592 | { |
1586 | struct net_device *netdev = nic->netdev; | 1593 | struct net_device *netdev = nic->netdev; |
1587 | struct netdev_hw_addr *ha; | 1594 | struct netdev_hw_addr *ha; |
@@ -1596,6 +1603,7 @@ static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) | |||
1596 | memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr, | 1603 | memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr, |
1597 | ETH_ALEN); | 1604 | ETH_ALEN); |
1598 | } | 1605 | } |
1606 | return 0; | ||
1599 | } | 1607 | } |
1600 | 1608 | ||
1601 | static void e100_set_multicast_list(struct net_device *netdev) | 1609 | static void e100_set_multicast_list(struct net_device *netdev) |
@@ -1756,11 +1764,18 @@ static void e100_watchdog(unsigned long data) | |||
1756 | round_jiffies(jiffies + E100_WATCHDOG_PERIOD)); | 1764 | round_jiffies(jiffies + E100_WATCHDOG_PERIOD)); |
1757 | } | 1765 | } |
1758 | 1766 | ||
1759 | static void e100_xmit_prepare(struct nic *nic, struct cb *cb, | 1767 | static int e100_xmit_prepare(struct nic *nic, struct cb *cb, |
1760 | struct sk_buff *skb) | 1768 | struct sk_buff *skb) |
1761 | { | 1769 | { |
1770 | dma_addr_t dma_addr; | ||
1762 | cb->command = nic->tx_command; | 1771 | cb->command = nic->tx_command; |
1763 | 1772 | ||
1773 | dma_addr = pci_map_single(nic->pdev, | ||
1774 | skb->data, skb->len, PCI_DMA_TODEVICE); | ||
1775 | /* If we can't map the skb, have the upper layer try later */ | ||
1776 | if (pci_dma_mapping_error(nic->pdev, dma_addr)) | ||
1777 | return -ENOMEM; | ||
1778 | |||
1764 | /* | 1779 | /* |
1765 | * Use the last 4 bytes of the SKB payload packet as the CRC, used for | 1780 | * Use the last 4 bytes of the SKB payload packet as the CRC, used for |
1766 | * testing, ie sending frames with bad CRC. | 1781 | * testing, ie sending frames with bad CRC. |
@@ -1777,11 +1792,10 @@ static void e100_xmit_prepare(struct nic *nic, struct cb *cb, | |||
1777 | cb->u.tcb.tcb_byte_count = 0; | 1792 | cb->u.tcb.tcb_byte_count = 0; |
1778 | cb->u.tcb.threshold = nic->tx_threshold; | 1793 | cb->u.tcb.threshold = nic->tx_threshold; |
1779 | cb->u.tcb.tbd_count = 1; | 1794 | cb->u.tcb.tbd_count = 1; |
1780 | cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev, | 1795 | cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr); |
1781 | skb->data, skb->len, PCI_DMA_TODEVICE)); | ||
1782 | /* check for mapping failure? */ | ||
1783 | cb->u.tcb.tbd.size = cpu_to_le16(skb->len); | 1796 | cb->u.tcb.tbd.size = cpu_to_le16(skb->len); |
1784 | skb_tx_timestamp(skb); | 1797 | skb_tx_timestamp(skb); |
1798 | return 0; | ||
1785 | } | 1799 | } |
1786 | 1800 | ||
1787 | static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, | 1801 | static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, |
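The e100 hunks above, like the e1000, e1000e, and ixgb hunks later in this section, all apply the same rule: check dma_mapping_error() (or pci_dma_mapping_error()) immediately after creating a streaming DMA mapping, and bail out before any descriptor state is committed to hardware. A minimal sketch of that ordering for the transmit path, assuming the usual kernel PCI/DMA helpers and the field names visible in the hunk, might look like the fragment below; it is an illustration, not the driver source.

/* Do the only step that can fail -- the DMA mapping -- before writing
 * any descriptor fields, so the caller can simply unlock and ask the
 * stack to retry the packet later. */
static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
			     struct sk_buff *skb)
{
	dma_addr_t dma_addr = pci_map_single(nic->pdev, skb->data,
					     skb->len, PCI_DMA_TODEVICE);

	if (pci_dma_mapping_error(nic->pdev, dma_addr))
		return -ENOMEM;

	cb->command = nic->tx_command;
	cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr);
	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
	return 0;
}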
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 43462d596a4e..ffd287196bf8 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c | |||
@@ -1053,6 +1053,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1053 | txdr->buffer_info[i].dma = | 1053 | txdr->buffer_info[i].dma = |
1054 | dma_map_single(&pdev->dev, skb->data, skb->len, | 1054 | dma_map_single(&pdev->dev, skb->data, skb->len, |
1055 | DMA_TO_DEVICE); | 1055 | DMA_TO_DEVICE); |
1056 | if (dma_mapping_error(&pdev->dev, txdr->buffer_info[i].dma)) { | ||
1057 | ret_val = 4; | ||
1058 | goto err_nomem; | ||
1059 | } | ||
1056 | tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma); | 1060 | tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma); |
1057 | tx_desc->lower.data = cpu_to_le32(skb->len); | 1061 | tx_desc->lower.data = cpu_to_le32(skb->len); |
1058 | tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | | 1062 | tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | |
@@ -1069,7 +1073,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1069 | rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer), | 1073 | rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer), |
1070 | GFP_KERNEL); | 1074 | GFP_KERNEL); |
1071 | if (!rxdr->buffer_info) { | 1075 | if (!rxdr->buffer_info) { |
1072 | ret_val = 4; | 1076 | ret_val = 5; |
1073 | goto err_nomem; | 1077 | goto err_nomem; |
1074 | } | 1078 | } |
1075 | 1079 | ||
@@ -1077,7 +1081,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1077 | rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, | 1081 | rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, |
1078 | GFP_KERNEL); | 1082 | GFP_KERNEL); |
1079 | if (!rxdr->desc) { | 1083 | if (!rxdr->desc) { |
1080 | ret_val = 5; | 1084 | ret_val = 6; |
1081 | goto err_nomem; | 1085 | goto err_nomem; |
1082 | } | 1086 | } |
1083 | memset(rxdr->desc, 0, rxdr->size); | 1087 | memset(rxdr->desc, 0, rxdr->size); |
@@ -1101,7 +1105,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1101 | 1105 | ||
1102 | skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL); | 1106 | skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL); |
1103 | if (!skb) { | 1107 | if (!skb) { |
1104 | ret_val = 6; | 1108 | ret_val = 7; |
1105 | goto err_nomem; | 1109 | goto err_nomem; |
1106 | } | 1110 | } |
1107 | skb_reserve(skb, NET_IP_ALIGN); | 1111 | skb_reserve(skb, NET_IP_ALIGN); |
@@ -1110,6 +1114,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1110 | rxdr->buffer_info[i].dma = | 1114 | rxdr->buffer_info[i].dma = |
1111 | dma_map_single(&pdev->dev, skb->data, | 1115 | dma_map_single(&pdev->dev, skb->data, |
1112 | E1000_RXBUFFER_2048, DMA_FROM_DEVICE); | 1116 | E1000_RXBUFFER_2048, DMA_FROM_DEVICE); |
1117 | if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) { | ||
1118 | ret_val = 8; | ||
1119 | goto err_nomem; | ||
1120 | } | ||
1113 | rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma); | 1121 | rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma); |
1114 | memset(skb->data, 0x00, skb->len); | 1122 | memset(skb->data, 0x00, skb->len); |
1115 | } | 1123 | } |
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 948b86ffa4f0..7e615e2bf7e6 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c | |||
@@ -848,11 +848,16 @@ check_page: | |||
848 | } | 848 | } |
849 | } | 849 | } |
850 | 850 | ||
851 | if (!buffer_info->dma) | 851 | if (!buffer_info->dma) { |
852 | buffer_info->dma = dma_map_page(&pdev->dev, | 852 | buffer_info->dma = dma_map_page(&pdev->dev, |
853 | buffer_info->page, 0, | 853 | buffer_info->page, 0, |
854 | PAGE_SIZE, | 854 | PAGE_SIZE, |
855 | DMA_FROM_DEVICE); | 855 | DMA_FROM_DEVICE); |
856 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { | ||
857 | adapter->alloc_rx_buff_failed++; | ||
858 | break; | ||
859 | } | ||
860 | } | ||
856 | 861 | ||
857 | rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); | 862 | rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); |
858 | rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); | 863 | rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); |
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 25151401c2ab..ab577a763a20 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h | |||
@@ -284,18 +284,10 @@ struct igb_q_vector { | |||
284 | enum e1000_ring_flags_t { | 284 | enum e1000_ring_flags_t { |
285 | IGB_RING_FLAG_RX_SCTP_CSUM, | 285 | IGB_RING_FLAG_RX_SCTP_CSUM, |
286 | IGB_RING_FLAG_RX_LB_VLAN_BSWAP, | 286 | IGB_RING_FLAG_RX_LB_VLAN_BSWAP, |
287 | IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, | ||
288 | IGB_RING_FLAG_TX_CTX_IDX, | 287 | IGB_RING_FLAG_TX_CTX_IDX, |
289 | IGB_RING_FLAG_TX_DETECT_HANG | 288 | IGB_RING_FLAG_TX_DETECT_HANG |
290 | }; | 289 | }; |
291 | 290 | ||
292 | #define ring_uses_build_skb(ring) \ | ||
293 | test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) | ||
294 | #define set_ring_build_skb_enabled(ring) \ | ||
295 | set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) | ||
296 | #define clear_ring_build_skb_enabled(ring) \ | ||
297 | clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags) | ||
298 | |||
299 | #define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS) | 291 | #define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS) |
300 | 292 | ||
301 | #define IGB_RX_DESC(R, i) \ | 293 | #define IGB_RX_DESC(R, i) \ |
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 8496adfc6a68..64f75291e3a5 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c | |||
@@ -3350,20 +3350,6 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, | |||
3350 | wr32(E1000_RXDCTL(reg_idx), rxdctl); | 3350 | wr32(E1000_RXDCTL(reg_idx), rxdctl); |
3351 | } | 3351 | } |
3352 | 3352 | ||
3353 | static void igb_set_rx_buffer_len(struct igb_adapter *adapter, | ||
3354 | struct igb_ring *rx_ring) | ||
3355 | { | ||
3356 | #define IGB_MAX_BUILD_SKB_SIZE \ | ||
3357 | (SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) - \ | ||
3358 | (NET_SKB_PAD + NET_IP_ALIGN + IGB_TS_HDR_LEN)) | ||
3359 | |||
3360 | /* set build_skb flag */ | ||
3361 | if (adapter->max_frame_size <= IGB_MAX_BUILD_SKB_SIZE) | ||
3362 | set_ring_build_skb_enabled(rx_ring); | ||
3363 | else | ||
3364 | clear_ring_build_skb_enabled(rx_ring); | ||
3365 | } | ||
3366 | |||
3367 | /** | 3353 | /** |
3368 | * igb_configure_rx - Configure receive Unit after Reset | 3354 | * igb_configure_rx - Configure receive Unit after Reset |
3369 | * @adapter: board private structure | 3355 | * @adapter: board private structure |
@@ -3383,11 +3369,8 @@ static void igb_configure_rx(struct igb_adapter *adapter) | |||
3383 | 3369 | ||
3384 | /* Setup the HW Rx Head and Tail Descriptor Pointers and | 3370 | /* Setup the HW Rx Head and Tail Descriptor Pointers and |
3385 | * the Base and Length of the Rx Descriptor Ring */ | 3371 | * the Base and Length of the Rx Descriptor Ring */ |
3386 | for (i = 0; i < adapter->num_rx_queues; i++) { | 3372 | for (i = 0; i < adapter->num_rx_queues; i++) |
3387 | struct igb_ring *rx_ring = adapter->rx_ring[i]; | 3373 | igb_configure_rx_ring(adapter, adapter->rx_ring[i]); |
3388 | igb_set_rx_buffer_len(adapter, rx_ring); | ||
3389 | igb_configure_rx_ring(adapter, rx_ring); | ||
3390 | } | ||
3391 | } | 3374 | } |
3392 | 3375 | ||
3393 | /** | 3376 | /** |
@@ -6203,78 +6186,6 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring, | |||
6203 | return igb_can_reuse_rx_page(rx_buffer, page, truesize); | 6186 | return igb_can_reuse_rx_page(rx_buffer, page, truesize); |
6204 | } | 6187 | } |
6205 | 6188 | ||
6206 | static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring, | ||
6207 | union e1000_adv_rx_desc *rx_desc) | ||
6208 | { | ||
6209 | struct igb_rx_buffer *rx_buffer; | ||
6210 | struct sk_buff *skb; | ||
6211 | struct page *page; | ||
6212 | void *page_addr; | ||
6213 | unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); | ||
6214 | #if (PAGE_SIZE < 8192) | ||
6215 | unsigned int truesize = IGB_RX_BUFSZ; | ||
6216 | #else | ||
6217 | unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + | ||
6218 | SKB_DATA_ALIGN(NET_SKB_PAD + | ||
6219 | NET_IP_ALIGN + | ||
6220 | size); | ||
6221 | #endif | ||
6222 | |||
6223 | /* If we spanned a buffer we have a huge mess so test for it */ | ||
6224 | BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP))); | ||
6225 | |||
6226 | rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; | ||
6227 | page = rx_buffer->page; | ||
6228 | prefetchw(page); | ||
6229 | |||
6230 | page_addr = page_address(page) + rx_buffer->page_offset; | ||
6231 | |||
6232 | /* prefetch first cache line of first page */ | ||
6233 | prefetch(page_addr + NET_SKB_PAD + NET_IP_ALIGN); | ||
6234 | #if L1_CACHE_BYTES < 128 | ||
6235 | prefetch(page_addr + L1_CACHE_BYTES + NET_SKB_PAD + NET_IP_ALIGN); | ||
6236 | #endif | ||
6237 | |||
6238 | /* build an skb to around the page buffer */ | ||
6239 | skb = build_skb(page_addr, truesize); | ||
6240 | if (unlikely(!skb)) { | ||
6241 | rx_ring->rx_stats.alloc_failed++; | ||
6242 | return NULL; | ||
6243 | } | ||
6244 | |||
6245 | /* we are reusing so sync this buffer for CPU use */ | ||
6246 | dma_sync_single_range_for_cpu(rx_ring->dev, | ||
6247 | rx_buffer->dma, | ||
6248 | rx_buffer->page_offset, | ||
6249 | IGB_RX_BUFSZ, | ||
6250 | DMA_FROM_DEVICE); | ||
6251 | |||
6252 | /* update pointers within the skb to store the data */ | ||
6253 | skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD); | ||
6254 | __skb_put(skb, size); | ||
6255 | |||
6256 | /* pull timestamp out of packet data */ | ||
6257 | if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { | ||
6258 | igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb); | ||
6259 | __skb_pull(skb, IGB_TS_HDR_LEN); | ||
6260 | } | ||
6261 | |||
6262 | if (igb_can_reuse_rx_page(rx_buffer, page, truesize)) { | ||
6263 | /* hand second half of page back to the ring */ | ||
6264 | igb_reuse_rx_page(rx_ring, rx_buffer); | ||
6265 | } else { | ||
6266 | /* we are not reusing the buffer so unmap it */ | ||
6267 | dma_unmap_page(rx_ring->dev, rx_buffer->dma, | ||
6268 | PAGE_SIZE, DMA_FROM_DEVICE); | ||
6269 | } | ||
6270 | |||
6271 | /* clear contents of buffer_info */ | ||
6272 | rx_buffer->dma = 0; | ||
6273 | rx_buffer->page = NULL; | ||
6274 | |||
6275 | return skb; | ||
6276 | } | ||
6277 | |||
6278 | static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring, | 6189 | static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring, |
6279 | union e1000_adv_rx_desc *rx_desc, | 6190 | union e1000_adv_rx_desc *rx_desc, |
6280 | struct sk_buff *skb) | 6191 | struct sk_buff *skb) |
@@ -6690,10 +6601,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) | |||
6690 | rmb(); | 6601 | rmb(); |
6691 | 6602 | ||
6692 | /* retrieve a buffer from the ring */ | 6603 | /* retrieve a buffer from the ring */ |
6693 | if (ring_uses_build_skb(rx_ring)) | 6604 | skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb); |
6694 | skb = igb_build_rx_buffer(rx_ring, rx_desc); | ||
6695 | else | ||
6696 | skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb); | ||
6697 | 6605 | ||
6698 | /* exit if we failed to retrieve a buffer */ | 6606 | /* exit if we failed to retrieve a buffer */ |
6699 | if (!skb) | 6607 | if (!skb) |
@@ -6780,14 +6688,6 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, | |||
6780 | return true; | 6688 | return true; |
6781 | } | 6689 | } |
6782 | 6690 | ||
6783 | static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring) | ||
6784 | { | ||
6785 | if (ring_uses_build_skb(rx_ring)) | ||
6786 | return NET_SKB_PAD + NET_IP_ALIGN; | ||
6787 | else | ||
6788 | return 0; | ||
6789 | } | ||
6790 | |||
6791 | /** | 6691 | /** |
6792 | * igb_alloc_rx_buffers - Replace used receive buffers; packet split | 6692 | * igb_alloc_rx_buffers - Replace used receive buffers; packet split |
6793 | * @adapter: address of board private structure | 6693 | * @adapter: address of board private structure |
@@ -6814,9 +6714,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) | |||
6814 | * Refresh the desc even if buffer_addrs didn't change | 6714 | * Refresh the desc even if buffer_addrs didn't change |
6815 | * because each write-back erases this info. | 6715 | * because each write-back erases this info. |
6816 | */ | 6716 | */ |
6817 | rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + | 6717 | rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); |
6818 | bi->page_offset + | ||
6819 | igb_rx_offset(rx_ring)); | ||
6820 | 6718 | ||
6821 | rx_desc++; | 6719 | rx_desc++; |
6822 | bi++; | 6720 | bi++; |
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index ea4808373435..b5f94abe3cff 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c | |||
@@ -2159,6 +2159,10 @@ map_skb: | |||
2159 | skb->data, | 2159 | skb->data, |
2160 | adapter->rx_buffer_len, | 2160 | adapter->rx_buffer_len, |
2161 | DMA_FROM_DEVICE); | 2161 | DMA_FROM_DEVICE); |
2162 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { | ||
2163 | adapter->alloc_rx_buff_failed++; | ||
2164 | break; | ||
2165 | } | ||
2162 | 2166 | ||
2163 | rx_desc = IXGB_RX_DESC(*rx_ring, i); | 2167 | rx_desc = IXGB_RX_DESC(*rx_ring, i); |
2164 | rx_desc->buff_addr = cpu_to_le64(buffer_info->dma); | 2168 | rx_desc->buff_addr = cpu_to_le64(buffer_info->dma); |
@@ -2168,7 +2172,8 @@ map_skb: | |||
2168 | rx_desc->status = 0; | 2172 | rx_desc->status = 0; |
2169 | 2173 | ||
2170 | 2174 | ||
2171 | if (++i == rx_ring->count) i = 0; | 2175 | if (++i == rx_ring->count) |
2176 | i = 0; | ||
2172 | buffer_info = &rx_ring->buffer_info[i]; | 2177 | buffer_info = &rx_ring->buffer_info[i]; |
2173 | } | 2178 | } |
2174 | 2179 | ||
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index db5611ae407e..79f4a26ea6cc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -7922,12 +7922,19 @@ static int __init ixgbe_init_module(void) | |||
7922 | ixgbe_dbg_init(); | 7922 | ixgbe_dbg_init(); |
7923 | #endif /* CONFIG_DEBUG_FS */ | 7923 | #endif /* CONFIG_DEBUG_FS */ |
7924 | 7924 | ||
7925 | ret = pci_register_driver(&ixgbe_driver); | ||
7926 | if (ret) { | ||
7927 | #ifdef CONFIG_DEBUG_FS | ||
7928 | ixgbe_dbg_exit(); | ||
7929 | #endif /* CONFIG_DEBUG_FS */ | ||
7930 | return ret; | ||
7931 | } | ||
7932 | |||
7925 | #ifdef CONFIG_IXGBE_DCA | 7933 | #ifdef CONFIG_IXGBE_DCA |
7926 | dca_register_notify(&dca_notifier); | 7934 | dca_register_notify(&dca_notifier); |
7927 | #endif | 7935 | #endif |
7928 | 7936 | ||
7929 | ret = pci_register_driver(&ixgbe_driver); | 7937 | return 0; |
7930 | return ret; | ||
7931 | } | 7938 | } |
7932 | 7939 | ||
7933 | module_init(ixgbe_init_module); | 7940 | module_init(ixgbe_init_module); |
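The ixgbe_init_module() change above makes sure the debugfs directory created before pci_register_driver() is torn down again if registration fails, and only registers the DCA notifier once the driver is actually in place. The sketch below shows the same unwind-in-reverse idea with the usual goto-label idiom; the example_* names are invented and the #ifdef guards from the real code are omitted for brevity.

static int __init example_init_module(void)
{
	int ret;

	example_dbg_init();			/* step 1: debugfs */

	ret = pci_register_driver(&example_driver);
	if (ret)
		goto err_unwind_dbg;		/* undo step 1 and bail */

	dca_register_notify(&example_notifier);	/* step 2: only after success */
	return 0;

err_unwind_dbg:
	example_dbg_exit();
	return ret;
}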
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index d44b4d21268c..97e33669c0b9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | |||
@@ -1049,6 +1049,12 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) | |||
1049 | if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7)) | 1049 | if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7)) |
1050 | return -EINVAL; | 1050 | return -EINVAL; |
1051 | if (vlan || qos) { | 1051 | if (vlan || qos) { |
1052 | if (adapter->vfinfo[vf].pf_vlan) | ||
1053 | err = ixgbe_set_vf_vlan(adapter, false, | ||
1054 | adapter->vfinfo[vf].pf_vlan, | ||
1055 | vf); | ||
1056 | if (err) | ||
1057 | goto out; | ||
1052 | err = ixgbe_set_vf_vlan(adapter, true, vlan, vf); | 1058 | err = ixgbe_set_vf_vlan(adapter, true, vlan, vf); |
1053 | if (err) | 1059 | if (err) |
1054 | goto out; | 1060 | goto out; |
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index edfba9370922..434e33c527df 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig | |||
@@ -33,6 +33,7 @@ config MV643XX_ETH | |||
33 | 33 | ||
34 | config MVMDIO | 34 | config MVMDIO |
35 | tristate "Marvell MDIO interface support" | 35 | tristate "Marvell MDIO interface support" |
36 | select PHYLIB | ||
36 | ---help--- | 37 | ---help--- |
37 | This driver supports the MDIO interface found in the network | 38 | This driver supports the MDIO interface found in the network |
38 | interface units of the Marvell EBU SoCs (Kirkwood, Orion5x, | 39 | interface units of the Marvell EBU SoCs (Kirkwood, Orion5x, |
@@ -45,7 +46,6 @@ config MVMDIO | |||
45 | config MVNETA | 46 | config MVNETA |
46 | tristate "Marvell Armada 370/XP network interface support" | 47 | tristate "Marvell Armada 370/XP network interface support" |
47 | depends on MACH_ARMADA_370_XP | 48 | depends on MACH_ARMADA_370_XP |
48 | select PHYLIB | ||
49 | select MVMDIO | 49 | select MVMDIO |
50 | ---help--- | 50 | ---help--- |
51 | This driver supports the network interface units in the | 51 | This driver supports the network interface units in the |
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index cd345b8969bc..a47a097c21e1 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c | |||
@@ -374,7 +374,6 @@ static int rxq_number = 8; | |||
374 | static int txq_number = 8; | 374 | static int txq_number = 8; |
375 | 375 | ||
376 | static int rxq_def; | 376 | static int rxq_def; |
377 | static int txq_def; | ||
378 | 377 | ||
379 | #define MVNETA_DRIVER_NAME "mvneta" | 378 | #define MVNETA_DRIVER_NAME "mvneta" |
380 | #define MVNETA_DRIVER_VERSION "1.0" | 379 | #define MVNETA_DRIVER_VERSION "1.0" |
@@ -1475,7 +1474,8 @@ error: | |||
1475 | static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) | 1474 | static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) |
1476 | { | 1475 | { |
1477 | struct mvneta_port *pp = netdev_priv(dev); | 1476 | struct mvneta_port *pp = netdev_priv(dev); |
1478 | struct mvneta_tx_queue *txq = &pp->txqs[txq_def]; | 1477 | u16 txq_id = skb_get_queue_mapping(skb); |
1478 | struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; | ||
1479 | struct mvneta_tx_desc *tx_desc; | 1479 | struct mvneta_tx_desc *tx_desc; |
1480 | struct netdev_queue *nq; | 1480 | struct netdev_queue *nq; |
1481 | int frags = 0; | 1481 | int frags = 0; |
@@ -1485,7 +1485,7 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) | |||
1485 | goto out; | 1485 | goto out; |
1486 | 1486 | ||
1487 | frags = skb_shinfo(skb)->nr_frags + 1; | 1487 | frags = skb_shinfo(skb)->nr_frags + 1; |
1488 | nq = netdev_get_tx_queue(dev, txq_def); | 1488 | nq = netdev_get_tx_queue(dev, txq_id); |
1489 | 1489 | ||
1490 | /* Get a descriptor for the first part of the packet */ | 1490 | /* Get a descriptor for the first part of the packet */ |
1491 | tx_desc = mvneta_txq_next_desc_get(txq); | 1491 | tx_desc = mvneta_txq_next_desc_get(txq); |
@@ -2689,7 +2689,7 @@ static int mvneta_probe(struct platform_device *pdev) | |||
2689 | return -EINVAL; | 2689 | return -EINVAL; |
2690 | } | 2690 | } |
2691 | 2691 | ||
2692 | dev = alloc_etherdev_mq(sizeof(struct mvneta_port), 8); | 2692 | dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number); |
2693 | if (!dev) | 2693 | if (!dev) |
2694 | return -ENOMEM; | 2694 | return -ENOMEM; |
2695 | 2695 | ||
@@ -2771,16 +2771,17 @@ static int mvneta_probe(struct platform_device *pdev) | |||
2771 | 2771 | ||
2772 | netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight); | 2772 | netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight); |
2773 | 2773 | ||
2774 | dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; | ||
2775 | dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM; | ||
2776 | dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM; | ||
2777 | dev->priv_flags |= IFF_UNICAST_FLT; | ||
2778 | |||
2774 | err = register_netdev(dev); | 2779 | err = register_netdev(dev); |
2775 | if (err < 0) { | 2780 | if (err < 0) { |
2776 | dev_err(&pdev->dev, "failed to register\n"); | 2781 | dev_err(&pdev->dev, "failed to register\n"); |
2777 | goto err_deinit; | 2782 | goto err_deinit; |
2778 | } | 2783 | } |
2779 | 2784 | ||
2780 | dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; | ||
2781 | dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM; | ||
2782 | dev->priv_flags |= IFF_UNICAST_FLT; | ||
2783 | |||
2784 | netdev_info(dev, "mac: %pM\n", dev->dev_addr); | 2785 | netdev_info(dev, "mac: %pM\n", dev->dev_addr); |
2785 | 2786 | ||
2786 | platform_set_drvdata(pdev, pp->dev); | 2787 | platform_set_drvdata(pdev, pp->dev); |
@@ -2843,4 +2844,3 @@ module_param(rxq_number, int, S_IRUGO); | |||
2843 | module_param(txq_number, int, S_IRUGO); | 2844 | module_param(txq_number, int, S_IRUGO); |
2844 | 2845 | ||
2845 | module_param(rxq_def, int, S_IRUGO); | 2846 | module_param(rxq_def, int, S_IRUGO); |
2846 | module_param(txq_def, int, S_IRUGO); | ||
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index fc07ca35721b..6a0e671fcecd 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c | |||
@@ -1067,7 +1067,7 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space) | |||
1067 | sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp); | 1067 | sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp); |
1068 | sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2); | 1068 | sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2); |
1069 | 1069 | ||
1070 | tp = space - 2048/8; | 1070 | tp = space - 8192/8; |
1071 | sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp); | 1071 | sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp); |
1072 | sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4); | 1072 | sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4); |
1073 | } else { | 1073 | } else { |
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h index 615ac63ea860..ec6dcd80152b 100644 --- a/drivers/net/ethernet/marvell/sky2.h +++ b/drivers/net/ethernet/marvell/sky2.h | |||
@@ -2074,7 +2074,7 @@ enum { | |||
2074 | GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */ | 2074 | GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */ |
2075 | GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */ | 2075 | GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */ |
2076 | 2076 | ||
2077 | #define GMAC_DEF_MSK GM_IS_TX_FF_UR | 2077 | #define GMAC_DEF_MSK (GM_IS_TX_FF_UR | GM_IS_RX_FF_OR) |
2078 | }; | 2078 | }; |
2079 | 2079 | ||
2080 | /* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */ | 2080 | /* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */ |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index f278b10ef714..30d78f806dc3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
@@ -411,8 +411,8 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |||
411 | 411 | ||
412 | static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac) | 412 | static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac) |
413 | { | 413 | { |
414 | unsigned int i; | 414 | int i; |
415 | for (i = ETH_ALEN - 1; i; --i) { | 415 | for (i = ETH_ALEN - 1; i >= 0; --i) { |
416 | dst_mac[i] = src_mac & 0xff; | 416 | dst_mac[i] = src_mac & 0xff; |
417 | src_mac >>= 8; | 417 | src_mac >>= 8; |
418 | } | 418 | } |
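The mlx4_en_u64_to_mac() fix above matters because the old loop used an unsigned index with the condition 'i', so it stopped one iteration early and never wrote the most-significant byte of the MAC address. A standalone version of the corrected conversion, with the constant and function name borrowed from the hunk, is shown below.

#include <assert.h>
#include <stdint.h>

#define ETH_ALEN 6

/* Expand a u64 into a 6-byte MAC address, most-significant byte first.
 * The index must be a signed int and the loop must run down to 0, or
 * dst[0] is never filled in -- the bug the hunk above fixes. */
static void u64_to_mac(unsigned char dst[ETH_ALEN], uint64_t src)
{
	int i;

	for (i = ETH_ALEN - 1; i >= 0; --i) {
		dst[i] = src & 0xff;
		src >>= 8;
	}
}

int main(void)
{
	unsigned char mac[ETH_ALEN];

	u64_to_mac(mac, 0x0123456789abULL);
	assert(mac[0] == 0x01 && mac[5] == 0xab);
	return 0;
}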
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index 33bcb63d56a2..8fb481252e2c 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c | |||
@@ -528,7 +528,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) | |||
528 | for (; rxfc != 0; rxfc--) { | 528 | for (; rxfc != 0; rxfc--) { |
529 | rxh = ks8851_rdreg32(ks, KS_RXFHSR); | 529 | rxh = ks8851_rdreg32(ks, KS_RXFHSR); |
530 | rxstat = rxh & 0xffff; | 530 | rxstat = rxh & 0xffff; |
531 | rxlen = rxh >> 16; | 531 | rxlen = (rxh >> 16) & 0xfff; |
532 | 532 | ||
533 | netif_dbg(ks, rx_status, ks->netdev, | 533 | netif_dbg(ks, rx_status, ks->netdev, |
534 | "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen); | 534 | "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen); |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index cd5ae8813cb3..edd63f1230f3 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | |||
@@ -1500,6 +1500,12 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode) | |||
1500 | } | 1500 | } |
1501 | } while ((adapter->ahw->linkup && ahw->has_link_events) != 1); | 1501 | } while ((adapter->ahw->linkup && ahw->has_link_events) != 1); |
1502 | 1502 | ||
1503 | /* Make sure carrier is off and queue is stopped during loopback */ | ||
1504 | if (netif_running(netdev)) { | ||
1505 | netif_carrier_off(netdev); | ||
1506 | netif_stop_queue(netdev); | ||
1507 | } | ||
1508 | |||
1503 | ret = qlcnic_do_lb_test(adapter, mode); | 1509 | ret = qlcnic_do_lb_test(adapter, mode); |
1504 | 1510 | ||
1505 | qlcnic_83xx_clear_lb_mode(adapter, mode); | 1511 | qlcnic_83xx_clear_lb_mode(adapter, mode); |
@@ -2780,6 +2786,7 @@ static u64 *qlcnic_83xx_fill_stats(struct qlcnic_adapter *adapter, | |||
2780 | void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data) | 2786 | void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data) |
2781 | { | 2787 | { |
2782 | struct qlcnic_cmd_args cmd; | 2788 | struct qlcnic_cmd_args cmd; |
2789 | struct net_device *netdev = adapter->netdev; | ||
2783 | int ret = 0; | 2790 | int ret = 0; |
2784 | 2791 | ||
2785 | qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS); | 2792 | qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS); |
@@ -2789,7 +2796,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data) | |||
2789 | data = qlcnic_83xx_fill_stats(adapter, &cmd, data, | 2796 | data = qlcnic_83xx_fill_stats(adapter, &cmd, data, |
2790 | QLC_83XX_STAT_TX, &ret); | 2797 | QLC_83XX_STAT_TX, &ret); |
2791 | if (ret) { | 2798 | if (ret) { |
2792 | dev_info(&adapter->pdev->dev, "Error getting MAC stats\n"); | 2799 | netdev_err(netdev, "Error getting Tx stats\n"); |
2793 | goto out; | 2800 | goto out; |
2794 | } | 2801 | } |
2795 | /* Get MAC stats */ | 2802 | /* Get MAC stats */ |
@@ -2799,8 +2806,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data) | |||
2799 | data = qlcnic_83xx_fill_stats(adapter, &cmd, data, | 2806 | data = qlcnic_83xx_fill_stats(adapter, &cmd, data, |
2800 | QLC_83XX_STAT_MAC, &ret); | 2807 | QLC_83XX_STAT_MAC, &ret); |
2801 | if (ret) { | 2808 | if (ret) { |
2802 | dev_info(&adapter->pdev->dev, | 2809 | netdev_err(netdev, "Error getting MAC stats\n"); |
2803 | "Error getting Rx stats\n"); | ||
2804 | goto out; | 2810 | goto out; |
2805 | } | 2811 | } |
2806 | /* Get Rx stats */ | 2812 | /* Get Rx stats */ |
@@ -2810,8 +2816,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data) | |||
2810 | data = qlcnic_83xx_fill_stats(adapter, &cmd, data, | 2816 | data = qlcnic_83xx_fill_stats(adapter, &cmd, data, |
2811 | QLC_83XX_STAT_RX, &ret); | 2817 | QLC_83XX_STAT_RX, &ret); |
2812 | if (ret) | 2818 | if (ret) |
2813 | dev_info(&adapter->pdev->dev, | 2819 | netdev_err(netdev, "Error getting Rx stats\n"); |
2814 | "Error getting Tx stats\n"); | ||
2815 | out: | 2820 | out: |
2816 | qlcnic_free_mbx_args(&cmd); | 2821 | qlcnic_free_mbx_args(&cmd); |
2817 | } | 2822 | } |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 0e630061bff3..5fa847fe388a 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | |||
@@ -358,8 +358,7 @@ set_flags: | |||
358 | memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN); | 358 | memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN); |
359 | } | 359 | } |
360 | opcode = TX_ETHER_PKT; | 360 | opcode = TX_ETHER_PKT; |
361 | if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && | 361 | if (skb_is_gso(skb)) { |
362 | skb_shinfo(skb)->gso_size > 0) { | ||
363 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | 362 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); |
364 | first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); | 363 | first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); |
365 | first_desc->total_hdr_length = hdr_len; | 364 | first_desc->total_hdr_length = hdr_len; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 987fb6f8adc3..5ef328af61d0 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | |||
@@ -200,10 +200,10 @@ beacon_err: | |||
200 | } | 200 | } |
201 | 201 | ||
202 | err = qlcnic_config_led(adapter, b_state, b_rate); | 202 | err = qlcnic_config_led(adapter, b_state, b_rate); |
203 | if (!err) | 203 | if (!err) { |
204 | err = len; | 204 | err = len; |
205 | else | ||
206 | ahw->beacon_state = b_state; | 205 | ahw->beacon_state = b_state; |
206 | } | ||
207 | 207 | ||
208 | if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state)) | 208 | if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state)) |
209 | qlcnic_diag_free_res(adapter->netdev, max_sds_rings); | 209 | qlcnic_diag_free_res(adapter->netdev, max_sds_rings); |
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h index a131d7b5d2fe..7e8d68263963 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge.h +++ b/drivers/net/ethernet/qlogic/qlge/qlge.h | |||
@@ -18,7 +18,7 @@ | |||
18 | */ | 18 | */ |
19 | #define DRV_NAME "qlge" | 19 | #define DRV_NAME "qlge" |
20 | #define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " | 20 | #define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " |
21 | #define DRV_VERSION "v1.00.00.31" | 21 | #define DRV_VERSION "v1.00.00.32" |
22 | 22 | ||
23 | #define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ | 23 | #define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ |
24 | 24 | ||
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c index 6f316ab23257..0780e039b271 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c | |||
@@ -379,13 +379,13 @@ static int ql_get_settings(struct net_device *ndev, | |||
379 | 379 | ||
380 | ecmd->supported = SUPPORTED_10000baseT_Full; | 380 | ecmd->supported = SUPPORTED_10000baseT_Full; |
381 | ecmd->advertising = ADVERTISED_10000baseT_Full; | 381 | ecmd->advertising = ADVERTISED_10000baseT_Full; |
382 | ecmd->autoneg = AUTONEG_ENABLE; | ||
383 | ecmd->transceiver = XCVR_EXTERNAL; | 382 | ecmd->transceiver = XCVR_EXTERNAL; |
384 | if ((qdev->link_status & STS_LINK_TYPE_MASK) == | 383 | if ((qdev->link_status & STS_LINK_TYPE_MASK) == |
385 | STS_LINK_TYPE_10GBASET) { | 384 | STS_LINK_TYPE_10GBASET) { |
386 | ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); | 385 | ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); |
387 | ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg); | 386 | ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg); |
388 | ecmd->port = PORT_TP; | 387 | ecmd->port = PORT_TP; |
388 | ecmd->autoneg = AUTONEG_ENABLE; | ||
389 | } else { | 389 | } else { |
390 | ecmd->supported |= SUPPORTED_FIBRE; | 390 | ecmd->supported |= SUPPORTED_FIBRE; |
391 | ecmd->advertising |= ADVERTISED_FIBRE; | 391 | ecmd->advertising |= ADVERTISED_FIBRE; |
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index b13ab544a7eb..8033555e53c2 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c | |||
@@ -1434,11 +1434,13 @@ map_error: | |||
1434 | } | 1434 | } |
1435 | 1435 | ||
1436 | /* Categorizing receive firmware frame errors */ | 1436 | /* Categorizing receive firmware frame errors */ |
1437 | static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err) | 1437 | static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err, |
1438 | struct rx_ring *rx_ring) | ||
1438 | { | 1439 | { |
1439 | struct nic_stats *stats = &qdev->nic_stats; | 1440 | struct nic_stats *stats = &qdev->nic_stats; |
1440 | 1441 | ||
1441 | stats->rx_err_count++; | 1442 | stats->rx_err_count++; |
1443 | rx_ring->rx_errors++; | ||
1442 | 1444 | ||
1443 | switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) { | 1445 | switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) { |
1444 | case IB_MAC_IOCB_RSP_ERR_CODE_ERR: | 1446 | case IB_MAC_IOCB_RSP_ERR_CODE_ERR: |
@@ -1474,6 +1476,12 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev, | |||
1474 | struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); | 1476 | struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); |
1475 | struct napi_struct *napi = &rx_ring->napi; | 1477 | struct napi_struct *napi = &rx_ring->napi; |
1476 | 1478 | ||
1479 | /* Frame error, so drop the packet. */ | ||
1480 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { | ||
1481 | ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); | ||
1482 | put_page(lbq_desc->p.pg_chunk.page); | ||
1483 | return; | ||
1484 | } | ||
1477 | napi->dev = qdev->ndev; | 1485 | napi->dev = qdev->ndev; |
1478 | 1486 | ||
1479 | skb = napi_get_frags(napi); | 1487 | skb = napi_get_frags(napi); |
@@ -1529,6 +1537,12 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev, | |||
1529 | addr = lbq_desc->p.pg_chunk.va; | 1537 | addr = lbq_desc->p.pg_chunk.va; |
1530 | prefetch(addr); | 1538 | prefetch(addr); |
1531 | 1539 | ||
1540 | /* Frame error, so drop the packet. */ | ||
1541 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { | ||
1542 | ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); | ||
1543 | goto err_out; | ||
1544 | } | ||
1545 | |||
1532 | /* The max framesize filter on this chip is set higher than | 1546 | /* The max framesize filter on this chip is set higher than |
1533 | * MTU since FCoE uses 2k frames. | 1547 | * MTU since FCoE uses 2k frames. |
1534 | */ | 1548 | */ |
@@ -1614,6 +1628,13 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev, | |||
1614 | memcpy(skb_put(new_skb, length), skb->data, length); | 1628 | memcpy(skb_put(new_skb, length), skb->data, length); |
1615 | skb = new_skb; | 1629 | skb = new_skb; |
1616 | 1630 | ||
1631 | /* Frame error, so drop the packet. */ | ||
1632 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { | ||
1633 | ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); | ||
1634 | dev_kfree_skb_any(skb); | ||
1635 | return; | ||
1636 | } | ||
1637 | |||
1617 | /* loopback self test for ethtool */ | 1638 | /* loopback self test for ethtool */ |
1618 | if (test_bit(QL_SELFTEST, &qdev->flags)) { | 1639 | if (test_bit(QL_SELFTEST, &qdev->flags)) { |
1619 | ql_check_lb_frame(qdev, skb); | 1640 | ql_check_lb_frame(qdev, skb); |
@@ -1919,6 +1940,13 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev, | |||
1919 | return; | 1940 | return; |
1920 | } | 1941 | } |
1921 | 1942 | ||
1943 | /* Frame error, so drop the packet. */ | ||
1944 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { | ||
1945 | ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); | ||
1946 | dev_kfree_skb_any(skb); | ||
1947 | return; | ||
1948 | } | ||
1949 | |||
1922 | /* The max framesize filter on this chip is set higher than | 1950 | /* The max framesize filter on this chip is set higher than |
1923 | * MTU since FCoE uses 2k frames. | 1951 | * MTU since FCoE uses 2k frames. |
1924 | */ | 1952 | */ |
@@ -2000,12 +2028,6 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev, | |||
2000 | 2028 | ||
2001 | QL_DUMP_IB_MAC_RSP(ib_mac_rsp); | 2029 | QL_DUMP_IB_MAC_RSP(ib_mac_rsp); |
2002 | 2030 | ||
2003 | /* Frame error, so drop the packet. */ | ||
2004 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { | ||
2005 | ql_categorize_rx_err(qdev, ib_mac_rsp->flags2); | ||
2006 | return (unsigned long)length; | ||
2007 | } | ||
2008 | |||
2009 | if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) { | 2031 | if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) { |
2010 | /* The data and headers are split into | 2032 | /* The data and headers are split into |
2011 | * separate buffers. | 2033 | * separate buffers. |
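
A minimal userspace sketch of the pattern the qlge change above applies (not part of the diff itself): every receive path now tests the firmware error mask on its own, so it can bump the per-ring rx_errors counter and free its own buffer type before returning. RSP_ERR_MASK and struct ring_stats are stand-ins, not the driver's names.

#include <stdint.h>
#include <stdio.h>

#define RSP_ERR_MASK 0x3e                     /* stand-in for IB_MAC_IOCB_RSP_ERR_MASK */

struct ring_stats { unsigned long rx_errors; };

/* Returns nonzero when the frame must be dropped; the caller then releases
 * whatever buffer it owns (page chunk, skb, ...) and returns early. */
static int rx_frame_error(uint8_t flags2, struct ring_stats *ring)
{
	if (!(flags2 & RSP_ERR_MASK))
		return 0;
	ring->rx_errors++;                    /* per-ring counter the patch adds */
	return 1;
}

int main(void)
{
	struct ring_stats ring = { 0 };

	printf("drop=%d errors=%lu\n", rx_frame_error(0x10, &ring), ring.rx_errors);
	printf("drop=%d errors=%lu\n", rx_frame_error(0x00, &ring), ring.rx_errors);
	return 0;
}
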
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 28fb50a1e9c3..4ecbe64a758d 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -3818,6 +3818,30 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp) | |||
3818 | } | 3818 | } |
3819 | } | 3819 | } |
3820 | 3820 | ||
3821 | static void rtl_speed_down(struct rtl8169_private *tp) | ||
3822 | { | ||
3823 | u32 adv; | ||
3824 | int lpa; | ||
3825 | |||
3826 | rtl_writephy(tp, 0x1f, 0x0000); | ||
3827 | lpa = rtl_readphy(tp, MII_LPA); | ||
3828 | |||
3829 | if (lpa & (LPA_10HALF | LPA_10FULL)) | ||
3830 | adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full; | ||
3831 | else if (lpa & (LPA_100HALF | LPA_100FULL)) | ||
3832 | adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | | ||
3833 | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; | ||
3834 | else | ||
3835 | adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | | ||
3836 | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | | ||
3837 | (tp->mii.supports_gmii ? | ||
3838 | ADVERTISED_1000baseT_Half | | ||
3839 | ADVERTISED_1000baseT_Full : 0); | ||
3840 | |||
3841 | rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, | ||
3842 | adv); | ||
3843 | } | ||
3844 | |||
3821 | static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) | 3845 | static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) |
3822 | { | 3846 | { |
3823 | void __iomem *ioaddr = tp->mmio_addr; | 3847 | void __iomem *ioaddr = tp->mmio_addr; |
@@ -3848,9 +3872,7 @@ static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) | |||
3848 | if (!(__rtl8169_get_wol(tp) & WAKE_ANY)) | 3872 | if (!(__rtl8169_get_wol(tp) & WAKE_ANY)) |
3849 | return false; | 3873 | return false; |
3850 | 3874 | ||
3851 | rtl_writephy(tp, 0x1f, 0x0000); | 3875 | rtl_speed_down(tp); |
3852 | rtl_writephy(tp, MII_BMCR, 0x0000); | ||
3853 | |||
3854 | rtl_wol_suspend_quirk(tp); | 3876 | rtl_wol_suspend_quirk(tp); |
3855 | 3877 | ||
3856 | return true; | 3878 | return true; |
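
A runnable sketch of the selection rule the new rtl_speed_down() implements: before a WoL suspend, advertise only the slowest speed family the link partner reported, falling back to everything the PHY supports when nothing was reported. The LPA_*/ADV_* constants below are local stand-ins for the uapi mii/ethtool bits.

#include <stdio.h>

#define LPA_10_ANY    0x0060   /* LPA_10HALF | LPA_10FULL */
#define LPA_100_ANY   0x0180   /* LPA_100HALF | LPA_100FULL */
#define ADV_10        0x1
#define ADV_100       0x2
#define ADV_1000      0x4

static unsigned int wol_speed_down_adv(int lpa, int supports_gmii)
{
	if (lpa & LPA_10_ANY)
		return ADV_10;                          /* partner can do 10: pick the lowest */
	if (lpa & LPA_100_ANY)
		return ADV_10 | ADV_100;                /* partner tops out at 100 */
	return ADV_10 | ADV_100 |                       /* no LPA info: allow everything */
	       (supports_gmii ? ADV_1000 : 0);
}

int main(void)
{
	printf("0x%x\n", wol_speed_down_adv(0x0040, 1));   /* 10baseT-FD partner */
	printf("0x%x\n", wol_speed_down_adv(0x0100, 1));   /* 100baseT-HD partner */
	printf("0x%x\n", wol_speed_down_adv(0x0000, 1));   /* unknown partner */
	return 0;
}
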
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index bf5e3cf97c4d..6ed333fe5c04 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
@@ -1216,10 +1216,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status) | |||
1216 | if (felic_stat & ECSR_LCHNG) { | 1216 | if (felic_stat & ECSR_LCHNG) { |
1217 | /* Link Changed */ | 1217 | /* Link Changed */ |
1218 | if (mdp->cd->no_psr || mdp->no_ether_link) { | 1218 | if (mdp->cd->no_psr || mdp->no_ether_link) { |
1219 | if (mdp->link == PHY_DOWN) | 1219 | goto ignore_link; |
1220 | link_stat = 0; | ||
1221 | else | ||
1222 | link_stat = PHY_ST_LINK; | ||
1223 | } else { | 1220 | } else { |
1224 | link_stat = (sh_eth_read(ndev, PSR)); | 1221 | link_stat = (sh_eth_read(ndev, PSR)); |
1225 | if (mdp->ether_link_active_low) | 1222 | if (mdp->ether_link_active_low) |
@@ -1242,6 +1239,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status) | |||
1242 | } | 1239 | } |
1243 | } | 1240 | } |
1244 | 1241 | ||
1242 | ignore_link: | ||
1245 | if (intr_status & EESR_TWB) { | 1243 | if (intr_status & EESR_TWB) { |
1246 | /* Write buck end. unused write back interrupt */ | 1244 | /* Write buck end. unused write back interrupt */ |
1247 | if (intr_status & EESR_TABT) /* Transmit Abort int */ | 1245 | if (intr_status & EESR_TABT) /* Transmit Abort int */ |
@@ -1326,12 +1324,18 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) | |||
1326 | struct sh_eth_private *mdp = netdev_priv(ndev); | 1324 | struct sh_eth_private *mdp = netdev_priv(ndev); |
1327 | struct sh_eth_cpu_data *cd = mdp->cd; | 1325 | struct sh_eth_cpu_data *cd = mdp->cd; |
1328 | irqreturn_t ret = IRQ_NONE; | 1326 | irqreturn_t ret = IRQ_NONE; |
1329 | u32 intr_status = 0; | 1327 | unsigned long intr_status; |
1330 | 1328 | ||
1331 | spin_lock(&mdp->lock); | 1329 | spin_lock(&mdp->lock); |
1332 | 1330 | ||
1333 | /* Get interrpt stat */ | 1331 | /* Get interrupt status */ |
1334 | intr_status = sh_eth_read(ndev, EESR); | 1332 | intr_status = sh_eth_read(ndev, EESR); |
1333 | /* Mask it with the interrupt mask, forcing ECI interrupt to be always | ||
1334 | * enabled since it's the one that comes thru regardless of the mask, | ||
1335 | * and we need to fully handle it in sh_eth_error() in order to quench | ||
1336 | * it as it doesn't get cleared by just writing 1 to the ECI bit... | ||
1337 | */ | ||
1338 | intr_status &= sh_eth_read(ndev, EESIPR) | DMAC_M_ECI; | ||
1335 | /* Clear interrupt */ | 1339 | /* Clear interrupt */ |
1336 | if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF | | 1340 | if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF | |
1337 | EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF | | 1341 | EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF | |
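
A short sketch of the masking rule the sh_eth interrupt handler now applies (register names reused only as plain constants): handle only the sources enabled in EESIPR, but always keep ECI, since that interrupt arrives regardless of the mask and has to be fully processed in sh_eth_error() to be quenched.

#include <stdio.h>

#define ECI_BIT 0x00040000ul   /* stand-in for DMAC_M_ECI */

static unsigned long effective_status(unsigned long eesr, unsigned long eesipr)
{
	return eesr & (eesipr | ECI_BIT);   /* enabled sources, plus ECI always */
}

int main(void)
{
	/* ECI pending while its mask bit is off: it is still reported */
	printf("0x%08lx\n", effective_status(0x00040001ul, 0x00000000ul));
	return 0;
}
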
@@ -1373,7 +1377,7 @@ static void sh_eth_adjust_link(struct net_device *ndev) | |||
1373 | struct phy_device *phydev = mdp->phydev; | 1377 | struct phy_device *phydev = mdp->phydev; |
1374 | int new_state = 0; | 1378 | int new_state = 0; |
1375 | 1379 | ||
1376 | if (phydev->link != PHY_DOWN) { | 1380 | if (phydev->link) { |
1377 | if (phydev->duplex != mdp->duplex) { | 1381 | if (phydev->duplex != mdp->duplex) { |
1378 | new_state = 1; | 1382 | new_state = 1; |
1379 | mdp->duplex = phydev->duplex; | 1383 | mdp->duplex = phydev->duplex; |
@@ -1387,17 +1391,21 @@ static void sh_eth_adjust_link(struct net_device *ndev) | |||
1387 | if (mdp->cd->set_rate) | 1391 | if (mdp->cd->set_rate) |
1388 | mdp->cd->set_rate(ndev); | 1392 | mdp->cd->set_rate(ndev); |
1389 | } | 1393 | } |
1390 | if (mdp->link == PHY_DOWN) { | 1394 | if (!mdp->link) { |
1391 | sh_eth_write(ndev, | 1395 | sh_eth_write(ndev, |
1392 | (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR); | 1396 | (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR); |
1393 | new_state = 1; | 1397 | new_state = 1; |
1394 | mdp->link = phydev->link; | 1398 | mdp->link = phydev->link; |
1399 | if (mdp->cd->no_psr || mdp->no_ether_link) | ||
1400 | sh_eth_rcv_snd_enable(ndev); | ||
1395 | } | 1401 | } |
1396 | } else if (mdp->link) { | 1402 | } else if (mdp->link) { |
1397 | new_state = 1; | 1403 | new_state = 1; |
1398 | mdp->link = PHY_DOWN; | 1404 | mdp->link = 0; |
1399 | mdp->speed = 0; | 1405 | mdp->speed = 0; |
1400 | mdp->duplex = -1; | 1406 | mdp->duplex = -1; |
1407 | if (mdp->cd->no_psr || mdp->no_ether_link) | ||
1408 | sh_eth_rcv_snd_disable(ndev); | ||
1401 | } | 1409 | } |
1402 | 1410 | ||
1403 | if (new_state && netif_msg_link(mdp)) | 1411 | if (new_state && netif_msg_link(mdp)) |
@@ -1414,7 +1422,7 @@ static int sh_eth_phy_init(struct net_device *ndev) | |||
1414 | snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, | 1422 | snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, |
1415 | mdp->mii_bus->id , mdp->phy_id); | 1423 | mdp->mii_bus->id , mdp->phy_id); |
1416 | 1424 | ||
1417 | mdp->link = PHY_DOWN; | 1425 | mdp->link = 0; |
1418 | mdp->speed = 0; | 1426 | mdp->speed = 0; |
1419 | mdp->duplex = -1; | 1427 | mdp->duplex = -1; |
1420 | 1428 | ||
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index e6655678458e..828be4515008 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h | |||
@@ -723,7 +723,7 @@ struct sh_eth_private { | |||
723 | u32 phy_id; /* PHY ID */ | 723 | u32 phy_id; /* PHY ID */ |
724 | struct mii_bus *mii_bus; /* MDIO bus control */ | 724 | struct mii_bus *mii_bus; /* MDIO bus control */ |
725 | struct phy_device *phydev; /* PHY device control */ | 725 | struct phy_device *phydev; /* PHY device control */ |
726 | enum phy_state link; | 726 | int link; |
727 | phy_interface_t phy_interface; | 727 | phy_interface_t phy_interface; |
728 | int msg_enable; | 728 | int msg_enable; |
729 | int speed; | 729 | int speed; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c index 0c74a702d461..50617c5a0bdb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c | |||
@@ -149,6 +149,7 @@ void dwmac_mmc_intr_all_mask(void __iomem *ioaddr) | |||
149 | { | 149 | { |
150 | writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK); | 150 | writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK); |
151 | writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK); | 151 | writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK); |
152 | writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_IPC_INTR_MASK); | ||
152 | } | 153 | } |
153 | 154 | ||
154 | /* This reads the MAC core counters (if actually supported). | 155 |
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index df32a090d08e..4781d3d8e182 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
@@ -436,7 +436,7 @@ void cpsw_tx_handler(void *token, int len, int status) | |||
436 | * queue is stopped then start the queue as we have free desc for tx | 436 | * queue is stopped then start the queue as we have free desc for tx |
437 | */ | 437 | */ |
438 | if (unlikely(netif_queue_stopped(ndev))) | 438 | if (unlikely(netif_queue_stopped(ndev))) |
439 | netif_start_queue(ndev); | 439 | netif_wake_queue(ndev); |
440 | cpts_tx_timestamp(priv->cpts, skb); | 440 | cpts_tx_timestamp(priv->cpts, skb); |
441 | priv->stats.tx_packets++; | 441 | priv->stats.tx_packets++; |
442 | priv->stats.tx_bytes += len; | 442 | priv->stats.tx_bytes += len; |
@@ -1380,7 +1380,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, | |||
1380 | memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); | 1380 | memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); |
1381 | 1381 | ||
1382 | if (data->dual_emac) { | 1382 | if (data->dual_emac) { |
1383 | if (of_property_read_u32(node, "dual_emac_res_vlan", | 1383 | if (of_property_read_u32(slave_node, "dual_emac_res_vlan", |
1384 | &prop)) { | 1384 | &prop)) { |
1385 | pr_err("Missing dual_emac_res_vlan in DT.\n"); | 1385 | pr_err("Missing dual_emac_res_vlan in DT.\n"); |
1386 | slave_data->dual_emac_res_vlan = i+1; | 1386 | slave_data->dual_emac_res_vlan = i+1; |
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index ae1b77aa199f..72300bc9e378 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c | |||
@@ -1053,7 +1053,7 @@ static void emac_tx_handler(void *token, int len, int status) | |||
1053 | * queue is stopped then start the queue as we have free desc for tx | 1053 | * queue is stopped then start the queue as we have free desc for tx |
1054 | */ | 1054 | */ |
1055 | if (unlikely(netif_queue_stopped(ndev))) | 1055 | if (unlikely(netif_queue_stopped(ndev))) |
1056 | netif_start_queue(ndev); | 1056 | netif_wake_queue(ndev); |
1057 | ndev->stats.tx_packets++; | 1057 | ndev->stats.tx_packets++; |
1058 | ndev->stats.tx_bytes += len; | 1058 | ndev->stats.tx_bytes += len; |
1059 | dev_kfree_skb_any(skb); | 1059 | dev_kfree_skb_any(skb); |
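
Both the cpsw and davinci_emac fixes swap netif_start_queue() for netif_wake_queue(); the difference is that waking also schedules the queue so the core retries skbs that piled up while TX was stopped, whereas starting merely clears the stopped bit. A toy model of that distinction follows (hypothetical helpers, not the in-kernel implementations):

#include <stdbool.h>
#include <stdio.h>

struct txq { bool stopped; bool scheduled; };

static void start_queue(struct txq *q)
{
	q->stopped = false;                 /* clear the flag, nothing else */
}

static void wake_queue(struct txq *q)
{
	q->stopped = false;
	q->scheduled = true;                /* also kick the qdisc to resubmit */
}

int main(void)
{
	struct txq a = { true, false }, b = { true, false };

	start_queue(&a);
	wake_queue(&b);
	printf("start: stopped=%d scheduled=%d\n", a.stopped, a.scheduled);
	printf("wake:  stopped=%d scheduled=%d\n", b.stopped, b.scheduled);
	return 0;
}
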
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 1cd77483da50..f5f0f09e4cc5 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c | |||
@@ -470,8 +470,10 @@ static void netvsc_send_completion(struct hv_device *device, | |||
470 | packet->trans_id; | 470 | packet->trans_id; |
471 | 471 | ||
472 | /* Notify the layer above us */ | 472 | /* Notify the layer above us */ |
473 | nvsc_packet->completion.send.send_completion( | 473 | if (nvsc_packet) |
474 | nvsc_packet->completion.send.send_completion_ctx); | 474 | nvsc_packet->completion.send.send_completion( |
475 | nvsc_packet->completion.send. | ||
476 | send_completion_ctx); | ||
475 | 477 | ||
476 | num_outstanding_sends = | 478 | num_outstanding_sends = |
477 | atomic_dec_return(&net_device->num_outstanding_sends); | 479 | atomic_dec_return(&net_device->num_outstanding_sends); |
@@ -498,6 +500,7 @@ int netvsc_send(struct hv_device *device, | |||
498 | int ret = 0; | 500 | int ret = 0; |
499 | struct nvsp_message sendMessage; | 501 | struct nvsp_message sendMessage; |
500 | struct net_device *ndev; | 502 | struct net_device *ndev; |
503 | u64 req_id; | ||
501 | 504 | ||
502 | net_device = get_outbound_net_device(device); | 505 | net_device = get_outbound_net_device(device); |
503 | if (!net_device) | 506 | if (!net_device) |
@@ -518,20 +521,24 @@ int netvsc_send(struct hv_device *device, | |||
518 | 0xFFFFFFFF; | 521 | 0xFFFFFFFF; |
519 | sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0; | 522 | sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0; |
520 | 523 | ||
524 | if (packet->completion.send.send_completion) | ||
525 | req_id = (u64)packet; | ||
526 | else | ||
527 | req_id = 0; | ||
528 | |||
521 | if (packet->page_buf_cnt) { | 529 | if (packet->page_buf_cnt) { |
522 | ret = vmbus_sendpacket_pagebuffer(device->channel, | 530 | ret = vmbus_sendpacket_pagebuffer(device->channel, |
523 | packet->page_buf, | 531 | packet->page_buf, |
524 | packet->page_buf_cnt, | 532 | packet->page_buf_cnt, |
525 | &sendMessage, | 533 | &sendMessage, |
526 | sizeof(struct nvsp_message), | 534 | sizeof(struct nvsp_message), |
527 | (unsigned long)packet); | 535 | req_id); |
528 | } else { | 536 | } else { |
529 | ret = vmbus_sendpacket(device->channel, &sendMessage, | 537 | ret = vmbus_sendpacket(device->channel, &sendMessage, |
530 | sizeof(struct nvsp_message), | 538 | sizeof(struct nvsp_message), |
531 | (unsigned long)packet, | 539 | req_id, |
532 | VM_PKT_DATA_INBAND, | 540 | VM_PKT_DATA_INBAND, |
533 | VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); | 541 | VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); |
534 | |||
535 | } | 542 | } |
536 | 543 | ||
537 | if (ret == 0) { | 544 | if (ret == 0) { |
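
A sketch of the request-id convention the netvsc change introduces: the packet pointer is used as the VMBus transaction id only when a send-completion callback exists; otherwise 0 is passed, so a host completion for such a packet is ignored rather than dereferencing a bogus pointer (matching the new NULL check in netvsc_send_completion). The types below are simplified stand-ins.

#include <stdint.h>
#include <stdio.h>

struct packet {
	void (*send_completion)(void *ctx);
	void *ctx;
};

static uint64_t request_id(const struct packet *pkt)
{
	return pkt->send_completion ? (uint64_t)(uintptr_t)pkt : 0;
}

static void handle_completion(uint64_t req_id)
{
	struct packet *pkt = (struct packet *)(uintptr_t)req_id;

	if (pkt)                                  /* id 0 means "nobody to notify" */
		pkt->send_completion(pkt->ctx);
}

static void done(void *ctx) { printf("completed: %s\n", (const char *)ctx); }

int main(void)
{
	struct packet with_cb = { done, "data packet" };
	struct packet no_cb = { 0 };

	handle_completion(request_id(&with_cb));
	handle_completion(request_id(&no_cb));    /* silently dropped */
	return 0;
}
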
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 5f85205cd12b..8341b62e5521 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
@@ -241,13 +241,11 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj, | |||
241 | 241 | ||
242 | if (status == 1) { | 242 | if (status == 1) { |
243 | netif_carrier_on(net); | 243 | netif_carrier_on(net); |
244 | netif_wake_queue(net); | ||
245 | ndev_ctx = netdev_priv(net); | 244 | ndev_ctx = netdev_priv(net); |
246 | schedule_delayed_work(&ndev_ctx->dwork, 0); | 245 | schedule_delayed_work(&ndev_ctx->dwork, 0); |
247 | schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20)); | 246 | schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20)); |
248 | } else { | 247 | } else { |
249 | netif_carrier_off(net); | 248 | netif_carrier_off(net); |
250 | netif_tx_disable(net); | ||
251 | } | 249 | } |
252 | } | 250 | } |
253 | 251 | ||
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 2b657d4d63a8..0775f0aefd1e 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c | |||
@@ -61,9 +61,6 @@ struct rndis_request { | |||
61 | 61 | ||
62 | static void rndis_filter_send_completion(void *ctx); | 62 | static void rndis_filter_send_completion(void *ctx); |
63 | 63 | ||
64 | static void rndis_filter_send_request_completion(void *ctx); | ||
65 | |||
66 | |||
67 | 64 | ||
68 | static struct rndis_device *get_rndis_device(void) | 65 | static struct rndis_device *get_rndis_device(void) |
69 | { | 66 | { |
@@ -241,10 +238,7 @@ static int rndis_filter_send_request(struct rndis_device *dev, | |||
241 | packet->page_buf[0].len; | 238 | packet->page_buf[0].len; |
242 | } | 239 | } |
243 | 240 | ||
244 | packet->completion.send.send_completion_ctx = req;/* packet; */ | 241 | packet->completion.send.send_completion = NULL; |
245 | packet->completion.send.send_completion = | ||
246 | rndis_filter_send_request_completion; | ||
247 | packet->completion.send.send_completion_tid = (unsigned long)dev; | ||
248 | 242 | ||
249 | ret = netvsc_send(dev->net_dev->dev, packet); | 243 | ret = netvsc_send(dev->net_dev->dev, packet); |
250 | return ret; | 244 | return ret; |
@@ -999,9 +993,3 @@ static void rndis_filter_send_completion(void *ctx) | |||
999 | /* Pass it back to the original handler */ | 993 | /* Pass it back to the original handler */ |
1000 | filter_pkt->completion(filter_pkt->completion_ctx); | 994 | filter_pkt->completion(filter_pkt->completion_ctx); |
1001 | } | 995 | } |
1002 | |||
1003 | |||
1004 | static void rndis_filter_send_request_completion(void *ctx) | ||
1005 | { | ||
1006 | /* Noop */ | ||
1007 | } | ||
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index b7c457adc0dc..729ed533bb33 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -1594,7 +1594,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) | |||
1594 | 1594 | ||
1595 | if (tun->flags & TUN_TAP_MQ && | 1595 | if (tun->flags & TUN_TAP_MQ && |
1596 | (tun->numqueues + tun->numdisabled > 1)) | 1596 | (tun->numqueues + tun->numdisabled > 1)) |
1597 | return err; | 1597 | return -EBUSY; |
1598 | } | 1598 | } |
1599 | else { | 1599 | else { |
1600 | char *name; | 1600 | char *name; |
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 16c842997291..6bd91676d2cb 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c | |||
@@ -134,7 +134,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb | |||
134 | goto error; | 134 | goto error; |
135 | 135 | ||
136 | if (skb) { | 136 | if (skb) { |
137 | if (skb->len <= sizeof(ETH_HLEN)) | 137 | if (skb->len <= ETH_HLEN) |
138 | goto error; | 138 | goto error; |
139 | 139 | ||
140 | /* mapping VLANs to MBIM sessions: | 140 | /* mapping VLANs to MBIM sessions: |
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 968d5d50751d..2a3579f67910 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/netdevice.h> | 14 | #include <linux/netdevice.h> |
15 | #include <linux/ethtool.h> | 15 | #include <linux/ethtool.h> |
16 | #include <linux/etherdevice.h> | ||
16 | #include <linux/mii.h> | 17 | #include <linux/mii.h> |
17 | #include <linux/usb.h> | 18 | #include <linux/usb.h> |
18 | #include <linux/usb/cdc.h> | 19 | #include <linux/usb/cdc.h> |
@@ -52,6 +53,96 @@ struct qmi_wwan_state { | |||
52 | struct usb_interface *data; | 53 | struct usb_interface *data; |
53 | }; | 54 | }; |
54 | 55 | ||
56 | /* default ethernet address used by the modem */ | ||
57 | static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3}; | ||
58 | |||
59 | /* Make up an ethernet header if the packet doesn't have one. | ||
60 | * | ||
61 | * A firmware bug common among several devices causes them to send raw | ||
62 | * IP packets under some circumstances. There is no way for the | ||
63 | * driver/host to know when this will happen. And even when the bug | ||
64 | * hits, some packets will still arrive with an intact header. | ||
65 | * | ||
66 | * The supported devices are only capable of sending IPv4, IPv6 and | ||
67 | * ARP packets on a point-to-point link. Any packet with an ethernet | ||
68 | * header will have either our address or a broadcast/multicast | ||
69 | * address as destination. ARP packets will always have a header. | ||
70 | * | ||
71 | * This means that this function will reliably add the appropriate | ||
72 | * header iff necessary, provided our hardware address does not start | ||
73 | * with 4 or 6. | ||
74 | * | ||
75 | * Another common firmware bug results in all packets being addressed | ||
76 | * to 00:a0:c6:00:00:00 despite the host address being different. | ||
77 | * This function will also fixup such packets. | ||
78 | */ | ||
79 | static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | ||
80 | { | ||
81 | __be16 proto; | ||
82 | |||
83 | /* usbnet rx_complete guarantees that skb->len is at least | ||
84 | * hard_header_len, so we can inspect the dest address without | ||
85 | * checking skb->len | ||
86 | */ | ||
87 | switch (skb->data[0] & 0xf0) { | ||
88 | case 0x40: | ||
89 | proto = htons(ETH_P_IP); | ||
90 | break; | ||
91 | case 0x60: | ||
92 | proto = htons(ETH_P_IPV6); | ||
93 | break; | ||
94 | case 0x00: | ||
95 | if (is_multicast_ether_addr(skb->data)) | ||
96 | return 1; | ||
97 | /* possibly bogus destination - rewrite just in case */ | ||
98 | skb_reset_mac_header(skb); | ||
99 | goto fix_dest; | ||
100 | default: | ||
101 | /* pass along other packets without modifications */ | ||
102 | return 1; | ||
103 | } | ||
104 | if (skb_headroom(skb) < ETH_HLEN) | ||
105 | return 0; | ||
106 | skb_push(skb, ETH_HLEN); | ||
107 | skb_reset_mac_header(skb); | ||
108 | eth_hdr(skb)->h_proto = proto; | ||
109 | memset(eth_hdr(skb)->h_source, 0, ETH_ALEN); | ||
110 | fix_dest: | ||
111 | memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN); | ||
112 | return 1; | ||
113 | } | ||
114 | |||
115 | /* very simplistic detection of IPv4 or IPv6 headers */ | ||
116 | static bool possibly_iphdr(const char *data) | ||
117 | { | ||
118 | return (data[0] & 0xd0) == 0x40; | ||
119 | } | ||
120 | |||
121 | /* disallow addresses which may be confused with IP headers */ | ||
122 | static int qmi_wwan_mac_addr(struct net_device *dev, void *p) | ||
123 | { | ||
124 | int ret; | ||
125 | struct sockaddr *addr = p; | ||
126 | |||
127 | ret = eth_prepare_mac_addr_change(dev, p); | ||
128 | if (ret < 0) | ||
129 | return ret; | ||
130 | if (possibly_iphdr(addr->sa_data)) | ||
131 | return -EADDRNOTAVAIL; | ||
132 | eth_commit_mac_addr_change(dev, p); | ||
133 | return 0; | ||
134 | } | ||
135 | |||
136 | static const struct net_device_ops qmi_wwan_netdev_ops = { | ||
137 | .ndo_open = usbnet_open, | ||
138 | .ndo_stop = usbnet_stop, | ||
139 | .ndo_start_xmit = usbnet_start_xmit, | ||
140 | .ndo_tx_timeout = usbnet_tx_timeout, | ||
141 | .ndo_change_mtu = usbnet_change_mtu, | ||
142 | .ndo_set_mac_address = qmi_wwan_mac_addr, | ||
143 | .ndo_validate_addr = eth_validate_addr, | ||
144 | }; | ||
145 | |||
55 | /* using a counter to merge subdriver requests with our own into a combined state */ | 146 | /* using a counter to merge subdriver requests with our own into a combined state */ |
56 | static int qmi_wwan_manage_power(struct usbnet *dev, int on) | 147 | static int qmi_wwan_manage_power(struct usbnet *dev, int on) |
57 | { | 148 | { |
@@ -229,6 +320,18 @@ next_desc: | |||
229 | usb_driver_release_interface(driver, info->data); | 320 | usb_driver_release_interface(driver, info->data); |
230 | } | 321 | } |
231 | 322 | ||
323 | /* Never use the same address on both ends of the link, even | ||
324 | * if the buggy firmware told us to. | ||
325 | */ | ||
326 | if (!compare_ether_addr(dev->net->dev_addr, default_modem_addr)) | ||
327 | eth_hw_addr_random(dev->net); | ||
328 | |||
329 | /* make MAC addr easily distinguishable from an IP header */ | ||
330 | if (possibly_iphdr(dev->net->dev_addr)) { | ||
331 | dev->net->dev_addr[0] |= 0x02; /* set local assignment bit */ | ||
332 | dev->net->dev_addr[0] &= 0xbf; /* clear "IP" bit */ | ||
333 | } | ||
334 | dev->net->netdev_ops = &qmi_wwan_netdev_ops; | ||
232 | err: | 335 | err: |
233 | return status; | 336 | return status; |
234 | } | 337 | } |
@@ -307,6 +410,7 @@ static const struct driver_info qmi_wwan_info = { | |||
307 | .bind = qmi_wwan_bind, | 410 | .bind = qmi_wwan_bind, |
308 | .unbind = qmi_wwan_unbind, | 411 | .unbind = qmi_wwan_unbind, |
309 | .manage_power = qmi_wwan_manage_power, | 412 | .manage_power = qmi_wwan_manage_power, |
413 | .rx_fixup = qmi_wwan_rx_fixup, | ||
310 | }; | 414 | }; |
311 | 415 | ||
312 | #define HUAWEI_VENDOR_ID 0x12D1 | 416 | #define HUAWEI_VENDOR_ID 0x12D1 |
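
The new qmi_wwan rx_fixup hinges on one observation: a raw IP packet starts with a version nibble of 4 or 6, so the first byte tells a header-less frame apart from one carrying an Ethernet header, and possibly_iphdr() rejects MAC addresses whose first byte could be mistaken for such a nibble. A runnable sketch of just those bit tests (buffer handling omitted):

#include <stdio.h>

/* same predicate as possibly_iphdr(): top nibble is 4 or 6 */
static int looks_like_ip(unsigned char first_byte)
{
	return (first_byte & 0xd0) == 0x40;
}

static const char *classify(unsigned char first_byte)
{
	switch (first_byte & 0xf0) {
	case 0x40: return "raw IPv4 - prepend an Ethernet header";
	case 0x60: return "raw IPv6 - prepend an Ethernet header";
	case 0x00: return "zero first byte - multicast or bogus destination";
	default:   return "Ethernet header already present";
	}
}

int main(void)
{
	printf("%s\n", classify(0x45));                  /* typical IPv4 version/IHL byte */
	printf("%s\n", classify(0x60));
	printf("%s\n", classify(0x02));                  /* locally administered MAC */
	printf("MAC starting 0x42 allowed? %s\n",
	       looks_like_ip(0x42) ? "no" : "yes");
	return 0;
}
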
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 9abe51710f22..1a15ec14c386 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c | |||
@@ -914,8 +914,12 @@ static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size) | |||
914 | static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu) | 914 | static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu) |
915 | { | 915 | { |
916 | struct usbnet *dev = netdev_priv(netdev); | 916 | struct usbnet *dev = netdev_priv(netdev); |
917 | int ret; | ||
918 | |||
919 | if (new_mtu > MAX_SINGLE_PACKET_SIZE) | ||
920 | return -EINVAL; | ||
917 | 921 | ||
918 | int ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu); | 922 | ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN); |
919 | if (ret < 0) { | 923 | if (ret < 0) { |
920 | netdev_warn(dev->net, "Failed to set mac rx frame length\n"); | 924 | netdev_warn(dev->net, "Failed to set mac rx frame length\n"); |
921 | return ret; | 925 | return ret; |
@@ -1324,7 +1328,7 @@ static int smsc75xx_reset(struct usbnet *dev) | |||
1324 | 1328 | ||
1325 | netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x\n", buf); | 1329 | netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x\n", buf); |
1326 | 1330 | ||
1327 | ret = smsc75xx_set_rx_max_frame_length(dev, 1514); | 1331 | ret = smsc75xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN); |
1328 | if (ret < 0) { | 1332 | if (ret < 0) { |
1329 | netdev_warn(dev->net, "Failed to set max rx frame length\n"); | 1333 | netdev_warn(dev->net, "Failed to set max rx frame length\n"); |
1330 | return ret; | 1334 | return ret; |
@@ -2134,8 +2138,8 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
2134 | else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT)) | 2138 | else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT)) |
2135 | dev->net->stats.rx_frame_errors++; | 2139 | dev->net->stats.rx_frame_errors++; |
2136 | } else { | 2140 | } else { |
2137 | /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */ | 2141 | /* MAX_SINGLE_PACKET_SIZE + 4(CRC) + 2(COE) + 4(Vlan) */ |
2138 | if (unlikely(size > (ETH_FRAME_LEN + 12))) { | 2142 | if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12))) { |
2139 | netif_dbg(dev, rx_err, dev->net, | 2143 | netif_dbg(dev, rx_err, dev->net, |
2140 | "size err rx_cmd_a=0x%08x\n", | 2144 | "size err rx_cmd_a=0x%08x\n", |
2141 | rx_cmd_a); | 2145 | rx_cmd_a); |
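
A compact sketch of the smsc75xx MTU rule being fixed: the MAC's RX frame-length limit has to cover the MTU plus the Ethernet header, and an MTU above the device's single-packet limit is rejected up front. MAX_PACKET_LIMIT below is a placeholder, not the driver's MAX_SINGLE_PACKET_SIZE value.

#include <stdio.h>

#define ETH_HLEN          14
#define MAX_PACKET_LIMIT  9000     /* placeholder for MAX_SINGLE_PACKET_SIZE */

static int rx_frame_len_for_mtu(int new_mtu)
{
	if (new_mtu > MAX_PACKET_LIMIT)
		return -1;                      /* the driver returns -EINVAL */
	return new_mtu + ETH_HLEN;              /* value programmed into the MAC */
}

int main(void)
{
	printf("%d\n", rx_frame_len_for_mtu(1500));   /* 1514 */
	printf("%d\n", rx_frame_len_for_mtu(9600));   /* rejected */
	return 0;
}
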
diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h index 28fd99203f64..bdee2ed67219 100644 --- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h | |||
@@ -519,7 +519,7 @@ static const u32 ar9580_1p0_mac_core[][2] = { | |||
519 | {0x00008258, 0x00000000}, | 519 | {0x00008258, 0x00000000}, |
520 | {0x0000825c, 0x40000000}, | 520 | {0x0000825c, 0x40000000}, |
521 | {0x00008260, 0x00080922}, | 521 | {0x00008260, 0x00080922}, |
522 | {0x00008264, 0x9bc00010}, | 522 | {0x00008264, 0x9d400010}, |
523 | {0x00008268, 0xffffffff}, | 523 | {0x00008268, 0xffffffff}, |
524 | {0x0000826c, 0x0000ffff}, | 524 | {0x0000826c, 0x0000ffff}, |
525 | {0x00008270, 0x00000000}, | 525 | {0x00008270, 0x00000000}, |
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c index 467b60014b7b..73fe8d6db566 100644 --- a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c +++ b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c | |||
@@ -143,14 +143,14 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq) | |||
143 | u32 sz, i; | 143 | u32 sz, i; |
144 | struct channel_detector *cd; | 144 | struct channel_detector *cd; |
145 | 145 | ||
146 | cd = kmalloc(sizeof(*cd), GFP_KERNEL); | 146 | cd = kmalloc(sizeof(*cd), GFP_ATOMIC); |
147 | if (cd == NULL) | 147 | if (cd == NULL) |
148 | goto fail; | 148 | goto fail; |
149 | 149 | ||
150 | INIT_LIST_HEAD(&cd->head); | 150 | INIT_LIST_HEAD(&cd->head); |
151 | cd->freq = freq; | 151 | cd->freq = freq; |
152 | sz = sizeof(cd->detectors) * dpd->num_radar_types; | 152 | sz = sizeof(cd->detectors) * dpd->num_radar_types; |
153 | cd->detectors = kzalloc(sz, GFP_KERNEL); | 153 | cd->detectors = kzalloc(sz, GFP_ATOMIC); |
154 | if (cd->detectors == NULL) | 154 | if (cd->detectors == NULL) |
155 | goto fail; | 155 | goto fail; |
156 | 156 | ||
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c index 91b8dceeadb1..5e48c5515b8c 100644 --- a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c +++ b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c | |||
@@ -218,7 +218,7 @@ static bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts) | |||
218 | { | 218 | { |
219 | struct pulse_elem *p = pool_get_pulse_elem(); | 219 | struct pulse_elem *p = pool_get_pulse_elem(); |
220 | if (p == NULL) { | 220 | if (p == NULL) { |
221 | p = kmalloc(sizeof(*p), GFP_KERNEL); | 221 | p = kmalloc(sizeof(*p), GFP_ATOMIC); |
222 | if (p == NULL) { | 222 | if (p == NULL) { |
223 | DFS_POOL_STAT_INC(pulse_alloc_error); | 223 | DFS_POOL_STAT_INC(pulse_alloc_error); |
224 | return false; | 224 | return false; |
@@ -299,7 +299,7 @@ static bool pseq_handler_create_sequences(struct pri_detector *pde, | |||
299 | ps.deadline_ts = ps.first_ts + ps.dur; | 299 | ps.deadline_ts = ps.first_ts + ps.dur; |
300 | new_ps = pool_get_pseq_elem(); | 300 | new_ps = pool_get_pseq_elem(); |
301 | if (new_ps == NULL) { | 301 | if (new_ps == NULL) { |
302 | new_ps = kmalloc(sizeof(*new_ps), GFP_KERNEL); | 302 | new_ps = kmalloc(sizeof(*new_ps), GFP_ATOMIC); |
303 | if (new_ps == NULL) { | 303 | if (new_ps == NULL) { |
304 | DFS_POOL_STAT_INC(pseq_alloc_error); | 304 | DFS_POOL_STAT_INC(pseq_alloc_error); |
305 | return false; | 305 | return false; |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 716058b67557..a47f5e05fc04 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c | |||
@@ -796,7 +796,7 @@ static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv) | |||
796 | * required version. | 796 | * required version. |
797 | */ | 797 | */ |
798 | if (priv->fw_version_major != MAJOR_VERSION_REQ || | 798 | if (priv->fw_version_major != MAJOR_VERSION_REQ || |
799 | priv->fw_version_minor != MINOR_VERSION_REQ) { | 799 | priv->fw_version_minor < MINOR_VERSION_REQ) { |
800 | dev_err(priv->dev, "ath9k_htc: Please upgrade to FW version %d.%d\n", | 800 | dev_err(priv->dev, "ath9k_htc: Please upgrade to FW version %d.%d\n", |
801 | MAJOR_VERSION_REQ, MINOR_VERSION_REQ); | 801 | MAJOR_VERSION_REQ, MINOR_VERSION_REQ); |
802 | return -EINVAL; | 802 | return -EINVAL; |
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c index 39c84ecf6a42..7fdac6c7b3ea 100644 --- a/drivers/net/wireless/ath/ath9k/link.c +++ b/drivers/net/wireless/ath/ath9k/link.c | |||
@@ -170,7 +170,8 @@ void ath_rx_poll(unsigned long data) | |||
170 | { | 170 | { |
171 | struct ath_softc *sc = (struct ath_softc *)data; | 171 | struct ath_softc *sc = (struct ath_softc *)data; |
172 | 172 | ||
173 | ieee80211_queue_work(sc->hw, &sc->hw_check_work); | 173 | if (!test_bit(SC_OP_INVALID, &sc->sc_flags)) |
174 | ieee80211_queue_work(sc->hw, &sc->hw_check_work); | ||
174 | } | 175 | } |
175 | 176 | ||
176 | /* | 177 | /* |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 6e66f9c6782b..988372d218a4 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -280,6 +280,10 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan) | |||
280 | if (r) { | 280 | if (r) { |
281 | ath_err(common, | 281 | ath_err(common, |
282 | "Unable to reset channel, reset status %d\n", r); | 282 | "Unable to reset channel, reset status %d\n", r); |
283 | |||
284 | ath9k_hw_enable_interrupts(ah); | ||
285 | ath9k_queue_reset(sc, RESET_TYPE_BB_HANG); | ||
286 | |||
283 | goto out; | 287 | goto out; |
284 | } | 288 | } |
285 | 289 | ||
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c index 38bc5a7997ff..122146943bf2 100644 --- a/drivers/net/wireless/b43/dma.c +++ b/drivers/net/wireless/b43/dma.c | |||
@@ -1487,8 +1487,12 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, | |||
1487 | const struct b43_dma_ops *ops; | 1487 | const struct b43_dma_ops *ops; |
1488 | struct b43_dmaring *ring; | 1488 | struct b43_dmaring *ring; |
1489 | struct b43_dmadesc_meta *meta; | 1489 | struct b43_dmadesc_meta *meta; |
1490 | static const struct b43_txstatus fake; /* filled with 0 */ | ||
1491 | const struct b43_txstatus *txstat; | ||
1490 | int slot, firstused; | 1492 | int slot, firstused; |
1491 | bool frame_succeed; | 1493 | bool frame_succeed; |
1494 | int skip; | ||
1495 | static u8 err_out1, err_out2; | ||
1492 | 1496 | ||
1493 | ring = parse_cookie(dev, status->cookie, &slot); | 1497 | ring = parse_cookie(dev, status->cookie, &slot); |
1494 | if (unlikely(!ring)) | 1498 | if (unlikely(!ring)) |
@@ -1501,13 +1505,36 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, | |||
1501 | firstused = ring->current_slot - ring->used_slots + 1; | 1505 | firstused = ring->current_slot - ring->used_slots + 1; |
1502 | if (firstused < 0) | 1506 | if (firstused < 0) |
1503 | firstused = ring->nr_slots + firstused; | 1507 | firstused = ring->nr_slots + firstused; |
1508 | |||
1509 | skip = 0; | ||
1504 | if (unlikely(slot != firstused)) { | 1510 | if (unlikely(slot != firstused)) { |
1505 | /* This possibly is a firmware bug and will result in | 1511 | /* This possibly is a firmware bug and will result in |
1506 | * malfunction, memory leaks and/or stall of DMA functionality. */ | 1512 | * malfunction, memory leaks and/or stall of DMA functionality. |
1507 | b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. " | 1513 | */ |
1508 | "Expected %d, but got %d\n", | 1514 | if (slot == next_slot(ring, next_slot(ring, firstused))) { |
1509 | ring->index, firstused, slot); | 1515 | /* If a single header/data pair was missed, skip over |
1510 | return; | 1516 | * the first two slots in an attempt to recover. |
1517 | */ | ||
1518 | slot = firstused; | ||
1519 | skip = 2; | ||
1520 | if (!err_out1) { | ||
1521 | /* Report the error once. */ | ||
1522 | b43dbg(dev->wl, | ||
1523 | "Skip on DMA ring %d slot %d.\n", | ||
1524 | ring->index, slot); | ||
1525 | err_out1 = 1; | ||
1526 | } | ||
1527 | } else { | ||
1528 | /* More than a single header/data pair were missed. | ||
1529 | * Report this error once. | ||
1530 | */ | ||
1531 | if (!err_out2) | ||
1532 | b43dbg(dev->wl, | ||
1533 | "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n", | ||
1534 | ring->index, firstused, slot); | ||
1535 | err_out2 = 1; | ||
1536 | return; | ||
1537 | } | ||
1511 | } | 1538 | } |
1512 | 1539 | ||
1513 | ops = ring->ops; | 1540 | ops = ring->ops; |
@@ -1522,11 +1549,13 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, | |||
1522 | slot, firstused, ring->index); | 1549 | slot, firstused, ring->index); |
1523 | break; | 1550 | break; |
1524 | } | 1551 | } |
1552 | |||
1525 | if (meta->skb) { | 1553 | if (meta->skb) { |
1526 | struct b43_private_tx_info *priv_info = | 1554 | struct b43_private_tx_info *priv_info = |
1527 | b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb)); | 1555 | b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb)); |
1528 | 1556 | ||
1529 | unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1); | 1557 | unmap_descbuffer(ring, meta->dmaaddr, |
1558 | meta->skb->len, 1); | ||
1530 | kfree(priv_info->bouncebuffer); | 1559 | kfree(priv_info->bouncebuffer); |
1531 | priv_info->bouncebuffer = NULL; | 1560 | priv_info->bouncebuffer = NULL; |
1532 | } else { | 1561 | } else { |
@@ -1538,8 +1567,9 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, | |||
1538 | struct ieee80211_tx_info *info; | 1567 | struct ieee80211_tx_info *info; |
1539 | 1568 | ||
1540 | if (unlikely(!meta->skb)) { | 1569 | if (unlikely(!meta->skb)) { |
1541 | /* This is a scatter-gather fragment of a frame, so | 1570 | /* This is a scatter-gather fragment of a frame, |
1542 | * the skb pointer must not be NULL. */ | 1571 | * so the skb pointer must not be NULL. |
1572 | */ | ||
1543 | b43dbg(dev->wl, "TX status unexpected NULL skb " | 1573 | b43dbg(dev->wl, "TX status unexpected NULL skb " |
1544 | "at slot %d (first=%d) on ring %d\n", | 1574 | "at slot %d (first=%d) on ring %d\n", |
1545 | slot, firstused, ring->index); | 1575 | slot, firstused, ring->index); |
@@ -1550,9 +1580,18 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, | |||
1550 | 1580 | ||
1551 | /* | 1581 | /* |
1552 | * Call back to inform the ieee80211 subsystem about | 1582 | * Call back to inform the ieee80211 subsystem about |
1553 | * the status of the transmission. | 1583 | * the status of the transmission. When skipping over |
1584 | * a missed TX status report, use a status structure | ||
1585 | * filled with zeros to indicate that the frame was not | ||
1586 | * sent (frame_count 0) and not acknowledged | ||
1554 | */ | 1587 | */ |
1555 | frame_succeed = b43_fill_txstatus_report(dev, info, status); | 1588 | if (unlikely(skip)) |
1589 | txstat = &fake; | ||
1590 | else | ||
1591 | txstat = status; | ||
1592 | |||
1593 | frame_succeed = b43_fill_txstatus_report(dev, info, | ||
1594 | txstat); | ||
1556 | #ifdef CONFIG_B43_DEBUG | 1595 | #ifdef CONFIG_B43_DEBUG |
1557 | if (frame_succeed) | 1596 | if (frame_succeed) |
1558 | ring->nr_succeed_tx_packets++; | 1597 | ring->nr_succeed_tx_packets++; |
@@ -1580,12 +1619,14 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, | |||
1580 | /* Everything unmapped and free'd. So it's not used anymore. */ | 1619 | /* Everything unmapped and free'd. So it's not used anymore. */ |
1581 | ring->used_slots--; | 1620 | ring->used_slots--; |
1582 | 1621 | ||
1583 | if (meta->is_last_fragment) { | 1622 | if (meta->is_last_fragment && !skip) { |
1584 | /* This is the last scatter-gather | 1623 | /* This is the last scatter-gather |
1585 | * fragment of the frame. We are done. */ | 1624 | * fragment of the frame. We are done. */ |
1586 | break; | 1625 | break; |
1587 | } | 1626 | } |
1588 | slot = next_slot(ring, slot); | 1627 | slot = next_slot(ring, slot); |
1628 | if (skip > 0) | ||
1629 | --skip; | ||
1589 | } | 1630 | } |
1590 | if (ring->stopped) { | 1631 | if (ring->stopped) { |
1591 | B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME); | 1632 | B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME); |
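
A userspace model of the recovery heuristic added to b43_dma_handle_txstatus(): TX descriptors are consumed as header/data pairs, so when the reported slot is exactly two slots ahead of the first used one, the code assumes a single status report was lost, starts at the first used slot and skips two slots of accounting, reporting the skipped frame with a zeroed status. Ring size and slot math are simplified here.

#include <stdio.h>

#define NR_SLOTS 16

static int next_slot(int slot) { return (slot + 1) % NR_SLOTS; }

/* Decide how to react when the reported slot disagrees with firstused. */
static int recover_skip(int reported, int firstused, int *start)
{
	if (reported == firstused) {
		*start = reported;
		return 0;                       /* in order, nothing lost */
	}
	if (reported == next_slot(next_slot(firstused))) {
		*start = firstused;             /* one header/data pair missed */
		return 2;                       /* handle it with an all-zero status */
	}
	return -1;                              /* badly out of order: give up */
}

int main(void)
{
	int start = 0, skip;

	skip = recover_skip(5, 3, &start);
	printf("start=%d skip=%d\n", start, skip);     /* start=3 skip=2 */
	skip = recover_skip(9, 3, &start);
	printf("skip=%d (unrecoverable)\n", skip);
	return 0;
}
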
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c index 3c35382ee6c2..b70f220bc4b3 100644 --- a/drivers/net/wireless/b43/phy_n.c +++ b/drivers/net/wireless/b43/phy_n.c | |||
@@ -1564,7 +1564,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev) | |||
1564 | u16 clip_off[2] = { 0xFFFF, 0xFFFF }; | 1564 | u16 clip_off[2] = { 0xFFFF, 0xFFFF }; |
1565 | 1565 | ||
1566 | u8 vcm_final = 0; | 1566 | u8 vcm_final = 0; |
1567 | s8 offset[4]; | 1567 | s32 offset[4]; |
1568 | s32 results[8][4] = { }; | 1568 | s32 results[8][4] = { }; |
1569 | s32 results_min[4] = { }; | 1569 | s32 results_min[4] = { }; |
1570 | s32 poll_results[4] = { }; | 1570 | s32 poll_results[4] = { }; |
@@ -1615,7 +1615,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev) | |||
1615 | } | 1615 | } |
1616 | for (i = 0; i < 4; i += 2) { | 1616 | for (i = 0; i < 4; i += 2) { |
1617 | s32 curr; | 1617 | s32 curr; |
1618 | s32 mind = 40; | 1618 | s32 mind = 0x100000; |
1619 | s32 minpoll = 249; | 1619 | s32 minpoll = 249; |
1620 | u8 minvcm = 0; | 1620 | u8 minvcm = 0; |
1621 | if (2 * core != i) | 1621 | if (2 * core != i) |
@@ -1732,7 +1732,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type) | |||
1732 | u8 regs_save_radio[2]; | 1732 | u8 regs_save_radio[2]; |
1733 | u16 regs_save_phy[2]; | 1733 | u16 regs_save_phy[2]; |
1734 | 1734 | ||
1735 | s8 offset[4]; | 1735 | s32 offset[4]; |
1736 | u8 core; | 1736 | u8 core; |
1737 | u8 rail; | 1737 | u8 rail; |
1738 | 1738 | ||
@@ -1799,7 +1799,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type) | |||
1799 | } | 1799 | } |
1800 | 1800 | ||
1801 | for (i = 0; i < 4; i++) { | 1801 | for (i = 0; i < 4; i++) { |
1802 | s32 mind = 40; | 1802 | s32 mind = 0x100000; |
1803 | u8 minvcm = 0; | 1803 | u8 minvcm = 0; |
1804 | s32 minpoll = 249; | 1804 | s32 minpoll = 249; |
1805 | s32 curr; | 1805 | s32 curr; |
@@ -5165,7 +5165,8 @@ static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid) | |||
5165 | #endif | 5165 | #endif |
5166 | #ifdef CONFIG_B43_SSB | 5166 | #ifdef CONFIG_B43_SSB |
5167 | case B43_BUS_SSB: | 5167 | case B43_BUS_SSB: |
5168 | /* FIXME */ | 5168 | ssb_pmu_spuravoid_pllupdate(&dev->dev->sdev->bus->chipco, |
5169 | avoid); | ||
5169 | break; | 5170 | break; |
5170 | #endif | 5171 | #endif |
5171 | } | 5172 | } |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index 4469321c0eb3..35fc68be158d 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c | |||
@@ -3317,15 +3317,15 @@ static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus) | |||
3317 | goto err; | 3317 | goto err; |
3318 | } | 3318 | } |
3319 | 3319 | ||
3320 | /* External image takes precedence if specified */ | ||
3321 | if (brcmf_sdbrcm_download_code_file(bus)) { | 3320 | if (brcmf_sdbrcm_download_code_file(bus)) { |
3322 | brcmf_err("dongle image file download failed\n"); | 3321 | brcmf_err("dongle image file download failed\n"); |
3323 | goto err; | 3322 | goto err; |
3324 | } | 3323 | } |
3325 | 3324 | ||
3326 | /* External nvram takes precedence if specified */ | 3325 | if (brcmf_sdbrcm_download_nvram(bus)) { |
3327 | if (brcmf_sdbrcm_download_nvram(bus)) | ||
3328 | brcmf_err("dongle nvram file download failed\n"); | 3326 | brcmf_err("dongle nvram file download failed\n"); |
3327 | goto err; | ||
3328 | } | ||
3329 | 3329 | ||
3330 | /* Take arm out of reset */ | 3330 | /* Take arm out of reset */ |
3331 | if (brcmf_sdbrcm_download_state(bus, false)) { | 3331 | if (brcmf_sdbrcm_download_state(bus, false)) { |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index 2af9c0f0798d..78da3eff75e8 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | |||
@@ -1891,8 +1891,10 @@ static s32 | |||
1891 | brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev, | 1891 | brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev, |
1892 | u8 key_idx, const u8 *mac_addr, struct key_params *params) | 1892 | u8 key_idx, const u8 *mac_addr, struct key_params *params) |
1893 | { | 1893 | { |
1894 | struct brcmf_if *ifp = netdev_priv(ndev); | ||
1894 | struct brcmf_wsec_key key; | 1895 | struct brcmf_wsec_key key; |
1895 | s32 err = 0; | 1896 | s32 err = 0; |
1897 | u8 keybuf[8]; | ||
1896 | 1898 | ||
1897 | memset(&key, 0, sizeof(key)); | 1899 | memset(&key, 0, sizeof(key)); |
1898 | key.index = (u32) key_idx; | 1900 | key.index = (u32) key_idx; |
@@ -1916,8 +1918,9 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev, | |||
1916 | brcmf_dbg(CONN, "Setting the key index %d\n", key.index); | 1918 | brcmf_dbg(CONN, "Setting the key index %d\n", key.index); |
1917 | memcpy(key.data, params->key, key.len); | 1919 | memcpy(key.data, params->key, key.len); |
1918 | 1920 | ||
1919 | if (params->cipher == WLAN_CIPHER_SUITE_TKIP) { | 1921 | if ((ifp->vif->mode != WL_MODE_AP) && |
1920 | u8 keybuf[8]; | 1922 | (params->cipher == WLAN_CIPHER_SUITE_TKIP)) { |
1923 | brcmf_dbg(CONN, "Swapping RX/TX MIC key\n"); | ||
1921 | memcpy(keybuf, &key.data[24], sizeof(keybuf)); | 1924 | memcpy(keybuf, &key.data[24], sizeof(keybuf)); |
1922 | memcpy(&key.data[24], &key.data[16], sizeof(keybuf)); | 1925 | memcpy(&key.data[24], &key.data[16], sizeof(keybuf)); |
1923 | memcpy(&key.data[16], keybuf, sizeof(keybuf)); | 1926 | memcpy(&key.data[16], keybuf, sizeof(keybuf)); |
@@ -2013,7 +2016,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, | |||
2013 | break; | 2016 | break; |
2014 | case WLAN_CIPHER_SUITE_TKIP: | 2017 | case WLAN_CIPHER_SUITE_TKIP: |
2015 | if (ifp->vif->mode != WL_MODE_AP) { | 2018 | if (ifp->vif->mode != WL_MODE_AP) { |
2016 | brcmf_dbg(CONN, "Swapping key\n"); | 2019 | brcmf_dbg(CONN, "Swapping RX/TX MIC key\n"); |
2017 | memcpy(keybuf, &key.data[24], sizeof(keybuf)); | 2020 | memcpy(keybuf, &key.data[24], sizeof(keybuf)); |
2018 | memcpy(&key.data[24], &key.data[16], sizeof(keybuf)); | 2021 | memcpy(&key.data[24], &key.data[16], sizeof(keybuf)); |
2019 | memcpy(&key.data[16], keybuf, sizeof(keybuf)); | 2022 | memcpy(&key.data[16], keybuf, sizeof(keybuf)); |
@@ -2118,8 +2121,7 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, | |||
2118 | err = -EAGAIN; | 2121 | err = -EAGAIN; |
2119 | goto done; | 2122 | goto done; |
2120 | } | 2123 | } |
2121 | switch (wsec & ~SES_OW_ENABLED) { | 2124 | if (wsec & WEP_ENABLED) { |
2122 | case WEP_ENABLED: | ||
2123 | sec = &profile->sec; | 2125 | sec = &profile->sec; |
2124 | if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) { | 2126 | if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) { |
2125 | params.cipher = WLAN_CIPHER_SUITE_WEP40; | 2127 | params.cipher = WLAN_CIPHER_SUITE_WEP40; |
@@ -2128,16 +2130,13 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, | |||
2128 | params.cipher = WLAN_CIPHER_SUITE_WEP104; | 2130 | params.cipher = WLAN_CIPHER_SUITE_WEP104; |
2129 | brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n"); | 2131 | brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n"); |
2130 | } | 2132 | } |
2131 | break; | 2133 | } else if (wsec & TKIP_ENABLED) { |
2132 | case TKIP_ENABLED: | ||
2133 | params.cipher = WLAN_CIPHER_SUITE_TKIP; | 2134 | params.cipher = WLAN_CIPHER_SUITE_TKIP; |
2134 | brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n"); | 2135 | brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n"); |
2135 | break; | 2136 | } else if (wsec & AES_ENABLED) { |
2136 | case AES_ENABLED: | ||
2137 | params.cipher = WLAN_CIPHER_SUITE_AES_CMAC; | 2137 | params.cipher = WLAN_CIPHER_SUITE_AES_CMAC; |
2138 | brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n"); | 2138 | brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n"); |
2139 | break; | 2139 | } else { |
2140 | default: | ||
2141 | brcmf_err("Invalid algo (0x%x)\n", wsec); | 2140 | brcmf_err("Invalid algo (0x%x)\n", wsec); |
2142 | err = -EINVAL; | 2141 | err = -EINVAL; |
2143 | goto done; | 2142 | goto done; |
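
A small sketch of why brcmf_cfg80211_get_key() dropped the switch: wsec is a bitmask reported by firmware and may carry several bits at once (besides SES_OW_ENABLED), so matching on exact values misses combinations, while testing the bits in priority order always resolves to one cipher. The bit values are local stand-ins.

#include <stdio.h>

#define WEP_ENABLED   0x01
#define TKIP_ENABLED  0x02
#define AES_ENABLED   0x04

static const char *cipher_from_wsec(unsigned int wsec)
{
	if (wsec & WEP_ENABLED)
		return "WEP";
	if (wsec & TKIP_ENABLED)
		return "TKIP";
	if (wsec & AES_ENABLED)
		return "AES";
	return "invalid";
}

int main(void)
{
	/* a switch on the raw value would fall through to "invalid" here */
	printf("%s\n", cipher_from_wsec(TKIP_ENABLED | AES_ENABLED));
	printf("%s\n", cipher_from_wsec(0));
	return 0;
}
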
@@ -3824,8 +3823,9 @@ exit: | |||
3824 | static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) | 3823 | static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) |
3825 | { | 3824 | { |
3826 | struct brcmf_if *ifp = netdev_priv(ndev); | 3825 | struct brcmf_if *ifp = netdev_priv(ndev); |
3827 | s32 err = -EPERM; | 3826 | s32 err; |
3828 | struct brcmf_fil_bss_enable_le bss_enable; | 3827 | struct brcmf_fil_bss_enable_le bss_enable; |
3828 | struct brcmf_join_params join_params; | ||
3829 | 3829 | ||
3830 | brcmf_dbg(TRACE, "Enter\n"); | 3830 | brcmf_dbg(TRACE, "Enter\n"); |
3831 | 3831 | ||
@@ -3833,16 +3833,21 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) | |||
3833 | /* Due to most likely deauths outstanding we sleep */ | 3833 | /* Due to most likely deauths outstanding we sleep */ |
3834 | /* first to make sure they get processed by fw. */ | 3834 | /* first to make sure they get processed by fw. */ |
3835 | msleep(400); | 3835 | msleep(400); |
3836 | err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0); | 3836 | |
3837 | if (err < 0) { | 3837 | memset(&join_params, 0, sizeof(join_params)); |
3838 | brcmf_err("setting AP mode failed %d\n", err); | 3838 | err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID, |
3839 | goto exit; | 3839 | &join_params, sizeof(join_params)); |
3840 | } | 3840 | if (err < 0) |
3841 | brcmf_err("SET SSID error (%d)\n", err); | ||
3841 | err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0); | 3842 | err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0); |
3842 | if (err < 0) { | 3843 | if (err < 0) |
3843 | brcmf_err("BRCMF_C_UP error %d\n", err); | 3844 | brcmf_err("BRCMF_C_UP error %d\n", err); |
3844 | goto exit; | 3845 | err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0); |
3845 | } | 3846 | if (err < 0) |
3847 | brcmf_err("setting AP mode failed %d\n", err); | ||
3848 | err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 0); | ||
3849 | if (err < 0) | ||
3850 | brcmf_err("setting INFRA mode failed %d\n", err); | ||
3846 | } else { | 3851 | } else { |
3847 | bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx); | 3852 | bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx); |
3848 | bss_enable.enable = cpu_to_le32(0); | 3853 | bss_enable.enable = cpu_to_le32(0); |
@@ -3855,7 +3860,6 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) | |||
3855 | set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state); | 3860 | set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state); |
3856 | clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state); | 3861 | clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state); |
3857 | 3862 | ||
3858 | exit: | ||
3859 | return err; | 3863 | return err; |
3860 | } | 3864 | } |
3861 | 3865 | ||
@@ -4124,10 +4128,6 @@ static const struct ieee80211_iface_limit brcmf_iface_limits[] = { | |||
4124 | }, | 4128 | }, |
4125 | { | 4129 | { |
4126 | .max = 1, | 4130 | .max = 1, |
4127 | .types = BIT(NL80211_IFTYPE_P2P_DEVICE) | ||
4128 | }, | ||
4129 | { | ||
4130 | .max = 1, | ||
4131 | .types = BIT(NL80211_IFTYPE_P2P_CLIENT) | | 4131 | .types = BIT(NL80211_IFTYPE_P2P_CLIENT) | |
4132 | BIT(NL80211_IFTYPE_P2P_GO) | 4132 | BIT(NL80211_IFTYPE_P2P_GO) |
4133 | }, | 4133 | }, |
@@ -4183,8 +4183,7 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev) | |||
4183 | BIT(NL80211_IFTYPE_ADHOC) | | 4183 | BIT(NL80211_IFTYPE_ADHOC) | |
4184 | BIT(NL80211_IFTYPE_AP) | | 4184 | BIT(NL80211_IFTYPE_AP) | |
4185 | BIT(NL80211_IFTYPE_P2P_CLIENT) | | 4185 | BIT(NL80211_IFTYPE_P2P_CLIENT) | |
4186 | BIT(NL80211_IFTYPE_P2P_GO) | | 4186 | BIT(NL80211_IFTYPE_P2P_GO); |
4187 | BIT(NL80211_IFTYPE_P2P_DEVICE); | ||
4188 | wiphy->iface_combinations = brcmf_iface_combos; | 4187 | wiphy->iface_combinations = brcmf_iface_combos; |
4189 | wiphy->n_iface_combinations = ARRAY_SIZE(brcmf_iface_combos); | 4188 | wiphy->n_iface_combinations = ARRAY_SIZE(brcmf_iface_combos); |
4190 | wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz; | 4189 | wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz; |
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index c6451c61407a..e2340b231aa1 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c | |||
@@ -274,6 +274,130 @@ static void brcms_set_basic_rate(struct brcm_rateset *rs, u16 rate, bool is_br) | |||
274 | } | 274 | } |
275 | } | 275 | } |
276 | 276 | ||
277 | /** | ||
278 | * This function frees the WL per-device resources. | ||
279 | * | ||
280 | * This function frees resources owned by the WL device pointed to | ||
281 | * by the wl parameter. | ||
282 | * | ||
283 | * precondition: can be called both locked and unlocked | ||
284 | * | ||
285 | */ | ||
286 | static void brcms_free(struct brcms_info *wl) | ||
287 | { | ||
288 | struct brcms_timer *t, *next; | ||
289 | |||
290 | /* free ucode data */ | ||
291 | if (wl->fw.fw_cnt) | ||
292 | brcms_ucode_data_free(&wl->ucode); | ||
293 | if (wl->irq) | ||
294 | free_irq(wl->irq, wl); | ||
295 | |||
296 | /* kill dpc */ | ||
297 | tasklet_kill(&wl->tasklet); | ||
298 | |||
299 | if (wl->pub) { | ||
300 | brcms_debugfs_detach(wl->pub); | ||
301 | brcms_c_module_unregister(wl->pub, "linux", wl); | ||
302 | } | ||
303 | |||
304 | /* free common resources */ | ||
305 | if (wl->wlc) { | ||
306 | brcms_c_detach(wl->wlc); | ||
307 | wl->wlc = NULL; | ||
308 | wl->pub = NULL; | ||
309 | } | ||
310 | |||
311 | /* virtual interface deletion is deferred so we cannot spinwait */ | ||
312 | |||
313 | /* wait for all pending callbacks to complete */ | ||
314 | while (atomic_read(&wl->callbacks) > 0) | ||
315 | schedule(); | ||
316 | |||
317 | /* free timers */ | ||
318 | for (t = wl->timers; t; t = next) { | ||
319 | next = t->next; | ||
320 | #ifdef DEBUG | ||
321 | kfree(t->name); | ||
322 | #endif | ||
323 | kfree(t); | ||
324 | } | ||
325 | } | ||
326 | |||
327 | /* | ||
328 | * called from both kernel as from this kernel module (error flow on attach) | ||
329 | * precondition: perimeter lock is not acquired. | ||
330 | */ | ||
331 | static void brcms_remove(struct bcma_device *pdev) | ||
332 | { | ||
333 | struct ieee80211_hw *hw = bcma_get_drvdata(pdev); | ||
334 | struct brcms_info *wl = hw->priv; | ||
335 | |||
336 | if (wl->wlc) { | ||
337 | wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false); | ||
338 | wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy); | ||
339 | ieee80211_unregister_hw(hw); | ||
340 | } | ||
341 | |||
342 | brcms_free(wl); | ||
343 | |||
344 | bcma_set_drvdata(pdev, NULL); | ||
345 | ieee80211_free_hw(hw); | ||
346 | } | ||
347 | |||
348 | /* | ||
349 | * Precondition: Since this function is called in brcms_pci_probe() context, | ||
350 | * no locking is required. | ||
351 | */ | ||
352 | static void brcms_release_fw(struct brcms_info *wl) | ||
353 | { | ||
354 | int i; | ||
355 | for (i = 0; i < MAX_FW_IMAGES; i++) { | ||
356 | release_firmware(wl->fw.fw_bin[i]); | ||
357 | release_firmware(wl->fw.fw_hdr[i]); | ||
358 | } | ||
359 | } | ||
360 | |||
361 | /* | ||
362 | * Precondition: Since this function is called in brcms_pci_probe() context, | ||
363 | * no locking is required. | ||
364 | */ | ||
365 | static int brcms_request_fw(struct brcms_info *wl, struct bcma_device *pdev) | ||
366 | { | ||
367 | int status; | ||
368 | struct device *device = &pdev->dev; | ||
369 | char fw_name[100]; | ||
370 | int i; | ||
371 | |||
372 | memset(&wl->fw, 0, sizeof(struct brcms_firmware)); | ||
373 | for (i = 0; i < MAX_FW_IMAGES; i++) { | ||
374 | if (brcms_firmwares[i] == NULL) | ||
375 | break; | ||
376 | sprintf(fw_name, "%s-%d.fw", brcms_firmwares[i], | ||
377 | UCODE_LOADER_API_VER); | ||
378 | status = request_firmware(&wl->fw.fw_bin[i], fw_name, device); | ||
379 | if (status) { | ||
380 | wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n", | ||
381 | KBUILD_MODNAME, fw_name); | ||
382 | return status; | ||
383 | } | ||
384 | sprintf(fw_name, "%s_hdr-%d.fw", brcms_firmwares[i], | ||
385 | UCODE_LOADER_API_VER); | ||
386 | status = request_firmware(&wl->fw.fw_hdr[i], fw_name, device); | ||
387 | if (status) { | ||
388 | wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n", | ||
389 | KBUILD_MODNAME, fw_name); | ||
390 | return status; | ||
391 | } | ||
392 | wl->fw.hdr_num_entries[i] = | ||
393 | wl->fw.fw_hdr[i]->size / (sizeof(struct firmware_hdr)); | ||
394 | } | ||
395 | wl->fw.fw_cnt = i; | ||
396 | status = brcms_ucode_data_init(wl, &wl->ucode); | ||
397 | brcms_release_fw(wl); | ||
398 | return status; | ||
399 | } | ||
400 | |||
277 | static void brcms_ops_tx(struct ieee80211_hw *hw, | 401 | static void brcms_ops_tx(struct ieee80211_hw *hw, |
278 | struct ieee80211_tx_control *control, | 402 | struct ieee80211_tx_control *control, |
279 | struct sk_buff *skb) | 403 | struct sk_buff *skb) |
@@ -306,6 +430,14 @@ static int brcms_ops_start(struct ieee80211_hw *hw) | |||
306 | if (!blocked) | 430 | if (!blocked) |
307 | wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy); | 431 | wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy); |
308 | 432 | ||
433 | if (!wl->ucode.bcm43xx_bomminor) { | ||
434 | err = brcms_request_fw(wl, wl->wlc->hw->d11core); | ||
435 | if (err) { | ||
436 | brcms_remove(wl->wlc->hw->d11core); | ||
437 | return -ENOENT; | ||
438 | } | ||
439 | } | ||
440 | |||
309 | spin_lock_bh(&wl->lock); | 441 | spin_lock_bh(&wl->lock); |
310 | /* avoid acknowledging frames before a non-monitor device is added */ | 442 | /* avoid acknowledging frames before a non-monitor device is added */ |
311 | wl->mute_tx = true; | 443 | wl->mute_tx = true; |
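
The brcms_ops_start() hunk above is the heart of this change: firmware is no longer requested in the probe path but on the first mac80211 start, keyed off wl->ucode.bcm43xx_bomminor still being unset. A rough sketch of that deferred-load pattern follows; the example_* names are invented, only request_firmware()/release_firmware() are the real kernel API.

    #include <linux/firmware.h>
    #include <net/mac80211.h>

    struct example_priv {
        struct device *dev;
        const struct firmware *fw;
        bool fw_loaded;
    };

    /* mac80211 .start callback: load the ucode lazily, the first time the
     * interface is brought up, instead of in the PCI/bcma probe path. */
    static int example_ops_start(struct ieee80211_hw *hw)
    {
        struct example_priv *priv = hw->priv;
        int err;

        if (!priv->fw_loaded) {
            err = request_firmware(&priv->fw, "example-ucode.fw", priv->dev);
            if (err)
                return err;   /* interface stays down; probe already succeeded */
            priv->fw_loaded = true;
        }

        /* ... parse and program the ucode, enable interrupts, etc. ... */
        return 0;
    }

One practical consequence of this ordering, visible in the brcms_attach() hunk later in this file, is that probe can now succeed on systems where the firmware files are not yet available (for example, before the root filesystem is mounted).
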
@@ -793,128 +925,6 @@ void brcms_dpc(unsigned long data) | |||
793 | wake_up(&wl->tx_flush_wq); | 925 | wake_up(&wl->tx_flush_wq); |
794 | } | 926 | } |
795 | 927 | ||
796 | /* | ||
797 | * Precondition: Since this function is called in brcms_pci_probe() context, | ||
798 | * no locking is required. | ||
799 | */ | ||
800 | static int brcms_request_fw(struct brcms_info *wl, struct bcma_device *pdev) | ||
801 | { | ||
802 | int status; | ||
803 | struct device *device = &pdev->dev; | ||
804 | char fw_name[100]; | ||
805 | int i; | ||
806 | |||
807 | memset(&wl->fw, 0, sizeof(struct brcms_firmware)); | ||
808 | for (i = 0; i < MAX_FW_IMAGES; i++) { | ||
809 | if (brcms_firmwares[i] == NULL) | ||
810 | break; | ||
811 | sprintf(fw_name, "%s-%d.fw", brcms_firmwares[i], | ||
812 | UCODE_LOADER_API_VER); | ||
813 | status = request_firmware(&wl->fw.fw_bin[i], fw_name, device); | ||
814 | if (status) { | ||
815 | wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n", | ||
816 | KBUILD_MODNAME, fw_name); | ||
817 | return status; | ||
818 | } | ||
819 | sprintf(fw_name, "%s_hdr-%d.fw", brcms_firmwares[i], | ||
820 | UCODE_LOADER_API_VER); | ||
821 | status = request_firmware(&wl->fw.fw_hdr[i], fw_name, device); | ||
822 | if (status) { | ||
823 | wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n", | ||
824 | KBUILD_MODNAME, fw_name); | ||
825 | return status; | ||
826 | } | ||
827 | wl->fw.hdr_num_entries[i] = | ||
828 | wl->fw.fw_hdr[i]->size / (sizeof(struct firmware_hdr)); | ||
829 | } | ||
830 | wl->fw.fw_cnt = i; | ||
831 | return brcms_ucode_data_init(wl, &wl->ucode); | ||
832 | } | ||
833 | |||
834 | /* | ||
835 | * Precondition: Since this function is called in brcms_pci_probe() context, | ||
836 | * no locking is required. | ||
837 | */ | ||
838 | static void brcms_release_fw(struct brcms_info *wl) | ||
839 | { | ||
840 | int i; | ||
841 | for (i = 0; i < MAX_FW_IMAGES; i++) { | ||
842 | release_firmware(wl->fw.fw_bin[i]); | ||
843 | release_firmware(wl->fw.fw_hdr[i]); | ||
844 | } | ||
845 | } | ||
846 | |||
847 | /** | ||
848 | * This function frees the WL per-device resources. | ||
849 | * | ||
850 | * This function frees resources owned by the WL device pointed to | ||
851 | * by the wl parameter. | ||
852 | * | ||
853 | * precondition: can both be called locked and unlocked | ||
854 | * | ||
855 | */ | ||
856 | static void brcms_free(struct brcms_info *wl) | ||
857 | { | ||
858 | struct brcms_timer *t, *next; | ||
859 | |||
860 | /* free ucode data */ | ||
861 | if (wl->fw.fw_cnt) | ||
862 | brcms_ucode_data_free(&wl->ucode); | ||
863 | if (wl->irq) | ||
864 | free_irq(wl->irq, wl); | ||
865 | |||
866 | /* kill dpc */ | ||
867 | tasklet_kill(&wl->tasklet); | ||
868 | |||
869 | if (wl->pub) { | ||
870 | brcms_debugfs_detach(wl->pub); | ||
871 | brcms_c_module_unregister(wl->pub, "linux", wl); | ||
872 | } | ||
873 | |||
874 | /* free common resources */ | ||
875 | if (wl->wlc) { | ||
876 | brcms_c_detach(wl->wlc); | ||
877 | wl->wlc = NULL; | ||
878 | wl->pub = NULL; | ||
879 | } | ||
880 | |||
881 | /* virtual interface deletion is deferred so we cannot spinwait */ | ||
882 | |||
883 | /* wait for all pending callbacks to complete */ | ||
884 | while (atomic_read(&wl->callbacks) > 0) | ||
885 | schedule(); | ||
886 | |||
887 | /* free timers */ | ||
888 | for (t = wl->timers; t; t = next) { | ||
889 | next = t->next; | ||
890 | #ifdef DEBUG | ||
891 | kfree(t->name); | ||
892 | #endif | ||
893 | kfree(t); | ||
894 | } | ||
895 | } | ||
896 | |||
897 | /* | ||
898 | * called from both kernel as from this kernel module (error flow on attach) | ||
899 | * precondition: perimeter lock is not acquired. | ||
900 | */ | ||
901 | static void brcms_remove(struct bcma_device *pdev) | ||
902 | { | ||
903 | struct ieee80211_hw *hw = bcma_get_drvdata(pdev); | ||
904 | struct brcms_info *wl = hw->priv; | ||
905 | |||
906 | if (wl->wlc) { | ||
907 | wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false); | ||
908 | wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy); | ||
909 | ieee80211_unregister_hw(hw); | ||
910 | } | ||
911 | |||
912 | brcms_free(wl); | ||
913 | |||
914 | bcma_set_drvdata(pdev, NULL); | ||
915 | ieee80211_free_hw(hw); | ||
916 | } | ||
917 | |||
918 | static irqreturn_t brcms_isr(int irq, void *dev_id) | 928 | static irqreturn_t brcms_isr(int irq, void *dev_id) |
919 | { | 929 | { |
920 | struct brcms_info *wl; | 930 | struct brcms_info *wl; |
@@ -1047,18 +1057,8 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev) | |||
1047 | spin_lock_init(&wl->lock); | 1057 | spin_lock_init(&wl->lock); |
1048 | spin_lock_init(&wl->isr_lock); | 1058 | spin_lock_init(&wl->isr_lock); |
1049 | 1059 | ||
1050 | /* prepare ucode */ | ||
1051 | if (brcms_request_fw(wl, pdev) < 0) { | ||
1052 | wiphy_err(wl->wiphy, "%s: Failed to find firmware usually in " | ||
1053 | "%s\n", KBUILD_MODNAME, "/lib/firmware/brcm"); | ||
1054 | brcms_release_fw(wl); | ||
1055 | brcms_remove(pdev); | ||
1056 | return NULL; | ||
1057 | } | ||
1058 | |||
1059 | /* common load-time initialization */ | 1060 | /* common load-time initialization */ |
1060 | wl->wlc = brcms_c_attach((void *)wl, pdev, unit, false, &err); | 1061 | wl->wlc = brcms_c_attach((void *)wl, pdev, unit, false, &err); |
1061 | brcms_release_fw(wl); | ||
1062 | if (!wl->wlc) { | 1062 | if (!wl->wlc) { |
1063 | wiphy_err(wl->wiphy, "%s: attach() failed with code %d\n", | 1063 | wiphy_err(wl->wiphy, "%s: attach() failed with code %d\n", |
1064 | KBUILD_MODNAME, err); | 1064 | KBUILD_MODNAME, err); |
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c index 21a824232478..18d37645e2cd 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c | |||
@@ -1137,9 +1137,8 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi, | |||
1137 | gain0_15 = ((biq1 & 0xf) << 12) | | 1137 | gain0_15 = ((biq1 & 0xf) << 12) | |
1138 | ((tia & 0xf) << 8) | | 1138 | ((tia & 0xf) << 8) | |
1139 | ((lna2 & 0x3) << 6) | | 1139 | ((lna2 & 0x3) << 6) | |
1140 | ((lna2 & 0x3) << 4) | | 1140 | ((lna2 & |
1141 | ((lna1 & 0x3) << 2) | | 1141 | 0x3) << 4) | ((lna1 & 0x3) << 2) | ((lna1 & 0x3) << 0); |
1142 | ((lna1 & 0x3) << 0); | ||
1143 | 1142 | ||
1144 | mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0); | 1143 | mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0); |
1145 | mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0); | 1144 | mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0); |
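
The gain word computed above packs four small gain codes into one 16-bit PHY register value (the revert only changes the line wrapping, not the layout). A self-contained helper that spells out the same bit layout, included here purely to make it readable:

    #include <stdint.h>

    /* gain0_15 layout for PHY register 0x4b6:
     *   [15:12] biq1   [11:8] tia   [7:6] and [5:4] lna2   [3:2] and [1:0] lna1
     */
    static inline uint16_t pack_gain0_15(unsigned int biq1, unsigned int tia,
                                         unsigned int lna2, unsigned int lna1)
    {
        return ((biq1 & 0xf) << 12) |
               ((tia  & 0xf) << 8)  |
               ((lna2 & 0x3) << 6)  |
               ((lna2 & 0x3) << 4)  |
               ((lna1 & 0x3) << 2)  |
               ((lna1 & 0x3) << 0);
    }
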
@@ -1157,8 +1156,6 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi, | |||
1157 | } | 1156 | } |
1158 | 1157 | ||
1159 | mod_phy_reg(pi, 0x44d, (0x1 << 0), (!trsw) << 0); | 1158 | mod_phy_reg(pi, 0x44d, (0x1 << 0), (!trsw) << 0); |
1160 | mod_phy_reg(pi, 0x4b1, (0x3 << 11), lna1 << 11); | ||
1161 | mod_phy_reg(pi, 0x4e6, (0x3 << 3), lna1 << 3); | ||
1162 | 1159 | ||
1163 | } | 1160 | } |
1164 | 1161 | ||
@@ -1331,43 +1328,6 @@ static u32 wlc_lcnphy_measure_digital_power(struct brcms_phy *pi, u16 nsamples) | |||
1331 | return (iq_est.i_pwr + iq_est.q_pwr) / nsamples; | 1328 | return (iq_est.i_pwr + iq_est.q_pwr) / nsamples; |
1332 | } | 1329 | } |
1333 | 1330 | ||
1334 | static bool wlc_lcnphy_rx_iq_cal_gain(struct brcms_phy *pi, u16 biq1_gain, | ||
1335 | u16 tia_gain, u16 lna2_gain) | ||
1336 | { | ||
1337 | u32 i_thresh_l, q_thresh_l; | ||
1338 | u32 i_thresh_h, q_thresh_h; | ||
1339 | struct lcnphy_iq_est iq_est_h, iq_est_l; | ||
1340 | |||
1341 | wlc_lcnphy_set_rx_gain_by_distribution(pi, 0, 0, 0, biq1_gain, tia_gain, | ||
1342 | lna2_gain, 0); | ||
1343 | |||
1344 | wlc_lcnphy_rx_gain_override_enable(pi, true); | ||
1345 | wlc_lcnphy_start_tx_tone(pi, 2000, (40 >> 1), 0); | ||
1346 | udelay(500); | ||
1347 | write_radio_reg(pi, RADIO_2064_REG112, 0); | ||
1348 | if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_l)) | ||
1349 | return false; | ||
1350 | |||
1351 | wlc_lcnphy_start_tx_tone(pi, 2000, 40, 0); | ||
1352 | udelay(500); | ||
1353 | write_radio_reg(pi, RADIO_2064_REG112, 0); | ||
1354 | if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_h)) | ||
1355 | return false; | ||
1356 | |||
1357 | i_thresh_l = (iq_est_l.i_pwr << 1); | ||
1358 | i_thresh_h = (iq_est_l.i_pwr << 2) + iq_est_l.i_pwr; | ||
1359 | |||
1360 | q_thresh_l = (iq_est_l.q_pwr << 1); | ||
1361 | q_thresh_h = (iq_est_l.q_pwr << 2) + iq_est_l.q_pwr; | ||
1362 | if ((iq_est_h.i_pwr > i_thresh_l) && | ||
1363 | (iq_est_h.i_pwr < i_thresh_h) && | ||
1364 | (iq_est_h.q_pwr > q_thresh_l) && | ||
1365 | (iq_est_h.q_pwr < q_thresh_h)) | ||
1366 | return true; | ||
1367 | |||
1368 | return false; | ||
1369 | } | ||
1370 | |||
1371 | static bool | 1331 | static bool |
1372 | wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi, | 1332 | wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi, |
1373 | const struct lcnphy_rx_iqcomp *iqcomp, | 1333 | const struct lcnphy_rx_iqcomp *iqcomp, |
@@ -1382,8 +1342,8 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi, | |||
1382 | RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old, | 1342 | RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old, |
1383 | rfoverride3_old, rfoverride3val_old, rfoverride4_old, | 1343 | rfoverride3_old, rfoverride3val_old, rfoverride4_old, |
1384 | rfoverride4val_old, afectrlovr_old, afectrlovrval_old; | 1344 | rfoverride4val_old, afectrlovr_old, afectrlovrval_old; |
1385 | int tia_gain, lna2_gain, biq1_gain; | 1345 | int tia_gain; |
1386 | bool set_gain; | 1346 | u32 received_power, rx_pwr_threshold; |
1387 | u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl; | 1347 | u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl; |
1388 | u16 values_to_save[11]; | 1348 | u16 values_to_save[11]; |
1389 | s16 *ptr; | 1349 | s16 *ptr; |
@@ -1408,134 +1368,126 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi, | |||
1408 | goto cal_done; | 1368 | goto cal_done; |
1409 | } | 1369 | } |
1410 | 1370 | ||
1411 | WARN_ON(module != 1); | 1371 | if (module == 1) { |
1412 | tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi); | ||
1413 | wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF); | ||
1414 | |||
1415 | for (i = 0; i < 11; i++) | ||
1416 | values_to_save[i] = | ||
1417 | read_radio_reg(pi, rxiq_cal_rf_reg[i]); | ||
1418 | Core1TxControl_old = read_phy_reg(pi, 0x631); | ||
1419 | |||
1420 | or_phy_reg(pi, 0x631, 0x0015); | ||
1421 | |||
1422 | RFOverride0_old = read_phy_reg(pi, 0x44c); | ||
1423 | RFOverrideVal0_old = read_phy_reg(pi, 0x44d); | ||
1424 | rfoverride2_old = read_phy_reg(pi, 0x4b0); | ||
1425 | rfoverride2val_old = read_phy_reg(pi, 0x4b1); | ||
1426 | rfoverride3_old = read_phy_reg(pi, 0x4f9); | ||
1427 | rfoverride3val_old = read_phy_reg(pi, 0x4fa); | ||
1428 | rfoverride4_old = read_phy_reg(pi, 0x938); | ||
1429 | rfoverride4val_old = read_phy_reg(pi, 0x939); | ||
1430 | afectrlovr_old = read_phy_reg(pi, 0x43b); | ||
1431 | afectrlovrval_old = read_phy_reg(pi, 0x43c); | ||
1432 | old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da); | ||
1433 | old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db); | ||
1434 | |||
1435 | tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi); | ||
1436 | if (tx_gain_override_old) { | ||
1437 | wlc_lcnphy_get_tx_gain(pi, &old_gains); | ||
1438 | tx_gain_index_old = pi_lcn->lcnphy_current_index; | ||
1439 | } | ||
1440 | |||
1441 | wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx); | ||
1442 | 1372 | ||
1443 | mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0); | 1373 | tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi); |
1444 | mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0); | 1374 | wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF); |
1445 | 1375 | ||
1446 | mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1); | 1376 | for (i = 0; i < 11; i++) |
1447 | mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1); | 1377 | values_to_save[i] = |
1378 | read_radio_reg(pi, rxiq_cal_rf_reg[i]); | ||
1379 | Core1TxControl_old = read_phy_reg(pi, 0x631); | ||
1380 | |||
1381 | or_phy_reg(pi, 0x631, 0x0015); | ||
1382 | |||
1383 | RFOverride0_old = read_phy_reg(pi, 0x44c); | ||
1384 | RFOverrideVal0_old = read_phy_reg(pi, 0x44d); | ||
1385 | rfoverride2_old = read_phy_reg(pi, 0x4b0); | ||
1386 | rfoverride2val_old = read_phy_reg(pi, 0x4b1); | ||
1387 | rfoverride3_old = read_phy_reg(pi, 0x4f9); | ||
1388 | rfoverride3val_old = read_phy_reg(pi, 0x4fa); | ||
1389 | rfoverride4_old = read_phy_reg(pi, 0x938); | ||
1390 | rfoverride4val_old = read_phy_reg(pi, 0x939); | ||
1391 | afectrlovr_old = read_phy_reg(pi, 0x43b); | ||
1392 | afectrlovrval_old = read_phy_reg(pi, 0x43c); | ||
1393 | old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da); | ||
1394 | old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db); | ||
1395 | |||
1396 | tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi); | ||
1397 | if (tx_gain_override_old) { | ||
1398 | wlc_lcnphy_get_tx_gain(pi, &old_gains); | ||
1399 | tx_gain_index_old = pi_lcn->lcnphy_current_index; | ||
1400 | } | ||
1448 | 1401 | ||
1449 | write_radio_reg(pi, RADIO_2064_REG116, 0x06); | 1402 | wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx); |
1450 | write_radio_reg(pi, RADIO_2064_REG12C, 0x07); | ||
1451 | write_radio_reg(pi, RADIO_2064_REG06A, 0xd3); | ||
1452 | write_radio_reg(pi, RADIO_2064_REG098, 0x03); | ||
1453 | write_radio_reg(pi, RADIO_2064_REG00B, 0x7); | ||
1454 | mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4); | ||
1455 | write_radio_reg(pi, RADIO_2064_REG01D, 0x01); | ||
1456 | write_radio_reg(pi, RADIO_2064_REG114, 0x01); | ||
1457 | write_radio_reg(pi, RADIO_2064_REG02E, 0x10); | ||
1458 | write_radio_reg(pi, RADIO_2064_REG12A, 0x08); | ||
1459 | |||
1460 | mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0); | ||
1461 | mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0); | ||
1462 | mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1); | ||
1463 | mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1); | ||
1464 | mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2); | ||
1465 | mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2); | ||
1466 | mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3); | ||
1467 | mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3); | ||
1468 | mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5); | ||
1469 | mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5); | ||
1470 | 1403 | ||
1471 | mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0); | 1404 | mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0); |
1472 | mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0); | 1405 | mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0); |
1473 | 1406 | ||
1474 | write_phy_reg(pi, 0x6da, 0xffff); | 1407 | mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1); |
1475 | or_phy_reg(pi, 0x6db, 0x3); | 1408 | mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1); |
1476 | 1409 | ||
1477 | wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch); | 1410 | write_radio_reg(pi, RADIO_2064_REG116, 0x06); |
1478 | set_gain = false; | 1411 | write_radio_reg(pi, RADIO_2064_REG12C, 0x07); |
1479 | 1412 | write_radio_reg(pi, RADIO_2064_REG06A, 0xd3); | |
1480 | lna2_gain = 3; | 1413 | write_radio_reg(pi, RADIO_2064_REG098, 0x03); |
1481 | while ((lna2_gain >= 0) && !set_gain) { | 1414 | write_radio_reg(pi, RADIO_2064_REG00B, 0x7); |
1482 | tia_gain = 4; | 1415 | mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4); |
1483 | 1416 | write_radio_reg(pi, RADIO_2064_REG01D, 0x01); | |
1484 | while ((tia_gain >= 0) && !set_gain) { | 1417 | write_radio_reg(pi, RADIO_2064_REG114, 0x01); |
1485 | biq1_gain = 6; | 1418 | write_radio_reg(pi, RADIO_2064_REG02E, 0x10); |
1486 | 1419 | write_radio_reg(pi, RADIO_2064_REG12A, 0x08); | |
1487 | while ((biq1_gain >= 0) && !set_gain) { | 1420 | |
1488 | set_gain = wlc_lcnphy_rx_iq_cal_gain(pi, | 1421 | mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0); |
1489 | (u16) | 1422 | mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0); |
1490 | biq1_gain, | 1423 | mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1); |
1491 | (u16) | 1424 | mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1); |
1492 | tia_gain, | 1425 | mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2); |
1493 | (u16) | 1426 | mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2); |
1494 | lna2_gain); | 1427 | mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3); |
1495 | biq1_gain -= 1; | 1428 | mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3); |
1496 | } | 1429 | mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5); |
1430 | mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5); | ||
1431 | |||
1432 | mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0); | ||
1433 | mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0); | ||
1434 | |||
1435 | wlc_lcnphy_start_tx_tone(pi, 2000, 120, 0); | ||
1436 | write_phy_reg(pi, 0x6da, 0xffff); | ||
1437 | or_phy_reg(pi, 0x6db, 0x3); | ||
1438 | wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch); | ||
1439 | wlc_lcnphy_rx_gain_override_enable(pi, true); | ||
1440 | |||
1441 | tia_gain = 8; | ||
1442 | rx_pwr_threshold = 950; | ||
1443 | while (tia_gain > 0) { | ||
1497 | tia_gain -= 1; | 1444 | tia_gain -= 1; |
1445 | wlc_lcnphy_set_rx_gain_by_distribution(pi, | ||
1446 | 0, 0, 2, 2, | ||
1447 | (u16) | ||
1448 | tia_gain, 1, 0); | ||
1449 | udelay(500); | ||
1450 | |||
1451 | received_power = | ||
1452 | wlc_lcnphy_measure_digital_power(pi, 2000); | ||
1453 | if (received_power < rx_pwr_threshold) | ||
1454 | break; | ||
1498 | } | 1455 | } |
1499 | lna2_gain -= 1; | 1456 | result = wlc_lcnphy_calc_rx_iq_comp(pi, 0xffff); |
1500 | } | ||
1501 | 1457 | ||
1502 | if (set_gain) | 1458 | wlc_lcnphy_stop_tx_tone(pi); |
1503 | result = wlc_lcnphy_calc_rx_iq_comp(pi, 1024); | ||
1504 | else | ||
1505 | result = false; | ||
1506 | 1459 | ||
1507 | wlc_lcnphy_stop_tx_tone(pi); | 1460 | write_phy_reg(pi, 0x631, Core1TxControl_old); |
1508 | 1461 | ||
1509 | write_phy_reg(pi, 0x631, Core1TxControl_old); | 1462 | write_phy_reg(pi, 0x44c, RFOverrideVal0_old); |
1510 | 1463 | write_phy_reg(pi, 0x44d, RFOverrideVal0_old); | |
1511 | write_phy_reg(pi, 0x44c, RFOverrideVal0_old); | 1464 | write_phy_reg(pi, 0x4b0, rfoverride2_old); |
1512 | write_phy_reg(pi, 0x44d, RFOverrideVal0_old); | 1465 | write_phy_reg(pi, 0x4b1, rfoverride2val_old); |
1513 | write_phy_reg(pi, 0x4b0, rfoverride2_old); | 1466 | write_phy_reg(pi, 0x4f9, rfoverride3_old); |
1514 | write_phy_reg(pi, 0x4b1, rfoverride2val_old); | 1467 | write_phy_reg(pi, 0x4fa, rfoverride3val_old); |
1515 | write_phy_reg(pi, 0x4f9, rfoverride3_old); | 1468 | write_phy_reg(pi, 0x938, rfoverride4_old); |
1516 | write_phy_reg(pi, 0x4fa, rfoverride3val_old); | 1469 | write_phy_reg(pi, 0x939, rfoverride4val_old); |
1517 | write_phy_reg(pi, 0x938, rfoverride4_old); | 1470 | write_phy_reg(pi, 0x43b, afectrlovr_old); |
1518 | write_phy_reg(pi, 0x939, rfoverride4val_old); | 1471 | write_phy_reg(pi, 0x43c, afectrlovrval_old); |
1519 | write_phy_reg(pi, 0x43b, afectrlovr_old); | 1472 | write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl); |
1520 | write_phy_reg(pi, 0x43c, afectrlovrval_old); | 1473 | write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl); |
1521 | write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl); | ||
1522 | write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl); | ||
1523 | 1474 | ||
1524 | wlc_lcnphy_clear_trsw_override(pi); | 1475 | wlc_lcnphy_clear_trsw_override(pi); |
1525 | 1476 | ||
1526 | mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2); | 1477 | mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2); |
1527 | 1478 | ||
1528 | for (i = 0; i < 11; i++) | 1479 | for (i = 0; i < 11; i++) |
1529 | write_radio_reg(pi, rxiq_cal_rf_reg[i], | 1480 | write_radio_reg(pi, rxiq_cal_rf_reg[i], |
1530 | values_to_save[i]); | 1481 | values_to_save[i]); |
1531 | 1482 | ||
1532 | if (tx_gain_override_old) | 1483 | if (tx_gain_override_old) |
1533 | wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old); | 1484 | wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old); |
1534 | else | 1485 | else |
1535 | wlc_lcnphy_disable_tx_gain_override(pi); | 1486 | wlc_lcnphy_disable_tx_gain_override(pi); |
1536 | 1487 | ||
1537 | wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl); | 1488 | wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl); |
1538 | wlc_lcnphy_rx_gain_override_enable(pi, false); | 1489 | wlc_lcnphy_rx_gain_override_enable(pi, false); |
1490 | } | ||
1539 | 1491 | ||
1540 | cal_done: | 1492 | cal_done: |
1541 | kfree(ptr); | 1493 | kfree(ptr); |
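
The rewritten calibration body above replaces the old three-level biq1/tia/lna2 gain search with a single downward sweep of the TIA gain, stopping at the first setting whose measured digital power falls under a fixed threshold. Reduced to its control flow it looks roughly like the sketch below; the two example_* helpers are stand-ins for wlc_lcnphy_set_rx_gain_by_distribution() and wlc_lcnphy_measure_digital_power(), and the 8/950/2000 constants are taken from the hunk.

    static int example_pick_tia_gain(struct brcms_phy *pi)
    {
        int tia_gain = 8;
        u32 rx_pwr_threshold = 950;
        u32 received_power;

        while (tia_gain > 0) {
            tia_gain--;
            example_set_rx_gain(pi, tia_gain);          /* stand-in helper */
            udelay(500);                                /* let the RF settle */
            received_power = example_measure_power(pi, 2000);
            if (received_power < rx_pwr_threshold)
                break;          /* first gain that no longer saturates */
        }
        return tia_gain;
    }
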
@@ -1829,17 +1781,6 @@ wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi, u8 channel) | |||
1829 | write_radio_reg(pi, RADIO_2064_REG038, 3); | 1781 | write_radio_reg(pi, RADIO_2064_REG038, 3); |
1830 | write_radio_reg(pi, RADIO_2064_REG091, 7); | 1782 | write_radio_reg(pi, RADIO_2064_REG091, 7); |
1831 | } | 1783 | } |
1832 | |||
1833 | if (!(pi->sh->boardflags & BFL_FEM)) { | ||
1834 | u8 reg038[14] = {0xd, 0xe, 0xd, 0xd, 0xd, 0xc, | ||
1835 | 0xa, 0xb, 0xb, 0x3, 0x3, 0x2, 0x0, 0x0}; | ||
1836 | |||
1837 | write_radio_reg(pi, RADIO_2064_REG02A, 0xf); | ||
1838 | write_radio_reg(pi, RADIO_2064_REG091, 0x3); | ||
1839 | write_radio_reg(pi, RADIO_2064_REG038, 0x3); | ||
1840 | |||
1841 | write_radio_reg(pi, RADIO_2064_REG038, reg038[channel - 1]); | ||
1842 | } | ||
1843 | } | 1784 | } |
1844 | 1785 | ||
1845 | static int | 1786 | static int |
@@ -2034,16 +1975,6 @@ wlc_lcnphy_set_tssi_mux(struct brcms_phy *pi, enum lcnphy_tssi_mode pos) | |||
2034 | } else { | 1975 | } else { |
2035 | mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0x1); | 1976 | mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0x1); |
2036 | mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8); | 1977 | mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8); |
2037 | mod_radio_reg(pi, RADIO_2064_REG028, 0x1, 0x0); | ||
2038 | mod_radio_reg(pi, RADIO_2064_REG11A, 0x4, 1<<2); | ||
2039 | mod_radio_reg(pi, RADIO_2064_REG036, 0x10, 0x0); | ||
2040 | mod_radio_reg(pi, RADIO_2064_REG11A, 0x10, 1<<4); | ||
2041 | mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0); | ||
2042 | mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x77); | ||
2043 | mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, 0xe<<1); | ||
2044 | mod_radio_reg(pi, RADIO_2064_REG112, 0x80, 1<<7); | ||
2045 | mod_radio_reg(pi, RADIO_2064_REG005, 0x7, 1<<1); | ||
2046 | mod_radio_reg(pi, RADIO_2064_REG029, 0xf0, 0<<4); | ||
2047 | } | 1978 | } |
2048 | } else { | 1979 | } else { |
2049 | mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0x1) << 2); | 1980 | mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0x1) << 2); |
@@ -2130,14 +2061,12 @@ static void wlc_lcnphy_pwrctrl_rssiparams(struct brcms_phy *pi) | |||
2130 | (auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12)); | 2061 | (auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12)); |
2131 | 2062 | ||
2132 | mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5)); | 2063 | mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5)); |
2133 | mod_radio_reg(pi, RADIO_2064_REG07C, (1 << 0), (1 << 0)); | ||
2134 | } | 2064 | } |
2135 | 2065 | ||
2136 | static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) | 2066 | static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) |
2137 | { | 2067 | { |
2138 | struct phytbl_info tab; | 2068 | struct phytbl_info tab; |
2139 | u32 rfseq, ind; | 2069 | u32 rfseq, ind; |
2140 | u8 tssi_sel; | ||
2141 | 2070 | ||
2142 | tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL; | 2071 | tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL; |
2143 | tab.tbl_width = 32; | 2072 | tab.tbl_width = 32; |
@@ -2159,13 +2088,7 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) | |||
2159 | 2088 | ||
2160 | mod_phy_reg(pi, 0x503, (0x1 << 4), (1) << 4); | 2089 | mod_phy_reg(pi, 0x503, (0x1 << 4), (1) << 4); |
2161 | 2090 | ||
2162 | if (pi->sh->boardflags & BFL_FEM) { | 2091 | wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT); |
2163 | tssi_sel = 0x1; | ||
2164 | wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT); | ||
2165 | } else { | ||
2166 | tssi_sel = 0xe; | ||
2167 | wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_POST_PA); | ||
2168 | } | ||
2169 | mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14); | 2092 | mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14); |
2170 | 2093 | ||
2171 | mod_phy_reg(pi, 0x4a4, (0x1 << 15), (1) << 15); | 2094 | mod_phy_reg(pi, 0x4a4, (0x1 << 15), (1) << 15); |
@@ -2201,10 +2124,9 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) | |||
2201 | mod_phy_reg(pi, 0x49a, (0x1ff << 0), (0xff) << 0); | 2124 | mod_phy_reg(pi, 0x49a, (0x1ff << 0), (0xff) << 0); |
2202 | 2125 | ||
2203 | if (LCNREV_IS(pi->pubpi.phy_rev, 2)) { | 2126 | if (LCNREV_IS(pi->pubpi.phy_rev, 2)) { |
2204 | mod_radio_reg(pi, RADIO_2064_REG028, 0xf, tssi_sel); | 2127 | mod_radio_reg(pi, RADIO_2064_REG028, 0xf, 0xe); |
2205 | mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4); | 2128 | mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4); |
2206 | } else { | 2129 | } else { |
2207 | mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, tssi_sel << 1); | ||
2208 | mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1); | 2130 | mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1); |
2209 | mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 1 << 3); | 2131 | mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 1 << 3); |
2210 | } | 2132 | } |
@@ -2251,10 +2173,6 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) | |||
2251 | 2173 | ||
2252 | mod_phy_reg(pi, 0x4d7, (0xf << 8), (0) << 8); | 2174 | mod_phy_reg(pi, 0x4d7, (0xf << 8), (0) << 8); |
2253 | 2175 | ||
2254 | mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x0); | ||
2255 | mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0); | ||
2256 | mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8); | ||
2257 | |||
2258 | wlc_lcnphy_pwrctrl_rssiparams(pi); | 2176 | wlc_lcnphy_pwrctrl_rssiparams(pi); |
2259 | } | 2177 | } |
2260 | 2178 | ||
@@ -2873,8 +2791,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi) | |||
2873 | read_radio_reg(pi, RADIO_2064_REG007) & 1; | 2791 | read_radio_reg(pi, RADIO_2064_REG007) & 1; |
2874 | u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10; | 2792 | u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10; |
2875 | u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4; | 2793 | u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4; |
2876 | u8 SAVE_bbmult = wlc_lcnphy_get_bbmult(pi); | ||
2877 | |||
2878 | idleTssi = read_phy_reg(pi, 0x4ab); | 2794 | idleTssi = read_phy_reg(pi, 0x4ab); |
2879 | suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & | 2795 | suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & |
2880 | MCTL_EN_MAC)); | 2796 | MCTL_EN_MAC)); |
@@ -2892,12 +2808,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi) | |||
2892 | mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 1 << 4); | 2808 | mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 1 << 4); |
2893 | mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 1 << 2); | 2809 | mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 1 << 2); |
2894 | wlc_lcnphy_tssi_setup(pi); | 2810 | wlc_lcnphy_tssi_setup(pi); |
2895 | |||
2896 | mod_phy_reg(pi, 0x4d7, (0x1 << 0), (1 << 0)); | ||
2897 | mod_phy_reg(pi, 0x4d7, (0x1 << 6), (1 << 6)); | ||
2898 | |||
2899 | wlc_lcnphy_set_bbmult(pi, 0x0); | ||
2900 | |||
2901 | wlc_phy_do_dummy_tx(pi, true, OFF); | 2811 | wlc_phy_do_dummy_tx(pi, true, OFF); |
2902 | idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0)) | 2812 | idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0)) |
2903 | >> 0); | 2813 | >> 0); |
@@ -2919,7 +2829,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi) | |||
2919 | 2829 | ||
2920 | mod_phy_reg(pi, 0x44c, (0x1 << 12), (0) << 12); | 2830 | mod_phy_reg(pi, 0x44c, (0x1 << 12), (0) << 12); |
2921 | 2831 | ||
2922 | wlc_lcnphy_set_bbmult(pi, SAVE_bbmult); | ||
2923 | wlc_lcnphy_set_tx_gain_override(pi, tx_gain_override_old); | 2832 | wlc_lcnphy_set_tx_gain_override(pi, tx_gain_override_old); |
2924 | wlc_lcnphy_set_tx_gain(pi, &old_gains); | 2833 | wlc_lcnphy_set_tx_gain(pi, &old_gains); |
2925 | wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl); | 2834 | wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl); |
@@ -3133,11 +3042,6 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi) | |||
3133 | wlc_lcnphy_write_table(pi, &tab); | 3042 | wlc_lcnphy_write_table(pi, &tab); |
3134 | tab.tbl_offset++; | 3043 | tab.tbl_offset++; |
3135 | } | 3044 | } |
3136 | mod_phy_reg(pi, 0x4d0, (0x1 << 0), (0) << 0); | ||
3137 | mod_phy_reg(pi, 0x4d3, (0xff << 0), (0) << 0); | ||
3138 | mod_phy_reg(pi, 0x4d3, (0xff << 8), (0) << 8); | ||
3139 | mod_phy_reg(pi, 0x4d0, (0x1 << 4), (0) << 4); | ||
3140 | mod_phy_reg(pi, 0x4d0, (0x1 << 2), (0) << 2); | ||
3141 | 3045 | ||
3142 | mod_phy_reg(pi, 0x410, (0x1 << 7), (0) << 7); | 3046 | mod_phy_reg(pi, 0x410, (0x1 << 7), (0) << 7); |
3143 | 3047 | ||
@@ -3939,6 +3843,7 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi) | |||
3939 | target_gains.pad_gain = 21; | 3843 | target_gains.pad_gain = 21; |
3940 | target_gains.dac_gain = 0; | 3844 | target_gains.dac_gain = 0; |
3941 | wlc_lcnphy_set_tx_gain(pi, &target_gains); | 3845 | wlc_lcnphy_set_tx_gain(pi, &target_gains); |
3846 | wlc_lcnphy_set_tx_pwr_by_index(pi, 16); | ||
3942 | 3847 | ||
3943 | if (LCNREV_IS(pi->pubpi.phy_rev, 1) || pi_lcn->lcnphy_hw_iqcal_en) { | 3848 | if (LCNREV_IS(pi->pubpi.phy_rev, 1) || pi_lcn->lcnphy_hw_iqcal_en) { |
3944 | 3849 | ||
@@ -3949,7 +3854,6 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi) | |||
3949 | lcnphy_recal ? LCNPHY_CAL_RECAL : | 3854 | lcnphy_recal ? LCNPHY_CAL_RECAL : |
3950 | LCNPHY_CAL_FULL), false); | 3855 | LCNPHY_CAL_FULL), false); |
3951 | } else { | 3856 | } else { |
3952 | wlc_lcnphy_set_tx_pwr_by_index(pi, 16); | ||
3953 | wlc_lcnphy_tx_iqlo_soft_cal_full(pi); | 3857 | wlc_lcnphy_tx_iqlo_soft_cal_full(pi); |
3954 | } | 3858 | } |
3955 | 3859 | ||
@@ -4374,22 +4278,17 @@ wlc_lcnphy_load_tx_gain_table(struct brcms_phy *pi, | |||
4374 | if (CHSPEC_IS5G(pi->radio_chanspec)) | 4278 | if (CHSPEC_IS5G(pi->radio_chanspec)) |
4375 | pa_gain = 0x70; | 4279 | pa_gain = 0x70; |
4376 | else | 4280 | else |
4377 | pa_gain = 0x60; | 4281 | pa_gain = 0x70; |
4378 | 4282 | ||
4379 | if (pi->sh->boardflags & BFL_FEM) | 4283 | if (pi->sh->boardflags & BFL_FEM) |
4380 | pa_gain = 0x10; | 4284 | pa_gain = 0x10; |
4381 | |||
4382 | tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL; | 4285 | tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL; |
4383 | tab.tbl_width = 32; | 4286 | tab.tbl_width = 32; |
4384 | tab.tbl_len = 1; | 4287 | tab.tbl_len = 1; |
4385 | tab.tbl_ptr = &val; | 4288 | tab.tbl_ptr = &val; |
4386 | 4289 | ||
4387 | for (j = 0; j < 128; j++) { | 4290 | for (j = 0; j < 128; j++) { |
4388 | if (pi->sh->boardflags & BFL_FEM) | 4291 | gm_gain = gain_table[j].gm; |
4389 | gm_gain = gain_table[j].gm; | ||
4390 | else | ||
4391 | gm_gain = 15; | ||
4392 | |||
4393 | val = (((u32) pa_gain << 24) | | 4292 | val = (((u32) pa_gain << 24) | |
4394 | (gain_table[j].pad << 16) | | 4293 | (gain_table[j].pad << 16) | |
4395 | (gain_table[j].pga << 8) | gm_gain); | 4294 | (gain_table[j].pga << 8) | gm_gain); |
@@ -4600,10 +4499,7 @@ static void wlc_radio_2064_init(struct brcms_phy *pi) | |||
4600 | 4499 | ||
4601 | write_phy_reg(pi, 0x4ea, 0x4688); | 4500 | write_phy_reg(pi, 0x4ea, 0x4688); |
4602 | 4501 | ||
4603 | if (pi->sh->boardflags & BFL_FEM) | 4502 | mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0); |
4604 | mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0); | ||
4605 | else | ||
4606 | mod_phy_reg(pi, 0x4eb, (0x7 << 0), 3 << 0); | ||
4607 | 4503 | ||
4608 | mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6); | 4504 | mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6); |
4609 | 4505 | ||
@@ -4614,13 +4510,6 @@ static void wlc_radio_2064_init(struct brcms_phy *pi) | |||
4614 | wlc_lcnphy_rcal(pi); | 4510 | wlc_lcnphy_rcal(pi); |
4615 | 4511 | ||
4616 | wlc_lcnphy_rc_cal(pi); | 4512 | wlc_lcnphy_rc_cal(pi); |
4617 | |||
4618 | if (!(pi->sh->boardflags & BFL_FEM)) { | ||
4619 | write_radio_reg(pi, RADIO_2064_REG032, 0x6f); | ||
4620 | write_radio_reg(pi, RADIO_2064_REG033, 0x19); | ||
4621 | write_radio_reg(pi, RADIO_2064_REG039, 0xe); | ||
4622 | } | ||
4623 | |||
4624 | } | 4513 | } |
4625 | 4514 | ||
4626 | static void wlc_lcnphy_radio_init(struct brcms_phy *pi) | 4515 | static void wlc_lcnphy_radio_init(struct brcms_phy *pi) |
@@ -4650,20 +4539,22 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi) | |||
4650 | wlc_lcnphy_write_table(pi, &tab); | 4539 | wlc_lcnphy_write_table(pi, &tab); |
4651 | } | 4540 | } |
4652 | 4541 | ||
4653 | if (!(pi->sh->boardflags & BFL_FEM)) { | 4542 | tab.tbl_id = LCNPHY_TBL_ID_RFSEQ; |
4654 | tab.tbl_id = LCNPHY_TBL_ID_RFSEQ; | 4543 | tab.tbl_width = 16; |
4655 | tab.tbl_width = 16; | 4544 | tab.tbl_ptr = &val; |
4656 | tab.tbl_ptr = &val; | 4545 | tab.tbl_len = 1; |
4657 | tab.tbl_len = 1; | ||
4658 | 4546 | ||
4659 | val = 150; | 4547 | val = 114; |
4660 | tab.tbl_offset = 0; | 4548 | tab.tbl_offset = 0; |
4661 | wlc_lcnphy_write_table(pi, &tab); | 4549 | wlc_lcnphy_write_table(pi, &tab); |
4662 | 4550 | ||
4663 | val = 220; | 4551 | val = 130; |
4664 | tab.tbl_offset = 1; | 4552 | tab.tbl_offset = 1; |
4665 | wlc_lcnphy_write_table(pi, &tab); | 4553 | wlc_lcnphy_write_table(pi, &tab); |
4666 | } | 4554 | |
4555 | val = 6; | ||
4556 | tab.tbl_offset = 8; | ||
4557 | wlc_lcnphy_write_table(pi, &tab); | ||
4667 | 4558 | ||
4668 | if (CHSPEC_IS2G(pi->radio_chanspec)) { | 4559 | if (CHSPEC_IS2G(pi->radio_chanspec)) { |
4669 | if (pi->sh->boardflags & BFL_FEM) | 4560 | if (pi->sh->boardflags & BFL_FEM) |
@@ -5055,7 +4946,6 @@ void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec) | |||
5055 | wlc_lcnphy_load_tx_iir_filter(pi, true, 3); | 4946 | wlc_lcnphy_load_tx_iir_filter(pi, true, 3); |
5056 | 4947 | ||
5057 | mod_phy_reg(pi, 0x4eb, (0x7 << 3), (1) << 3); | 4948 | mod_phy_reg(pi, 0x4eb, (0x7 << 3), (1) << 3); |
5058 | wlc_lcnphy_tssi_setup(pi); | ||
5059 | } | 4949 | } |
5060 | 4950 | ||
5061 | void wlc_phy_detach_lcnphy(struct brcms_phy *pi) | 4951 | void wlc_phy_detach_lcnphy(struct brcms_phy *pi) |
@@ -5094,7 +4984,8 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi) | |||
5094 | if (!wlc_phy_txpwr_srom_read_lcnphy(pi)) | 4984 | if (!wlc_phy_txpwr_srom_read_lcnphy(pi)) |
5095 | return false; | 4985 | return false; |
5096 | 4986 | ||
5097 | if (LCNREV_IS(pi->pubpi.phy_rev, 1)) { | 4987 | if ((pi->sh->boardflags & BFL_FEM) && |
4988 | (LCNREV_IS(pi->pubpi.phy_rev, 1))) { | ||
5098 | if (pi_lcn->lcnphy_tempsense_option == 3) { | 4989 | if (pi_lcn->lcnphy_tempsense_option == 3) { |
5099 | pi->hwpwrctrl = true; | 4990 | pi->hwpwrctrl = true; |
5100 | pi->hwpwrctrl_capable = true; | 4991 | pi->hwpwrctrl_capable = true; |
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c index b7e95acc2084..622c01ca72c5 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c | |||
@@ -1992,70 +1992,70 @@ static const u16 dot11lcn_sw_ctrl_tbl_4313_epa_rev0[] = { | |||
1992 | }; | 1992 | }; |
1993 | 1993 | ||
1994 | static const u16 dot11lcn_sw_ctrl_tbl_4313_rev0[] = { | 1994 | static const u16 dot11lcn_sw_ctrl_tbl_4313_rev0[] = { |
1995 | 0x0009, | ||
1996 | 0x000a, | 1995 | 0x000a, |
1997 | 0x0005, | ||
1998 | 0x0006, | ||
1999 | 0x0009, | 1996 | 0x0009, |
2000 | 0x000a, | ||
2001 | 0x0005, | ||
2002 | 0x0006, | 1997 | 0x0006, |
2003 | 0x0009, | ||
2004 | 0x000a, | ||
2005 | 0x0005, | 1998 | 0x0005, |
2006 | 0x0006, | ||
2007 | 0x0009, | ||
2008 | 0x000a, | 1999 | 0x000a, |
2009 | 0x0005, | ||
2010 | 0x0006, | ||
2011 | 0x0009, | 2000 | 0x0009, |
2012 | 0x000a, | ||
2013 | 0x0005, | ||
2014 | 0x0006, | 2001 | 0x0006, |
2015 | 0x0009, | ||
2016 | 0x000a, | ||
2017 | 0x0005, | 2002 | 0x0005, |
2018 | 0x0006, | ||
2019 | 0x0009, | ||
2020 | 0x000a, | 2003 | 0x000a, |
2021 | 0x0005, | ||
2022 | 0x0006, | ||
2023 | 0x0009, | 2004 | 0x0009, |
2024 | 0x000a, | ||
2025 | 0x0005, | ||
2026 | 0x0006, | 2005 | 0x0006, |
2027 | 0x0009, | ||
2028 | 0x000a, | ||
2029 | 0x0005, | 2006 | 0x0005, |
2030 | 0x0006, | ||
2031 | 0x0009, | ||
2032 | 0x000a, | 2007 | 0x000a, |
2033 | 0x0005, | ||
2034 | 0x0006, | ||
2035 | 0x0009, | 2008 | 0x0009, |
2036 | 0x000a, | ||
2037 | 0x0005, | ||
2038 | 0x0006, | 2009 | 0x0006, |
2039 | 0x0009, | ||
2040 | 0x000a, | ||
2041 | 0x0005, | 2010 | 0x0005, |
2042 | 0x0006, | 2011 | 0x000a, |
2043 | 0x0009, | 2012 | 0x0009, |
2013 | 0x0006, | ||
2014 | 0x0005, | ||
2044 | 0x000a, | 2015 | 0x000a, |
2016 | 0x0009, | ||
2017 | 0x0006, | ||
2045 | 0x0005, | 2018 | 0x0005, |
2019 | 0x000a, | ||
2020 | 0x0009, | ||
2046 | 0x0006, | 2021 | 0x0006, |
2022 | 0x0005, | ||
2023 | 0x000a, | ||
2047 | 0x0009, | 2024 | 0x0009, |
2025 | 0x0006, | ||
2026 | 0x0005, | ||
2048 | 0x000a, | 2027 | 0x000a, |
2028 | 0x0009, | ||
2029 | 0x0006, | ||
2049 | 0x0005, | 2030 | 0x0005, |
2031 | 0x000a, | ||
2032 | 0x0009, | ||
2050 | 0x0006, | 2033 | 0x0006, |
2034 | 0x0005, | ||
2035 | 0x000a, | ||
2051 | 0x0009, | 2036 | 0x0009, |
2037 | 0x0006, | ||
2038 | 0x0005, | ||
2052 | 0x000a, | 2039 | 0x000a, |
2040 | 0x0009, | ||
2041 | 0x0006, | ||
2053 | 0x0005, | 2042 | 0x0005, |
2043 | 0x000a, | ||
2044 | 0x0009, | ||
2054 | 0x0006, | 2045 | 0x0006, |
2046 | 0x0005, | ||
2047 | 0x000a, | ||
2055 | 0x0009, | 2048 | 0x0009, |
2049 | 0x0006, | ||
2050 | 0x0005, | ||
2056 | 0x000a, | 2051 | 0x000a, |
2052 | 0x0009, | ||
2053 | 0x0006, | ||
2057 | 0x0005, | 2054 | 0x0005, |
2055 | 0x000a, | ||
2056 | 0x0009, | ||
2058 | 0x0006, | 2057 | 0x0006, |
2058 | 0x0005, | ||
2059 | }; | 2059 | }; |
2060 | 2060 | ||
2061 | static const u16 dot11lcn_sw_ctrl_tbl_rev0[] = { | 2061 | static const u16 dot11lcn_sw_ctrl_tbl_rev0[] = { |
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c index e8324b5e5bfe..6c7493c2d698 100644 --- a/drivers/net/wireless/iwlegacy/4965-rs.c +++ b/drivers/net/wireless/iwlegacy/4965-rs.c | |||
@@ -2152,7 +2152,7 @@ il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf, | |||
2152 | int rate_idx; | 2152 | int rate_idx; |
2153 | int i; | 2153 | int i; |
2154 | u32 rate; | 2154 | u32 rate; |
2155 | u8 use_green = il4965_rs_use_green(il, sta); | 2155 | u8 use_green; |
2156 | u8 active_tbl = 0; | 2156 | u8 active_tbl = 0; |
2157 | u8 valid_tx_ant; | 2157 | u8 valid_tx_ant; |
2158 | struct il_station_priv *sta_priv; | 2158 | struct il_station_priv *sta_priv; |
@@ -2160,6 +2160,7 @@ il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf, | |||
2160 | if (!sta || !lq_sta) | 2160 | if (!sta || !lq_sta) |
2161 | return; | 2161 | return; |
2162 | 2162 | ||
2163 | use_green = il4965_rs_use_green(il, sta); | ||
2163 | sta_priv = (void *)sta->drv_priv; | 2164 | sta_priv = (void *)sta->drv_priv; |
2164 | 2165 | ||
2165 | i = lq_sta->last_txrate_idx; | 2166 | i = lq_sta->last_txrate_idx; |
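
The iwlegacy change above is a plain use-before-check fix: il4965_rs_use_green() dereferences sta, so evaluating it in the declaration ran before the !sta || !lq_sta bail-out. The shape of the bug and of the fix, in a small stand-alone form with types reduced to the minimum needed to compile:

    #include <stdint.h>

    struct sta { int ht_green_field; };

    static uint8_t rs_use_green(const struct sta *sta)
    {
        return sta->ht_green_field != 0;    /* dereferences sta */
    }

    /* Buggy shape: the initializer runs before the NULL check. */
    static void init_lq_bad(const struct sta *sta)
    {
        uint8_t use_green = rs_use_green(sta);  /* NULL deref if sta == NULL */

        if (!sta)
            return;
        (void)use_green;
    }

    /* Fixed shape: declare first, assign only after sta is known valid. */
    static void init_lq_good(const struct sta *sta)
    {
        uint8_t use_green;

        if (!sta)
            return;
        use_green = rs_use_green(sta);
        (void)use_green;
    }

    int main(void)
    {
        init_lq_good(0);    /* safe: returns before the dereference */
        return 0;
    }
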
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c index 86ea5f4c3939..44ca0e57f9f7 100644 --- a/drivers/net/wireless/iwlwifi/dvm/lib.c +++ b/drivers/net/wireless/iwlwifi/dvm/lib.c | |||
@@ -1262,6 +1262,15 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) | |||
1262 | } | 1262 | } |
1263 | 1263 | ||
1264 | /* | 1264 | /* |
1265 | * This can happen upon FW ASSERT: we clear the STATUS_FW_ERROR flag | ||
1266 | * in iwl_down but cancel the workers only later. | ||
1267 | */ | ||
1268 | if (!priv->ucode_loaded) { | ||
1269 | IWL_ERR(priv, "Fw not loaded - dropping CMD: %x\n", cmd->id); | ||
1270 | return -EIO; | ||
1271 | } | ||
1272 | |||
1273 | /* | ||
1265 | * Synchronous commands from this op-mode must hold | 1274 | * Synchronous commands from this op-mode must hold |
1266 | * the mutex, this ensures we don't try to send two | 1275 | * the mutex, this ensures we don't try to send two |
1267 | * (or more) synchronous commands at a time. | 1276 | * (or more) synchronous commands at a time. |
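
This lib.c guard works together with the dvm/ucode.c hunk further down, which sets priv->ucode_loaded right after the alive notification instead of at the end of iwl_load_ucode_wait_alive(); otherwise the driver's own early init commands, sent before the old assignment point, would now be rejected here. A hedged sketch of the combined pattern with simplified, invented names:

    #include <errno.h>
    #include <stdbool.h>

    struct example_priv {
        bool ucode_loaded;
    };

    /* Host-command entry point: fail fast if the firmware is gone (for
     * example after a firmware assert) instead of letting the command
     * sit on the queue until it times out. */
    static int example_send_cmd(struct example_priv *priv, unsigned int cmd_id)
    {
        if (!priv->ucode_loaded)
            return -EIO;
        /* ... enqueue the command ... */
        return 0;
    }

    /* Init path: mark the ucode as loaded as soon as it reports alive, so
     * the very first init/calibration commands pass the guard above. */
    static int example_wait_alive(struct example_priv *priv)
    {
        /* ... wait for the ALIVE notification ... */
        priv->ucode_loaded = true;
        /* ... send follow-up commands via example_send_cmd() ... */
        return 0;
    }
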
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c index 23be948cf162..a82b6b39d4ff 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rxon.c +++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c | |||
@@ -1419,6 +1419,14 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, | |||
1419 | 1419 | ||
1420 | mutex_lock(&priv->mutex); | 1420 | mutex_lock(&priv->mutex); |
1421 | 1421 | ||
1422 | if (changes & BSS_CHANGED_IDLE && bss_conf->idle) { | ||
1423 | /* | ||
1424 | * If we go idle, then clearly no "passive-no-rx" | ||
1425 | * workaround is needed any more, this is a reset. | ||
1426 | */ | ||
1427 | iwlagn_lift_passive_no_rx(priv); | ||
1428 | } | ||
1429 | |||
1422 | if (unlikely(!iwl_is_ready(priv))) { | 1430 | if (unlikely(!iwl_is_ready(priv))) { |
1423 | IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); | 1431 | IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); |
1424 | mutex_unlock(&priv->mutex); | 1432 | mutex_unlock(&priv->mutex); |
@@ -1450,16 +1458,6 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, | |||
1450 | priv->timestamp = bss_conf->sync_tsf; | 1458 | priv->timestamp = bss_conf->sync_tsf; |
1451 | ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; | 1459 | ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; |
1452 | } else { | 1460 | } else { |
1453 | /* | ||
1454 | * If we disassociate while there are pending | ||
1455 | * frames, just wake up the queues and let the | ||
1456 | * frames "escape" ... This shouldn't really | ||
1457 | * be happening to start with, but we should | ||
1458 | * not get stuck in this case either since it | ||
1459 | * can happen if userspace gets confused. | ||
1460 | */ | ||
1461 | iwlagn_lift_passive_no_rx(priv); | ||
1462 | |||
1463 | ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | 1461 | ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; |
1464 | 1462 | ||
1465 | if (ctx->ctxid == IWL_RXON_CTX_BSS) | 1463 | if (ctx->ctxid == IWL_RXON_CTX_BSS) |
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c index 6aec2df3bb27..d1a670d7b10c 100644 --- a/drivers/net/wireless/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/iwlwifi/dvm/tx.c | |||
@@ -1192,7 +1192,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, | |||
1192 | memset(&info->status, 0, sizeof(info->status)); | 1192 | memset(&info->status, 0, sizeof(info->status)); |
1193 | 1193 | ||
1194 | if (status == TX_STATUS_FAIL_PASSIVE_NO_RX && | 1194 | if (status == TX_STATUS_FAIL_PASSIVE_NO_RX && |
1195 | iwl_is_associated_ctx(ctx) && ctx->vif && | 1195 | ctx->vif && |
1196 | ctx->vif->type == NL80211_IFTYPE_STATION) { | 1196 | ctx->vif->type == NL80211_IFTYPE_STATION) { |
1197 | /* block and stop all queues */ | 1197 | /* block and stop all queues */ |
1198 | priv->passive_no_rx = true; | 1198 | priv->passive_no_rx = true; |
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c index 736fe9bb140e..1a4ac9236a44 100644 --- a/drivers/net/wireless/iwlwifi/dvm/ucode.c +++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c | |||
@@ -367,6 +367,8 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv, | |||
367 | return -EIO; | 367 | return -EIO; |
368 | } | 368 | } |
369 | 369 | ||
370 | priv->ucode_loaded = true; | ||
371 | |||
370 | if (ucode_type != IWL_UCODE_WOWLAN) { | 372 | if (ucode_type != IWL_UCODE_WOWLAN) { |
371 | /* delay a bit to give rfkill time to run */ | 373 | /* delay a bit to give rfkill time to run */ |
372 | msleep(5); | 374 | msleep(5); |
@@ -380,8 +382,6 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv, | |||
380 | return ret; | 382 | return ret; |
381 | } | 383 | } |
382 | 384 | ||
383 | priv->ucode_loaded = true; | ||
384 | |||
385 | return 0; | 385 | return 0; |
386 | } | 386 | } |
387 | 387 | ||
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 17bedc50e753..12c4f31ca8fb 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c | |||
@@ -475,6 +475,10 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, | |||
475 | 475 | ||
476 | /* If platform's RF_KILL switch is NOT set to KILL */ | 476 | /* If platform's RF_KILL switch is NOT set to KILL */ |
477 | hw_rfkill = iwl_is_rfkill_set(trans); | 477 | hw_rfkill = iwl_is_rfkill_set(trans); |
478 | if (hw_rfkill) | ||
479 | set_bit(STATUS_RFKILL, &trans_pcie->status); | ||
480 | else | ||
481 | clear_bit(STATUS_RFKILL, &trans_pcie->status); | ||
478 | iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); | 482 | iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); |
479 | if (hw_rfkill && !run_in_rfkill) | 483 | if (hw_rfkill && !run_in_rfkill) |
480 | return -ERFKILL; | 484 | return -ERFKILL; |
@@ -641,6 +645,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, | |||
641 | 645 | ||
642 | static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) | 646 | static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) |
643 | { | 647 | { |
648 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
644 | bool hw_rfkill; | 649 | bool hw_rfkill; |
645 | int err; | 650 | int err; |
646 | 651 | ||
@@ -656,6 +661,10 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) | |||
656 | iwl_enable_rfkill_int(trans); | 661 | iwl_enable_rfkill_int(trans); |
657 | 662 | ||
658 | hw_rfkill = iwl_is_rfkill_set(trans); | 663 | hw_rfkill = iwl_is_rfkill_set(trans); |
664 | if (hw_rfkill) | ||
665 | set_bit(STATUS_RFKILL, &trans_pcie->status); | ||
666 | else | ||
667 | clear_bit(STATUS_RFKILL, &trans_pcie->status); | ||
659 | iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); | 668 | iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); |
660 | 669 | ||
661 | return 0; | 670 | return 0; |
@@ -694,6 +703,10 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans, | |||
694 | * op_mode. | 703 | * op_mode. |
695 | */ | 704 | */ |
696 | hw_rfkill = iwl_is_rfkill_set(trans); | 705 | hw_rfkill = iwl_is_rfkill_set(trans); |
706 | if (hw_rfkill) | ||
707 | set_bit(STATUS_RFKILL, &trans_pcie->status); | ||
708 | else | ||
709 | clear_bit(STATUS_RFKILL, &trans_pcie->status); | ||
697 | iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); | 710 | iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); |
698 | } | 711 | } |
699 | } | 712 | } |
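
All three pcie/trans.c hunks above add the same four lines wherever the hardware rfkill line is sampled, so that trans_pcie->status mirrors the switch before the op-mode is notified. One possible factoring of that repetition is sketched below; the helper name is invented, but every call in the body already appears in the hunks and is assumed to behave as shown there.

    /* Sample the hardware rfkill switch, mirror it into the transport
     * status bits, and notify the op-mode; returns the sampled state. */
    static bool example_check_hw_rfkill(struct iwl_trans *trans)
    {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        bool hw_rfkill = iwl_is_rfkill_set(trans);

        if (hw_rfkill)
            set_bit(STATUS_RFKILL, &trans_pcie->status);
        else
            clear_bit(STATUS_RFKILL, &trans_pcie->status);

        iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
        return hw_rfkill;
    }

Keeping the state in a status bit lets later code (for example the command submission path) check for rfkill without touching the hardware again.
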
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 8595c16f74de..cb5c6792e3a8 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c | |||
@@ -1264,7 +1264,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, | |||
1264 | for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { | 1264 | for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { |
1265 | int copy = 0; | 1265 | int copy = 0; |
1266 | 1266 | ||
1267 | if (!cmd->len) | 1267 | if (!cmd->len[i]) |
1268 | continue; | 1268 | continue; |
1269 | 1269 | ||
1270 | /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */ | 1270 | /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */ |
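
The single-character pcie/tx.c fix above is easy to misread: cmd->len is an array, so the old !cmd->len tested the (never-NULL) decayed pointer and the continue could never fire, leaving zero-length fragments in the copy loop. A stand-alone illustration of the difference:

    #include <stdio.h>

    #define MAX_TBS 2

    struct host_cmd {
        unsigned int len[MAX_TBS];
        const void *data[MAX_TBS];
    };

    int main(void)
    {
        struct host_cmd cmd = { .len = { 16, 0 }, .data = { "hdr", 0 } };

        for (int i = 0; i < MAX_TBS; i++) {
            /* "if (!cmd.len)" would test the array's address and never
             * skip anything; the per-fragment test is what is wanted. */
            if (!cmd.len[i])
                continue;
            printf("copy fragment %d: %u bytes\n", i, cmd.len[i]);
        }
        return 0;
    }
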
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index a44023a7bd57..8aaf56ade4d9 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c | |||
@@ -1892,7 +1892,8 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, | |||
1892 | } | 1892 | } |
1893 | } | 1893 | } |
1894 | 1894 | ||
1895 | for (i = 0; i < request->n_channels; i++) { | 1895 | for (i = 0; i < min_t(u32, request->n_channels, |
1896 | MWIFIEX_USER_SCAN_CHAN_MAX); i++) { | ||
1896 | chan = request->channels[i]; | 1897 | chan = request->channels[i]; |
1897 | priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value; | 1898 | priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value; |
1898 | priv->user_scan_cfg->chan_list[i].radio_type = chan->band; | 1899 | priv->user_scan_cfg->chan_list[i].radio_type = chan->band; |
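
The mwifiex_cfg80211_scan() hunk above caps the copy loop at MWIFIEX_USER_SCAN_CHAN_MAX so a request with more channels than chan_list can hold no longer overruns the array. The clamped-loop idiom in isolation, with a made-up capacity standing in for the driver constant:

    #include <stddef.h>

    #define DEMO_SCAN_CHAN_MAX 50   /* stand-in for MWIFIEX_USER_SCAN_CHAN_MAX */

    /* Copy at most DEMO_SCAN_CHAN_MAX channel numbers; the kernel code
     * expresses the same bound with min_t(u32, n_channels, ...). */
    static size_t copy_scan_channels(int *dst, const int *req, size_t n_channels)
    {
        size_t n = n_channels < DEMO_SCAN_CHAN_MAX ? n_channels
                                                   : DEMO_SCAN_CHAN_MAX;

        for (size_t i = 0; i < n; i++)
            dst[i] = req[i];
        return n;
    }
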
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index 5c395e2e6a2b..feb204613397 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c | |||
@@ -1508,6 +1508,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) | |||
1508 | } | 1508 | } |
1509 | memcpy(adapter->upld_buf, skb->data, | 1509 | memcpy(adapter->upld_buf, skb->data, |
1510 | min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len)); | 1510 | min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len)); |
1511 | skb_push(skb, INTF_HEADER_LEN); | ||
1511 | if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE, | 1512 | if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE, |
1512 | PCI_DMA_FROMDEVICE)) | 1513 | PCI_DMA_FROMDEVICE)) |
1513 | return -1; | 1514 | return -1; |
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index d215b4d3c51b..e7f6deaf715e 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c | |||
@@ -1393,8 +1393,10 @@ int mwifiex_scan_networks(struct mwifiex_private *priv, | |||
1393 | queue_work(adapter->workqueue, &adapter->main_work); | 1393 | queue_work(adapter->workqueue, &adapter->main_work); |
1394 | 1394 | ||
1395 | /* Perform internal scan synchronously */ | 1395 | /* Perform internal scan synchronously */ |
1396 | if (!priv->scan_request) | 1396 | if (!priv->scan_request) { |
1397 | dev_dbg(adapter->dev, "wait internal scan\n"); | ||
1397 | mwifiex_wait_queue_complete(adapter, cmd_node); | 1398 | mwifiex_wait_queue_complete(adapter, cmd_node); |
1399 | } | ||
1398 | } else { | 1400 | } else { |
1399 | spin_unlock_irqrestore(&adapter->scan_pending_q_lock, | 1401 | spin_unlock_irqrestore(&adapter->scan_pending_q_lock, |
1400 | flags); | 1402 | flags); |
@@ -1793,7 +1795,12 @@ check_next_scan: | |||
1793 | /* Need to indicate IOCTL complete */ | 1795 | /* Need to indicate IOCTL complete */ |
1794 | if (adapter->curr_cmd->wait_q_enabled) { | 1796 | if (adapter->curr_cmd->wait_q_enabled) { |
1795 | adapter->cmd_wait_q.status = 0; | 1797 | adapter->cmd_wait_q.status = 0; |
1796 | mwifiex_complete_cmd(adapter, adapter->curr_cmd); | 1798 | if (!priv->scan_request) { |
1799 | dev_dbg(adapter->dev, | ||
1800 | "complete internal scan\n"); | ||
1801 | mwifiex_complete_cmd(adapter, | ||
1802 | adapter->curr_cmd); | ||
1803 | } | ||
1797 | } | 1804 | } |
1798 | if (priv->report_scan_result) | 1805 | if (priv->report_scan_result) |
1799 | priv->report_scan_result = false; | 1806 | priv->report_scan_result = false; |
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig index 2bf4efa33186..76cd47eb901e 100644 --- a/drivers/net/wireless/rt2x00/Kconfig +++ b/drivers/net/wireless/rt2x00/Kconfig | |||
@@ -20,6 +20,7 @@ if RT2X00 | |||
20 | config RT2400PCI | 20 | config RT2400PCI |
21 | tristate "Ralink rt2400 (PCI/PCMCIA) support" | 21 | tristate "Ralink rt2400 (PCI/PCMCIA) support" |
22 | depends on PCI | 22 | depends on PCI |
23 | select RT2X00_LIB_MMIO | ||
23 | select RT2X00_LIB_PCI | 24 | select RT2X00_LIB_PCI |
24 | select EEPROM_93CX6 | 25 | select EEPROM_93CX6 |
25 | ---help--- | 26 | ---help--- |
@@ -31,6 +32,7 @@ config RT2400PCI | |||
31 | config RT2500PCI | 32 | config RT2500PCI |
32 | tristate "Ralink rt2500 (PCI/PCMCIA) support" | 33 | tristate "Ralink rt2500 (PCI/PCMCIA) support" |
33 | depends on PCI | 34 | depends on PCI |
35 | select RT2X00_LIB_MMIO | ||
34 | select RT2X00_LIB_PCI | 36 | select RT2X00_LIB_PCI |
35 | select EEPROM_93CX6 | 37 | select EEPROM_93CX6 |
36 | ---help--- | 38 | ---help--- |
@@ -43,6 +45,7 @@ config RT61PCI | |||
43 | tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support" | 45 | tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support" |
44 | depends on PCI | 46 | depends on PCI |
45 | select RT2X00_LIB_PCI | 47 | select RT2X00_LIB_PCI |
48 | select RT2X00_LIB_MMIO | ||
46 | select RT2X00_LIB_FIRMWARE | 49 | select RT2X00_LIB_FIRMWARE |
47 | select RT2X00_LIB_CRYPTO | 50 | select RT2X00_LIB_CRYPTO |
48 | select CRC_ITU_T | 51 | select CRC_ITU_T |
@@ -57,6 +60,7 @@ config RT2800PCI | |||
57 | tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support" | 60 | tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support" |
58 | depends on PCI || SOC_RT288X || SOC_RT305X | 61 | depends on PCI || SOC_RT288X || SOC_RT305X |
59 | select RT2800_LIB | 62 | select RT2800_LIB |
63 | select RT2X00_LIB_MMIO | ||
60 | select RT2X00_LIB_PCI if PCI | 64 | select RT2X00_LIB_PCI if PCI |
61 | select RT2X00_LIB_SOC if SOC_RT288X || SOC_RT305X | 65 | select RT2X00_LIB_SOC if SOC_RT288X || SOC_RT305X |
62 | select RT2X00_LIB_FIRMWARE | 66 | select RT2X00_LIB_FIRMWARE |
@@ -185,6 +189,9 @@ endif | |||
185 | config RT2800_LIB | 189 | config RT2800_LIB |
186 | tristate | 190 | tristate |
187 | 191 | ||
192 | config RT2X00_LIB_MMIO | ||
193 | tristate | ||
194 | |||
188 | config RT2X00_LIB_PCI | 195 | config RT2X00_LIB_PCI |
189 | tristate | 196 | tristate |
190 | select RT2X00_LIB | 197 | select RT2X00_LIB |
diff --git a/drivers/net/wireless/rt2x00/Makefile b/drivers/net/wireless/rt2x00/Makefile index 349d5b8284a4..f069d8bc5b67 100644 --- a/drivers/net/wireless/rt2x00/Makefile +++ b/drivers/net/wireless/rt2x00/Makefile | |||
@@ -9,6 +9,7 @@ rt2x00lib-$(CONFIG_RT2X00_LIB_FIRMWARE) += rt2x00firmware.o | |||
9 | rt2x00lib-$(CONFIG_RT2X00_LIB_LEDS) += rt2x00leds.o | 9 | rt2x00lib-$(CONFIG_RT2X00_LIB_LEDS) += rt2x00leds.o |
10 | 10 | ||
11 | obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o | 11 | obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o |
12 | obj-$(CONFIG_RT2X00_LIB_MMIO) += rt2x00mmio.o | ||
12 | obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o | 13 | obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o |
13 | obj-$(CONFIG_RT2X00_LIB_SOC) += rt2x00soc.o | 14 | obj-$(CONFIG_RT2X00_LIB_SOC) += rt2x00soc.o |
14 | obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o | 15 | obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o |
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c index 221beaaa83f1..dcfb54e0c516 100644 --- a/drivers/net/wireless/rt2x00/rt2400pci.c +++ b/drivers/net/wireless/rt2x00/rt2400pci.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
35 | 35 | ||
36 | #include "rt2x00.h" | 36 | #include "rt2x00.h" |
37 | #include "rt2x00mmio.h" | ||
37 | #include "rt2x00pci.h" | 38 | #include "rt2x00pci.h" |
38 | #include "rt2400pci.h" | 39 | #include "rt2400pci.h" |
39 | 40 | ||
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c index 39edc59e8d03..e1d2dc9ed28a 100644 --- a/drivers/net/wireless/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/rt2x00/rt2500pci.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
35 | 35 | ||
36 | #include "rt2x00.h" | 36 | #include "rt2x00.h" |
37 | #include "rt2x00mmio.h" | ||
37 | #include "rt2x00pci.h" | 38 | #include "rt2x00pci.h" |
38 | #include "rt2500pci.h" | 39 | #include "rt2500pci.h" |
39 | 40 | ||
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index ded73da4de0b..ba5a05625aaa 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/eeprom_93cx6.h> | 41 | #include <linux/eeprom_93cx6.h> |
42 | 42 | ||
43 | #include "rt2x00.h" | 43 | #include "rt2x00.h" |
44 | #include "rt2x00mmio.h" | ||
44 | #include "rt2x00pci.h" | 45 | #include "rt2x00pci.h" |
45 | #include "rt2x00soc.h" | 46 | #include "rt2x00soc.h" |
46 | #include "rt2800lib.h" | 47 | #include "rt2800lib.h" |
diff --git a/drivers/net/wireless/rt2x00/rt2x00mmio.c b/drivers/net/wireless/rt2x00/rt2x00mmio.c new file mode 100644 index 000000000000..d84a680ba0c9 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00mmio.c | |||
@@ -0,0 +1,216 @@ | |||
1 | /* | ||
2 | Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> | ||
3 | <http://rt2x00.serialmonkey.com> | ||
4 | |||
5 | This program is free software; you can redistribute it and/or modify | ||
6 | it under the terms of the GNU General Public License as published by | ||
7 | the Free Software Foundation; either version 2 of the License, or | ||
8 | (at your option) any later version. | ||
9 | |||
10 | This program is distributed in the hope that it will be useful, | ||
11 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | GNU General Public License for more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License | ||
16 | along with this program; if not, write to the | ||
17 | Free Software Foundation, Inc., | ||
18 | 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | */ | ||
20 | |||
21 | /* | ||
22 | Module: rt2x00mmio | ||
23 | Abstract: rt2x00 generic mmio device routines. | ||
24 | */ | ||
25 | |||
26 | #include <linux/dma-mapping.h> | ||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/module.h> | ||
29 | #include <linux/slab.h> | ||
30 | |||
31 | #include "rt2x00.h" | ||
32 | #include "rt2x00mmio.h" | ||
33 | |||
34 | /* | ||
35 | * Register access. | ||
36 | */ | ||
37 | int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, | ||
38 | const unsigned int offset, | ||
39 | const struct rt2x00_field32 field, | ||
40 | u32 *reg) | ||
41 | { | ||
42 | unsigned int i; | ||
43 | |||
44 | if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) | ||
45 | return 0; | ||
46 | |||
47 | for (i = 0; i < REGISTER_BUSY_COUNT; i++) { | ||
48 | rt2x00pci_register_read(rt2x00dev, offset, reg); | ||
49 | if (!rt2x00_get_field32(*reg, field)) | ||
50 | return 1; | ||
51 | udelay(REGISTER_BUSY_DELAY); | ||
52 | } | ||
53 | |||
54 | printk_once(KERN_ERR "%s() Indirect register access failed: " | ||
55 | "offset=0x%.08x, value=0x%.08x\n", __func__, offset, *reg); | ||
56 | *reg = ~0; | ||
57 | |||
58 | return 0; | ||
59 | } | ||
60 | EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read); | ||
61 | |||
62 | bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev) | ||
63 | { | ||
64 | struct data_queue *queue = rt2x00dev->rx; | ||
65 | struct queue_entry *entry; | ||
66 | struct queue_entry_priv_pci *entry_priv; | ||
67 | struct skb_frame_desc *skbdesc; | ||
68 | int max_rx = 16; | ||
69 | |||
70 | while (--max_rx) { | ||
71 | entry = rt2x00queue_get_entry(queue, Q_INDEX); | ||
72 | entry_priv = entry->priv_data; | ||
73 | |||
74 | if (rt2x00dev->ops->lib->get_entry_state(entry)) | ||
75 | break; | ||
76 | |||
77 | /* | ||
78 | * Fill in desc fields of the skb descriptor | ||
79 | */ | ||
80 | skbdesc = get_skb_frame_desc(entry->skb); | ||
81 | skbdesc->desc = entry_priv->desc; | ||
82 | skbdesc->desc_len = entry->queue->desc_size; | ||
83 | |||
84 | /* | ||
85 | * DMA is already done, notify rt2x00lib that | ||
86 | * it finished successfully. | ||
87 | */ | ||
88 | rt2x00lib_dmastart(entry); | ||
89 | rt2x00lib_dmadone(entry); | ||
90 | |||
91 | /* | ||
92 | * Send the frame to rt2x00lib for further processing. | ||
93 | */ | ||
94 | rt2x00lib_rxdone(entry, GFP_ATOMIC); | ||
95 | } | ||
96 | |||
97 | return !max_rx; | ||
98 | } | ||
99 | EXPORT_SYMBOL_GPL(rt2x00pci_rxdone); | ||
100 | |||
101 | void rt2x00pci_flush_queue(struct data_queue *queue, bool drop) | ||
102 | { | ||
103 | unsigned int i; | ||
104 | |||
105 | for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++) | ||
106 | msleep(10); | ||
107 | } | ||
108 | EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue); | ||
109 | |||
110 | /* | ||
111 | * Device initialization handlers. | ||
112 | */ | ||
113 | static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev, | ||
114 | struct data_queue *queue) | ||
115 | { | ||
116 | struct queue_entry_priv_pci *entry_priv; | ||
117 | void *addr; | ||
118 | dma_addr_t dma; | ||
119 | unsigned int i; | ||
120 | |||
121 | /* | ||
122 | * Allocate DMA memory for descriptor and buffer. | ||
123 | */ | ||
124 | addr = dma_alloc_coherent(rt2x00dev->dev, | ||
125 | queue->limit * queue->desc_size, | ||
126 | &dma, GFP_KERNEL); | ||
127 | if (!addr) | ||
128 | return -ENOMEM; | ||
129 | |||
130 | memset(addr, 0, queue->limit * queue->desc_size); | ||
131 | |||
132 | /* | ||
133 | * Initialize all queue entries to contain valid addresses. | ||
134 | */ | ||
135 | for (i = 0; i < queue->limit; i++) { | ||
136 | entry_priv = queue->entries[i].priv_data; | ||
137 | entry_priv->desc = addr + i * queue->desc_size; | ||
138 | entry_priv->desc_dma = dma + i * queue->desc_size; | ||
139 | } | ||
140 | |||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev, | ||
145 | struct data_queue *queue) | ||
146 | { | ||
147 | struct queue_entry_priv_pci *entry_priv = | ||
148 | queue->entries[0].priv_data; | ||
149 | |||
150 | if (entry_priv->desc) | ||
151 | dma_free_coherent(rt2x00dev->dev, | ||
152 | queue->limit * queue->desc_size, | ||
153 | entry_priv->desc, entry_priv->desc_dma); | ||
154 | entry_priv->desc = NULL; | ||
155 | } | ||
156 | |||
157 | int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev) | ||
158 | { | ||
159 | struct data_queue *queue; | ||
160 | int status; | ||
161 | |||
162 | /* | ||
163 | * Allocate DMA | ||
164 | */ | ||
165 | queue_for_each(rt2x00dev, queue) { | ||
166 | status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue); | ||
167 | if (status) | ||
168 | goto exit; | ||
169 | } | ||
170 | |||
171 | /* | ||
172 | * Register interrupt handler. | ||
173 | */ | ||
174 | status = request_irq(rt2x00dev->irq, | ||
175 | rt2x00dev->ops->lib->irq_handler, | ||
176 | IRQF_SHARED, rt2x00dev->name, rt2x00dev); | ||
177 | if (status) { | ||
178 | ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n", | ||
179 | rt2x00dev->irq, status); | ||
180 | goto exit; | ||
181 | } | ||
182 | |||
183 | return 0; | ||
184 | |||
185 | exit: | ||
186 | queue_for_each(rt2x00dev, queue) | ||
187 | rt2x00pci_free_queue_dma(rt2x00dev, queue); | ||
188 | |||
189 | return status; | ||
190 | } | ||
191 | EXPORT_SYMBOL_GPL(rt2x00pci_initialize); | ||
192 | |||
193 | void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev) | ||
194 | { | ||
195 | struct data_queue *queue; | ||
196 | |||
197 | /* | ||
198 | * Free irq line. | ||
199 | */ | ||
200 | free_irq(rt2x00dev->irq, rt2x00dev); | ||
201 | |||
202 | /* | ||
203 | * Free DMA | ||
204 | */ | ||
205 | queue_for_each(rt2x00dev, queue) | ||
206 | rt2x00pci_free_queue_dma(rt2x00dev, queue); | ||
207 | } | ||
208 | EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize); | ||
209 | |||
210 | /* | ||
211 | * rt2x00mmio module information. | ||
212 | */ | ||
213 | MODULE_AUTHOR(DRV_PROJECT); | ||
214 | MODULE_VERSION(DRV_VERSION); | ||
215 | MODULE_DESCRIPTION("rt2x00 mmio library"); | ||
216 | MODULE_LICENSE("GPL"); | ||
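The new rt2x00mmio.c carries the register-busy poll, RX-done loop and descriptor-ring DMA setup over from rt2x00pci.c unchanged; the recurring idiom in rt2x00pci_regbusy_read() is "read a status field a bounded number of times, delay between reads, and hand back a sentinel value on timeout". Below is a minimal, self-contained userspace sketch of that bounded-poll convention; every name in it (poll_register, BUSY_COUNT, BUSY_DELAY_US, the fake register) is invented for illustration and is not rt2x00 API.

/* Standalone illustration of the bounded-poll pattern used by
 * rt2x00pci_regbusy_read(); all names here are made up for the example. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define BUSY_COUNT    100   /* maximum number of reads before giving up */
#define BUSY_DELAY_US 100   /* delay between reads, in microseconds */

static volatile uint32_t fake_register = 0x80000000u; /* busy bit set */

static uint32_t read_register(void)
{
	/* Stand-in for readl(); pretend the busy bit clears after a few reads. */
	static int reads;

	if (++reads > 3)
		fake_register &= ~0x80000000u;
	return fake_register;
}

/* Returns true with the value stored when the busy bit cleared in time,
 * false with *reg = ~0 on timeout - mirroring the driver's convention. */
static bool poll_register(uint32_t busy_mask, uint32_t *reg)
{
	for (unsigned int i = 0; i < BUSY_COUNT; i++) {
		*reg = read_register();
		if (!(*reg & busy_mask))
			return true;
		usleep(BUSY_DELAY_US);
	}
	*reg = ~0u;
	return false;
}

int main(void)
{
	uint32_t reg;

	if (poll_register(0x80000000u, &reg))
		printf("register ready: 0x%08x\n", reg);
	else
		printf("timed out\n");
	return 0;
}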
diff --git a/drivers/net/wireless/rt2x00/rt2x00mmio.h b/drivers/net/wireless/rt2x00/rt2x00mmio.h new file mode 100644 index 000000000000..4ecaf60175bf --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00mmio.h | |||
@@ -0,0 +1,119 @@ | |||
1 | /* | ||
2 | Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> | ||
3 | <http://rt2x00.serialmonkey.com> | ||
4 | |||
5 | This program is free software; you can redistribute it and/or modify | ||
6 | it under the terms of the GNU General Public License as published by | ||
7 | the Free Software Foundation; either version 2 of the License, or | ||
8 | (at your option) any later version. | ||
9 | |||
10 | This program is distributed in the hope that it will be useful, | ||
11 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | GNU General Public License for more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License | ||
16 | along with this program; if not, write to the | ||
17 | Free Software Foundation, Inc., | ||
18 | 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | */ | ||
20 | |||
21 | /* | ||
22 | Module: rt2x00mmio | ||
23 | Abstract: Data structures for the rt2x00mmio module. | ||
24 | */ | ||
25 | |||
26 | #ifndef RT2X00MMIO_H | ||
27 | #define RT2X00MMIO_H | ||
28 | |||
29 | #include <linux/io.h> | ||
30 | |||
31 | /* | ||
32 | * Register access. | ||
33 | */ | ||
34 | static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev, | ||
35 | const unsigned int offset, | ||
36 | u32 *value) | ||
37 | { | ||
38 | *value = readl(rt2x00dev->csr.base + offset); | ||
39 | } | ||
40 | |||
41 | static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev, | ||
42 | const unsigned int offset, | ||
43 | void *value, const u32 length) | ||
44 | { | ||
45 | memcpy_fromio(value, rt2x00dev->csr.base + offset, length); | ||
46 | } | ||
47 | |||
48 | static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev, | ||
49 | const unsigned int offset, | ||
50 | u32 value) | ||
51 | { | ||
52 | writel(value, rt2x00dev->csr.base + offset); | ||
53 | } | ||
54 | |||
55 | static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev, | ||
56 | const unsigned int offset, | ||
57 | const void *value, | ||
58 | const u32 length) | ||
59 | { | ||
60 | __iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2); | ||
61 | } | ||
62 | |||
63 | /** | ||
64 | * rt2x00pci_regbusy_read - Read from register with busy check | ||
65 | * @rt2x00dev: Device pointer, see &struct rt2x00_dev. | ||
66 | * @offset: Register offset | ||
67 | * @field: Field to check if register is busy | ||
68 | * @reg: Pointer to where register contents should be stored | ||
69 | * | ||
70 | * This function will read the given register, and checks if the | ||
71 | * register is busy. If it is, it will sleep for a couple of | ||
72 | * microseconds before reading the register again. If the register | ||
73 | * is not read after a certain timeout, this function will return | ||
74 | * FALSE. | ||
75 | */ | ||
76 | int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, | ||
77 | const unsigned int offset, | ||
78 | const struct rt2x00_field32 field, | ||
79 | u32 *reg); | ||
80 | |||
81 | /** | ||
82 | * struct queue_entry_priv_pci: Per entry PCI specific information | ||
83 | * | ||
84 | * @desc: Pointer to device descriptor | ||
85 | * @desc_dma: DMA pointer to &desc. | ||
86 | * @data: Pointer to device's entry memory. | ||
87 | * @data_dma: DMA pointer to &data. | ||
88 | */ | ||
89 | struct queue_entry_priv_pci { | ||
90 | __le32 *desc; | ||
91 | dma_addr_t desc_dma; | ||
92 | }; | ||
93 | |||
94 | /** | ||
95 | * rt2x00pci_rxdone - Handle RX done events | ||
96 | * @rt2x00dev: Device pointer, see &struct rt2x00_dev. | ||
97 | * | ||
98 | * Returns true if there are still rx frames pending and false if all | ||
99 | * pending rx frames were processed. | ||
100 | */ | ||
101 | bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev); | ||
102 | |||
103 | /** | ||
104 | * rt2x00pci_flush_queue - Flush data queue | ||
105 | * @queue: Data queue to stop | ||
106 | * @drop: True to drop all pending frames. | ||
107 | * | ||
108 | * This will wait for a maximum of 100ms, waiting for the queues | ||
109 | * to become empty. | ||
110 | */ | ||
111 | void rt2x00pci_flush_queue(struct data_queue *queue, bool drop); | ||
112 | |||
113 | /* | ||
114 | * Device initialization handlers. | ||
115 | */ | ||
116 | int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev); | ||
117 | void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev); | ||
118 | |||
119 | #endif /* RT2X00MMIO_H */ | ||
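rt2x00mmio.h keeps register I/O behind thin static-inline wrappers: readl(), writel(), memcpy_fromio() and __iowrite32_copy() applied to csr.base plus an offset, so chipset drivers never open-code pointer arithmetic on the mapped CSR window. The sketch below shows the same "typed accessors over a base pointer" idea as a runnable userspace analogue; struct fake_dev and the helper names are illustrative only, not driver API.

/* Userspace analogue of rt2x00pci_register_read()/write(): a plain buffer
 * stands in for the ioremap()ed CSR window. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_dev {
	uint8_t csr_base[256];	/* stand-in for the mapped register window */
};

static inline uint32_t reg_read(struct fake_dev *dev, unsigned int offset)
{
	uint32_t value;

	memcpy(&value, dev->csr_base + offset, sizeof(value)); /* readl() stand-in */
	return value;
}

static inline void reg_write(struct fake_dev *dev, unsigned int offset,
			     uint32_t value)
{
	memcpy(dev->csr_base + offset, &value, sizeof(value)); /* writel() stand-in */
}

int main(void)
{
	struct fake_dev dev = { { 0 } };

	reg_write(&dev, 0x10, 0xdeadbeef);
	printf("0x%08x\n", reg_read(&dev, 0x10));
	return 0;
}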
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c index a0c8caef3b0a..e87865e33113 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.c +++ b/drivers/net/wireless/rt2x00/rt2x00pci.c | |||
@@ -33,182 +33,6 @@ | |||
33 | #include "rt2x00pci.h" | 33 | #include "rt2x00pci.h" |
34 | 34 | ||
35 | /* | 35 | /* |
36 | * Register access. | ||
37 | */ | ||
38 | int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, | ||
39 | const unsigned int offset, | ||
40 | const struct rt2x00_field32 field, | ||
41 | u32 *reg) | ||
42 | { | ||
43 | unsigned int i; | ||
44 | |||
45 | if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) | ||
46 | return 0; | ||
47 | |||
48 | for (i = 0; i < REGISTER_BUSY_COUNT; i++) { | ||
49 | rt2x00pci_register_read(rt2x00dev, offset, reg); | ||
50 | if (!rt2x00_get_field32(*reg, field)) | ||
51 | return 1; | ||
52 | udelay(REGISTER_BUSY_DELAY); | ||
53 | } | ||
54 | |||
55 | ERROR(rt2x00dev, "Indirect register access failed: " | ||
56 | "offset=0x%.08x, value=0x%.08x\n", offset, *reg); | ||
57 | *reg = ~0; | ||
58 | |||
59 | return 0; | ||
60 | } | ||
61 | EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read); | ||
62 | |||
63 | bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev) | ||
64 | { | ||
65 | struct data_queue *queue = rt2x00dev->rx; | ||
66 | struct queue_entry *entry; | ||
67 | struct queue_entry_priv_pci *entry_priv; | ||
68 | struct skb_frame_desc *skbdesc; | ||
69 | int max_rx = 16; | ||
70 | |||
71 | while (--max_rx) { | ||
72 | entry = rt2x00queue_get_entry(queue, Q_INDEX); | ||
73 | entry_priv = entry->priv_data; | ||
74 | |||
75 | if (rt2x00dev->ops->lib->get_entry_state(entry)) | ||
76 | break; | ||
77 | |||
78 | /* | ||
79 | * Fill in desc fields of the skb descriptor | ||
80 | */ | ||
81 | skbdesc = get_skb_frame_desc(entry->skb); | ||
82 | skbdesc->desc = entry_priv->desc; | ||
83 | skbdesc->desc_len = entry->queue->desc_size; | ||
84 | |||
85 | /* | ||
86 | * DMA is already done, notify rt2x00lib that | ||
87 | * it finished successfully. | ||
88 | */ | ||
89 | rt2x00lib_dmastart(entry); | ||
90 | rt2x00lib_dmadone(entry); | ||
91 | |||
92 | /* | ||
93 | * Send the frame to rt2x00lib for further processing. | ||
94 | */ | ||
95 | rt2x00lib_rxdone(entry, GFP_ATOMIC); | ||
96 | } | ||
97 | |||
98 | return !max_rx; | ||
99 | } | ||
100 | EXPORT_SYMBOL_GPL(rt2x00pci_rxdone); | ||
101 | |||
102 | void rt2x00pci_flush_queue(struct data_queue *queue, bool drop) | ||
103 | { | ||
104 | unsigned int i; | ||
105 | |||
106 | for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++) | ||
107 | msleep(10); | ||
108 | } | ||
109 | EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue); | ||
110 | |||
111 | /* | ||
112 | * Device initialization handlers. | ||
113 | */ | ||
114 | static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev, | ||
115 | struct data_queue *queue) | ||
116 | { | ||
117 | struct queue_entry_priv_pci *entry_priv; | ||
118 | void *addr; | ||
119 | dma_addr_t dma; | ||
120 | unsigned int i; | ||
121 | |||
122 | /* | ||
123 | * Allocate DMA memory for descriptor and buffer. | ||
124 | */ | ||
125 | addr = dma_alloc_coherent(rt2x00dev->dev, | ||
126 | queue->limit * queue->desc_size, | ||
127 | &dma, GFP_KERNEL); | ||
128 | if (!addr) | ||
129 | return -ENOMEM; | ||
130 | |||
131 | memset(addr, 0, queue->limit * queue->desc_size); | ||
132 | |||
133 | /* | ||
134 | * Initialize all queue entries to contain valid addresses. | ||
135 | */ | ||
136 | for (i = 0; i < queue->limit; i++) { | ||
137 | entry_priv = queue->entries[i].priv_data; | ||
138 | entry_priv->desc = addr + i * queue->desc_size; | ||
139 | entry_priv->desc_dma = dma + i * queue->desc_size; | ||
140 | } | ||
141 | |||
142 | return 0; | ||
143 | } | ||
144 | |||
145 | static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev, | ||
146 | struct data_queue *queue) | ||
147 | { | ||
148 | struct queue_entry_priv_pci *entry_priv = | ||
149 | queue->entries[0].priv_data; | ||
150 | |||
151 | if (entry_priv->desc) | ||
152 | dma_free_coherent(rt2x00dev->dev, | ||
153 | queue->limit * queue->desc_size, | ||
154 | entry_priv->desc, entry_priv->desc_dma); | ||
155 | entry_priv->desc = NULL; | ||
156 | } | ||
157 | |||
158 | int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev) | ||
159 | { | ||
160 | struct data_queue *queue; | ||
161 | int status; | ||
162 | |||
163 | /* | ||
164 | * Allocate DMA | ||
165 | */ | ||
166 | queue_for_each(rt2x00dev, queue) { | ||
167 | status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue); | ||
168 | if (status) | ||
169 | goto exit; | ||
170 | } | ||
171 | |||
172 | /* | ||
173 | * Register interrupt handler. | ||
174 | */ | ||
175 | status = request_irq(rt2x00dev->irq, | ||
176 | rt2x00dev->ops->lib->irq_handler, | ||
177 | IRQF_SHARED, rt2x00dev->name, rt2x00dev); | ||
178 | if (status) { | ||
179 | ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n", | ||
180 | rt2x00dev->irq, status); | ||
181 | goto exit; | ||
182 | } | ||
183 | |||
184 | return 0; | ||
185 | |||
186 | exit: | ||
187 | queue_for_each(rt2x00dev, queue) | ||
188 | rt2x00pci_free_queue_dma(rt2x00dev, queue); | ||
189 | |||
190 | return status; | ||
191 | } | ||
192 | EXPORT_SYMBOL_GPL(rt2x00pci_initialize); | ||
193 | |||
194 | void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev) | ||
195 | { | ||
196 | struct data_queue *queue; | ||
197 | |||
198 | /* | ||
199 | * Free irq line. | ||
200 | */ | ||
201 | free_irq(rt2x00dev->irq, rt2x00dev); | ||
202 | |||
203 | /* | ||
204 | * Free DMA | ||
205 | */ | ||
206 | queue_for_each(rt2x00dev, queue) | ||
207 | rt2x00pci_free_queue_dma(rt2x00dev, queue); | ||
208 | } | ||
209 | EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize); | ||
210 | |||
211 | /* | ||
212 | * PCI driver handlers. | 36 | * PCI driver handlers. |
213 | */ | 37 | */ |
214 | static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev) | 38 | static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev) |
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h index e2c99f2b9a14..60d90b20f8b9 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.h +++ b/drivers/net/wireless/rt2x00/rt2x00pci.h | |||
@@ -36,94 +36,6 @@ | |||
36 | #define PCI_DEVICE_DATA(__ops) .driver_data = (kernel_ulong_t)(__ops) | 36 | #define PCI_DEVICE_DATA(__ops) .driver_data = (kernel_ulong_t)(__ops) |
37 | 37 | ||
38 | /* | 38 | /* |
39 | * Register access. | ||
40 | */ | ||
41 | static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev, | ||
42 | const unsigned int offset, | ||
43 | u32 *value) | ||
44 | { | ||
45 | *value = readl(rt2x00dev->csr.base + offset); | ||
46 | } | ||
47 | |||
48 | static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev, | ||
49 | const unsigned int offset, | ||
50 | void *value, const u32 length) | ||
51 | { | ||
52 | memcpy_fromio(value, rt2x00dev->csr.base + offset, length); | ||
53 | } | ||
54 | |||
55 | static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev, | ||
56 | const unsigned int offset, | ||
57 | u32 value) | ||
58 | { | ||
59 | writel(value, rt2x00dev->csr.base + offset); | ||
60 | } | ||
61 | |||
62 | static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev, | ||
63 | const unsigned int offset, | ||
64 | const void *value, | ||
65 | const u32 length) | ||
66 | { | ||
67 | __iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2); | ||
68 | } | ||
69 | |||
70 | /** | ||
71 | * rt2x00pci_regbusy_read - Read from register with busy check | ||
72 | * @rt2x00dev: Device pointer, see &struct rt2x00_dev. | ||
73 | * @offset: Register offset | ||
74 | * @field: Field to check if register is busy | ||
75 | * @reg: Pointer to where register contents should be stored | ||
76 | * | ||
77 | * This function will read the given register, and checks if the | ||
78 | * register is busy. If it is, it will sleep for a couple of | ||
79 | * microseconds before reading the register again. If the register | ||
80 | * is not read after a certain timeout, this function will return | ||
81 | * FALSE. | ||
82 | */ | ||
83 | int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, | ||
84 | const unsigned int offset, | ||
85 | const struct rt2x00_field32 field, | ||
86 | u32 *reg); | ||
87 | |||
88 | /** | ||
89 | * struct queue_entry_priv_pci: Per entry PCI specific information | ||
90 | * | ||
91 | * @desc: Pointer to device descriptor | ||
92 | * @desc_dma: DMA pointer to &desc. | ||
93 | * @data: Pointer to device's entry memory. | ||
94 | * @data_dma: DMA pointer to &data. | ||
95 | */ | ||
96 | struct queue_entry_priv_pci { | ||
97 | __le32 *desc; | ||
98 | dma_addr_t desc_dma; | ||
99 | }; | ||
100 | |||
101 | /** | ||
102 | * rt2x00pci_rxdone - Handle RX done events | ||
103 | * @rt2x00dev: Device pointer, see &struct rt2x00_dev. | ||
104 | * | ||
105 | * Returns true if there are still rx frames pending and false if all | ||
106 | * pending rx frames were processed. | ||
107 | */ | ||
108 | bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev); | ||
109 | |||
110 | /** | ||
111 | * rt2x00pci_flush_queue - Flush data queue | ||
112 | * @queue: Data queue to stop | ||
113 | * @drop: True to drop all pending frames. | ||
114 | * | ||
115 | * This will wait for a maximum of 100ms, waiting for the queues | ||
116 | * to become empty. | ||
117 | */ | ||
118 | void rt2x00pci_flush_queue(struct data_queue *queue, bool drop); | ||
119 | |||
120 | /* | ||
121 | * Device initialization handlers. | ||
122 | */ | ||
123 | int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev); | ||
124 | void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev); | ||
125 | |||
126 | /* | ||
127 | * PCI driver handlers. | 39 | * PCI driver handlers. |
128 | */ | 40 | */ |
129 | int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops); | 41 | int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops); |
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c index f95792cfcf89..9e3c8ff53e3f 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.c +++ b/drivers/net/wireless/rt2x00/rt61pci.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/eeprom_93cx6.h> | 35 | #include <linux/eeprom_93cx6.h> |
36 | 36 | ||
37 | #include "rt2x00.h" | 37 | #include "rt2x00.h" |
38 | #include "rt2x00mmio.h" | ||
38 | #include "rt2x00pci.h" | 39 | #include "rt2x00pci.h" |
39 | #include "rt61pci.h" | 40 | #include "rt61pci.h" |
40 | 41 | ||
diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c index eef38cfd812e..ca33ae193935 100644 --- a/drivers/nfc/microread/mei.c +++ b/drivers/nfc/microread/mei.c | |||
@@ -22,7 +22,7 @@ | |||
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
24 | #include <linux/gpio.h> | 24 | #include <linux/gpio.h> |
25 | #include <linux/mei_bus.h> | 25 | #include <linux/mei_cl_bus.h> |
26 | 26 | ||
27 | #include <linux/nfc.h> | 27 | #include <linux/nfc.h> |
28 | #include <net/nfc/hci.h> | 28 | #include <net/nfc/hci.h> |
@@ -32,9 +32,6 @@ | |||
32 | 32 | ||
33 | #define MICROREAD_DRIVER_NAME "microread" | 33 | #define MICROREAD_DRIVER_NAME "microread" |
34 | 34 | ||
35 | #define MICROREAD_UUID UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, 0x94, \ | ||
36 | 0xd4, 0x50, 0x26, 0x67, 0x23, 0x77, 0x5c) | ||
37 | |||
38 | struct mei_nfc_hdr { | 35 | struct mei_nfc_hdr { |
39 | u8 cmd; | 36 | u8 cmd; |
40 | u8 status; | 37 | u8 status; |
@@ -48,7 +45,7 @@ struct mei_nfc_hdr { | |||
48 | #define MEI_NFC_MAX_READ (MEI_NFC_HEADER_SIZE + MEI_NFC_MAX_HCI_PAYLOAD) | 45 | #define MEI_NFC_MAX_READ (MEI_NFC_HEADER_SIZE + MEI_NFC_MAX_HCI_PAYLOAD) |
49 | 46 | ||
50 | struct microread_mei_phy { | 47 | struct microread_mei_phy { |
51 | struct mei_device *mei_device; | 48 | struct mei_cl_device *device; |
52 | struct nfc_hci_dev *hdev; | 49 | struct nfc_hci_dev *hdev; |
53 | 50 | ||
54 | int powered; | 51 | int powered; |
@@ -105,14 +102,14 @@ static int microread_mei_write(void *phy_id, struct sk_buff *skb) | |||
105 | 102 | ||
106 | MEI_DUMP_SKB_OUT("mei frame sent", skb); | 103 | MEI_DUMP_SKB_OUT("mei frame sent", skb); |
107 | 104 | ||
108 | r = mei_send(phy->device, skb->data, skb->len); | 105 | r = mei_cl_send(phy->device, skb->data, skb->len); |
109 | if (r > 0) | 106 | if (r > 0) |
110 | r = 0; | 107 | r = 0; |
111 | 108 | ||
112 | return r; | 109 | return r; |
113 | } | 110 | } |
114 | 111 | ||
115 | static void microread_event_cb(struct mei_device *device, u32 events, | 112 | static void microread_event_cb(struct mei_cl_device *device, u32 events, |
116 | void *context) | 113 | void *context) |
117 | { | 114 | { |
118 | struct microread_mei_phy *phy = context; | 115 | struct microread_mei_phy *phy = context; |
@@ -120,7 +117,7 @@ static void microread_event_cb(struct mei_device *device, u32 events, | |||
120 | if (phy->hard_fault != 0) | 117 | if (phy->hard_fault != 0) |
121 | return; | 118 | return; |
122 | 119 | ||
123 | if (events & BIT(MEI_EVENT_RX)) { | 120 | if (events & BIT(MEI_CL_EVENT_RX)) { |
124 | struct sk_buff *skb; | 121 | struct sk_buff *skb; |
125 | int reply_size; | 122 | int reply_size; |
126 | 123 | ||
@@ -128,7 +125,7 @@ static void microread_event_cb(struct mei_device *device, u32 events, | |||
128 | if (!skb) | 125 | if (!skb) |
129 | return; | 126 | return; |
130 | 127 | ||
131 | reply_size = mei_recv(device, skb->data, MEI_NFC_MAX_READ); | 128 | reply_size = mei_cl_recv(device, skb->data, MEI_NFC_MAX_READ); |
132 | if (reply_size < MEI_NFC_HEADER_SIZE) { | 129 | if (reply_size < MEI_NFC_HEADER_SIZE) { |
133 | kfree(skb); | 130 | kfree(skb); |
134 | return; | 131 | return; |
@@ -149,8 +146,8 @@ static struct nfc_phy_ops mei_phy_ops = { | |||
149 | .disable = microread_mei_disable, | 146 | .disable = microread_mei_disable, |
150 | }; | 147 | }; |
151 | 148 | ||
152 | static int microread_mei_probe(struct mei_device *device, | 149 | static int microread_mei_probe(struct mei_cl_device *device, |
153 | const struct mei_id *id) | 150 | const struct mei_cl_device_id *id) |
154 | { | 151 | { |
155 | struct microread_mei_phy *phy; | 152 | struct microread_mei_phy *phy; |
156 | int r; | 153 | int r; |
@@ -164,9 +161,9 @@ static int microread_mei_probe(struct mei_device *device, | |||
164 | } | 161 | } |
165 | 162 | ||
166 | phy->device = device; | 163 | phy->device = device; |
167 | mei_set_clientdata(device, phy); | 164 | mei_cl_set_drvdata(device, phy); |
168 | 165 | ||
169 | r = mei_register_event_cb(device, microread_event_cb, phy); | 166 | r = mei_cl_register_event_cb(device, microread_event_cb, phy); |
170 | if (r) { | 167 | if (r) { |
171 | pr_err(MICROREAD_DRIVER_NAME ": event cb registration failed\n"); | 168 | pr_err(MICROREAD_DRIVER_NAME ": event cb registration failed\n"); |
172 | goto err_out; | 169 | goto err_out; |
@@ -186,9 +183,9 @@ err_out: | |||
186 | return r; | 183 | return r; |
187 | } | 184 | } |
188 | 185 | ||
189 | static int microread_mei_remove(struct mei_device *device) | 186 | static int microread_mei_remove(struct mei_cl_device *device) |
190 | { | 187 | { |
191 | struct microread_mei_phy *phy = mei_get_clientdata(device); | 188 | struct microread_mei_phy *phy = mei_cl_get_drvdata(device); |
192 | 189 | ||
193 | pr_info("Removing microread\n"); | 190 | pr_info("Removing microread\n"); |
194 | 191 | ||
@@ -202,16 +199,15 @@ static int microread_mei_remove(struct mei_device *device) | |||
202 | return 0; | 199 | return 0; |
203 | } | 200 | } |
204 | 201 | ||
205 | static struct mei_id microread_mei_tbl[] = { | 202 | static struct mei_cl_device_id microread_mei_tbl[] = { |
206 | { MICROREAD_DRIVER_NAME, MICROREAD_UUID }, | 203 | { MICROREAD_DRIVER_NAME }, |
207 | 204 | ||
208 | /* required last entry */ | 205 | /* required last entry */ |
209 | { } | 206 | { } |
210 | }; | 207 | }; |
211 | |||
212 | MODULE_DEVICE_TABLE(mei, microread_mei_tbl); | 208 | MODULE_DEVICE_TABLE(mei, microread_mei_tbl); |
213 | 209 | ||
214 | static struct mei_driver microread_driver = { | 210 | static struct mei_cl_driver microread_driver = { |
215 | .id_table = microread_mei_tbl, | 211 | .id_table = microread_mei_tbl, |
216 | .name = MICROREAD_DRIVER_NAME, | 212 | .name = MICROREAD_DRIVER_NAME, |
217 | 213 | ||
@@ -225,7 +221,7 @@ static int microread_mei_init(void) | |||
225 | 221 | ||
226 | pr_debug(DRIVER_DESC ": %s\n", __func__); | 222 | pr_debug(DRIVER_DESC ": %s\n", __func__); |
227 | 223 | ||
228 | r = mei_driver_register(&microread_driver); | 224 | r = mei_cl_driver_register(&microread_driver); |
229 | if (r) { | 225 | if (r) { |
230 | pr_err(MICROREAD_DRIVER_NAME ": driver registration failed\n"); | 226 | pr_err(MICROREAD_DRIVER_NAME ": driver registration failed\n"); |
231 | return r; | 227 | return r; |
@@ -236,7 +232,7 @@ static int microread_mei_init(void) | |||
236 | 232 | ||
237 | static void microread_mei_exit(void) | 233 | static void microread_mei_exit(void) |
238 | { | 234 | { |
239 | mei_driver_unregister(&microread_driver); | 235 | mei_cl_driver_unregister(&microread_driver); |
240 | } | 236 | } |
241 | 237 | ||
242 | module_init(microread_mei_init); | 238 | module_init(microread_mei_init); |
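The microread change is a mechanical rename onto the MEI client bus API: mei_device becomes mei_cl_device, mei_send/mei_recv become mei_cl_send/mei_cl_recv, and registration goes through mei_cl_driver_register(). The skeleton below is a hedged sketch of a client driver in the new naming, built only from the calls visible in this hunk; the "example" identifiers are placeholders, and wiring .probe/.remove into struct mei_cl_driver is assumed to follow the probe/remove functions shown above.

/* Hedged skeleton of an MEI client bus driver; "example" names are placeholders. */
#include <linux/module.h>
#include <linux/mei_cl_bus.h>

static int example_probe(struct mei_cl_device *device,
			 const struct mei_cl_device_id *id)
{
	/* Per-device setup would go here, e.g. mei_cl_set_drvdata(device, priv)
	 * and mei_cl_register_event_cb(device, callback, priv). */
	return 0;
}

static int example_remove(struct mei_cl_device *device)
{
	/* Undo probe, typically via mei_cl_get_drvdata(device). */
	return 0;
}

static struct mei_cl_device_id example_tbl[] = {
	{ "example" },
	{ }			/* required last entry */
};
MODULE_DEVICE_TABLE(mei, example_tbl);

static struct mei_cl_driver example_driver = {
	.id_table = example_tbl,
	.name = "example",
	.probe = example_probe,	/* assumed hookup, mirroring microread */
	.remove = example_remove,
};

static int __init example_init(void)
{
	return mei_cl_driver_register(&example_driver);
}

static void __exit example_exit(void)
{
	mei_cl_driver_unregister(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");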
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index dee5dddaa292..5147c210df52 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c | |||
@@ -53,14 +53,15 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context) | |||
53 | return; | 53 | return; |
54 | } | 54 | } |
55 | 55 | ||
56 | if (!pci_dev->pm_cap || !pci_dev->pme_support | 56 | /* Clear PME Status if set. */ |
57 | || pci_check_pme_status(pci_dev)) { | 57 | if (pci_dev->pme_support) |
58 | if (pci_dev->pme_poll) | 58 | pci_check_pme_status(pci_dev); |
59 | pci_dev->pme_poll = false; | ||
60 | 59 | ||
61 | pci_wakeup_event(pci_dev); | 60 | if (pci_dev->pme_poll) |
62 | pm_runtime_resume(&pci_dev->dev); | 61 | pci_dev->pme_poll = false; |
63 | } | 62 | |
63 | pci_wakeup_event(pci_dev); | ||
64 | pm_runtime_resume(&pci_dev->dev); | ||
64 | 65 | ||
65 | if (pci_dev->subordinate) | 66 | if (pci_dev->subordinate) |
66 | pci_pme_wakeup_bus(pci_dev->subordinate); | 67 | pci_pme_wakeup_bus(pci_dev->subordinate); |
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 1fa1e482a999..79277fb36c6b 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c | |||
@@ -390,9 +390,10 @@ static void pci_device_shutdown(struct device *dev) | |||
390 | 390 | ||
391 | /* | 391 | /* |
392 | * Turn off Bus Master bit on the device to tell it to not | 392 | * Turn off Bus Master bit on the device to tell it to not |
393 | * continue to do DMA | 393 | * continue to do DMA. Don't touch devices in D3cold or unknown states. |
394 | */ | 394 | */ |
395 | pci_clear_master(pci_dev); | 395 | if (pci_dev->current_state <= PCI_D3hot) |
396 | pci_clear_master(pci_dev); | ||
396 | } | 397 | } |
397 | 398 | ||
398 | #ifdef CONFIG_PM | 399 | #ifdef CONFIG_PM |
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 08c243ab034e..ed4d09498337 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c | |||
@@ -185,14 +185,6 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = { | |||
185 | #endif /* !PM */ | 185 | #endif /* !PM */ |
186 | 186 | ||
187 | /* | 187 | /* |
188 | * PCIe port runtime suspend is broken for some chipsets, so use a | ||
189 | * black list to disable runtime PM for these chipsets. | ||
190 | */ | ||
191 | static const struct pci_device_id port_runtime_pm_black_list[] = { | ||
192 | { /* end: all zeroes */ } | ||
193 | }; | ||
194 | |||
195 | /* | ||
196 | * pcie_portdrv_probe - Probe PCI-Express port devices | 188 | * pcie_portdrv_probe - Probe PCI-Express port devices |
197 | * @dev: PCI-Express port device being probed | 189 | * @dev: PCI-Express port device being probed |
198 | * | 190 | * |
@@ -225,16 +217,11 @@ static int pcie_portdrv_probe(struct pci_dev *dev, | |||
225 | * it by default. | 217 | * it by default. |
226 | */ | 218 | */ |
227 | dev->d3cold_allowed = false; | 219 | dev->d3cold_allowed = false; |
228 | if (!pci_match_id(port_runtime_pm_black_list, dev)) | ||
229 | pm_runtime_put_noidle(&dev->dev); | ||
230 | |||
231 | return 0; | 220 | return 0; |
232 | } | 221 | } |
233 | 222 | ||
234 | static void pcie_portdrv_remove(struct pci_dev *dev) | 223 | static void pcie_portdrv_remove(struct pci_dev *dev) |
235 | { | 224 | { |
236 | if (!pci_match_id(port_runtime_pm_black_list, dev)) | ||
237 | pm_runtime_get_noresume(&dev->dev); | ||
238 | pcie_port_device_remove(dev); | 225 | pcie_port_device_remove(dev); |
239 | pci_disable_device(dev); | 226 | pci_disable_device(dev); |
240 | } | 227 | } |
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c index b41ac7756a4b..c5d0a08a8747 100644 --- a/drivers/pci/rom.c +++ b/drivers/pci/rom.c | |||
@@ -100,27 +100,6 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size) | |||
100 | return min((size_t)(image - rom), size); | 100 | return min((size_t)(image - rom), size); |
101 | } | 101 | } |
102 | 102 | ||
103 | static loff_t pci_find_rom(struct pci_dev *pdev, size_t *size) | ||
104 | { | ||
105 | struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; | ||
106 | loff_t start; | ||
107 | |||
108 | /* assign the ROM an address if it doesn't have one */ | ||
109 | if (res->parent == NULL && pci_assign_resource(pdev, PCI_ROM_RESOURCE)) | ||
110 | return 0; | ||
111 | start = pci_resource_start(pdev, PCI_ROM_RESOURCE); | ||
112 | *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); | ||
113 | |||
114 | if (*size == 0) | ||
115 | return 0; | ||
116 | |||
117 | /* Enable ROM space decodes */ | ||
118 | if (pci_enable_rom(pdev)) | ||
119 | return 0; | ||
120 | |||
121 | return start; | ||
122 | } | ||
123 | |||
124 | /** | 103 | /** |
125 | * pci_map_rom - map a PCI ROM to kernel space | 104 | * pci_map_rom - map a PCI ROM to kernel space |
126 | * @pdev: pointer to pci device struct | 105 | * @pdev: pointer to pci device struct |
@@ -135,7 +114,7 @@ static loff_t pci_find_rom(struct pci_dev *pdev, size_t *size) | |||
135 | void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size) | 114 | void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size) |
136 | { | 115 | { |
137 | struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; | 116 | struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; |
138 | loff_t start = 0; | 117 | loff_t start; |
139 | void __iomem *rom; | 118 | void __iomem *rom; |
140 | 119 | ||
141 | /* | 120 | /* |
@@ -154,21 +133,21 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size) | |||
154 | return (void __iomem *)(unsigned long) | 133 | return (void __iomem *)(unsigned long) |
155 | pci_resource_start(pdev, PCI_ROM_RESOURCE); | 134 | pci_resource_start(pdev, PCI_ROM_RESOURCE); |
156 | } else { | 135 | } else { |
157 | start = pci_find_rom(pdev, size); | 136 | /* assign the ROM an address if it doesn't have one */ |
158 | } | 137 | if (res->parent == NULL && |
159 | } | 138 | pci_assign_resource(pdev,PCI_ROM_RESOURCE)) |
139 | return NULL; | ||
140 | start = pci_resource_start(pdev, PCI_ROM_RESOURCE); | ||
141 | *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); | ||
142 | if (*size == 0) | ||
143 | return NULL; | ||
160 | 144 | ||
161 | /* | 145 | /* Enable ROM space decodes */ |
162 | * Some devices may provide ROMs via a source other than the BAR | 146 | if (pci_enable_rom(pdev)) |
163 | */ | 147 | return NULL; |
164 | if (!start && pdev->rom && pdev->romlen) { | 148 | } |
165 | *size = pdev->romlen; | ||
166 | return phys_to_virt(pdev->rom); | ||
167 | } | 149 | } |
168 | 150 | ||
169 | if (!start) | ||
170 | return NULL; | ||
171 | |||
172 | rom = ioremap(start, *size); | 151 | rom = ioremap(start, *size); |
173 | if (!rom) { | 152 | if (!rom) { |
174 | /* restore enable if ioremap fails */ | 153 | /* restore enable if ioremap fails */ |
@@ -202,8 +181,7 @@ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom) | |||
202 | if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) | 181 | if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) |
203 | return; | 182 | return; |
204 | 183 | ||
205 | if (!pdev->rom || !pdev->romlen) | 184 | iounmap(rom); |
206 | iounmap(rom); | ||
207 | 185 | ||
208 | /* Disable again before continuing, leave enabled if pci=rom */ | 186 | /* Disable again before continuing, leave enabled if pci=rom */ |
209 | if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW))) | 187 | if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW))) |
@@ -227,7 +205,24 @@ void pci_cleanup_rom(struct pci_dev *pdev) | |||
227 | } | 205 | } |
228 | } | 206 | } |
229 | 207 | ||
208 | /** | ||
209 | * pci_platform_rom - provides a pointer to any ROM image provided by the | ||
210 | * platform | ||
211 | * @pdev: pointer to pci device struct | ||
212 | * @size: pointer to receive size of pci window over ROM | ||
213 | */ | ||
214 | void __iomem *pci_platform_rom(struct pci_dev *pdev, size_t *size) | ||
215 | { | ||
216 | if (pdev->rom && pdev->romlen) { | ||
217 | *size = pdev->romlen; | ||
218 | return phys_to_virt((phys_addr_t)pdev->rom); | ||
219 | } | ||
220 | |||
221 | return NULL; | ||
222 | } | ||
223 | |||
230 | EXPORT_SYMBOL(pci_map_rom); | 224 | EXPORT_SYMBOL(pci_map_rom); |
231 | EXPORT_SYMBOL(pci_unmap_rom); | 225 | EXPORT_SYMBOL(pci_unmap_rom); |
232 | EXPORT_SYMBOL_GPL(pci_enable_rom); | 226 | EXPORT_SYMBOL_GPL(pci_enable_rom); |
233 | EXPORT_SYMBOL_GPL(pci_disable_rom); | 227 | EXPORT_SYMBOL_GPL(pci_disable_rom); |
228 | EXPORT_SYMBOL(pci_platform_rom); | ||
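After this change pci_map_rom() only deals with BAR-backed ROMs, while images handed over by the platform (pdev->rom/pdev->romlen) are reached through the new pci_platform_rom() helper. The sketch below shows one plausible caller that prefers the platform copy and falls back to mapping the ROM BAR; use_rom_image() is a made-up consumer added just so the example is self-contained.

/* Hedged caller sketch for the split introduced above. */
#include <linux/pci.h>

static void use_rom_image(const void __iomem *rom, size_t size)
{
	pr_info("ROM image of %zu bytes available\n", size);	/* placeholder consumer */
	(void)rom;
}

static int load_rom(struct pci_dev *pdev)
{
	size_t size;
	void __iomem *rom;

	rom = pci_platform_rom(pdev, &size);	/* platform copy, nothing to map */
	if (rom) {
		use_rom_image(rom, size);
		return 0;
	}

	rom = pci_map_rom(pdev, &size);		/* assigns, enables and maps the ROM BAR */
	if (!rom)
		return -ENODEV;

	use_rom_image(rom, size);
	pci_unmap_rom(pdev, rom);		/* unmaps and disables decoding again */
	return 0;
}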
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 45cacf79f3a7..1a779bbfb87d 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c | |||
@@ -134,7 +134,6 @@ static const struct key_entry hp_wmi_keymap[] = { | |||
134 | { KE_KEY, 0x2142, { KEY_MEDIA } }, | 134 | { KE_KEY, 0x2142, { KEY_MEDIA } }, |
135 | { KE_KEY, 0x213b, { KEY_INFO } }, | 135 | { KE_KEY, 0x213b, { KEY_INFO } }, |
136 | { KE_KEY, 0x2169, { KEY_DIRECTION } }, | 136 | { KE_KEY, 0x2169, { KEY_DIRECTION } }, |
137 | { KE_KEY, 0x216a, { KEY_SETUP } }, | ||
138 | { KE_KEY, 0x231b, { KEY_HELP } }, | 137 | { KE_KEY, 0x231b, { KEY_HELP } }, |
139 | { KE_END, 0 } | 138 | { KE_END, 0 } |
140 | }; | 139 | }; |
@@ -925,9 +924,6 @@ static int __init hp_wmi_init(void) | |||
925 | err = hp_wmi_input_setup(); | 924 | err = hp_wmi_input_setup(); |
926 | if (err) | 925 | if (err) |
927 | return err; | 926 | return err; |
928 | |||
929 | //Enable magic for hotkeys that run on the SMBus | ||
930 | ec_write(0xe6,0x6e); | ||
931 | } | 927 | } |
932 | 928 | ||
933 | if (bios_capable) { | 929 | if (bios_capable) { |
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 9a907567f41e..edec135b1685 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c | |||
@@ -1964,9 +1964,6 @@ struct tp_nvram_state { | |||
1964 | /* kthread for the hotkey poller */ | 1964 | /* kthread for the hotkey poller */ |
1965 | static struct task_struct *tpacpi_hotkey_task; | 1965 | static struct task_struct *tpacpi_hotkey_task; |
1966 | 1966 | ||
1967 | /* Acquired while the poller kthread is running, use to sync start/stop */ | ||
1968 | static struct mutex hotkey_thread_mutex; | ||
1969 | |||
1970 | /* | 1967 | /* |
1971 | * Acquire mutex to write poller control variables as an | 1968 | * Acquire mutex to write poller control variables as an |
1972 | * atomic block. | 1969 | * atomic block. |
@@ -2462,8 +2459,6 @@ static int hotkey_kthread(void *data) | |||
2462 | unsigned int poll_freq; | 2459 | unsigned int poll_freq; |
2463 | bool was_frozen; | 2460 | bool was_frozen; |
2464 | 2461 | ||
2465 | mutex_lock(&hotkey_thread_mutex); | ||
2466 | |||
2467 | if (tpacpi_lifecycle == TPACPI_LIFE_EXITING) | 2462 | if (tpacpi_lifecycle == TPACPI_LIFE_EXITING) |
2468 | goto exit; | 2463 | goto exit; |
2469 | 2464 | ||
@@ -2523,7 +2518,6 @@ static int hotkey_kthread(void *data) | |||
2523 | } | 2518 | } |
2524 | 2519 | ||
2525 | exit: | 2520 | exit: |
2526 | mutex_unlock(&hotkey_thread_mutex); | ||
2527 | return 0; | 2521 | return 0; |
2528 | } | 2522 | } |
2529 | 2523 | ||
@@ -2533,9 +2527,6 @@ static void hotkey_poll_stop_sync(void) | |||
2533 | if (tpacpi_hotkey_task) { | 2527 | if (tpacpi_hotkey_task) { |
2534 | kthread_stop(tpacpi_hotkey_task); | 2528 | kthread_stop(tpacpi_hotkey_task); |
2535 | tpacpi_hotkey_task = NULL; | 2529 | tpacpi_hotkey_task = NULL; |
2536 | mutex_lock(&hotkey_thread_mutex); | ||
2537 | /* at this point, the thread did exit */ | ||
2538 | mutex_unlock(&hotkey_thread_mutex); | ||
2539 | } | 2530 | } |
2540 | } | 2531 | } |
2541 | 2532 | ||
@@ -3234,7 +3225,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) | |||
3234 | mutex_init(&hotkey_mutex); | 3225 | mutex_init(&hotkey_mutex); |
3235 | 3226 | ||
3236 | #ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL | 3227 | #ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL |
3237 | mutex_init(&hotkey_thread_mutex); | ||
3238 | mutex_init(&hotkey_thread_data_mutex); | 3228 | mutex_init(&hotkey_thread_data_mutex); |
3239 | #endif | 3229 | #endif |
3240 | 3230 | ||
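The thinkpad_acpi hunks drop hotkey_thread_mutex because kthread_stop() already blocks until the threaded function has returned, so locking a mutex around the thread body added no extra guarantee. A minimal sketch of that start/stop contract, with illustrative names:

/* kthread_stop() returns only after poller() has exited, which is the
 * property the removed mutex duplicated. "poller" is an example name. */
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *poller_task;

static int poller(void *data)
{
	while (!kthread_should_stop()) {
		/* poll the hardware here */
		msleep_interruptible(1000);
	}
	return 0;	/* kthread_stop() unblocks once this returns */
}

static int start_poller(void)
{
	poller_task = kthread_run(poller, NULL, "example-poller");
	if (IS_ERR(poller_task)) {
		int err = PTR_ERR(poller_task);

		poller_task = NULL;
		return err;
	}
	return 0;
}

static void stop_poller(void)
{
	if (poller_task) {
		kthread_stop(poller_task);	/* waits for poller() to finish */
		poller_task = NULL;
	}
}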
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index cc1f7bf53fd0..c6d77e20622c 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig | |||
@@ -4,7 +4,7 @@ menu "Remoteproc drivers" | |||
4 | config REMOTEPROC | 4 | config REMOTEPROC |
5 | tristate | 5 | tristate |
6 | depends on HAS_DMA | 6 | depends on HAS_DMA |
7 | select FW_CONFIG | 7 | select FW_LOADER |
8 | select VIRTIO | 8 | select VIRTIO |
9 | 9 | ||
10 | config OMAP_REMOTEPROC | 10 | config OMAP_REMOTEPROC |
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 29387df4bfc9..8edb4aed5d36 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c | |||
@@ -217,7 +217,7 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i) | |||
217 | * TODO: support predefined notifyids (via resource table) | 217 | * TODO: support predefined notifyids (via resource table) |
218 | */ | 218 | */ |
219 | ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL); | 219 | ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL); |
220 | if (ret) { | 220 | if (ret < 0) { |
221 | dev_err(dev, "idr_alloc failed: %d\n", ret); | 221 | dev_err(dev, "idr_alloc failed: %d\n", ret); |
222 | dma_free_coherent(dev->parent, size, va, dma); | 222 | dma_free_coherent(dev->parent, size, va, dma); |
223 | return ret; | 223 | return ret; |
@@ -366,10 +366,12 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, | |||
366 | /* it is now safe to add the virtio device */ | 366 | /* it is now safe to add the virtio device */ |
367 | ret = rproc_add_virtio_dev(rvdev, rsc->id); | 367 | ret = rproc_add_virtio_dev(rvdev, rsc->id); |
368 | if (ret) | 368 | if (ret) |
369 | goto free_rvdev; | 369 | goto remove_rvdev; |
370 | 370 | ||
371 | return 0; | 371 | return 0; |
372 | 372 | ||
373 | remove_rvdev: | ||
374 | list_del(&rvdev->node); | ||
373 | free_rvdev: | 375 | free_rvdev: |
374 | kfree(rvdev); | 376 | kfree(rvdev); |
375 | return ret; | 377 | return ret; |
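The rproc_alloc_vring() fix matters because idr_alloc() returns the newly allocated ID on success, and any ID greater than zero would have been misread as an error by the old "if (ret)" test; only negative return values are errors. A short sketch of the intended convention:

/* idr_alloc() convention: >= 0 is the allocated ID, < 0 is an errno. */
#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDR(example_idr);

static int track_object(void *obj)
{
	int id;

	id = idr_alloc(&example_idr, obj, 0, 0, GFP_KERNEL);	/* 0, 0 = any free ID */
	if (id < 0)
		return id;	/* -ENOMEM, -ENOSPC, ... */

	/* id may legitimately be non-zero; only negative values mean failure */
	return id;
}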
diff --git a/drivers/remoteproc/ste_modem_rproc.c b/drivers/remoteproc/ste_modem_rproc.c index a7743c069339..fb95c4220052 100644 --- a/drivers/remoteproc/ste_modem_rproc.c +++ b/drivers/remoteproc/ste_modem_rproc.c | |||
@@ -240,6 +240,8 @@ static int sproc_drv_remove(struct platform_device *pdev) | |||
240 | 240 | ||
241 | /* Unregister as remoteproc device */ | 241 | /* Unregister as remoteproc device */ |
242 | rproc_del(sproc->rproc); | 242 | rproc_del(sproc->rproc); |
243 | dma_free_coherent(sproc->rproc->dev.parent, SPROC_FW_SIZE, | ||
244 | sproc->fw_addr, sproc->fw_dma_addr); | ||
243 | rproc_put(sproc->rproc); | 245 | rproc_put(sproc->rproc); |
244 | 246 | ||
245 | mdev->drv_data = NULL; | 247 | mdev->drv_data = NULL; |
@@ -297,10 +299,13 @@ static int sproc_probe(struct platform_device *pdev) | |||
297 | /* Register as a remoteproc device */ | 299 | /* Register as a remoteproc device */ |
298 | err = rproc_add(rproc); | 300 | err = rproc_add(rproc); |
299 | if (err) | 301 | if (err) |
300 | goto free_rproc; | 302 | goto free_mem; |
301 | 303 | ||
302 | return 0; | 304 | return 0; |
303 | 305 | ||
306 | free_mem: | ||
307 | dma_free_coherent(rproc->dev.parent, SPROC_FW_SIZE, | ||
308 | sproc->fw_addr, sproc->fw_dma_addr); | ||
304 | free_rproc: | 309 | free_rproc: |
305 | /* Reset device data upon error */ | 310 | /* Reset device data upon error */ |
306 | mdev->drv_data = NULL; | 311 | mdev->drv_data = NULL; |
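The ste_modem_rproc change pairs the firmware buffer's dma_alloc_coherent() with a dma_free_coherent() on both the probe error path and in remove, closing a leak. The sketch below shows that unwind pairing in isolation; EXAMPLE_FW_SIZE and the register/unregister stubs are placeholders standing in for rproc_add()/rproc_del().

/* Hedged sketch of the alloc/unwind pairing restored by this fix. */
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/gfp.h>

#define EXAMPLE_FW_SIZE (200 * 1024)	/* illustrative size */

static void *fw_addr;
static dma_addr_t fw_dma_addr;

static int register_with_subsystem(void) { return 0; }	/* stand-in for rproc_add() */
static void unregister_with_subsystem(void) { }		/* stand-in for rproc_del() */

static int example_probe(struct platform_device *pdev)
{
	int err;

	fw_addr = dma_alloc_coherent(&pdev->dev, EXAMPLE_FW_SIZE,
				     &fw_dma_addr, GFP_KERNEL);
	if (!fw_addr)
		return -ENOMEM;

	err = register_with_subsystem();
	if (err)
		goto free_mem;
	return 0;

free_mem:
	dma_free_coherent(&pdev->dev, EXAMPLE_FW_SIZE, fw_addr, fw_dma_addr);
	return err;
}

static int example_remove(struct platform_device *pdev)
{
	unregister_with_subsystem();
	dma_free_coherent(&pdev->dev, EXAMPLE_FW_SIZE, fw_addr, fw_dma_addr);
	return 0;
}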
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c index 0a9f27e094ea..434ebc3a99dc 100644 --- a/drivers/rtc/rtc-at91rm9200.c +++ b/drivers/rtc/rtc-at91rm9200.c | |||
@@ -44,7 +44,6 @@ static DECLARE_COMPLETION(at91_rtc_updated); | |||
44 | static unsigned int at91_alarm_year = AT91_RTC_EPOCH; | 44 | static unsigned int at91_alarm_year = AT91_RTC_EPOCH; |
45 | static void __iomem *at91_rtc_regs; | 45 | static void __iomem *at91_rtc_regs; |
46 | static int irq; | 46 | static int irq; |
47 | static u32 at91_rtc_imr; | ||
48 | 47 | ||
49 | /* | 48 | /* |
50 | * Decode time/date into rtc_time structure | 49 | * Decode time/date into rtc_time structure |
@@ -109,11 +108,9 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm) | |||
109 | cr = at91_rtc_read(AT91_RTC_CR); | 108 | cr = at91_rtc_read(AT91_RTC_CR); |
110 | at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM); | 109 | at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM); |
111 | 110 | ||
112 | at91_rtc_imr |= AT91_RTC_ACKUPD; | ||
113 | at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD); | 111 | at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD); |
114 | wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */ | 112 | wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */ |
115 | at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD); | 113 | at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD); |
116 | at91_rtc_imr &= ~AT91_RTC_ACKUPD; | ||
117 | 114 | ||
118 | at91_rtc_write(AT91_RTC_TIMR, | 115 | at91_rtc_write(AT91_RTC_TIMR, |
119 | bin2bcd(tm->tm_sec) << 0 | 116 | bin2bcd(tm->tm_sec) << 0 |
@@ -145,7 +142,7 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm) | |||
145 | tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year); | 142 | tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year); |
146 | tm->tm_year = at91_alarm_year - 1900; | 143 | tm->tm_year = at91_alarm_year - 1900; |
147 | 144 | ||
148 | alrm->enabled = (at91_rtc_imr & AT91_RTC_ALARM) | 145 | alrm->enabled = (at91_rtc_read(AT91_RTC_IMR) & AT91_RTC_ALARM) |
149 | ? 1 : 0; | 146 | ? 1 : 0; |
150 | 147 | ||
151 | dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__, | 148 | dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__, |
@@ -171,7 +168,6 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) | |||
171 | tm.tm_sec = alrm->time.tm_sec; | 168 | tm.tm_sec = alrm->time.tm_sec; |
172 | 169 | ||
173 | at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); | 170 | at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); |
174 | at91_rtc_imr &= ~AT91_RTC_ALARM; | ||
175 | at91_rtc_write(AT91_RTC_TIMALR, | 171 | at91_rtc_write(AT91_RTC_TIMALR, |
176 | bin2bcd(tm.tm_sec) << 0 | 172 | bin2bcd(tm.tm_sec) << 0 |
177 | | bin2bcd(tm.tm_min) << 8 | 173 | | bin2bcd(tm.tm_min) << 8 |
@@ -184,7 +180,6 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) | |||
184 | 180 | ||
185 | if (alrm->enabled) { | 181 | if (alrm->enabled) { |
186 | at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); | 182 | at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); |
187 | at91_rtc_imr |= AT91_RTC_ALARM; | ||
188 | at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); | 183 | at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); |
189 | } | 184 | } |
190 | 185 | ||
@@ -201,12 +196,9 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) | |||
201 | 196 | ||
202 | if (enabled) { | 197 | if (enabled) { |
203 | at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); | 198 | at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); |
204 | at91_rtc_imr |= AT91_RTC_ALARM; | ||
205 | at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); | 199 | at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); |
206 | } else { | 200 | } else |
207 | at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); | 201 | at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); |
208 | at91_rtc_imr &= ~AT91_RTC_ALARM; | ||
209 | } | ||
210 | 202 | ||
211 | return 0; | 203 | return 0; |
212 | } | 204 | } |
@@ -215,10 +207,12 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) | |||
215 | */ | 207 | */ |
216 | static int at91_rtc_proc(struct device *dev, struct seq_file *seq) | 208 | static int at91_rtc_proc(struct device *dev, struct seq_file *seq) |
217 | { | 209 | { |
210 | unsigned long imr = at91_rtc_read(AT91_RTC_IMR); | ||
211 | |||
218 | seq_printf(seq, "update_IRQ\t: %s\n", | 212 | seq_printf(seq, "update_IRQ\t: %s\n", |
219 | (at91_rtc_imr & AT91_RTC_ACKUPD) ? "yes" : "no"); | 213 | (imr & AT91_RTC_ACKUPD) ? "yes" : "no"); |
220 | seq_printf(seq, "periodic_IRQ\t: %s\n", | 214 | seq_printf(seq, "periodic_IRQ\t: %s\n", |
221 | (at91_rtc_imr & AT91_RTC_SECEV) ? "yes" : "no"); | 215 | (imr & AT91_RTC_SECEV) ? "yes" : "no"); |
222 | 216 | ||
223 | return 0; | 217 | return 0; |
224 | } | 218 | } |
@@ -233,7 +227,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id) | |||
233 | unsigned int rtsr; | 227 | unsigned int rtsr; |
234 | unsigned long events = 0; | 228 | unsigned long events = 0; |
235 | 229 | ||
236 | rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_imr; | 230 | rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read(AT91_RTC_IMR); |
237 | if (rtsr) { /* this interrupt is shared! Is it ours? */ | 231 | if (rtsr) { /* this interrupt is shared! Is it ours? */ |
238 | if (rtsr & AT91_RTC_ALARM) | 232 | if (rtsr & AT91_RTC_ALARM) |
239 | events |= (RTC_AF | RTC_IRQF); | 233 | events |= (RTC_AF | RTC_IRQF); |
@@ -297,7 +291,6 @@ static int __init at91_rtc_probe(struct platform_device *pdev) | |||
297 | at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | | 291 | at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | |
298 | AT91_RTC_SECEV | AT91_RTC_TIMEV | | 292 | AT91_RTC_SECEV | AT91_RTC_TIMEV | |
299 | AT91_RTC_CALEV); | 293 | AT91_RTC_CALEV); |
300 | at91_rtc_imr = 0; | ||
301 | 294 | ||
302 | ret = request_irq(irq, at91_rtc_interrupt, | 295 | ret = request_irq(irq, at91_rtc_interrupt, |
303 | IRQF_SHARED, | 296 | IRQF_SHARED, |
@@ -336,7 +329,6 @@ static int __exit at91_rtc_remove(struct platform_device *pdev) | |||
336 | at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | | 329 | at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | |
337 | AT91_RTC_SECEV | AT91_RTC_TIMEV | | 330 | AT91_RTC_SECEV | AT91_RTC_TIMEV | |
338 | AT91_RTC_CALEV); | 331 | AT91_RTC_CALEV); |
339 | at91_rtc_imr = 0; | ||
340 | free_irq(irq, pdev); | 332 | free_irq(irq, pdev); |
341 | 333 | ||
342 | rtc_device_unregister(rtc); | 334 | rtc_device_unregister(rtc); |
@@ -349,35 +341,31 @@ static int __exit at91_rtc_remove(struct platform_device *pdev) | |||
349 | 341 | ||
350 | /* AT91RM9200 RTC Power management control */ | 342 | /* AT91RM9200 RTC Power management control */ |
351 | 343 | ||
352 | static u32 at91_rtc_bkpimr; | 344 | static u32 at91_rtc_imr; |
353 | |||
354 | 345 | ||
355 | static int at91_rtc_suspend(struct device *dev) | 346 | static int at91_rtc_suspend(struct device *dev) |
356 | { | 347 | { |
357 | /* this IRQ is shared with DBGU and other hardware which isn't | 348 | /* this IRQ is shared with DBGU and other hardware which isn't |
358 | * necessarily doing PM like we are... | 349 | * necessarily doing PM like we are... |
359 | */ | 350 | */ |
360 | at91_rtc_bkpimr = at91_rtc_imr & (AT91_RTC_ALARM|AT91_RTC_SECEV); | 351 | at91_rtc_imr = at91_rtc_read(AT91_RTC_IMR) |
361 | if (at91_rtc_bkpimr) { | 352 | & (AT91_RTC_ALARM|AT91_RTC_SECEV); |
362 | if (device_may_wakeup(dev)) { | 353 | if (at91_rtc_imr) { |
354 | if (device_may_wakeup(dev)) | ||
363 | enable_irq_wake(irq); | 355 | enable_irq_wake(irq); |
364 | } else { | 356 | else |
365 | at91_rtc_write(AT91_RTC_IDR, at91_rtc_bkpimr); | 357 | at91_rtc_write(AT91_RTC_IDR, at91_rtc_imr); |
366 | at91_rtc_imr &= ~at91_rtc_bkpimr; | 358 | } |
367 | } | ||
368 | } | ||
369 | return 0; | 359 | return 0; |
370 | } | 360 | } |
371 | 361 | ||
372 | static int at91_rtc_resume(struct device *dev) | 362 | static int at91_rtc_resume(struct device *dev) |
373 | { | 363 | { |
374 | if (at91_rtc_bkpimr) { | 364 | if (at91_rtc_imr) { |
375 | if (device_may_wakeup(dev)) { | 365 | if (device_may_wakeup(dev)) |
376 | disable_irq_wake(irq); | 366 | disable_irq_wake(irq); |
377 | } else { | 367 | else |
378 | at91_rtc_imr |= at91_rtc_bkpimr; | 368 | at91_rtc_write(AT91_RTC_IER, at91_rtc_imr); |
379 | at91_rtc_write(AT91_RTC_IER, at91_rtc_bkpimr); | ||
380 | } | ||
381 | } | 369 | } |
382 | return 0; | 370 | return 0; |
383 | } | 371 | } |
diff --git a/drivers/rtc/rtc-at91rm9200.h b/drivers/rtc/rtc-at91rm9200.h index 5f940b6844cb..da1945e5f714 100644 --- a/drivers/rtc/rtc-at91rm9200.h +++ b/drivers/rtc/rtc-at91rm9200.h | |||
@@ -64,6 +64,7 @@ | |||
64 | #define AT91_RTC_SCCR 0x1c /* Status Clear Command Register */ | 64 | #define AT91_RTC_SCCR 0x1c /* Status Clear Command Register */ |
65 | #define AT91_RTC_IER 0x20 /* Interrupt Enable Register */ | 65 | #define AT91_RTC_IER 0x20 /* Interrupt Enable Register */ |
66 | #define AT91_RTC_IDR 0x24 /* Interrupt Disable Register */ | 66 | #define AT91_RTC_IDR 0x24 /* Interrupt Disable Register */ |
67 | #define AT91_RTC_IMR 0x28 /* Interrupt Mask Register */ | ||
67 | 68 | ||
68 | #define AT91_RTC_VER 0x2c /* Valid Entry Register */ | 69 | #define AT91_RTC_VER 0x2c /* Valid Entry Register */ |
69 | #define AT91_RTC_NVTIM (1 << 0) /* Non valid Time */ | 70 | #define AT91_RTC_NVTIM (1 << 0) /* Non valid Time */ |
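With the AT91_RTC_IMR register defined, the RTC driver no longer shadows the enabled-interrupt mask in a driver variable: normal paths read IMR from hardware, and suspend/resume saves only the wake-relevant bits and re-enables exactly those. The sketch below illustrates that save-and-restore idea (ignoring the device_may_wakeup() branch); the register helpers and bit names are invented stand-ins, not the at91 definitions.

/* Illustration of the suspend/resume mask handling; all names are stand-ins. */
#include <stdint.h>

#define IRQ_ALARM (1u << 1)
#define IRQ_SECEV (1u << 2)

static uint32_t fake_imr = IRQ_ALARM;	/* pretend the alarm interrupt is enabled */
static uint32_t saved_mask;

static uint32_t hw_read_imr(void)       { return fake_imr; }		/* interrupt mask register */
static void hw_write_idr(uint32_t bits) { fake_imr &= ~bits; }		/* disable interrupts */
static void hw_write_ier(uint32_t bits) { fake_imr |= bits; }		/* enable interrupts */

static void example_suspend(void)
{
	saved_mask = hw_read_imr() & (IRQ_ALARM | IRQ_SECEV);
	if (saved_mask)
		hw_write_idr(saved_mask);	/* mask while suspended */
}

static void example_resume(void)
{
	if (saved_mask)
		hw_write_ier(saved_mask);	/* restore exactly what was enabled */
}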
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index 5ac9c935c151..e9b9c8392832 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c | |||
@@ -307,7 +307,7 @@ static void scm_blk_handle_error(struct scm_request *scmrq) | |||
307 | case EQC_WR_PROHIBIT: | 307 | case EQC_WR_PROHIBIT: |
308 | spin_lock_irqsave(&bdev->lock, flags); | 308 | spin_lock_irqsave(&bdev->lock, flags); |
309 | if (bdev->state != SCM_WR_PROHIBIT) | 309 | if (bdev->state != SCM_WR_PROHIBIT) |
310 | pr_info("%lu: Write access to the SCM increment is suspended\n", | 310 | pr_info("%lx: Write access to the SCM increment is suspended\n", |
311 | (unsigned long) bdev->scmdev->address); | 311 | (unsigned long) bdev->scmdev->address); |
312 | bdev->state = SCM_WR_PROHIBIT; | 312 | bdev->state = SCM_WR_PROHIBIT; |
313 | spin_unlock_irqrestore(&bdev->lock, flags); | 313 | spin_unlock_irqrestore(&bdev->lock, flags); |
@@ -445,7 +445,7 @@ void scm_blk_set_available(struct scm_blk_dev *bdev) | |||
445 | 445 | ||
446 | spin_lock_irqsave(&bdev->lock, flags); | 446 | spin_lock_irqsave(&bdev->lock, flags); |
447 | if (bdev->state == SCM_WR_PROHIBIT) | 447 | if (bdev->state == SCM_WR_PROHIBIT) |
448 | pr_info("%lu: Write access to the SCM increment is restored\n", | 448 | pr_info("%lx: Write access to the SCM increment is restored\n", |
449 | (unsigned long) bdev->scmdev->address); | 449 | (unsigned long) bdev->scmdev->address); |
450 | bdev->state = SCM_OPER; | 450 | bdev->state = SCM_OPER; |
451 | spin_unlock_irqrestore(&bdev->lock, flags); | 451 | spin_unlock_irqrestore(&bdev->lock, flags); |
@@ -463,12 +463,15 @@ static int __init scm_blk_init(void) | |||
463 | goto out; | 463 | goto out; |
464 | 464 | ||
465 | scm_major = ret; | 465 | scm_major = ret; |
466 | if (scm_alloc_rqs(nr_requests)) | 466 | ret = scm_alloc_rqs(nr_requests); |
467 | if (ret) | ||
467 | goto out_unreg; | 468 | goto out_unreg; |
468 | 469 | ||
469 | scm_debug = debug_register("scm_log", 16, 1, 16); | 470 | scm_debug = debug_register("scm_log", 16, 1, 16); |
470 | if (!scm_debug) | 471 | if (!scm_debug) { |
472 | ret = -ENOMEM; | ||
471 | goto out_free; | 473 | goto out_free; |
474 | } | ||
472 | 475 | ||
473 | debug_register_view(scm_debug, &debug_hex_ascii_view); | 476 | debug_register_view(scm_debug, &debug_hex_ascii_view); |
474 | debug_set_level(scm_debug, 2); | 477 | debug_set_level(scm_debug, 2); |
diff --git a/drivers/s390/block/scm_drv.c b/drivers/s390/block/scm_drv.c index 5f6180d6ff08..c98cf52d78d1 100644 --- a/drivers/s390/block/scm_drv.c +++ b/drivers/s390/block/scm_drv.c | |||
@@ -19,7 +19,7 @@ static void scm_notify(struct scm_device *scmdev, enum scm_event event) | |||
19 | 19 | ||
20 | switch (event) { | 20 | switch (event) { |
21 | case SCM_CHANGE: | 21 | case SCM_CHANGE: |
22 | pr_info("%lu: The capabilities of the SCM increment changed\n", | 22 | pr_info("%lx: The capabilities of the SCM increment changed\n", |
23 | (unsigned long) scmdev->address); | 23 | (unsigned long) scmdev->address); |
24 | SCM_LOG(2, "State changed"); | 24 | SCM_LOG(2, "State changed"); |
25 | SCM_LOG_STATE(2, scmdev); | 25 | SCM_LOG_STATE(2, scmdev); |
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index b907dba24025..cee69dac3e18 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c | |||
@@ -915,7 +915,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty) | |||
915 | int i, rc; | 915 | int i, rc; |
916 | 916 | ||
917 | /* Check if the tty3270 is already there. */ | 917 | /* Check if the tty3270 is already there. */ |
918 | view = raw3270_find_view(&tty3270_fn, tty->index); | 918 | view = raw3270_find_view(&tty3270_fn, tty->index + RAW3270_FIRSTMINOR); |
919 | if (!IS_ERR(view)) { | 919 | if (!IS_ERR(view)) { |
920 | tp = container_of(view, struct tty3270, view); | 920 | tp = container_of(view, struct tty3270, view); |
921 | tty->driver_data = tp; | 921 | tty->driver_data = tp; |
@@ -927,15 +927,16 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty) | |||
927 | tp->inattr = TF_INPUT; | 927 | tp->inattr = TF_INPUT; |
928 | return tty_port_install(&tp->port, driver, tty); | 928 | return tty_port_install(&tp->port, driver, tty); |
929 | } | 929 | } |
930 | if (tty3270_max_index < tty->index) | 930 | if (tty3270_max_index < tty->index + 1) |
931 | tty3270_max_index = tty->index; | 931 | tty3270_max_index = tty->index + 1; |
932 | 932 | ||
933 | /* Allocate tty3270 structure on first open. */ | 933 | /* Allocate tty3270 structure on first open. */ |
934 | tp = tty3270_alloc_view(); | 934 | tp = tty3270_alloc_view(); |
935 | if (IS_ERR(tp)) | 935 | if (IS_ERR(tp)) |
936 | return PTR_ERR(tp); | 936 | return PTR_ERR(tp); |
937 | 937 | ||
938 | rc = raw3270_add_view(&tp->view, &tty3270_fn, tty->index); | 938 | rc = raw3270_add_view(&tp->view, &tty3270_fn, |
939 | tty->index + RAW3270_FIRSTMINOR); | ||
939 | if (rc) { | 940 | if (rc) { |
940 | tty3270_free_view(tp); | 941 | tty3270_free_view(tp); |
941 | return rc; | 942 | return rc; |
@@ -1846,12 +1847,12 @@ static const struct tty_operations tty3270_ops = { | |||
1846 | 1847 | ||
1847 | void tty3270_create_cb(int minor) | 1848 | void tty3270_create_cb(int minor) |
1848 | { | 1849 | { |
1849 | tty_register_device(tty3270_driver, minor, NULL); | 1850 | tty_register_device(tty3270_driver, minor - RAW3270_FIRSTMINOR, NULL); |
1850 | } | 1851 | } |
1851 | 1852 | ||
1852 | void tty3270_destroy_cb(int minor) | 1853 | void tty3270_destroy_cb(int minor) |
1853 | { | 1854 | { |
1854 | tty_unregister_device(tty3270_driver, minor); | 1855 | tty_unregister_device(tty3270_driver, minor - RAW3270_FIRSTMINOR); |
1855 | } | 1856 | } |
1856 | 1857 | ||
1857 | struct raw3270_notifier tty3270_notifier = | 1858 | struct raw3270_notifier tty3270_notifier = |
@@ -1884,7 +1885,8 @@ static int __init tty3270_init(void) | |||
1884 | driver->driver_name = "tty3270"; | 1885 | driver->driver_name = "tty3270"; |
1885 | driver->name = "3270/tty"; | 1886 | driver->name = "3270/tty"; |
1886 | driver->major = IBM_TTY3270_MAJOR; | 1887 | driver->major = IBM_TTY3270_MAJOR; |
1887 | driver->minor_start = 0; | 1888 | driver->minor_start = RAW3270_FIRSTMINOR; |
1889 | driver->name_base = RAW3270_FIRSTMINOR; | ||
1888 | driver->type = TTY_DRIVER_TYPE_SYSTEM; | 1890 | driver->type = TTY_DRIVER_TYPE_SYSTEM; |
1889 | driver->subtype = SYSTEM_TYPE_TTY; | 1891 | driver->subtype = SYSTEM_TYPE_TTY; |
1890 | driver->init_termios = tty_std_termios; | 1892 | driver->init_termios = tty_std_termios; |
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 8c0622399fcd..6ccb7457746b 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h | |||
@@ -769,6 +769,7 @@ struct qeth_card { | |||
769 | unsigned long thread_start_mask; | 769 | unsigned long thread_start_mask; |
770 | unsigned long thread_allowed_mask; | 770 | unsigned long thread_allowed_mask; |
771 | unsigned long thread_running_mask; | 771 | unsigned long thread_running_mask; |
772 | struct task_struct *recovery_task; | ||
772 | spinlock_t ip_lock; | 773 | spinlock_t ip_lock; |
773 | struct list_head ip_list; | 774 | struct list_head ip_list; |
774 | struct list_head *ip_tbd_list; | 775 | struct list_head *ip_tbd_list; |
@@ -862,6 +863,8 @@ extern struct qeth_card_list_struct qeth_core_card_list; | |||
862 | extern struct kmem_cache *qeth_core_header_cache; | 863 | extern struct kmem_cache *qeth_core_header_cache; |
863 | extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS]; | 864 | extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS]; |
864 | 865 | ||
866 | void qeth_set_recovery_task(struct qeth_card *); | ||
867 | void qeth_clear_recovery_task(struct qeth_card *); | ||
865 | void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int); | 868 | void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int); |
866 | int qeth_threads_running(struct qeth_card *, unsigned long); | 869 | int qeth_threads_running(struct qeth_card *, unsigned long); |
867 | int qeth_wait_for_threads(struct qeth_card *, unsigned long); | 870 | int qeth_wait_for_threads(struct qeth_card *, unsigned long); |
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 0d73a999983d..451f92020599 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -177,6 +177,23 @@ const char *qeth_get_cardname_short(struct qeth_card *card) | |||
177 | return "n/a"; | 177 | return "n/a"; |
178 | } | 178 | } |
179 | 179 | ||
180 | void qeth_set_recovery_task(struct qeth_card *card) | ||
181 | { | ||
182 | card->recovery_task = current; | ||
183 | } | ||
184 | EXPORT_SYMBOL_GPL(qeth_set_recovery_task); | ||
185 | |||
186 | void qeth_clear_recovery_task(struct qeth_card *card) | ||
187 | { | ||
188 | card->recovery_task = NULL; | ||
189 | } | ||
190 | EXPORT_SYMBOL_GPL(qeth_clear_recovery_task); | ||
191 | |||
192 | static bool qeth_is_recovery_task(const struct qeth_card *card) | ||
193 | { | ||
194 | return card->recovery_task == current; | ||
195 | } | ||
196 | |||
180 | void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, | 197 | void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, |
181 | int clear_start_mask) | 198 | int clear_start_mask) |
182 | { | 199 | { |
@@ -205,6 +222,8 @@ EXPORT_SYMBOL_GPL(qeth_threads_running); | |||
205 | 222 | ||
206 | int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads) | 223 | int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads) |
207 | { | 224 | { |
225 | if (qeth_is_recovery_task(card)) | ||
226 | return 0; | ||
208 | return wait_event_interruptible(card->wait_q, | 227 | return wait_event_interruptible(card->wait_q, |
209 | qeth_threads_running(card, threads) == 0); | 228 | qeth_threads_running(card, threads) == 0); |
210 | } | 229 | } |
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index d690166efeaf..155b101bd730 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
@@ -1143,6 +1143,7 @@ static int qeth_l2_recover(void *ptr) | |||
1143 | QETH_CARD_TEXT(card, 2, "recover2"); | 1143 | QETH_CARD_TEXT(card, 2, "recover2"); |
1144 | dev_warn(&card->gdev->dev, | 1144 | dev_warn(&card->gdev->dev, |
1145 | "A recovery process has been started for the device\n"); | 1145 | "A recovery process has been started for the device\n"); |
1146 | qeth_set_recovery_task(card); | ||
1146 | __qeth_l2_set_offline(card->gdev, 1); | 1147 | __qeth_l2_set_offline(card->gdev, 1); |
1147 | rc = __qeth_l2_set_online(card->gdev, 1); | 1148 | rc = __qeth_l2_set_online(card->gdev, 1); |
1148 | if (!rc) | 1149 | if (!rc) |
@@ -1153,6 +1154,7 @@ static int qeth_l2_recover(void *ptr) | |||
1153 | dev_warn(&card->gdev->dev, "The qeth device driver " | 1154 | dev_warn(&card->gdev->dev, "The qeth device driver " |
1154 | "failed to recover an error on the device\n"); | 1155 | "failed to recover an error on the device\n"); |
1155 | } | 1156 | } |
1157 | qeth_clear_recovery_task(card); | ||
1156 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); | 1158 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); |
1157 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); | 1159 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); |
1158 | return 0; | 1160 | return 0; |
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 8710337dab3e..1f7edf1b26c3 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -3515,6 +3515,7 @@ static int qeth_l3_recover(void *ptr) | |||
3515 | QETH_CARD_TEXT(card, 2, "recover2"); | 3515 | QETH_CARD_TEXT(card, 2, "recover2"); |
3516 | dev_warn(&card->gdev->dev, | 3516 | dev_warn(&card->gdev->dev, |
3517 | "A recovery process has been started for the device\n"); | 3517 | "A recovery process has been started for the device\n"); |
3518 | qeth_set_recovery_task(card); | ||
3518 | __qeth_l3_set_offline(card->gdev, 1); | 3519 | __qeth_l3_set_offline(card->gdev, 1); |
3519 | rc = __qeth_l3_set_online(card->gdev, 1); | 3520 | rc = __qeth_l3_set_online(card->gdev, 1); |
3520 | if (!rc) | 3521 | if (!rc) |
@@ -3525,6 +3526,7 @@ static int qeth_l3_recover(void *ptr) | |||
3525 | dev_warn(&card->gdev->dev, "The qeth device driver " | 3526 | dev_warn(&card->gdev->dev, "The qeth device driver " |
3526 | "failed to recover an error on the device\n"); | 3527 | "failed to recover an error on the device\n"); |
3527 | } | 3528 | } |
3529 | qeth_clear_recovery_task(card); | ||
3528 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); | 3530 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); |
3529 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); | 3531 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); |
3530 | return 0; | 3532 | return 0; |
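Taken together, the qeth hunks above implement one pattern: the recovery thread records itself in card->recovery_task so that qeth_wait_for_threads(), when reached from the offline/online path the recovery thread itself drives, returns immediately instead of waiting for its own QETH_RECOVER_THREAD bit to clear. A condensed sketch of that pattern, with field and helper names from the hunks and all unrelated setup elided:

    int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
    {
    	if (card->recovery_task == current)
    		return 0;	/* recovery thread must never wait on itself */
    	return wait_event_interruptible(card->wait_q,
    			qeth_threads_running(card, threads) == 0);
    }

    static int qeth_l2_recover(void *ptr)
    {
    	struct qeth_card *card = ptr;
    	int rc;

    	qeth_set_recovery_task(card);	/* card->recovery_task = current */
    	__qeth_l2_set_offline(card->gdev, 1);
    	rc = __qeth_l2_set_online(card->gdev, 1);
    	qeth_clear_recovery_task(card);	/* card->recovery_task = NULL */
    	return rc;
    }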
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c index 1a9d1e3ce64c..c1441ed282eb 100644 --- a/drivers/sbus/char/bbc_i2c.c +++ b/drivers/sbus/char/bbc_i2c.c | |||
@@ -282,7 +282,7 @@ static irqreturn_t bbc_i2c_interrupt(int irq, void *dev_id) | |||
282 | return IRQ_HANDLED; | 282 | return IRQ_HANDLED; |
283 | } | 283 | } |
284 | 284 | ||
285 | static void __init reset_one_i2c(struct bbc_i2c_bus *bp) | 285 | static void reset_one_i2c(struct bbc_i2c_bus *bp) |
286 | { | 286 | { |
287 | writeb(I2C_PCF_PIN, bp->i2c_control_regs + 0x0); | 287 | writeb(I2C_PCF_PIN, bp->i2c_control_regs + 0x0); |
288 | writeb(bp->own, bp->i2c_control_regs + 0x1); | 288 | writeb(bp->own, bp->i2c_control_regs + 0x1); |
@@ -291,7 +291,7 @@ static void __init reset_one_i2c(struct bbc_i2c_bus *bp) | |||
291 | writeb(I2C_PCF_IDLE, bp->i2c_control_regs + 0x0); | 291 | writeb(I2C_PCF_IDLE, bp->i2c_control_regs + 0x0); |
292 | } | 292 | } |
293 | 293 | ||
294 | static struct bbc_i2c_bus * __init attach_one_i2c(struct platform_device *op, int index) | 294 | static struct bbc_i2c_bus * attach_one_i2c(struct platform_device *op, int index) |
295 | { | 295 | { |
296 | struct bbc_i2c_bus *bp; | 296 | struct bbc_i2c_bus *bp; |
297 | struct device_node *dp; | 297 | struct device_node *dp; |
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 2daf4b0da434..90bc7bd00966 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c | |||
@@ -940,6 +940,7 @@ static int bnx2fc_libfc_config(struct fc_lport *lport) | |||
940 | fc_exch_init(lport); | 940 | fc_exch_init(lport); |
941 | fc_rport_init(lport); | 941 | fc_rport_init(lport); |
942 | fc_disc_init(lport); | 942 | fc_disc_init(lport); |
943 | fc_disc_config(lport, lport); | ||
943 | return 0; | 944 | return 0; |
944 | } | 945 | } |
945 | 946 | ||
@@ -2133,6 +2134,7 @@ static int _bnx2fc_create(struct net_device *netdev, | |||
2133 | } | 2134 | } |
2134 | 2135 | ||
2135 | ctlr = bnx2fc_to_ctlr(interface); | 2136 | ctlr = bnx2fc_to_ctlr(interface); |
2137 | cdev = fcoe_ctlr_to_ctlr_dev(ctlr); | ||
2136 | interface->vlan_id = vlan_id; | 2138 | interface->vlan_id = vlan_id; |
2137 | 2139 | ||
2138 | interface->timer_work_queue = | 2140 | interface->timer_work_queue = |
@@ -2143,7 +2145,7 @@ static int _bnx2fc_create(struct net_device *netdev, | |||
2143 | goto ifput_err; | 2145 | goto ifput_err; |
2144 | } | 2146 | } |
2145 | 2147 | ||
2146 | lport = bnx2fc_if_create(interface, &interface->hba->pcidev->dev, 0); | 2148 | lport = bnx2fc_if_create(interface, &cdev->dev, 0); |
2147 | if (!lport) { | 2149 | if (!lport) { |
2148 | printk(KERN_ERR PFX "Failed to create interface (%s)\n", | 2150 | printk(KERN_ERR PFX "Failed to create interface (%s)\n", |
2149 | netdev->name); | 2151 | netdev->name); |
@@ -2159,8 +2161,6 @@ static int _bnx2fc_create(struct net_device *netdev, | |||
2159 | /* Make this master N_port */ | 2161 | /* Make this master N_port */ |
2160 | ctlr->lp = lport; | 2162 | ctlr->lp = lport; |
2161 | 2163 | ||
2162 | cdev = fcoe_ctlr_to_ctlr_dev(ctlr); | ||
2163 | |||
2164 | if (link_state == BNX2FC_CREATE_LINK_UP) | 2164 | if (link_state == BNX2FC_CREATE_LINK_UP) |
2165 | cdev->enabled = FCOE_CTLR_ENABLED; | 2165 | cdev->enabled = FCOE_CTLR_ENABLED; |
2166 | else | 2166 | else |
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index b5d92fc93c70..9bfdc9a3f897 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c | |||
@@ -490,7 +490,6 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe) | |||
490 | { | 490 | { |
491 | struct net_device *netdev = fcoe->netdev; | 491 | struct net_device *netdev = fcoe->netdev; |
492 | struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe); | 492 | struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe); |
493 | struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip); | ||
494 | 493 | ||
495 | rtnl_lock(); | 494 | rtnl_lock(); |
496 | if (!fcoe->removed) | 495 | if (!fcoe->removed) |
@@ -501,7 +500,6 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe) | |||
501 | /* tear-down the FCoE controller */ | 500 | /* tear-down the FCoE controller */ |
502 | fcoe_ctlr_destroy(fip); | 501 | fcoe_ctlr_destroy(fip); |
503 | scsi_host_put(fip->lp->host); | 502 | scsi_host_put(fip->lp->host); |
504 | fcoe_ctlr_device_delete(ctlr_dev); | ||
505 | dev_put(netdev); | 503 | dev_put(netdev); |
506 | module_put(THIS_MODULE); | 504 | module_put(THIS_MODULE); |
507 | } | 505 | } |
@@ -2194,6 +2192,8 @@ out_nodev: | |||
2194 | */ | 2192 | */ |
2195 | static void fcoe_destroy_work(struct work_struct *work) | 2193 | static void fcoe_destroy_work(struct work_struct *work) |
2196 | { | 2194 | { |
2195 | struct fcoe_ctlr_device *cdev; | ||
2196 | struct fcoe_ctlr *ctlr; | ||
2197 | struct fcoe_port *port; | 2197 | struct fcoe_port *port; |
2198 | struct fcoe_interface *fcoe; | 2198 | struct fcoe_interface *fcoe; |
2199 | struct Scsi_Host *shost; | 2199 | struct Scsi_Host *shost; |
@@ -2224,10 +2224,15 @@ static void fcoe_destroy_work(struct work_struct *work) | |||
2224 | mutex_lock(&fcoe_config_mutex); | 2224 | mutex_lock(&fcoe_config_mutex); |
2225 | 2225 | ||
2226 | fcoe = port->priv; | 2226 | fcoe = port->priv; |
2227 | ctlr = fcoe_to_ctlr(fcoe); | ||
2228 | cdev = fcoe_ctlr_to_ctlr_dev(ctlr); | ||
2229 | |||
2227 | fcoe_if_destroy(port->lport); | 2230 | fcoe_if_destroy(port->lport); |
2228 | fcoe_interface_cleanup(fcoe); | 2231 | fcoe_interface_cleanup(fcoe); |
2229 | 2232 | ||
2230 | mutex_unlock(&fcoe_config_mutex); | 2233 | mutex_unlock(&fcoe_config_mutex); |
2234 | |||
2235 | fcoe_ctlr_device_delete(cdev); | ||
2231 | } | 2236 | } |
2232 | 2237 | ||
2233 | /** | 2238 | /** |
@@ -2335,7 +2340,9 @@ static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode, | |||
2335 | rc = -EIO; | 2340 | rc = -EIO; |
2336 | rtnl_unlock(); | 2341 | rtnl_unlock(); |
2337 | fcoe_interface_cleanup(fcoe); | 2342 | fcoe_interface_cleanup(fcoe); |
2338 | goto out_nortnl; | 2343 | mutex_unlock(&fcoe_config_mutex); |
2344 | fcoe_ctlr_device_delete(ctlr_dev); | ||
2345 | goto out; | ||
2339 | } | 2346 | } |
2340 | 2347 | ||
2341 | /* Make this the "master" N_Port */ | 2348 | /* Make this the "master" N_Port */ |
@@ -2375,8 +2382,8 @@ static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode, | |||
2375 | 2382 | ||
2376 | out_nodev: | 2383 | out_nodev: |
2377 | rtnl_unlock(); | 2384 | rtnl_unlock(); |
2378 | out_nortnl: | ||
2379 | mutex_unlock(&fcoe_config_mutex); | 2385 | mutex_unlock(&fcoe_config_mutex); |
2386 | out: | ||
2380 | return rc; | 2387 | return rc; |
2381 | } | 2388 | } |
2382 | 2389 | ||
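The net effect of the fcoe.c hunks is an ordering change: the fcoe_ctlr_device is looked up before teardown starts, the lport and interface are destroyed under fcoe_config_mutex as before, and fcoe_ctlr_device_delete() runs only after the mutex is dropped (the error path in _fcoe_create() gets the same ordering). A reduced fragment of the destroy path, with names from the hunks and unrelated steps elided:

    	mutex_lock(&fcoe_config_mutex);

    	fcoe = port->priv;
    	ctlr = fcoe_to_ctlr(fcoe);
    	cdev = fcoe_ctlr_to_ctlr_dev(ctlr);	/* capture before teardown */

    	fcoe_if_destroy(port->lport);
    	fcoe_interface_cleanup(fcoe);		/* no longer deletes the ctlr device */

    	mutex_unlock(&fcoe_config_mutex);

    	fcoe_ctlr_device_delete(cdev);		/* now outside fcoe_config_mutex */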
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 08c3bc398da2..a76247201be5 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c | |||
@@ -2815,6 +2815,47 @@ unlock: | |||
2815 | } | 2815 | } |
2816 | 2816 | ||
2817 | /** | 2817 | /** |
2818 | * fcoe_ctlr_mode_set() - Set or reset the ctlr's mode | ||
2819 | * @lport: The local port to be (re)configured | ||
2820 | * @fip: The FCoE controller whose mode is changing | ||
2821 | * @fip_mode: The new fip mode | ||
2822 | * | ||
2823 | * Note that we shouldn't be changing the libfc discovery settings | ||
2824 | * (fc_disc_config) while an lport is going through the libfc state | ||
2825 | * machine. The mode can only be changed when a fcoe_ctlr device is | ||
2826 | * disabled, so that should ensure that this routine is only called | ||
2827 | * when nothing is happening. | ||
2828 | */ | ||
2829 | void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip, | ||
2830 | enum fip_state fip_mode) | ||
2831 | { | ||
2832 | void *priv; | ||
2833 | |||
2834 | WARN_ON(lport->state != LPORT_ST_RESET && | ||
2835 | lport->state != LPORT_ST_DISABLED); | ||
2836 | |||
2837 | if (fip_mode == FIP_MODE_VN2VN) { | ||
2838 | lport->rport_priv_size = sizeof(struct fcoe_rport); | ||
2839 | lport->point_to_multipoint = 1; | ||
2840 | lport->tt.disc_recv_req = fcoe_ctlr_disc_recv; | ||
2841 | lport->tt.disc_start = fcoe_ctlr_disc_start; | ||
2842 | lport->tt.disc_stop = fcoe_ctlr_disc_stop; | ||
2843 | lport->tt.disc_stop_final = fcoe_ctlr_disc_stop_final; | ||
2844 | priv = fip; | ||
2845 | } else { | ||
2846 | lport->rport_priv_size = 0; | ||
2847 | lport->point_to_multipoint = 0; | ||
2848 | lport->tt.disc_recv_req = NULL; | ||
2849 | lport->tt.disc_start = NULL; | ||
2850 | lport->tt.disc_stop = NULL; | ||
2851 | lport->tt.disc_stop_final = NULL; | ||
2852 | priv = lport; | ||
2853 | } | ||
2854 | |||
2855 | fc_disc_config(lport, priv); | ||
2856 | } | ||
2857 | |||
2858 | /** | ||
2818 | * fcoe_libfc_config() - Sets up libfc related properties for local port | 2859 | * fcoe_libfc_config() - Sets up libfc related properties for local port |
2819 | * @lport: The local port to configure libfc for | 2860 | * @lport: The local port to configure libfc for |
2820 | * @fip: The FCoE controller in use by the local port | 2861 | * @fip: The FCoE controller in use by the local port |
@@ -2833,21 +2874,9 @@ int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip, | |||
2833 | fc_exch_init(lport); | 2874 | fc_exch_init(lport); |
2834 | fc_elsct_init(lport); | 2875 | fc_elsct_init(lport); |
2835 | fc_lport_init(lport); | 2876 | fc_lport_init(lport); |
2836 | if (fip->mode == FIP_MODE_VN2VN) | ||
2837 | lport->rport_priv_size = sizeof(struct fcoe_rport); | ||
2838 | fc_rport_init(lport); | 2877 | fc_rport_init(lport); |
2839 | if (fip->mode == FIP_MODE_VN2VN) { | 2878 | fc_disc_init(lport); |
2840 | lport->point_to_multipoint = 1; | 2879 | fcoe_ctlr_mode_set(lport, fip, fip->mode); |
2841 | lport->tt.disc_recv_req = fcoe_ctlr_disc_recv; | ||
2842 | lport->tt.disc_start = fcoe_ctlr_disc_start; | ||
2843 | lport->tt.disc_stop = fcoe_ctlr_disc_stop; | ||
2844 | lport->tt.disc_stop_final = fcoe_ctlr_disc_stop_final; | ||
2845 | mutex_init(&lport->disc.disc_mutex); | ||
2846 | INIT_LIST_HEAD(&lport->disc.rports); | ||
2847 | lport->disc.priv = fip; | ||
2848 | } else { | ||
2849 | fc_disc_init(lport); | ||
2850 | } | ||
2851 | return 0; | 2880 | return 0; |
2852 | } | 2881 | } |
2853 | EXPORT_SYMBOL_GPL(fcoe_libfc_config); | 2882 | EXPORT_SYMBOL_GPL(fcoe_libfc_config); |
@@ -2875,6 +2904,7 @@ EXPORT_SYMBOL(fcoe_fcf_get_selected); | |||
2875 | void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev) | 2904 | void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev) |
2876 | { | 2905 | { |
2877 | struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); | 2906 | struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); |
2907 | struct fc_lport *lport = ctlr->lp; | ||
2878 | 2908 | ||
2879 | mutex_lock(&ctlr->ctlr_mutex); | 2909 | mutex_lock(&ctlr->ctlr_mutex); |
2880 | switch (ctlr_dev->mode) { | 2910 | switch (ctlr_dev->mode) { |
@@ -2888,5 +2918,7 @@ void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev) | |||
2888 | } | 2918 | } |
2889 | 2919 | ||
2890 | mutex_unlock(&ctlr->ctlr_mutex); | 2920 | mutex_unlock(&ctlr->ctlr_mutex); |
2921 | |||
2922 | fcoe_ctlr_mode_set(lport, ctlr, ctlr->mode); | ||
2891 | } | 2923 | } |
2892 | EXPORT_SYMBOL(fcoe_ctlr_set_fip_mode); | 2924 | EXPORT_SYMBOL(fcoe_ctlr_set_fip_mode); |
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index a044f593e8b9..d0fa4b6c551f 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c | |||
@@ -1899,8 +1899,8 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev) | |||
1899 | sdev->allow_restart = 1; | 1899 | sdev->allow_restart = 1; |
1900 | blk_queue_rq_timeout(sdev->request_queue, 120 * HZ); | 1900 | blk_queue_rq_timeout(sdev->request_queue, 120 * HZ); |
1901 | } | 1901 | } |
1902 | scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun); | ||
1903 | spin_unlock_irqrestore(shost->host_lock, lock_flags); | 1902 | spin_unlock_irqrestore(shost->host_lock, lock_flags); |
1903 | scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun); | ||
1904 | return 0; | 1904 | return 0; |
1905 | } | 1905 | } |
1906 | 1906 | ||
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index f328089a1060..2197b57fb225 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
@@ -5148,7 +5148,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd) | |||
5148 | ipr_trace; | 5148 | ipr_trace; |
5149 | } | 5149 | } |
5150 | 5150 | ||
5151 | list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); | 5151 | list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); |
5152 | if (!ipr_is_naca_model(res)) | 5152 | if (!ipr_is_naca_model(res)) |
5153 | res->needs_sync_complete = 1; | 5153 | res->needs_sync_complete = 1; |
5154 | 5154 | ||
@@ -9349,7 +9349,10 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev) | |||
9349 | int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); | 9349 | int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); |
9350 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | 9350 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); |
9351 | 9351 | ||
9352 | rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg); | 9352 | if (ioa_cfg->intr_flag == IPR_USE_MSIX) |
9353 | rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg); | ||
9354 | else | ||
9355 | rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg); | ||
9353 | if (rc) { | 9356 | if (rc) { |
9354 | dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq); | 9357 | dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq); |
9355 | return rc; | 9358 | return rc; |
@@ -9371,7 +9374,10 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev) | |||
9371 | 9374 | ||
9372 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | 9375 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); |
9373 | 9376 | ||
9374 | free_irq(pdev->irq, ioa_cfg); | 9377 | if (ioa_cfg->intr_flag == IPR_USE_MSIX) |
9378 | free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg); | ||
9379 | else | ||
9380 | free_irq(pdev->irq, ioa_cfg); | ||
9375 | 9381 | ||
9376 | LEAVE; | 9382 | LEAVE; |
9377 | 9383 | ||
@@ -9722,6 +9728,7 @@ static void __ipr_remove(struct pci_dev *pdev) | |||
9722 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); | 9728 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); |
9723 | wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); | 9729 | wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); |
9724 | flush_work(&ioa_cfg->work_q); | 9730 | flush_work(&ioa_cfg->work_q); |
9731 | INIT_LIST_HEAD(&ioa_cfg->used_res_q); | ||
9725 | spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); | 9732 | spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); |
9726 | 9733 | ||
9727 | spin_lock(&ipr_driver_lock); | 9734 | spin_lock(&ipr_driver_lock); |
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 8e561e6a557c..880a9068ca12 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c | |||
@@ -712,12 +712,13 @@ static void fc_disc_stop_final(struct fc_lport *lport) | |||
712 | } | 712 | } |
713 | 713 | ||
714 | /** | 714 | /** |
715 | * fc_disc_init() - Initialize the discovery layer for a local port | 715 | * fc_disc_config() - Configure the discovery layer for a local port |
716 | * @lport: The local port that needs the discovery layer to be initialized | 716 | * @lport: The local port that needs the discovery layer to be configured |
717 | * @priv: Private data structure for users of the discovery layer | ||
717 | */ | 718 | */ |
718 | int fc_disc_init(struct fc_lport *lport) | 719 | void fc_disc_config(struct fc_lport *lport, void *priv) |
719 | { | 720 | { |
720 | struct fc_disc *disc; | 721 | struct fc_disc *disc = &lport->disc; |
721 | 722 | ||
722 | if (!lport->tt.disc_start) | 723 | if (!lport->tt.disc_start) |
723 | lport->tt.disc_start = fc_disc_start; | 724 | lport->tt.disc_start = fc_disc_start; |
@@ -732,12 +733,21 @@ int fc_disc_init(struct fc_lport *lport) | |||
732 | lport->tt.disc_recv_req = fc_disc_recv_req; | 733 | lport->tt.disc_recv_req = fc_disc_recv_req; |
733 | 734 | ||
734 | disc = &lport->disc; | 735 | disc = &lport->disc; |
736 | |||
737 | disc->priv = priv; | ||
738 | } | ||
739 | EXPORT_SYMBOL(fc_disc_config); | ||
740 | |||
741 | /** | ||
742 | * fc_disc_init() - Initialize the discovery layer for a local port | ||
743 | * @lport: The local port that needs the discovery layer to be initialized | ||
744 | */ | ||
745 | void fc_disc_init(struct fc_lport *lport) | ||
746 | { | ||
747 | struct fc_disc *disc = &lport->disc; | ||
748 | |||
735 | INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout); | 749 | INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout); |
736 | mutex_init(&disc->disc_mutex); | 750 | mutex_init(&disc->disc_mutex); |
737 | INIT_LIST_HEAD(&disc->rports); | 751 | INIT_LIST_HEAD(&disc->rports); |
738 | |||
739 | disc->priv = lport; | ||
740 | |||
741 | return 0; | ||
742 | } | 752 | } |
743 | EXPORT_SYMBOL(fc_disc_init); | 753 | EXPORT_SYMBOL(fc_disc_init); |
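After this split, fc_disc_init() does only the one-time setup (delayed work, mutex, rport list), while everything that depends on the FIP mode — the discovery callbacks and disc->priv — moves into fc_disc_config(). A sketch of the resulting call sequence, using only function names that appear in the hunks above:

    	/* fcoe_libfc_config(), at lport creation: */
    	fc_disc_init(lport);				/* one-time: work, mutex, rport list */
    	fcoe_ctlr_mode_set(lport, fip, fip->mode);	/* installs callbacks, sets disc->priv */

    	/* bnx2fc_libfc_config(), which has no VN2VN mode to select: */
    	fc_disc_init(lport);
    	fc_disc_config(lport, lport);			/* priv is simply the lport */

    	/* fcoe_ctlr_set_fip_mode(), when sysfs changes the mode on a
    	 * disabled controller: */
    	fcoe_ctlr_mode_set(lport, ctlr, ctlr->mode);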
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index aec2e0da5016..55cbd0180159 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c | |||
@@ -235,6 +235,17 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) | |||
235 | linkrate = phy->linkrate; | 235 | linkrate = phy->linkrate; |
236 | memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); | 236 | memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); |
237 | 237 | ||
238 | /* Handle vacant phy - rest of dr data is not valid so skip it */ | ||
239 | if (phy->phy_state == PHY_VACANT) { | ||
240 | memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); | ||
241 | phy->attached_dev_type = NO_DEVICE; | ||
242 | if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) { | ||
243 | phy->phy_id = phy_id; | ||
244 | goto skip; | ||
245 | } else | ||
246 | goto out; | ||
247 | } | ||
248 | |||
238 | phy->attached_dev_type = to_dev_type(dr); | 249 | phy->attached_dev_type = to_dev_type(dr); |
239 | if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) | 250 | if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) |
240 | goto out; | 251 | goto out; |
@@ -272,6 +283,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) | |||
272 | phy->phy->maximum_linkrate = dr->pmax_linkrate; | 283 | phy->phy->maximum_linkrate = dr->pmax_linkrate; |
273 | phy->phy->negotiated_linkrate = phy->linkrate; | 284 | phy->phy->negotiated_linkrate = phy->linkrate; |
274 | 285 | ||
286 | skip: | ||
275 | if (new_phy) | 287 | if (new_phy) |
276 | if (sas_phy_add(phy->phy)) { | 288 | if (sas_phy_add(phy->phy)) { |
277 | sas_phy_free(phy->phy); | 289 | sas_phy_free(phy->phy); |
@@ -388,7 +400,7 @@ int sas_ex_phy_discover(struct domain_device *dev, int single) | |||
388 | if (!disc_req) | 400 | if (!disc_req) |
389 | return -ENOMEM; | 401 | return -ENOMEM; |
390 | 402 | ||
391 | disc_resp = alloc_smp_req(DISCOVER_RESP_SIZE); | 403 | disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE); |
392 | if (!disc_resp) { | 404 | if (!disc_resp) { |
393 | kfree(disc_req); | 405 | kfree(disc_req); |
394 | return -ENOMEM; | 406 | return -ENOMEM; |
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 74b67d98e952..d43faf34c1e2 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
@@ -438,11 +438,12 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, | |||
438 | struct lpfc_rqe *temp_hrqe; | 438 | struct lpfc_rqe *temp_hrqe; |
439 | struct lpfc_rqe *temp_drqe; | 439 | struct lpfc_rqe *temp_drqe; |
440 | struct lpfc_register doorbell; | 440 | struct lpfc_register doorbell; |
441 | int put_index = hq->host_index; | 441 | int put_index; |
442 | 442 | ||
443 | /* sanity check on queue memory */ | 443 | /* sanity check on queue memory */ |
444 | if (unlikely(!hq) || unlikely(!dq)) | 444 | if (unlikely(!hq) || unlikely(!dq)) |
445 | return -ENOMEM; | 445 | return -ENOMEM; |
446 | put_index = hq->host_index; | ||
446 | temp_hrqe = hq->qe[hq->host_index].rqe; | 447 | temp_hrqe = hq->qe[hq->host_index].rqe; |
447 | temp_drqe = dq->qe[dq->host_index].rqe; | 448 | temp_drqe = dq->qe[dq->host_index].rqe; |
448 | 449 | ||
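The lpfc hunk is a small but classic fix: put_index was initialised from hq->host_index in the declaration, i.e. before the sanity check that guards against hq being NULL, which made the check pointless. A reduced sketch of the corrected shape (signature shortened for illustration):

    static int lpfc_sli4_rq_put_sketch(struct lpfc_queue *hq, struct lpfc_queue *dq)
    {
    	int put_index;

    	/* sanity check on queue memory */
    	if (unlikely(!hq) || unlikely(!dq))
    		return -ENOMEM;

    	put_index = hq->host_index;	/* dereference only after validation */
    	return put_index;
    }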
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 1d82eef4e1eb..b3db9dcc2619 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c | |||
@@ -1938,11 +1938,6 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) | |||
1938 | "Timer for the VP[%d] has stopped\n", vha->vp_idx); | 1938 | "Timer for the VP[%d] has stopped\n", vha->vp_idx); |
1939 | } | 1939 | } |
1940 | 1940 | ||
1941 | /* No pending activities shall be there on the vha now */ | ||
1942 | if (ql2xextended_error_logging & ql_dbg_user) | ||
1943 | msleep(random32()%10); /* Just to see if something falls on | ||
1944 | * the net we have placed below */ | ||
1945 | |||
1946 | BUG_ON(atomic_read(&vha->vref_count)); | 1941 | BUG_ON(atomic_read(&vha->vref_count)); |
1947 | 1942 | ||
1948 | qla2x00_free_fcports(vha); | 1943 | qla2x00_free_fcports(vha); |
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 1626de52e32a..fbc305f1c87c 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c | |||
@@ -15,6 +15,7 @@ | |||
15 | * | Mailbox commands | 0x115b | 0x111a-0x111b | | 15 | * | Mailbox commands | 0x115b | 0x111a-0x111b | |
16 | * | | | 0x112c-0x112e | | 16 | * | | | 0x112c-0x112e | |
17 | * | | | 0x113a | | 17 | * | | | 0x113a | |
18 | * | | | 0x1155-0x1158 | | ||
18 | * | Device Discovery | 0x2087 | 0x2020-0x2022, | | 19 | * | Device Discovery | 0x2087 | 0x2020-0x2022, | |
19 | * | | | 0x2016 | | 20 | * | | | 0x2016 | |
20 | * | Queue Command and IO tracing | 0x3031 | 0x3006-0x300b | | 21 | * | Queue Command and IO tracing | 0x3031 | 0x3006-0x300b | |
@@ -401,7 +402,7 @@ qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr, | |||
401 | void *ring; | 402 | void *ring; |
402 | } aq, *aqp; | 403 | } aq, *aqp; |
403 | 404 | ||
404 | if (!ha->tgt.atio_q_length) | 405 | if (!ha->tgt.atio_ring) |
405 | return ptr; | 406 | return ptr; |
406 | 407 | ||
407 | num_queues = 1; | 408 | num_queues = 1; |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index c6509911772b..65c5ff75936b 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
@@ -863,7 +863,6 @@ typedef struct { | |||
863 | #define MBX_1 BIT_1 | 863 | #define MBX_1 BIT_1 |
864 | #define MBX_0 BIT_0 | 864 | #define MBX_0 BIT_0 |
865 | 865 | ||
866 | #define RNID_TYPE_SET_VERSION 0x9 | ||
867 | #define RNID_TYPE_ASIC_TEMP 0xC | 866 | #define RNID_TYPE_ASIC_TEMP 0xC |
868 | 867 | ||
869 | /* | 868 | /* |
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index eb3ca21a7f17..b310fa97b545 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h | |||
@@ -358,9 +358,6 @@ extern int | |||
358 | qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *); | 358 | qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *); |
359 | 359 | ||
360 | extern int | 360 | extern int |
361 | qla2x00_set_driver_version(scsi_qla_host_t *, char *); | ||
362 | |||
363 | extern int | ||
364 | qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *, | 361 | qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *, |
365 | uint16_t, uint16_t, uint16_t, uint16_t); | 362 | uint16_t, uint16_t, uint16_t, uint16_t); |
366 | 363 | ||
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index edf4d14a1335..b59203393cb2 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -619,8 +619,6 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) | |||
619 | if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) | 619 | if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) |
620 | qla24xx_read_fcp_prio_cfg(vha); | 620 | qla24xx_read_fcp_prio_cfg(vha); |
621 | 621 | ||
622 | qla2x00_set_driver_version(vha, QLA2XXX_VERSION); | ||
623 | |||
624 | return (rval); | 622 | return (rval); |
625 | } | 623 | } |
626 | 624 | ||
@@ -1399,7 +1397,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) | |||
1399 | mq_size += ha->max_rsp_queues * | 1397 | mq_size += ha->max_rsp_queues * |
1400 | (rsp->length * sizeof(response_t)); | 1398 | (rsp->length * sizeof(response_t)); |
1401 | } | 1399 | } |
1402 | if (ha->tgt.atio_q_length) | 1400 | if (ha->tgt.atio_ring) |
1403 | mq_size += ha->tgt.atio_q_length * sizeof(request_t); | 1401 | mq_size += ha->tgt.atio_q_length * sizeof(request_t); |
1404 | /* Allocate memory for Fibre Channel Event Buffer. */ | 1402 | /* Allocate memory for Fibre Channel Event Buffer. */ |
1405 | if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha)) | 1403 | if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha)) |
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 186dd59ce4fa..43345af56431 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c | |||
@@ -3866,64 +3866,6 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) | |||
3866 | return rval; | 3866 | return rval; |
3867 | } | 3867 | } |
3868 | 3868 | ||
3869 | int | ||
3870 | qla2x00_set_driver_version(scsi_qla_host_t *vha, char *version) | ||
3871 | { | ||
3872 | int rval; | ||
3873 | mbx_cmd_t mc; | ||
3874 | mbx_cmd_t *mcp = &mc; | ||
3875 | int len; | ||
3876 | uint16_t dwlen; | ||
3877 | uint8_t *str; | ||
3878 | dma_addr_t str_dma; | ||
3879 | struct qla_hw_data *ha = vha->hw; | ||
3880 | |||
3881 | if (!IS_FWI2_CAPABLE(ha) || IS_QLA82XX(ha)) | ||
3882 | return QLA_FUNCTION_FAILED; | ||
3883 | |||
3884 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1155, | ||
3885 | "Entered %s.\n", __func__); | ||
3886 | |||
3887 | str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma); | ||
3888 | if (!str) { | ||
3889 | ql_log(ql_log_warn, vha, 0x1156, | ||
3890 | "Failed to allocate driver version param.\n"); | ||
3891 | return QLA_MEMORY_ALLOC_FAILED; | ||
3892 | } | ||
3893 | |||
3894 | memcpy(str, "\x7\x3\x11\x0", 4); | ||
3895 | dwlen = str[0]; | ||
3896 | len = dwlen * sizeof(uint32_t) - 4; | ||
3897 | memset(str + 4, 0, len); | ||
3898 | if (len > strlen(version)) | ||
3899 | len = strlen(version); | ||
3900 | memcpy(str + 4, version, len); | ||
3901 | |||
3902 | mcp->mb[0] = MBC_SET_RNID_PARAMS; | ||
3903 | mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen; | ||
3904 | mcp->mb[2] = MSW(LSD(str_dma)); | ||
3905 | mcp->mb[3] = LSW(LSD(str_dma)); | ||
3906 | mcp->mb[6] = MSW(MSD(str_dma)); | ||
3907 | mcp->mb[7] = LSW(MSD(str_dma)); | ||
3908 | mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; | ||
3909 | mcp->in_mb = MBX_0; | ||
3910 | mcp->tov = MBX_TOV_SECONDS; | ||
3911 | mcp->flags = 0; | ||
3912 | rval = qla2x00_mailbox_command(vha, mcp); | ||
3913 | |||
3914 | if (rval != QLA_SUCCESS) { | ||
3915 | ql_dbg(ql_dbg_mbx, vha, 0x1157, | ||
3916 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); | ||
3917 | } else { | ||
3918 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1158, | ||
3919 | "Done %s.\n", __func__); | ||
3920 | } | ||
3921 | |||
3922 | dma_pool_free(ha->s_dma_pool, str, str_dma); | ||
3923 | |||
3924 | return rval; | ||
3925 | } | ||
3926 | |||
3927 | static int | 3869 | static int |
3928 | qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp) | 3870 | qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp) |
3929 | { | 3871 | { |
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 2b6e478d9e33..ec54036d1e12 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h | |||
@@ -7,7 +7,7 @@ | |||
7 | /* | 7 | /* |
8 | * Driver version | 8 | * Driver version |
9 | */ | 9 | */ |
10 | #define QLA2XXX_VERSION "8.04.00.08-k" | 10 | #define QLA2XXX_VERSION "8.04.00.13-k" |
11 | 11 | ||
12 | #define QLA_DRIVER_MAJOR_VER 8 | 12 | #define QLA_DRIVER_MAJOR_VER 8 |
13 | #define QLA_DRIVER_MINOR_VER 4 | 13 | #define QLA_DRIVER_MINOR_VER 4 |
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 86974471af68..2a32036a9404 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c | |||
@@ -4112,6 +4112,10 @@ static int st_probe(struct device *dev) | |||
4112 | tpnt->disk = disk; | 4112 | tpnt->disk = disk; |
4113 | disk->private_data = &tpnt->driver; | 4113 | disk->private_data = &tpnt->driver; |
4114 | disk->queue = SDp->request_queue; | 4114 | disk->queue = SDp->request_queue; |
4115 | /* SCSI tape doesn't register this gendisk via add_disk(). Manually | ||
4116 | * take queue reference that release_disk() expects. */ | ||
4117 | if (!blk_get_queue(disk->queue)) | ||
4118 | goto out_put_disk; | ||
4115 | tpnt->driver = &st_template; | 4119 | tpnt->driver = &st_template; |
4116 | 4120 | ||
4117 | tpnt->device = SDp; | 4121 | tpnt->device = SDp; |
@@ -4185,7 +4189,7 @@ static int st_probe(struct device *dev) | |||
4185 | idr_preload_end(); | 4189 | idr_preload_end(); |
4186 | if (error < 0) { | 4190 | if (error < 0) { |
4187 | pr_warn("st: idr allocation failed: %d\n", error); | 4191 | pr_warn("st: idr allocation failed: %d\n", error); |
4188 | goto out_put_disk; | 4192 | goto out_put_queue; |
4189 | } | 4193 | } |
4190 | tpnt->index = error; | 4194 | tpnt->index = error; |
4191 | sprintf(disk->disk_name, "st%d", tpnt->index); | 4195 | sprintf(disk->disk_name, "st%d", tpnt->index); |
@@ -4211,6 +4215,8 @@ out_remove_devs: | |||
4211 | spin_lock(&st_index_lock); | 4215 | spin_lock(&st_index_lock); |
4212 | idr_remove(&st_index_idr, tpnt->index); | 4216 | idr_remove(&st_index_idr, tpnt->index); |
4213 | spin_unlock(&st_index_lock); | 4217 | spin_unlock(&st_index_lock); |
4218 | out_put_queue: | ||
4219 | blk_put_queue(disk->queue); | ||
4214 | out_put_disk: | 4220 | out_put_disk: |
4215 | put_disk(disk); | 4221 | put_disk(disk); |
4216 | kfree(tpnt); | 4222 | kfree(tpnt); |
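st never registers its gendisk with add_disk(), so the request_queue reference that the disk release path later drops has to be taken by hand; the hunk does that with blk_get_queue() and adds a matching blk_put_queue() to the error unwind. A skeleton of the probe error handling, with unrelated steps elided and the later failing step shown only as a placeholder:

    	disk->queue = SDp->request_queue;
    	/* no add_disk(), so take the queue reference ourselves */
    	if (!blk_get_queue(disk->queue))
    		goto out_put_disk;

    	/* ... a later allocation fails, e.g. the idr index ... */
    	if (error < 0)
    		goto out_put_queue;

    	return 0;

    out_put_queue:
    	blk_put_queue(disk->queue);	/* undo the manual reference */
    out_put_disk:
    	put_disk(disk);
    	kfree(tpnt);
    	return error;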
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index f80eee74a311..2be0de920d67 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig | |||
@@ -55,6 +55,7 @@ comment "SPI Master Controller Drivers" | |||
55 | 55 | ||
56 | config SPI_ALTERA | 56 | config SPI_ALTERA |
57 | tristate "Altera SPI Controller" | 57 | tristate "Altera SPI Controller" |
58 | depends on GENERIC_HARDIRQS | ||
58 | select SPI_BITBANG | 59 | select SPI_BITBANG |
59 | help | 60 | help |
60 | This is the driver for the Altera SPI Controller. | 61 | This is the driver for the Altera SPI Controller. |
@@ -310,7 +311,7 @@ config SPI_PXA2XX_DMA | |||
310 | 311 | ||
311 | config SPI_PXA2XX | 312 | config SPI_PXA2XX |
312 | tristate "PXA2xx SSP SPI master" | 313 | tristate "PXA2xx SSP SPI master" |
313 | depends on ARCH_PXA || PCI || ACPI | 314 | depends on (ARCH_PXA || PCI || ACPI) && GENERIC_HARDIRQS |
314 | select PXA_SSP if ARCH_PXA | 315 | select PXA_SSP if ARCH_PXA |
315 | help | 316 | help |
316 | This enables using a PXA2xx or Sodaville SSP port as a SPI master | 317 | This enables using a PXA2xx or Sodaville SSP port as a SPI master |
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c index 9578af782a77..d7df435d962e 100644 --- a/drivers/spi/spi-bcm63xx.c +++ b/drivers/spi/spi-bcm63xx.c | |||
@@ -152,7 +152,6 @@ static void bcm63xx_spi_setup_transfer(struct spi_device *spi, | |||
152 | static int bcm63xx_spi_setup(struct spi_device *spi) | 152 | static int bcm63xx_spi_setup(struct spi_device *spi) |
153 | { | 153 | { |
154 | struct bcm63xx_spi *bs; | 154 | struct bcm63xx_spi *bs; |
155 | int ret; | ||
156 | 155 | ||
157 | bs = spi_master_get_devdata(spi->master); | 156 | bs = spi_master_get_devdata(spi->master); |
158 | 157 | ||
@@ -490,7 +489,7 @@ static int bcm63xx_spi_probe(struct platform_device *pdev) | |||
490 | default: | 489 | default: |
491 | dev_err(dev, "unsupported MSG_CTL width: %d\n", | 490 | dev_err(dev, "unsupported MSG_CTL width: %d\n", |
492 | bs->msg_ctl_width); | 491 | bs->msg_ctl_width); |
493 | goto out_clk_disable; | 492 | goto out_err; |
494 | } | 493 | } |
495 | 494 | ||
496 | /* Initialize hardware */ | 495 | /* Initialize hardware */ |
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c index 89480b281d74..3e490ee7f275 100644 --- a/drivers/spi/spi-mpc512x-psc.c +++ b/drivers/spi/spi-mpc512x-psc.c | |||
@@ -164,7 +164,7 @@ static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi, | |||
164 | 164 | ||
165 | for (i = count; i > 0; i--) { | 165 | for (i = count; i > 0; i--) { |
166 | data = tx_buf ? *tx_buf++ : 0; | 166 | data = tx_buf ? *tx_buf++ : 0; |
167 | if (len == EOFBYTE) | 167 | if (len == EOFBYTE && t->cs_change) |
168 | setbits32(&fifo->txcmd, MPC512x_PSC_FIFO_EOF); | 168 | setbits32(&fifo->txcmd, MPC512x_PSC_FIFO_EOF); |
169 | out_8(&fifo->txdata_8, data); | 169 | out_8(&fifo->txdata_8, data); |
170 | len--; | 170 | len--; |
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 90b27a3508a6..810413883c79 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c | |||
@@ -1168,7 +1168,6 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) | |||
1168 | 1168 | ||
1169 | master->dev.parent = &pdev->dev; | 1169 | master->dev.parent = &pdev->dev; |
1170 | master->dev.of_node = pdev->dev.of_node; | 1170 | master->dev.of_node = pdev->dev.of_node; |
1171 | ACPI_HANDLE_SET(&master->dev, ACPI_HANDLE(&pdev->dev)); | ||
1172 | /* the spi->mode bits understood by this driver: */ | 1171 | /* the spi->mode bits understood by this driver: */ |
1173 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; | 1172 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; |
1174 | 1173 | ||
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index e862ab8853aa..4188b2faac5c 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c | |||
@@ -994,25 +994,30 @@ static irqreturn_t s3c64xx_spi_irq(int irq, void *data) | |||
994 | { | 994 | { |
995 | struct s3c64xx_spi_driver_data *sdd = data; | 995 | struct s3c64xx_spi_driver_data *sdd = data; |
996 | struct spi_master *spi = sdd->master; | 996 | struct spi_master *spi = sdd->master; |
997 | unsigned int val; | 997 | unsigned int val, clr = 0; |
998 | 998 | ||
999 | val = readl(sdd->regs + S3C64XX_SPI_PENDING_CLR); | 999 | val = readl(sdd->regs + S3C64XX_SPI_STATUS); |
1000 | 1000 | ||
1001 | val &= S3C64XX_SPI_PND_RX_OVERRUN_CLR | | 1001 | if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) { |
1002 | S3C64XX_SPI_PND_RX_UNDERRUN_CLR | | 1002 | clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR; |
1003 | S3C64XX_SPI_PND_TX_OVERRUN_CLR | | ||
1004 | S3C64XX_SPI_PND_TX_UNDERRUN_CLR; | ||
1005 | |||
1006 | writel(val, sdd->regs + S3C64XX_SPI_PENDING_CLR); | ||
1007 | |||
1008 | if (val & S3C64XX_SPI_PND_RX_OVERRUN_CLR) | ||
1009 | dev_err(&spi->dev, "RX overrun\n"); | 1003 | dev_err(&spi->dev, "RX overrun\n"); |
1010 | if (val & S3C64XX_SPI_PND_RX_UNDERRUN_CLR) | 1004 | } |
1005 | if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) { | ||
1006 | clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR; | ||
1011 | dev_err(&spi->dev, "RX underrun\n"); | 1007 | dev_err(&spi->dev, "RX underrun\n"); |
1012 | if (val & S3C64XX_SPI_PND_TX_OVERRUN_CLR) | 1008 | } |
1009 | if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) { | ||
1010 | clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR; | ||
1013 | dev_err(&spi->dev, "TX overrun\n"); | 1011 | dev_err(&spi->dev, "TX overrun\n"); |
1014 | if (val & S3C64XX_SPI_PND_TX_UNDERRUN_CLR) | 1012 | } |
1013 | if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) { | ||
1014 | clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR; | ||
1015 | dev_err(&spi->dev, "TX underrun\n"); | 1015 | dev_err(&spi->dev, "TX underrun\n"); |
1016 | } | ||
1017 | |||
1018 | /* Clear the pending irq by setting and then clearing it */ | ||
1019 | writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR); | ||
1020 | writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR); | ||
1016 | 1021 | ||
1017 | return IRQ_HANDLED; | 1022 | return IRQ_HANDLED; |
1018 | } | 1023 | } |
@@ -1036,9 +1041,13 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel) | |||
1036 | writel(0, regs + S3C64XX_SPI_MODE_CFG); | 1041 | writel(0, regs + S3C64XX_SPI_MODE_CFG); |
1037 | writel(0, regs + S3C64XX_SPI_PACKET_CNT); | 1042 | writel(0, regs + S3C64XX_SPI_PACKET_CNT); |
1038 | 1043 | ||
1039 | /* Clear any irq pending bits */ | 1044 | /* Clear any irq pending bits, should set and clear the bits */ |
1040 | writel(readl(regs + S3C64XX_SPI_PENDING_CLR), | 1045 | val = S3C64XX_SPI_PND_RX_OVERRUN_CLR | |
1041 | regs + S3C64XX_SPI_PENDING_CLR); | 1046 | S3C64XX_SPI_PND_RX_UNDERRUN_CLR | |
1047 | S3C64XX_SPI_PND_TX_OVERRUN_CLR | | ||
1048 | S3C64XX_SPI_PND_TX_UNDERRUN_CLR; | ||
1049 | writel(val, regs + S3C64XX_SPI_PENDING_CLR); | ||
1050 | writel(0, regs + S3C64XX_SPI_PENDING_CLR); | ||
1042 | 1051 | ||
1043 | writel(0, regs + S3C64XX_SPI_SWAP_CFG); | 1052 | writel(0, regs + S3C64XX_SPI_SWAP_CFG); |
1044 | 1053 | ||
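The reworked interrupt handler no longer decides what happened by reading S3C64XX_SPI_PENDING_CLR; it reads the *_ERR flags from the status register, reports only the errors actually set, and then acknowledges them with the set-then-clear sequence on the pending-clear register (the same sequence the hwinit hunk uses). A condensed sketch with the register and bit names from the hunks, two of the four error cases shown:

    static irqreturn_t s3c64xx_spi_irq_sketch(int irq, void *data)
    {
    	struct s3c64xx_spi_driver_data *sdd = data;
    	unsigned int val, clr = 0;

    	val = readl(sdd->regs + S3C64XX_SPI_STATUS);	/* what actually went wrong */

    	if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR)
    		clr |= S3C64XX_SPI_PND_RX_OVERRUN_CLR;
    	if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR)
    		clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
    	/* ... RX underrun and TX overrun handled the same way ... */

    	/* acknowledge by setting, then clearing, the pending bits */
    	writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR);
    	writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR);

    	return IRQ_HANDLED;
    }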
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c index b8698b389ef3..a829563f4713 100644 --- a/drivers/spi/spi-tegra20-slink.c +++ b/drivers/spi/spi-tegra20-slink.c | |||
@@ -858,21 +858,6 @@ static int tegra_slink_setup(struct spi_device *spi) | |||
858 | return 0; | 858 | return 0; |
859 | } | 859 | } |
860 | 860 | ||
861 | static int tegra_slink_prepare_transfer(struct spi_master *master) | ||
862 | { | ||
863 | struct tegra_slink_data *tspi = spi_master_get_devdata(master); | ||
864 | |||
865 | return pm_runtime_get_sync(tspi->dev); | ||
866 | } | ||
867 | |||
868 | static int tegra_slink_unprepare_transfer(struct spi_master *master) | ||
869 | { | ||
870 | struct tegra_slink_data *tspi = spi_master_get_devdata(master); | ||
871 | |||
872 | pm_runtime_put(tspi->dev); | ||
873 | return 0; | ||
874 | } | ||
875 | |||
876 | static int tegra_slink_transfer_one_message(struct spi_master *master, | 861 | static int tegra_slink_transfer_one_message(struct spi_master *master, |
877 | struct spi_message *msg) | 862 | struct spi_message *msg) |
878 | { | 863 | { |
@@ -885,6 +870,12 @@ static int tegra_slink_transfer_one_message(struct spi_master *master, | |||
885 | 870 | ||
886 | msg->status = 0; | 871 | msg->status = 0; |
887 | msg->actual_length = 0; | 872 | msg->actual_length = 0; |
873 | ret = pm_runtime_get_sync(tspi->dev); | ||
874 | if (ret < 0) { | ||
875 | dev_err(tspi->dev, "runtime get failed: %d\n", ret); | ||
876 | goto done; | ||
877 | } | ||
878 | |||
888 | single_xfer = list_is_singular(&msg->transfers); | 879 | single_xfer = list_is_singular(&msg->transfers); |
889 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { | 880 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { |
890 | INIT_COMPLETION(tspi->xfer_completion); | 881 | INIT_COMPLETION(tspi->xfer_completion); |
@@ -921,6 +912,8 @@ static int tegra_slink_transfer_one_message(struct spi_master *master, | |||
921 | exit: | 912 | exit: |
922 | tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND); | 913 | tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND); |
923 | tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2); | 914 | tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2); |
915 | pm_runtime_put(tspi->dev); | ||
916 | done: | ||
924 | msg->status = ret; | 917 | msg->status = ret; |
925 | spi_finalize_current_message(master); | 918 | spi_finalize_current_message(master); |
926 | return ret; | 919 | return ret; |
@@ -1148,9 +1141,7 @@ static int tegra_slink_probe(struct platform_device *pdev) | |||
1148 | /* the spi->mode bits understood by this driver: */ | 1141 | /* the spi->mode bits understood by this driver: */ |
1149 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; | 1142 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; |
1150 | master->setup = tegra_slink_setup; | 1143 | master->setup = tegra_slink_setup; |
1151 | master->prepare_transfer_hardware = tegra_slink_prepare_transfer; | ||
1152 | master->transfer_one_message = tegra_slink_transfer_one_message; | 1144 | master->transfer_one_message = tegra_slink_transfer_one_message; |
1153 | master->unprepare_transfer_hardware = tegra_slink_unprepare_transfer; | ||
1154 | master->num_chipselect = MAX_CHIP_SELECT; | 1145 | master->num_chipselect = MAX_CHIP_SELECT; |
1155 | master->bus_num = -1; | 1146 | master->bus_num = -1; |
1156 | 1147 | ||
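Dropping the prepare/unprepare callbacks and doing runtime PM inside transfer_one_message() lets the driver check the pm_runtime_get_sync() return value and fail the message cleanly, with the matching pm_runtime_put() guaranteed on every exit from the transfer loop. A reduced sketch of the resulting shape (the transfer loop itself is elided):

    static int tegra_slink_transfer_one_message_sketch(struct spi_master *master,
    						       struct spi_message *msg)
    {
    	struct tegra_slink_data *tspi = spi_master_get_devdata(master);
    	int ret;

    	msg->actual_length = 0;
    	ret = pm_runtime_get_sync(tspi->dev);
    	if (ret < 0) {
    		dev_err(tspi->dev, "runtime get failed: %d\n", ret);
    		goto done;
    	}

    	/* ... run the transfers; ret is 0 on success, an error otherwise ... */

    	pm_runtime_put(tspi->dev);	/* always pairs with the get above */
    done:
    	msg->status = ret;
    	spi_finalize_current_message(master);
    	return ret;
    }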
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index f996c600eb8c..004b10f184d4 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -543,17 +543,16 @@ static void spi_pump_messages(struct kthread_work *work) | |||
543 | /* Lock queue and check for queue work */ | 543 | /* Lock queue and check for queue work */ |
544 | spin_lock_irqsave(&master->queue_lock, flags); | 544 | spin_lock_irqsave(&master->queue_lock, flags); |
545 | if (list_empty(&master->queue) || !master->running) { | 545 | if (list_empty(&master->queue) || !master->running) { |
546 | if (master->busy && master->unprepare_transfer_hardware) { | 546 | if (!master->busy) { |
547 | ret = master->unprepare_transfer_hardware(master); | 547 | spin_unlock_irqrestore(&master->queue_lock, flags); |
548 | if (ret) { | 548 | return; |
549 | spin_unlock_irqrestore(&master->queue_lock, flags); | ||
550 | dev_err(&master->dev, | ||
551 | "failed to unprepare transfer hardware\n"); | ||
552 | return; | ||
553 | } | ||
554 | } | 549 | } |
555 | master->busy = false; | 550 | master->busy = false; |
556 | spin_unlock_irqrestore(&master->queue_lock, flags); | 551 | spin_unlock_irqrestore(&master->queue_lock, flags); |
552 | if (master->unprepare_transfer_hardware && | ||
553 | master->unprepare_transfer_hardware(master)) | ||
554 | dev_err(&master->dev, | ||
555 | "failed to unprepare transfer hardware\n"); | ||
557 | return; | 556 | return; |
558 | } | 557 | } |
559 | 558 | ||
@@ -984,7 +983,7 @@ static void acpi_register_spi_devices(struct spi_master *master) | |||
984 | acpi_status status; | 983 | acpi_status status; |
985 | acpi_handle handle; | 984 | acpi_handle handle; |
986 | 985 | ||
987 | handle = ACPI_HANDLE(&master->dev); | 986 | handle = ACPI_HANDLE(master->dev.parent); |
988 | if (!handle) | 987 | if (!handle) |
989 | return; | 988 | return; |
990 | 989 | ||
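In the old spi_pump_messages() idle path the unprepare_transfer_hardware() callback was invoked with the queue spinlock held and interrupts off; since the callback may sleep in some drivers, the rework makes the busy/idle decision under the lock and only calls the hook after dropping it. A sketch of the reworked idle branch, names as in the hunk:

    	spin_lock_irqsave(&master->queue_lock, flags);
    	if (list_empty(&master->queue) || !master->running) {
    		if (!master->busy) {
    			spin_unlock_irqrestore(&master->queue_lock, flags);
    			return;
    		}
    		master->busy = false;
    		spin_unlock_irqrestore(&master->queue_lock, flags);

    		/* lock dropped: safe even if the hook sleeps */
    		if (master->unprepare_transfer_hardware &&
    		    master->unprepare_transfer_hardware(master))
    			dev_err(&master->dev,
    				"failed to unprepare transfer hardware\n");
    		return;
    	}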
diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c index 4c0f6d883dd3..7b0bce936762 100644 --- a/drivers/ssb/driver_chipcommon_pmu.c +++ b/drivers/ssb/driver_chipcommon_pmu.c | |||
@@ -675,3 +675,32 @@ u32 ssb_pmu_get_controlclock(struct ssb_chipcommon *cc) | |||
675 | return 0; | 675 | return 0; |
676 | } | 676 | } |
677 | } | 677 | } |
678 | |||
679 | void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid) | ||
680 | { | ||
681 | u32 pmu_ctl = 0; | ||
682 | |||
683 | switch (cc->dev->bus->chip_id) { | ||
684 | case 0x4322: | ||
685 | ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL0, 0x11100070); | ||
686 | ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL1, 0x1014140a); | ||
687 | ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL5, 0x88888854); | ||
688 | if (spuravoid == 1) | ||
689 | ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x05201828); | ||
690 | else | ||
691 | ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x05001828); | ||
692 | pmu_ctl = SSB_CHIPCO_PMU_CTL_PLL_UPD; | ||
693 | break; | ||
694 | case 43222: | ||
695 | /* TODO: BCM43222 requires updating PLLs too */ | ||
696 | return; | ||
697 | default: | ||
698 | ssb_printk(KERN_ERR PFX | ||
699 | "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n", | ||
700 | cc->dev->bus->chip_id); | ||
701 | return; | ||
702 | } | ||
703 | |||
704 | chipco_set32(cc, SSB_CHIPCO_PMU_CTL, pmu_ctl); | ||
705 | } | ||
706 | EXPORT_SYMBOL_GPL(ssb_pmu_spuravoid_pllupdate); | ||
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index ff1c5ee352cb..cbe48ab41745 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c | |||
@@ -409,6 +409,7 @@ static inline int core_alua_state_standby( | |||
409 | case REPORT_LUNS: | 409 | case REPORT_LUNS: |
410 | case RECEIVE_DIAGNOSTIC: | 410 | case RECEIVE_DIAGNOSTIC: |
411 | case SEND_DIAGNOSTIC: | 411 | case SEND_DIAGNOSTIC: |
412 | return 0; | ||
412 | case MAINTENANCE_IN: | 413 | case MAINTENANCE_IN: |
413 | switch (cdb[1] & 0x1f) { | 414 | switch (cdb[1] & 0x1f) { |
414 | case MI_REPORT_TARGET_PGS: | 415 | case MI_REPORT_TARGET_PGS: |
@@ -451,6 +452,7 @@ static inline int core_alua_state_unavailable( | |||
451 | switch (cdb[0]) { | 452 | switch (cdb[0]) { |
452 | case INQUIRY: | 453 | case INQUIRY: |
453 | case REPORT_LUNS: | 454 | case REPORT_LUNS: |
455 | return 0; | ||
454 | case MAINTENANCE_IN: | 456 | case MAINTENANCE_IN: |
455 | switch (cdb[1] & 0x1f) { | 457 | switch (cdb[1] & 0x1f) { |
456 | case MI_REPORT_TARGET_PGS: | 458 | case MI_REPORT_TARGET_PGS: |
@@ -491,6 +493,7 @@ static inline int core_alua_state_transition( | |||
491 | switch (cdb[0]) { | 493 | switch (cdb[0]) { |
492 | case INQUIRY: | 494 | case INQUIRY: |
493 | case REPORT_LUNS: | 495 | case REPORT_LUNS: |
496 | return 0; | ||
494 | case MAINTENANCE_IN: | 497 | case MAINTENANCE_IN: |
495 | switch (cdb[1] & 0x1f) { | 498 | switch (cdb[1] & 0x1f) { |
496 | case MI_REPORT_TARGET_PGS: | 499 | case MI_REPORT_TARGET_PGS: |
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c index 484b6a3c9b03..302909ccf183 100644 --- a/drivers/tty/mxser.c +++ b/drivers/tty/mxser.c | |||
@@ -2643,9 +2643,9 @@ static int mxser_probe(struct pci_dev *pdev, | |||
2643 | mxvar_sdriver, brd->idx + i, &pdev->dev); | 2643 | mxvar_sdriver, brd->idx + i, &pdev->dev); |
2644 | if (IS_ERR(tty_dev)) { | 2644 | if (IS_ERR(tty_dev)) { |
2645 | retval = PTR_ERR(tty_dev); | 2645 | retval = PTR_ERR(tty_dev); |
2646 | for (i--; i >= 0; i--) | 2646 | for (; i > 0; i--) |
2647 | tty_unregister_device(mxvar_sdriver, | 2647 | tty_unregister_device(mxvar_sdriver, |
2648 | brd->idx + i); | 2648 | brd->idx + i - 1); |
2649 | goto err_relbrd; | 2649 | goto err_relbrd; |
2650 | } | 2650 | } |
2651 | } | 2651 | } |
@@ -2751,9 +2751,9 @@ static int __init mxser_module_init(void) | |||
2751 | tty_dev = tty_port_register_device(&brd->ports[i].port, | 2751 | tty_dev = tty_port_register_device(&brd->ports[i].port, |
2752 | mxvar_sdriver, brd->idx + i, NULL); | 2752 | mxvar_sdriver, brd->idx + i, NULL); |
2753 | if (IS_ERR(tty_dev)) { | 2753 | if (IS_ERR(tty_dev)) { |
2754 | for (i--; i >= 0; i--) | 2754 | for (; i > 0; i--) |
2755 | tty_unregister_device(mxvar_sdriver, | 2755 | tty_unregister_device(mxvar_sdriver, |
2756 | brd->idx + i); | 2756 | brd->idx + i - 1); |
2757 | for (i = 0; i < brd->info->nports; i++) | 2757 | for (i = 0; i < brd->info->nports; i++) |
2758 | tty_port_destroy(&brd->ports[i].port); | 2758 | tty_port_destroy(&brd->ports[i].port); |
2759 | free_irq(brd->irq, brd); | 2759 | free_irq(brd->irq, brd); |
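The mxser hunks restructure the error-path loops that undo the tty registrations done so far: the rewritten loop unregisters indices i-1 down to 0 and leaves i at zero when it finishes. A minimal sketch of that partial-cleanup pattern, with hypothetical register/unregister helpers in place of tty_port_register_device()/tty_unregister_device():

#include <stdio.h>

#define NPORTS 4

/* Hypothetical helpers; registration fails at index 2 for the demo. */
static int register_port(int idx)    { return idx == 2 ? -1 : 0; }
static void unregister_port(int idx) { printf("unregister %d\n", idx); }

int main(void)
{
	int i;

	for (i = 0; i < NPORTS; i++) {
		if (register_port(i) < 0) {
			/* Undo ports 0..i-1; i ends at 0, ready for reuse. */
			for (; i > 0; i--)
				unregister_port(i - 1);
			return 1;
		}
	}
	return 0;
}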
diff --git a/drivers/tty/serial/8250/8250_pnp.c b/drivers/tty/serial/8250/8250_pnp.c index b3455a970a1d..35d9ab95c5cb 100644 --- a/drivers/tty/serial/8250/8250_pnp.c +++ b/drivers/tty/serial/8250/8250_pnp.c | |||
@@ -429,7 +429,6 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) | |||
429 | { | 429 | { |
430 | struct uart_8250_port uart; | 430 | struct uart_8250_port uart; |
431 | int ret, line, flags = dev_id->driver_data; | 431 | int ret, line, flags = dev_id->driver_data; |
432 | struct resource *res = NULL; | ||
433 | 432 | ||
434 | if (flags & UNKNOWN_DEV) { | 433 | if (flags & UNKNOWN_DEV) { |
435 | ret = serial_pnp_guess_board(dev); | 434 | ret = serial_pnp_guess_board(dev); |
@@ -440,12 +439,11 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) | |||
440 | memset(&uart, 0, sizeof(uart)); | 439 | memset(&uart, 0, sizeof(uart)); |
441 | if (pnp_irq_valid(dev, 0)) | 440 | if (pnp_irq_valid(dev, 0)) |
442 | uart.port.irq = pnp_irq(dev, 0); | 441 | uart.port.irq = pnp_irq(dev, 0); |
443 | if ((flags & CIR_PORT) && pnp_port_valid(dev, 2)) | 442 | if ((flags & CIR_PORT) && pnp_port_valid(dev, 2)) { |
444 | res = pnp_get_resource(dev, IORESOURCE_IO, 2); | 443 | uart.port.iobase = pnp_port_start(dev, 2); |
445 | else if (pnp_port_valid(dev, 0)) | 444 | uart.port.iotype = UPIO_PORT; |
446 | res = pnp_get_resource(dev, IORESOURCE_IO, 0); | 445 | } else if (pnp_port_valid(dev, 0)) { |
447 | if (pnp_resource_enabled(res)) { | 446 | uart.port.iobase = pnp_port_start(dev, 0); |
448 | uart.port.iobase = res->start; | ||
449 | uart.port.iotype = UPIO_PORT; | 447 | uart.port.iotype = UPIO_PORT; |
450 | } else if (pnp_mem_valid(dev, 0)) { | 448 | } else if (pnp_mem_valid(dev, 0)) { |
451 | uart.port.mapbase = pnp_mem_start(dev, 0); | 449 | uart.port.mapbase = pnp_mem_start(dev, 0); |
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index 4dc41408ecb7..30d4f7a783cd 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c | |||
@@ -886,6 +886,17 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios, | |||
886 | serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR); | 886 | serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR); |
887 | /* FIFO ENABLE, DMA MODE */ | 887 | /* FIFO ENABLE, DMA MODE */ |
888 | 888 | ||
889 | up->scr |= OMAP_UART_SCR_RX_TRIG_GRANU1_MASK; | ||
890 | /* | ||
891 | * NOTE: Setting OMAP_UART_SCR_RX_TRIG_GRANU1_MASK | ||
892 | * enables a granularity of 1 for the RX TRIGGER | ||
893 | * level. Along with the RX FIFO trigger level | ||
894 | * setting below (nominally 16 characters) and | ||
895 | * TLR[3:0] set to zero, this results in an RX FIFO | ||
896 | * threshold of 1 character, not the 16 noted | ||
897 | * below. | ||
898 | */ | ||
899 | |||
889 | /* Set receive FIFO threshold to 16 characters and | 900 | /* Set receive FIFO threshold to 16 characters and |
890 | * transmit FIFO threshold to 16 spaces | 901 | * transmit FIFO threshold to 16 spaces |
891 | */ | 902 | */ |
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 05400acbc456..b0452688308c 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c | |||
@@ -941,6 +941,14 @@ void start_tty(struct tty_struct *tty) | |||
941 | 941 | ||
942 | EXPORT_SYMBOL(start_tty); | 942 | EXPORT_SYMBOL(start_tty); |
943 | 943 | ||
944 | static void tty_update_time(struct timespec *time) | ||
945 | { | ||
946 | unsigned long sec = get_seconds(); | ||
947 | sec -= sec % 60; | ||
948 | if ((long)(sec - time->tv_sec) > 0) | ||
949 | time->tv_sec = sec; | ||
950 | } | ||
951 | |||
944 | /** | 952 | /** |
945 | * tty_read - read method for tty device files | 953 | * tty_read - read method for tty device files |
946 | * @file: pointer to tty file | 954 | * @file: pointer to tty file |
@@ -960,10 +968,11 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count, | |||
960 | loff_t *ppos) | 968 | loff_t *ppos) |
961 | { | 969 | { |
962 | int i; | 970 | int i; |
971 | struct inode *inode = file_inode(file); | ||
963 | struct tty_struct *tty = file_tty(file); | 972 | struct tty_struct *tty = file_tty(file); |
964 | struct tty_ldisc *ld; | 973 | struct tty_ldisc *ld; |
965 | 974 | ||
966 | if (tty_paranoia_check(tty, file_inode(file), "tty_read")) | 975 | if (tty_paranoia_check(tty, inode, "tty_read")) |
967 | return -EIO; | 976 | return -EIO; |
968 | if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags))) | 977 | if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags))) |
969 | return -EIO; | 978 | return -EIO; |
@@ -977,6 +986,9 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count, | |||
977 | i = -EIO; | 986 | i = -EIO; |
978 | tty_ldisc_deref(ld); | 987 | tty_ldisc_deref(ld); |
979 | 988 | ||
989 | if (i > 0) | ||
990 | tty_update_time(&inode->i_atime); | ||
991 | |||
980 | return i; | 992 | return i; |
981 | } | 993 | } |
982 | 994 | ||
@@ -1077,8 +1089,10 @@ static inline ssize_t do_tty_write( | |||
1077 | break; | 1089 | break; |
1078 | cond_resched(); | 1090 | cond_resched(); |
1079 | } | 1091 | } |
1080 | if (written) | 1092 | if (written) { |
1093 | tty_update_time(&file_inode(file)->i_mtime); | ||
1081 | ret = written; | 1094 | ret = written; |
1095 | } | ||
1082 | out: | 1096 | out: |
1083 | tty_write_unlock(tty); | 1097 | tty_write_unlock(tty); |
1084 | return ret; | 1098 | return ret; |
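The tty_io.c hunks add a tty_update_time() helper and call it after successful reads and writes, so a tty's i_atime and i_mtime advance with one-minute granularity and only ever move forward (the kernel version uses a wraparound-safe signed subtraction for the comparison). A standalone sketch of the same rounding rule, using a plain time_t instead of struct timespec:

#include <stdio.h>
#include <time.h>

/* Minute-granularity update: never move the timestamp backwards. */
static void update_time(time_t *stamp)
{
	time_t sec = time(NULL);

	sec -= sec % 60;		/* round down to the minute */
	if (sec > *stamp)
		*stamp = sec;
}

int main(void)
{
	time_t atime = 0;

	update_time(&atime);
	printf("first update:  %ld\n", (long)atime);
	update_time(&atime);		/* same minute: no change */
	printf("second update: %ld\n", (long)atime);
	return 0;
}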
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c index 797f9d514732..65d4e55552c6 100644 --- a/drivers/usb/core/port.c +++ b/drivers/usb/core/port.c | |||
@@ -67,7 +67,6 @@ static void usb_port_device_release(struct device *dev) | |||
67 | { | 67 | { |
68 | struct usb_port *port_dev = to_usb_port(dev); | 68 | struct usb_port *port_dev = to_usb_port(dev); |
69 | 69 | ||
70 | dev_pm_qos_hide_flags(dev); | ||
71 | kfree(port_dev); | 70 | kfree(port_dev); |
72 | } | 71 | } |
73 | 72 | ||
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 8189cb6a86af..7abc5c81af2c 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c | |||
@@ -346,6 +346,7 @@ static long vfio_pci_ioctl(void *device_data, | |||
346 | 346 | ||
347 | if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) { | 347 | if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) { |
348 | size_t size; | 348 | size_t size; |
349 | int max = vfio_pci_get_irq_count(vdev, hdr.index); | ||
349 | 350 | ||
350 | if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL) | 351 | if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL) |
351 | size = sizeof(uint8_t); | 352 | size = sizeof(uint8_t); |
@@ -355,7 +356,7 @@ static long vfio_pci_ioctl(void *device_data, | |||
355 | return -EINVAL; | 356 | return -EINVAL; |
356 | 357 | ||
357 | if (hdr.argsz - minsz < hdr.count * size || | 358 | if (hdr.argsz - minsz < hdr.count * size || |
358 | hdr.count > vfio_pci_get_irq_count(vdev, hdr.index)) | 359 | hdr.start >= max || hdr.start + hdr.count > max) |
359 | return -EINVAL; | 360 | return -EINVAL; |
360 | 361 | ||
361 | data = memdup_user((void __user *)(arg + minsz), | 362 | data = memdup_user((void __user *)(arg + minsz), |
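The vfio_pci hunk caches the device's IRQ count and rejects any irq-set request whose [start, start + count) window falls outside it, instead of only bounding count. A userspace-style sketch of that range validation, with hypothetical field names standing in for the ioctl header:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical request: operate on 'count' IRQs starting at 'start'. */
struct irq_set_req { uint32_t start; uint32_t count; };

static int validate(const struct irq_set_req *req, uint32_t max_irqs)
{
	/* start must index an existing IRQ and the whole range must fit;
	 * the 64-bit cast guards the addition against overflow.          */
	if (req->start >= max_irqs ||
	    req->start + (uint64_t)req->count > max_irqs)
		return -1;
	return 0;
}

int main(void)
{
	struct irq_set_req ok  = { .start = 0, .count = 4 };
	struct irq_set_req bad = { .start = 3, .count = 4 };

	printf("ok:  %d\n", validate(&ok, 4));	/* 0  */
	printf("bad: %d\n", validate(&bad, 4));	/* -1 */
	return 0;
}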
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c index 2968b4934659..957a0b98a5d9 100644 --- a/drivers/vhost/tcm_vhost.c +++ b/drivers/vhost/tcm_vhost.c | |||
@@ -74,9 +74,8 @@ enum { | |||
74 | 74 | ||
75 | struct vhost_scsi { | 75 | struct vhost_scsi { |
76 | /* Protected by vhost_scsi->dev.mutex */ | 76 | /* Protected by vhost_scsi->dev.mutex */ |
77 | struct tcm_vhost_tpg *vs_tpg[VHOST_SCSI_MAX_TARGET]; | 77 | struct tcm_vhost_tpg **vs_tpg; |
78 | char vs_vhost_wwpn[TRANSPORT_IQN_LEN]; | 78 | char vs_vhost_wwpn[TRANSPORT_IQN_LEN]; |
79 | bool vs_endpoint; | ||
80 | 79 | ||
81 | struct vhost_dev dev; | 80 | struct vhost_dev dev; |
82 | struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ]; | 81 | struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ]; |
@@ -579,9 +578,27 @@ static void tcm_vhost_submission_work(struct work_struct *work) | |||
579 | } | 578 | } |
580 | } | 579 | } |
581 | 580 | ||
581 | static void vhost_scsi_send_bad_target(struct vhost_scsi *vs, | ||
582 | struct vhost_virtqueue *vq, int head, unsigned out) | ||
583 | { | ||
584 | struct virtio_scsi_cmd_resp __user *resp; | ||
585 | struct virtio_scsi_cmd_resp rsp; | ||
586 | int ret; | ||
587 | |||
588 | memset(&rsp, 0, sizeof(rsp)); | ||
589 | rsp.response = VIRTIO_SCSI_S_BAD_TARGET; | ||
590 | resp = vq->iov[out].iov_base; | ||
591 | ret = __copy_to_user(resp, &rsp, sizeof(rsp)); | ||
592 | if (!ret) | ||
593 | vhost_add_used_and_signal(&vs->dev, vq, head, 0); | ||
594 | else | ||
595 | pr_err("Faulted on virtio_scsi_cmd_resp\n"); | ||
596 | } | ||
597 | |||
582 | static void vhost_scsi_handle_vq(struct vhost_scsi *vs, | 598 | static void vhost_scsi_handle_vq(struct vhost_scsi *vs, |
583 | struct vhost_virtqueue *vq) | 599 | struct vhost_virtqueue *vq) |
584 | { | 600 | { |
601 | struct tcm_vhost_tpg **vs_tpg; | ||
585 | struct virtio_scsi_cmd_req v_req; | 602 | struct virtio_scsi_cmd_req v_req; |
586 | struct tcm_vhost_tpg *tv_tpg; | 603 | struct tcm_vhost_tpg *tv_tpg; |
587 | struct tcm_vhost_cmd *tv_cmd; | 604 | struct tcm_vhost_cmd *tv_cmd; |
@@ -590,8 +607,16 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs, | |||
590 | int head, ret; | 607 | int head, ret; |
591 | u8 target; | 608 | u8 target; |
592 | 609 | ||
593 | /* Must use ioctl VHOST_SCSI_SET_ENDPOINT */ | 610 | /* |
594 | if (unlikely(!vs->vs_endpoint)) | 611 | * We can handle the vq only after the endpoint is setup by calling the |
612 | * VHOST_SCSI_SET_ENDPOINT ioctl. | ||
613 | * | ||
614 | * TODO: Check that we are running from vhost_worker which acts | ||
615 | * as read-side critical section for vhost kind of RCU. | ||
616 | * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h | ||
617 | */ | ||
618 | vs_tpg = rcu_dereference_check(vq->private_data, 1); | ||
619 | if (!vs_tpg) | ||
595 | return; | 620 | return; |
596 | 621 | ||
597 | mutex_lock(&vq->mutex); | 622 | mutex_lock(&vq->mutex); |
@@ -661,23 +686,11 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs, | |||
661 | 686 | ||
662 | /* Extract the tpgt */ | 687 | /* Extract the tpgt */ |
663 | target = v_req.lun[1]; | 688 | target = v_req.lun[1]; |
664 | tv_tpg = vs->vs_tpg[target]; | 689 | tv_tpg = ACCESS_ONCE(vs_tpg[target]); |
665 | 690 | ||
666 | /* Target does not exist, fail the request */ | 691 | /* Target does not exist, fail the request */ |
667 | if (unlikely(!tv_tpg)) { | 692 | if (unlikely(!tv_tpg)) { |
668 | struct virtio_scsi_cmd_resp __user *resp; | 693 | vhost_scsi_send_bad_target(vs, vq, head, out); |
669 | struct virtio_scsi_cmd_resp rsp; | ||
670 | |||
671 | memset(&rsp, 0, sizeof(rsp)); | ||
672 | rsp.response = VIRTIO_SCSI_S_BAD_TARGET; | ||
673 | resp = vq->iov[out].iov_base; | ||
674 | ret = __copy_to_user(resp, &rsp, sizeof(rsp)); | ||
675 | if (!ret) | ||
676 | vhost_add_used_and_signal(&vs->dev, | ||
677 | vq, head, 0); | ||
678 | else | ||
679 | pr_err("Faulted on virtio_scsi_cmd_resp\n"); | ||
680 | |||
681 | continue; | 694 | continue; |
682 | } | 695 | } |
683 | 696 | ||
@@ -690,22 +703,13 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs, | |||
690 | if (IS_ERR(tv_cmd)) { | 703 | if (IS_ERR(tv_cmd)) { |
691 | vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n", | 704 | vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n", |
692 | PTR_ERR(tv_cmd)); | 705 | PTR_ERR(tv_cmd)); |
693 | break; | 706 | goto err_cmd; |
694 | } | 707 | } |
695 | pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction" | 708 | pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction" |
696 | ": %d\n", tv_cmd, exp_data_len, data_direction); | 709 | ": %d\n", tv_cmd, exp_data_len, data_direction); |
697 | 710 | ||
698 | tv_cmd->tvc_vhost = vs; | 711 | tv_cmd->tvc_vhost = vs; |
699 | tv_cmd->tvc_vq = vq; | 712 | tv_cmd->tvc_vq = vq; |
700 | |||
701 | if (unlikely(vq->iov[out].iov_len != | ||
702 | sizeof(struct virtio_scsi_cmd_resp))) { | ||
703 | vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu" | ||
704 | " bytes, out: %d, in: %d\n", | ||
705 | vq->iov[out].iov_len, out, in); | ||
706 | break; | ||
707 | } | ||
708 | |||
709 | tv_cmd->tvc_resp = vq->iov[out].iov_base; | 713 | tv_cmd->tvc_resp = vq->iov[out].iov_base; |
710 | 714 | ||
711 | /* | 715 | /* |
@@ -725,7 +729,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs, | |||
725 | " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", | 729 | " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", |
726 | scsi_command_size(tv_cmd->tvc_cdb), | 730 | scsi_command_size(tv_cmd->tvc_cdb), |
727 | TCM_VHOST_MAX_CDB_SIZE); | 731 | TCM_VHOST_MAX_CDB_SIZE); |
728 | break; /* TODO */ | 732 | goto err_free; |
729 | } | 733 | } |
730 | tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF; | 734 | tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF; |
731 | 735 | ||
@@ -738,7 +742,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs, | |||
738 | data_direction == DMA_TO_DEVICE); | 742 | data_direction == DMA_TO_DEVICE); |
739 | if (unlikely(ret)) { | 743 | if (unlikely(ret)) { |
740 | vq_err(vq, "Failed to map iov to sgl\n"); | 744 | vq_err(vq, "Failed to map iov to sgl\n"); |
741 | break; /* TODO */ | 745 | goto err_free; |
742 | } | 746 | } |
743 | } | 747 | } |
744 | 748 | ||
@@ -759,6 +763,13 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs, | |||
759 | } | 763 | } |
760 | 764 | ||
761 | mutex_unlock(&vq->mutex); | 765 | mutex_unlock(&vq->mutex); |
766 | return; | ||
767 | |||
768 | err_free: | ||
769 | vhost_scsi_free_cmd(tv_cmd); | ||
770 | err_cmd: | ||
771 | vhost_scsi_send_bad_target(vs, vq, head, out); | ||
772 | mutex_unlock(&vq->mutex); | ||
762 | } | 773 | } |
763 | 774 | ||
764 | static void vhost_scsi_ctl_handle_kick(struct vhost_work *work) | 775 | static void vhost_scsi_ctl_handle_kick(struct vhost_work *work) |
@@ -780,6 +791,20 @@ static void vhost_scsi_handle_kick(struct vhost_work *work) | |||
780 | vhost_scsi_handle_vq(vs, vq); | 791 | vhost_scsi_handle_vq(vs, vq); |
781 | } | 792 | } |
782 | 793 | ||
794 | static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index) | ||
795 | { | ||
796 | vhost_poll_flush(&vs->dev.vqs[index].poll); | ||
797 | } | ||
798 | |||
799 | static void vhost_scsi_flush(struct vhost_scsi *vs) | ||
800 | { | ||
801 | int i; | ||
802 | |||
803 | for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) | ||
804 | vhost_scsi_flush_vq(vs, i); | ||
805 | vhost_work_flush(&vs->dev, &vs->vs_completion_work); | ||
806 | } | ||
807 | |||
783 | /* | 808 | /* |
784 | * Called from vhost_scsi_ioctl() context to walk the list of available | 809 | * Called from vhost_scsi_ioctl() context to walk the list of available |
785 | * tcm_vhost_tpg with an active struct tcm_vhost_nexus | 810 | * tcm_vhost_tpg with an active struct tcm_vhost_nexus |
@@ -790,8 +815,10 @@ static int vhost_scsi_set_endpoint( | |||
790 | { | 815 | { |
791 | struct tcm_vhost_tport *tv_tport; | 816 | struct tcm_vhost_tport *tv_tport; |
792 | struct tcm_vhost_tpg *tv_tpg; | 817 | struct tcm_vhost_tpg *tv_tpg; |
818 | struct tcm_vhost_tpg **vs_tpg; | ||
819 | struct vhost_virtqueue *vq; | ||
820 | int index, ret, i, len; | ||
793 | bool match = false; | 821 | bool match = false; |
794 | int index, ret; | ||
795 | 822 | ||
796 | mutex_lock(&vs->dev.mutex); | 823 | mutex_lock(&vs->dev.mutex); |
797 | /* Verify that ring has been setup correctly. */ | 824 | /* Verify that ring has been setup correctly. */ |
@@ -803,6 +830,15 @@ static int vhost_scsi_set_endpoint( | |||
803 | } | 830 | } |
804 | } | 831 | } |
805 | 832 | ||
833 | len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET; | ||
834 | vs_tpg = kzalloc(len, GFP_KERNEL); | ||
835 | if (!vs_tpg) { | ||
836 | mutex_unlock(&vs->dev.mutex); | ||
837 | return -ENOMEM; | ||
838 | } | ||
839 | if (vs->vs_tpg) | ||
840 | memcpy(vs_tpg, vs->vs_tpg, len); | ||
841 | |||
806 | mutex_lock(&tcm_vhost_mutex); | 842 | mutex_lock(&tcm_vhost_mutex); |
807 | list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) { | 843 | list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) { |
808 | mutex_lock(&tv_tpg->tv_tpg_mutex); | 844 | mutex_lock(&tv_tpg->tv_tpg_mutex); |
@@ -817,14 +853,15 @@ static int vhost_scsi_set_endpoint( | |||
817 | tv_tport = tv_tpg->tport; | 853 | tv_tport = tv_tpg->tport; |
818 | 854 | ||
819 | if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) { | 855 | if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) { |
820 | if (vs->vs_tpg[tv_tpg->tport_tpgt]) { | 856 | if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) { |
821 | mutex_unlock(&tv_tpg->tv_tpg_mutex); | 857 | mutex_unlock(&tv_tpg->tv_tpg_mutex); |
822 | mutex_unlock(&tcm_vhost_mutex); | 858 | mutex_unlock(&tcm_vhost_mutex); |
823 | mutex_unlock(&vs->dev.mutex); | 859 | mutex_unlock(&vs->dev.mutex); |
860 | kfree(vs_tpg); | ||
824 | return -EEXIST; | 861 | return -EEXIST; |
825 | } | 862 | } |
826 | tv_tpg->tv_tpg_vhost_count++; | 863 | tv_tpg->tv_tpg_vhost_count++; |
827 | vs->vs_tpg[tv_tpg->tport_tpgt] = tv_tpg; | 864 | vs_tpg[tv_tpg->tport_tpgt] = tv_tpg; |
828 | smp_mb__after_atomic_inc(); | 865 | smp_mb__after_atomic_inc(); |
829 | match = true; | 866 | match = true; |
830 | } | 867 | } |
@@ -835,12 +872,27 @@ static int vhost_scsi_set_endpoint( | |||
835 | if (match) { | 872 | if (match) { |
836 | memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn, | 873 | memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn, |
837 | sizeof(vs->vs_vhost_wwpn)); | 874 | sizeof(vs->vs_vhost_wwpn)); |
838 | vs->vs_endpoint = true; | 875 | for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { |
876 | vq = &vs->vqs[i]; | ||
877 | /* Flushing the vhost_work acts as synchronize_rcu */ | ||
878 | mutex_lock(&vq->mutex); | ||
879 | rcu_assign_pointer(vq->private_data, vs_tpg); | ||
880 | vhost_init_used(vq); | ||
881 | mutex_unlock(&vq->mutex); | ||
882 | } | ||
839 | ret = 0; | 883 | ret = 0; |
840 | } else { | 884 | } else { |
841 | ret = -EEXIST; | 885 | ret = -EEXIST; |
842 | } | 886 | } |
843 | 887 | ||
888 | /* | ||
889 | * Act as synchronize_rcu to make sure access to | ||
890 | * old vs->vs_tpg is finished. | ||
891 | */ | ||
892 | vhost_scsi_flush(vs); | ||
893 | kfree(vs->vs_tpg); | ||
894 | vs->vs_tpg = vs_tpg; | ||
895 | |||
844 | mutex_unlock(&vs->dev.mutex); | 896 | mutex_unlock(&vs->dev.mutex); |
845 | return ret; | 897 | return ret; |
846 | } | 898 | } |
@@ -851,6 +903,8 @@ static int vhost_scsi_clear_endpoint( | |||
851 | { | 903 | { |
852 | struct tcm_vhost_tport *tv_tport; | 904 | struct tcm_vhost_tport *tv_tport; |
853 | struct tcm_vhost_tpg *tv_tpg; | 905 | struct tcm_vhost_tpg *tv_tpg; |
906 | struct vhost_virtqueue *vq; | ||
907 | bool match = false; | ||
854 | int index, ret, i; | 908 | int index, ret, i; |
855 | u8 target; | 909 | u8 target; |
856 | 910 | ||
@@ -862,9 +916,14 @@ static int vhost_scsi_clear_endpoint( | |||
862 | goto err_dev; | 916 | goto err_dev; |
863 | } | 917 | } |
864 | } | 918 | } |
919 | |||
920 | if (!vs->vs_tpg) { | ||
921 | mutex_unlock(&vs->dev.mutex); | ||
922 | return 0; | ||
923 | } | ||
924 | |||
865 | for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) { | 925 | for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) { |
866 | target = i; | 926 | target = i; |
867 | |||
868 | tv_tpg = vs->vs_tpg[target]; | 927 | tv_tpg = vs->vs_tpg[target]; |
869 | if (!tv_tpg) | 928 | if (!tv_tpg) |
870 | continue; | 929 | continue; |
@@ -886,10 +945,27 @@ static int vhost_scsi_clear_endpoint( | |||
886 | } | 945 | } |
887 | tv_tpg->tv_tpg_vhost_count--; | 946 | tv_tpg->tv_tpg_vhost_count--; |
888 | vs->vs_tpg[target] = NULL; | 947 | vs->vs_tpg[target] = NULL; |
889 | vs->vs_endpoint = false; | 948 | match = true; |
890 | mutex_unlock(&tv_tpg->tv_tpg_mutex); | 949 | mutex_unlock(&tv_tpg->tv_tpg_mutex); |
891 | } | 950 | } |
951 | if (match) { | ||
952 | for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { | ||
953 | vq = &vs->vqs[i]; | ||
954 | /* Flushing the vhost_work acts as synchronize_rcu */ | ||
955 | mutex_lock(&vq->mutex); | ||
956 | rcu_assign_pointer(vq->private_data, NULL); | ||
957 | mutex_unlock(&vq->mutex); | ||
958 | } | ||
959 | } | ||
960 | /* | ||
961 | * Act as synchronize_rcu to make sure access to | ||
962 | * old vs->vs_tpg is finished. | ||
963 | */ | ||
964 | vhost_scsi_flush(vs); | ||
965 | kfree(vs->vs_tpg); | ||
966 | vs->vs_tpg = NULL; | ||
892 | mutex_unlock(&vs->dev.mutex); | 967 | mutex_unlock(&vs->dev.mutex); |
968 | |||
893 | return 0; | 969 | return 0; |
894 | 970 | ||
895 | err_tpg: | 971 | err_tpg: |
@@ -899,6 +975,24 @@ err_dev: | |||
899 | return ret; | 975 | return ret; |
900 | } | 976 | } |
901 | 977 | ||
978 | static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) | ||
979 | { | ||
980 | if (features & ~VHOST_SCSI_FEATURES) | ||
981 | return -EOPNOTSUPP; | ||
982 | |||
983 | mutex_lock(&vs->dev.mutex); | ||
984 | if ((features & (1 << VHOST_F_LOG_ALL)) && | ||
985 | !vhost_log_access_ok(&vs->dev)) { | ||
986 | mutex_unlock(&vs->dev.mutex); | ||
987 | return -EFAULT; | ||
988 | } | ||
989 | vs->dev.acked_features = features; | ||
990 | smp_wmb(); | ||
991 | vhost_scsi_flush(vs); | ||
992 | mutex_unlock(&vs->dev.mutex); | ||
993 | return 0; | ||
994 | } | ||
995 | |||
902 | static int vhost_scsi_open(struct inode *inode, struct file *f) | 996 | static int vhost_scsi_open(struct inode *inode, struct file *f) |
903 | { | 997 | { |
904 | struct vhost_scsi *s; | 998 | struct vhost_scsi *s; |
@@ -939,38 +1033,6 @@ static int vhost_scsi_release(struct inode *inode, struct file *f) | |||
939 | return 0; | 1033 | return 0; |
940 | } | 1034 | } |
941 | 1035 | ||
942 | static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index) | ||
943 | { | ||
944 | vhost_poll_flush(&vs->dev.vqs[index].poll); | ||
945 | } | ||
946 | |||
947 | static void vhost_scsi_flush(struct vhost_scsi *vs) | ||
948 | { | ||
949 | int i; | ||
950 | |||
951 | for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) | ||
952 | vhost_scsi_flush_vq(vs, i); | ||
953 | vhost_work_flush(&vs->dev, &vs->vs_completion_work); | ||
954 | } | ||
955 | |||
956 | static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) | ||
957 | { | ||
958 | if (features & ~VHOST_SCSI_FEATURES) | ||
959 | return -EOPNOTSUPP; | ||
960 | |||
961 | mutex_lock(&vs->dev.mutex); | ||
962 | if ((features & (1 << VHOST_F_LOG_ALL)) && | ||
963 | !vhost_log_access_ok(&vs->dev)) { | ||
964 | mutex_unlock(&vs->dev.mutex); | ||
965 | return -EFAULT; | ||
966 | } | ||
967 | vs->dev.acked_features = features; | ||
968 | smp_wmb(); | ||
969 | vhost_scsi_flush(vs); | ||
970 | mutex_unlock(&vs->dev.mutex); | ||
971 | return 0; | ||
972 | } | ||
973 | |||
974 | static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl, | 1036 | static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl, |
975 | unsigned long arg) | 1037 | unsigned long arg) |
976 | { | 1038 | { |
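vhost_scsi_set_endpoint() now allocates a fresh vs_tpg array, copies the old entries into it, publishes it into each virtqueue's private_data with rcu_assign_pointer(), uses the vhost work flush as the grace period, and only then frees the previous array; vhost_scsi_handle_vq() picks it up through rcu_dereference_check(). A single-threaded sketch of that copy-publish-wait-free sequence, with the grace period reduced to a comment and hypothetical names throughout:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_TARGETS 8

static const char **published;	/* table the I/O path dereferences */

static int set_target(int tpgt, const char *name)
{
	const char **new_tbl = calloc(MAX_TARGETS, sizeof(*new_tbl));
	const char **old;

	if (!new_tbl)
		return -1;
	if (published)		/* start from a copy of the current table */
		memcpy(new_tbl, published, MAX_TARGETS * sizeof(*new_tbl));
	new_tbl[tpgt] = name;

	old = published;
	published = new_tbl;	/* publish; the driver uses rcu_assign_pointer() */
	/*
	 * At this point the driver flushes every virtqueue's work, which
	 * acts as the synchronize_rcu() grace period, before freeing the
	 * old table.
	 */
	free(old);
	return 0;
}

int main(void)
{
	set_target(1, "tpg1");
	set_target(3, "tpg3");
	printf("target 1: %s\n", published[1] ? published[1] : "(none)");
	printf("target 2: %s\n", published[2] ? published[2] : "(none)");
	return 0;
}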
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c index 7c254084b6a0..86291dcd964a 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c | |||
@@ -1373,15 +1373,12 @@ fb_mmap(struct file *file, struct vm_area_struct * vma) | |||
1373 | { | 1373 | { |
1374 | struct fb_info *info = file_fb_info(file); | 1374 | struct fb_info *info = file_fb_info(file); |
1375 | struct fb_ops *fb; | 1375 | struct fb_ops *fb; |
1376 | unsigned long off; | 1376 | unsigned long mmio_pgoff; |
1377 | unsigned long start; | 1377 | unsigned long start; |
1378 | u32 len; | 1378 | u32 len; |
1379 | 1379 | ||
1380 | if (!info) | 1380 | if (!info) |
1381 | return -ENODEV; | 1381 | return -ENODEV; |
1382 | if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) | ||
1383 | return -EINVAL; | ||
1384 | off = vma->vm_pgoff << PAGE_SHIFT; | ||
1385 | fb = info->fbops; | 1382 | fb = info->fbops; |
1386 | if (!fb) | 1383 | if (!fb) |
1387 | return -ENODEV; | 1384 | return -ENODEV; |
@@ -1393,32 +1390,24 @@ fb_mmap(struct file *file, struct vm_area_struct * vma) | |||
1393 | return res; | 1390 | return res; |
1394 | } | 1391 | } |
1395 | 1392 | ||
1396 | /* frame buffer memory */ | 1393 | /* |
1394 | * Ugh. This can be either the frame buffer mapping, or | ||
1395 | * if pgoff points past it, the mmio mapping. | ||
1396 | */ | ||
1397 | start = info->fix.smem_start; | 1397 | start = info->fix.smem_start; |
1398 | len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len); | 1398 | len = info->fix.smem_len; |
1399 | if (off >= len) { | 1399 | mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT; |
1400 | /* memory mapped io */ | 1400 | if (vma->vm_pgoff >= mmio_pgoff) { |
1401 | off -= len; | 1401 | vma->vm_pgoff -= mmio_pgoff; |
1402 | if (info->var.accel_flags) { | ||
1403 | mutex_unlock(&info->mm_lock); | ||
1404 | return -EINVAL; | ||
1405 | } | ||
1406 | start = info->fix.mmio_start; | 1402 | start = info->fix.mmio_start; |
1407 | len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len); | 1403 | len = info->fix.mmio_len; |
1408 | } | 1404 | } |
1409 | mutex_unlock(&info->mm_lock); | 1405 | mutex_unlock(&info->mm_lock); |
1410 | start &= PAGE_MASK; | 1406 | |
1411 | if ((vma->vm_end - vma->vm_start + off) > len) | ||
1412 | return -EINVAL; | ||
1413 | off += start; | ||
1414 | vma->vm_pgoff = off >> PAGE_SHIFT; | ||
1415 | /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by io_remap_pfn_range()*/ | ||
1416 | vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); | 1407 | vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); |
1417 | fb_pgprotect(file, vma, off); | 1408 | fb_pgprotect(file, vma, start); |
1418 | if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, | 1409 | |
1419 | vma->vm_end - vma->vm_start, vma->vm_page_prot)) | 1410 | return vm_iomap_memory(vma, start, len); |
1420 | return -EAGAIN; | ||
1421 | return 0; | ||
1422 | } | 1411 | } |
1423 | 1412 | ||
1424 | static int | 1413 | static int |
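The rewritten fb_mmap() drops the hand-rolled offset checks: it computes the page offset at which the MMIO window begins, picks either the framebuffer or the MMIO region based on vma->vm_pgoff, and lets vm_iomap_memory() handle the length checks and remapping. A sketch of just the mmio_pgoff computation, with hypothetical values for the smem fields:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	/* Hypothetical framebuffer: 3000 KiB starting at a non-page-aligned
	 * physical address, standing in for fix.smem_start/smem_len.       */
	unsigned long smem_start = 0xd0000800UL;
	unsigned long smem_len   = 3000UL * 1024;
	unsigned long vm_pgoff   = 800;		/* offset requested by mmap() */

	/* First page offset that lies past the framebuffer -> MMIO space. */
	unsigned long mmio_pgoff =
		PAGE_ALIGN((smem_start & ~PAGE_MASK) + smem_len) >> PAGE_SHIFT;

	if (vm_pgoff >= mmio_pgoff)
		printf("pgoff %lu maps the MMIO registers\n", vm_pgoff);
	else
		printf("pgoff %lu maps the framebuffer\n", vm_pgoff);
	return 0;
}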
diff --git a/drivers/video/fbmon.c b/drivers/video/fbmon.c index 94ad0f71383c..7f6709991a5c 100644 --- a/drivers/video/fbmon.c +++ b/drivers/video/fbmon.c | |||
@@ -1400,7 +1400,7 @@ int fb_videomode_from_videomode(const struct videomode *vm, | |||
1400 | fbmode->vmode = 0; | 1400 | fbmode->vmode = 0; |
1401 | if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH) | 1401 | if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH) |
1402 | fbmode->sync |= FB_SYNC_HOR_HIGH_ACT; | 1402 | fbmode->sync |= FB_SYNC_HOR_HIGH_ACT; |
1403 | if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH) | 1403 | if (vm->dmt_flags & VESA_DMT_VSYNC_HIGH) |
1404 | fbmode->sync |= FB_SYNC_VERT_HIGH_ACT; | 1404 | fbmode->sync |= FB_SYNC_VERT_HIGH_ACT; |
1405 | if (vm->data_flags & DISPLAY_FLAGS_INTERLACED) | 1405 | if (vm->data_flags & DISPLAY_FLAGS_INTERLACED) |
1406 | fbmode->vmode |= FB_VMODE_INTERLACED; | 1406 | fbmode->vmode |= FB_VMODE_INTERLACED; |
diff --git a/drivers/video/mmp/core.c b/drivers/video/mmp/core.c index 9ed83419038b..84de2632857a 100644 --- a/drivers/video/mmp/core.c +++ b/drivers/video/mmp/core.c | |||
@@ -252,7 +252,5 @@ void mmp_unregister_path(struct mmp_path *path) | |||
252 | 252 | ||
253 | kfree(path); | 253 | kfree(path); |
254 | mutex_unlock(&disp_lock); | 254 | mutex_unlock(&disp_lock); |
255 | |||
256 | dev_info(path->dev, "de-register %s\n", path->name); | ||
257 | } | 255 | } |
258 | EXPORT_SYMBOL_GPL(mmp_unregister_path); | 256 | EXPORT_SYMBOL_GPL(mmp_unregister_path); |
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index 63203acef812..0264704a52be 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c | |||
@@ -858,6 +858,7 @@ static void sh_mobile_lcdc_geometry(struct sh_mobile_lcdc_chan *ch) | |||
858 | tmp = ((mode->xres & 7) << 24) | ((display_h_total & 7) << 16) | 858 | tmp = ((mode->xres & 7) << 24) | ((display_h_total & 7) << 16) |
859 | | ((mode->hsync_len & 7) << 8) | (hsync_pos & 7); | 859 | | ((mode->hsync_len & 7) << 8) | (hsync_pos & 7); |
860 | lcdc_write_chan(ch, LDHAJR, tmp); | 860 | lcdc_write_chan(ch, LDHAJR, tmp); |
861 | lcdc_write_chan_mirror(ch, LDHAJR, tmp); | ||
861 | } | 862 | } |
862 | 863 | ||
863 | static void sh_mobile_lcdc_overlay_setup(struct sh_mobile_lcdc_overlay *ovl) | 864 | static void sh_mobile_lcdc_overlay_setup(struct sh_mobile_lcdc_overlay *ovl) |
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c index b75db0186488..d4284458377e 100644 --- a/drivers/video/uvesafb.c +++ b/drivers/video/uvesafb.c | |||
@@ -1973,7 +1973,8 @@ static int uvesafb_init(void) | |||
1973 | err = -ENOMEM; | 1973 | err = -ENOMEM; |
1974 | 1974 | ||
1975 | if (err) { | 1975 | if (err) { |
1976 | platform_device_put(uvesafb_device); | 1976 | if (uvesafb_device) |
1977 | platform_device_put(uvesafb_device); | ||
1977 | platform_driver_unregister(&uvesafb_driver); | 1978 | platform_driver_unregister(&uvesafb_driver); |
1978 | cn_del_callback(&uvesafb_cn_id); | 1979 | cn_del_callback(&uvesafb_cn_id); |
1979 | return err; | 1980 | return err; |
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 9fcc70c11cea..e89fc3133972 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
@@ -117,7 +117,7 @@ config ARM_SP805_WATCHDOG | |||
117 | 117 | ||
118 | config AT91RM9200_WATCHDOG | 118 | config AT91RM9200_WATCHDOG |
119 | tristate "AT91RM9200 watchdog" | 119 | tristate "AT91RM9200 watchdog" |
120 | depends on ARCH_AT91 | 120 | depends on ARCH_AT91RM9200 |
121 | help | 121 | help |
122 | Watchdog timer embedded into AT91RM9200 chips. This will reboot your | 122 | Watchdog timer embedded into AT91RM9200 chips. This will reboot your |
123 | system when the timeout is reached. | 123 | system when the timeout is reached. |
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index aa85881d17b2..2647ad8e1f19 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
@@ -1316,7 +1316,7 @@ static void __xen_evtchn_do_upcall(void) | |||
1316 | { | 1316 | { |
1317 | int start_word_idx, start_bit_idx; | 1317 | int start_word_idx, start_bit_idx; |
1318 | int word_idx, bit_idx; | 1318 | int word_idx, bit_idx; |
1319 | int i; | 1319 | int i, irq; |
1320 | int cpu = get_cpu(); | 1320 | int cpu = get_cpu(); |
1321 | struct shared_info *s = HYPERVISOR_shared_info; | 1321 | struct shared_info *s = HYPERVISOR_shared_info; |
1322 | struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); | 1322 | struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); |
@@ -1324,6 +1324,8 @@ static void __xen_evtchn_do_upcall(void) | |||
1324 | 1324 | ||
1325 | do { | 1325 | do { |
1326 | xen_ulong_t pending_words; | 1326 | xen_ulong_t pending_words; |
1327 | xen_ulong_t pending_bits; | ||
1328 | struct irq_desc *desc; | ||
1327 | 1329 | ||
1328 | vcpu_info->evtchn_upcall_pending = 0; | 1330 | vcpu_info->evtchn_upcall_pending = 0; |
1329 | 1331 | ||
@@ -1335,6 +1337,17 @@ static void __xen_evtchn_do_upcall(void) | |||
1335 | * selector flag. xchg_xen_ulong must contain an | 1337 | * selector flag. xchg_xen_ulong must contain an |
1336 | * appropriate barrier. | 1338 | * appropriate barrier. |
1337 | */ | 1339 | */ |
1340 | if ((irq = per_cpu(virq_to_irq, cpu)[VIRQ_TIMER]) != -1) { | ||
1341 | int evtchn = evtchn_from_irq(irq); | ||
1342 | word_idx = evtchn / BITS_PER_LONG; | ||
1343 | pending_bits = evtchn % BITS_PER_LONG; | ||
1344 | if (active_evtchns(cpu, s, word_idx) & (1ULL << pending_bits)) { | ||
1345 | desc = irq_to_desc(irq); | ||
1346 | if (desc) | ||
1347 | generic_handle_irq_desc(irq, desc); | ||
1348 | } | ||
1349 | } | ||
1350 | |||
1338 | pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0); | 1351 | pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0); |
1339 | 1352 | ||
1340 | start_word_idx = __this_cpu_read(current_word_idx); | 1353 | start_word_idx = __this_cpu_read(current_word_idx); |
@@ -1343,7 +1356,6 @@ static void __xen_evtchn_do_upcall(void) | |||
1343 | word_idx = start_word_idx; | 1356 | word_idx = start_word_idx; |
1344 | 1357 | ||
1345 | for (i = 0; pending_words != 0; i++) { | 1358 | for (i = 0; pending_words != 0; i++) { |
1346 | xen_ulong_t pending_bits; | ||
1347 | xen_ulong_t words; | 1359 | xen_ulong_t words; |
1348 | 1360 | ||
1349 | words = MASK_LSBS(pending_words, word_idx); | 1361 | words = MASK_LSBS(pending_words, word_idx); |
@@ -1372,8 +1384,7 @@ static void __xen_evtchn_do_upcall(void) | |||
1372 | 1384 | ||
1373 | do { | 1385 | do { |
1374 | xen_ulong_t bits; | 1386 | xen_ulong_t bits; |
1375 | int port, irq; | 1387 | int port; |
1376 | struct irq_desc *desc; | ||
1377 | 1388 | ||
1378 | bits = MASK_LSBS(pending_bits, bit_idx); | 1389 | bits = MASK_LSBS(pending_bits, bit_idx); |
1379 | 1390 | ||
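Before walking the pending-word bitmap, the upcall handler now checks the timer VIRQ's event channel directly, which requires translating a channel number into a word index and a bit position. A tiny sketch of that decomposition over a toy pending bitmap:

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))

/* Toy pending bitmap: one unsigned long per BITS_PER_LONG channels. */
static unsigned long pending[4];

static void set_pending(unsigned int evtchn)
{
	pending[evtchn / BITS_PER_LONG] |= 1UL << (evtchn % BITS_PER_LONG);
}

static int is_pending(unsigned int evtchn)
{
	unsigned int word = evtchn / BITS_PER_LONG;	/* which word      */
	unsigned int bit  = evtchn % BITS_PER_LONG;	/* which bit in it */

	return (pending[word] >> bit) & 1;
}

int main(void)
{
	set_pending(70);	/* e.g. the timer event channel */
	printf("evtchn 70 pending: %d\n", is_pending(70));
	printf("evtchn 71 pending: %d\n", is_pending(71));
	return 0;
}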
diff --git a/firmware/Makefile b/firmware/Makefile index 5d8ee1319b5c..cbb09ce9730a 100644 --- a/firmware/Makefile +++ b/firmware/Makefile | |||
@@ -82,7 +82,7 @@ fw-shipped-$(CONFIG_SCSI_ADVANSYS) += advansys/mcode.bin advansys/38C1600.bin \ | |||
82 | fw-shipped-$(CONFIG_SCSI_QLOGIC_1280) += qlogic/1040.bin qlogic/1280.bin \ | 82 | fw-shipped-$(CONFIG_SCSI_QLOGIC_1280) += qlogic/1040.bin qlogic/1280.bin \ |
83 | qlogic/12160.bin | 83 | qlogic/12160.bin |
84 | fw-shipped-$(CONFIG_SCSI_QLOGICPTI) += qlogic/isp1000.bin | 84 | fw-shipped-$(CONFIG_SCSI_QLOGICPTI) += qlogic/isp1000.bin |
85 | fw-shipped-$(CONFIG_INFINIBAND_QIB) += intel/sd7220.fw | 85 | fw-shipped-$(CONFIG_INFINIBAND_QIB) += qlogic/sd7220.fw |
86 | fw-shipped-$(CONFIG_SND_KORG1212) += korg/k1212.dsp | 86 | fw-shipped-$(CONFIG_SND_KORG1212) += korg/k1212.dsp |
87 | fw-shipped-$(CONFIG_SND_MAESTRO3) += ess/maestro3_assp_kernel.fw \ | 87 | fw-shipped-$(CONFIG_SND_MAESTRO3) += ess/maestro3_assp_kernel.fw \ |
88 | ess/maestro3_assp_minisrc.fw | 88 | ess/maestro3_assp_minisrc.fw |
diff --git a/firmware/intel/sd7220.fw.ihex b/firmware/qlogic/sd7220.fw.ihex index a33636319112..a33636319112 100644 --- a/firmware/intel/sd7220.fw.ihex +++ b/firmware/qlogic/sd7220.fw.ihex | |||
@@ -1029,9 +1029,9 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent) | |||
1029 | spin_unlock(&info->ring_lock); | 1029 | spin_unlock(&info->ring_lock); |
1030 | 1030 | ||
1031 | out: | 1031 | out: |
1032 | kunmap_atomic(ring); | ||
1033 | dprintk("leaving aio_read_evt: %d h%lu t%lu\n", ret, | 1032 | dprintk("leaving aio_read_evt: %d h%lu t%lu\n", ret, |
1034 | (unsigned long)ring->head, (unsigned long)ring->tail); | 1033 | (unsigned long)ring->head, (unsigned long)ring->tail); |
1034 | kunmap_atomic(ring); | ||
1035 | return ret; | 1035 | return ret; |
1036 | } | 1036 | } |
1037 | 1037 | ||
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 3939829f6c5c..86af964c2425 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c | |||
@@ -1137,6 +1137,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma, | |||
1137 | goto whole; | 1137 | goto whole; |
1138 | if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE)) | 1138 | if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE)) |
1139 | goto whole; | 1139 | goto whole; |
1140 | return 0; | ||
1140 | } | 1141 | } |
1141 | 1142 | ||
1142 | /* Do not dump I/O mapped devices or special mappings */ | 1143 | /* Do not dump I/O mapped devices or special mappings */ |
diff --git a/fs/bio.c b/fs/bio.c --- a/fs/bio.c +++ b/fs/bio.c | |||
@@ -1428,8 +1428,6 @@ void bio_endio(struct bio *bio, int error) | |||
1428 | else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) | 1428 | else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) |
1429 | error = -EIO; | 1429 | error = -EIO; |
1430 | 1430 | ||
1431 | trace_block_bio_complete(bio, error); | ||
1432 | |||
1433 | if (bio->bi_end_io) | 1431 | if (bio->bi_end_io) |
1434 | bio->bi_end_io(bio, error); | 1432 | bio->bi_end_io(bio, error); |
1435 | } | 1433 | } |
diff --git a/fs/block_dev.c b/fs/block_dev.c index aea605c98ba6..aae187a7f94a 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -551,6 +551,7 @@ struct block_device *bdgrab(struct block_device *bdev) | |||
551 | ihold(bdev->bd_inode); | 551 | ihold(bdev->bd_inode); |
552 | return bdev; | 552 | return bdev; |
553 | } | 553 | } |
554 | EXPORT_SYMBOL(bdgrab); | ||
554 | 555 | ||
555 | long nr_blockdev_pages(void) | 556 | long nr_blockdev_pages(void) |
556 | { | 557 | { |
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 451fad96ecd1..ef96381569a4 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c | |||
@@ -317,6 +317,7 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans, | |||
317 | unsigned long src_ptr; | 317 | unsigned long src_ptr; |
318 | unsigned long dst_ptr; | 318 | unsigned long dst_ptr; |
319 | int overwrite_root = 0; | 319 | int overwrite_root = 0; |
320 | bool inode_item = key->type == BTRFS_INODE_ITEM_KEY; | ||
320 | 321 | ||
321 | if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) | 322 | if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) |
322 | overwrite_root = 1; | 323 | overwrite_root = 1; |
@@ -326,6 +327,9 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans, | |||
326 | 327 | ||
327 | /* look for the key in the destination tree */ | 328 | /* look for the key in the destination tree */ |
328 | ret = btrfs_search_slot(NULL, root, key, path, 0, 0); | 329 | ret = btrfs_search_slot(NULL, root, key, path, 0, 0); |
330 | if (ret < 0) | ||
331 | return ret; | ||
332 | |||
329 | if (ret == 0) { | 333 | if (ret == 0) { |
330 | char *src_copy; | 334 | char *src_copy; |
331 | char *dst_copy; | 335 | char *dst_copy; |
@@ -367,6 +371,30 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans, | |||
367 | return 0; | 371 | return 0; |
368 | } | 372 | } |
369 | 373 | ||
374 | /* | ||
375 | * We need to load the old nbytes into the inode so when we | ||
376 | * replay the extents we've logged we get the right nbytes. | ||
377 | */ | ||
378 | if (inode_item) { | ||
379 | struct btrfs_inode_item *item; | ||
380 | u64 nbytes; | ||
381 | |||
382 | item = btrfs_item_ptr(path->nodes[0], path->slots[0], | ||
383 | struct btrfs_inode_item); | ||
384 | nbytes = btrfs_inode_nbytes(path->nodes[0], item); | ||
385 | item = btrfs_item_ptr(eb, slot, | ||
386 | struct btrfs_inode_item); | ||
387 | btrfs_set_inode_nbytes(eb, item, nbytes); | ||
388 | } | ||
389 | } else if (inode_item) { | ||
390 | struct btrfs_inode_item *item; | ||
391 | |||
392 | /* | ||
393 | * New inode, set nbytes to 0 so that the nbytes comes out | ||
394 | * properly when we replay the extents. | ||
395 | */ | ||
396 | item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); | ||
397 | btrfs_set_inode_nbytes(eb, item, 0); | ||
370 | } | 398 | } |
371 | insert: | 399 | insert: |
372 | btrfs_release_path(path); | 400 | btrfs_release_path(path); |
@@ -486,7 +514,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, | |||
486 | int found_type; | 514 | int found_type; |
487 | u64 extent_end; | 515 | u64 extent_end; |
488 | u64 start = key->offset; | 516 | u64 start = key->offset; |
489 | u64 saved_nbytes; | 517 | u64 nbytes = 0; |
490 | struct btrfs_file_extent_item *item; | 518 | struct btrfs_file_extent_item *item; |
491 | struct inode *inode = NULL; | 519 | struct inode *inode = NULL; |
492 | unsigned long size; | 520 | unsigned long size; |
@@ -496,10 +524,19 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, | |||
496 | found_type = btrfs_file_extent_type(eb, item); | 524 | found_type = btrfs_file_extent_type(eb, item); |
497 | 525 | ||
498 | if (found_type == BTRFS_FILE_EXTENT_REG || | 526 | if (found_type == BTRFS_FILE_EXTENT_REG || |
499 | found_type == BTRFS_FILE_EXTENT_PREALLOC) | 527 | found_type == BTRFS_FILE_EXTENT_PREALLOC) { |
500 | extent_end = start + btrfs_file_extent_num_bytes(eb, item); | 528 | nbytes = btrfs_file_extent_num_bytes(eb, item); |
501 | else if (found_type == BTRFS_FILE_EXTENT_INLINE) { | 529 | extent_end = start + nbytes; |
530 | |||
531 | /* | ||
532 | * We don't add to the inodes nbytes if we are prealloc or a | ||
533 | * hole. | ||
534 | */ | ||
535 | if (btrfs_file_extent_disk_bytenr(eb, item) == 0) | ||
536 | nbytes = 0; | ||
537 | } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { | ||
502 | size = btrfs_file_extent_inline_len(eb, item); | 538 | size = btrfs_file_extent_inline_len(eb, item); |
539 | nbytes = btrfs_file_extent_ram_bytes(eb, item); | ||
503 | extent_end = ALIGN(start + size, root->sectorsize); | 540 | extent_end = ALIGN(start + size, root->sectorsize); |
504 | } else { | 541 | } else { |
505 | ret = 0; | 542 | ret = 0; |
@@ -548,7 +585,6 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, | |||
548 | } | 585 | } |
549 | btrfs_release_path(path); | 586 | btrfs_release_path(path); |
550 | 587 | ||
551 | saved_nbytes = inode_get_bytes(inode); | ||
552 | /* drop any overlapping extents */ | 588 | /* drop any overlapping extents */ |
553 | ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1); | 589 | ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1); |
554 | BUG_ON(ret); | 590 | BUG_ON(ret); |
@@ -635,7 +671,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, | |||
635 | BUG_ON(ret); | 671 | BUG_ON(ret); |
636 | } | 672 | } |
637 | 673 | ||
638 | inode_set_bytes(inode, saved_nbytes); | 674 | inode_add_bytes(inode, nbytes); |
639 | ret = btrfs_update_inode(trans, root, inode); | 675 | ret = btrfs_update_inode(trans, root, inode); |
640 | out: | 676 | out: |
641 | if (inode) | 677 | if (inode) |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 991c63c6bdd0..21b3a291c327 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -1575,14 +1575,24 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, | |||
1575 | } | 1575 | } |
1576 | break; | 1576 | break; |
1577 | case Opt_blank_pass: | 1577 | case Opt_blank_pass: |
1578 | vol->password = NULL; | ||
1579 | break; | ||
1580 | case Opt_pass: | ||
1581 | /* passwords have to be handled differently | 1578 | /* passwords have to be handled differently |
1582 | * to allow the character used for deliminator | 1579 | * to allow the character used for deliminator |
1583 | * to be passed within them | 1580 | * to be passed within them |
1584 | */ | 1581 | */ |
1585 | 1582 | ||
1583 | /* | ||
1584 | * Check if this is a case where the password | ||
1585 | * starts with a delimiter | ||
1586 | */ | ||
1587 | tmp_end = strchr(data, '='); | ||
1588 | tmp_end++; | ||
1589 | if (!(tmp_end < end && tmp_end[1] == delim)) { | ||
1590 | /* No it is not. Set the password to NULL */ | ||
1591 | vol->password = NULL; | ||
1592 | break; | ||
1593 | } | ||
1594 | /* Yes it is. Drop down to Opt_pass below.*/ | ||
1595 | case Opt_pass: | ||
1586 | /* Obtain the value string */ | 1596 | /* Obtain the value string */ |
1587 | value = strchr(data, '='); | 1597 | value = strchr(data, '='); |
1588 | value++; | 1598 | value++; |
diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c index 412e6eda25f8..e4141f257495 100644 --- a/fs/ecryptfs/miscdev.c +++ b/fs/ecryptfs/miscdev.c | |||
@@ -80,13 +80,6 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file) | |||
80 | int rc; | 80 | int rc; |
81 | 81 | ||
82 | mutex_lock(&ecryptfs_daemon_hash_mux); | 82 | mutex_lock(&ecryptfs_daemon_hash_mux); |
83 | rc = try_module_get(THIS_MODULE); | ||
84 | if (rc == 0) { | ||
85 | rc = -EIO; | ||
86 | printk(KERN_ERR "%s: Error attempting to increment module use " | ||
87 | "count; rc = [%d]\n", __func__, rc); | ||
88 | goto out_unlock_daemon_list; | ||
89 | } | ||
90 | rc = ecryptfs_find_daemon_by_euid(&daemon); | 83 | rc = ecryptfs_find_daemon_by_euid(&daemon); |
91 | if (!rc) { | 84 | if (!rc) { |
92 | rc = -EINVAL; | 85 | rc = -EINVAL; |
@@ -96,7 +89,7 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file) | |||
96 | if (rc) { | 89 | if (rc) { |
97 | printk(KERN_ERR "%s: Error attempting to spawn daemon; " | 90 | printk(KERN_ERR "%s: Error attempting to spawn daemon; " |
98 | "rc = [%d]\n", __func__, rc); | 91 | "rc = [%d]\n", __func__, rc); |
99 | goto out_module_put_unlock_daemon_list; | 92 | goto out_unlock_daemon_list; |
100 | } | 93 | } |
101 | mutex_lock(&daemon->mux); | 94 | mutex_lock(&daemon->mux); |
102 | if (daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN) { | 95 | if (daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN) { |
@@ -108,9 +101,6 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file) | |||
108 | atomic_inc(&ecryptfs_num_miscdev_opens); | 101 | atomic_inc(&ecryptfs_num_miscdev_opens); |
109 | out_unlock_daemon: | 102 | out_unlock_daemon: |
110 | mutex_unlock(&daemon->mux); | 103 | mutex_unlock(&daemon->mux); |
111 | out_module_put_unlock_daemon_list: | ||
112 | if (rc) | ||
113 | module_put(THIS_MODULE); | ||
114 | out_unlock_daemon_list: | 104 | out_unlock_daemon_list: |
115 | mutex_unlock(&ecryptfs_daemon_hash_mux); | 105 | mutex_unlock(&ecryptfs_daemon_hash_mux); |
116 | return rc; | 106 | return rc; |
@@ -147,7 +137,6 @@ ecryptfs_miscdev_release(struct inode *inode, struct file *file) | |||
147 | "bug.\n", __func__, rc); | 137 | "bug.\n", __func__, rc); |
148 | BUG(); | 138 | BUG(); |
149 | } | 139 | } |
150 | module_put(THIS_MODULE); | ||
151 | return rc; | 140 | return rc; |
152 | } | 141 | } |
153 | 142 | ||
@@ -471,6 +460,7 @@ out_free: | |||
471 | 460 | ||
472 | 461 | ||
473 | static const struct file_operations ecryptfs_miscdev_fops = { | 462 | static const struct file_operations ecryptfs_miscdev_fops = { |
463 | .owner = THIS_MODULE, | ||
474 | .open = ecryptfs_miscdev_open, | 464 | .open = ecryptfs_miscdev_open, |
475 | .poll = ecryptfs_miscdev_poll, | 465 | .poll = ecryptfs_miscdev_poll, |
476 | .read = ecryptfs_miscdev_read, | 466 | .read = ecryptfs_miscdev_read, |
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c index 34c48f1fcdbc..525a2a1ac16c 100644 --- a/fs/efivarfs/super.c +++ b/fs/efivarfs/super.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/fs.h> | 12 | #include <linux/fs.h> |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/pagemap.h> | 14 | #include <linux/pagemap.h> |
15 | #include <linux/ucs2_string.h> | ||
15 | 16 | ||
16 | #include "internal.h" | 17 | #include "internal.h" |
17 | 18 | ||
@@ -135,7 +136,7 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor, | |||
135 | memcpy(entry->var.VariableName, name16, name_size); | 136 | memcpy(entry->var.VariableName, name16, name_size); |
136 | memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t)); | 137 | memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t)); |
137 | 138 | ||
138 | len = utf16_strlen(entry->var.VariableName); | 139 | len = ucs2_strlen(entry->var.VariableName); |
139 | 140 | ||
140 | /* name, plus '-', plus GUID, plus NUL*/ | 141 | /* name, plus '-', plus GUID, plus NUL*/ |
141 | name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL); | 142 | name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL); |
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 56efcaadf848..9c6d06dcef8b 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c | |||
@@ -2999,20 +2999,23 @@ static int ext4_split_extent_at(handle_t *handle, | |||
2999 | if (split_flag & EXT4_EXT_DATA_VALID1) { | 2999 | if (split_flag & EXT4_EXT_DATA_VALID1) { |
3000 | err = ext4_ext_zeroout(inode, ex2); | 3000 | err = ext4_ext_zeroout(inode, ex2); |
3001 | zero_ex.ee_block = ex2->ee_block; | 3001 | zero_ex.ee_block = ex2->ee_block; |
3002 | zero_ex.ee_len = ext4_ext_get_actual_len(ex2); | 3002 | zero_ex.ee_len = cpu_to_le16( |
3003 | ext4_ext_get_actual_len(ex2)); | ||
3003 | ext4_ext_store_pblock(&zero_ex, | 3004 | ext4_ext_store_pblock(&zero_ex, |
3004 | ext4_ext_pblock(ex2)); | 3005 | ext4_ext_pblock(ex2)); |
3005 | } else { | 3006 | } else { |
3006 | err = ext4_ext_zeroout(inode, ex); | 3007 | err = ext4_ext_zeroout(inode, ex); |
3007 | zero_ex.ee_block = ex->ee_block; | 3008 | zero_ex.ee_block = ex->ee_block; |
3008 | zero_ex.ee_len = ext4_ext_get_actual_len(ex); | 3009 | zero_ex.ee_len = cpu_to_le16( |
3010 | ext4_ext_get_actual_len(ex)); | ||
3009 | ext4_ext_store_pblock(&zero_ex, | 3011 | ext4_ext_store_pblock(&zero_ex, |
3010 | ext4_ext_pblock(ex)); | 3012 | ext4_ext_pblock(ex)); |
3011 | } | 3013 | } |
3012 | } else { | 3014 | } else { |
3013 | err = ext4_ext_zeroout(inode, &orig_ex); | 3015 | err = ext4_ext_zeroout(inode, &orig_ex); |
3014 | zero_ex.ee_block = orig_ex.ee_block; | 3016 | zero_ex.ee_block = orig_ex.ee_block; |
3015 | zero_ex.ee_len = ext4_ext_get_actual_len(&orig_ex); | 3017 | zero_ex.ee_len = cpu_to_le16( |
3018 | ext4_ext_get_actual_len(&orig_ex)); | ||
3016 | ext4_ext_store_pblock(&zero_ex, | 3019 | ext4_ext_store_pblock(&zero_ex, |
3017 | ext4_ext_pblock(&orig_ex)); | 3020 | ext4_ext_pblock(&orig_ex)); |
3018 | } | 3021 | } |
@@ -3272,7 +3275,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
3272 | if (err) | 3275 | if (err) |
3273 | goto out; | 3276 | goto out; |
3274 | zero_ex.ee_block = ex->ee_block; | 3277 | zero_ex.ee_block = ex->ee_block; |
3275 | zero_ex.ee_len = ext4_ext_get_actual_len(ex); | 3278 | zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)); |
3276 | ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex)); | 3279 | ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex)); |
3277 | 3280 | ||
3278 | err = ext4_ext_get_access(handle, inode, path + depth); | 3281 | err = ext4_ext_get_access(handle, inode, path + depth); |
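The ext4_split_extent_at() and convert-to-initialized hunks wrap the length stored into zero_ex.ee_len with cpu_to_le16(), because on-disk extent fields are little-endian and a raw host-endian assignment is wrong on big-endian machines. A small userspace sketch of the conversion, assuming the glibc/BSD <endian.h> helpers as stand-ins for cpu_to_le16()/le16_to_cpu():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified on-disk record: 16-bit length stored little-endian. */
struct disk_extent {
	uint16_t ee_len;	/* always little-endian on disk */
};

int main(void)
{
	struct disk_extent ex;
	uint16_t actual_len = 0x1234;		/* host-endian value */

	ex.ee_len = htole16(actual_len);	/* like cpu_to_le16() */
	printf("host 0x%04x stored as 0x%04x, read back 0x%04x\n",
	       actual_len, ex.ee_len, le16toh(ex.ee_len));
	return 0;
}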
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c index b505a145a593..a04183127ef0 100644 --- a/fs/ext4/indirect.c +++ b/fs/ext4/indirect.c | |||
@@ -1539,9 +1539,9 @@ static int free_hole_blocks(handle_t *handle, struct inode *inode, | |||
1539 | blk = *i_data; | 1539 | blk = *i_data; |
1540 | if (level > 0) { | 1540 | if (level > 0) { |
1541 | ext4_lblk_t first2; | 1541 | ext4_lblk_t first2; |
1542 | bh = sb_bread(inode->i_sb, blk); | 1542 | bh = sb_bread(inode->i_sb, le32_to_cpu(blk)); |
1543 | if (!bh) { | 1543 | if (!bh) { |
1544 | EXT4_ERROR_INODE_BLOCK(inode, blk, | 1544 | EXT4_ERROR_INODE_BLOCK(inode, le32_to_cpu(blk), |
1545 | "Read failure"); | 1545 | "Read failure"); |
1546 | return -EIO; | 1546 | return -EIO; |
1547 | } | 1547 | } |
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index 019f45e45097..d79c2dadc536 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c | |||
@@ -923,8 +923,11 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl) | |||
923 | cmd = F_SETLK; | 923 | cmd = F_SETLK; |
924 | fl->fl_type = F_UNLCK; | 924 | fl->fl_type = F_UNLCK; |
925 | } | 925 | } |
926 | if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) | 926 | if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) { |
927 | if (fl->fl_type == F_UNLCK) | ||
928 | posix_lock_file_wait(file, fl); | ||
927 | return -EIO; | 929 | return -EIO; |
930 | } | ||
928 | if (IS_GETLK(cmd)) | 931 | if (IS_GETLK(cmd)) |
929 | return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl); | 932 | return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl); |
930 | else if (fl->fl_type == F_UNLCK) | 933 | else if (fl->fl_type == F_UNLCK) |
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index 156e42ec84ea..5c29216e9cc1 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h | |||
@@ -588,6 +588,7 @@ struct lm_lockstruct { | |||
588 | struct dlm_lksb ls_control_lksb; /* control_lock */ | 588 | struct dlm_lksb ls_control_lksb; /* control_lock */ |
589 | char ls_control_lvb[GDLM_LVB_SIZE]; /* control_lock lvb */ | 589 | char ls_control_lvb[GDLM_LVB_SIZE]; /* control_lock lvb */ |
590 | struct completion ls_sync_wait; /* {control,mounted}_{lock,unlock} */ | 590 | struct completion ls_sync_wait; /* {control,mounted}_{lock,unlock} */ |
591 | char *ls_lvb_bits; | ||
591 | 592 | ||
592 | spinlock_t ls_recover_spin; /* protects following fields */ | 593 | spinlock_t ls_recover_spin; /* protects following fields */ |
593 | unsigned long ls_recover_flags; /* DFL_ */ | 594 | unsigned long ls_recover_flags; /* DFL_ */ |
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c index 9802de0f85e6..c8423d6de6c3 100644 --- a/fs/gfs2/lock_dlm.c +++ b/fs/gfs2/lock_dlm.c | |||
@@ -483,12 +483,8 @@ static void control_lvb_write(struct lm_lockstruct *ls, uint32_t lvb_gen, | |||
483 | 483 | ||
484 | static int all_jid_bits_clear(char *lvb) | 484 | static int all_jid_bits_clear(char *lvb) |
485 | { | 485 | { |
486 | int i; | 486 | return !memchr_inv(lvb + JID_BITMAP_OFFSET, 0, |
487 | for (i = JID_BITMAP_OFFSET; i < GDLM_LVB_SIZE; i++) { | 487 | GDLM_LVB_SIZE - JID_BITMAP_OFFSET); |
488 | if (lvb[i]) | ||
489 | return 0; | ||
490 | } | ||
491 | return 1; | ||
492 | } | 488 | } |
493 | 489 | ||
494 | static void sync_wait_cb(void *arg) | 490 | static void sync_wait_cb(void *arg) |
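all_jid_bits_clear() is reduced to a memchr_inv() call, which returns NULL when every byte in the range matches the given value, here meaning no journal IDs are flagged for recovery. memchr_inv() is kernel-only, so this sketch open-codes the equivalent "is this buffer all zeroes" check:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Open-coded equivalent of the kernel's memchr_inv(buf, 0, len) == NULL. */
static int all_bytes_clear(const unsigned char *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		if (buf[i])
			return 0;
	return 1;
}

int main(void)
{
	unsigned char lvb[32];

	memset(lvb, 0, sizeof(lvb));
	printf("clear: %d\n", all_bytes_clear(lvb, sizeof(lvb)));	/* 1 */
	lvb[7] = 0x10;							/* mark a jid */
	printf("clear: %d\n", all_bytes_clear(lvb, sizeof(lvb)));	/* 0 */
	return 0;
}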
@@ -580,7 +576,6 @@ static void gfs2_control_func(struct work_struct *work) | |||
580 | { | 576 | { |
581 | struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_control_work.work); | 577 | struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_control_work.work); |
582 | struct lm_lockstruct *ls = &sdp->sd_lockstruct; | 578 | struct lm_lockstruct *ls = &sdp->sd_lockstruct; |
583 | char lvb_bits[GDLM_LVB_SIZE]; | ||
584 | uint32_t block_gen, start_gen, lvb_gen, flags; | 579 | uint32_t block_gen, start_gen, lvb_gen, flags; |
585 | int recover_set = 0; | 580 | int recover_set = 0; |
586 | int write_lvb = 0; | 581 | int write_lvb = 0; |
@@ -634,7 +629,7 @@ static void gfs2_control_func(struct work_struct *work) | |||
634 | return; | 629 | return; |
635 | } | 630 | } |
636 | 631 | ||
637 | control_lvb_read(ls, &lvb_gen, lvb_bits); | 632 | control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits); |
638 | 633 | ||
639 | spin_lock(&ls->ls_recover_spin); | 634 | spin_lock(&ls->ls_recover_spin); |
640 | if (block_gen != ls->ls_recover_block || | 635 | if (block_gen != ls->ls_recover_block || |
@@ -664,10 +659,10 @@ static void gfs2_control_func(struct work_struct *work) | |||
664 | 659 | ||
665 | ls->ls_recover_result[i] = 0; | 660 | ls->ls_recover_result[i] = 0; |
666 | 661 | ||
667 | if (!test_bit_le(i, lvb_bits + JID_BITMAP_OFFSET)) | 662 | if (!test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) |
668 | continue; | 663 | continue; |
669 | 664 | ||
670 | __clear_bit_le(i, lvb_bits + JID_BITMAP_OFFSET); | 665 | __clear_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET); |
671 | write_lvb = 1; | 666 | write_lvb = 1; |
672 | } | 667 | } |
673 | } | 668 | } |
@@ -691,7 +686,7 @@ static void gfs2_control_func(struct work_struct *work) | |||
691 | continue; | 686 | continue; |
692 | if (ls->ls_recover_submit[i] < start_gen) { | 687 | if (ls->ls_recover_submit[i] < start_gen) { |
693 | ls->ls_recover_submit[i] = 0; | 688 | ls->ls_recover_submit[i] = 0; |
694 | __set_bit_le(i, lvb_bits + JID_BITMAP_OFFSET); | 689 | __set_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET); |
695 | } | 690 | } |
696 | } | 691 | } |
697 | /* even if there are no bits to set, we need to write the | 692 | /* even if there are no bits to set, we need to write the |
@@ -705,7 +700,7 @@ static void gfs2_control_func(struct work_struct *work) | |||
705 | spin_unlock(&ls->ls_recover_spin); | 700 | spin_unlock(&ls->ls_recover_spin); |
706 | 701 | ||
707 | if (write_lvb) { | 702 | if (write_lvb) { |
708 | control_lvb_write(ls, start_gen, lvb_bits); | 703 | control_lvb_write(ls, start_gen, ls->ls_lvb_bits); |
709 | flags = DLM_LKF_CONVERT | DLM_LKF_VALBLK; | 704 | flags = DLM_LKF_CONVERT | DLM_LKF_VALBLK; |
710 | } else { | 705 | } else { |
711 | flags = DLM_LKF_CONVERT; | 706 | flags = DLM_LKF_CONVERT; |
@@ -725,7 +720,7 @@ static void gfs2_control_func(struct work_struct *work) | |||
725 | */ | 720 | */ |
726 | 721 | ||
727 | for (i = 0; i < recover_size; i++) { | 722 | for (i = 0; i < recover_size; i++) { |
728 | if (test_bit_le(i, lvb_bits + JID_BITMAP_OFFSET)) { | 723 | if (test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) { |
729 | fs_info(sdp, "recover generation %u jid %d\n", | 724 | fs_info(sdp, "recover generation %u jid %d\n", |
730 | start_gen, i); | 725 | start_gen, i); |
731 | gfs2_recover_set(sdp, i); | 726 | gfs2_recover_set(sdp, i); |
@@ -758,7 +753,6 @@ static void gfs2_control_func(struct work_struct *work) | |||
758 | static int control_mount(struct gfs2_sbd *sdp) | 753 | static int control_mount(struct gfs2_sbd *sdp) |
759 | { | 754 | { |
760 | struct lm_lockstruct *ls = &sdp->sd_lockstruct; | 755 | struct lm_lockstruct *ls = &sdp->sd_lockstruct; |
761 | char lvb_bits[GDLM_LVB_SIZE]; | ||
762 | uint32_t start_gen, block_gen, mount_gen, lvb_gen; | 756 | uint32_t start_gen, block_gen, mount_gen, lvb_gen; |
763 | int mounted_mode; | 757 | int mounted_mode; |
764 | int retries = 0; | 758 | int retries = 0; |
@@ -857,7 +851,7 @@ locks_done: | |||
857 | * lvb_gen will be non-zero. | 851 | * lvb_gen will be non-zero. |
858 | */ | 852 | */ |
859 | 853 | ||
860 | control_lvb_read(ls, &lvb_gen, lvb_bits); | 854 | control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits); |
861 | 855 | ||
862 | if (lvb_gen == 0xFFFFFFFF) { | 856 | if (lvb_gen == 0xFFFFFFFF) { |
863 | /* special value to force mount attempts to fail */ | 857 | /* special value to force mount attempts to fail */ |
@@ -887,7 +881,7 @@ locks_done: | |||
887 | * and all lvb bits to be clear (no pending journal recoveries.) | 881 | * and all lvb bits to be clear (no pending journal recoveries.) |
888 | */ | 882 | */ |
889 | 883 | ||
890 | if (!all_jid_bits_clear(lvb_bits)) { | 884 | if (!all_jid_bits_clear(ls->ls_lvb_bits)) { |
891 | /* journals need recovery, wait until all are clear */ | 885 | /* journals need recovery, wait until all are clear */ |
892 | fs_info(sdp, "control_mount wait for journal recovery\n"); | 886 | fs_info(sdp, "control_mount wait for journal recovery\n"); |
893 | goto restart; | 887 | goto restart; |
@@ -949,7 +943,6 @@ static int dlm_recovery_wait(void *word) | |||
949 | static int control_first_done(struct gfs2_sbd *sdp) | 943 | static int control_first_done(struct gfs2_sbd *sdp) |
950 | { | 944 | { |
951 | struct lm_lockstruct *ls = &sdp->sd_lockstruct; | 945 | struct lm_lockstruct *ls = &sdp->sd_lockstruct; |
952 | char lvb_bits[GDLM_LVB_SIZE]; | ||
953 | uint32_t start_gen, block_gen; | 946 | uint32_t start_gen, block_gen; |
954 | int error; | 947 | int error; |
955 | 948 | ||
@@ -991,8 +984,8 @@ restart: | |||
991 | memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t)); | 984 | memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t)); |
992 | spin_unlock(&ls->ls_recover_spin); | 985 | spin_unlock(&ls->ls_recover_spin); |
993 | 986 | ||
994 | memset(lvb_bits, 0, sizeof(lvb_bits)); | 987 | memset(ls->ls_lvb_bits, 0, GDLM_LVB_SIZE); |
995 | control_lvb_write(ls, start_gen, lvb_bits); | 988 | control_lvb_write(ls, start_gen, ls->ls_lvb_bits); |
996 | 989 | ||
997 | error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT); | 990 | error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT); |
998 | if (error) | 991 | if (error) |
@@ -1022,6 +1015,12 @@ static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots, | |||
1022 | uint32_t old_size, new_size; | 1015 | uint32_t old_size, new_size; |
1023 | int i, max_jid; | 1016 | int i, max_jid; |
1024 | 1017 | ||
1018 | if (!ls->ls_lvb_bits) { | ||
1019 | ls->ls_lvb_bits = kzalloc(GDLM_LVB_SIZE, GFP_NOFS); | ||
1020 | if (!ls->ls_lvb_bits) | ||
1021 | return -ENOMEM; | ||
1022 | } | ||
1023 | |||
1025 | max_jid = 0; | 1024 | max_jid = 0; |
1026 | for (i = 0; i < num_slots; i++) { | 1025 | for (i = 0; i < num_slots; i++) { |
1027 | if (max_jid < slots[i].slot - 1) | 1026 | if (max_jid < slots[i].slot - 1) |
@@ -1057,6 +1056,7 @@ static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots, | |||
1057 | 1056 | ||
1058 | static void free_recover_size(struct lm_lockstruct *ls) | 1057 | static void free_recover_size(struct lm_lockstruct *ls) |
1059 | { | 1058 | { |
1059 | kfree(ls->ls_lvb_bits); | ||
1060 | kfree(ls->ls_recover_submit); | 1060 | kfree(ls->ls_recover_submit); |
1061 | kfree(ls->ls_recover_result); | 1061 | kfree(ls->ls_recover_result); |
1062 | ls->ls_recover_submit = NULL; | 1062 | ls->ls_recover_submit = NULL; |
@@ -1205,6 +1205,7 @@ static int gdlm_mount(struct gfs2_sbd *sdp, const char *table) | |||
1205 | ls->ls_recover_size = 0; | 1205 | ls->ls_recover_size = 0; |
1206 | ls->ls_recover_submit = NULL; | 1206 | ls->ls_recover_submit = NULL; |
1207 | ls->ls_recover_result = NULL; | 1207 | ls->ls_recover_result = NULL; |
1208 | ls->ls_lvb_bits = NULL; | ||
1208 | 1209 | ||
1209 | error = set_recover_size(sdp, NULL, 0); | 1210 | error = set_recover_size(sdp, NULL, 0); |
1210 | if (error) | 1211 | if (error) |
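
Note on the lock_dlm.c hunks above: the per-call lvb_bits[GDLM_LVB_SIZE] arrays in gfs2_control_func(), control_mount() and control_first_done() are replaced by a single ls->ls_lvb_bits buffer, allocated once in set_recover_size() with kzalloc(GFP_NOFS) and released in free_recover_size(), so the bitmap no longer lives on the stack of each caller. all_jid_bits_clear() is also rewritten around memchr_inv(), which returns NULL when every byte in the range equals the given value. A minimal sketch of the two idioms, using stand-in sizes instead of the gfs2 lockstruct fields:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#define LVB_SIZE	32	/* stand-in for GDLM_LVB_SIZE */
#define BITMAP_OFF	8	/* stand-in for JID_BITMAP_OFFSET */

/* true when every byte past the offset is zero */
static bool bits_clear(const char *lvb)
{
	return !memchr_inv(lvb + BITMAP_OFF, 0, LVB_SIZE - BITMAP_OFF);
}

/* allocate lazily, keep the buffer for the lifetime of the lockspace */
static char *get_lvb_bits(char **cache)
{
	if (!*cache)
		*cache = kzalloc(LVB_SIZE, GFP_NOFS);
	return *cache;
}
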
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index d1f51fd73f86..5a51265a4341 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c | |||
@@ -576,7 +576,7 @@ int gfs2_rs_alloc(struct gfs2_inode *ip) | |||
576 | RB_CLEAR_NODE(&ip->i_res->rs_node); | 576 | RB_CLEAR_NODE(&ip->i_res->rs_node); |
577 | out: | 577 | out: |
578 | up_write(&ip->i_rw_mutex); | 578 | up_write(&ip->i_rw_mutex); |
579 | return 0; | 579 | return error; |
580 | } | 580 | } |
581 | 581 | ||
582 | static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs) | 582 | static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs) |
@@ -1181,12 +1181,9 @@ int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset, | |||
1181 | const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed) | 1181 | const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed) |
1182 | { | 1182 | { |
1183 | struct super_block *sb = sdp->sd_vfs; | 1183 | struct super_block *sb = sdp->sd_vfs; |
1184 | struct block_device *bdev = sb->s_bdev; | ||
1185 | const unsigned int sects_per_blk = sdp->sd_sb.sb_bsize / | ||
1186 | bdev_logical_block_size(sb->s_bdev); | ||
1187 | u64 blk; | 1184 | u64 blk; |
1188 | sector_t start = 0; | 1185 | sector_t start = 0; |
1189 | sector_t nr_sects = 0; | 1186 | sector_t nr_blks = 0; |
1190 | int rv; | 1187 | int rv; |
1191 | unsigned int x; | 1188 | unsigned int x; |
1192 | u32 trimmed = 0; | 1189 | u32 trimmed = 0; |
@@ -1206,35 +1203,34 @@ int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset, | |||
1206 | if (diff == 0) | 1203 | if (diff == 0) |
1207 | continue; | 1204 | continue; |
1208 | blk = offset + ((bi->bi_start + x) * GFS2_NBBY); | 1205 | blk = offset + ((bi->bi_start + x) * GFS2_NBBY); |
1209 | blk *= sects_per_blk; /* convert to sectors */ | ||
1210 | while(diff) { | 1206 | while(diff) { |
1211 | if (diff & 1) { | 1207 | if (diff & 1) { |
1212 | if (nr_sects == 0) | 1208 | if (nr_blks == 0) |
1213 | goto start_new_extent; | 1209 | goto start_new_extent; |
1214 | if ((start + nr_sects) != blk) { | 1210 | if ((start + nr_blks) != blk) { |
1215 | if (nr_sects >= minlen) { | 1211 | if (nr_blks >= minlen) { |
1216 | rv = blkdev_issue_discard(bdev, | 1212 | rv = sb_issue_discard(sb, |
1217 | start, nr_sects, | 1213 | start, nr_blks, |
1218 | GFP_NOFS, 0); | 1214 | GFP_NOFS, 0); |
1219 | if (rv) | 1215 | if (rv) |
1220 | goto fail; | 1216 | goto fail; |
1221 | trimmed += nr_sects; | 1217 | trimmed += nr_blks; |
1222 | } | 1218 | } |
1223 | nr_sects = 0; | 1219 | nr_blks = 0; |
1224 | start_new_extent: | 1220 | start_new_extent: |
1225 | start = blk; | 1221 | start = blk; |
1226 | } | 1222 | } |
1227 | nr_sects += sects_per_blk; | 1223 | nr_blks++; |
1228 | } | 1224 | } |
1229 | diff >>= 2; | 1225 | diff >>= 2; |
1230 | blk += sects_per_blk; | 1226 | blk++; |
1231 | } | 1227 | } |
1232 | } | 1228 | } |
1233 | if (nr_sects >= minlen) { | 1229 | if (nr_blks >= minlen) { |
1234 | rv = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS, 0); | 1230 | rv = sb_issue_discard(sb, start, nr_blks, GFP_NOFS, 0); |
1235 | if (rv) | 1231 | if (rv) |
1236 | goto fail; | 1232 | goto fail; |
1237 | trimmed += nr_sects; | 1233 | trimmed += nr_blks; |
1238 | } | 1234 | } |
1239 | if (ptrimmed) | 1235 | if (ptrimmed) |
1240 | *ptrimmed = trimmed; | 1236 | *ptrimmed = trimmed; |
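
In gfs2_rgrp_send_discards() the extent bookkeeping switches from 512-byte sectors to filesystem blocks: the manual sects_per_blk scaling is dropped and extents are handed to sb_issue_discard(), which takes block numbers relative to the superblock's block size, so nr_blks, minlen and trimmed are all counted in blocks now. Roughly, sb_issue_discard() is a thin wrapper that performs the block-to-sector conversion itself; a sketch of that shape, not the exact mainline definition:

#include <linux/blkdev.h>
#include <linux/fs.h>

/* Sketch: scale filesystem blocks to 512-byte sectors, then discard. */
static inline int sb_issue_discard_sketch(struct super_block *sb,
					  sector_t block, sector_t nr_blocks,
					  gfp_t gfp_mask, unsigned long flags)
{
	unsigned int shift = sb->s_blocksize_bits - 9;	/* 9 = log2(512) */

	return blkdev_issue_discard(sb->s_bdev, block << shift,
				    nr_blocks << shift, gfp_mask, flags);
}
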
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c index a94f0f779d5e..fe0a76213d9e 100644 --- a/fs/hfsplus/extents.c +++ b/fs/hfsplus/extents.c | |||
@@ -533,7 +533,7 @@ void hfsplus_file_truncate(struct inode *inode) | |||
533 | struct address_space *mapping = inode->i_mapping; | 533 | struct address_space *mapping = inode->i_mapping; |
534 | struct page *page; | 534 | struct page *page; |
535 | void *fsdata; | 535 | void *fsdata; |
536 | u32 size = inode->i_size; | 536 | loff_t size = inode->i_size; |
537 | 537 | ||
538 | res = pagecache_write_begin(NULL, mapping, size, 0, | 538 | res = pagecache_write_begin(NULL, mapping, size, 0, |
539 | AOP_FLAG_UNINTERRUPTIBLE, | 539 | AOP_FLAG_UNINTERRUPTIBLE, |
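
The hfsplus change is a truncation fix: inode->i_size is a loff_t (signed 64-bit), and storing it in a u32 before handing it to pagecache_write_begin() silently wrapped any size of 4 GiB or more. A quick worked example of the wraparound the wider type avoids:

#include <linux/types.h>

/* Illustration only: what a u32 local does to a 5 GiB i_size. */
static u32 truncate_example(void)
{
	loff_t size = 5ULL << 30;	/* 5 GiB = 5368709120 */
	u32 wrapped = size;		/* low 32 bits only: 1073741824 (1 GiB) */

	return wrapped;
}
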
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 84e3d856e91d..523464e62849 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c | |||
@@ -110,7 +110,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) | |||
110 | * way when do_mmap_pgoff unwinds (may be important on powerpc | 110 | * way when do_mmap_pgoff unwinds (may be important on powerpc |
111 | * and ia64). | 111 | * and ia64). |
112 | */ | 112 | */ |
113 | vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND | VM_DONTDUMP; | 113 | vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND; |
114 | vma->vm_ops = &hugetlb_vm_ops; | 114 | vma->vm_ops = &hugetlb_vm_ops; |
115 | 115 | ||
116 | if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT)) | 116 | if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT)) |
diff --git a/fs/inode.c b/fs/inode.c index f5f7c06c36fb..a898b3d43ccf 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -725,7 +725,7 @@ void prune_icache_sb(struct super_block *sb, int nr_to_scan) | |||
725 | * inode to the back of the list so we don't spin on it. | 725 | * inode to the back of the list so we don't spin on it. |
726 | */ | 726 | */ |
727 | if (!spin_trylock(&inode->i_lock)) { | 727 | if (!spin_trylock(&inode->i_lock)) { |
728 | list_move_tail(&inode->i_lru, &sb->s_inode_lru); | 728 | list_move(&inode->i_lru, &sb->s_inode_lru); |
729 | continue; | 729 | continue; |
730 | } | 730 | } |
731 | 731 | ||
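
The prune_icache_sb() scan in kernels of this vintage takes its candidates from the tail of sb->s_inode_lru (the .prev entry), so when the trylock on i_lock failed, list_move_tail() put the inode straight back where the next iteration would pick it up again. list_move() parks it at the head instead, which is the true back of the scan order and matches the intent of the comment above the trylock. A simplified sketch of that scan direction, not the full reclaim logic or its locking:

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/spinlock.h>

static void lru_scan_sketch(struct super_block *sb, int nr_to_scan)
{
	int nr;

	for (nr = nr_to_scan; nr > 0; nr--) {
		struct inode *inode;

		if (list_empty(&sb->s_inode_lru))
			break;
		/* candidates come off the tail ... */
		inode = list_entry(sb->s_inode_lru.prev, struct inode, i_lru);

		if (!spin_trylock(&inode->i_lock)) {
			/* ... so "skip for now" means moving to the head */
			list_move(&inode->i_lru, &sb->s_inode_lru);
			continue;
		}
		/* real code decides here whether to rotate or evict */
		spin_unlock(&inode->i_lock);
	}
}
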
diff --git a/fs/namespace.c b/fs/namespace.c index d581e45c0a9f..341d3f564082 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -1690,7 +1690,7 @@ static int do_loopback(struct path *path, const char *old_name, | |||
1690 | 1690 | ||
1691 | if (IS_ERR(mnt)) { | 1691 | if (IS_ERR(mnt)) { |
1692 | err = PTR_ERR(mnt); | 1692 | err = PTR_ERR(mnt); |
1693 | goto out; | 1693 | goto out2; |
1694 | } | 1694 | } |
1695 | 1695 | ||
1696 | err = graft_tree(mnt, path); | 1696 | err = graft_tree(mnt, path); |
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index ac4fc9a8fdbc..66b6664dcd4c 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c | |||
@@ -300,7 +300,7 @@ int nfs40_walk_client_list(struct nfs_client *new, | |||
300 | struct rpc_cred *cred) | 300 | struct rpc_cred *cred) |
301 | { | 301 | { |
302 | struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id); | 302 | struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id); |
303 | struct nfs_client *pos, *n, *prev = NULL; | 303 | struct nfs_client *pos, *prev = NULL; |
304 | struct nfs4_setclientid_res clid = { | 304 | struct nfs4_setclientid_res clid = { |
305 | .clientid = new->cl_clientid, | 305 | .clientid = new->cl_clientid, |
306 | .confirm = new->cl_confirm, | 306 | .confirm = new->cl_confirm, |
@@ -308,10 +308,23 @@ int nfs40_walk_client_list(struct nfs_client *new, | |||
308 | int status = -NFS4ERR_STALE_CLIENTID; | 308 | int status = -NFS4ERR_STALE_CLIENTID; |
309 | 309 | ||
310 | spin_lock(&nn->nfs_client_lock); | 310 | spin_lock(&nn->nfs_client_lock); |
311 | list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) { | 311 | list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) { |
312 | /* If "pos" isn't marked ready, we can't trust the | 312 | /* If "pos" isn't marked ready, we can't trust the |
313 | * remaining fields in "pos" */ | 313 | * remaining fields in "pos" */ |
314 | if (pos->cl_cons_state < NFS_CS_READY) | 314 | if (pos->cl_cons_state > NFS_CS_READY) { |
315 | atomic_inc(&pos->cl_count); | ||
316 | spin_unlock(&nn->nfs_client_lock); | ||
317 | |||
318 | if (prev) | ||
319 | nfs_put_client(prev); | ||
320 | prev = pos; | ||
321 | |||
322 | status = nfs_wait_client_init_complete(pos); | ||
323 | spin_lock(&nn->nfs_client_lock); | ||
324 | if (status < 0) | ||
325 | continue; | ||
326 | } | ||
327 | if (pos->cl_cons_state != NFS_CS_READY) | ||
315 | continue; | 328 | continue; |
316 | 329 | ||
317 | if (pos->rpc_ops != new->rpc_ops) | 330 | if (pos->rpc_ops != new->rpc_ops) |
@@ -423,16 +436,16 @@ int nfs41_walk_client_list(struct nfs_client *new, | |||
423 | struct rpc_cred *cred) | 436 | struct rpc_cred *cred) |
424 | { | 437 | { |
425 | struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id); | 438 | struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id); |
426 | struct nfs_client *pos, *n, *prev = NULL; | 439 | struct nfs_client *pos, *prev = NULL; |
427 | int status = -NFS4ERR_STALE_CLIENTID; | 440 | int status = -NFS4ERR_STALE_CLIENTID; |
428 | 441 | ||
429 | spin_lock(&nn->nfs_client_lock); | 442 | spin_lock(&nn->nfs_client_lock); |
430 | list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) { | 443 | list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) { |
431 | /* If "pos" isn't marked ready, we can't trust the | 444 | /* If "pos" isn't marked ready, we can't trust the |
432 | * remaining fields in "pos", especially the client | 445 | * remaining fields in "pos", especially the client |
433 | * ID and serverowner fields. Wait for CREATE_SESSION | 446 | * ID and serverowner fields. Wait for CREATE_SESSION |
434 | * to finish. */ | 447 | * to finish. */ |
435 | if (pos->cl_cons_state < NFS_CS_READY) { | 448 | if (pos->cl_cons_state > NFS_CS_READY) { |
436 | atomic_inc(&pos->cl_count); | 449 | atomic_inc(&pos->cl_count); |
437 | spin_unlock(&nn->nfs_client_lock); | 450 | spin_unlock(&nn->nfs_client_lock); |
438 | 451 | ||
@@ -440,18 +453,17 @@ int nfs41_walk_client_list(struct nfs_client *new, | |||
440 | nfs_put_client(prev); | 453 | nfs_put_client(prev); |
441 | prev = pos; | 454 | prev = pos; |
442 | 455 | ||
443 | nfs4_schedule_lease_recovery(pos); | ||
444 | status = nfs_wait_client_init_complete(pos); | 456 | status = nfs_wait_client_init_complete(pos); |
445 | if (status < 0) { | 457 | if (status == 0) { |
446 | nfs_put_client(pos); | 458 | nfs4_schedule_lease_recovery(pos); |
447 | spin_lock(&nn->nfs_client_lock); | 459 | status = nfs4_wait_clnt_recover(pos); |
448 | continue; | ||
449 | } | 460 | } |
450 | status = pos->cl_cons_state; | ||
451 | spin_lock(&nn->nfs_client_lock); | 461 | spin_lock(&nn->nfs_client_lock); |
452 | if (status < 0) | 462 | if (status < 0) |
453 | continue; | 463 | continue; |
454 | } | 464 | } |
465 | if (pos->cl_cons_state != NFS_CS_READY) | ||
466 | continue; | ||
455 | 467 | ||
456 | if (pos->rpc_ops != new->rpc_ops) | 468 | if (pos->rpc_ops != new->rpc_ops) |
457 | continue; | 469 | continue; |
@@ -469,17 +481,18 @@ int nfs41_walk_client_list(struct nfs_client *new, | |||
469 | continue; | 481 | continue; |
470 | 482 | ||
471 | atomic_inc(&pos->cl_count); | 483 | atomic_inc(&pos->cl_count); |
472 | spin_unlock(&nn->nfs_client_lock); | 484 | *result = pos; |
485 | status = 0; | ||
473 | dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n", | 486 | dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n", |
474 | __func__, pos, atomic_read(&pos->cl_count)); | 487 | __func__, pos, atomic_read(&pos->cl_count)); |
475 | 488 | break; | |
476 | *result = pos; | ||
477 | return 0; | ||
478 | } | 489 | } |
479 | 490 | ||
480 | /* No matching nfs_client found. */ | 491 | /* No matching nfs_client found. */ |
481 | spin_unlock(&nn->nfs_client_lock); | 492 | spin_unlock(&nn->nfs_client_lock); |
482 | dprintk("NFS: <-- %s status = %d\n", __func__, status); | 493 | dprintk("NFS: <-- %s status = %d\n", __func__, status); |
494 | if (prev) | ||
495 | nfs_put_client(prev); | ||
483 | return status; | 496 | return status; |
484 | } | 497 | } |
485 | #endif /* CONFIG_NFS_V4_1 */ | 498 | #endif /* CONFIG_NFS_V4_1 */ |
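
In both nfs40_walk_client_list() and nfs41_walk_client_list() the walkers no longer skip an nfs_client that is still initialising (cl_cons_state > NFS_CS_READY); they pin it with a reference, drop nn->nfs_client_lock, wait for initialisation to finish, then re-take the lock and re-check the state. The held reference is what keeps the entry on the list across the unlock, which is why plain list_for_each_entry() now suffices where the _safe variant was used before. Pieced together from the hunks above, the shared shape of the new loop is (a fragment that assumes the walker's existing locals pos, prev, nn and status):

	spin_lock(&nn->nfs_client_lock);
	list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
		if (pos->cl_cons_state > NFS_CS_READY) {
			atomic_inc(&pos->cl_count);	/* pin across unlock */
			spin_unlock(&nn->nfs_client_lock);

			if (prev)
				nfs_put_client(prev);	/* drop previous pin */
			prev = pos;

			status = nfs_wait_client_init_complete(pos);
			spin_lock(&nn->nfs_client_lock);
			if (status < 0)
				continue;
		}
		if (pos->cl_cons_state != NFS_CS_READY)
			continue;
		/* ... compare pos against "new" and maybe return it ... */
	}
	spin_unlock(&nn->nfs_client_lock);
	if (prev)
		nfs_put_client(prev);
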
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 26431cf62ddb..0ad025eb523b 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -1046,6 +1046,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) | |||
1046 | /* Save the delegation */ | 1046 | /* Save the delegation */ |
1047 | nfs4_stateid_copy(&stateid, &delegation->stateid); | 1047 | nfs4_stateid_copy(&stateid, &delegation->stateid); |
1048 | rcu_read_unlock(); | 1048 | rcu_read_unlock(); |
1049 | nfs_release_seqid(opendata->o_arg.seqid); | ||
1049 | ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode); | 1050 | ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode); |
1050 | if (ret != 0) | 1051 | if (ret != 0) |
1051 | goto out; | 1052 | goto out; |
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 6ace365c6334..d41a3518509f 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c | |||
@@ -1886,7 +1886,13 @@ again: | |||
1886 | status = PTR_ERR(clnt); | 1886 | status = PTR_ERR(clnt); |
1887 | break; | 1887 | break; |
1888 | } | 1888 | } |
1889 | clp->cl_rpcclient = clnt; | 1889 | /* Note: this is safe because we haven't yet marked the |
1890 | * client as ready, so we are the only user of | ||
1891 | * clp->cl_rpcclient | ||
1892 | */ | ||
1893 | clnt = xchg(&clp->cl_rpcclient, clnt); | ||
1894 | rpc_shutdown_client(clnt); | ||
1895 | clnt = clp->cl_rpcclient; | ||
1890 | goto again; | 1896 | goto again; |
1891 | 1897 | ||
1892 | case -NFS4ERR_MINOR_VERS_MISMATCH: | 1898 | case -NFS4ERR_MINOR_VERS_MISMATCH: |
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 01168865dd37..a2720071f282 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c | |||
@@ -264,7 +264,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, | |||
264 | iattr->ia_valid |= ATTR_SIZE; | 264 | iattr->ia_valid |= ATTR_SIZE; |
265 | } | 265 | } |
266 | if (bmval[0] & FATTR4_WORD0_ACL) { | 266 | if (bmval[0] & FATTR4_WORD0_ACL) { |
267 | int nace; | 267 | u32 nace; |
268 | struct nfs4_ace *ace; | 268 | struct nfs4_ace *ace; |
269 | 269 | ||
270 | READ_BUF(4); len += 4; | 270 | READ_BUF(4); len += 4; |
diff --git a/fs/proc/array.c b/fs/proc/array.c index f7ed9ee46eb9..cbd0f1b324b9 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c | |||
@@ -143,6 +143,7 @@ static const char * const task_state_array[] = { | |||
143 | "x (dead)", /* 64 */ | 143 | "x (dead)", /* 64 */ |
144 | "K (wakekill)", /* 128 */ | 144 | "K (wakekill)", /* 128 */ |
145 | "W (waking)", /* 256 */ | 145 | "W (waking)", /* 256 */ |
146 | "P (parked)", /* 512 */ | ||
146 | }; | 147 | }; |
147 | 148 | ||
148 | static inline const char *get_task_state(struct task_struct *tsk) | 149 | static inline const char *get_task_state(struct task_struct *tsk) |
diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 4b3b3ffb52f1..21e1a8f1659d 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c | |||
@@ -755,37 +755,8 @@ void pde_put(struct proc_dir_entry *pde) | |||
755 | free_proc_entry(pde); | 755 | free_proc_entry(pde); |
756 | } | 756 | } |
757 | 757 | ||
758 | /* | 758 | static void entry_rundown(struct proc_dir_entry *de) |
759 | * Remove a /proc entry and free it if it's not currently in use. | ||
760 | */ | ||
761 | void remove_proc_entry(const char *name, struct proc_dir_entry *parent) | ||
762 | { | 759 | { |
763 | struct proc_dir_entry **p; | ||
764 | struct proc_dir_entry *de = NULL; | ||
765 | const char *fn = name; | ||
766 | unsigned int len; | ||
767 | |||
768 | spin_lock(&proc_subdir_lock); | ||
769 | if (__xlate_proc_name(name, &parent, &fn) != 0) { | ||
770 | spin_unlock(&proc_subdir_lock); | ||
771 | return; | ||
772 | } | ||
773 | len = strlen(fn); | ||
774 | |||
775 | for (p = &parent->subdir; *p; p=&(*p)->next ) { | ||
776 | if (proc_match(len, fn, *p)) { | ||
777 | de = *p; | ||
778 | *p = de->next; | ||
779 | de->next = NULL; | ||
780 | break; | ||
781 | } | ||
782 | } | ||
783 | spin_unlock(&proc_subdir_lock); | ||
784 | if (!de) { | ||
785 | WARN(1, "name '%s'\n", name); | ||
786 | return; | ||
787 | } | ||
788 | |||
789 | spin_lock(&de->pde_unload_lock); | 760 | spin_lock(&de->pde_unload_lock); |
790 | /* | 761 | /* |
791 | * Stop accepting new callers into module. If you're | 762 | * Stop accepting new callers into module. If you're |
@@ -817,6 +788,40 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent) | |||
817 | spin_lock(&de->pde_unload_lock); | 788 | spin_lock(&de->pde_unload_lock); |
818 | } | 789 | } |
819 | spin_unlock(&de->pde_unload_lock); | 790 | spin_unlock(&de->pde_unload_lock); |
791 | } | ||
792 | |||
793 | /* | ||
794 | * Remove a /proc entry and free it if it's not currently in use. | ||
795 | */ | ||
796 | void remove_proc_entry(const char *name, struct proc_dir_entry *parent) | ||
797 | { | ||
798 | struct proc_dir_entry **p; | ||
799 | struct proc_dir_entry *de = NULL; | ||
800 | const char *fn = name; | ||
801 | unsigned int len; | ||
802 | |||
803 | spin_lock(&proc_subdir_lock); | ||
804 | if (__xlate_proc_name(name, &parent, &fn) != 0) { | ||
805 | spin_unlock(&proc_subdir_lock); | ||
806 | return; | ||
807 | } | ||
808 | len = strlen(fn); | ||
809 | |||
810 | for (p = &parent->subdir; *p; p=&(*p)->next ) { | ||
811 | if (proc_match(len, fn, *p)) { | ||
812 | de = *p; | ||
813 | *p = de->next; | ||
814 | de->next = NULL; | ||
815 | break; | ||
816 | } | ||
817 | } | ||
818 | spin_unlock(&proc_subdir_lock); | ||
819 | if (!de) { | ||
820 | WARN(1, "name '%s'\n", name); | ||
821 | return; | ||
822 | } | ||
823 | |||
824 | entry_rundown(de); | ||
820 | 825 | ||
821 | if (S_ISDIR(de->mode)) | 826 | if (S_ISDIR(de->mode)) |
822 | parent->nlink--; | 827 | parent->nlink--; |
@@ -827,3 +832,57 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent) | |||
827 | pde_put(de); | 832 | pde_put(de); |
828 | } | 833 | } |
829 | EXPORT_SYMBOL(remove_proc_entry); | 834 | EXPORT_SYMBOL(remove_proc_entry); |
835 | |||
836 | int remove_proc_subtree(const char *name, struct proc_dir_entry *parent) | ||
837 | { | ||
838 | struct proc_dir_entry **p; | ||
839 | struct proc_dir_entry *root = NULL, *de, *next; | ||
840 | const char *fn = name; | ||
841 | unsigned int len; | ||
842 | |||
843 | spin_lock(&proc_subdir_lock); | ||
844 | if (__xlate_proc_name(name, &parent, &fn) != 0) { | ||
845 | spin_unlock(&proc_subdir_lock); | ||
846 | return -ENOENT; | ||
847 | } | ||
848 | len = strlen(fn); | ||
849 | |||
850 | for (p = &parent->subdir; *p; p=&(*p)->next ) { | ||
851 | if (proc_match(len, fn, *p)) { | ||
852 | root = *p; | ||
853 | *p = root->next; | ||
854 | root->next = NULL; | ||
855 | break; | ||
856 | } | ||
857 | } | ||
858 | if (!root) { | ||
859 | spin_unlock(&proc_subdir_lock); | ||
860 | return -ENOENT; | ||
861 | } | ||
862 | de = root; | ||
863 | while (1) { | ||
864 | next = de->subdir; | ||
865 | if (next) { | ||
866 | de->subdir = next->next; | ||
867 | next->next = NULL; | ||
868 | de = next; | ||
869 | continue; | ||
870 | } | ||
871 | spin_unlock(&proc_subdir_lock); | ||
872 | |||
873 | entry_rundown(de); | ||
874 | next = de->parent; | ||
875 | if (S_ISDIR(de->mode)) | ||
876 | next->nlink--; | ||
877 | de->nlink = 0; | ||
878 | if (de == root) | ||
879 | break; | ||
880 | pde_put(de); | ||
881 | |||
882 | spin_lock(&proc_subdir_lock); | ||
883 | de = next; | ||
884 | } | ||
885 | pde_put(root); | ||
886 | return 0; | ||
887 | } | ||
888 | EXPORT_SYMBOL(remove_proc_subtree); | ||
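
The proc changes factor the shutdown of a live entry (waiting out in-flight users under pde_unload_lock) into entry_rundown(), then add remove_proc_subtree(), which detaches the named entry and iteratively runs down and frees every descendant before the entry itself, returning -ENOENT if the name does not resolve. A hedged usage sketch for a driver that built a small /proc hierarchy; the "mydrv" names and the stats file_operations are hypothetical, not from the patch:

#include <linux/fs.h>
#include <linux/proc_fs.h>

static struct proc_dir_entry *mydrv_dir;		/* hypothetical */
extern const struct file_operations mydrv_stats_fops;	/* assumed to exist */

static int mydrv_proc_init(void)
{
	mydrv_dir = proc_mkdir("mydrv", NULL);
	if (!mydrv_dir)
		return -ENOMEM;
	if (!proc_create("stats", 0444, mydrv_dir, &mydrv_stats_fops)) {
		remove_proc_entry("mydrv", NULL);
		return -ENOMEM;
	}
	return 0;
}

static void mydrv_proc_exit(void)
{
	/* removes mydrv/stats and then mydrv itself in one call */
	remove_proc_subtree("mydrv", NULL);
}
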
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index c196369fe408..4cce1d9552fb 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c | |||
@@ -187,8 +187,8 @@ fill_with_dentries(void *buf, const char *name, int namelen, loff_t offset, | |||
187 | if (dbuf->count == ARRAY_SIZE(dbuf->dentries)) | 187 | if (dbuf->count == ARRAY_SIZE(dbuf->dentries)) |
188 | return -ENOSPC; | 188 | return -ENOSPC; |
189 | 189 | ||
190 | if (name[0] == '.' && (name[1] == '\0' || | 190 | if (name[0] == '.' && (namelen < 2 || |
191 | (name[1] == '.' && name[2] == '\0'))) | 191 | (namelen == 2 && name[1] == '.'))) |
192 | return 0; | 192 | return 0; |
193 | 193 | ||
194 | dentry = lookup_one_len(name, dbuf->xadir, namelen); | 194 | dentry = lookup_one_len(name, dbuf->xadir, namelen); |
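
fill_with_dentries() is a readdir callback, so the name it receives is length-delimited and not guaranteed to be NUL-terminated; the old "." / ".." check read name[1] and name[2] unconditionally. The new condition only looks at bytes covered by namelen. The same test written as a standalone helper, as a sketch, not part of the patch:

#include <linux/types.h>

/* Skip "." and ".." using the explicit length; never reads past namelen. */
static bool is_dot_or_dotdot(const char *name, int namelen)
{
	if (namelen == 1)
		return name[0] == '.';
	if (namelen == 2)
		return name[0] == '.' && name[1] == '.';
	return false;
}
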
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index ac838b844936..f21acf0ef01f 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c | |||
@@ -1568,6 +1568,12 @@ static int ubifs_remount_rw(struct ubifs_info *c) | |||
1568 | c->remounting_rw = 1; | 1568 | c->remounting_rw = 1; |
1569 | c->ro_mount = 0; | 1569 | c->ro_mount = 0; |
1570 | 1570 | ||
1571 | if (c->space_fixup) { | ||
1572 | err = ubifs_fixup_free_space(c); | ||
1573 | if (err) | ||
1574 | return err; | ||
1575 | } | ||
1576 | |||
1571 | err = check_free_space(c); | 1577 | err = check_free_space(c); |
1572 | if (err) | 1578 | if (err) |
1573 | goto out; | 1579 | goto out; |
@@ -1684,12 +1690,6 @@ static int ubifs_remount_rw(struct ubifs_info *c) | |||
1684 | err = dbg_check_space_info(c); | 1690 | err = dbg_check_space_info(c); |
1685 | } | 1691 | } |
1686 | 1692 | ||
1687 | if (c->space_fixup) { | ||
1688 | err = ubifs_fixup_free_space(c); | ||
1689 | if (err) | ||
1690 | goto out; | ||
1691 | } | ||
1692 | |||
1693 | mutex_unlock(&c->umount_mutex); | 1693 | mutex_unlock(&c->umount_mutex); |
1694 | return err; | 1694 | return err; |
1695 | 1695 | ||
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 25f01d0bc149..b1b1fa6ffffe 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h | |||
@@ -99,7 +99,12 @@ struct mmu_gather { | |||
99 | unsigned int need_flush : 1, /* Did free PTEs */ | 99 | unsigned int need_flush : 1, /* Did free PTEs */ |
100 | fast_mode : 1; /* No batching */ | 100 | fast_mode : 1; /* No batching */ |
101 | 101 | ||
102 | unsigned int fullmm; | 102 | /* we are in the middle of an operation to clear |
103 | * a full mm and can make some optimizations */ | ||
104 | unsigned int fullmm : 1, | ||
105 | /* we have performed an operation which | ||
106 | * requires a complete flush of the tlb */ | ||
107 | need_flush_all : 1; | ||
103 | 108 | ||
104 | struct mmu_gather_batch *active; | 109 | struct mmu_gather_batch *active; |
105 | struct mmu_gather_batch local; | 110 | struct mmu_gather_batch local; |
diff --git a/include/linux/ata.h b/include/linux/ata.h index 8f7a3d68371a..ee0bd9524055 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h | |||
@@ -954,7 +954,7 @@ static inline int atapi_cdb_len(const u16 *dev_id) | |||
954 | } | 954 | } |
955 | } | 955 | } |
956 | 956 | ||
957 | static inline bool atapi_command_packet_set(const u16 *dev_id) | 957 | static inline int atapi_command_packet_set(const u16 *dev_id) |
958 | { | 958 | { |
959 | return (dev_id[ATA_ID_CONFIG] >> 8) & 0x1f; | 959 | return (dev_id[ATA_ID_CONFIG] >> 8) & 0x1f; |
960 | } | 960 | } |
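
atapi_command_packet_set() returns the 5-bit "command packet set" field from word 0 of the IDENTIFY PACKET data, which encodes a device type (0..31), so declaring the return type bool collapsed every non-zero type to 1 and defeated callers that compare the result against a particular type value; the int return restores the distinction. A small illustration of the collapse (identifiers made up):

#include <linux/types.h>

static bool field_as_bool(u16 word)	{ return (word >> 8) & 0x1f; }
static int  field_as_int(u16 word)	{ return (word >> 8) & 0x1f; }

/* For word 0x0100 the field is 1; for word 0x0500 it is 5.  The bool
 * version returns true for both, so a check like "== 1" matches the
 * wrong devices, while the int version keeps them distinct. */
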
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 0ea61e07a91c..7c2e030e72f1 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h | |||
@@ -12,7 +12,6 @@ | |||
12 | 12 | ||
13 | struct blk_trace { | 13 | struct blk_trace { |
14 | int trace_state; | 14 | int trace_state; |
15 | bool rq_based; | ||
16 | struct rchan *rchan; | 15 | struct rchan *rchan; |
17 | unsigned long __percpu *sequence; | 16 | unsigned long __percpu *sequence; |
18 | unsigned char __percpu *msg_data; | 17 | unsigned char __percpu *msg_data; |
diff --git a/include/linux/capability.h b/include/linux/capability.h index 98503b792369..d9a4f7f40f32 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h | |||
@@ -35,6 +35,7 @@ struct cpu_vfs_cap_data { | |||
35 | #define _KERNEL_CAP_T_SIZE (sizeof(kernel_cap_t)) | 35 | #define _KERNEL_CAP_T_SIZE (sizeof(kernel_cap_t)) |
36 | 36 | ||
37 | 37 | ||
38 | struct file; | ||
38 | struct inode; | 39 | struct inode; |
39 | struct dentry; | 40 | struct dentry; |
40 | struct user_namespace; | 41 | struct user_namespace; |
@@ -211,6 +212,7 @@ extern bool capable(int cap); | |||
211 | extern bool ns_capable(struct user_namespace *ns, int cap); | 212 | extern bool ns_capable(struct user_namespace *ns, int cap); |
212 | extern bool nsown_capable(int cap); | 213 | extern bool nsown_capable(int cap); |
213 | extern bool inode_capable(const struct inode *inode, int cap); | 214 | extern bool inode_capable(const struct inode *inode, int cap); |
215 | extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap); | ||
214 | 216 | ||
215 | /* audit system wants to get cap info from files as well */ | 217 | /* audit system wants to get cap info from files as well */ |
216 | extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); | 218 | extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); |
diff --git a/include/linux/compat.h b/include/linux/compat.h index 76a87fb57ac2..377cd8c3395e 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h | |||
@@ -141,11 +141,11 @@ typedef struct { | |||
141 | } compat_sigset_t; | 141 | } compat_sigset_t; |
142 | 142 | ||
143 | struct compat_sigaction { | 143 | struct compat_sigaction { |
144 | #ifndef __ARCH_HAS_ODD_SIGACTION | 144 | #ifndef __ARCH_HAS_IRIX_SIGACTION |
145 | compat_uptr_t sa_handler; | 145 | compat_uptr_t sa_handler; |
146 | compat_ulong_t sa_flags; | 146 | compat_ulong_t sa_flags; |
147 | #else | 147 | #else |
148 | compat_ulong_t sa_flags; | 148 | compat_uint_t sa_flags; |
149 | compat_uptr_t sa_handler; | 149 | compat_uptr_t sa_handler; |
150 | #endif | 150 | #endif |
151 | #ifdef __ARCH_HAS_SA_RESTORER | 151 | #ifdef __ARCH_HAS_SA_RESTORER |
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index e83ef39b3bea..fe8c4476f7e4 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h | |||
@@ -213,7 +213,7 @@ struct devfreq_simple_ondemand_data { | |||
213 | #endif | 213 | #endif |
214 | 214 | ||
215 | #else /* !CONFIG_PM_DEVFREQ */ | 215 | #else /* !CONFIG_PM_DEVFREQ */ |
216 | static struct devfreq *devfreq_add_device(struct device *dev, | 216 | static inline struct devfreq *devfreq_add_device(struct device *dev, |
217 | struct devfreq_dev_profile *profile, | 217 | struct devfreq_dev_profile *profile, |
218 | const char *governor_name, | 218 | const char *governor_name, |
219 | void *data) | 219 | void *data) |
@@ -221,34 +221,34 @@ static struct devfreq *devfreq_add_device(struct device *dev, | |||
221 | return NULL; | 221 | return NULL; |
222 | } | 222 | } |
223 | 223 | ||
224 | static int devfreq_remove_device(struct devfreq *devfreq) | 224 | static inline int devfreq_remove_device(struct devfreq *devfreq) |
225 | { | 225 | { |
226 | return 0; | 226 | return 0; |
227 | } | 227 | } |
228 | 228 | ||
229 | static int devfreq_suspend_device(struct devfreq *devfreq) | 229 | static inline int devfreq_suspend_device(struct devfreq *devfreq) |
230 | { | 230 | { |
231 | return 0; | 231 | return 0; |
232 | } | 232 | } |
233 | 233 | ||
234 | static int devfreq_resume_device(struct devfreq *devfreq) | 234 | static inline int devfreq_resume_device(struct devfreq *devfreq) |
235 | { | 235 | { |
236 | return 0; | 236 | return 0; |
237 | } | 237 | } |
238 | 238 | ||
239 | static struct opp *devfreq_recommended_opp(struct device *dev, | 239 | static inline struct opp *devfreq_recommended_opp(struct device *dev, |
240 | unsigned long *freq, u32 flags) | 240 | unsigned long *freq, u32 flags) |
241 | { | 241 | { |
242 | return -EINVAL; | 242 | return ERR_PTR(-EINVAL); |
243 | } | 243 | } |
244 | 244 | ||
245 | static int devfreq_register_opp_notifier(struct device *dev, | 245 | static inline int devfreq_register_opp_notifier(struct device *dev, |
246 | struct devfreq *devfreq) | 246 | struct devfreq *devfreq) |
247 | { | 247 | { |
248 | return -EINVAL; | 248 | return -EINVAL; |
249 | } | 249 | } |
250 | 250 | ||
251 | static int devfreq_unregister_opp_notifier(struct device *dev, | 251 | static inline int devfreq_unregister_opp_notifier(struct device *dev, |
252 | struct devfreq *devfreq) | 252 | struct devfreq *devfreq) |
253 | { | 253 | { |
254 | return -EINVAL; | 254 | return -EINVAL; |
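
Two problems in the !CONFIG_PM_DEVFREQ stubs are fixed here: plain static functions in a header give every including translation unit its own unused copy (with the matching "defined but not used" warnings), and devfreq_recommended_opp() returned a bare -EINVAL where callers expect a struct opp pointer. Making the stubs static inline and using ERR_PTR(-EINVAL) addresses both. The general pattern for config-dependent header stubs, sketched with a made-up subsystem:

#include <linux/err.h>

struct foo;				/* hypothetical handle type */

#ifdef CONFIG_FOO			/* hypothetical option */
struct foo *foo_get(int id);
int foo_enable(struct foo *f);
#else
/* static inline: no per-file copy, no unused-function warning */
static inline struct foo *foo_get(int id)
{
	return ERR_PTR(-ENODEV);	/* pointer-returning stubs use ERR_PTR */
}

static inline int foo_enable(struct foo *f)
{
	return -ENODEV;
}
#endif
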
diff --git a/include/linux/efi.h b/include/linux/efi.h index 2fc816682714..3f7257f1f5e8 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h | |||
@@ -333,6 +333,7 @@ typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **capsules, | |||
333 | unsigned long count, | 333 | unsigned long count, |
334 | u64 *max_size, | 334 | u64 *max_size, |
335 | int *reset_type); | 335 | int *reset_type); |
336 | typedef efi_status_t efi_query_variable_store_t(u32 attributes, unsigned long size); | ||
336 | 337 | ||
337 | /* | 338 | /* |
338 | * EFI Configuration Table and GUID definitions | 339 | * EFI Configuration Table and GUID definitions |
@@ -575,9 +576,15 @@ extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if pos | |||
575 | #ifdef CONFIG_X86 | 576 | #ifdef CONFIG_X86 |
576 | extern void efi_late_init(void); | 577 | extern void efi_late_init(void); |
577 | extern void efi_free_boot_services(void); | 578 | extern void efi_free_boot_services(void); |
579 | extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size); | ||
578 | #else | 580 | #else |
579 | static inline void efi_late_init(void) {} | 581 | static inline void efi_late_init(void) {} |
580 | static inline void efi_free_boot_services(void) {} | 582 | static inline void efi_free_boot_services(void) {} |
583 | |||
584 | static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned long size) | ||
585 | { | ||
586 | return EFI_SUCCESS; | ||
587 | } | ||
581 | #endif | 588 | #endif |
582 | extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr); | 589 | extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr); |
583 | extern u64 efi_get_iobase (void); | 590 | extern u64 efi_get_iobase (void); |
@@ -725,51 +732,6 @@ static inline void memrange_efi_to_native(u64 *addr, u64 *npages) | |||
725 | *addr &= PAGE_MASK; | 732 | *addr &= PAGE_MASK; |
726 | } | 733 | } |
727 | 734 | ||
728 | /* Return the number of unicode characters in data */ | ||
729 | static inline unsigned long | ||
730 | utf16_strnlen(efi_char16_t *s, size_t maxlength) | ||
731 | { | ||
732 | unsigned long length = 0; | ||
733 | |||
734 | while (*s++ != 0 && length < maxlength) | ||
735 | length++; | ||
736 | return length; | ||
737 | } | ||
738 | |||
739 | static inline unsigned long | ||
740 | utf16_strlen(efi_char16_t *s) | ||
741 | { | ||
742 | return utf16_strnlen(s, ~0UL); | ||
743 | } | ||
744 | |||
745 | /* | ||
746 | * Return the number of bytes is the length of this string | ||
747 | * Note: this is NOT the same as the number of unicode characters | ||
748 | */ | ||
749 | static inline unsigned long | ||
750 | utf16_strsize(efi_char16_t *data, unsigned long maxlength) | ||
751 | { | ||
752 | return utf16_strnlen(data, maxlength/sizeof(efi_char16_t)) * sizeof(efi_char16_t); | ||
753 | } | ||
754 | |||
755 | static inline int | ||
756 | utf16_strncmp(const efi_char16_t *a, const efi_char16_t *b, size_t len) | ||
757 | { | ||
758 | while (1) { | ||
759 | if (len == 0) | ||
760 | return 0; | ||
761 | if (*a < *b) | ||
762 | return -1; | ||
763 | if (*a > *b) | ||
764 | return 1; | ||
765 | if (*a == 0) /* implies *b == 0 */ | ||
766 | return 0; | ||
767 | a++; | ||
768 | b++; | ||
769 | len--; | ||
770 | } | ||
771 | } | ||
772 | |||
773 | /* | 735 | /* |
774 | * EFI Variable support. | 736 | * EFI Variable support. |
775 | * | 737 | * |
@@ -781,7 +743,7 @@ struct efivar_operations { | |||
781 | efi_get_variable_t *get_variable; | 743 | efi_get_variable_t *get_variable; |
782 | efi_get_next_variable_t *get_next_variable; | 744 | efi_get_next_variable_t *get_next_variable; |
783 | efi_set_variable_t *set_variable; | 745 | efi_set_variable_t *set_variable; |
784 | efi_query_variable_info_t *query_variable_info; | 746 | efi_query_variable_store_t *query_variable_store; |
785 | }; | 747 | }; |
786 | 748 | ||
787 | struct efivars { | 749 | struct efivars { |
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index e5ca8ef50e9b..52da2a250795 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h | |||
@@ -89,6 +89,7 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip, | |||
89 | * that the call back has its own recursion protection. If it does | 89 | * that the call back has its own recursion protection. If it does |
90 | * not set this, then the ftrace infrastructure will add recursion | 90 | * not set this, then the ftrace infrastructure will add recursion |
91 | * protection for the caller. | 91 | * protection for the caller. |
92 | * STUB - The ftrace_ops is just a place holder. | ||
92 | */ | 93 | */ |
93 | enum { | 94 | enum { |
94 | FTRACE_OPS_FL_ENABLED = 1 << 0, | 95 | FTRACE_OPS_FL_ENABLED = 1 << 0, |
@@ -98,6 +99,7 @@ enum { | |||
98 | FTRACE_OPS_FL_SAVE_REGS = 1 << 4, | 99 | FTRACE_OPS_FL_SAVE_REGS = 1 << 4, |
99 | FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 5, | 100 | FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 5, |
100 | FTRACE_OPS_FL_RECURSION_SAFE = 1 << 6, | 101 | FTRACE_OPS_FL_RECURSION_SAFE = 1 << 6, |
102 | FTRACE_OPS_FL_STUB = 1 << 7, | ||
101 | }; | 103 | }; |
102 | 104 | ||
103 | struct ftrace_ops { | 105 | struct ftrace_ops { |
@@ -394,7 +396,6 @@ ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf, | |||
394 | size_t cnt, loff_t *ppos); | 396 | size_t cnt, loff_t *ppos); |
395 | ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf, | 397 | ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf, |
396 | size_t cnt, loff_t *ppos); | 398 | size_t cnt, loff_t *ppos); |
397 | loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int whence); | ||
398 | int ftrace_regex_release(struct inode *inode, struct file *file); | 399 | int ftrace_regex_release(struct inode *inode, struct file *file); |
399 | 400 | ||
400 | void __init | 401 | void __init |
@@ -567,6 +568,8 @@ static inline int | |||
567 | ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; } | 568 | ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; } |
568 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 569 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
569 | 570 | ||
571 | loff_t ftrace_filter_lseek(struct file *file, loff_t offset, int whence); | ||
572 | |||
570 | /* totally disable ftrace - can not re-enable after this */ | 573 | /* totally disable ftrace - can not re-enable after this */ |
571 | void ftrace_kill(void); | 574 | void ftrace_kill(void); |
572 | 575 | ||
diff --git a/include/linux/kexec.h b/include/linux/kexec.h index d2e6927bbaae..d78d28a733b1 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h | |||
@@ -200,6 +200,8 @@ extern size_t vmcoreinfo_max_size; | |||
200 | 200 | ||
201 | int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, | 201 | int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, |
202 | unsigned long long *crash_size, unsigned long long *crash_base); | 202 | unsigned long long *crash_size, unsigned long long *crash_base); |
203 | int parse_crashkernel_high(char *cmdline, unsigned long long system_ram, | ||
204 | unsigned long long *crash_size, unsigned long long *crash_base); | ||
203 | int parse_crashkernel_low(char *cmdline, unsigned long long system_ram, | 205 | int parse_crashkernel_low(char *cmdline, unsigned long long system_ram, |
204 | unsigned long long *crash_size, unsigned long long *crash_base); | 206 | unsigned long long *crash_size, unsigned long long *crash_base); |
205 | int crash_shrink_memory(unsigned long new_size); | 207 | int crash_shrink_memory(unsigned long new_size); |
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index cad77fe09d77..c13958251927 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
@@ -518,7 +518,7 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, | |||
518 | int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, | 518 | int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
519 | void *data, unsigned long len); | 519 | void *data, unsigned long len); |
520 | int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, | 520 | int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
521 | gpa_t gpa); | 521 | gpa_t gpa, unsigned long len); |
522 | int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); | 522 | int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); |
523 | int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); | 523 | int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); |
524 | struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); | 524 | struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); |
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index fa7cc7244cbd..b0bcce0ddc95 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h | |||
@@ -71,6 +71,7 @@ struct gfn_to_hva_cache { | |||
71 | u64 generation; | 71 | u64 generation; |
72 | gpa_t gpa; | 72 | gpa_t gpa; |
73 | unsigned long hva; | 73 | unsigned long hva; |
74 | unsigned long len; | ||
74 | struct kvm_memory_slot *memslot; | 75 | struct kvm_memory_slot *memslot; |
75 | }; | 76 | }; |
76 | 77 | ||
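
kvm_gfn_to_hva_cache_init() now takes the length of the guest region being cached and gfn_to_hva_cache records it, so the cached gpa-to-hva translation can be validated against the whole range that kvm_write_guest_cached() will later touch, not just its first byte. Callers pass the size of the structure they intend to write; a hedged sketch of the call pattern (the record type is illustrative):

#include <linux/kvm_host.h>

struct guest_rec {			/* illustrative guest-visible record */
	u64 seq;
	u64 value;
};

static int cache_and_write(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   gpa_t gpa, struct guest_rec *rec)
{
	int ret;

	ret = kvm_gfn_to_hva_cache_init(kvm, ghc, gpa, sizeof(*rec));
	if (ret)
		return ret;
	return kvm_write_guest_cached(kvm, ghc, rec, sizeof(*rec));
}
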
diff --git a/include/linux/libata.h b/include/linux/libata.h index 91c9d109e5f1..eae7a053dc51 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -398,6 +398,7 @@ enum { | |||
398 | ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */ | 398 | ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */ |
399 | ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */ | 399 | ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */ |
400 | ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */ | 400 | ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */ |
401 | ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */ | ||
401 | 402 | ||
402 | /* DMA mask for user DMA control: User visible values; DO NOT | 403 | /* DMA mask for user DMA control: User visible values; DO NOT |
403 | renumber */ | 404 | renumber */ |
diff --git a/include/linux/mm.h b/include/linux/mm.h index e19ff30ad0a2..e2091b88d24c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -1611,6 +1611,8 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, | |||
1611 | unsigned long pfn); | 1611 | unsigned long pfn); |
1612 | int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, | 1612 | int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, |
1613 | unsigned long pfn); | 1613 | unsigned long pfn); |
1614 | int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len); | ||
1615 | |||
1614 | 1616 | ||
1615 | struct page *follow_page_mask(struct vm_area_struct *vma, | 1617 | struct page *follow_page_mask(struct vm_area_struct *vma, |
1616 | unsigned long address, unsigned int foll_flags, | 1618 | unsigned long address, unsigned int foll_flags, |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index b3d00fa4b314..6151e903eef0 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -210,9 +210,9 @@ struct netdev_hw_addr { | |||
210 | #define NETDEV_HW_ADDR_T_SLAVE 3 | 210 | #define NETDEV_HW_ADDR_T_SLAVE 3 |
211 | #define NETDEV_HW_ADDR_T_UNICAST 4 | 211 | #define NETDEV_HW_ADDR_T_UNICAST 4 |
212 | #define NETDEV_HW_ADDR_T_MULTICAST 5 | 212 | #define NETDEV_HW_ADDR_T_MULTICAST 5 |
213 | bool synced; | ||
214 | bool global_use; | 213 | bool global_use; |
215 | int refcount; | 214 | int refcount; |
215 | int synced; | ||
216 | struct rcu_head rcu_head; | 216 | struct rcu_head rcu_head; |
217 | }; | 217 | }; |
218 | 218 | ||
@@ -895,7 +895,7 @@ struct netdev_fcoe_hbainfo { | |||
895 | * | 895 | * |
896 | * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh) | 896 | * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh) |
897 | * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, | 897 | * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, |
898 | * struct net_device *dev) | 898 | * struct net_device *dev, u32 filter_mask) |
899 | * | 899 | * |
900 | * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); | 900 | * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); |
901 | * Called to change device carrier. Soft-devices (like dummy, team, etc) | 901 | * Called to change device carrier. Soft-devices (like dummy, team, etc) |
diff --git a/include/linux/netfilter/ipset/ip_set_ahash.h b/include/linux/netfilter/ipset/ip_set_ahash.h index 01d25e6fc792..0214c4c146fa 100644 --- a/include/linux/netfilter/ipset/ip_set_ahash.h +++ b/include/linux/netfilter/ipset/ip_set_ahash.h | |||
@@ -291,6 +291,7 @@ ip_set_hash_destroy(struct ip_set *set) | |||
291 | #define type_pf_data_tlist TOKEN(TYPE, PF, _data_tlist) | 291 | #define type_pf_data_tlist TOKEN(TYPE, PF, _data_tlist) |
292 | #define type_pf_data_next TOKEN(TYPE, PF, _data_next) | 292 | #define type_pf_data_next TOKEN(TYPE, PF, _data_next) |
293 | #define type_pf_data_flags TOKEN(TYPE, PF, _data_flags) | 293 | #define type_pf_data_flags TOKEN(TYPE, PF, _data_flags) |
294 | #define type_pf_data_reset_flags TOKEN(TYPE, PF, _data_reset_flags) | ||
294 | #ifdef IP_SET_HASH_WITH_NETS | 295 | #ifdef IP_SET_HASH_WITH_NETS |
295 | #define type_pf_data_match TOKEN(TYPE, PF, _data_match) | 296 | #define type_pf_data_match TOKEN(TYPE, PF, _data_match) |
296 | #else | 297 | #else |
@@ -385,9 +386,9 @@ type_pf_resize(struct ip_set *set, bool retried) | |||
385 | struct ip_set_hash *h = set->data; | 386 | struct ip_set_hash *h = set->data; |
386 | struct htable *t, *orig = h->table; | 387 | struct htable *t, *orig = h->table; |
387 | u8 htable_bits = orig->htable_bits; | 388 | u8 htable_bits = orig->htable_bits; |
388 | const struct type_pf_elem *data; | 389 | struct type_pf_elem *data; |
389 | struct hbucket *n, *m; | 390 | struct hbucket *n, *m; |
390 | u32 i, j; | 391 | u32 i, j, flags = 0; |
391 | int ret; | 392 | int ret; |
392 | 393 | ||
393 | retry: | 394 | retry: |
@@ -412,9 +413,16 @@ retry: | |||
412 | n = hbucket(orig, i); | 413 | n = hbucket(orig, i); |
413 | for (j = 0; j < n->pos; j++) { | 414 | for (j = 0; j < n->pos; j++) { |
414 | data = ahash_data(n, j); | 415 | data = ahash_data(n, j); |
416 | #ifdef IP_SET_HASH_WITH_NETS | ||
417 | flags = 0; | ||
418 | type_pf_data_reset_flags(data, &flags); | ||
419 | #endif | ||
415 | m = hbucket(t, HKEY(data, h->initval, htable_bits)); | 420 | m = hbucket(t, HKEY(data, h->initval, htable_bits)); |
416 | ret = type_pf_elem_add(m, data, AHASH_MAX(h), 0); | 421 | ret = type_pf_elem_add(m, data, AHASH_MAX(h), flags); |
417 | if (ret < 0) { | 422 | if (ret < 0) { |
423 | #ifdef IP_SET_HASH_WITH_NETS | ||
424 | type_pf_data_flags(data, flags); | ||
425 | #endif | ||
418 | read_unlock_bh(&set->lock); | 426 | read_unlock_bh(&set->lock); |
419 | ahash_destroy(t); | 427 | ahash_destroy(t); |
420 | if (ret == -EAGAIN) | 428 | if (ret == -EAGAIN) |
@@ -836,9 +844,9 @@ type_pf_tresize(struct ip_set *set, bool retried) | |||
836 | struct ip_set_hash *h = set->data; | 844 | struct ip_set_hash *h = set->data; |
837 | struct htable *t, *orig = h->table; | 845 | struct htable *t, *orig = h->table; |
838 | u8 htable_bits = orig->htable_bits; | 846 | u8 htable_bits = orig->htable_bits; |
839 | const struct type_pf_elem *data; | 847 | struct type_pf_elem *data; |
840 | struct hbucket *n, *m; | 848 | struct hbucket *n, *m; |
841 | u32 i, j; | 849 | u32 i, j, flags = 0; |
842 | int ret; | 850 | int ret; |
843 | 851 | ||
844 | /* Try to cleanup once */ | 852 | /* Try to cleanup once */ |
@@ -873,10 +881,17 @@ retry: | |||
873 | n = hbucket(orig, i); | 881 | n = hbucket(orig, i); |
874 | for (j = 0; j < n->pos; j++) { | 882 | for (j = 0; j < n->pos; j++) { |
875 | data = ahash_tdata(n, j); | 883 | data = ahash_tdata(n, j); |
884 | #ifdef IP_SET_HASH_WITH_NETS | ||
885 | flags = 0; | ||
886 | type_pf_data_reset_flags(data, &flags); | ||
887 | #endif | ||
876 | m = hbucket(t, HKEY(data, h->initval, htable_bits)); | 888 | m = hbucket(t, HKEY(data, h->initval, htable_bits)); |
877 | ret = type_pf_elem_tadd(m, data, AHASH_MAX(h), 0, | 889 | ret = type_pf_elem_tadd(m, data, AHASH_MAX(h), flags, |
878 | ip_set_timeout_get(type_pf_data_timeout(data))); | 890 | ip_set_timeout_get(type_pf_data_timeout(data))); |
879 | if (ret < 0) { | 891 | if (ret < 0) { |
892 | #ifdef IP_SET_HASH_WITH_NETS | ||
893 | type_pf_data_flags(data, flags); | ||
894 | #endif | ||
880 | read_unlock_bh(&set->lock); | 895 | read_unlock_bh(&set->lock); |
881 | ahash_destroy(t); | 896 | ahash_destroy(t); |
882 | if (ret == -EAGAIN) | 897 | if (ret == -EAGAIN) |
@@ -1187,6 +1202,7 @@ type_pf_gc_init(struct ip_set *set) | |||
1187 | #undef type_pf_data_tlist | 1202 | #undef type_pf_data_tlist |
1188 | #undef type_pf_data_next | 1203 | #undef type_pf_data_next |
1189 | #undef type_pf_data_flags | 1204 | #undef type_pf_data_flags |
1205 | #undef type_pf_data_reset_flags | ||
1190 | #undef type_pf_data_match | 1206 | #undef type_pf_data_match |
1191 | 1207 | ||
1192 | #undef type_pf_elem | 1208 | #undef type_pf_elem |
diff --git a/include/linux/pci.h b/include/linux/pci.h index 2461033a7987..710067f3618c 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -916,6 +916,7 @@ void pci_disable_rom(struct pci_dev *pdev); | |||
916 | void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size); | 916 | void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size); |
917 | void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); | 917 | void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); |
918 | size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size); | 918 | size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size); |
919 | void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size); | ||
919 | 920 | ||
920 | /* Power management related routines */ | 921 | /* Power management related routines */ |
921 | int pci_save_state(struct pci_dev *dev); | 922 | int pci_save_state(struct pci_dev *dev); |
diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 5a710b9c578e..87a03c746f17 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h | |||
@@ -93,14 +93,20 @@ do { \ | |||
93 | 93 | ||
94 | #else /* !CONFIG_PREEMPT_COUNT */ | 94 | #else /* !CONFIG_PREEMPT_COUNT */ |
95 | 95 | ||
96 | #define preempt_disable() do { } while (0) | 96 | /* |
97 | #define sched_preempt_enable_no_resched() do { } while (0) | 97 | * Even if we don't have any preemption, we need preempt disable/enable |
98 | #define preempt_enable_no_resched() do { } while (0) | 98 | * to be barriers, so that we don't have things like get_user/put_user |
99 | #define preempt_enable() do { } while (0) | 99 | * that can cause faults and scheduling migrate into our preempt-protected |
100 | 100 | * region. | |
101 | #define preempt_disable_notrace() do { } while (0) | 101 | */ |
102 | #define preempt_enable_no_resched_notrace() do { } while (0) | 102 | #define preempt_disable() barrier() |
103 | #define preempt_enable_notrace() do { } while (0) | 103 | #define sched_preempt_enable_no_resched() barrier() |
104 | #define preempt_enable_no_resched() barrier() | ||
105 | #define preempt_enable() barrier() | ||
106 | |||
107 | #define preempt_disable_notrace() barrier() | ||
108 | #define preempt_enable_no_resched_notrace() barrier() | ||
109 | #define preempt_enable_notrace() barrier() | ||
104 | 110 | ||
105 | #endif /* CONFIG_PREEMPT_COUNT */ | 111 | #endif /* CONFIG_PREEMPT_COUNT */ |
106 | 112 | ||
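
On !CONFIG_PREEMPT_COUNT builds the preempt_disable()/preempt_enable() family used to expand to empty statements, so the compiler could move memory accesses, including faulting ones like get_user()/put_user(), across the nominally protected region; that is exactly what the new comment in the hunk warns about. Expanding them to barrier() makes them compiler barriers even though they still do nothing at run time. A small sketch of what a compiler barrier forbids (the function and variable are illustrative):

#include <linux/compiler.h>

static int read_inside_region(int *shared)
{
	int v;

	barrier();	/* the compiler may not reuse a value of *shared
			 * loaded before this point ... */
	v = *shared;
	barrier();	/* ... nor delay or sink the load past this point */
	return v;
}
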
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 8307f2f94d86..94dfb2aa5533 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h | |||
@@ -117,6 +117,7 @@ struct proc_dir_entry *proc_create_data(const char *name, umode_t mode, | |||
117 | const struct file_operations *proc_fops, | 117 | const struct file_operations *proc_fops, |
118 | void *data); | 118 | void *data); |
119 | extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent); | 119 | extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent); |
120 | extern int remove_proc_subtree(const char *name, struct proc_dir_entry *parent); | ||
120 | 121 | ||
121 | struct pid_namespace; | 122 | struct pid_namespace; |
122 | 123 | ||
@@ -202,6 +203,7 @@ static inline struct proc_dir_entry *proc_create_data(const char *name, | |||
202 | return NULL; | 203 | return NULL; |
203 | } | 204 | } |
204 | #define remove_proc_entry(name, parent) do {} while (0) | 205 | #define remove_proc_entry(name, parent) do {} while (0) |
206 | #define remove_proc_subtree(name, parent) do {} while (0) | ||
205 | 207 | ||
206 | static inline struct proc_dir_entry *proc_symlink(const char *name, | 208 | static inline struct proc_dir_entry *proc_symlink(const char *name, |
207 | struct proc_dir_entry *parent,const char *dest) {return NULL;} | 209 | struct proc_dir_entry *parent,const char *dest) {return NULL;} |
diff --git a/include/linux/sched.h b/include/linux/sched.h index d35d2b6ddbfb..e692a022527b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -163,9 +163,10 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) | |||
163 | #define TASK_DEAD 64 | 163 | #define TASK_DEAD 64 |
164 | #define TASK_WAKEKILL 128 | 164 | #define TASK_WAKEKILL 128 |
165 | #define TASK_WAKING 256 | 165 | #define TASK_WAKING 256 |
166 | #define TASK_STATE_MAX 512 | 166 | #define TASK_PARKED 512 |
167 | #define TASK_STATE_MAX 1024 | ||
167 | 168 | ||
168 | #define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW" | 169 | #define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP" |
169 | 170 | ||
170 | extern char ___assert_task_state[1 - 2*!!( | 171 | extern char ___assert_task_state[1 - 2*!!( |
171 | sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)]; | 172 | sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)]; |
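
TASK_PARKED (512) is a new task state slotted in before TASK_STATE_MAX, which doubles to 1024, and the state-character string gains 'P'; the matching "P (parked)" entry was added to task_state_array[] in fs/proc/array.c earlier in this diff. The ___assert_task_state negative-array-size trick keeps the string and the state count in sync, and the arithmetic still works out: sizeof("RSDTtZXxKWP") - 1 = 11 and ilog2(1024) + 1 = 11. An equivalent check written with BUILD_BUG_ON, as a sketch:

#include <linux/bug.h>
#include <linux/log2.h>

#define SKETCH_STATE_MAX	1024
#define SKETCH_STATE_CHARS	"RSDTtZXxKWP"	/* 11 characters */

static inline void sketch_check_task_states(void)
{
	BUILD_BUG_ON(sizeof(SKETCH_STATE_CHARS) - 1 !=
		     ilog2(SKETCH_STATE_MAX) + 1);
}
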
diff --git a/include/linux/security.h b/include/linux/security.h index eee7478cda70..032c366ef1c6 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
@@ -1012,6 +1012,10 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | |||
1012 | * This hook can be used by the module to update any security state | 1012 | * This hook can be used by the module to update any security state |
1013 | * associated with the TUN device's security structure. | 1013 | * associated with the TUN device's security structure. |
1014 | * @security pointer to the TUN devices's security structure. | 1014 | * @security pointer to the TUN devices's security structure. |
1015 | * @skb_owned_by: | ||
1016 | * This hook sets the packet's owning sock. | ||
1017 | * @skb is the packet. | ||
1018 | * @sk the sock which owns the packet. | ||
1015 | * | 1019 | * |
1016 | * Security hooks for XFRM operations. | 1020 | * Security hooks for XFRM operations. |
1017 | * | 1021 | * |
@@ -1638,6 +1642,7 @@ struct security_operations { | |||
1638 | int (*tun_dev_attach_queue) (void *security); | 1642 | int (*tun_dev_attach_queue) (void *security); |
1639 | int (*tun_dev_attach) (struct sock *sk, void *security); | 1643 | int (*tun_dev_attach) (struct sock *sk, void *security); |
1640 | int (*tun_dev_open) (void *security); | 1644 | int (*tun_dev_open) (void *security); |
1645 | void (*skb_owned_by) (struct sk_buff *skb, struct sock *sk); | ||
1641 | #endif /* CONFIG_SECURITY_NETWORK */ | 1646 | #endif /* CONFIG_SECURITY_NETWORK */ |
1642 | 1647 | ||
1643 | #ifdef CONFIG_SECURITY_NETWORK_XFRM | 1648 | #ifdef CONFIG_SECURITY_NETWORK_XFRM |
@@ -2588,6 +2593,8 @@ int security_tun_dev_attach_queue(void *security); | |||
2588 | int security_tun_dev_attach(struct sock *sk, void *security); | 2593 | int security_tun_dev_attach(struct sock *sk, void *security); |
2589 | int security_tun_dev_open(void *security); | 2594 | int security_tun_dev_open(void *security); |
2590 | 2595 | ||
2596 | void security_skb_owned_by(struct sk_buff *skb, struct sock *sk); | ||
2597 | |||
2591 | #else /* CONFIG_SECURITY_NETWORK */ | 2598 | #else /* CONFIG_SECURITY_NETWORK */ |
2592 | static inline int security_unix_stream_connect(struct sock *sock, | 2599 | static inline int security_unix_stream_connect(struct sock *sock, |
2593 | struct sock *other, | 2600 | struct sock *other, |
@@ -2779,6 +2786,11 @@ static inline int security_tun_dev_open(void *security) | |||
2779 | { | 2786 | { |
2780 | return 0; | 2787 | return 0; |
2781 | } | 2788 | } |
2789 | |||
2790 | static inline void security_skb_owned_by(struct sk_buff *skb, struct sock *sk) | ||
2791 | { | ||
2792 | } | ||
2793 | |||
2782 | #endif /* CONFIG_SECURITY_NETWORK */ | 2794 | #endif /* CONFIG_SECURITY_NETWORK */ |
2783 | 2795 | ||
2784 | #ifdef CONFIG_SECURITY_NETWORK_XFRM | 2796 | #ifdef CONFIG_SECURITY_NETWORK_XFRM |
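Only the declaration and the no-op !CONFIG_SECURITY_NETWORK stub appear in this hunk; the dispatcher body in security.c is not shown. A plausible sketch, assuming it follows the security_ops indirection used by the neighbouring tun_dev hooks:

#ifdef CONFIG_SECURITY_NETWORK
void security_skb_owned_by(struct sk_buff *skb, struct sock *sk)
{
	/* forward to whichever LSM registered the hook */
	security_ops->skb_owned_by(skb, sk);
}
#endif /* CONFIG_SECURITY_NETWORK */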
diff --git a/include/linux/signal.h b/include/linux/signal.h index a2dcb94ea49d..9475c5cb28bc 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h | |||
@@ -250,11 +250,11 @@ extern int show_unhandled_signals; | |||
250 | extern int sigsuspend(sigset_t *); | 250 | extern int sigsuspend(sigset_t *); |
251 | 251 | ||
252 | struct sigaction { | 252 | struct sigaction { |
253 | #ifndef __ARCH_HAS_ODD_SIGACTION | 253 | #ifndef __ARCH_HAS_IRIX_SIGACTION |
254 | __sighandler_t sa_handler; | 254 | __sighandler_t sa_handler; |
255 | unsigned long sa_flags; | 255 | unsigned long sa_flags; |
256 | #else | 256 | #else |
257 | unsigned long sa_flags; | 257 | unsigned int sa_flags; |
258 | __sighandler_t sa_handler; | 258 | __sighandler_t sa_handler; |
259 | #endif | 259 | #endif |
260 | #ifdef __ARCH_HAS_SA_RESTORER | 260 | #ifdef __ARCH_HAS_SA_RESTORER |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 441f5bfdab8e..b8292d8cc9fa 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -2643,6 +2643,13 @@ static inline void nf_reset(struct sk_buff *skb) | |||
2643 | #endif | 2643 | #endif |
2644 | } | 2644 | } |
2645 | 2645 | ||
2646 | static inline void nf_reset_trace(struct sk_buff *skb) | ||
2647 | { | ||
2648 | #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) | ||
2649 | skb->nf_trace = 0; | ||
2650 | #endif | ||
2651 | } | ||
2652 | |||
2646 | /* Note: This doesn't put any conntrack and bridge info in dst. */ | 2653 | /* Note: This doesn't put any conntrack and bridge info in dst. */ |
2647 | static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src) | 2654 | static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src) |
2648 | { | 2655 | { |
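nf_reset_trace() clears only skb->nf_trace, and it compiles away entirely unless the xtables TRACE target is enabled. A hedged example of the kind of call site it is intended for, scrubbing netfilter state before a packet is handed on (the helper below is illustrative):

#include <linux/skbuff.h>

/* illustrative: scrub netfilter state before reinjecting the skb */
static void scrub_skb(struct sk_buff *skb)
{
	nf_reset(skb);		/* drop conntrack/bridge references */
	nf_reset_trace(skb);	/* drop the TRACE mark as well */
}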
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index a26e2fb604e6..e2369c167dbd 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h | |||
@@ -16,7 +16,10 @@ | |||
16 | * In the debug case, 1 means unlocked, 0 means locked. (the values | 16 | * In the debug case, 1 means unlocked, 0 means locked. (the values |
17 | * are inverted, to catch initialization bugs) | 17 | * are inverted, to catch initialization bugs) |
18 | * | 18 | * |
19 | * No atomicity anywhere, we are on UP. | 19 | * No atomicity anywhere, we are on UP. However, we still need |
20 | * the compiler barriers, because we do not want the compiler to | ||
21 | * move potentially faulting instructions (notably user accesses) | ||
22 | * into the locked sequence, resulting in non-atomic execution. | ||
20 | */ | 23 | */ |
21 | 24 | ||
22 | #ifdef CONFIG_DEBUG_SPINLOCK | 25 | #ifdef CONFIG_DEBUG_SPINLOCK |
@@ -25,6 +28,7 @@ | |||
25 | static inline void arch_spin_lock(arch_spinlock_t *lock) | 28 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
26 | { | 29 | { |
27 | lock->slock = 0; | 30 | lock->slock = 0; |
31 | barrier(); | ||
28 | } | 32 | } |
29 | 33 | ||
30 | static inline void | 34 | static inline void |
@@ -32,6 +36,7 @@ arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) | |||
32 | { | 36 | { |
33 | local_irq_save(flags); | 37 | local_irq_save(flags); |
34 | lock->slock = 0; | 38 | lock->slock = 0; |
39 | barrier(); | ||
35 | } | 40 | } |
36 | 41 | ||
37 | static inline int arch_spin_trylock(arch_spinlock_t *lock) | 42 | static inline int arch_spin_trylock(arch_spinlock_t *lock) |
@@ -39,32 +44,34 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) | |||
39 | char oldval = lock->slock; | 44 | char oldval = lock->slock; |
40 | 45 | ||
41 | lock->slock = 0; | 46 | lock->slock = 0; |
47 | barrier(); | ||
42 | 48 | ||
43 | return oldval > 0; | 49 | return oldval > 0; |
44 | } | 50 | } |
45 | 51 | ||
46 | static inline void arch_spin_unlock(arch_spinlock_t *lock) | 52 | static inline void arch_spin_unlock(arch_spinlock_t *lock) |
47 | { | 53 | { |
54 | barrier(); | ||
48 | lock->slock = 1; | 55 | lock->slock = 1; |
49 | } | 56 | } |
50 | 57 | ||
51 | /* | 58 | /* |
52 | * Read-write spinlocks. No debug version. | 59 | * Read-write spinlocks. No debug version. |
53 | */ | 60 | */ |
54 | #define arch_read_lock(lock) do { (void)(lock); } while (0) | 61 | #define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0) |
55 | #define arch_write_lock(lock) do { (void)(lock); } while (0) | 62 | #define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0) |
56 | #define arch_read_trylock(lock) ({ (void)(lock); 1; }) | 63 | #define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; }) |
57 | #define arch_write_trylock(lock) ({ (void)(lock); 1; }) | 64 | #define arch_write_trylock(lock) ({ barrier(); (void)(lock); 1; }) |
58 | #define arch_read_unlock(lock) do { (void)(lock); } while (0) | 65 | #define arch_read_unlock(lock) do { barrier(); (void)(lock); } while (0) |
59 | #define arch_write_unlock(lock) do { (void)(lock); } while (0) | 66 | #define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0) |
60 | 67 | ||
61 | #else /* DEBUG_SPINLOCK */ | 68 | #else /* DEBUG_SPINLOCK */ |
62 | #define arch_spin_is_locked(lock) ((void)(lock), 0) | 69 | #define arch_spin_is_locked(lock) ((void)(lock), 0) |
63 | /* for sched.c and kernel_lock.c: */ | 70 | /* for sched.c and kernel_lock.c: */ |
64 | # define arch_spin_lock(lock) do { (void)(lock); } while (0) | 71 | # define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0) |
65 | # define arch_spin_lock_flags(lock, flags) do { (void)(lock); } while (0) | 72 | # define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0) |
66 | # define arch_spin_unlock(lock) do { (void)(lock); } while (0) | 73 | # define arch_spin_unlock(lock) do { barrier(); (void)(lock); } while (0) |
67 | # define arch_spin_trylock(lock) ({ (void)(lock); 1; }) | 74 | # define arch_spin_trylock(lock) ({ barrier(); (void)(lock); 1; }) |
68 | #endif /* DEBUG_SPINLOCK */ | 75 | #endif /* DEBUG_SPINLOCK */ |
69 | 76 | ||
70 | #define arch_spin_is_contended(lock) (((void)(lock), 0)) | 77 | #define arch_spin_is_contended(lock) (((void)(lock), 0)) |
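The new comment spells out why the barrier() calls matter: on UP these "locks" compile to plain stores, so nothing stops the optimizer from pulling a potentially faulting user access into the supposedly atomic region. A schematic of the hazard the barriers close (illustrative code, not from the patch):

#include <linux/spinlock.h>
#include <linux/uaccess.h>

static DEFINE_SPINLOCK(counter_lock);
static int counter;

/* the put_user() is intended to run after the critical section */
static int bump_and_report(int __user *ptr)
{
	spin_lock(&counter_lock);
	counter++;			/* the only work meant to be atomic */
	spin_unlock(&counter_lock);	/* on UP this used to be a plain store */

	return put_user(counter, ptr);	/* may fault and sleep */
}

Without the barriers, the UP unlock is an unordered store that the compiler may sink below the put_user(), effectively executing the faulting access inside the preemption-disabled region; barrier() in the lock/unlock bodies forbids exactly that reordering.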
diff --git a/include/linux/ssb/ssb_driver_chipcommon.h b/include/linux/ssb/ssb_driver_chipcommon.h index 9e492be5244b..6fcfe99bd999 100644 --- a/include/linux/ssb/ssb_driver_chipcommon.h +++ b/include/linux/ssb/ssb_driver_chipcommon.h | |||
@@ -219,6 +219,7 @@ | |||
219 | #define SSB_CHIPCO_PMU_CTL 0x0600 /* PMU control */ | 219 | #define SSB_CHIPCO_PMU_CTL 0x0600 /* PMU control */ |
220 | #define SSB_CHIPCO_PMU_CTL_ILP_DIV 0xFFFF0000 /* ILP div mask */ | 220 | #define SSB_CHIPCO_PMU_CTL_ILP_DIV 0xFFFF0000 /* ILP div mask */ |
221 | #define SSB_CHIPCO_PMU_CTL_ILP_DIV_SHIFT 16 | 221 | #define SSB_CHIPCO_PMU_CTL_ILP_DIV_SHIFT 16 |
222 | #define SSB_CHIPCO_PMU_CTL_PLL_UPD 0x00000400 | ||
222 | #define SSB_CHIPCO_PMU_CTL_NOILPONW 0x00000200 /* No ILP on wait */ | 223 | #define SSB_CHIPCO_PMU_CTL_NOILPONW 0x00000200 /* No ILP on wait */ |
223 | #define SSB_CHIPCO_PMU_CTL_HTREQEN 0x00000100 /* HT req enable */ | 224 | #define SSB_CHIPCO_PMU_CTL_HTREQEN 0x00000100 /* HT req enable */ |
224 | #define SSB_CHIPCO_PMU_CTL_ALPREQEN 0x00000080 /* ALP req enable */ | 225 | #define SSB_CHIPCO_PMU_CTL_ALPREQEN 0x00000080 /* ALP req enable */ |
@@ -667,5 +668,6 @@ enum ssb_pmu_ldo_volt_id { | |||
667 | void ssb_pmu_set_ldo_voltage(struct ssb_chipcommon *cc, | 668 | void ssb_pmu_set_ldo_voltage(struct ssb_chipcommon *cc, |
668 | enum ssb_pmu_ldo_volt_id id, u32 voltage); | 669 | enum ssb_pmu_ldo_volt_id id, u32 voltage); |
669 | void ssb_pmu_set_ldo_paref(struct ssb_chipcommon *cc, bool on); | 670 | void ssb_pmu_set_ldo_paref(struct ssb_chipcommon *cc, bool on); |
671 | void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid); | ||
670 | 672 | ||
671 | #endif /* LINUX_SSB_CHIPCO_H_ */ | 673 | #endif /* LINUX_SSB_CHIPCO_H_ */ |
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 2de42f9401d2..a5ffd32642fd 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h | |||
@@ -25,6 +25,7 @@ extern int swiotlb_force; | |||
25 | extern void swiotlb_init(int verbose); | 25 | extern void swiotlb_init(int verbose); |
26 | int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose); | 26 | int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose); |
27 | extern unsigned long swiotlb_nr_tbl(void); | 27 | extern unsigned long swiotlb_nr_tbl(void); |
28 | unsigned long swiotlb_size_or_default(void); | ||
28 | extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs); | 29 | extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs); |
29 | 30 | ||
30 | /* | 31 | /* |
diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h new file mode 100644 index 000000000000..cbb20afdbc01 --- /dev/null +++ b/include/linux/ucs2_string.h | |||
@@ -0,0 +1,14 @@ | |||
1 | #ifndef _LINUX_UCS2_STRING_H_ | ||
2 | #define _LINUX_UCS2_STRING_H_ | ||
3 | |||
4 | #include <linux/types.h> /* for size_t */ | ||
5 | #include <linux/stddef.h> /* for NULL */ | ||
6 | |||
7 | typedef u16 ucs2_char_t; | ||
8 | |||
9 | unsigned long ucs2_strnlen(const ucs2_char_t *s, size_t maxlength); | ||
10 | unsigned long ucs2_strlen(const ucs2_char_t *s); | ||
11 | unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength); | ||
12 | int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len); | ||
13 | |||
14 | #endif /* _LINUX_UCS2_STRING_H_ */ | ||
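Only the header is added here; the matching lib implementation lives elsewhere in the series. A sketch of what the two length helpers amount to, derived from the declarations above (note that ucs2_strsize() returns bytes, not characters):

#include <linux/ucs2_string.h>

unsigned long ucs2_strnlen(const ucs2_char_t *s, size_t maxlength)
{
	unsigned long length = 0;

	while (*s++ != 0 && length < maxlength)
		length++;
	return length;
}

unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength)
{
	/* size in bytes, excluding the terminating 0 */
	return ucs2_strnlen(data, maxlength) * sizeof(ucs2_char_t);
}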
diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 40be2a0d8ae1..84a6440f1f19 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h | |||
@@ -199,6 +199,7 @@ extern bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev, | |||
199 | /* Device notifier */ | 199 | /* Device notifier */ |
200 | extern int register_inet6addr_notifier(struct notifier_block *nb); | 200 | extern int register_inet6addr_notifier(struct notifier_block *nb); |
201 | extern int unregister_inet6addr_notifier(struct notifier_block *nb); | 201 | extern int unregister_inet6addr_notifier(struct notifier_block *nb); |
202 | extern int inet6addr_notifier_call_chain(unsigned long val, void *v); | ||
202 | 203 | ||
203 | extern void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex, | 204 | extern void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex, |
204 | struct ipv6_devconf *devconf); | 205 | struct ipv6_devconf *devconf); |
diff --git a/include/net/irda/irlmp.h b/include/net/irda/irlmp.h index f74109144d3f..f132924cc9da 100644 --- a/include/net/irda/irlmp.h +++ b/include/net/irda/irlmp.h | |||
@@ -256,7 +256,8 @@ static inline __u32 irlmp_get_daddr(const struct lsap_cb *self) | |||
256 | return (self && self->lap) ? self->lap->daddr : 0; | 256 | return (self && self->lap) ? self->lap->daddr : 0; |
257 | } | 257 | } |
258 | 258 | ||
259 | extern const char *irlmp_reasons[]; | 259 | const char *irlmp_reason_str(LM_REASON reason); |
260 | |||
260 | extern int sysctl_discovery_timeout; | 261 | extern int sysctl_discovery_timeout; |
261 | extern int sysctl_discovery_slots; | 262 | extern int sysctl_discovery_slots; |
262 | extern int sysctl_discovery; | 263 | extern int sysctl_discovery; |
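Replacing the exported irlmp_reasons[] array with an accessor lets the lookup be bounds-checked in a single place inside irlmp.c. A plausible sketch of the accessor; the table contents shown are illustrative, not taken from this hunk:

static const char *irlmp_reasons[] = {
	"ERROR, NOT USED",
	"LM_USER_REQUEST",
	"LM_LAP_DISCONNECT",
	"LM_CONNECT_FAILURE",
	"LM_LAP_RESET",
	"LM_INIT_DISCONNECT",
	"ERROR, NOT USED",
	"UNKNOWN",
};

const char *irlmp_reason_str(LM_REASON reason)
{
	/* clamp instead of indexing past the end of the table */
	reason = min_t(size_t, reason, ARRAY_SIZE(irlmp_reasons) - 1);
	return irlmp_reasons[reason];
}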
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h index cc7c19732389..714cc9a54a4c 100644 --- a/include/net/iucv/af_iucv.h +++ b/include/net/iucv/af_iucv.h | |||
@@ -130,6 +130,14 @@ struct iucv_sock { | |||
130 | enum iucv_tx_notify n); | 130 | enum iucv_tx_notify n); |
131 | }; | 131 | }; |
132 | 132 | ||
133 | struct iucv_skb_cb { | ||
134 | u32 class; /* target class of message */ | ||
135 | u32 tag; /* tag associated with message */ | ||
136 | u32 offset; /* offset for skb receival */ | ||
137 | }; | ||
138 | |||
139 | #define IUCV_SKB_CB(__skb) ((struct iucv_skb_cb *)&((__skb)->cb[0])) | ||
140 | |||
133 | /* iucv socket options (SOL_IUCV) */ | 141 | /* iucv socket options (SOL_IUCV) */ |
134 | #define SO_IPRMDATA_MSG 0x0080 /* send/recv IPRM_DATA msgs */ | 142 | #define SO_IPRMDATA_MSG 0x0080 /* send/recv IPRM_DATA msgs */ |
135 | #define SO_MSGLIMIT 0x1000 /* get/set IUCV MSGLIMIT */ | 143 | #define SO_MSGLIMIT 0x1000 /* get/set IUCV MSGLIMIT */ |
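IUCV_SKB_CB() overlays a small per-message structure on the skb control buffer, the usual skb->cb pattern. A minimal usage sketch; the helper and the tag value are made up for illustration:

#include <linux/bug.h>
#include <net/iucv/af_iucv.h>

static void iucv_stamp_skb(struct sk_buff *skb, u32 msg_tag)
{
	struct iucv_skb_cb *cb = IUCV_SKB_CB(skb);

	/* the overlay must fit in the 48-byte skb->cb area */
	BUILD_BUG_ON(sizeof(struct iucv_skb_cb) > sizeof(skb->cb));

	cb->class = 0;
	cb->tag = msg_tag;
	cb->offset = 0;		/* nothing consumed from this skb yet */
}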
diff --git a/include/net/scm.h b/include/net/scm.h index 975cca01048b..b11708105681 100644 --- a/include/net/scm.h +++ b/include/net/scm.h | |||
@@ -56,8 +56,8 @@ static __inline__ void scm_set_cred(struct scm_cookie *scm, | |||
56 | scm->pid = get_pid(pid); | 56 | scm->pid = get_pid(pid); |
57 | scm->cred = cred ? get_cred(cred) : NULL; | 57 | scm->cred = cred ? get_cred(cred) : NULL; |
58 | scm->creds.pid = pid_vnr(pid); | 58 | scm->creds.pid = pid_vnr(pid); |
59 | scm->creds.uid = cred ? cred->euid : INVALID_UID; | 59 | scm->creds.uid = cred ? cred->uid : INVALID_UID; |
60 | scm->creds.gid = cred ? cred->egid : INVALID_GID; | 60 | scm->creds.gid = cred ? cred->gid : INVALID_GID; |
61 | } | 61 | } |
62 | 62 | ||
63 | static __inline__ void scm_destroy_cred(struct scm_cookie *scm) | 63 | static __inline__ void scm_destroy_cred(struct scm_cookie *scm) |
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 399162b50a8d..e1379b4e8faf 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h | |||
@@ -1074,7 +1074,8 @@ void fc_rport_terminate_io(struct fc_rport *); | |||
1074 | /* | 1074 | /* |
1075 | * DISCOVERY LAYER | 1075 | * DISCOVERY LAYER |
1076 | *****************************/ | 1076 | *****************************/ |
1077 | int fc_disc_init(struct fc_lport *); | 1077 | void fc_disc_init(struct fc_lport *); |
1078 | void fc_disc_config(struct fc_lport *, void *); | ||
1078 | 1079 | ||
1079 | static inline struct fc_lport *fc_disc_lport(struct fc_disc *disc) | 1080 | static inline struct fc_lport *fc_disc_lport(struct fc_disc *disc) |
1080 | { | 1081 | { |
diff --git a/include/sound/max98090.h b/include/sound/max98090.h index 95efb13f8478..95efb13f8478 100755..100644 --- a/include/sound/max98090.h +++ b/include/sound/max98090.h | |||
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index e1ef63d4a5c4..44a30b108683 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h | |||
@@ -488,6 +488,7 @@ struct snd_soc_dapm_path { | |||
488 | /* status */ | 488 | /* status */ |
489 | u32 connect:1; /* source and sink widgets are connected */ | 489 | u32 connect:1; /* source and sink widgets are connected */ |
490 | u32 walked:1; /* path has been walked */ | 490 | u32 walked:1; /* path has been walked */ |
491 | u32 walking:1; /* path is in the process of being walked */ | ||
491 | u32 weak:1; /* path ignored for power management */ | 492 | u32 weak:1; /* path ignored for power management */ |
492 | 493 | ||
493 | int (*connected)(struct snd_soc_dapm_widget *source, | 494 | int (*connected)(struct snd_soc_dapm_widget *source, |
diff --git a/include/trace/events/block.h b/include/trace/events/block.h index 9961726523d0..9c1467357b03 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h | |||
@@ -257,6 +257,7 @@ TRACE_EVENT(block_bio_bounce, | |||
257 | 257 | ||
258 | /** | 258 | /** |
259 | * block_bio_complete - completed all work on the block operation | 259 | * block_bio_complete - completed all work on the block operation |
260 | * @q: queue holding the block operation | ||
260 | * @bio: block operation completed | 261 | * @bio: block operation completed |
261 | * @error: io error value | 262 | * @error: io error value |
262 | * | 263 | * |
@@ -265,9 +266,9 @@ TRACE_EVENT(block_bio_bounce, | |||
265 | */ | 266 | */ |
266 | TRACE_EVENT(block_bio_complete, | 267 | TRACE_EVENT(block_bio_complete, |
267 | 268 | ||
268 | TP_PROTO(struct bio *bio, int error), | 269 | TP_PROTO(struct request_queue *q, struct bio *bio, int error), |
269 | 270 | ||
270 | TP_ARGS(bio, error), | 271 | TP_ARGS(q, bio, error), |
271 | 272 | ||
272 | TP_STRUCT__entry( | 273 | TP_STRUCT__entry( |
273 | __field( dev_t, dev ) | 274 | __field( dev_t, dev ) |
@@ -278,8 +279,7 @@ TRACE_EVENT(block_bio_complete, | |||
278 | ), | 279 | ), |
279 | 280 | ||
280 | TP_fast_assign( | 281 | TP_fast_assign( |
281 | __entry->dev = bio->bi_bdev ? | 282 | __entry->dev = bio->bi_bdev->bd_dev; |
282 | bio->bi_bdev->bd_dev : 0; | ||
283 | __entry->sector = bio->bi_sector; | 283 | __entry->sector = bio->bi_sector; |
284 | __entry->nr_sector = bio->bi_size >> 9; | 284 | __entry->nr_sector = bio->bi_size >> 9; |
285 | __entry->error = error; | 285 | __entry->error = error; |
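With the request queue back in the prototype, callers must pass it explicitly, and the dev field can be read from bio->bi_bdev without a NULL check. A hedged sketch of a completion-path call site after this change (the wrapper function is illustrative):

#include <linux/blkdev.h>
#include <trace/events/block.h>

/* illustrative completion helper */
static void complete_bio(struct request_queue *q, struct bio *bio, int error)
{
	trace_block_bio_complete(q, bio, error);
	bio_endio(bio, error);
}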
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 5a8671e8a67f..e5586caff67a 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h | |||
@@ -147,7 +147,7 @@ TRACE_EVENT(sched_switch, | |||
147 | __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|", | 147 | __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|", |
148 | { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" }, | 148 | { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" }, |
149 | { 16, "Z" }, { 32, "X" }, { 64, "x" }, | 149 | { 16, "Z" }, { 32, "X" }, { 64, "x" }, |
150 | { 128, "W" }) : "R", | 150 | { 128, "K" }, { 256, "W" }, { 512, "P" }) : "R", |
151 | __entry->prev_state & TASK_STATE_MAX ? "+" : "", | 151 | __entry->prev_state & TASK_STATE_MAX ? "+" : "", |
152 | __entry->next_comm, __entry->next_pid, __entry->next_prio) | 152 | __entry->next_comm, __entry->next_pid, __entry->next_prio) |
153 | ); | 153 | ); |
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index 4c43b4448792..706d035fa748 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h | |||
@@ -95,15 +95,10 @@ | |||
95 | #ifndef _LINUX_FUSE_H | 95 | #ifndef _LINUX_FUSE_H |
96 | #define _LINUX_FUSE_H | 96 | #define _LINUX_FUSE_H |
97 | 97 | ||
98 | #ifdef __linux__ | 98 | #ifdef __KERNEL__ |
99 | #include <linux/types.h> | 99 | #include <linux/types.h> |
100 | #else | 100 | #else |
101 | #include <stdint.h> | 101 | #include <stdint.h> |
102 | #define __u64 uint64_t | ||
103 | #define __s64 int64_t | ||
104 | #define __u32 uint32_t | ||
105 | #define __s32 int32_t | ||
106 | #define __u16 uint16_t | ||
107 | #endif | 102 | #endif |
108 | 103 | ||
109 | /* | 104 | /* |
@@ -139,42 +134,42 @@ | |||
139 | userspace works under 64bit kernels */ | 134 | userspace works under 64bit kernels */ |
140 | 135 | ||
141 | struct fuse_attr { | 136 | struct fuse_attr { |
142 | __u64 ino; | 137 | uint64_t ino; |
143 | __u64 size; | 138 | uint64_t size; |
144 | __u64 blocks; | 139 | uint64_t blocks; |
145 | __u64 atime; | 140 | uint64_t atime; |
146 | __u64 mtime; | 141 | uint64_t mtime; |
147 | __u64 ctime; | 142 | uint64_t ctime; |
148 | __u32 atimensec; | 143 | uint32_t atimensec; |
149 | __u32 mtimensec; | 144 | uint32_t mtimensec; |
150 | __u32 ctimensec; | 145 | uint32_t ctimensec; |
151 | __u32 mode; | 146 | uint32_t mode; |
152 | __u32 nlink; | 147 | uint32_t nlink; |
153 | __u32 uid; | 148 | uint32_t uid; |
154 | __u32 gid; | 149 | uint32_t gid; |
155 | __u32 rdev; | 150 | uint32_t rdev; |
156 | __u32 blksize; | 151 | uint32_t blksize; |
157 | __u32 padding; | 152 | uint32_t padding; |
158 | }; | 153 | }; |
159 | 154 | ||
160 | struct fuse_kstatfs { | 155 | struct fuse_kstatfs { |
161 | __u64 blocks; | 156 | uint64_t blocks; |
162 | __u64 bfree; | 157 | uint64_t bfree; |
163 | __u64 bavail; | 158 | uint64_t bavail; |
164 | __u64 files; | 159 | uint64_t files; |
165 | __u64 ffree; | 160 | uint64_t ffree; |
166 | __u32 bsize; | 161 | uint32_t bsize; |
167 | __u32 namelen; | 162 | uint32_t namelen; |
168 | __u32 frsize; | 163 | uint32_t frsize; |
169 | __u32 padding; | 164 | uint32_t padding; |
170 | __u32 spare[6]; | 165 | uint32_t spare[6]; |
171 | }; | 166 | }; |
172 | 167 | ||
173 | struct fuse_file_lock { | 168 | struct fuse_file_lock { |
174 | __u64 start; | 169 | uint64_t start; |
175 | __u64 end; | 170 | uint64_t end; |
176 | __u32 type; | 171 | uint32_t type; |
177 | __u32 pid; /* tgid */ | 172 | uint32_t pid; /* tgid */ |
178 | }; | 173 | }; |
179 | 174 | ||
180 | /** | 175 | /** |
@@ -364,143 +359,143 @@ enum fuse_notify_code { | |||
364 | #define FUSE_COMPAT_ENTRY_OUT_SIZE 120 | 359 | #define FUSE_COMPAT_ENTRY_OUT_SIZE 120 |
365 | 360 | ||
366 | struct fuse_entry_out { | 361 | struct fuse_entry_out { |
367 | __u64 nodeid; /* Inode ID */ | 362 | uint64_t nodeid; /* Inode ID */ |
368 | __u64 generation; /* Inode generation: nodeid:gen must | 363 | uint64_t generation; /* Inode generation: nodeid:gen must |
369 | be unique for the fs's lifetime */ | 364 | be unique for the fs's lifetime */ |
370 | __u64 entry_valid; /* Cache timeout for the name */ | 365 | uint64_t entry_valid; /* Cache timeout for the name */ |
371 | __u64 attr_valid; /* Cache timeout for the attributes */ | 366 | uint64_t attr_valid; /* Cache timeout for the attributes */ |
372 | __u32 entry_valid_nsec; | 367 | uint32_t entry_valid_nsec; |
373 | __u32 attr_valid_nsec; | 368 | uint32_t attr_valid_nsec; |
374 | struct fuse_attr attr; | 369 | struct fuse_attr attr; |
375 | }; | 370 | }; |
376 | 371 | ||
377 | struct fuse_forget_in { | 372 | struct fuse_forget_in { |
378 | __u64 nlookup; | 373 | uint64_t nlookup; |
379 | }; | 374 | }; |
380 | 375 | ||
381 | struct fuse_forget_one { | 376 | struct fuse_forget_one { |
382 | __u64 nodeid; | 377 | uint64_t nodeid; |
383 | __u64 nlookup; | 378 | uint64_t nlookup; |
384 | }; | 379 | }; |
385 | 380 | ||
386 | struct fuse_batch_forget_in { | 381 | struct fuse_batch_forget_in { |
387 | __u32 count; | 382 | uint32_t count; |
388 | __u32 dummy; | 383 | uint32_t dummy; |
389 | }; | 384 | }; |
390 | 385 | ||
391 | struct fuse_getattr_in { | 386 | struct fuse_getattr_in { |
392 | __u32 getattr_flags; | 387 | uint32_t getattr_flags; |
393 | __u32 dummy; | 388 | uint32_t dummy; |
394 | __u64 fh; | 389 | uint64_t fh; |
395 | }; | 390 | }; |
396 | 391 | ||
397 | #define FUSE_COMPAT_ATTR_OUT_SIZE 96 | 392 | #define FUSE_COMPAT_ATTR_OUT_SIZE 96 |
398 | 393 | ||
399 | struct fuse_attr_out { | 394 | struct fuse_attr_out { |
400 | __u64 attr_valid; /* Cache timeout for the attributes */ | 395 | uint64_t attr_valid; /* Cache timeout for the attributes */ |
401 | __u32 attr_valid_nsec; | 396 | uint32_t attr_valid_nsec; |
402 | __u32 dummy; | 397 | uint32_t dummy; |
403 | struct fuse_attr attr; | 398 | struct fuse_attr attr; |
404 | }; | 399 | }; |
405 | 400 | ||
406 | #define FUSE_COMPAT_MKNOD_IN_SIZE 8 | 401 | #define FUSE_COMPAT_MKNOD_IN_SIZE 8 |
407 | 402 | ||
408 | struct fuse_mknod_in { | 403 | struct fuse_mknod_in { |
409 | __u32 mode; | 404 | uint32_t mode; |
410 | __u32 rdev; | 405 | uint32_t rdev; |
411 | __u32 umask; | 406 | uint32_t umask; |
412 | __u32 padding; | 407 | uint32_t padding; |
413 | }; | 408 | }; |
414 | 409 | ||
415 | struct fuse_mkdir_in { | 410 | struct fuse_mkdir_in { |
416 | __u32 mode; | 411 | uint32_t mode; |
417 | __u32 umask; | 412 | uint32_t umask; |
418 | }; | 413 | }; |
419 | 414 | ||
420 | struct fuse_rename_in { | 415 | struct fuse_rename_in { |
421 | __u64 newdir; | 416 | uint64_t newdir; |
422 | }; | 417 | }; |
423 | 418 | ||
424 | struct fuse_link_in { | 419 | struct fuse_link_in { |
425 | __u64 oldnodeid; | 420 | uint64_t oldnodeid; |
426 | }; | 421 | }; |
427 | 422 | ||
428 | struct fuse_setattr_in { | 423 | struct fuse_setattr_in { |
429 | __u32 valid; | 424 | uint32_t valid; |
430 | __u32 padding; | 425 | uint32_t padding; |
431 | __u64 fh; | 426 | uint64_t fh; |
432 | __u64 size; | 427 | uint64_t size; |
433 | __u64 lock_owner; | 428 | uint64_t lock_owner; |
434 | __u64 atime; | 429 | uint64_t atime; |
435 | __u64 mtime; | 430 | uint64_t mtime; |
436 | __u64 unused2; | 431 | uint64_t unused2; |
437 | __u32 atimensec; | 432 | uint32_t atimensec; |
438 | __u32 mtimensec; | 433 | uint32_t mtimensec; |
439 | __u32 unused3; | 434 | uint32_t unused3; |
440 | __u32 mode; | 435 | uint32_t mode; |
441 | __u32 unused4; | 436 | uint32_t unused4; |
442 | __u32 uid; | 437 | uint32_t uid; |
443 | __u32 gid; | 438 | uint32_t gid; |
444 | __u32 unused5; | 439 | uint32_t unused5; |
445 | }; | 440 | }; |
446 | 441 | ||
447 | struct fuse_open_in { | 442 | struct fuse_open_in { |
448 | __u32 flags; | 443 | uint32_t flags; |
449 | __u32 unused; | 444 | uint32_t unused; |
450 | }; | 445 | }; |
451 | 446 | ||
452 | struct fuse_create_in { | 447 | struct fuse_create_in { |
453 | __u32 flags; | 448 | uint32_t flags; |
454 | __u32 mode; | 449 | uint32_t mode; |
455 | __u32 umask; | 450 | uint32_t umask; |
456 | __u32 padding; | 451 | uint32_t padding; |
457 | }; | 452 | }; |
458 | 453 | ||
459 | struct fuse_open_out { | 454 | struct fuse_open_out { |
460 | __u64 fh; | 455 | uint64_t fh; |
461 | __u32 open_flags; | 456 | uint32_t open_flags; |
462 | __u32 padding; | 457 | uint32_t padding; |
463 | }; | 458 | }; |
464 | 459 | ||
465 | struct fuse_release_in { | 460 | struct fuse_release_in { |
466 | __u64 fh; | 461 | uint64_t fh; |
467 | __u32 flags; | 462 | uint32_t flags; |
468 | __u32 release_flags; | 463 | uint32_t release_flags; |
469 | __u64 lock_owner; | 464 | uint64_t lock_owner; |
470 | }; | 465 | }; |
471 | 466 | ||
472 | struct fuse_flush_in { | 467 | struct fuse_flush_in { |
473 | __u64 fh; | 468 | uint64_t fh; |
474 | __u32 unused; | 469 | uint32_t unused; |
475 | __u32 padding; | 470 | uint32_t padding; |
476 | __u64 lock_owner; | 471 | uint64_t lock_owner; |
477 | }; | 472 | }; |
478 | 473 | ||
479 | struct fuse_read_in { | 474 | struct fuse_read_in { |
480 | __u64 fh; | 475 | uint64_t fh; |
481 | __u64 offset; | 476 | uint64_t offset; |
482 | __u32 size; | 477 | uint32_t size; |
483 | __u32 read_flags; | 478 | uint32_t read_flags; |
484 | __u64 lock_owner; | 479 | uint64_t lock_owner; |
485 | __u32 flags; | 480 | uint32_t flags; |
486 | __u32 padding; | 481 | uint32_t padding; |
487 | }; | 482 | }; |
488 | 483 | ||
489 | #define FUSE_COMPAT_WRITE_IN_SIZE 24 | 484 | #define FUSE_COMPAT_WRITE_IN_SIZE 24 |
490 | 485 | ||
491 | struct fuse_write_in { | 486 | struct fuse_write_in { |
492 | __u64 fh; | 487 | uint64_t fh; |
493 | __u64 offset; | 488 | uint64_t offset; |
494 | __u32 size; | 489 | uint32_t size; |
495 | __u32 write_flags; | 490 | uint32_t write_flags; |
496 | __u64 lock_owner; | 491 | uint64_t lock_owner; |
497 | __u32 flags; | 492 | uint32_t flags; |
498 | __u32 padding; | 493 | uint32_t padding; |
499 | }; | 494 | }; |
500 | 495 | ||
501 | struct fuse_write_out { | 496 | struct fuse_write_out { |
502 | __u32 size; | 497 | uint32_t size; |
503 | __u32 padding; | 498 | uint32_t padding; |
504 | }; | 499 | }; |
505 | 500 | ||
506 | #define FUSE_COMPAT_STATFS_SIZE 48 | 501 | #define FUSE_COMPAT_STATFS_SIZE 48 |
@@ -510,32 +505,32 @@ struct fuse_statfs_out { | |||
510 | }; | 505 | }; |
511 | 506 | ||
512 | struct fuse_fsync_in { | 507 | struct fuse_fsync_in { |
513 | __u64 fh; | 508 | uint64_t fh; |
514 | __u32 fsync_flags; | 509 | uint32_t fsync_flags; |
515 | __u32 padding; | 510 | uint32_t padding; |
516 | }; | 511 | }; |
517 | 512 | ||
518 | struct fuse_setxattr_in { | 513 | struct fuse_setxattr_in { |
519 | __u32 size; | 514 | uint32_t size; |
520 | __u32 flags; | 515 | uint32_t flags; |
521 | }; | 516 | }; |
522 | 517 | ||
523 | struct fuse_getxattr_in { | 518 | struct fuse_getxattr_in { |
524 | __u32 size; | 519 | uint32_t size; |
525 | __u32 padding; | 520 | uint32_t padding; |
526 | }; | 521 | }; |
527 | 522 | ||
528 | struct fuse_getxattr_out { | 523 | struct fuse_getxattr_out { |
529 | __u32 size; | 524 | uint32_t size; |
530 | __u32 padding; | 525 | uint32_t padding; |
531 | }; | 526 | }; |
532 | 527 | ||
533 | struct fuse_lk_in { | 528 | struct fuse_lk_in { |
534 | __u64 fh; | 529 | uint64_t fh; |
535 | __u64 owner; | 530 | uint64_t owner; |
536 | struct fuse_file_lock lk; | 531 | struct fuse_file_lock lk; |
537 | __u32 lk_flags; | 532 | uint32_t lk_flags; |
538 | __u32 padding; | 533 | uint32_t padding; |
539 | }; | 534 | }; |
540 | 535 | ||
541 | struct fuse_lk_out { | 536 | struct fuse_lk_out { |
@@ -543,134 +538,135 @@ struct fuse_lk_out { | |||
543 | }; | 538 | }; |
544 | 539 | ||
545 | struct fuse_access_in { | 540 | struct fuse_access_in { |
546 | __u32 mask; | 541 | uint32_t mask; |
547 | __u32 padding; | 542 | uint32_t padding; |
548 | }; | 543 | }; |
549 | 544 | ||
550 | struct fuse_init_in { | 545 | struct fuse_init_in { |
551 | __u32 major; | 546 | uint32_t major; |
552 | __u32 minor; | 547 | uint32_t minor; |
553 | __u32 max_readahead; | 548 | uint32_t max_readahead; |
554 | __u32 flags; | 549 | uint32_t flags; |
555 | }; | 550 | }; |
556 | 551 | ||
557 | struct fuse_init_out { | 552 | struct fuse_init_out { |
558 | __u32 major; | 553 | uint32_t major; |
559 | __u32 minor; | 554 | uint32_t minor; |
560 | __u32 max_readahead; | 555 | uint32_t max_readahead; |
561 | __u32 flags; | 556 | uint32_t flags; |
562 | __u16 max_background; | 557 | uint16_t max_background; |
563 | __u16 congestion_threshold; | 558 | uint16_t congestion_threshold; |
564 | __u32 max_write; | 559 | uint32_t max_write; |
565 | }; | 560 | }; |
566 | 561 | ||
567 | #define CUSE_INIT_INFO_MAX 4096 | 562 | #define CUSE_INIT_INFO_MAX 4096 |
568 | 563 | ||
569 | struct cuse_init_in { | 564 | struct cuse_init_in { |
570 | __u32 major; | 565 | uint32_t major; |
571 | __u32 minor; | 566 | uint32_t minor; |
572 | __u32 unused; | 567 | uint32_t unused; |
573 | __u32 flags; | 568 | uint32_t flags; |
574 | }; | 569 | }; |
575 | 570 | ||
576 | struct cuse_init_out { | 571 | struct cuse_init_out { |
577 | __u32 major; | 572 | uint32_t major; |
578 | __u32 minor; | 573 | uint32_t minor; |
579 | __u32 unused; | 574 | uint32_t unused; |
580 | __u32 flags; | 575 | uint32_t flags; |
581 | __u32 max_read; | 576 | uint32_t max_read; |
582 | __u32 max_write; | 577 | uint32_t max_write; |
583 | __u32 dev_major; /* chardev major */ | 578 | uint32_t dev_major; /* chardev major */ |
584 | __u32 dev_minor; /* chardev minor */ | 579 | uint32_t dev_minor; /* chardev minor */ |
585 | __u32 spare[10]; | 580 | uint32_t spare[10]; |
586 | }; | 581 | }; |
587 | 582 | ||
588 | struct fuse_interrupt_in { | 583 | struct fuse_interrupt_in { |
589 | __u64 unique; | 584 | uint64_t unique; |
590 | }; | 585 | }; |
591 | 586 | ||
592 | struct fuse_bmap_in { | 587 | struct fuse_bmap_in { |
593 | __u64 block; | 588 | uint64_t block; |
594 | __u32 blocksize; | 589 | uint32_t blocksize; |
595 | __u32 padding; | 590 | uint32_t padding; |
596 | }; | 591 | }; |
597 | 592 | ||
598 | struct fuse_bmap_out { | 593 | struct fuse_bmap_out { |
599 | __u64 block; | 594 | uint64_t block; |
600 | }; | 595 | }; |
601 | 596 | ||
602 | struct fuse_ioctl_in { | 597 | struct fuse_ioctl_in { |
603 | __u64 fh; | 598 | uint64_t fh; |
604 | __u32 flags; | 599 | uint32_t flags; |
605 | __u32 cmd; | 600 | uint32_t cmd; |
606 | __u64 arg; | 601 | uint64_t arg; |
607 | __u32 in_size; | 602 | uint32_t in_size; |
608 | __u32 out_size; | 603 | uint32_t out_size; |
609 | }; | 604 | }; |
610 | 605 | ||
611 | struct fuse_ioctl_iovec { | 606 | struct fuse_ioctl_iovec { |
612 | __u64 base; | 607 | uint64_t base; |
613 | __u64 len; | 608 | uint64_t len; |
614 | }; | 609 | }; |
615 | 610 | ||
616 | struct fuse_ioctl_out { | 611 | struct fuse_ioctl_out { |
617 | __s32 result; | 612 | int32_t result; |
618 | __u32 flags; | 613 | uint32_t flags; |
619 | __u32 in_iovs; | 614 | uint32_t in_iovs; |
620 | __u32 out_iovs; | 615 | uint32_t out_iovs; |
621 | }; | 616 | }; |
622 | 617 | ||
623 | struct fuse_poll_in { | 618 | struct fuse_poll_in { |
624 | __u64 fh; | 619 | uint64_t fh; |
625 | __u64 kh; | 620 | uint64_t kh; |
626 | __u32 flags; | 621 | uint32_t flags; |
627 | __u32 events; | 622 | uint32_t events; |
628 | }; | 623 | }; |
629 | 624 | ||
630 | struct fuse_poll_out { | 625 | struct fuse_poll_out { |
631 | __u32 revents; | 626 | uint32_t revents; |
632 | __u32 padding; | 627 | uint32_t padding; |
633 | }; | 628 | }; |
634 | 629 | ||
635 | struct fuse_notify_poll_wakeup_out { | 630 | struct fuse_notify_poll_wakeup_out { |
636 | __u64 kh; | 631 | uint64_t kh; |
637 | }; | 632 | }; |
638 | 633 | ||
639 | struct fuse_fallocate_in { | 634 | struct fuse_fallocate_in { |
640 | __u64 fh; | 635 | uint64_t fh; |
641 | __u64 offset; | 636 | uint64_t offset; |
642 | __u64 length; | 637 | uint64_t length; |
643 | __u32 mode; | 638 | uint32_t mode; |
644 | __u32 padding; | 639 | uint32_t padding; |
645 | }; | 640 | }; |
646 | 641 | ||
647 | struct fuse_in_header { | 642 | struct fuse_in_header { |
648 | __u32 len; | 643 | uint32_t len; |
649 | __u32 opcode; | 644 | uint32_t opcode; |
650 | __u64 unique; | 645 | uint64_t unique; |
651 | __u64 nodeid; | 646 | uint64_t nodeid; |
652 | __u32 uid; | 647 | uint32_t uid; |
653 | __u32 gid; | 648 | uint32_t gid; |
654 | __u32 pid; | 649 | uint32_t pid; |
655 | __u32 padding; | 650 | uint32_t padding; |
656 | }; | 651 | }; |
657 | 652 | ||
658 | struct fuse_out_header { | 653 | struct fuse_out_header { |
659 | __u32 len; | 654 | uint32_t len; |
660 | __s32 error; | 655 | int32_t error; |
661 | __u64 unique; | 656 | uint64_t unique; |
662 | }; | 657 | }; |
663 | 658 | ||
664 | struct fuse_dirent { | 659 | struct fuse_dirent { |
665 | __u64 ino; | 660 | uint64_t ino; |
666 | __u64 off; | 661 | uint64_t off; |
667 | __u32 namelen; | 662 | uint32_t namelen; |
668 | __u32 type; | 663 | uint32_t type; |
669 | char name[]; | 664 | char name[]; |
670 | }; | 665 | }; |
671 | 666 | ||
672 | #define FUSE_NAME_OFFSET offsetof(struct fuse_dirent, name) | 667 | #define FUSE_NAME_OFFSET offsetof(struct fuse_dirent, name) |
673 | #define FUSE_DIRENT_ALIGN(x) (((x) + sizeof(__u64) - 1) & ~(sizeof(__u64) - 1)) | 668 | #define FUSE_DIRENT_ALIGN(x) \ |
669 | (((x) + sizeof(uint64_t) - 1) & ~(sizeof(uint64_t) - 1)) | ||
674 | #define FUSE_DIRENT_SIZE(d) \ | 670 | #define FUSE_DIRENT_SIZE(d) \ |
675 | FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + (d)->namelen) | 671 | FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + (d)->namelen) |
676 | 672 | ||
@@ -685,47 +681,47 @@ struct fuse_direntplus { | |||
685 | FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET_DIRENTPLUS + (d)->dirent.namelen) | 681 | FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET_DIRENTPLUS + (d)->dirent.namelen) |
686 | 682 | ||
687 | struct fuse_notify_inval_inode_out { | 683 | struct fuse_notify_inval_inode_out { |
688 | __u64 ino; | 684 | uint64_t ino; |
689 | __s64 off; | 685 | int64_t off; |
690 | __s64 len; | 686 | int64_t len; |
691 | }; | 687 | }; |
692 | 688 | ||
693 | struct fuse_notify_inval_entry_out { | 689 | struct fuse_notify_inval_entry_out { |
694 | __u64 parent; | 690 | uint64_t parent; |
695 | __u32 namelen; | 691 | uint32_t namelen; |
696 | __u32 padding; | 692 | uint32_t padding; |
697 | }; | 693 | }; |
698 | 694 | ||
699 | struct fuse_notify_delete_out { | 695 | struct fuse_notify_delete_out { |
700 | __u64 parent; | 696 | uint64_t parent; |
701 | __u64 child; | 697 | uint64_t child; |
702 | __u32 namelen; | 698 | uint32_t namelen; |
703 | __u32 padding; | 699 | uint32_t padding; |
704 | }; | 700 | }; |
705 | 701 | ||
706 | struct fuse_notify_store_out { | 702 | struct fuse_notify_store_out { |
707 | __u64 nodeid; | 703 | uint64_t nodeid; |
708 | __u64 offset; | 704 | uint64_t offset; |
709 | __u32 size; | 705 | uint32_t size; |
710 | __u32 padding; | 706 | uint32_t padding; |
711 | }; | 707 | }; |
712 | 708 | ||
713 | struct fuse_notify_retrieve_out { | 709 | struct fuse_notify_retrieve_out { |
714 | __u64 notify_unique; | 710 | uint64_t notify_unique; |
715 | __u64 nodeid; | 711 | uint64_t nodeid; |
716 | __u64 offset; | 712 | uint64_t offset; |
717 | __u32 size; | 713 | uint32_t size; |
718 | __u32 padding; | 714 | uint32_t padding; |
719 | }; | 715 | }; |
720 | 716 | ||
721 | /* Matches the size of fuse_write_in */ | 717 | /* Matches the size of fuse_write_in */ |
722 | struct fuse_notify_retrieve_in { | 718 | struct fuse_notify_retrieve_in { |
723 | __u64 dummy1; | 719 | uint64_t dummy1; |
724 | __u64 offset; | 720 | uint64_t offset; |
725 | __u32 size; | 721 | uint32_t size; |
726 | __u32 dummy2; | 722 | uint32_t dummy2; |
727 | __u64 dummy3; | 723 | uint64_t dummy3; |
728 | __u64 dummy4; | 724 | uint64_t dummy4; |
729 | }; | 725 | }; |
730 | 726 | ||
731 | #endif /* _LINUX_FUSE_H */ | 727 | #endif /* _LINUX_FUSE_H */ |
@@ -872,6 +872,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, | |||
872 | goto out_unlock; | 872 | goto out_unlock; |
873 | break; | 873 | break; |
874 | } | 874 | } |
875 | msg = ERR_PTR(-EAGAIN); | ||
875 | } else | 876 | } else |
876 | break; | 877 | break; |
877 | msg_counter++; | 878 | msg_counter++; |
diff --git a/kernel/.gitignore b/kernel/.gitignore index ab4f1090f437..b3097bde4e9c 100644 --- a/kernel/.gitignore +++ b/kernel/.gitignore | |||
@@ -4,3 +4,4 @@ | |||
4 | config_data.h | 4 | config_data.h |
5 | config_data.gz | 5 | config_data.gz |
6 | timeconst.h | 6 | timeconst.h |
7 | hz.bc | ||
diff --git a/kernel/capability.c b/kernel/capability.c index 493d97259484..f6c2ce5701e1 100644 --- a/kernel/capability.c +++ b/kernel/capability.c | |||
@@ -393,6 +393,30 @@ bool ns_capable(struct user_namespace *ns, int cap) | |||
393 | EXPORT_SYMBOL(ns_capable); | 393 | EXPORT_SYMBOL(ns_capable); |
394 | 394 | ||
395 | /** | 395 | /** |
396 | * file_ns_capable - Determine if the file's opener had a capability in effect | ||
397 | * @file: The file we want to check | ||
398 | * @ns: The usernamespace we want the capability in | ||
399 | * @cap: The capability to be tested for | ||
400 | * | ||
401 | * Return true if task that opened the file had a capability in effect | ||
402 | * when the file was opened. | ||
403 | * | ||
404 | * This does not set PF_SUPERPRIV because the caller may not | ||
405 | * actually be privileged. | ||
406 | */ | ||
407 | bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap) | ||
408 | { | ||
409 | if (WARN_ON_ONCE(!cap_valid(cap))) | ||
410 | return false; | ||
411 | |||
412 | if (security_capable(file->f_cred, ns, cap) == 0) | ||
413 | return true; | ||
414 | |||
415 | return false; | ||
416 | } | ||
417 | EXPORT_SYMBOL(file_ns_capable); | ||
418 | |||
419 | /** | ||
396 | * capable - Determine if the current task has a superior capability in effect | 420 | * capable - Determine if the current task has a superior capability in effect |
397 | * @cap: The capability to be tested for | 421 | * @cap: The capability to be tested for |
398 | * | 422 | * |
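file_ns_capable() checks the credentials that were in effect when the file was opened, not current's, which is what matters when a privileged process opens a descriptor and hands it to a less privileged one. A hedged usage sketch (the ioctl handler is illustrative):

#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/user_namespace.h>

/* illustrative: trust the opener of the fd, not the current caller */
static long mydev_priv_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	if (!file_ns_capable(file, &init_user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* ... privileged operation ... */
	return 0;
}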
diff --git a/kernel/events/core.c b/kernel/events/core.c index 59412d037eed..9fcb0944f071 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -4596,6 +4596,7 @@ void perf_event_comm(struct task_struct *task) | |||
4596 | struct perf_event_context *ctx; | 4596 | struct perf_event_context *ctx; |
4597 | int ctxn; | 4597 | int ctxn; |
4598 | 4598 | ||
4599 | rcu_read_lock(); | ||
4599 | for_each_task_context_nr(ctxn) { | 4600 | for_each_task_context_nr(ctxn) { |
4600 | ctx = task->perf_event_ctxp[ctxn]; | 4601 | ctx = task->perf_event_ctxp[ctxn]; |
4601 | if (!ctx) | 4602 | if (!ctx) |
@@ -4603,6 +4604,7 @@ void perf_event_comm(struct task_struct *task) | |||
4603 | 4604 | ||
4604 | perf_event_enable_on_exec(ctx); | 4605 | perf_event_enable_on_exec(ctx); |
4605 | } | 4606 | } |
4607 | rcu_read_unlock(); | ||
4606 | 4608 | ||
4607 | if (!atomic_read(&nr_comm_events)) | 4609 | if (!atomic_read(&nr_comm_events)) |
4608 | return; | 4610 | return; |
@@ -4737,7 +4739,8 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) | |||
4737 | } else { | 4739 | } else { |
4738 | if (arch_vma_name(mmap_event->vma)) { | 4740 | if (arch_vma_name(mmap_event->vma)) { |
4739 | name = strncpy(tmp, arch_vma_name(mmap_event->vma), | 4741 | name = strncpy(tmp, arch_vma_name(mmap_event->vma), |
4740 | sizeof(tmp)); | 4742 | sizeof(tmp) - 1); |
4743 | tmp[sizeof(tmp) - 1] = '\0'; | ||
4741 | goto got_name; | 4744 | goto got_name; |
4742 | } | 4745 | } |
4743 | 4746 | ||
@@ -5330,7 +5333,7 @@ static void sw_perf_event_destroy(struct perf_event *event) | |||
5330 | 5333 | ||
5331 | static int perf_swevent_init(struct perf_event *event) | 5334 | static int perf_swevent_init(struct perf_event *event) |
5332 | { | 5335 | { |
5333 | int event_id = event->attr.config; | 5336 | u64 event_id = event->attr.config; |
5334 | 5337 | ||
5335 | if (event->attr.type != PERF_TYPE_SOFTWARE) | 5338 | if (event->attr.type != PERF_TYPE_SOFTWARE) |
5336 | return -ENOENT; | 5339 | return -ENOENT; |
@@ -5986,6 +5989,7 @@ skip_type: | |||
5986 | if (pmu->pmu_cpu_context) | 5989 | if (pmu->pmu_cpu_context) |
5987 | goto got_cpu_context; | 5990 | goto got_cpu_context; |
5988 | 5991 | ||
5992 | ret = -ENOMEM; | ||
5989 | pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context); | 5993 | pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context); |
5990 | if (!pmu->pmu_cpu_context) | 5994 | if (!pmu->pmu_cpu_context) |
5991 | goto free_dev; | 5995 | goto free_dev; |
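The strncpy() change above is the standard truncation idiom: copy at most sizeof(tmp) - 1 bytes and terminate by hand, because strncpy() leaves the buffer unterminated when the source is at least as long as the limit. The idiom in isolation (plain C, names illustrative):

#include <string.h>

static void copy_name(char *dst, size_t dst_len, const char *src)
{
	/* strncpy() does not NUL-terminate when src fills the buffer */
	strncpy(dst, src, dst_len - 1);
	dst[dst_len - 1] = '\0';
}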
diff --git a/kernel/events/internal.h b/kernel/events/internal.h index d56a64c99a8b..eb675c4d59df 100644 --- a/kernel/events/internal.h +++ b/kernel/events/internal.h | |||
@@ -16,7 +16,7 @@ struct ring_buffer { | |||
16 | int page_order; /* allocation order */ | 16 | int page_order; /* allocation order */ |
17 | #endif | 17 | #endif |
18 | int nr_pages; /* nr of data pages */ | 18 | int nr_pages; /* nr of data pages */ |
19 | int writable; /* are we writable */ | 19 | int overwrite; /* can overwrite itself */ |
20 | 20 | ||
21 | atomic_t poll; /* POLL_ for wakeups */ | 21 | atomic_t poll; /* POLL_ for wakeups */ |
22 | 22 | ||
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 23cb34ff3973..97fddb09762b 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c | |||
@@ -18,12 +18,24 @@ | |||
18 | static bool perf_output_space(struct ring_buffer *rb, unsigned long tail, | 18 | static bool perf_output_space(struct ring_buffer *rb, unsigned long tail, |
19 | unsigned long offset, unsigned long head) | 19 | unsigned long offset, unsigned long head) |
20 | { | 20 | { |
21 | unsigned long mask; | 21 | unsigned long sz = perf_data_size(rb); |
22 | unsigned long mask = sz - 1; | ||
22 | 23 | ||
23 | if (!rb->writable) | 24 | /* |
25 | * check if user-writable | ||
26 | * overwrite : over-write its own tail | ||
27 | * !overwrite: buffer possibly drops events. | ||
28 | */ | ||
29 | if (rb->overwrite) | ||
24 | return true; | 30 | return true; |
25 | 31 | ||
26 | mask = perf_data_size(rb) - 1; | 32 | /* |
33 | * verify that payload is not bigger than buffer | ||
34 | * otherwise masking logic may fail to detect | ||
35 | * the "not enough space" condition | ||
36 | */ | ||
37 | if ((head - offset) > sz) | ||
38 | return false; | ||
27 | 39 | ||
28 | offset = (offset - tail) & mask; | 40 | offset = (offset - tail) & mask; |
29 | head = (head - tail) & mask; | 41 | head = (head - tail) & mask; |
@@ -212,7 +224,9 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags) | |||
212 | rb->watermark = max_size / 2; | 224 | rb->watermark = max_size / 2; |
213 | 225 | ||
214 | if (flags & RING_BUFFER_WRITABLE) | 226 | if (flags & RING_BUFFER_WRITABLE) |
215 | rb->writable = 1; | 227 | rb->overwrite = 0; |
228 | else | ||
229 | rb->overwrite = 1; | ||
216 | 230 | ||
217 | atomic_set(&rb->refcount, 1); | 231 | atomic_set(&rb->refcount, 1); |
218 | 232 | ||
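The reworked perf_output_space() rejects payloads larger than the buffer before doing any mask arithmetic, because the masking only sees offsets modulo the buffer size and would make an oversized write look small. A simplified model with made-up numbers; the final check stands in for the rest of the function, which is not shown in this hunk:

#include <stdbool.h>

/* simplified model: 8-slot buffer, so sz = 8 and mask = 7 */
static bool fits(unsigned long tail, unsigned long offset, unsigned long head)
{
	unsigned long sz = 8, mask = sz - 1;

	if ((head - offset) > sz)	/* the added pre-check */
		return false;

	offset = (offset - tail) & mask;
	head = (head - tail) & mask;

	return head >= offset;		/* stand-in for the remaining checks */
}

Without the pre-check, a 10-slot write (tail = 0, offset = 0, head = 10) masks down to head = 2 and would be accepted.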
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index cc47812d3feb..14be27feda49 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
@@ -63,6 +63,7 @@ | |||
63 | DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) = | 63 | DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) = |
64 | { | 64 | { |
65 | 65 | ||
66 | .lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock), | ||
66 | .clock_base = | 67 | .clock_base = |
67 | { | 68 | { |
68 | { | 69 | { |
@@ -1642,8 +1643,6 @@ static void __cpuinit init_hrtimers_cpu(int cpu) | |||
1642 | struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); | 1643 | struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); |
1643 | int i; | 1644 | int i; |
1644 | 1645 | ||
1645 | raw_spin_lock_init(&cpu_base->lock); | ||
1646 | |||
1647 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { | 1646 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { |
1648 | cpu_base->clock_base[i].cpu_base = cpu_base; | 1647 | cpu_base->clock_base[i].cpu_base = cpu_base; |
1649 | timerqueue_init_head(&cpu_base->clock_base[i].active); | 1648 | timerqueue_init_head(&cpu_base->clock_base[i].active); |
diff --git a/kernel/kexec.c b/kernel/kexec.c index bddd3d7a74b6..ffd4e111fd67 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c | |||
@@ -55,7 +55,7 @@ struct resource crashk_res = { | |||
55 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM | 55 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM |
56 | }; | 56 | }; |
57 | struct resource crashk_low_res = { | 57 | struct resource crashk_low_res = { |
58 | .name = "Crash kernel low", | 58 | .name = "Crash kernel", |
59 | .start = 0, | 59 | .start = 0, |
60 | .end = 0, | 60 | .end = 0, |
61 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM | 61 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM |
@@ -1368,35 +1368,114 @@ static int __init parse_crashkernel_simple(char *cmdline, | |||
1368 | return 0; | 1368 | return 0; |
1369 | } | 1369 | } |
1370 | 1370 | ||
1371 | #define SUFFIX_HIGH 0 | ||
1372 | #define SUFFIX_LOW 1 | ||
1373 | #define SUFFIX_NULL 2 | ||
1374 | static __initdata char *suffix_tbl[] = { | ||
1375 | [SUFFIX_HIGH] = ",high", | ||
1376 | [SUFFIX_LOW] = ",low", | ||
1377 | [SUFFIX_NULL] = NULL, | ||
1378 | }; | ||
1379 | |||
1371 | /* | 1380 | /* |
1372 | * That function is the entry point for command line parsing and should be | 1381 | * That function parses "suffix" crashkernel command lines like |
1373 | * called from the arch-specific code. | 1382 | * |
1383 | * crashkernel=size,[high|low] | ||
1384 | * | ||
1385 | * It returns 0 on success and -EINVAL on failure. | ||
1374 | */ | 1386 | */ |
1387 | static int __init parse_crashkernel_suffix(char *cmdline, | ||
1388 | unsigned long long *crash_size, | ||
1389 | unsigned long long *crash_base, | ||
1390 | const char *suffix) | ||
1391 | { | ||
1392 | char *cur = cmdline; | ||
1393 | |||
1394 | *crash_size = memparse(cmdline, &cur); | ||
1395 | if (cmdline == cur) { | ||
1396 | pr_warn("crashkernel: memory value expected\n"); | ||
1397 | return -EINVAL; | ||
1398 | } | ||
1399 | |||
1400 | /* check with suffix */ | ||
1401 | if (strncmp(cur, suffix, strlen(suffix))) { | ||
1402 | pr_warn("crashkernel: unrecognized char\n"); | ||
1403 | return -EINVAL; | ||
1404 | } | ||
1405 | cur += strlen(suffix); | ||
1406 | if (*cur != ' ' && *cur != '\0') { | ||
1407 | pr_warn("crashkernel: unrecognized char\n"); | ||
1408 | return -EINVAL; | ||
1409 | } | ||
1410 | |||
1411 | return 0; | ||
1412 | } | ||
1413 | |||
1414 | static __init char *get_last_crashkernel(char *cmdline, | ||
1415 | const char *name, | ||
1416 | const char *suffix) | ||
1417 | { | ||
1418 | char *p = cmdline, *ck_cmdline = NULL; | ||
1419 | |||
1420 | /* find crashkernel and use the last one if there are more */ | ||
1421 | p = strstr(p, name); | ||
1422 | while (p) { | ||
1423 | char *end_p = strchr(p, ' '); | ||
1424 | char *q; | ||
1425 | |||
1426 | if (!end_p) | ||
1427 | end_p = p + strlen(p); | ||
1428 | |||
1429 | if (!suffix) { | ||
1430 | int i; | ||
1431 | |||
1432 | /* skip the one with any known suffix */ | ||
1433 | for (i = 0; suffix_tbl[i]; i++) { | ||
1434 | q = end_p - strlen(suffix_tbl[i]); | ||
1435 | if (!strncmp(q, suffix_tbl[i], | ||
1436 | strlen(suffix_tbl[i]))) | ||
1437 | goto next; | ||
1438 | } | ||
1439 | ck_cmdline = p; | ||
1440 | } else { | ||
1441 | q = end_p - strlen(suffix); | ||
1442 | if (!strncmp(q, suffix, strlen(suffix))) | ||
1443 | ck_cmdline = p; | ||
1444 | } | ||
1445 | next: | ||
1446 | p = strstr(p+1, name); | ||
1447 | } | ||
1448 | |||
1449 | if (!ck_cmdline) | ||
1450 | return NULL; | ||
1451 | |||
1452 | return ck_cmdline; | ||
1453 | } | ||
1454 | |||
1375 | static int __init __parse_crashkernel(char *cmdline, | 1455 | static int __init __parse_crashkernel(char *cmdline, |
1376 | unsigned long long system_ram, | 1456 | unsigned long long system_ram, |
1377 | unsigned long long *crash_size, | 1457 | unsigned long long *crash_size, |
1378 | unsigned long long *crash_base, | 1458 | unsigned long long *crash_base, |
1379 | const char *name) | 1459 | const char *name, |
1460 | const char *suffix) | ||
1380 | { | 1461 | { |
1381 | char *p = cmdline, *ck_cmdline = NULL; | ||
1382 | char *first_colon, *first_space; | 1462 | char *first_colon, *first_space; |
1463 | char *ck_cmdline; | ||
1383 | 1464 | ||
1384 | BUG_ON(!crash_size || !crash_base); | 1465 | BUG_ON(!crash_size || !crash_base); |
1385 | *crash_size = 0; | 1466 | *crash_size = 0; |
1386 | *crash_base = 0; | 1467 | *crash_base = 0; |
1387 | 1468 | ||
1388 | /* find crashkernel and use the last one if there are more */ | 1469 | ck_cmdline = get_last_crashkernel(cmdline, name, suffix); |
1389 | p = strstr(p, name); | ||
1390 | while (p) { | ||
1391 | ck_cmdline = p; | ||
1392 | p = strstr(p+1, name); | ||
1393 | } | ||
1394 | 1470 | ||
1395 | if (!ck_cmdline) | 1471 | if (!ck_cmdline) |
1396 | return -EINVAL; | 1472 | return -EINVAL; |
1397 | 1473 | ||
1398 | ck_cmdline += strlen(name); | 1474 | ck_cmdline += strlen(name); |
1399 | 1475 | ||
1476 | if (suffix) | ||
1477 | return parse_crashkernel_suffix(ck_cmdline, crash_size, | ||
1478 | crash_base, suffix); | ||
1400 | /* | 1479 | /* |
1401 | * if the commandline contains a ':', then that's the extended | 1480 | * if the commandline contains a ':', then that's the extended |
1402 | * syntax -- if not, it must be the classic syntax | 1481 | * syntax -- if not, it must be the classic syntax |
@@ -1413,13 +1492,26 @@ static int __init __parse_crashkernel(char *cmdline, | |||
1413 | return 0; | 1492 | return 0; |
1414 | } | 1493 | } |
1415 | 1494 | ||
1495 | /* | ||
1496 | * That function is the entry point for command line parsing and should be | ||
1497 | * called from the arch-specific code. | ||
1498 | */ | ||
1416 | int __init parse_crashkernel(char *cmdline, | 1499 | int __init parse_crashkernel(char *cmdline, |
1417 | unsigned long long system_ram, | 1500 | unsigned long long system_ram, |
1418 | unsigned long long *crash_size, | 1501 | unsigned long long *crash_size, |
1419 | unsigned long long *crash_base) | 1502 | unsigned long long *crash_base) |
1420 | { | 1503 | { |
1421 | return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, | 1504 | return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, |
1422 | "crashkernel="); | 1505 | "crashkernel=", NULL); |
1506 | } | ||
1507 | |||
1508 | int __init parse_crashkernel_high(char *cmdline, | ||
1509 | unsigned long long system_ram, | ||
1510 | unsigned long long *crash_size, | ||
1511 | unsigned long long *crash_base) | ||
1512 | { | ||
1513 | return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, | ||
1514 | "crashkernel=", suffix_tbl[SUFFIX_HIGH]); | ||
1423 | } | 1515 | } |
1424 | 1516 | ||
1425 | int __init parse_crashkernel_low(char *cmdline, | 1517 | int __init parse_crashkernel_low(char *cmdline, |
@@ -1428,7 +1520,7 @@ int __init parse_crashkernel_low(char *cmdline, | |||
1428 | unsigned long long *crash_base) | 1520 | unsigned long long *crash_base) |
1429 | { | 1521 | { |
1430 | return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, | 1522 | return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, |
1431 | "crashkernel_low="); | 1523 | "crashkernel=", suffix_tbl[SUFFIX_LOW]); |
1432 | } | 1524 | } |
1433 | 1525 | ||
1434 | static void update_vmcoreinfo_note(void) | 1526 | static void update_vmcoreinfo_note(void) |
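With the ",high" and ",low" suffixes handled in the common parser, architecture code just calls the matching wrapper. A hedged sketch of a reservation path in arch setup code using the new entry points; the surrounding function and its fallback order are illustrative, with error handling trimmed:

#include <linux/init.h>
#include <linux/kexec.h>

/* illustrative arch-side reservation path */
static void __init reserve_crashkernel(unsigned long long total_mem)
{
	unsigned long long crash_size, crash_base;
	int ret;

	/* plain "crashkernel=..." forms first */
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret != 0 || crash_size == 0) {
		/* fall back to the new "crashkernel=size,high" form */
		ret = parse_crashkernel_high(boot_command_line, total_mem,
					     &crash_size, &crash_base);
		if (ret != 0 || crash_size == 0)
			return;
	}

	/*
	 * ... reserve crash_size bytes at/above crash_base, then consult
	 * parse_crashkernel_low() for the optional ",low" region ...
	 */
}

On a command line this corresponds to, for example, crashkernel=512M,high together with an optional crashkernel=72M,low.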
diff --git a/kernel/kprobes.c b/kernel/kprobes.c index e35be53f6613..3fed7f0cbcdf 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c | |||
@@ -794,16 +794,16 @@ out: | |||
794 | } | 794 | } |
795 | 795 | ||
796 | #ifdef CONFIG_SYSCTL | 796 | #ifdef CONFIG_SYSCTL |
797 | /* This should be called with kprobe_mutex locked */ | ||
798 | static void __kprobes optimize_all_kprobes(void) | 797 | static void __kprobes optimize_all_kprobes(void) |
799 | { | 798 | { |
800 | struct hlist_head *head; | 799 | struct hlist_head *head; |
801 | struct kprobe *p; | 800 | struct kprobe *p; |
802 | unsigned int i; | 801 | unsigned int i; |
803 | 802 | ||
803 | mutex_lock(&kprobe_mutex); | ||
804 | /* If optimization is already allowed, just return */ | 804 | /* If optimization is already allowed, just return */ |
805 | if (kprobes_allow_optimization) | 805 | if (kprobes_allow_optimization) |
806 | return; | 806 | goto out; |
807 | 807 | ||
808 | kprobes_allow_optimization = true; | 808 | kprobes_allow_optimization = true; |
809 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { | 809 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
@@ -813,18 +813,22 @@ static void __kprobes optimize_all_kprobes(void) | |||
813 | optimize_kprobe(p); | 813 | optimize_kprobe(p); |
814 | } | 814 | } |
815 | printk(KERN_INFO "Kprobes globally optimized\n"); | 815 | printk(KERN_INFO "Kprobes globally optimized\n"); |
816 | out: | ||
817 | mutex_unlock(&kprobe_mutex); | ||
816 | } | 818 | } |
817 | 819 | ||
818 | /* This should be called with kprobe_mutex locked */ | ||
819 | static void __kprobes unoptimize_all_kprobes(void) | 820 | static void __kprobes unoptimize_all_kprobes(void) |
820 | { | 821 | { |
821 | struct hlist_head *head; | 822 | struct hlist_head *head; |
822 | struct kprobe *p; | 823 | struct kprobe *p; |
823 | unsigned int i; | 824 | unsigned int i; |
824 | 825 | ||
826 | mutex_lock(&kprobe_mutex); | ||
825 | /* If optimization is already prohibited, just return */ | 827 | /* If optimization is already prohibited, just return */ |
826 | if (!kprobes_allow_optimization) | 828 | if (!kprobes_allow_optimization) { |
829 | mutex_unlock(&kprobe_mutex); | ||
827 | return; | 830 | return; |
831 | } | ||
828 | 832 | ||
829 | kprobes_allow_optimization = false; | 833 | kprobes_allow_optimization = false; |
830 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { | 834 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
@@ -834,11 +838,14 @@ static void __kprobes unoptimize_all_kprobes(void) | |||
834 | unoptimize_kprobe(p, false); | 838 | unoptimize_kprobe(p, false); |
835 | } | 839 | } |
836 | } | 840 | } |
841 | mutex_unlock(&kprobe_mutex); | ||
842 | |||
837 | /* Wait for unoptimizing completion */ | 843 | /* Wait for unoptimizing completion */ |
838 | wait_for_kprobe_optimizer(); | 844 | wait_for_kprobe_optimizer(); |
839 | printk(KERN_INFO "Kprobes globally unoptimized\n"); | 845 | printk(KERN_INFO "Kprobes globally unoptimized\n"); |
840 | } | 846 | } |
841 | 847 | ||
848 | static DEFINE_MUTEX(kprobe_sysctl_mutex); | ||
842 | int sysctl_kprobes_optimization; | 849 | int sysctl_kprobes_optimization; |
843 | int proc_kprobes_optimization_handler(struct ctl_table *table, int write, | 850 | int proc_kprobes_optimization_handler(struct ctl_table *table, int write, |
844 | void __user *buffer, size_t *length, | 851 | void __user *buffer, size_t *length, |
@@ -846,7 +853,7 @@ int proc_kprobes_optimization_handler(struct ctl_table *table, int write, | |||
846 | { | 853 | { |
847 | int ret; | 854 | int ret; |
848 | 855 | ||
849 | mutex_lock(&kprobe_mutex); | 856 | mutex_lock(&kprobe_sysctl_mutex); |
850 | sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0; | 857 | sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0; |
851 | ret = proc_dointvec_minmax(table, write, buffer, length, ppos); | 858 | ret = proc_dointvec_minmax(table, write, buffer, length, ppos); |
852 | 859 | ||
@@ -854,7 +861,7 @@ int proc_kprobes_optimization_handler(struct ctl_table *table, int write, | |||
854 | optimize_all_kprobes(); | 861 | optimize_all_kprobes(); |
855 | else | 862 | else |
856 | unoptimize_all_kprobes(); | 863 | unoptimize_all_kprobes(); |
857 | mutex_unlock(&kprobe_mutex); | 864 | mutex_unlock(&kprobe_sysctl_mutex); |
858 | 865 | ||
859 | return ret; | 866 | return ret; |
860 | } | 867 | } |
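
The hunk above moves kprobe_mutex acquisition into optimize_all_kprobes() and unoptimize_all_kprobes() themselves and gives the sysctl handler its own kprobe_sysctl_mutex, so the helpers no longer depend on callers holding the lock. A minimal userspace sketch of the same pattern — take the lock inside the helper and funnel early exits through a single unlock — using pthreads; the names and the printf are illustrative, not kernel code:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static bool optimized;

/* The helper takes the lock itself; early exits funnel through one unlock. */
static void enable_optimization(void)
{
        pthread_mutex_lock(&state_lock);
        if (optimized)
                goto out;               /* already enabled, nothing to do */
        optimized = true;
        printf("optimization enabled\n");
out:
        pthread_mutex_unlock(&state_lock);
}

int main(void)
{
        enable_optimization();
        enable_optimization();          /* second call is a harmless no-op */
        return 0;
}
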
diff --git a/kernel/kthread.c b/kernel/kthread.c index 691dc2ef9baf..9eb7fed0bbaa 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c | |||
@@ -124,12 +124,12 @@ void *kthread_data(struct task_struct *task) | |||
124 | 124 | ||
125 | static void __kthread_parkme(struct kthread *self) | 125 | static void __kthread_parkme(struct kthread *self) |
126 | { | 126 | { |
127 | __set_current_state(TASK_INTERRUPTIBLE); | 127 | __set_current_state(TASK_PARKED); |
128 | while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) { | 128 | while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) { |
129 | if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags)) | 129 | if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags)) |
130 | complete(&self->parked); | 130 | complete(&self->parked); |
131 | schedule(); | 131 | schedule(); |
132 | __set_current_state(TASK_INTERRUPTIBLE); | 132 | __set_current_state(TASK_PARKED); |
133 | } | 133 | } |
134 | clear_bit(KTHREAD_IS_PARKED, &self->flags); | 134 | clear_bit(KTHREAD_IS_PARKED, &self->flags); |
135 | __set_current_state(TASK_RUNNING); | 135 | __set_current_state(TASK_RUNNING); |
@@ -256,8 +256,13 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), | |||
256 | } | 256 | } |
257 | EXPORT_SYMBOL(kthread_create_on_node); | 257 | EXPORT_SYMBOL(kthread_create_on_node); |
258 | 258 | ||
259 | static void __kthread_bind(struct task_struct *p, unsigned int cpu) | 259 | static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state) |
260 | { | 260 | { |
261 | /* Must have done schedule() in kthread() before we set_task_cpu */ | ||
262 | if (!wait_task_inactive(p, state)) { | ||
263 | WARN_ON(1); | ||
264 | return; | ||
265 | } | ||
261 | /* It's safe because the task is inactive. */ | 266 | /* It's safe because the task is inactive. */ |
262 | do_set_cpus_allowed(p, cpumask_of(cpu)); | 267 | do_set_cpus_allowed(p, cpumask_of(cpu)); |
263 | p->flags |= PF_THREAD_BOUND; | 268 | p->flags |= PF_THREAD_BOUND; |
@@ -274,12 +279,7 @@ static void __kthread_bind(struct task_struct *p, unsigned int cpu) | |||
274 | */ | 279 | */ |
275 | void kthread_bind(struct task_struct *p, unsigned int cpu) | 280 | void kthread_bind(struct task_struct *p, unsigned int cpu) |
276 | { | 281 | { |
277 | /* Must have done schedule() in kthread() before we set_task_cpu */ | 282 | __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE); |
278 | if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) { | ||
279 | WARN_ON(1); | ||
280 | return; | ||
281 | } | ||
282 | __kthread_bind(p, cpu); | ||
283 | } | 283 | } |
284 | EXPORT_SYMBOL(kthread_bind); | 284 | EXPORT_SYMBOL(kthread_bind); |
285 | 285 | ||
@@ -324,6 +324,22 @@ static struct kthread *task_get_live_kthread(struct task_struct *k) | |||
324 | return NULL; | 324 | return NULL; |
325 | } | 325 | } |
326 | 326 | ||
327 | static void __kthread_unpark(struct task_struct *k, struct kthread *kthread) | ||
328 | { | ||
329 | clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); | ||
330 | /* | ||
331 | * We clear the IS_PARKED bit here as we don't wait | ||
332 | * until the task has left the park code. So if we'd | ||
333 | * park before that happens we'd see the IS_PARKED bit | ||
334 | * which might be about to be cleared. | ||
335 | */ | ||
336 | if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) { | ||
337 | if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags)) | ||
338 | __kthread_bind(k, kthread->cpu, TASK_PARKED); | ||
339 | wake_up_state(k, TASK_PARKED); | ||
340 | } | ||
341 | } | ||
342 | |||
327 | /** | 343 | /** |
328 | * kthread_unpark - unpark a thread created by kthread_create(). | 344 | * kthread_unpark - unpark a thread created by kthread_create(). |
329 | * @k: thread created by kthread_create(). | 345 | * @k: thread created by kthread_create(). |
@@ -336,20 +352,8 @@ void kthread_unpark(struct task_struct *k) | |||
336 | { | 352 | { |
337 | struct kthread *kthread = task_get_live_kthread(k); | 353 | struct kthread *kthread = task_get_live_kthread(k); |
338 | 354 | ||
339 | if (kthread) { | 355 | if (kthread) |
340 | clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); | 356 | __kthread_unpark(k, kthread); |
341 | /* | ||
342 | * We clear the IS_PARKED bit here as we don't wait | ||
343 | * until the task has left the park code. So if we'd | ||
344 | * park before that happens we'd see the IS_PARKED bit | ||
345 | * which might be about to be cleared. | ||
346 | */ | ||
347 | if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) { | ||
348 | if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags)) | ||
349 | __kthread_bind(k, kthread->cpu); | ||
350 | wake_up_process(k); | ||
351 | } | ||
352 | } | ||
353 | put_task_struct(k); | 357 | put_task_struct(k); |
354 | } | 358 | } |
355 | 359 | ||
@@ -407,7 +411,7 @@ int kthread_stop(struct task_struct *k) | |||
407 | trace_sched_kthread_stop(k); | 411 | trace_sched_kthread_stop(k); |
408 | if (kthread) { | 412 | if (kthread) { |
409 | set_bit(KTHREAD_SHOULD_STOP, &kthread->flags); | 413 | set_bit(KTHREAD_SHOULD_STOP, &kthread->flags); |
410 | clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); | 414 | __kthread_unpark(k, kthread); |
411 | wake_up_process(k); | 415 | wake_up_process(k); |
412 | wait_for_completion(&kthread->exited); | 416 | wait_for_completion(&kthread->exited); |
413 | } | 417 | } |
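
The kthread changes make parked threads sleep in a dedicated TASK_PARKED state and fold the wait_task_inactive() check into __kthread_bind(), so unparking can rebind a per-cpu thread and wake it with wake_up_state(k, TASK_PARKED) without racing against a half-parked task. As a loose userspace analogy only (the kernel uses scheduler states and flag bits, not condition variables), parking a worker looks like this:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool should_park = true;

static void *worker(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        while (should_park)             /* "parked": sleep until told otherwise */
                pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
        printf("worker unparked, doing work\n");
        return NULL;
}

int main(void)
{
        pthread_t tid;

        pthread_create(&tid, NULL, worker, NULL);
        sleep(1);                       /* worker is now parked */
        pthread_mutex_lock(&lock);
        should_park = false;            /* analogous to clearing SHOULD_PARK ... */
        pthread_cond_signal(&cond);     /* ... and waking in the parked state */
        pthread_mutex_unlock(&lock);
        pthread_join(tid, NULL);
        return 0;
}
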
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c index c685e31492df..c3ae1446461c 100644 --- a/kernel/sched/clock.c +++ b/kernel/sched/clock.c | |||
@@ -176,10 +176,36 @@ static u64 sched_clock_remote(struct sched_clock_data *scd) | |||
176 | u64 this_clock, remote_clock; | 176 | u64 this_clock, remote_clock; |
177 | u64 *ptr, old_val, val; | 177 | u64 *ptr, old_val, val; |
178 | 178 | ||
179 | #if BITS_PER_LONG != 64 | ||
180 | again: | ||
181 | /* | ||
182 | * Careful here: The local and the remote clock values need to | ||
183 | * be read out atomic as we need to compare the values and | ||
184 | * then update either the local or the remote side. So the | ||
185 | * cmpxchg64 below only protects one readout. | ||
186 | * | ||
187 | * We must reread via sched_clock_local() in the retry case on | ||
188 | * 32bit as an NMI could use sched_clock_local() via the | ||
189 | * tracer and hit between the readout of | ||
190 | * the low32bit and the high 32bit portion. | ||
191 | */ | ||
192 | this_clock = sched_clock_local(my_scd); | ||
193 | /* | ||
194 | * We must enforce atomic readout on 32bit, otherwise the | ||
195 | * update on the remote cpu can hit inbetween the readout of | ||
196 | * the low32bit and the high 32bit portion. | ||
197 | */ | ||
198 | remote_clock = cmpxchg64(&scd->clock, 0, 0); | ||
199 | #else | ||
200 | /* | ||
201 | * On 64bit the read of [my]scd->clock is atomic versus the | ||
202 | * update, so we can avoid the above 32bit dance. | ||
203 | */ | ||
179 | sched_clock_local(my_scd); | 204 | sched_clock_local(my_scd); |
180 | again: | 205 | again: |
181 | this_clock = my_scd->clock; | 206 | this_clock = my_scd->clock; |
182 | remote_clock = scd->clock; | 207 | remote_clock = scd->clock; |
208 | #endif | ||
183 | 209 | ||
184 | /* | 210 | /* |
185 | * Use the opportunity that we have both locks | 211 | * Use the opportunity that we have both locks |
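
On 32-bit, the new code reads the remote clock with cmpxchg64(&scd->clock, 0, 0): a compare-and-swap that never changes the stored value but returns one coherent 64-bit snapshot, so the comparison cannot operate on a torn low/high word pair. A small sketch of the same trick, with a GCC/Clang __sync builtin standing in for the kernel's cmpxchg64:

#include <stdint.h>
#include <stdio.h>

static uint64_t remote_clock_ns;

/*
 * Read a 64-bit value atomically even on a 32-bit target: a compare-and-swap
 * of 0 -> 0 never modifies the value but returns a single coherent snapshot,
 * so the reader cannot observe half-updated low/high words.
 */
static uint64_t atomic_read64(uint64_t *p)
{
        return __sync_val_compare_and_swap(p, 0, 0);
}

int main(void)
{
        remote_clock_ns = 123456789ULL;
        printf("snapshot: %llu\n",
               (unsigned long long)atomic_read64(&remote_clock_ns));
        return 0;
}
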
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 7f12624a393c..67d04651f44b 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -1498,8 +1498,10 @@ static void try_to_wake_up_local(struct task_struct *p) | |||
1498 | { | 1498 | { |
1499 | struct rq *rq = task_rq(p); | 1499 | struct rq *rq = task_rq(p); |
1500 | 1500 | ||
1501 | BUG_ON(rq != this_rq()); | 1501 | if (WARN_ON_ONCE(rq != this_rq()) || |
1502 | BUG_ON(p == current); | 1502 | WARN_ON_ONCE(p == current)) |
1503 | return; | ||
1504 | |||
1503 | lockdep_assert_held(&rq->lock); | 1505 | lockdep_assert_held(&rq->lock); |
1504 | 1506 | ||
1505 | if (!raw_spin_trylock(&p->pi_lock)) { | 1507 | if (!raw_spin_trylock(&p->pi_lock)) { |
@@ -4999,7 +5001,7 @@ static void sd_free_ctl_entry(struct ctl_table **tablep) | |||
4999 | } | 5001 | } |
5000 | 5002 | ||
5001 | static int min_load_idx = 0; | 5003 | static int min_load_idx = 0; |
5002 | static int max_load_idx = CPU_LOAD_IDX_MAX; | 5004 | static int max_load_idx = CPU_LOAD_IDX_MAX-1; |
5003 | 5005 | ||
5004 | static void | 5006 | static void |
5005 | set_table_entry(struct ctl_table *entry, | 5007 | set_table_entry(struct ctl_table *entry, |
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index ed12cbb135f4..e93cca92f38b 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c | |||
@@ -310,7 +310,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times) | |||
310 | 310 | ||
311 | t = tsk; | 311 | t = tsk; |
312 | do { | 312 | do { |
313 | task_cputime(tsk, &utime, &stime); | 313 | task_cputime(t, &utime, &stime); |
314 | times->utime += utime; | 314 | times->utime += utime; |
315 | times->stime += stime; | 315 | times->stime += stime; |
316 | times->sum_exec_runtime += task_sched_runtime(t); | 316 | times->sum_exec_runtime += task_sched_runtime(t); |
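
The one-character fix above replaces tsk with the loop variable t, so thread_group_cputime() accumulates each thread's time instead of adding the group leader's repeatedly. The shape of the bug and the fix, reduced to a runnable toy:

#include <stdio.h>

struct thread_times {
        unsigned long utime;
        unsigned long stime;
};

int main(void)
{
        struct thread_times threads[3] = { {1, 2}, {3, 4}, {5, 6} };
        unsigned long utime = 0, stime = 0;

        /* Sum per-thread times using the loop variable, not threads[0]. */
        for (int i = 0; i < 3; i++) {
                utime += threads[i].utime;
                stime += threads[i].stime;
        }
        printf("utime=%lu stime=%lu\n", utime, stime);  /* 9 and 12 */
        return 0;
}
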
diff --git a/kernel/signal.c b/kernel/signal.c index dd72567767d9..598dc06be421 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -2948,7 +2948,7 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info) | |||
2948 | 2948 | ||
2949 | static int do_tkill(pid_t tgid, pid_t pid, int sig) | 2949 | static int do_tkill(pid_t tgid, pid_t pid, int sig) |
2950 | { | 2950 | { |
2951 | struct siginfo info; | 2951 | struct siginfo info = {}; |
2952 | 2952 | ||
2953 | info.si_signo = sig; | 2953 | info.si_signo = sig; |
2954 | info.si_errno = 0; | 2954 | info.si_errno = 0; |
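
Initialising the siginfo with an empty initialiser zeroes every member before the individual fields are assigned, so the later copy to userspace cannot leak stale kernel stack through fields (or union arms) this path never writes. A userspace illustration of the same zero-init-then-fill pattern; the struct layout here is made up for the example:

#include <stdio.h>

struct msg {
        int  signo;
        int  errno_;
        int  code;
        char tail[32];           /* union-like tail that this path never writes */
};

int main(void)
{
        struct msg m = {0};      /* every member starts as zero ... */

        m.signo = 9;
        m.code  = 1;
        /* ... so copying the whole struct out cannot expose stale stack data */
        printf("signo=%d errno=%d code=%d first tail byte=%d\n",
               m.signo, m.errno_, m.code, m.tail[0]);
        return 0;
}
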
diff --git a/kernel/smpboot.c b/kernel/smpboot.c index 8eaed9aa9cf0..02fc5c933673 100644 --- a/kernel/smpboot.c +++ b/kernel/smpboot.c | |||
@@ -185,8 +185,18 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu) | |||
185 | } | 185 | } |
186 | get_task_struct(tsk); | 186 | get_task_struct(tsk); |
187 | *per_cpu_ptr(ht->store, cpu) = tsk; | 187 | *per_cpu_ptr(ht->store, cpu) = tsk; |
188 | if (ht->create) | 188 | if (ht->create) { |
189 | ht->create(cpu); | 189 | /* |
190 | * Make sure that the task has actually scheduled out | ||
191 | * into park position, before calling the create | ||
192 | * callback. At least the migration thread callback | ||
193 | * requires that the task is off the runqueue. | ||
194 | */ | ||
195 | if (!wait_task_inactive(tsk, TASK_PARKED)) | ||
196 | WARN_ON(1); | ||
197 | else | ||
198 | ht->create(cpu); | ||
199 | } | ||
190 | return 0; | 200 | return 0; |
191 | } | 201 | } |
192 | 202 | ||
diff --git a/kernel/sys.c b/kernel/sys.c index 39c9c4a2949f..0da73cf73e60 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -324,7 +324,6 @@ void kernel_restart_prepare(char *cmd) | |||
324 | system_state = SYSTEM_RESTART; | 324 | system_state = SYSTEM_RESTART; |
325 | usermodehelper_disable(); | 325 | usermodehelper_disable(); |
326 | device_shutdown(); | 326 | device_shutdown(); |
327 | syscore_shutdown(); | ||
328 | } | 327 | } |
329 | 328 | ||
330 | /** | 329 | /** |
@@ -370,6 +369,7 @@ void kernel_restart(char *cmd) | |||
370 | { | 369 | { |
371 | kernel_restart_prepare(cmd); | 370 | kernel_restart_prepare(cmd); |
372 | disable_nonboot_cpus(); | 371 | disable_nonboot_cpus(); |
372 | syscore_shutdown(); | ||
373 | if (!cmd) | 373 | if (!cmd) |
374 | printk(KERN_EMERG "Restarting system.\n"); | 374 | printk(KERN_EMERG "Restarting system.\n"); |
375 | else | 375 | else |
@@ -395,6 +395,7 @@ static void kernel_shutdown_prepare(enum system_states state) | |||
395 | void kernel_halt(void) | 395 | void kernel_halt(void) |
396 | { | 396 | { |
397 | kernel_shutdown_prepare(SYSTEM_HALT); | 397 | kernel_shutdown_prepare(SYSTEM_HALT); |
398 | disable_nonboot_cpus(); | ||
398 | syscore_shutdown(); | 399 | syscore_shutdown(); |
399 | printk(KERN_EMERG "System halted.\n"); | 400 | printk(KERN_EMERG "System halted.\n"); |
400 | kmsg_dump(KMSG_DUMP_HALT); | 401 | kmsg_dump(KMSG_DUMP_HALT); |
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 9e5b8c272eec..5a0f781cd729 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c | |||
@@ -739,12 +739,6 @@ static void blk_add_trace_rq_complete(void *ignore, | |||
739 | struct request_queue *q, | 739 | struct request_queue *q, |
740 | struct request *rq) | 740 | struct request *rq) |
741 | { | 741 | { |
742 | struct blk_trace *bt = q->blk_trace; | ||
743 | |||
744 | /* if control ever passes through here, it's a request based driver */ | ||
745 | if (unlikely(bt && !bt->rq_based)) | ||
746 | bt->rq_based = true; | ||
747 | |||
748 | blk_add_trace_rq(q, rq, BLK_TA_COMPLETE); | 742 | blk_add_trace_rq(q, rq, BLK_TA_COMPLETE); |
749 | } | 743 | } |
750 | 744 | ||
@@ -780,24 +774,10 @@ static void blk_add_trace_bio_bounce(void *ignore, | |||
780 | blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0); | 774 | blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0); |
781 | } | 775 | } |
782 | 776 | ||
783 | static void blk_add_trace_bio_complete(void *ignore, struct bio *bio, int error) | 777 | static void blk_add_trace_bio_complete(void *ignore, |
778 | struct request_queue *q, struct bio *bio, | ||
779 | int error) | ||
784 | { | 780 | { |
785 | struct request_queue *q; | ||
786 | struct blk_trace *bt; | ||
787 | |||
788 | if (!bio->bi_bdev) | ||
789 | return; | ||
790 | |||
791 | q = bdev_get_queue(bio->bi_bdev); | ||
792 | bt = q->blk_trace; | ||
793 | |||
794 | /* | ||
795 | * Request based drivers will generate both rq and bio completions. | ||
796 | * Ignore bio ones. | ||
797 | */ | ||
798 | if (likely(!bt) || bt->rq_based) | ||
799 | return; | ||
800 | |||
801 | blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error); | 781 | blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error); |
802 | } | 782 | } |
803 | 783 | ||
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 6893d5a2bf08..b3fde6d7b7fc 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -66,7 +66,7 @@ | |||
66 | 66 | ||
67 | static struct ftrace_ops ftrace_list_end __read_mostly = { | 67 | static struct ftrace_ops ftrace_list_end __read_mostly = { |
68 | .func = ftrace_stub, | 68 | .func = ftrace_stub, |
69 | .flags = FTRACE_OPS_FL_RECURSION_SAFE, | 69 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB, |
70 | }; | 70 | }; |
71 | 71 | ||
72 | /* ftrace_enabled is a method to turn ftrace on or off */ | 72 | /* ftrace_enabled is a method to turn ftrace on or off */ |
@@ -694,7 +694,6 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat) | |||
694 | free_page(tmp); | 694 | free_page(tmp); |
695 | } | 695 | } |
696 | 696 | ||
697 | free_page((unsigned long)stat->pages); | ||
698 | stat->pages = NULL; | 697 | stat->pages = NULL; |
699 | stat->start = NULL; | 698 | stat->start = NULL; |
700 | 699 | ||
@@ -1053,6 +1052,19 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer) | |||
1053 | 1052 | ||
1054 | static struct pid * const ftrace_swapper_pid = &init_struct_pid; | 1053 | static struct pid * const ftrace_swapper_pid = &init_struct_pid; |
1055 | 1054 | ||
1055 | loff_t | ||
1056 | ftrace_filter_lseek(struct file *file, loff_t offset, int whence) | ||
1057 | { | ||
1058 | loff_t ret; | ||
1059 | |||
1060 | if (file->f_mode & FMODE_READ) | ||
1061 | ret = seq_lseek(file, offset, whence); | ||
1062 | else | ||
1063 | file->f_pos = ret = 1; | ||
1064 | |||
1065 | return ret; | ||
1066 | } | ||
1067 | |||
1056 | #ifdef CONFIG_DYNAMIC_FTRACE | 1068 | #ifdef CONFIG_DYNAMIC_FTRACE |
1057 | 1069 | ||
1058 | #ifndef CONFIG_FTRACE_MCOUNT_RECORD | 1070 | #ifndef CONFIG_FTRACE_MCOUNT_RECORD |
@@ -2613,7 +2625,7 @@ static void ftrace_filter_reset(struct ftrace_hash *hash) | |||
2613 | * routine, you can use ftrace_filter_write() for the write | 2625 | * routine, you can use ftrace_filter_write() for the write |
2614 | * routine if @flag has FTRACE_ITER_FILTER set, or | 2626 | * routine if @flag has FTRACE_ITER_FILTER set, or |
2615 | * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set. | 2627 | * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set. |
2616 | * ftrace_regex_lseek() should be used as the lseek routine, and | 2628 | * ftrace_filter_lseek() should be used as the lseek routine, and |
2617 | * release must call ftrace_regex_release(). | 2629 | * release must call ftrace_regex_release(). |
2618 | */ | 2630 | */ |
2619 | int | 2631 | int |
@@ -2697,19 +2709,6 @@ ftrace_notrace_open(struct inode *inode, struct file *file) | |||
2697 | inode, file); | 2709 | inode, file); |
2698 | } | 2710 | } |
2699 | 2711 | ||
2700 | loff_t | ||
2701 | ftrace_regex_lseek(struct file *file, loff_t offset, int whence) | ||
2702 | { | ||
2703 | loff_t ret; | ||
2704 | |||
2705 | if (file->f_mode & FMODE_READ) | ||
2706 | ret = seq_lseek(file, offset, whence); | ||
2707 | else | ||
2708 | file->f_pos = ret = 1; | ||
2709 | |||
2710 | return ret; | ||
2711 | } | ||
2712 | |||
2713 | static int ftrace_match(char *str, char *regex, int len, int type) | 2712 | static int ftrace_match(char *str, char *regex, int len, int type) |
2714 | { | 2713 | { |
2715 | int matched = 0; | 2714 | int matched = 0; |
@@ -3441,14 +3440,14 @@ static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata; | |||
3441 | 3440 | ||
3442 | static int __init set_ftrace_notrace(char *str) | 3441 | static int __init set_ftrace_notrace(char *str) |
3443 | { | 3442 | { |
3444 | strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE); | 3443 | strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE); |
3445 | return 1; | 3444 | return 1; |
3446 | } | 3445 | } |
3447 | __setup("ftrace_notrace=", set_ftrace_notrace); | 3446 | __setup("ftrace_notrace=", set_ftrace_notrace); |
3448 | 3447 | ||
3449 | static int __init set_ftrace_filter(char *str) | 3448 | static int __init set_ftrace_filter(char *str) |
3450 | { | 3449 | { |
3451 | strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE); | 3450 | strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE); |
3452 | return 1; | 3451 | return 1; |
3453 | } | 3452 | } |
3454 | __setup("ftrace_filter=", set_ftrace_filter); | 3453 | __setup("ftrace_filter=", set_ftrace_filter); |
@@ -3571,7 +3570,7 @@ static const struct file_operations ftrace_filter_fops = { | |||
3571 | .open = ftrace_filter_open, | 3570 | .open = ftrace_filter_open, |
3572 | .read = seq_read, | 3571 | .read = seq_read, |
3573 | .write = ftrace_filter_write, | 3572 | .write = ftrace_filter_write, |
3574 | .llseek = ftrace_regex_lseek, | 3573 | .llseek = ftrace_filter_lseek, |
3575 | .release = ftrace_regex_release, | 3574 | .release = ftrace_regex_release, |
3576 | }; | 3575 | }; |
3577 | 3576 | ||
@@ -3579,7 +3578,7 @@ static const struct file_operations ftrace_notrace_fops = { | |||
3579 | .open = ftrace_notrace_open, | 3578 | .open = ftrace_notrace_open, |
3580 | .read = seq_read, | 3579 | .read = seq_read, |
3581 | .write = ftrace_notrace_write, | 3580 | .write = ftrace_notrace_write, |
3582 | .llseek = ftrace_regex_lseek, | 3581 | .llseek = ftrace_filter_lseek, |
3583 | .release = ftrace_regex_release, | 3582 | .release = ftrace_regex_release, |
3584 | }; | 3583 | }; |
3585 | 3584 | ||
@@ -3784,8 +3783,8 @@ static const struct file_operations ftrace_graph_fops = { | |||
3784 | .open = ftrace_graph_open, | 3783 | .open = ftrace_graph_open, |
3785 | .read = seq_read, | 3784 | .read = seq_read, |
3786 | .write = ftrace_graph_write, | 3785 | .write = ftrace_graph_write, |
3786 | .llseek = ftrace_filter_lseek, | ||
3787 | .release = ftrace_graph_release, | 3787 | .release = ftrace_graph_release, |
3788 | .llseek = seq_lseek, | ||
3789 | }; | 3788 | }; |
3790 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 3789 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
3791 | 3790 | ||
@@ -4131,7 +4130,8 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip, | |||
4131 | preempt_disable_notrace(); | 4130 | preempt_disable_notrace(); |
4132 | trace_recursion_set(TRACE_CONTROL_BIT); | 4131 | trace_recursion_set(TRACE_CONTROL_BIT); |
4133 | do_for_each_ftrace_op(op, ftrace_control_list) { | 4132 | do_for_each_ftrace_op(op, ftrace_control_list) { |
4134 | if (!ftrace_function_local_disabled(op) && | 4133 | if (!(op->flags & FTRACE_OPS_FL_STUB) && |
4134 | !ftrace_function_local_disabled(op) && | ||
4135 | ftrace_ops_test(op, ip)) | 4135 | ftrace_ops_test(op, ip)) |
4136 | op->func(ip, parent_ip, op, regs); | 4136 | op->func(ip, parent_ip, op, regs); |
4137 | } while_for_each_ftrace_op(op); | 4137 | } while_for_each_ftrace_op(op); |
@@ -4439,7 +4439,7 @@ static const struct file_operations ftrace_pid_fops = { | |||
4439 | .open = ftrace_pid_open, | 4439 | .open = ftrace_pid_open, |
4440 | .write = ftrace_pid_write, | 4440 | .write = ftrace_pid_write, |
4441 | .read = seq_read, | 4441 | .read = seq_read, |
4442 | .llseek = seq_lseek, | 4442 | .llseek = ftrace_filter_lseek, |
4443 | .release = ftrace_pid_release, | 4443 | .release = ftrace_pid_release, |
4444 | }; | 4444 | }; |
4445 | 4445 | ||
@@ -4555,12 +4555,8 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
4555 | ftrace_startup_sysctl(); | 4555 | ftrace_startup_sysctl(); |
4556 | 4556 | ||
4557 | /* we are starting ftrace again */ | 4557 | /* we are starting ftrace again */ |
4558 | if (ftrace_ops_list != &ftrace_list_end) { | 4558 | if (ftrace_ops_list != &ftrace_list_end) |
4559 | if (ftrace_ops_list->next == &ftrace_list_end) | 4559 | update_ftrace_function(); |
4560 | ftrace_trace_function = ftrace_ops_list->func; | ||
4561 | else | ||
4562 | ftrace_trace_function = ftrace_ops_list_func; | ||
4563 | } | ||
4564 | 4560 | ||
4565 | } else { | 4561 | } else { |
4566 | /* stopping ftrace calls (just send to ftrace_stub) */ | 4562 | /* stopping ftrace calls (just send to ftrace_stub) */ |
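
Two of the ftrace hunks swap strncpy() for strlcpy() on the boot-parameter buffers: strncpy() leaves the destination unterminated when the source fills the buffer, while strlcpy() always NUL-terminates, truncating if necessary. Since glibc has no strlcpy(), the sketch below defines a helper with the same termination behaviour (the name copy_bounded is made up):

#include <stdio.h>
#include <string.h>

/* Minimal strlcpy-style helper: always NUL-terminates when size > 0. */
static size_t copy_bounded(char *dst, const char *src, size_t size)
{
        size_t len = strlen(src);

        if (size) {
                size_t n = (len >= size) ? size - 1 : len;

                memcpy(dst, src, n);
                dst[n] = '\0';
        }
        return len;
}

int main(void)
{
        char buf[8];

        copy_bounded(buf, "function_graph", sizeof(buf));
        printf("'%s'\n", buf);          /* "functio": truncated but terminated */
        return 0;
}
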
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 4f1dade56981..66338c4f7f4b 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -132,7 +132,7 @@ static char *default_bootup_tracer; | |||
132 | 132 | ||
133 | static int __init set_cmdline_ftrace(char *str) | 133 | static int __init set_cmdline_ftrace(char *str) |
134 | { | 134 | { |
135 | strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); | 135 | strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); |
136 | default_bootup_tracer = bootup_tracer_buf; | 136 | default_bootup_tracer = bootup_tracer_buf; |
137 | /* We are using ftrace early, expand it */ | 137 | /* We are using ftrace early, expand it */ |
138 | ring_buffer_expanded = 1; | 138 | ring_buffer_expanded = 1; |
@@ -162,7 +162,7 @@ static char *trace_boot_options __initdata; | |||
162 | 162 | ||
163 | static int __init set_trace_boot_options(char *str) | 163 | static int __init set_trace_boot_options(char *str) |
164 | { | 164 | { |
165 | strncpy(trace_boot_options_buf, str, MAX_TRACER_SIZE); | 165 | strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE); |
166 | trace_boot_options = trace_boot_options_buf; | 166 | trace_boot_options = trace_boot_options_buf; |
167 | return 0; | 167 | return 0; |
168 | } | 168 | } |
@@ -744,8 +744,11 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
744 | return; | 744 | return; |
745 | 745 | ||
746 | WARN_ON_ONCE(!irqs_disabled()); | 746 | WARN_ON_ONCE(!irqs_disabled()); |
747 | if (WARN_ON_ONCE(!current_trace->allocated_snapshot)) | 747 | if (!current_trace->allocated_snapshot) { |
748 | /* Only the nop tracer should hit this when disabling */ | ||
749 | WARN_ON_ONCE(current_trace != &nop_trace); | ||
748 | return; | 750 | return; |
751 | } | ||
749 | 752 | ||
750 | arch_spin_lock(&ftrace_max_lock); | 753 | arch_spin_lock(&ftrace_max_lock); |
751 | 754 | ||
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 42ca822fc701..83a8b5b7bd35 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
@@ -322,7 +322,7 @@ static const struct file_operations stack_trace_filter_fops = { | |||
322 | .open = stack_trace_filter_open, | 322 | .open = stack_trace_filter_open, |
323 | .read = seq_read, | 323 | .read = seq_read, |
324 | .write = ftrace_filter_write, | 324 | .write = ftrace_filter_write, |
325 | .llseek = ftrace_regex_lseek, | 325 | .llseek = ftrace_filter_lseek, |
326 | .release = ftrace_regex_release, | 326 | .release = ftrace_regex_release, |
327 | }; | 327 | }; |
328 | 328 | ||
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index a54f26f82eb2..e134d8f365dd 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c | |||
@@ -25,7 +25,8 @@ | |||
25 | 25 | ||
26 | static struct kmem_cache *user_ns_cachep __read_mostly; | 26 | static struct kmem_cache *user_ns_cachep __read_mostly; |
27 | 27 | ||
28 | static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid, | 28 | static bool new_idmap_permitted(const struct file *file, |
29 | struct user_namespace *ns, int cap_setid, | ||
29 | struct uid_gid_map *map); | 30 | struct uid_gid_map *map); |
30 | 31 | ||
31 | static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns) | 32 | static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns) |
@@ -612,10 +613,10 @@ static ssize_t map_write(struct file *file, const char __user *buf, | |||
612 | if (map->nr_extents != 0) | 613 | if (map->nr_extents != 0) |
613 | goto out; | 614 | goto out; |
614 | 615 | ||
615 | /* Require the appropriate privilege CAP_SETUID or CAP_SETGID | 616 | /* |
616 | * over the user namespace in order to set the id mapping. | 617 | * Adjusting namespace settings requires capabilities on the target. |
617 | */ | 618 | */ |
618 | if (cap_valid(cap_setid) && !ns_capable(ns, cap_setid)) | 619 | if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN)) |
619 | goto out; | 620 | goto out; |
620 | 621 | ||
621 | /* Get a buffer */ | 622 | /* Get a buffer */ |
@@ -700,7 +701,7 @@ static ssize_t map_write(struct file *file, const char __user *buf, | |||
700 | 701 | ||
701 | ret = -EPERM; | 702 | ret = -EPERM; |
702 | /* Validate the user is allowed to use user id's mapped to. */ | 703 | /* Validate the user is allowed to use user id's mapped to. */ |
703 | if (!new_idmap_permitted(ns, cap_setid, &new_map)) | 704 | if (!new_idmap_permitted(file, ns, cap_setid, &new_map)) |
704 | goto out; | 705 | goto out; |
705 | 706 | ||
706 | /* Map the lower ids from the parent user namespace to the | 707 | /* Map the lower ids from the parent user namespace to the |
@@ -787,7 +788,8 @@ ssize_t proc_projid_map_write(struct file *file, const char __user *buf, size_t | |||
787 | &ns->projid_map, &ns->parent->projid_map); | 788 | &ns->projid_map, &ns->parent->projid_map); |
788 | } | 789 | } |
789 | 790 | ||
790 | static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid, | 791 | static bool new_idmap_permitted(const struct file *file, |
792 | struct user_namespace *ns, int cap_setid, | ||
791 | struct uid_gid_map *new_map) | 793 | struct uid_gid_map *new_map) |
792 | { | 794 | { |
793 | /* Allow mapping to your own filesystem ids */ | 795 | /* Allow mapping to your own filesystem ids */ |
@@ -795,12 +797,12 @@ static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid, | |||
795 | u32 id = new_map->extent[0].lower_first; | 797 | u32 id = new_map->extent[0].lower_first; |
796 | if (cap_setid == CAP_SETUID) { | 798 | if (cap_setid == CAP_SETUID) { |
797 | kuid_t uid = make_kuid(ns->parent, id); | 799 | kuid_t uid = make_kuid(ns->parent, id); |
798 | if (uid_eq(uid, current_fsuid())) | 800 | if (uid_eq(uid, file->f_cred->fsuid)) |
799 | return true; | 801 | return true; |
800 | } | 802 | } |
801 | else if (cap_setid == CAP_SETGID) { | 803 | else if (cap_setid == CAP_SETGID) { |
802 | kgid_t gid = make_kgid(ns->parent, id); | 804 | kgid_t gid = make_kgid(ns->parent, id); |
803 | if (gid_eq(gid, current_fsgid())) | 805 | if (gid_eq(gid, file->f_cred->fsgid)) |
804 | return true; | 806 | return true; |
805 | } | 807 | } |
806 | } | 808 | } |
@@ -811,8 +813,10 @@ static bool new_idmap_permitted(struct user_namespace *ns, int cap_setid, | |||
811 | 813 | ||
812 | /* Allow the specified ids if we have the appropriate capability | 814 | /* Allow the specified ids if we have the appropriate capability |
813 | * (CAP_SETUID or CAP_SETGID) over the parent user namespace. | 815 | * (CAP_SETUID or CAP_SETGID) over the parent user namespace. |
816 | * And the opener of the id file also had the approprpiate capability. | ||
814 | */ | 817 | */ |
815 | if (ns_capable(ns->parent, cap_setid)) | 818 | if (ns_capable(ns->parent, cap_setid) && |
819 | file_ns_capable(file, ns->parent, cap_setid)) | ||
816 | return true; | 820 | return true; |
817 | 821 | ||
818 | return false; | 822 | return false; |
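
The uid_map/gid_map change bases the permission checks on file->f_cred — the credentials captured when the map file was opened — via file_ns_capable(), rather than on whoever happens to issue the write(). A userspace analogy only (illustrative struct and functions, not the kernel API): capture the opener's identity in the handle and consult it at write time:

#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

struct handle {
        uid_t opener_uid;       /* credentials captured when the handle was opened */
};

static struct handle open_handle(void)
{
        struct handle h = { .opener_uid = getuid() };

        return h;
}

static int handle_write_allowed(const struct handle *h)
{
        /* Decide based on who opened the handle, not who calls write(). */
        return h->opener_uid == 0;
}

int main(void)
{
        struct handle h = open_handle();

        printf("write allowed: %s\n", handle_write_allowed(&h) ? "yes" : "no");
        return 0;
}
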
diff --git a/lib/Kconfig b/lib/Kconfig index 3958dc4389f9..fe01d418b09a 100644 --- a/lib/Kconfig +++ b/lib/Kconfig | |||
@@ -404,4 +404,7 @@ config OID_REGISTRY | |||
404 | help | 404 | help |
405 | Enable fast lookup object identifier registry. | 405 | Enable fast lookup object identifier registry. |
406 | 406 | ||
407 | config UCS2_STRING | ||
408 | tristate | ||
409 | |||
407 | endmenu | 410 | endmenu |
diff --git a/lib/Makefile b/lib/Makefile index d7946ff75b2e..6e2cc561f761 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -174,3 +174,5 @@ quiet_cmd_build_OID_registry = GEN $@ | |||
174 | cmd_build_OID_registry = perl $(srctree)/$(src)/build_OID_registry $< $@ | 174 | cmd_build_OID_registry = perl $(srctree)/$(src)/build_OID_registry $< $@ |
175 | 175 | ||
176 | clean-files += oid_registry_data.c | 176 | clean-files += oid_registry_data.c |
177 | |||
178 | obj-$(CONFIG_UCS2_STRING) += ucs2_string.o | ||
diff --git a/lib/kobject.c b/lib/kobject.c index e07ee1fcd6f1..a65486613d79 100644 --- a/lib/kobject.c +++ b/lib/kobject.c | |||
@@ -529,6 +529,13 @@ struct kobject *kobject_get(struct kobject *kobj) | |||
529 | return kobj; | 529 | return kobj; |
530 | } | 530 | } |
531 | 531 | ||
532 | static struct kobject *kobject_get_unless_zero(struct kobject *kobj) | ||
533 | { | ||
534 | if (!kref_get_unless_zero(&kobj->kref)) | ||
535 | kobj = NULL; | ||
536 | return kobj; | ||
537 | } | ||
538 | |||
532 | /* | 539 | /* |
533 | * kobject_cleanup - free kobject resources. | 540 | * kobject_cleanup - free kobject resources. |
534 | * @kobj: object to cleanup | 541 | * @kobj: object to cleanup |
@@ -751,7 +758,7 @@ struct kobject *kset_find_obj(struct kset *kset, const char *name) | |||
751 | 758 | ||
752 | list_for_each_entry(k, &kset->list, entry) { | 759 | list_for_each_entry(k, &kset->list, entry) { |
753 | if (kobject_name(k) && !strcmp(kobject_name(k), name)) { | 760 | if (kobject_name(k) && !strcmp(kobject_name(k), name)) { |
754 | ret = kobject_get(k); | 761 | ret = kobject_get_unless_zero(k); |
755 | break; | 762 | break; |
756 | } | 763 | } |
757 | } | 764 | } |
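
kset_find_obj() now takes a reference only if the count is still non-zero, so a lookup can never resurrect an object that is already on its way to being freed. A C11-atomics sketch of the get-unless-zero idea (a userspace stand-in, not the kernel kref API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
        atomic_int refcount;
};

/* Take a reference only if the count is still non-zero. */
static bool get_unless_zero(struct obj *o)
{
        int old = atomic_load(&o->refcount);

        while (old != 0) {
                if (atomic_compare_exchange_weak(&o->refcount, &old, old + 1))
                        return true;    /* reference taken */
                /* old was reloaded by the failed CAS; retry */
        }
        return false;                   /* object already dying, don't resurrect */
}

int main(void)
{
        struct obj live = { .refcount = 1 };
        struct obj dead = { .refcount = 0 };

        printf("live: %d, dead: %d\n",
               get_unless_zero(&live), get_unless_zero(&dead));
        return 0;
}
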
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index bfe02b8fc55b..d23762e6652c 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
@@ -105,9 +105,9 @@ setup_io_tlb_npages(char *str) | |||
105 | if (!strcmp(str, "force")) | 105 | if (!strcmp(str, "force")) |
106 | swiotlb_force = 1; | 106 | swiotlb_force = 1; |
107 | 107 | ||
108 | return 1; | 108 | return 0; |
109 | } | 109 | } |
110 | __setup("swiotlb=", setup_io_tlb_npages); | 110 | early_param("swiotlb", setup_io_tlb_npages); |
111 | /* make io_tlb_overflow tunable too? */ | 111 | /* make io_tlb_overflow tunable too? */ |
112 | 112 | ||
113 | unsigned long swiotlb_nr_tbl(void) | 113 | unsigned long swiotlb_nr_tbl(void) |
@@ -115,6 +115,18 @@ unsigned long swiotlb_nr_tbl(void) | |||
115 | return io_tlb_nslabs; | 115 | return io_tlb_nslabs; |
116 | } | 116 | } |
117 | EXPORT_SYMBOL_GPL(swiotlb_nr_tbl); | 117 | EXPORT_SYMBOL_GPL(swiotlb_nr_tbl); |
118 | |||
119 | /* default to 64MB */ | ||
120 | #define IO_TLB_DEFAULT_SIZE (64UL<<20) | ||
121 | unsigned long swiotlb_size_or_default(void) | ||
122 | { | ||
123 | unsigned long size; | ||
124 | |||
125 | size = io_tlb_nslabs << IO_TLB_SHIFT; | ||
126 | |||
127 | return size ? size : (IO_TLB_DEFAULT_SIZE); | ||
128 | } | ||
129 | |||
118 | /* Note that this doesn't work with highmem page */ | 130 | /* Note that this doesn't work with highmem page */ |
119 | static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev, | 131 | static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev, |
120 | volatile void *address) | 132 | volatile void *address) |
@@ -188,8 +200,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) | |||
188 | void __init | 200 | void __init |
189 | swiotlb_init(int verbose) | 201 | swiotlb_init(int verbose) |
190 | { | 202 | { |
191 | /* default to 64MB */ | 203 | size_t default_size = IO_TLB_DEFAULT_SIZE; |
192 | size_t default_size = 64UL<<20; | ||
193 | unsigned char *vstart; | 204 | unsigned char *vstart; |
194 | unsigned long bytes; | 205 | unsigned long bytes; |
195 | 206 | ||
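
Besides switching the parser to early_param() (returning 0, the success convention for early_param handlers), the hunk adds swiotlb_size_or_default() so other early boot code can ask for the bounce-buffer size before it is allocated. The helper's arithmetic, reduced to a runnable sketch:

#include <stdio.h>

#define IO_TLB_SHIFT        11                  /* 2 KiB slabs */
#define IO_TLB_DEFAULT_SIZE (64UL << 20)        /* 64 MiB */

static unsigned long nslabs;    /* 0 until a boot parameter sets it */

static unsigned long size_or_default(void)
{
        unsigned long size = nslabs << IO_TLB_SHIFT;

        return size ? size : IO_TLB_DEFAULT_SIZE;
}

int main(void)
{
        printf("%lu MiB\n", size_or_default() >> 20);   /* 64 with no override */
        nslabs = 16384;                                 /* e.g. swiotlb=16384 */
        printf("%lu MiB\n", size_or_default() >> 20);   /* 32 */
        return 0;
}
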
diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c new file mode 100644 index 000000000000..6f500ef2301d --- /dev/null +++ b/lib/ucs2_string.c | |||
@@ -0,0 +1,51 @@ | |||
1 | #include <linux/ucs2_string.h> | ||
2 | #include <linux/module.h> | ||
3 | |||
4 | /* Return the number of unicode characters in data */ | ||
5 | unsigned long | ||
6 | ucs2_strnlen(const ucs2_char_t *s, size_t maxlength) | ||
7 | { | ||
8 | unsigned long length = 0; | ||
9 | |||
10 | while (*s++ != 0 && length < maxlength) | ||
11 | length++; | ||
12 | return length; | ||
13 | } | ||
14 | EXPORT_SYMBOL(ucs2_strnlen); | ||
15 | |||
16 | unsigned long | ||
17 | ucs2_strlen(const ucs2_char_t *s) | ||
18 | { | ||
19 | return ucs2_strnlen(s, ~0UL); | ||
20 | } | ||
21 | EXPORT_SYMBOL(ucs2_strlen); | ||
22 | |||
23 | /* | ||
24 | * Return the number of bytes is the length of this string | ||
25 | * Note: this is NOT the same as the number of unicode characters | ||
26 | */ | ||
27 | unsigned long | ||
28 | ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength) | ||
29 | { | ||
30 | return ucs2_strnlen(data, maxlength/sizeof(ucs2_char_t)) * sizeof(ucs2_char_t); | ||
31 | } | ||
32 | EXPORT_SYMBOL(ucs2_strsize); | ||
33 | |||
34 | int | ||
35 | ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len) | ||
36 | { | ||
37 | while (1) { | ||
38 | if (len == 0) | ||
39 | return 0; | ||
40 | if (*a < *b) | ||
41 | return -1; | ||
42 | if (*a > *b) | ||
43 | return 1; | ||
44 | if (*a == 0) /* implies *b == 0 */ | ||
45 | return 0; | ||
46 | a++; | ||
47 | b++; | ||
48 | len--; | ||
49 | } | ||
50 | } | ||
51 | EXPORT_SYMBOL(ucs2_strncmp); | ||
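
The new lib/ucs2_string.c helpers distinguish the character count of a UCS-2 (16-bit) string from its size in bytes, which is what callers such as the EFI variable code need. A userspace usage sketch with C11 char16_t; the literal and the my_ prefix are illustrative:

#include <stddef.h>
#include <stdio.h>
#include <uchar.h>

typedef char16_t ucs2_char_t;

/* Same loop as the kernel helper: count 16-bit characters, not bytes. */
static unsigned long my_ucs2_strnlen(const ucs2_char_t *s, size_t maxlength)
{
        unsigned long length = 0;

        while (*s++ != 0 && length < maxlength)
                length++;
        return length;
}

int main(void)
{
        const ucs2_char_t name[] = u"BootOrder";

        /* 9 characters, but 18 bytes on the wire (excluding the terminator) */
        printf("chars=%lu bytes=%lu\n",
               my_ucs2_strnlen(name, ~0UL),
               my_ucs2_strnlen(name, ~0UL) * sizeof(ucs2_char_t));
        return 0;
}
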
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index ca9a7c6d7e97..1a12f5b9a0ab 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -2961,7 +2961,17 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2961 | break; | 2961 | break; |
2962 | } | 2962 | } |
2963 | 2963 | ||
2964 | if (absent || | 2964 | /* |
2965 | * We need call hugetlb_fault for both hugepages under migration | ||
2966 | * (in which case hugetlb_fault waits for the migration,) and | ||
2967 | * hwpoisoned hugepages (in which case we need to prevent the | ||
2968 | * caller from accessing to them.) In order to do this, we use | ||
2969 | * here is_swap_pte instead of is_hugetlb_entry_migration and | ||
2970 | * is_hugetlb_entry_hwpoisoned. This is because it simply covers | ||
2971 | * both cases, and because we can't follow correct pages | ||
2972 | * directly from any kind of swap entries. | ||
2973 | */ | ||
2974 | if (absent || is_swap_pte(huge_ptep_get(pte)) || | ||
2965 | ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) { | 2975 | ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) { |
2966 | int ret; | 2976 | int ret; |
2967 | 2977 | ||
diff --git a/mm/memory.c b/mm/memory.c index 494526ae024a..ba94dec5b259 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -216,6 +216,7 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm) | |||
216 | tlb->mm = mm; | 216 | tlb->mm = mm; |
217 | 217 | ||
218 | tlb->fullmm = fullmm; | 218 | tlb->fullmm = fullmm; |
219 | tlb->need_flush_all = 0; | ||
219 | tlb->start = -1UL; | 220 | tlb->start = -1UL; |
220 | tlb->end = 0; | 221 | tlb->end = 0; |
221 | tlb->need_flush = 0; | 222 | tlb->need_flush = 0; |
@@ -2392,6 +2393,53 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, | |||
2392 | } | 2393 | } |
2393 | EXPORT_SYMBOL(remap_pfn_range); | 2394 | EXPORT_SYMBOL(remap_pfn_range); |
2394 | 2395 | ||
2396 | /** | ||
2397 | * vm_iomap_memory - remap memory to userspace | ||
2398 | * @vma: user vma to map to | ||
2399 | * @start: start of area | ||
2400 | * @len: size of area | ||
2401 | * | ||
2402 | * This is a simplified io_remap_pfn_range() for common driver use. The | ||
2403 | * driver just needs to give us the physical memory range to be mapped, | ||
2404 | * we'll figure out the rest from the vma information. | ||
2405 | * | ||
2406 | * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get | ||
2407 | * whatever write-combining details or similar. | ||
2408 | */ | ||
2409 | int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) | ||
2410 | { | ||
2411 | unsigned long vm_len, pfn, pages; | ||
2412 | |||
2413 | /* Check that the physical memory area passed in looks valid */ | ||
2414 | if (start + len < start) | ||
2415 | return -EINVAL; | ||
2416 | /* | ||
2417 | * You *really* shouldn't map things that aren't page-aligned, | ||
2418 | * but we've historically allowed it because IO memory might | ||
2419 | * just have smaller alignment. | ||
2420 | */ | ||
2421 | len += start & ~PAGE_MASK; | ||
2422 | pfn = start >> PAGE_SHIFT; | ||
2423 | pages = (len + ~PAGE_MASK) >> PAGE_SHIFT; | ||
2424 | if (pfn + pages < pfn) | ||
2425 | return -EINVAL; | ||
2426 | |||
2427 | /* We start the mapping 'vm_pgoff' pages into the area */ | ||
2428 | if (vma->vm_pgoff > pages) | ||
2429 | return -EINVAL; | ||
2430 | pfn += vma->vm_pgoff; | ||
2431 | pages -= vma->vm_pgoff; | ||
2432 | |||
2433 | /* Can we fit all of the mapping? */ | ||
2434 | vm_len = vma->vm_end - vma->vm_start; | ||
2435 | if (vm_len >> PAGE_SHIFT > pages) | ||
2436 | return -EINVAL; | ||
2437 | |||
2438 | /* Ok, let it rip */ | ||
2439 | return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); | ||
2440 | } | ||
2441 | EXPORT_SYMBOL(vm_iomap_memory); | ||
2442 | |||
2395 | static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, | 2443 | static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, |
2396 | unsigned long addr, unsigned long end, | 2444 | unsigned long addr, unsigned long end, |
2397 | pte_fn_t fn, void *data) | 2445 | pte_fn_t fn, void *data) |
diff --git a/mm/mmap.c b/mm/mmap.c --- a/mm/mmap.c +++ b/mm/mmap.c | |||
@@ -1940,7 +1940,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) | |||
1940 | 1940 | ||
1941 | /* Check the cache first. */ | 1941 | /* Check the cache first. */ |
1942 | /* (Cache hit rate is typically around 35%.) */ | 1942 | /* (Cache hit rate is typically around 35%.) */ |
1943 | vma = mm->mmap_cache; | 1943 | vma = ACCESS_ONCE(mm->mmap_cache); |
1944 | if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) { | 1944 | if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) { |
1945 | struct rb_node *rb_node; | 1945 | struct rb_node *rb_node; |
1946 | 1946 | ||
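
The find_vma() hunk above (and the matching mm/nommu.c hunk below) reads mm->mmap_cache through ACCESS_ONCE() so the compiler cannot reload the racily-updated pointer between the validity check and the return, which could otherwise hand back a vma that no longer satisfies the check. A sketch of the single-load pattern using a GCC typeof-based macro (READ_ONCE_PTR is an illustrative name):

#include <stdio.h>

#define READ_ONCE_PTR(x) (*(volatile typeof(x) *)&(x))

struct vma {
        unsigned long start, end;
};

static struct vma *cache;       /* updated by other threads without a lock */

static struct vma *lookup(unsigned long addr)
{
        /* One load: the check and the return use the same snapshot. */
        struct vma *v = READ_ONCE_PTR(cache);

        if (v && v->start <= addr && addr < v->end)
                return v;
        return NULL;            /* fall back to the slow path in real code */
}

int main(void)
{
        static struct vma v = { .start = 0x1000, .end = 0x2000 };

        cache = &v;
        printf("%p\n", (void *)lookup(0x1800));
        return 0;
}
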
diff --git a/mm/nommu.c b/mm/nommu.c index e19328087534..e001768b14e8 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -821,7 +821,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) | |||
821 | struct vm_area_struct *vma; | 821 | struct vm_area_struct *vma; |
822 | 822 | ||
823 | /* check the cache first */ | 823 | /* check the cache first */ |
824 | vma = mm->mmap_cache; | 824 | vma = ACCESS_ONCE(mm->mmap_cache); |
825 | if (vma && vma->vm_start <= addr && vma->vm_end > addr) | 825 | if (vma && vma->vm_start <= addr && vma->vm_end > addr) |
826 | return vma; | 826 | return vma; |
827 | 827 | ||
@@ -1838,6 +1838,16 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, | |||
1838 | } | 1838 | } |
1839 | EXPORT_SYMBOL(remap_pfn_range); | 1839 | EXPORT_SYMBOL(remap_pfn_range); |
1840 | 1840 | ||
1841 | int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) | ||
1842 | { | ||
1843 | unsigned long pfn = start >> PAGE_SHIFT; | ||
1844 | unsigned long vm_len = vma->vm_end - vma->vm_start; | ||
1845 | |||
1846 | pfn += vma->vm_pgoff; | ||
1847 | return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); | ||
1848 | } | ||
1849 | EXPORT_SYMBOL(vm_iomap_memory); | ||
1850 | |||
1841 | int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, | 1851 | int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, |
1842 | unsigned long pgoff) | 1852 | unsigned long pgoff) |
1843 | { | 1853 | { |
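
The new vm_iomap_memory() (full version in the mm/memory.c hunk above) folds any sub-page misalignment of the physical start into the length, rounds up to whole pages, and only then checks that the vma fits. The arithmetic on its own, with made-up example values:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long start = 0xfd000100UL;     /* unaligned I/O region */
        unsigned long len   = 0x10000UL;        /* 64 KiB */
        unsigned long pfn, pages;

        /* Fold the misalignment into the length, then round up to pages. */
        len  += start & ~PAGE_MASK;
        pfn   = start >> PAGE_SHIFT;
        pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;

        printf("pfn=0x%lx pages=%lu\n", pfn, pages);    /* 0xfd000, 17 */
        return 0;
}
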
diff --git a/mm/vmscan.c b/mm/vmscan.c index 88c5fed8b9a4..669fba39be1a 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -3188,9 +3188,9 @@ int kswapd_run(int nid) | |||
3188 | if (IS_ERR(pgdat->kswapd)) { | 3188 | if (IS_ERR(pgdat->kswapd)) { |
3189 | /* failure at boot is fatal */ | 3189 | /* failure at boot is fatal */ |
3190 | BUG_ON(system_state == SYSTEM_BOOTING); | 3190 | BUG_ON(system_state == SYSTEM_BOOTING); |
3191 | pgdat->kswapd = NULL; | ||
3192 | pr_err("Failed to start kswapd on node %d\n", nid); | 3191 | pr_err("Failed to start kswapd on node %d\n", nid); |
3193 | ret = PTR_ERR(pgdat->kswapd); | 3192 | ret = PTR_ERR(pgdat->kswapd); |
3193 | pgdat->kswapd = NULL; | ||
3194 | } | 3194 | } |
3195 | return ret; | 3195 | return ret; |
3196 | } | 3196 | } |
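
The vmscan fix only reorders two statements: PTR_ERR() must be read from the failed pointer before pgdat->kswapd is cleared, otherwise the function computes the error from the already-NULLed pointer and returns 0, and callers believe kswapd started. A toy reproduction of the corrected order (err_ptr/ptr_err are stand-ins for the kernel macros, not the real ones):

#include <errno.h>
#include <stdio.h>

/* Toy helpers mirroring the kernel's ERR_PTR/PTR_ERR encoding. */
static void *err_ptr(long err)  { return (void *)err; }
static long  ptr_err(void *ptr) { return (long)ptr; }

int main(void)
{
        void *task = err_ptr(-ENOMEM); /* pretend the thread failed to start */
        long ret;

        ret  = ptr_err(task);           /* read the error first ... */
        task = NULL;                    /* ... then clear the stale pointer */
        printf("ret=%ld\n", ret);       /* -12, not 0 */
        return 0;
}
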
diff --git a/net/802/mrp.c b/net/802/mrp.c index a4cc3229952a..e085bcc754f6 100644 --- a/net/802/mrp.c +++ b/net/802/mrp.c | |||
@@ -870,8 +870,12 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl) | |||
870 | * all pending messages before the applicant is gone. | 870 | * all pending messages before the applicant is gone. |
871 | */ | 871 | */ |
872 | del_timer_sync(&app->join_timer); | 872 | del_timer_sync(&app->join_timer); |
873 | |||
874 | spin_lock(&app->lock); | ||
873 | mrp_mad_event(app, MRP_EVENT_TX); | 875 | mrp_mad_event(app, MRP_EVENT_TX); |
874 | mrp_pdu_queue(app); | 876 | mrp_pdu_queue(app); |
877 | spin_unlock(&app->lock); | ||
878 | |||
875 | mrp_queue_xmit(app); | 879 | mrp_queue_xmit(app); |
876 | 880 | ||
877 | dev_mc_del(dev, appl->group_address); | 881 | dev_mc_del(dev, appl->group_address); |
diff --git a/net/atm/common.c b/net/atm/common.c index 7b491006eaf4..737bef59ce89 100644 --- a/net/atm/common.c +++ b/net/atm/common.c | |||
@@ -531,6 +531,8 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
531 | struct sk_buff *skb; | 531 | struct sk_buff *skb; |
532 | int copied, error = -EINVAL; | 532 | int copied, error = -EINVAL; |
533 | 533 | ||
534 | msg->msg_namelen = 0; | ||
535 | |||
534 | if (sock->state != SS_CONNECTED) | 536 | if (sock->state != SS_CONNECTED) |
535 | return -ENOTCONN; | 537 | return -ENOTCONN; |
536 | 538 | ||
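
This recvmsg fix sets msg->msg_namelen = 0 before any early return, so the socket layer never copies an uninitialised name length back to userspace on paths that fill in no address; several later hunks apply the same treatment to other protocols' recvmsg paths. The general shape — define the output parameter at function entry — as a small sketch with invented names:

#include <stdio.h>

/* Fill *namelen before any early return so callers never see garbage. */
static int recv_like(int connected, int *namelen)
{
        *namelen = 0;           /* initialise the output up front */

        if (!connected)
                return -1;      /* early exit still leaves *namelen defined */

        *namelen = 16;          /* a real address was written */
        return 0;
}

int main(void)
{
        int namelen;

        recv_like(0, &namelen);
        printf("namelen=%d\n", namelen);        /* 0, not stack garbage */
        return 0;
}
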
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 7b11f8bc5071..e277e38f736b 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c | |||
@@ -1642,6 +1642,7 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1642 | ax25_address src; | 1642 | ax25_address src; |
1643 | const unsigned char *mac = skb_mac_header(skb); | 1643 | const unsigned char *mac = skb_mac_header(skb); |
1644 | 1644 | ||
1645 | memset(sax, 0, sizeof(struct full_sockaddr_ax25)); | ||
1645 | ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL, | 1646 | ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL, |
1646 | &digi, NULL, NULL); | 1647 | &digi, NULL, NULL); |
1647 | sax->sax25_family = AF_AX25; | 1648 | sax->sax25_family = AF_AX25; |
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index 0488d70c8c35..fa563e497c48 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c | |||
@@ -169,7 +169,7 @@ void batadv_mesh_free(struct net_device *soft_iface) | |||
169 | atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE); | 169 | atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE); |
170 | } | 170 | } |
171 | 171 | ||
172 | int batadv_is_my_mac(const uint8_t *addr) | 172 | int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr) |
173 | { | 173 | { |
174 | const struct batadv_hard_iface *hard_iface; | 174 | const struct batadv_hard_iface *hard_iface; |
175 | 175 | ||
@@ -178,6 +178,9 @@ int batadv_is_my_mac(const uint8_t *addr) | |||
178 | if (hard_iface->if_status != BATADV_IF_ACTIVE) | 178 | if (hard_iface->if_status != BATADV_IF_ACTIVE) |
179 | continue; | 179 | continue; |
180 | 180 | ||
181 | if (hard_iface->soft_iface != bat_priv->soft_iface) | ||
182 | continue; | ||
183 | |||
181 | if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) { | 184 | if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) { |
182 | rcu_read_unlock(); | 185 | rcu_read_unlock(); |
183 | return 1; | 186 | return 1; |
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index ced08b936a96..d40910dfc8ea 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h | |||
@@ -162,7 +162,7 @@ extern struct workqueue_struct *batadv_event_workqueue; | |||
162 | 162 | ||
163 | int batadv_mesh_init(struct net_device *soft_iface); | 163 | int batadv_mesh_init(struct net_device *soft_iface); |
164 | void batadv_mesh_free(struct net_device *soft_iface); | 164 | void batadv_mesh_free(struct net_device *soft_iface); |
165 | int batadv_is_my_mac(const uint8_t *addr); | 165 | int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr); |
166 | struct batadv_hard_iface * | 166 | struct batadv_hard_iface * |
167 | batadv_seq_print_text_primary_if_get(struct seq_file *seq); | 167 | batadv_seq_print_text_primary_if_get(struct seq_file *seq); |
168 | int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev, | 168 | int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev, |
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 5ee21cebbbb0..319f2906c71a 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c | |||
@@ -402,7 +402,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb, | |||
402 | goto out; | 402 | goto out; |
403 | 403 | ||
404 | /* not for me */ | 404 | /* not for me */ |
405 | if (!batadv_is_my_mac(ethhdr->h_dest)) | 405 | if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest)) |
406 | goto out; | 406 | goto out; |
407 | 407 | ||
408 | icmp_packet = (struct batadv_icmp_packet_rr *)skb->data; | 408 | icmp_packet = (struct batadv_icmp_packet_rr *)skb->data; |
@@ -416,7 +416,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb, | |||
416 | } | 416 | } |
417 | 417 | ||
418 | /* packet for me */ | 418 | /* packet for me */ |
419 | if (batadv_is_my_mac(icmp_packet->dst)) | 419 | if (batadv_is_my_mac(bat_priv, icmp_packet->dst)) |
420 | return batadv_recv_my_icmp_packet(bat_priv, skb, hdr_size); | 420 | return batadv_recv_my_icmp_packet(bat_priv, skb, hdr_size); |
421 | 421 | ||
422 | /* TTL exceeded */ | 422 | /* TTL exceeded */ |
@@ -548,7 +548,8 @@ batadv_find_ifalter_router(struct batadv_orig_node *primary_orig, | |||
548 | return router; | 548 | return router; |
549 | } | 549 | } |
550 | 550 | ||
551 | static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size) | 551 | static int batadv_check_unicast_packet(struct batadv_priv *bat_priv, |
552 | struct sk_buff *skb, int hdr_size) | ||
552 | { | 553 | { |
553 | struct ethhdr *ethhdr; | 554 | struct ethhdr *ethhdr; |
554 | 555 | ||
@@ -567,7 +568,7 @@ static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size) | |||
567 | return -1; | 568 | return -1; |
568 | 569 | ||
569 | /* not for me */ | 570 | /* not for me */ |
570 | if (!batadv_is_my_mac(ethhdr->h_dest)) | 571 | if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest)) |
571 | return -1; | 572 | return -1; |
572 | 573 | ||
573 | return 0; | 574 | return 0; |
@@ -582,7 +583,7 @@ int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if) | |||
582 | char tt_flag; | 583 | char tt_flag; |
583 | size_t packet_size; | 584 | size_t packet_size; |
584 | 585 | ||
585 | if (batadv_check_unicast_packet(skb, hdr_size) < 0) | 586 | if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0) |
586 | return NET_RX_DROP; | 587 | return NET_RX_DROP; |
587 | 588 | ||
588 | /* I could need to modify it */ | 589 | /* I could need to modify it */ |
@@ -614,7 +615,7 @@ int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if) | |||
614 | case BATADV_TT_RESPONSE: | 615 | case BATADV_TT_RESPONSE: |
615 | batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_RX); | 616 | batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_RX); |
616 | 617 | ||
617 | if (batadv_is_my_mac(tt_query->dst)) { | 618 | if (batadv_is_my_mac(bat_priv, tt_query->dst)) { |
618 | /* packet needs to be linearized to access the TT | 619 | /* packet needs to be linearized to access the TT |
619 | * changes | 620 | * changes |
620 | */ | 621 | */ |
@@ -657,14 +658,15 @@ int batadv_recv_roam_adv(struct sk_buff *skb, struct batadv_hard_iface *recv_if) | |||
657 | struct batadv_roam_adv_packet *roam_adv_packet; | 658 | struct batadv_roam_adv_packet *roam_adv_packet; |
658 | struct batadv_orig_node *orig_node; | 659 | struct batadv_orig_node *orig_node; |
659 | 660 | ||
660 | if (batadv_check_unicast_packet(skb, sizeof(*roam_adv_packet)) < 0) | 661 | if (batadv_check_unicast_packet(bat_priv, skb, |
662 | sizeof(*roam_adv_packet)) < 0) | ||
661 | goto out; | 663 | goto out; |
662 | 664 | ||
663 | batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX); | 665 | batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX); |
664 | 666 | ||
665 | roam_adv_packet = (struct batadv_roam_adv_packet *)skb->data; | 667 | roam_adv_packet = (struct batadv_roam_adv_packet *)skb->data; |
666 | 668 | ||
667 | if (!batadv_is_my_mac(roam_adv_packet->dst)) | 669 | if (!batadv_is_my_mac(bat_priv, roam_adv_packet->dst)) |
668 | return batadv_route_unicast_packet(skb, recv_if); | 670 | return batadv_route_unicast_packet(skb, recv_if); |
669 | 671 | ||
670 | /* check if it is a backbone gateway. we don't accept | 672 | /* check if it is a backbone gateway. we don't accept |
@@ -967,7 +969,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, | |||
967 | * last time) the packet had an updated information or not | 969 | * last time) the packet had an updated information or not |
968 | */ | 970 | */ |
969 | curr_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn); | 971 | curr_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn); |
970 | if (!batadv_is_my_mac(unicast_packet->dest)) { | 972 | if (!batadv_is_my_mac(bat_priv, unicast_packet->dest)) { |
971 | orig_node = batadv_orig_hash_find(bat_priv, | 973 | orig_node = batadv_orig_hash_find(bat_priv, |
972 | unicast_packet->dest); | 974 | unicast_packet->dest); |
973 | /* if it is not possible to find the orig_node representing the | 975 | /* if it is not possible to find the orig_node representing the |
@@ -1044,14 +1046,14 @@ int batadv_recv_unicast_packet(struct sk_buff *skb, | |||
1044 | if (is4addr) | 1046 | if (is4addr) |
1045 | hdr_size = sizeof(*unicast_4addr_packet); | 1047 | hdr_size = sizeof(*unicast_4addr_packet); |
1046 | 1048 | ||
1047 | if (batadv_check_unicast_packet(skb, hdr_size) < 0) | 1049 | if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0) |
1048 | return NET_RX_DROP; | 1050 | return NET_RX_DROP; |
1049 | 1051 | ||
1050 | if (!batadv_check_unicast_ttvn(bat_priv, skb)) | 1052 | if (!batadv_check_unicast_ttvn(bat_priv, skb)) |
1051 | return NET_RX_DROP; | 1053 | return NET_RX_DROP; |
1052 | 1054 | ||
1053 | /* packet for me */ | 1055 | /* packet for me */ |
1054 | if (batadv_is_my_mac(unicast_packet->dest)) { | 1056 | if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) { |
1055 | if (is4addr) { | 1057 | if (is4addr) { |
1056 | batadv_dat_inc_counter(bat_priv, | 1058 | batadv_dat_inc_counter(bat_priv, |
1057 | unicast_4addr_packet->subtype); | 1059 | unicast_4addr_packet->subtype); |
@@ -1088,7 +1090,7 @@ int batadv_recv_ucast_frag_packet(struct sk_buff *skb, | |||
1088 | struct sk_buff *new_skb = NULL; | 1090 | struct sk_buff *new_skb = NULL; |
1089 | int ret; | 1091 | int ret; |
1090 | 1092 | ||
1091 | if (batadv_check_unicast_packet(skb, hdr_size) < 0) | 1093 | if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0) |
1092 | return NET_RX_DROP; | 1094 | return NET_RX_DROP; |
1093 | 1095 | ||
1094 | if (!batadv_check_unicast_ttvn(bat_priv, skb)) | 1096 | if (!batadv_check_unicast_ttvn(bat_priv, skb)) |
@@ -1097,7 +1099,7 @@ int batadv_recv_ucast_frag_packet(struct sk_buff *skb, | |||
1097 | unicast_packet = (struct batadv_unicast_frag_packet *)skb->data; | 1099 | unicast_packet = (struct batadv_unicast_frag_packet *)skb->data; |
1098 | 1100 | ||
1099 | /* packet for me */ | 1101 | /* packet for me */ |
1100 | if (batadv_is_my_mac(unicast_packet->dest)) { | 1102 | if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) { |
1101 | ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb); | 1103 | ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb); |
1102 | 1104 | ||
1103 | if (ret == NET_RX_DROP) | 1105 | if (ret == NET_RX_DROP) |
@@ -1151,13 +1153,13 @@ int batadv_recv_bcast_packet(struct sk_buff *skb, | |||
1151 | goto out; | 1153 | goto out; |
1152 | 1154 | ||
1153 | /* ignore broadcasts sent by myself */ | 1155 | /* ignore broadcasts sent by myself */ |
1154 | if (batadv_is_my_mac(ethhdr->h_source)) | 1156 | if (batadv_is_my_mac(bat_priv, ethhdr->h_source)) |
1155 | goto out; | 1157 | goto out; |
1156 | 1158 | ||
1157 | bcast_packet = (struct batadv_bcast_packet *)skb->data; | 1159 | bcast_packet = (struct batadv_bcast_packet *)skb->data; |
1158 | 1160 | ||
1159 | /* ignore broadcasts originated by myself */ | 1161 | /* ignore broadcasts originated by myself */ |
1160 | if (batadv_is_my_mac(bcast_packet->orig)) | 1162 | if (batadv_is_my_mac(bat_priv, bcast_packet->orig)) |
1161 | goto out; | 1163 | goto out; |
1162 | 1164 | ||
1163 | if (bcast_packet->header.ttl < 2) | 1165 | if (bcast_packet->header.ttl < 2) |
@@ -1243,14 +1245,14 @@ int batadv_recv_vis_packet(struct sk_buff *skb, | |||
1243 | ethhdr = (struct ethhdr *)skb_mac_header(skb); | 1245 | ethhdr = (struct ethhdr *)skb_mac_header(skb); |
1244 | 1246 | ||
1245 | /* not for me */ | 1247 | /* not for me */ |
1246 | if (!batadv_is_my_mac(ethhdr->h_dest)) | 1248 | if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest)) |
1247 | return NET_RX_DROP; | 1249 | return NET_RX_DROP; |
1248 | 1250 | ||
1249 | /* ignore own packets */ | 1251 | /* ignore own packets */ |
1250 | if (batadv_is_my_mac(vis_packet->vis_orig)) | 1252 | if (batadv_is_my_mac(bat_priv, vis_packet->vis_orig)) |
1251 | return NET_RX_DROP; | 1253 | return NET_RX_DROP; |
1252 | 1254 | ||
1253 | if (batadv_is_my_mac(vis_packet->sender_orig)) | 1255 | if (batadv_is_my_mac(bat_priv, vis_packet->sender_orig)) |
1254 | return NET_RX_DROP; | 1256 | return NET_RX_DROP; |
1255 | 1257 | ||
1256 | switch (vis_packet->vis_type) { | 1258 | switch (vis_packet->vis_type) { |
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 98a66a021a60..7abee19567e9 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c | |||
@@ -1953,7 +1953,7 @@ out: | |||
1953 | bool batadv_send_tt_response(struct batadv_priv *bat_priv, | 1953 | bool batadv_send_tt_response(struct batadv_priv *bat_priv, |
1954 | struct batadv_tt_query_packet *tt_request) | 1954 | struct batadv_tt_query_packet *tt_request) |
1955 | { | 1955 | { |
1956 | if (batadv_is_my_mac(tt_request->dst)) { | 1956 | if (batadv_is_my_mac(bat_priv, tt_request->dst)) { |
1957 | /* don't answer backbone gws! */ | 1957 | /* don't answer backbone gws! */ |
1958 | if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src)) | 1958 | if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src)) |
1959 | return true; | 1959 | return true; |
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c index c053244b97bd..6a1e646be96d 100644 --- a/net/batman-adv/vis.c +++ b/net/batman-adv/vis.c | |||
@@ -477,7 +477,7 @@ void batadv_receive_client_update_packet(struct batadv_priv *bat_priv, | |||
477 | 477 | ||
478 | /* Are we the target for this VIS packet? */ | 478 | /* Are we the target for this VIS packet? */ |
479 | if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC && | 479 | if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC && |
480 | batadv_is_my_mac(vis_packet->target_orig)) | 480 | batadv_is_my_mac(bat_priv, vis_packet->target_orig)) |
481 | are_target = 1; | 481 | are_target = 1; |
482 | 482 | ||
483 | spin_lock_bh(&bat_priv->vis.hash_lock); | 483 | spin_lock_bh(&bat_priv->vis.hash_lock); |
@@ -496,7 +496,7 @@ void batadv_receive_client_update_packet(struct batadv_priv *bat_priv, | |||
496 | batadv_send_list_add(bat_priv, info); | 496 | batadv_send_list_add(bat_priv, info); |
497 | 497 | ||
498 | /* ... we're not the recipient (and thus need to forward). */ | 498 | /* ... we're not the recipient (and thus need to forward). */ |
499 | } else if (!batadv_is_my_mac(packet->target_orig)) { | 499 | } else if (!batadv_is_my_mac(bat_priv, packet->target_orig)) { |
500 | batadv_send_list_add(bat_priv, info); | 500 | batadv_send_list_add(bat_priv, info); |
501 | } | 501 | } |
502 | 502 | ||
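
The batman-adv hunks above (routing.c, translation-table.c, vis.c) thread a bat_priv argument into batadv_is_my_mac(), so "is this one of my MAC addresses" is answered per mesh instance (per soft interface) rather than globally; with several mesh instances on one host, an address owned by a different instance must not be treated as local. A minimal sketch of that kind of per-instance check, using illustrative names (mesh_priv, mesh_port, mesh_is_my_mac) rather than the real batman-adv types:

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>
	#include <linux/rculist.h>

	/* Illustrative stand-ins for the per-mesh private data and its ports. */
	struct mesh_port {
		struct list_head list;
		struct net_device *dev;
	};

	struct mesh_priv {
		struct list_head port_list;	/* ports attached to this mesh only */
	};

	/* Return true only if @addr belongs to an interface of *this* mesh. */
	static bool mesh_is_my_mac(struct mesh_priv *priv, const u8 *addr)
	{
		struct mesh_port *port;
		bool mine = false;

		rcu_read_lock();
		list_for_each_entry_rcu(port, &priv->port_list, list) {
			if (ether_addr_equal(port->dev->dev_addr, addr)) {
				mine = true;
				break;
			}
		}
		rcu_read_unlock();
		return mine;
	}
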
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index d3ee69b35a78..0d1b08cc76e1 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c | |||
@@ -230,6 +230,8 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
230 | if (flags & (MSG_OOB)) | 230 | if (flags & (MSG_OOB)) |
231 | return -EOPNOTSUPP; | 231 | return -EOPNOTSUPP; |
232 | 232 | ||
233 | msg->msg_namelen = 0; | ||
234 | |||
233 | skb = skb_recv_datagram(sk, flags, noblock, &err); | 235 | skb = skb_recv_datagram(sk, flags, noblock, &err); |
234 | if (!skb) { | 236 | if (!skb) { |
235 | if (sk->sk_shutdown & RCV_SHUTDOWN) | 237 | if (sk->sk_shutdown & RCV_SHUTDOWN) |
@@ -237,8 +239,6 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
237 | return err; | 239 | return err; |
238 | } | 240 | } |
239 | 241 | ||
240 | msg->msg_namelen = 0; | ||
241 | |||
242 | copied = skb->len; | 242 | copied = skb->len; |
243 | if (len < copied) { | 243 | if (len < copied) { |
244 | msg->msg_flags |= MSG_TRUNC; | 244 | msg->msg_flags |= MSG_TRUNC; |
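
This af_bluetooth.c change, together with the matching one-liners in rfcomm, sco, caif, irda, llc, l2tp_ip6 and af_iucv further down, plugs the same class of information leak: a recvmsg() handler that returns without filling in a source address must still set msg->msg_namelen to 0, otherwise the uninitialized value is used as the address length copied back to user space. Moving the assignment ahead of the first early return is the whole fix. A sketch of the pattern as a generic datagram receive handler (example_recvmsg is hypothetical, not any specific protocol's implementation):

	#include <linux/kernel.h>
	#include <linux/skbuff.h>
	#include <linux/socket.h>
	#include <net/sock.h>

	static int example_recvmsg(struct kiocb *iocb, struct socket *sock,
				   struct msghdr *msg, size_t len, int flags)
	{
		struct sock *sk = sock->sk;
		struct sk_buff *skb;
		size_t copied;
		int err;

		/* This handler never returns a source address, so say so
		 * explicitly before any early return path can be taken. */
		msg->msg_namelen = 0;

		skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
					flags & MSG_DONTWAIT, &err);
		if (!skb)
			return err;	/* early return is now safe */

		copied = min_t(size_t, len, skb->len);
		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
		skb_free_datagram(sk, skb);

		return err ? err : copied;
	}
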
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index c23bae86263b..7c9224bcce17 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c | |||
@@ -608,6 +608,7 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
608 | 608 | ||
609 | if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { | 609 | if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { |
610 | rfcomm_dlc_accept(d); | 610 | rfcomm_dlc_accept(d); |
611 | msg->msg_namelen = 0; | ||
611 | return 0; | 612 | return 0; |
612 | } | 613 | } |
613 | 614 | ||
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index fad0302bdb32..fb6192c9812e 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c | |||
@@ -665,6 +665,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
665 | test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { | 665 | test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { |
666 | hci_conn_accept(pi->conn->hcon, 0); | 666 | hci_conn_accept(pi->conn->hcon, 0); |
667 | sk->sk_state = BT_CONFIG; | 667 | sk->sk_state = BT_CONFIG; |
668 | msg->msg_namelen = 0; | ||
668 | 669 | ||
669 | release_sock(sk); | 670 | release_sock(sk); |
670 | return 0; | 671 | return 0; |
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index ef1b91431c6b..459dab22b3f6 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c | |||
@@ -67,7 +67,8 @@ void br_port_carrier_check(struct net_bridge_port *p) | |||
67 | struct net_device *dev = p->dev; | 67 | struct net_device *dev = p->dev; |
68 | struct net_bridge *br = p->br; | 68 | struct net_bridge *br = p->br; |
69 | 69 | ||
70 | if (netif_running(dev) && netif_oper_up(dev)) | 70 | if (!(p->flags & BR_ADMIN_COST) && |
71 | netif_running(dev) && netif_oper_up(dev)) | ||
71 | p->path_cost = port_cost(dev); | 72 | p->path_cost = port_cost(dev); |
72 | 73 | ||
73 | if (!netif_running(br->dev)) | 74 | if (!netif_running(br->dev)) |
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 3cbf5beb3d4b..d2c043a857b6 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
@@ -156,6 +156,7 @@ struct net_bridge_port | |||
156 | #define BR_BPDU_GUARD 0x00000002 | 156 | #define BR_BPDU_GUARD 0x00000002 |
157 | #define BR_ROOT_BLOCK 0x00000004 | 157 | #define BR_ROOT_BLOCK 0x00000004 |
158 | #define BR_MULTICAST_FAST_LEAVE 0x00000008 | 158 | #define BR_MULTICAST_FAST_LEAVE 0x00000008 |
159 | #define BR_ADMIN_COST 0x00000010 | ||
159 | 160 | ||
160 | #ifdef CONFIG_BRIDGE_IGMP_SNOOPING | 161 | #ifdef CONFIG_BRIDGE_IGMP_SNOOPING |
161 | u32 multicast_startup_queries_sent; | 162 | u32 multicast_startup_queries_sent; |
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c index 0bdb4ebd362b..d45e760141bb 100644 --- a/net/bridge/br_stp_if.c +++ b/net/bridge/br_stp_if.c | |||
@@ -288,6 +288,7 @@ int br_stp_set_path_cost(struct net_bridge_port *p, unsigned long path_cost) | |||
288 | path_cost > BR_MAX_PATH_COST) | 288 | path_cost > BR_MAX_PATH_COST) |
289 | return -ERANGE; | 289 | return -ERANGE; |
290 | 290 | ||
291 | p->flags |= BR_ADMIN_COST; | ||
291 | p->path_cost = path_cost; | 292 | p->path_cost = path_cost; |
292 | br_configuration_update(p->br); | 293 | br_configuration_update(p->br); |
293 | br_port_state_selection(p->br); | 294 | br_port_state_selection(p->br); |
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 095259f83902..ff2ff3ce6965 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c | |||
@@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
286 | if (m->msg_flags&MSG_OOB) | 286 | if (m->msg_flags&MSG_OOB) |
287 | goto read_error; | 287 | goto read_error; |
288 | 288 | ||
289 | m->msg_namelen = 0; | ||
290 | |||
289 | skb = skb_recv_datagram(sk, flags, 0 , &ret); | 291 | skb = skb_recv_datagram(sk, flags, 0 , &ret); |
290 | if (!skb) | 292 | if (!skb) |
291 | goto read_error; | 293 | goto read_error; |
diff --git a/net/can/gw.c b/net/can/gw.c index 2d117dc5ebea..117814a7e73c 100644 --- a/net/can/gw.c +++ b/net/can/gw.c | |||
@@ -466,7 +466,7 @@ static int cgw_notifier(struct notifier_block *nb, | |||
466 | if (gwj->src.dev == dev || gwj->dst.dev == dev) { | 466 | if (gwj->src.dev == dev || gwj->dst.dev == dev) { |
467 | hlist_del(&gwj->list); | 467 | hlist_del(&gwj->list); |
468 | cgw_unregister_filter(gwj); | 468 | cgw_unregister_filter(gwj); |
469 | kfree(gwj); | 469 | kmem_cache_free(cgw_cache, gwj); |
470 | } | 470 | } |
471 | } | 471 | } |
472 | } | 472 | } |
@@ -864,7 +864,7 @@ static void cgw_remove_all_jobs(void) | |||
864 | hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) { | 864 | hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) { |
865 | hlist_del(&gwj->list); | 865 | hlist_del(&gwj->list); |
866 | cgw_unregister_filter(gwj); | 866 | cgw_unregister_filter(gwj); |
867 | kfree(gwj); | 867 | kmem_cache_free(cgw_cache, gwj); |
868 | } | 868 | } |
869 | } | 869 | } |
870 | 870 | ||
@@ -920,7 +920,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
920 | 920 | ||
921 | hlist_del(&gwj->list); | 921 | hlist_del(&gwj->list); |
922 | cgw_unregister_filter(gwj); | 922 | cgw_unregister_filter(gwj); |
923 | kfree(gwj); | 923 | kmem_cache_free(cgw_cache, gwj); |
924 | err = 0; | 924 | err = 0; |
925 | break; | 925 | break; |
926 | } | 926 | } |
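
The three can/gw.c hunks swap kfree() for kmem_cache_free(): the gateway jobs are carved out of a dedicated slab cache (cgw_cache), and an object obtained from kmem_cache_alloc() has to be returned to that same cache rather than handed to kfree(), which assumes a kmalloc origin. A minimal sketch of the pairing, with illustrative names (job, job_cache):

	#include <linux/errno.h>
	#include <linux/list.h>
	#include <linux/slab.h>

	struct job {
		struct list_head list;
		int id;
	};

	static struct kmem_cache *job_cache;	/* created once, e.g. at module init */

	static int job_cache_init(void)
	{
		job_cache = kmem_cache_create("job_cache", sizeof(struct job),
					      0, 0, NULL);
		return job_cache ? 0 : -ENOMEM;
	}

	static struct job *job_alloc(void)
	{
		return kmem_cache_alloc(job_cache, GFP_KERNEL);
	}

	static void job_free(struct job *j)
	{
		/* pairs with kmem_cache_alloc(); kfree() would hand the object
		 * back to the wrong allocator bookkeeping */
		kmem_cache_free(job_cache, j);
	}
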
diff --git a/net/core/dev.c b/net/core/dev.c index b13e5c766c11..b24ab0e98eb4 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -1624,7 +1624,6 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) | |||
1624 | } | 1624 | } |
1625 | 1625 | ||
1626 | skb_orphan(skb); | 1626 | skb_orphan(skb); |
1627 | nf_reset(skb); | ||
1628 | 1627 | ||
1629 | if (unlikely(!is_skb_forwardable(dev, skb))) { | 1628 | if (unlikely(!is_skb_forwardable(dev, skb))) { |
1630 | atomic_long_inc(&dev->rx_dropped); | 1629 | atomic_long_inc(&dev->rx_dropped); |
@@ -1640,6 +1639,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) | |||
1640 | skb->mark = 0; | 1639 | skb->mark = 0; |
1641 | secpath_reset(skb); | 1640 | secpath_reset(skb); |
1642 | nf_reset(skb); | 1641 | nf_reset(skb); |
1642 | nf_reset_trace(skb); | ||
1643 | return netif_rx(skb); | 1643 | return netif_rx(skb); |
1644 | } | 1644 | } |
1645 | EXPORT_SYMBOL_GPL(dev_forward_skb); | 1645 | EXPORT_SYMBOL_GPL(dev_forward_skb); |
@@ -2148,6 +2148,9 @@ static void skb_warn_bad_offload(const struct sk_buff *skb) | |||
2148 | struct net_device *dev = skb->dev; | 2148 | struct net_device *dev = skb->dev; |
2149 | const char *driver = ""; | 2149 | const char *driver = ""; |
2150 | 2150 | ||
2151 | if (!net_ratelimit()) | ||
2152 | return; | ||
2153 | |||
2151 | if (dev && dev->dev.parent) | 2154 | if (dev && dev->dev.parent) |
2152 | driver = dev_driver_string(dev->dev.parent); | 2155 | driver = dev_driver_string(dev->dev.parent); |
2153 | 2156 | ||
@@ -3314,6 +3317,7 @@ int netdev_rx_handler_register(struct net_device *dev, | |||
3314 | if (dev->rx_handler) | 3317 | if (dev->rx_handler) |
3315 | return -EBUSY; | 3318 | return -EBUSY; |
3316 | 3319 | ||
3320 | /* Note: rx_handler_data must be set before rx_handler */ | ||
3317 | rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); | 3321 | rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); |
3318 | rcu_assign_pointer(dev->rx_handler, rx_handler); | 3322 | rcu_assign_pointer(dev->rx_handler, rx_handler); |
3319 | 3323 | ||
@@ -3334,6 +3338,11 @@ void netdev_rx_handler_unregister(struct net_device *dev) | |||
3334 | 3338 | ||
3335 | ASSERT_RTNL(); | 3339 | ASSERT_RTNL(); |
3336 | RCU_INIT_POINTER(dev->rx_handler, NULL); | 3340 | RCU_INIT_POINTER(dev->rx_handler, NULL); |
3341 | /* a reader seeing a non NULL rx_handler in a rcu_read_lock() | ||
3342 | * section has a guarantee to see a non NULL rx_handler_data | ||
3343 | * as well. | ||
3344 | */ | ||
3345 | synchronize_net(); | ||
3337 | RCU_INIT_POINTER(dev->rx_handler_data, NULL); | 3346 | RCU_INIT_POINTER(dev->rx_handler_data, NULL); |
3338 | } | 3347 | } |
3339 | EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); | 3348 | EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); |
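
Two ordering guarantees are added around the rx_handler pointers above: registration publishes rx_handler_data before rx_handler (the new comment spells this out), and netdev_rx_handler_unregister() now clears the handler, waits a grace period with synchronize_net(), and only then clears the data, so any RCU reader that observed a non-NULL handler is also guaranteed a usable rx_handler_data. The same publish/retire discipline in generic form, with hypothetical names and synchronize_rcu() standing in for synchronize_net():

	#include <linux/rcupdate.h>

	typedef int (handler_fn)(void *data);

	struct obj {
		handler_fn __rcu *handler;
		void __rcu *handler_data;
	};

	static void handler_register(struct obj *o, handler_fn *fn, void *data)
	{
		/* publish the data first ... */
		rcu_assign_pointer(o->handler_data, data);
		/* ... then the handler; a reader that sees the handler is
		 * therefore guaranteed to see its data as well */
		rcu_assign_pointer(o->handler, fn);
	}

	static void handler_unregister(struct obj *o)
	{
		RCU_INIT_POINTER(o->handler, NULL);	/* stop new invocations */
		synchronize_rcu();			/* wait out in-flight readers */
		RCU_INIT_POINTER(o->handler_data, NULL);/* now nobody can still need it */
	}
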
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index bd2eb9d3e369..abdc9e6ef33e 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c | |||
@@ -37,7 +37,7 @@ static int __hw_addr_create_ex(struct netdev_hw_addr_list *list, | |||
37 | ha->type = addr_type; | 37 | ha->type = addr_type; |
38 | ha->refcount = 1; | 38 | ha->refcount = 1; |
39 | ha->global_use = global; | 39 | ha->global_use = global; |
40 | ha->synced = false; | 40 | ha->synced = 0; |
41 | list_add_tail_rcu(&ha->list, &list->list); | 41 | list_add_tail_rcu(&ha->list, &list->list); |
42 | list->count++; | 42 | list->count++; |
43 | 43 | ||
@@ -165,7 +165,7 @@ int __hw_addr_sync(struct netdev_hw_addr_list *to_list, | |||
165 | addr_len, ha->type); | 165 | addr_len, ha->type); |
166 | if (err) | 166 | if (err) |
167 | break; | 167 | break; |
168 | ha->synced = true; | 168 | ha->synced++; |
169 | ha->refcount++; | 169 | ha->refcount++; |
170 | } else if (ha->refcount == 1) { | 170 | } else if (ha->refcount == 1) { |
171 | __hw_addr_del(to_list, ha->addr, addr_len, ha->type); | 171 | __hw_addr_del(to_list, ha->addr, addr_len, ha->type); |
@@ -186,7 +186,7 @@ void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, | |||
186 | if (ha->synced) { | 186 | if (ha->synced) { |
187 | __hw_addr_del(to_list, ha->addr, | 187 | __hw_addr_del(to_list, ha->addr, |
188 | addr_len, ha->type); | 188 | addr_len, ha->type); |
189 | ha->synced = false; | 189 | ha->synced--; |
190 | __hw_addr_del(from_list, ha->addr, | 190 | __hw_addr_del(from_list, ha->addr, |
191 | addr_len, ha->type); | 191 | addr_len, ha->type); |
192 | } | 192 | } |
diff --git a/net/core/flow.c b/net/core/flow.c index c56ea6f7f6c7..2bfd081c59f7 100644 --- a/net/core/flow.c +++ b/net/core/flow.c | |||
@@ -328,7 +328,7 @@ static void flow_cache_flush_per_cpu(void *data) | |||
328 | struct flow_flush_info *info = data; | 328 | struct flow_flush_info *info = data; |
329 | struct tasklet_struct *tasklet; | 329 | struct tasklet_struct *tasklet; |
330 | 330 | ||
331 | tasklet = this_cpu_ptr(&info->cache->percpu->flush_tasklet); | 331 | tasklet = &this_cpu_ptr(info->cache->percpu)->flush_tasklet; |
332 | tasklet->data = (unsigned long)info; | 332 | tasklet->data = (unsigned long)info; |
333 | tasklet_schedule(tasklet); | 333 | tasklet_schedule(tasklet); |
334 | } | 334 | } |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 5fb8d7e47294..23854b51a259 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -496,8 +496,10 @@ static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev) | |||
496 | } | 496 | } |
497 | if (ops->fill_info) { | 497 | if (ops->fill_info) { |
498 | data = nla_nest_start(skb, IFLA_INFO_DATA); | 498 | data = nla_nest_start(skb, IFLA_INFO_DATA); |
499 | if (data == NULL) | 499 | if (data == NULL) { |
500 | err = -EMSGSIZE; | ||
500 | goto err_cancel_link; | 501 | goto err_cancel_link; |
502 | } | ||
501 | err = ops->fill_info(skb, dev); | 503 | err = ops->fill_info(skb, dev); |
502 | if (err < 0) | 504 | if (err < 0) |
503 | goto err_cancel_data; | 505 | goto err_cancel_data; |
@@ -1070,7 +1072,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) | |||
1070 | rcu_read_lock(); | 1072 | rcu_read_lock(); |
1071 | cb->seq = net->dev_base_seq; | 1073 | cb->seq = net->dev_base_seq; |
1072 | 1074 | ||
1073 | if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, | 1075 | if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX, |
1074 | ifla_policy) >= 0) { | 1076 | ifla_policy) >= 0) { |
1075 | 1077 | ||
1076 | if (tb[IFLA_EXT_MASK]) | 1078 | if (tb[IFLA_EXT_MASK]) |
@@ -1920,7 +1922,7 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
1920 | u32 ext_filter_mask = 0; | 1922 | u32 ext_filter_mask = 0; |
1921 | u16 min_ifinfo_dump_size = 0; | 1923 | u16 min_ifinfo_dump_size = 0; |
1922 | 1924 | ||
1923 | if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, | 1925 | if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX, |
1924 | ifla_policy) >= 0) { | 1926 | ifla_policy) >= 0) { |
1925 | if (tb[IFLA_EXT_MASK]) | 1927 | if (tb[IFLA_EXT_MASK]) |
1926 | ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); | 1928 | ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); |
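
Two small rtnetlink fixes above: the nla_nest_start() failure path now reports -EMSGSIZE instead of whatever value err happened to hold, and nlmsg_parse() is called with sizeof(struct ifinfomsg), the header these link requests actually carry, instead of the smaller struct rtgenmsg. The header length tells the parser where the attribute stream starts, so a too-small header makes it treat the tail of the ifinfomsg as attribute data. A sketch of the corrected call (parse_link_attrs is a hypothetical wrapper; the policy table is assumed to be the file's ifla_policy):

	#include <net/netlink.h>
	#include <linux/if_link.h>
	#include <linux/rtnetlink.h>

	static int parse_link_attrs(const struct nlmsghdr *nlh, struct nlattr **tb,
				    const struct nla_policy *policy)
	{
		/* attributes start at NLMSG_DATA(nlh) + NLMSG_ALIGN(hdrlen);
		 * hdrlen must match the real family header, struct ifinfomsg */
		return nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX, policy);
	}
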
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index f678507bc829..c6287cd978c2 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
@@ -587,13 +587,16 @@ static void check_lifetime(struct work_struct *work) | |||
587 | { | 587 | { |
588 | unsigned long now, next, next_sec, next_sched; | 588 | unsigned long now, next, next_sec, next_sched; |
589 | struct in_ifaddr *ifa; | 589 | struct in_ifaddr *ifa; |
590 | struct hlist_node *n; | ||
590 | int i; | 591 | int i; |
591 | 592 | ||
592 | now = jiffies; | 593 | now = jiffies; |
593 | next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY); | 594 | next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY); |
594 | 595 | ||
595 | rcu_read_lock(); | ||
596 | for (i = 0; i < IN4_ADDR_HSIZE; i++) { | 596 | for (i = 0; i < IN4_ADDR_HSIZE; i++) { |
597 | bool change_needed = false; | ||
598 | |||
599 | rcu_read_lock(); | ||
597 | hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) { | 600 | hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) { |
598 | unsigned long age; | 601 | unsigned long age; |
599 | 602 | ||
@@ -606,16 +609,7 @@ static void check_lifetime(struct work_struct *work) | |||
606 | 609 | ||
607 | if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME && | 610 | if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME && |
608 | age >= ifa->ifa_valid_lft) { | 611 | age >= ifa->ifa_valid_lft) { |
609 | struct in_ifaddr **ifap ; | 612 | change_needed = true; |
610 | |||
611 | rtnl_lock(); | ||
612 | for (ifap = &ifa->ifa_dev->ifa_list; | ||
613 | *ifap != NULL; ifap = &ifa->ifa_next) { | ||
614 | if (*ifap == ifa) | ||
615 | inet_del_ifa(ifa->ifa_dev, | ||
616 | ifap, 1); | ||
617 | } | ||
618 | rtnl_unlock(); | ||
619 | } else if (ifa->ifa_preferred_lft == | 613 | } else if (ifa->ifa_preferred_lft == |
620 | INFINITY_LIFE_TIME) { | 614 | INFINITY_LIFE_TIME) { |
621 | continue; | 615 | continue; |
@@ -625,10 +619,8 @@ static void check_lifetime(struct work_struct *work) | |||
625 | next = ifa->ifa_tstamp + | 619 | next = ifa->ifa_tstamp + |
626 | ifa->ifa_valid_lft * HZ; | 620 | ifa->ifa_valid_lft * HZ; |
627 | 621 | ||
628 | if (!(ifa->ifa_flags & IFA_F_DEPRECATED)) { | 622 | if (!(ifa->ifa_flags & IFA_F_DEPRECATED)) |
629 | ifa->ifa_flags |= IFA_F_DEPRECATED; | 623 | change_needed = true; |
630 | rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0); | ||
631 | } | ||
632 | } else if (time_before(ifa->ifa_tstamp + | 624 | } else if (time_before(ifa->ifa_tstamp + |
633 | ifa->ifa_preferred_lft * HZ, | 625 | ifa->ifa_preferred_lft * HZ, |
634 | next)) { | 626 | next)) { |
@@ -636,8 +628,42 @@ static void check_lifetime(struct work_struct *work) | |||
636 | ifa->ifa_preferred_lft * HZ; | 628 | ifa->ifa_preferred_lft * HZ; |
637 | } | 629 | } |
638 | } | 630 | } |
631 | rcu_read_unlock(); | ||
632 | if (!change_needed) | ||
633 | continue; | ||
634 | rtnl_lock(); | ||
635 | hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) { | ||
636 | unsigned long age; | ||
637 | |||
638 | if (ifa->ifa_flags & IFA_F_PERMANENT) | ||
639 | continue; | ||
640 | |||
641 | /* We try to batch several events at once. */ | ||
642 | age = (now - ifa->ifa_tstamp + | ||
643 | ADDRCONF_TIMER_FUZZ_MINUS) / HZ; | ||
644 | |||
645 | if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME && | ||
646 | age >= ifa->ifa_valid_lft) { | ||
647 | struct in_ifaddr **ifap; | ||
648 | |||
649 | for (ifap = &ifa->ifa_dev->ifa_list; | ||
650 | *ifap != NULL; ifap = &(*ifap)->ifa_next) { | ||
651 | if (*ifap == ifa) { | ||
652 | inet_del_ifa(ifa->ifa_dev, | ||
653 | ifap, 1); | ||
654 | break; | ||
655 | } | ||
656 | } | ||
657 | } else if (ifa->ifa_preferred_lft != | ||
658 | INFINITY_LIFE_TIME && | ||
659 | age >= ifa->ifa_preferred_lft && | ||
660 | !(ifa->ifa_flags & IFA_F_DEPRECATED)) { | ||
661 | ifa->ifa_flags |= IFA_F_DEPRECATED; | ||
662 | rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0); | ||
663 | } | ||
664 | } | ||
665 | rtnl_unlock(); | ||
639 | } | 666 | } |
640 | rcu_read_unlock(); | ||
641 | 667 | ||
642 | next_sec = round_jiffies_up(next); | 668 | next_sec = round_jiffies_up(next); |
643 | next_sched = next; | 669 | next_sched = next; |
@@ -802,8 +828,12 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg | |||
802 | if (nlh->nlmsg_flags & NLM_F_EXCL || | 828 | if (nlh->nlmsg_flags & NLM_F_EXCL || |
803 | !(nlh->nlmsg_flags & NLM_F_REPLACE)) | 829 | !(nlh->nlmsg_flags & NLM_F_REPLACE)) |
804 | return -EEXIST; | 830 | return -EEXIST; |
805 | 831 | ifa = ifa_existing; | |
806 | set_ifa_lifetime(ifa_existing, valid_lft, prefered_lft); | 832 | set_ifa_lifetime(ifa, valid_lft, prefered_lft); |
833 | cancel_delayed_work(&check_lifetime_work); | ||
834 | schedule_delayed_work(&check_lifetime_work, 0); | ||
835 | rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid); | ||
836 | blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa); | ||
807 | } | 837 | } |
808 | return 0; | 838 | return 0; |
809 | } | 839 | } |
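
The check_lifetime() rework above splits each hash-bucket walk into two passes: a read-only scan under rcu_read_lock() that merely notes whether any address has expired or needs deprecating, and, only if so, a second walk under rtnl_lock() with the deletion-safe iterator that actually removes or flags addresses. The old code took rtnl_lock(), which may sleep, inside the RCU read-side section, which is what this presumably fixes; deferring the modification also batches the RTNL work. The shape of the pattern, with hypothetical types and helpers (struct entry, entry_expired, entry_remove, scan_bucket):

	#include <linux/jiffies.h>
	#include <linux/rculist.h>
	#include <linux/rtnetlink.h>

	struct entry {
		struct hlist_node node;
		unsigned long expires;
	};

	static bool entry_expired(const struct entry *e, unsigned long now)
	{
		return time_after_eq(now, e->expires);
	}

	static void entry_remove(struct entry *e)
	{
		hlist_del(&e->node);	/* real code would also free and notify */
	}

	static void scan_bucket(struct hlist_head *head, unsigned long now)
	{
		struct entry *e;
		struct hlist_node *n;
		bool change_needed = false;

		rcu_read_lock();
		hlist_for_each_entry_rcu(e, head, node) {
			if (entry_expired(e, now))	/* read-only decision */
				change_needed = true;
		}
		rcu_read_unlock();

		if (!change_needed)
			return;

		rtnl_lock();			/* may sleep: taken outside RCU */
		hlist_for_each_entry_safe(e, n, head, node) {
			if (entry_expired(e, now))
				entry_remove(e);	/* safe iterator + RTNL held */
		}
		rtnl_unlock();
	}
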
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 3b4f0cd2e63e..4cfe34d4cc96 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c | |||
@@ -139,8 +139,6 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) | |||
139 | 139 | ||
140 | /* skb is pure payload to encrypt */ | 140 | /* skb is pure payload to encrypt */ |
141 | 141 | ||
142 | err = -ENOMEM; | ||
143 | |||
144 | esp = x->data; | 142 | esp = x->data; |
145 | aead = esp->aead; | 143 | aead = esp->aead; |
146 | alen = crypto_aead_authsize(aead); | 144 | alen = crypto_aead_authsize(aead); |
@@ -176,8 +174,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) | |||
176 | } | 174 | } |
177 | 175 | ||
178 | tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen); | 176 | tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen); |
179 | if (!tmp) | 177 | if (!tmp) { |
178 | err = -ENOMEM; | ||
180 | goto error; | 179 | goto error; |
180 | } | ||
181 | 181 | ||
182 | seqhi = esp_tmp_seqhi(tmp); | 182 | seqhi = esp_tmp_seqhi(tmp); |
183 | iv = esp_tmp_iv(aead, tmp, seqhilen); | 183 | iv = esp_tmp_iv(aead, tmp, seqhilen); |
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index a6445b843ef4..52c273ea05c3 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c | |||
@@ -248,8 +248,7 @@ static void ip_expire(unsigned long arg) | |||
248 | if (!head->dev) | 248 | if (!head->dev) |
249 | goto out_rcu_unlock; | 249 | goto out_rcu_unlock; |
250 | 250 | ||
251 | /* skb dst is stale, drop it, and perform route lookup again */ | 251 | /* skb has no dst, perform route lookup again */ |
252 | skb_dst_drop(head); | ||
253 | iph = ip_hdr(head); | 252 | iph = ip_hdr(head); |
254 | err = ip_route_input_noref(head, iph->daddr, iph->saddr, | 253 | err = ip_route_input_noref(head, iph->daddr, iph->saddr, |
255 | iph->tos, head->dev); | 254 | iph->tos, head->dev); |
@@ -523,9 +522,16 @@ found: | |||
523 | qp->q.max_size = skb->len + ihl; | 522 | qp->q.max_size = skb->len + ihl; |
524 | 523 | ||
525 | if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && | 524 | if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && |
526 | qp->q.meat == qp->q.len) | 525 | qp->q.meat == qp->q.len) { |
527 | return ip_frag_reasm(qp, prev, dev); | 526 | unsigned long orefdst = skb->_skb_refdst; |
528 | 527 | ||
528 | skb->_skb_refdst = 0UL; | ||
529 | err = ip_frag_reasm(qp, prev, dev); | ||
530 | skb->_skb_refdst = orefdst; | ||
531 | return err; | ||
532 | } | ||
533 | |||
534 | skb_dst_drop(skb); | ||
529 | inet_frag_lru_move(&qp->q); | 535 | inet_frag_lru_move(&qp->q); |
530 | return -EINPROGRESS; | 536 | return -EINPROGRESS; |
531 | 537 | ||
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c index c30130062cd6..c49dcd0284a0 100644 --- a/net/ipv4/netfilter/ipt_rpfilter.c +++ b/net/ipv4/netfilter/ipt_rpfilter.c | |||
@@ -66,6 +66,12 @@ static bool rpfilter_lookup_reverse(struct flowi4 *fl4, | |||
66 | return dev_match; | 66 | return dev_match; |
67 | } | 67 | } |
68 | 68 | ||
69 | static bool rpfilter_is_local(const struct sk_buff *skb) | ||
70 | { | ||
71 | const struct rtable *rt = skb_rtable(skb); | ||
72 | return rt && (rt->rt_flags & RTCF_LOCAL); | ||
73 | } | ||
74 | |||
69 | static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) | 75 | static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) |
70 | { | 76 | { |
71 | const struct xt_rpfilter_info *info; | 77 | const struct xt_rpfilter_info *info; |
@@ -76,7 +82,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) | |||
76 | info = par->matchinfo; | 82 | info = par->matchinfo; |
77 | invert = info->flags & XT_RPFILTER_INVERT; | 83 | invert = info->flags & XT_RPFILTER_INVERT; |
78 | 84 | ||
79 | if (par->in->flags & IFF_LOOPBACK) | 85 | if (rpfilter_is_local(skb)) |
80 | return true ^ invert; | 86 | return true ^ invert; |
81 | 87 | ||
82 | iph = ip_hdr(skb); | 88 | iph = ip_hdr(skb); |
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index ef54377fb11c..397e0f69435f 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c | |||
@@ -349,8 +349,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, | |||
349 | * hasn't changed since we received the original syn, but I see | 349 | * hasn't changed since we received the original syn, but I see |
350 | * no easy way to do this. | 350 | * no easy way to do this. |
351 | */ | 351 | */ |
352 | flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk), | 352 | flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark, |
353 | RT_SCOPE_UNIVERSE, IPPROTO_TCP, | 353 | RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP, |
354 | inet_sk_flowi_flags(sk), | 354 | inet_sk_flowi_flags(sk), |
355 | (opt && opt->srr) ? opt->faddr : ireq->rmt_addr, | 355 | (opt && opt->srr) ? opt->faddr : ireq->rmt_addr, |
356 | ireq->loc_addr, th->source, th->dest); | 356 | ireq->loc_addr, th->source, th->dest); |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 3bd55bad230a..13b9c08fc158 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -113,6 +113,7 @@ int sysctl_tcp_early_retrans __read_mostly = 2; | |||
113 | #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ | 113 | #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ |
114 | #define FLAG_NONHEAD_RETRANS_ACKED 0x1000 /* Non-head rexmitted data was ACKed */ | 114 | #define FLAG_NONHEAD_RETRANS_ACKED 0x1000 /* Non-head rexmitted data was ACKed */ |
115 | #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */ | 115 | #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */ |
116 | #define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */ | ||
116 | 117 | ||
117 | #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) | 118 | #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) |
118 | #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED) | 119 | #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED) |
@@ -3564,6 +3565,27 @@ static void tcp_send_challenge_ack(struct sock *sk) | |||
3564 | } | 3565 | } |
3565 | } | 3566 | } |
3566 | 3567 | ||
3568 | static void tcp_store_ts_recent(struct tcp_sock *tp) | ||
3569 | { | ||
3570 | tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; | ||
3571 | tp->rx_opt.ts_recent_stamp = get_seconds(); | ||
3572 | } | ||
3573 | |||
3574 | static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) | ||
3575 | { | ||
3576 | if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { | ||
3577 | /* PAWS bug workaround wrt. ACK frames, the PAWS discard | ||
3578 | * extra check below makes sure this can only happen | ||
3579 | * for pure ACK frames. -DaveM | ||
3580 | * | ||
3581 | * Not only, also it occurs for expired timestamps. | ||
3582 | */ | ||
3583 | |||
3584 | if (tcp_paws_check(&tp->rx_opt, 0)) | ||
3585 | tcp_store_ts_recent(tp); | ||
3586 | } | ||
3587 | } | ||
3588 | |||
3567 | /* This routine deals with incoming acks, but not outgoing ones. */ | 3589 | /* This routine deals with incoming acks, but not outgoing ones. */ |
3568 | static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) | 3590 | static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) |
3569 | { | 3591 | { |
@@ -3607,6 +3629,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) | |||
3607 | prior_fackets = tp->fackets_out; | 3629 | prior_fackets = tp->fackets_out; |
3608 | prior_in_flight = tcp_packets_in_flight(tp); | 3630 | prior_in_flight = tcp_packets_in_flight(tp); |
3609 | 3631 | ||
3632 | /* ts_recent update must be made after we are sure that the packet | ||
3633 | * is in window. | ||
3634 | */ | ||
3635 | if (flag & FLAG_UPDATE_TS_RECENT) | ||
3636 | tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); | ||
3637 | |||
3610 | if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) { | 3638 | if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) { |
3611 | /* Window is constant, pure forward advance. | 3639 | /* Window is constant, pure forward advance. |
3612 | * No more checks are required. | 3640 | * No more checks are required. |
@@ -3927,27 +3955,6 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th) | |||
3927 | EXPORT_SYMBOL(tcp_parse_md5sig_option); | 3955 | EXPORT_SYMBOL(tcp_parse_md5sig_option); |
3928 | #endif | 3956 | #endif |
3929 | 3957 | ||
3930 | static inline void tcp_store_ts_recent(struct tcp_sock *tp) | ||
3931 | { | ||
3932 | tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; | ||
3933 | tp->rx_opt.ts_recent_stamp = get_seconds(); | ||
3934 | } | ||
3935 | |||
3936 | static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) | ||
3937 | { | ||
3938 | if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { | ||
3939 | /* PAWS bug workaround wrt. ACK frames, the PAWS discard | ||
3940 | * extra check below makes sure this can only happen | ||
3941 | * for pure ACK frames. -DaveM | ||
3942 | * | ||
3943 | * Not only, also it occurs for expired timestamps. | ||
3944 | */ | ||
3945 | |||
3946 | if (tcp_paws_check(&tp->rx_opt, 0)) | ||
3947 | tcp_store_ts_recent(tp); | ||
3948 | } | ||
3949 | } | ||
3950 | |||
3951 | /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM | 3958 | /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM |
3952 | * | 3959 | * |
3953 | * It is not fatal. If this ACK does _not_ change critical state (seqs, window) | 3960 | * It is not fatal. If this ACK does _not_ change critical state (seqs, window) |
@@ -5543,14 +5550,9 @@ slow_path: | |||
5543 | return 0; | 5550 | return 0; |
5544 | 5551 | ||
5545 | step5: | 5552 | step5: |
5546 | if (tcp_ack(sk, skb, FLAG_SLOWPATH) < 0) | 5553 | if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0) |
5547 | goto discard; | 5554 | goto discard; |
5548 | 5555 | ||
5549 | /* ts_recent update must be made after we are sure that the packet | ||
5550 | * is in window. | ||
5551 | */ | ||
5552 | tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); | ||
5553 | |||
5554 | tcp_rcv_rtt_measure_ts(sk, skb); | 5556 | tcp_rcv_rtt_measure_ts(sk, skb); |
5555 | 5557 | ||
5556 | /* Process urgent data. */ | 5558 | /* Process urgent data. */ |
@@ -5986,7 +5988,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
5986 | 5988 | ||
5987 | /* step 5: check the ACK field */ | 5989 | /* step 5: check the ACK field */ |
5988 | if (true) { | 5990 | if (true) { |
5989 | int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0; | 5991 | int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH | |
5992 | FLAG_UPDATE_TS_RECENT) > 0; | ||
5990 | 5993 | ||
5991 | switch (sk->sk_state) { | 5994 | switch (sk->sk_state) { |
5992 | case TCP_SYN_RECV: | 5995 | case TCP_SYN_RECV: |
@@ -6137,11 +6140,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
6137 | } | 6140 | } |
6138 | } | 6141 | } |
6139 | 6142 | ||
6140 | /* ts_recent update must be made after we are sure that the packet | ||
6141 | * is in window. | ||
6142 | */ | ||
6143 | tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); | ||
6144 | |||
6145 | /* step 6: check the URG bit */ | 6143 | /* step 6: check the URG bit */ |
6146 | tcp_urg(sk, skb, th); | 6144 | tcp_urg(sk, skb, th); |
6147 | 6145 | ||
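
The tcp_input.c changes move tcp_replace_ts_recent() out of the slow path and the state machine and into tcp_ack(), gated by the new FLAG_UPDATE_TS_RECENT. The point of the "must be made after we are sure that the packet is in window" comment is that the PAWS state (ts_recent) may only be refreshed once the segment is known to be acceptable; updating it before tcp_ack() can reject an out-of-window or otherwise discarded segment would let such a segment poison ts_recent and make later legitimate segments fail the PAWS check. A compressed illustration of the ordering, not the kernel's actual ACK processing (handle_ack and struct tcb are hypothetical):

	#include <stdbool.h>
	#include <stdint.h>

	/* serial-number comparisons, as used for TCP sequence space */
	static bool seq_after(uint32_t a, uint32_t b) { return (int32_t)(a - b) > 0; }
	static bool seq_before(uint32_t a, uint32_t b) { return seq_after(b, a); }

	struct tcb {
		uint32_t snd_una, snd_nxt;	/* send window state */
		uint32_t rcv_wup;		/* rcv_nxt at last window update */
		uint32_t ts_recent;		/* PAWS: latest accepted peer timestamp */
	};

	/* Returns true if the ACK was accepted; ts_recent is touched only then. */
	static bool handle_ack(struct tcb *tp, uint32_t ack, uint32_t seg_seq,
			       bool saw_tstamp, uint32_t tsval)
	{
		if (seq_before(ack, tp->snd_una))	/* old ACK: ignore */
			return false;
		if (seq_after(ack, tp->snd_nxt))	/* ACKs data never sent */
			return false;

		/* only now, with the segment accepted, may it influence PAWS state */
		if (saw_tstamp && !seq_after(seg_seq, tp->rcv_wup))
			tp->ts_recent = tsval;

		tp->snd_una = ack;
		return true;
	}
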
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 5d0b4387cba6..509912a5ff98 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -2388,8 +2388,12 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) | |||
2388 | */ | 2388 | */ |
2389 | TCP_SKB_CB(skb)->when = tcp_time_stamp; | 2389 | TCP_SKB_CB(skb)->when = tcp_time_stamp; |
2390 | 2390 | ||
2391 | /* make sure skb->data is aligned on arches that require it */ | 2391 | /* make sure skb->data is aligned on arches that require it |
2392 | if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) { | 2392 | * and check if ack-trimming & collapsing extended the headroom |
2393 | * beyond what csum_start can cover. | ||
2394 | */ | ||
2395 | if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) || | ||
2396 | skb_headroom(skb) >= 0xFFFF)) { | ||
2393 | struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER, | 2397 | struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER, |
2394 | GFP_ATOMIC); | 2398 | GFP_ATOMIC); |
2395 | return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : | 2399 | return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : |
@@ -2709,6 +2713,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, | |||
2709 | skb_reserve(skb, MAX_TCP_HEADER); | 2713 | skb_reserve(skb, MAX_TCP_HEADER); |
2710 | 2714 | ||
2711 | skb_dst_set(skb, dst); | 2715 | skb_dst_set(skb, dst); |
2716 | security_skb_owned_by(skb, sk); | ||
2712 | 2717 | ||
2713 | mss = dst_metric_advmss(dst); | 2718 | mss = dst_metric_advmss(dst); |
2714 | if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) | 2719 | if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 26512250e095..dae802c0af7c 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -168,8 +168,6 @@ static void inet6_prefix_notify(int event, struct inet6_dev *idev, | |||
168 | static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr, | 168 | static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr, |
169 | struct net_device *dev); | 169 | struct net_device *dev); |
170 | 170 | ||
171 | static ATOMIC_NOTIFIER_HEAD(inet6addr_chain); | ||
172 | |||
173 | static struct ipv6_devconf ipv6_devconf __read_mostly = { | 171 | static struct ipv6_devconf ipv6_devconf __read_mostly = { |
174 | .forwarding = 0, | 172 | .forwarding = 0, |
175 | .hop_limit = IPV6_DEFAULT_HOPLIMIT, | 173 | .hop_limit = IPV6_DEFAULT_HOPLIMIT, |
@@ -837,7 +835,7 @@ out2: | |||
837 | rcu_read_unlock_bh(); | 835 | rcu_read_unlock_bh(); |
838 | 836 | ||
839 | if (likely(err == 0)) | 837 | if (likely(err == 0)) |
840 | atomic_notifier_call_chain(&inet6addr_chain, NETDEV_UP, ifa); | 838 | inet6addr_notifier_call_chain(NETDEV_UP, ifa); |
841 | else { | 839 | else { |
842 | kfree(ifa); | 840 | kfree(ifa); |
843 | ifa = ERR_PTR(err); | 841 | ifa = ERR_PTR(err); |
@@ -927,7 +925,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) | |||
927 | 925 | ||
928 | ipv6_ifa_notify(RTM_DELADDR, ifp); | 926 | ipv6_ifa_notify(RTM_DELADDR, ifp); |
929 | 927 | ||
930 | atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifp); | 928 | inet6addr_notifier_call_chain(NETDEV_DOWN, ifp); |
931 | 929 | ||
932 | /* | 930 | /* |
933 | * Purge or update corresponding prefix | 931 | * Purge or update corresponding prefix |
@@ -2529,6 +2527,9 @@ static void sit_add_v4_addrs(struct inet6_dev *idev) | |||
2529 | static void init_loopback(struct net_device *dev) | 2527 | static void init_loopback(struct net_device *dev) |
2530 | { | 2528 | { |
2531 | struct inet6_dev *idev; | 2529 | struct inet6_dev *idev; |
2530 | struct net_device *sp_dev; | ||
2531 | struct inet6_ifaddr *sp_ifa; | ||
2532 | struct rt6_info *sp_rt; | ||
2532 | 2533 | ||
2533 | /* ::1 */ | 2534 | /* ::1 */ |
2534 | 2535 | ||
@@ -2540,6 +2541,30 @@ static void init_loopback(struct net_device *dev) | |||
2540 | } | 2541 | } |
2541 | 2542 | ||
2542 | add_addr(idev, &in6addr_loopback, 128, IFA_HOST); | 2543 | add_addr(idev, &in6addr_loopback, 128, IFA_HOST); |
2544 | |||
2545 | /* Add routes to other interface's IPv6 addresses */ | ||
2546 | for_each_netdev(dev_net(dev), sp_dev) { | ||
2547 | if (!strcmp(sp_dev->name, dev->name)) | ||
2548 | continue; | ||
2549 | |||
2550 | idev = __in6_dev_get(sp_dev); | ||
2551 | if (!idev) | ||
2552 | continue; | ||
2553 | |||
2554 | read_lock_bh(&idev->lock); | ||
2555 | list_for_each_entry(sp_ifa, &idev->addr_list, if_list) { | ||
2556 | |||
2557 | if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE)) | ||
2558 | continue; | ||
2559 | |||
2560 | sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0); | ||
2561 | |||
2562 | /* Failure cases are ignored */ | ||
2563 | if (!IS_ERR(sp_rt)) | ||
2564 | ip6_ins_rt(sp_rt); | ||
2565 | } | ||
2566 | read_unlock_bh(&idev->lock); | ||
2567 | } | ||
2543 | } | 2568 | } |
2544 | 2569 | ||
2545 | static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr) | 2570 | static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr) |
@@ -2961,7 +2986,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
2961 | 2986 | ||
2962 | if (state != INET6_IFADDR_STATE_DEAD) { | 2987 | if (state != INET6_IFADDR_STATE_DEAD) { |
2963 | __ipv6_ifa_notify(RTM_DELADDR, ifa); | 2988 | __ipv6_ifa_notify(RTM_DELADDR, ifa); |
2964 | atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa); | 2989 | inet6addr_notifier_call_chain(NETDEV_DOWN, ifa); |
2965 | } | 2990 | } |
2966 | in6_ifa_put(ifa); | 2991 | in6_ifa_put(ifa); |
2967 | 2992 | ||
@@ -4842,22 +4867,6 @@ static struct pernet_operations addrconf_ops = { | |||
4842 | .exit = addrconf_exit_net, | 4867 | .exit = addrconf_exit_net, |
4843 | }; | 4868 | }; |
4844 | 4869 | ||
4845 | /* | ||
4846 | * Device notifier | ||
4847 | */ | ||
4848 | |||
4849 | int register_inet6addr_notifier(struct notifier_block *nb) | ||
4850 | { | ||
4851 | return atomic_notifier_chain_register(&inet6addr_chain, nb); | ||
4852 | } | ||
4853 | EXPORT_SYMBOL(register_inet6addr_notifier); | ||
4854 | |||
4855 | int unregister_inet6addr_notifier(struct notifier_block *nb) | ||
4856 | { | ||
4857 | return atomic_notifier_chain_unregister(&inet6addr_chain, nb); | ||
4858 | } | ||
4859 | EXPORT_SYMBOL(unregister_inet6addr_notifier); | ||
4860 | |||
4861 | static struct rtnl_af_ops inet6_ops = { | 4870 | static struct rtnl_af_ops inet6_ops = { |
4862 | .family = AF_INET6, | 4871 | .family = AF_INET6, |
4863 | .fill_link_af = inet6_fill_link_af, | 4872 | .fill_link_af = inet6_fill_link_af, |
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c index d051e5f4bf34..72104562c864 100644 --- a/net/ipv6/addrconf_core.c +++ b/net/ipv6/addrconf_core.c | |||
@@ -78,3 +78,22 @@ int __ipv6_addr_type(const struct in6_addr *addr) | |||
78 | } | 78 | } |
79 | EXPORT_SYMBOL(__ipv6_addr_type); | 79 | EXPORT_SYMBOL(__ipv6_addr_type); |
80 | 80 | ||
81 | static ATOMIC_NOTIFIER_HEAD(inet6addr_chain); | ||
82 | |||
83 | int register_inet6addr_notifier(struct notifier_block *nb) | ||
84 | { | ||
85 | return atomic_notifier_chain_register(&inet6addr_chain, nb); | ||
86 | } | ||
87 | EXPORT_SYMBOL(register_inet6addr_notifier); | ||
88 | |||
89 | int unregister_inet6addr_notifier(struct notifier_block *nb) | ||
90 | { | ||
91 | return atomic_notifier_chain_unregister(&inet6addr_chain, nb); | ||
92 | } | ||
93 | EXPORT_SYMBOL(unregister_inet6addr_notifier); | ||
94 | |||
95 | int inet6addr_notifier_call_chain(unsigned long val, void *v) | ||
96 | { | ||
97 | return atomic_notifier_call_chain(&inet6addr_chain, val, v); | ||
98 | } | ||
99 | EXPORT_SYMBOL(inet6addr_notifier_call_chain); | ||
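
The inet6addr notifier chain and its register/unregister helpers move from addrconf.c into addrconf_core.c, and gain an inet6addr_notifier_call_chain() wrapper, apparently so that code outside addrconf can register for and fire IPv6 address events without reaching into the chain or depending on addrconf directly. A consumer registers in the usual notifier fashion, roughly as below (my_inet6addr_event and my_inet6addr_nb are hypothetical):

	#include <linux/kernel.h>
	#include <linux/netdevice.h>	/* NETDEV_UP / NETDEV_DOWN event codes */
	#include <linux/notifier.h>
	#include <net/addrconf.h>	/* register_inet6addr_notifier() */
	#include <net/if_inet6.h>	/* struct inet6_ifaddr */

	static int my_inet6addr_event(struct notifier_block *nb,
				      unsigned long event, void *ptr)
	{
		struct inet6_ifaddr *ifa = ptr;

		pr_debug("inet6 addr %pI6c %s\n", &ifa->addr,
			 event == NETDEV_UP ? "added" : "removed");
		return NOTIFY_DONE;
	}

	static struct notifier_block my_inet6addr_nb = {
		.notifier_call = my_inet6addr_event,
	};

	/* somewhere in init code:  register_inet6addr_notifier(&my_inet6addr_nb); */
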
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index e33fe0ab2568..2bab2aa59745 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c | |||
@@ -118,6 +118,18 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt | |||
118 | ipv6_addr_loopback(&hdr->daddr)) | 118 | ipv6_addr_loopback(&hdr->daddr)) |
119 | goto err; | 119 | goto err; |
120 | 120 | ||
121 | /* RFC4291 Errata ID: 3480 | ||
122 | * Interface-Local scope spans only a single interface on a | ||
123 | * node and is useful only for loopback transmission of | ||
124 | * multicast. Packets with interface-local scope received | ||
125 | * from another node must be discarded. | ||
126 | */ | ||
127 | if (!(skb->pkt_type == PACKET_LOOPBACK || | ||
128 | dev->flags & IFF_LOOPBACK) && | ||
129 | ipv6_addr_is_multicast(&hdr->daddr) && | ||
130 | IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 1) | ||
131 | goto err; | ||
132 | |||
121 | /* RFC4291 2.7 | 133 | /* RFC4291 2.7 |
122 | * Nodes must not originate a packet to a multicast address whose scope | 134 | * Nodes must not originate a packet to a multicast address whose scope |
123 | * field contains the reserved value 0; if such a packet is received, it | 135 | * field contains the reserved value 0; if such a packet is received, it |
diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c index 33608c610276..cb631143721c 100644 --- a/net/ipv6/netfilter/ip6t_NPT.c +++ b/net/ipv6/netfilter/ip6t_NPT.c | |||
@@ -57,7 +57,7 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt, | |||
57 | if (pfx_len - i >= 32) | 57 | if (pfx_len - i >= 32) |
58 | mask = 0; | 58 | mask = 0; |
59 | else | 59 | else |
60 | mask = htonl(~((1 << (pfx_len - i)) - 1)); | 60 | mask = htonl((1 << (i - pfx_len + 32)) - 1); |
61 | 61 | ||
62 | idx = i / 32; | 62 | idx = i / 32; |
63 | addr->s6_addr32[idx] &= mask; | 63 | addr->s6_addr32[idx] &= mask; |
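
The one-line ip6t_NPT change replaces the mask expression. With n = pfx_len - i prefix bits falling into the current 32-bit word (0 < n < 32), the mask must keep exactly the 32 - n low-order suffix bits of the word in network byte order, which is htonl((1 << (32 - n)) - 1), i.e. htonl((1 << (i - pfx_len + 32)) - 1). The old expression ~((1 << (pfx_len - i)) - 1) set the top 32 - n bits instead, so the wrong part of the word survived the masking. A tiny user-space check of the two formulas:

	#include <stdio.h>
	#include <stdint.h>
	#include <arpa/inet.h>

	/* n: number of prefix bits that fall into this 32-bit word.
	 * Before htonl(), the mask must keep the low 32 - n bits, i.e. the
	 * part of the word not covered by the prefix. */
	int main(void)
	{
		for (unsigned int n = 1; n < 32; n++) {
			uint32_t buggy = ~((1u << n) - 1);	/* old: keeps the top 32-n bits */
			uint32_t fixed = (1u << (32 - n)) - 1;	/* new: keeps the bottom 32-n bits */

			printf("n=%2u  old=0x%08x  new=0x%08x  wire=0x%08x\n",
			       n, buggy, fixed, htonl(fixed));
		}
		return 0;
	}
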
diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c index 5060d54199ab..e0983f3648a6 100644 --- a/net/ipv6/netfilter/ip6t_rpfilter.c +++ b/net/ipv6/netfilter/ip6t_rpfilter.c | |||
@@ -71,6 +71,12 @@ static bool rpfilter_lookup_reverse6(const struct sk_buff *skb, | |||
71 | return ret; | 71 | return ret; |
72 | } | 72 | } |
73 | 73 | ||
74 | static bool rpfilter_is_local(const struct sk_buff *skb) | ||
75 | { | ||
76 | const struct rt6_info *rt = (const void *) skb_dst(skb); | ||
77 | return rt && (rt->rt6i_flags & RTF_LOCAL); | ||
78 | } | ||
79 | |||
74 | static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) | 80 | static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) |
75 | { | 81 | { |
76 | const struct xt_rpfilter_info *info = par->matchinfo; | 82 | const struct xt_rpfilter_info *info = par->matchinfo; |
@@ -78,7 +84,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par) | |||
78 | struct ipv6hdr *iph; | 84 | struct ipv6hdr *iph; |
79 | bool invert = info->flags & XT_RPFILTER_INVERT; | 85 | bool invert = info->flags & XT_RPFILTER_INVERT; |
80 | 86 | ||
81 | if (par->in->flags & IFF_LOOPBACK) | 87 | if (rpfilter_is_local(skb)) |
82 | return true ^ invert; | 88 | return true ^ invert; |
83 | 89 | ||
84 | iph = ipv6_hdr(skb); | 90 | iph = ipv6_hdr(skb); |
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 196ab9347ad1..0ba10e53a629 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c | |||
@@ -330,9 +330,17 @@ found: | |||
330 | } | 330 | } |
331 | 331 | ||
332 | if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && | 332 | if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && |
333 | fq->q.meat == fq->q.len) | 333 | fq->q.meat == fq->q.len) { |
334 | return ip6_frag_reasm(fq, prev, dev); | 334 | int res; |
335 | unsigned long orefdst = skb->_skb_refdst; | ||
336 | |||
337 | skb->_skb_refdst = 0UL; | ||
338 | res = ip6_frag_reasm(fq, prev, dev); | ||
339 | skb->_skb_refdst = orefdst; | ||
340 | return res; | ||
341 | } | ||
335 | 342 | ||
343 | skb_dst_drop(skb); | ||
336 | inet_frag_lru_move(&fq->q); | 344 | inet_frag_lru_move(&fq->q); |
337 | return -1; | 345 | return -1; |
338 | 346 | ||
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index f6d629fd6aee..46a5be85be87 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -386,6 +386,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
386 | 386 | ||
387 | if (dst) | 387 | if (dst) |
388 | dst->ops->redirect(dst, sk, skb); | 388 | dst->ops->redirect(dst, sk, skb); |
389 | goto out; | ||
389 | } | 390 | } |
390 | 391 | ||
391 | if (type == ICMPV6_PKT_TOOBIG) { | 392 | if (type == ICMPV6_PKT_TOOBIG) { |
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index d28e7f014cc6..e493b3397ae3 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c | |||
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, | |||
1386 | 1386 | ||
1387 | IRDA_DEBUG(4, "%s()\n", __func__); | 1387 | IRDA_DEBUG(4, "%s()\n", __func__); |
1388 | 1388 | ||
1389 | msg->msg_namelen = 0; | ||
1390 | |||
1389 | skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, | 1391 | skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, |
1390 | flags & MSG_DONTWAIT, &err); | 1392 | flags & MSG_DONTWAIT, &err); |
1391 | if (!skb) | 1393 | if (!skb) |
diff --git a/net/irda/iriap.c b/net/irda/iriap.c index 29340a9a6fb9..e1b37f5a2691 100644 --- a/net/irda/iriap.c +++ b/net/irda/iriap.c | |||
@@ -303,7 +303,8 @@ static void iriap_disconnect_indication(void *instance, void *sap, | |||
303 | { | 303 | { |
304 | struct iriap_cb *self; | 304 | struct iriap_cb *self; |
305 | 305 | ||
306 | IRDA_DEBUG(4, "%s(), reason=%s\n", __func__, irlmp_reasons[reason]); | 306 | IRDA_DEBUG(4, "%s(), reason=%s [%d]\n", __func__, |
307 | irlmp_reason_str(reason), reason); | ||
307 | 308 | ||
308 | self = instance; | 309 | self = instance; |
309 | 310 | ||
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c index 6115a44c0a24..1064621da6f6 100644 --- a/net/irda/irlmp.c +++ b/net/irda/irlmp.c | |||
@@ -66,8 +66,15 @@ const char *irlmp_reasons[] = { | |||
66 | "LM_LAP_RESET", | 66 | "LM_LAP_RESET", |
67 | "LM_INIT_DISCONNECT", | 67 | "LM_INIT_DISCONNECT", |
68 | "ERROR, NOT USED", | 68 | "ERROR, NOT USED", |
69 | "UNKNOWN", | ||
69 | }; | 70 | }; |
70 | 71 | ||
72 | const char *irlmp_reason_str(LM_REASON reason) | ||
73 | { | ||
74 | reason = min_t(size_t, reason, ARRAY_SIZE(irlmp_reasons) - 1); | ||
75 | return irlmp_reasons[reason]; | ||
76 | } | ||
77 | |||
71 | /* | 78 | /* |
72 | * Function irlmp_init (void) | 79 | * Function irlmp_init (void) |
73 | * | 80 | * |
@@ -747,7 +754,8 @@ void irlmp_disconnect_indication(struct lsap_cb *self, LM_REASON reason, | |||
747 | { | 754 | { |
748 | struct lsap_cb *lsap; | 755 | struct lsap_cb *lsap; |
749 | 756 | ||
750 | IRDA_DEBUG(1, "%s(), reason=%s\n", __func__, irlmp_reasons[reason]); | 757 | IRDA_DEBUG(1, "%s(), reason=%s [%d]\n", __func__, |
758 | irlmp_reason_str(reason), reason); | ||
751 | IRDA_ASSERT(self != NULL, return;); | 759 | IRDA_ASSERT(self != NULL, return;); |
752 | IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); | 760 | IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); |
753 | 761 | ||
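
irlmp_reasons[] is indexed by a reason code that comes off the wire, so the new irlmp_reason_str() helper clamps the index and the table gains a trailing "UNKNOWN" entry; an unexpected reason now prints as UNKNOWN together with the raw number instead of reading past the end of the array. The same bounds-clamped table lookup in isolation (table abridged to a few of the entries visible above; reason_to_str is an illustrative name):

	#include <linux/kernel.h>	/* ARRAY_SIZE, min_t */

	static const char * const reason_str[] = {
		"LM_LAP_RESET",
		"LM_INIT_DISCONNECT",
		"ERROR, NOT USED",
		"UNKNOWN",		/* catch-all; must stay the last entry */
	};

	static const char *reason_to_str(unsigned int reason)
	{
		/* clamp to the sentinel so any out-of-range code maps to "UNKNOWN" */
		reason = min_t(unsigned int, reason, ARRAY_SIZE(reason_str) - 1);
		return reason_str[reason];
	}
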
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index a7d11ffe4284..206ce6db2c36 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c | |||
@@ -49,12 +49,6 @@ static const u8 iprm_shutdown[8] = | |||
49 | 49 | ||
50 | #define TRGCLS_SIZE (sizeof(((struct iucv_message *)0)->class)) | 50 | #define TRGCLS_SIZE (sizeof(((struct iucv_message *)0)->class)) |
51 | 51 | ||
52 | /* macros to set/get socket control buffer at correct offset */ | ||
53 | #define CB_TAG(skb) ((skb)->cb) /* iucv message tag */ | ||
54 | #define CB_TAG_LEN (sizeof(((struct iucv_message *) 0)->tag)) | ||
55 | #define CB_TRGCLS(skb) ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */ | ||
56 | #define CB_TRGCLS_LEN (TRGCLS_SIZE) | ||
57 | |||
58 | #define __iucv_sock_wait(sk, condition, timeo, ret) \ | 52 | #define __iucv_sock_wait(sk, condition, timeo, ret) \ |
59 | do { \ | 53 | do { \ |
60 | DEFINE_WAIT(__wait); \ | 54 | DEFINE_WAIT(__wait); \ |
@@ -1141,7 +1135,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
1141 | 1135 | ||
1142 | /* increment and save iucv message tag for msg_completion cbk */ | 1136 | /* increment and save iucv message tag for msg_completion cbk */ |
1143 | txmsg.tag = iucv->send_tag++; | 1137 | txmsg.tag = iucv->send_tag++; |
1144 | memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN); | 1138 | IUCV_SKB_CB(skb)->tag = txmsg.tag; |
1145 | 1139 | ||
1146 | if (iucv->transport == AF_IUCV_TRANS_HIPER) { | 1140 | if (iucv->transport == AF_IUCV_TRANS_HIPER) { |
1147 | atomic_inc(&iucv->msg_sent); | 1141 | atomic_inc(&iucv->msg_sent); |
@@ -1224,7 +1218,7 @@ static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len) | |||
1224 | return -ENOMEM; | 1218 | return -ENOMEM; |
1225 | 1219 | ||
1226 | /* copy target class to control buffer of new skb */ | 1220 | /* copy target class to control buffer of new skb */ |
1227 | memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN); | 1221 | IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class; |
1228 | 1222 | ||
1229 | /* copy data fragment */ | 1223 | /* copy data fragment */ |
1230 | memcpy(nskb->data, skb->data + copied, size); | 1224 | memcpy(nskb->data, skb->data + copied, size); |
@@ -1256,7 +1250,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb, | |||
1256 | 1250 | ||
1257 | /* store msg target class in the second 4 bytes of skb ctrl buffer */ | 1251 | /* store msg target class in the second 4 bytes of skb ctrl buffer */ |
1258 | /* Note: the first 4 bytes are reserved for msg tag */ | 1252 | /* Note: the first 4 bytes are reserved for msg tag */ |
1259 | memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN); | 1253 | IUCV_SKB_CB(skb)->class = msg->class; |
1260 | 1254 | ||
1261 | /* check for special IPRM messages (e.g. iucv_sock_shutdown) */ | 1255 | /* check for special IPRM messages (e.g. iucv_sock_shutdown) */ |
1262 | if ((msg->flags & IUCV_IPRMDATA) && len > 7) { | 1256 | if ((msg->flags & IUCV_IPRMDATA) && len > 7) { |
@@ -1292,6 +1286,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb, | |||
1292 | } | 1286 | } |
1293 | } | 1287 | } |
1294 | 1288 | ||
1289 | IUCV_SKB_CB(skb)->offset = 0; | ||
1295 | if (sock_queue_rcv_skb(sk, skb)) | 1290 | if (sock_queue_rcv_skb(sk, skb)) |
1296 | skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb); | 1291 | skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb); |
1297 | } | 1292 | } |
@@ -1327,6 +1322,9 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1327 | unsigned int copied, rlen; | 1322 | unsigned int copied, rlen; |
1328 | struct sk_buff *skb, *rskb, *cskb; | 1323 | struct sk_buff *skb, *rskb, *cskb; |
1329 | int err = 0; | 1324 | int err = 0; |
1325 | u32 offset; | ||
1326 | |||
1327 | msg->msg_namelen = 0; | ||
1330 | 1328 | ||
1331 | if ((sk->sk_state == IUCV_DISCONN) && | 1329 | if ((sk->sk_state == IUCV_DISCONN) && |
1332 | skb_queue_empty(&iucv->backlog_skb_q) && | 1330 | skb_queue_empty(&iucv->backlog_skb_q) && |
@@ -1346,13 +1344,14 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1346 | return err; | 1344 | return err; |
1347 | } | 1345 | } |
1348 | 1346 | ||
1349 | rlen = skb->len; /* real length of skb */ | 1347 | offset = IUCV_SKB_CB(skb)->offset; |
1348 | rlen = skb->len - offset; /* real length of skb */ | ||
1350 | copied = min_t(unsigned int, rlen, len); | 1349 | copied = min_t(unsigned int, rlen, len); |
1351 | if (!rlen) | 1350 | if (!rlen) |
1352 | sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN; | 1351 | sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN; |
1353 | 1352 | ||
1354 | cskb = skb; | 1353 | cskb = skb; |
1355 | if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) { | 1354 | if (skb_copy_datagram_iovec(cskb, offset, msg->msg_iov, copied)) { |
1356 | if (!(flags & MSG_PEEK)) | 1355 | if (!(flags & MSG_PEEK)) |
1357 | skb_queue_head(&sk->sk_receive_queue, skb); | 1356 | skb_queue_head(&sk->sk_receive_queue, skb); |
1358 | return -EFAULT; | 1357 | return -EFAULT; |
@@ -1370,7 +1369,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1370 | * get the trgcls from the control buffer of the skb due to | 1369 | * get the trgcls from the control buffer of the skb due to |
1371 | * fragmentation of original iucv message. */ | 1370 | * fragmentation of original iucv message. */ |
1372 | err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS, | 1371 | err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS, |
1373 | CB_TRGCLS_LEN, CB_TRGCLS(skb)); | 1372 | sizeof(IUCV_SKB_CB(skb)->class), |
1373 | (void *)&IUCV_SKB_CB(skb)->class); | ||
1374 | if (err) { | 1374 | if (err) { |
1375 | if (!(flags & MSG_PEEK)) | 1375 | if (!(flags & MSG_PEEK)) |
1376 | skb_queue_head(&sk->sk_receive_queue, skb); | 1376 | skb_queue_head(&sk->sk_receive_queue, skb); |
@@ -1382,9 +1382,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1382 | 1382 | ||
1383 | /* SOCK_STREAM: re-queue skb if it contains unreceived data */ | 1383 | /* SOCK_STREAM: re-queue skb if it contains unreceived data */ |
1384 | if (sk->sk_type == SOCK_STREAM) { | 1384 | if (sk->sk_type == SOCK_STREAM) { |
1385 | skb_pull(skb, copied); | 1385 | if (copied < rlen) { |
1386 | if (skb->len) { | 1386 | IUCV_SKB_CB(skb)->offset = offset + copied; |
1387 | skb_queue_head(&sk->sk_receive_queue, skb); | ||
1388 | goto done; | 1387 | goto done; |
1389 | } | 1388 | } |
1390 | } | 1389 | } |
@@ -1403,6 +1402,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1403 | spin_lock_bh(&iucv->message_q.lock); | 1402 | spin_lock_bh(&iucv->message_q.lock); |
1404 | rskb = skb_dequeue(&iucv->backlog_skb_q); | 1403 | rskb = skb_dequeue(&iucv->backlog_skb_q); |
1405 | while (rskb) { | 1404 | while (rskb) { |
1405 | IUCV_SKB_CB(rskb)->offset = 0; | ||
1406 | if (sock_queue_rcv_skb(sk, rskb)) { | 1406 | if (sock_queue_rcv_skb(sk, rskb)) { |
1407 | skb_queue_head(&iucv->backlog_skb_q, | 1407 | skb_queue_head(&iucv->backlog_skb_q, |
1408 | rskb); | 1408 | rskb); |
@@ -1830,7 +1830,7 @@ static void iucv_callback_txdone(struct iucv_path *path, | |||
1830 | spin_lock_irqsave(&list->lock, flags); | 1830 | spin_lock_irqsave(&list->lock, flags); |
1831 | 1831 | ||
1832 | while (list_skb != (struct sk_buff *)list) { | 1832 | while (list_skb != (struct sk_buff *)list) { |
1833 | if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) { | 1833 | if (msg->tag != IUCV_SKB_CB(list_skb)->tag) { |
1834 | this = list_skb; | 1834 | this = list_skb; |
1835 | break; | 1835 | break; |
1836 | } | 1836 | } |
@@ -2091,6 +2091,7 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb) | |||
2091 | skb_pull(skb, sizeof(struct af_iucv_trans_hdr)); | 2091 | skb_pull(skb, sizeof(struct af_iucv_trans_hdr)); |
2092 | skb_reset_transport_header(skb); | 2092 | skb_reset_transport_header(skb); |
2093 | skb_reset_network_header(skb); | 2093 | skb_reset_network_header(skb); |
2094 | IUCV_SKB_CB(skb)->offset = 0; | ||
2094 | spin_lock(&iucv->message_q.lock); | 2095 | spin_lock(&iucv->message_q.lock); |
2095 | if (skb_queue_empty(&iucv->backlog_skb_q)) { | 2096 | if (skb_queue_empty(&iucv->backlog_skb_q)) { |
2096 | if (sock_queue_rcv_skb(sk, skb)) { | 2097 | if (sock_queue_rcv_skb(sk, skb)) { |
@@ -2195,8 +2196,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, | |||
2195 | /* fall through and receive zero length data */ | 2196 | /* fall through and receive zero length data */ |
2196 | case 0: | 2197 | case 0: |
2197 | /* plain data frame */ | 2198 | /* plain data frame */ |
2198 | memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class, | 2199 | IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class; |
2199 | CB_TRGCLS_LEN); | ||
2200 | err = afiucv_hs_callback_rx(sk, skb); | 2200 | err = afiucv_hs_callback_rx(sk, skb); |
2201 | break; | 2201 | break; |
2202 | default: | 2202 | default: |
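
The af_iucv.c changes retire the hand-rolled CB_TAG/CB_TRGCLS offset macros in favour of an IUCV_SKB_CB() accessor and add an offset field, so a SOCK_STREAM reader that consumes only part of an skb records how far it got instead of modifying the queued skb with skb_pull() and requeueing it. The usual shape of such a per-skb control block, reconstructed from the field names the patch uses; the exact definition lives in the driver's header and may differ:

	#include <linux/bug.h>
	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/skbuff.h>
	#include <linux/types.h>

	struct iucv_skb_cb {
		u32 class;	/* target class of the IUCV message */
		u32 tag;	/* message tag, matched on transmit completion */
		u32 offset;	/* bytes of this skb already delivered to the reader */
	};

	#define IUCV_SKB_CB(skb)	((struct iucv_skb_cb *)&((skb)->cb[0]))

	static int __init iucv_cb_sanity(void)
	{
		/* skb->cb is only 48 bytes; make sure the control block fits */
		BUILD_BUG_ON(sizeof(struct iucv_skb_cb) >
			     FIELD_SIZEOF(struct sk_buff, cb));
		return 0;
	}
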
diff --git a/net/key/af_key.c b/net/key/af_key.c index 8555f331ea60..5b1e5af25713 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -2693,6 +2693,7 @@ static int key_notify_policy_flush(const struct km_event *c) | |||
2693 | hdr->sadb_msg_pid = c->portid; | 2693 | hdr->sadb_msg_pid = c->portid; |
2694 | hdr->sadb_msg_version = PF_KEY_V2; | 2694 | hdr->sadb_msg_version = PF_KEY_V2; |
2695 | hdr->sadb_msg_errno = (uint8_t) 0; | 2695 | hdr->sadb_msg_errno = (uint8_t) 0; |
2696 | hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC; | ||
2696 | hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); | 2697 | hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); |
2697 | pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); | 2698 | pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); |
2698 | return 0; | 2699 | return 0; |
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index c74f5a91ff6a..b8a6039314e8 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c | |||
@@ -690,6 +690,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
690 | lsa->l2tp_addr = ipv6_hdr(skb)->saddr; | 690 | lsa->l2tp_addr = ipv6_hdr(skb)->saddr; |
691 | lsa->l2tp_flowinfo = 0; | 691 | lsa->l2tp_flowinfo = 0; |
692 | lsa->l2tp_scope_id = 0; | 692 | lsa->l2tp_scope_id = 0; |
693 | lsa->l2tp_conn_id = 0; | ||
693 | if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL) | 694 | if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL) |
694 | lsa->l2tp_scope_id = IP6CB(skb)->iif; | 695 | lsa->l2tp_scope_id = IP6CB(skb)->iif; |
695 | } | 696 | } |
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 88709882c464..48aaa89253e0 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c | |||
@@ -720,6 +720,8 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
720 | int target; /* Read at least this many bytes */ | 720 | int target; /* Read at least this many bytes */ |
721 | long timeo; | 721 | long timeo; |
722 | 722 | ||
723 | msg->msg_namelen = 0; | ||
724 | |||
723 | lock_sock(sk); | 725 | lock_sock(sk); |
724 | copied = -ENOTCONN; | 726 | copied = -ENOTCONN; |
725 | if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN)) | 727 | if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN)) |
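
This llc_ui_recvmsg hunk is one of several in this series (nfc, vsock, tipc, netrom and rose below follow the same pattern) that set msg_namelen to 0 at the top of recvmsg, or memset the sockaddr before filling it. If the path that fills msg_name is skipped while the stale length is left in place, the syscall layer copies uninitialized kernel stack back to userspace. A self-contained userspace analogue with toy names (toy_msghdr, toy_recv are inventions for illustration):

#include <string.h>
#include <stdio.h>

/* Toy model of a recvmsg()-style callee: the caller passes a name buffer and
 * expects 'namelen' to describe how much of it was actually written. */
struct toy_msghdr {
    char   name[16];
    size_t namelen;
};

static int toy_recv(struct toy_msghdr *msg, int have_peer_addr)
{
    /* Set the length up front: if no address is attached below, the caller
     * must see 0, not whatever bytes and length were left over. */
    msg->namelen = 0;

    if (have_peer_addr) {
        /* Zero the whole buffer before filling selected fields, so padding
         * and unused tail bytes never carry stale data back to the caller. */
        memset(msg->name, 0, sizeof(msg->name));
        memcpy(msg->name, "PEER", 4);
        msg->namelen = sizeof(msg->name);
    }
    return 0;
}

int main(void)
{
    struct toy_msghdr m = { .namelen = 99 };   /* stale caller value */

    toy_recv(&m, 0);
    printf("namelen=%zu\n", m.namelen);        /* prints 0, not 99 */
    return 0;
}
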
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index fb306814576a..a6893602f87a 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -2582,7 +2582,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local, | |||
2582 | list_del(&dep->list); | 2582 | list_del(&dep->list); |
2583 | mutex_unlock(&local->mtx); | 2583 | mutex_unlock(&local->mtx); |
2584 | 2584 | ||
2585 | ieee80211_roc_notify_destroy(dep); | 2585 | ieee80211_roc_notify_destroy(dep, true); |
2586 | return 0; | 2586 | return 0; |
2587 | } | 2587 | } |
2588 | 2588 | ||
@@ -2622,7 +2622,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local, | |||
2622 | ieee80211_start_next_roc(local); | 2622 | ieee80211_start_next_roc(local); |
2623 | mutex_unlock(&local->mtx); | 2623 | mutex_unlock(&local->mtx); |
2624 | 2624 | ||
2625 | ieee80211_roc_notify_destroy(found); | 2625 | ieee80211_roc_notify_destroy(found, true); |
2626 | } else { | 2626 | } else { |
2627 | /* work may be pending so use it all the time */ | 2627 | /* work may be pending so use it all the time */ |
2628 | found->abort = true; | 2628 | found->abort = true; |
@@ -2632,6 +2632,8 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local, | |||
2632 | 2632 | ||
2633 | /* work will clean up etc */ | 2633 | /* work will clean up etc */ |
2634 | flush_delayed_work(&found->work); | 2634 | flush_delayed_work(&found->work); |
2635 | WARN_ON(!found->to_be_freed); | ||
2636 | kfree(found); | ||
2635 | } | 2637 | } |
2636 | 2638 | ||
2637 | return 0; | 2639 | return 0; |
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index 78c0d90dd641..931be419ab5a 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c | |||
@@ -63,6 +63,7 @@ ieee80211_new_chanctx(struct ieee80211_local *local, | |||
63 | enum ieee80211_chanctx_mode mode) | 63 | enum ieee80211_chanctx_mode mode) |
64 | { | 64 | { |
65 | struct ieee80211_chanctx *ctx; | 65 | struct ieee80211_chanctx *ctx; |
66 | u32 changed; | ||
66 | int err; | 67 | int err; |
67 | 68 | ||
68 | lockdep_assert_held(&local->chanctx_mtx); | 69 | lockdep_assert_held(&local->chanctx_mtx); |
@@ -76,6 +77,13 @@ ieee80211_new_chanctx(struct ieee80211_local *local, | |||
76 | ctx->conf.rx_chains_dynamic = 1; | 77 | ctx->conf.rx_chains_dynamic = 1; |
77 | ctx->mode = mode; | 78 | ctx->mode = mode; |
78 | 79 | ||
80 | /* acquire mutex to prevent idle from changing */ | ||
81 | mutex_lock(&local->mtx); | ||
82 | /* turn idle off *before* setting channel -- some drivers need that */ | ||
83 | changed = ieee80211_idle_off(local); | ||
84 | if (changed) | ||
85 | ieee80211_hw_config(local, changed); | ||
86 | |||
79 | if (!local->use_chanctx) { | 87 | if (!local->use_chanctx) { |
80 | local->_oper_channel_type = | 88 | local->_oper_channel_type = |
81 | cfg80211_get_chandef_type(chandef); | 89 | cfg80211_get_chandef_type(chandef); |
@@ -85,14 +93,17 @@ ieee80211_new_chanctx(struct ieee80211_local *local, | |||
85 | err = drv_add_chanctx(local, ctx); | 93 | err = drv_add_chanctx(local, ctx); |
86 | if (err) { | 94 | if (err) { |
87 | kfree(ctx); | 95 | kfree(ctx); |
88 | return ERR_PTR(err); | 96 | ctx = ERR_PTR(err); |
97 | |||
98 | ieee80211_recalc_idle(local); | ||
99 | goto out; | ||
89 | } | 100 | } |
90 | } | 101 | } |
91 | 102 | ||
103 | /* and keep the mutex held until the new chanctx is on the list */ | ||
92 | list_add_rcu(&ctx->list, &local->chanctx_list); | 104 | list_add_rcu(&ctx->list, &local->chanctx_list); |
93 | 105 | ||
94 | mutex_lock(&local->mtx); | 106 | out: |
95 | ieee80211_recalc_idle(local); | ||
96 | mutex_unlock(&local->mtx); | 107 | mutex_unlock(&local->mtx); |
97 | 108 | ||
98 | return ctx; | 109 | return ctx; |
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 388580a1bada..5672533a0832 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -309,6 +309,7 @@ struct ieee80211_roc_work { | |||
309 | struct ieee80211_channel *chan; | 309 | struct ieee80211_channel *chan; |
310 | 310 | ||
311 | bool started, abort, hw_begun, notified; | 311 | bool started, abort, hw_begun, notified; |
312 | bool to_be_freed; | ||
312 | 313 | ||
313 | unsigned long hw_start_time; | 314 | unsigned long hw_start_time; |
314 | 315 | ||
@@ -1347,7 +1348,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local); | |||
1347 | void ieee80211_roc_setup(struct ieee80211_local *local); | 1348 | void ieee80211_roc_setup(struct ieee80211_local *local); |
1348 | void ieee80211_start_next_roc(struct ieee80211_local *local); | 1349 | void ieee80211_start_next_roc(struct ieee80211_local *local); |
1349 | void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata); | 1350 | void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata); |
1350 | void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc); | 1351 | void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free); |
1351 | void ieee80211_sw_roc_work(struct work_struct *work); | 1352 | void ieee80211_sw_roc_work(struct work_struct *work); |
1352 | void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc); | 1353 | void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc); |
1353 | 1354 | ||
@@ -1361,6 +1362,7 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, | |||
1361 | enum nl80211_iftype type); | 1362 | enum nl80211_iftype type); |
1362 | void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata); | 1363 | void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata); |
1363 | void ieee80211_remove_interfaces(struct ieee80211_local *local); | 1364 | void ieee80211_remove_interfaces(struct ieee80211_local *local); |
1365 | u32 ieee80211_idle_off(struct ieee80211_local *local); | ||
1364 | void ieee80211_recalc_idle(struct ieee80211_local *local); | 1366 | void ieee80211_recalc_idle(struct ieee80211_local *local); |
1365 | void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata, | 1367 | void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata, |
1366 | const int offset); | 1368 | const int offset); |
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index baaa8608e52d..9ed49ad0380f 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -78,7 +78,7 @@ void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata) | |||
78 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER); | 78 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER); |
79 | } | 79 | } |
80 | 80 | ||
81 | static u32 ieee80211_idle_off(struct ieee80211_local *local) | 81 | static u32 __ieee80211_idle_off(struct ieee80211_local *local) |
82 | { | 82 | { |
83 | if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE)) | 83 | if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE)) |
84 | return 0; | 84 | return 0; |
@@ -87,7 +87,7 @@ static u32 ieee80211_idle_off(struct ieee80211_local *local) | |||
87 | return IEEE80211_CONF_CHANGE_IDLE; | 87 | return IEEE80211_CONF_CHANGE_IDLE; |
88 | } | 88 | } |
89 | 89 | ||
90 | static u32 ieee80211_idle_on(struct ieee80211_local *local) | 90 | static u32 __ieee80211_idle_on(struct ieee80211_local *local) |
91 | { | 91 | { |
92 | if (local->hw.conf.flags & IEEE80211_CONF_IDLE) | 92 | if (local->hw.conf.flags & IEEE80211_CONF_IDLE) |
93 | return 0; | 93 | return 0; |
@@ -98,16 +98,18 @@ static u32 ieee80211_idle_on(struct ieee80211_local *local) | |||
98 | return IEEE80211_CONF_CHANGE_IDLE; | 98 | return IEEE80211_CONF_CHANGE_IDLE; |
99 | } | 99 | } |
100 | 100 | ||
101 | void ieee80211_recalc_idle(struct ieee80211_local *local) | 101 | static u32 __ieee80211_recalc_idle(struct ieee80211_local *local, |
102 | bool force_active) | ||
102 | { | 103 | { |
103 | bool working = false, scanning, active; | 104 | bool working = false, scanning, active; |
104 | unsigned int led_trig_start = 0, led_trig_stop = 0; | 105 | unsigned int led_trig_start = 0, led_trig_stop = 0; |
105 | struct ieee80211_roc_work *roc; | 106 | struct ieee80211_roc_work *roc; |
106 | u32 change; | ||
107 | 107 | ||
108 | lockdep_assert_held(&local->mtx); | 108 | lockdep_assert_held(&local->mtx); |
109 | 109 | ||
110 | active = !list_empty(&local->chanctx_list) || local->monitors; | 110 | active = force_active || |
111 | !list_empty(&local->chanctx_list) || | ||
112 | local->monitors; | ||
111 | 113 | ||
112 | if (!local->ops->remain_on_channel) { | 114 | if (!local->ops->remain_on_channel) { |
113 | list_for_each_entry(roc, &local->roc_list, list) { | 115 | list_for_each_entry(roc, &local->roc_list, list) { |
@@ -132,9 +134,18 @@ void ieee80211_recalc_idle(struct ieee80211_local *local) | |||
132 | ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop); | 134 | ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop); |
133 | 135 | ||
134 | if (working || scanning || active) | 136 | if (working || scanning || active) |
135 | change = ieee80211_idle_off(local); | 137 | return __ieee80211_idle_off(local); |
136 | else | 138 | return __ieee80211_idle_on(local); |
137 | change = ieee80211_idle_on(local); | 139 | } |
140 | |||
141 | u32 ieee80211_idle_off(struct ieee80211_local *local) | ||
142 | { | ||
143 | return __ieee80211_recalc_idle(local, true); | ||
144 | } | ||
145 | |||
146 | void ieee80211_recalc_idle(struct ieee80211_local *local) | ||
147 | { | ||
148 | u32 change = __ieee80211_recalc_idle(local, false); | ||
138 | if (change) | 149 | if (change) |
139 | ieee80211_hw_config(local, change); | 150 | ieee80211_hw_config(local, change); |
140 | } | 151 | } |
@@ -349,21 +360,19 @@ static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata) | |||
349 | static int ieee80211_add_virtual_monitor(struct ieee80211_local *local) | 360 | static int ieee80211_add_virtual_monitor(struct ieee80211_local *local) |
350 | { | 361 | { |
351 | struct ieee80211_sub_if_data *sdata; | 362 | struct ieee80211_sub_if_data *sdata; |
352 | int ret = 0; | 363 | int ret; |
353 | 364 | ||
354 | if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF)) | 365 | if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF)) |
355 | return 0; | 366 | return 0; |
356 | 367 | ||
357 | mutex_lock(&local->iflist_mtx); | 368 | ASSERT_RTNL(); |
358 | 369 | ||
359 | if (local->monitor_sdata) | 370 | if (local->monitor_sdata) |
360 | goto out_unlock; | 371 | return 0; |
361 | 372 | ||
362 | sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL); | 373 | sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL); |
363 | if (!sdata) { | 374 | if (!sdata) |
364 | ret = -ENOMEM; | 375 | return -ENOMEM; |
365 | goto out_unlock; | ||
366 | } | ||
367 | 376 | ||
368 | /* set up data */ | 377 | /* set up data */ |
369 | sdata->local = local; | 378 | sdata->local = local; |
@@ -377,13 +386,13 @@ static int ieee80211_add_virtual_monitor(struct ieee80211_local *local) | |||
377 | if (WARN_ON(ret)) { | 386 | if (WARN_ON(ret)) { |
378 | /* ok .. stupid driver, it asked for this! */ | 387 | /* ok .. stupid driver, it asked for this! */ |
379 | kfree(sdata); | 388 | kfree(sdata); |
380 | goto out_unlock; | 389 | return ret; |
381 | } | 390 | } |
382 | 391 | ||
383 | ret = ieee80211_check_queues(sdata); | 392 | ret = ieee80211_check_queues(sdata); |
384 | if (ret) { | 393 | if (ret) { |
385 | kfree(sdata); | 394 | kfree(sdata); |
386 | goto out_unlock; | 395 | return ret; |
387 | } | 396 | } |
388 | 397 | ||
389 | ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef, | 398 | ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef, |
@@ -391,13 +400,14 @@ static int ieee80211_add_virtual_monitor(struct ieee80211_local *local) | |||
391 | if (ret) { | 400 | if (ret) { |
392 | drv_remove_interface(local, sdata); | 401 | drv_remove_interface(local, sdata); |
393 | kfree(sdata); | 402 | kfree(sdata); |
394 | goto out_unlock; | 403 | return ret; |
395 | } | 404 | } |
396 | 405 | ||
406 | mutex_lock(&local->iflist_mtx); | ||
397 | rcu_assign_pointer(local->monitor_sdata, sdata); | 407 | rcu_assign_pointer(local->monitor_sdata, sdata); |
398 | out_unlock: | ||
399 | mutex_unlock(&local->iflist_mtx); | 408 | mutex_unlock(&local->iflist_mtx); |
400 | return ret; | 409 | |
410 | return 0; | ||
401 | } | 411 | } |
402 | 412 | ||
403 | static void ieee80211_del_virtual_monitor(struct ieee80211_local *local) | 413 | static void ieee80211_del_virtual_monitor(struct ieee80211_local *local) |
@@ -407,14 +417,20 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local) | |||
407 | if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF)) | 417 | if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF)) |
408 | return; | 418 | return; |
409 | 419 | ||
420 | ASSERT_RTNL(); | ||
421 | |||
410 | mutex_lock(&local->iflist_mtx); | 422 | mutex_lock(&local->iflist_mtx); |
411 | 423 | ||
412 | sdata = rcu_dereference_protected(local->monitor_sdata, | 424 | sdata = rcu_dereference_protected(local->monitor_sdata, |
413 | lockdep_is_held(&local->iflist_mtx)); | 425 | lockdep_is_held(&local->iflist_mtx)); |
414 | if (!sdata) | 426 | if (!sdata) { |
415 | goto out_unlock; | 427 | mutex_unlock(&local->iflist_mtx); |
428 | return; | ||
429 | } | ||
416 | 430 | ||
417 | rcu_assign_pointer(local->monitor_sdata, NULL); | 431 | rcu_assign_pointer(local->monitor_sdata, NULL); |
432 | mutex_unlock(&local->iflist_mtx); | ||
433 | |||
418 | synchronize_net(); | 434 | synchronize_net(); |
419 | 435 | ||
420 | ieee80211_vif_release_channel(sdata); | 436 | ieee80211_vif_release_channel(sdata); |
@@ -422,8 +438,6 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local) | |||
422 | drv_remove_interface(local, sdata); | 438 | drv_remove_interface(local, sdata); |
423 | 439 | ||
424 | kfree(sdata); | 440 | kfree(sdata); |
425 | out_unlock: | ||
426 | mutex_unlock(&local->iflist_mtx); | ||
427 | } | 441 | } |
428 | 442 | ||
429 | /* | 443 | /* |
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 29ce2aa87e7b..4749b3858695 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
@@ -1060,7 +1060,8 @@ void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) | |||
1060 | 1060 | ||
1061 | rcu_read_lock(); | 1061 | rcu_read_lock(); |
1062 | list_for_each_entry_rcu(sdata, &local->interfaces, list) | 1062 | list_for_each_entry_rcu(sdata, &local->interfaces, list) |
1063 | if (ieee80211_vif_is_mesh(&sdata->vif)) | 1063 | if (ieee80211_vif_is_mesh(&sdata->vif) && |
1064 | ieee80211_sdata_running(sdata)) | ||
1064 | ieee80211_queue_work(&local->hw, &sdata->work); | 1065 | ieee80211_queue_work(&local->hw, &sdata->work); |
1065 | rcu_read_unlock(); | 1066 | rcu_read_unlock(); |
1066 | } | 1067 | } |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 141577412d84..346ad4cfb013 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -3608,8 +3608,10 @@ void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local) | |||
3608 | 3608 | ||
3609 | /* Restart STA timers */ | 3609 | /* Restart STA timers */ |
3610 | rcu_read_lock(); | 3610 | rcu_read_lock(); |
3611 | list_for_each_entry_rcu(sdata, &local->interfaces, list) | 3611 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { |
3612 | ieee80211_restart_sta_timer(sdata); | 3612 | if (ieee80211_sdata_running(sdata)) |
3613 | ieee80211_restart_sta_timer(sdata); | ||
3614 | } | ||
3613 | rcu_read_unlock(); | 3615 | rcu_read_unlock(); |
3614 | } | 3616 | } |
3615 | 3617 | ||
@@ -3962,8 +3964,16 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, | |||
3962 | /* prep auth_data so we don't go into idle on disassoc */ | 3964 | /* prep auth_data so we don't go into idle on disassoc */ |
3963 | ifmgd->auth_data = auth_data; | 3965 | ifmgd->auth_data = auth_data; |
3964 | 3966 | ||
3965 | if (ifmgd->associated) | 3967 | if (ifmgd->associated) { |
3966 | ieee80211_set_disassoc(sdata, 0, 0, false, NULL); | 3968 | u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; |
3969 | |||
3970 | ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, | ||
3971 | WLAN_REASON_UNSPECIFIED, | ||
3972 | false, frame_buf); | ||
3973 | |||
3974 | __cfg80211_send_deauth(sdata->dev, frame_buf, | ||
3975 | sizeof(frame_buf)); | ||
3976 | } | ||
3967 | 3977 | ||
3968 | sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid); | 3978 | sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid); |
3969 | 3979 | ||
@@ -4023,8 +4033,16 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, | |||
4023 | 4033 | ||
4024 | mutex_lock(&ifmgd->mtx); | 4034 | mutex_lock(&ifmgd->mtx); |
4025 | 4035 | ||
4026 | if (ifmgd->associated) | 4036 | if (ifmgd->associated) { |
4027 | ieee80211_set_disassoc(sdata, 0, 0, false, NULL); | 4037 | u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; |
4038 | |||
4039 | ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, | ||
4040 | WLAN_REASON_UNSPECIFIED, | ||
4041 | false, frame_buf); | ||
4042 | |||
4043 | __cfg80211_send_deauth(sdata->dev, frame_buf, | ||
4044 | sizeof(frame_buf)); | ||
4045 | } | ||
4028 | 4046 | ||
4029 | if (ifmgd->auth_data && !ifmgd->auth_data->done) { | 4047 | if (ifmgd->auth_data && !ifmgd->auth_data->done) { |
4030 | err = -EBUSY; | 4048 | err = -EBUSY; |
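
Both the auth and assoc paths above now tear down an existing association by generating a real deauth frame (IEEE80211_STYPE_DEAUTH, WLAN_REASON_UNSPECIFIED) and reporting that exact frame to cfg80211, instead of the previous silent ieee80211_set_disassoc(sdata, 0, 0, ...). The point, as far as the hunk shows, is keeping the upper layer's state machine in step with what mac80211 actually did. A hedged sketch of the "tear down, keep the frame, notify with it" ordering; every name below (toy_link, link_teardown, notify_upper_layer) is hypothetical:

#include <stdio.h>
#include <string.h>

#define DEAUTH_FRAME_LEN 26            /* assumed fixed-size deauth frame */

struct toy_link {
    int associated;
};

/* Hypothetical stand-ins for ieee80211_set_disassoc()/__cfg80211_send_deauth(). */
static void link_teardown(struct toy_link *l, unsigned char *frame_buf)
{
    l->associated = 0;
    memset(frame_buf, 0xd0, DEAUTH_FRAME_LEN); /* pretend-built deauth frame */
}

static void notify_upper_layer(const unsigned char *frame_buf, size_t len)
{
    (void)frame_buf;
    printf("upper layer told about %zu-byte deauth\n", len);
}

static void start_new_auth(struct toy_link *l)
{
    if (l->associated) {
        unsigned char frame_buf[DEAUTH_FRAME_LEN];

        /* Tear the old link down first and keep the frame we generated... */
        link_teardown(l, frame_buf);
        /* ...then hand that exact frame to the layer above, so both layers
         * agree on why and how the old association ended. */
        notify_upper_layer(frame_buf, sizeof(frame_buf));
    }
    printf("authenticating with new BSS\n");
}

int main(void)
{
    struct toy_link l = { .associated = 1 };

    start_new_auth(&l);
    return 0;
}
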
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index cc79b4a2e821..430bd254e496 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c | |||
@@ -297,10 +297,13 @@ void ieee80211_start_next_roc(struct ieee80211_local *local) | |||
297 | } | 297 | } |
298 | } | 298 | } |
299 | 299 | ||
300 | void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc) | 300 | void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free) |
301 | { | 301 | { |
302 | struct ieee80211_roc_work *dep, *tmp; | 302 | struct ieee80211_roc_work *dep, *tmp; |
303 | 303 | ||
304 | if (WARN_ON(roc->to_be_freed)) | ||
305 | return; | ||
306 | |||
304 | /* was never transmitted */ | 307 | /* was never transmitted */ |
305 | if (roc->frame) { | 308 | if (roc->frame) { |
306 | cfg80211_mgmt_tx_status(&roc->sdata->wdev, | 309 | cfg80211_mgmt_tx_status(&roc->sdata->wdev, |
@@ -316,9 +319,12 @@ void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc) | |||
316 | GFP_KERNEL); | 319 | GFP_KERNEL); |
317 | 320 | ||
318 | list_for_each_entry_safe(dep, tmp, &roc->dependents, list) | 321 | list_for_each_entry_safe(dep, tmp, &roc->dependents, list) |
319 | ieee80211_roc_notify_destroy(dep); | 322 | ieee80211_roc_notify_destroy(dep, true); |
320 | 323 | ||
321 | kfree(roc); | 324 | if (free) |
325 | kfree(roc); | ||
326 | else | ||
327 | roc->to_be_freed = true; | ||
322 | } | 328 | } |
323 | 329 | ||
324 | void ieee80211_sw_roc_work(struct work_struct *work) | 330 | void ieee80211_sw_roc_work(struct work_struct *work) |
@@ -331,6 +337,9 @@ void ieee80211_sw_roc_work(struct work_struct *work) | |||
331 | 337 | ||
332 | mutex_lock(&local->mtx); | 338 | mutex_lock(&local->mtx); |
333 | 339 | ||
340 | if (roc->to_be_freed) | ||
341 | goto out_unlock; | ||
342 | |||
334 | if (roc->abort) | 343 | if (roc->abort) |
335 | goto finish; | 344 | goto finish; |
336 | 345 | ||
@@ -370,7 +379,7 @@ void ieee80211_sw_roc_work(struct work_struct *work) | |||
370 | finish: | 379 | finish: |
371 | list_del(&roc->list); | 380 | list_del(&roc->list); |
372 | started = roc->started; | 381 | started = roc->started; |
373 | ieee80211_roc_notify_destroy(roc); | 382 | ieee80211_roc_notify_destroy(roc, !roc->abort); |
374 | 383 | ||
375 | if (started) { | 384 | if (started) { |
376 | drv_flush(local, false); | 385 | drv_flush(local, false); |
@@ -410,7 +419,7 @@ static void ieee80211_hw_roc_done(struct work_struct *work) | |||
410 | 419 | ||
411 | list_del(&roc->list); | 420 | list_del(&roc->list); |
412 | 421 | ||
413 | ieee80211_roc_notify_destroy(roc); | 422 | ieee80211_roc_notify_destroy(roc, true); |
414 | 423 | ||
415 | /* if there's another roc, start it now */ | 424 | /* if there's another roc, start it now */ |
416 | ieee80211_start_next_roc(local); | 425 | ieee80211_start_next_roc(local); |
@@ -460,12 +469,14 @@ void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata) | |||
460 | list_for_each_entry_safe(roc, tmp, &tmp_list, list) { | 469 | list_for_each_entry_safe(roc, tmp, &tmp_list, list) { |
461 | if (local->ops->remain_on_channel) { | 470 | if (local->ops->remain_on_channel) { |
462 | list_del(&roc->list); | 471 | list_del(&roc->list); |
463 | ieee80211_roc_notify_destroy(roc); | 472 | ieee80211_roc_notify_destroy(roc, true); |
464 | } else { | 473 | } else { |
465 | ieee80211_queue_delayed_work(&local->hw, &roc->work, 0); | 474 | ieee80211_queue_delayed_work(&local->hw, &roc->work, 0); |
466 | 475 | ||
467 | /* work will clean up etc */ | 476 | /* work will clean up etc */ |
468 | flush_delayed_work(&roc->work); | 477 | flush_delayed_work(&roc->work); |
478 | WARN_ON(!roc->to_be_freed); | ||
479 | kfree(roc); | ||
469 | } | 480 | } |
470 | } | 481 | } |
471 | 482 | ||
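
The offchannel.c changes split "notify and tear down" from "free": ieee80211_roc_notify_destroy() now takes a free flag, and when the work item itself finishes the ROC it only marks roc->to_be_freed, while the thread that called flush_delayed_work() performs the kfree() afterwards (with a WARN_ON if the mark is missing). That gives the object exactly one owner at free time. A self-contained pthread sketch of the hand-off, under the assumption that "flush then free" maps onto "join then free"; all names are invented:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy model of the hand-off: the work function may finish the job, but the
 * canceller that waited for it is the one that frees the object. */
struct job {
    bool to_be_freed;
};

/* Stand-in for ieee80211_roc_notify_destroy(roc, free): callers that fully
 * own the object pass free_now=true; the work path passes false and only
 * marks the job, leaving the free to whoever flushed the work. */
static void job_finish(struct job *j, bool free_now)
{
    if (free_now)
        free(j);
    else
        j->to_be_freed = true;
}

static void *worker(void *arg)
{
    job_finish(arg, false);      /* the canceller still holds the pointer */
    return NULL;
}

int main(void)
{
    struct job *j = calloc(1, sizeof(*j));
    pthread_t t;

    if (!j)
        return 1;
    pthread_create(&t, NULL, worker, j);
    pthread_join(&t, NULL);      /* analogous to flush_delayed_work() */

    if (!j->to_be_freed)
        fprintf(stderr, "worker should have marked the job\n");
    free(j);                     /* single, well-defined free point */
    return 0;
}
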
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index bb73ed2d20b9..c6844ad080be 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -2675,7 +2675,19 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx) | |||
2675 | 2675 | ||
2676 | memset(nskb->cb, 0, sizeof(nskb->cb)); | 2676 | memset(nskb->cb, 0, sizeof(nskb->cb)); |
2677 | 2677 | ||
2678 | ieee80211_tx_skb(rx->sdata, nskb); | 2678 | if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) { |
2679 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb); | ||
2680 | |||
2681 | info->flags = IEEE80211_TX_CTL_TX_OFFCHAN | | ||
2682 | IEEE80211_TX_INTFL_OFFCHAN_TX_OK | | ||
2683 | IEEE80211_TX_CTL_NO_CCK_RATE; | ||
2684 | if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) | ||
2685 | info->hw_queue = | ||
2686 | local->hw.offchannel_tx_hw_queue; | ||
2687 | } | ||
2688 | |||
2689 | __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, | ||
2690 | status->band); | ||
2679 | } | 2691 | } |
2680 | dev_kfree_skb(rx->skb); | 2692 | dev_kfree_skb(rx->skb); |
2681 | return RX_QUEUED; | 2693 | return RX_QUEUED; |
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index a79ce820cb50..238a0cca320e 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -766,6 +766,7 @@ int __must_check __sta_info_destroy(struct sta_info *sta) | |||
766 | struct ieee80211_local *local; | 766 | struct ieee80211_local *local; |
767 | struct ieee80211_sub_if_data *sdata; | 767 | struct ieee80211_sub_if_data *sdata; |
768 | int ret, i; | 768 | int ret, i; |
769 | bool have_key = false; | ||
769 | 770 | ||
770 | might_sleep(); | 771 | might_sleep(); |
771 | 772 | ||
@@ -793,12 +794,19 @@ int __must_check __sta_info_destroy(struct sta_info *sta) | |||
793 | list_del_rcu(&sta->list); | 794 | list_del_rcu(&sta->list); |
794 | 795 | ||
795 | mutex_lock(&local->key_mtx); | 796 | mutex_lock(&local->key_mtx); |
796 | for (i = 0; i < NUM_DEFAULT_KEYS; i++) | 797 | for (i = 0; i < NUM_DEFAULT_KEYS; i++) { |
797 | __ieee80211_key_free(key_mtx_dereference(local, sta->gtk[i])); | 798 | __ieee80211_key_free(key_mtx_dereference(local, sta->gtk[i])); |
798 | if (sta->ptk) | 799 | have_key = true; |
800 | } | ||
801 | if (sta->ptk) { | ||
799 | __ieee80211_key_free(key_mtx_dereference(local, sta->ptk)); | 802 | __ieee80211_key_free(key_mtx_dereference(local, sta->ptk)); |
803 | have_key = true; | ||
804 | } | ||
800 | mutex_unlock(&local->key_mtx); | 805 | mutex_unlock(&local->key_mtx); |
801 | 806 | ||
807 | if (!have_key) | ||
808 | synchronize_net(); | ||
809 | |||
802 | sta->dead = true; | 810 | sta->dead = true; |
803 | 811 | ||
804 | local->num_sta--; | 812 | local->num_sta--; |
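
The sta_info.c hunk tracks whether any key teardown ran and, if none did, calls synchronize_net() itself. The apparent intent is that an RCU grace period must always separate list_del_rcu() of the station from the code that marks it dead and tears it down; when keys are freed, that grace period is presumably provided by the key-removal path, otherwise it has to happen here. A very rough single-threaded sketch of the invariant "unlink, wait for readers, only then flip state" with toy types (entry, wait_for_readers are inventions, not kernel RCU):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct entry {
    bool linked;
    bool dead;
};

static atomic_int readers_in_flight;

static void wait_for_readers(void)
{
    /* Crude grace period: spin until no reader can still hold a pointer. */
    while (atomic_load(&readers_in_flight) > 0)
        ;
}

static void destroy_entry(struct entry *e, bool teardown_already_waited)
{
    e->linked = false;           /* unlink: new lookups no longer find it */

    /* If nothing else on this path waited for readers, do it here;
     * skipping the wait would let a reader see e->dead flip underneath it. */
    if (!teardown_already_waited)
        wait_for_readers();

    e->dead = true;              /* now safe: no reader still references e */
}

int main(void)
{
    struct entry e = { .linked = true };

    destroy_entry(&e, false);
    printf("linked=%d dead=%d\n", (int)e.linked, (int)e.dead);
    return 0;
}
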
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c index 0f92dc24cb89..d7df6ac2c6f1 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c +++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c | |||
@@ -339,7 +339,11 @@ bitmap_ipmac_tlist(const struct ip_set *set, | |||
339 | nla_put_failure: | 339 | nla_put_failure: |
340 | nla_nest_cancel(skb, nested); | 340 | nla_nest_cancel(skb, nested); |
341 | ipset_nest_end(skb, atd); | 341 | ipset_nest_end(skb, atd); |
342 | return -EMSGSIZE; | 342 | if (unlikely(id == first)) { |
343 | cb->args[2] = 0; | ||
344 | return -EMSGSIZE; | ||
345 | } | ||
346 | return 0; | ||
343 | } | 347 | } |
344 | 348 | ||
345 | static int | 349 | static int |
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c index f2627226a087..10a30b4fc7db 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportnet.c +++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c | |||
@@ -104,6 +104,15 @@ hash_ipportnet4_data_flags(struct hash_ipportnet4_elem *dst, u32 flags) | |||
104 | dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); | 104 | dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); |
105 | } | 105 | } |
106 | 106 | ||
107 | static inline void | ||
108 | hash_ipportnet4_data_reset_flags(struct hash_ipportnet4_elem *dst, u32 *flags) | ||
109 | { | ||
110 | if (dst->nomatch) { | ||
111 | *flags = IPSET_FLAG_NOMATCH; | ||
112 | dst->nomatch = 0; | ||
113 | } | ||
114 | } | ||
115 | |||
107 | static inline int | 116 | static inline int |
108 | hash_ipportnet4_data_match(const struct hash_ipportnet4_elem *elem) | 117 | hash_ipportnet4_data_match(const struct hash_ipportnet4_elem *elem) |
109 | { | 118 | { |
@@ -414,6 +423,15 @@ hash_ipportnet6_data_flags(struct hash_ipportnet6_elem *dst, u32 flags) | |||
414 | dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); | 423 | dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); |
415 | } | 424 | } |
416 | 425 | ||
426 | static inline void | ||
427 | hash_ipportnet6_data_reset_flags(struct hash_ipportnet6_elem *dst, u32 *flags) | ||
428 | { | ||
429 | if (dst->nomatch) { | ||
430 | *flags = IPSET_FLAG_NOMATCH; | ||
431 | dst->nomatch = 0; | ||
432 | } | ||
433 | } | ||
434 | |||
417 | static inline int | 435 | static inline int |
418 | hash_ipportnet6_data_match(const struct hash_ipportnet6_elem *elem) | 436 | hash_ipportnet6_data_match(const struct hash_ipportnet6_elem *elem) |
419 | { | 437 | { |
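
Each ipset hash type gains a *_data_reset_flags() helper here (hash_net, hash_netiface and hash_netport below repeat the same pattern), and the hash_net/hash_netiface flag setters also switch to !!(flags & IPSET_FLAG_NOMATCH) so a wide flag word collapses to 0/1 before landing in a one-bit field. The reset helper moves the element's current nomatch state out to the caller and clears it, which lets the caller restore the old state if the update fails. A toy sketch of that save/restore pattern; toy_elem and FLAG_NOMATCH are stand-ins, not the ipset API:

#include <stdint.h>
#include <stdio.h>

#define FLAG_NOMATCH 0x1u        /* stand-in for IPSET_FLAG_NOMATCH */

/* Toy element: one bit of per-entry state that an update may overwrite. */
struct toy_elem {
    unsigned int nomatch : 1;
};

/* Mirrors the *_data_reset_flags() helpers: move the element's current
 * nomatch state out to the caller and clear it on the element itself. */
static void elem_reset_flags(struct toy_elem *dst, uint32_t *flags)
{
    if (dst->nomatch) {
        *flags = FLAG_NOMATCH;
        dst->nomatch = 0;
    }
}

static void elem_set_flags(struct toy_elem *dst, uint32_t flags)
{
    /* !! collapses any set bit to exactly 1 before the one-bit field. */
    dst->nomatch = !!(flags & FLAG_NOMATCH);
}

int main(void)
{
    struct toy_elem e = { .nomatch = 1 };
    uint32_t saved = 0;

    elem_reset_flags(&e, &saved);    /* stash old state before the update */
    /* ... attempt the update; on failure the caller can roll back: */
    elem_set_flags(&e, saved);

    printf("nomatch restored: %u\n", (unsigned)e.nomatch);
    return 0;
}
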
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c index 4b677cf6bf7d..d6a59154d710 100644 --- a/net/netfilter/ipset/ip_set_hash_net.c +++ b/net/netfilter/ipset/ip_set_hash_net.c | |||
@@ -87,7 +87,16 @@ hash_net4_data_copy(struct hash_net4_elem *dst, | |||
87 | static inline void | 87 | static inline void |
88 | hash_net4_data_flags(struct hash_net4_elem *dst, u32 flags) | 88 | hash_net4_data_flags(struct hash_net4_elem *dst, u32 flags) |
89 | { | 89 | { |
90 | dst->nomatch = flags & IPSET_FLAG_NOMATCH; | 90 | dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); |
91 | } | ||
92 | |||
93 | static inline void | ||
94 | hash_net4_data_reset_flags(struct hash_net4_elem *dst, u32 *flags) | ||
95 | { | ||
96 | if (dst->nomatch) { | ||
97 | *flags = IPSET_FLAG_NOMATCH; | ||
98 | dst->nomatch = 0; | ||
99 | } | ||
91 | } | 100 | } |
92 | 101 | ||
93 | static inline int | 102 | static inline int |
@@ -308,7 +317,16 @@ hash_net6_data_copy(struct hash_net6_elem *dst, | |||
308 | static inline void | 317 | static inline void |
309 | hash_net6_data_flags(struct hash_net6_elem *dst, u32 flags) | 318 | hash_net6_data_flags(struct hash_net6_elem *dst, u32 flags) |
310 | { | 319 | { |
311 | dst->nomatch = flags & IPSET_FLAG_NOMATCH; | 320 | dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); |
321 | } | ||
322 | |||
323 | static inline void | ||
324 | hash_net6_data_reset_flags(struct hash_net6_elem *dst, u32 *flags) | ||
325 | { | ||
326 | if (dst->nomatch) { | ||
327 | *flags = IPSET_FLAG_NOMATCH; | ||
328 | dst->nomatch = 0; | ||
329 | } | ||
312 | } | 330 | } |
313 | 331 | ||
314 | static inline int | 332 | static inline int |
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c index 6ba985f1c96f..f2b0a3c30130 100644 --- a/net/netfilter/ipset/ip_set_hash_netiface.c +++ b/net/netfilter/ipset/ip_set_hash_netiface.c | |||
@@ -198,7 +198,16 @@ hash_netiface4_data_copy(struct hash_netiface4_elem *dst, | |||
198 | static inline void | 198 | static inline void |
199 | hash_netiface4_data_flags(struct hash_netiface4_elem *dst, u32 flags) | 199 | hash_netiface4_data_flags(struct hash_netiface4_elem *dst, u32 flags) |
200 | { | 200 | { |
201 | dst->nomatch = flags & IPSET_FLAG_NOMATCH; | 201 | dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); |
202 | } | ||
203 | |||
204 | static inline void | ||
205 | hash_netiface4_data_reset_flags(struct hash_netiface4_elem *dst, u32 *flags) | ||
206 | { | ||
207 | if (dst->nomatch) { | ||
208 | *flags = IPSET_FLAG_NOMATCH; | ||
209 | dst->nomatch = 0; | ||
210 | } | ||
202 | } | 211 | } |
203 | 212 | ||
204 | static inline int | 213 | static inline int |
@@ -494,7 +503,7 @@ hash_netiface6_data_copy(struct hash_netiface6_elem *dst, | |||
494 | static inline void | 503 | static inline void |
495 | hash_netiface6_data_flags(struct hash_netiface6_elem *dst, u32 flags) | 504 | hash_netiface6_data_flags(struct hash_netiface6_elem *dst, u32 flags) |
496 | { | 505 | { |
497 | dst->nomatch = flags & IPSET_FLAG_NOMATCH; | 506 | dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); |
498 | } | 507 | } |
499 | 508 | ||
500 | static inline int | 509 | static inline int |
@@ -504,6 +513,15 @@ hash_netiface6_data_match(const struct hash_netiface6_elem *elem) | |||
504 | } | 513 | } |
505 | 514 | ||
506 | static inline void | 515 | static inline void |
516 | hash_netiface6_data_reset_flags(struct hash_netiface6_elem *dst, u32 *flags) | ||
517 | { | ||
518 | if (dst->nomatch) { | ||
519 | *flags = IPSET_FLAG_NOMATCH; | ||
520 | dst->nomatch = 0; | ||
521 | } | ||
522 | } | ||
523 | |||
524 | static inline void | ||
507 | hash_netiface6_data_zero_out(struct hash_netiface6_elem *elem) | 525 | hash_netiface6_data_zero_out(struct hash_netiface6_elem *elem) |
508 | { | 526 | { |
509 | elem->elem = 0; | 527 | elem->elem = 0; |
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c index af20c0c5ced2..349deb672a2d 100644 --- a/net/netfilter/ipset/ip_set_hash_netport.c +++ b/net/netfilter/ipset/ip_set_hash_netport.c | |||
@@ -104,6 +104,15 @@ hash_netport4_data_flags(struct hash_netport4_elem *dst, u32 flags) | |||
104 | dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); | 104 | dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); |
105 | } | 105 | } |
106 | 106 | ||
107 | static inline void | ||
108 | hash_netport4_data_reset_flags(struct hash_netport4_elem *dst, u32 *flags) | ||
109 | { | ||
110 | if (dst->nomatch) { | ||
111 | *flags = IPSET_FLAG_NOMATCH; | ||
112 | dst->nomatch = 0; | ||
113 | } | ||
114 | } | ||
115 | |||
107 | static inline int | 116 | static inline int |
108 | hash_netport4_data_match(const struct hash_netport4_elem *elem) | 117 | hash_netport4_data_match(const struct hash_netport4_elem *elem) |
109 | { | 118 | { |
@@ -375,6 +384,15 @@ hash_netport6_data_flags(struct hash_netport6_elem *dst, u32 flags) | |||
375 | dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); | 384 | dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); |
376 | } | 385 | } |
377 | 386 | ||
387 | static inline void | ||
388 | hash_netport6_data_reset_flags(struct hash_netport6_elem *dst, u32 *flags) | ||
389 | { | ||
390 | if (dst->nomatch) { | ||
391 | *flags = IPSET_FLAG_NOMATCH; | ||
392 | dst->nomatch = 0; | ||
393 | } | ||
394 | } | ||
395 | |||
378 | static inline int | 396 | static inline int |
379 | hash_netport6_data_match(const struct hash_netport6_elem *elem) | 397 | hash_netport6_data_match(const struct hash_netport6_elem *elem) |
380 | { | 398 | { |
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c index 8371c2bac2e4..09c744aa8982 100644 --- a/net/netfilter/ipset/ip_set_list_set.c +++ b/net/netfilter/ipset/ip_set_list_set.c | |||
@@ -174,9 +174,13 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id, | |||
174 | { | 174 | { |
175 | const struct set_elem *e = list_set_elem(map, i); | 175 | const struct set_elem *e = list_set_elem(map, i); |
176 | 176 | ||
177 | if (i == map->size - 1 && e->id != IPSET_INVALID_ID) | 177 | if (e->id != IPSET_INVALID_ID) { |
178 | /* Last element replaced: e.g. add new,before,last */ | 178 | const struct set_elem *x = list_set_elem(map, map->size - 1); |
179 | ip_set_put_byindex(e->id); | 179 | |
180 | /* Last element replaced or pushed off */ | ||
181 | if (x->id != IPSET_INVALID_ID) | ||
182 | ip_set_put_byindex(x->id); | ||
183 | } | ||
180 | if (with_timeout(map->timeout)) | 184 | if (with_timeout(map->timeout)) |
181 | list_elem_tadd(map, i, id, ip_set_timeout_set(timeout)); | 185 | list_elem_tadd(map, i, id, ip_set_timeout_set(timeout)); |
182 | else | 186 | else |
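
list_set_add previously released the replaced element's set reference only when inserting at the very last slot; the new code instead looks at whatever currently occupies the last slot, because inserting anywhere with before/after semantics shifts the tail and pushes that element off the list, and its reference has to be dropped exactly then. A toy fixed-size "insert with push-off" sketch, with the reference count modelled as a plain counter (all names invented):

#include <stdio.h>

#define LIST_SIZE  4
#define INVALID_ID (-1)

static int slots[LIST_SIZE];
static int refcount[16];         /* toy per-id reference counts */

/* Insert 'id' at position 'pos'; whatever occupied the last slot is pushed
 * off the end, and only that element's reference is dropped. */
static void list_insert(int pos, int id)
{
    int last = slots[LIST_SIZE - 1];

    if (last != INVALID_ID)
        refcount[last]--;        /* the pushed-off element, not slots[pos] */

    for (int i = LIST_SIZE - 1; i > pos; i--)
        slots[i] = slots[i - 1];
    slots[pos] = id;
    refcount[id]++;
}

int main(void)
{
    for (int i = 0; i < LIST_SIZE; i++)
        slots[i] = INVALID_ID;

    list_insert(0, 1);
    list_insert(0, 2);
    list_insert(0, 3);
    list_insert(0, 4);
    list_insert(1, 5);           /* pushes id 1 off the end */

    printf("ref(1)=%d ref(5)=%d\n", refcount[1], refcount[5]); /* 0 and 1 */
    return 0;
}
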
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 0e7d423324c3..e0c4373b4747 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c | |||
@@ -1593,10 +1593,8 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff, | |||
1593 | end += strlen("\r\n\r\n") + clen; | 1593 | end += strlen("\r\n\r\n") + clen; |
1594 | 1594 | ||
1595 | msglen = origlen = end - dptr; | 1595 | msglen = origlen = end - dptr; |
1596 | if (msglen > datalen) { | 1596 | if (msglen > datalen) |
1597 | nf_ct_helper_log(skb, ct, "incomplete/bad SIP message"); | 1597 | return NF_ACCEPT; |
1598 | return NF_DROP; | ||
1599 | } | ||
1600 | 1598 | ||
1601 | ret = process_sip_msg(skb, ct, protoff, dataoff, | 1599 | ret = process_sip_msg(skb, ct, protoff, dataoff, |
1602 | &dptr, &msglen); | 1600 | &dptr, &msglen); |
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 6bcce401fd1c..fedee3943661 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c | |||
@@ -568,6 +568,7 @@ static int __init nf_conntrack_standalone_init(void) | |||
568 | register_net_sysctl(&init_net, "net", nf_ct_netfilter_table); | 568 | register_net_sysctl(&init_net, "net", nf_ct_netfilter_table); |
569 | if (!nf_ct_netfilter_header) { | 569 | if (!nf_ct_netfilter_header) { |
570 | pr_err("nf_conntrack: can't register to sysctl.\n"); | 570 | pr_err("nf_conntrack: can't register to sysctl.\n"); |
571 | ret = -ENOMEM; | ||
571 | goto out_sysctl; | 572 | goto out_sysctl; |
572 | } | 573 | } |
573 | #endif | 574 | #endif |
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index 8d5769c6d16e..ad24be070e53 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c | |||
@@ -467,33 +467,22 @@ EXPORT_SYMBOL_GPL(nf_nat_packet); | |||
467 | struct nf_nat_proto_clean { | 467 | struct nf_nat_proto_clean { |
468 | u8 l3proto; | 468 | u8 l3proto; |
469 | u8 l4proto; | 469 | u8 l4proto; |
470 | bool hash; | ||
471 | }; | 470 | }; |
472 | 471 | ||
473 | /* Clear NAT section of all conntracks, in case we're loaded again. */ | 472 | /* kill conntracks with affected NAT section */ |
474 | static int nf_nat_proto_clean(struct nf_conn *i, void *data) | 473 | static int nf_nat_proto_remove(struct nf_conn *i, void *data) |
475 | { | 474 | { |
476 | const struct nf_nat_proto_clean *clean = data; | 475 | const struct nf_nat_proto_clean *clean = data; |
477 | struct nf_conn_nat *nat = nfct_nat(i); | 476 | struct nf_conn_nat *nat = nfct_nat(i); |
478 | 477 | ||
479 | if (!nat) | 478 | if (!nat) |
480 | return 0; | 479 | return 0; |
481 | if (!(i->status & IPS_SRC_NAT_DONE)) | 480 | |
482 | return 0; | ||
483 | if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) || | 481 | if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) || |
484 | (clean->l4proto && nf_ct_protonum(i) != clean->l4proto)) | 482 | (clean->l4proto && nf_ct_protonum(i) != clean->l4proto)) |
485 | return 0; | 483 | return 0; |
486 | 484 | ||
487 | if (clean->hash) { | 485 | return i->status & IPS_NAT_MASK ? 1 : 0; |
488 | spin_lock_bh(&nf_nat_lock); | ||
489 | hlist_del_rcu(&nat->bysource); | ||
490 | spin_unlock_bh(&nf_nat_lock); | ||
491 | } else { | ||
492 | memset(nat, 0, sizeof(*nat)); | ||
493 | i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | | ||
494 | IPS_SEQ_ADJUST); | ||
495 | } | ||
496 | return 0; | ||
497 | } | 486 | } |
498 | 487 | ||
499 | static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto) | 488 | static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto) |
@@ -505,16 +494,8 @@ static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto) | |||
505 | struct net *net; | 494 | struct net *net; |
506 | 495 | ||
507 | rtnl_lock(); | 496 | rtnl_lock(); |
508 | /* Step 1 - remove from bysource hash */ | ||
509 | clean.hash = true; | ||
510 | for_each_net(net) | 497 | for_each_net(net) |
511 | nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean); | 498 | nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean); |
512 | synchronize_rcu(); | ||
513 | |||
514 | /* Step 2 - clean NAT section */ | ||
515 | clean.hash = false; | ||
516 | for_each_net(net) | ||
517 | nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean); | ||
518 | rtnl_unlock(); | 499 | rtnl_unlock(); |
519 | } | 500 | } |
520 | 501 | ||
@@ -526,16 +507,9 @@ static void nf_nat_l3proto_clean(u8 l3proto) | |||
526 | struct net *net; | 507 | struct net *net; |
527 | 508 | ||
528 | rtnl_lock(); | 509 | rtnl_lock(); |
529 | /* Step 1 - remove from bysource hash */ | ||
530 | clean.hash = true; | ||
531 | for_each_net(net) | ||
532 | nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean); | ||
533 | synchronize_rcu(); | ||
534 | 510 | ||
535 | /* Step 2 - clean NAT section */ | ||
536 | clean.hash = false; | ||
537 | for_each_net(net) | 511 | for_each_net(net) |
538 | nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean); | 512 | nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean); |
539 | rtnl_unlock(); | 513 | rtnl_unlock(); |
540 | } | 514 | } |
541 | 515 | ||
@@ -773,7 +747,7 @@ static void __net_exit nf_nat_net_exit(struct net *net) | |||
773 | { | 747 | { |
774 | struct nf_nat_proto_clean clean = {}; | 748 | struct nf_nat_proto_clean clean = {}; |
775 | 749 | ||
776 | nf_ct_iterate_cleanup(net, &nf_nat_proto_clean, &clean); | 750 | nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean); |
777 | synchronize_rcu(); | 751 | synchronize_rcu(); |
778 | nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size); | 752 | nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size); |
779 | } | 753 | } |
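
The nf_nat_core.c rework drops the two-phase cleanup (unhash from bysource, synchronize_rcu, then wipe the NAT extension) on unload: nf_nat_proto_remove() now just returns 1 for any conntrack whose status carries IPS_NAT_MASK bits, and nf_ct_iterate_cleanup() kills those entries outright, so no conntrack is ever left in a half-stripped state. A userspace sketch of the "predicate callback marks entries, the iterator deletes them whole" pattern; toy_ct, iterate_cleanup and NAT_MASK are stand-ins, not the conntrack API:

#include <stdbool.h>
#include <stdio.h>

#define NAT_MASK 0x3u            /* stand-in for IPS_NAT_MASK */

struct toy_ct {
    unsigned int status;
    bool alive;
};

/* Walk a table and kill every entry for which the predicate returns
 * nonzero -- a rough model of nf_ct_iterate_cleanup(). */
static void iterate_cleanup(struct toy_ct *tbl, int n,
                            int (*pred)(struct toy_ct *, void *), void *data)
{
    for (int i = 0; i < n; i++)
        if (tbl[i].alive && pred(&tbl[i], data))
            tbl[i].alive = false;
}

/* Predicate mirroring nf_nat_proto_remove(): select entries that ever had
 * NAT applied and let the iterator delete them as a whole. */
static int nat_remove(struct toy_ct *ct, void *data)
{
    (void)data;
    return (ct->status & NAT_MASK) ? 1 : 0;
}

int main(void)
{
    struct toy_ct tbl[] = {
        { .status = 0x1, .alive = true },   /* NATed: will be killed */
        { .status = 0x0, .alive = true },   /* plain: left untouched */
    };

    iterate_cleanup(tbl, 2, nat_remove, NULL);
    printf("alive: %d %d\n", (int)tbl[0].alive, (int)tbl[1].alive);
    return 0;
}
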
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c index 589d686f0b4c..dc3fd5d44464 100644 --- a/net/netfilter/nfnetlink_acct.c +++ b/net/netfilter/nfnetlink_acct.c | |||
@@ -49,6 +49,8 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb, | |||
49 | return -EINVAL; | 49 | return -EINVAL; |
50 | 50 | ||
51 | acct_name = nla_data(tb[NFACCT_NAME]); | 51 | acct_name = nla_data(tb[NFACCT_NAME]); |
52 | if (strlen(acct_name) == 0) | ||
53 | return -EINVAL; | ||
52 | 54 | ||
53 | list_for_each_entry(nfacct, &nfnl_acct_list, head) { | 55 | list_for_each_entry(nfacct, &nfnl_acct_list, head) { |
54 | if (strncmp(nfacct->name, acct_name, NFACCT_NAME_MAX) != 0) | 56 | if (strncmp(nfacct->name, acct_name, NFACCT_NAME_MAX) != 0) |
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index 1cb48540f86a..42680b2baa11 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c | |||
@@ -1062,8 +1062,10 @@ static int __init nfnetlink_queue_init(void) | |||
1062 | 1062 | ||
1063 | #ifdef CONFIG_PROC_FS | 1063 | #ifdef CONFIG_PROC_FS |
1064 | if (!proc_create("nfnetlink_queue", 0440, | 1064 | if (!proc_create("nfnetlink_queue", 0440, |
1065 | proc_net_netfilter, &nfqnl_file_ops)) | 1065 | proc_net_netfilter, &nfqnl_file_ops)) { |
1066 | status = -ENOMEM; | ||
1066 | goto cleanup_subsys; | 1067 | goto cleanup_subsys; |
1068 | } | ||
1067 | #endif | 1069 | #endif |
1068 | 1070 | ||
1069 | register_netdevice_notifier(&nfqnl_dev_notifier); | 1071 | register_netdevice_notifier(&nfqnl_dev_notifier); |
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index d1fa1d9ffd2e..103bd704b5fc 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c | |||
@@ -1173,6 +1173,7 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1173 | } | 1173 | } |
1174 | 1174 | ||
1175 | if (sax != NULL) { | 1175 | if (sax != NULL) { |
1176 | memset(sax, 0, sizeof(*sax)); | ||
1176 | sax->sax25_family = AF_NETROM; | 1177 | sax->sax25_family = AF_NETROM; |
1177 | skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call, | 1178 | skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call, |
1178 | AX25_ADDR_LEN); | 1179 | AX25_ADDR_LEN); |
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c index b530afadd76c..ee25f25f0cd6 100644 --- a/net/nfc/llcp/llcp.c +++ b/net/nfc/llcp/llcp.c | |||
@@ -107,8 +107,6 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen, | |||
107 | accept_sk->sk_state_change(sk); | 107 | accept_sk->sk_state_change(sk); |
108 | 108 | ||
109 | bh_unlock_sock(accept_sk); | 109 | bh_unlock_sock(accept_sk); |
110 | |||
111 | sock_orphan(accept_sk); | ||
112 | } | 110 | } |
113 | 111 | ||
114 | if (listen == true) { | 112 | if (listen == true) { |
@@ -134,8 +132,6 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen, | |||
134 | 132 | ||
135 | bh_unlock_sock(sk); | 133 | bh_unlock_sock(sk); |
136 | 134 | ||
137 | sock_orphan(sk); | ||
138 | |||
139 | sk_del_node_init(sk); | 135 | sk_del_node_init(sk); |
140 | } | 136 | } |
141 | 137 | ||
@@ -164,8 +160,6 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen, | |||
164 | 160 | ||
165 | bh_unlock_sock(sk); | 161 | bh_unlock_sock(sk); |
166 | 162 | ||
167 | sock_orphan(sk); | ||
168 | |||
169 | sk_del_node_init(sk); | 163 | sk_del_node_init(sk); |
170 | } | 164 | } |
171 | 165 | ||
@@ -827,7 +821,6 @@ static void nfc_llcp_recv_ui(struct nfc_llcp_local *local, | |||
827 | skb_get(skb); | 821 | skb_get(skb); |
828 | } else { | 822 | } else { |
829 | pr_err("Receive queue is full\n"); | 823 | pr_err("Receive queue is full\n"); |
830 | kfree_skb(skb); | ||
831 | } | 824 | } |
832 | 825 | ||
833 | nfc_llcp_sock_put(llcp_sock); | 826 | nfc_llcp_sock_put(llcp_sock); |
@@ -1028,7 +1021,6 @@ static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local, | |||
1028 | skb_get(skb); | 1021 | skb_get(skb); |
1029 | } else { | 1022 | } else { |
1030 | pr_err("Receive queue is full\n"); | 1023 | pr_err("Receive queue is full\n"); |
1031 | kfree_skb(skb); | ||
1032 | } | 1024 | } |
1033 | } | 1025 | } |
1034 | 1026 | ||
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c index 5c7cdf3f2a83..6c94447ec414 100644 --- a/net/nfc/llcp/sock.c +++ b/net/nfc/llcp/sock.c | |||
@@ -270,7 +270,9 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *parent, | |||
270 | } | 270 | } |
271 | 271 | ||
272 | if (sk->sk_state == LLCP_CONNECTED || !newsock) { | 272 | if (sk->sk_state == LLCP_CONNECTED || !newsock) { |
273 | nfc_llcp_accept_unlink(sk); | 273 | list_del_init(&lsk->accept_queue); |
274 | sock_put(sk); | ||
275 | |||
274 | if (newsock) | 276 | if (newsock) |
275 | sock_graft(sk, newsock); | 277 | sock_graft(sk, newsock); |
276 | 278 | ||
@@ -464,8 +466,6 @@ static int llcp_sock_release(struct socket *sock) | |||
464 | nfc_llcp_accept_unlink(accept_sk); | 466 | nfc_llcp_accept_unlink(accept_sk); |
465 | 467 | ||
466 | release_sock(accept_sk); | 468 | release_sock(accept_sk); |
467 | |||
468 | sock_orphan(accept_sk); | ||
469 | } | 469 | } |
470 | } | 470 | } |
471 | 471 | ||
@@ -646,6 +646,8 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
646 | 646 | ||
647 | pr_debug("%p %zu\n", sk, len); | 647 | pr_debug("%p %zu\n", sk, len); |
648 | 648 | ||
649 | msg->msg_namelen = 0; | ||
650 | |||
649 | lock_sock(sk); | 651 | lock_sock(sk); |
650 | 652 | ||
651 | if (sk->sk_state == LLCP_CLOSED && | 653 | if (sk->sk_state == LLCP_CLOSED && |
@@ -691,6 +693,7 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
691 | 693 | ||
692 | pr_debug("Datagram socket %d %d\n", ui_cb->dsap, ui_cb->ssap); | 694 | pr_debug("Datagram socket %d %d\n", ui_cb->dsap, ui_cb->ssap); |
693 | 695 | ||
696 | memset(sockaddr, 0, sizeof(*sockaddr)); | ||
694 | sockaddr->sa_family = AF_NFC; | 697 | sockaddr->sa_family = AF_NFC; |
695 | sockaddr->nfc_protocol = NFC_PROTO_NFC_DEP; | 698 | sockaddr->nfc_protocol = NFC_PROTO_NFC_DEP; |
696 | sockaddr->dsap = ui_cb->dsap; | 699 | sockaddr->dsap = ui_cb->dsap; |
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index a4b724708a1a..6980c3e6f066 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c | |||
@@ -1593,10 +1593,8 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid, | |||
1593 | return ERR_PTR(-ENOMEM); | 1593 | return ERR_PTR(-ENOMEM); |
1594 | 1594 | ||
1595 | retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd); | 1595 | retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd); |
1596 | if (retval < 0) { | 1596 | BUG_ON(retval < 0); |
1597 | kfree_skb(skb); | 1597 | |
1598 | return ERR_PTR(retval); | ||
1599 | } | ||
1600 | return skb; | 1598 | return skb; |
1601 | } | 1599 | } |
1602 | 1600 | ||
@@ -1726,24 +1724,32 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info) | |||
1726 | nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) | 1724 | nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) |
1727 | err = -EINVAL; | 1725 | err = -EINVAL; |
1728 | 1726 | ||
1727 | reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); | ||
1728 | if (!reply) { | ||
1729 | err = -ENOMEM; | ||
1730 | goto exit_unlock; | ||
1731 | } | ||
1732 | |||
1729 | if (!err && a[OVS_VPORT_ATTR_OPTIONS]) | 1733 | if (!err && a[OVS_VPORT_ATTR_OPTIONS]) |
1730 | err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]); | 1734 | err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]); |
1731 | if (err) | 1735 | if (err) |
1732 | goto exit_unlock; | 1736 | goto exit_free; |
1737 | |||
1733 | if (a[OVS_VPORT_ATTR_UPCALL_PID]) | 1738 | if (a[OVS_VPORT_ATTR_UPCALL_PID]) |
1734 | vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]); | 1739 | vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]); |
1735 | 1740 | ||
1736 | reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq, | 1741 | err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid, |
1737 | OVS_VPORT_CMD_NEW); | 1742 | info->snd_seq, 0, OVS_VPORT_CMD_NEW); |
1738 | if (IS_ERR(reply)) { | 1743 | BUG_ON(err < 0); |
1739 | netlink_set_err(sock_net(skb->sk)->genl_sock, 0, | ||
1740 | ovs_dp_vport_multicast_group.id, PTR_ERR(reply)); | ||
1741 | goto exit_unlock; | ||
1742 | } | ||
1743 | 1744 | ||
1744 | genl_notify(reply, genl_info_net(info), info->snd_portid, | 1745 | genl_notify(reply, genl_info_net(info), info->snd_portid, |
1745 | ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL); | 1746 | ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL); |
1746 | 1747 | ||
1748 | rtnl_unlock(); | ||
1749 | return 0; | ||
1750 | |||
1751 | exit_free: | ||
1752 | kfree_skb(reply); | ||
1747 | exit_unlock: | 1753 | exit_unlock: |
1748 | rtnl_unlock(); | 1754 | rtnl_unlock(); |
1749 | return err; | 1755 | return err; |
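
ovs_vport_cmd_set() now allocates the reply skb before it touches the vport, and fills it under a BUG_ON rather than treating the fill as a recoverable failure: once the vport's options or upcall PID have changed there is no clean way to back the change out or to skip notifying listeners, so everything that can fail is moved ahead of the commit point. A small sketch of that allocate-before-mutate ordering, with invented names (toy_port, port_set_option):

#include <stdio.h>
#include <stdlib.h>

struct toy_port { int option; };

/* Allocate everything that can fail *before* mutating shared state, so a
 * failure never leaves the object half-updated with no way to report it. */
static int port_set_option(struct toy_port *p, int new_option, char **notify)
{
    char *reply = malloc(64);            /* reply buffer reserved up front */
    if (!reply)
        return -1;                       /* nothing has been changed yet   */

    p->option = new_option;              /* commit point: cannot fail now  */
    snprintf(reply, 64, "port option set to %d", p->option);
    *notify = reply;                     /* caller broadcasts and frees    */
    return 0;
}

int main(void)
{
    struct toy_port p = { .option = 0 };
    char *msg = NULL;

    if (port_set_option(&p, 7, &msg) == 0) {
        puts(msg);
        free(msg);
    }
    return 0;
}
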
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index fe0e4215c73d..67a2b783fe70 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c | |||
@@ -795,9 +795,9 @@ void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow) | |||
795 | 795 | ||
796 | void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow) | 796 | void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow) |
797 | { | 797 | { |
798 | BUG_ON(table->count == 0); | ||
798 | hlist_del_rcu(&flow->hash_node[table->node_ver]); | 799 | hlist_del_rcu(&flow->hash_node[table->node_ver]); |
799 | table->count--; | 800 | table->count--; |
800 | BUG_ON(table->count < 0); | ||
801 | } | 801 | } |
802 | 802 | ||
803 | /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */ | 803 | /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */ |
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index cf68e6e4054a..9c8347451597 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c | |||
@@ -1253,6 +1253,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1253 | skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); | 1253 | skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); |
1254 | 1254 | ||
1255 | if (srose != NULL) { | 1255 | if (srose != NULL) { |
1256 | memset(srose, 0, msg->msg_namelen); | ||
1256 | srose->srose_family = AF_ROSE; | 1257 | srose->srose_family = AF_ROSE; |
1257 | srose->srose_addr = rose->dest_addr; | 1258 | srose->srose_addr = rose->dest_addr; |
1258 | srose->srose_call = rose->dest_call; | 1259 | srose->srose_call = rose->dest_call; |
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index 1135d8227f9b..9b97172db84a 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c | |||
@@ -204,7 +204,6 @@ fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f, | |||
204 | if (err < 0) | 204 | if (err < 0) |
205 | return err; | 205 | return err; |
206 | 206 | ||
207 | err = -EINVAL; | ||
208 | if (tb[TCA_FW_CLASSID]) { | 207 | if (tb[TCA_FW_CLASSID]) { |
209 | f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]); | 208 | f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]); |
210 | tcf_bind_filter(tp, &f->res, base); | 209 | tcf_bind_filter(tp, &f->res, base); |
@@ -218,6 +217,7 @@ fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f, | |||
218 | } | 217 | } |
219 | #endif /* CONFIG_NET_CLS_IND */ | 218 | #endif /* CONFIG_NET_CLS_IND */ |
220 | 219 | ||
220 | err = -EINVAL; | ||
221 | if (tb[TCA_FW_MASK]) { | 221 | if (tb[TCA_FW_MASK]) { |
222 | mask = nla_get_u32(tb[TCA_FW_MASK]); | 222 | mask = nla_get_u32(tb[TCA_FW_MASK]); |
223 | if (mask != head->mask) | 223 | if (mask != head->mask) |
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 13aa47aa2ffb..1bc210ffcba2 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c | |||
@@ -962,8 +962,11 @@ cbq_dequeue(struct Qdisc *sch) | |||
962 | cbq_update(q); | 962 | cbq_update(q); |
963 | if ((incr -= incr2) < 0) | 963 | if ((incr -= incr2) < 0) |
964 | incr = 0; | 964 | incr = 0; |
965 | q->now += incr; | ||
966 | } else { | ||
967 | if (now > q->now) | ||
968 | q->now = now; | ||
965 | } | 969 | } |
966 | q->now += incr; | ||
967 | q->now_rt = now; | 970 | q->now_rt = now; |
968 | 971 | ||
969 | for (;;) { | 972 | for (;;) { |
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 4e606fcb2534..55786283a3df 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c | |||
@@ -195,7 +195,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
195 | flow->deficit = q->quantum; | 195 | flow->deficit = q->quantum; |
196 | flow->dropped = 0; | 196 | flow->dropped = 0; |
197 | } | 197 | } |
198 | if (++sch->q.qlen < sch->limit) | 198 | if (++sch->q.qlen <= sch->limit) |
199 | return NET_XMIT_SUCCESS; | 199 | return NET_XMIT_SUCCESS; |
200 | 200 | ||
201 | q->drop_overlimit++; | 201 | q->drop_overlimit++; |
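
The fq_codel change is a one-character off-by-one: q.qlen is incremented before the comparison, so it already counts the packet being enqueued, and with "<" a queue configured for limit packets only ever holds limit-1 of them (a limit of 1 drops everything). A two-line demonstration of the old and new checks at limit 1:

#include <stdio.h>

int main(void)
{
    unsigned int limit = 1, qlen = 0;

    /* Old check: with limit == 1 even the first packet is dropped,
     * because qlen already counts the packet being enqueued. */
    int accepted_old = (++qlen <  limit);   /* 1 < 1  -> 0 */
    qlen = 0;
    int accepted_new = (++qlen <= limit);   /* 1 <= 1 -> 1 */

    printf("limit=1: old=%d new=%d\n", accepted_old, accepted_new);
    return 0;
}
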
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index ffad48109a22..eac7e0ee23c1 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -904,7 +904,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate) | |||
904 | u64 mult; | 904 | u64 mult; |
905 | int shift; | 905 | int shift; |
906 | 906 | ||
907 | r->rate_bps = rate << 3; | 907 | r->rate_bps = (u64)rate << 3; |
908 | r->shift = 0; | 908 | r->shift = 0; |
909 | r->mult = 1; | 909 | r->mult = 1; |
910 | /* | 910 | /* |
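
In psched_ratecfg_precompute() the rate arrives as a 32-bit value and "rate << 3" (bytes/s to bits/s, assuming that is the unit here) is evaluated in 32-bit arithmetic before the assignment to the 64-bit rate_bps, so anything above about 4.29 Gbit/s silently loses its top bits; casting to u64 first makes the shift happen in 64 bits. A worked example of the truncation:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint32_t rate = 600000000u;                   /* 600 MB/s = 4.8 Gbit/s */

    uint64_t wrong = (uint32_t)(rate << 3);       /* value a 32-bit shift yields */
    uint64_t right = (uint64_t)rate << 3;         /* widen first, then shift     */

    printf("wrong=%" PRIu64 " right=%" PRIu64 "\n", wrong, right);
    return 0;
}
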
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index dcc446e7fbf6..d5f35f15af98 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
@@ -304,10 +304,8 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru | |||
304 | err = rpciod_up(); | 304 | err = rpciod_up(); |
305 | if (err) | 305 | if (err) |
306 | goto out_no_rpciod; | 306 | goto out_no_rpciod; |
307 | err = -EINVAL; | ||
308 | if (!xprt) | ||
309 | goto out_no_xprt; | ||
310 | 307 | ||
308 | err = -EINVAL; | ||
311 | if (args->version >= program->nrvers) | 309 | if (args->version >= program->nrvers) |
312 | goto out_err; | 310 | goto out_err; |
313 | version = program->version[args->version]; | 311 | version = program->version[args->version]; |
@@ -382,10 +380,9 @@ out_no_principal: | |||
382 | out_no_stats: | 380 | out_no_stats: |
383 | kfree(clnt); | 381 | kfree(clnt); |
384 | out_err: | 382 | out_err: |
385 | xprt_put(xprt); | ||
386 | out_no_xprt: | ||
387 | rpciod_down(); | 383 | rpciod_down(); |
388 | out_no_rpciod: | 384 | out_no_rpciod: |
385 | xprt_put(xprt); | ||
389 | return ERR_PTR(err); | 386 | return ERR_PTR(err); |
390 | } | 387 | } |
391 | 388 | ||
@@ -512,7 +509,7 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args, | |||
512 | new = rpc_new_client(args, xprt); | 509 | new = rpc_new_client(args, xprt); |
513 | if (IS_ERR(new)) { | 510 | if (IS_ERR(new)) { |
514 | err = PTR_ERR(new); | 511 | err = PTR_ERR(new); |
515 | goto out_put; | 512 | goto out_err; |
516 | } | 513 | } |
517 | 514 | ||
518 | atomic_inc(&clnt->cl_count); | 515 | atomic_inc(&clnt->cl_count); |
@@ -525,8 +522,6 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args, | |||
525 | new->cl_chatty = clnt->cl_chatty; | 522 | new->cl_chatty = clnt->cl_chatty; |
526 | return new; | 523 | return new; |
527 | 524 | ||
528 | out_put: | ||
529 | xprt_put(xprt); | ||
530 | out_err: | 525 | out_err: |
531 | dprintk("RPC: %s: returned error %d\n", __func__, err); | 526 | dprintk("RPC: %s: returned error %d\n", __func__, err); |
532 | return ERR_PTR(err); | 527 | return ERR_PTR(err); |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index a9622b6cd916..515ce38e4f4c 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -790,6 +790,7 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg) | |||
790 | if (addr) { | 790 | if (addr) { |
791 | addr->family = AF_TIPC; | 791 | addr->family = AF_TIPC; |
792 | addr->addrtype = TIPC_ADDR_ID; | 792 | addr->addrtype = TIPC_ADDR_ID; |
793 | memset(&addr->addr, 0, sizeof(addr->addr)); | ||
793 | addr->addr.id.ref = msg_origport(msg); | 794 | addr->addr.id.ref = msg_origport(msg); |
794 | addr->addr.id.node = msg_orignode(msg); | 795 | addr->addr.id.node = msg_orignode(msg); |
795 | addr->addr.name.domain = 0; /* could leave uninitialized */ | 796 | addr->addr.name.domain = 0; /* could leave uninitialized */ |
@@ -904,6 +905,9 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock, | |||
904 | goto exit; | 905 | goto exit; |
905 | } | 906 | } |
906 | 907 | ||
908 | /* will be updated in set_orig_addr() if needed */ | ||
909 | m->msg_namelen = 0; | ||
910 | |||
907 | timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); | 911 | timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); |
908 | restart: | 912 | restart: |
909 | 913 | ||
@@ -1013,6 +1017,9 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock, | |||
1013 | goto exit; | 1017 | goto exit; |
1014 | } | 1018 | } |
1015 | 1019 | ||
1020 | /* will be updated in set_orig_addr() if needed */ | ||
1021 | m->msg_namelen = 0; | ||
1022 | |||
1016 | target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len); | 1023 | target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len); |
1017 | timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); | 1024 | timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); |
1018 | 1025 | ||
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 971282b6f6a3..2db702d82e7d 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -1412,8 +1412,8 @@ static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock, | |||
1412 | if (UNIXCB(skb).cred) | 1412 | if (UNIXCB(skb).cred) |
1413 | return; | 1413 | return; |
1414 | if (test_bit(SOCK_PASSCRED, &sock->flags) || | 1414 | if (test_bit(SOCK_PASSCRED, &sock->flags) || |
1415 | (other->sk_socket && | 1415 | !other->sk_socket || |
1416 | test_bit(SOCK_PASSCRED, &other->sk_socket->flags))) { | 1416 | test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) { |
1417 | UNIXCB(skb).pid = get_pid(task_tgid(current)); | 1417 | UNIXCB(skb).pid = get_pid(task_tgid(current)); |
1418 | UNIXCB(skb).cred = get_current_cred(); | 1418 | UNIXCB(skb).cred = get_current_cred(); |
1419 | } | 1419 | } |
@@ -1993,7 +1993,7 @@ again: | |||
1993 | if ((UNIXCB(skb).pid != siocb->scm->pid) || | 1993 | if ((UNIXCB(skb).pid != siocb->scm->pid) || |
1994 | (UNIXCB(skb).cred != siocb->scm->cred)) | 1994 | (UNIXCB(skb).cred != siocb->scm->cred)) |
1995 | break; | 1995 | break; |
1996 | } else { | 1996 | } else if (test_bit(SOCK_PASSCRED, &sock->flags)) { |
1997 | /* Copy credentials */ | 1997 | /* Copy credentials */ |
1998 | scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred); | 1998 | scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred); |
1999 | check_creds = 1; | 1999 | check_creds = 1; |
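
The af_unix hunks make credential passing depend on SOCK_PASSCRED on the receiving socket instead of copying creds unconditionally. As a hedged userspace illustration of that opt-in (not from the patch; the socket setup and variable names are invented for the example), a receiver enables SO_PASSCRED and then reads SCM_CREDENTIALS from the ancillary data:

/*
 * Illustrative userspace sketch: sender credentials are only delivered when
 * SO_PASSCRED is enabled on the receiving socket, which is the condition the
 * hunk above checks with SOCK_PASSCRED.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fds[2], on = 1;
	char buf[8];
	char cbuf[CMSG_SPACE(sizeof(struct ucred))];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) < 0)
		return 1;
	/* Opt in: without this the kernel does not deliver sender credentials. */
	setsockopt(fds[0], SOL_SOCKET, SO_PASSCRED, &on, sizeof(on));

	write(fds[1], "hi", 2);
	if (recvmsg(fds[0], &msg, 0) < 0)
		return 1;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_CREDENTIALS) {
			struct ucred cred;

			memcpy(&cred, CMSG_DATA(cmsg), sizeof(cred));
			printf("sender pid=%d uid=%d gid=%d\n",
			       (int)cred.pid, (int)cred.uid, (int)cred.gid);
		}
	}
	close(fds[0]);
	close(fds[1]);
	return 0;
}
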
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index ca511c4f388a..7f93e2a42d7a 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c | |||
@@ -207,7 +207,7 @@ static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr) | |||
207 | struct vsock_sock *vsk; | 207 | struct vsock_sock *vsk; |
208 | 208 | ||
209 | list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table) | 209 | list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table) |
210 | if (vsock_addr_equals_addr_any(addr, &vsk->local_addr)) | 210 | if (addr->svm_port == vsk->local_addr.svm_port) |
211 | return sk_vsock(vsk); | 211 | return sk_vsock(vsk); |
212 | 212 | ||
213 | return NULL; | 213 | return NULL; |
@@ -220,8 +220,8 @@ static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src, | |||
220 | 220 | ||
221 | list_for_each_entry(vsk, vsock_connected_sockets(src, dst), | 221 | list_for_each_entry(vsk, vsock_connected_sockets(src, dst), |
222 | connected_table) { | 222 | connected_table) { |
223 | if (vsock_addr_equals_addr(src, &vsk->remote_addr) | 223 | if (vsock_addr_equals_addr(src, &vsk->remote_addr) && |
224 | && vsock_addr_equals_addr(dst, &vsk->local_addr)) { | 224 | dst->svm_port == vsk->local_addr.svm_port) { |
225 | return sk_vsock(vsk); | 225 | return sk_vsock(vsk); |
226 | } | 226 | } |
227 | } | 227 | } |
@@ -1670,6 +1670,8 @@ vsock_stream_recvmsg(struct kiocb *kiocb, | |||
1670 | vsk = vsock_sk(sk); | 1670 | vsk = vsock_sk(sk); |
1671 | err = 0; | 1671 | err = 0; |
1672 | 1672 | ||
1673 | msg->msg_namelen = 0; | ||
1674 | |||
1673 | lock_sock(sk); | 1675 | lock_sock(sk); |
1674 | 1676 | ||
1675 | if (sk->sk_state != SS_CONNECTED) { | 1677 | if (sk->sk_state != SS_CONNECTED) { |
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index a70ace83a153..5e04d3d96285 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c | |||
@@ -464,19 +464,16 @@ static struct sock *vmci_transport_get_pending( | |||
464 | struct vsock_sock *vlistener; | 464 | struct vsock_sock *vlistener; |
465 | struct vsock_sock *vpending; | 465 | struct vsock_sock *vpending; |
466 | struct sock *pending; | 466 | struct sock *pending; |
467 | struct sockaddr_vm src; | ||
468 | |||
469 | vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port); | ||
467 | 470 | ||
468 | vlistener = vsock_sk(listener); | 471 | vlistener = vsock_sk(listener); |
469 | 472 | ||
470 | list_for_each_entry(vpending, &vlistener->pending_links, | 473 | list_for_each_entry(vpending, &vlistener->pending_links, |
471 | pending_links) { | 474 | pending_links) { |
472 | struct sockaddr_vm src; | ||
473 | struct sockaddr_vm dst; | ||
474 | |||
475 | vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port); | ||
476 | vsock_addr_init(&dst, pkt->dg.dst.context, pkt->dst_port); | ||
477 | |||
478 | if (vsock_addr_equals_addr(&src, &vpending->remote_addr) && | 475 | if (vsock_addr_equals_addr(&src, &vpending->remote_addr) && |
479 | vsock_addr_equals_addr(&dst, &vpending->local_addr)) { | 476 | pkt->dst_port == vpending->local_addr.svm_port) { |
480 | pending = sk_vsock(vpending); | 477 | pending = sk_vsock(vpending); |
481 | sock_hold(pending); | 478 | sock_hold(pending); |
482 | goto found; | 479 | goto found; |
@@ -739,10 +736,15 @@ static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg) | |||
739 | */ | 736 | */ |
740 | bh_lock_sock(sk); | 737 | bh_lock_sock(sk); |
741 | 738 | ||
742 | if (!sock_owned_by_user(sk) && sk->sk_state == SS_CONNECTED) | 739 | if (!sock_owned_by_user(sk)) { |
743 | vmci_trans(vsk)->notify_ops->handle_notify_pkt( | 740 | /* The local context ID may be out of date, update it. */ |
744 | sk, pkt, true, &dst, &src, | 741 | vsk->local_addr.svm_cid = dst.svm_cid; |
745 | &bh_process_pkt); | 742 | |
743 | if (sk->sk_state == SS_CONNECTED) | ||
744 | vmci_trans(vsk)->notify_ops->handle_notify_pkt( | ||
745 | sk, pkt, true, &dst, &src, | ||
746 | &bh_process_pkt); | ||
747 | } | ||
746 | 748 | ||
747 | bh_unlock_sock(sk); | 749 | bh_unlock_sock(sk); |
748 | 750 | ||
@@ -902,6 +904,9 @@ static void vmci_transport_recv_pkt_work(struct work_struct *work) | |||
902 | 904 | ||
903 | lock_sock(sk); | 905 | lock_sock(sk); |
904 | 906 | ||
907 | /* The local context ID may be out of date. */ | ||
908 | vsock_sk(sk)->local_addr.svm_cid = pkt->dg.dst.context; | ||
909 | |||
905 | switch (sk->sk_state) { | 910 | switch (sk->sk_state) { |
906 | case SS_LISTEN: | 911 | case SS_LISTEN: |
907 | vmci_transport_recv_listen(sk, pkt); | 912 | vmci_transport_recv_listen(sk, pkt); |
@@ -958,6 +963,10 @@ static int vmci_transport_recv_listen(struct sock *sk, | |||
958 | pending = vmci_transport_get_pending(sk, pkt); | 963 | pending = vmci_transport_get_pending(sk, pkt); |
959 | if (pending) { | 964 | if (pending) { |
960 | lock_sock(pending); | 965 | lock_sock(pending); |
966 | |||
967 | /* The local context ID may be out of date. */ | ||
968 | vsock_sk(pending)->local_addr.svm_cid = pkt->dg.dst.context; | ||
969 | |||
961 | switch (pending->sk_state) { | 970 | switch (pending->sk_state) { |
962 | case SS_CONNECTING: | 971 | case SS_CONNECTING: |
963 | err = vmci_transport_recv_connecting_server(sk, | 972 | err = vmci_transport_recv_connecting_server(sk, |
@@ -1727,6 +1736,8 @@ static int vmci_transport_dgram_dequeue(struct kiocb *kiocb, | |||
1727 | if (flags & MSG_OOB || flags & MSG_ERRQUEUE) | 1736 | if (flags & MSG_OOB || flags & MSG_ERRQUEUE) |
1728 | return -EOPNOTSUPP; | 1737 | return -EOPNOTSUPP; |
1729 | 1738 | ||
1739 | msg->msg_namelen = 0; | ||
1740 | |||
1730 | /* Retrieve the head sk_buff from the socket's receive queue. */ | 1741 | /* Retrieve the head sk_buff from the socket's receive queue. */ |
1731 | err = 0; | 1742 | err = 0; |
1732 | skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err); | 1743 | skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err); |
@@ -1759,7 +1770,6 @@ static int vmci_transport_dgram_dequeue(struct kiocb *kiocb, | |||
1759 | if (err) | 1770 | if (err) |
1760 | goto out; | 1771 | goto out; |
1761 | 1772 | ||
1762 | msg->msg_namelen = 0; | ||
1763 | if (msg->msg_name) { | 1773 | if (msg->msg_name) { |
1764 | struct sockaddr_vm *vm_addr; | 1774 | struct sockaddr_vm *vm_addr; |
1765 | 1775 | ||
diff --git a/net/vmw_vsock/vsock_addr.c b/net/vmw_vsock/vsock_addr.c index b7df1aea7c59..ec2611b4ea0e 100644 --- a/net/vmw_vsock/vsock_addr.c +++ b/net/vmw_vsock/vsock_addr.c | |||
@@ -64,16 +64,6 @@ bool vsock_addr_equals_addr(const struct sockaddr_vm *addr, | |||
64 | } | 64 | } |
65 | EXPORT_SYMBOL_GPL(vsock_addr_equals_addr); | 65 | EXPORT_SYMBOL_GPL(vsock_addr_equals_addr); |
66 | 66 | ||
67 | bool vsock_addr_equals_addr_any(const struct sockaddr_vm *addr, | ||
68 | const struct sockaddr_vm *other) | ||
69 | { | ||
70 | return (addr->svm_cid == VMADDR_CID_ANY || | ||
71 | other->svm_cid == VMADDR_CID_ANY || | ||
72 | addr->svm_cid == other->svm_cid) && | ||
73 | addr->svm_port == other->svm_port; | ||
74 | } | ||
75 | EXPORT_SYMBOL_GPL(vsock_addr_equals_addr_any); | ||
76 | |||
77 | int vsock_addr_cast(const struct sockaddr *addr, | 67 | int vsock_addr_cast(const struct sockaddr *addr, |
78 | size_t len, struct sockaddr_vm **out_addr) | 68 | size_t len, struct sockaddr_vm **out_addr) |
79 | { | 69 | { |
diff --git a/net/vmw_vsock/vsock_addr.h b/net/vmw_vsock/vsock_addr.h index cdfbcefdf843..9ccd5316eac0 100644 --- a/net/vmw_vsock/vsock_addr.h +++ b/net/vmw_vsock/vsock_addr.h | |||
@@ -24,8 +24,6 @@ bool vsock_addr_bound(const struct sockaddr_vm *addr); | |||
24 | void vsock_addr_unbind(struct sockaddr_vm *addr); | 24 | void vsock_addr_unbind(struct sockaddr_vm *addr); |
25 | bool vsock_addr_equals_addr(const struct sockaddr_vm *addr, | 25 | bool vsock_addr_equals_addr(const struct sockaddr_vm *addr, |
26 | const struct sockaddr_vm *other); | 26 | const struct sockaddr_vm *other); |
27 | bool vsock_addr_equals_addr_any(const struct sockaddr_vm *addr, | ||
28 | const struct sockaddr_vm *other); | ||
29 | int vsock_addr_cast(const struct sockaddr *addr, size_t len, | 27 | int vsock_addr_cast(const struct sockaddr *addr, size_t len, |
30 | struct sockaddr_vm **out_addr); | 28 | struct sockaddr_vm **out_addr); |
31 | 29 | ||
diff --git a/net/wireless/core.c b/net/wireless/core.c index ea4155fe9733..6ddf74f0ae1e 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -212,6 +212,39 @@ static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data) | |||
212 | rdev_rfkill_poll(rdev); | 212 | rdev_rfkill_poll(rdev); |
213 | } | 213 | } |
214 | 214 | ||
215 | void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, | ||
216 | struct wireless_dev *wdev) | ||
217 | { | ||
218 | lockdep_assert_held(&rdev->devlist_mtx); | ||
219 | lockdep_assert_held(&rdev->sched_scan_mtx); | ||
220 | |||
221 | if (WARN_ON(wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)) | ||
222 | return; | ||
223 | |||
224 | if (!wdev->p2p_started) | ||
225 | return; | ||
226 | |||
227 | rdev_stop_p2p_device(rdev, wdev); | ||
228 | wdev->p2p_started = false; | ||
229 | |||
230 | rdev->opencount--; | ||
231 | |||
232 | if (rdev->scan_req && rdev->scan_req->wdev == wdev) { | ||
233 | bool busy = work_busy(&rdev->scan_done_wk); | ||
234 | |||
235 | /* | ||
236 | * If the work isn't pending or running (in which case it would | ||
237 | * be waiting for the lock we hold) the driver didn't properly | ||
238 | * cancel the scan when the interface was removed. In this case | ||
239 | * warn and leak the scan request object to not crash later. | ||
240 | */ | ||
241 | WARN_ON(!busy); | ||
242 | |||
243 | rdev->scan_req->aborted = true; | ||
244 | ___cfg80211_scan_done(rdev, !busy); | ||
245 | } | ||
246 | } | ||
247 | |||
215 | static int cfg80211_rfkill_set_block(void *data, bool blocked) | 248 | static int cfg80211_rfkill_set_block(void *data, bool blocked) |
216 | { | 249 | { |
217 | struct cfg80211_registered_device *rdev = data; | 250 | struct cfg80211_registered_device *rdev = data; |
@@ -221,7 +254,8 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked) | |||
221 | return 0; | 254 | return 0; |
222 | 255 | ||
223 | rtnl_lock(); | 256 | rtnl_lock(); |
224 | mutex_lock(&rdev->devlist_mtx); | 257 | |
258 | /* read-only iteration need not hold the devlist_mtx */ | ||
225 | 259 | ||
226 | list_for_each_entry(wdev, &rdev->wdev_list, list) { | 260 | list_for_each_entry(wdev, &rdev->wdev_list, list) { |
227 | if (wdev->netdev) { | 261 | if (wdev->netdev) { |
@@ -231,18 +265,18 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked) | |||
231 | /* otherwise, check iftype */ | 265 | /* otherwise, check iftype */ |
232 | switch (wdev->iftype) { | 266 | switch (wdev->iftype) { |
233 | case NL80211_IFTYPE_P2P_DEVICE: | 267 | case NL80211_IFTYPE_P2P_DEVICE: |
234 | if (!wdev->p2p_started) | 268 | /* but this requires it */ |
235 | break; | 269 | mutex_lock(&rdev->devlist_mtx); |
236 | rdev_stop_p2p_device(rdev, wdev); | 270 | mutex_lock(&rdev->sched_scan_mtx); |
237 | wdev->p2p_started = false; | 271 | cfg80211_stop_p2p_device(rdev, wdev); |
238 | rdev->opencount--; | 272 | mutex_unlock(&rdev->sched_scan_mtx); |
273 | mutex_unlock(&rdev->devlist_mtx); | ||
239 | break; | 274 | break; |
240 | default: | 275 | default: |
241 | break; | 276 | break; |
242 | } | 277 | } |
243 | } | 278 | } |
244 | 279 | ||
245 | mutex_unlock(&rdev->devlist_mtx); | ||
246 | rtnl_unlock(); | 280 | rtnl_unlock(); |
247 | 281 | ||
248 | return 0; | 282 | return 0; |
@@ -745,17 +779,13 @@ static void wdev_cleanup_work(struct work_struct *work) | |||
745 | wdev = container_of(work, struct wireless_dev, cleanup_work); | 779 | wdev = container_of(work, struct wireless_dev, cleanup_work); |
746 | rdev = wiphy_to_dev(wdev->wiphy); | 780 | rdev = wiphy_to_dev(wdev->wiphy); |
747 | 781 | ||
748 | cfg80211_lock_rdev(rdev); | 782 | mutex_lock(&rdev->sched_scan_mtx); |
749 | 783 | ||
750 | if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) { | 784 | if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) { |
751 | rdev->scan_req->aborted = true; | 785 | rdev->scan_req->aborted = true; |
752 | ___cfg80211_scan_done(rdev, true); | 786 | ___cfg80211_scan_done(rdev, true); |
753 | } | 787 | } |
754 | 788 | ||
755 | cfg80211_unlock_rdev(rdev); | ||
756 | |||
757 | mutex_lock(&rdev->sched_scan_mtx); | ||
758 | |||
759 | if (WARN_ON(rdev->sched_scan_req && | 789 | if (WARN_ON(rdev->sched_scan_req && |
760 | rdev->sched_scan_req->dev == wdev->netdev)) { | 790 | rdev->sched_scan_req->dev == wdev->netdev)) { |
761 | __cfg80211_stop_sched_scan(rdev, false); | 791 | __cfg80211_stop_sched_scan(rdev, false); |
@@ -781,21 +811,19 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev) | |||
781 | return; | 811 | return; |
782 | 812 | ||
783 | mutex_lock(&rdev->devlist_mtx); | 813 | mutex_lock(&rdev->devlist_mtx); |
814 | mutex_lock(&rdev->sched_scan_mtx); | ||
784 | list_del_rcu(&wdev->list); | 815 | list_del_rcu(&wdev->list); |
785 | rdev->devlist_generation++; | 816 | rdev->devlist_generation++; |
786 | 817 | ||
787 | switch (wdev->iftype) { | 818 | switch (wdev->iftype) { |
788 | case NL80211_IFTYPE_P2P_DEVICE: | 819 | case NL80211_IFTYPE_P2P_DEVICE: |
789 | if (!wdev->p2p_started) | 820 | cfg80211_stop_p2p_device(rdev, wdev); |
790 | break; | ||
791 | rdev_stop_p2p_device(rdev, wdev); | ||
792 | wdev->p2p_started = false; | ||
793 | rdev->opencount--; | ||
794 | break; | 821 | break; |
795 | default: | 822 | default: |
796 | WARN_ON_ONCE(1); | 823 | WARN_ON_ONCE(1); |
797 | break; | 824 | break; |
798 | } | 825 | } |
826 | mutex_unlock(&rdev->sched_scan_mtx); | ||
799 | mutex_unlock(&rdev->devlist_mtx); | 827 | mutex_unlock(&rdev->devlist_mtx); |
800 | } | 828 | } |
801 | EXPORT_SYMBOL(cfg80211_unregister_wdev); | 829 | EXPORT_SYMBOL(cfg80211_unregister_wdev); |
@@ -936,6 +964,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, | |||
936 | cfg80211_update_iface_num(rdev, wdev->iftype, 1); | 964 | cfg80211_update_iface_num(rdev, wdev->iftype, 1); |
937 | cfg80211_lock_rdev(rdev); | 965 | cfg80211_lock_rdev(rdev); |
938 | mutex_lock(&rdev->devlist_mtx); | 966 | mutex_lock(&rdev->devlist_mtx); |
967 | mutex_lock(&rdev->sched_scan_mtx); | ||
939 | wdev_lock(wdev); | 968 | wdev_lock(wdev); |
940 | switch (wdev->iftype) { | 969 | switch (wdev->iftype) { |
941 | #ifdef CONFIG_CFG80211_WEXT | 970 | #ifdef CONFIG_CFG80211_WEXT |
@@ -967,6 +996,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, | |||
967 | break; | 996 | break; |
968 | } | 997 | } |
969 | wdev_unlock(wdev); | 998 | wdev_unlock(wdev); |
999 | mutex_unlock(&rdev->sched_scan_mtx); | ||
970 | rdev->opencount++; | 1000 | rdev->opencount++; |
971 | mutex_unlock(&rdev->devlist_mtx); | 1001 | mutex_unlock(&rdev->devlist_mtx); |
972 | cfg80211_unlock_rdev(rdev); | 1002 | cfg80211_unlock_rdev(rdev); |
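
The wireless/core.c hunks fold the duplicated P2P-device teardown into cfg80211_stop_p2p_device(), which is documented (and lockdep-asserted) to run with devlist_mtx and sched_scan_mtx held. The following is a loose userspace analogue of that pattern (pthreads instead of kernel mutexes; the struct and function names are made up), showing an idempotent "stop if started" helper that callers invoke under one lock:

/*
 * Illustrative userspace sketch: duplicated "stop if started" teardown is
 * folded into one helper that callers invoke with the relevant lock already
 * held, so the state flag and counters can only change under that lock.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct device {
	pthread_mutex_t lock;
	bool started;
	int opencount;
};

/* Caller must hold dev->lock (the kernel version asserts this with lockdep). */
static void device_stop_locked(struct device *dev)
{
	if (!dev->started)
		return;		/* idempotent: safe to call from every teardown path */

	dev->started = false;
	dev->opencount--;
	printf("stopped, opencount=%d\n", dev->opencount);
}

int main(void)
{
	struct device dev = { PTHREAD_MUTEX_INITIALIZER, true, 1 };

	pthread_mutex_lock(&dev.lock);
	device_stop_locked(&dev);	/* e.g. the unregister path */
	device_stop_locked(&dev);	/* e.g. the rfkill path: second call is a no-op */
	pthread_mutex_unlock(&dev.lock);
	return 0;
}
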
diff --git a/net/wireless/core.h b/net/wireless/core.h index 3aec0e429d8a..5845c2b37aa8 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h | |||
@@ -503,6 +503,9 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev, | |||
503 | void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev, | 503 | void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev, |
504 | enum nl80211_iftype iftype, int num); | 504 | enum nl80211_iftype iftype, int num); |
505 | 505 | ||
506 | void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, | ||
507 | struct wireless_dev *wdev); | ||
508 | |||
506 | #define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10 | 509 | #define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10 |
507 | 510 | ||
508 | #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS | 511 | #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index d44ab216c0ec..58e13a8c95f9 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -4702,14 +4702,19 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) | |||
4702 | if (!rdev->ops->scan) | 4702 | if (!rdev->ops->scan) |
4703 | return -EOPNOTSUPP; | 4703 | return -EOPNOTSUPP; |
4704 | 4704 | ||
4705 | if (rdev->scan_req) | 4705 | mutex_lock(&rdev->sched_scan_mtx); |
4706 | return -EBUSY; | 4706 | if (rdev->scan_req) { |
4707 | err = -EBUSY; | ||
4708 | goto unlock; | ||
4709 | } | ||
4707 | 4710 | ||
4708 | if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { | 4711 | if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { |
4709 | n_channels = validate_scan_freqs( | 4712 | n_channels = validate_scan_freqs( |
4710 | info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]); | 4713 | info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]); |
4711 | if (!n_channels) | 4714 | if (!n_channels) { |
4712 | return -EINVAL; | 4715 | err = -EINVAL; |
4716 | goto unlock; | ||
4717 | } | ||
4713 | } else { | 4718 | } else { |
4714 | enum ieee80211_band band; | 4719 | enum ieee80211_band band; |
4715 | n_channels = 0; | 4720 | n_channels = 0; |
@@ -4723,23 +4728,29 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) | |||
4723 | nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) | 4728 | nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) |
4724 | n_ssids++; | 4729 | n_ssids++; |
4725 | 4730 | ||
4726 | if (n_ssids > wiphy->max_scan_ssids) | 4731 | if (n_ssids > wiphy->max_scan_ssids) { |
4727 | return -EINVAL; | 4732 | err = -EINVAL; |
4733 | goto unlock; | ||
4734 | } | ||
4728 | 4735 | ||
4729 | if (info->attrs[NL80211_ATTR_IE]) | 4736 | if (info->attrs[NL80211_ATTR_IE]) |
4730 | ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); | 4737 | ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); |
4731 | else | 4738 | else |
4732 | ie_len = 0; | 4739 | ie_len = 0; |
4733 | 4740 | ||
4734 | if (ie_len > wiphy->max_scan_ie_len) | 4741 | if (ie_len > wiphy->max_scan_ie_len) { |
4735 | return -EINVAL; | 4742 | err = -EINVAL; |
4743 | goto unlock; | ||
4744 | } | ||
4736 | 4745 | ||
4737 | request = kzalloc(sizeof(*request) | 4746 | request = kzalloc(sizeof(*request) |
4738 | + sizeof(*request->ssids) * n_ssids | 4747 | + sizeof(*request->ssids) * n_ssids |
4739 | + sizeof(*request->channels) * n_channels | 4748 | + sizeof(*request->channels) * n_channels |
4740 | + ie_len, GFP_KERNEL); | 4749 | + ie_len, GFP_KERNEL); |
4741 | if (!request) | 4750 | if (!request) { |
4742 | return -ENOMEM; | 4751 | err = -ENOMEM; |
4752 | goto unlock; | ||
4753 | } | ||
4743 | 4754 | ||
4744 | if (n_ssids) | 4755 | if (n_ssids) |
4745 | request->ssids = (void *)&request->channels[n_channels]; | 4756 | request->ssids = (void *)&request->channels[n_channels]; |
@@ -4876,6 +4887,8 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) | |||
4876 | kfree(request); | 4887 | kfree(request); |
4877 | } | 4888 | } |
4878 | 4889 | ||
4890 | unlock: | ||
4891 | mutex_unlock(&rdev->sched_scan_mtx); | ||
4879 | return err; | 4892 | return err; |
4880 | } | 4893 | } |
4881 | 4894 | ||
@@ -7749,20 +7762,9 @@ static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info) | |||
7749 | if (!rdev->ops->stop_p2p_device) | 7762 | if (!rdev->ops->stop_p2p_device) |
7750 | return -EOPNOTSUPP; | 7763 | return -EOPNOTSUPP; |
7751 | 7764 | ||
7752 | if (!wdev->p2p_started) | 7765 | mutex_lock(&rdev->sched_scan_mtx); |
7753 | return 0; | 7766 | cfg80211_stop_p2p_device(rdev, wdev); |
7754 | 7767 | mutex_unlock(&rdev->sched_scan_mtx); | |
7755 | rdev_stop_p2p_device(rdev, wdev); | ||
7756 | wdev->p2p_started = false; | ||
7757 | |||
7758 | mutex_lock(&rdev->devlist_mtx); | ||
7759 | rdev->opencount--; | ||
7760 | mutex_unlock(&rdev->devlist_mtx); | ||
7761 | |||
7762 | if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) { | ||
7763 | rdev->scan_req->aborted = true; | ||
7764 | ___cfg80211_scan_done(rdev, true); | ||
7765 | } | ||
7766 | 7768 | ||
7767 | return 0; | 7769 | return 0; |
7768 | } | 7770 | } |
@@ -8486,7 +8488,7 @@ static int nl80211_add_scan_req(struct sk_buff *msg, | |||
8486 | struct nlattr *nest; | 8488 | struct nlattr *nest; |
8487 | int i; | 8489 | int i; |
8488 | 8490 | ||
8489 | ASSERT_RDEV_LOCK(rdev); | 8491 | lockdep_assert_held(&rdev->sched_scan_mtx); |
8490 | 8492 | ||
8491 | if (WARN_ON(!req)) | 8493 | if (WARN_ON(!req)) |
8492 | return 0; | 8494 | return 0; |
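
In nl80211_trigger_scan() every early "return -E..." becomes "err = -E...; goto unlock;" once sched_scan_mtx is taken at the top of the function. A minimal userspace sketch of that single-exit style (not from the patch; trigger_scan() and scan_in_progress are invented names) looks like this:

/*
 * Illustrative userspace sketch: once a function takes a lock near the top,
 * every early error return becomes "set err, goto unlock" so there is exactly
 * one place that drops the lock.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int scan_in_progress;

static int trigger_scan(int n_channels)
{
	int err = 0;
	char *request = NULL;

	pthread_mutex_lock(&state_lock);

	if (scan_in_progress) {
		err = -EBUSY;
		goto unlock;
	}
	if (n_channels <= 0) {
		err = -EINVAL;
		goto unlock;
	}

	request = calloc(n_channels, 64);
	if (!request) {
		err = -ENOMEM;
		goto unlock;
	}

	scan_in_progress = 1;
	printf("scan started on %d channels\n", n_channels);
	free(request);

unlock:
	pthread_mutex_unlock(&state_lock);	/* single exit: lock always dropped */
	return err;
}

int main(void)
{
	printf("first call:  %d\n", trigger_scan(3));
	printf("second call: %d\n", trigger_scan(3));	/* -EBUSY */
	return 0;
}
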
diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 674aadca0079..fd99ea495b7e 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c | |||
@@ -169,7 +169,7 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak) | |||
169 | union iwreq_data wrqu; | 169 | union iwreq_data wrqu; |
170 | #endif | 170 | #endif |
171 | 171 | ||
172 | ASSERT_RDEV_LOCK(rdev); | 172 | lockdep_assert_held(&rdev->sched_scan_mtx); |
173 | 173 | ||
174 | request = rdev->scan_req; | 174 | request = rdev->scan_req; |
175 | 175 | ||
@@ -230,9 +230,9 @@ void __cfg80211_scan_done(struct work_struct *wk) | |||
230 | rdev = container_of(wk, struct cfg80211_registered_device, | 230 | rdev = container_of(wk, struct cfg80211_registered_device, |
231 | scan_done_wk); | 231 | scan_done_wk); |
232 | 232 | ||
233 | cfg80211_lock_rdev(rdev); | 233 | mutex_lock(&rdev->sched_scan_mtx); |
234 | ___cfg80211_scan_done(rdev, false); | 234 | ___cfg80211_scan_done(rdev, false); |
235 | cfg80211_unlock_rdev(rdev); | 235 | mutex_unlock(&rdev->sched_scan_mtx); |
236 | } | 236 | } |
237 | 237 | ||
238 | void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted) | 238 | void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted) |
@@ -698,11 +698,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev, | |||
698 | found = rb_find_bss(dev, tmp, BSS_CMP_REGULAR); | 698 | found = rb_find_bss(dev, tmp, BSS_CMP_REGULAR); |
699 | 699 | ||
700 | if (found) { | 700 | if (found) { |
701 | found->pub.beacon_interval = tmp->pub.beacon_interval; | ||
702 | found->pub.signal = tmp->pub.signal; | ||
703 | found->pub.capability = tmp->pub.capability; | ||
704 | found->ts = tmp->ts; | ||
705 | |||
706 | /* Update IEs */ | 701 | /* Update IEs */ |
707 | if (rcu_access_pointer(tmp->pub.proberesp_ies)) { | 702 | if (rcu_access_pointer(tmp->pub.proberesp_ies)) { |
708 | const struct cfg80211_bss_ies *old; | 703 | const struct cfg80211_bss_ies *old; |
@@ -723,6 +718,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev, | |||
723 | 718 | ||
724 | if (found->pub.hidden_beacon_bss && | 719 | if (found->pub.hidden_beacon_bss && |
725 | !list_empty(&found->hidden_list)) { | 720 | !list_empty(&found->hidden_list)) { |
721 | const struct cfg80211_bss_ies *f; | ||
722 | |||
726 | /* | 723 | /* |
727 | * The found BSS struct is one of the probe | 724 | * The found BSS struct is one of the probe |
728 | * response members of a group, but we're | 725 | * response members of a group, but we're |
@@ -732,6 +729,10 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev, | |||
732 | * SSID to showing it, which is confusing so | 729 | * SSID to showing it, which is confusing so |
733 | * drop this information. | 730 | * drop this information. |
734 | */ | 731 | */ |
732 | |||
733 | f = rcu_access_pointer(tmp->pub.beacon_ies); | ||
734 | kfree_rcu((struct cfg80211_bss_ies *)f, | ||
735 | rcu_head); | ||
735 | goto drop; | 736 | goto drop; |
736 | } | 737 | } |
737 | 738 | ||
@@ -761,6 +762,11 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev, | |||
761 | kfree_rcu((struct cfg80211_bss_ies *)old, | 762 | kfree_rcu((struct cfg80211_bss_ies *)old, |
762 | rcu_head); | 763 | rcu_head); |
763 | } | 764 | } |
765 | |||
766 | found->pub.beacon_interval = tmp->pub.beacon_interval; | ||
767 | found->pub.signal = tmp->pub.signal; | ||
768 | found->pub.capability = tmp->pub.capability; | ||
769 | found->ts = tmp->ts; | ||
764 | } else { | 770 | } else { |
765 | struct cfg80211_internal_bss *new; | 771 | struct cfg80211_internal_bss *new; |
766 | struct cfg80211_internal_bss *hidden; | 772 | struct cfg80211_internal_bss *hidden; |
@@ -1056,6 +1062,7 @@ int cfg80211_wext_siwscan(struct net_device *dev, | |||
1056 | if (IS_ERR(rdev)) | 1062 | if (IS_ERR(rdev)) |
1057 | return PTR_ERR(rdev); | 1063 | return PTR_ERR(rdev); |
1058 | 1064 | ||
1065 | mutex_lock(&rdev->sched_scan_mtx); | ||
1059 | if (rdev->scan_req) { | 1066 | if (rdev->scan_req) { |
1060 | err = -EBUSY; | 1067 | err = -EBUSY; |
1061 | goto out; | 1068 | goto out; |
@@ -1162,6 +1169,7 @@ int cfg80211_wext_siwscan(struct net_device *dev, | |||
1162 | dev_hold(dev); | 1169 | dev_hold(dev); |
1163 | } | 1170 | } |
1164 | out: | 1171 | out: |
1172 | mutex_unlock(&rdev->sched_scan_mtx); | ||
1165 | kfree(creq); | 1173 | kfree(creq); |
1166 | cfg80211_unlock_rdev(rdev); | 1174 | cfg80211_unlock_rdev(rdev); |
1167 | return err; | 1175 | return err; |
diff --git a/net/wireless/sme.c b/net/wireless/sme.c index f432bd3755b1..482c70e70127 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c | |||
@@ -85,6 +85,7 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev) | |||
85 | ASSERT_RTNL(); | 85 | ASSERT_RTNL(); |
86 | ASSERT_RDEV_LOCK(rdev); | 86 | ASSERT_RDEV_LOCK(rdev); |
87 | ASSERT_WDEV_LOCK(wdev); | 87 | ASSERT_WDEV_LOCK(wdev); |
88 | lockdep_assert_held(&rdev->sched_scan_mtx); | ||
88 | 89 | ||
89 | if (rdev->scan_req) | 90 | if (rdev->scan_req) |
90 | return -EBUSY; | 91 | return -EBUSY; |
@@ -223,6 +224,7 @@ void cfg80211_conn_work(struct work_struct *work) | |||
223 | rtnl_lock(); | 224 | rtnl_lock(); |
224 | cfg80211_lock_rdev(rdev); | 225 | cfg80211_lock_rdev(rdev); |
225 | mutex_lock(&rdev->devlist_mtx); | 226 | mutex_lock(&rdev->devlist_mtx); |
227 | mutex_lock(&rdev->sched_scan_mtx); | ||
226 | 228 | ||
227 | list_for_each_entry(wdev, &rdev->wdev_list, list) { | 229 | list_for_each_entry(wdev, &rdev->wdev_list, list) { |
228 | wdev_lock(wdev); | 230 | wdev_lock(wdev); |
@@ -247,6 +249,7 @@ void cfg80211_conn_work(struct work_struct *work) | |||
247 | wdev_unlock(wdev); | 249 | wdev_unlock(wdev); |
248 | } | 250 | } |
249 | 251 | ||
252 | mutex_unlock(&rdev->sched_scan_mtx); | ||
250 | mutex_unlock(&rdev->devlist_mtx); | 253 | mutex_unlock(&rdev->devlist_mtx); |
251 | cfg80211_unlock_rdev(rdev); | 254 | cfg80211_unlock_rdev(rdev); |
252 | rtnl_unlock(); | 255 | rtnl_unlock(); |
@@ -320,11 +323,9 @@ void cfg80211_sme_scan_done(struct net_device *dev) | |||
320 | { | 323 | { |
321 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 324 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
322 | 325 | ||
323 | mutex_lock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx); | ||
324 | wdev_lock(wdev); | 326 | wdev_lock(wdev); |
325 | __cfg80211_sme_scan_done(dev); | 327 | __cfg80211_sme_scan_done(dev); |
326 | wdev_unlock(wdev); | 328 | wdev_unlock(wdev); |
327 | mutex_unlock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx); | ||
328 | } | 329 | } |
329 | 330 | ||
330 | void cfg80211_sme_rx_auth(struct net_device *dev, | 331 | void cfg80211_sme_rx_auth(struct net_device *dev, |
@@ -924,9 +925,12 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev, | |||
924 | int err; | 925 | int err; |
925 | 926 | ||
926 | mutex_lock(&rdev->devlist_mtx); | 927 | mutex_lock(&rdev->devlist_mtx); |
928 | /* might request scan - scan_mtx -> wdev_mtx dependency */ | ||
929 | mutex_lock(&rdev->sched_scan_mtx); | ||
927 | wdev_lock(dev->ieee80211_ptr); | 930 | wdev_lock(dev->ieee80211_ptr); |
928 | err = __cfg80211_connect(rdev, dev, connect, connkeys, NULL); | 931 | err = __cfg80211_connect(rdev, dev, connect, connkeys, NULL); |
929 | wdev_unlock(dev->ieee80211_ptr); | 932 | wdev_unlock(dev->ieee80211_ptr); |
933 | mutex_unlock(&rdev->sched_scan_mtx); | ||
930 | mutex_unlock(&rdev->devlist_mtx); | 934 | mutex_unlock(&rdev->devlist_mtx); |
931 | 935 | ||
932 | return err; | 936 | return err; |
diff --git a/net/wireless/trace.h b/net/wireless/trace.h index b7a531380e19..7586de77a2f8 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h | |||
@@ -27,7 +27,8 @@ | |||
27 | #define WIPHY_PR_ARG __entry->wiphy_name | 27 | #define WIPHY_PR_ARG __entry->wiphy_name |
28 | 28 | ||
29 | #define WDEV_ENTRY __field(u32, id) | 29 | #define WDEV_ENTRY __field(u32, id) |
30 | #define WDEV_ASSIGN (__entry->id) = (wdev ? wdev->identifier : 0) | 30 | #define WDEV_ASSIGN (__entry->id) = (!IS_ERR_OR_NULL(wdev) \ |
31 | ? wdev->identifier : 0) | ||
31 | #define WDEV_PR_FMT "wdev(%u)" | 32 | #define WDEV_PR_FMT "wdev(%u)" |
32 | #define WDEV_PR_ARG (__entry->id) | 33 | #define WDEV_PR_ARG (__entry->id) |
33 | 34 | ||
@@ -1778,7 +1779,7 @@ TRACE_EVENT(rdev_set_mac_acl, | |||
1778 | ), | 1779 | ), |
1779 | TP_fast_assign( | 1780 | TP_fast_assign( |
1780 | WIPHY_ASSIGN; | 1781 | WIPHY_ASSIGN; |
1781 | WIPHY_ASSIGN; | 1782 | NETDEV_ASSIGN; |
1782 | __entry->acl_policy = params->acl_policy; | 1783 | __entry->acl_policy = params->acl_policy; |
1783 | ), | 1784 | ), |
1784 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", acl policy: %d", | 1785 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", acl policy: %d", |
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c index fb9622f6d99c..e79cb5c0655a 100644 --- a/net/wireless/wext-sme.c +++ b/net/wireless/wext-sme.c | |||
@@ -89,6 +89,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev, | |||
89 | 89 | ||
90 | cfg80211_lock_rdev(rdev); | 90 | cfg80211_lock_rdev(rdev); |
91 | mutex_lock(&rdev->devlist_mtx); | 91 | mutex_lock(&rdev->devlist_mtx); |
92 | mutex_lock(&rdev->sched_scan_mtx); | ||
92 | wdev_lock(wdev); | 93 | wdev_lock(wdev); |
93 | 94 | ||
94 | if (wdev->sme_state != CFG80211_SME_IDLE) { | 95 | if (wdev->sme_state != CFG80211_SME_IDLE) { |
@@ -135,6 +136,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev, | |||
135 | err = cfg80211_mgd_wext_connect(rdev, wdev); | 136 | err = cfg80211_mgd_wext_connect(rdev, wdev); |
136 | out: | 137 | out: |
137 | wdev_unlock(wdev); | 138 | wdev_unlock(wdev); |
139 | mutex_unlock(&rdev->sched_scan_mtx); | ||
138 | mutex_unlock(&rdev->devlist_mtx); | 140 | mutex_unlock(&rdev->devlist_mtx); |
139 | cfg80211_unlock_rdev(rdev); | 141 | cfg80211_unlock_rdev(rdev); |
140 | return err; | 142 | return err; |
@@ -190,6 +192,7 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev, | |||
190 | 192 | ||
191 | cfg80211_lock_rdev(rdev); | 193 | cfg80211_lock_rdev(rdev); |
192 | mutex_lock(&rdev->devlist_mtx); | 194 | mutex_lock(&rdev->devlist_mtx); |
195 | mutex_lock(&rdev->sched_scan_mtx); | ||
193 | wdev_lock(wdev); | 196 | wdev_lock(wdev); |
194 | 197 | ||
195 | err = 0; | 198 | err = 0; |
@@ -223,6 +226,7 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev, | |||
223 | err = cfg80211_mgd_wext_connect(rdev, wdev); | 226 | err = cfg80211_mgd_wext_connect(rdev, wdev); |
224 | out: | 227 | out: |
225 | wdev_unlock(wdev); | 228 | wdev_unlock(wdev); |
229 | mutex_unlock(&rdev->sched_scan_mtx); | ||
226 | mutex_unlock(&rdev->devlist_mtx); | 230 | mutex_unlock(&rdev->devlist_mtx); |
227 | cfg80211_unlock_rdev(rdev); | 231 | cfg80211_unlock_rdev(rdev); |
228 | return err; | 232 | return err; |
@@ -285,6 +289,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev, | |||
285 | 289 | ||
286 | cfg80211_lock_rdev(rdev); | 290 | cfg80211_lock_rdev(rdev); |
287 | mutex_lock(&rdev->devlist_mtx); | 291 | mutex_lock(&rdev->devlist_mtx); |
292 | mutex_lock(&rdev->sched_scan_mtx); | ||
288 | wdev_lock(wdev); | 293 | wdev_lock(wdev); |
289 | 294 | ||
290 | if (wdev->sme_state != CFG80211_SME_IDLE) { | 295 | if (wdev->sme_state != CFG80211_SME_IDLE) { |
@@ -313,6 +318,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev, | |||
313 | err = cfg80211_mgd_wext_connect(rdev, wdev); | 318 | err = cfg80211_mgd_wext_connect(rdev, wdev); |
314 | out: | 319 | out: |
315 | wdev_unlock(wdev); | 320 | wdev_unlock(wdev); |
321 | mutex_unlock(&rdev->sched_scan_mtx); | ||
316 | mutex_unlock(&rdev->devlist_mtx); | 322 | mutex_unlock(&rdev->devlist_mtx); |
317 | cfg80211_unlock_rdev(rdev); | 323 | cfg80211_unlock_rdev(rdev); |
318 | return err; | 324 | return err; |
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c index 35754cc8a9e5..8dafe6d3c6e4 100644 --- a/net/xfrm/xfrm_replay.c +++ b/net/xfrm/xfrm_replay.c | |||
@@ -334,6 +334,70 @@ static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event) | |||
334 | x->xflags &= ~XFRM_TIME_DEFER; | 334 | x->xflags &= ~XFRM_TIME_DEFER; |
335 | } | 335 | } |
336 | 336 | ||
337 | static void xfrm_replay_notify_esn(struct xfrm_state *x, int event) | ||
338 | { | ||
339 | u32 seq_diff, oseq_diff; | ||
340 | struct km_event c; | ||
341 | struct xfrm_replay_state_esn *replay_esn = x->replay_esn; | ||
342 | struct xfrm_replay_state_esn *preplay_esn = x->preplay_esn; | ||
343 | |||
344 | /* we send notify messages in case | ||
345 | * 1. we updated one of the sequence numbers, and the seqno difference | ||
346 | * is at least x->replay_maxdiff, in this case we also update the | ||
347 | * timeout of our timer function | ||
348 | * 2. if x->replay_maxage has elapsed since last update, | ||
349 | * and there were changes | ||
350 | * | ||
351 | * The state structure must be locked! | ||
352 | */ | ||
353 | |||
354 | switch (event) { | ||
355 | case XFRM_REPLAY_UPDATE: | ||
356 | if (!x->replay_maxdiff) | ||
357 | break; | ||
358 | |||
359 | if (replay_esn->seq_hi == preplay_esn->seq_hi) | ||
360 | seq_diff = replay_esn->seq - preplay_esn->seq; | ||
361 | else | ||
362 | seq_diff = ~preplay_esn->seq + replay_esn->seq + 1; | ||
363 | |||
364 | if (replay_esn->oseq_hi == preplay_esn->oseq_hi) | ||
365 | oseq_diff = replay_esn->oseq - preplay_esn->oseq; | ||
366 | else | ||
367 | oseq_diff = ~preplay_esn->oseq + replay_esn->oseq + 1; | ||
368 | |||
369 | if (seq_diff < x->replay_maxdiff && | ||
370 | oseq_diff < x->replay_maxdiff) { | ||
371 | |||
372 | if (x->xflags & XFRM_TIME_DEFER) | ||
373 | event = XFRM_REPLAY_TIMEOUT; | ||
374 | else | ||
375 | return; | ||
376 | } | ||
377 | |||
378 | break; | ||
379 | |||
380 | case XFRM_REPLAY_TIMEOUT: | ||
381 | if (memcmp(x->replay_esn, x->preplay_esn, | ||
382 | xfrm_replay_state_esn_len(replay_esn)) == 0) { | ||
383 | x->xflags |= XFRM_TIME_DEFER; | ||
384 | return; | ||
385 | } | ||
386 | |||
387 | break; | ||
388 | } | ||
389 | |||
390 | memcpy(x->preplay_esn, x->replay_esn, | ||
391 | xfrm_replay_state_esn_len(replay_esn)); | ||
392 | c.event = XFRM_MSG_NEWAE; | ||
393 | c.data.aevent = event; | ||
394 | km_state_notify(x, &c); | ||
395 | |||
396 | if (x->replay_maxage && | ||
397 | !mod_timer(&x->rtimer, jiffies + x->replay_maxage)) | ||
398 | x->xflags &= ~XFRM_TIME_DEFER; | ||
399 | } | ||
400 | |||
337 | static int xfrm_replay_overflow_esn(struct xfrm_state *x, struct sk_buff *skb) | 401 | static int xfrm_replay_overflow_esn(struct xfrm_state *x, struct sk_buff *skb) |
338 | { | 402 | { |
339 | int err = 0; | 403 | int err = 0; |
@@ -510,7 +574,7 @@ static struct xfrm_replay xfrm_replay_esn = { | |||
510 | .advance = xfrm_replay_advance_esn, | 574 | .advance = xfrm_replay_advance_esn, |
511 | .check = xfrm_replay_check_esn, | 575 | .check = xfrm_replay_check_esn, |
512 | .recheck = xfrm_replay_recheck_esn, | 576 | .recheck = xfrm_replay_recheck_esn, |
513 | .notify = xfrm_replay_notify_bmp, | 577 | .notify = xfrm_replay_notify_esn, |
514 | .overflow = xfrm_replay_overflow_esn, | 578 | .overflow = xfrm_replay_overflow_esn, |
515 | }; | 579 | }; |
516 | 580 | ||
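
The new xfrm_replay_notify_esn() measures how far the ESN sequence numbers advanced with "~preplay + replay + 1" when the high halves differ, which is simply unsigned subtraction modulo 2^32. A small standalone sketch (illustrative only; seq_advance() is not a kernel function) demonstrates that identity across a wraparound:

/*
 * Illustrative sketch: for unsigned 32-bit counters, ~old + cur + 1 has
 * exactly the same bits as cur - old, so the ESN notify code can measure how
 * far a sequence number advanced even after it wrapped.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t seq_advance(uint32_t old, uint32_t cur)
{
	return ~old + cur + 1;	/* equals cur - old modulo 2^32 */
}

int main(void)
{
	uint32_t before_wrap = 0xfffffff0u;	/* just below the 32-bit limit */
	uint32_t after_wrap  = 0x00000010u;	/* counter wrapped to a small value */

	printf("advance = %" PRIu32 "\n", seq_advance(before_wrap, after_wrap)); /* 32 */
	printf("plain   = %" PRIu32 "\n", after_wrap - before_wrap);             /* also 32 */
	return 0;
}
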
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index b28cc384a5bc..4de4bc48493b 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl | |||
@@ -3016,6 +3016,7 @@ sub process { | |||
3016 | $dstat !~ /^'X'$/ && # character constants | 3016 | $dstat !~ /^'X'$/ && # character constants |
3017 | $dstat !~ /$exceptions/ && | 3017 | $dstat !~ /$exceptions/ && |
3018 | $dstat !~ /^\.$Ident\s*=/ && # .foo = | 3018 | $dstat !~ /^\.$Ident\s*=/ && # .foo = |
3019 | $dstat !~ /^(?:\#\s*$Ident|\#\s*$Constant)\s*$/ && # stringification #foo | ||
3019 | $dstat !~ /^do\s*$Constant\s*while\s*$Constant;?$/ && # do {...} while (...); // do {...} while (...) | 3020 | $dstat !~ /^do\s*$Constant\s*while\s*$Constant;?$/ && # do {...} while (...); // do {...} while (...) |
3020 | $dstat !~ /^for\s*$Constant$/ && # for (...) | 3021 | $dstat !~ /^for\s*$Constant$/ && # for (...) |
3021 | $dstat !~ /^for\s*$Constant\s+(?:$Ident|-?$Constant)$/ && # for (...) bar() | 3022 | $dstat !~ /^for\s*$Constant\s+(?:$Ident|-?$Constant)$/ && # for (...) bar() |
diff --git a/security/capability.c b/security/capability.c index 579775088967..6783c3e6c88e 100644 --- a/security/capability.c +++ b/security/capability.c | |||
@@ -737,6 +737,11 @@ static int cap_tun_dev_open(void *security) | |||
737 | { | 737 | { |
738 | return 0; | 738 | return 0; |
739 | } | 739 | } |
740 | |||
741 | static void cap_skb_owned_by(struct sk_buff *skb, struct sock *sk) | ||
742 | { | ||
743 | } | ||
744 | |||
740 | #endif /* CONFIG_SECURITY_NETWORK */ | 745 | #endif /* CONFIG_SECURITY_NETWORK */ |
741 | 746 | ||
742 | #ifdef CONFIG_SECURITY_NETWORK_XFRM | 747 | #ifdef CONFIG_SECURITY_NETWORK_XFRM |
@@ -1071,6 +1076,7 @@ void __init security_fixup_ops(struct security_operations *ops) | |||
1071 | set_to_cap_if_null(ops, tun_dev_open); | 1076 | set_to_cap_if_null(ops, tun_dev_open); |
1072 | set_to_cap_if_null(ops, tun_dev_attach_queue); | 1077 | set_to_cap_if_null(ops, tun_dev_attach_queue); |
1073 | set_to_cap_if_null(ops, tun_dev_attach); | 1078 | set_to_cap_if_null(ops, tun_dev_attach); |
1079 | set_to_cap_if_null(ops, skb_owned_by); | ||
1074 | #endif /* CONFIG_SECURITY_NETWORK */ | 1080 | #endif /* CONFIG_SECURITY_NETWORK */ |
1075 | #ifdef CONFIG_SECURITY_NETWORK_XFRM | 1081 | #ifdef CONFIG_SECURITY_NETWORK_XFRM |
1076 | set_to_cap_if_null(ops, xfrm_policy_alloc_security); | 1082 | set_to_cap_if_null(ops, xfrm_policy_alloc_security); |
diff --git a/security/security.c b/security/security.c index 7b88c6aeaed4..03f248b84e9f 100644 --- a/security/security.c +++ b/security/security.c | |||
@@ -1290,6 +1290,11 @@ int security_tun_dev_open(void *security) | |||
1290 | } | 1290 | } |
1291 | EXPORT_SYMBOL(security_tun_dev_open); | 1291 | EXPORT_SYMBOL(security_tun_dev_open); |
1292 | 1292 | ||
1293 | void security_skb_owned_by(struct sk_buff *skb, struct sock *sk) | ||
1294 | { | ||
1295 | security_ops->skb_owned_by(skb, sk); | ||
1296 | } | ||
1297 | |||
1293 | #endif /* CONFIG_SECURITY_NETWORK */ | 1298 | #endif /* CONFIG_SECURITY_NETWORK */ |
1294 | 1299 | ||
1295 | #ifdef CONFIG_SECURITY_NETWORK_XFRM | 1300 | #ifdef CONFIG_SECURITY_NETWORK_XFRM |
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 2fa28c88900c..7171a957b933 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c | |||
@@ -51,6 +51,7 @@ | |||
51 | #include <linux/tty.h> | 51 | #include <linux/tty.h> |
52 | #include <net/icmp.h> | 52 | #include <net/icmp.h> |
53 | #include <net/ip.h> /* for local_port_range[] */ | 53 | #include <net/ip.h> /* for local_port_range[] */ |
54 | #include <net/sock.h> | ||
54 | #include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */ | 55 | #include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */ |
55 | #include <net/net_namespace.h> | 56 | #include <net/net_namespace.h> |
56 | #include <net/netlabel.h> | 57 | #include <net/netlabel.h> |
@@ -4363,6 +4364,11 @@ static void selinux_inet_conn_established(struct sock *sk, struct sk_buff *skb) | |||
4363 | selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid); | 4364 | selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid); |
4364 | } | 4365 | } |
4365 | 4366 | ||
4367 | static void selinux_skb_owned_by(struct sk_buff *skb, struct sock *sk) | ||
4368 | { | ||
4369 | skb_set_owner_w(skb, sk); | ||
4370 | } | ||
4371 | |||
4366 | static int selinux_secmark_relabel_packet(u32 sid) | 4372 | static int selinux_secmark_relabel_packet(u32 sid) |
4367 | { | 4373 | { |
4368 | const struct task_security_struct *__tsec; | 4374 | const struct task_security_struct *__tsec; |
@@ -5664,6 +5670,7 @@ static struct security_operations selinux_ops = { | |||
5664 | .tun_dev_attach_queue = selinux_tun_dev_attach_queue, | 5670 | .tun_dev_attach_queue = selinux_tun_dev_attach_queue, |
5665 | .tun_dev_attach = selinux_tun_dev_attach, | 5671 | .tun_dev_attach = selinux_tun_dev_attach, |
5666 | .tun_dev_open = selinux_tun_dev_open, | 5672 | .tun_dev_open = selinux_tun_dev_open, |
5673 | .skb_owned_by = selinux_skb_owned_by, | ||
5667 | 5674 | ||
5668 | #ifdef CONFIG_SECURITY_NETWORK_XFRM | 5675 | #ifdef CONFIG_SECURITY_NETWORK_XFRM |
5669 | .xfrm_policy_alloc_security = selinux_xfrm_policy_alloc, | 5676 | .xfrm_policy_alloc_security = selinux_xfrm_policy_alloc, |
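
The security hunks add an skb_owned_by hook, give capability.c an empty default, and register it through set_to_cap_if_null() so security_skb_owned_by() can call the pointer unconditionally. Below is a hedged userspace sketch of that "fill NULL ops with defaults" idea (struct sec_ops, set_if_null and the hook names here are invented, not the real LSM API):

/*
 * Illustrative userspace sketch: an operations table is fixed up so that any
 * hook a module left NULL points at a harmless default, letting callers
 * invoke hooks unconditionally.
 */
#include <stdio.h>

struct sec_ops {
	void (*skb_owned_by)(const char *skb, const char *sk);
	int  (*tun_dev_open)(void *security);
};

/* Default implementations: do nothing / allow. */
static void default_skb_owned_by(const char *skb, const char *sk) { (void)skb; (void)sk; }
static int default_tun_dev_open(void *security) { (void)security; return 0; }

#define set_if_null(ops, hook, def) \
	do { if (!(ops)->hook) (ops)->hook = (def); } while (0)

static void fixup_ops(struct sec_ops *ops)
{
	set_if_null(ops, skb_owned_by, default_skb_owned_by);
	set_if_null(ops, tun_dev_open, default_tun_dev_open);
}

/* A module that only implements one hook. */
static void my_skb_owned_by(const char *skb, const char *sk)
{
	printf("%s now owned by %s\n", skb, sk);
}

int main(void)
{
	struct sec_ops ops = { .skb_owned_by = my_skb_owned_by };

	fixup_ops(&ops);
	ops.skb_owned_by("skb#1", "sock#7");			/* module hook */
	printf("tun_dev_open -> %d\n", ops.tun_dev_open(NULL));	/* default hook */
	return 0;
}
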
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 71ae86ca64ac..eb560fa32321 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c | |||
@@ -3222,18 +3222,10 @@ EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap); | |||
3222 | int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, | 3222 | int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, |
3223 | struct vm_area_struct *area) | 3223 | struct vm_area_struct *area) |
3224 | { | 3224 | { |
3225 | long size; | 3225 | struct snd_pcm_runtime *runtime = substream->runtime; |
3226 | unsigned long offset; | ||
3227 | 3226 | ||
3228 | area->vm_page_prot = pgprot_noncached(area->vm_page_prot); | 3227 | area->vm_page_prot = pgprot_noncached(area->vm_page_prot); |
3229 | area->vm_flags |= VM_IO; | 3228 | return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes); |
3230 | size = area->vm_end - area->vm_start; | ||
3231 | offset = area->vm_pgoff << PAGE_SHIFT; | ||
3232 | if (io_remap_pfn_range(area, area->vm_start, | ||
3233 | (substream->runtime->dma_addr + offset) >> PAGE_SHIFT, | ||
3234 | size, area->vm_page_prot)) | ||
3235 | return -EAGAIN; | ||
3236 | return 0; | ||
3237 | } | 3229 | } |
3238 | 3230 | ||
3239 | EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem); | 3231 | EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem); |
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index ecdf30eb5879..4aba7646dd9c 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c | |||
@@ -173,7 +173,7 @@ const char *snd_hda_get_jack_type(u32 cfg) | |||
173 | "Line Out", "Speaker", "HP Out", "CD", | 173 | "Line Out", "Speaker", "HP Out", "CD", |
174 | "SPDIF Out", "Digital Out", "Modem Line", "Modem Hand", | 174 | "SPDIF Out", "Digital Out", "Modem Line", "Modem Hand", |
175 | "Line In", "Aux", "Mic", "Telephony", | 175 | "Line In", "Aux", "Mic", "Telephony", |
176 | "SPDIF In", "Digitial In", "Reserved", "Other" | 176 | "SPDIF In", "Digital In", "Reserved", "Other" |
177 | }; | 177 | }; |
178 | 178 | ||
179 | return jack_types[(cfg & AC_DEFCFG_DEVICE) | 179 | return jack_types[(cfg & AC_DEFCFG_DEVICE) |
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c index 7dd846380a50..d0d7ac1e99d2 100644 --- a/sound/pci/hda/hda_eld.c +++ b/sound/pci/hda/hda_eld.c | |||
@@ -320,7 +320,7 @@ int snd_hdmi_get_eld(struct hda_codec *codec, hda_nid_t nid, | |||
320 | unsigned char *buf, int *eld_size) | 320 | unsigned char *buf, int *eld_size) |
321 | { | 321 | { |
322 | int i; | 322 | int i; |
323 | int ret; | 323 | int ret = 0; |
324 | int size; | 324 | int size; |
325 | 325 | ||
326 | /* | 326 | /* |
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 43c2ea539561..2dbe767be16b 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c | |||
@@ -740,7 +740,7 @@ EXPORT_SYMBOL_HDA(snd_hda_activate_path); | |||
740 | static void path_power_down_sync(struct hda_codec *codec, struct nid_path *path) | 740 | static void path_power_down_sync(struct hda_codec *codec, struct nid_path *path) |
741 | { | 741 | { |
742 | struct hda_gen_spec *spec = codec->spec; | 742 | struct hda_gen_spec *spec = codec->spec; |
743 | bool changed; | 743 | bool changed = false; |
744 | int i; | 744 | int i; |
745 | 745 | ||
746 | if (!spec->power_down_unused || path->active) | 746 | if (!spec->power_down_unused || path->active) |
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 418bfc0eb0a3..bcd40ee488e3 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -134,8 +134,8 @@ MODULE_PARM_DESC(power_save, "Automatic power-saving timeout " | |||
134 | * this may give more power-saving, but will take longer time to | 134 | * this may give more power-saving, but will take longer time to |
135 | * wake up. | 135 | * wake up. |
136 | */ | 136 | */ |
137 | static int power_save_controller = -1; | 137 | static bool power_save_controller = 1; |
138 | module_param(power_save_controller, bint, 0644); | 138 | module_param(power_save_controller, bool, 0644); |
139 | MODULE_PARM_DESC(power_save_controller, "Reset controller in power save mode."); | 139 | MODULE_PARM_DESC(power_save_controller, "Reset controller in power save mode."); |
140 | #endif /* CONFIG_PM */ | 140 | #endif /* CONFIG_PM */ |
141 | 141 | ||
@@ -2931,8 +2931,6 @@ static int azx_runtime_idle(struct device *dev) | |||
2931 | struct snd_card *card = dev_get_drvdata(dev); | 2931 | struct snd_card *card = dev_get_drvdata(dev); |
2932 | struct azx *chip = card->private_data; | 2932 | struct azx *chip = card->private_data; |
2933 | 2933 | ||
2934 | if (power_save_controller > 0) | ||
2935 | return 0; | ||
2936 | if (!power_save_controller || | 2934 | if (!power_save_controller || |
2937 | !(chip->driver_caps & AZX_DCAPS_PM_RUNTIME)) | 2935 | !(chip->driver_caps & AZX_DCAPS_PM_RUNTIME)) |
2938 | return -EBUSY; | 2936 | return -EBUSY; |
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 78e1827d0a95..de8ac5c07fd0 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c | |||
@@ -1196,7 +1196,7 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll) | |||
1196 | 1196 | ||
1197 | _snd_printd(SND_PR_VERBOSE, | 1197 | _snd_printd(SND_PR_VERBOSE, |
1198 | "HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n", | 1198 | "HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n", |
1199 | codec->addr, pin_nid, eld->monitor_present, eld->eld_valid); | 1199 | codec->addr, pin_nid, pin_eld->monitor_present, eld->eld_valid); |
1200 | 1200 | ||
1201 | if (eld->eld_valid) { | 1201 | if (eld->eld_valid) { |
1202 | if (snd_hdmi_get_eld(codec, pin_nid, eld->eld_buffer, | 1202 | if (snd_hdmi_get_eld(codec, pin_nid, eld->eld_buffer, |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 563c24df4d6f..f15c36bde540 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -3440,7 +3440,8 @@ static int alc662_parse_auto_config(struct hda_codec *codec) | |||
3440 | const hda_nid_t *ssids; | 3440 | const hda_nid_t *ssids; |
3441 | 3441 | ||
3442 | if (codec->vendor_id == 0x10ec0272 || codec->vendor_id == 0x10ec0663 || | 3442 | if (codec->vendor_id == 0x10ec0272 || codec->vendor_id == 0x10ec0663 || |
3443 | codec->vendor_id == 0x10ec0665 || codec->vendor_id == 0x10ec0670) | 3443 | codec->vendor_id == 0x10ec0665 || codec->vendor_id == 0x10ec0670 || |
3444 | codec->vendor_id == 0x10ec0671) | ||
3444 | ssids = alc663_ssids; | 3445 | ssids = alc663_ssids; |
3445 | else | 3446 | else |
3446 | ssids = alc662_ssids; | 3447 | ssids = alc662_ssids; |
@@ -3894,6 +3895,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = { | |||
3894 | { .id = 0x10ec0665, .name = "ALC665", .patch = patch_alc662 }, | 3895 | { .id = 0x10ec0665, .name = "ALC665", .patch = patch_alc662 }, |
3895 | { .id = 0x10ec0668, .name = "ALC668", .patch = patch_alc662 }, | 3896 | { .id = 0x10ec0668, .name = "ALC668", .patch = patch_alc662 }, |
3896 | { .id = 0x10ec0670, .name = "ALC670", .patch = patch_alc662 }, | 3897 | { .id = 0x10ec0670, .name = "ALC670", .patch = patch_alc662 }, |
3898 | { .id = 0x10ec0671, .name = "ALC671", .patch = patch_alc662 }, | ||
3897 | { .id = 0x10ec0680, .name = "ALC680", .patch = patch_alc680 }, | 3899 | { .id = 0x10ec0680, .name = "ALC680", .patch = patch_alc680 }, |
3898 | { .id = 0x10ec0880, .name = "ALC880", .patch = patch_alc880 }, | 3900 | { .id = 0x10ec0880, .name = "ALC880", .patch = patch_alc880 }, |
3899 | { .id = 0x10ec0882, .name = "ALC882", .patch = patch_alc882 }, | 3901 | { .id = 0x10ec0882, .name = "ALC882", .patch = patch_alc882 }, |
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c index fc176044994d..fc176044994d 100755..100644 --- a/sound/soc/codecs/max98090.c +++ b/sound/soc/codecs/max98090.c | |||
diff --git a/sound/soc/codecs/max98090.h b/sound/soc/codecs/max98090.h index 7e103f249053..7e103f249053 100755..100644 --- a/sound/soc/codecs/max98090.h +++ b/sound/soc/codecs/max98090.h | |||
diff --git a/sound/soc/codecs/si476x.c b/sound/soc/codecs/si476x.c index f2d61a187830..566ea3256e2d 100644 --- a/sound/soc/codecs/si476x.c +++ b/sound/soc/codecs/si476x.c | |||
@@ -159,6 +159,7 @@ static int si476x_codec_hw_params(struct snd_pcm_substream *substream, | |||
159 | switch (params_format(params)) { | 159 | switch (params_format(params)) { |
160 | case SNDRV_PCM_FORMAT_S8: | 160 | case SNDRV_PCM_FORMAT_S8: |
161 | width = SI476X_PCM_FORMAT_S8; | 161 | width = SI476X_PCM_FORMAT_S8; |
162 | break; | ||
162 | case SNDRV_PCM_FORMAT_S16_LE: | 163 | case SNDRV_PCM_FORMAT_S16_LE: |
163 | width = SI476X_PCM_FORMAT_S16_LE; | 164 | width = SI476X_PCM_FORMAT_S16_LE; |
164 | break; | 165 | break; |
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c index b82bbf584146..34d0201d6a78 100644 --- a/sound/soc/codecs/wm5102.c +++ b/sound/soc/codecs/wm5102.c | |||
@@ -584,7 +584,7 @@ static int wm5102_sysclk_ev(struct snd_soc_dapm_widget *w, | |||
584 | struct snd_kcontrol *kcontrol, int event) | 584 | struct snd_kcontrol *kcontrol, int event) |
585 | { | 585 | { |
586 | struct snd_soc_codec *codec = w->codec; | 586 | struct snd_soc_codec *codec = w->codec; |
587 | struct arizona *arizona = dev_get_drvdata(codec->dev); | 587 | struct arizona *arizona = dev_get_drvdata(codec->dev->parent); |
588 | struct regmap *regmap = codec->control_data; | 588 | struct regmap *regmap = codec->control_data; |
589 | const struct reg_default *patch = NULL; | 589 | const struct reg_default *patch = NULL; |
590 | int i, patch_size; | 590 | int i, patch_size; |
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c index 134e41c870b9..f8a31ad0b203 100644 --- a/sound/soc/codecs/wm8903.c +++ b/sound/soc/codecs/wm8903.c | |||
@@ -1083,6 +1083,8 @@ static const struct snd_soc_dapm_route wm8903_intercon[] = { | |||
1083 | { "ROP", NULL, "Right Speaker PGA" }, | 1083 | { "ROP", NULL, "Right Speaker PGA" }, |
1084 | { "RON", NULL, "Right Speaker PGA" }, | 1084 | { "RON", NULL, "Right Speaker PGA" }, |
1085 | 1085 | ||
1086 | { "Charge Pump", NULL, "CLK_DSP" }, | ||
1087 | |||
1086 | { "Left Headphone Output PGA", NULL, "Charge Pump" }, | 1088 | { "Left Headphone Output PGA", NULL, "Charge Pump" }, |
1087 | { "Right Headphone Output PGA", NULL, "Charge Pump" }, | 1089 | { "Right Headphone Output PGA", NULL, "Charge Pump" }, |
1088 | { "Left Line Output PGA", NULL, "Charge Pump" }, | 1090 | { "Left Line Output PGA", NULL, "Charge Pump" }, |
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c index f3f7e75f8628..9af1bddc4c62 100644 --- a/sound/soc/codecs/wm_adsp.c +++ b/sound/soc/codecs/wm_adsp.c | |||
@@ -828,7 +828,8 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp) | |||
828 | &buf_list); | 828 | &buf_list); |
829 | if (!buf) { | 829 | if (!buf) { |
830 | adsp_err(dsp, "Out of memory\n"); | 830 | adsp_err(dsp, "Out of memory\n"); |
831 | return -ENOMEM; | 831 | ret = -ENOMEM; |
832 | goto out_fw; | ||
832 | } | 833 | } |
833 | 834 | ||
834 | adsp_dbg(dsp, "%s.%d: Writing %d bytes at %x\n", | 835 | adsp_dbg(dsp, "%s.%d: Writing %d bytes at %x\n", |
@@ -865,7 +866,7 @@ out_fw: | |||
865 | wm_adsp_buf_free(&buf_list); | 866 | wm_adsp_buf_free(&buf_list); |
866 | out: | 867 | out: |
867 | kfree(file); | 868 | kfree(file); |
868 | return 0; | 869 | return ret; |
869 | } | 870 | } |
870 | 871 | ||
871 | int wm_adsp1_init(struct wm_adsp *adsp) | 872 | int wm_adsp1_init(struct wm_adsp *adsp) |
diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c index 55464a5b0706..810c7eeb7b03 100644 --- a/sound/soc/fsl/imx-ssi.c +++ b/sound/soc/fsl/imx-ssi.c | |||
@@ -496,6 +496,8 @@ static void imx_ssi_ac97_reset(struct snd_ac97 *ac97) | |||
496 | 496 | ||
497 | if (imx_ssi->ac97_reset) | 497 | if (imx_ssi->ac97_reset) |
498 | imx_ssi->ac97_reset(ac97); | 498 | imx_ssi->ac97_reset(ac97); |
499 | /* First read sometimes fails, do a dummy read */ | ||
500 | imx_ssi_ac97_read(ac97, 0); | ||
499 | } | 501 | } |
500 | 502 | ||
501 | static void imx_ssi_ac97_warm_reset(struct snd_ac97 *ac97) | 503 | static void imx_ssi_ac97_warm_reset(struct snd_ac97 *ac97) |
@@ -504,6 +506,9 @@ static void imx_ssi_ac97_warm_reset(struct snd_ac97 *ac97) | |||
504 | 506 | ||
505 | if (imx_ssi->ac97_warm_reset) | 507 | if (imx_ssi->ac97_warm_reset) |
506 | imx_ssi->ac97_warm_reset(ac97); | 508 | imx_ssi->ac97_warm_reset(ac97); |
509 | |||
510 | /* First read sometimes fails, do a dummy read */ | ||
511 | imx_ssi_ac97_read(ac97, 0); | ||
507 | } | 512 | } |
508 | 513 | ||
509 | struct snd_ac97_bus_ops soc_ac97_ops = { | 514 | struct snd_ac97_bus_ops soc_ac97_ops = { |
diff --git a/sound/soc/fsl/pcm030-audio-fabric.c b/sound/soc/fsl/pcm030-audio-fabric.c index 8e52c1485df3..eb4373840bb6 100644 --- a/sound/soc/fsl/pcm030-audio-fabric.c +++ b/sound/soc/fsl/pcm030-audio-fabric.c | |||
@@ -51,7 +51,7 @@ static struct snd_soc_card pcm030_card = { | |||
51 | .num_links = ARRAY_SIZE(pcm030_fabric_dai), | 51 | .num_links = ARRAY_SIZE(pcm030_fabric_dai), |
52 | }; | 52 | }; |
53 | 53 | ||
54 | static int __init pcm030_fabric_probe(struct platform_device *op) | 54 | static int pcm030_fabric_probe(struct platform_device *op) |
55 | { | 55 | { |
56 | struct device_node *np = op->dev.of_node; | 56 | struct device_node *np = op->dev.of_node; |
57 | struct device_node *platform_np; | 57 | struct device_node *platform_np; |
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c index d7231e336a7c..6bbeb0bf1a73 100644 --- a/sound/soc/samsung/i2s.c +++ b/sound/soc/samsung/i2s.c | |||
@@ -972,6 +972,7 @@ static const struct snd_soc_dai_ops samsung_i2s_dai_ops = { | |||
972 | static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec) | 972 | static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec) |
973 | { | 973 | { |
974 | struct i2s_dai *i2s; | 974 | struct i2s_dai *i2s; |
975 | int ret; | ||
975 | 976 | ||
976 | i2s = devm_kzalloc(&pdev->dev, sizeof(struct i2s_dai), GFP_KERNEL); | 977 | i2s = devm_kzalloc(&pdev->dev, sizeof(struct i2s_dai), GFP_KERNEL); |
977 | if (i2s == NULL) | 978 | if (i2s == NULL) |
@@ -996,15 +997,17 @@ static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec) | |||
996 | i2s->i2s_dai_drv.capture.channels_max = 2; | 997 | i2s->i2s_dai_drv.capture.channels_max = 2; |
997 | i2s->i2s_dai_drv.capture.rates = SAMSUNG_I2S_RATES; | 998 | i2s->i2s_dai_drv.capture.rates = SAMSUNG_I2S_RATES; |
998 | i2s->i2s_dai_drv.capture.formats = SAMSUNG_I2S_FMTS; | 999 | i2s->i2s_dai_drv.capture.formats = SAMSUNG_I2S_FMTS; |
1000 | dev_set_drvdata(&i2s->pdev->dev, i2s); | ||
999 | } else { /* Create a new platform_device for Secondary */ | 1001 | } else { /* Create a new platform_device for Secondary */ |
1000 | i2s->pdev = platform_device_register_resndata(NULL, | 1002 | i2s->pdev = platform_device_alloc("samsung-i2s-sec", -1); |
1001 | "samsung-i2s-sec", -1, NULL, 0, NULL, 0); | ||
1002 | if (IS_ERR(i2s->pdev)) | 1003 | if (IS_ERR(i2s->pdev)) |
1003 | return NULL; | 1004 | return NULL; |
1004 | } | ||
1005 | 1005 | ||
1006 | /* Pre-assign snd_soc_dai_set_drvdata */ | 1006 | platform_set_drvdata(i2s->pdev, i2s); |
1007 | dev_set_drvdata(&i2s->pdev->dev, i2s); | 1007 | ret = platform_device_add(i2s->pdev); |
1008 | if (ret < 0) | ||
1009 | return NULL; | ||
1010 | } | ||
1008 | 1011 | ||
1009 | return i2s; | 1012 | return i2s; |
1010 | } | 1013 | } |
@@ -1107,6 +1110,10 @@ static int samsung_i2s_probe(struct platform_device *pdev) | |||
1107 | 1110 | ||
1108 | if (samsung_dai_type == TYPE_SEC) { | 1111 | if (samsung_dai_type == TYPE_SEC) { |
1109 | sec_dai = dev_get_drvdata(&pdev->dev); | 1112 | sec_dai = dev_get_drvdata(&pdev->dev); |
1113 | if (!sec_dai) { | ||
1114 | dev_err(&pdev->dev, "Unable to get drvdata\n"); | ||
1115 | return -EFAULT; | ||
1116 | } | ||
1110 | snd_soc_register_dai(&sec_dai->pdev->dev, | 1117 | snd_soc_register_dai(&sec_dai->pdev->dev, |
1111 | &sec_dai->i2s_dai_drv); | 1118 | &sec_dai->i2s_dai_drv); |
1112 | asoc_dma_platform_register(&pdev->dev); | 1119 | asoc_dma_platform_register(&pdev->dev); |
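In the samsung/i2s.c hunks above, the secondary DAI device is now created in two steps so its drvdata is in place before the device (and therefore its probe) can run, and the probe path defends against missing drvdata. A condensed sketch of the new ordering, with error handling trimmed:

    /* Sketch only: mirrors the alloc -> set_drvdata -> add sequence from the
     * patch; a full driver would also put the device on failure. */
    i2s->pdev = platform_device_alloc("samsung-i2s-sec", -1);
    if (!i2s->pdev)
            return NULL;
    platform_set_drvdata(i2s->pdev, i2s);    /* drvdata visible before probe runs */
    if (platform_device_add(i2s->pdev) < 0)  /* probe may fire from here on */
            return NULL;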
diff --git a/sound/soc/sh/dma-sh7760.c b/sound/soc/sh/dma-sh7760.c index 19eff8fc4fdd..1a8b03e4b41b 100644 --- a/sound/soc/sh/dma-sh7760.c +++ b/sound/soc/sh/dma-sh7760.c | |||
@@ -342,8 +342,8 @@ static int camelot_pcm_new(struct snd_soc_pcm_runtime *rtd) | |||
342 | return 0; | 342 | return 0; |
343 | } | 343 | } |
344 | 344 | ||
345 | static struct snd_soc_platform sh7760_soc_platform = { | 345 | static struct snd_soc_platform_driver sh7760_soc_platform = { |
346 | .pcm_ops = &camelot_pcm_ops, | 346 | .ops = &camelot_pcm_ops, |
347 | .pcm_new = camelot_pcm_new, | 347 | .pcm_new = camelot_pcm_new, |
348 | .pcm_free = camelot_pcm_free, | 348 | .pcm_free = camelot_pcm_free, |
349 | }; | 349 | }; |
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c index b5b3db71e253..ed0bfb0ddb96 100644 --- a/sound/soc/soc-compress.c +++ b/sound/soc/soc-compress.c | |||
@@ -211,19 +211,27 @@ static int soc_compr_set_params(struct snd_compr_stream *cstream, | |||
211 | if (platform->driver->compr_ops && platform->driver->compr_ops->set_params) { | 211 | if (platform->driver->compr_ops && platform->driver->compr_ops->set_params) { |
212 | ret = platform->driver->compr_ops->set_params(cstream, params); | 212 | ret = platform->driver->compr_ops->set_params(cstream, params); |
213 | if (ret < 0) | 213 | if (ret < 0) |
214 | goto out; | 214 | goto err; |
215 | } | 215 | } |
216 | 216 | ||
217 | if (rtd->dai_link->compr_ops && rtd->dai_link->compr_ops->set_params) { | 217 | if (rtd->dai_link->compr_ops && rtd->dai_link->compr_ops->set_params) { |
218 | ret = rtd->dai_link->compr_ops->set_params(cstream); | 218 | ret = rtd->dai_link->compr_ops->set_params(cstream); |
219 | if (ret < 0) | 219 | if (ret < 0) |
220 | goto out; | 220 | goto err; |
221 | } | 221 | } |
222 | 222 | ||
223 | snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK, | 223 | snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK, |
224 | SND_SOC_DAPM_STREAM_START); | 224 | SND_SOC_DAPM_STREAM_START); |
225 | 225 | ||
226 | out: | 226 | /* cancel any delayed stream shutdown that is pending */ |
227 | rtd->pop_wait = 0; | ||
228 | mutex_unlock(&rtd->pcm_mutex); | ||
229 | |||
230 | cancel_delayed_work_sync(&rtd->delayed_work); | ||
231 | |||
232 | return ret; | ||
233 | |||
234 | err: | ||
227 | mutex_unlock(&rtd->pcm_mutex); | 235 | mutex_unlock(&rtd->pcm_mutex); |
228 | return ret; | 236 | return ret; |
229 | } | 237 | } |
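The soc-compress.c hunk splits the exit path: on success it clears pop_wait and cancels the delayed power-down work, but only after dropping pcm_mutex, presumably because the delayed work takes that same mutex and a synchronous cancel under the lock could deadlock. A sketch of the success-path ordering:

    /* Sketch of the success path; the deadlock rationale above is an
     * editorial inference, not text from the patch. */
    rtd->pop_wait = 0;                            /* no pending pop suppression */
    mutex_unlock(&rtd->pcm_mutex);                /* release before a sync cancel */
    cancel_delayed_work_sync(&rtd->delayed_work);
    return ret;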
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index b7e84a7cd9ee..ff4b45a5d796 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c | |||
@@ -2963,7 +2963,7 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol, | |||
2963 | val = val << shift; | 2963 | val = val << shift; |
2964 | 2964 | ||
2965 | ret = snd_soc_update_bits_locked(codec, reg, val_mask, val); | 2965 | ret = snd_soc_update_bits_locked(codec, reg, val_mask, val); |
2966 | if (ret != 0) | 2966 | if (ret < 0) |
2967 | return ret; | 2967 | return ret; |
2968 | 2968 | ||
2969 | if (snd_soc_volsw_is_stereo(mc)) { | 2969 | if (snd_soc_volsw_is_stereo(mc)) { |
@@ -3140,7 +3140,7 @@ int snd_soc_bytes_put(struct snd_kcontrol *kcontrol, | |||
3140 | if (params->mask) { | 3140 | if (params->mask) { |
3141 | ret = regmap_read(codec->control_data, params->base, &val); | 3141 | ret = regmap_read(codec->control_data, params->base, &val); |
3142 | if (ret != 0) | 3142 | if (ret != 0) |
3143 | return ret; | 3143 | goto out; |
3144 | 3144 | ||
3145 | val &= params->mask; | 3145 | val &= params->mask; |
3146 | 3146 | ||
@@ -3158,13 +3158,15 @@ int snd_soc_bytes_put(struct snd_kcontrol *kcontrol, | |||
3158 | ((u32 *)data)[0] |= cpu_to_be32(val); | 3158 | ((u32 *)data)[0] |= cpu_to_be32(val); |
3159 | break; | 3159 | break; |
3160 | default: | 3160 | default: |
3161 | return -EINVAL; | 3161 | ret = -EINVAL; |
3162 | goto out; | ||
3162 | } | 3163 | } |
3163 | } | 3164 | } |
3164 | 3165 | ||
3165 | ret = regmap_raw_write(codec->control_data, params->base, | 3166 | ret = regmap_raw_write(codec->control_data, params->base, |
3166 | data, len); | 3167 | data, len); |
3167 | 3168 | ||
3169 | out: | ||
3168 | kfree(data); | 3170 | kfree(data); |
3169 | 3171 | ||
3170 | return ret; | 3172 | return ret; |
@@ -4197,7 +4199,6 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card, | |||
4197 | dev_err(card->dev, | 4199 | dev_err(card->dev, |
4198 | "ASoC: Property '%s' index %d could not be read: %d\n", | 4200 | "ASoC: Property '%s' index %d could not be read: %d\n", |
4199 | propname, 2 * i, ret); | 4201 | propname, 2 * i, ret); |
4200 | kfree(routes); | ||
4201 | return -EINVAL; | 4202 | return -EINVAL; |
4202 | } | 4203 | } |
4203 | ret = of_property_read_string_index(np, propname, | 4204 | ret = of_property_read_string_index(np, propname, |
@@ -4206,7 +4207,6 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card, | |||
4206 | dev_err(card->dev, | 4207 | dev_err(card->dev, |
4207 | "ASoC: Property '%s' index %d could not be read: %d\n", | 4208 | "ASoC: Property '%s' index %d could not be read: %d\n", |
4208 | propname, (2 * i) + 1, ret); | 4209 | propname, (2 * i) + 1, ret); |
4209 | kfree(routes); | ||
4210 | return -EINVAL; | 4210 | return -EINVAL; |
4211 | } | 4211 | } |
4212 | } | 4212 | } |
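In soc-core.c, snd_soc_put_volsw_range() now treats only negative returns from snd_soc_update_bits_locked() as errors (a positive return just means the register changed), and snd_soc_bytes_put() funnels its error exits through one label so the temporary buffer is always freed. A minimal sketch of that single-exit cleanup idiom; names other than regmap_raw_write() and kfree() are placeholders:

    int ret = 0;
    u8 *data = kmemdup(src_bytes, len, GFP_KERNEL);   /* placeholder source */
    if (!data)
            return -ENOMEM;
    if (unsupported_word_size) {                      /* placeholder check */
            ret = -EINVAL;
            goto out;
    }
    ret = regmap_raw_write(map, base, data, len);
    out:
            kfree(data);
            return ret;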
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 1d6a9b3ceb27..d6d9ba2e6916 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c | |||
@@ -831,6 +831,9 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget, | |||
831 | if (path->weak) | 831 | if (path->weak) |
832 | continue; | 832 | continue; |
833 | 833 | ||
834 | if (path->walking) | ||
835 | return 1; | ||
836 | |||
834 | if (path->walked) | 837 | if (path->walked) |
835 | continue; | 838 | continue; |
836 | 839 | ||
@@ -838,6 +841,7 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget, | |||
838 | 841 | ||
839 | if (path->sink && path->connect) { | 842 | if (path->sink && path->connect) { |
840 | path->walked = 1; | 843 | path->walked = 1; |
844 | path->walking = 1; | ||
841 | 845 | ||
842 | /* do we need to add this widget to the list ? */ | 846 | /* do we need to add this widget to the list ? */ |
843 | if (list) { | 847 | if (list) { |
@@ -847,11 +851,14 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget, | |||
847 | dev_err(widget->dapm->dev, | 851 | dev_err(widget->dapm->dev, |
848 | "ASoC: could not add widget %s\n", | 852 | "ASoC: could not add widget %s\n", |
849 | widget->name); | 853 | widget->name); |
854 | path->walking = 0; | ||
850 | return con; | 855 | return con; |
851 | } | 856 | } |
852 | } | 857 | } |
853 | 858 | ||
854 | con += is_connected_output_ep(path->sink, list); | 859 | con += is_connected_output_ep(path->sink, list); |
860 | |||
861 | path->walking = 0; | ||
855 | } | 862 | } |
856 | } | 863 | } |
857 | 864 | ||
@@ -931,6 +938,9 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget, | |||
931 | if (path->weak) | 938 | if (path->weak) |
932 | continue; | 939 | continue; |
933 | 940 | ||
941 | if (path->walking) | ||
942 | return 1; | ||
943 | |||
934 | if (path->walked) | 944 | if (path->walked) |
935 | continue; | 945 | continue; |
936 | 946 | ||
@@ -938,6 +948,7 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget, | |||
938 | 948 | ||
939 | if (path->source && path->connect) { | 949 | if (path->source && path->connect) { |
940 | path->walked = 1; | 950 | path->walked = 1; |
951 | path->walking = 1; | ||
941 | 952 | ||
942 | /* do we need to add this widget to the list ? */ | 953 | /* do we need to add this widget to the list ? */ |
943 | if (list) { | 954 | if (list) { |
@@ -947,11 +958,14 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget, | |||
947 | dev_err(widget->dapm->dev, | 958 | dev_err(widget->dapm->dev, |
948 | "ASoC: could not add widget %s\n", | 959 | "ASoC: could not add widget %s\n", |
949 | widget->name); | 960 | widget->name); |
961 | path->walking = 0; | ||
950 | return con; | 962 | return con; |
951 | } | 963 | } |
952 | } | 964 | } |
953 | 965 | ||
954 | con += is_connected_input_ep(path->source, list); | 966 | con += is_connected_input_ep(path->source, list); |
967 | |||
968 | path->walking = 0; | ||
955 | } | 969 | } |
956 | } | 970 | } |
957 | 971 | ||
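The soc-dapm.c hunks add a per-path "walking" flag to both graph walks: "walked" marks paths already visited in this walk, while "walking" marks paths on the current recursion stack, so a cycle in the routing graph terminates (the in-progress path is treated as connected) instead of recursing without bound. A condensed sketch of the guard inside the output walk; the loop header and field names are as read from the driver and may differ slightly:

    /* Condensed from is_connected_output_ep(): only the new guard and its
     * clearing are shown. */
    list_for_each_entry(path, &widget->sinks, list_source) {
            if (path->walking)
                    return 1;        /* already on this traversal's stack: cycle */
            if (path->walked)
                    continue;        /* finished earlier in this walk */
            if (path->sink && path->connect) {
                    path->walked = 1;
                    path->walking = 1;
                    con += is_connected_output_ep(path->sink, list);
                    path->walking = 0;
            }
    }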
diff --git a/sound/soc/spear/spear_pcm.c b/sound/soc/spear/spear_pcm.c index 9b76cc5a1148..5e7aebe1e664 100644 --- a/sound/soc/spear/spear_pcm.c +++ b/sound/soc/spear/spear_pcm.c | |||
@@ -149,9 +149,9 @@ static void spear_pcm_free(struct snd_pcm *pcm) | |||
149 | 149 | ||
150 | static u64 spear_pcm_dmamask = DMA_BIT_MASK(32); | 150 | static u64 spear_pcm_dmamask = DMA_BIT_MASK(32); |
151 | 151 | ||
152 | static int spear_pcm_new(struct snd_card *card, | 152 | static int spear_pcm_new(struct snd_soc_pcm_runtime *rtd) |
153 | struct snd_soc_dai *dai, struct snd_pcm *pcm) | ||
154 | { | 153 | { |
154 | struct snd_card *card = rtd->card->snd_card; | ||
155 | int ret; | 155 | int ret; |
156 | 156 | ||
157 | if (!card->dev->dma_mask) | 157 | if (!card->dev->dma_mask) |
@@ -159,16 +159,16 @@ static int spear_pcm_new(struct snd_card *card, | |||
159 | if (!card->dev->coherent_dma_mask) | 159 | if (!card->dev->coherent_dma_mask) |
160 | card->dev->coherent_dma_mask = DMA_BIT_MASK(32); | 160 | card->dev->coherent_dma_mask = DMA_BIT_MASK(32); |
161 | 161 | ||
162 | if (dai->driver->playback.channels_min) { | 162 | if (rtd->cpu_dai->driver->playback.channels_min) { |
163 | ret = spear_pcm_preallocate_dma_buffer(pcm, | 163 | ret = spear_pcm_preallocate_dma_buffer(rtd->pcm, |
164 | SNDRV_PCM_STREAM_PLAYBACK, | 164 | SNDRV_PCM_STREAM_PLAYBACK, |
165 | spear_pcm_hardware.buffer_bytes_max); | 165 | spear_pcm_hardware.buffer_bytes_max); |
166 | if (ret) | 166 | if (ret) |
167 | return ret; | 167 | return ret; |
168 | } | 168 | } |
169 | 169 | ||
170 | if (dai->driver->capture.channels_min) { | 170 | if (rtd->cpu_dai->driver->capture.channels_min) { |
171 | ret = spear_pcm_preallocate_dma_buffer(pcm, | 171 | ret = spear_pcm_preallocate_dma_buffer(rtd->pcm, |
172 | SNDRV_PCM_STREAM_CAPTURE, | 172 | SNDRV_PCM_STREAM_CAPTURE, |
173 | spear_pcm_hardware.buffer_bytes_max); | 173 | spear_pcm_hardware.buffer_bytes_max); |
174 | if (ret) | 174 | if (ret) |
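The spear_pcm.c hunk converts pcm_new() to the prototype that only receives the PCM runtime; the card, DAI and PCM are all reachable from it. A sketch of recovering the old arguments, assuming a standard single-CPU-DAI runtime layout:

    /* Sketch: example_pcm_new() is illustrative, not a function in the patch. */
    static int example_pcm_new(struct snd_soc_pcm_runtime *rtd)
    {
            struct snd_card *card = rtd->card->snd_card;
            struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
            struct snd_pcm *pcm = rtd->pcm;

            /* ... preallocate buffers according to cpu_dai->driver capabilities ... */
            return 0;
    }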
diff --git a/sound/soc/tegra/tegra_pcm.c b/sound/soc/tegra/tegra_pcm.c index c925ab0adeb6..5e2c55c5b255 100644 --- a/sound/soc/tegra/tegra_pcm.c +++ b/sound/soc/tegra/tegra_pcm.c | |||
@@ -43,8 +43,6 @@ | |||
43 | static const struct snd_pcm_hardware tegra_pcm_hardware = { | 43 | static const struct snd_pcm_hardware tegra_pcm_hardware = { |
44 | .info = SNDRV_PCM_INFO_MMAP | | 44 | .info = SNDRV_PCM_INFO_MMAP | |
45 | SNDRV_PCM_INFO_MMAP_VALID | | 45 | SNDRV_PCM_INFO_MMAP_VALID | |
46 | SNDRV_PCM_INFO_PAUSE | | ||
47 | SNDRV_PCM_INFO_RESUME | | ||
48 | SNDRV_PCM_INFO_INTERLEAVED, | 46 | SNDRV_PCM_INFO_INTERLEAVED, |
49 | .formats = SNDRV_PCM_FMTBIT_S16_LE, | 47 | .formats = SNDRV_PCM_FMTBIT_S16_LE, |
50 | .channels_min = 2, | 48 | .channels_min = 2, |
@@ -127,26 +125,6 @@ static int tegra_pcm_hw_free(struct snd_pcm_substream *substream) | |||
127 | return 0; | 125 | return 0; |
128 | } | 126 | } |
129 | 127 | ||
130 | static int tegra_pcm_trigger(struct snd_pcm_substream *substream, int cmd) | ||
131 | { | ||
132 | switch (cmd) { | ||
133 | case SNDRV_PCM_TRIGGER_START: | ||
134 | case SNDRV_PCM_TRIGGER_RESUME: | ||
135 | case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: | ||
136 | return snd_dmaengine_pcm_trigger(substream, | ||
137 | SNDRV_PCM_TRIGGER_START); | ||
138 | |||
139 | case SNDRV_PCM_TRIGGER_STOP: | ||
140 | case SNDRV_PCM_TRIGGER_SUSPEND: | ||
141 | case SNDRV_PCM_TRIGGER_PAUSE_PUSH: | ||
142 | return snd_dmaengine_pcm_trigger(substream, | ||
143 | SNDRV_PCM_TRIGGER_STOP); | ||
144 | default: | ||
145 | return -EINVAL; | ||
146 | } | ||
147 | return 0; | ||
148 | } | ||
149 | |||
150 | static int tegra_pcm_mmap(struct snd_pcm_substream *substream, | 128 | static int tegra_pcm_mmap(struct snd_pcm_substream *substream, |
151 | struct vm_area_struct *vma) | 129 | struct vm_area_struct *vma) |
152 | { | 130 | { |
@@ -164,7 +142,7 @@ static struct snd_pcm_ops tegra_pcm_ops = { | |||
164 | .ioctl = snd_pcm_lib_ioctl, | 142 | .ioctl = snd_pcm_lib_ioctl, |
165 | .hw_params = tegra_pcm_hw_params, | 143 | .hw_params = tegra_pcm_hw_params, |
166 | .hw_free = tegra_pcm_hw_free, | 144 | .hw_free = tegra_pcm_hw_free, |
167 | .trigger = tegra_pcm_trigger, | 145 | .trigger = snd_dmaengine_pcm_trigger, |
168 | .pointer = snd_dmaengine_pcm_pointer, | 146 | .pointer = snd_dmaengine_pcm_pointer, |
169 | .mmap = tegra_pcm_mmap, | 147 | .mmap = tegra_pcm_mmap, |
170 | }; | 148 | }; |
diff --git a/sound/usb/clock.c b/sound/usb/clock.c index 5e634a2eb282..9e2703a25156 100644 --- a/sound/usb/clock.c +++ b/sound/usb/clock.c | |||
@@ -253,7 +253,7 @@ static int set_sample_rate_v2(struct snd_usb_audio *chip, int iface, | |||
253 | { | 253 | { |
254 | struct usb_device *dev = chip->dev; | 254 | struct usb_device *dev = chip->dev; |
255 | unsigned char data[4]; | 255 | unsigned char data[4]; |
256 | int err, crate; | 256 | int err, cur_rate, prev_rate; |
257 | int clock = snd_usb_clock_find_source(chip, fmt->clock); | 257 | int clock = snd_usb_clock_find_source(chip, fmt->clock); |
258 | 258 | ||
259 | if (clock < 0) | 259 | if (clock < 0) |
@@ -266,6 +266,19 @@ static int set_sample_rate_v2(struct snd_usb_audio *chip, int iface, | |||
266 | return -ENXIO; | 266 | return -ENXIO; |
267 | } | 267 | } |
268 | 268 | ||
269 | err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR, | ||
270 | USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, | ||
271 | UAC2_CS_CONTROL_SAM_FREQ << 8, | ||
272 | snd_usb_ctrl_intf(chip) | (clock << 8), | ||
273 | data, sizeof(data)); | ||
274 | if (err < 0) { | ||
275 | snd_printk(KERN_WARNING "%d:%d:%d: cannot get freq (v2)\n", | ||
276 | dev->devnum, iface, fmt->altsetting); | ||
277 | prev_rate = 0; | ||
278 | } else { | ||
279 | prev_rate = data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24); | ||
280 | } | ||
281 | |||
269 | data[0] = rate; | 282 | data[0] = rate; |
270 | data[1] = rate >> 8; | 283 | data[1] = rate >> 8; |
271 | data[2] = rate >> 16; | 284 | data[2] = rate >> 16; |
@@ -280,19 +293,31 @@ static int set_sample_rate_v2(struct snd_usb_audio *chip, int iface, | |||
280 | return err; | 293 | return err; |
281 | } | 294 | } |
282 | 295 | ||
283 | if ((err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR, | 296 | err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR, |
284 | USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, | 297 | USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, |
285 | UAC2_CS_CONTROL_SAM_FREQ << 8, | 298 | UAC2_CS_CONTROL_SAM_FREQ << 8, |
286 | snd_usb_ctrl_intf(chip) | (clock << 8), | 299 | snd_usb_ctrl_intf(chip) | (clock << 8), |
287 | data, sizeof(data))) < 0) { | 300 | data, sizeof(data)); |
301 | if (err < 0) { | ||
288 | snd_printk(KERN_WARNING "%d:%d:%d: cannot get freq (v2)\n", | 302 | snd_printk(KERN_WARNING "%d:%d:%d: cannot get freq (v2)\n", |
289 | dev->devnum, iface, fmt->altsetting); | 303 | dev->devnum, iface, fmt->altsetting); |
290 | return err; | 304 | cur_rate = 0; |
305 | } else { | ||
306 | cur_rate = data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24); | ||
291 | } | 307 | } |
292 | 308 | ||
293 | crate = data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24); | 309 | if (cur_rate != rate) { |
294 | if (crate != rate) | 310 | snd_printd(KERN_WARNING |
295 | snd_printd(KERN_WARNING "current rate %d is different from the runtime rate %d\n", crate, rate); | 311 | "current rate %d is different from the runtime rate %d\n", |
312 | cur_rate, rate); | ||
313 | } | ||
314 | |||
315 | /* Some devices don't respond to sample rate changes while the | ||
316 | * interface is active. */ | ||
317 | if (rate != prev_rate) { | ||
318 | usb_set_interface(dev, iface, 0); | ||
319 | usb_set_interface(dev, iface, fmt->altsetting); | ||
320 | } | ||
296 | 321 | ||
297 | return 0; | 322 | return 0; |
298 | } | 323 | } |
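The clock.c change snapshots the device's current rate before writing the new one, re-reads it afterwards, and re-selects the altsetting when the rate actually changed, working around devices that ignore rate changes while the streaming interface is active. The 4-byte CUR value is little-endian; a small helper equivalent to the open-coded decode in the patch:

    /* Sketch of the little-endian decode used for prev_rate/cur_rate above;
     * uac2_rate_from_cur() is an illustrative name, not part of the patch. */
    static int uac2_rate_from_cur(const unsigned char data[4])
    {
            return data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24);
    }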
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index 497d2741d119..ebe91440a068 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c | |||
@@ -509,7 +509,7 @@ static int snd_nativeinstruments_control_get(struct snd_kcontrol *kcontrol, | |||
509 | else | 509 | else |
510 | ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest, | 510 | ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest, |
511 | USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, | 511 | USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, |
512 | 0, cpu_to_le16(wIndex), | 512 | 0, wIndex, |
513 | &tmp, sizeof(tmp), 1000); | 513 | &tmp, sizeof(tmp), 1000); |
514 | up_read(&mixer->chip->shutdown_rwsem); | 514 | up_read(&mixer->chip->shutdown_rwsem); |
515 | 515 | ||
@@ -540,7 +540,7 @@ static int snd_nativeinstruments_control_put(struct snd_kcontrol *kcontrol, | |||
540 | else | 540 | else |
541 | ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest, | 541 | ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest, |
542 | USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, | 542 | USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, |
543 | cpu_to_le16(wValue), cpu_to_le16(wIndex), | 543 | wValue, wIndex, |
544 | NULL, 0, 1000); | 544 | NULL, 0, 1000); |
545 | up_read(&mixer->chip->shutdown_rwsem); | 545 | up_read(&mixer->chip->shutdown_rwsem); |
546 | 546 | ||
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 5325a3869bb7..9c5ab22358b1 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c | |||
@@ -486,7 +486,7 @@ static int snd_usb_nativeinstruments_boot_quirk(struct usb_device *dev) | |||
486 | { | 486 | { |
487 | int ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), | 487 | int ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), |
488 | 0xaf, USB_TYPE_VENDOR | USB_RECIP_DEVICE, | 488 | 0xaf, USB_TYPE_VENDOR | USB_RECIP_DEVICE, |
489 | cpu_to_le16(1), 0, NULL, 0, 1000); | 489 | 1, 0, NULL, 0, 1000); |
490 | 490 | ||
491 | if (ret < 0) | 491 | if (ret < 0) |
492 | return ret; | 492 | return ret; |
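Both USB fixes above drop cpu_to_le16() on the wValue/wIndex arguments: usb_control_msg() takes host-endian values and performs the wire-format byte swap itself, so the extra conversion corrupted the request on big-endian hosts. The corrected call shape, as in the patch:

    /* Host-endian value/index; the USB core handles the setup-packet encoding. */
    ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest,
                          USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                          wValue, wIndex, NULL, 0, 1000);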
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 6f3214ed4444..321e066a0753 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c | |||
@@ -1421,6 +1421,7 @@ int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model) | |||
1421 | case 0x3C: /* HSW */ | 1421 | case 0x3C: /* HSW */ |
1422 | case 0x3F: /* HSW */ | 1422 | case 0x3F: /* HSW */ |
1423 | case 0x45: /* HSW */ | 1423 | case 0x45: /* HSW */ |
1424 | case 0x46: /* HSW */ | ||
1424 | return 1; | 1425 | return 1; |
1425 | case 0x2E: /* Nehalem-EX Xeon - Beckton */ | 1426 | case 0x2E: /* Nehalem-EX Xeon - Beckton */ |
1426 | case 0x2F: /* Westmere-EX Xeon - Eagleton */ | 1427 | case 0x2F: /* Westmere-EX Xeon - Eagleton */ |
@@ -1515,6 +1516,7 @@ void rapl_probe(unsigned int family, unsigned int model) | |||
1515 | case 0x3C: /* HSW */ | 1516 | case 0x3C: /* HSW */ |
1516 | case 0x3F: /* HSW */ | 1517 | case 0x3F: /* HSW */ |
1517 | case 0x45: /* HSW */ | 1518 | case 0x45: /* HSW */ |
1519 | case 0x46: /* HSW */ | ||
1518 | do_rapl = RAPL_PKG | RAPL_CORES | RAPL_GFX; | 1520 | do_rapl = RAPL_PKG | RAPL_CORES | RAPL_GFX; |
1519 | break; | 1521 | break; |
1520 | case 0x2D: | 1522 | case 0x2D: |
@@ -1754,6 +1756,7 @@ int is_snb(unsigned int family, unsigned int model) | |||
1754 | case 0x3C: /* HSW */ | 1756 | case 0x3C: /* HSW */ |
1755 | case 0x3F: /* HSW */ | 1757 | case 0x3F: /* HSW */ |
1756 | case 0x45: /* HSW */ | 1758 | case 0x45: /* HSW */ |
1759 | case 0x46: /* HSW */ | ||
1757 | return 1; | 1760 | return 1; |
1758 | } | 1761 | } |
1759 | return 0; | 1762 | return 0; |
@@ -2276,7 +2279,7 @@ int main(int argc, char **argv) | |||
2276 | cmdline(argc, argv); | 2279 | cmdline(argc, argv); |
2277 | 2280 | ||
2278 | if (verbose) | 2281 | if (verbose) |
2279 | fprintf(stderr, "turbostat v3.2 February 11, 2013" | 2282 | fprintf(stderr, "turbostat v3.3 March 15, 2013" |
2280 | " - Len Brown <lenb@kernel.org>\n"); | 2283 | " - Len Brown <lenb@kernel.org>\n"); |
2281 | 2284 | ||
2282 | turbostat_init(); | 2285 | turbostat_init(); |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index adc68feb5c5a..f18013f09e68 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -1541,21 +1541,38 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, | |||
1541 | } | 1541 | } |
1542 | 1542 | ||
1543 | int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, | 1543 | int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
1544 | gpa_t gpa) | 1544 | gpa_t gpa, unsigned long len) |
1545 | { | 1545 | { |
1546 | struct kvm_memslots *slots = kvm_memslots(kvm); | 1546 | struct kvm_memslots *slots = kvm_memslots(kvm); |
1547 | int offset = offset_in_page(gpa); | 1547 | int offset = offset_in_page(gpa); |
1548 | gfn_t gfn = gpa >> PAGE_SHIFT; | 1548 | gfn_t start_gfn = gpa >> PAGE_SHIFT; |
1549 | gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; | ||
1550 | gfn_t nr_pages_needed = end_gfn - start_gfn + 1; | ||
1551 | gfn_t nr_pages_avail; | ||
1549 | 1552 | ||
1550 | ghc->gpa = gpa; | 1553 | ghc->gpa = gpa; |
1551 | ghc->generation = slots->generation; | 1554 | ghc->generation = slots->generation; |
1552 | ghc->memslot = gfn_to_memslot(kvm, gfn); | 1555 | ghc->len = len; |
1553 | ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL); | 1556 | ghc->memslot = gfn_to_memslot(kvm, start_gfn); |
1554 | if (!kvm_is_error_hva(ghc->hva)) | 1557 | ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail); |
1558 | if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) { | ||
1555 | ghc->hva += offset; | 1559 | ghc->hva += offset; |
1556 | else | 1560 | } else { |
1557 | return -EFAULT; | 1561 | /* |
1558 | 1562 | * If the requested region crosses two memslots, we still | |
1563 | * verify that the entire region is valid here. | ||
1564 | */ | ||
1565 | while (start_gfn <= end_gfn) { | ||
1566 | ghc->memslot = gfn_to_memslot(kvm, start_gfn); | ||
1567 | ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, | ||
1568 | &nr_pages_avail); | ||
1569 | if (kvm_is_error_hva(ghc->hva)) | ||
1570 | return -EFAULT; | ||
1571 | start_gfn += nr_pages_avail; | ||
1572 | } | ||
1573 | /* Use the slow path for cross page reads and writes. */ | ||
1574 | ghc->memslot = NULL; | ||
1575 | } | ||
1559 | return 0; | 1576 | return 0; |
1560 | } | 1577 | } |
1561 | EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); | 1578 | EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); |
@@ -1566,8 +1583,13 @@ int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, | |||
1566 | struct kvm_memslots *slots = kvm_memslots(kvm); | 1583 | struct kvm_memslots *slots = kvm_memslots(kvm); |
1567 | int r; | 1584 | int r; |
1568 | 1585 | ||
1586 | BUG_ON(len > ghc->len); | ||
1587 | |||
1569 | if (slots->generation != ghc->generation) | 1588 | if (slots->generation != ghc->generation) |
1570 | kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa); | 1589 | kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len); |
1590 | |||
1591 | if (unlikely(!ghc->memslot)) | ||
1592 | return kvm_write_guest(kvm, ghc->gpa, data, len); | ||
1571 | 1593 | ||
1572 | if (kvm_is_error_hva(ghc->hva)) | 1594 | if (kvm_is_error_hva(ghc->hva)) |
1573 | return -EFAULT; | 1595 | return -EFAULT; |
@@ -1587,8 +1609,13 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, | |||
1587 | struct kvm_memslots *slots = kvm_memslots(kvm); | 1609 | struct kvm_memslots *slots = kvm_memslots(kvm); |
1588 | int r; | 1610 | int r; |
1589 | 1611 | ||
1612 | BUG_ON(len > ghc->len); | ||
1613 | |||
1590 | if (slots->generation != ghc->generation) | 1614 | if (slots->generation != ghc->generation) |
1591 | kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa); | 1615 | kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len); |
1616 | |||
1617 | if (unlikely(!ghc->memslot)) | ||
1618 | return kvm_read_guest(kvm, ghc->gpa, data, len); | ||
1592 | 1619 | ||
1593 | if (kvm_is_error_hva(ghc->hva)) | 1620 | if (kvm_is_error_hva(ghc->hva)) |
1594 | return -EFAULT; | 1621 | return -EFAULT; |
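The kvm_main.c change teaches the gfn-to-hva cache about the access length: the init path validates every page in the range, and if the range spans more than one memslot it leaves ghc->memslot NULL so cached readers and writers fall back to the uncached slow path; the BUG_ON()s catch callers that later ask for more than they cached. A sketch of the fast/slow split on the write side, with the final copy step paraphrased from the surrounding function:

    /* Sketch of kvm_write_guest_cached() after the patch; the read side is
     * symmetric, using kvm_read_guest() as its slow path. */
    BUG_ON(len > ghc->len);
    if (slots->generation != ghc->generation)
            kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
    if (unlikely(!ghc->memslot))                 /* range crosses memslots */
            return kvm_write_guest(kvm, ghc->gpa, data, len);
    if (kvm_is_error_hva(ghc->hva))
            return -EFAULT;
    r = __copy_to_user((void __user *)ghc->hva, data, len);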