author     Ingo Molnar <mingo@kernel.org>    2016-05-07 01:00:07 -0400
committer  Ingo Molnar <mingo@kernel.org>    2016-05-07 01:00:07 -0400
commit     35dc9ec1076b79c31bf7ed538af008b7f23bb14d (patch)
tree       f90affdd5dc45f47f8d5d84ea34c71e5e521e7a1
parent     0ec7ae928a9c19c2b7b8054507d5694a2597065e (diff)
parent     07837831047fb72856d1f61a726a4094397facd8 (diff)
Merge branch 'linus' into efi/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
379 files changed, 3888 insertions, 2056 deletions
diff --git a/.mailmap b/.mailmap
@@ -48,6 +48,9 @@ Felix Kuhling <fxkuehl@gmx.de>
 Felix Moeller <felix@derklecks.de>
 Filipe Lautert <filipe@icewall.org>
 Franck Bui-Huu <vagabon.xyz@gmail.com>
+Frank Rowand <frowand.list@gmail.com> <frowand@mvista.com>
+Frank Rowand <frowand.list@gmail.com> <frank.rowand@am.sony.com>
+Frank Rowand <frowand.list@gmail.com> <frank.rowand@sonymobile.com>
 Frank Zago <fzago@systemfabricworks.com>
 Greg Kroah-Hartman <greg@echidna.(none)>
 Greg Kroah-Hartman <gregkh@suse.de>
@@ -66,6 +69,7 @@ Jean Tourrilhes <jt@hpl.hp.com>
 Jeff Garzik <jgarzik@pretzel.yyz.us>
 Jens Axboe <axboe@suse.de>
 Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
+John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
 John Stultz <johnstul@us.ibm.com>
 <josh@joshtriplett.org> <josh@freedesktop.org>
 <josh@joshtriplett.org> <josh@kernel.org>
@@ -79,6 +83,7 @@ Kay Sievers <kay.sievers@vrfy.org>
 Kenneth W Chen <kenneth.w.chen@intel.com>
 Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
 Koushik <raghavendra.koushik@neterion.com>
+Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com>
 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 Leonid I Ananiev <leonid.i.ananiev@intel.com>
 Linas Vepstas <linas@austin.ibm.com>
diff --git a/Documentation/devicetree/bindings/arc/archs-pct.txt b/Documentation/devicetree/bindings/arc/archs-pct.txt
index 1ae98b87c640..e4b9dcee6d41 100644
--- a/Documentation/devicetree/bindings/arc/archs-pct.txt
+++ b/Documentation/devicetree/bindings/arc/archs-pct.txt
@@ -2,7 +2,7 @@
 
 The ARC HS can be configured with a pipeline performance monitor for counting
 CPU and cache events like cache misses and hits. Like conventional PCT there
-are 100+ hardware conditions dynamically mapped to upto 32 counters.
+are 100+ hardware conditions dynamically mapped to up to 32 counters.
 It also supports overflow interrupts.
 
 Required properties:
diff --git a/Documentation/devicetree/bindings/arc/pct.txt b/Documentation/devicetree/bindings/arc/pct.txt
index 7b9588444f20..4e874d9a38a6 100644
--- a/Documentation/devicetree/bindings/arc/pct.txt
+++ b/Documentation/devicetree/bindings/arc/pct.txt
@@ -2,7 +2,7 @@
 
 The ARC700 can be configured with a pipeline performance monitor for counting
 CPU and cache events like cache misses and hits. Like conventional PCT there
-are 100+ hardware conditions dynamically mapped to upto 32 counters
+are 100+ hardware conditions dynamically mapped to up to 32 counters
 
 Note that:
 * The ARC 700 PCT does not support interrupts; although HW events may be
diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt
index ccc62f145306..3f0cbbb8395f 100644
--- a/Documentation/devicetree/bindings/arm/cpus.txt
+++ b/Documentation/devicetree/bindings/arm/cpus.txt
@@ -192,7 +192,6 @@ nodes to be present and contain the properties described below.
 can be one of:
 "allwinner,sun6i-a31"
 "allwinner,sun8i-a23"
-"arm,psci"
 "arm,realview-smp"
 "brcm,bcm-nsp-smp"
 "brcm,brahma-b15"
diff --git a/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt b/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
index f0d71bc52e64..0b4a85fe2d86 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
@@ -6,8 +6,8 @@ RK3xxx SoCs.
 Required properties :
 
 - reg : Offset and length of the register set for the device
-- compatible : should be "rockchip,rk3066-i2c", "rockchip,rk3188-i2c" or
-  "rockchip,rk3288-i2c".
+- compatible : should be "rockchip,rk3066-i2c", "rockchip,rk3188-i2c",
+  "rockchip,rk3228-i2c" or "rockchip,rk3288-i2c".
 - interrupts : interrupt number
 - clocks : parent clock
 
diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt
index 28a4781ab6d7..0ae06491b430 100644
--- a/Documentation/devicetree/bindings/net/cpsw.txt
+++ b/Documentation/devicetree/bindings/net/cpsw.txt
@@ -45,13 +45,13 @@ Required properties:
 Optional properties:
 - dual_emac_res_vlan : Specifies VID to be used to segregate the ports
 - mac-address : See ethernet.txt file in the same directory
-- phy_id : Specifies slave phy id
+- phy_id : Specifies slave phy id (deprecated, use phy-handle)
 - phy-handle : See ethernet.txt file in the same directory
 
 Slave sub-nodes:
 - fixed-link : See fixed-link.txt file in the same directory
-Either the property phy_id, or the sub-node
-fixed-link can be specified
+
+Note: Exactly one of phy_id, phy-handle, or fixed-link must be specified.
 
 Note: "ti,hwmods" field is used to fetch the base address and irq
 resources from TI, omap hwmod data base during device registration.
diff --git a/Documentation/networking/altera_tse.txt b/Documentation/networking/altera_tse.txt
index 3f24df8c6e65..50b8589d12fd 100644
--- a/Documentation/networking/altera_tse.txt
+++ b/Documentation/networking/altera_tse.txt
@@ -6,7 +6,7 @@ This is the driver for the Altera Triple-Speed Ethernet (TSE) controllers
 using the SGDMA and MSGDMA soft DMA IP components. The driver uses the
 platform bus to obtain component resources. The designs used to test this
 driver were built for a Cyclone(R) V SOC FPGA board, a Cyclone(R) V FPGA board,
-and tested with ARM and NIOS processor hosts seperately. The anticipated use
+and tested with ARM and NIOS processor hosts separately. The anticipated use
 cases are simple communications between an embedded system and an external peer
 for status and simple configuration of the embedded system.
 
@@ -65,14 +65,14 @@ Driver parameters can be also passed in command line by using:
 4.1) Transmit process
 When the driver's transmit routine is called by the kernel, it sets up a
 transmit descriptor by calling the underlying DMA transmit routine (SGDMA or
-MSGDMA), and initites a transmit operation. Once the transmit is complete, an
+MSGDMA), and initiates a transmit operation. Once the transmit is complete, an
 interrupt is driven by the transmit DMA logic. The driver handles the transmit
 completion in the context of the interrupt handling chain by recycling
 resource required to send and track the requested transmit operation.
 
 4.2) Receive process
 The driver will post receive buffers to the receive DMA logic during driver
-intialization. Receive buffers may or may not be queued depending upon the
+initialization. Receive buffers may or may not be queued depending upon the
 underlying DMA logic (MSGDMA is able queue receive buffers, SGDMA is not able
 to queue receive buffers to the SGDMA receive logic). When a packet is
 received, the DMA logic generates an interrupt. The driver handles a receive
diff --git a/Documentation/networking/ipvlan.txt b/Documentation/networking/ipvlan.txt
index cf996394e466..14422f8fcdc4 100644
--- a/Documentation/networking/ipvlan.txt
+++ b/Documentation/networking/ipvlan.txt
@@ -8,7 +8,7 @@ Initial Release:
 This is conceptually very similar to the macvlan driver with one major
 exception of using L3 for mux-ing /demux-ing among slaves. This property makes
 the master device share the L2 with it's slave devices. I have developed this
-driver in conjuntion with network namespaces and not sure if there is use case
+driver in conjunction with network namespaces and not sure if there is use case
 outside of it.
 
 
@@ -42,7 +42,7 @@ out. In this mode the slaves will RX/TX multicast and broadcast (if applicable)
 as well.
 
 4.2 L3 mode:
-In this mode TX processing upto L3 happens on the stack instance attached
+In this mode TX processing up to L3 happens on the stack instance attached
 to the slave device and packets are switched to the stack instance of the
 master device for the L2 processing and routing from that instance will be
 used before packets are queued on the outbound device. In this mode the slaves
@@ -56,7 +56,7 @@ situations defines your use case then you can choose to use ipvlan -
 (a) The Linux host that is connected to the external switch / router has
 policy configured that allows only one mac per port.
 (b) No of virtual devices created on a master exceed the mac capacity and
-puts the NIC in promiscous mode and degraded performance is a concern.
+puts the NIC in promiscuous mode and degraded performance is a concern.
 (c) If the slave device is to be put into the hostile / untrusted network
 namespace where L2 on the slave could be changed / misused.
 
diff --git a/Documentation/networking/pktgen.txt b/Documentation/networking/pktgen.txt
index f4be85e96005..2c4e3354e128 100644
--- a/Documentation/networking/pktgen.txt
+++ b/Documentation/networking/pktgen.txt
@@ -67,12 +67,12 @@ The two basic thread commands are:
 * add_device DEVICE@NAME -- adds a single device
 * rem_device_all -- remove all associated devices
 
-When adding a device to a thread, a corrosponding procfile is created
+When adding a device to a thread, a corresponding procfile is created
 which is used for configuring this device. Thus, device names need to
 be unique.
 
 To support adding the same device to multiple threads, which is useful
-with multi queue NICs, a the device naming scheme is extended with "@":
+with multi queue NICs, the device naming scheme is extended with "@":
 device@something
 
 The part after "@" can be anything, but it is custom to use the thread
@@ -221,7 +221,7 @@ Sample scripts
 
 A collection of tutorial scripts and helpers for pktgen is in the
 samples/pktgen directory. The helper parameters.sh file support easy
-and consistant parameter parsing across the sample scripts.
+and consistent parameter parsing across the sample scripts.
 
 Usage example and help:
 ./pktgen_sample01_simple.sh -i eth4 -m 00:1B:21:3C:9D:F8 -d 192.168.8.2
diff --git a/Documentation/networking/vrf.txt b/Documentation/networking/vrf.txt
index d52aa10cfe91..5da679c573d2 100644
--- a/Documentation/networking/vrf.txt
+++ b/Documentation/networking/vrf.txt
@@ -41,7 +41,7 @@ using an rx_handler which gives the impression that packets flow through
 the VRF device. Similarly on egress routing rules are used to send packets
 to the VRF device driver before getting sent out the actual interface. This
 allows tcpdump on a VRF device to capture all packets into and out of the
-VRF as a whole.[1] Similiarly, netfilter [2] and tc rules can be applied
+VRF as a whole.[1] Similarly, netfilter [2] and tc rules can be applied
 using the VRF device to specify rules that apply to the VRF domain as a whole.
 
 [1] Packets in the forwarded state do not flow through the device, so those
diff --git a/Documentation/networking/xfrm_sync.txt b/Documentation/networking/xfrm_sync.txt
index d7aac9dedeb4..8d88e0f2ec49 100644
--- a/Documentation/networking/xfrm_sync.txt
+++ b/Documentation/networking/xfrm_sync.txt
@@ -4,7 +4,7 @@ Krisztian <hidden@balabit.hu> and others and additional patches
 from Jamal <hadi@cyberus.ca>.
 
 The end goal for syncing is to be able to insert attributes + generate
-events so that the an SA can be safely moved from one machine to another
+events so that the SA can be safely moved from one machine to another
 for HA purposes.
 The idea is to synchronize the SA so that the takeover machine can do
 the processing of the SA as accurate as possible if it has access to it.
@@ -13,7 +13,7 @@ We already have the ability to generate SA add/del/upd events.
 These patches add ability to sync and have accurate lifetime byte (to
 ensure proper decay of SAs) and replay counters to avoid replay attacks
 with as minimal loss at failover time.
-This way a backup stays as closely uptodate as an active member.
+This way a backup stays as closely up-to-date as an active member.
 
 Because the above items change for every packet the SA receives,
 it is possible for a lot of the events to be generated.
@@ -163,7 +163,7 @@ If you have an SA that is getting hit by traffic in bursts such that
 there is a period where the timer threshold expires with no packets
 seen, then an odd behavior is seen as follows:
 The first packet arrival after a timer expiry will trigger a timeout
-aevent; i.e we dont wait for a timeout period or a packet threshold
+event; i.e we don't wait for a timeout period or a packet threshold
 to be reached. This is done for simplicity and efficiency reasons.
 
 -JHS
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index cb0368459da3..34a5fece3121 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -581,15 +581,16 @@ Specify "[Nn]ode" for node order
 "Zone Order" orders the zonelists by zone type, then by node within each
 zone. Specify "[Zz]one" for zone order.
 
-Specify "[Dd]efault" to request automatic configuration. Autoconfiguration
-will select "node" order in following case.
-(1) if the DMA zone does not exist or
-(2) if the DMA zone comprises greater than 50% of the available memory or
-(3) if any node's DMA zone comprises greater than 70% of its local memory and
-the amount of local memory is big enough.
-
-Otherwise, "zone" order will be selected. Default order is recommended unless
-this is causing problems for your system/application.
+Specify "[Dd]efault" to request automatic configuration.
+
+On 32-bit, the Normal zone needs to be preserved for allocations accessible
+by the kernel, so "zone" order will be selected.
+
+On 64-bit, devices that require DMA32/DMA are relatively rare, so "node"
+order will be selected.
+
+Default order is recommended unless this is causing problems for your
+system/application.
 
 ==============================================================
 
diff --git a/MAINTAINERS b/MAINTAINERS
index 1d5b4becab6f..a727d9959ecd 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -872,9 +872,9 @@ F: drivers/perf/arm_pmu.c
 F: include/linux/perf/arm_pmu.h
 
 ARM PORT
-M: Russell King <linux@arm.linux.org.uk>
+M: Russell King <linux@armlinux.org.uk>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://www.arm.linux.org.uk/
+W: http://www.armlinux.org.uk/
 S: Maintained
 F: arch/arm/
 
@@ -886,35 +886,35 @@ F: arch/arm/plat-*/
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc.git
 
 ARM PRIMECELL AACI PL041 DRIVER
-M: Russell King <linux@arm.linux.org.uk>
+M: Russell King <linux@armlinux.org.uk>
 S: Maintained
 F: sound/arm/aaci.*
 
 ARM PRIMECELL CLCD PL110 DRIVER
-M: Russell King <linux@arm.linux.org.uk>
+M: Russell King <linux@armlinux.org.uk>
 S: Maintained
 F: drivers/video/fbdev/amba-clcd.*
 
 ARM PRIMECELL KMI PL050 DRIVER
-M: Russell King <linux@arm.linux.org.uk>
+M: Russell King <linux@armlinux.org.uk>
 S: Maintained
 F: drivers/input/serio/ambakmi.*
 F: include/linux/amba/kmi.h
 
 ARM PRIMECELL MMCI PL180/1 DRIVER
-M: Russell King <linux@arm.linux.org.uk>
+M: Russell King <linux@armlinux.org.uk>
 S: Maintained
 F: drivers/mmc/host/mmci.*
 F: include/linux/amba/mmci.h
 
 ARM PRIMECELL UART PL010 AND PL011 DRIVERS
-M: Russell King <linux@arm.linux.org.uk>
+M: Russell King <linux@armlinux.org.uk>
 S: Maintained
 F: drivers/tty/serial/amba-pl01*.c
 F: include/linux/amba/serial.h
 
 ARM PRIMECELL BUS SUPPORT
-M: Russell King <linux@arm.linux.org.uk>
+M: Russell King <linux@armlinux.org.uk>
 S: Maintained
 F: drivers/amba/
 F: include/linux/amba/bus.h
@@ -1036,7 +1036,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
 
 ARM/CLKDEV SUPPORT
-M: Russell King <linux@arm.linux.org.uk>
+M: Russell King <linux@armlinux.org.uk>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
 F: arch/arm/include/asm/clkdev.h
@@ -1093,9 +1093,9 @@ F: arch/arm/boot/dts/cx92755*
 N: digicolor
 
 ARM/EBSA110 MACHINE SUPPORT
-M: Russell King <linux@arm.linux.org.uk>
+M: Russell King <linux@armlinux.org.uk>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://www.arm.linux.org.uk/
+W: http://www.armlinux.org.uk/
 S: Maintained
 F: arch/arm/mach-ebsa110/
 F: drivers/net/ethernet/amd/am79c961a.*
@@ -1124,9 +1124,9 @@ T: git git://git.berlios.de/gemini-board
 F: arch/arm/mm/*-fa*
 
 ARM/FOOTBRIDGE ARCHITECTURE
-M: Russell King <linux@arm.linux.org.uk>
+M: Russell King <linux@armlinux.org.uk>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://www.arm.linux.org.uk/
+W: http://www.armlinux.org.uk/
 S: Maintained
 F: arch/arm/include/asm/hardware/dec21285.h
 F: arch/arm/mach-footbridge/
@@ -1457,7 +1457,7 @@ S: Maintained
 ARM/PT DIGITAL BOARD PORT
 M: Stefan Eletzhofer <stefan.eletzhofer@eletztrick.de>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://www.arm.linux.org.uk/
+W: http://www.armlinux.org.uk/
 S: Maintained
 
 ARM/QUALCOMM SUPPORT
@@ -1493,9 +1493,9 @@ S: Supported
 F: arch/arm64/boot/dts/renesas/
 
 ARM/RISCPC ARCHITECTURE
-M: Russell King <linux@arm.linux.org.uk>
+M: Russell King <linux@armlinux.org.uk>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://www.arm.linux.org.uk/
+W: http://www.armlinux.org.uk/
 S: Maintained
 F: arch/arm/include/asm/hardware/entry-macro-iomd.S
 F: arch/arm/include/asm/hardware/ioc.h
@@ -1773,9 +1773,9 @@ F: drivers/clk/versatile/clk-vexpress-osc.c
 F: drivers/clocksource/versatile.c
 
 ARM/VFP SUPPORT
-M: Russell King <linux@arm.linux.org.uk>
+M: Russell King <linux@armlinux.org.uk>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://www.arm.linux.org.uk/
+W: http://www.armlinux.org.uk/
 S: Maintained
 F: arch/arm/vfp/
 
@@ -2921,7 +2921,7 @@ F: mm/cleancache.c
 F: include/linux/cleancache.h
 
 CLK API
-M: Russell King <linux@arm.linux.org.uk>
+M: Russell King <linux@armlinux.org.uk>
 L: linux-clk@vger.kernel.org
 S: Maintained
 F: include/linux/clk.h
@@ -3354,9 +3354,9 @@ S: Supported
 F: drivers/net/ethernet/stmicro/stmmac/
 
 CYBERPRO FB DRIVER
-M: Russell King <linux@arm.linux.org.uk>
+M: Russell King <linux@armlinux.org.uk>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://www.arm.linux.org.uk/
+W: http://www.armlinux.org.uk/
 S: Maintained
 F: drivers/video/fbdev/cyber2000fb.*
 
@@ -3881,7 +3881,7 @@ F: Documentation/devicetree/bindings/display/st,stih4xx.txt
 
 DRM DRIVERS FOR VIVANTE GPU IP
 M: Lucas Stach <l.stach@pengutronix.de>
-R: Russell King <linux+etnaviv@arm.linux.org.uk>
+R: Russell King <linux+etnaviv@armlinux.org.uk>
 R: Christian Gmeiner <christian.gmeiner@gmail.com>
 L: dri-devel@lists.freedesktop.org
 S: Maintained
@@ -4223,8 +4223,8 @@ F: Documentation/efi-stub.txt
 F: arch/ia64/kernel/efi.c
 F: arch/x86/boot/compressed/eboot.[ch]
 F: arch/x86/include/asm/efi.h
-F: arch/x86/platform/efi/*
-F: drivers/firmware/efi/*
+F: arch/x86/platform/efi/
+F: drivers/firmware/efi/
 F: include/linux/efi*.h
 
 EFI VARIABLE FILESYSTEM
@@ -4744,7 +4744,7 @@ F: drivers/platform/x86/fujitsu-tablet.c
 
 FUSE: FILESYSTEM IN USERSPACE
 M: Miklos Szeredi <miklos@szeredi.hu>
-L: fuse-devel@lists.sourceforge.net
+L: linux-fsdevel@vger.kernel.org
 W: http://fuse.sourceforge.net/
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse.git
 S: Maintained
@@ -4903,7 +4903,7 @@ F: net/ipv4/gre_offload.c
 F: include/net/gre.h
 
 GRETH 10/100/1G Ethernet MAC device driver
-M: Kristoffer Glembo <kristoffer@gaisler.com>
+M: Andreas Larsson <andreas@gaisler.com>
 L: netdev@vger.kernel.org
 S: Maintained
 F: drivers/net/ethernet/aeroflex/
@@ -6027,7 +6027,7 @@ F: include/scsi/*iscsi*
 
 ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR
 M: Or Gerlitz <ogerlitz@mellanox.com>
-M: Sagi Grimberg <sagig@mellanox.com>
+M: Sagi Grimberg <sagi@grimberg.me>
 M: Roi Dayan <roid@mellanox.com>
 L: linux-rdma@vger.kernel.org
 S: Supported
@@ -6037,7 +6037,7 @@ Q: http://patchwork.kernel.org/project/linux-rdma/list/
 F: drivers/infiniband/ulp/iser/
 
 ISCSI EXTENSIONS FOR RDMA (ISER) TARGET
-M: Sagi Grimberg <sagig@mellanox.com>
+M: Sagi Grimberg <sagi@grimberg.me>
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
 L: linux-rdma@vger.kernel.org
 L: target-devel@vger.kernel.org
@@ -6400,7 +6400,7 @@ F: mm/kmemleak.c
 F: mm/kmemleak-test.c
 
 KPROBES
-M: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
+M: Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com>
 M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 M: "David S. Miller" <davem@davemloft.net>
 M: Masami Hiramatsu <mhiramat@kernel.org>
@@ -6905,7 +6905,7 @@ L: linux-man@vger.kernel.org
 S: Maintained
 
 MARVELL ARMADA DRM SUPPORT
-M: Russell King <rmk+kernel@arm.linux.org.uk>
+M: Russell King <rmk+kernel@armlinux.org.uk>
 S: Maintained
 F: drivers/gpu/drm/armada/
 
@@ -7905,7 +7905,7 @@ S: Supported
 F: drivers/nfc/nxp-nci
 
 NXP TDA998X DRM DRIVER
-M: Russell King <rmk+kernel@arm.linux.org.uk>
+M: Russell King <rmk+kernel@armlinux.org.uk>
 S: Supported
 F: drivers/gpu/drm/i2c/tda998x_drv.c
 F: include/drm/i2c/tda998x.h
@@ -7978,7 +7978,7 @@ F: arch/arm/*omap*/*pm*
 F: drivers/cpufreq/omap-cpufreq.c
 
 OMAP POWERDOMAIN SOC ADAPTATION LAYER SUPPORT
-M: Rajendra Nayak <rnayak@ti.com>
+M: Rajendra Nayak <rnayak@codeaurora.org>
 M: Paul Walmsley <paul@pwsan.com>
 L: linux-omap@vger.kernel.org
 S: Maintained
@@ -10014,7 +10014,8 @@ F: drivers/infiniband/hw/ocrdma/
 
 SFC NETWORK DRIVER
 M: Solarflare linux maintainers <linux-net-drivers@solarflare.com>
-M: Shradha Shah <sshah@solarflare.com>
+M: Edward Cree <ecree@solarflare.com>
+M: Bert Kenward <bkenward@solarflare.com>
 L: netdev@vger.kernel.org
 S: Supported
 F: drivers/net/ethernet/sfc/
@@ -11071,6 +11072,15 @@ S: Maintained
 F: drivers/clk/ti/
 F: include/linux/clk/ti.h
 
+TI ETHERNET SWITCH DRIVER (CPSW)
+M: Mugunthan V N <mugunthanvnm@ti.com>
+R: Grygorii Strashko <grygorii.strashko@ti.com>
+L: linux-omap@vger.kernel.org
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/ethernet/ti/cpsw*
+F: drivers/net/ethernet/ti/davinci*
+
 TI FLASH MEDIA INTERFACE DRIVER
 M: Alex Dubov <oakad@yahoo.com>
 S: Maintained
diff --git a/Makefile b/Makefile
@@ -1,8 +1,8 @@
 VERSION = 4
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
-NAME = Blurry Fish Butt
+EXTRAVERSION = -rc6
+NAME = Charred Weasel
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 12d0284a46e5..a8767430df7d 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -35,8 +35,10 @@ config ARC
 select NO_BOOTMEM
 select OF
 select OF_EARLY_FLATTREE
+select OF_RESERVED_MEM
 select PERF_USE_VMALLOC
 select HAVE_DEBUG_STACKOVERFLOW
+select HAVE_GENERIC_DMA_COHERENT
 
 config MIGHT_HAVE_PCI
 bool
@@ -56,6 +58,9 @@ config GENERIC_CSUM
 config RWSEM_GENERIC_SPINLOCK
 def_bool y
 
+config ARCH_DISCONTIGMEM_ENABLE
+def_bool y
+
 config ARCH_FLATMEM_ENABLE
 def_bool y
 
@@ -345,6 +350,15 @@ config ARC_HUGEPAGE_16M
 
 endchoice
 
+config NODES_SHIFT
+int "Maximum NUMA Nodes (as a power of 2)"
+default "1" if !DISCONTIGMEM
+default "2" if DISCONTIGMEM
+depends on NEED_MULTIPLE_NODES
+---help---
+Accessing memory beyond 1GB (with or w/o PAE) requires 2 memory
+zones.
+
 if ISA_ARCOMPACT
 
 config ARC_COMPACT_IRQ_LEVELS
@@ -453,6 +467,7 @@ config LINUX_LINK_BASE
 
 config HIGHMEM
 bool "High Memory Support"
+select DISCONTIGMEM
 help
 With ARC 2G:2G address split, only upper 2G is directly addressable by
 kernel. Enable this to potentially allow access to rest of 2G and PAE
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
index 17f85c9c73cf..c22b181e8206 100644
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -13,6 +13,15 @@
 #include <asm/byteorder.h>
 #include <asm/page.h>
 
+#ifdef CONFIG_ISA_ARCV2
+#include <asm/barrier.h>
+#define __iormb() rmb()
+#define __iowmb() wmb()
+#else
+#define __iormb() do { } while (0)
+#define __iowmb() do { } while (0)
+#endif
+
 extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size);
 extern void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
 unsigned long flags);
@@ -31,6 +40,15 @@ extern void iounmap(const void __iomem *addr);
 #define ioremap_wc(phy, sz) ioremap(phy, sz)
 #define ioremap_wt(phy, sz) ioremap(phy, sz)
 
+/*
+ * io{read,write}{16,32}be() macros
+ */
+#define ioread16be(p) ({ u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
+#define ioread32be(p) ({ u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
+
+#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force u16)cpu_to_be16(v), p); })
+#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force u32)cpu_to_be32(v), p); })
+
 /* Change struct page to physical address */
 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
 
@@ -108,15 +126,6 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
 
 }
 
-#ifdef CONFIG_ISA_ARCV2
-#include <asm/barrier.h>
-#define __iormb() rmb()
-#define __iowmb() wmb()
-#else
-#define __iormb() do { } while (0)
-#define __iowmb() do { } while (0)
-#endif
-
 /*
  * MMIO can also get buffered/optimized in micro-arch, so barriers needed
  * Based on ARM model for the typical use case
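Note (editorial context, not part of the patch): a minimal sketch of how a driver would typically use the big-endian MMIO accessors wired up above. ioread32be()/iowrite32be() are the standard kernel interfaces being provided here; the function names and register offsets in the sketch are illustrative assumptions only.

  /* Illustration: big-endian register access via the accessors defined above */
  #include <linux/io.h>

  static u32 example_read_status(void __iomem *regs)
  {
          /* 32-bit big-endian read; __iormb() orders it against later accesses */
          return ioread32be(regs + 0x04);        /* hypothetical STATUS register */
  }

  static void example_write_ctrl(void __iomem *regs, u32 val)
  {
          /* __iowmb() orders prior writes before the big-endian store */
          iowrite32be(val, regs + 0x00);         /* hypothetical CTRL register */
  }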
diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
index 37c2f751eebf..d1ec7f6b31e0 100644
--- a/arch/arc/include/asm/irqflags-arcv2.h
+++ b/arch/arc/include/asm/irqflags-arcv2.h
@@ -18,6 +18,12 @@
 #define STATUS_AD_MASK (1<<STATUS_AD_BIT)
 #define STATUS_IE_MASK (1<<STATUS_IE_BIT)
 
+/* status32 Bits as encoded/expected by CLRI/SETI */
+#define CLRI_STATUS_IE_BIT 4
+
+#define CLRI_STATUS_E_MASK 0xF
+#define CLRI_STATUS_IE_MASK (1 << CLRI_STATUS_IE_BIT)
+
 #define AUX_USER_SP 0x00D
 #define AUX_IRQ_CTRL 0x00E
 #define AUX_IRQ_ACT 0x043 /* Active Intr across all levels */
@@ -100,6 +106,13 @@ static inline long arch_local_save_flags(void)
 :
 : "memory");
 
+/* To be compatible with irq_save()/irq_restore()
+ * encode the irq bits as expected by CLRI/SETI
+ * (this was needed to make CONFIG_TRACE_IRQFLAGS work)
+ */
+temp = (1 << 5) |
+((!!(temp & STATUS_IE_MASK)) << CLRI_STATUS_IE_BIT) |
+(temp & CLRI_STATUS_E_MASK);
 return temp;
 }
 
@@ -108,7 +121,7 @@ static inline long arch_local_save_flags(void)
 */
 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
-return !(flags & (STATUS_IE_MASK));
+return !(flags & CLRI_STATUS_IE_MASK);
 }
 
 static inline int arch_irqs_disabled(void)
@@ -128,11 +141,32 @@ static inline void arc_softirq_clear(int irq)
 
 #else
 
+#ifdef CONFIG_TRACE_IRQFLAGS
+
+.macro TRACE_ASM_IRQ_DISABLE
+bl trace_hardirqs_off
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+bl trace_hardirqs_on
+.endm
+
+#else
+
+.macro TRACE_ASM_IRQ_DISABLE
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+.endm
+
+#endif
 .macro IRQ_DISABLE scratch
 clri
+TRACE_ASM_IRQ_DISABLE
 .endm
 
 .macro IRQ_ENABLE scratch
+TRACE_ASM_IRQ_ENABLE
 seti
 .endm
 
diff --git a/arch/arc/include/asm/mmzone.h b/arch/arc/include/asm/mmzone.h
new file mode 100644
index 000000000000..8e97136413d9
--- /dev/null
+++ b/arch/arc/include/asm/mmzone.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_MMZONE_H
+#define _ASM_ARC_MMZONE_H
+
+#ifdef CONFIG_DISCONTIGMEM
+
+extern struct pglist_data node_data[];
+#define NODE_DATA(nid) (&node_data[nid])
+
+static inline int pfn_to_nid(unsigned long pfn)
+{
+int is_end_low = 1;
+
+if (IS_ENABLED(CONFIG_ARC_HAS_PAE40))
+is_end_low = pfn <= virt_to_pfn(0xFFFFFFFFUL);
+
+/*
+ * node 0: lowmem: 0x8000_0000 to 0xFFFF_FFFF
+ * node 1: HIGHMEM w/o PAE40: 0x0 to 0x7FFF_FFFF
+ *         HIGHMEM with PAE40: 0x1_0000_0000 to ...
+ */
+if (pfn >= ARCH_PFN_OFFSET && is_end_low)
+return 0;
+
+return 1;
+}
+
+static inline int pfn_valid(unsigned long pfn)
+{
+int nid = pfn_to_nid(pfn);
+
+return (pfn <= node_end_pfn(nid));
+}
+#endif /* CONFIG_DISCONTIGMEM */
+
+#endif
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index 36da89e2c853..0d53854884d0 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -72,11 +72,20 @@ typedef unsigned long pgprot_t;
 
 typedef pte_t * pgtable_t;
 
+/*
+ * Use virt_to_pfn with caution:
+ * If used in pte or paddr related macros, it could cause truncation
+ * in PAE40 builds
+ * As a rule of thumb, only use it in helpers starting with virt_
+ * You have been warned !
+ */
 #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
 
 #define ARCH_PFN_OFFSET virt_to_pfn(CONFIG_LINUX_LINK_BASE)
 
+#ifdef CONFIG_FLATMEM
 #define pfn_valid(pfn) (((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+#endif
 
 /*
  * __pa, __va, virt_to_page (ALERT: deprecated, don't use them)
@@ -85,12 +94,10 @@ typedef pte_t * pgtable_t;
 * virt here means link-address/program-address as embedded in object code.
 * And for ARC, link-addr = physical address
 */
-#define __pa(vaddr) ((unsigned long)vaddr)
+#define __pa(vaddr) ((unsigned long)(vaddr))
 #define __va(paddr) ((void *)((unsigned long)(paddr)))
 
-#define virt_to_page(kaddr) \
-(mem_map + virt_to_pfn((kaddr) - CONFIG_LINUX_LINK_BASE))
-
+#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
 #define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr))
 
 /* Default Permissions for stack/heaps pages (Non Executable) */
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 7d6c93e63adf..10d4b8b8e545 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -278,14 +278,13 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
 #define pmd_present(x) (pmd_val(x))
 #define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)
 
-#define pte_page(pte) \
-(mem_map + virt_to_pfn(pte_val(pte) - CONFIG_LINUX_LINK_BASE))
-
+#define pte_page(pte) pfn_to_page(pte_pfn(pte))
 #define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
-#define pte_pfn(pte) virt_to_pfn(pte_val(pte))
-#define pfn_pte(pfn, prot) (__pte(((pte_t)(pfn) << PAGE_SHIFT) | \
-pgprot_val(prot)))
-#define __pte_index(addr) (virt_to_pfn(addr) & (PTRS_PER_PTE - 1))
+#define pfn_pte(pfn, prot) (__pte(((pte_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+
+/* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/
+#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 
 /*
  * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
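Note (editorial context, not part of the patch): the reason pte_pfn() and __pte_index() above stop going through virt_to_pfn() is that with PAE40 a pte holds a physical address wider than 32 bits, so a detour through a 32-bit value silently drops the upper bits. A stand-alone sketch of the effect, assuming 4 KB pages (the page-shift value of 12 is an assumption for the example; ARC also supports larger page sizes):

  /* Illustration only, not kernel code: pfn extraction vs. 32-bit truncation */
  #include <stdint.h>
  #include <stdio.h>

  #define EXAMPLE_PAGE_SHIFT 12                    /* assumed 4 KB pages */

  int main(void)
  {
          uint64_t pte_val = 0x1234567000ULL;      /* 40-bit physical address held in a pte */
          uint32_t truncated = (uint32_t)pte_val;  /* what a 32-bit detour would keep */

          printf("pfn from the full pte value: 0x%llx\n",
                 (unsigned long long)(pte_val >> EXAMPLE_PAGE_SHIFT)); /* 0x1234567 */
          printf("pfn after 32-bit truncation: 0x%lx\n",
                 (unsigned long)(truncated >> EXAMPLE_PAGE_SHIFT));    /* 0x34567 - upper bits lost */
          return 0;
  }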
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index c1264607bbff..7a1c124ff021 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -69,8 +69,11 @@ ENTRY(handle_interrupt)
 
 clri ; To make status32.IE agree with CPU internal state
 
-lr r0, [ICAUSE]
+#ifdef CONFIG_TRACE_IRQFLAGS
+TRACE_ASM_IRQ_DISABLE
+#endif
 
+lr r0, [ICAUSE]
 mov blink, ret_from_exception
 
 b.d arch_do_IRQ
@@ -169,6 +172,11 @@ END(EV_TLBProtV)
 
 .Lrestore_regs:
 
+# Interrpts are actually disabled from this point on, but will get
+# reenabled after we return from interrupt/exception.
+# But irq tracer needs to be told now...
+TRACE_ASM_IRQ_ENABLE
+
 ld r0, [sp, PT_status32] ; U/K mode at time of entry
 lr r10, [AUX_IRQ_ACT]
 
diff --git a/arch/arc/kernel/entry-compact.S b/arch/arc/kernel/entry-compact.S
index 431433929189..0cb0abaa0479 100644
--- a/arch/arc/kernel/entry-compact.S
+++ b/arch/arc/kernel/entry-compact.S
@@ -341,6 +341,9 @@ END(call_do_page_fault)
 
 .Lrestore_regs:
 
+# Interrpts are actually disabled from this point on, but will get
+# reenabled after we return from interrupt/exception.
+# But irq tracer needs to be told now...
 TRACE_ASM_IRQ_ENABLE
 
 lr r10, [status32]
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 7d2c4fbf4f22..8be930394750 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -13,6 +13,7 @@
 #ifdef CONFIG_BLK_DEV_INITRD
 #include <linux/initrd.h>
 #endif
+#include <linux/of_fdt.h>
 #include <linux/swap.h>
 #include <linux/module.h>
 #include <linux/highmem.h>
@@ -29,11 +30,16 @@ static const unsigned long low_mem_start = CONFIG_LINUX_LINK_BASE;
 static unsigned long low_mem_sz;
 
 #ifdef CONFIG_HIGHMEM
-static unsigned long min_high_pfn;
+static unsigned long min_high_pfn, max_high_pfn;
 static u64 high_mem_start;
 static u64 high_mem_sz;
 #endif
 
+#ifdef CONFIG_DISCONTIGMEM
+struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
+EXPORT_SYMBOL(node_data);
+#endif
+
 /* User can over-ride above with "mem=nnn[KkMm]" in cmdline */
 static int __init setup_mem_sz(char *str)
 {
@@ -108,13 +114,11 @@ void __init setup_arch_memory(void)
 /* Last usable page of low mem */
 max_low_pfn = max_pfn = PFN_DOWN(low_mem_start + low_mem_sz);
 
-#ifdef CONFIG_HIGHMEM
-min_high_pfn = PFN_DOWN(high_mem_start);
-max_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
+#ifdef CONFIG_FLATMEM
+/* pfn_valid() uses this */
+max_mapnr = max_low_pfn - min_low_pfn;
 #endif
 
-max_mapnr = max_pfn - min_low_pfn;
-
 /*------------- bootmem allocator setup -----------------------*/
 
 /*
@@ -128,7 +132,7 @@ void __init setup_arch_memory(void)
 * the crash
 */
 
-memblock_add(low_mem_start, low_mem_sz);
+memblock_add_node(low_mem_start, low_mem_sz, 0);
 memblock_reserve(low_mem_start, __pa(_end) - low_mem_start);
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -136,6 +140,9 @@ void __init setup_arch_memory(void)
 memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
 #endif
 
+early_init_fdt_reserve_self();
+early_init_fdt_scan_reserved_mem();
+
 memblock_dump_all();
 
 /*----------------- node/zones setup --------------------------*/
@@ -145,13 +152,6 @@ void __init setup_arch_memory(void) | |||
145 | zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn; | 152 | zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn; |
146 | zones_holes[ZONE_NORMAL] = 0; | 153 | zones_holes[ZONE_NORMAL] = 0; |
147 | 154 | ||
148 | #ifdef CONFIG_HIGHMEM | ||
149 | zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn; | ||
150 | |||
151 | /* This handles the peripheral address space hole */ | ||
152 | zones_holes[ZONE_HIGHMEM] = min_high_pfn - max_low_pfn; | ||
153 | #endif | ||
154 | |||
155 | /* | 155 | /* |
156 | * We can't use the helper free_area_init(zones[]) because it uses | 156 | * We can't use the helper free_area_init(zones[]) because it uses |
157 | * PAGE_OFFSET to compute the @min_low_pfn which would be wrong | 157 | * PAGE_OFFSET to compute the @min_low_pfn which would be wrong |
@@ -164,6 +164,34 @@ void __init setup_arch_memory(void) | |||
164 | zones_holes); /* holes */ | 164 | zones_holes); /* holes */ |
165 | 165 | ||
166 | #ifdef CONFIG_HIGHMEM | 166 | #ifdef CONFIG_HIGHMEM |
167 | /* | ||
168 | * Populate a new node with highmem | ||
169 | * | ||
170 | * On ARC (w/o PAE) HIGHMEM addresses are actually smaller (0 based) | ||
171 | * than addresses in normal, i.e. low, memory (0x8000_0000 based). | ||
172 | * Even with PAE, the huge peripheral space hole would waste a lot of | ||
173 | * mem with a single mem_map[]. This warrants a mem_map per region design. | ||
174 | * Thus HIGHMEM on ARC is implemented with DISCONTIGMEM. | ||
175 | * | ||
176 | * DISCONTIGMEM in turn requires multiple nodes. Node 0 above is | ||
177 | * populated with the normal memory zone while node 1 only has highmem. | ||
178 | */ | ||
179 | node_set_online(1); | ||
180 | |||
181 | min_high_pfn = PFN_DOWN(high_mem_start); | ||
182 | max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz); | ||
183 | |||
184 | zones_size[ZONE_NORMAL] = 0; | ||
185 | zones_holes[ZONE_NORMAL] = 0; | ||
186 | |||
187 | zones_size[ZONE_HIGHMEM] = max_high_pfn - min_high_pfn; | ||
188 | zones_holes[ZONE_HIGHMEM] = 0; | ||
189 | |||
190 | free_area_init_node(1, /* node-id */ | ||
191 | zones_size, /* num pages per zone */ | ||
192 | min_high_pfn, /* first pfn of node */ | ||
193 | zones_holes); /* holes */ | ||
194 | |||
167 | high_memory = (void *)(min_high_pfn << PAGE_SHIFT); | 195 | high_memory = (void *)(min_high_pfn << PAGE_SHIFT); |
168 | kmap_init(); | 196 | kmap_init(); |
169 | #endif | 197 | #endif |
@@ -181,7 +209,7 @@ void __init mem_init(void) | |||
181 | unsigned long tmp; | 209 | unsigned long tmp; |
182 | 210 | ||
183 | reset_all_zones_managed_pages(); | 211 | reset_all_zones_managed_pages(); |
184 | for (tmp = min_high_pfn; tmp < max_pfn; tmp++) | 212 | for (tmp = min_high_pfn; tmp < max_high_pfn; tmp++) |
185 | free_highmem_page(pfn_to_page(tmp)); | 213 | free_highmem_page(pfn_to_page(tmp)); |
186 | #endif | 214 | #endif |
187 | 215 | ||
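The ARC hunk above gives highmem its own node so that each memory region gets a private mem_map[]. For orientation, this is roughly how a pfn is resolved to a struct page under DISCONTIGMEM; the helper below is an illustrative sketch of the classic lookup (the real per-arch versions live in asm/mmzone.h), not code from this patch.

#include <linux/mm.h>
#include <linux/mmzone.h>

/* Pick the owning node first, then index into that node's mem_map.
 * pfn_to_nid() is architecture-defined; on ARC it would be expected to
 * return 1 for highmem pfns and 0 for low memory. */
static inline struct page *sketch_pfn_to_page(unsigned long pfn)
{
	pg_data_t *pgdat = NODE_DATA(pfn_to_nid(pfn));

	return pgdat->node_mem_map + (pfn - pgdat->node_start_pfn);
}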
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi index 55ca9c7dcf6a..0467846b4cc3 100644 --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi | |||
@@ -860,7 +860,7 @@ | |||
860 | ti,no-idle-on-init; | 860 | ti,no-idle-on-init; |
861 | reg = <0x50000000 0x2000>; | 861 | reg = <0x50000000 0x2000>; |
862 | interrupts = <100>; | 862 | interrupts = <100>; |
863 | dmas = <&edma 52>; | 863 | dmas = <&edma 52 0>; |
864 | dma-names = "rxtx"; | 864 | dma-names = "rxtx"; |
865 | gpmc,num-cs = <7>; | 865 | gpmc,num-cs = <7>; |
866 | gpmc,num-waitpins = <2>; | 866 | gpmc,num-waitpins = <2>; |
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi index 344b861a55a5..ba580a9da390 100644 --- a/arch/arm/boot/dts/am4372.dtsi +++ b/arch/arm/boot/dts/am4372.dtsi | |||
@@ -884,7 +884,7 @@ | |||
884 | gpmc: gpmc@50000000 { | 884 | gpmc: gpmc@50000000 { |
885 | compatible = "ti,am3352-gpmc"; | 885 | compatible = "ti,am3352-gpmc"; |
886 | ti,hwmods = "gpmc"; | 886 | ti,hwmods = "gpmc"; |
887 | dmas = <&edma 52>; | 887 | dmas = <&edma 52 0>; |
888 | dma-names = "rxtx"; | 888 | dma-names = "rxtx"; |
889 | clocks = <&l3s_gclk>; | 889 | clocks = <&l3s_gclk>; |
890 | clock-names = "fck"; | 890 | clock-names = "fck"; |
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts index 0a5fc5d02ce2..4168eb9dd369 100644 --- a/arch/arm/boot/dts/am57xx-beagle-x15.dts +++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts | |||
@@ -99,13 +99,6 @@ | |||
99 | #cooling-cells = <2>; | 99 | #cooling-cells = <2>; |
100 | }; | 100 | }; |
101 | 101 | ||
102 | extcon_usb1: extcon_usb1 { | ||
103 | compatible = "linux,extcon-usb-gpio"; | ||
104 | id-gpio = <&gpio7 25 GPIO_ACTIVE_HIGH>; | ||
105 | pinctrl-names = "default"; | ||
106 | pinctrl-0 = <&extcon_usb1_pins>; | ||
107 | }; | ||
108 | |||
109 | hdmi0: connector { | 102 | hdmi0: connector { |
110 | compatible = "hdmi-connector"; | 103 | compatible = "hdmi-connector"; |
111 | label = "hdmi"; | 104 | label = "hdmi"; |
@@ -349,12 +342,6 @@ | |||
349 | >; | 342 | >; |
350 | }; | 343 | }; |
351 | 344 | ||
352 | extcon_usb1_pins: extcon_usb1_pins { | ||
353 | pinctrl-single,pins = < | ||
354 | DRA7XX_CORE_IOPAD(0x37ec, PIN_INPUT_PULLUP | MUX_MODE14) /* uart1_rtsn.gpio7_25 */ | ||
355 | >; | ||
356 | }; | ||
357 | |||
358 | tpd12s015_pins: pinmux_tpd12s015_pins { | 345 | tpd12s015_pins: pinmux_tpd12s015_pins { |
359 | pinctrl-single,pins = < | 346 | pinctrl-single,pins = < |
360 | DRA7XX_CORE_IOPAD(0x37b0, PIN_OUTPUT | MUX_MODE14) /* gpio7_10 CT_CP_HPD */ | 347 | DRA7XX_CORE_IOPAD(0x37b0, PIN_OUTPUT | MUX_MODE14) /* gpio7_10 CT_CP_HPD */ |
@@ -706,10 +693,6 @@ | |||
706 | pinctrl-0 = <&usb1_pins>; | 693 | pinctrl-0 = <&usb1_pins>; |
707 | }; | 694 | }; |
708 | 695 | ||
709 | &omap_dwc3_1 { | ||
710 | extcon = <&extcon_usb1>; | ||
711 | }; | ||
712 | |||
713 | &omap_dwc3_2 { | 696 | &omap_dwc3_2 { |
714 | extcon = <&extcon_usb2>; | 697 | extcon = <&extcon_usb2>; |
715 | }; | 698 | }; |
diff --git a/arch/arm/boot/dts/dm814x-clocks.dtsi b/arch/arm/boot/dts/dm814x-clocks.dtsi index e0ea6a93a22e..792a64ee0df7 100644 --- a/arch/arm/boot/dts/dm814x-clocks.dtsi +++ b/arch/arm/boot/dts/dm814x-clocks.dtsi | |||
@@ -4,6 +4,157 @@ | |||
4 | * published by the Free Software Foundation. | 4 | * published by the Free Software Foundation. |
5 | */ | 5 | */ |
6 | 6 | ||
7 | &pllss { | ||
8 | /* | ||
9 | * See TRM "2.6.10 Connected Outputs of DPLLS" and | ||
10 | * "2.6.11 Connected Outputs of DPLLJ". Only clkout is | ||
11 | * connected except for hdmi and usb. | ||
12 | */ | ||
13 | adpll_mpu_ck: adpll@40 { | ||
14 | #clock-cells = <1>; | ||
15 | compatible = "ti,dm814-adpll-s-clock"; | ||
16 | reg = <0x40 0x40>; | ||
17 | clocks = <&devosc_ck &devosc_ck &devosc_ck>; | ||
18 | clock-names = "clkinp", "clkinpulow", "clkinphif"; | ||
19 | clock-output-names = "481c5040.adpll.dcoclkldo", | ||
20 | "481c5040.adpll.clkout", | ||
21 | "481c5040.adpll.clkoutx2", | ||
22 | "481c5040.adpll.clkouthif"; | ||
23 | }; | ||
24 | |||
25 | adpll_dsp_ck: adpll@80 { | ||
26 | #clock-cells = <1>; | ||
27 | compatible = "ti,dm814-adpll-lj-clock"; | ||
28 | reg = <0x80 0x30>; | ||
29 | clocks = <&devosc_ck &devosc_ck>; | ||
30 | clock-names = "clkinp", "clkinpulow"; | ||
31 | clock-output-names = "481c5080.adpll.dcoclkldo", | ||
32 | "481c5080.adpll.clkout", | ||
33 | "481c5080.adpll.clkoutldo"; | ||
34 | }; | ||
35 | |||
36 | adpll_sgx_ck: adpll@b0 { | ||
37 | #clock-cells = <1>; | ||
38 | compatible = "ti,dm814-adpll-lj-clock"; | ||
39 | reg = <0xb0 0x30>; | ||
40 | clocks = <&devosc_ck &devosc_ck>; | ||
41 | clock-names = "clkinp", "clkinpulow"; | ||
42 | clock-output-names = "481c50b0.adpll.dcoclkldo", | ||
43 | "481c50b0.adpll.clkout", | ||
44 | "481c50b0.adpll.clkoutldo"; | ||
45 | }; | ||
46 | |||
47 | adpll_hdvic_ck: adpll@e0 { | ||
48 | #clock-cells = <1>; | ||
49 | compatible = "ti,dm814-adpll-lj-clock"; | ||
50 | reg = <0xe0 0x30>; | ||
51 | clocks = <&devosc_ck &devosc_ck>; | ||
52 | clock-names = "clkinp", "clkinpulow"; | ||
53 | clock-output-names = "481c50e0.adpll.dcoclkldo", | ||
54 | "481c50e0.adpll.clkout", | ||
55 | "481c50e0.adpll.clkoutldo"; | ||
56 | }; | ||
57 | |||
58 | adpll_l3_ck: adpll@110 { | ||
59 | #clock-cells = <1>; | ||
60 | compatible = "ti,dm814-adpll-lj-clock"; | ||
61 | reg = <0x110 0x30>; | ||
62 | clocks = <&devosc_ck &devosc_ck>; | ||
63 | clock-names = "clkinp", "clkinpulow"; | ||
64 | clock-output-names = "481c5110.adpll.dcoclkldo", | ||
65 | "481c5110.adpll.clkout", | ||
66 | "481c5110.adpll.clkoutldo"; | ||
67 | }; | ||
68 | |||
69 | adpll_isp_ck: adpll@140 { | ||
70 | #clock-cells = <1>; | ||
71 | compatible = "ti,dm814-adpll-lj-clock"; | ||
72 | reg = <0x140 0x30>; | ||
73 | clocks = <&devosc_ck &devosc_ck>; | ||
74 | clock-names = "clkinp", "clkinpulow"; | ||
75 | clock-output-names = "481c5140.adpll.dcoclkldo", | ||
76 | "481c5140.adpll.clkout", | ||
77 | "481c5140.adpll.clkoutldo"; | ||
78 | }; | ||
79 | |||
80 | adpll_dss_ck: adpll@170 { | ||
81 | #clock-cells = <1>; | ||
82 | compatible = "ti,dm814-adpll-lj-clock"; | ||
83 | reg = <0x170 0x30>; | ||
84 | clocks = <&devosc_ck &devosc_ck>; | ||
85 | clock-names = "clkinp", "clkinpulow"; | ||
86 | clock-output-names = "481c5170.adpll.dcoclkldo", | ||
87 | "481c5170.adpll.clkout", | ||
88 | "481c5170.adpll.clkoutldo"; | ||
89 | }; | ||
90 | |||
91 | adpll_video0_ck: adpll@1a0 { | ||
92 | #clock-cells = <1>; | ||
93 | compatible = "ti,dm814-adpll-lj-clock"; | ||
94 | reg = <0x1a0 0x30>; | ||
95 | clocks = <&devosc_ck &devosc_ck>; | ||
96 | clock-names = "clkinp", "clkinpulow"; | ||
97 | clock-output-names = "481c51a0.adpll.dcoclkldo", | ||
98 | "481c51a0.adpll.clkout", | ||
99 | "481c51a0.adpll.clkoutldo"; | ||
100 | }; | ||
101 | |||
102 | adpll_video1_ck: adpll@1d0 { | ||
103 | #clock-cells = <1>; | ||
104 | compatible = "ti,dm814-adpll-lj-clock"; | ||
105 | reg = <0x1d0 0x30>; | ||
106 | clocks = <&devosc_ck &devosc_ck>; | ||
107 | clock-names = "clkinp", "clkinpulow"; | ||
108 | clock-output-names = "481c51d0.adpll.dcoclkldo", | ||
109 | "481c51d0.adpll.clkout", | ||
110 | "481c51d0.adpll.clkoutldo"; | ||
111 | }; | ||
112 | |||
113 | adpll_hdmi_ck: adpll@200 { | ||
114 | #clock-cells = <1>; | ||
115 | compatible = "ti,dm814-adpll-lj-clock"; | ||
116 | reg = <0x200 0x30>; | ||
117 | clocks = <&devosc_ck &devosc_ck>; | ||
118 | clock-names = "clkinp", "clkinpulow"; | ||
119 | clock-output-names = "481c5200.adpll.dcoclkldo", | ||
120 | "481c5200.adpll.clkout", | ||
121 | "481c5200.adpll.clkoutldo"; | ||
122 | }; | ||
123 | |||
124 | adpll_audio_ck: adpll@230 { | ||
125 | #clock-cells = <1>; | ||
126 | compatible = "ti,dm814-adpll-lj-clock"; | ||
127 | reg = <0x230 0x30>; | ||
128 | clocks = <&devosc_ck &devosc_ck>; | ||
129 | clock-names = "clkinp", "clkinpulow"; | ||
130 | clock-output-names = "481c5230.adpll.dcoclkldo", | ||
131 | "481c5230.adpll.clkout", | ||
132 | "481c5230.adpll.clkoutldo"; | ||
133 | }; | ||
134 | |||
135 | adpll_usb_ck: adpll@260 { | ||
136 | #clock-cells = <1>; | ||
137 | compatible = "ti,dm814-adpll-lj-clock"; | ||
138 | reg = <0x260 0x30>; | ||
139 | clocks = <&devosc_ck &devosc_ck>; | ||
140 | clock-names = "clkinp", "clkinpulow"; | ||
141 | clock-output-names = "481c5260.adpll.dcoclkldo", | ||
142 | "481c5260.adpll.clkout", | ||
143 | "481c5260.adpll.clkoutldo"; | ||
144 | }; | ||
145 | |||
146 | adpll_ddr_ck: adpll@290 { | ||
147 | #clock-cells = <1>; | ||
148 | compatible = "ti,dm814-adpll-lj-clock"; | ||
149 | reg = <0x290 0x30>; | ||
150 | clocks = <&devosc_ck &devosc_ck>; | ||
151 | clock-names = "clkinp", "clkinpulow"; | ||
152 | clock-output-names = "481c5290.adpll.dcoclkldo", | ||
153 | "481c5290.adpll.clkout", | ||
154 | "481c5290.adpll.clkoutldo"; | ||
155 | }; | ||
156 | }; | ||
157 | |||
7 | &pllss_clocks { | 158 | &pllss_clocks { |
8 | timer1_fck: timer1_fck { | 159 | timer1_fck: timer1_fck { |
9 | #clock-cells = <0>; | 160 | #clock-cells = <0>; |
@@ -23,6 +174,24 @@ | |||
23 | reg = <0x2e0>; | 174 | reg = <0x2e0>; |
24 | }; | 175 | }; |
25 | 176 | ||
177 | /* CPTS_RFT_CLK in RMII_REFCLK_SRC, usually sourced from the audio PLL */ | ||
178 | cpsw_cpts_rft_clk: cpsw_cpts_rft_clk { | ||
179 | #clock-cells = <0>; | ||
180 | compatible = "ti,mux-clock"; | ||
181 | clocks = <&adpll_video0_ck 1 | ||
182 | &adpll_video1_ck 1 | ||
183 | &adpll_audio_ck 1>; | ||
184 | ti,bit-shift = <1>; | ||
185 | reg = <0x2e8>; | ||
186 | }; | ||
187 | |||
188 | /* REVISIT: Set up with a proper mux using RMII_REFCLK_SRC */ | ||
189 | cpsw_125mhz_gclk: cpsw_125mhz_gclk { | ||
190 | #clock-cells = <0>; | ||
191 | compatible = "fixed-clock"; | ||
192 | clock-frequency = <125000000>; | ||
193 | }; | ||
194 | |||
26 | sysclk18_ck: sysclk18_ck { | 195 | sysclk18_ck: sysclk18_ck { |
27 | #clock-cells = <0>; | 196 | #clock-cells = <0>; |
28 | compatible = "ti,mux-clock"; | 197 | compatible = "ti,mux-clock"; |
@@ -79,37 +248,6 @@ | |||
79 | compatible = "fixed-clock"; | 248 | compatible = "fixed-clock"; |
80 | clock-frequency = <1000000000>; | 249 | clock-frequency = <1000000000>; |
81 | }; | 250 | }; |
82 | |||
83 | sysclk4_ck: sysclk4_ck { | ||
84 | #clock-cells = <0>; | ||
85 | compatible = "fixed-clock"; | ||
86 | clock-frequency = <222000000>; | ||
87 | }; | ||
88 | |||
89 | sysclk6_ck: sysclk6_ck { | ||
90 | #clock-cells = <0>; | ||
91 | compatible = "fixed-clock"; | ||
92 | clock-frequency = <100000000>; | ||
93 | }; | ||
94 | |||
95 | sysclk10_ck: sysclk10_ck { | ||
96 | #clock-cells = <0>; | ||
97 | compatible = "fixed-clock"; | ||
98 | clock-frequency = <48000000>; | ||
99 | }; | ||
100 | |||
101 | cpsw_125mhz_gclk: cpsw_125mhz_gclk { | ||
102 | #clock-cells = <0>; | ||
103 | compatible = "fixed-clock"; | ||
104 | clock-frequency = <125000000>; | ||
105 | }; | ||
106 | |||
107 | cpsw_cpts_rft_clk: cpsw_cpts_rft_clk { | ||
108 | #clock-cells = <0>; | ||
109 | compatible = "fixed-clock"; | ||
110 | clock-frequency = <250000000>; | ||
111 | }; | ||
112 | |||
113 | }; | 251 | }; |
114 | 252 | ||
115 | &prcm_clocks { | 253 | &prcm_clocks { |
@@ -138,6 +276,49 @@ | |||
138 | clock-div = <78125>; | 276 | clock-div = <78125>; |
139 | }; | 277 | }; |
140 | 278 | ||
279 | /* L4_HS 220 MHz */ | ||
280 | sysclk4_ck: sysclk4_ck { | ||
281 | #clock-cells = <0>; | ||
282 | compatible = "ti,fixed-factor-clock"; | ||
283 | clocks = <&adpll_l3_ck 1>; | ||
284 | ti,clock-mult = <1>; | ||
285 | ti,clock-div = <1>; | ||
286 | }; | ||
287 | |||
288 | /* L4_FWCFG */ | ||
289 | sysclk5_ck: sysclk5_ck { | ||
290 | #clock-cells = <0>; | ||
291 | compatible = "ti,fixed-factor-clock"; | ||
292 | clocks = <&adpll_l3_ck 1>; | ||
293 | ti,clock-mult = <1>; | ||
294 | ti,clock-div = <2>; | ||
295 | }; | ||
296 | |||
297 | /* L4_LS 110 MHz */ | ||
298 | sysclk6_ck: sysclk6_ck { | ||
299 | #clock-cells = <0>; | ||
300 | compatible = "ti,fixed-factor-clock"; | ||
301 | clocks = <&adpll_l3_ck 1>; | ||
302 | ti,clock-mult = <1>; | ||
303 | ti,clock-div = <2>; | ||
304 | }; | ||
305 | |||
306 | sysclk8_ck: sysclk8_ck { | ||
307 | #clock-cells = <0>; | ||
308 | compatible = "ti,fixed-factor-clock"; | ||
309 | clocks = <&adpll_usb_ck 1>; | ||
310 | ti,clock-mult = <1>; | ||
311 | ti,clock-div = <1>; | ||
312 | }; | ||
313 | |||
314 | sysclk10_ck: sysclk10_ck { | ||
315 | compatible = "ti,divider-clock"; | ||
316 | reg = <0x324>; | ||
317 | ti,max-div = <7>; | ||
318 | #clock-cells = <0>; | ||
319 | clocks = <&adpll_usb_ck 1>; | ||
320 | }; | ||
321 | |||
141 | aud_clkin0_ck: aud_clkin0_ck { | 322 | aud_clkin0_ck: aud_clkin0_ck { |
142 | #clock-cells = <0>; | 323 | #clock-cells = <0>; |
143 | compatible = "fixed-clock"; | 324 | compatible = "fixed-clock"; |
diff --git a/arch/arm/boot/dts/dra62x-clocks.dtsi b/arch/arm/boot/dts/dra62x-clocks.dtsi index 6f98dc8df9dd..0e49741747ef 100644 --- a/arch/arm/boot/dts/dra62x-clocks.dtsi +++ b/arch/arm/boot/dts/dra62x-clocks.dtsi | |||
@@ -6,6 +6,32 @@ | |||
6 | 6 | ||
7 | #include "dm814x-clocks.dtsi" | 7 | #include "dm814x-clocks.dtsi" |
8 | 8 | ||
9 | /* Compared to dm814x, dra62x does not have hdvic, l3 or dss PLLs */ | ||
10 | &adpll_hdvic_ck { | ||
11 | status = "disabled"; | ||
12 | }; | ||
13 | |||
14 | &adpll_l3_ck { | ||
15 | status = "disabled"; | ||
16 | }; | ||
17 | |||
18 | &adpll_dss_ck { | ||
19 | status = "disabled"; | ||
20 | }; | ||
21 | |||
22 | /* Compared to dm814x, dra62x has interconnect clocks on isp PLL */ | ||
23 | &sysclk4_ck { | ||
24 | clocks = <&adpll_isp_ck 1>; | ||
25 | }; | ||
26 | |||
27 | &sysclk5_ck { | ||
28 | clocks = <&adpll_isp_ck 1>; | ||
29 | }; | ||
30 | |||
31 | &sysclk6_ck { | ||
32 | clocks = <&adpll_isp_ck 1>; | ||
33 | }; | ||
34 | |||
9 | /* | 35 | /* |
10 | * Compared to dm814x, dra62x has different shifts and more mux options. | 36 | * Compared to dm814x, dra62x has different shifts and more mux options. |
11 | * Please add the extra options for sysclk_14 and 16 if really needed. | 37 | * Please add the extra options for sysclk_14 and 16 if really needed. |
diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi index d0bae06b7eb7..ef2164a99d0f 100644 --- a/arch/arm/boot/dts/dra7xx-clocks.dtsi +++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi | |||
@@ -98,12 +98,20 @@ | |||
98 | clock-frequency = <32768>; | 98 | clock-frequency = <32768>; |
99 | }; | 99 | }; |
100 | 100 | ||
101 | sys_32k_ck: sys_32k_ck { | 101 | sys_clk32_crystal_ck: sys_clk32_crystal_ck { |
102 | #clock-cells = <0>; | 102 | #clock-cells = <0>; |
103 | compatible = "fixed-clock"; | 103 | compatible = "fixed-clock"; |
104 | clock-frequency = <32768>; | 104 | clock-frequency = <32768>; |
105 | }; | 105 | }; |
106 | 106 | ||
107 | sys_clk32_pseudo_ck: sys_clk32_pseudo_ck { | ||
108 | #clock-cells = <0>; | ||
109 | compatible = "fixed-factor-clock"; | ||
110 | clocks = <&sys_clkin1>; | ||
111 | clock-mult = <1>; | ||
112 | clock-div = <610>; | ||
113 | }; | ||
114 | |||
107 | virt_12000000_ck: virt_12000000_ck { | 115 | virt_12000000_ck: virt_12000000_ck { |
108 | #clock-cells = <0>; | 116 | #clock-cells = <0>; |
109 | compatible = "fixed-clock"; | 117 | compatible = "fixed-clock"; |
@@ -2170,4 +2178,12 @@ | |||
2170 | ti,bit-shift = <22>; | 2178 | ti,bit-shift = <22>; |
2171 | reg = <0x0558>; | 2179 | reg = <0x0558>; |
2172 | }; | 2180 | }; |
2181 | |||
2182 | sys_32k_ck: sys_32k_ck { | ||
2183 | #clock-cells = <0>; | ||
2184 | compatible = "ti,mux-clock"; | ||
2185 | clocks = <&sys_clk32_crystal_ck>, <&sys_clk32_pseudo_ck>, <&sys_clk32_pseudo_ck>, <&sys_clk32_pseudo_ck>; | ||
2186 | ti,bit-shift = <8>; | ||
2187 | reg = <0x6c4>; | ||
2188 | }; | ||
2173 | }; | 2189 | }; |
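With sys_32k_ck registered above as a ti,mux-clock (selector at bit 8 of register 0x6c4), the 32 kHz source can be reparented at run time instead of being a hard-coded fixed clock. A hedged consumer sketch; the function and the way the clk handles are obtained are illustrative only:

#include <linux/clk.h>

/* sys_32k and crystal would come from clk_get()/devm_clk_get() against
 * the clocks defined above; selecting the crystal source is one call. */
static int sketch_select_crystal_32k(struct clk *sys_32k, struct clk *crystal)
{
	return clk_set_parent(sys_32k, crystal);
}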
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index b3c26a96a726..d9e2d9c6e999 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts | |||
@@ -329,6 +329,7 @@ | |||
329 | regulator-name = "V28"; | 329 | regulator-name = "V28"; |
330 | regulator-min-microvolt = <2800000>; | 330 | regulator-min-microvolt = <2800000>; |
331 | regulator-max-microvolt = <2800000>; | 331 | regulator-max-microvolt = <2800000>; |
332 | regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */ | ||
332 | regulator-always-on; /* due to battery cover sensor */ | 333 | regulator-always-on; /* due to battery cover sensor */ |
333 | }; | 334 | }; |
334 | 335 | ||
@@ -336,30 +337,35 @@ | |||
336 | regulator-name = "VCSI"; | 337 | regulator-name = "VCSI"; |
337 | regulator-min-microvolt = <1800000>; | 338 | regulator-min-microvolt = <1800000>; |
338 | regulator-max-microvolt = <1800000>; | 339 | regulator-max-microvolt = <1800000>; |
340 | regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */ | ||
339 | }; | 341 | }; |
340 | 342 | ||
341 | &vaux3 { | 343 | &vaux3 { |
342 | regulator-name = "VMMC2_30"; | 344 | regulator-name = "VMMC2_30"; |
343 | regulator-min-microvolt = <2800000>; | 345 | regulator-min-microvolt = <2800000>; |
344 | regulator-max-microvolt = <3000000>; | 346 | regulator-max-microvolt = <3000000>; |
347 | regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */ | ||
345 | }; | 348 | }; |
346 | 349 | ||
347 | &vaux4 { | 350 | &vaux4 { |
348 | regulator-name = "VCAM_ANA_28"; | 351 | regulator-name = "VCAM_ANA_28"; |
349 | regulator-min-microvolt = <2800000>; | 352 | regulator-min-microvolt = <2800000>; |
350 | regulator-max-microvolt = <2800000>; | 353 | regulator-max-microvolt = <2800000>; |
354 | regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */ | ||
351 | }; | 355 | }; |
352 | 356 | ||
353 | &vmmc1 { | 357 | &vmmc1 { |
354 | regulator-name = "VMMC1"; | 358 | regulator-name = "VMMC1"; |
355 | regulator-min-microvolt = <1850000>; | 359 | regulator-min-microvolt = <1850000>; |
356 | regulator-max-microvolt = <3150000>; | 360 | regulator-max-microvolt = <3150000>; |
361 | regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */ | ||
357 | }; | 362 | }; |
358 | 363 | ||
359 | &vmmc2 { | 364 | &vmmc2 { |
360 | regulator-name = "V28_A"; | 365 | regulator-name = "V28_A"; |
361 | regulator-min-microvolt = <2800000>; | 366 | regulator-min-microvolt = <2800000>; |
362 | regulator-max-microvolt = <3000000>; | 367 | regulator-max-microvolt = <3000000>; |
368 | regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */ | ||
363 | regulator-always-on; /* due VIO leak to AIC34 VDDs */ | 369 | regulator-always-on; /* due VIO leak to AIC34 VDDs */ |
364 | }; | 370 | }; |
365 | 371 | ||
@@ -367,6 +373,7 @@ | |||
367 | regulator-name = "VPLL"; | 373 | regulator-name = "VPLL"; |
368 | regulator-min-microvolt = <1800000>; | 374 | regulator-min-microvolt = <1800000>; |
369 | regulator-max-microvolt = <1800000>; | 375 | regulator-max-microvolt = <1800000>; |
376 | regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */ | ||
370 | regulator-always-on; | 377 | regulator-always-on; |
371 | }; | 378 | }; |
372 | 379 | ||
@@ -374,6 +381,7 @@ | |||
374 | regulator-name = "VSDI_CSI"; | 381 | regulator-name = "VSDI_CSI"; |
375 | regulator-min-microvolt = <1800000>; | 382 | regulator-min-microvolt = <1800000>; |
376 | regulator-max-microvolt = <1800000>; | 383 | regulator-max-microvolt = <1800000>; |
384 | regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */ | ||
377 | regulator-always-on; | 385 | regulator-always-on; |
378 | }; | 386 | }; |
379 | 387 | ||
@@ -381,6 +389,7 @@ | |||
381 | regulator-name = "VMMC2_IO_18"; | 389 | regulator-name = "VMMC2_IO_18"; |
382 | regulator-min-microvolt = <1800000>; | 390 | regulator-min-microvolt = <1800000>; |
383 | regulator-max-microvolt = <1800000>; | 391 | regulator-max-microvolt = <1800000>; |
392 | regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */ | ||
384 | }; | 393 | }; |
385 | 394 | ||
386 | &vio { | 395 | &vio { |
diff --git a/arch/arm/boot/dts/omap34xx.dtsi b/arch/arm/boot/dts/omap34xx.dtsi index 387dc31822fe..96f8ce7bd2af 100644 --- a/arch/arm/boot/dts/omap34xx.dtsi +++ b/arch/arm/boot/dts/omap34xx.dtsi | |||
@@ -46,7 +46,7 @@ | |||
46 | 0x480bd800 0x017c>; | 46 | 0x480bd800 0x017c>; |
47 | interrupts = <24>; | 47 | interrupts = <24>; |
48 | iommus = <&mmu_isp>; | 48 | iommus = <&mmu_isp>; |
49 | syscon = <&scm_conf 0xdc>; | 49 | syscon = <&scm_conf 0x6c>; |
50 | ti,phy-type = <OMAP3ISP_PHY_TYPE_COMPLEX_IO>; | 50 | ti,phy-type = <OMAP3ISP_PHY_TYPE_COMPLEX_IO>; |
51 | #clock-cells = <1>; | 51 | #clock-cells = <1>; |
52 | ports { | 52 | ports { |
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi index 902657d6713b..914bf4c47404 100644 --- a/arch/arm/boot/dts/omap5-board-common.dtsi +++ b/arch/arm/boot/dts/omap5-board-common.dtsi | |||
@@ -472,7 +472,7 @@ | |||
472 | ldo1_reg: ldo1 { | 472 | ldo1_reg: ldo1 { |
473 | /* VDDAPHY_CAM: vdda_csiport */ | 473 | /* VDDAPHY_CAM: vdda_csiport */ |
474 | regulator-name = "ldo1"; | 474 | regulator-name = "ldo1"; |
475 | regulator-min-microvolt = <1500000>; | 475 | regulator-min-microvolt = <1800000>; |
476 | regulator-max-microvolt = <1800000>; | 476 | regulator-max-microvolt = <1800000>; |
477 | }; | 477 | }; |
478 | 478 | ||
@@ -498,7 +498,7 @@ | |||
498 | ldo4_reg: ldo4 { | 498 | ldo4_reg: ldo4 { |
499 | /* VDDAPHY_DISP: vdda_dsiport/hdmi */ | 499 | /* VDDAPHY_DISP: vdda_dsiport/hdmi */ |
500 | regulator-name = "ldo4"; | 500 | regulator-name = "ldo4"; |
501 | regulator-min-microvolt = <1500000>; | 501 | regulator-min-microvolt = <1800000>; |
502 | regulator-max-microvolt = <1800000>; | 502 | regulator-max-microvolt = <1800000>; |
503 | }; | 503 | }; |
504 | 504 | ||
diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts index ecc591dc0778..4d87d9c6c86d 100644 --- a/arch/arm/boot/dts/omap5-cm-t54.dts +++ b/arch/arm/boot/dts/omap5-cm-t54.dts | |||
@@ -513,7 +513,7 @@ | |||
513 | ldo1_reg: ldo1 { | 513 | ldo1_reg: ldo1 { |
514 | /* VDDAPHY_CAM: vdda_csiport */ | 514 | /* VDDAPHY_CAM: vdda_csiport */ |
515 | regulator-name = "ldo1"; | 515 | regulator-name = "ldo1"; |
516 | regulator-min-microvolt = <1500000>; | 516 | regulator-min-microvolt = <1800000>; |
517 | regulator-max-microvolt = <1800000>; | 517 | regulator-max-microvolt = <1800000>; |
518 | }; | 518 | }; |
519 | 519 | ||
@@ -537,7 +537,7 @@ | |||
537 | ldo4_reg: ldo4 { | 537 | ldo4_reg: ldo4 { |
538 | /* VDDAPHY_DISP: vdda_dsiport/hdmi */ | 538 | /* VDDAPHY_DISP: vdda_dsiport/hdmi */ |
539 | regulator-name = "ldo4"; | 539 | regulator-name = "ldo4"; |
540 | regulator-min-microvolt = <1500000>; | 540 | regulator-min-microvolt = <1800000>; |
541 | regulator-max-microvolt = <1800000>; | 541 | regulator-max-microvolt = <1800000>; |
542 | }; | 542 | }; |
543 | 543 | ||
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi index 38805ebbe2ba..120b6b80cd39 100644 --- a/arch/arm/boot/dts/omap5.dtsi +++ b/arch/arm/boot/dts/omap5.dtsi | |||
@@ -269,7 +269,7 @@ | |||
269 | omap5_pmx_wkup: pinmux@c840 { | 269 | omap5_pmx_wkup: pinmux@c840 { |
270 | compatible = "ti,omap5-padconf", | 270 | compatible = "ti,omap5-padconf", |
271 | "pinctrl-single"; | 271 | "pinctrl-single"; |
272 | reg = <0xc840 0x0038>; | 272 | reg = <0xc840 0x003c>; |
273 | #address-cells = <1>; | 273 | #address-cells = <1>; |
274 | #size-cells = <0>; | 274 | #size-cells = <0>; |
275 | #interrupt-cells = <1>; | 275 | #interrupt-cells = <1>; |
diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi index ef5330578431..8193139d0d87 100644 --- a/arch/arm/boot/dts/qcom-msm8974.dtsi +++ b/arch/arm/boot/dts/qcom-msm8974.dtsi | |||
@@ -1,6 +1,6 @@ | |||
1 | /dts-v1/; | 1 | /dts-v1/; |
2 | 2 | ||
3 | #include <dt-bindings/interrupt-controller/arm-gic.h> | 3 | #include <dt-bindings/interrupt-controller/irq.h> |
4 | #include <dt-bindings/clock/qcom,gcc-msm8974.h> | 4 | #include <dt-bindings/clock/qcom,gcc-msm8974.h> |
5 | #include "skeleton.dtsi" | 5 | #include "skeleton.dtsi" |
6 | 6 | ||
@@ -460,8 +460,6 @@ | |||
460 | clock-names = "core", "iface"; | 460 | clock-names = "core", "iface"; |
461 | #address-cells = <1>; | 461 | #address-cells = <1>; |
462 | #size-cells = <0>; | 462 | #size-cells = <0>; |
463 | dmas = <&blsp2_dma 20>, <&blsp2_dma 21>; | ||
464 | dma-names = "tx", "rx"; | ||
465 | }; | 463 | }; |
466 | 464 | ||
467 | spmi_bus: spmi@fc4cf000 { | 465 | spmi_bus: spmi@fc4cf000 { |
@@ -479,16 +477,6 @@ | |||
479 | interrupt-controller; | 477 | interrupt-controller; |
480 | #interrupt-cells = <4>; | 478 | #interrupt-cells = <4>; |
481 | }; | 479 | }; |
482 | |||
483 | blsp2_dma: dma-controller@f9944000 { | ||
484 | compatible = "qcom,bam-v1.4.0"; | ||
485 | reg = <0xf9944000 0x19000>; | ||
486 | interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH>; | ||
487 | clocks = <&gcc GCC_BLSP2_AHB_CLK>; | ||
488 | clock-names = "bam_clk"; | ||
489 | #dma-cells = <1>; | ||
490 | qcom,ee = <0>; | ||
491 | }; | ||
492 | }; | 480 | }; |
493 | 481 | ||
494 | smd { | 482 | smd { |
diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts index 0ad71b81d3a2..cc6e28f81fe4 100644 --- a/arch/arm/boot/dts/r8a7791-koelsch.dts +++ b/arch/arm/boot/dts/r8a7791-koelsch.dts | |||
@@ -661,6 +661,7 @@ | |||
661 | }; | 661 | }; |
662 | 662 | ||
663 | &pcie_bus_clk { | 663 | &pcie_bus_clk { |
664 | clock-frequency = <100000000>; | ||
664 | status = "okay"; | 665 | status = "okay"; |
665 | }; | 666 | }; |
666 | 667 | ||
diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts index 6c08314427d6..a9285d9a57cd 100644 --- a/arch/arm/boot/dts/r8a7791-porter.dts +++ b/arch/arm/boot/dts/r8a7791-porter.dts | |||
@@ -143,19 +143,11 @@ | |||
143 | }; | 143 | }; |
144 | 144 | ||
145 | &pfc { | 145 | &pfc { |
146 | pinctrl-0 = <&scif_clk_pins>; | ||
147 | pinctrl-names = "default"; | ||
148 | |||
149 | scif0_pins: serial0 { | 146 | scif0_pins: serial0 { |
150 | renesas,groups = "scif0_data_d"; | 147 | renesas,groups = "scif0_data_d"; |
151 | renesas,function = "scif0"; | 148 | renesas,function = "scif0"; |
152 | }; | 149 | }; |
153 | 150 | ||
154 | scif_clk_pins: scif_clk { | ||
155 | renesas,groups = "scif_clk"; | ||
156 | renesas,function = "scif_clk"; | ||
157 | }; | ||
158 | |||
159 | ether_pins: ether { | 151 | ether_pins: ether { |
160 | renesas,groups = "eth_link", "eth_mdio", "eth_rmii"; | 152 | renesas,groups = "eth_link", "eth_mdio", "eth_rmii"; |
161 | renesas,function = "eth"; | 153 | renesas,function = "eth"; |
@@ -229,11 +221,6 @@ | |||
229 | status = "okay"; | 221 | status = "okay"; |
230 | }; | 222 | }; |
231 | 223 | ||
232 | &scif_clk { | ||
233 | clock-frequency = <14745600>; | ||
234 | status = "okay"; | ||
235 | }; | ||
236 | |||
237 | ðer { | 224 | ðer { |
238 | pinctrl-0 = <ðer_pins &phy1_pins>; | 225 | pinctrl-0 = <ðer_pins &phy1_pins>; |
239 | pinctrl-names = "default"; | 226 | pinctrl-names = "default"; |
@@ -414,6 +401,7 @@ | |||
414 | }; | 401 | }; |
415 | 402 | ||
416 | &pcie_bus_clk { | 403 | &pcie_bus_clk { |
404 | clock-frequency = <100000000>; | ||
417 | status = "okay"; | 405 | status = "okay"; |
418 | }; | 406 | }; |
419 | 407 | ||
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi index 6439f0569fe2..1cd1b6a3a72a 100644 --- a/arch/arm/boot/dts/r8a7791.dtsi +++ b/arch/arm/boot/dts/r8a7791.dtsi | |||
@@ -1083,9 +1083,8 @@ | |||
1083 | pcie_bus_clk: pcie_bus_clk { | 1083 | pcie_bus_clk: pcie_bus_clk { |
1084 | compatible = "fixed-clock"; | 1084 | compatible = "fixed-clock"; |
1085 | #clock-cells = <0>; | 1085 | #clock-cells = <0>; |
1086 | clock-frequency = <100000000>; | 1086 | clock-frequency = <0>; |
1087 | clock-output-names = "pcie_bus"; | 1087 | clock-output-names = "pcie_bus"; |
1088 | status = "disabled"; | ||
1089 | }; | 1088 | }; |
1090 | 1089 | ||
1091 | /* External SCIF clock */ | 1090 | /* External SCIF clock */ |
@@ -1094,7 +1093,6 @@ | |||
1094 | #clock-cells = <0>; | 1093 | #clock-cells = <0>; |
1095 | /* This value must be overridden by the board. */ | 1094 | /* This value must be overridden by the board. */ |
1096 | clock-frequency = <0>; | 1095 | clock-frequency = <0>; |
1097 | status = "disabled"; | ||
1098 | }; | 1096 | }; |
1099 | 1097 | ||
1100 | /* External USB clock - can be overridden by the board */ | 1098 | /* External USB clock - can be overridden by the board */ |
@@ -1112,7 +1110,6 @@ | |||
1112 | /* This value must be overridden by the board. */ | 1110 | /* This value must be overridden by the board. */ |
1113 | clock-frequency = <0>; | 1111 | clock-frequency = <0>; |
1114 | clock-output-names = "can_clk"; | 1112 | clock-output-names = "can_clk"; |
1115 | status = "disabled"; | ||
1116 | }; | 1113 | }; |
1117 | 1114 | ||
1118 | /* Special CPG clocks */ | 1115 | /* Special CPG clocks */ |
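The r8a7791 hunks drop status = "disabled" from the external clocks and leave them at 0 Hz instead, so boards that actually feed them override clock-frequency (as the Koelsch and Porter hunks above now do for pcie_bus_clk). A hedged sketch of how a consumer can treat the 0 Hz default as "not wired up on this board"; the connection name is an assumption:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>

static int sketch_check_ext_clk(struct device *dev, const char *name)
{
	struct clk *clk = devm_clk_get(dev, name);

	if (IS_ERR(clk))
		return PTR_ERR(clk);
	if (!clk_get_rate(clk))		/* board left the default 0 Hz */
		return -ENODEV;
	return 0;
}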
diff --git a/arch/arm/boot/dts/sun8i-q8-common.dtsi b/arch/arm/boot/dts/sun8i-q8-common.dtsi index 9d2b7e2f5975..346a49d805a7 100644 --- a/arch/arm/boot/dts/sun8i-q8-common.dtsi +++ b/arch/arm/boot/dts/sun8i-q8-common.dtsi | |||
@@ -125,8 +125,6 @@ | |||
125 | }; | 125 | }; |
126 | 126 | ||
127 | ®_dc1sw { | 127 | ®_dc1sw { |
128 | regulator-min-microvolt = <3000000>; | ||
129 | regulator-max-microvolt = <3000000>; | ||
130 | regulator-name = "vcc-lcd"; | 128 | regulator-name = "vcc-lcd"; |
131 | }; | 129 | }; |
132 | 130 | ||
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 58dbd5c439df..d6d4191e68f2 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c | |||
@@ -1004,7 +1004,7 @@ static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap) | |||
1004 | kvm_pfn_t pfn = *pfnp; | 1004 | kvm_pfn_t pfn = *pfnp; |
1005 | gfn_t gfn = *ipap >> PAGE_SHIFT; | 1005 | gfn_t gfn = *ipap >> PAGE_SHIFT; |
1006 | 1006 | ||
1007 | if (PageTransCompound(pfn_to_page(pfn))) { | 1007 | if (PageTransCompoundMap(pfn_to_page(pfn))) { |
1008 | unsigned long mask; | 1008 | unsigned long mask; |
1009 | /* | 1009 | /* |
1010 | * The address we faulted on is backed by a transparent huge | 1010 | * The address we faulted on is backed by a transparent huge |
diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c index d97c588550ad..bc4e63fa9808 100644 --- a/arch/arm/mach-davinci/board-mityomapl138.c +++ b/arch/arm/mach-davinci/board-mityomapl138.c | |||
@@ -121,6 +121,11 @@ static void read_factory_config(struct nvmem_device *nvmem, void *context) | |||
121 | const char *partnum = NULL; | 121 | const char *partnum = NULL; |
122 | struct davinci_soc_info *soc_info = &davinci_soc_info; | 122 | struct davinci_soc_info *soc_info = &davinci_soc_info; |
123 | 123 | ||
124 | if (!IS_BUILTIN(CONFIG_NVMEM)) { | ||
125 | pr_warn("Factory Config not available without CONFIG_NVMEM\n"); | ||
126 | goto bad_config; | ||
127 | } | ||
128 | |||
124 | ret = nvmem_device_read(nvmem, 0, sizeof(factory_config), | 129 | ret = nvmem_device_read(nvmem, 0, sizeof(factory_config), |
125 | &factory_config); | 130 | &factory_config); |
126 | if (ret != sizeof(struct factory_config)) { | 131 | if (ret != sizeof(struct factory_config)) { |
diff --git a/arch/arm/mach-davinci/common.c b/arch/arm/mach-davinci/common.c index f55ef2ef2f92..742133b7266a 100644 --- a/arch/arm/mach-davinci/common.c +++ b/arch/arm/mach-davinci/common.c | |||
@@ -33,6 +33,11 @@ void davinci_get_mac_addr(struct nvmem_device *nvmem, void *context) | |||
33 | char *mac_addr = davinci_soc_info.emac_pdata->mac_addr; | 33 | char *mac_addr = davinci_soc_info.emac_pdata->mac_addr; |
34 | off_t offset = (off_t)context; | 34 | off_t offset = (off_t)context; |
35 | 35 | ||
36 | if (!IS_BUILTIN(CONFIG_NVMEM)) { | ||
37 | pr_warn("Cannot read MAC addr from EEPROM without CONFIG_NVMEM\n"); | ||
38 | return; | ||
39 | } | ||
40 | |||
36 | /* Read MAC addr from EEPROM */ | 41 | /* Read MAC addr from EEPROM */ |
37 | if (nvmem_device_read(nvmem, offset, ETH_ALEN, mac_addr) == ETH_ALEN) | 42 | if (nvmem_device_read(nvmem, offset, ETH_ALEN, mac_addr) == ETH_ALEN) |
38 | pr_info("Read MAC addr from EEPROM: %pM\n", mac_addr); | 43 | pr_info("Read MAC addr from EEPROM: %pM\n", mac_addr); |
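Both DaVinci hunks guard the NVMEM reads because these board files are always built into the kernel, so a modular NVMEM core could not be reached from them. A short sketch of the kconfig helper semantics being relied on:

#include <linux/kconfig.h>
#include <linux/types.h>

/* IS_BUILTIN(CONFIG_FOO) is 1 only for CONFIG_FOO=y, while IS_ENABLED()
 * is also 1 for =m; built-in board code can call into the NVMEM core only
 * in the =y case, hence the checks above. */
static bool sketch_nvmem_reachable(void)
{
	return IS_BUILTIN(CONFIG_NVMEM);
}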
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c index 7c21760f590f..875a2bab64f6 100644 --- a/arch/arm/mach-exynos/pm_domains.c +++ b/arch/arm/mach-exynos/pm_domains.c | |||
@@ -92,7 +92,7 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on) | |||
92 | if (IS_ERR(pd->clk[i])) | 92 | if (IS_ERR(pd->clk[i])) |
93 | break; | 93 | break; |
94 | 94 | ||
95 | if (IS_ERR(pd->clk[i])) | 95 | if (IS_ERR(pd->pclk[i])) |
96 | continue; /* Skip on first power up */ | 96 | continue; /* Skip on first power up */ |
97 | if (clk_set_parent(pd->clk[i], pd->pclk[i])) | 97 | if (clk_set_parent(pd->clk[i], pd->pclk[i])) |
98 | pr_err("%s: error setting parent to clock%d\n", | 98 | pr_err("%s: error setting parent to clock%d\n", |
diff --git a/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c b/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c index a5edd7d60266..3d039ef021e0 100644 --- a/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c +++ b/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c | |||
@@ -71,6 +71,7 @@ struct platform_device *__init imx_add_sdhci_esdhc_imx( | |||
71 | if (!pdata) | 71 | if (!pdata) |
72 | pdata = &default_esdhc_pdata; | 72 | pdata = &default_esdhc_pdata; |
73 | 73 | ||
74 | return imx_add_platform_device(data->devid, data->id, res, | 74 | return imx_add_platform_device_dmamask(data->devid, data->id, res, |
75 | ARRAY_SIZE(res), pdata, sizeof(*pdata)); | 75 | ARRAY_SIZE(res), pdata, sizeof(*pdata), |
76 | DMA_BIT_MASK(32)); | ||
76 | } | 77 | } |
diff --git a/arch/arm/mach-omap2/clockdomains7xx_data.c b/arch/arm/mach-omap2/clockdomains7xx_data.c index 7581e036bda6..ef9ed36e8a61 100644 --- a/arch/arm/mach-omap2/clockdomains7xx_data.c +++ b/arch/arm/mach-omap2/clockdomains7xx_data.c | |||
@@ -461,7 +461,7 @@ static struct clockdomain ipu_7xx_clkdm = { | |||
461 | .cm_inst = DRA7XX_CM_CORE_AON_IPU_INST, | 461 | .cm_inst = DRA7XX_CM_CORE_AON_IPU_INST, |
462 | .clkdm_offs = DRA7XX_CM_CORE_AON_IPU_IPU_CDOFFS, | 462 | .clkdm_offs = DRA7XX_CM_CORE_AON_IPU_IPU_CDOFFS, |
463 | .dep_bit = DRA7XX_IPU_STATDEP_SHIFT, | 463 | .dep_bit = DRA7XX_IPU_STATDEP_SHIFT, |
464 | .flags = CLKDM_CAN_HWSUP_SWSUP, | 464 | .flags = CLKDM_CAN_SWSUP, |
465 | }; | 465 | }; |
466 | 466 | ||
467 | static struct clockdomain mpu1_7xx_clkdm = { | 467 | static struct clockdomain mpu1_7xx_clkdm = { |
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index 9821be6dfd5e..49de4dd227be 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c | |||
@@ -737,7 +737,8 @@ void __init omap5_init_late(void) | |||
737 | #ifdef CONFIG_SOC_DRA7XX | 737 | #ifdef CONFIG_SOC_DRA7XX |
738 | void __init dra7xx_init_early(void) | 738 | void __init dra7xx_init_early(void) |
739 | { | 739 | { |
740 | omap2_set_globals_tap(-1, OMAP2_L4_IO_ADDRESS(DRA7XX_TAP_BASE)); | 740 | omap2_set_globals_tap(DRA7XX_CLASS, |
741 | OMAP2_L4_IO_ADDRESS(DRA7XX_TAP_BASE)); | ||
741 | omap2_set_globals_prcm_mpu(OMAP2_L4_IO_ADDRESS(OMAP54XX_PRCM_MPU_BASE)); | 742 | omap2_set_globals_prcm_mpu(OMAP2_L4_IO_ADDRESS(OMAP54XX_PRCM_MPU_BASE)); |
742 | omap2_control_base_init(); | 743 | omap2_control_base_init(); |
743 | omap4_pm_init_early(); | 744 | omap4_pm_init_early(); |
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c index f397bd6bd6e3..2c04f2741476 100644 --- a/arch/arm/mach-omap2/omap-wakeupgen.c +++ b/arch/arm/mach-omap2/omap-wakeupgen.c | |||
@@ -274,6 +274,10 @@ static inline void omap5_irq_save_context(void) | |||
274 | */ | 274 | */ |
275 | static void irq_save_context(void) | 275 | static void irq_save_context(void) |
276 | { | 276 | { |
277 | /* DRA7 has no SAR to save */ | ||
278 | if (soc_is_dra7xx()) | ||
279 | return; | ||
280 | |||
277 | if (!sar_base) | 281 | if (!sar_base) |
278 | sar_base = omap4_get_sar_ram_base(); | 282 | sar_base = omap4_get_sar_ram_base(); |
279 | 283 | ||
@@ -290,6 +294,9 @@ static void irq_sar_clear(void) | |||
290 | { | 294 | { |
291 | u32 val; | 295 | u32 val; |
292 | u32 offset = SAR_BACKUP_STATUS_OFFSET; | 296 | u32 offset = SAR_BACKUP_STATUS_OFFSET; |
297 | /* DRA7 has no SAR to save */ | ||
298 | if (soc_is_dra7xx()) | ||
299 | return; | ||
293 | 300 | ||
294 | if (soc_is_omap54xx()) | 301 | if (soc_is_omap54xx()) |
295 | offset = OMAP5_SAR_BACKUP_STATUS_OFFSET; | 302 | offset = OMAP5_SAR_BACKUP_STATUS_OFFSET; |
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index 2dbd3785ee6f..d44e0e2f1106 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c | |||
@@ -198,7 +198,6 @@ void omap_sram_idle(void) | |||
198 | int per_next_state = PWRDM_POWER_ON; | 198 | int per_next_state = PWRDM_POWER_ON; |
199 | int core_next_state = PWRDM_POWER_ON; | 199 | int core_next_state = PWRDM_POWER_ON; |
200 | int per_going_off; | 200 | int per_going_off; |
201 | int core_prev_state; | ||
202 | u32 sdrc_pwr = 0; | 201 | u32 sdrc_pwr = 0; |
203 | 202 | ||
204 | mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm); | 203 | mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm); |
@@ -278,16 +277,20 @@ void omap_sram_idle(void) | |||
278 | sdrc_write_reg(sdrc_pwr, SDRC_POWER); | 277 | sdrc_write_reg(sdrc_pwr, SDRC_POWER); |
279 | 278 | ||
280 | /* CORE */ | 279 | /* CORE */ |
281 | if (core_next_state < PWRDM_POWER_ON) { | 280 | if (core_next_state < PWRDM_POWER_ON && |
282 | core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm); | 281 | pwrdm_read_prev_pwrst(core_pwrdm) == PWRDM_POWER_OFF) { |
283 | if (core_prev_state == PWRDM_POWER_OFF) { | 282 | omap3_core_restore_context(); |
284 | omap3_core_restore_context(); | 283 | omap3_cm_restore_context(); |
285 | omap3_cm_restore_context(); | 284 | omap3_sram_restore_context(); |
286 | omap3_sram_restore_context(); | 285 | omap2_sms_restore_context(); |
287 | omap2_sms_restore_context(); | 286 | } else { |
288 | } | 287 | /* |
288 | * In off-mode resume path above, omap3_core_restore_context | ||
289 | * also handles the INTC autoidle restore done here so limit | ||
290 | * this to non-off mode resume paths so we don't do it twice. | ||
291 | */ | ||
292 | omap3_intc_resume_idle(); | ||
289 | } | 293 | } |
290 | omap3_intc_resume_idle(); | ||
291 | 294 | ||
292 | pwrdm_post_transition(NULL); | 295 | pwrdm_post_transition(NULL); |
293 | 296 | ||
diff --git a/arch/arm/mach-shmobile/timer.c b/arch/arm/mach-shmobile/timer.c index ad008e4b0c49..67d79f9c6bad 100644 --- a/arch/arm/mach-shmobile/timer.c +++ b/arch/arm/mach-shmobile/timer.c | |||
@@ -40,8 +40,7 @@ static void __init shmobile_setup_delay_hz(unsigned int max_cpu_core_hz, | |||
40 | void __init shmobile_init_delay(void) | 40 | void __init shmobile_init_delay(void) |
41 | { | 41 | { |
42 | struct device_node *np, *cpus; | 42 | struct device_node *np, *cpus; |
43 | bool is_a7_a8_a9 = false; | 43 | unsigned int div = 0; |
44 | bool is_a15 = false; | ||
45 | bool has_arch_timer = false; | 44 | bool has_arch_timer = false; |
46 | u32 max_freq = 0; | 45 | u32 max_freq = 0; |
47 | 46 | ||
@@ -55,27 +54,22 @@ void __init shmobile_init_delay(void) | |||
55 | if (!of_property_read_u32(np, "clock-frequency", &freq)) | 54 | if (!of_property_read_u32(np, "clock-frequency", &freq)) |
56 | max_freq = max(max_freq, freq); | 55 | max_freq = max(max_freq, freq); |
57 | 56 | ||
58 | if (of_device_is_compatible(np, "arm,cortex-a8") || | 57 | if (of_device_is_compatible(np, "arm,cortex-a8")) { |
59 | of_device_is_compatible(np, "arm,cortex-a9")) { | 58 | div = 2; |
60 | is_a7_a8_a9 = true; | 59 | } else if (of_device_is_compatible(np, "arm,cortex-a9")) { |
61 | } else if (of_device_is_compatible(np, "arm,cortex-a7")) { | 60 | div = 1; |
62 | is_a7_a8_a9 = true; | 61 | } else if (of_device_is_compatible(np, "arm,cortex-a7") || |
63 | has_arch_timer = true; | 62 | of_device_is_compatible(np, "arm,cortex-a15")) { |
64 | } else if (of_device_is_compatible(np, "arm,cortex-a15")) { | 63 | div = 1; |
65 | is_a15 = true; | ||
66 | has_arch_timer = true; | 64 | has_arch_timer = true; |
67 | } | 65 | } |
68 | } | 66 | } |
69 | 67 | ||
70 | of_node_put(cpus); | 68 | of_node_put(cpus); |
71 | 69 | ||
72 | if (!max_freq) | 70 | if (!max_freq || !div) |
73 | return; | 71 | return; |
74 | 72 | ||
75 | if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) { | 73 | if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) |
76 | if (is_a7_a8_a9) | 74 | shmobile_setup_delay_hz(max_freq, 1, div); |
77 | shmobile_setup_delay_hz(max_freq, 1, 3); | ||
78 | else if (is_a15) | ||
79 | shmobile_setup_delay_hz(max_freq, 2, 4); | ||
80 | } | ||
81 | } | 75 | } |
diff --git a/arch/arm/mach-socfpga/headsmp.S b/arch/arm/mach-socfpga/headsmp.S index 5d94b7a2fb10..c160fa3007e9 100644 --- a/arch/arm/mach-socfpga/headsmp.S +++ b/arch/arm/mach-socfpga/headsmp.S | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <asm/assembler.h> | 13 | #include <asm/assembler.h> |
14 | 14 | ||
15 | .arch armv7-a | 15 | .arch armv7-a |
16 | .arm | ||
16 | 17 | ||
17 | ENTRY(secondary_trampoline) | 18 | ENTRY(secondary_trampoline) |
18 | /* CPU1 will always fetch from 0x0 when it is brought out of reset. | 19 | /* CPU1 will always fetch from 0x0 when it is brought out of reset. |
diff --git a/arch/arm64/boot/dts/renesas/r8a7795.dtsi b/arch/arm64/boot/dts/renesas/r8a7795.dtsi index a7315ebe3883..706d2426024f 100644 --- a/arch/arm64/boot/dts/renesas/r8a7795.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a7795.dtsi | |||
@@ -120,7 +120,6 @@ | |||
120 | compatible = "fixed-clock"; | 120 | compatible = "fixed-clock"; |
121 | #clock-cells = <0>; | 121 | #clock-cells = <0>; |
122 | clock-frequency = <0>; | 122 | clock-frequency = <0>; |
123 | status = "disabled"; | ||
124 | }; | 123 | }; |
125 | 124 | ||
126 | soc { | 125 | soc { |
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20-ref.dts b/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20-ref.dts index 727ae5f8c4e7..b0ed44313a5b 100644 --- a/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20-ref.dts +++ b/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20-ref.dts | |||
@@ -70,7 +70,6 @@ | |||
70 | i2c3 = &i2c3; | 70 | i2c3 = &i2c3; |
71 | i2c4 = &i2c4; | 71 | i2c4 = &i2c4; |
72 | i2c5 = &i2c5; | 72 | i2c5 = &i2c5; |
73 | i2c6 = &i2c6; | ||
74 | }; | 73 | }; |
75 | }; | 74 | }; |
76 | 75 | ||
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi index e682a3f52791..651c9d9d2d54 100644 --- a/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi +++ b/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi | |||
@@ -201,15 +201,12 @@ | |||
201 | 201 | ||
202 | i2c2: i2c@58782000 { | 202 | i2c2: i2c@58782000 { |
203 | compatible = "socionext,uniphier-fi2c"; | 203 | compatible = "socionext,uniphier-fi2c"; |
204 | status = "disabled"; | ||
205 | reg = <0x58782000 0x80>; | 204 | reg = <0x58782000 0x80>; |
206 | #address-cells = <1>; | 205 | #address-cells = <1>; |
207 | #size-cells = <0>; | 206 | #size-cells = <0>; |
208 | interrupts = <0 43 4>; | 207 | interrupts = <0 43 4>; |
209 | pinctrl-names = "default"; | ||
210 | pinctrl-0 = <&pinctrl_i2c2>; | ||
211 | clocks = <&i2c_clk>; | 208 | clocks = <&i2c_clk>; |
212 | clock-frequency = <100000>; | 209 | clock-frequency = <400000>; |
213 | }; | 210 | }; |
214 | 211 | ||
215 | i2c3: i2c@58783000 { | 212 | i2c3: i2c@58783000 { |
@@ -227,12 +224,15 @@ | |||
227 | 224 | ||
228 | i2c4: i2c@58784000 { | 225 | i2c4: i2c@58784000 { |
229 | compatible = "socionext,uniphier-fi2c"; | 226 | compatible = "socionext,uniphier-fi2c"; |
227 | status = "disabled"; | ||
230 | reg = <0x58784000 0x80>; | 228 | reg = <0x58784000 0x80>; |
231 | #address-cells = <1>; | 229 | #address-cells = <1>; |
232 | #size-cells = <0>; | 230 | #size-cells = <0>; |
233 | interrupts = <0 45 4>; | 231 | interrupts = <0 45 4>; |
232 | pinctrl-names = "default"; | ||
233 | pinctrl-0 = <&pinctrl_i2c4>; | ||
234 | clocks = <&i2c_clk>; | 234 | clocks = <&i2c_clk>; |
235 | clock-frequency = <400000>; | 235 | clock-frequency = <100000>; |
236 | }; | 236 | }; |
237 | 237 | ||
238 | i2c5: i2c@58785000 { | 238 | i2c5: i2c@58785000 { |
@@ -245,16 +245,6 @@ | |||
245 | clock-frequency = <400000>; | 245 | clock-frequency = <400000>; |
246 | }; | 246 | }; |
247 | 247 | ||
248 | i2c6: i2c@58786000 { | ||
249 | compatible = "socionext,uniphier-fi2c"; | ||
250 | reg = <0x58786000 0x80>; | ||
251 | #address-cells = <1>; | ||
252 | #size-cells = <0>; | ||
253 | interrupts = <0 26 4>; | ||
254 | clocks = <&i2c_clk>; | ||
255 | clock-frequency = <400000>; | ||
256 | }; | ||
257 | |||
258 | system_bus: system-bus@58c00000 { | 248 | system_bus: system-bus@58c00000 { |
259 | compatible = "socionext,uniphier-system-bus"; | 249 | compatible = "socionext,uniphier-system-bus"; |
260 | status = "disabled"; | 250 | status = "disabled"; |
diff --git a/arch/nios2/lib/memset.c b/arch/nios2/lib/memset.c index c2cfcb121e34..2fcefe720283 100644 --- a/arch/nios2/lib/memset.c +++ b/arch/nios2/lib/memset.c | |||
@@ -68,7 +68,7 @@ void *memset(void *s, int c, size_t count) | |||
68 | "=r" (charcnt), /* %1 Output */ | 68 | "=r" (charcnt), /* %1 Output */ |
69 | "=r" (dwordcnt), /* %2 Output */ | 69 | "=r" (dwordcnt), /* %2 Output */ |
70 | "=r" (fill8reg), /* %3 Output */ | 70 | "=r" (fill8reg), /* %3 Output */ |
71 | "=r" (wrkrega) /* %4 Output */ | 71 | "=&r" (wrkrega) /* %4 Output only */ |
72 | : "r" (c), /* %5 Input */ | 72 | : "r" (c), /* %5 Input */ |
73 | "0" (s), /* %0 Input/Output */ | 73 | "0" (s), /* %0 Input/Output */ |
74 | "1" (count) /* %1 Input/Output */ | 74 | "1" (count) /* %1 Input/Output */ |
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index c976ebfe2269..57b4836b7ecd 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S | |||
@@ -344,7 +344,7 @@ tracesys_next: | |||
344 | #endif | 344 | #endif |
345 | 345 | ||
346 | cmpib,COND(=),n -1,%r20,tracesys_exit /* seccomp may have returned -1 */ | 346 | cmpib,COND(=),n -1,%r20,tracesys_exit /* seccomp may have returned -1 */ |
347 | comiclr,>>= __NR_Linux_syscalls, %r20, %r0 | 347 | comiclr,>> __NR_Linux_syscalls, %r20, %r0 |
348 | b,n .Ltracesys_nosys | 348 | b,n .Ltracesys_nosys |
349 | 349 | ||
350 | LDREGX %r20(%r19), %r19 | 350 | LDREGX %r20(%r19), %r19 |
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index 3fa9df70aa20..2fc5d4db503c 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h | |||
@@ -384,3 +384,5 @@ SYSCALL(ni_syscall) | |||
384 | SYSCALL(ni_syscall) | 384 | SYSCALL(ni_syscall) |
385 | SYSCALL(mlock2) | 385 | SYSCALL(mlock2) |
386 | SYSCALL(copy_file_range) | 386 | SYSCALL(copy_file_range) |
387 | COMPAT_SYS_SPU(preadv2) | ||
388 | COMPAT_SYS_SPU(pwritev2) | ||
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index 1f2594d45605..cf12c580f6b2 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <uapi/asm/unistd.h> | 12 | #include <uapi/asm/unistd.h> |
13 | 13 | ||
14 | 14 | ||
15 | #define NR_syscalls 380 | 15 | #define NR_syscalls 382 |
16 | 16 | ||
17 | #define __NR__exit __NR_exit | 17 | #define __NR__exit __NR_exit |
18 | 18 | ||
diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h index e4396a7d0f7c..4afe66aa1400 100644 --- a/arch/powerpc/include/asm/word-at-a-time.h +++ b/arch/powerpc/include/asm/word-at-a-time.h | |||
@@ -82,7 +82,7 @@ static inline unsigned long create_zero_mask(unsigned long bits) | |||
82 | "andc %1,%1,%2\n\t" | 82 | "andc %1,%1,%2\n\t" |
83 | "popcntd %0,%1" | 83 | "popcntd %0,%1" |
84 | : "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask) | 84 | : "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask) |
85 | : "r" (bits)); | 85 | : "b" (bits)); |
86 | 86 | ||
87 | return leading_zero_bits; | 87 | return leading_zero_bits; |
88 | } | 88 | } |
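The constraint change from "r" to "b" matters because the first instruction of this asm sequence, an addi on the bits operand just above the lines shown, would read the constant 0 instead of the register if GCC happened to allocate r0: PowerPC addi treats rA == r0 as zero, and the "b" (base register) constraint excludes r0. A minimal illustration, not taken from the patch:

/* An input feeding addi's rA field must never land in r0. */
static inline unsigned long sketch_add_one(unsigned long x)
{
	unsigned long r;

	asm("addi	%0,%1,1" : "=r" (r) : "b" (x));
	return r;
}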
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h index 940290d45b08..e9f5f41aa55a 100644 --- a/arch/powerpc/include/uapi/asm/unistd.h +++ b/arch/powerpc/include/uapi/asm/unistd.h | |||
@@ -390,5 +390,7 @@ | |||
390 | #define __NR_membarrier 365 | 390 | #define __NR_membarrier 365 |
391 | #define __NR_mlock2 378 | 391 | #define __NR_mlock2 378 |
392 | #define __NR_copy_file_range 379 | 392 | #define __NR_copy_file_range 379 |
393 | #define __NR_preadv2 380 | ||
394 | #define __NR_pwritev2 381 | ||
393 | 395 | ||
394 | #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ | 396 | #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ |
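Together with the systbl entries and the NR_syscalls bump above, these numbers make preadv2/pwritev2 reachable. A hedged userspace sketch for a 64-bit build, where the low-half position argument carries the whole offset; it assumes headers that already define __NR_preadv2:

#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>

/* Raw invocation, usable before libc grows a preadv2() wrapper.
 * Kernel argument order: fd, iov, iovcnt, pos_low, pos_high, flags. */
static ssize_t sketch_preadv2(int fd, const struct iovec *iov, int iovcnt,
			      off_t offset, int flags)
{
	return syscall(__NR_preadv2, fd, iov, iovcnt, offset, 0L, flags);
}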
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h index d29ad9545b41..081b2ad99d73 100644 --- a/arch/s390/include/asm/mmu.h +++ b/arch/s390/include/asm/mmu.h | |||
@@ -11,7 +11,7 @@ typedef struct { | |||
11 | spinlock_t list_lock; | 11 | spinlock_t list_lock; |
12 | struct list_head pgtable_list; | 12 | struct list_head pgtable_list; |
13 | struct list_head gmap_list; | 13 | struct list_head gmap_list; |
14 | unsigned long asce_bits; | 14 | unsigned long asce; |
15 | unsigned long asce_limit; | 15 | unsigned long asce_limit; |
16 | unsigned long vdso_base; | 16 | unsigned long vdso_base; |
17 | /* The mmu context allocates 4K page tables. */ | 17 | /* The mmu context allocates 4K page tables. */ |
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index d321469eeda7..c837b79b455d 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h | |||
@@ -26,12 +26,28 @@ static inline int init_new_context(struct task_struct *tsk, | |||
26 | mm->context.has_pgste = 0; | 26 | mm->context.has_pgste = 0; |
27 | mm->context.use_skey = 0; | 27 | mm->context.use_skey = 0; |
28 | #endif | 28 | #endif |
29 | if (mm->context.asce_limit == 0) { | 29 | switch (mm->context.asce_limit) { |
30 | case 1UL << 42: | ||
31 | /* | ||
32 | * forked 3-level task, fall through to set new asce with new | ||
33 | * mm->pgd | ||
34 | */ | ||
35 | case 0: | ||
30 | /* context created by exec, set asce limit to 4TB */ | 36 | /* context created by exec, set asce limit to 4TB */ |
31 | mm->context.asce_bits = _ASCE_TABLE_LENGTH | | ||
32 | _ASCE_USER_BITS | _ASCE_TYPE_REGION3; | ||
33 | mm->context.asce_limit = STACK_TOP_MAX; | 37 | mm->context.asce_limit = STACK_TOP_MAX; |
34 | } else if (mm->context.asce_limit == (1UL << 31)) { | 38 | mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | |
39 | _ASCE_USER_BITS | _ASCE_TYPE_REGION3; | ||
40 | break; | ||
41 | case 1UL << 53: | ||
42 | /* forked 4-level task, set new asce with new mm->pgd */ | ||
43 | mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | | ||
44 | _ASCE_USER_BITS | _ASCE_TYPE_REGION2; | ||
45 | break; | ||
46 | case 1UL << 31: | ||
47 | /* forked 2-level compat task, set new asce with new mm->pgd */ | ||
48 | mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | | ||
49 | _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT; | ||
50 | /* pgd_alloc() did not increase mm->nr_pmds */ | ||
35 | mm_inc_nr_pmds(mm); | 51 | mm_inc_nr_pmds(mm); |
36 | } | 52 | } |
37 | crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm)); | 53 | crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm)); |
@@ -42,7 +58,7 @@ static inline int init_new_context(struct task_struct *tsk, | |||
42 | 58 | ||
43 | static inline void set_user_asce(struct mm_struct *mm) | 59 | static inline void set_user_asce(struct mm_struct *mm) |
44 | { | 60 | { |
45 | S390_lowcore.user_asce = mm->context.asce_bits | __pa(mm->pgd); | 61 | S390_lowcore.user_asce = mm->context.asce; |
46 | if (current->thread.mm_segment.ar4) | 62 | if (current->thread.mm_segment.ar4) |
47 | __ctl_load(S390_lowcore.user_asce, 7, 7); | 63 | __ctl_load(S390_lowcore.user_asce, 7, 7); |
48 | set_cpu_flag(CIF_ASCE); | 64 | set_cpu_flag(CIF_ASCE); |
@@ -71,7 +87,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
71 | { | 87 | { |
72 | int cpu = smp_processor_id(); | 88 | int cpu = smp_processor_id(); |
73 | 89 | ||
74 | S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd); | 90 | S390_lowcore.user_asce = next->context.asce; |
75 | if (prev == next) | 91 | if (prev == next) |
76 | return; | 92 | return; |
77 | if (MACHINE_HAS_TLB_LC) | 93 | if (MACHINE_HAS_TLB_LC) |
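Each branch of the switch above combines the new pgd with the same base bits and differs only in the table type that matches asce_limit (2 GB segment table, 4 TB region-third table, 8 PB region-second table). The common step, written out as a sketch rather than code from the patch:

#include <linux/mm_types.h>
#include <asm/page.h>
#include <asm/pgtable.h>

/* type is _ASCE_TYPE_SEGMENT, _ASCE_TYPE_REGION3 or _ASCE_TYPE_REGION2 */
static inline unsigned long sketch_build_asce(struct mm_struct *mm,
					      unsigned long type)
{
	return __pa(mm->pgd) | _ASCE_TABLE_LENGTH | _ASCE_USER_BITS | type;
}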
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index 9b3d9b6099f2..da34cb6b1f3b 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h | |||
@@ -52,8 +52,8 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm) | |||
52 | return _REGION2_ENTRY_EMPTY; | 52 | return _REGION2_ENTRY_EMPTY; |
53 | } | 53 | } |
54 | 54 | ||
55 | int crst_table_upgrade(struct mm_struct *, unsigned long limit); | 55 | int crst_table_upgrade(struct mm_struct *); |
56 | void crst_table_downgrade(struct mm_struct *, unsigned long limit); | 56 | void crst_table_downgrade(struct mm_struct *); |
57 | 57 | ||
58 | static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) | 58 | static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) |
59 | { | 59 | { |
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index d6fd22ea270d..18cdede1aeda 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h | |||
@@ -175,7 +175,7 @@ extern __vector128 init_task_fpu_regs[__NUM_VXRS]; | |||
175 | regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \ | 175 | regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \ |
176 | regs->psw.addr = new_psw; \ | 176 | regs->psw.addr = new_psw; \ |
177 | regs->gprs[15] = new_stackp; \ | 177 | regs->gprs[15] = new_stackp; \ |
178 | crst_table_downgrade(current->mm, 1UL << 31); \ | 178 | crst_table_downgrade(current->mm); \ |
179 | execve_tail(); \ | 179 | execve_tail(); \ |
180 | } while (0) | 180 | } while (0) |
181 | 181 | ||
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h index ca148f7c3eaa..a2e6ef32e054 100644 --- a/arch/s390/include/asm/tlbflush.h +++ b/arch/s390/include/asm/tlbflush.h | |||
@@ -110,8 +110,7 @@ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce) | |||
110 | static inline void __tlb_flush_kernel(void) | 110 | static inline void __tlb_flush_kernel(void) |
111 | { | 111 | { |
112 | if (MACHINE_HAS_IDTE) | 112 | if (MACHINE_HAS_IDTE) |
113 | __tlb_flush_idte((unsigned long) init_mm.pgd | | 113 | __tlb_flush_idte(init_mm.context.asce); |
114 | init_mm.context.asce_bits); | ||
115 | else | 114 | else |
116 | __tlb_flush_global(); | 115 | __tlb_flush_global(); |
117 | } | 116 | } |
@@ -133,8 +132,7 @@ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce) | |||
133 | static inline void __tlb_flush_kernel(void) | 132 | static inline void __tlb_flush_kernel(void) |
134 | { | 133 | { |
135 | if (MACHINE_HAS_TLB_LC) | 134 | if (MACHINE_HAS_TLB_LC) |
136 | __tlb_flush_idte_local((unsigned long) init_mm.pgd | | 135 | __tlb_flush_idte_local(init_mm.context.asce); |
137 | init_mm.context.asce_bits); | ||
138 | else | 136 | else |
139 | __tlb_flush_local(); | 137 | __tlb_flush_local(); |
140 | } | 138 | } |
@@ -148,8 +146,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm) | |||
148 | * only ran on the local cpu. | 146 | * only ran on the local cpu. |
149 | */ | 147 | */ |
150 | if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list)) | 148 | if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list)) |
151 | __tlb_flush_asce(mm, (unsigned long) mm->pgd | | 149 | __tlb_flush_asce(mm, mm->context.asce); |
152 | mm->context.asce_bits); | ||
153 | else | 150 | else |
154 | __tlb_flush_full(mm); | 151 | __tlb_flush_full(mm); |
155 | } | 152 | } |
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index c7b0451397d6..2489b2e917c8 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c | |||
@@ -89,7 +89,8 @@ void __init paging_init(void) | |||
89 | asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH; | 89 | asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH; |
90 | pgd_type = _REGION3_ENTRY_EMPTY; | 90 | pgd_type = _REGION3_ENTRY_EMPTY; |
91 | } | 91 | } |
92 | S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits; | 92 | init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits; |
93 | S390_lowcore.kernel_asce = init_mm.context.asce; | ||
93 | clear_table((unsigned long *) init_mm.pgd, pgd_type, | 94 | clear_table((unsigned long *) init_mm.pgd, pgd_type, |
94 | sizeof(unsigned long)*2048); | 95 | sizeof(unsigned long)*2048); |
95 | vmem_map_init(); | 96 | vmem_map_init(); |
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index 45c4daa49930..89cf09e5f168 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c | |||
@@ -174,7 +174,7 @@ int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags) | |||
174 | if (!(flags & MAP_FIXED)) | 174 | if (!(flags & MAP_FIXED)) |
175 | addr = 0; | 175 | addr = 0; |
176 | if ((addr + len) >= TASK_SIZE) | 176 | if ((addr + len) >= TASK_SIZE) |
177 | return crst_table_upgrade(current->mm, TASK_MAX_SIZE); | 177 | return crst_table_upgrade(current->mm); |
178 | return 0; | 178 | return 0; |
179 | } | 179 | } |
180 | 180 | ||
@@ -191,7 +191,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr, | |||
191 | return area; | 191 | return area; |
192 | if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) { | 192 | if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) { |
193 | /* Upgrade the page table to 4 levels and retry. */ | 193 | /* Upgrade the page table to 4 levels and retry. */ |
194 | rc = crst_table_upgrade(mm, TASK_MAX_SIZE); | 194 | rc = crst_table_upgrade(mm); |
195 | if (rc) | 195 | if (rc) |
196 | return (unsigned long) rc; | 196 | return (unsigned long) rc; |
197 | area = arch_get_unmapped_area(filp, addr, len, pgoff, flags); | 197 | area = arch_get_unmapped_area(filp, addr, len, pgoff, flags); |
@@ -213,7 +213,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr, | |||
213 | return area; | 213 | return area; |
214 | if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) { | 214 | if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) { |
215 | /* Upgrade the page table to 4 levels and retry. */ | 215 | /* Upgrade the page table to 4 levels and retry. */ |
216 | rc = crst_table_upgrade(mm, TASK_MAX_SIZE); | 216 | rc = crst_table_upgrade(mm); |
217 | if (rc) | 217 | if (rc) |
218 | return (unsigned long) rc; | 218 | return (unsigned long) rc; |
219 | area = arch_get_unmapped_area_topdown(filp, addr, len, | 219 | area = arch_get_unmapped_area_topdown(filp, addr, len, |
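Both s390_get_unmapped_area() variants above keep the same try / upgrade / retry shape, only dropping the now-redundant limit argument. A generic, self-contained sketch of that pattern; every name here is illustrative, not a kernel function:

	#include <errno.h>

	/* Illustrative only: retry an allocation once after growing the limit. */
	static long alloc_with_upgrade(long (*try_alloc)(unsigned long),
				       int (*upgrade_limit)(void),
				       unsigned long len)
	{
		long area = try_alloc(len);

		if (area == -ENOMEM) {
			if (upgrade_limit())       /* 0 on success */
				return -ENOMEM;
			area = try_alloc(len);     /* retry with the larger limit */
		}
		return area;
	}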
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index f6c3de26cda8..e8b5962ac12a 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c | |||
@@ -76,81 +76,52 @@ static void __crst_table_upgrade(void *arg) | |||
76 | __tlb_flush_local(); | 76 | __tlb_flush_local(); |
77 | } | 77 | } |
78 | 78 | ||
79 | int crst_table_upgrade(struct mm_struct *mm, unsigned long limit) | 79 | int crst_table_upgrade(struct mm_struct *mm) |
80 | { | 80 | { |
81 | unsigned long *table, *pgd; | 81 | unsigned long *table, *pgd; |
82 | unsigned long entry; | ||
83 | int flush; | ||
84 | 82 | ||
85 | BUG_ON(limit > TASK_MAX_SIZE); | 83 | /* upgrade should only happen from 3 to 4 levels */ |
86 | flush = 0; | 84 | BUG_ON(mm->context.asce_limit != (1UL << 42)); |
87 | repeat: | 85 | |
88 | table = crst_table_alloc(mm); | 86 | table = crst_table_alloc(mm); |
89 | if (!table) | 87 | if (!table) |
90 | return -ENOMEM; | 88 | return -ENOMEM; |
89 | |||
91 | spin_lock_bh(&mm->page_table_lock); | 90 | spin_lock_bh(&mm->page_table_lock); |
92 | if (mm->context.asce_limit < limit) { | 91 | pgd = (unsigned long *) mm->pgd; |
93 | pgd = (unsigned long *) mm->pgd; | 92 | crst_table_init(table, _REGION2_ENTRY_EMPTY); |
94 | if (mm->context.asce_limit <= (1UL << 31)) { | 93 | pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd); |
95 | entry = _REGION3_ENTRY_EMPTY; | 94 | mm->pgd = (pgd_t *) table; |
96 | mm->context.asce_limit = 1UL << 42; | 95 | mm->context.asce_limit = 1UL << 53; |
97 | mm->context.asce_bits = _ASCE_TABLE_LENGTH | | 96 | mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | |
98 | _ASCE_USER_BITS | | 97 | _ASCE_USER_BITS | _ASCE_TYPE_REGION2; |
99 | _ASCE_TYPE_REGION3; | 98 | mm->task_size = mm->context.asce_limit; |
100 | } else { | ||
101 | entry = _REGION2_ENTRY_EMPTY; | ||
102 | mm->context.asce_limit = 1UL << 53; | ||
103 | mm->context.asce_bits = _ASCE_TABLE_LENGTH | | ||
104 | _ASCE_USER_BITS | | ||
105 | _ASCE_TYPE_REGION2; | ||
106 | } | ||
107 | crst_table_init(table, entry); | ||
108 | pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd); | ||
109 | mm->pgd = (pgd_t *) table; | ||
110 | mm->task_size = mm->context.asce_limit; | ||
111 | table = NULL; | ||
112 | flush = 1; | ||
113 | } | ||
114 | spin_unlock_bh(&mm->page_table_lock); | 99 | spin_unlock_bh(&mm->page_table_lock); |
115 | if (table) | 100 | |
116 | crst_table_free(mm, table); | 101 | on_each_cpu(__crst_table_upgrade, mm, 0); |
117 | if (mm->context.asce_limit < limit) | ||
118 | goto repeat; | ||
119 | if (flush) | ||
120 | on_each_cpu(__crst_table_upgrade, mm, 0); | ||
121 | return 0; | 102 | return 0; |
122 | } | 103 | } |
123 | 104 | ||
124 | void crst_table_downgrade(struct mm_struct *mm, unsigned long limit) | 105 | void crst_table_downgrade(struct mm_struct *mm) |
125 | { | 106 | { |
126 | pgd_t *pgd; | 107 | pgd_t *pgd; |
127 | 108 | ||
109 | /* downgrade should only happen from 3 to 2 levels (compat only) */ | ||
110 | BUG_ON(mm->context.asce_limit != (1UL << 42)); | ||
111 | |||
128 | if (current->active_mm == mm) { | 112 | if (current->active_mm == mm) { |
129 | clear_user_asce(); | 113 | clear_user_asce(); |
130 | __tlb_flush_mm(mm); | 114 | __tlb_flush_mm(mm); |
131 | } | 115 | } |
132 | while (mm->context.asce_limit > limit) { | 116 | |
133 | pgd = mm->pgd; | 117 | pgd = mm->pgd; |
134 | switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) { | 118 | mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN); |
135 | case _REGION_ENTRY_TYPE_R2: | 119 | mm->context.asce_limit = 1UL << 31; |
136 | mm->context.asce_limit = 1UL << 42; | 120 | mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | |
137 | mm->context.asce_bits = _ASCE_TABLE_LENGTH | | 121 | _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT; |
138 | _ASCE_USER_BITS | | 122 | mm->task_size = mm->context.asce_limit; |
139 | _ASCE_TYPE_REGION3; | 123 | crst_table_free(mm, (unsigned long *) pgd); |
140 | break; | 124 | |
141 | case _REGION_ENTRY_TYPE_R3: | ||
142 | mm->context.asce_limit = 1UL << 31; | ||
143 | mm->context.asce_bits = _ASCE_TABLE_LENGTH | | ||
144 | _ASCE_USER_BITS | | ||
145 | _ASCE_TYPE_SEGMENT; | ||
146 | break; | ||
147 | default: | ||
148 | BUG(); | ||
149 | } | ||
150 | mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN); | ||
151 | mm->task_size = mm->context.asce_limit; | ||
152 | crst_table_free(mm, (unsigned long *) pgd); | ||
153 | } | ||
154 | if (current->active_mm == mm) | 125 | if (current->active_mm == mm) |
155 | set_user_asce(mm); | 126 | set_user_asce(mm); |
156 | } | 127 | } |
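For reference, the shift values that the new BUG_ON checks and asce_limit assignments above rely on correspond to these address-space sizes; the table types match the _ASCE_TYPE_* constants used in the hunks (the macro names below are illustrative, not kernel symbols):

	#define ASCE_LIMIT_2LEVEL (1UL << 31)   /* 2 GiB - segment-table ASCE   */
	#define ASCE_LIMIT_3LEVEL (1UL << 42)   /* 4 TiB - region-third ASCE    */
	#define ASCE_LIMIT_4LEVEL (1UL << 53)   /* 8 PiB - region-second ASCE   */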
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c index e595e89eac65..1ea8c07eab84 100644 --- a/arch/s390/pci/pci_dma.c +++ b/arch/s390/pci/pci_dma.c | |||
@@ -457,7 +457,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev) | |||
457 | zdev->dma_table = dma_alloc_cpu_table(); | 457 | zdev->dma_table = dma_alloc_cpu_table(); |
458 | if (!zdev->dma_table) { | 458 | if (!zdev->dma_table) { |
459 | rc = -ENOMEM; | 459 | rc = -ENOMEM; |
460 | goto out_clean; | 460 | goto out; |
461 | } | 461 | } |
462 | 462 | ||
463 | /* | 463 | /* |
@@ -477,18 +477,22 @@ int zpci_dma_init_device(struct zpci_dev *zdev) | |||
477 | zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8); | 477 | zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8); |
478 | if (!zdev->iommu_bitmap) { | 478 | if (!zdev->iommu_bitmap) { |
479 | rc = -ENOMEM; | 479 | rc = -ENOMEM; |
480 | goto out_reg; | 480 | goto free_dma_table; |
481 | } | 481 | } |
482 | 482 | ||
483 | rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma, | 483 | rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma, |
484 | (u64) zdev->dma_table); | 484 | (u64) zdev->dma_table); |
485 | if (rc) | 485 | if (rc) |
486 | goto out_reg; | 486 | goto free_bitmap; |
487 | return 0; | ||
488 | 487 | ||
489 | out_reg: | 488 | return 0; |
489 | free_bitmap: | ||
490 | vfree(zdev->iommu_bitmap); | ||
491 | zdev->iommu_bitmap = NULL; | ||
492 | free_dma_table: | ||
490 | dma_free_cpu_table(zdev->dma_table); | 493 | dma_free_cpu_table(zdev->dma_table); |
491 | out_clean: | 494 | zdev->dma_table = NULL; |
495 | out: | ||
492 | return rc; | 496 | return rc; |
493 | } | 497 | } |
494 | 498 | ||
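The relabelled error path in zpci_dma_init_device() above follows the usual goto-unwind convention: each label undoes exactly the allocations made before the failing step, and the freed pointers are NULLed so later cleanup cannot double-free. A self-contained sketch of the pattern; the names and sizes are made up for illustration:

	#include <stdlib.h>

	#define TABLE_SIZE  4096
	#define BITMAP_SIZE  512

	static int register_resources(void *t, void *b) { (void)t; (void)b; return 0; }

	int init_resources(void **table_out, void **bitmap_out)
	{
		void *table, *bitmap;
		int rc;

		table = malloc(TABLE_SIZE);
		if (!table) {
			rc = -1;
			goto out;
		}
		bitmap = calloc(1, BITMAP_SIZE);
		if (!bitmap) {
			rc = -1;
			goto free_table;
		}
		rc = register_resources(table, bitmap);
		if (rc)
			goto free_bitmap;
		*table_out = table;
		*bitmap_out = bitmap;
		return 0;

	free_bitmap:
		free(bitmap);
	free_table:
		free(table);
	out:
		return rc;
	}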
diff --git a/arch/sparc/configs/sparc32_defconfig b/arch/sparc/configs/sparc32_defconfig index fb23fd6b186a..c74d3701ad68 100644 --- a/arch/sparc/configs/sparc32_defconfig +++ b/arch/sparc/configs/sparc32_defconfig | |||
@@ -24,7 +24,6 @@ CONFIG_INET_AH=y | |||
24 | CONFIG_INET_ESP=y | 24 | CONFIG_INET_ESP=y |
25 | CONFIG_INET_IPCOMP=y | 25 | CONFIG_INET_IPCOMP=y |
26 | # CONFIG_INET_LRO is not set | 26 | # CONFIG_INET_LRO is not set |
27 | CONFIG_IPV6_PRIVACY=y | ||
28 | CONFIG_INET6_AH=m | 27 | CONFIG_INET6_AH=m |
29 | CONFIG_INET6_ESP=m | 28 | CONFIG_INET6_ESP=m |
30 | CONFIG_INET6_IPCOMP=m | 29 | CONFIG_INET6_IPCOMP=m |
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig index 04920ab8e292..3583d676a916 100644 --- a/arch/sparc/configs/sparc64_defconfig +++ b/arch/sparc/configs/sparc64_defconfig | |||
@@ -48,7 +48,6 @@ CONFIG_SYN_COOKIES=y | |||
48 | CONFIG_INET_AH=y | 48 | CONFIG_INET_AH=y |
49 | CONFIG_INET_ESP=y | 49 | CONFIG_INET_ESP=y |
50 | CONFIG_INET_IPCOMP=y | 50 | CONFIG_INET_IPCOMP=y |
51 | CONFIG_IPV6_PRIVACY=y | ||
52 | CONFIG_IPV6_ROUTER_PREF=y | 51 | CONFIG_IPV6_ROUTER_PREF=y |
53 | CONFIG_IPV6_ROUTE_INFO=y | 52 | CONFIG_IPV6_ROUTE_INFO=y |
54 | CONFIG_IPV6_OPTIMISTIC_DAD=y | 53 | CONFIG_IPV6_OPTIMISTIC_DAD=y |
diff --git a/arch/sparc/include/asm/spitfire.h b/arch/sparc/include/asm/spitfire.h index 56f933816144..1d8321c827a8 100644 --- a/arch/sparc/include/asm/spitfire.h +++ b/arch/sparc/include/asm/spitfire.h | |||
@@ -48,6 +48,7 @@ | |||
48 | #define SUN4V_CHIP_SPARC_M6 0x06 | 48 | #define SUN4V_CHIP_SPARC_M6 0x06 |
49 | #define SUN4V_CHIP_SPARC_M7 0x07 | 49 | #define SUN4V_CHIP_SPARC_M7 0x07 |
50 | #define SUN4V_CHIP_SPARC64X 0x8a | 50 | #define SUN4V_CHIP_SPARC64X 0x8a |
51 | #define SUN4V_CHIP_SPARC_SN 0x8b | ||
51 | #define SUN4V_CHIP_UNKNOWN 0xff | 52 | #define SUN4V_CHIP_UNKNOWN 0xff |
52 | 53 | ||
53 | #ifndef __ASSEMBLY__ | 54 | #ifndef __ASSEMBLY__ |
diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h index b6de8b10a55b..36eee8132c22 100644 --- a/arch/sparc/include/uapi/asm/unistd.h +++ b/arch/sparc/include/uapi/asm/unistd.h | |||
@@ -423,8 +423,10 @@ | |||
423 | #define __NR_setsockopt 355 | 423 | #define __NR_setsockopt 355 |
424 | #define __NR_mlock2 356 | 424 | #define __NR_mlock2 356 |
425 | #define __NR_copy_file_range 357 | 425 | #define __NR_copy_file_range 357 |
426 | #define __NR_preadv2 358 | ||
427 | #define __NR_pwritev2 359 | ||
426 | 428 | ||
427 | #define NR_syscalls 358 | 429 | #define NR_syscalls 360 |
428 | 430 | ||
429 | /* Bitmask values returned from kern_features system call. */ | 431 | /* Bitmask values returned from kern_features system call. */ |
430 | #define KERN_FEATURE_MIXED_MODE_STACK 0x00000001 | 432 | #define KERN_FEATURE_MIXED_MODE_STACK 0x00000001 |
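With __NR_preadv2/__NR_pwritev2 wired up above, the new calls can be exercised on sparc through syscall(2) even before libc grows wrappers. A hedged userspace sketch; the position is passed as a low/high pair following the existing preadv convention, and flags 0 requests plain behaviour:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <sys/uio.h>
	#include <unistd.h>

	#ifndef __NR_preadv2
	#define __NR_preadv2 358            /* sparc number from the hunk above */
	#endif

	int main(void)
	{
		char buf[64];
		struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
		int fd = open("/etc/hostname", O_RDONLY);
		/* offset passed as a low/high pair, flags = 0 for plain preadv */
		long n = syscall(__NR_preadv2, fd, &iov, 1, 0L, 0L, 0);

		if (n < 0)
			perror("preadv2");
		else
			printf("read %ld bytes\n", n);
		return 0;
	}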
diff --git a/arch/sparc/kernel/cherrs.S b/arch/sparc/kernel/cherrs.S index 4ee1ad420862..655628def68e 100644 --- a/arch/sparc/kernel/cherrs.S +++ b/arch/sparc/kernel/cherrs.S | |||
@@ -214,8 +214,7 @@ do_dcpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */ | |||
214 | subcc %g1, %g2, %g1 ! Next cacheline | 214 | subcc %g1, %g2, %g1 ! Next cacheline |
215 | bge,pt %icc, 1b | 215 | bge,pt %icc, 1b |
216 | nop | 216 | nop |
217 | ba,pt %xcc, dcpe_icpe_tl1_common | 217 | ba,a,pt %xcc, dcpe_icpe_tl1_common |
218 | nop | ||
219 | 218 | ||
220 | do_dcpe_tl1_fatal: | 219 | do_dcpe_tl1_fatal: |
221 | sethi %hi(1f), %g7 | 220 | sethi %hi(1f), %g7 |
@@ -224,8 +223,7 @@ do_dcpe_tl1_fatal: | |||
224 | mov 0x2, %o0 | 223 | mov 0x2, %o0 |
225 | call cheetah_plus_parity_error | 224 | call cheetah_plus_parity_error |
226 | add %sp, PTREGS_OFF, %o1 | 225 | add %sp, PTREGS_OFF, %o1 |
227 | ba,pt %xcc, rtrap | 226 | ba,a,pt %xcc, rtrap |
228 | nop | ||
229 | .size do_dcpe_tl1,.-do_dcpe_tl1 | 227 | .size do_dcpe_tl1,.-do_dcpe_tl1 |
230 | 228 | ||
231 | .globl do_icpe_tl1 | 229 | .globl do_icpe_tl1 |
@@ -259,8 +257,7 @@ do_icpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */ | |||
259 | subcc %g1, %g2, %g1 | 257 | subcc %g1, %g2, %g1 |
260 | bge,pt %icc, 1b | 258 | bge,pt %icc, 1b |
261 | nop | 259 | nop |
262 | ba,pt %xcc, dcpe_icpe_tl1_common | 260 | ba,a,pt %xcc, dcpe_icpe_tl1_common |
263 | nop | ||
264 | 261 | ||
265 | do_icpe_tl1_fatal: | 262 | do_icpe_tl1_fatal: |
266 | sethi %hi(1f), %g7 | 263 | sethi %hi(1f), %g7 |
@@ -269,8 +266,7 @@ do_icpe_tl1_fatal: | |||
269 | mov 0x3, %o0 | 266 | mov 0x3, %o0 |
270 | call cheetah_plus_parity_error | 267 | call cheetah_plus_parity_error |
271 | add %sp, PTREGS_OFF, %o1 | 268 | add %sp, PTREGS_OFF, %o1 |
272 | ba,pt %xcc, rtrap | 269 | ba,a,pt %xcc, rtrap |
273 | nop | ||
274 | .size do_icpe_tl1,.-do_icpe_tl1 | 270 | .size do_icpe_tl1,.-do_icpe_tl1 |
275 | 271 | ||
276 | .type dcpe_icpe_tl1_common,#function | 272 | .type dcpe_icpe_tl1_common,#function |
@@ -456,7 +452,7 @@ __cheetah_log_error: | |||
456 | cmp %g2, 0x63 | 452 | cmp %g2, 0x63 |
457 | be c_cee | 453 | be c_cee |
458 | nop | 454 | nop |
459 | ba,pt %xcc, c_deferred | 455 | ba,a,pt %xcc, c_deferred |
460 | .size __cheetah_log_error,.-__cheetah_log_error | 456 | .size __cheetah_log_error,.-__cheetah_log_error |
461 | 457 | ||
462 | /* Cheetah FECC trap handling, we get here from tl{0,1}_fecc | 458 | /* Cheetah FECC trap handling, we get here from tl{0,1}_fecc |
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c index dfad8b1aea9f..493e023a468a 100644 --- a/arch/sparc/kernel/cpu.c +++ b/arch/sparc/kernel/cpu.c | |||
@@ -506,6 +506,12 @@ static void __init sun4v_cpu_probe(void) | |||
506 | sparc_pmu_type = "sparc-m7"; | 506 | sparc_pmu_type = "sparc-m7"; |
507 | break; | 507 | break; |
508 | 508 | ||
509 | case SUN4V_CHIP_SPARC_SN: | ||
510 | sparc_cpu_type = "SPARC-SN"; | ||
511 | sparc_fpu_type = "SPARC-SN integrated FPU"; | ||
512 | sparc_pmu_type = "sparc-sn"; | ||
513 | break; | ||
514 | |||
509 | case SUN4V_CHIP_SPARC64X: | 515 | case SUN4V_CHIP_SPARC64X: |
510 | sparc_cpu_type = "SPARC64-X"; | 516 | sparc_cpu_type = "SPARC64-X"; |
511 | sparc_fpu_type = "SPARC64-X integrated FPU"; | 517 | sparc_fpu_type = "SPARC64-X integrated FPU"; |
diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c index e69ec0e3f155..45c820e1cba5 100644 --- a/arch/sparc/kernel/cpumap.c +++ b/arch/sparc/kernel/cpumap.c | |||
@@ -328,6 +328,7 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index) | |||
328 | case SUN4V_CHIP_NIAGARA5: | 328 | case SUN4V_CHIP_NIAGARA5: |
329 | case SUN4V_CHIP_SPARC_M6: | 329 | case SUN4V_CHIP_SPARC_M6: |
330 | case SUN4V_CHIP_SPARC_M7: | 330 | case SUN4V_CHIP_SPARC_M7: |
331 | case SUN4V_CHIP_SPARC_SN: | ||
331 | case SUN4V_CHIP_SPARC64X: | 332 | case SUN4V_CHIP_SPARC64X: |
332 | rover_inc_table = niagara_iterate_method; | 333 | rover_inc_table = niagara_iterate_method; |
333 | break; | 334 | break; |
diff --git a/arch/sparc/kernel/fpu_traps.S b/arch/sparc/kernel/fpu_traps.S index a6864826a4bd..336d2750fe78 100644 --- a/arch/sparc/kernel/fpu_traps.S +++ b/arch/sparc/kernel/fpu_traps.S | |||
@@ -100,8 +100,8 @@ do_fpdis: | |||
100 | fmuld %f0, %f2, %f26 | 100 | fmuld %f0, %f2, %f26 |
101 | faddd %f0, %f2, %f28 | 101 | faddd %f0, %f2, %f28 |
102 | fmuld %f0, %f2, %f30 | 102 | fmuld %f0, %f2, %f30 |
103 | b,pt %xcc, fpdis_exit | 103 | ba,a,pt %xcc, fpdis_exit |
104 | nop | 104 | |
105 | 2: andcc %g5, FPRS_DU, %g0 | 105 | 2: andcc %g5, FPRS_DU, %g0 |
106 | bne,pt %icc, 3f | 106 | bne,pt %icc, 3f |
107 | fzero %f32 | 107 | fzero %f32 |
@@ -144,8 +144,8 @@ do_fpdis: | |||
144 | fmuld %f32, %f34, %f58 | 144 | fmuld %f32, %f34, %f58 |
145 | faddd %f32, %f34, %f60 | 145 | faddd %f32, %f34, %f60 |
146 | fmuld %f32, %f34, %f62 | 146 | fmuld %f32, %f34, %f62 |
147 | ba,pt %xcc, fpdis_exit | 147 | ba,a,pt %xcc, fpdis_exit |
148 | nop | 148 | |
149 | 3: mov SECONDARY_CONTEXT, %g3 | 149 | 3: mov SECONDARY_CONTEXT, %g3 |
150 | add %g6, TI_FPREGS, %g1 | 150 | add %g6, TI_FPREGS, %g1 |
151 | 151 | ||
@@ -197,8 +197,7 @@ fpdis_exit2: | |||
197 | fp_other_bounce: | 197 | fp_other_bounce: |
198 | call do_fpother | 198 | call do_fpother |
199 | add %sp, PTREGS_OFF, %o0 | 199 | add %sp, PTREGS_OFF, %o0 |
200 | ba,pt %xcc, rtrap | 200 | ba,a,pt %xcc, rtrap |
201 | nop | ||
202 | .size fp_other_bounce,.-fp_other_bounce | 201 | .size fp_other_bounce,.-fp_other_bounce |
203 | 202 | ||
204 | .align 32 | 203 | .align 32 |
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S index cd1f592cd347..a076b4249e62 100644 --- a/arch/sparc/kernel/head_64.S +++ b/arch/sparc/kernel/head_64.S | |||
@@ -414,6 +414,8 @@ sun4v_chip_type: | |||
414 | cmp %g2, 'T' | 414 | cmp %g2, 'T' |
415 | be,pt %xcc, 70f | 415 | be,pt %xcc, 70f |
416 | cmp %g2, 'M' | 416 | cmp %g2, 'M' |
417 | be,pt %xcc, 70f | ||
418 | cmp %g2, 'S' | ||
417 | bne,pn %xcc, 49f | 419 | bne,pn %xcc, 49f |
418 | nop | 420 | nop |
419 | 421 | ||
@@ -433,6 +435,9 @@ sun4v_chip_type: | |||
433 | cmp %g2, '7' | 435 | cmp %g2, '7' |
434 | be,pt %xcc, 5f | 436 | be,pt %xcc, 5f |
435 | mov SUN4V_CHIP_SPARC_M7, %g4 | 437 | mov SUN4V_CHIP_SPARC_M7, %g4 |
438 | cmp %g2, 'N' | ||
439 | be,pt %xcc, 5f | ||
440 | mov SUN4V_CHIP_SPARC_SN, %g4 | ||
436 | ba,pt %xcc, 49f | 441 | ba,pt %xcc, 49f |
437 | nop | 442 | nop |
438 | 443 | ||
@@ -461,9 +466,8 @@ sun4v_chip_type: | |||
461 | subcc %g3, 1, %g3 | 466 | subcc %g3, 1, %g3 |
462 | bne,pt %xcc, 41b | 467 | bne,pt %xcc, 41b |
463 | add %g1, 1, %g1 | 468 | add %g1, 1, %g1 |
464 | mov SUN4V_CHIP_SPARC64X, %g4 | ||
465 | ba,pt %xcc, 5f | 469 | ba,pt %xcc, 5f |
466 | nop | 470 | mov SUN4V_CHIP_SPARC64X, %g4 |
467 | 471 | ||
468 | 49: | 472 | 49: |
469 | mov SUN4V_CHIP_UNKNOWN, %g4 | 473 | mov SUN4V_CHIP_UNKNOWN, %g4 |
@@ -548,8 +552,7 @@ sun4u_init: | |||
548 | stxa %g0, [%g7] ASI_DMMU | 552 | stxa %g0, [%g7] ASI_DMMU |
549 | membar #Sync | 553 | membar #Sync |
550 | 554 | ||
551 | ba,pt %xcc, sun4u_continue | 555 | ba,a,pt %xcc, sun4u_continue |
552 | nop | ||
553 | 556 | ||
554 | sun4v_init: | 557 | sun4v_init: |
555 | /* Set ctx 0 */ | 558 | /* Set ctx 0 */ |
@@ -560,14 +563,12 @@ sun4v_init: | |||
560 | mov SECONDARY_CONTEXT, %g7 | 563 | mov SECONDARY_CONTEXT, %g7 |
561 | stxa %g0, [%g7] ASI_MMU | 564 | stxa %g0, [%g7] ASI_MMU |
562 | membar #Sync | 565 | membar #Sync |
563 | ba,pt %xcc, niagara_tlb_fixup | 566 | ba,a,pt %xcc, niagara_tlb_fixup |
564 | nop | ||
565 | 567 | ||
566 | sun4u_continue: | 568 | sun4u_continue: |
567 | BRANCH_IF_ANY_CHEETAH(g1, g7, cheetah_tlb_fixup) | 569 | BRANCH_IF_ANY_CHEETAH(g1, g7, cheetah_tlb_fixup) |
568 | 570 | ||
569 | ba,pt %xcc, spitfire_tlb_fixup | 571 | ba,a,pt %xcc, spitfire_tlb_fixup |
570 | nop | ||
571 | 572 | ||
572 | niagara_tlb_fixup: | 573 | niagara_tlb_fixup: |
573 | mov 3, %g2 /* Set TLB type to hypervisor. */ | 574 | mov 3, %g2 /* Set TLB type to hypervisor. */ |
@@ -597,6 +598,9 @@ niagara_tlb_fixup: | |||
597 | cmp %g1, SUN4V_CHIP_SPARC_M7 | 598 | cmp %g1, SUN4V_CHIP_SPARC_M7 |
598 | be,pt %xcc, niagara4_patch | 599 | be,pt %xcc, niagara4_patch |
599 | nop | 600 | nop |
601 | cmp %g1, SUN4V_CHIP_SPARC_SN | ||
602 | be,pt %xcc, niagara4_patch | ||
603 | nop | ||
600 | 604 | ||
601 | call generic_patch_copyops | 605 | call generic_patch_copyops |
602 | nop | 606 | nop |
@@ -639,8 +643,7 @@ niagara_patch: | |||
639 | call hypervisor_patch_cachetlbops | 643 | call hypervisor_patch_cachetlbops |
640 | nop | 644 | nop |
641 | 645 | ||
642 | ba,pt %xcc, tlb_fixup_done | 646 | ba,a,pt %xcc, tlb_fixup_done |
643 | nop | ||
644 | 647 | ||
645 | cheetah_tlb_fixup: | 648 | cheetah_tlb_fixup: |
646 | mov 2, %g2 /* Set TLB type to cheetah+. */ | 649 | mov 2, %g2 /* Set TLB type to cheetah+. */ |
@@ -659,8 +662,7 @@ cheetah_tlb_fixup: | |||
659 | call cheetah_patch_cachetlbops | 662 | call cheetah_patch_cachetlbops |
660 | nop | 663 | nop |
661 | 664 | ||
662 | ba,pt %xcc, tlb_fixup_done | 665 | ba,a,pt %xcc, tlb_fixup_done |
663 | nop | ||
664 | 666 | ||
665 | spitfire_tlb_fixup: | 667 | spitfire_tlb_fixup: |
666 | /* Set TLB type to spitfire. */ | 668 | /* Set TLB type to spitfire. */ |
@@ -774,8 +776,7 @@ setup_trap_table: | |||
774 | call %o1 | 776 | call %o1 |
775 | add %sp, (2047 + 128), %o0 | 777 | add %sp, (2047 + 128), %o0 |
776 | 778 | ||
777 | ba,pt %xcc, 2f | 779 | ba,a,pt %xcc, 2f |
778 | nop | ||
779 | 780 | ||
780 | 1: sethi %hi(sparc64_ttable_tl0), %o0 | 781 | 1: sethi %hi(sparc64_ttable_tl0), %o0 |
781 | set prom_set_trap_table_name, %g2 | 782 | set prom_set_trap_table_name, %g2 |
@@ -814,8 +815,7 @@ setup_trap_table: | |||
814 | 815 | ||
815 | BRANCH_IF_ANY_CHEETAH(o2, o3, 1f) | 816 | BRANCH_IF_ANY_CHEETAH(o2, o3, 1f) |
816 | 817 | ||
817 | ba,pt %xcc, 2f | 818 | ba,a,pt %xcc, 2f |
818 | nop | ||
819 | 819 | ||
820 | /* Disable STICK_INT interrupts. */ | 820 | /* Disable STICK_INT interrupts. */ |
821 | 1: | 821 | 1: |
diff --git a/arch/sparc/kernel/misctrap.S b/arch/sparc/kernel/misctrap.S index 753b4f031bfb..34b4933900bf 100644 --- a/arch/sparc/kernel/misctrap.S +++ b/arch/sparc/kernel/misctrap.S | |||
@@ -18,8 +18,7 @@ __do_privact: | |||
18 | 109: or %g7, %lo(109b), %g7 | 18 | 109: or %g7, %lo(109b), %g7 |
19 | call do_privact | 19 | call do_privact |
20 | add %sp, PTREGS_OFF, %o0 | 20 | add %sp, PTREGS_OFF, %o0 |
21 | ba,pt %xcc, rtrap | 21 | ba,a,pt %xcc, rtrap |
22 | nop | ||
23 | .size __do_privact,.-__do_privact | 22 | .size __do_privact,.-__do_privact |
24 | 23 | ||
25 | .type do_mna,#function | 24 | .type do_mna,#function |
@@ -46,8 +45,7 @@ do_mna: | |||
46 | mov %l5, %o2 | 45 | mov %l5, %o2 |
47 | call mem_address_unaligned | 46 | call mem_address_unaligned |
48 | add %sp, PTREGS_OFF, %o0 | 47 | add %sp, PTREGS_OFF, %o0 |
49 | ba,pt %xcc, rtrap | 48 | ba,a,pt %xcc, rtrap |
50 | nop | ||
51 | .size do_mna,.-do_mna | 49 | .size do_mna,.-do_mna |
52 | 50 | ||
53 | .type do_lddfmna,#function | 51 | .type do_lddfmna,#function |
@@ -65,8 +63,7 @@ do_lddfmna: | |||
65 | mov %l5, %o2 | 63 | mov %l5, %o2 |
66 | call handle_lddfmna | 64 | call handle_lddfmna |
67 | add %sp, PTREGS_OFF, %o0 | 65 | add %sp, PTREGS_OFF, %o0 |
68 | ba,pt %xcc, rtrap | 66 | ba,a,pt %xcc, rtrap |
69 | nop | ||
70 | .size do_lddfmna,.-do_lddfmna | 67 | .size do_lddfmna,.-do_lddfmna |
71 | 68 | ||
72 | .type do_stdfmna,#function | 69 | .type do_stdfmna,#function |
@@ -84,8 +81,7 @@ do_stdfmna: | |||
84 | mov %l5, %o2 | 81 | mov %l5, %o2 |
85 | call handle_stdfmna | 82 | call handle_stdfmna |
86 | add %sp, PTREGS_OFF, %o0 | 83 | add %sp, PTREGS_OFF, %o0 |
87 | ba,pt %xcc, rtrap | 84 | ba,a,pt %xcc, rtrap |
88 | nop | ||
89 | .size do_stdfmna,.-do_stdfmna | 85 | .size do_stdfmna,.-do_stdfmna |
90 | 86 | ||
91 | .type breakpoint_trap,#function | 87 | .type breakpoint_trap,#function |
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index badf0951d73c..c2b202d763a1 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c | |||
@@ -245,6 +245,18 @@ static void pci_parse_of_addrs(struct platform_device *op, | |||
245 | } | 245 | } |
246 | } | 246 | } |
247 | 247 | ||
248 | static void pci_init_dev_archdata(struct dev_archdata *sd, void *iommu, | ||
249 | void *stc, void *host_controller, | ||
250 | struct platform_device *op, | ||
251 | int numa_node) | ||
252 | { | ||
253 | sd->iommu = iommu; | ||
254 | sd->stc = stc; | ||
255 | sd->host_controller = host_controller; | ||
256 | sd->op = op; | ||
257 | sd->numa_node = numa_node; | ||
258 | } | ||
259 | |||
248 | static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, | 260 | static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, |
249 | struct device_node *node, | 261 | struct device_node *node, |
250 | struct pci_bus *bus, int devfn) | 262 | struct pci_bus *bus, int devfn) |
@@ -259,13 +271,10 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, | |||
259 | if (!dev) | 271 | if (!dev) |
260 | return NULL; | 272 | return NULL; |
261 | 273 | ||
274 | op = of_find_device_by_node(node); | ||
262 | sd = &dev->dev.archdata; | 275 | sd = &dev->dev.archdata; |
263 | sd->iommu = pbm->iommu; | 276 | pci_init_dev_archdata(sd, pbm->iommu, &pbm->stc, pbm, op, |
264 | sd->stc = &pbm->stc; | 277 | pbm->numa_node); |
265 | sd->host_controller = pbm; | ||
266 | sd->op = op = of_find_device_by_node(node); | ||
267 | sd->numa_node = pbm->numa_node; | ||
268 | |||
269 | sd = &op->dev.archdata; | 278 | sd = &op->dev.archdata; |
270 | sd->iommu = pbm->iommu; | 279 | sd->iommu = pbm->iommu; |
271 | sd->stc = &pbm->stc; | 280 | sd->stc = &pbm->stc; |
@@ -994,6 +1003,27 @@ void pcibios_set_master(struct pci_dev *dev) | |||
994 | /* No special bus mastering setup handling */ | 1003 | /* No special bus mastering setup handling */ |
995 | } | 1004 | } |
996 | 1005 | ||
1006 | #ifdef CONFIG_PCI_IOV | ||
1007 | int pcibios_add_device(struct pci_dev *dev) | ||
1008 | { | ||
1009 | struct pci_dev *pdev; | ||
1010 | |||
1011 | /* Add sriov arch specific initialization here. | ||
1012 | * Copy dev_archdata from PF to VF | ||
1013 | */ | ||
1014 | if (dev->is_virtfn) { | ||
1015 | struct dev_archdata *psd; | ||
1016 | |||
1017 | pdev = dev->physfn; | ||
1018 | psd = &pdev->dev.archdata; | ||
1019 | pci_init_dev_archdata(&dev->dev.archdata, psd->iommu, | ||
1020 | psd->stc, psd->host_controller, NULL, | ||
1021 | psd->numa_node); | ||
1022 | } | ||
1023 | return 0; | ||
1024 | } | ||
1025 | #endif /* CONFIG_PCI_IOV */ | ||
1026 | |||
997 | static int __init pcibios_init(void) | 1027 | static int __init pcibios_init(void) |
998 | { | 1028 | { |
999 | pci_dfl_cache_line_size = 64 >> 2; | 1029 | pci_dfl_cache_line_size = 64 >> 2; |
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index 26db95b54ee9..599f1207eed2 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c | |||
@@ -285,7 +285,8 @@ static void __init sun4v_patch(void) | |||
285 | 285 | ||
286 | sun4v_patch_2insn_range(&__sun4v_2insn_patch, | 286 | sun4v_patch_2insn_range(&__sun4v_2insn_patch, |
287 | &__sun4v_2insn_patch_end); | 287 | &__sun4v_2insn_patch_end); |
288 | if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7) | 288 | if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || |
289 | sun4v_chip_type == SUN4V_CHIP_SPARC_SN) | ||
289 | sun_m7_patch_2insn_range(&__sun_m7_2insn_patch, | 290 | sun_m7_patch_2insn_range(&__sun_m7_2insn_patch, |
290 | &__sun_m7_2insn_patch_end); | 291 | &__sun_m7_2insn_patch_end); |
291 | 292 | ||
@@ -524,6 +525,7 @@ static void __init init_sparc64_elf_hwcap(void) | |||
524 | sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || | 525 | sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || |
525 | sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || | 526 | sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || |
526 | sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || | 527 | sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || |
528 | sun4v_chip_type == SUN4V_CHIP_SPARC_SN || | ||
527 | sun4v_chip_type == SUN4V_CHIP_SPARC64X) | 529 | sun4v_chip_type == SUN4V_CHIP_SPARC64X) |
528 | cap |= HWCAP_SPARC_BLKINIT; | 530 | cap |= HWCAP_SPARC_BLKINIT; |
529 | if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || | 531 | if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || |
@@ -532,6 +534,7 @@ static void __init init_sparc64_elf_hwcap(void) | |||
532 | sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || | 534 | sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || |
533 | sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || | 535 | sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || |
534 | sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || | 536 | sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || |
537 | sun4v_chip_type == SUN4V_CHIP_SPARC_SN || | ||
535 | sun4v_chip_type == SUN4V_CHIP_SPARC64X) | 538 | sun4v_chip_type == SUN4V_CHIP_SPARC64X) |
536 | cap |= HWCAP_SPARC_N2; | 539 | cap |= HWCAP_SPARC_N2; |
537 | } | 540 | } |
@@ -561,6 +564,7 @@ static void __init init_sparc64_elf_hwcap(void) | |||
561 | sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || | 564 | sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || |
562 | sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || | 565 | sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || |
563 | sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || | 566 | sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || |
567 | sun4v_chip_type == SUN4V_CHIP_SPARC_SN || | ||
564 | sun4v_chip_type == SUN4V_CHIP_SPARC64X) | 568 | sun4v_chip_type == SUN4V_CHIP_SPARC64X) |
565 | cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 | | 569 | cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 | |
566 | AV_SPARC_ASI_BLK_INIT | | 570 | AV_SPARC_ASI_BLK_INIT | |
@@ -570,6 +574,7 @@ static void __init init_sparc64_elf_hwcap(void) | |||
570 | sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || | 574 | sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || |
571 | sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || | 575 | sun4v_chip_type == SUN4V_CHIP_SPARC_M6 || |
572 | sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || | 576 | sun4v_chip_type == SUN4V_CHIP_SPARC_M7 || |
577 | sun4v_chip_type == SUN4V_CHIP_SPARC_SN || | ||
573 | sun4v_chip_type == SUN4V_CHIP_SPARC64X) | 578 | sun4v_chip_type == SUN4V_CHIP_SPARC64X) |
574 | cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC | | 579 | cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC | |
575 | AV_SPARC_FMAF); | 580 | AV_SPARC_FMAF); |
diff --git a/arch/sparc/kernel/spiterrs.S b/arch/sparc/kernel/spiterrs.S index c357e40ffd01..4a73009f66a5 100644 --- a/arch/sparc/kernel/spiterrs.S +++ b/arch/sparc/kernel/spiterrs.S | |||
@@ -85,8 +85,7 @@ __spitfire_cee_trap_continue: | |||
85 | ba,pt %xcc, etraptl1 | 85 | ba,pt %xcc, etraptl1 |
86 | rd %pc, %g7 | 86 | rd %pc, %g7 |
87 | 87 | ||
88 | ba,pt %xcc, 2f | 88 | ba,a,pt %xcc, 2f |
89 | nop | ||
90 | 89 | ||
91 | 1: ba,pt %xcc, etrap_irq | 90 | 1: ba,pt %xcc, etrap_irq |
92 | rd %pc, %g7 | 91 | rd %pc, %g7 |
@@ -100,8 +99,7 @@ __spitfire_cee_trap_continue: | |||
100 | mov %l5, %o2 | 99 | mov %l5, %o2 |
101 | call spitfire_access_error | 100 | call spitfire_access_error |
102 | add %sp, PTREGS_OFF, %o0 | 101 | add %sp, PTREGS_OFF, %o0 |
103 | ba,pt %xcc, rtrap | 102 | ba,a,pt %xcc, rtrap |
104 | nop | ||
105 | .size __spitfire_access_error,.-__spitfire_access_error | 103 | .size __spitfire_access_error,.-__spitfire_access_error |
106 | 104 | ||
107 | /* This is the trap handler entry point for ECC correctable | 105 | /* This is the trap handler entry point for ECC correctable |
@@ -179,8 +177,7 @@ __spitfire_data_access_exception_tl1: | |||
179 | mov %l5, %o2 | 177 | mov %l5, %o2 |
180 | call spitfire_data_access_exception_tl1 | 178 | call spitfire_data_access_exception_tl1 |
181 | add %sp, PTREGS_OFF, %o0 | 179 | add %sp, PTREGS_OFF, %o0 |
182 | ba,pt %xcc, rtrap | 180 | ba,a,pt %xcc, rtrap |
183 | nop | ||
184 | .size __spitfire_data_access_exception_tl1,.-__spitfire_data_access_exception_tl1 | 181 | .size __spitfire_data_access_exception_tl1,.-__spitfire_data_access_exception_tl1 |
185 | 182 | ||
186 | .type __spitfire_data_access_exception,#function | 183 | .type __spitfire_data_access_exception,#function |
@@ -200,8 +197,7 @@ __spitfire_data_access_exception: | |||
200 | mov %l5, %o2 | 197 | mov %l5, %o2 |
201 | call spitfire_data_access_exception | 198 | call spitfire_data_access_exception |
202 | add %sp, PTREGS_OFF, %o0 | 199 | add %sp, PTREGS_OFF, %o0 |
203 | ba,pt %xcc, rtrap | 200 | ba,a,pt %xcc, rtrap |
204 | nop | ||
205 | .size __spitfire_data_access_exception,.-__spitfire_data_access_exception | 201 | .size __spitfire_data_access_exception,.-__spitfire_data_access_exception |
206 | 202 | ||
207 | .type __spitfire_insn_access_exception_tl1,#function | 203 | .type __spitfire_insn_access_exception_tl1,#function |
@@ -220,8 +216,7 @@ __spitfire_insn_access_exception_tl1: | |||
220 | mov %l5, %o2 | 216 | mov %l5, %o2 |
221 | call spitfire_insn_access_exception_tl1 | 217 | call spitfire_insn_access_exception_tl1 |
222 | add %sp, PTREGS_OFF, %o0 | 218 | add %sp, PTREGS_OFF, %o0 |
223 | ba,pt %xcc, rtrap | 219 | ba,a,pt %xcc, rtrap |
224 | nop | ||
225 | .size __spitfire_insn_access_exception_tl1,.-__spitfire_insn_access_exception_tl1 | 220 | .size __spitfire_insn_access_exception_tl1,.-__spitfire_insn_access_exception_tl1 |
226 | 221 | ||
227 | .type __spitfire_insn_access_exception,#function | 222 | .type __spitfire_insn_access_exception,#function |
@@ -240,6 +235,5 @@ __spitfire_insn_access_exception: | |||
240 | mov %l5, %o2 | 235 | mov %l5, %o2 |
241 | call spitfire_insn_access_exception | 236 | call spitfire_insn_access_exception |
242 | add %sp, PTREGS_OFF, %o0 | 237 | add %sp, PTREGS_OFF, %o0 |
243 | ba,pt %xcc, rtrap | 238 | ba,a,pt %xcc, rtrap |
244 | nop | ||
245 | .size __spitfire_insn_access_exception,.-__spitfire_insn_access_exception | 239 | .size __spitfire_insn_access_exception,.-__spitfire_insn_access_exception |
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S index 6c3dd6c52f8b..eac7f0db5c8c 100644 --- a/arch/sparc/kernel/systbls_32.S +++ b/arch/sparc/kernel/systbls_32.S | |||
@@ -88,4 +88,4 @@ sys_call_table: | |||
88 | /*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr | 88 | /*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr |
89 | /*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf | 89 | /*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf |
90 | /*350*/ .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen | 90 | /*350*/ .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen |
91 | /*355*/ .long sys_setsockopt, sys_mlock2, sys_copy_file_range | 91 | /*355*/ .long sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2 |
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S index 12b524cfcfa0..b0f17ff2ddba 100644 --- a/arch/sparc/kernel/systbls_64.S +++ b/arch/sparc/kernel/systbls_64.S | |||
@@ -89,7 +89,7 @@ sys_call_table32: | |||
89 | /*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr | 89 | /*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr |
90 | .word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf | 90 | .word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf |
91 | /*350*/ .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen | 91 | /*350*/ .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen |
92 | .word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range | 92 | .word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range, compat_sys_preadv2, compat_sys_pwritev2 |
93 | 93 | ||
94 | #endif /* CONFIG_COMPAT */ | 94 | #endif /* CONFIG_COMPAT */ |
95 | 95 | ||
@@ -170,4 +170,4 @@ sys_call_table: | |||
170 | /*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr | 170 | /*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr |
171 | .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf | 171 | .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf |
172 | /*350*/ .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen | 172 | /*350*/ .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen |
173 | .word sys_setsockopt, sys_mlock2, sys_copy_file_range | 173 | .word sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2 |
diff --git a/arch/sparc/kernel/utrap.S b/arch/sparc/kernel/utrap.S index b7f0f3f3a909..c731e8023d3e 100644 --- a/arch/sparc/kernel/utrap.S +++ b/arch/sparc/kernel/utrap.S | |||
@@ -11,8 +11,7 @@ utrap_trap: /* %g3=handler,%g4=level */ | |||
11 | mov %l4, %o1 | 11 | mov %l4, %o1 |
12 | call bad_trap | 12 | call bad_trap |
13 | add %sp, PTREGS_OFF, %o0 | 13 | add %sp, PTREGS_OFF, %o0 |
14 | ba,pt %xcc, rtrap | 14 | ba,a,pt %xcc, rtrap |
15 | nop | ||
16 | 15 | ||
17 | invoke_utrap: | 16 | invoke_utrap: |
18 | sllx %g3, 3, %g3 | 17 | sllx %g3, 3, %g3 |
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c index cb5789c9f961..f6bb857254fc 100644 --- a/arch/sparc/kernel/vio.c +++ b/arch/sparc/kernel/vio.c | |||
@@ -45,6 +45,14 @@ static const struct vio_device_id *vio_match_device( | |||
45 | return NULL; | 45 | return NULL; |
46 | } | 46 | } |
47 | 47 | ||
48 | static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env) | ||
49 | { | ||
50 | const struct vio_dev *vio_dev = to_vio_dev(dev); | ||
51 | |||
52 | add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, vio_dev->compat); | ||
53 | return 0; | ||
54 | } | ||
55 | |||
48 | static int vio_bus_match(struct device *dev, struct device_driver *drv) | 56 | static int vio_bus_match(struct device *dev, struct device_driver *drv) |
49 | { | 57 | { |
50 | struct vio_dev *vio_dev = to_vio_dev(dev); | 58 | struct vio_dev *vio_dev = to_vio_dev(dev); |
@@ -105,15 +113,25 @@ static ssize_t type_show(struct device *dev, | |||
105 | return sprintf(buf, "%s\n", vdev->type); | 113 | return sprintf(buf, "%s\n", vdev->type); |
106 | } | 114 | } |
107 | 115 | ||
116 | static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, | ||
117 | char *buf) | ||
118 | { | ||
119 | const struct vio_dev *vdev = to_vio_dev(dev); | ||
120 | |||
121 | return sprintf(buf, "vio:T%sS%s\n", vdev->type, vdev->compat); | ||
122 | } | ||
123 | |||
108 | static struct device_attribute vio_dev_attrs[] = { | 124 | static struct device_attribute vio_dev_attrs[] = { |
109 | __ATTR_RO(devspec), | 125 | __ATTR_RO(devspec), |
110 | __ATTR_RO(type), | 126 | __ATTR_RO(type), |
127 | __ATTR_RO(modalias), | ||
111 | __ATTR_NULL | 128 | __ATTR_NULL |
112 | }; | 129 | }; |
113 | 130 | ||
114 | static struct bus_type vio_bus_type = { | 131 | static struct bus_type vio_bus_type = { |
115 | .name = "vio", | 132 | .name = "vio", |
116 | .dev_attrs = vio_dev_attrs, | 133 | .dev_attrs = vio_dev_attrs, |
134 | .uevent = vio_hotplug, | ||
117 | .match = vio_bus_match, | 135 | .match = vio_bus_match, |
118 | .probe = vio_device_probe, | 136 | .probe = vio_device_probe, |
119 | .remove = vio_device_remove, | 137 | .remove = vio_device_remove, |
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index aadd321aa05d..7d02b1fef025 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S | |||
@@ -33,6 +33,10 @@ ENTRY(_start) | |||
33 | jiffies = jiffies_64; | 33 | jiffies = jiffies_64; |
34 | #endif | 34 | #endif |
35 | 35 | ||
36 | #ifdef CONFIG_SPARC64 | ||
37 | ASSERT((swapper_tsb == 0x0000000000408000), "Error: sparc64 early assembler too large") | ||
38 | #endif | ||
39 | |||
36 | SECTIONS | 40 | SECTIONS |
37 | { | 41 | { |
38 | #ifdef CONFIG_SPARC64 | 42 | #ifdef CONFIG_SPARC64 |
diff --git a/arch/sparc/kernel/winfixup.S b/arch/sparc/kernel/winfixup.S index 1e67ce958369..855019a8590e 100644 --- a/arch/sparc/kernel/winfixup.S +++ b/arch/sparc/kernel/winfixup.S | |||
@@ -32,8 +32,7 @@ fill_fixup: | |||
32 | rd %pc, %g7 | 32 | rd %pc, %g7 |
33 | call do_sparc64_fault | 33 | call do_sparc64_fault |
34 | add %sp, PTREGS_OFF, %o0 | 34 | add %sp, PTREGS_OFF, %o0 |
35 | ba,pt %xcc, rtrap | 35 | ba,a,pt %xcc, rtrap |
36 | nop | ||
37 | 36 | ||
38 | /* Be very careful about usage of the trap globals here. | 37 | /* Be very careful about usage of the trap globals here. |
39 | * You cannot touch %g5 as that has the fault information. | 38 | * You cannot touch %g5 as that has the fault information. |
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 1cfe6aab7a11..09e838801e39 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c | |||
@@ -1769,6 +1769,7 @@ static void __init setup_page_offset(void) | |||
1769 | max_phys_bits = 47; | 1769 | max_phys_bits = 47; |
1770 | break; | 1770 | break; |
1771 | case SUN4V_CHIP_SPARC_M7: | 1771 | case SUN4V_CHIP_SPARC_M7: |
1772 | case SUN4V_CHIP_SPARC_SN: | ||
1772 | default: | 1773 | default: |
1773 | /* M7 and later support 52-bit virtual addresses. */ | 1774 | /* M7 and later support 52-bit virtual addresses. */ |
1774 | sparc64_va_hole_top = 0xfff8000000000000UL; | 1775 | sparc64_va_hole_top = 0xfff8000000000000UL; |
@@ -1986,6 +1987,7 @@ static void __init sun4v_linear_pte_xor_finalize(void) | |||
1986 | */ | 1987 | */ |
1987 | switch (sun4v_chip_type) { | 1988 | switch (sun4v_chip_type) { |
1988 | case SUN4V_CHIP_SPARC_M7: | 1989 | case SUN4V_CHIP_SPARC_M7: |
1990 | case SUN4V_CHIP_SPARC_SN: | ||
1989 | pagecv_flag = 0x00; | 1991 | pagecv_flag = 0x00; |
1990 | break; | 1992 | break; |
1991 | default: | 1993 | default: |
@@ -2138,6 +2140,7 @@ void __init paging_init(void) | |||
2138 | */ | 2140 | */ |
2139 | switch (sun4v_chip_type) { | 2141 | switch (sun4v_chip_type) { |
2140 | case SUN4V_CHIP_SPARC_M7: | 2142 | case SUN4V_CHIP_SPARC_M7: |
2143 | case SUN4V_CHIP_SPARC_SN: | ||
2141 | page_cache4v_flag = _PAGE_CP_4V; | 2144 | page_cache4v_flag = _PAGE_CP_4V; |
2142 | break; | 2145 | break; |
2143 | default: | 2146 | default: |
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index 86a9bec18dab..bd3e8421b57c 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c | |||
@@ -115,7 +115,7 @@ static __initconst const u64 amd_hw_cache_event_ids | |||
115 | /* | 115 | /* |
116 | * AMD Performance Monitor K7 and later. | 116 | * AMD Performance Monitor K7 and later. |
117 | */ | 117 | */ |
118 | static const u64 amd_perfmon_event_map[] = | 118 | static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] = |
119 | { | 119 | { |
120 | [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, | 120 | [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, |
121 | [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, | 121 | [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, |
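Sizing amd_perfmon_event_map with PERF_COUNT_HW_MAX (above) guarantees the array covers every generic event index and that entries not listed stay zero, so an unsupported event maps to 0 instead of reading past the end of the table. A self-contained illustration of why the explicit bound matters:

	#include <stdio.h>

	enum hw_event { EV_CYCLES, EV_INSTRUCTIONS, EV_CACHE_MISSES, EV_MAX };

	static const unsigned long event_map[EV_MAX] = {
		[EV_CYCLES]       = 0x0076,
		[EV_INSTRUCTIONS] = 0x00c0,
		/* EV_CACHE_MISSES intentionally omitted -> initialised to 0 */
	};

	int main(void)
	{
		printf("%#lx\n", event_map[EV_CACHE_MISSES]);   /* prints 0 */
		return 0;
	}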
diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c index 40625ca7a190..6011a573dd64 100644 --- a/arch/x86/events/amd/iommu.c +++ b/arch/x86/events/amd/iommu.c | |||
@@ -474,6 +474,7 @@ static __init int _init_perf_amd_iommu( | |||
474 | 474 | ||
475 | static struct perf_amd_iommu __perf_iommu = { | 475 | static struct perf_amd_iommu __perf_iommu = { |
476 | .pmu = { | 476 | .pmu = { |
477 | .task_ctx_nr = perf_invalid_context, | ||
477 | .event_init = perf_iommu_event_init, | 478 | .event_init = perf_iommu_event_init, |
478 | .add = perf_iommu_add, | 479 | .add = perf_iommu_add, |
479 | .del = perf_iommu_del, | 480 | .del = perf_iommu_del, |
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 68fa55b4d42e..a6fd4dbcf820 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c | |||
@@ -3637,8 +3637,11 @@ __init int intel_pmu_init(void) | |||
3637 | pr_cont("Knights Landing events, "); | 3637 | pr_cont("Knights Landing events, "); |
3638 | break; | 3638 | break; |
3639 | 3639 | ||
3640 | case 142: /* 14nm Kabylake Mobile */ | ||
3641 | case 158: /* 14nm Kabylake Desktop */ | ||
3640 | case 78: /* 14nm Skylake Mobile */ | 3642 | case 78: /* 14nm Skylake Mobile */ |
3641 | case 94: /* 14nm Skylake Desktop */ | 3643 | case 94: /* 14nm Skylake Desktop */ |
3644 | case 85: /* 14nm Skylake Server */ | ||
3642 | x86_pmu.late_ack = true; | 3645 | x86_pmu.late_ack = true; |
3643 | memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids)); | 3646 | memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids)); |
3644 | memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); | 3647 | memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); |
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index 6c3b7c1780c9..1ca5d1e7d4f2 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c | |||
@@ -63,7 +63,7 @@ static enum { | |||
63 | 63 | ||
64 | #define LBR_PLM (LBR_KERNEL | LBR_USER) | 64 | #define LBR_PLM (LBR_KERNEL | LBR_USER) |
65 | 65 | ||
66 | #define LBR_SEL_MASK 0x1ff /* valid bits in LBR_SELECT */ | 66 | #define LBR_SEL_MASK 0x3ff /* valid bits in LBR_SELECT */ |
67 | #define LBR_NOT_SUPP -1 /* LBR filter not supported */ | 67 | #define LBR_NOT_SUPP -1 /* LBR filter not supported */ |
68 | #define LBR_IGN 0 /* ignored */ | 68 | #define LBR_IGN 0 /* ignored */ |
69 | 69 | ||
@@ -610,8 +610,10 @@ static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event) | |||
610 | * The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate | 610 | * The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate |
611 | * in suppress mode. So LBR_SELECT should be set to | 611 | * in suppress mode. So LBR_SELECT should be set to |
612 | * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK) | 612 | * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK) |
613 | * But the 10th bit LBR_CALL_STACK does not operate | ||
614 | * in suppress mode. | ||
613 | */ | 615 | */ |
614 | reg->config = mask ^ x86_pmu.lbr_sel_mask; | 616 | reg->config = mask ^ (x86_pmu.lbr_sel_mask & ~LBR_CALL_STACK); |
615 | 617 | ||
616 | if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) && | 618 | if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) && |
617 | (br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) && | 619 | (br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) && |
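The new comment and mask above say that the suppress-mode bits (the low nine) must be inverted before being written to LBR_SELECT, while the call-stack bit passes through unchanged. A small worked example; the concrete bit positions are assumed from the 0x3ff/0x200 values implied by the hunk and are for illustration only:

	#include <stdio.h>

	#define LBR_SEL_MASK   0x3ffUL          /* all valid LBR_SELECT bits      */
	#define LBR_CALL_STACK 0x200UL          /* bit 9: not a suppress-mode bit */

	int main(void)
	{
		unsigned long mask = 0x202UL;   /* one suppress-mode bit + call-stack */
		unsigned long config = mask ^ (LBR_SEL_MASK & ~LBR_CALL_STACK);

		/* low nine bits inverted, bit 9 passed through unchanged */
		printf("config = %#lx\n", config);      /* prints 0x3fd */
		return 0;
	}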
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index 6af7cf71d6b2..09a77dbc73c9 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c | |||
@@ -136,9 +136,21 @@ static int __init pt_pmu_hw_init(void) | |||
136 | struct dev_ext_attribute *de_attrs; | 136 | struct dev_ext_attribute *de_attrs; |
137 | struct attribute **attrs; | 137 | struct attribute **attrs; |
138 | size_t size; | 138 | size_t size; |
139 | u64 reg; | ||
139 | int ret; | 140 | int ret; |
140 | long i; | 141 | long i; |
141 | 142 | ||
143 | if (boot_cpu_has(X86_FEATURE_VMX)) { | ||
144 | /* | ||
145 | * Intel SDM, 36.5 "Tracing post-VMXON" says that | ||
146 | * "IA32_VMX_MISC[bit 14]" being 1 means PT can trace | ||
147 | * post-VMXON. | ||
148 | */ | ||
149 | rdmsrl(MSR_IA32_VMX_MISC, reg); | ||
150 | if (reg & BIT(14)) | ||
151 | pt_pmu.vmx = true; | ||
152 | } | ||
153 | |||
142 | attrs = NULL; | 154 | attrs = NULL; |
143 | 155 | ||
144 | for (i = 0; i < PT_CPUID_LEAVES; i++) { | 156 | for (i = 0; i < PT_CPUID_LEAVES; i++) { |
@@ -269,20 +281,23 @@ static void pt_config(struct perf_event *event) | |||
269 | 281 | ||
270 | reg |= (event->attr.config & PT_CONFIG_MASK); | 282 | reg |= (event->attr.config & PT_CONFIG_MASK); |
271 | 283 | ||
284 | event->hw.config = reg; | ||
272 | wrmsrl(MSR_IA32_RTIT_CTL, reg); | 285 | wrmsrl(MSR_IA32_RTIT_CTL, reg); |
273 | } | 286 | } |
274 | 287 | ||
275 | static void pt_config_start(bool start) | 288 | static void pt_config_stop(struct perf_event *event) |
276 | { | 289 | { |
277 | u64 ctl; | 290 | u64 ctl = READ_ONCE(event->hw.config); |
291 | |||
292 | /* may be already stopped by a PMI */ | ||
293 | if (!(ctl & RTIT_CTL_TRACEEN)) | ||
294 | return; | ||
278 | 295 | ||
279 | rdmsrl(MSR_IA32_RTIT_CTL, ctl); | 296 | ctl &= ~RTIT_CTL_TRACEEN; |
280 | if (start) | ||
281 | ctl |= RTIT_CTL_TRACEEN; | ||
282 | else | ||
283 | ctl &= ~RTIT_CTL_TRACEEN; | ||
284 | wrmsrl(MSR_IA32_RTIT_CTL, ctl); | 297 | wrmsrl(MSR_IA32_RTIT_CTL, ctl); |
285 | 298 | ||
299 | WRITE_ONCE(event->hw.config, ctl); | ||
300 | |||
286 | /* | 301 | /* |
287 | * A wrmsr that disables trace generation serializes other PT | 302 | * A wrmsr that disables trace generation serializes other PT |
288 | * registers and causes all data packets to be written to memory, | 303 | * registers and causes all data packets to be written to memory, |
@@ -291,8 +306,7 @@ static void pt_config_start(bool start) | |||
291 | * The below WMB, separating data store and aux_head store matches | 306 | * The below WMB, separating data store and aux_head store matches |
292 | * the consumer's RMB that separates aux_head load and data load. | 307 | * the consumer's RMB that separates aux_head load and data load. |
293 | */ | 308 | */ |
294 | if (!start) | 309 | wmb(); |
295 | wmb(); | ||
296 | } | 310 | } |
297 | 311 | ||
298 | static void pt_config_buffer(void *buf, unsigned int topa_idx, | 312 | static void pt_config_buffer(void *buf, unsigned int topa_idx, |
@@ -942,11 +956,17 @@ void intel_pt_interrupt(void) | |||
942 | if (!ACCESS_ONCE(pt->handle_nmi)) | 956 | if (!ACCESS_ONCE(pt->handle_nmi)) |
943 | return; | 957 | return; |
944 | 958 | ||
945 | pt_config_start(false); | 959 | /* |
960 | * If VMX is on and PT does not support it, don't touch anything. | ||
961 | */ | ||
962 | if (READ_ONCE(pt->vmx_on)) | ||
963 | return; | ||
946 | 964 | ||
947 | if (!event) | 965 | if (!event) |
948 | return; | 966 | return; |
949 | 967 | ||
968 | pt_config_stop(event); | ||
969 | |||
950 | buf = perf_get_aux(&pt->handle); | 970 | buf = perf_get_aux(&pt->handle); |
951 | if (!buf) | 971 | if (!buf) |
952 | return; | 972 | return; |
@@ -983,6 +1003,35 @@ void intel_pt_interrupt(void) | |||
983 | } | 1003 | } |
984 | } | 1004 | } |
985 | 1005 | ||
1006 | void intel_pt_handle_vmx(int on) | ||
1007 | { | ||
1008 | struct pt *pt = this_cpu_ptr(&pt_ctx); | ||
1009 | struct perf_event *event; | ||
1010 | unsigned long flags; | ||
1011 | |||
1012 | /* PT plays nice with VMX, do nothing */ | ||
1013 | if (pt_pmu.vmx) | ||
1014 | return; | ||
1015 | |||
1016 | /* | ||
1017 | * VMXON will clear RTIT_CTL.TraceEn; we need to make | ||
1018 | * sure to not try to set it while VMX is on. Disable | ||
1019 | * interrupts to avoid racing with pmu callbacks; | ||
1020 | * concurrent PMI should be handled fine. | ||
1021 | */ | ||
1022 | local_irq_save(flags); | ||
1023 | WRITE_ONCE(pt->vmx_on, on); | ||
1024 | |||
1025 | if (on) { | ||
1026 | /* prevent pt_config_stop() from writing RTIT_CTL */ | ||
1027 | event = pt->handle.event; | ||
1028 | if (event) | ||
1029 | event->hw.config = 0; | ||
1030 | } | ||
1031 | local_irq_restore(flags); | ||
1032 | } | ||
1033 | EXPORT_SYMBOL_GPL(intel_pt_handle_vmx); | ||
1034 | |||
986 | /* | 1035 | /* |
987 | * PMU callbacks | 1036 | * PMU callbacks |
988 | */ | 1037 | */ |
@@ -992,6 +1041,9 @@ static void pt_event_start(struct perf_event *event, int mode) | |||
992 | struct pt *pt = this_cpu_ptr(&pt_ctx); | 1041 | struct pt *pt = this_cpu_ptr(&pt_ctx); |
993 | struct pt_buffer *buf = perf_get_aux(&pt->handle); | 1042 | struct pt_buffer *buf = perf_get_aux(&pt->handle); |
994 | 1043 | ||
1044 | if (READ_ONCE(pt->vmx_on)) | ||
1045 | return; | ||
1046 | |||
995 | if (!buf || pt_buffer_is_full(buf, pt)) { | 1047 | if (!buf || pt_buffer_is_full(buf, pt)) { |
996 | event->hw.state = PERF_HES_STOPPED; | 1048 | event->hw.state = PERF_HES_STOPPED; |
997 | return; | 1049 | return; |
@@ -1014,7 +1066,8 @@ static void pt_event_stop(struct perf_event *event, int mode) | |||
1014 | * see comment in intel_pt_interrupt(). | 1066 | * see comment in intel_pt_interrupt(). |
1015 | */ | 1067 | */ |
1016 | ACCESS_ONCE(pt->handle_nmi) = 0; | 1068 | ACCESS_ONCE(pt->handle_nmi) = 0; |
1017 | pt_config_start(false); | 1069 | |
1070 | pt_config_stop(event); | ||
1018 | 1071 | ||
1019 | if (event->hw.state == PERF_HES_STOPPED) | 1072 | if (event->hw.state == PERF_HES_STOPPED) |
1020 | return; | 1073 | return; |
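The exported intel_pt_handle_vmx() above is meant to be called by the VMX host side around VMXON/VMXOFF so that, on parts where PT cannot trace across VMX transitions, the PMU stops touching RTIT_CTL while VMX is on. A hedged sketch of the calling convention; the helpers around the hook are hypothetical stand-ins, not the actual KVM code:

	/* Sketch only: bracketing VMX entry/exit with the new hook. */
	static void example_enter_vmx_operation(void)
	{
		intel_pt_handle_vmx(1);  /* from now on PT must not write RTIT_CTL */
		hw_vmxon();              /* hypothetical helper that executes VMXON */
	}

	static void example_leave_vmx_operation(void)
	{
		hw_vmxoff();             /* hypothetical helper that executes VMXOFF */
		intel_pt_handle_vmx(0);  /* PT may program RTIT_CTL again */
	}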
diff --git a/arch/x86/events/intel/pt.h b/arch/x86/events/intel/pt.h index 336878a5d205..3abb5f5cccc8 100644 --- a/arch/x86/events/intel/pt.h +++ b/arch/x86/events/intel/pt.h | |||
@@ -65,6 +65,7 @@ enum pt_capabilities { | |||
65 | struct pt_pmu { | 65 | struct pt_pmu { |
66 | struct pmu pmu; | 66 | struct pmu pmu; |
67 | u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES]; | 67 | u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES]; |
68 | bool vmx; | ||
68 | }; | 69 | }; |
69 | 70 | ||
70 | /** | 71 | /** |
@@ -107,10 +108,12 @@ struct pt_buffer { | |||
107 | * struct pt - per-cpu pt context | 108 | * struct pt - per-cpu pt context |
108 | * @handle: perf output handle | 109 | * @handle: perf output handle |
109 | * @handle_nmi: do handle PT PMI on this cpu, there's an active event | 110 | * @handle_nmi: do handle PT PMI on this cpu, there's an active event |
111 | * @vmx_on: 1 if VMX is ON on this cpu | ||
110 | */ | 112 | */ |
111 | struct pt { | 113 | struct pt { |
112 | struct perf_output_handle handle; | 114 | struct perf_output_handle handle; |
113 | int handle_nmi; | 115 | int handle_nmi; |
116 | int vmx_on; | ||
114 | }; | 117 | }; |
115 | 118 | ||
116 | #endif /* __INTEL_PT_H__ */ | 119 | #endif /* __INTEL_PT_H__ */ |
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c index 70c93f9b03ac..1705c9d75e44 100644 --- a/arch/x86/events/intel/rapl.c +++ b/arch/x86/events/intel/rapl.c | |||
@@ -718,6 +718,7 @@ static int __init rapl_pmu_init(void) | |||
718 | break; | 718 | break; |
719 | case 60: /* Haswell */ | 719 | case 60: /* Haswell */ |
720 | case 69: /* Haswell-Celeron */ | 720 | case 69: /* Haswell-Celeron */ |
721 | case 70: /* Haswell GT3e */ | ||
721 | case 61: /* Broadwell */ | 722 | case 61: /* Broadwell */ |
722 | case 71: /* Broadwell-H */ | 723 | case 71: /* Broadwell-H */ |
723 | rapl_cntr_mask = RAPL_IDX_HSW; | 724 | rapl_cntr_mask = RAPL_IDX_HSW; |
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 5a2ed3ed2f26..f353061bba1d 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h | |||
@@ -285,6 +285,10 @@ static inline void perf_events_lapic_init(void) { } | |||
285 | static inline void perf_check_microcode(void) { } | 285 | static inline void perf_check_microcode(void) { } |
286 | #endif | 286 | #endif |
287 | 287 | ||
288 | #ifdef CONFIG_CPU_SUP_INTEL | ||
289 | extern void intel_pt_handle_vmx(int on); | ||
290 | #endif | ||
291 | |||
288 | #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) | 292 | #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) |
289 | extern void amd_pmu_enable_virt(void); | 293 | extern void amd_pmu_enable_virt(void); |
290 | extern void amd_pmu_disable_virt(void); | 294 | extern void amd_pmu_disable_virt(void); |
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index ad59d70bcb1a..ef495511f019 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c | |||
@@ -256,7 +256,8 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data) | |||
256 | struct irq_desc *desc; | 256 | struct irq_desc *desc; |
257 | int cpu, vector; | 257 | int cpu, vector; |
258 | 258 | ||
259 | BUG_ON(!data->cfg.vector); | 259 | if (!data->cfg.vector) |
260 | return; | ||
260 | 261 | ||
261 | vector = data->cfg.vector; | 262 | vector = data->cfg.vector; |
262 | for_each_cpu_and(cpu, data->domain, cpu_online_mask) | 263 | for_each_cpu_and(cpu, data->domain, cpu_online_mask) |
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 8f4942e2bcbb..d7ce96a7daca 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c | |||
@@ -891,9 +891,7 @@ void __init uv_system_init(void) | |||
891 | } | 891 | } |
892 | pr_info("UV: Found %s hub\n", hub); | 892 | pr_info("UV: Found %s hub\n", hub); |
893 | 893 | ||
894 | /* We now only need to map the MMRs on UV1 */ | 894 | map_low_mmrs(); |
895 | if (is_uv1_hub()) | ||
896 | map_low_mmrs(); | ||
897 | 895 | ||
898 | m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR ); | 896 | m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR ); |
899 | m_val = m_n_config.s.m_skt; | 897 | m_val = m_n_config.s.m_skt; |
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 54cdbd2003fe..af1112980dd4 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S | |||
@@ -389,12 +389,6 @@ default_entry: | |||
389 | /* Make changes effective */ | 389 | /* Make changes effective */ |
390 | wrmsr | 390 | wrmsr |
391 | 391 | ||
392 | /* | ||
393 | * And make sure that all the mappings we set up have NX set from | ||
394 | * the beginning. | ||
395 | */ | ||
396 | orl $(1 << (_PAGE_BIT_NX - 32)), pa(__supported_pte_mask + 4) | ||
397 | |||
398 | enable_paging: | 392 | enable_paging: |
399 | 393 | ||
400 | /* | 394 | /* |
diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c index e21a8a7ddcff..623965e86b65 100644 --- a/arch/x86/kernel/sysfb_efi.c +++ b/arch/x86/kernel/sysfb_efi.c | |||
@@ -121,14 +121,24 @@ static int __init efifb_set_system(const struct dmi_system_id *id) | |||
121 | continue; | 121 | continue; |
122 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { | 122 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { |
123 | resource_size_t start, end; | 123 | resource_size_t start, end; |
124 | unsigned long flags; | ||
125 | |||
126 | flags = pci_resource_flags(dev, i); | ||
127 | if (!(flags & IORESOURCE_MEM)) | ||
128 | continue; | ||
129 | |||
130 | if (flags & IORESOURCE_UNSET) | ||
131 | continue; | ||
132 | |||
133 | if (pci_resource_len(dev, i) == 0) | ||
134 | continue; | ||
124 | 135 | ||
125 | start = pci_resource_start(dev, i); | 136 | start = pci_resource_start(dev, i); |
126 | if (start == 0) | ||
127 | break; | ||
128 | end = pci_resource_end(dev, i); | 137 | end = pci_resource_end(dev, i); |
129 | if (screen_info.lfb_base >= start && | 138 | if (screen_info.lfb_base >= start && |
130 | screen_info.lfb_base < end) { | 139 | screen_info.lfb_base < end) { |
131 | found_bar = 1; | 140 | found_bar = 1; |
141 | break; | ||
132 | } | 142 | } |
133 | } | 143 | } |
134 | } | 144 | } |
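The rewritten loop no longer stops at the first BAR whose start address is zero; it skips I/O, unassigned and zero-length resources and keeps scanning, breaking only once the framebuffer BAR is matched. The validity test it applies can be summarised by a small helper (hypothetical name, same checks as the patch):

static bool example_bar_usable(struct pci_dev *dev, int i)
{
	unsigned long flags = pci_resource_flags(dev, i);

	return (flags & IORESOURCE_MEM) &&	/* memory BAR ...       */
	       !(flags & IORESOURCE_UNSET) &&	/* ... that is assigned */
	       pci_resource_len(dev, i) != 0;	/* ... and has a length */
}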
diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c index 92ae6acac8a7..6aa0f4d9eea6 100644 --- a/arch/x86/kernel/tsc_msr.c +++ b/arch/x86/kernel/tsc_msr.c | |||
@@ -92,7 +92,7 @@ unsigned long try_msr_calibrate_tsc(void) | |||
92 | 92 | ||
93 | if (freq_desc_tables[cpu_index].msr_plat) { | 93 | if (freq_desc_tables[cpu_index].msr_plat) { |
94 | rdmsr(MSR_PLATFORM_INFO, lo, hi); | 94 | rdmsr(MSR_PLATFORM_INFO, lo, hi); |
95 | ratio = (lo >> 8) & 0x1f; | 95 | ratio = (lo >> 8) & 0xff; |
96 | } else { | 96 | } else { |
97 | rdmsr(MSR_IA32_PERF_STATUS, lo, hi); | 97 | rdmsr(MSR_IA32_PERF_STATUS, lo, hi); |
98 | ratio = (hi >> 8) & 0x1f; | 98 | ratio = (hi >> 8) & 0x1f; |
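The widened mask matters because MSR_PLATFORM_INFO carries a full eight-bit maximum non-turbo ratio in bits 15:8, so "& 0x1f" silently truncates any ratio of 32 or more. A hypothetical helper with made-up numbers shows the effect (the bus frequency below is illustrative, not taken from the patch):

static unsigned long example_plat_tsc_khz(u32 lo)
{
	unsigned int ratio = (lo >> 8) & 0xff;	/* full 8-bit ratio */
	unsigned long fsb_khz = 83333;		/* ~83.3 MHz bus clock */

	/* A ratio of 36 gives ~3 GHz; masking with 0x1f would read 4. */
	return ratio * fsb_khz;
}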
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 1ff4dbb73fb7..b6f50e8b0a39 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -2823,7 +2823,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, | |||
2823 | */ | 2823 | */ |
2824 | if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) && | 2824 | if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) && |
2825 | level == PT_PAGE_TABLE_LEVEL && | 2825 | level == PT_PAGE_TABLE_LEVEL && |
2826 | PageTransCompound(pfn_to_page(pfn)) && | 2826 | PageTransCompoundMap(pfn_to_page(pfn)) && |
2827 | !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) { | 2827 | !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) { |
2828 | unsigned long mask; | 2828 | unsigned long mask; |
2829 | /* | 2829 | /* |
@@ -4785,7 +4785,7 @@ restart: | |||
4785 | */ | 4785 | */ |
4786 | if (sp->role.direct && | 4786 | if (sp->role.direct && |
4787 | !kvm_is_reserved_pfn(pfn) && | 4787 | !kvm_is_reserved_pfn(pfn) && |
4788 | PageTransCompound(pfn_to_page(pfn))) { | 4788 | PageTransCompoundMap(pfn_to_page(pfn))) { |
4789 | drop_spte(kvm, sptep); | 4789 | drop_spte(kvm, sptep); |
4790 | need_tlb_flush = 1; | 4790 | need_tlb_flush = 1; |
4791 | goto restart; | 4791 | goto restart; |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index ee1c8a93871c..133679d520af 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -3103,6 +3103,8 @@ static __init int vmx_disabled_by_bios(void) | |||
3103 | 3103 | ||
3104 | static void kvm_cpu_vmxon(u64 addr) | 3104 | static void kvm_cpu_vmxon(u64 addr) |
3105 | { | 3105 | { |
3106 | intel_pt_handle_vmx(1); | ||
3107 | |||
3106 | asm volatile (ASM_VMX_VMXON_RAX | 3108 | asm volatile (ASM_VMX_VMXON_RAX |
3107 | : : "a"(&addr), "m"(addr) | 3109 | : : "a"(&addr), "m"(addr) |
3108 | : "memory", "cc"); | 3110 | : "memory", "cc"); |
@@ -3172,6 +3174,8 @@ static void vmclear_local_loaded_vmcss(void) | |||
3172 | static void kvm_cpu_vmxoff(void) | 3174 | static void kvm_cpu_vmxoff(void) |
3173 | { | 3175 | { |
3174 | asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc"); | 3176 | asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc"); |
3177 | |||
3178 | intel_pt_handle_vmx(0); | ||
3175 | } | 3179 | } |
3176 | 3180 | ||
3177 | static void hardware_disable(void) | 3181 | static void hardware_disable(void) |
diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c index 8bea84724a7d..f65a33f505b6 100644 --- a/arch/x86/mm/setup_nx.c +++ b/arch/x86/mm/setup_nx.c | |||
@@ -32,8 +32,9 @@ early_param("noexec", noexec_setup); | |||
32 | 32 | ||
33 | void x86_configure_nx(void) | 33 | void x86_configure_nx(void) |
34 | { | 34 | { |
35 | /* If disable_nx is set, clear NX on all new mappings going forward. */ | 35 | if (boot_cpu_has(X86_FEATURE_NX) && !disable_nx) |
36 | if (disable_nx) | 36 | __supported_pte_mask |= _PAGE_NX; |
37 | else | ||
37 | __supported_pte_mask &= ~_PAGE_NX; | 38 | __supported_pte_mask &= ~_PAGE_NX; |
38 | } | 39 | } |
39 | 40 | ||
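Adjusting _PAGE_NX in __supported_pte_mask is enough because later page-table construction filters every requested protection through that mask, which is also why the early NX write removed from head_32.S above is no longer needed. A minimal sketch of that filtering idea (illustrative helper, not a quote of the mm code):

/* Flags a caller asks for are intersected with the global mask, so a
 * cleared _PAGE_NX here keeps NX out of every mapping built later. */
static inline pteval_t example_filter_pte_flags(pteval_t flags)
{
	return flags & __supported_pte_mask;
}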
diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c index a2433817c987..6a2f5691b1ab 100644 --- a/arch/x86/platform/efi/efi-bgrt.c +++ b/arch/x86/platform/efi/efi-bgrt.c | |||
@@ -43,40 +43,40 @@ void __init efi_bgrt_init(void) | |||
43 | return; | 43 | return; |
44 | 44 | ||
45 | if (bgrt_tab->header.length < sizeof(*bgrt_tab)) { | 45 | if (bgrt_tab->header.length < sizeof(*bgrt_tab)) { |
46 | pr_err("Ignoring BGRT: invalid length %u (expected %zu)\n", | 46 | pr_notice("Ignoring BGRT: invalid length %u (expected %zu)\n", |
47 | bgrt_tab->header.length, sizeof(*bgrt_tab)); | 47 | bgrt_tab->header.length, sizeof(*bgrt_tab)); |
48 | return; | 48 | return; |
49 | } | 49 | } |
50 | if (bgrt_tab->version != 1) { | 50 | if (bgrt_tab->version != 1) { |
51 | pr_err("Ignoring BGRT: invalid version %u (expected 1)\n", | 51 | pr_notice("Ignoring BGRT: invalid version %u (expected 1)\n", |
52 | bgrt_tab->version); | 52 | bgrt_tab->version); |
53 | return; | 53 | return; |
54 | } | 54 | } |
55 | if (bgrt_tab->status & 0xfe) { | 55 | if (bgrt_tab->status & 0xfe) { |
56 | pr_err("Ignoring BGRT: reserved status bits are non-zero %u\n", | 56 | pr_notice("Ignoring BGRT: reserved status bits are non-zero %u\n", |
57 | bgrt_tab->status); | 57 | bgrt_tab->status); |
58 | return; | 58 | return; |
59 | } | 59 | } |
60 | if (bgrt_tab->image_type != 0) { | 60 | if (bgrt_tab->image_type != 0) { |
61 | pr_err("Ignoring BGRT: invalid image type %u (expected 0)\n", | 61 | pr_notice("Ignoring BGRT: invalid image type %u (expected 0)\n", |
62 | bgrt_tab->image_type); | 62 | bgrt_tab->image_type); |
63 | return; | 63 | return; |
64 | } | 64 | } |
65 | if (!bgrt_tab->image_address) { | 65 | if (!bgrt_tab->image_address) { |
66 | pr_err("Ignoring BGRT: null image address\n"); | 66 | pr_notice("Ignoring BGRT: null image address\n"); |
67 | return; | 67 | return; |
68 | } | 68 | } |
69 | 69 | ||
70 | image = memremap(bgrt_tab->image_address, sizeof(bmp_header), MEMREMAP_WB); | 70 | image = memremap(bgrt_tab->image_address, sizeof(bmp_header), MEMREMAP_WB); |
71 | if (!image) { | 71 | if (!image) { |
72 | pr_err("Ignoring BGRT: failed to map image header memory\n"); | 72 | pr_notice("Ignoring BGRT: failed to map image header memory\n"); |
73 | return; | 73 | return; |
74 | } | 74 | } |
75 | 75 | ||
76 | memcpy(&bmp_header, image, sizeof(bmp_header)); | 76 | memcpy(&bmp_header, image, sizeof(bmp_header)); |
77 | memunmap(image); | 77 | memunmap(image); |
78 | if (bmp_header.id != 0x4d42) { | 78 | if (bmp_header.id != 0x4d42) { |
79 | pr_err("Ignoring BGRT: Incorrect BMP magic number 0x%x (expected 0x4d42)\n", | 79 | pr_notice("Ignoring BGRT: Incorrect BMP magic number 0x%x (expected 0x4d42)\n", |
80 | bmp_header.id); | 80 | bmp_header.id); |
81 | return; | 81 | return; |
82 | } | 82 | } |
@@ -84,14 +84,14 @@ void __init efi_bgrt_init(void) | |||
84 | 84 | ||
85 | bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL | __GFP_NOWARN); | 85 | bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL | __GFP_NOWARN); |
86 | if (!bgrt_image) { | 86 | if (!bgrt_image) { |
87 | pr_err("Ignoring BGRT: failed to allocate memory for image (wanted %zu bytes)\n", | 87 | pr_notice("Ignoring BGRT: failed to allocate memory for image (wanted %zu bytes)\n", |
88 | bgrt_image_size); | 88 | bgrt_image_size); |
89 | return; | 89 | return; |
90 | } | 90 | } |
91 | 91 | ||
92 | image = memremap(bgrt_tab->image_address, bmp_header.size, MEMREMAP_WB); | 92 | image = memremap(bgrt_tab->image_address, bmp_header.size, MEMREMAP_WB); |
93 | if (!image) { | 93 | if (!image) { |
94 | pr_err("Ignoring BGRT: failed to map image memory\n"); | 94 | pr_notice("Ignoring BGRT: failed to map image memory\n"); |
95 | kfree(bgrt_image); | 95 | kfree(bgrt_image); |
96 | bgrt_image = NULL; | 96 | bgrt_image = NULL; |
97 | return; | 97 | return; |
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index 9e2ba5c6e1dd..f42e78de1e10 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c | |||
@@ -27,6 +27,12 @@ static bool xen_pvspin = true; | |||
27 | 27 | ||
28 | static void xen_qlock_kick(int cpu) | 28 | static void xen_qlock_kick(int cpu) |
29 | { | 29 | { |
30 | int irq = per_cpu(lock_kicker_irq, cpu); | ||
31 | |||
32 | /* Don't kick if the target's kicker interrupt is not initialized. */ | ||
33 | if (irq == -1) | ||
34 | return; | ||
35 | |||
30 | xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR); | 36 | xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR); |
31 | } | 37 | } |
32 | 38 | ||
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c index 1982310e6d83..da198b864107 100644 --- a/drivers/acpi/acpica/dsmethod.c +++ b/drivers/acpi/acpica/dsmethod.c | |||
@@ -428,6 +428,9 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node, | |||
428 | obj_desc->method.mutex->mutex. | 428 | obj_desc->method.mutex->mutex. |
429 | original_sync_level = | 429 | original_sync_level = |
430 | obj_desc->method.mutex->mutex.sync_level; | 430 | obj_desc->method.mutex->mutex.sync_level; |
431 | |||
432 | obj_desc->method.mutex->mutex.thread_id = | ||
433 | acpi_os_get_thread_id(); | ||
431 | } | 434 | } |
432 | } | 435 | } |
433 | 436 | ||
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c index d0f35e63640b..63cc9dbe4f3b 100644 --- a/drivers/acpi/nfit.c +++ b/drivers/acpi/nfit.c | |||
@@ -287,8 +287,11 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, | |||
287 | offset); | 287 | offset); |
288 | rc = -ENXIO; | 288 | rc = -ENXIO; |
289 | } | 289 | } |
290 | } else | 290 | } else { |
291 | rc = 0; | 291 | rc = 0; |
292 | if (cmd_rc) | ||
293 | *cmd_rc = xlat_status(buf, cmd); | ||
294 | } | ||
292 | 295 | ||
293 | out: | 296 | out: |
294 | ACPI_FREE(out_obj); | 297 | ACPI_FREE(out_obj); |
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c index 433b60092972..d8f4cc22856c 100644 --- a/drivers/base/power/opp/core.c +++ b/drivers/base/power/opp/core.c | |||
@@ -259,9 +259,6 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) | |||
259 | reg = opp_table->regulator; | 259 | reg = opp_table->regulator; |
260 | if (IS_ERR(reg)) { | 260 | if (IS_ERR(reg)) { |
261 | /* Regulator may not be required for device */ | 261 | /* Regulator may not be required for device */ |
262 | if (reg) | ||
263 | dev_err(dev, "%s: Invalid regulator (%ld)\n", __func__, | ||
264 | PTR_ERR(reg)); | ||
265 | rcu_read_unlock(); | 262 | rcu_read_unlock(); |
266 | return 0; | 263 | return 0; |
267 | } | 264 | } |
diff --git a/drivers/base/property.c b/drivers/base/property.c index 9b1a65debd49..7f692accdc90 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c | |||
@@ -21,7 +21,7 @@ | |||
21 | 21 | ||
22 | static inline bool is_pset_node(struct fwnode_handle *fwnode) | 22 | static inline bool is_pset_node(struct fwnode_handle *fwnode) |
23 | { | 23 | { |
24 | return fwnode && fwnode->type == FWNODE_PDATA; | 24 | return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_PDATA; |
25 | } | 25 | } |
26 | 26 | ||
27 | static inline struct property_set *to_pset_node(struct fwnode_handle *fwnode) | 27 | static inline struct property_set *to_pset_node(struct fwnode_handle *fwnode) |
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 94a1843b0426..0ede6d7e2568 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c | |||
@@ -538,7 +538,6 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id, | |||
538 | u8 *order, u64 *snap_size); | 538 | u8 *order, u64 *snap_size); |
539 | static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id, | 539 | static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id, |
540 | u64 *snap_features); | 540 | u64 *snap_features); |
541 | static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name); | ||
542 | 541 | ||
543 | static int rbd_open(struct block_device *bdev, fmode_t mode) | 542 | static int rbd_open(struct block_device *bdev, fmode_t mode) |
544 | { | 543 | { |
@@ -3127,9 +3126,6 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) | |||
3127 | struct rbd_device *rbd_dev = (struct rbd_device *)data; | 3126 | struct rbd_device *rbd_dev = (struct rbd_device *)data; |
3128 | int ret; | 3127 | int ret; |
3129 | 3128 | ||
3130 | if (!rbd_dev) | ||
3131 | return; | ||
3132 | |||
3133 | dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__, | 3129 | dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__, |
3134 | rbd_dev->header_name, (unsigned long long)notify_id, | 3130 | rbd_dev->header_name, (unsigned long long)notify_id, |
3135 | (unsigned int)opcode); | 3131 | (unsigned int)opcode); |
@@ -3263,6 +3259,9 @@ static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev) | |||
3263 | 3259 | ||
3264 | ceph_osdc_cancel_event(rbd_dev->watch_event); | 3260 | ceph_osdc_cancel_event(rbd_dev->watch_event); |
3265 | rbd_dev->watch_event = NULL; | 3261 | rbd_dev->watch_event = NULL; |
3262 | |||
3263 | dout("%s flushing notifies\n", __func__); | ||
3264 | ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc); | ||
3266 | } | 3265 | } |
3267 | 3266 | ||
3268 | /* | 3267 | /* |
@@ -3642,21 +3641,14 @@ static void rbd_exists_validate(struct rbd_device *rbd_dev) | |||
3642 | static void rbd_dev_update_size(struct rbd_device *rbd_dev) | 3641 | static void rbd_dev_update_size(struct rbd_device *rbd_dev) |
3643 | { | 3642 | { |
3644 | sector_t size; | 3643 | sector_t size; |
3645 | bool removing; | ||
3646 | 3644 | ||
3647 | /* | 3645 | /* |
3648 | * Don't hold the lock while doing disk operations, | 3646 | * If EXISTS is not set, rbd_dev->disk may be NULL, so don't |
3649 | * or lock ordering will conflict with the bdev mutex via: | 3647 | * try to update its size. If REMOVING is set, updating size |
3650 | * rbd_add() -> blkdev_get() -> rbd_open() | 3648 | * is just useless work since the device can't be opened. |
3651 | */ | 3649 | */ |
3652 | spin_lock_irq(&rbd_dev->lock); | 3650 | if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) && |
3653 | removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags); | 3651 | !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) { |
3654 | spin_unlock_irq(&rbd_dev->lock); | ||
3655 | /* | ||
3656 | * If the device is being removed, rbd_dev->disk has | ||
3657 | * been destroyed, so don't try to update its size | ||
3658 | */ | ||
3659 | if (!removing) { | ||
3660 | size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE; | 3652 | size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE; |
3661 | dout("setting size to %llu sectors", (unsigned long long)size); | 3653 | dout("setting size to %llu sectors", (unsigned long long)size); |
3662 | set_capacity(rbd_dev->disk, size); | 3654 | set_capacity(rbd_dev->disk, size); |
@@ -4191,7 +4183,7 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id, | |||
4191 | __le64 features; | 4183 | __le64 features; |
4192 | __le64 incompat; | 4184 | __le64 incompat; |
4193 | } __attribute__ ((packed)) features_buf = { 0 }; | 4185 | } __attribute__ ((packed)) features_buf = { 0 }; |
4194 | u64 incompat; | 4186 | u64 unsup; |
4195 | int ret; | 4187 | int ret; |
4196 | 4188 | ||
4197 | ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, | 4189 | ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, |
@@ -4204,9 +4196,12 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id, | |||
4204 | if (ret < sizeof (features_buf)) | 4196 | if (ret < sizeof (features_buf)) |
4205 | return -ERANGE; | 4197 | return -ERANGE; |
4206 | 4198 | ||
4207 | incompat = le64_to_cpu(features_buf.incompat); | 4199 | unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED; |
4208 | if (incompat & ~RBD_FEATURES_SUPPORTED) | 4200 | if (unsup) { |
4201 | rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx", | ||
4202 | unsup); | ||
4209 | return -ENXIO; | 4203 | return -ENXIO; |
4204 | } | ||
4210 | 4205 | ||
4211 | *snap_features = le64_to_cpu(features_buf.features); | 4206 | *snap_features = le64_to_cpu(features_buf.features); |
4212 | 4207 | ||
@@ -5187,6 +5182,10 @@ out_err: | |||
5187 | return ret; | 5182 | return ret; |
5188 | } | 5183 | } |
5189 | 5184 | ||
5185 | /* | ||
5186 | * rbd_dev->header_rwsem must be locked for write and will be unlocked | ||
5187 | * upon return. | ||
5188 | */ | ||
5190 | static int rbd_dev_device_setup(struct rbd_device *rbd_dev) | 5189 | static int rbd_dev_device_setup(struct rbd_device *rbd_dev) |
5191 | { | 5190 | { |
5192 | int ret; | 5191 | int ret; |
@@ -5195,7 +5194,7 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev) | |||
5195 | 5194 | ||
5196 | ret = rbd_dev_id_get(rbd_dev); | 5195 | ret = rbd_dev_id_get(rbd_dev); |
5197 | if (ret) | 5196 | if (ret) |
5198 | return ret; | 5197 | goto err_out_unlock; |
5199 | 5198 | ||
5200 | BUILD_BUG_ON(DEV_NAME_LEN | 5199 | BUILD_BUG_ON(DEV_NAME_LEN |
5201 | < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH); | 5200 | < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH); |
@@ -5236,8 +5235,9 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev) | |||
5236 | /* Everything's ready. Announce the disk to the world. */ | 5235 | /* Everything's ready. Announce the disk to the world. */ |
5237 | 5236 | ||
5238 | set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); | 5237 | set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); |
5239 | add_disk(rbd_dev->disk); | 5238 | up_write(&rbd_dev->header_rwsem); |
5240 | 5239 | ||
5240 | add_disk(rbd_dev->disk); | ||
5241 | pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name, | 5241 | pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name, |
5242 | (unsigned long long) rbd_dev->mapping.size); | 5242 | (unsigned long long) rbd_dev->mapping.size); |
5243 | 5243 | ||
@@ -5252,6 +5252,8 @@ err_out_blkdev: | |||
5252 | unregister_blkdev(rbd_dev->major, rbd_dev->name); | 5252 | unregister_blkdev(rbd_dev->major, rbd_dev->name); |
5253 | err_out_id: | 5253 | err_out_id: |
5254 | rbd_dev_id_put(rbd_dev); | 5254 | rbd_dev_id_put(rbd_dev); |
5255 | err_out_unlock: | ||
5256 | up_write(&rbd_dev->header_rwsem); | ||
5255 | return ret; | 5257 | return ret; |
5256 | } | 5258 | } |
5257 | 5259 | ||
@@ -5442,6 +5444,7 @@ static ssize_t do_rbd_add(struct bus_type *bus, | |||
5442 | spec = NULL; /* rbd_dev now owns this */ | 5444 | spec = NULL; /* rbd_dev now owns this */ |
5443 | rbd_opts = NULL; /* rbd_dev now owns this */ | 5445 | rbd_opts = NULL; /* rbd_dev now owns this */ |
5444 | 5446 | ||
5447 | down_write(&rbd_dev->header_rwsem); | ||
5445 | rc = rbd_dev_image_probe(rbd_dev, 0); | 5448 | rc = rbd_dev_image_probe(rbd_dev, 0); |
5446 | if (rc < 0) | 5449 | if (rc < 0) |
5447 | goto err_out_rbd_dev; | 5450 | goto err_out_rbd_dev; |
@@ -5471,6 +5474,7 @@ out: | |||
5471 | return rc; | 5474 | return rc; |
5472 | 5475 | ||
5473 | err_out_rbd_dev: | 5476 | err_out_rbd_dev: |
5477 | up_write(&rbd_dev->header_rwsem); | ||
5474 | rbd_dev_destroy(rbd_dev); | 5478 | rbd_dev_destroy(rbd_dev); |
5475 | err_out_client: | 5479 | err_out_client: |
5476 | rbd_put_client(rbdc); | 5480 | rbd_put_client(rbdc); |
@@ -5577,12 +5581,6 @@ static ssize_t do_rbd_remove(struct bus_type *bus, | |||
5577 | return ret; | 5581 | return ret; |
5578 | 5582 | ||
5579 | rbd_dev_header_unwatch_sync(rbd_dev); | 5583 | rbd_dev_header_unwatch_sync(rbd_dev); |
5580 | /* | ||
5581 | * flush remaining watch callbacks - these must be complete | ||
5582 | * before the osd_client is shutdown | ||
5583 | */ | ||
5584 | dout("%s: flushing notifies", __func__); | ||
5585 | ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc); | ||
5586 | 5584 | ||
5587 | /* | 5585 | /* |
5588 | * Don't free anything from rbd_dev->disk until after all | 5586 | * Don't free anything from rbd_dev->disk until after all |
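Taken together, the rbd hunks move the locking of header_rwsem out to do_rbd_add(): the semaphore is taken for write before the image probe and released by rbd_dev_device_setup() just before add_disk(), so the rbd_open() path triggered by add_disk() can acquire it without deadlocking. A condensed sketch of the resulting flow (error labels elided; not a literal excerpt):

	down_write(&rbd_dev->header_rwsem);

	rc = rbd_dev_image_probe(rbd_dev, 0);
	if (rc < 0)
		goto err_out_rbd_dev;	/* label now drops the semaphore */

	/* Releases header_rwsem on every path: right before add_disk()
	 * on success, via err_out_unlock on failure. */
	rc = rbd_dev_device_setup(rbd_dev);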
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c index 02e18182fcb5..2beb396fe652 100644 --- a/drivers/clk/imx/clk-imx6q.c +++ b/drivers/clk/imx/clk-imx6q.c | |||
@@ -394,7 +394,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) | |||
394 | clk[IMX6QDL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1", 2, 7); | 394 | clk[IMX6QDL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1", 2, 7); |
395 | } else { | 395 | } else { |
396 | clk[IMX6QDL_CLK_ECSPI_ROOT] = imx_clk_divider("ecspi_root", "pll3_60m", base + 0x38, 19, 6); | 396 | clk[IMX6QDL_CLK_ECSPI_ROOT] = imx_clk_divider("ecspi_root", "pll3_60m", base + 0x38, 19, 6); |
397 | clk[IMX6QDL_CLK_CAN_ROOT] = imx_clk_divider("can_root", "pll3_60", base + 0x20, 2, 6); | 397 | clk[IMX6QDL_CLK_CAN_ROOT] = imx_clk_divider("can_root", "pll3_60m", base + 0x20, 2, 6); |
398 | clk[IMX6QDL_CLK_IPG_PER] = imx_clk_fixup_divider("ipg_per", "ipg", base + 0x1c, 0, 6, imx_cscmr1_fixup); | 398 | clk[IMX6QDL_CLK_IPG_PER] = imx_clk_fixup_divider("ipg_per", "ipg", base + 0x1c, 0, 6, imx_cscmr1_fixup); |
399 | clk[IMX6QDL_CLK_UART_SERIAL_PODF] = imx_clk_divider("uart_serial_podf", "pll3_80m", base + 0x24, 0, 6); | 399 | clk[IMX6QDL_CLK_UART_SERIAL_PODF] = imx_clk_divider("uart_serial_podf", "pll3_80m", base + 0x24, 0, 6); |
400 | clk[IMX6QDL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7); | 400 | clk[IMX6QDL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7); |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index e93405f0eac4..c4acfc5273b3 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -1557,21 +1557,25 @@ void cpufreq_suspend(void) | |||
1557 | if (!cpufreq_driver) | 1557 | if (!cpufreq_driver) |
1558 | return; | 1558 | return; |
1559 | 1559 | ||
1560 | if (!has_target()) | 1560 | if (!has_target() && !cpufreq_driver->suspend) |
1561 | goto suspend; | 1561 | goto suspend; |
1562 | 1562 | ||
1563 | pr_debug("%s: Suspending Governors\n", __func__); | 1563 | pr_debug("%s: Suspending Governors\n", __func__); |
1564 | 1564 | ||
1565 | for_each_active_policy(policy) { | 1565 | for_each_active_policy(policy) { |
1566 | down_write(&policy->rwsem); | 1566 | if (has_target()) { |
1567 | ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP); | 1567 | down_write(&policy->rwsem); |
1568 | up_write(&policy->rwsem); | 1568 | ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP); |
1569 | up_write(&policy->rwsem); | ||
1569 | 1570 | ||
1570 | if (ret) | 1571 | if (ret) { |
1571 | pr_err("%s: Failed to stop governor for policy: %p\n", | 1572 | pr_err("%s: Failed to stop governor for policy: %p\n", |
1572 | __func__, policy); | 1573 | __func__, policy); |
1573 | else if (cpufreq_driver->suspend | 1574 | continue; |
1574 | && cpufreq_driver->suspend(policy)) | 1575 | } |
1576 | } | ||
1577 | |||
1578 | if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy)) | ||
1575 | pr_err("%s: Failed to suspend driver: %p\n", __func__, | 1579 | pr_err("%s: Failed to suspend driver: %p\n", __func__, |
1576 | policy); | 1580 | policy); |
1577 | } | 1581 | } |
@@ -1596,7 +1600,7 @@ void cpufreq_resume(void) | |||
1596 | 1600 | ||
1597 | cpufreq_suspended = false; | 1601 | cpufreq_suspended = false; |
1598 | 1602 | ||
1599 | if (!has_target()) | 1603 | if (!has_target() && !cpufreq_driver->resume) |
1600 | return; | 1604 | return; |
1601 | 1605 | ||
1602 | pr_debug("%s: Resuming Governors\n", __func__); | 1606 | pr_debug("%s: Resuming Governors\n", __func__); |
@@ -1605,7 +1609,7 @@ void cpufreq_resume(void) | |||
1605 | if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) { | 1609 | if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) { |
1606 | pr_err("%s: Failed to resume driver: %p\n", __func__, | 1610 | pr_err("%s: Failed to resume driver: %p\n", __func__, |
1607 | policy); | 1611 | policy); |
1608 | } else { | 1612 | } else if (has_target()) { |
1609 | down_write(&policy->rwsem); | 1613 | down_write(&policy->rwsem); |
1610 | ret = cpufreq_start_governor(policy); | 1614 | ret = cpufreq_start_governor(policy); |
1611 | up_write(&policy->rwsem); | 1615 | up_write(&policy->rwsem); |
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 10a5cfeae8c5..5f1147fa9239 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c | |||
@@ -193,12 +193,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy) | |||
193 | wall_time = cur_wall_time - j_cdbs->prev_cpu_wall; | 193 | wall_time = cur_wall_time - j_cdbs->prev_cpu_wall; |
194 | j_cdbs->prev_cpu_wall = cur_wall_time; | 194 | j_cdbs->prev_cpu_wall = cur_wall_time; |
195 | 195 | ||
196 | if (cur_idle_time <= j_cdbs->prev_cpu_idle) { | 196 | idle_time = cur_idle_time - j_cdbs->prev_cpu_idle; |
197 | idle_time = 0; | 197 | j_cdbs->prev_cpu_idle = cur_idle_time; |
198 | } else { | ||
199 | idle_time = cur_idle_time - j_cdbs->prev_cpu_idle; | ||
200 | j_cdbs->prev_cpu_idle = cur_idle_time; | ||
201 | } | ||
202 | 198 | ||
203 | if (ignore_nice) { | 199 | if (ignore_nice) { |
204 | u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; | 200 | u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; |
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 30fe323c4551..b230ebaae66c 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
@@ -453,6 +453,14 @@ static void intel_pstate_hwp_set(const struct cpumask *cpumask) | |||
453 | } | 453 | } |
454 | } | 454 | } |
455 | 455 | ||
456 | static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy) | ||
457 | { | ||
458 | if (hwp_active) | ||
459 | intel_pstate_hwp_set(policy->cpus); | ||
460 | |||
461 | return 0; | ||
462 | } | ||
463 | |||
456 | static void intel_pstate_hwp_set_online_cpus(void) | 464 | static void intel_pstate_hwp_set_online_cpus(void) |
457 | { | 465 | { |
458 | get_online_cpus(); | 466 | get_online_cpus(); |
@@ -813,6 +821,11 @@ static int core_get_max_pstate(void) | |||
813 | if (err) | 821 | if (err) |
814 | goto skip_tar; | 822 | goto skip_tar; |
815 | 823 | ||
824 | /* For level 1 and 2, bits[23:16] contain the ratio */ | ||
825 | if (tdp_ctrl) | ||
826 | tdp_ratio >>= 16; | ||
827 | |||
828 | tdp_ratio &= 0xff; /* ratios are only 8 bits long */ | ||
816 | if (tdp_ratio - 1 == tar) { | 829 | if (tdp_ratio - 1 == tar) { |
817 | max_pstate = tar; | 830 | max_pstate = tar; |
818 | pr_debug("max_pstate=TAC %x\n", max_pstate); | 831 | pr_debug("max_pstate=TAC %x\n", max_pstate); |
@@ -1057,8 +1070,9 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time) | |||
1057 | 1070 | ||
1058 | static inline int32_t get_avg_frequency(struct cpudata *cpu) | 1071 | static inline int32_t get_avg_frequency(struct cpudata *cpu) |
1059 | { | 1072 | { |
1060 | return div64_u64(cpu->pstate.max_pstate_physical * cpu->sample.aperf * | 1073 | return fp_toint(mul_fp(cpu->sample.core_pct_busy, |
1061 | cpu->pstate.scaling, cpu->sample.mperf); | 1074 | int_tofp(cpu->pstate.max_pstate_physical * |
1075 | cpu->pstate.scaling / 100))); | ||
1062 | } | 1076 | } |
1063 | 1077 | ||
1064 | static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu) | 1078 | static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu) |
@@ -1101,8 +1115,6 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu) | |||
1101 | int32_t core_busy, max_pstate, current_pstate, sample_ratio; | 1115 | int32_t core_busy, max_pstate, current_pstate, sample_ratio; |
1102 | u64 duration_ns; | 1116 | u64 duration_ns; |
1103 | 1117 | ||
1104 | intel_pstate_calc_busy(cpu); | ||
1105 | |||
1106 | /* | 1118 | /* |
1107 | * core_busy is the ratio of actual performance to max | 1119 | * core_busy is the ratio of actual performance to max |
1108 | * max_pstate is the max non turbo pstate available | 1120 | * max_pstate is the max non turbo pstate available |
@@ -1186,8 +1198,11 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time, | |||
1186 | if ((s64)delta_ns >= pid_params.sample_rate_ns) { | 1198 | if ((s64)delta_ns >= pid_params.sample_rate_ns) { |
1187 | bool sample_taken = intel_pstate_sample(cpu, time); | 1199 | bool sample_taken = intel_pstate_sample(cpu, time); |
1188 | 1200 | ||
1189 | if (sample_taken && !hwp_active) | 1201 | if (sample_taken) { |
1190 | intel_pstate_adjust_busy_pstate(cpu); | 1202 | intel_pstate_calc_busy(cpu); |
1203 | if (!hwp_active) | ||
1204 | intel_pstate_adjust_busy_pstate(cpu); | ||
1205 | } | ||
1191 | } | 1206 | } |
1192 | } | 1207 | } |
1193 | 1208 | ||
@@ -1341,8 +1356,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) | |||
1341 | out: | 1356 | out: |
1342 | intel_pstate_set_update_util_hook(policy->cpu); | 1357 | intel_pstate_set_update_util_hook(policy->cpu); |
1343 | 1358 | ||
1344 | if (hwp_active) | 1359 | intel_pstate_hwp_set_policy(policy); |
1345 | intel_pstate_hwp_set(policy->cpus); | ||
1346 | 1360 | ||
1347 | return 0; | 1361 | return 0; |
1348 | } | 1362 | } |
@@ -1406,6 +1420,7 @@ static struct cpufreq_driver intel_pstate_driver = { | |||
1406 | .flags = CPUFREQ_CONST_LOOPS, | 1420 | .flags = CPUFREQ_CONST_LOOPS, |
1407 | .verify = intel_pstate_verify_policy, | 1421 | .verify = intel_pstate_verify_policy, |
1408 | .setpolicy = intel_pstate_set_policy, | 1422 | .setpolicy = intel_pstate_set_policy, |
1423 | .resume = intel_pstate_hwp_set_policy, | ||
1409 | .get = intel_pstate_get, | 1424 | .get = intel_pstate_get, |
1410 | .init = intel_pstate_cpu_init, | 1425 | .init = intel_pstate_cpu_init, |
1411 | .stop_cpu = intel_pstate_stop_cpu, | 1426 | .stop_cpu = intel_pstate_stop_cpu, |
diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c index a9c659f58974..04042038ec4b 100644 --- a/drivers/cpufreq/sti-cpufreq.c +++ b/drivers/cpufreq/sti-cpufreq.c | |||
@@ -259,6 +259,10 @@ static int sti_cpufreq_init(void) | |||
259 | { | 259 | { |
260 | int ret; | 260 | int ret; |
261 | 261 | ||
262 | if ((!of_machine_is_compatible("st,stih407")) && | ||
263 | (!of_machine_is_compatible("st,stih410"))) | ||
264 | return -ENODEV; | ||
265 | |||
262 | ddata.cpu = get_cpu_device(0); | 266 | ddata.cpu = get_cpu_device(0); |
263 | if (!ddata.cpu) { | 267 | if (!ddata.cpu) { |
264 | dev_err(ddata.cpu, "Failed to get device for CPU0\n"); | 268 | dev_err(ddata.cpu, "Failed to get device for CPU0\n"); |
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c index 545069d5fdfb..e342565e8715 100644 --- a/drivers/cpuidle/cpuidle-arm.c +++ b/drivers/cpuidle/cpuidle-arm.c | |||
@@ -50,7 +50,7 @@ static int arm_enter_idle_state(struct cpuidle_device *dev, | |||
50 | * call the CPU ops suspend protocol with idle index as a | 50 | * call the CPU ops suspend protocol with idle index as a |
51 | * parameter. | 51 | * parameter. |
52 | */ | 52 | */ |
53 | arm_cpuidle_suspend(idx); | 53 | ret = arm_cpuidle_suspend(idx); |
54 | 54 | ||
55 | cpu_pm_exit(); | 55 | cpu_pm_exit(); |
56 | } | 56 | } |
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index a0d4a08313ae..aae05547b924 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
@@ -63,6 +63,14 @@ static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr, | |||
63 | ptr->eptr = upper_32_bits(dma_addr); | 63 | ptr->eptr = upper_32_bits(dma_addr); |
64 | } | 64 | } |
65 | 65 | ||
66 | static void copy_talitos_ptr(struct talitos_ptr *dst_ptr, | ||
67 | struct talitos_ptr *src_ptr, bool is_sec1) | ||
68 | { | ||
69 | dst_ptr->ptr = src_ptr->ptr; | ||
70 | if (!is_sec1) | ||
71 | dst_ptr->eptr = src_ptr->eptr; | ||
72 | } | ||
73 | |||
66 | static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len, | 74 | static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len, |
67 | bool is_sec1) | 75 | bool is_sec1) |
68 | { | 76 | { |
@@ -1083,21 +1091,20 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
1083 | sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1, | 1091 | sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1, |
1084 | (areq->src == areq->dst) ? DMA_BIDIRECTIONAL | 1092 | (areq->src == areq->dst) ? DMA_BIDIRECTIONAL |
1085 | : DMA_TO_DEVICE); | 1093 | : DMA_TO_DEVICE); |
1086 | |||
1087 | /* hmac data */ | 1094 | /* hmac data */ |
1088 | desc->ptr[1].len = cpu_to_be16(areq->assoclen); | 1095 | desc->ptr[1].len = cpu_to_be16(areq->assoclen); |
1089 | if (sg_count > 1 && | 1096 | if (sg_count > 1 && |
1090 | (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0, | 1097 | (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0, |
1091 | areq->assoclen, | 1098 | areq->assoclen, |
1092 | &edesc->link_tbl[tbl_off])) > 1) { | 1099 | &edesc->link_tbl[tbl_off])) > 1) { |
1093 | tbl_off += ret; | ||
1094 | |||
1095 | to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off * | 1100 | to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off * |
1096 | sizeof(struct talitos_ptr), 0); | 1101 | sizeof(struct talitos_ptr), 0); |
1097 | desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP; | 1102 | desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP; |
1098 | 1103 | ||
1099 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, | 1104 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, |
1100 | edesc->dma_len, DMA_BIDIRECTIONAL); | 1105 | edesc->dma_len, DMA_BIDIRECTIONAL); |
1106 | |||
1107 | tbl_off += ret; | ||
1101 | } else { | 1108 | } else { |
1102 | to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0); | 1109 | to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0); |
1103 | desc->ptr[1].j_extent = 0; | 1110 | desc->ptr[1].j_extent = 0; |
@@ -1126,11 +1133,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
1126 | if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) | 1133 | if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV) |
1127 | sg_link_tbl_len += authsize; | 1134 | sg_link_tbl_len += authsize; |
1128 | 1135 | ||
1129 | if (sg_count > 1 && | 1136 | if (sg_count == 1) { |
1130 | (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen, | 1137 | to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) + |
1131 | sg_link_tbl_len, | 1138 | areq->assoclen, 0); |
1132 | &edesc->link_tbl[tbl_off])) > 1) { | 1139 | } else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count, |
1133 | tbl_off += ret; | 1140 | areq->assoclen, sg_link_tbl_len, |
1141 | &edesc->link_tbl[tbl_off])) > | ||
1142 | 1) { | ||
1134 | desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; | 1143 | desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; |
1135 | to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl + | 1144 | to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl + |
1136 | tbl_off * | 1145 | tbl_off * |
@@ -1138,8 +1147,10 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
1138 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, | 1147 | dma_sync_single_for_device(dev, edesc->dma_link_tbl, |
1139 | edesc->dma_len, | 1148 | edesc->dma_len, |
1140 | DMA_BIDIRECTIONAL); | 1149 | DMA_BIDIRECTIONAL); |
1141 | } else | 1150 | tbl_off += ret; |
1142 | to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0); | 1151 | } else { |
1152 | copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0); | ||
1153 | } | ||
1143 | 1154 | ||
1144 | /* cipher out */ | 1155 | /* cipher out */ |
1145 | desc->ptr[5].len = cpu_to_be16(cryptlen); | 1156 | desc->ptr[5].len = cpu_to_be16(cryptlen); |
@@ -1151,11 +1162,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
1151 | 1162 | ||
1152 | edesc->icv_ool = false; | 1163 | edesc->icv_ool = false; |
1153 | 1164 | ||
1154 | if (sg_count > 1 && | 1165 | if (sg_count == 1) { |
1155 | (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count, | 1166 | to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) + |
1167 | areq->assoclen, 0); | ||
1168 | } else if ((sg_count = | ||
1169 | sg_to_link_tbl_offset(areq->dst, sg_count, | ||
1156 | areq->assoclen, cryptlen, | 1170 | areq->assoclen, cryptlen, |
1157 | &edesc->link_tbl[tbl_off])) > | 1171 | &edesc->link_tbl[tbl_off])) > 1) { |
1158 | 1) { | ||
1159 | struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; | 1172 | struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; |
1160 | 1173 | ||
1161 | to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl + | 1174 | to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl + |
@@ -1178,8 +1191,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, | |||
1178 | edesc->dma_len, DMA_BIDIRECTIONAL); | 1191 | edesc->dma_len, DMA_BIDIRECTIONAL); |
1179 | 1192 | ||
1180 | edesc->icv_ool = true; | 1193 | edesc->icv_ool = true; |
1181 | } else | 1194 | } else { |
1182 | to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0); | 1195 | copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0); |
1196 | } | ||
1183 | 1197 | ||
1184 | /* iv out */ | 1198 | /* iv out */ |
1185 | map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, | 1199 | map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, |
@@ -2629,21 +2643,11 @@ struct talitos_crypto_alg { | |||
2629 | struct talitos_alg_template algt; | 2643 | struct talitos_alg_template algt; |
2630 | }; | 2644 | }; |
2631 | 2645 | ||
2632 | static int talitos_cra_init(struct crypto_tfm *tfm) | 2646 | static int talitos_init_common(struct talitos_ctx *ctx, |
2647 | struct talitos_crypto_alg *talitos_alg) | ||
2633 | { | 2648 | { |
2634 | struct crypto_alg *alg = tfm->__crt_alg; | ||
2635 | struct talitos_crypto_alg *talitos_alg; | ||
2636 | struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); | ||
2637 | struct talitos_private *priv; | 2649 | struct talitos_private *priv; |
2638 | 2650 | ||
2639 | if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH) | ||
2640 | talitos_alg = container_of(__crypto_ahash_alg(alg), | ||
2641 | struct talitos_crypto_alg, | ||
2642 | algt.alg.hash); | ||
2643 | else | ||
2644 | talitos_alg = container_of(alg, struct talitos_crypto_alg, | ||
2645 | algt.alg.crypto); | ||
2646 | |||
2647 | /* update context with ptr to dev */ | 2651 | /* update context with ptr to dev */ |
2648 | ctx->dev = talitos_alg->dev; | 2652 | ctx->dev = talitos_alg->dev; |
2649 | 2653 | ||
@@ -2661,10 +2665,33 @@ static int talitos_cra_init(struct crypto_tfm *tfm) | |||
2661 | return 0; | 2665 | return 0; |
2662 | } | 2666 | } |
2663 | 2667 | ||
2668 | static int talitos_cra_init(struct crypto_tfm *tfm) | ||
2669 | { | ||
2670 | struct crypto_alg *alg = tfm->__crt_alg; | ||
2671 | struct talitos_crypto_alg *talitos_alg; | ||
2672 | struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); | ||
2673 | |||
2674 | if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH) | ||
2675 | talitos_alg = container_of(__crypto_ahash_alg(alg), | ||
2676 | struct talitos_crypto_alg, | ||
2677 | algt.alg.hash); | ||
2678 | else | ||
2679 | talitos_alg = container_of(alg, struct talitos_crypto_alg, | ||
2680 | algt.alg.crypto); | ||
2681 | |||
2682 | return talitos_init_common(ctx, talitos_alg); | ||
2683 | } | ||
2684 | |||
2664 | static int talitos_cra_init_aead(struct crypto_aead *tfm) | 2685 | static int talitos_cra_init_aead(struct crypto_aead *tfm) |
2665 | { | 2686 | { |
2666 | talitos_cra_init(crypto_aead_tfm(tfm)); | 2687 | struct aead_alg *alg = crypto_aead_alg(tfm); |
2667 | return 0; | 2688 | struct talitos_crypto_alg *talitos_alg; |
2689 | struct talitos_ctx *ctx = crypto_aead_ctx(tfm); | ||
2690 | |||
2691 | talitos_alg = container_of(alg, struct talitos_crypto_alg, | ||
2692 | algt.alg.aead); | ||
2693 | |||
2694 | return talitos_init_common(ctx, talitos_alg); | ||
2668 | } | 2695 | } |
2669 | 2696 | ||
2670 | static int talitos_cra_init_ahash(struct crypto_tfm *tfm) | 2697 | static int talitos_cra_init_ahash(struct crypto_tfm *tfm) |
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index 01087a38da22..792bdae2b91d 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c | |||
@@ -1866,7 +1866,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val, | |||
1866 | 1866 | ||
1867 | i7_dev = get_i7core_dev(mce->socketid); | 1867 | i7_dev = get_i7core_dev(mce->socketid); |
1868 | if (!i7_dev) | 1868 | if (!i7_dev) |
1869 | return NOTIFY_BAD; | 1869 | return NOTIFY_DONE; |
1870 | 1870 | ||
1871 | mci = i7_dev->mci; | 1871 | mci = i7_dev->mci; |
1872 | pvt = mci->pvt_info; | 1872 | pvt = mci->pvt_info; |
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index 468447aff8eb..8bf745d2da7e 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c | |||
@@ -3168,7 +3168,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val, | |||
3168 | 3168 | ||
3169 | mci = get_mci_for_node_id(mce->socketid); | 3169 | mci = get_mci_for_node_id(mce->socketid); |
3170 | if (!mci) | 3170 | if (!mci) |
3171 | return NOTIFY_BAD; | 3171 | return NOTIFY_DONE; |
3172 | pvt = mci->pvt_info; | 3172 | pvt = mci->pvt_info; |
3173 | 3173 | ||
3174 | /* | 3174 | /* |
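Both EDAC fixes exist because NOTIFY_BAD aborts the remaining callbacks on the MCE notifier chain, so an event that merely belongs to another socket or driver must be answered with NOTIFY_DONE for later handlers to see it. A generic sketch of that convention (handler and predicate names are hypothetical):

static int example_mce_notify(struct notifier_block *nb,
			      unsigned long val, void *data)
{
	struct mce *mce = data;

	if (!example_event_is_ours(mce))	/* hypothetical predicate */
		return NOTIFY_DONE;		/* not ours; chain continues */

	example_decode_event(mce);		/* hypothetical decoder */
	return NOTIFY_OK;			/* handled; chain still runs */
}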
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c index 11bfee8b79a9..b5d05807e6ec 100644 --- a/drivers/firmware/psci.c +++ b/drivers/firmware/psci.c | |||
@@ -360,7 +360,7 @@ static struct cpuidle_ops psci_cpuidle_ops __initdata = { | |||
360 | .init = psci_dt_cpu_init_idle, | 360 | .init = psci_dt_cpu_init_idle, |
361 | }; | 361 | }; |
362 | 362 | ||
363 | CPUIDLE_METHOD_OF_DECLARE(psci, "arm,psci", &psci_cpuidle_ops); | 363 | CPUIDLE_METHOD_OF_DECLARE(psci, "psci", &psci_cpuidle_ops); |
364 | #endif | 364 | #endif |
365 | #endif | 365 | #endif |
366 | 366 | ||
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c index 815c4a5cae54..1b95475b6aef 100644 --- a/drivers/firmware/qemu_fw_cfg.c +++ b/drivers/firmware/qemu_fw_cfg.c | |||
@@ -77,7 +77,7 @@ static inline u16 fw_cfg_sel_endianness(u16 key) | |||
77 | static inline void fw_cfg_read_blob(u16 key, | 77 | static inline void fw_cfg_read_blob(u16 key, |
78 | void *buf, loff_t pos, size_t count) | 78 | void *buf, loff_t pos, size_t count) |
79 | { | 79 | { |
80 | u32 glk; | 80 | u32 glk = -1U; |
81 | acpi_status status; | 81 | acpi_status status; |
82 | 82 | ||
83 | /* If we have ACPI, ensure mutual exclusion against any potential | 83 | /* If we have ACPI, ensure mutual exclusion against any potential |
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c index d9ab0cd1d205..4d9a315cfd43 100644 --- a/drivers/gpio/gpio-rcar.c +++ b/drivers/gpio/gpio-rcar.c | |||
@@ -196,44 +196,6 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on) | |||
196 | return 0; | 196 | return 0; |
197 | } | 197 | } |
198 | 198 | ||
199 | static void gpio_rcar_irq_bus_lock(struct irq_data *d) | ||
200 | { | ||
201 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); | ||
202 | struct gpio_rcar_priv *p = gpiochip_get_data(gc); | ||
203 | |||
204 | pm_runtime_get_sync(&p->pdev->dev); | ||
205 | } | ||
206 | |||
207 | static void gpio_rcar_irq_bus_sync_unlock(struct irq_data *d) | ||
208 | { | ||
209 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); | ||
210 | struct gpio_rcar_priv *p = gpiochip_get_data(gc); | ||
211 | |||
212 | pm_runtime_put(&p->pdev->dev); | ||
213 | } | ||
214 | |||
215 | |||
216 | static int gpio_rcar_irq_request_resources(struct irq_data *d) | ||
217 | { | ||
218 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); | ||
219 | struct gpio_rcar_priv *p = gpiochip_get_data(gc); | ||
220 | int error; | ||
221 | |||
222 | error = pm_runtime_get_sync(&p->pdev->dev); | ||
223 | if (error < 0) | ||
224 | return error; | ||
225 | |||
226 | return 0; | ||
227 | } | ||
228 | |||
229 | static void gpio_rcar_irq_release_resources(struct irq_data *d) | ||
230 | { | ||
231 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); | ||
232 | struct gpio_rcar_priv *p = gpiochip_get_data(gc); | ||
233 | |||
234 | pm_runtime_put(&p->pdev->dev); | ||
235 | } | ||
236 | |||
237 | static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id) | 199 | static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id) |
238 | { | 200 | { |
239 | struct gpio_rcar_priv *p = dev_id; | 201 | struct gpio_rcar_priv *p = dev_id; |
@@ -280,32 +242,18 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip, | |||
280 | 242 | ||
281 | static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset) | 243 | static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset) |
282 | { | 244 | { |
283 | struct gpio_rcar_priv *p = gpiochip_get_data(chip); | 245 | return pinctrl_request_gpio(chip->base + offset); |
284 | int error; | ||
285 | |||
286 | error = pm_runtime_get_sync(&p->pdev->dev); | ||
287 | if (error < 0) | ||
288 | return error; | ||
289 | |||
290 | error = pinctrl_request_gpio(chip->base + offset); | ||
291 | if (error) | ||
292 | pm_runtime_put(&p->pdev->dev); | ||
293 | |||
294 | return error; | ||
295 | } | 246 | } |
296 | 247 | ||
297 | static void gpio_rcar_free(struct gpio_chip *chip, unsigned offset) | 248 | static void gpio_rcar_free(struct gpio_chip *chip, unsigned offset) |
298 | { | 249 | { |
299 | struct gpio_rcar_priv *p = gpiochip_get_data(chip); | ||
300 | |||
301 | pinctrl_free_gpio(chip->base + offset); | 250 | pinctrl_free_gpio(chip->base + offset); |
302 | 251 | ||
303 | /* Set the GPIO as an input to ensure that the next GPIO request won't | 252 | /* |
253 | * Set the GPIO as an input to ensure that the next GPIO request won't | ||
304 | * drive the GPIO pin as an output. | 254 | * drive the GPIO pin as an output. |
305 | */ | 255 | */ |
306 | gpio_rcar_config_general_input_output_mode(chip, offset, false); | 256 | gpio_rcar_config_general_input_output_mode(chip, offset, false); |
307 | |||
308 | pm_runtime_put(&p->pdev->dev); | ||
309 | } | 257 | } |
310 | 258 | ||
311 | static int gpio_rcar_direction_input(struct gpio_chip *chip, unsigned offset) | 259 | static int gpio_rcar_direction_input(struct gpio_chip *chip, unsigned offset) |
@@ -452,6 +400,7 @@ static int gpio_rcar_probe(struct platform_device *pdev) | |||
452 | } | 400 | } |
453 | 401 | ||
454 | pm_runtime_enable(dev); | 402 | pm_runtime_enable(dev); |
403 | pm_runtime_get_sync(dev); | ||
455 | 404 | ||
456 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 405 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
457 | irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 406 | irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
@@ -488,10 +437,6 @@ static int gpio_rcar_probe(struct platform_device *pdev) | |||
488 | irq_chip->irq_unmask = gpio_rcar_irq_enable; | 437 | irq_chip->irq_unmask = gpio_rcar_irq_enable; |
489 | irq_chip->irq_set_type = gpio_rcar_irq_set_type; | 438 | irq_chip->irq_set_type = gpio_rcar_irq_set_type; |
490 | irq_chip->irq_set_wake = gpio_rcar_irq_set_wake; | 439 | irq_chip->irq_set_wake = gpio_rcar_irq_set_wake; |
491 | irq_chip->irq_bus_lock = gpio_rcar_irq_bus_lock; | ||
492 | irq_chip->irq_bus_sync_unlock = gpio_rcar_irq_bus_sync_unlock; | ||
493 | irq_chip->irq_request_resources = gpio_rcar_irq_request_resources; | ||
494 | irq_chip->irq_release_resources = gpio_rcar_irq_release_resources; | ||
495 | irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND; | 440 | irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND; |
496 | 441 | ||
497 | ret = gpiochip_add_data(gpio_chip, p); | 442 | ret = gpiochip_add_data(gpio_chip, p); |
@@ -522,6 +467,7 @@ static int gpio_rcar_probe(struct platform_device *pdev) | |||
522 | err1: | 467 | err1: |
523 | gpiochip_remove(gpio_chip); | 468 | gpiochip_remove(gpio_chip); |
524 | err0: | 469 | err0: |
470 | pm_runtime_put(dev); | ||
525 | pm_runtime_disable(dev); | 471 | pm_runtime_disable(dev); |
526 | return ret; | 472 | return ret; |
527 | } | 473 | } |
@@ -532,6 +478,7 @@ static int gpio_rcar_remove(struct platform_device *pdev) | |||
532 | 478 | ||
533 | gpiochip_remove(&p->gpio_chip); | 479 | gpiochip_remove(&p->gpio_chip); |
534 | 480 | ||
481 | pm_runtime_put(&pdev->dev); | ||
535 | pm_runtime_disable(&pdev->dev); | 482 | pm_runtime_disable(&pdev->dev); |
536 | return 0; | 483 | return 0; |
537 | } | 484 | } |
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 682070d20f00..2dc52585e3f2 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c | |||
@@ -977,7 +977,7 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id) | |||
977 | lookup = kmalloc(sizeof(*lookup), GFP_KERNEL); | 977 | lookup = kmalloc(sizeof(*lookup), GFP_KERNEL); |
978 | if (lookup) { | 978 | if (lookup) { |
979 | lookup->adev = adev; | 979 | lookup->adev = adev; |
980 | lookup->con_id = con_id; | 980 | lookup->con_id = kstrdup(con_id, GFP_KERNEL); |
981 | list_add_tail(&lookup->node, &acpi_crs_lookup_list); | 981 | list_add_tail(&lookup->node, &acpi_crs_lookup_list); |
982 | } | 982 | } |
983 | } | 983 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index 0020a0ea43ff..35a1248aaa77 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | |||
@@ -63,10 +63,6 @@ bool amdgpu_has_atpx(void) { | |||
63 | return amdgpu_atpx_priv.atpx_detected; | 63 | return amdgpu_atpx_priv.atpx_detected; |
64 | } | 64 | } |
65 | 65 | ||
66 | bool amdgpu_has_atpx_dgpu_power_cntl(void) { | ||
67 | return amdgpu_atpx_priv.atpx.functions.power_cntl; | ||
68 | } | ||
69 | |||
70 | /** | 66 | /** |
71 | * amdgpu_atpx_call - call an ATPX method | 67 | * amdgpu_atpx_call - call an ATPX method |
72 | * | 68 | * |
@@ -146,6 +142,13 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas | |||
146 | */ | 142 | */ |
147 | static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx) | 143 | static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx) |
148 | { | 144 | { |
145 | /* make sure required functions are enabled */ | ||
146 | /* dGPU power control is required */ | ||
147 | if (atpx->functions.power_cntl == false) { | ||
148 | printk("ATPX dGPU power cntl not present, forcing\n"); | ||
149 | atpx->functions.power_cntl = true; | ||
150 | } | ||
151 | |||
149 | if (atpx->functions.px_params) { | 152 | if (atpx->functions.px_params) { |
150 | union acpi_object *info; | 153 | union acpi_object *info; |
151 | struct atpx_px_params output; | 154 | struct atpx_px_params output; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 612117478b57..2139da773da6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | |||
@@ -62,12 +62,6 @@ static const char *amdgpu_asic_name[] = { | |||
62 | "LAST", | 62 | "LAST", |
63 | }; | 63 | }; |
64 | 64 | ||
65 | #if defined(CONFIG_VGA_SWITCHEROO) | ||
66 | bool amdgpu_has_atpx_dgpu_power_cntl(void); | ||
67 | #else | ||
68 | static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; } | ||
69 | #endif | ||
70 | |||
71 | bool amdgpu_device_is_px(struct drm_device *dev) | 65 | bool amdgpu_device_is_px(struct drm_device *dev) |
72 | { | 66 | { |
73 | struct amdgpu_device *adev = dev->dev_private; | 67 | struct amdgpu_device *adev = dev->dev_private; |
@@ -1485,7 +1479,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, | |||
1485 | 1479 | ||
1486 | if (amdgpu_runtime_pm == 1) | 1480 | if (amdgpu_runtime_pm == 1) |
1487 | runtime = true; | 1481 | runtime = true; |
1488 | if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl()) | 1482 | if (amdgpu_device_is_px(ddev)) |
1489 | runtime = true; | 1483 | runtime = true; |
1490 | vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime); | 1484 | vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime); |
1491 | if (runtime) | 1485 | if (runtime) |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index e557fc1f17c8..7ecea83ce453 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | |||
@@ -541,6 +541,7 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata, | |||
541 | if (!metadata_size) { | 541 | if (!metadata_size) { |
542 | if (bo->metadata_size) { | 542 | if (bo->metadata_size) { |
543 | kfree(bo->metadata); | 543 | kfree(bo->metadata); |
544 | bo->metadata = NULL; | ||
544 | bo->metadata_size = 0; | 545 | bo->metadata_size = 0; |
545 | } | 546 | } |
546 | return 0; | 547 | return 0; |
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index 1e0bba29e167..1cd6de575305 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c | |||
@@ -298,6 +298,10 @@ bool amdgpu_atombios_encoder_mode_fixup(struct drm_encoder *encoder, | |||
298 | && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) | 298 | && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) |
299 | adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; | 299 | adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; |
300 | 300 | ||
301 | /* vertical FP must be at least 1 */ | ||
302 | if (mode->crtc_vsync_start == mode->crtc_vdisplay) | ||
303 | adjusted_mode->crtc_vsync_start++; | ||
304 | |||
301 | /* get the native mode for scaling */ | 305 | /* get the native mode for scaling */ |
302 | if (amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) | 306 | if (amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) |
303 | amdgpu_panel_mode_fixup(encoder, adjusted_mode); | 307 | amdgpu_panel_mode_fixup(encoder, adjusted_mode); |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 05b0353d3880..a4a2e6cc61bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | |||
@@ -910,7 +910,10 @@ static int gmc_v7_0_late_init(void *handle) | |||
910 | { | 910 | { |
911 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 911 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
912 | 912 | ||
913 | return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); | 913 | if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) |
914 | return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); | ||
915 | else | ||
916 | return 0; | ||
914 | } | 917 | } |
915 | 918 | ||
916 | static int gmc_v7_0_sw_init(void *handle) | 919 | static int gmc_v7_0_sw_init(void *handle) |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 02deb3229405..7a9db2c72c89 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | |||
@@ -870,7 +870,10 @@ static int gmc_v8_0_late_init(void *handle) | |||
870 | { | 870 | { |
871 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 871 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
872 | 872 | ||
873 | return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); | 873 | if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) |
874 | return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); | ||
875 | else | ||
876 | return 0; | ||
874 | } | 877 | } |
875 | 878 | ||
876 | #define mmMC_SEQ_MISC0_FIJI 0xA71 | 879 | #define mmMC_SEQ_MISC0_FIJI 0xA71 |
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index e17fbdaf874b..71ea0521ea96 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c | |||
@@ -1796,6 +1796,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) | |||
1796 | req_payload.start_slot = cur_slots; | 1796 | req_payload.start_slot = cur_slots; |
1797 | if (mgr->proposed_vcpis[i]) { | 1797 | if (mgr->proposed_vcpis[i]) { |
1798 | port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); | 1798 | port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); |
1799 | port = drm_dp_get_validated_port_ref(mgr, port); | ||
1800 | if (!port) { | ||
1801 | mutex_unlock(&mgr->payload_lock); | ||
1802 | return -EINVAL; | ||
1803 | } | ||
1799 | req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots; | 1804 | req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots; |
1800 | req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi; | 1805 | req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi; |
1801 | } else { | 1806 | } else { |
@@ -1823,6 +1828,9 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) | |||
1823 | mgr->payloads[i].payload_state = req_payload.payload_state; | 1828 | mgr->payloads[i].payload_state = req_payload.payload_state; |
1824 | } | 1829 | } |
1825 | cur_slots += req_payload.num_slots; | 1830 | cur_slots += req_payload.num_slots; |
1831 | |||
1832 | if (port) | ||
1833 | drm_dp_put_port(port); | ||
1826 | } | 1834 | } |
1827 | 1835 | ||
1828 | for (i = 0; i < mgr->max_payloads; i++) { | 1836 | for (i = 0; i < mgr->max_payloads; i++) { |
@@ -2128,6 +2136,8 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr) | |||
2128 | 2136 | ||
2129 | if (mgr->mst_primary) { | 2137 | if (mgr->mst_primary) { |
2130 | int sret; | 2138 | int sret; |
2139 | u8 guid[16]; | ||
2140 | |||
2131 | sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE); | 2141 | sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE); |
2132 | if (sret != DP_RECEIVER_CAP_SIZE) { | 2142 | if (sret != DP_RECEIVER_CAP_SIZE) { |
2133 | DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); | 2143 | DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); |
@@ -2142,6 +2152,16 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr) | |||
2142 | ret = -1; | 2152 | ret = -1; |
2143 | goto out_unlock; | 2153 | goto out_unlock; |
2144 | } | 2154 | } |
2155 | |||
2156 | /* Some hubs forget their guids after they resume */ | ||
2157 | sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); | ||
2158 | if (sret != 16) { | ||
2159 | DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); | ||
2160 | ret = -1; | ||
2161 | goto out_unlock; | ||
2162 | } | ||
2163 | drm_dp_check_mstb_guid(mgr->mst_primary, guid); | ||
2164 | |||
2145 | ret = 0; | 2165 | ret = 0; |
2146 | } else | 2166 | } else |
2147 | ret = -1; | 2167 | ret = -1; |
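The drm_dp_update_payload_part1() hunk above wraps each proposed port in a validated reference before its VCPI is read, presumably because an MST port can vanish (for example on hot-unplug) while the payload table is being rebuilt. A minimal sketch of that get/use/put pattern, assuming the mgr, port and payload_lock names from the hunk and trimming the rest of the loop:

	port = drm_dp_get_validated_port_ref(mgr, port);	/* NULL if the port is already gone */
	if (!port) {
		mutex_unlock(&mgr->payload_lock);
		return -EINVAL;
	}
	req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;	/* safe while the reference is held */
	/* ... build and write the payload entry ... */
	if (port)
		drm_dp_put_port(port);	/* drop the reference once the entry is done */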
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index 09198d0b5814..306dde18a94a 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c | |||
@@ -572,6 +572,24 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) | |||
572 | goto fail; | 572 | goto fail; |
573 | } | 573 | } |
574 | 574 | ||
575 | /* | ||
576 | * Set the GPU linear window to be at the end of the DMA window, where | ||
577 | * the CMA area is likely to reside. This ensures that we are able to | ||
578 | * map the command buffers while having the linear window overlap as | ||
579 | * much RAM as possible, so we can optimize mappings for other buffers. | ||
580 | * | ||
581 | * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads | ||
582 | * to different views of the memory on the individual engines. | ||
583 | */ | ||
584 | if (!(gpu->identity.features & chipFeatures_PIPE_3D) || | ||
585 | (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) { | ||
586 | u32 dma_mask = (u32)dma_get_required_mask(gpu->dev); | ||
587 | if (dma_mask < PHYS_OFFSET + SZ_2G) | ||
588 | gpu->memory_base = PHYS_OFFSET; | ||
589 | else | ||
590 | gpu->memory_base = dma_mask - SZ_2G + 1; | ||
591 | } | ||
592 | |||
575 | ret = etnaviv_hw_reset(gpu); | 593 | ret = etnaviv_hw_reset(gpu); |
576 | if (ret) | 594 | if (ret) |
577 | goto fail; | 595 | goto fail; |
@@ -1566,7 +1584,6 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev) | |||
1566 | { | 1584 | { |
1567 | struct device *dev = &pdev->dev; | 1585 | struct device *dev = &pdev->dev; |
1568 | struct etnaviv_gpu *gpu; | 1586 | struct etnaviv_gpu *gpu; |
1569 | u32 dma_mask; | ||
1570 | int err = 0; | 1587 | int err = 0; |
1571 | 1588 | ||
1572 | gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL); | 1589 | gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL); |
@@ -1576,18 +1593,6 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev) | |||
1576 | gpu->dev = &pdev->dev; | 1593 | gpu->dev = &pdev->dev; |
1577 | mutex_init(&gpu->lock); | 1594 | mutex_init(&gpu->lock); |
1578 | 1595 | ||
1579 | /* | ||
1580 | * Set the GPU linear window to be at the end of the DMA window, where | ||
1581 | * the CMA area is likely to reside. This ensures that we are able to | ||
1582 | * map the command buffers while having the linear window overlap as | ||
1583 | * much RAM as possible, so we can optimize mappings for other buffers. | ||
1584 | */ | ||
1585 | dma_mask = (u32)dma_get_required_mask(dev); | ||
1586 | if (dma_mask < PHYS_OFFSET + SZ_2G) | ||
1587 | gpu->memory_base = PHYS_OFFSET; | ||
1588 | else | ||
1589 | gpu->memory_base = dma_mask - SZ_2G + 1; | ||
1590 | |||
1591 | /* Map registers: */ | 1596 | /* Map registers: */ |
1592 | gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev)); | 1597 | gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev)); |
1593 | if (IS_ERR(gpu->mmio)) | 1598 | if (IS_ERR(gpu->mmio)) |
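The linear-window comment moved into etnaviv_gpu_init() above comes down to simple address arithmetic: the GPU's 2 GiB linear window is pushed as high as the required DMA mask allows so that it still covers the CMA region near the top of RAM. A standalone worked example, with PHYS_OFFSET and the DMA mask as hypothetical values rather than anything board-specific:

#include <stdio.h>

#define SZ_2G 0x80000000u

int main(void)
{
	unsigned int phys_offset = 0x10000000u;	/* assumed start of RAM */
	unsigned int dma_mask    = 0xffffffffu;	/* assumed 32-bit required DMA mask */
	unsigned int memory_base;

	if (dma_mask < phys_offset + SZ_2G)
		memory_base = phys_offset;		/* window starts at the base of RAM */
	else
		memory_base = dma_mask - SZ_2G + 1;	/* window covers the top 2 GiB */

	printf("memory_base = 0x%08x\n", memory_base);	/* prints 0x80000000 here */
	return 0;
}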
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 30798cbc6fc0..6d2fb3f4ac62 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -792,7 +792,7 @@ static int i915_drm_resume(struct drm_device *dev) | |||
792 | static int i915_drm_resume_early(struct drm_device *dev) | 792 | static int i915_drm_resume_early(struct drm_device *dev) |
793 | { | 793 | { |
794 | struct drm_i915_private *dev_priv = dev->dev_private; | 794 | struct drm_i915_private *dev_priv = dev->dev_private; |
795 | int ret = 0; | 795 | int ret; |
796 | 796 | ||
797 | /* | 797 | /* |
798 | * We have a resume ordering issue with the snd-hda driver also | 798 | * We have a resume ordering issue with the snd-hda driver also |
@@ -803,6 +803,36 @@ static int i915_drm_resume_early(struct drm_device *dev) | |||
803 | * FIXME: This should be solved with a special hdmi sink device or | 803 | * FIXME: This should be solved with a special hdmi sink device or |
804 | * similar so that power domains can be employed. | 804 | * similar so that power domains can be employed. |
805 | */ | 805 | */ |
806 | |||
807 | /* | ||
808 | * Note that we need to set the power state explicitly, since we | ||
809 | * powered off the device during freeze and the PCI core won't power | ||
810 | * it back up for us during thaw. Powering off the device during | ||
811 | * freeze is not a hard requirement though, and during the | ||
812 | * suspend/resume phases the PCI core makes sure we get here with the | ||
813 | * device powered on. So in case we change our freeze logic and keep | ||
814 | * the device powered we can also remove the following set power state | ||
815 | * call. | ||
816 | */ | ||
817 | ret = pci_set_power_state(dev->pdev, PCI_D0); | ||
818 | if (ret) { | ||
819 | DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret); | ||
820 | goto out; | ||
821 | } | ||
822 | |||
823 | /* | ||
824 | * Note that pci_enable_device() first enables any parent bridge | ||
825 | * device and only then sets the power state for this device. The | ||
826 | * bridge enabling is a nop though, since bridge devices are resumed | ||
827 | * first. The order of enabling power and enabling the device is | ||
828 | * imposed by the PCI core as described above, so here we preserve the | ||
829 | * same order for the freeze/thaw phases. | ||
830 | * | ||
831 | * TODO: eventually we should remove pci_disable_device() / | ||
832 | * pci_enable_device() from suspend/resume. Due to how they | ||
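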
833 | * depend on the device enable refcount we can't anyway depend on them | ||
834 | * disabling/enabling the device. | ||
835 | */ | ||
806 | if (pci_enable_device(dev->pdev)) { | 836 | if (pci_enable_device(dev->pdev)) { |
807 | ret = -EIO; | 837 | ret = -EIO; |
808 | goto out; | 838 | goto out; |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index f76cbf3e5d1e..fffdac801d3b 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -2907,7 +2907,14 @@ enum skl_disp_power_wells { | |||
2907 | #define GEN6_RP_STATE_CAP _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998) | 2907 | #define GEN6_RP_STATE_CAP _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998) |
2908 | #define BXT_RP_STATE_CAP _MMIO(0x138170) | 2908 | #define BXT_RP_STATE_CAP _MMIO(0x138170) |
2909 | 2909 | ||
2910 | #define INTERVAL_1_28_US(us) (((us) * 100) >> 7) | 2910 | /* |
2911 | * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS | ||
2912 | * 8300) freezing up around GPU hangs. Looks as if even | ||
2913 | * scheduling/timer interrupts start misbehaving if the RPS | ||
2914 | * EI/thresholds are "bad", leading to a very sluggish or even | ||
2915 | * frozen machine. | ||
2916 | */ | ||
2917 | #define INTERVAL_1_28_US(us) roundup(((us) * 100) >> 7, 25) | ||
2911 | #define INTERVAL_1_33_US(us) (((us) * 3) >> 2) | 2918 | #define INTERVAL_1_33_US(us) (((us) * 3) >> 2) |
2912 | #define INTERVAL_0_833_US(us) (((us) * 6) / 5) | 2919 | #define INTERVAL_0_833_US(us) (((us) * 6) / 5) |
2913 | #define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \ | 2920 | #define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \ |
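The INTERVAL_1_28_US() change above only rounds the computed interval up to a multiple of 25, as the new comment explains, and the arithmetic is easy to verify in isolation. A small standalone sketch (ROUNDUP here mirrors what the kernel's roundup() macro does; the 10 us input is just an example value):

#include <stdio.h>

#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned int us  = 10;			/* example threshold in microseconds */
	unsigned int raw = (us * 100) >> 7;	/* old macro: 1000 >> 7 = 7 */
	unsigned int now = ROUNDUP(raw, 25);	/* new macro: rounded up to 25 */

	printf("raw=%u rounded=%u\n", raw, now);
	return 0;
}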
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 62de9f4bce09..3b57bf06abe8 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -443,9 +443,17 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder) | |||
443 | } else if (IS_BROADWELL(dev_priv)) { | 443 | } else if (IS_BROADWELL(dev_priv)) { |
444 | ddi_translations_fdi = bdw_ddi_translations_fdi; | 444 | ddi_translations_fdi = bdw_ddi_translations_fdi; |
445 | ddi_translations_dp = bdw_ddi_translations_dp; | 445 | ddi_translations_dp = bdw_ddi_translations_dp; |
446 | ddi_translations_edp = bdw_ddi_translations_edp; | 446 | |
447 | if (dev_priv->edp_low_vswing) { | ||
448 | ddi_translations_edp = bdw_ddi_translations_edp; | ||
449 | n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp); | ||
450 | } else { | ||
451 | ddi_translations_edp = bdw_ddi_translations_dp; | ||
452 | n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); | ||
453 | } | ||
454 | |||
447 | ddi_translations_hdmi = bdw_ddi_translations_hdmi; | 455 | ddi_translations_hdmi = bdw_ddi_translations_hdmi; |
448 | n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp); | 456 | |
449 | n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); | 457 | n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); |
450 | n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); | 458 | n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); |
451 | hdmi_default_entry = 7; | 459 | hdmi_default_entry = 7; |
@@ -3201,12 +3209,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder, | |||
3201 | intel_ddi_clock_get(encoder, pipe_config); | 3209 | intel_ddi_clock_get(encoder, pipe_config); |
3202 | } | 3210 | } |
3203 | 3211 | ||
3204 | static void intel_ddi_destroy(struct drm_encoder *encoder) | ||
3205 | { | ||
3206 | /* HDMI has nothing special to destroy, so we can go with this. */ | ||
3207 | intel_dp_encoder_destroy(encoder); | ||
3208 | } | ||
3209 | |||
3210 | static bool intel_ddi_compute_config(struct intel_encoder *encoder, | 3212 | static bool intel_ddi_compute_config(struct intel_encoder *encoder, |
3211 | struct intel_crtc_state *pipe_config) | 3213 | struct intel_crtc_state *pipe_config) |
3212 | { | 3214 | { |
@@ -3225,7 +3227,8 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder, | |||
3225 | } | 3227 | } |
3226 | 3228 | ||
3227 | static const struct drm_encoder_funcs intel_ddi_funcs = { | 3229 | static const struct drm_encoder_funcs intel_ddi_funcs = { |
3228 | .destroy = intel_ddi_destroy, | 3230 | .reset = intel_dp_encoder_reset, |
3231 | .destroy = intel_dp_encoder_destroy, | ||
3229 | }; | 3232 | }; |
3230 | 3233 | ||
3231 | static struct intel_connector * | 3234 | static struct intel_connector * |
@@ -3324,6 +3327,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port) | |||
3324 | intel_encoder->post_disable = intel_ddi_post_disable; | 3327 | intel_encoder->post_disable = intel_ddi_post_disable; |
3325 | intel_encoder->get_hw_state = intel_ddi_get_hw_state; | 3328 | intel_encoder->get_hw_state = intel_ddi_get_hw_state; |
3326 | intel_encoder->get_config = intel_ddi_get_config; | 3329 | intel_encoder->get_config = intel_ddi_get_config; |
3330 | intel_encoder->suspend = intel_dp_encoder_suspend; | ||
3327 | 3331 | ||
3328 | intel_dig_port->port = port; | 3332 | intel_dig_port->port = port; |
3329 | intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & | 3333 | intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 6e0d8283daa6..182f84937345 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -13351,6 +13351,9 @@ static int intel_atomic_prepare_commit(struct drm_device *dev, | |||
13351 | } | 13351 | } |
13352 | 13352 | ||
13353 | for_each_crtc_in_state(state, crtc, crtc_state, i) { | 13353 | for_each_crtc_in_state(state, crtc, crtc_state, i) { |
13354 | if (state->legacy_cursor_update) | ||
13355 | continue; | ||
13356 | |||
13354 | ret = intel_crtc_wait_for_pending_flips(crtc); | 13357 | ret = intel_crtc_wait_for_pending_flips(crtc); |
13355 | if (ret) | 13358 | if (ret) |
13356 | return ret; | 13359 | return ret; |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index f069a82deb57..412a34c39522 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -4898,7 +4898,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) | |||
4898 | kfree(intel_dig_port); | 4898 | kfree(intel_dig_port); |
4899 | } | 4899 | } |
4900 | 4900 | ||
4901 | static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) | 4901 | void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) |
4902 | { | 4902 | { |
4903 | struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); | 4903 | struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); |
4904 | 4904 | ||
@@ -4940,7 +4940,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) | |||
4940 | edp_panel_vdd_schedule_off(intel_dp); | 4940 | edp_panel_vdd_schedule_off(intel_dp); |
4941 | } | 4941 | } |
4942 | 4942 | ||
4943 | static void intel_dp_encoder_reset(struct drm_encoder *encoder) | 4943 | void intel_dp_encoder_reset(struct drm_encoder *encoder) |
4944 | { | 4944 | { |
4945 | struct intel_dp *intel_dp; | 4945 | struct intel_dp *intel_dp; |
4946 | 4946 | ||
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 4c027d69fac9..7d3af3a72abe 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -1238,6 +1238,8 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp, | |||
1238 | void intel_dp_start_link_train(struct intel_dp *intel_dp); | 1238 | void intel_dp_start_link_train(struct intel_dp *intel_dp); |
1239 | void intel_dp_stop_link_train(struct intel_dp *intel_dp); | 1239 | void intel_dp_stop_link_train(struct intel_dp *intel_dp); |
1240 | void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); | 1240 | void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); |
1241 | void intel_dp_encoder_reset(struct drm_encoder *encoder); | ||
1242 | void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder); | ||
1241 | void intel_dp_encoder_destroy(struct drm_encoder *encoder); | 1243 | void intel_dp_encoder_destroy(struct drm_encoder *encoder); |
1242 | int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc); | 1244 | int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc); |
1243 | bool intel_dp_compute_config(struct intel_encoder *encoder, | 1245 | bool intel_dp_compute_config(struct intel_encoder *encoder, |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index a0d8daed2470..1ab6f687f640 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -1415,8 +1415,16 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) | |||
1415 | hdmi_to_dig_port(intel_hdmi)); | 1415 | hdmi_to_dig_port(intel_hdmi)); |
1416 | } | 1416 | } |
1417 | 1417 | ||
1418 | if (!live_status) | 1418 | if (!live_status) { |
1419 | DRM_DEBUG_KMS("Live status not up!"); | 1419 | DRM_DEBUG_KMS("HDMI live status down\n"); |
1420 | /* | ||
1421 | * Live status register is not reliable on all intel platforms. | ||
1422 | * So consider live_status only for certain platforms, for | ||
1423 | * others, read EDID to determine presence of sink. | ||
1424 | */ | ||
1425 | if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv)) | ||
1426 | live_status = true; | ||
1427 | } | ||
1420 | 1428 | ||
1421 | intel_hdmi_unset_edid(connector); | 1429 | intel_hdmi_unset_edid(connector); |
1422 | 1430 | ||
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index edd05cdb0cd8..587cae4e73c9 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c | |||
@@ -310,6 +310,10 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, | |||
310 | && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) | 310 | && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) |
311 | adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; | 311 | adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; |
312 | 312 | ||
313 | /* vertical FP must be at least 1 */ | ||
314 | if (mode->crtc_vsync_start == mode->crtc_vdisplay) | ||
315 | adjusted_mode->crtc_vsync_start++; | ||
316 | |||
313 | /* get the native mode for scaling */ | 317 | /* get the native mode for scaling */ |
314 | if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) { | 318 | if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) { |
315 | radeon_panel_mode_fixup(encoder, adjusted_mode); | 319 | radeon_panel_mode_fixup(encoder, adjusted_mode); |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 76c4bdf21b20..34f7a29d9366 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -2608,10 +2608,152 @@ static void evergreen_agp_enable(struct radeon_device *rdev) | |||
2608 | WREG32(VM_CONTEXT1_CNTL, 0); | 2608 | WREG32(VM_CONTEXT1_CNTL, 0); |
2609 | } | 2609 | } |
2610 | 2610 | ||
2611 | static const unsigned ni_dig_offsets[] = | ||
2612 | { | ||
2613 | NI_DIG0_REGISTER_OFFSET, | ||
2614 | NI_DIG1_REGISTER_OFFSET, | ||
2615 | NI_DIG2_REGISTER_OFFSET, | ||
2616 | NI_DIG3_REGISTER_OFFSET, | ||
2617 | NI_DIG4_REGISTER_OFFSET, | ||
2618 | NI_DIG5_REGISTER_OFFSET | ||
2619 | }; | ||
2620 | |||
2621 | static const unsigned ni_tx_offsets[] = | ||
2622 | { | ||
2623 | NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1, | ||
2624 | NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1, | ||
2625 | NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1, | ||
2626 | NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1, | ||
2627 | NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1, | ||
2628 | NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1 | ||
2629 | }; | ||
2630 | |||
2631 | static const unsigned evergreen_dp_offsets[] = | ||
2632 | { | ||
2633 | EVERGREEN_DP0_REGISTER_OFFSET, | ||
2634 | EVERGREEN_DP1_REGISTER_OFFSET, | ||
2635 | EVERGREEN_DP2_REGISTER_OFFSET, | ||
2636 | EVERGREEN_DP3_REGISTER_OFFSET, | ||
2637 | EVERGREEN_DP4_REGISTER_OFFSET, | ||
2638 | EVERGREEN_DP5_REGISTER_OFFSET | ||
2639 | }; | ||
2640 | |||
2641 | |||
2642 | /* | ||
2643 | * The assumption is that EVERGREEN_CRTC_MASTER_EN is enabled for the requested crtc. | ||
2644 | * We go from crtc to connector, which is not reliable since it | ||
2645 | * should really be the opposite direction. If the crtc is enabled, then | ||
2646 | * find the dig_fe which selects this crtc and ensure that it is enabled. | ||
2647 | * If such a dig_fe is found, then find the dig_be which selects that dig_fe and | ||
2648 | * ensure that it is enabled and in DP_SST mode. | ||
2649 | * If UNIPHY_PLL_CONTROL1 is enabled, then we should disconnect the timing | ||
2650 | * from the dp symbol clocks. | ||
2651 | */ | ||
2652 | static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev, | ||
2653 | unsigned crtc_id, unsigned *ret_dig_fe) | ||
2654 | { | ||
2655 | unsigned i; | ||
2656 | unsigned dig_fe; | ||
2657 | unsigned dig_be; | ||
2658 | unsigned dig_en_be; | ||
2659 | unsigned uniphy_pll; | ||
2660 | unsigned digs_fe_selected; | ||
2661 | unsigned dig_be_mode; | ||
2662 | unsigned dig_fe_mask; | ||
2663 | bool is_enabled = false; | ||
2664 | bool found_crtc = false; | ||
2665 | |||
2666 | /* loop through all running dig_fe to find selected crtc */ | ||
2667 | for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) { | ||
2668 | dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]); | ||
2669 | if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON && | ||
2670 | crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) { | ||
2671 | /* found running pipe */ | ||
2672 | found_crtc = true; | ||
2673 | dig_fe_mask = 1 << i; | ||
2674 | dig_fe = i; | ||
2675 | break; | ||
2676 | } | ||
2677 | } | ||
2678 | |||
2679 | if (found_crtc) { | ||
2680 | /* loop through all running dig_be to find selected dig_fe */ | ||
2681 | for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) { | ||
2682 | dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]); | ||
2683 | /* if dig_fe_selected by dig_be? */ | ||
2684 | digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be); | ||
2685 | dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be); | ||
2686 | if (dig_fe_mask & digs_fe_selected && | ||
2687 | /* if dig_be in sst mode? */ | ||
2688 | dig_be_mode == NI_DIG_BE_DPSST) { | ||
2689 | dig_en_be = RREG32(NI_DIG_BE_EN_CNTL + | ||
2690 | ni_dig_offsets[i]); | ||
2691 | uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 + | ||
2692 | ni_tx_offsets[i]); | ||
2693 | /* dig_be enable and tx is running */ | ||
2694 | if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE && | ||
2695 | dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON && | ||
2696 | uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) { | ||
2697 | is_enabled = true; | ||
2698 | *ret_dig_fe = dig_fe; | ||
2699 | break; | ||
2700 | } | ||
2701 | } | ||
2702 | } | ||
2703 | } | ||
2704 | |||
2705 | return is_enabled; | ||
2706 | } | ||
2707 | |||
2708 | /* | ||
2709 | * Blank dig when in dp sst mode | ||
2710 | * Dig ignores crtc timing | ||
2711 | */ | ||
2712 | static void evergreen_blank_dp_output(struct radeon_device *rdev, | ||
2713 | unsigned dig_fe) | ||
2714 | { | ||
2715 | unsigned stream_ctrl; | ||
2716 | unsigned fifo_ctrl; | ||
2717 | unsigned counter = 0; | ||
2718 | |||
2719 | if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) { | ||
2720 | DRM_ERROR("invalid dig_fe %d\n", dig_fe); | ||
2721 | return; | ||
2722 | } | ||
2723 | |||
2724 | stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL + | ||
2725 | evergreen_dp_offsets[dig_fe]); | ||
2726 | if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) { | ||
2727 | DRM_ERROR("dig %d should be enabled\n", dig_fe); | ||
2728 | return; | ||
2729 | } | ||
2730 | |||
2731 | stream_ctrl &= ~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE; | ||
2732 | WREG32(EVERGREEN_DP_VID_STREAM_CNTL + | ||
2733 | evergreen_dp_offsets[dig_fe], stream_ctrl); | ||
2734 | |||
2735 | stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL + | ||
2736 | evergreen_dp_offsets[dig_fe]); | ||
2737 | while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) { | ||
2738 | msleep(1); | ||
2739 | counter++; | ||
2740 | stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL + | ||
2741 | evergreen_dp_offsets[dig_fe]); | ||
2742 | } | ||
2743 | if (counter >= 32) | ||
2744 | DRM_ERROR("counter exceeds %d\n", counter); | ||
2745 | |||
2746 | fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]); | ||
2747 | fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET; | ||
2748 | WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl); | ||
2749 | |||
2750 | } | ||
2751 | |||
2611 | void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save) | 2752 | void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save) |
2612 | { | 2753 | { |
2613 | u32 crtc_enabled, tmp, frame_count, blackout; | 2754 | u32 crtc_enabled, tmp, frame_count, blackout; |
2614 | int i, j; | 2755 | int i, j; |
2756 | unsigned dig_fe; | ||
2615 | 2757 | ||
2616 | if (!ASIC_IS_NODCE(rdev)) { | 2758 | if (!ASIC_IS_NODCE(rdev)) { |
2617 | save->vga_render_control = RREG32(VGA_RENDER_CONTROL); | 2759 | save->vga_render_control = RREG32(VGA_RENDER_CONTROL); |
@@ -2651,7 +2793,17 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav | |||
2651 | break; | 2793 | break; |
2652 | udelay(1); | 2794 | udelay(1); |
2653 | } | 2795 | } |
2654 | 2796 | /* we should disable the dig if it drives dp sst, */ |
2797 | /* but we are in radeon_device_init and the topology is unknown */ | ||
2798 | /* and only becomes available after radeon_modeset_init. */ | ||
2799 | /* the method radeon_atom_encoder_dpms_dig */ | ||
2800 | /* would do the job if we initialized it properly; */ | ||
2801 | /* for now we do it manually. */ | ||
2802 | /**/ | ||
2803 | if (ASIC_IS_DCE5(rdev) && | ||
2804 | evergreen_is_dp_sst_stream_enabled(rdev, i, &dig_fe)) | ||
2805 | evergreen_blank_dp_output(rdev, dig_fe); | ||
2806 | /* we could remove the 6 lines below */ | ||
2655 | /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ | 2807 | /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ |
2656 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); | 2808 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); |
2657 | tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); | 2809 | tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); |
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h index aa939dfed3a3..b436badf9efa 100644 --- a/drivers/gpu/drm/radeon/evergreen_reg.h +++ b/drivers/gpu/drm/radeon/evergreen_reg.h | |||
@@ -250,8 +250,43 @@ | |||
250 | 250 | ||
251 | /* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */ | 251 | /* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */ |
252 | #define EVERGREEN_HDMI_BASE 0x7030 | 252 | #define EVERGREEN_HDMI_BASE 0x7030 |
253 | /* DIG block */ | ||
254 | #define NI_DIG0_REGISTER_OFFSET (0x7000 - 0x7000) | ||
255 | #define NI_DIG1_REGISTER_OFFSET (0x7C00 - 0x7000) | ||
256 | #define NI_DIG2_REGISTER_OFFSET (0x10800 - 0x7000) | ||
257 | #define NI_DIG3_REGISTER_OFFSET (0x11400 - 0x7000) | ||
258 | #define NI_DIG4_REGISTER_OFFSET (0x12000 - 0x7000) | ||
259 | #define NI_DIG5_REGISTER_OFFSET (0x12C00 - 0x7000) | ||
260 | |||
261 | |||
262 | #define NI_DIG_FE_CNTL 0x7000 | ||
263 | # define NI_DIG_FE_CNTL_SOURCE_SELECT(x) ((x) & 0x3) | ||
264 | # define NI_DIG_FE_CNTL_SYMCLK_FE_ON (1 << 24) | ||
265 | |||
266 | |||
267 | #define NI_DIG_BE_CNTL 0x7140 | ||
268 | # define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x) (((x) >> 8) & 0x3F) | ||
269 | # define NI_DIG_FE_CNTL_MODE(x) (((x) >> 16) & 0x7) | ||
270 | |||
271 | #define NI_DIG_BE_EN_CNTL 0x7144 | ||
272 | # define NI_DIG_BE_EN_CNTL_ENABLE (1 << 0) | ||
273 | # define NI_DIG_BE_EN_CNTL_SYMBCLK_ON (1 << 8) | ||
274 | # define NI_DIG_BE_DPSST 0 | ||
253 | 275 | ||
254 | /* Display Port block */ | 276 | /* Display Port block */ |
277 | #define EVERGREEN_DP0_REGISTER_OFFSET (0x730C - 0x730C) | ||
278 | #define EVERGREEN_DP1_REGISTER_OFFSET (0x7F0C - 0x730C) | ||
279 | #define EVERGREEN_DP2_REGISTER_OFFSET (0x10B0C - 0x730C) | ||
280 | #define EVERGREEN_DP3_REGISTER_OFFSET (0x1170C - 0x730C) | ||
281 | #define EVERGREEN_DP4_REGISTER_OFFSET (0x1230C - 0x730C) | ||
282 | #define EVERGREEN_DP5_REGISTER_OFFSET (0x12F0C - 0x730C) | ||
283 | |||
284 | |||
285 | #define EVERGREEN_DP_VID_STREAM_CNTL 0x730C | ||
286 | # define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE (1 << 0) | ||
287 | # define EVERGREEN_DP_VID_STREAM_STATUS (1 << 16) | ||
288 | #define EVERGREEN_DP_STEER_FIFO 0x7310 | ||
289 | # define EVERGREEN_DP_STEER_FIFO_RESET (1 << 0) | ||
255 | #define EVERGREEN_DP_SEC_CNTL 0x7280 | 290 | #define EVERGREEN_DP_SEC_CNTL 0x7280 |
256 | # define EVERGREEN_DP_SEC_STREAM_ENABLE (1 << 0) | 291 | # define EVERGREEN_DP_SEC_STREAM_ENABLE (1 << 0) |
257 | # define EVERGREEN_DP_SEC_ASP_ENABLE (1 << 4) | 292 | # define EVERGREEN_DP_SEC_ASP_ENABLE (1 << 4) |
@@ -266,4 +301,15 @@ | |||
266 | # define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x) (((x) & 0xf) << 24) | 301 | # define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x) (((x) & 0xf) << 24) |
267 | # define EVERGREEN_DP_SEC_SS_EN (1 << 28) | 302 | # define EVERGREEN_DP_SEC_SS_EN (1 << 28) |
268 | 303 | ||
304 | /* DCIO_UNIPHY block */ | ||
305 | #define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1 (0x6600 - 0x6600) | ||
306 | #define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1 (0x6640 - 0x6600) | ||
307 | #define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1 (0x6680 - 0x6600) | ||
308 | #define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1 (0x66C0 - 0x6600) | ||
309 | #define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1 (0x6700 - 0x6600) | ||
310 | #define NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1 (0x6740 - 0x6600) | ||
311 | |||
312 | #define NI_DCIO_UNIPHY0_PLL_CONTROL1 0x6618 | ||
313 | # define NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE (1 << 0) | ||
314 | |||
269 | #endif | 315 | #endif |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 4cbf26555093..e3daafa1be13 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -230,22 +230,13 @@ EXPORT_SYMBOL(ttm_bo_del_sub_from_lru); | |||
230 | 230 | ||
231 | void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo) | 231 | void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo) |
232 | { | 232 | { |
233 | struct ttm_bo_device *bdev = bo->bdev; | 233 | int put_count = 0; |
234 | struct ttm_mem_type_manager *man; | ||
235 | 234 | ||
236 | lockdep_assert_held(&bo->resv->lock.base); | 235 | lockdep_assert_held(&bo->resv->lock.base); |
237 | 236 | ||
238 | if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) { | 237 | put_count = ttm_bo_del_from_lru(bo); |
239 | list_del_init(&bo->swap); | 238 | ttm_bo_list_ref_sub(bo, put_count, true); |
240 | list_del_init(&bo->lru); | 239 | ttm_bo_add_to_lru(bo); |
241 | |||
242 | } else { | ||
243 | if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) | ||
244 | list_move_tail(&bo->swap, &bo->glob->swap_lru); | ||
245 | |||
246 | man = &bdev->man[bo->mem.mem_type]; | ||
247 | list_move_tail(&bo->lru, &man->lru); | ||
248 | } | ||
249 | } | 240 | } |
250 | EXPORT_SYMBOL(ttm_bo_move_to_lru_tail); | 241 | EXPORT_SYMBOL(ttm_bo_move_to_lru_tail); |
251 | 242 | ||
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index 4854dac87e24..5fd1fd06effc 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c | |||
@@ -267,11 +267,23 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc, | |||
267 | return 0; | 267 | return 0; |
268 | } | 268 | } |
269 | 269 | ||
270 | static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc, | ||
271 | struct drm_crtc_state *old_state) | ||
272 | { | ||
273 | unsigned long flags; | ||
274 | |||
275 | spin_lock_irqsave(&crtc->dev->event_lock, flags); | ||
276 | if (crtc->state->event) | ||
277 | drm_crtc_send_vblank_event(crtc, crtc->state->event); | ||
278 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); | ||
279 | } | ||
280 | |||
270 | static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = { | 281 | static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = { |
271 | .enable = virtio_gpu_crtc_enable, | 282 | .enable = virtio_gpu_crtc_enable, |
272 | .disable = virtio_gpu_crtc_disable, | 283 | .disable = virtio_gpu_crtc_disable, |
273 | .mode_set_nofb = virtio_gpu_crtc_mode_set_nofb, | 284 | .mode_set_nofb = virtio_gpu_crtc_mode_set_nofb, |
274 | .atomic_check = virtio_gpu_crtc_atomic_check, | 285 | .atomic_check = virtio_gpu_crtc_atomic_check, |
286 | .atomic_flush = virtio_gpu_crtc_atomic_flush, | ||
275 | }; | 287 | }; |
276 | 288 | ||
277 | static void virtio_gpu_enc_mode_set(struct drm_encoder *encoder, | 289 | static void virtio_gpu_enc_mode_set(struct drm_encoder *encoder, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 723ba16c6084..1a1a87cbf109 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
@@ -3293,19 +3293,19 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = { | |||
3293 | &vmw_cmd_dx_cid_check, true, false, true), | 3293 | &vmw_cmd_dx_cid_check, true, false, true), |
3294 | VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query, | 3294 | VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query, |
3295 | true, false, true), | 3295 | true, false, true), |
3296 | VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok, | 3296 | VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check, |
3297 | true, false, true), | 3297 | true, false, true), |
3298 | VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query, | 3298 | VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query, |
3299 | true, false, true), | 3299 | true, false, true), |
3300 | VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET, | 3300 | VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET, |
3301 | &vmw_cmd_ok, true, false, true), | 3301 | &vmw_cmd_dx_cid_check, true, false, true), |
3302 | VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok, | 3302 | VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check, |
3303 | true, false, true), | 3303 | true, false, true), |
3304 | VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok, | 3304 | VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check, |
3305 | true, false, true), | 3305 | true, false, true), |
3306 | VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid, | 3306 | VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid, |
3307 | true, false, true), | 3307 | true, false, true), |
3308 | VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid, | 3308 | VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check, |
3309 | true, false, true), | 3309 | true, false, true), |
3310 | VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check, | 3310 | VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check, |
3311 | true, false, true), | 3311 | true, false, true), |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 3b1faf7862a5..679a4cb98ee3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | |||
@@ -573,9 +573,9 @@ static int vmw_fb_set_par(struct fb_info *info) | |||
573 | mode = old_mode; | 573 | mode = old_mode; |
574 | old_mode = NULL; | 574 | old_mode = NULL; |
575 | } else if (!vmw_kms_validate_mode_vram(vmw_priv, | 575 | } else if (!vmw_kms_validate_mode_vram(vmw_priv, |
576 | mode->hdisplay * | 576 | mode->hdisplay * |
577 | (var->bits_per_pixel + 7) / 8, | 577 | DIV_ROUND_UP(var->bits_per_pixel, 8), |
578 | mode->vdisplay)) { | 578 | mode->vdisplay)) { |
579 | drm_mode_destroy(vmw_priv->dev, mode); | 579 | drm_mode_destroy(vmw_priv->dev, mode); |
580 | return -EINVAL; | 580 | return -EINVAL; |
581 | } | 581 | } |
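The vmw_fb_set_par() hunk above replaces an open-coded (bits_per_pixel + 7) / 8 with DIV_ROUND_UP(bits_per_pixel, 8); both give the same bytes-per-pixel value, the macro just states the intent. A quick standalone check, with 15 bpp chosen purely as an example:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int bpp = 15;	/* example bits_per_pixel */

	/* both expressions print 2 */
	printf("%d %d\n", (bpp + 7) / 8, DIV_ROUND_UP(bpp, 8));
	return 0;
}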
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index e00db3f510dd..abb98c77bad2 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c | |||
@@ -1068,7 +1068,6 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base) | |||
1068 | goto err_register; | 1068 | goto err_register; |
1069 | } | 1069 | } |
1070 | 1070 | ||
1071 | pdev->dev.of_node = of_node; | ||
1072 | pdev->dev.parent = dev; | 1071 | pdev->dev.parent = dev; |
1073 | 1072 | ||
1074 | ret = platform_device_add_data(pdev, ®->pdata, | 1073 | ret = platform_device_add_data(pdev, ®->pdata, |
@@ -1079,6 +1078,12 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base) | |||
1079 | platform_device_put(pdev); | 1078 | platform_device_put(pdev); |
1080 | goto err_register; | 1079 | goto err_register; |
1081 | } | 1080 | } |
1081 | |||
1082 | /* | ||
1083 | * Set of_node only after calling platform_device_add. Otherwise | ||
1084 | * the platform:imx-ipuv3-crtc modalias won't be used. | ||
1085 | */ | ||
1086 | pdev->dev.of_node = of_node; | ||
1082 | } | 1087 | } |
1083 | 1088 | ||
1084 | return 0; | 1089 | return 0; |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index c6eaff5f8845..0238f0169e48 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
@@ -259,6 +259,7 @@ | |||
259 | #define USB_DEVICE_ID_CORSAIR_K90 0x1b02 | 259 | #define USB_DEVICE_ID_CORSAIR_K90 0x1b02 |
260 | 260 | ||
261 | #define USB_VENDOR_ID_CREATIVELABS 0x041e | 261 | #define USB_VENDOR_ID_CREATIVELABS 0x041e |
262 | #define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51 0x322c | ||
262 | #define USB_DEVICE_ID_PRODIKEYS_PCMIDI 0x2801 | 263 | #define USB_DEVICE_ID_PRODIKEYS_PCMIDI 0x2801 |
263 | 264 | ||
264 | #define USB_VENDOR_ID_CVTOUCH 0x1ff7 | 265 | #define USB_VENDOR_ID_CVTOUCH 0x1ff7 |
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index ed2f68edc8f1..53fc856d6867 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c | |||
@@ -71,6 +71,7 @@ static const struct hid_blacklist { | |||
71 | { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET }, | 71 | { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET }, |
72 | { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET }, | 72 | { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET }, |
73 | { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, | 73 | { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, |
74 | { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET }, | ||
74 | { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, | 75 | { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, |
75 | { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT }, | 76 | { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT }, |
76 | { USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL }, | 77 | { USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL }, |
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 02c4efea241c..cf2ba43453fd 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c | |||
@@ -684,6 +684,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) | |||
684 | 684 | ||
685 | wacom->tool[idx] = wacom_intuos_get_tool_type(wacom->id[idx]); | 685 | wacom->tool[idx] = wacom_intuos_get_tool_type(wacom->id[idx]); |
686 | 686 | ||
687 | wacom->shared->stylus_in_proximity = true; | ||
687 | return 1; | 688 | return 1; |
688 | } | 689 | } |
689 | 690 | ||
@@ -3395,6 +3396,10 @@ static const struct wacom_features wacom_features_0x33E = | |||
3395 | { "Wacom Intuos PT M 2", 21600, 13500, 2047, 63, | 3396 | { "Wacom Intuos PT M 2", 21600, 13500, 2047, 63, |
3396 | INTUOSHT2, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16, | 3397 | INTUOSHT2, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16, |
3397 | .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; | 3398 | .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; |
3399 | static const struct wacom_features wacom_features_0x343 = | ||
3400 | { "Wacom DTK1651", 34616, 19559, 1023, 0, | ||
3401 | DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4, | ||
3402 | WACOM_DTU_OFFSET, WACOM_DTU_OFFSET }; | ||
3398 | 3403 | ||
3399 | static const struct wacom_features wacom_features_HID_ANY_ID = | 3404 | static const struct wacom_features wacom_features_HID_ANY_ID = |
3400 | { "Wacom HID", .type = HID_GENERIC }; | 3405 | { "Wacom HID", .type = HID_GENERIC }; |
@@ -3560,6 +3565,7 @@ const struct hid_device_id wacom_ids[] = { | |||
3560 | { USB_DEVICE_WACOM(0x33C) }, | 3565 | { USB_DEVICE_WACOM(0x33C) }, |
3561 | { USB_DEVICE_WACOM(0x33D) }, | 3566 | { USB_DEVICE_WACOM(0x33D) }, |
3562 | { USB_DEVICE_WACOM(0x33E) }, | 3567 | { USB_DEVICE_WACOM(0x33E) }, |
3568 | { USB_DEVICE_WACOM(0x343) }, | ||
3563 | { USB_DEVICE_WACOM(0x4001) }, | 3569 | { USB_DEVICE_WACOM(0x4001) }, |
3564 | { USB_DEVICE_WACOM(0x4004) }, | 3570 | { USB_DEVICE_WACOM(0x4004) }, |
3565 | { USB_DEVICE_WACOM(0x5000) }, | 3571 | { USB_DEVICE_WACOM(0x5000) }, |
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index faa8e6821fea..0967e1a5b3a2 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig | |||
@@ -975,10 +975,10 @@ config I2C_XLR | |||
975 | 975 | ||
976 | config I2C_XLP9XX | 976 | config I2C_XLP9XX |
977 | tristate "XLP9XX I2C support" | 977 | tristate "XLP9XX I2C support" |
978 | depends on CPU_XLP || COMPILE_TEST | 978 | depends on CPU_XLP || ARCH_VULCAN || COMPILE_TEST |
979 | help | 979 | help |
980 | This driver enables support for the on-chip I2C interface of | 980 | This driver enables support for the on-chip I2C interface of |
981 | the Broadcom XLP9xx/XLP5xx MIPS processors. | 981 | the Broadcom XLP9xx/XLP5xx MIPS and Vulcan ARM64 processors. |
982 | 982 | ||
983 | This driver can also be built as a module. If so, the module will | 983 | This driver can also be built as a module. If so, the module will |
984 | be called i2c-xlp9xx. | 984 | be called i2c-xlp9xx. |
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c index 714bdc837769..b167ab25310a 100644 --- a/drivers/i2c/busses/i2c-cpm.c +++ b/drivers/i2c/busses/i2c-cpm.c | |||
@@ -116,8 +116,8 @@ struct cpm_i2c { | |||
116 | cbd_t __iomem *rbase; | 116 | cbd_t __iomem *rbase; |
117 | u_char *txbuf[CPM_MAXBD]; | 117 | u_char *txbuf[CPM_MAXBD]; |
118 | u_char *rxbuf[CPM_MAXBD]; | 118 | u_char *rxbuf[CPM_MAXBD]; |
119 | u32 txdma[CPM_MAXBD]; | 119 | dma_addr_t txdma[CPM_MAXBD]; |
120 | u32 rxdma[CPM_MAXBD]; | 120 | dma_addr_t rxdma[CPM_MAXBD]; |
121 | }; | 121 | }; |
122 | 122 | ||
123 | static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id) | 123 | static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id) |
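The i2c-cpm change above stores the per-buffer DMA handles as dma_addr_t instead of u32; on configurations where DMA addresses are wider than 32 bits, a u32 silently truncates the handle. A hedged sketch of the idiom, with the cpm_i2c_dummy names invented purely for illustration (it uses a streaming mapping only to show the type in use; the real driver may allocate its buffers differently):

#include <linux/dma-mapping.h>

struct cpm_i2c_dummy {
	u_char *txbuf;
	dma_addr_t txdma;	/* same width as the handle dma_map_single() returns */
};

static int cpm_i2c_dummy_map(struct device *dev, struct cpm_i2c_dummy *d, size_t len)
{
	d->txdma = dma_map_single(dev, d->txbuf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, d->txdma))
		return -ENOMEM;
	return 0;
}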
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c index b29c7500461a..f54ece8fce78 100644 --- a/drivers/i2c/busses/i2c-exynos5.c +++ b/drivers/i2c/busses/i2c-exynos5.c | |||
@@ -671,7 +671,9 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap, | |||
671 | return -EIO; | 671 | return -EIO; |
672 | } | 672 | } |
673 | 673 | ||
674 | clk_prepare_enable(i2c->clk); | 674 | ret = clk_enable(i2c->clk); |
675 | if (ret) | ||
676 | return ret; | ||
675 | 677 | ||
676 | for (i = 0; i < num; i++, msgs++) { | 678 | for (i = 0; i < num; i++, msgs++) { |
677 | stop = (i == num - 1); | 679 | stop = (i == num - 1); |
@@ -695,7 +697,7 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap, | |||
695 | } | 697 | } |
696 | 698 | ||
697 | out: | 699 | out: |
698 | clk_disable_unprepare(i2c->clk); | 700 | clk_disable(i2c->clk); |
699 | return ret; | 701 | return ret; |
700 | } | 702 | } |
701 | 703 | ||
@@ -747,7 +749,9 @@ static int exynos5_i2c_probe(struct platform_device *pdev) | |||
747 | return -ENOENT; | 749 | return -ENOENT; |
748 | } | 750 | } |
749 | 751 | ||
750 | clk_prepare_enable(i2c->clk); | 752 | ret = clk_prepare_enable(i2c->clk); |
753 | if (ret) | ||
754 | return ret; | ||
751 | 755 | ||
752 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 756 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
753 | i2c->regs = devm_ioremap_resource(&pdev->dev, mem); | 757 | i2c->regs = devm_ioremap_resource(&pdev->dev, mem); |
@@ -799,6 +803,10 @@ static int exynos5_i2c_probe(struct platform_device *pdev) | |||
799 | 803 | ||
800 | platform_set_drvdata(pdev, i2c); | 804 | platform_set_drvdata(pdev, i2c); |
801 | 805 | ||
806 | clk_disable(i2c->clk); | ||
807 | |||
808 | return 0; | ||
809 | |||
802 | err_clk: | 810 | err_clk: |
803 | clk_disable_unprepare(i2c->clk); | 811 | clk_disable_unprepare(i2c->clk); |
804 | return ret; | 812 | return ret; |
@@ -810,6 +818,8 @@ static int exynos5_i2c_remove(struct platform_device *pdev) | |||
810 | 818 | ||
811 | i2c_del_adapter(&i2c->adap); | 819 | i2c_del_adapter(&i2c->adap); |
812 | 820 | ||
821 | clk_unprepare(i2c->clk); | ||
822 | |||
813 | return 0; | 823 | return 0; |
814 | } | 824 | } |
815 | 825 | ||
@@ -821,6 +831,8 @@ static int exynos5_i2c_suspend_noirq(struct device *dev) | |||
821 | 831 | ||
822 | i2c->suspended = 1; | 832 | i2c->suspended = 1; |
823 | 833 | ||
834 | clk_unprepare(i2c->clk); | ||
835 | |||
824 | return 0; | 836 | return 0; |
825 | } | 837 | } |
826 | 838 | ||
@@ -830,7 +842,9 @@ static int exynos5_i2c_resume_noirq(struct device *dev) | |||
830 | struct exynos5_i2c *i2c = platform_get_drvdata(pdev); | 842 | struct exynos5_i2c *i2c = platform_get_drvdata(pdev); |
831 | int ret = 0; | 843 | int ret = 0; |
832 | 844 | ||
833 | clk_prepare_enable(i2c->clk); | 845 | ret = clk_prepare_enable(i2c->clk); |
846 | if (ret) | ||
847 | return ret; | ||
834 | 848 | ||
835 | ret = exynos5_hsi2c_clock_setup(i2c); | 849 | ret = exynos5_hsi2c_clock_setup(i2c); |
836 | if (ret) { | 850 | if (ret) { |
@@ -839,7 +853,7 @@ static int exynos5_i2c_resume_noirq(struct device *dev) | |||
839 | } | 853 | } |
840 | 854 | ||
841 | exynos5_i2c_init(i2c); | 855 | exynos5_i2c_init(i2c); |
842 | clk_disable_unprepare(i2c->clk); | 856 | clk_disable(i2c->clk); |
843 | i2c->suspended = 0; | 857 | i2c->suspended = 0; |
844 | 858 | ||
845 | return 0; | 859 | return 0; |
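The i2c-exynos5 hunks above stop calling clk_prepare_enable() around every transfer: the clock is prepared once in probe (and unprepared in remove/suspend), while each transfer only brackets itself with clk_enable()/clk_disable(), and the return values are now checked. The common clk framework splits these steps so the potentially sleeping prepare can be done once up front while enable/disable stay cheap. A hedged sketch of the resulting shape, not the full driver:

/* probe: prepare and enable once, then leave the clock prepared but gated */
ret = clk_prepare_enable(i2c->clk);
if (ret)
	return ret;
/* ... hardware setup ... */
clk_disable(i2c->clk);

/* xfer: ungate only for the duration of the transfer */
ret = clk_enable(i2c->clk);
if (ret)
	return ret;
/* ... perform the transfer ... */
clk_disable(i2c->clk);

/* remove (and suspend): finally drop the prepare count */
clk_unprepare(i2c->clk);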
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c index 7ba795b24e75..1c8707710098 100644 --- a/drivers/i2c/busses/i2c-ismt.c +++ b/drivers/i2c/busses/i2c-ismt.c | |||
@@ -75,6 +75,7 @@ | |||
75 | /* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */ | 75 | /* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */ |
76 | #define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59 | 76 | #define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59 |
77 | #define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a | 77 | #define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a |
78 | #define PCI_DEVICE_ID_INTEL_DNV_SMT 0x19ac | ||
78 | #define PCI_DEVICE_ID_INTEL_AVOTON_SMT 0x1f15 | 79 | #define PCI_DEVICE_ID_INTEL_AVOTON_SMT 0x1f15 |
79 | 80 | ||
80 | #define ISMT_DESC_ENTRIES 2 /* number of descriptor entries */ | 81 | #define ISMT_DESC_ENTRIES 2 /* number of descriptor entries */ |
@@ -180,6 +181,7 @@ struct ismt_priv { | |||
180 | static const struct pci_device_id ismt_ids[] = { | 181 | static const struct pci_device_id ismt_ids[] = { |
181 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) }, | 182 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) }, |
182 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) }, | 183 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) }, |
184 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMT) }, | ||
183 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) }, | 185 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) }, |
184 | { 0, } | 186 | { 0, } |
185 | }; | 187 | }; |
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c index 9096d17beb5b..3dcc5f3f26cb 100644 --- a/drivers/i2c/busses/i2c-rk3x.c +++ b/drivers/i2c/busses/i2c-rk3x.c | |||
@@ -855,6 +855,7 @@ static struct rk3x_i2c_soc_data soc_data[3] = { | |||
855 | static const struct of_device_id rk3x_i2c_match[] = { | 855 | static const struct of_device_id rk3x_i2c_match[] = { |
856 | { .compatible = "rockchip,rk3066-i2c", .data = (void *)&soc_data[0] }, | 856 | { .compatible = "rockchip,rk3066-i2c", .data = (void *)&soc_data[0] }, |
857 | { .compatible = "rockchip,rk3188-i2c", .data = (void *)&soc_data[1] }, | 857 | { .compatible = "rockchip,rk3188-i2c", .data = (void *)&soc_data[1] }, |
858 | { .compatible = "rockchip,rk3228-i2c", .data = (void *)&soc_data[2] }, | ||
858 | { .compatible = "rockchip,rk3288-i2c", .data = (void *)&soc_data[2] }, | 859 | { .compatible = "rockchip,rk3288-i2c", .data = (void *)&soc_data[2] }, |
859 | {}, | 860 | {}, |
860 | }; | 861 | }; |
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index cb00d59da456..c2e257d97eff 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c | |||
@@ -691,7 +691,8 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port, | |||
691 | NULL); | 691 | NULL); |
692 | 692 | ||
693 | /* Couldn't find default GID location */ | 693 | /* Couldn't find default GID location */ |
694 | WARN_ON(ix < 0); | 694 | if (WARN_ON(ix < 0)) |
695 | goto release; | ||
695 | 696 | ||
696 | zattr_type.gid_type = gid_type; | 697 | zattr_type.gid_type = gid_type; |
697 | 698 | ||
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c index 4a9aa0433b07..7713ef089c3c 100644 --- a/drivers/infiniband/core/ucm.c +++ b/drivers/infiniband/core/ucm.c | |||
@@ -48,6 +48,7 @@ | |||
48 | 48 | ||
49 | #include <asm/uaccess.h> | 49 | #include <asm/uaccess.h> |
50 | 50 | ||
51 | #include <rdma/ib.h> | ||
51 | #include <rdma/ib_cm.h> | 52 | #include <rdma/ib_cm.h> |
52 | #include <rdma/ib_user_cm.h> | 53 | #include <rdma/ib_user_cm.h> |
53 | #include <rdma/ib_marshall.h> | 54 | #include <rdma/ib_marshall.h> |
@@ -1103,6 +1104,9 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf, | |||
1103 | struct ib_ucm_cmd_hdr hdr; | 1104 | struct ib_ucm_cmd_hdr hdr; |
1104 | ssize_t result; | 1105 | ssize_t result; |
1105 | 1106 | ||
1107 | if (WARN_ON_ONCE(!ib_safe_file_access(filp))) | ||
1108 | return -EACCES; | ||
1109 | |||
1106 | if (len < sizeof(hdr)) | 1110 | if (len < sizeof(hdr)) |
1107 | return -EINVAL; | 1111 | return -EINVAL; |
1108 | 1112 | ||
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index dd3bcceadfde..c0f3826abb30 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c | |||
@@ -1574,6 +1574,9 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf, | |||
1574 | struct rdma_ucm_cmd_hdr hdr; | 1574 | struct rdma_ucm_cmd_hdr hdr; |
1575 | ssize_t ret; | 1575 | ssize_t ret; |
1576 | 1576 | ||
1577 | if (WARN_ON_ONCE(!ib_safe_file_access(filp))) | ||
1578 | return -EACCES; | ||
1579 | |||
1577 | if (len < sizeof(hdr)) | 1580 | if (len < sizeof(hdr)) |
1578 | return -EINVAL; | 1581 | return -EINVAL; |
1579 | 1582 | ||
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 28ba2cc81535..31f422a70623 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c | |||
@@ -48,6 +48,8 @@ | |||
48 | 48 | ||
49 | #include <asm/uaccess.h> | 49 | #include <asm/uaccess.h> |
50 | 50 | ||
51 | #include <rdma/ib.h> | ||
52 | |||
51 | #include "uverbs.h" | 53 | #include "uverbs.h" |
52 | 54 | ||
53 | MODULE_AUTHOR("Roland Dreier"); | 55 | MODULE_AUTHOR("Roland Dreier"); |
@@ -709,6 +711,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, | |||
709 | int srcu_key; | 711 | int srcu_key; |
710 | ssize_t ret; | 712 | ssize_t ret; |
711 | 713 | ||
714 | if (WARN_ON_ONCE(!ib_safe_file_access(filp))) | ||
715 | return -EACCES; | ||
716 | |||
712 | if (count < sizeof hdr) | 717 | if (count < sizeof hdr) |
713 | return -EINVAL; | 718 | return -EINVAL; |
714 | 719 | ||
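The ucm, ucma and uverbs write() handlers above all gain the same guard. These handlers parse the written buffer as a command header with embedded user pointers, so the check appears intended to reject writes reaching them through an unexpected context (for instance via splice()), where the caller's credentials or address limit may not match the process that opened the file. A hedged sketch of the pattern applied to a generic write handler, with every name other than ib_safe_file_access a placeholder:

#include <linux/fs.h>
#include <rdma/ib.h>

static ssize_t example_write(struct file *filp, const char __user *buf,
			     size_t len, loff_t *pos)
{
	/* refuse to parse commands for callers whose context we cannot trust */
	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
		return -EACCES;

	/* ... copy in the command header and dispatch as before ... */
	return len;
}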
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 15b8adbf39c0..b65b3541e732 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c | |||
@@ -1860,6 +1860,7 @@ EXPORT_SYMBOL(ib_drain_rq); | |||
1860 | void ib_drain_qp(struct ib_qp *qp) | 1860 | void ib_drain_qp(struct ib_qp *qp) |
1861 | { | 1861 | { |
1862 | ib_drain_sq(qp); | 1862 | ib_drain_sq(qp); |
1863 | ib_drain_rq(qp); | 1863 | if (!qp->srq) |
1864 | ib_drain_rq(qp); | ||
1864 | } | 1865 | } |
1865 | EXPORT_SYMBOL(ib_drain_qp); | 1866 | EXPORT_SYMBOL(ib_drain_qp); |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index 42a7b8952d13..3234a8be16f6 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c | |||
@@ -1390,6 +1390,8 @@ int iwch_register_device(struct iwch_dev *dev) | |||
1390 | dev->ibdev.iwcm->add_ref = iwch_qp_add_ref; | 1390 | dev->ibdev.iwcm->add_ref = iwch_qp_add_ref; |
1391 | dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref; | 1391 | dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref; |
1392 | dev->ibdev.iwcm->get_qp = iwch_get_qp; | 1392 | dev->ibdev.iwcm->get_qp = iwch_get_qp; |
1393 | memcpy(dev->ibdev.iwcm->ifname, dev->rdev.t3cdev_p->lldev->name, | ||
1394 | sizeof(dev->ibdev.iwcm->ifname)); | ||
1393 | 1395 | ||
1394 | ret = ib_register_device(&dev->ibdev, NULL); | 1396 | ret = ib_register_device(&dev->ibdev, NULL); |
1395 | if (ret) | 1397 | if (ret) |
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index b4eeb783573c..b0b955724458 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c | |||
@@ -162,7 +162,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, | |||
162 | cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS, | 162 | cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS, |
163 | &cq->bar2_qid, | 163 | &cq->bar2_qid, |
164 | user ? &cq->bar2_pa : NULL); | 164 | user ? &cq->bar2_pa : NULL); |
165 | if (user && !cq->bar2_va) { | 165 | if (user && !cq->bar2_pa) { |
166 | pr_warn(MOD "%s: cqid %u not in BAR2 range.\n", | 166 | pr_warn(MOD "%s: cqid %u not in BAR2 range.\n", |
167 | pci_name(rdev->lldi.pdev), cq->cqid); | 167 | pci_name(rdev->lldi.pdev), cq->cqid); |
168 | ret = -EINVAL; | 168 | ret = -EINVAL; |
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 124682dc5709..7574f394fdac 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c | |||
@@ -580,6 +580,8 @@ int c4iw_register_device(struct c4iw_dev *dev) | |||
580 | dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref; | 580 | dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref; |
581 | dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref; | 581 | dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref; |
582 | dev->ibdev.iwcm->get_qp = c4iw_get_qp; | 582 | dev->ibdev.iwcm->get_qp = c4iw_get_qp; |
583 | memcpy(dev->ibdev.iwcm->ifname, dev->rdev.lldi.ports[0]->name, | ||
584 | sizeof(dev->ibdev.iwcm->ifname)); | ||
583 | 585 | ||
584 | ret = ib_register_device(&dev->ibdev, NULL); | 586 | ret = ib_register_device(&dev->ibdev, NULL); |
585 | if (ret) | 587 | if (ret) |
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index e17fb5d5e033..e8993e49b8b3 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c | |||
@@ -185,6 +185,10 @@ void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid, | |||
185 | 185 | ||
186 | if (pbar2_pa) | 186 | if (pbar2_pa) |
187 | *pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK; | 187 | *pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK; |
188 | |||
189 | if (is_t4(rdev->lldi.adapter_type)) | ||
190 | return NULL; | ||
191 | |||
188 | return rdev->bar2_kva + bar2_qoffset; | 192 | return rdev->bar2_kva + bar2_qoffset; |
189 | } | 193 | } |
190 | 194 | ||
@@ -270,7 +274,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, | |||
270 | /* | 274 | /* |
271 | * User mode must have bar2 access. | 275 | * User mode must have bar2 access. |
272 | */ | 276 | */ |
273 | if (user && (!wq->sq.bar2_va || !wq->rq.bar2_va)) { | 277 | if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) { |
274 | pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n", | 278 | pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n", |
275 | pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid); | 279 | pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid); |
276 | goto free_dma; | 280 | goto free_dma; |
@@ -1895,13 +1899,27 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
1895 | void c4iw_drain_sq(struct ib_qp *ibqp) | 1899 | void c4iw_drain_sq(struct ib_qp *ibqp) |
1896 | { | 1900 | { |
1897 | struct c4iw_qp *qp = to_c4iw_qp(ibqp); | 1901 | struct c4iw_qp *qp = to_c4iw_qp(ibqp); |
1902 | unsigned long flag; | ||
1903 | bool need_to_wait; | ||
1898 | 1904 | ||
1899 | wait_for_completion(&qp->sq_drained); | 1905 | spin_lock_irqsave(&qp->lock, flag); |
1906 | need_to_wait = !t4_sq_empty(&qp->wq); | ||
1907 | spin_unlock_irqrestore(&qp->lock, flag); | ||
1908 | |||
1909 | if (need_to_wait) | ||
1910 | wait_for_completion(&qp->sq_drained); | ||
1900 | } | 1911 | } |
1901 | 1912 | ||
1902 | void c4iw_drain_rq(struct ib_qp *ibqp) | 1913 | void c4iw_drain_rq(struct ib_qp *ibqp) |
1903 | { | 1914 | { |
1904 | struct c4iw_qp *qp = to_c4iw_qp(ibqp); | 1915 | struct c4iw_qp *qp = to_c4iw_qp(ibqp); |
1916 | unsigned long flag; | ||
1917 | bool need_to_wait; | ||
1918 | |||
1919 | spin_lock_irqsave(&qp->lock, flag); | ||
1920 | need_to_wait = !t4_rq_empty(&qp->wq); | ||
1921 | spin_unlock_irqrestore(&qp->lock, flag); | ||
1905 | 1922 | ||
1906 | wait_for_completion(&qp->rq_drained); | 1923 | if (need_to_wait) |
1924 | wait_for_completion(&qp->rq_drained); | ||
1907 | } | 1925 | } |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 5acf346e048e..6ad0489cb3c5 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
@@ -530,7 +530,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, | |||
530 | sizeof(struct mlx5_wqe_ctrl_seg)) / | 530 | sizeof(struct mlx5_wqe_ctrl_seg)) / |
531 | sizeof(struct mlx5_wqe_data_seg); | 531 | sizeof(struct mlx5_wqe_data_seg); |
532 | props->max_sge = min(max_rq_sg, max_sq_sg); | 532 | props->max_sge = min(max_rq_sg, max_sq_sg); |
533 | props->max_sge_rd = props->max_sge; | 533 | props->max_sge_rd = MLX5_MAX_SGE_RD; |
534 | props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq); | 534 | props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq); |
535 | props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1; | 535 | props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1; |
536 | props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey); | 536 | props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey); |
@@ -671,8 +671,8 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port, | |||
671 | struct mlx5_ib_dev *dev = to_mdev(ibdev); | 671 | struct mlx5_ib_dev *dev = to_mdev(ibdev); |
672 | struct mlx5_core_dev *mdev = dev->mdev; | 672 | struct mlx5_core_dev *mdev = dev->mdev; |
673 | struct mlx5_hca_vport_context *rep; | 673 | struct mlx5_hca_vport_context *rep; |
674 | int max_mtu; | 674 | u16 max_mtu; |
675 | int oper_mtu; | 675 | u16 oper_mtu; |
676 | int err; | 676 | int err; |
677 | u8 ib_link_width_oper; | 677 | u8 ib_link_width_oper; |
678 | u8 vl_hw_cap; | 678 | u8 vl_hw_cap; |
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 3ea9e055fdd3..92914539edc7 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c | |||
@@ -500,9 +500,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
500 | * skb_shinfo(skb)->nr_frags, skb_is_gso(skb)); | 500 | * skb_shinfo(skb)->nr_frags, skb_is_gso(skb)); |
501 | */ | 501 | */ |
502 | 502 | ||
503 | if (!netif_carrier_ok(netdev)) | ||
504 | return NETDEV_TX_OK; | ||
505 | |||
506 | if (netif_queue_stopped(netdev)) | 503 | if (netif_queue_stopped(netdev)) |
507 | return NETDEV_TX_BUSY; | 504 | return NETDEV_TX_BUSY; |
508 | 505 | ||
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index e449e394963f..24f4a782e0f4 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c | |||
@@ -45,6 +45,8 @@ | |||
45 | #include <linux/export.h> | 45 | #include <linux/export.h> |
46 | #include <linux/uio.h> | 46 | #include <linux/uio.h> |
47 | 47 | ||
48 | #include <rdma/ib.h> | ||
49 | |||
48 | #include "qib.h" | 50 | #include "qib.h" |
49 | #include "qib_common.h" | 51 | #include "qib_common.h" |
50 | #include "qib_user_sdma.h" | 52 | #include "qib_user_sdma.h" |
@@ -2067,6 +2069,9 @@ static ssize_t qib_write(struct file *fp, const char __user *data, | |||
2067 | ssize_t ret = 0; | 2069 | ssize_t ret = 0; |
2068 | void *dest; | 2070 | void *dest; |
2069 | 2071 | ||
2072 | if (WARN_ON_ONCE(!ib_safe_file_access(fp))) | ||
2073 | return -EACCES; | ||
2074 | |||
2070 | if (count < sizeof(cmd.type)) { | 2075 | if (count < sizeof(cmd.type)) { |
2071 | ret = -EINVAL; | 2076 | ret = -EINVAL; |
2072 | goto bail; | 2077 | goto bail; |
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index bd82a6948dc8..a9e3bcc522c4 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c | |||
@@ -1637,9 +1637,9 @@ bail: | |||
1637 | spin_unlock_irqrestore(&qp->s_hlock, flags); | 1637 | spin_unlock_irqrestore(&qp->s_hlock, flags); |
1638 | if (nreq) { | 1638 | if (nreq) { |
1639 | if (call_send) | 1639 | if (call_send) |
1640 | rdi->driver_f.schedule_send_no_lock(qp); | ||
1641 | else | ||
1642 | rdi->driver_f.do_send(qp); | 1640 | rdi->driver_f.do_send(qp); |
1641 | else | ||
1642 | rdi->driver_f.schedule_send_no_lock(qp); | ||
1643 | } | 1643 | } |
1644 | return err; | 1644 | return err; |
1645 | } | 1645 | } |
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c index 53e33fab3f7a..df3581f60628 100644 --- a/drivers/input/misc/twl6040-vibra.c +++ b/drivers/input/misc/twl6040-vibra.c | |||
@@ -181,6 +181,14 @@ static void vibra_play_work(struct work_struct *work) | |||
181 | { | 181 | { |
182 | struct vibra_info *info = container_of(work, | 182 | struct vibra_info *info = container_of(work, |
183 | struct vibra_info, play_work); | 183 | struct vibra_info, play_work); |
184 | int ret; | ||
185 | |||
186 | /* Do not allow effect, while the routing is set to use audio */ | ||
187 | ret = twl6040_get_vibralr_status(info->twl6040); | ||
188 | if (ret & TWL6040_VIBSEL) { | ||
189 | dev_info(info->dev, "Vibra is configured for audio\n"); | ||
190 | return; | ||
191 | } | ||
184 | 192 | ||
185 | mutex_lock(&info->mutex); | 193 | mutex_lock(&info->mutex); |
186 | 194 | ||
@@ -199,14 +207,6 @@ static int vibra_play(struct input_dev *input, void *data, | |||
199 | struct ff_effect *effect) | 207 | struct ff_effect *effect) |
200 | { | 208 | { |
201 | struct vibra_info *info = input_get_drvdata(input); | 209 | struct vibra_info *info = input_get_drvdata(input); |
202 | int ret; | ||
203 | |||
204 | /* Do not allow effect, while the routing is set to use audio */ | ||
205 | ret = twl6040_get_vibralr_status(info->twl6040); | ||
206 | if (ret & TWL6040_VIBSEL) { | ||
207 | dev_info(&input->dev, "Vibra is configured for audio\n"); | ||
208 | return -EBUSY; | ||
209 | } | ||
210 | 210 | ||
211 | info->weak_speed = effect->u.rumble.weak_magnitude; | 211 | info->weak_speed = effect->u.rumble.weak_magnitude; |
212 | info->strong_speed = effect->u.rumble.strong_magnitude; | 212 | info->strong_speed = effect->u.rumble.strong_magnitude; |
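One plausible reading of this move, assuming the rationale is not stated elsewhere in the series: the force-feedback ->play() handler can be invoked in atomic context, while twl6040_get_vibralr_status() may sleep, so the routing check has to run from the work item instead. The fast path would then look roughly like this sketch (field names as in the driver, error handling trimmed):

    static int vibra_play(struct input_dev *input, void *data,
                          struct ff_effect *effect)
    {
            struct vibra_info *info = input_get_drvdata(input);

            /* cache the request only; anything that may sleep runs in play_work */
            info->weak_speed = effect->u.rumble.weak_magnitude;
            info->strong_speed = effect->u.rumble.strong_magnitude;
            schedule_work(&info->play_work);
            return 0;
    }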
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index 2160512e861a..5af7907d0af4 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c | |||
@@ -1093,6 +1093,19 @@ static int mxt_t6_command(struct mxt_data *data, u16 cmd_offset, | |||
1093 | return 0; | 1093 | return 0; |
1094 | } | 1094 | } |
1095 | 1095 | ||
1096 | static int mxt_acquire_irq(struct mxt_data *data) | ||
1097 | { | ||
1098 | int error; | ||
1099 | |||
1100 | enable_irq(data->irq); | ||
1101 | |||
1102 | error = mxt_process_messages_until_invalid(data); | ||
1103 | if (error) | ||
1104 | return error; | ||
1105 | |||
1106 | return 0; | ||
1107 | } | ||
1108 | |||
1096 | static int mxt_soft_reset(struct mxt_data *data) | 1109 | static int mxt_soft_reset(struct mxt_data *data) |
1097 | { | 1110 | { |
1098 | struct device *dev = &data->client->dev; | 1111 | struct device *dev = &data->client->dev; |
@@ -1111,7 +1124,7 @@ static int mxt_soft_reset(struct mxt_data *data) | |||
1111 | /* Ignore CHG line for 100ms after reset */ | 1124 | /* Ignore CHG line for 100ms after reset */ |
1112 | msleep(100); | 1125 | msleep(100); |
1113 | 1126 | ||
1114 | enable_irq(data->irq); | 1127 | mxt_acquire_irq(data); |
1115 | 1128 | ||
1116 | ret = mxt_wait_for_completion(data, &data->reset_completion, | 1129 | ret = mxt_wait_for_completion(data, &data->reset_completion, |
1117 | MXT_RESET_TIMEOUT); | 1130 | MXT_RESET_TIMEOUT); |
@@ -1466,19 +1479,6 @@ release_mem: | |||
1466 | return ret; | 1479 | return ret; |
1467 | } | 1480 | } |
1468 | 1481 | ||
1469 | static int mxt_acquire_irq(struct mxt_data *data) | ||
1470 | { | ||
1471 | int error; | ||
1472 | |||
1473 | enable_irq(data->irq); | ||
1474 | |||
1475 | error = mxt_process_messages_until_invalid(data); | ||
1476 | if (error) | ||
1477 | return error; | ||
1478 | |||
1479 | return 0; | ||
1480 | } | ||
1481 | |||
1482 | static int mxt_get_info(struct mxt_data *data) | 1482 | static int mxt_get_info(struct mxt_data *data) |
1483 | { | 1483 | { |
1484 | struct i2c_client *client = data->client; | 1484 | struct i2c_client *client = data->client; |
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c index 9bbadaaf6bc3..7b3845aa5983 100644 --- a/drivers/input/touchscreen/zforce_ts.c +++ b/drivers/input/touchscreen/zforce_ts.c | |||
@@ -370,8 +370,8 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload) | |||
370 | point.coord_x = point.coord_y = 0; | 370 | point.coord_x = point.coord_y = 0; |
371 | } | 371 | } |
372 | 372 | ||
373 | point.state = payload[9 * i + 5] & 0x03; | 373 | point.state = payload[9 * i + 5] & 0x0f; |
374 | point.id = (payload[9 * i + 5] & 0xfc) >> 2; | 374 | point.id = (payload[9 * i + 5] & 0xf0) >> 4; |
375 | 375 | ||
376 | /* determine touch major, minor and orientation */ | 376 | /* determine touch major, minor and orientation */ |
377 | point.area_major = max(payload[9 * i + 6], | 377 | point.area_major = max(payload[9 * i + 6], |
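The new masks treat payload[9*i+5] as two nibbles -- touch state in the low four bits, contact id in the high four -- instead of the old 2-bit/6-bit split. A tiny standalone illustration of the new decode, using a made-up byte value:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint8_t b = 0x23;                 /* example payload[9*i + 5] */
            unsigned state = b & 0x0f;        /* low nibble  -> 3 */
            unsigned id = (b & 0xf0) >> 4;    /* high nibble -> 2 */

            printf("state=%u id=%u\n", state, id);
            return 0;
    }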
diff --git a/drivers/md/md.c b/drivers/md/md.c index 194580fba7fd..14d3b37944df 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -284,6 +284,8 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) | |||
284 | * go away inside make_request | 284 | * go away inside make_request |
285 | */ | 285 | */ |
286 | sectors = bio_sectors(bio); | 286 | sectors = bio_sectors(bio); |
287 | /* bio could be mergeable after passing to underlayer */ | ||
288 | bio->bi_rw &= ~REQ_NOMERGE; | ||
287 | mddev->pers->make_request(mddev, bio); | 289 | mddev->pers->make_request(mddev, bio); |
288 | 290 | ||
289 | cpu = part_stat_lock(); | 291 | cpu = part_stat_lock(); |
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 2ea12c6bf659..34783a3c8b3c 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c | |||
@@ -70,7 +70,6 @@ static void dump_zones(struct mddev *mddev) | |||
70 | (unsigned long long)zone_size>>1); | 70 | (unsigned long long)zone_size>>1); |
71 | zone_start = conf->strip_zone[j].zone_end; | 71 | zone_start = conf->strip_zone[j].zone_end; |
72 | } | 72 | } |
73 | printk(KERN_INFO "\n"); | ||
74 | } | 73 | } |
75 | 74 | ||
76 | static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) | 75 | static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) |
@@ -85,6 +84,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) | |||
85 | struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL); | 84 | struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL); |
86 | unsigned short blksize = 512; | 85 | unsigned short blksize = 512; |
87 | 86 | ||
87 | *private_conf = ERR_PTR(-ENOMEM); | ||
88 | if (!conf) | 88 | if (!conf) |
89 | return -ENOMEM; | 89 | return -ENOMEM; |
90 | rdev_for_each(rdev1, mddev) { | 90 | rdev_for_each(rdev1, mddev) { |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 8ab8b65e1741..e48c262ce032 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -3502,8 +3502,6 @@ returnbi: | |||
3502 | dev = &sh->dev[i]; | 3502 | dev = &sh->dev[i]; |
3503 | } else if (test_bit(R5_Discard, &dev->flags)) | 3503 | } else if (test_bit(R5_Discard, &dev->flags)) |
3504 | discard_pending = 1; | 3504 | discard_pending = 1; |
3505 | WARN_ON(test_bit(R5_SkipCopy, &dev->flags)); | ||
3506 | WARN_ON(dev->page != dev->orig_page); | ||
3507 | } | 3505 | } |
3508 | 3506 | ||
3509 | r5l_stripe_write_finished(sh); | 3507 | r5l_stripe_write_finished(sh); |
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c index 12f5ebbd0436..ad2f3d27b266 100644 --- a/drivers/media/usb/usbvision/usbvision-video.c +++ b/drivers/media/usb/usbvision/usbvision-video.c | |||
@@ -1452,13 +1452,6 @@ static int usbvision_probe(struct usb_interface *intf, | |||
1452 | printk(KERN_INFO "%s: %s found\n", __func__, | 1452 | printk(KERN_INFO "%s: %s found\n", __func__, |
1453 | usbvision_device_data[model].model_string); | 1453 | usbvision_device_data[model].model_string); |
1454 | 1454 | ||
1455 | /* | ||
1456 | * this is a security check. | ||
1457 | * an exploit using an incorrect bInterfaceNumber is known | ||
1458 | */ | ||
1459 | if (ifnum >= USB_MAXINTERFACES || !dev->actconfig->interface[ifnum]) | ||
1460 | return -ENODEV; | ||
1461 | |||
1462 | if (usbvision_device_data[model].interface >= 0) | 1455 | if (usbvision_device_data[model].interface >= 0) |
1463 | interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0]; | 1456 | interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0]; |
1464 | else if (ifnum < dev->actconfig->desc.bNumInterfaces) | 1457 | else if (ifnum < dev->actconfig->desc.bNumInterfaces) |
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index 5d016f496e0e..9fbcb67a9ee6 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c | |||
@@ -1645,7 +1645,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking) | |||
1645 | * Will sleep if required for nonblocking == false. | 1645 | * Will sleep if required for nonblocking == false. |
1646 | */ | 1646 | */ |
1647 | static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb, | 1647 | static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb, |
1648 | int nonblocking) | 1648 | void *pb, int nonblocking) |
1649 | { | 1649 | { |
1650 | unsigned long flags; | 1650 | unsigned long flags; |
1651 | int ret; | 1651 | int ret; |
@@ -1666,10 +1666,10 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb, | |||
1666 | /* | 1666 | /* |
1667 | * Only remove the buffer from done_list if v4l2_buffer can handle all | 1667 | * Only remove the buffer from done_list if v4l2_buffer can handle all |
1668 | * the planes. | 1668 | * the planes. |
1669 | * Verifying planes is NOT necessary since it already has been checked | ||
1670 | * before the buffer is queued/prepared. So it can never fail. | ||
1671 | */ | 1669 | */ |
1672 | list_del(&(*vb)->done_entry); | 1670 | ret = call_bufop(q, verify_planes_array, *vb, pb); |
1671 | if (!ret) | ||
1672 | list_del(&(*vb)->done_entry); | ||
1673 | spin_unlock_irqrestore(&q->done_lock, flags); | 1673 | spin_unlock_irqrestore(&q->done_lock, flags); |
1674 | 1674 | ||
1675 | return ret; | 1675 | return ret; |
@@ -1748,7 +1748,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb, | |||
1748 | struct vb2_buffer *vb = NULL; | 1748 | struct vb2_buffer *vb = NULL; |
1749 | int ret; | 1749 | int ret; |
1750 | 1750 | ||
1751 | ret = __vb2_get_done_vb(q, &vb, nonblocking); | 1751 | ret = __vb2_get_done_vb(q, &vb, pb, nonblocking); |
1752 | if (ret < 0) | 1752 | if (ret < 0) |
1753 | return ret; | 1753 | return ret; |
1754 | 1754 | ||
@@ -2298,6 +2298,16 @@ unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file, | |||
2298 | return POLLERR; | 2298 | return POLLERR; |
2299 | 2299 | ||
2300 | /* | 2300 | /* |
2301 | * If this quirk is set and QBUF hasn't been called yet then | ||
2302 | * return POLLERR as well. This only affects capture queues, output | ||
2303 | * queues will always initialize waiting_for_buffers to false. | ||
2304 | * This quirk is set by V4L2 for backwards compatibility reasons. | ||
2305 | */ | ||
2306 | if (q->quirk_poll_must_check_waiting_for_buffers && | ||
2307 | q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM))) | ||
2308 | return POLLERR; | ||
2309 | |||
2310 | /* | ||
2301 | * For output streams you can call write() as long as there are fewer | 2311 | * For output streams you can call write() as long as there are fewer |
2302 | * buffers queued than there are buffers available. | 2312 | * buffers queued than there are buffers available. |
2303 | */ | 2313 | */ |
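Calling verify_planes_array here is safe even for users that do not provide the op because of the call_bufop() convention: the op is optional and a missing op counts as success. A sketch of that convention, assuming the usual vb2 helper-macro shape rather than quoting it:

    /* sketch of the assumed call_bufop() semantics */
    #define call_bufop(q, op, args...)                              \
    ({                                                              \
            int __ret = 0;                                          \
            if ((q) && (q)->buf_ops && (q)->buf_ops->op)            \
                    __ret = (q)->buf_ops->op(args);                 \
            __ret;                                                  \
    })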
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c index dbec5923fcf0..3c3b517f1d1c 100644 --- a/drivers/media/v4l2-core/videobuf2-memops.c +++ b/drivers/media/v4l2-core/videobuf2-memops.c | |||
@@ -49,7 +49,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start, | |||
49 | vec = frame_vector_create(nr); | 49 | vec = frame_vector_create(nr); |
50 | if (!vec) | 50 | if (!vec) |
51 | return ERR_PTR(-ENOMEM); | 51 | return ERR_PTR(-ENOMEM); |
52 | ret = get_vaddr_frames(start, nr, write, 1, vec); | 52 | ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec); |
53 | if (ret < 0) | 53 | if (ret < 0) |
54 | goto out_destroy; | 54 | goto out_destroy; |
55 | /* We accept only complete set of PFNs */ | 55 | /* We accept only complete set of PFNs */ |
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c index 91f552124050..7f366f1b0377 100644 --- a/drivers/media/v4l2-core/videobuf2-v4l2.c +++ b/drivers/media/v4l2-core/videobuf2-v4l2.c | |||
@@ -74,6 +74,11 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer | |||
74 | return 0; | 74 | return 0; |
75 | } | 75 | } |
76 | 76 | ||
77 | static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb) | ||
78 | { | ||
79 | return __verify_planes_array(vb, pb); | ||
80 | } | ||
81 | |||
77 | /** | 82 | /** |
78 | * __verify_length() - Verify that the bytesused value for each plane fits in | 83 | * __verify_length() - Verify that the bytesused value for each plane fits in |
79 | * the plane length and that the data offset doesn't exceed the bytesused value. | 84 | * the plane length and that the data offset doesn't exceed the bytesused value. |
@@ -437,6 +442,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb, | |||
437 | } | 442 | } |
438 | 443 | ||
439 | static const struct vb2_buf_ops v4l2_buf_ops = { | 444 | static const struct vb2_buf_ops v4l2_buf_ops = { |
445 | .verify_planes_array = __verify_planes_array_core, | ||
440 | .fill_user_buffer = __fill_v4l2_buffer, | 446 | .fill_user_buffer = __fill_v4l2_buffer, |
441 | .fill_vb2_buffer = __fill_vb2_buffer, | 447 | .fill_vb2_buffer = __fill_vb2_buffer, |
442 | .copy_timestamp = __copy_timestamp, | 448 | .copy_timestamp = __copy_timestamp, |
@@ -765,6 +771,12 @@ int vb2_queue_init(struct vb2_queue *q) | |||
765 | q->is_output = V4L2_TYPE_IS_OUTPUT(q->type); | 771 | q->is_output = V4L2_TYPE_IS_OUTPUT(q->type); |
766 | q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) | 772 | q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) |
767 | == V4L2_BUF_FLAG_TIMESTAMP_COPY; | 773 | == V4L2_BUF_FLAG_TIMESTAMP_COPY; |
774 | /* | ||
775 | * For compatibility with vb1: if QBUF hasn't been called yet, then | ||
776 | * return POLLERR as well. This only affects capture queues, output | ||
777 | * queues will always initialize waiting_for_buffers to false. | ||
778 | */ | ||
779 | q->quirk_poll_must_check_waiting_for_buffers = true; | ||
768 | 780 | ||
769 | return vb2_core_queue_init(q); | 781 | return vb2_core_queue_init(q); |
770 | } | 782 | } |
@@ -818,14 +830,6 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait) | |||
818 | poll_wait(file, &fh->wait, wait); | 830 | poll_wait(file, &fh->wait, wait); |
819 | } | 831 | } |
820 | 832 | ||
821 | /* | ||
822 | * For compatibility with vb1: if QBUF hasn't been called yet, then | ||
823 | * return POLLERR as well. This only affects capture queues, output | ||
824 | * queues will always initialize waiting_for_buffers to false. | ||
825 | */ | ||
826 | if (q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM))) | ||
827 | return POLLERR; | ||
828 | |||
829 | return res | vb2_core_poll(q, file, wait); | 833 | return res | vb2_core_poll(q, file, wait); |
830 | } | 834 | } |
831 | EXPORT_SYMBOL_GPL(vb2_poll); | 835 | EXPORT_SYMBOL_GPL(vb2_poll); |
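The quirk exists for old applications that poll() a capture queue before queuing any buffers and expect an immediate POLLERR rather than an indefinite block; with the check moved into the core, only queues initialised through vb2_queue_init() (i.e. V4L2 users) keep that behaviour. A small user-space illustration of the expectation being preserved (device path hypothetical):

    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>

    int main(void)
    {
            int fd = open("/dev/video0", O_RDWR);   /* hypothetical capture node */
            struct pollfd pfd = { .fd = fd, .events = POLLIN };

            /* no VIDIOC_REQBUFS/QBUF issued: legacy behaviour is an
             * immediate POLLERR instead of blocking forever */
            int ret = poll(&pfd, 1, 1000);
            printf("poll() = %d, revents = 0x%x\n", ret, pfd.revents);
            return 0;
    }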
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c index 10370f280500..7edea9c19199 100644 --- a/drivers/misc/cxl/context.c +++ b/drivers/misc/cxl/context.c | |||
@@ -223,6 +223,13 @@ int __detach_context(struct cxl_context *ctx) | |||
223 | cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)); | 223 | cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)); |
224 | flush_work(&ctx->fault_work); /* Only needed for dedicated process */ | 224 | flush_work(&ctx->fault_work); /* Only needed for dedicated process */ |
225 | 225 | ||
226 | /* | ||
227 | * Wait until no further interrupts are presented by the PSL | ||
228 | * for this context. | ||
229 | */ | ||
230 | if (cxl_ops->irq_wait) | ||
231 | cxl_ops->irq_wait(ctx); | ||
232 | |||
226 | /* release the reference to the group leader and mm handling pid */ | 233 | /* release the reference to the group leader and mm handling pid */ |
227 | put_pid(ctx->pid); | 234 | put_pid(ctx->pid); |
228 | put_pid(ctx->glpid); | 235 | put_pid(ctx->glpid); |
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index 38e21cf7806e..73dc2a33da74 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h | |||
@@ -274,6 +274,7 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0}; | |||
274 | #define CXL_PSL_DSISR_An_PE (1ull << (63-4)) /* PSL Error (implementation specific) */ | 274 | #define CXL_PSL_DSISR_An_PE (1ull << (63-4)) /* PSL Error (implementation specific) */ |
275 | #define CXL_PSL_DSISR_An_AE (1ull << (63-5)) /* AFU Error */ | 275 | #define CXL_PSL_DSISR_An_AE (1ull << (63-5)) /* AFU Error */ |
276 | #define CXL_PSL_DSISR_An_OC (1ull << (63-6)) /* OS Context Warning */ | 276 | #define CXL_PSL_DSISR_An_OC (1ull << (63-6)) /* OS Context Warning */ |
277 | #define CXL_PSL_DSISR_PENDING (CXL_PSL_DSISR_TRANS | CXL_PSL_DSISR_An_PE | CXL_PSL_DSISR_An_AE | CXL_PSL_DSISR_An_OC) | ||
277 | /* NOTE: Bits 32:63 are undefined if DSISR[DS] = 1 */ | 278 | /* NOTE: Bits 32:63 are undefined if DSISR[DS] = 1 */ |
278 | #define CXL_PSL_DSISR_An_M DSISR_NOHPTE /* PTE not found */ | 279 | #define CXL_PSL_DSISR_An_M DSISR_NOHPTE /* PTE not found */ |
279 | #define CXL_PSL_DSISR_An_P DSISR_PROTFAULT /* Storage protection violation */ | 280 | #define CXL_PSL_DSISR_An_P DSISR_PROTFAULT /* Storage protection violation */ |
@@ -855,6 +856,7 @@ struct cxl_backend_ops { | |||
855 | u64 dsisr, u64 errstat); | 856 | u64 dsisr, u64 errstat); |
856 | irqreturn_t (*psl_interrupt)(int irq, void *data); | 857 | irqreturn_t (*psl_interrupt)(int irq, void *data); |
857 | int (*ack_irq)(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask); | 858 | int (*ack_irq)(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask); |
859 | void (*irq_wait)(struct cxl_context *ctx); | ||
858 | int (*attach_process)(struct cxl_context *ctx, bool kernel, | 860 | int (*attach_process)(struct cxl_context *ctx, bool kernel, |
859 | u64 wed, u64 amr); | 861 | u64 wed, u64 amr); |
860 | int (*detach_process)(struct cxl_context *ctx); | 862 | int (*detach_process)(struct cxl_context *ctx); |
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c index be646dc41a2c..8def4553acba 100644 --- a/drivers/misc/cxl/irq.c +++ b/drivers/misc/cxl/irq.c | |||
@@ -203,7 +203,6 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq, | |||
203 | void cxl_unmap_irq(unsigned int virq, void *cookie) | 203 | void cxl_unmap_irq(unsigned int virq, void *cookie) |
204 | { | 204 | { |
205 | free_irq(virq, cookie); | 205 | free_irq(virq, cookie); |
206 | irq_dispose_mapping(virq); | ||
207 | } | 206 | } |
208 | 207 | ||
209 | int cxl_register_one_irq(struct cxl *adapter, | 208 | int cxl_register_one_irq(struct cxl *adapter, |
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c index 387fcbdf9793..ecf7557cd657 100644 --- a/drivers/misc/cxl/native.c +++ b/drivers/misc/cxl/native.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/mutex.h> | 14 | #include <linux/mutex.h> |
15 | #include <linux/mm.h> | 15 | #include <linux/mm.h> |
16 | #include <linux/uaccess.h> | 16 | #include <linux/uaccess.h> |
17 | #include <linux/delay.h> | ||
17 | #include <asm/synch.h> | 18 | #include <asm/synch.h> |
18 | #include <misc/cxl-base.h> | 19 | #include <misc/cxl-base.h> |
19 | 20 | ||
@@ -797,6 +798,35 @@ static irqreturn_t native_irq_multiplexed(int irq, void *data) | |||
797 | return fail_psl_irq(afu, &irq_info); | 798 | return fail_psl_irq(afu, &irq_info); |
798 | } | 799 | } |
799 | 800 | ||
801 | void native_irq_wait(struct cxl_context *ctx) | ||
802 | { | ||
803 | u64 dsisr; | ||
804 | int timeout = 1000; | ||
805 | int ph; | ||
806 | |||
807 | /* | ||
808 | * Wait until no further interrupts are presented by the PSL | ||
809 | * for this context. | ||
810 | */ | ||
811 | while (timeout--) { | ||
812 | ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff; | ||
813 | if (ph != ctx->pe) | ||
814 | return; | ||
815 | dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An); | ||
816 | if ((dsisr & CXL_PSL_DSISR_PENDING) == 0) | ||
817 | return; | ||
818 | /* | ||
819 | * We are waiting for the workqueue to process our | ||
820 | * irq, so need to let that run here. | ||
821 | */ | ||
822 | msleep(1); | ||
823 | } | ||
824 | |||
825 | dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i" | ||
826 | " DSISR %016llx!\n", ph, dsisr); | ||
827 | return; | ||
828 | } | ||
829 | |||
800 | static irqreturn_t native_slice_irq_err(int irq, void *data) | 830 | static irqreturn_t native_slice_irq_err(int irq, void *data) |
801 | { | 831 | { |
802 | struct cxl_afu *afu = data; | 832 | struct cxl_afu *afu = data; |
@@ -1076,6 +1106,7 @@ const struct cxl_backend_ops cxl_native_ops = { | |||
1076 | .handle_psl_slice_error = native_handle_psl_slice_error, | 1106 | .handle_psl_slice_error = native_handle_psl_slice_error, |
1077 | .psl_interrupt = NULL, | 1107 | .psl_interrupt = NULL, |
1078 | .ack_irq = native_ack_irq, | 1108 | .ack_irq = native_ack_irq, |
1109 | .irq_wait = native_irq_wait, | ||
1079 | .attach_process = native_attach_process, | 1110 | .attach_process = native_attach_process, |
1080 | .detach_process = native_detach_process, | 1111 | .detach_process = native_detach_process, |
1081 | .support_attributes = native_support_attributes, | 1112 | .support_attributes = native_support_attributes, |
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 04feea8354cb..e657af0e95fa 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig | |||
@@ -97,6 +97,7 @@ config MMC_RICOH_MMC | |||
97 | config MMC_SDHCI_ACPI | 97 | config MMC_SDHCI_ACPI |
98 | tristate "SDHCI support for ACPI enumerated SDHCI controllers" | 98 | tristate "SDHCI support for ACPI enumerated SDHCI controllers" |
99 | depends on MMC_SDHCI && ACPI | 99 | depends on MMC_SDHCI && ACPI |
100 | select IOSF_MBI if X86 | ||
100 | help | 101 | help |
101 | This selects support for ACPI enumerated SDHCI controllers, | 102 | This selects support for ACPI enumerated SDHCI controllers, |
102 | identified by ACPI Compatibility ID PNP0D40 or specific | 103 | identified by ACPI Compatibility ID PNP0D40 or specific |
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index 6839e41c6d58..bed6a494f52c 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c | |||
@@ -41,6 +41,11 @@ | |||
41 | #include <linux/mmc/pm.h> | 41 | #include <linux/mmc/pm.h> |
42 | #include <linux/mmc/slot-gpio.h> | 42 | #include <linux/mmc/slot-gpio.h> |
43 | 43 | ||
44 | #ifdef CONFIG_X86 | ||
45 | #include <asm/cpu_device_id.h> | ||
46 | #include <asm/iosf_mbi.h> | ||
47 | #endif | ||
48 | |||
44 | #include "sdhci.h" | 49 | #include "sdhci.h" |
45 | 50 | ||
46 | enum { | 51 | enum { |
@@ -116,6 +121,75 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = { | |||
116 | .ops = &sdhci_acpi_ops_int, | 121 | .ops = &sdhci_acpi_ops_int, |
117 | }; | 122 | }; |
118 | 123 | ||
124 | #ifdef CONFIG_X86 | ||
125 | |||
126 | static bool sdhci_acpi_byt(void) | ||
127 | { | ||
128 | static const struct x86_cpu_id byt[] = { | ||
129 | { X86_VENDOR_INTEL, 6, 0x37 }, | ||
130 | {} | ||
131 | }; | ||
132 | |||
133 | return x86_match_cpu(byt); | ||
134 | } | ||
135 | |||
136 | #define BYT_IOSF_SCCEP 0x63 | ||
137 | #define BYT_IOSF_OCP_NETCTRL0 0x1078 | ||
138 | #define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8) | ||
139 | |||
140 | static void sdhci_acpi_byt_setting(struct device *dev) | ||
141 | { | ||
142 | u32 val = 0; | ||
143 | |||
144 | if (!sdhci_acpi_byt()) | ||
145 | return; | ||
146 | |||
147 | if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0, | ||
148 | &val)) { | ||
149 | dev_err(dev, "%s read error\n", __func__); | ||
150 | return; | ||
151 | } | ||
152 | |||
153 | if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE)) | ||
154 | return; | ||
155 | |||
156 | val &= ~BYT_IOSF_OCP_TIMEOUT_BASE; | ||
157 | |||
158 | if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0, | ||
159 | val)) { | ||
160 | dev_err(dev, "%s write error\n", __func__); | ||
161 | return; | ||
162 | } | ||
163 | |||
164 | dev_dbg(dev, "%s completed\n", __func__); | ||
165 | } | ||
166 | |||
167 | static bool sdhci_acpi_byt_defer(struct device *dev) | ||
168 | { | ||
169 | if (!sdhci_acpi_byt()) | ||
170 | return false; | ||
171 | |||
172 | if (!iosf_mbi_available()) | ||
173 | return true; | ||
174 | |||
175 | sdhci_acpi_byt_setting(dev); | ||
176 | |||
177 | return false; | ||
178 | } | ||
179 | |||
180 | #else | ||
181 | |||
182 | static inline void sdhci_acpi_byt_setting(struct device *dev) | ||
183 | { | ||
184 | } | ||
185 | |||
186 | static inline bool sdhci_acpi_byt_defer(struct device *dev) | ||
187 | { | ||
188 | return false; | ||
189 | } | ||
190 | |||
191 | #endif | ||
192 | |||
119 | static int bxt_get_cd(struct mmc_host *mmc) | 193 | static int bxt_get_cd(struct mmc_host *mmc) |
120 | { | 194 | { |
121 | int gpio_cd = mmc_gpio_get_cd(mmc); | 195 | int gpio_cd = mmc_gpio_get_cd(mmc); |
@@ -322,6 +396,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev) | |||
322 | if (acpi_bus_get_status(device) || !device->status.present) | 396 | if (acpi_bus_get_status(device) || !device->status.present) |
323 | return -ENODEV; | 397 | return -ENODEV; |
324 | 398 | ||
399 | if (sdhci_acpi_byt_defer(dev)) | ||
400 | return -EPROBE_DEFER; | ||
401 | |||
325 | hid = acpi_device_hid(device); | 402 | hid = acpi_device_hid(device); |
326 | uid = device->pnp.unique_id; | 403 | uid = device->pnp.unique_id; |
327 | 404 | ||
@@ -447,6 +524,8 @@ static int sdhci_acpi_resume(struct device *dev) | |||
447 | { | 524 | { |
448 | struct sdhci_acpi_host *c = dev_get_drvdata(dev); | 525 | struct sdhci_acpi_host *c = dev_get_drvdata(dev); |
449 | 526 | ||
527 | sdhci_acpi_byt_setting(&c->pdev->dev); | ||
528 | |||
450 | return sdhci_resume_host(c->host); | 529 | return sdhci_resume_host(c->host); |
451 | } | 530 | } |
452 | 531 | ||
@@ -470,6 +549,8 @@ static int sdhci_acpi_runtime_resume(struct device *dev) | |||
470 | { | 549 | { |
471 | struct sdhci_acpi_host *c = dev_get_drvdata(dev); | 550 | struct sdhci_acpi_host *c = dev_get_drvdata(dev); |
472 | 551 | ||
552 | sdhci_acpi_byt_setting(&c->pdev->dev); | ||
553 | |||
473 | return sdhci_runtime_resume_host(c->host); | 554 | return sdhci_runtime_resume_host(c->host); |
474 | } | 555 | } |
475 | 556 | ||
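Two patterns are worth noting in this hunk set, stated as a reading of the code rather than from a changelog: probe returns -EPROBE_DEFER when the IOSF MBI provider has not loaded yet, so the driver core retries the probe later; and the OCP timeout fix-up is re-applied on the resume paths, presumably because firmware can restore the problematic value across suspend. The deferral contract in isolation:

    /* sketch: defer probing until a late dependency is available */
    static int sdhci_acpi_probe(struct platform_device *pdev)
    {
            if (sdhci_acpi_byt_defer(&pdev->dev))
                    return -EPROBE_DEFER;   /* core will call probe again later */

            /* ... normal probe continues ... */
            return 0;
    }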
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index 8372a413848c..7fc8b7aa83f0 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c | |||
@@ -1129,6 +1129,11 @@ static int sunxi_mmc_probe(struct platform_device *pdev) | |||
1129 | MMC_CAP_1_8V_DDR | | 1129 | MMC_CAP_1_8V_DDR | |
1130 | MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ; | 1130 | MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ; |
1131 | 1131 | ||
1132 | /* TODO MMC DDR is not working on A80 */ | ||
1133 | if (of_device_is_compatible(pdev->dev.of_node, | ||
1134 | "allwinner,sun9i-a80-mmc")) | ||
1135 | mmc->caps &= ~MMC_CAP_1_8V_DDR; | ||
1136 | |||
1132 | ret = mmc_of_parse(mmc); | 1137 | ret = mmc_of_parse(mmc); |
1133 | if (ret) | 1138 | if (ret) |
1134 | goto error_free_dma; | 1139 | goto error_free_dma; |
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index a24c18eee598..befd67df08e1 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -62,9 +62,8 @@ config DUMMY | |||
62 | this device is consigned into oblivion) with a configurable IP | 62 | this device is consigned into oblivion) with a configurable IP |
63 | address. It is most commonly used in order to make your currently | 63 | address. It is most commonly used in order to make your currently |
64 | inactive SLIP address seem like a real address for local programs. | 64 | inactive SLIP address seem like a real address for local programs. |
65 | If you use SLIP or PPP, you might want to say Y here. Since this | 65 | If you use SLIP or PPP, you might want to say Y here. It won't |
66 | thing often comes in handy, the default is Y. It won't enlarge your | 66 | enlarge your kernel. What a deal. Read about it in the Network |
67 | kernel either. What a deal. Read about it in the Network | ||
68 | Administrator's Guide, available from | 67 | Administrator's Guide, available from |
69 | <http://www.tldp.org/docs.html#guide>. | 68 | <http://www.tldp.org/docs.html#guide>. |
70 | 69 | ||
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index a2904029cccc..5e572b3510b9 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c | |||
@@ -2181,7 +2181,7 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, | |||
2181 | struct net_device *bridge) | 2181 | struct net_device *bridge) |
2182 | { | 2182 | { |
2183 | struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); | 2183 | struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); |
2184 | int i, err; | 2184 | int i, err = 0; |
2185 | 2185 | ||
2186 | mutex_lock(&ps->smi_mutex); | 2186 | mutex_lock(&ps->smi_mutex); |
2187 | 2187 | ||
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 12a009d720cd..72eb29ed0359 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
@@ -581,12 +581,30 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp, | |||
581 | struct page *page; | 581 | struct page *page; |
582 | dma_addr_t mapping; | 582 | dma_addr_t mapping; |
583 | u16 sw_prod = rxr->rx_sw_agg_prod; | 583 | u16 sw_prod = rxr->rx_sw_agg_prod; |
584 | unsigned int offset = 0; | ||
584 | 585 | ||
585 | page = alloc_page(gfp); | 586 | if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) { |
586 | if (!page) | 587 | page = rxr->rx_page; |
587 | return -ENOMEM; | 588 | if (!page) { |
589 | page = alloc_page(gfp); | ||
590 | if (!page) | ||
591 | return -ENOMEM; | ||
592 | rxr->rx_page = page; | ||
593 | rxr->rx_page_offset = 0; | ||
594 | } | ||
595 | offset = rxr->rx_page_offset; | ||
596 | rxr->rx_page_offset += BNXT_RX_PAGE_SIZE; | ||
597 | if (rxr->rx_page_offset == PAGE_SIZE) | ||
598 | rxr->rx_page = NULL; | ||
599 | else | ||
600 | get_page(page); | ||
601 | } else { | ||
602 | page = alloc_page(gfp); | ||
603 | if (!page) | ||
604 | return -ENOMEM; | ||
605 | } | ||
588 | 606 | ||
589 | mapping = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE, | 607 | mapping = dma_map_page(&pdev->dev, page, offset, BNXT_RX_PAGE_SIZE, |
590 | PCI_DMA_FROMDEVICE); | 608 | PCI_DMA_FROMDEVICE); |
591 | if (dma_mapping_error(&pdev->dev, mapping)) { | 609 | if (dma_mapping_error(&pdev->dev, mapping)) { |
592 | __free_page(page); | 610 | __free_page(page); |
@@ -601,6 +619,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp, | |||
601 | rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod); | 619 | rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod); |
602 | 620 | ||
603 | rx_agg_buf->page = page; | 621 | rx_agg_buf->page = page; |
622 | rx_agg_buf->offset = offset; | ||
604 | rx_agg_buf->mapping = mapping; | 623 | rx_agg_buf->mapping = mapping; |
605 | rxbd->rx_bd_haddr = cpu_to_le64(mapping); | 624 | rxbd->rx_bd_haddr = cpu_to_le64(mapping); |
606 | rxbd->rx_bd_opaque = sw_prod; | 625 | rxbd->rx_bd_opaque = sw_prod; |
@@ -642,6 +661,7 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons, | |||
642 | page = cons_rx_buf->page; | 661 | page = cons_rx_buf->page; |
643 | cons_rx_buf->page = NULL; | 662 | cons_rx_buf->page = NULL; |
644 | prod_rx_buf->page = page; | 663 | prod_rx_buf->page = page; |
664 | prod_rx_buf->offset = cons_rx_buf->offset; | ||
645 | 665 | ||
646 | prod_rx_buf->mapping = cons_rx_buf->mapping; | 666 | prod_rx_buf->mapping = cons_rx_buf->mapping; |
647 | 667 | ||
@@ -709,7 +729,8 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi, | |||
709 | RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; | 729 | RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; |
710 | 730 | ||
711 | cons_rx_buf = &rxr->rx_agg_ring[cons]; | 731 | cons_rx_buf = &rxr->rx_agg_ring[cons]; |
712 | skb_fill_page_desc(skb, i, cons_rx_buf->page, 0, frag_len); | 732 | skb_fill_page_desc(skb, i, cons_rx_buf->page, |
733 | cons_rx_buf->offset, frag_len); | ||
713 | __clear_bit(cons, rxr->rx_agg_bmap); | 734 | __clear_bit(cons, rxr->rx_agg_bmap); |
714 | 735 | ||
715 | /* It is possible for bnxt_alloc_rx_page() to allocate | 736 | /* It is possible for bnxt_alloc_rx_page() to allocate |
@@ -740,7 +761,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi, | |||
740 | return NULL; | 761 | return NULL; |
741 | } | 762 | } |
742 | 763 | ||
743 | dma_unmap_page(&pdev->dev, mapping, PAGE_SIZE, | 764 | dma_unmap_page(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, |
744 | PCI_DMA_FROMDEVICE); | 765 | PCI_DMA_FROMDEVICE); |
745 | 766 | ||
746 | skb->data_len += frag_len; | 767 | skb->data_len += frag_len; |
@@ -1584,13 +1605,17 @@ static void bnxt_free_rx_skbs(struct bnxt *bp) | |||
1584 | 1605 | ||
1585 | dma_unmap_page(&pdev->dev, | 1606 | dma_unmap_page(&pdev->dev, |
1586 | dma_unmap_addr(rx_agg_buf, mapping), | 1607 | dma_unmap_addr(rx_agg_buf, mapping), |
1587 | PAGE_SIZE, PCI_DMA_FROMDEVICE); | 1608 | BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE); |
1588 | 1609 | ||
1589 | rx_agg_buf->page = NULL; | 1610 | rx_agg_buf->page = NULL; |
1590 | __clear_bit(j, rxr->rx_agg_bmap); | 1611 | __clear_bit(j, rxr->rx_agg_bmap); |
1591 | 1612 | ||
1592 | __free_page(page); | 1613 | __free_page(page); |
1593 | } | 1614 | } |
1615 | if (rxr->rx_page) { | ||
1616 | __free_page(rxr->rx_page); | ||
1617 | rxr->rx_page = NULL; | ||
1618 | } | ||
1594 | } | 1619 | } |
1595 | } | 1620 | } |
1596 | 1621 | ||
@@ -1973,7 +1998,7 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) | |||
1973 | if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) | 1998 | if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) |
1974 | return 0; | 1999 | return 0; |
1975 | 2000 | ||
1976 | type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) | | 2001 | type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | |
1977 | RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; | 2002 | RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; |
1978 | 2003 | ||
1979 | bnxt_init_rxbd_pages(ring, type); | 2004 | bnxt_init_rxbd_pages(ring, type); |
@@ -2164,7 +2189,7 @@ void bnxt_set_ring_params(struct bnxt *bp) | |||
2164 | bp->rx_agg_nr_pages = 0; | 2189 | bp->rx_agg_nr_pages = 0; |
2165 | 2190 | ||
2166 | if (bp->flags & BNXT_FLAG_TPA) | 2191 | if (bp->flags & BNXT_FLAG_TPA) |
2167 | agg_factor = 4; | 2192 | agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE); |
2168 | 2193 | ||
2169 | bp->flags &= ~BNXT_FLAG_JUMBO; | 2194 | bp->flags &= ~BNXT_FLAG_JUMBO; |
2170 | if (rx_space > PAGE_SIZE) { | 2195 | if (rx_space > PAGE_SIZE) { |
@@ -3020,12 +3045,12 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) | |||
3020 | /* Number of segs are log2 units, and first packet is not | 3045 | /* Number of segs are log2 units, and first packet is not |
3021 | * included as part of this units. | 3046 | * included as part of this units. |
3022 | */ | 3047 | */ |
3023 | if (mss <= PAGE_SIZE) { | 3048 | if (mss <= BNXT_RX_PAGE_SIZE) { |
3024 | n = PAGE_SIZE / mss; | 3049 | n = BNXT_RX_PAGE_SIZE / mss; |
3025 | nsegs = (MAX_SKB_FRAGS - 1) * n; | 3050 | nsegs = (MAX_SKB_FRAGS - 1) * n; |
3026 | } else { | 3051 | } else { |
3027 | n = mss / PAGE_SIZE; | 3052 | n = mss / BNXT_RX_PAGE_SIZE; |
3028 | if (mss & (PAGE_SIZE - 1)) | 3053 | if (mss & (BNXT_RX_PAGE_SIZE - 1)) |
3029 | n++; | 3054 | n++; |
3030 | nsegs = (MAX_SKB_FRAGS - n) / n; | 3055 | nsegs = (MAX_SKB_FRAGS - n) / n; |
3031 | } | 3056 | } |
@@ -4309,7 +4334,7 @@ static int bnxt_setup_int_mode(struct bnxt *bp) | |||
4309 | if (bp->flags & BNXT_FLAG_MSIX_CAP) | 4334 | if (bp->flags & BNXT_FLAG_MSIX_CAP) |
4310 | rc = bnxt_setup_msix(bp); | 4335 | rc = bnxt_setup_msix(bp); |
4311 | 4336 | ||
4312 | if (!(bp->flags & BNXT_FLAG_USING_MSIX)) { | 4337 | if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) { |
4313 | /* fallback to INTA */ | 4338 | /* fallback to INTA */ |
4314 | rc = bnxt_setup_inta(bp); | 4339 | rc = bnxt_setup_inta(bp); |
4315 | } | 4340 | } |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 709b95b8fcba..8b823ff558ff 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h | |||
@@ -407,6 +407,15 @@ struct rx_tpa_end_cmp_ext { | |||
407 | 407 | ||
408 | #define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHIFT) | 408 | #define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHIFT) |
409 | 409 | ||
410 | /* The RXBD length is 16-bit so we can only support page sizes < 64K */ | ||
411 | #if (PAGE_SHIFT > 15) | ||
412 | #define BNXT_RX_PAGE_SHIFT 15 | ||
413 | #else | ||
414 | #define BNXT_RX_PAGE_SHIFT PAGE_SHIFT | ||
415 | #endif | ||
416 | |||
417 | #define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT) | ||
418 | |||
410 | #define BNXT_MIN_PKT_SIZE 45 | 419 | #define BNXT_MIN_PKT_SIZE 45 |
411 | 420 | ||
412 | #define BNXT_NUM_TESTS(bp) 0 | 421 | #define BNXT_NUM_TESTS(bp) 0 |
@@ -506,6 +515,7 @@ struct bnxt_sw_rx_bd { | |||
506 | 515 | ||
507 | struct bnxt_sw_rx_agg_bd { | 516 | struct bnxt_sw_rx_agg_bd { |
508 | struct page *page; | 517 | struct page *page; |
518 | unsigned int offset; | ||
509 | dma_addr_t mapping; | 519 | dma_addr_t mapping; |
510 | }; | 520 | }; |
511 | 521 | ||
@@ -586,6 +596,9 @@ struct bnxt_rx_ring_info { | |||
586 | unsigned long *rx_agg_bmap; | 596 | unsigned long *rx_agg_bmap; |
587 | u16 rx_agg_bmap_size; | 597 | u16 rx_agg_bmap_size; |
588 | 598 | ||
599 | struct page *rx_page; | ||
600 | unsigned int rx_page_offset; | ||
601 | |||
589 | dma_addr_t rx_desc_mapping[MAX_RX_PAGES]; | 602 | dma_addr_t rx_desc_mapping[MAX_RX_PAGES]; |
590 | dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES]; | 603 | dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES]; |
591 | 604 | ||
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 48a7d7dee846..a63551d0a18a 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c | |||
@@ -441,7 +441,7 @@ static int macb_mii_init(struct macb *bp) | |||
441 | snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", | 441 | snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", |
442 | bp->pdev->name, bp->pdev->id); | 442 | bp->pdev->name, bp->pdev->id); |
443 | bp->mii_bus->priv = bp; | 443 | bp->mii_bus->priv = bp; |
444 | bp->mii_bus->parent = &bp->dev->dev; | 444 | bp->mii_bus->parent = &bp->pdev->dev; |
445 | pdata = dev_get_platdata(&bp->pdev->dev); | 445 | pdata = dev_get_platdata(&bp->pdev->dev); |
446 | 446 | ||
447 | dev_set_drvdata(&bp->dev->dev, bp->mii_bus); | 447 | dev_set_drvdata(&bp->dev->dev, bp->mii_bus); |
@@ -458,7 +458,8 @@ static int macb_mii_init(struct macb *bp) | |||
458 | struct phy_device *phydev; | 458 | struct phy_device *phydev; |
459 | 459 | ||
460 | phydev = mdiobus_scan(bp->mii_bus, i); | 460 | phydev = mdiobus_scan(bp->mii_bus, i); |
461 | if (IS_ERR(phydev)) { | 461 | if (IS_ERR(phydev) && |
462 | PTR_ERR(phydev) != -ENODEV) { | ||
462 | err = PTR_ERR(phydev); | 463 | err = PTR_ERR(phydev); |
463 | break; | 464 | break; |
464 | } | 465 | } |
@@ -3019,29 +3020,36 @@ static int macb_probe(struct platform_device *pdev) | |||
3019 | if (err) | 3020 | if (err) |
3020 | goto err_out_free_netdev; | 3021 | goto err_out_free_netdev; |
3021 | 3022 | ||
3023 | err = macb_mii_init(bp); | ||
3024 | if (err) | ||
3025 | goto err_out_free_netdev; | ||
3026 | |||
3027 | phydev = bp->phy_dev; | ||
3028 | |||
3029 | netif_carrier_off(dev); | ||
3030 | |||
3022 | err = register_netdev(dev); | 3031 | err = register_netdev(dev); |
3023 | if (err) { | 3032 | if (err) { |
3024 | dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); | 3033 | dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); |
3025 | goto err_out_unregister_netdev; | 3034 | goto err_out_unregister_mdio; |
3026 | } | 3035 | } |
3027 | 3036 | ||
3028 | err = macb_mii_init(bp); | 3037 | phy_attached_info(phydev); |
3029 | if (err) | ||
3030 | goto err_out_unregister_netdev; | ||
3031 | |||
3032 | netif_carrier_off(dev); | ||
3033 | 3038 | ||
3034 | netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n", | 3039 | netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n", |
3035 | macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID), | 3040 | macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID), |
3036 | dev->base_addr, dev->irq, dev->dev_addr); | 3041 | dev->base_addr, dev->irq, dev->dev_addr); |
3037 | 3042 | ||
3038 | phydev = bp->phy_dev; | ||
3039 | phy_attached_info(phydev); | ||
3040 | |||
3041 | return 0; | 3043 | return 0; |
3042 | 3044 | ||
3043 | err_out_unregister_netdev: | 3045 | err_out_unregister_mdio: |
3044 | unregister_netdev(dev); | 3046 | phy_disconnect(bp->phy_dev); |
3047 | mdiobus_unregister(bp->mii_bus); | ||
3048 | mdiobus_free(bp->mii_bus); | ||
3049 | |||
3050 | /* Shutdown the PHY if there is a GPIO reset */ | ||
3051 | if (bp->reset_gpio) | ||
3052 | gpiod_set_value(bp->reset_gpio, 0); | ||
3045 | 3053 | ||
3046 | err_out_free_netdev: | 3054 | err_out_free_netdev: |
3047 | free_netdev(dev); | 3055 | free_netdev(dev); |
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 60908eab3b3a..43da891fab97 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | |||
@@ -576,7 +576,7 @@ static void setup_rss(struct adapter *adap) | |||
576 | unsigned int nq0 = adap2pinfo(adap, 0)->nqsets; | 576 | unsigned int nq0 = adap2pinfo(adap, 0)->nqsets; |
577 | unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1; | 577 | unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1; |
578 | u8 cpus[SGE_QSETS + 1]; | 578 | u8 cpus[SGE_QSETS + 1]; |
579 | u16 rspq_map[RSS_TABLE_SIZE]; | 579 | u16 rspq_map[RSS_TABLE_SIZE + 1]; |
580 | 580 | ||
581 | for (i = 0; i < SGE_QSETS; ++i) | 581 | for (i = 0; i < SGE_QSETS; ++i) |
582 | cpus[i] = i; | 582 | cpus[i] = i; |
@@ -586,6 +586,7 @@ static void setup_rss(struct adapter *adap) | |||
586 | rspq_map[i] = i % nq0; | 586 | rspq_map[i] = i % nq0; |
587 | rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0; | 587 | rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0; |
588 | } | 588 | } |
589 | rspq_map[RSS_TABLE_SIZE] = 0xffff; /* terminator */ | ||
589 | 590 | ||
590 | t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN | | 591 | t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN | |
591 | F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | | 592 | F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | |
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 7fc490225da5..a6d26d351dfc 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c | |||
@@ -3354,8 +3354,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb, | |||
3354 | /* Enable per-CPU interrupts on the CPU that is | 3354 | /* Enable per-CPU interrupts on the CPU that is |
3355 | * brought up. | 3355 | * brought up. |
3356 | */ | 3356 | */ |
3357 | smp_call_function_single(cpu, mvneta_percpu_enable, | 3357 | mvneta_percpu_enable(pp); |
3358 | pp, true); | ||
3359 | 3358 | ||
3360 | /* Enable per-CPU interrupt on the one CPU we care | 3359 | /* Enable per-CPU interrupt on the one CPU we care |
3361 | * about. | 3360 | * about. |
@@ -3387,8 +3386,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb, | |||
3387 | /* Disable per-CPU interrupts on the CPU that is | 3386 | /* Disable per-CPU interrupts on the CPU that is |
3388 | * brought down. | 3387 | * brought down. |
3389 | */ | 3388 | */ |
3390 | smp_call_function_single(cpu, mvneta_percpu_disable, | 3389 | mvneta_percpu_disable(pp); |
3391 | pp, true); | ||
3392 | 3390 | ||
3393 | break; | 3391 | break; |
3394 | case CPU_DEAD: | 3392 | case CPU_DEAD: |
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 7ace07dad6a3..c442f6ad15ff 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c | |||
@@ -979,6 +979,8 @@ static int pxa168_init_phy(struct net_device *dev) | |||
979 | return 0; | 979 | return 0; |
980 | 980 | ||
981 | pep->phy = mdiobus_scan(pep->smi_bus, pep->phy_addr); | 981 | pep->phy = mdiobus_scan(pep->smi_bus, pep->phy_addr); |
982 | if (IS_ERR(pep->phy)) | ||
983 | return PTR_ERR(pep->phy); | ||
982 | if (!pep->phy) | 984 | if (!pep->phy) |
983 | return -ENODEV; | 985 | return -ENODEV; |
984 | 986 | ||
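mdiobus_scan() in this kernel can report failure either as an ERR_PTR (a bus error, or -ENODEV when nothing answers) or, historically, as NULL, which is why the macb hunk earlier whitelists -ENODEV while this one checks both forms. A defensive sketch covering both conventions (helper name hypothetical):

    /* sketch: accept either failure convention from mdiobus_scan() */
    static struct phy_device *scan_one(struct mii_bus *bus, int addr, int *err)
    {
            struct phy_device *phydev = mdiobus_scan(bus, addr);

            if (IS_ERR(phydev)) {
                    *err = PTR_ERR(phydev);  /* bus-level error (or -ENODEV) */
                    return NULL;
            }
            if (!phydev) {
                    *err = -ENODEV;          /* legacy "nothing at this address" */
                    return NULL;
            }
            *err = 0;
            return phydev;
    }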
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index c0d7b7296236..a386f047c1af 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c | |||
@@ -405,7 +405,6 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev, | |||
405 | u32 packets = 0; | 405 | u32 packets = 0; |
406 | u32 bytes = 0; | 406 | u32 bytes = 0; |
407 | int factor = priv->cqe_factor; | 407 | int factor = priv->cqe_factor; |
408 | u64 timestamp = 0; | ||
409 | int done = 0; | 408 | int done = 0; |
410 | int budget = priv->tx_work_limit; | 409 | int budget = priv->tx_work_limit; |
411 | u32 last_nr_txbb; | 410 | u32 last_nr_txbb; |
@@ -445,9 +444,12 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev, | |||
445 | new_index = be16_to_cpu(cqe->wqe_index) & size_mask; | 444 | new_index = be16_to_cpu(cqe->wqe_index) & size_mask; |
446 | 445 | ||
447 | do { | 446 | do { |
447 | u64 timestamp = 0; | ||
448 | |||
448 | txbbs_skipped += last_nr_txbb; | 449 | txbbs_skipped += last_nr_txbb; |
449 | ring_index = (ring_index + last_nr_txbb) & size_mask; | 450 | ring_index = (ring_index + last_nr_txbb) & size_mask; |
450 | if (ring->tx_info[ring_index].ts_requested) | 451 | |
452 | if (unlikely(ring->tx_info[ring_index].ts_requested)) | ||
451 | timestamp = mlx4_en_get_cqe_ts(cqe); | 453 | timestamp = mlx4_en_get_cqe_ts(cqe); |
452 | 454 | ||
453 | /* free next descriptor */ | 455 | /* free next descriptor */ |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 1cf722eba607..559d11a443bc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig | |||
@@ -14,6 +14,7 @@ config MLX5_CORE_EN | |||
14 | bool "Mellanox Technologies ConnectX-4 Ethernet support" | 14 | bool "Mellanox Technologies ConnectX-4 Ethernet support" |
15 | depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE | 15 | depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE |
16 | select PTP_1588_CLOCK | 16 | select PTP_1588_CLOCK |
17 | select VXLAN if MLX5_CORE=y | ||
17 | default n | 18 | default n |
18 | ---help--- | 19 | ---help--- |
19 | Ethernet support in Mellanox Technologies ConnectX-4 NIC. | 20 | Ethernet support in Mellanox Technologies ConnectX-4 NIC. |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 879e6276c473..3881dce0cc30 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h | |||
@@ -567,6 +567,7 @@ struct mlx5e_priv { | |||
567 | struct mlx5e_vxlan_db vxlan; | 567 | struct mlx5e_vxlan_db vxlan; |
568 | 568 | ||
569 | struct mlx5e_params params; | 569 | struct mlx5e_params params; |
570 | struct workqueue_struct *wq; | ||
570 | struct work_struct update_carrier_work; | 571 | struct work_struct update_carrier_work; |
571 | struct work_struct set_rx_mode_work; | 572 | struct work_struct set_rx_mode_work; |
572 | struct delayed_work update_stats_work; | 573 | struct delayed_work update_stats_work; |
@@ -609,7 +610,7 @@ enum mlx5e_link_mode { | |||
609 | MLX5E_100GBASE_KR4 = 22, | 610 | MLX5E_100GBASE_KR4 = 22, |
610 | MLX5E_100GBASE_LR4 = 23, | 611 | MLX5E_100GBASE_LR4 = 23, |
611 | MLX5E_100BASE_TX = 24, | 612 | MLX5E_100BASE_TX = 24, |
612 | MLX5E_100BASE_T = 25, | 613 | MLX5E_1000BASE_T = 25, |
613 | MLX5E_10GBASE_T = 26, | 614 | MLX5E_10GBASE_T = 26, |
614 | MLX5E_25GBASE_CR = 27, | 615 | MLX5E_25GBASE_CR = 27, |
615 | MLX5E_25GBASE_KR = 28, | 616 | MLX5E_25GBASE_KR = 28, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 68834b715f6c..3476ab844634 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | |||
@@ -138,10 +138,10 @@ static const struct { | |||
138 | [MLX5E_100BASE_TX] = { | 138 | [MLX5E_100BASE_TX] = { |
139 | .speed = 100, | 139 | .speed = 100, |
140 | }, | 140 | }, |
141 | [MLX5E_100BASE_T] = { | 141 | [MLX5E_1000BASE_T] = { |
142 | .supported = SUPPORTED_100baseT_Full, | 142 | .supported = SUPPORTED_1000baseT_Full, |
143 | .advertised = ADVERTISED_100baseT_Full, | 143 | .advertised = ADVERTISED_1000baseT_Full, |
144 | .speed = 100, | 144 | .speed = 1000, |
145 | }, | 145 | }, |
146 | [MLX5E_10GBASE_T] = { | 146 | [MLX5E_10GBASE_T] = { |
147 | .supported = SUPPORTED_10000baseT_Full, | 147 | .supported = SUPPORTED_10000baseT_Full, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index e0adb604f461..d4dfc5ce516a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
@@ -262,9 +262,8 @@ static void mlx5e_update_stats_work(struct work_struct *work) | |||
262 | mutex_lock(&priv->state_lock); | 262 | mutex_lock(&priv->state_lock); |
263 | if (test_bit(MLX5E_STATE_OPENED, &priv->state)) { | 263 | if (test_bit(MLX5E_STATE_OPENED, &priv->state)) { |
264 | mlx5e_update_stats(priv); | 264 | mlx5e_update_stats(priv); |
265 | schedule_delayed_work(dwork, | 265 | queue_delayed_work(priv->wq, dwork, |
266 | msecs_to_jiffies( | 266 | msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL)); |
267 | MLX5E_UPDATE_STATS_INTERVAL)); | ||
268 | } | 267 | } |
269 | mutex_unlock(&priv->state_lock); | 268 | mutex_unlock(&priv->state_lock); |
270 | } | 269 | } |
@@ -280,7 +279,7 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv, | |||
280 | switch (event) { | 279 | switch (event) { |
281 | case MLX5_DEV_EVENT_PORT_UP: | 280 | case MLX5_DEV_EVENT_PORT_UP: |
282 | case MLX5_DEV_EVENT_PORT_DOWN: | 281 | case MLX5_DEV_EVENT_PORT_DOWN: |
283 | schedule_work(&priv->update_carrier_work); | 282 | queue_work(priv->wq, &priv->update_carrier_work); |
284 | break; | 283 | break; |
285 | 284 | ||
286 | default: | 285 | default: |
@@ -1404,24 +1403,50 @@ static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv) | |||
1404 | return 0; | 1403 | return 0; |
1405 | } | 1404 | } |
1406 | 1405 | ||
1407 | static int mlx5e_set_dev_port_mtu(struct net_device *netdev) | 1406 | static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu) |
1408 | { | 1407 | { |
1409 | struct mlx5e_priv *priv = netdev_priv(netdev); | ||
1410 | struct mlx5_core_dev *mdev = priv->mdev; | 1408 | struct mlx5_core_dev *mdev = priv->mdev; |
1411 | int hw_mtu; | 1409 | u16 hw_mtu = MLX5E_SW2HW_MTU(mtu); |
1412 | int err; | 1410 | int err; |
1413 | 1411 | ||
1414 | err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1); | 1412 | err = mlx5_set_port_mtu(mdev, hw_mtu, 1); |
1415 | if (err) | 1413 | if (err) |
1416 | return err; | 1414 | return err; |
1417 | 1415 | ||
1418 | mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1); | 1416 | /* Update vport context MTU */ |
1417 | mlx5_modify_nic_vport_mtu(mdev, hw_mtu); | ||
1418 | return 0; | ||
1419 | } | ||
1420 | |||
1421 | static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu) | ||
1422 | { | ||
1423 | struct mlx5_core_dev *mdev = priv->mdev; | ||
1424 | u16 hw_mtu = 0; | ||
1425 | int err; | ||
1419 | 1426 | ||
1420 | if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu) | 1427 | err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu); |
1421 | netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n", | 1428 | if (err || !hw_mtu) /* fallback to port oper mtu */ |
1422 | __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu); | 1429 | mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1); |
1430 | |||
1431 | *mtu = MLX5E_HW2SW_MTU(hw_mtu); | ||
1432 | } | ||
1433 | |||
1434 | static int mlx5e_set_dev_port_mtu(struct net_device *netdev) | ||
1435 | { | ||
1436 | struct mlx5e_priv *priv = netdev_priv(netdev); | ||
1437 | u16 mtu; | ||
1438 | int err; | ||
1423 | 1439 | ||
1424 | netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu); | 1440 | err = mlx5e_set_mtu(priv, netdev->mtu); |
1441 | if (err) | ||
1442 | return err; | ||
1443 | |||
1444 | mlx5e_query_mtu(priv, &mtu); | ||
1445 | if (mtu != netdev->mtu) | ||
1446 | netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n", | ||
1447 | __func__, mtu, netdev->mtu); | ||
1448 | |||
1449 | netdev->mtu = mtu; | ||
1425 | return 0; | 1450 | return 0; |
1426 | } | 1451 | } |
1427 | 1452 | ||
@@ -1479,7 +1504,7 @@ int mlx5e_open_locked(struct net_device *netdev) | |||
1479 | mlx5e_update_carrier(priv); | 1504 | mlx5e_update_carrier(priv); |
1480 | mlx5e_timestamp_init(priv); | 1505 | mlx5e_timestamp_init(priv); |
1481 | 1506 | ||
1482 | schedule_delayed_work(&priv->update_stats_work, 0); | 1507 | queue_delayed_work(priv->wq, &priv->update_stats_work, 0); |
1483 | 1508 | ||
1484 | return 0; | 1509 | return 0; |
1485 | 1510 | ||
@@ -1935,7 +1960,7 @@ static void mlx5e_set_rx_mode(struct net_device *dev) | |||
1935 | { | 1960 | { |
1936 | struct mlx5e_priv *priv = netdev_priv(dev); | 1961 | struct mlx5e_priv *priv = netdev_priv(dev); |
1937 | 1962 | ||
1938 | schedule_work(&priv->set_rx_mode_work); | 1963 | queue_work(priv->wq, &priv->set_rx_mode_work); |
1939 | } | 1964 | } |
1940 | 1965 | ||
1941 | static int mlx5e_set_mac(struct net_device *netdev, void *addr) | 1966 | static int mlx5e_set_mac(struct net_device *netdev, void *addr) |
@@ -1950,7 +1975,7 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr) | |||
1950 | ether_addr_copy(netdev->dev_addr, saddr->sa_data); | 1975 | ether_addr_copy(netdev->dev_addr, saddr->sa_data); |
1951 | netif_addr_unlock_bh(netdev); | 1976 | netif_addr_unlock_bh(netdev); |
1952 | 1977 | ||
1953 | schedule_work(&priv->set_rx_mode_work); | 1978 | queue_work(priv->wq, &priv->set_rx_mode_work); |
1954 | 1979 | ||
1955 | return 0; | 1980 | return 0; |
1956 | } | 1981 | } |
@@ -1999,22 +2024,27 @@ static int mlx5e_set_features(struct net_device *netdev, | |||
1999 | return err; | 2024 | return err; |
2000 | } | 2025 | } |
2001 | 2026 | ||
2027 | #define MXL5_HW_MIN_MTU 64 | ||
2028 | #define MXL5E_MIN_MTU (MXL5_HW_MIN_MTU + ETH_FCS_LEN) | ||
2029 | |||
2002 | static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) | 2030 | static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) |
2003 | { | 2031 | { |
2004 | struct mlx5e_priv *priv = netdev_priv(netdev); | 2032 | struct mlx5e_priv *priv = netdev_priv(netdev); |
2005 | struct mlx5_core_dev *mdev = priv->mdev; | 2033 | struct mlx5_core_dev *mdev = priv->mdev; |
2006 | bool was_opened; | 2034 | bool was_opened; |
2007 | int max_mtu; | 2035 | u16 max_mtu; |
2036 | u16 min_mtu; | ||
2008 | int err = 0; | 2037 | int err = 0; |
2009 | 2038 | ||
2010 | mlx5_query_port_max_mtu(mdev, &max_mtu, 1); | 2039 | mlx5_query_port_max_mtu(mdev, &max_mtu, 1); |
2011 | 2040 | ||
2012 | max_mtu = MLX5E_HW2SW_MTU(max_mtu); | 2041 | max_mtu = MLX5E_HW2SW_MTU(max_mtu); |
2042 | min_mtu = MLX5E_HW2SW_MTU(MXL5E_MIN_MTU); | ||
2013 | 2043 | ||
2014 | if (new_mtu > max_mtu) { | 2044 | if (new_mtu > max_mtu || new_mtu < min_mtu) { |
2015 | netdev_err(netdev, | 2045 | netdev_err(netdev, |
2016 | "%s: Bad MTU (%d) > (%d) Max\n", | 2046 | "%s: Bad MTU (%d), valid range is: [%d..%d]\n", |
2017 | __func__, new_mtu, max_mtu); | 2047 | __func__, new_mtu, min_mtu, max_mtu); |
2018 | return -EINVAL; | 2048 | return -EINVAL; |
2019 | } | 2049 | } |
2020 | 2050 | ||
@@ -2127,7 +2157,7 @@ static void mlx5e_add_vxlan_port(struct net_device *netdev, | |||
2127 | if (!mlx5e_vxlan_allowed(priv->mdev)) | 2157 | if (!mlx5e_vxlan_allowed(priv->mdev)) |
2128 | return; | 2158 | return; |
2129 | 2159 | ||
2130 | mlx5e_vxlan_add_port(priv, be16_to_cpu(port)); | 2160 | mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 1); |
2131 | } | 2161 | } |
2132 | 2162 | ||
2133 | static void mlx5e_del_vxlan_port(struct net_device *netdev, | 2163 | static void mlx5e_del_vxlan_port(struct net_device *netdev, |
@@ -2138,7 +2168,7 @@ static void mlx5e_del_vxlan_port(struct net_device *netdev, | |||
2138 | if (!mlx5e_vxlan_allowed(priv->mdev)) | 2168 | if (!mlx5e_vxlan_allowed(priv->mdev)) |
2139 | return; | 2169 | return; |
2140 | 2170 | ||
2141 | mlx5e_vxlan_del_port(priv, be16_to_cpu(port)); | 2171 | mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 0); |
2142 | } | 2172 | } |
2143 | 2173 | ||
2144 | static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv, | 2174 | static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv, |
@@ -2467,10 +2497,14 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) | |||
2467 | 2497 | ||
2468 | priv = netdev_priv(netdev); | 2498 | priv = netdev_priv(netdev); |
2469 | 2499 | ||
2500 | priv->wq = create_singlethread_workqueue("mlx5e"); | ||
2501 | if (!priv->wq) | ||
2502 | goto err_free_netdev; | ||
2503 | |||
2470 | err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false); | 2504 | err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false); |
2471 | if (err) { | 2505 | if (err) { |
2472 | mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err); | 2506 | mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err); |
2473 | goto err_free_netdev; | 2507 | goto err_destroy_wq; |
2474 | } | 2508 | } |
2475 | 2509 | ||
2476 | err = mlx5_core_alloc_pd(mdev, &priv->pdn); | 2510 | err = mlx5_core_alloc_pd(mdev, &priv->pdn); |
@@ -2549,7 +2583,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) | |||
2549 | vxlan_get_rx_port(netdev); | 2583 | vxlan_get_rx_port(netdev); |
2550 | 2584 | ||
2551 | mlx5e_enable_async_events(priv); | 2585 | mlx5e_enable_async_events(priv); |
2552 | schedule_work(&priv->set_rx_mode_work); | 2586 | queue_work(priv->wq, &priv->set_rx_mode_work); |
2553 | 2587 | ||
2554 | return priv; | 2588 | return priv; |
2555 | 2589 | ||
@@ -2586,6 +2620,9 @@ err_dealloc_pd: | |||
2586 | err_unmap_free_uar: | 2620 | err_unmap_free_uar: |
2587 | mlx5_unmap_free_uar(mdev, &priv->cq_uar); | 2621 | mlx5_unmap_free_uar(mdev, &priv->cq_uar); |
2588 | 2622 | ||
2623 | err_destroy_wq: | ||
2624 | destroy_workqueue(priv->wq); | ||
2625 | |||
2589 | err_free_netdev: | 2626 | err_free_netdev: |
2590 | free_netdev(netdev); | 2627 | free_netdev(netdev); |
2591 | 2628 | ||
@@ -2599,10 +2636,19 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) | |||
2599 | 2636 | ||
2600 | set_bit(MLX5E_STATE_DESTROYING, &priv->state); | 2637 | set_bit(MLX5E_STATE_DESTROYING, &priv->state); |
2601 | 2638 | ||
2602 | schedule_work(&priv->set_rx_mode_work); | 2639 | queue_work(priv->wq, &priv->set_rx_mode_work); |
2603 | mlx5e_disable_async_events(priv); | 2640 | mlx5e_disable_async_events(priv); |
2604 | flush_scheduled_work(); | 2641 | flush_workqueue(priv->wq); |
2605 | unregister_netdev(netdev); | 2642 | if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) { |
2643 | netif_device_detach(netdev); | ||
2644 | mutex_lock(&priv->state_lock); | ||
2645 | if (test_bit(MLX5E_STATE_OPENED, &priv->state)) | ||
2646 | mlx5e_close_locked(netdev); | ||
2647 | mutex_unlock(&priv->state_lock); | ||
2648 | } else { | ||
2649 | unregister_netdev(netdev); | ||
2650 | } | ||
2651 | |||
2606 | mlx5e_tc_cleanup(priv); | 2652 | mlx5e_tc_cleanup(priv); |
2607 | mlx5e_vxlan_cleanup(priv); | 2653 | mlx5e_vxlan_cleanup(priv); |
2608 | mlx5e_destroy_flow_tables(priv); | 2654 | mlx5e_destroy_flow_tables(priv); |
@@ -2615,7 +2661,11 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) | |||
2615 | mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn); | 2661 | mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn); |
2616 | mlx5_core_dealloc_pd(priv->mdev, priv->pdn); | 2662 | mlx5_core_dealloc_pd(priv->mdev, priv->pdn); |
2617 | mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar); | 2663 | mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar); |
2618 | free_netdev(netdev); | 2664 | cancel_delayed_work_sync(&priv->update_stats_work); |
2665 | destroy_workqueue(priv->wq); | ||
2666 | |||
2667 | if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) | ||
2668 | free_netdev(netdev); | ||
2619 | } | 2669 | } |
2620 | 2670 | ||
2621 | static void *mlx5e_get_netdev(void *vpriv) | 2671 | static void *mlx5e_get_netdev(void *vpriv) |
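The en_main.c hunks above move the driver's deferred work off the shared system workqueue and onto a private workqueue kept in priv->wq, created in mlx5e_create_netdev() and flushed/destroyed on teardown. A minimal sketch of that lifecycle, using hypothetical foo_* names rather than the real mlx5e symbols:

#include <linux/workqueue.h>
#include <linux/slab.h>

struct foo_priv {
	struct workqueue_struct *wq;		/* per-device queue, so flushes only wait for our work */
	struct work_struct refresh_work;
};

static void foo_refresh(struct work_struct *work)
{
	struct foo_priv *priv = container_of(work, struct foo_priv, refresh_work);

	/* deferred, sleepable update for this device */
	(void)priv;
}

static struct foo_priv *foo_create(void)
{
	struct foo_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return NULL;

	/* one ordered kernel thread, as with create_singlethread_workqueue("mlx5e") above */
	priv->wq = create_singlethread_workqueue("foo");
	if (!priv->wq) {
		kfree(priv);
		return NULL;
	}
	INIT_WORK(&priv->refresh_work, foo_refresh);
	return priv;
}

static void foo_kick(struct foo_priv *priv)
{
	queue_work(priv->wq, &priv->refresh_work);	/* instead of schedule_work() */
}

static void foo_destroy(struct foo_priv *priv)
{
	flush_workqueue(priv->wq);			/* instead of flush_scheduled_work() */
	destroy_workqueue(priv->wq);
	kfree(priv);
}

Flushing a private queue only waits for this device's items, which is what lets the destroy path above drop flush_scheduled_work() and still guarantee nothing is left running before the netdev is freed.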
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 5121be4675d1..89cce97d46c6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | |||
@@ -1065,33 +1065,6 @@ unlock_fg: | |||
1065 | return rule; | 1065 | return rule; |
1066 | } | 1066 | } |
1067 | 1067 | ||
1068 | static struct mlx5_flow_rule *add_rule_to_auto_fg(struct mlx5_flow_table *ft, | ||
1069 | u8 match_criteria_enable, | ||
1070 | u32 *match_criteria, | ||
1071 | u32 *match_value, | ||
1072 | u8 action, | ||
1073 | u32 flow_tag, | ||
1074 | struct mlx5_flow_destination *dest) | ||
1075 | { | ||
1076 | struct mlx5_flow_rule *rule; | ||
1077 | struct mlx5_flow_group *g; | ||
1078 | |||
1079 | g = create_autogroup(ft, match_criteria_enable, match_criteria); | ||
1080 | if (IS_ERR(g)) | ||
1081 | return (void *)g; | ||
1082 | |||
1083 | rule = add_rule_fg(g, match_value, | ||
1084 | action, flow_tag, dest); | ||
1085 | if (IS_ERR(rule)) { | ||
1086 | /* Remove assumes refcount > 0 and autogroup creates a group | ||
1087 | * with a refcount = 0. | ||
1088 | */ | ||
1089 | tree_get_node(&g->node); | ||
1090 | tree_remove_node(&g->node); | ||
1091 | } | ||
1092 | return rule; | ||
1093 | } | ||
1094 | |||
1095 | static struct mlx5_flow_rule * | 1068 | static struct mlx5_flow_rule * |
1096 | _mlx5_add_flow_rule(struct mlx5_flow_table *ft, | 1069 | _mlx5_add_flow_rule(struct mlx5_flow_table *ft, |
1097 | u8 match_criteria_enable, | 1070 | u8 match_criteria_enable, |
@@ -1119,8 +1092,23 @@ _mlx5_add_flow_rule(struct mlx5_flow_table *ft, | |||
1119 | goto unlock; | 1092 | goto unlock; |
1120 | } | 1093 | } |
1121 | 1094 | ||
1122 | rule = add_rule_to_auto_fg(ft, match_criteria_enable, match_criteria, | 1095 | g = create_autogroup(ft, match_criteria_enable, match_criteria); |
1123 | match_value, action, flow_tag, dest); | 1096 | if (IS_ERR(g)) { |
1097 | rule = (void *)g; | ||
1098 | goto unlock; | ||
1099 | } | ||
1100 | |||
1101 | rule = add_rule_fg(g, match_value, | ||
1102 | action, flow_tag, dest); | ||
1103 | if (IS_ERR(rule)) { | ||
1104 | /* Remove assumes refcount > 0 and autogroup creates a group | ||
1105 | * with a refcount = 0. | ||
1106 | */ | ||
1107 | unlock_ref_node(&ft->node); | ||
1108 | tree_get_node(&g->node); | ||
1109 | tree_remove_node(&g->node); | ||
1110 | return rule; | ||
1111 | } | ||
1124 | unlock: | 1112 | unlock: |
1125 | unlock_ref_node(&ft->node); | 1113 | unlock_ref_node(&ft->node); |
1126 | return rule; | 1114 | return rule; |
@@ -1288,7 +1276,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, | |||
1288 | { | 1276 | { |
1289 | struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns; | 1277 | struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns; |
1290 | int prio; | 1278 | int prio; |
1291 | static struct fs_prio *fs_prio; | 1279 | struct fs_prio *fs_prio; |
1292 | struct mlx5_flow_namespace *ns; | 1280 | struct mlx5_flow_namespace *ns; |
1293 | 1281 | ||
1294 | if (!root_ns) | 1282 | if (!root_ns) |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 3f3b2fae4991..6892746fd10d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c | |||
@@ -966,7 +966,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) | |||
966 | int err; | 966 | int err; |
967 | 967 | ||
968 | mutex_lock(&dev->intf_state_mutex); | 968 | mutex_lock(&dev->intf_state_mutex); |
969 | if (dev->interface_state == MLX5_INTERFACE_STATE_UP) { | 969 | if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { |
970 | dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n", | 970 | dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n", |
971 | __func__); | 971 | __func__); |
972 | goto out; | 972 | goto out; |
@@ -1133,7 +1133,8 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) | |||
1133 | if (err) | 1133 | if (err) |
1134 | pr_info("failed request module on %s\n", MLX5_IB_MOD); | 1134 | pr_info("failed request module on %s\n", MLX5_IB_MOD); |
1135 | 1135 | ||
1136 | dev->interface_state = MLX5_INTERFACE_STATE_UP; | 1136 | clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state); |
1137 | set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); | ||
1137 | out: | 1138 | out: |
1138 | mutex_unlock(&dev->intf_state_mutex); | 1139 | mutex_unlock(&dev->intf_state_mutex); |
1139 | 1140 | ||
@@ -1207,7 +1208,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) | |||
1207 | } | 1208 | } |
1208 | 1209 | ||
1209 | mutex_lock(&dev->intf_state_mutex); | 1210 | mutex_lock(&dev->intf_state_mutex); |
1210 | if (dev->interface_state == MLX5_INTERFACE_STATE_DOWN) { | 1211 | if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) { |
1211 | dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n", | 1212 | dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n", |
1212 | __func__); | 1213 | __func__); |
1213 | goto out; | 1214 | goto out; |
@@ -1241,7 +1242,8 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) | |||
1241 | mlx5_cmd_cleanup(dev); | 1242 | mlx5_cmd_cleanup(dev); |
1242 | 1243 | ||
1243 | out: | 1244 | out: |
1244 | dev->interface_state = MLX5_INTERFACE_STATE_DOWN; | 1245 | clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); |
1246 | set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state); | ||
1245 | mutex_unlock(&dev->intf_state_mutex); | 1247 | mutex_unlock(&dev->intf_state_mutex); |
1246 | return err; | 1248 | return err; |
1247 | } | 1249 | } |
@@ -1452,6 +1454,18 @@ static const struct pci_error_handlers mlx5_err_handler = { | |||
1452 | .resume = mlx5_pci_resume | 1454 | .resume = mlx5_pci_resume |
1453 | }; | 1455 | }; |
1454 | 1456 | ||
1457 | static void shutdown(struct pci_dev *pdev) | ||
1458 | { | ||
1459 | struct mlx5_core_dev *dev = pci_get_drvdata(pdev); | ||
1460 | struct mlx5_priv *priv = &dev->priv; | ||
1461 | |||
1462 | dev_info(&pdev->dev, "Shutdown was called\n"); | ||
1463 | /* Notify mlx5 clients that the kernel is being shut down */ | ||
1464 | set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state); | ||
1465 | mlx5_unload_one(dev, priv); | ||
1466 | mlx5_pci_disable_device(dev); | ||
1467 | } | ||
1468 | |||
1455 | static const struct pci_device_id mlx5_core_pci_table[] = { | 1469 | static const struct pci_device_id mlx5_core_pci_table[] = { |
1456 | { PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */ | 1470 | { PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */ |
1457 | { PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB VF */ | 1471 | { PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB VF */ |
@@ -1459,6 +1473,8 @@ static const struct pci_device_id mlx5_core_pci_table[] = { | |||
1459 | { PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4 VF */ | 1473 | { PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4 VF */ |
1460 | { PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */ | 1474 | { PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */ |
1461 | { PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */ | 1475 | { PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */ |
1476 | { PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5 */ | ||
1477 | { PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 VF */ | ||
1462 | { 0, } | 1478 | { 0, } |
1463 | }; | 1479 | }; |
1464 | 1480 | ||
@@ -1469,6 +1485,7 @@ static struct pci_driver mlx5_core_driver = { | |||
1469 | .id_table = mlx5_core_pci_table, | 1485 | .id_table = mlx5_core_pci_table, |
1470 | .probe = init_one, | 1486 | .probe = init_one, |
1471 | .remove = remove_one, | 1487 | .remove = remove_one, |
1488 | .shutdown = shutdown, | ||
1472 | .err_handler = &mlx5_err_handler, | 1489 | .err_handler = &mlx5_err_handler, |
1473 | .sriov_configure = mlx5_core_sriov_configure, | 1490 | .sriov_configure = mlx5_core_sriov_configure, |
1474 | }; | 1491 | }; |
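The main.c hunks switch dev->interface_state from a single enum value to a set of atomic bit flags in dev->intf_state, so the new MLX5_INTERFACE_STATE_SHUTDOWN flag can be raised independently of the UP/DOWN transitions done by load/unload. A rough sketch of that flag-word pattern, with made-up foo_* names:

#include <linux/bitops.h>

/* bit numbers within one flags word, in the spirit of MLX5_INTERFACE_STATE_* */
enum {
	FOO_STATE_UP,
	FOO_STATE_DOWN,
	FOO_STATE_SHUTDOWN,
};

struct foo_dev {
	unsigned long state;			/* manipulated only with atomic bitops */
};

static void foo_load(struct foo_dev *dev)
{
	if (test_bit(FOO_STATE_UP, &dev->state))
		return;				/* already up, NOP */

	/* ... bring the device up ... */

	clear_bit(FOO_STATE_DOWN, &dev->state);
	set_bit(FOO_STATE_UP, &dev->state);
}

static void foo_pci_shutdown(struct foo_dev *dev)
{
	set_bit(FOO_STATE_SHUTDOWN, &dev->state);	/* survives the unload's UP/DOWN updates */
	/* ... unload and disable the PCI device ... */
}

Because each state is an independent bit, the netdev teardown path in en_main.c above can still observe the SHUTDOWN flag after mlx5_unload_one() has flipped UP to DOWN.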
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index ae378c575deb..53cc1e2c693b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c | |||
@@ -247,8 +247,8 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, | |||
247 | } | 247 | } |
248 | EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status); | 248 | EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status); |
249 | 249 | ||
250 | static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu, | 250 | static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu, |
251 | int *max_mtu, int *oper_mtu, u8 port) | 251 | u16 *max_mtu, u16 *oper_mtu, u8 port) |
252 | { | 252 | { |
253 | u32 in[MLX5_ST_SZ_DW(pmtu_reg)]; | 253 | u32 in[MLX5_ST_SZ_DW(pmtu_reg)]; |
254 | u32 out[MLX5_ST_SZ_DW(pmtu_reg)]; | 254 | u32 out[MLX5_ST_SZ_DW(pmtu_reg)]; |
@@ -268,7 +268,7 @@ static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu, | |||
268 | *admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu); | 268 | *admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu); |
269 | } | 269 | } |
270 | 270 | ||
271 | int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port) | 271 | int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port) |
272 | { | 272 | { |
273 | u32 in[MLX5_ST_SZ_DW(pmtu_reg)]; | 273 | u32 in[MLX5_ST_SZ_DW(pmtu_reg)]; |
274 | u32 out[MLX5_ST_SZ_DW(pmtu_reg)]; | 274 | u32 out[MLX5_ST_SZ_DW(pmtu_reg)]; |
@@ -283,14 +283,14 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port) | |||
283 | } | 283 | } |
284 | EXPORT_SYMBOL_GPL(mlx5_set_port_mtu); | 284 | EXPORT_SYMBOL_GPL(mlx5_set_port_mtu); |
285 | 285 | ||
286 | void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, | 286 | void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, |
287 | u8 port) | 287 | u8 port) |
288 | { | 288 | { |
289 | mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port); | 289 | mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port); |
290 | } | 290 | } |
291 | EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu); | 291 | EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu); |
292 | 292 | ||
293 | void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu, | 293 | void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu, |
294 | u8 port) | 294 | u8 port) |
295 | { | 295 | { |
296 | mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port); | 296 | mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index 8ba080e441a1..5ff8af472bf5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c | |||
@@ -269,8 +269,10 @@ EXPORT_SYMBOL(mlx5_alloc_map_uar); | |||
269 | 269 | ||
270 | void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar) | 270 | void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar) |
271 | { | 271 | { |
272 | iounmap(uar->map); | 272 | if (uar->map) |
273 | iounmap(uar->bf_map); | 273 | iounmap(uar->map); |
274 | else | ||
275 | iounmap(uar->bf_map); | ||
274 | mlx5_cmd_free_uar(mdev, uar->index); | 276 | mlx5_cmd_free_uar(mdev, uar->index); |
275 | } | 277 | } |
276 | EXPORT_SYMBOL(mlx5_unmap_free_uar); | 278 | EXPORT_SYMBOL(mlx5_unmap_free_uar); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index bd518405859e..b69dadcfb897 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c | |||
@@ -196,6 +196,46 @@ int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev, | |||
196 | } | 196 | } |
197 | EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address); | 197 | EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address); |
198 | 198 | ||
199 | int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu) | ||
200 | { | ||
201 | int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); | ||
202 | u32 *out; | ||
203 | int err; | ||
204 | |||
205 | out = mlx5_vzalloc(outlen); | ||
206 | if (!out) | ||
207 | return -ENOMEM; | ||
208 | |||
209 | err = mlx5_query_nic_vport_context(mdev, 0, out, outlen); | ||
210 | if (!err) | ||
211 | *mtu = MLX5_GET(query_nic_vport_context_out, out, | ||
212 | nic_vport_context.mtu); | ||
213 | |||
214 | kvfree(out); | ||
215 | return err; | ||
216 | } | ||
217 | EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu); | ||
218 | |||
219 | int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu) | ||
220 | { | ||
221 | int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); | ||
222 | void *in; | ||
223 | int err; | ||
224 | |||
225 | in = mlx5_vzalloc(inlen); | ||
226 | if (!in) | ||
227 | return -ENOMEM; | ||
228 | |||
229 | MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1); | ||
230 | MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu); | ||
231 | |||
232 | err = mlx5_modify_nic_vport_context(mdev, in, inlen); | ||
233 | |||
234 | kvfree(in); | ||
235 | return err; | ||
236 | } | ||
237 | EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu); | ||
238 | |||
199 | int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, | 239 | int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, |
200 | u32 vport, | 240 | u32 vport, |
201 | enum mlx5_list_type list_type, | 241 | enum mlx5_list_type list_type, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c index 9f10df25f3cd..f2fd1ef16da7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c | |||
@@ -95,21 +95,22 @@ struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port) | |||
95 | return vxlan; | 95 | return vxlan; |
96 | } | 96 | } |
97 | 97 | ||
98 | int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port) | 98 | static void mlx5e_vxlan_add_port(struct work_struct *work) |
99 | { | 99 | { |
100 | struct mlx5e_vxlan_work *vxlan_work = | ||
101 | container_of(work, struct mlx5e_vxlan_work, work); | ||
102 | struct mlx5e_priv *priv = vxlan_work->priv; | ||
100 | struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; | 103 | struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; |
104 | u16 port = vxlan_work->port; | ||
101 | struct mlx5e_vxlan *vxlan; | 105 | struct mlx5e_vxlan *vxlan; |
102 | int err; | 106 | int err; |
103 | 107 | ||
104 | err = mlx5e_vxlan_core_add_port_cmd(priv->mdev, port); | 108 | if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port)) |
105 | if (err) | 109 | goto free_work; |
106 | return err; | ||
107 | 110 | ||
108 | vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL); | 111 | vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL); |
109 | if (!vxlan) { | 112 | if (!vxlan) |
110 | err = -ENOMEM; | ||
111 | goto err_delete_port; | 113 | goto err_delete_port; |
112 | } | ||
113 | 114 | ||
114 | vxlan->udp_port = port; | 115 | vxlan->udp_port = port; |
115 | 116 | ||
@@ -119,13 +120,14 @@ int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port) | |||
119 | if (err) | 120 | if (err) |
120 | goto err_free; | 121 | goto err_free; |
121 | 122 | ||
122 | return 0; | 123 | goto free_work; |
123 | 124 | ||
124 | err_free: | 125 | err_free: |
125 | kfree(vxlan); | 126 | kfree(vxlan); |
126 | err_delete_port: | 127 | err_delete_port: |
127 | mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); | 128 | mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); |
128 | return err; | 129 | free_work: |
130 | kfree(vxlan_work); | ||
129 | } | 131 | } |
130 | 132 | ||
131 | static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port) | 133 | static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port) |
@@ -145,12 +147,36 @@ static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port) | |||
145 | kfree(vxlan); | 147 | kfree(vxlan); |
146 | } | 148 | } |
147 | 149 | ||
148 | void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port) | 150 | static void mlx5e_vxlan_del_port(struct work_struct *work) |
149 | { | 151 | { |
150 | if (!mlx5e_vxlan_lookup_port(priv, port)) | 152 | struct mlx5e_vxlan_work *vxlan_work = |
151 | return; | 153 | container_of(work, struct mlx5e_vxlan_work, work); |
154 | struct mlx5e_priv *priv = vxlan_work->priv; | ||
155 | u16 port = vxlan_work->port; | ||
152 | 156 | ||
153 | __mlx5e_vxlan_core_del_port(priv, port); | 157 | __mlx5e_vxlan_core_del_port(priv, port); |
158 | |||
159 | kfree(vxlan_work); | ||
160 | } | ||
161 | |||
162 | void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family, | ||
163 | u16 port, int add) | ||
164 | { | ||
165 | struct mlx5e_vxlan_work *vxlan_work; | ||
166 | |||
167 | vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC); | ||
168 | if (!vxlan_work) | ||
169 | return; | ||
170 | |||
171 | if (add) | ||
172 | INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_port); | ||
173 | else | ||
174 | INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_port); | ||
175 | |||
176 | vxlan_work->priv = priv; | ||
177 | vxlan_work->port = port; | ||
178 | vxlan_work->sa_family = sa_family; | ||
179 | queue_work(priv->wq, &vxlan_work->work); | ||
154 | } | 180 | } |
155 | 181 | ||
156 | void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv) | 182 | void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv) |
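The vxlan.c rework turns the add/del port entry points into queued work: the ndo callbacks that report VXLAN UDP ports can run in atomic context, while the firmware commands need to sleep, so a small work item is allocated with GFP_ATOMIC and freed by the handler itself. A self-contained sketch of that shape, with hypothetical foo_* names standing in for the driver's helpers:

#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct foo_priv {
	struct workqueue_struct *wq;
};

struct foo_port_work {
	struct work_struct work;
	struct foo_priv *priv;
	u16 port;
	bool add;
};

/* placeholder for the sleeping firmware command */
static void foo_hw_set_port(struct foo_priv *priv, u16 port, bool add)
{
}

static void foo_port_work_fn(struct work_struct *work)
{
	struct foo_port_work *pw = container_of(work, struct foo_port_work, work);

	foo_hw_set_port(pw->priv, pw->port, pw->add);	/* safe to sleep here */
	kfree(pw);					/* the work item owns itself */
}

/* may be called from atomic context, e.g. an ndo_add_vxlan_port callback */
static void foo_queue_port_work(struct foo_priv *priv, u16 port, bool add)
{
	struct foo_port_work *pw = kmalloc(sizeof(*pw), GFP_ATOMIC);

	if (!pw)
		return;			/* best effort, as in the hunk above */

	INIT_WORK(&pw->work, foo_port_work_fn);
	pw->priv = priv;
	pw->port = port;
	pw->add = add;
	queue_work(priv->wq, &pw->work);
}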
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h index a01685056ab1..129f3527aa14 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h | |||
@@ -39,6 +39,13 @@ struct mlx5e_vxlan { | |||
39 | u16 udp_port; | 39 | u16 udp_port; |
40 | }; | 40 | }; |
41 | 41 | ||
42 | struct mlx5e_vxlan_work { | ||
43 | struct work_struct work; | ||
44 | struct mlx5e_priv *priv; | ||
45 | sa_family_t sa_family; | ||
46 | u16 port; | ||
47 | }; | ||
48 | |||
42 | static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev) | 49 | static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev) |
43 | { | 50 | { |
44 | return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) && | 51 | return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) && |
@@ -46,8 +53,8 @@ static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev) | |||
46 | } | 53 | } |
47 | 54 | ||
48 | void mlx5e_vxlan_init(struct mlx5e_priv *priv); | 55 | void mlx5e_vxlan_init(struct mlx5e_priv *priv); |
49 | int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port); | 56 | void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family, |
50 | void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port); | 57 | u16 port, int add); |
51 | struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port); | 58 | struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port); |
52 | void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv); | 59 | void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv); |
53 | 60 | ||
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 270c9eeb7ab6..6d1a956e3f77 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c | |||
@@ -2668,9 +2668,9 @@ static int myri10ge_close(struct net_device *dev) | |||
2668 | 2668 | ||
2669 | del_timer_sync(&mgp->watchdog_timer); | 2669 | del_timer_sync(&mgp->watchdog_timer); |
2670 | mgp->running = MYRI10GE_ETH_STOPPING; | 2670 | mgp->running = MYRI10GE_ETH_STOPPING; |
2671 | local_bh_disable(); /* myri10ge_ss_lock_napi needs bh disabled */ | ||
2672 | for (i = 0; i < mgp->num_slices; i++) { | 2671 | for (i = 0; i < mgp->num_slices; i++) { |
2673 | napi_disable(&mgp->ss[i].napi); | 2672 | napi_disable(&mgp->ss[i].napi); |
2673 | local_bh_disable(); /* myri10ge_ss_lock_napi needs this */ | ||
2674 | /* Lock the slice to prevent the busy_poll handler from | 2674 | /* Lock the slice to prevent the busy_poll handler from |
2675 | * accessing it. Later when we bring the NIC up, myri10ge_open | 2675 | * accessing it. Later when we bring the NIC up, myri10ge_open |
2676 | * resets the slice including this lock. | 2676 | * resets the slice including this lock. |
@@ -2679,8 +2679,8 @@ static int myri10ge_close(struct net_device *dev) | |||
2679 | pr_info("Slice %d locked\n", i); | 2679 | pr_info("Slice %d locked\n", i); |
2680 | mdelay(1); | 2680 | mdelay(1); |
2681 | } | 2681 | } |
2682 | local_bh_enable(); | ||
2682 | } | 2683 | } |
2683 | local_bh_enable(); | ||
2684 | netif_carrier_off(dev); | 2684 | netif_carrier_off(dev); |
2685 | 2685 | ||
2686 | netif_tx_stop_all_queues(dev); | 2686 | netif_tx_stop_all_queues(dev); |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 55007f1e6bbc..caf6ddb7ea76 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | |||
@@ -37,8 +37,8 @@ | |||
37 | 37 | ||
38 | #define _QLCNIC_LINUX_MAJOR 5 | 38 | #define _QLCNIC_LINUX_MAJOR 5 |
39 | #define _QLCNIC_LINUX_MINOR 3 | 39 | #define _QLCNIC_LINUX_MINOR 3 |
40 | #define _QLCNIC_LINUX_SUBVERSION 63 | 40 | #define _QLCNIC_LINUX_SUBVERSION 64 |
41 | #define QLCNIC_LINUX_VERSIONID "5.3.63" | 41 | #define QLCNIC_LINUX_VERSIONID "5.3.64" |
42 | #define QLCNIC_DRV_IDC_VER 0x01 | 42 | #define QLCNIC_DRV_IDC_VER 0x01 |
43 | #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ | 43 | #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ |
44 | (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) | 44 | (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) |
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 98d33d462c6c..1681084cc96f 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c | |||
@@ -1920,6 +1920,10 @@ static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context, | |||
1920 | return 0; | 1920 | return 0; |
1921 | } | 1921 | } |
1922 | 1922 | ||
1923 | if (nic_data->datapath_caps & | ||
1924 | 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN) | ||
1925 | return -EOPNOTSUPP; | ||
1926 | |||
1923 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID, | 1927 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID, |
1924 | nic_data->vport_id); | 1928 | nic_data->vport_id); |
1925 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type); | 1929 | MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type); |
@@ -2923,9 +2927,16 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx, | |||
2923 | bool replacing) | 2927 | bool replacing) |
2924 | { | 2928 | { |
2925 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | 2929 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
2930 | u32 flags = spec->flags; | ||
2926 | 2931 | ||
2927 | memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN); | 2932 | memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN); |
2928 | 2933 | ||
2934 | /* Remove RSS flag if we don't have an RSS context. */ | ||
2935 | if (flags & EFX_FILTER_FLAG_RX_RSS && | ||
2936 | spec->rss_context == EFX_FILTER_RSS_CONTEXT_DEFAULT && | ||
2937 | nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) | ||
2938 | flags &= ~EFX_FILTER_FLAG_RX_RSS; | ||
2939 | |||
2929 | if (replacing) { | 2940 | if (replacing) { |
2930 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, | 2941 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, |
2931 | MC_CMD_FILTER_OP_IN_OP_REPLACE); | 2942 | MC_CMD_FILTER_OP_IN_OP_REPLACE); |
@@ -2985,10 +2996,10 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx, | |||
2985 | spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? | 2996 | spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? |
2986 | 0 : spec->dmaq_id); | 2997 | 0 : spec->dmaq_id); |
2987 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE, | 2998 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE, |
2988 | (spec->flags & EFX_FILTER_FLAG_RX_RSS) ? | 2999 | (flags & EFX_FILTER_FLAG_RX_RSS) ? |
2989 | MC_CMD_FILTER_OP_IN_RX_MODE_RSS : | 3000 | MC_CMD_FILTER_OP_IN_RX_MODE_RSS : |
2990 | MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE); | 3001 | MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE); |
2991 | if (spec->flags & EFX_FILTER_FLAG_RX_RSS) | 3002 | if (flags & EFX_FILTER_FLAG_RX_RSS) |
2992 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT, | 3003 | MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT, |
2993 | spec->rss_context != | 3004 | spec->rss_context != |
2994 | EFX_FILTER_RSS_CONTEXT_DEFAULT ? | 3005 | EFX_FILTER_RSS_CONTEXT_DEFAULT ? |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 44022b1845ce..afb90d129cb6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | |||
@@ -49,7 +49,6 @@ struct socfpga_dwmac { | |||
49 | u32 reg_shift; | 49 | u32 reg_shift; |
50 | struct device *dev; | 50 | struct device *dev; |
51 | struct regmap *sys_mgr_base_addr; | 51 | struct regmap *sys_mgr_base_addr; |
52 | struct reset_control *stmmac_rst; | ||
53 | void __iomem *splitter_base; | 52 | void __iomem *splitter_base; |
54 | bool f2h_ptp_ref_clk; | 53 | bool f2h_ptp_ref_clk; |
55 | }; | 54 | }; |
@@ -92,15 +91,6 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device * | |||
92 | struct device_node *np_splitter; | 91 | struct device_node *np_splitter; |
93 | struct resource res_splitter; | 92 | struct resource res_splitter; |
94 | 93 | ||
95 | dwmac->stmmac_rst = devm_reset_control_get(dev, | ||
96 | STMMAC_RESOURCE_NAME); | ||
97 | if (IS_ERR(dwmac->stmmac_rst)) { | ||
98 | dev_info(dev, "Could not get reset control!\n"); | ||
99 | if (PTR_ERR(dwmac->stmmac_rst) == -EPROBE_DEFER) | ||
100 | return -EPROBE_DEFER; | ||
101 | dwmac->stmmac_rst = NULL; | ||
102 | } | ||
103 | |||
104 | dwmac->interface = of_get_phy_mode(np); | 94 | dwmac->interface = of_get_phy_mode(np); |
105 | 95 | ||
106 | sys_mgr_base_addr = syscon_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon"); | 96 | sys_mgr_base_addr = syscon_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon"); |
@@ -194,30 +184,23 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac) | |||
194 | return 0; | 184 | return 0; |
195 | } | 185 | } |
196 | 186 | ||
197 | static void socfpga_dwmac_exit(struct platform_device *pdev, void *priv) | ||
198 | { | ||
199 | struct socfpga_dwmac *dwmac = priv; | ||
200 | |||
201 | /* On socfpga platform exit, assert and hold reset to the | ||
202 | * enet controller - the default state after a hard reset. | ||
203 | */ | ||
204 | if (dwmac->stmmac_rst) | ||
205 | reset_control_assert(dwmac->stmmac_rst); | ||
206 | } | ||
207 | |||
208 | static int socfpga_dwmac_init(struct platform_device *pdev, void *priv) | 187 | static int socfpga_dwmac_init(struct platform_device *pdev, void *priv) |
209 | { | 188 | { |
210 | struct socfpga_dwmac *dwmac = priv; | 189 | struct socfpga_dwmac *dwmac = priv; |
211 | struct net_device *ndev = platform_get_drvdata(pdev); | 190 | struct net_device *ndev = platform_get_drvdata(pdev); |
212 | struct stmmac_priv *stpriv = NULL; | 191 | struct stmmac_priv *stpriv = NULL; |
213 | int ret = 0; | 192 | int ret = 0; |
214 | 193 | ||
215 | if (ndev) | 194 | if (!ndev) |
216 | stpriv = netdev_priv(ndev); | 195 | return -EINVAL; |
196 | |||
197 | stpriv = netdev_priv(ndev); | ||
198 | if (!stpriv) | ||
199 | return -EINVAL; | ||
217 | 200 | ||
218 | /* Assert reset to the enet controller before changing the phy mode */ | 201 | /* Assert reset to the enet controller before changing the phy mode */ |
219 | if (dwmac->stmmac_rst) | 202 | if (stpriv->stmmac_rst) |
220 | reset_control_assert(dwmac->stmmac_rst); | 203 | reset_control_assert(stpriv->stmmac_rst); |
221 | 204 | ||
222 | /* Setup the phy mode in the system manager registers according to | 205 | /* Setup the phy mode in the system manager registers according to |
223 | * devicetree configuration | 206 | * devicetree configuration |
@@ -227,8 +210,8 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv) | |||
227 | /* Deassert reset for the phy configuration to be sampled by | 210 | /* Deassert reset for the phy configuration to be sampled by |
228 | * the enet controller, and operation to start in requested mode | 211 | * the enet controller, and operation to start in requested mode |
229 | */ | 212 | */ |
230 | if (dwmac->stmmac_rst) | 213 | if (stpriv->stmmac_rst) |
231 | reset_control_deassert(dwmac->stmmac_rst); | 214 | reset_control_deassert(stpriv->stmmac_rst); |
232 | 215 | ||
233 | /* Before the enet controller is suspended, the phy is suspended. | 216 | /* Before the enet controller is suspended, the phy is suspended. |
234 | * This causes the phy clock to be gated. The enet controller is | 217 | * This causes the phy clock to be gated. The enet controller is |
@@ -245,7 +228,7 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv) | |||
245 | * control register 0, and can be modified by the phy driver | 228 | * control register 0, and can be modified by the phy driver |
246 | * framework. | 229 | * framework. |
247 | */ | 230 | */ |
248 | if (stpriv && stpriv->phydev) | 231 | if (stpriv->phydev) |
249 | phy_resume(stpriv->phydev); | 232 | phy_resume(stpriv->phydev); |
250 | 233 | ||
251 | return ret; | 234 | return ret; |
@@ -285,14 +268,13 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) | |||
285 | 268 | ||
286 | plat_dat->bsp_priv = dwmac; | 269 | plat_dat->bsp_priv = dwmac; |
287 | plat_dat->init = socfpga_dwmac_init; | 270 | plat_dat->init = socfpga_dwmac_init; |
288 | plat_dat->exit = socfpga_dwmac_exit; | ||
289 | plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed; | 271 | plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed; |
290 | 272 | ||
291 | ret = socfpga_dwmac_init(pdev, plat_dat->bsp_priv); | 273 | ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); |
292 | if (ret) | 274 | if (!ret) |
293 | return ret; | 275 | ret = socfpga_dwmac_init(pdev, dwmac); |
294 | 276 | ||
295 | return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); | 277 | return ret; |
296 | } | 278 | } |
297 | 279 | ||
298 | static const struct of_device_id socfpga_dwmac_match[] = { | 280 | static const struct of_device_id socfpga_dwmac_match[] = { |
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index bbb77cd8ad67..e2fcdf1eec44 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
@@ -367,7 +367,6 @@ struct cpsw_priv { | |||
367 | spinlock_t lock; | 367 | spinlock_t lock; |
368 | struct platform_device *pdev; | 368 | struct platform_device *pdev; |
369 | struct net_device *ndev; | 369 | struct net_device *ndev; |
370 | struct device_node *phy_node; | ||
371 | struct napi_struct napi_rx; | 370 | struct napi_struct napi_rx; |
372 | struct napi_struct napi_tx; | 371 | struct napi_struct napi_tx; |
373 | struct device *dev; | 372 | struct device *dev; |
@@ -1148,25 +1147,34 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) | |||
1148 | cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, | 1147 | cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, |
1149 | 1 << slave_port, 0, 0, ALE_MCAST_FWD_2); | 1148 | 1 << slave_port, 0, 0, ALE_MCAST_FWD_2); |
1150 | 1149 | ||
1151 | if (priv->phy_node) | 1150 | if (slave->data->phy_node) { |
1152 | slave->phy = of_phy_connect(priv->ndev, priv->phy_node, | 1151 | slave->phy = of_phy_connect(priv->ndev, slave->data->phy_node, |
1153 | &cpsw_adjust_link, 0, slave->data->phy_if); | 1152 | &cpsw_adjust_link, 0, slave->data->phy_if); |
1154 | else | 1153 | if (!slave->phy) { |
1154 | dev_err(priv->dev, "phy \"%s\" not found on slave %d\n", | ||
1155 | slave->data->phy_node->full_name, | ||
1156 | slave->slave_num); | ||
1157 | return; | ||
1158 | } | ||
1159 | } else { | ||
1155 | slave->phy = phy_connect(priv->ndev, slave->data->phy_id, | 1160 | slave->phy = phy_connect(priv->ndev, slave->data->phy_id, |
1156 | &cpsw_adjust_link, slave->data->phy_if); | 1161 | &cpsw_adjust_link, slave->data->phy_if); |
1157 | if (IS_ERR(slave->phy)) { | 1162 | if (IS_ERR(slave->phy)) { |
1158 | dev_err(priv->dev, "phy %s not found on slave %d\n", | 1163 | dev_err(priv->dev, |
1159 | slave->data->phy_id, slave->slave_num); | 1164 | "phy \"%s\" not found on slave %d, err %ld\n", |
1160 | slave->phy = NULL; | 1165 | slave->data->phy_id, slave->slave_num, |
1161 | } else { | 1166 | PTR_ERR(slave->phy)); |
1162 | phy_attached_info(slave->phy); | 1167 | slave->phy = NULL; |
1168 | return; | ||
1169 | } | ||
1170 | } | ||
1163 | 1171 | ||
1164 | phy_start(slave->phy); | 1172 | phy_attached_info(slave->phy); |
1165 | 1173 | ||
1166 | /* Configure GMII_SEL register */ | 1174 | phy_start(slave->phy); |
1167 | cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface, | 1175 | |
1168 | slave->slave_num); | 1176 | /* Configure GMII_SEL register */ |
1169 | } | 1177 | cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface, slave->slave_num); |
1170 | } | 1178 | } |
1171 | 1179 | ||
1172 | static inline void cpsw_add_default_vlan(struct cpsw_priv *priv) | 1180 | static inline void cpsw_add_default_vlan(struct cpsw_priv *priv) |
@@ -1940,12 +1948,11 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv, | |||
1940 | slave->port_vlan = data->dual_emac_res_vlan; | 1948 | slave->port_vlan = data->dual_emac_res_vlan; |
1941 | } | 1949 | } |
1942 | 1950 | ||
1943 | static int cpsw_probe_dt(struct cpsw_priv *priv, | 1951 | static int cpsw_probe_dt(struct cpsw_platform_data *data, |
1944 | struct platform_device *pdev) | 1952 | struct platform_device *pdev) |
1945 | { | 1953 | { |
1946 | struct device_node *node = pdev->dev.of_node; | 1954 | struct device_node *node = pdev->dev.of_node; |
1947 | struct device_node *slave_node; | 1955 | struct device_node *slave_node; |
1948 | struct cpsw_platform_data *data = &priv->data; | ||
1949 | int i = 0, ret; | 1956 | int i = 0, ret; |
1950 | u32 prop; | 1957 | u32 prop; |
1951 | 1958 | ||
@@ -2033,25 +2040,21 @@ static int cpsw_probe_dt(struct cpsw_priv *priv, | |||
2033 | if (strcmp(slave_node->name, "slave")) | 2040 | if (strcmp(slave_node->name, "slave")) |
2034 | continue; | 2041 | continue; |
2035 | 2042 | ||
2036 | priv->phy_node = of_parse_phandle(slave_node, "phy-handle", 0); | 2043 | slave_data->phy_node = of_parse_phandle(slave_node, |
2044 | "phy-handle", 0); | ||
2037 | parp = of_get_property(slave_node, "phy_id", &lenp); | 2045 | parp = of_get_property(slave_node, "phy_id", &lenp); |
2038 | if (of_phy_is_fixed_link(slave_node)) { | 2046 | if (slave_data->phy_node) { |
2039 | struct device_node *phy_node; | 2047 | dev_dbg(&pdev->dev, |
2040 | struct phy_device *phy_dev; | 2048 | "slave[%d] using phy-handle=\"%s\"\n", |
2041 | 2049 | i, slave_data->phy_node->full_name); | |
2050 | } else if (of_phy_is_fixed_link(slave_node)) { | ||
2042 | /* In the case of a fixed PHY, the DT node associated | 2051 | /* In the case of a fixed PHY, the DT node associated |
2043 | * to the PHY is the Ethernet MAC DT node. | 2052 | * to the PHY is the Ethernet MAC DT node. |
2044 | */ | 2053 | */ |
2045 | ret = of_phy_register_fixed_link(slave_node); | 2054 | ret = of_phy_register_fixed_link(slave_node); |
2046 | if (ret) | 2055 | if (ret) |
2047 | return ret; | 2056 | return ret; |
2048 | phy_node = of_node_get(slave_node); | 2057 | slave_data->phy_node = of_node_get(slave_node); |
2049 | phy_dev = of_phy_find_device(phy_node); | ||
2050 | if (!phy_dev) | ||
2051 | return -ENODEV; | ||
2052 | snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), | ||
2053 | PHY_ID_FMT, phy_dev->mdio.bus->id, | ||
2054 | phy_dev->mdio.addr); | ||
2055 | } else if (parp) { | 2058 | } else if (parp) { |
2056 | u32 phyid; | 2059 | u32 phyid; |
2057 | struct device_node *mdio_node; | 2060 | struct device_node *mdio_node; |
@@ -2072,7 +2075,9 @@ static int cpsw_probe_dt(struct cpsw_priv *priv, | |||
2072 | snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), | 2075 | snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), |
2073 | PHY_ID_FMT, mdio->name, phyid); | 2076 | PHY_ID_FMT, mdio->name, phyid); |
2074 | } else { | 2077 | } else { |
2075 | dev_err(&pdev->dev, "No slave[%d] phy_id or fixed-link property\n", i); | 2078 | dev_err(&pdev->dev, |
2079 | "No slave[%d] phy_id, phy-handle, or fixed-link property\n", | ||
2080 | i); | ||
2076 | goto no_phy_slave; | 2081 | goto no_phy_slave; |
2077 | } | 2082 | } |
2078 | slave_data->phy_if = of_get_phy_mode(slave_node); | 2083 | slave_data->phy_if = of_get_phy_mode(slave_node); |
@@ -2275,7 +2280,7 @@ static int cpsw_probe(struct platform_device *pdev) | |||
2275 | /* Select default pin state */ | 2280 | /* Select default pin state */ |
2276 | pinctrl_pm_select_default_state(&pdev->dev); | 2281 | pinctrl_pm_select_default_state(&pdev->dev); |
2277 | 2282 | ||
2278 | if (cpsw_probe_dt(priv, pdev)) { | 2283 | if (cpsw_probe_dt(&priv->data, pdev)) { |
2279 | dev_err(&pdev->dev, "cpsw: platform data missing\n"); | 2284 | dev_err(&pdev->dev, "cpsw: platform data missing\n"); |
2280 | ret = -ENODEV; | 2285 | ret = -ENODEV; |
2281 | goto clean_runtime_disable_ret; | 2286 | goto clean_runtime_disable_ret; |
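The cpsw.c changes move the phy-handle lookup into per-slave data and connect through either of_phy_connect() (DT phandle, returns NULL on failure) or phy_connect() (legacy bus-id string, returns ERR_PTR() on failure). A compact sketch of that dual path, with a hypothetical foo_adjust_link() callback:

#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

/* hypothetical link-change callback; the real driver reprograms MAC speed/duplex here */
static void foo_adjust_link(struct net_device *ndev)
{
}

static int foo_connect_phy(struct net_device *ndev, struct device_node *phy_node,
			   const char *phy_id, phy_interface_t phy_if)
{
	struct phy_device *phy;

	if (phy_node) {
		/* DT "phy-handle" path: NULL means not found */
		phy = of_phy_connect(ndev, phy_node, foo_adjust_link, 0, phy_if);
		if (!phy)
			return -ENODEV;
	} else {
		/* legacy phy_id string: errors come back as ERR_PTR() */
		phy = phy_connect(ndev, phy_id, foo_adjust_link, phy_if);
		if (IS_ERR(phy))
			return PTR_ERR(phy);
	}

	phy_attached_info(phy);
	phy_start(phy);
	return 0;
}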
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h index 442a7038e660..e50afd1b2eda 100644 --- a/drivers/net/ethernet/ti/cpsw.h +++ b/drivers/net/ethernet/ti/cpsw.h | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/phy.h> | 18 | #include <linux/phy.h> |
19 | 19 | ||
20 | struct cpsw_slave_data { | 20 | struct cpsw_slave_data { |
21 | struct device_node *phy_node; | ||
21 | char phy_id[MII_BUS_ID_SIZE]; | 22 | char phy_id[MII_BUS_ID_SIZE]; |
22 | int phy_if; | 23 | int phy_if; |
23 | u8 mac_addr[ETH_ALEN]; | 24 | u8 mac_addr[ETH_ALEN]; |
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 58d58f002559..f56d66e6ec15 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c | |||
@@ -1512,7 +1512,10 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd) | |||
1512 | 1512 | ||
1513 | /* TODO: Add phy read and write and private statistics get feature */ | 1513 | /* TODO: Add phy read and write and private statistics get feature */ |
1514 | 1514 | ||
1515 | return phy_mii_ioctl(priv->phydev, ifrq, cmd); | 1515 | if (priv->phydev) |
1516 | return phy_mii_ioctl(priv->phydev, ifrq, cmd); | ||
1517 | else | ||
1518 | return -EOPNOTSUPP; | ||
1516 | } | 1519 | } |
1517 | 1520 | ||
1518 | static int match_first_device(struct device *dev, void *data) | 1521 | static int match_first_device(struct device *dev, void *data) |
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c index 13214a6492ac..743b18266a7c 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c | |||
@@ -1622,7 +1622,7 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl) | |||
1622 | continue; | 1622 | continue; |
1623 | 1623 | ||
1624 | /* copy hw scan info */ | 1624 | /* copy hw scan info */ |
1625 | memcpy(target->hwinfo, scan_info, scan_info->size); | 1625 | memcpy(target->hwinfo, scan_info, be16_to_cpu(scan_info->size)); |
1626 | target->essid_len = strnlen(scan_info->essid, | 1626 | target->essid_len = strnlen(scan_info->essid, |
1627 | sizeof(scan_info->essid)); | 1627 | sizeof(scan_info->essid)); |
1628 | target->rate_len = 0; | 1628 | target->rate_len = 0; |
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 84d3e5ca8817..c6385617bfb2 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c | |||
@@ -880,12 +880,12 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb, | |||
880 | macsec_skb_cb(skb)->valid = false; | 880 | macsec_skb_cb(skb)->valid = false; |
881 | skb = skb_share_check(skb, GFP_ATOMIC); | 881 | skb = skb_share_check(skb, GFP_ATOMIC); |
882 | if (!skb) | 882 | if (!skb) |
883 | return NULL; | 883 | return ERR_PTR(-ENOMEM); |
884 | 884 | ||
885 | req = aead_request_alloc(rx_sa->key.tfm, GFP_ATOMIC); | 885 | req = aead_request_alloc(rx_sa->key.tfm, GFP_ATOMIC); |
886 | if (!req) { | 886 | if (!req) { |
887 | kfree_skb(skb); | 887 | kfree_skb(skb); |
888 | return NULL; | 888 | return ERR_PTR(-ENOMEM); |
889 | } | 889 | } |
890 | 890 | ||
891 | hdr = (struct macsec_eth_header *)skb->data; | 891 | hdr = (struct macsec_eth_header *)skb->data; |
@@ -905,7 +905,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb, | |||
905 | skb = skb_unshare(skb, GFP_ATOMIC); | 905 | skb = skb_unshare(skb, GFP_ATOMIC); |
906 | if (!skb) { | 906 | if (!skb) { |
907 | aead_request_free(req); | 907 | aead_request_free(req); |
908 | return NULL; | 908 | return ERR_PTR(-ENOMEM); |
909 | } | 909 | } |
910 | } else { | 910 | } else { |
911 | /* integrity only: all headers + data authenticated */ | 911 | /* integrity only: all headers + data authenticated */ |
@@ -921,14 +921,14 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb, | |||
921 | dev_hold(dev); | 921 | dev_hold(dev); |
922 | ret = crypto_aead_decrypt(req); | 922 | ret = crypto_aead_decrypt(req); |
923 | if (ret == -EINPROGRESS) { | 923 | if (ret == -EINPROGRESS) { |
924 | return NULL; | 924 | return ERR_PTR(ret); |
925 | } else if (ret != 0) { | 925 | } else if (ret != 0) { |
926 | /* decryption/authentication failed | 926 | /* decryption/authentication failed |
927 | * 10.6 if validateFrames is disabled, deliver anyway | 927 | * 10.6 if validateFrames is disabled, deliver anyway |
928 | */ | 928 | */ |
929 | if (ret != -EBADMSG) { | 929 | if (ret != -EBADMSG) { |
930 | kfree_skb(skb); | 930 | kfree_skb(skb); |
931 | skb = NULL; | 931 | skb = ERR_PTR(ret); |
932 | } | 932 | } |
933 | } else { | 933 | } else { |
934 | macsec_skb_cb(skb)->valid = true; | 934 | macsec_skb_cb(skb)->valid = true; |
@@ -1146,8 +1146,10 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) | |||
1146 | secy->validate_frames != MACSEC_VALIDATE_DISABLED) | 1146 | secy->validate_frames != MACSEC_VALIDATE_DISABLED) |
1147 | skb = macsec_decrypt(skb, dev, rx_sa, sci, secy); | 1147 | skb = macsec_decrypt(skb, dev, rx_sa, sci, secy); |
1148 | 1148 | ||
1149 | if (!skb) { | 1149 | if (IS_ERR(skb)) { |
1150 | macsec_rxsa_put(rx_sa); | 1150 | /* the decrypt callback needs the reference */ |
1151 | if (PTR_ERR(skb) != -EINPROGRESS) | ||
1152 | macsec_rxsa_put(rx_sa); | ||
1151 | rcu_read_unlock(); | 1153 | rcu_read_unlock(); |
1152 | *pskb = NULL; | 1154 | *pskb = NULL; |
1153 | return RX_HANDLER_CONSUMED; | 1155 | return RX_HANDLER_CONSUMED; |
@@ -1161,7 +1163,8 @@ deliver: | |||
1161 | macsec_extra_len(macsec_skb_cb(skb)->has_sci)); | 1163 | macsec_extra_len(macsec_skb_cb(skb)->has_sci)); |
1162 | macsec_reset_skb(skb, secy->netdev); | 1164 | macsec_reset_skb(skb, secy->netdev); |
1163 | 1165 | ||
1164 | macsec_rxsa_put(rx_sa); | 1166 | if (rx_sa) |
1167 | macsec_rxsa_put(rx_sa); | ||
1165 | count_rx(dev, skb->len); | 1168 | count_rx(dev, skb->len); |
1166 | 1169 | ||
1167 | rcu_read_unlock(); | 1170 | rcu_read_unlock(); |
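The macsec_decrypt() changes above switch the return convention from NULL to ERR_PTR() so the caller can tell an async decrypt still in flight (-EINPROGRESS, references kept for the crypto completion) from a genuine failure. A stripped-down sketch of that convention, with foo_* placeholders for the real macsec helpers:

#include <linux/err.h>
#include <linux/skbuff.h>

/* placeholder for crypto_aead_decrypt() on a prepared request */
static int foo_start_decrypt(struct sk_buff *skb)
{
	return 0;
}

/* placeholder for dropping the RX SA / device references held for the async path */
static void foo_put_refs(void)
{
}

static struct sk_buff *foo_decrypt(struct sk_buff *skb)
{
	int ret;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);	/* NULL is no longer a valid answer */

	ret = foo_start_decrypt(skb);
	if (ret == -EINPROGRESS)
		return ERR_PTR(ret);		/* completion callback finishes the frame later */
	if (ret) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}
	return skb;				/* decrypted synchronously */
}

static void foo_rx(struct sk_buff *skb)
{
	skb = foo_decrypt(skb);
	if (IS_ERR(skb)) {
		if (PTR_ERR(skb) != -EINPROGRESS)
			foo_put_refs();		/* async path keeps its references until the callback */
		return;
	}
	/* ... deliver skb and drop the references normally ... */
}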
@@ -1622,8 +1625,9 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info) | |||
1622 | } | 1625 | } |
1623 | 1626 | ||
1624 | rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL); | 1627 | rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL); |
1625 | if (init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len, | 1628 | if (!rx_sa || init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), |
1626 | secy->icv_len)) { | 1629 | secy->key_len, secy->icv_len)) { |
1630 | kfree(rx_sa); | ||
1627 | rtnl_unlock(); | 1631 | rtnl_unlock(); |
1628 | return -ENOMEM; | 1632 | return -ENOMEM; |
1629 | } | 1633 | } |
@@ -1768,6 +1772,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info) | |||
1768 | tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL); | 1772 | tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL); |
1769 | if (!tx_sa || init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), | 1773 | if (!tx_sa || init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), |
1770 | secy->key_len, secy->icv_len)) { | 1774 | secy->key_len, secy->icv_len)) { |
1775 | kfree(tx_sa); | ||
1771 | rtnl_unlock(); | 1776 | rtnl_unlock(); |
1772 | return -ENOMEM; | 1777 | return -ENOMEM; |
1773 | } | 1778 | } |
@@ -2227,7 +2232,8 @@ static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb) | |||
2227 | return 1; | 2232 | return 1; |
2228 | 2233 | ||
2229 | if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci) || | 2234 | if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci) || |
2230 | nla_put_u64(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, DEFAULT_CIPHER_ID) || | 2235 | nla_put_u64(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, |
2236 | MACSEC_DEFAULT_CIPHER_ID) || | ||
2231 | nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) || | 2237 | nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) || |
2232 | nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) || | 2238 | nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) || |
2233 | nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) || | 2239 | nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) || |
@@ -2268,7 +2274,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev, | |||
2268 | if (!hdr) | 2274 | if (!hdr) |
2269 | return -EMSGSIZE; | 2275 | return -EMSGSIZE; |
2270 | 2276 | ||
2271 | rtnl_lock(); | 2277 | genl_dump_check_consistent(cb, hdr, &macsec_fam); |
2272 | 2278 | ||
2273 | if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex)) | 2279 | if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex)) |
2274 | goto nla_put_failure; | 2280 | goto nla_put_failure; |
@@ -2429,18 +2435,17 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev, | |||
2429 | 2435 | ||
2430 | nla_nest_end(skb, rxsc_list); | 2436 | nla_nest_end(skb, rxsc_list); |
2431 | 2437 | ||
2432 | rtnl_unlock(); | ||
2433 | |||
2434 | genlmsg_end(skb, hdr); | 2438 | genlmsg_end(skb, hdr); |
2435 | 2439 | ||
2436 | return 0; | 2440 | return 0; |
2437 | 2441 | ||
2438 | nla_put_failure: | 2442 | nla_put_failure: |
2439 | rtnl_unlock(); | ||
2440 | genlmsg_cancel(skb, hdr); | 2443 | genlmsg_cancel(skb, hdr); |
2441 | return -EMSGSIZE; | 2444 | return -EMSGSIZE; |
2442 | } | 2445 | } |
2443 | 2446 | ||
2447 | static int macsec_generation = 1; /* protected by RTNL */ | ||
2448 | |||
2444 | static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb) | 2449 | static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb) |
2445 | { | 2450 | { |
2446 | struct net *net = sock_net(skb->sk); | 2451 | struct net *net = sock_net(skb->sk); |
@@ -2450,6 +2455,10 @@ static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb) | |||
2450 | dev_idx = cb->args[0]; | 2455 | dev_idx = cb->args[0]; |
2451 | 2456 | ||
2452 | d = 0; | 2457 | d = 0; |
2458 | rtnl_lock(); | ||
2459 | |||
2460 | cb->seq = macsec_generation; | ||
2461 | |||
2453 | for_each_netdev(net, dev) { | 2462 | for_each_netdev(net, dev) { |
2454 | struct macsec_secy *secy; | 2463 | struct macsec_secy *secy; |
2455 | 2464 | ||
@@ -2467,6 +2476,7 @@ next: | |||
2467 | } | 2476 | } |
2468 | 2477 | ||
2469 | done: | 2478 | done: |
2479 | rtnl_unlock(); | ||
2470 | cb->args[0] = d; | 2480 | cb->args[0] = d; |
2471 | return skb->len; | 2481 | return skb->len; |
2472 | } | 2482 | } |
@@ -2920,10 +2930,14 @@ static void macsec_dellink(struct net_device *dev, struct list_head *head) | |||
2920 | struct net_device *real_dev = macsec->real_dev; | 2930 | struct net_device *real_dev = macsec->real_dev; |
2921 | struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); | 2931 | struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); |
2922 | 2932 | ||
2933 | macsec_generation++; | ||
2934 | |||
2923 | unregister_netdevice_queue(dev, head); | 2935 | unregister_netdevice_queue(dev, head); |
2924 | list_del_rcu(&macsec->secys); | 2936 | list_del_rcu(&macsec->secys); |
2925 | if (list_empty(&rxd->secys)) | 2937 | if (list_empty(&rxd->secys)) { |
2926 | netdev_rx_handler_unregister(real_dev); | 2938 | netdev_rx_handler_unregister(real_dev); |
2939 | kfree(rxd); | ||
2940 | } | ||
2927 | 2941 | ||
2928 | macsec_del_dev(macsec); | 2942 | macsec_del_dev(macsec); |
2929 | } | 2943 | } |
@@ -2945,8 +2959,10 @@ static int register_macsec_dev(struct net_device *real_dev, | |||
2945 | 2959 | ||
2946 | err = netdev_rx_handler_register(real_dev, macsec_handle_frame, | 2960 | err = netdev_rx_handler_register(real_dev, macsec_handle_frame, |
2947 | rxd); | 2961 | rxd); |
2948 | if (err < 0) | 2962 | if (err < 0) { |
2963 | kfree(rxd); | ||
2949 | return err; | 2964 | return err; |
2965 | } | ||
2950 | } | 2966 | } |
2951 | 2967 | ||
2952 | list_add_tail_rcu(&macsec->secys, &rxd->secys); | 2968 | list_add_tail_rcu(&macsec->secys, &rxd->secys); |
@@ -3066,6 +3082,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev, | |||
3066 | if (err < 0) | 3082 | if (err < 0) |
3067 | goto del_dev; | 3083 | goto del_dev; |
3068 | 3084 | ||
3085 | macsec_generation++; | ||
3086 | |||
3069 | dev_hold(real_dev); | 3087 | dev_hold(real_dev); |
3070 | 3088 | ||
3071 | return 0; | 3089 | return 0; |
@@ -3079,7 +3097,7 @@ unregister: | |||
3079 | 3097 | ||
3080 | static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[]) | 3098 | static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[]) |
3081 | { | 3099 | { |
3082 | u64 csid = DEFAULT_CIPHER_ID; | 3100 | u64 csid = MACSEC_DEFAULT_CIPHER_ID; |
3083 | u8 icv_len = DEFAULT_ICV_LEN; | 3101 | u8 icv_len = DEFAULT_ICV_LEN; |
3084 | int flag; | 3102 | int flag; |
3085 | bool es, scb, sci; | 3103 | bool es, scb, sci; |
@@ -3094,8 +3112,8 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[]) | |||
3094 | icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); | 3112 | icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); |
3095 | 3113 | ||
3096 | switch (csid) { | 3114 | switch (csid) { |
3097 | case DEFAULT_CIPHER_ID: | 3115 | case MACSEC_DEFAULT_CIPHER_ID: |
3098 | case DEFAULT_CIPHER_ALT: | 3116 | case MACSEC_DEFAULT_CIPHER_ALT: |
3099 | if (icv_len < MACSEC_MIN_ICV_LEN || | 3117 | if (icv_len < MACSEC_MIN_ICV_LEN || |
3100 | icv_len > MACSEC_MAX_ICV_LEN) | 3118 | icv_len > MACSEC_MAX_ICV_LEN) |
3101 | return -EINVAL; | 3119 | return -EINVAL; |
@@ -3129,8 +3147,8 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[]) | |||
3129 | nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX) | 3147 | nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX) |
3130 | return -EINVAL; | 3148 | return -EINVAL; |
3131 | 3149 | ||
3132 | if ((data[IFLA_MACSEC_PROTECT] && | 3150 | if ((data[IFLA_MACSEC_REPLAY_PROTECT] && |
3133 | nla_get_u8(data[IFLA_MACSEC_PROTECT])) && | 3151 | nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) && |
3134 | !data[IFLA_MACSEC_WINDOW]) | 3152 | !data[IFLA_MACSEC_WINDOW]) |
3135 | return -EINVAL; | 3153 | return -EINVAL; |
3136 | 3154 | ||
@@ -3168,7 +3186,8 @@ static int macsec_fill_info(struct sk_buff *skb, | |||
3168 | 3186 | ||
3169 | if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci) || | 3187 | if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci) || |
3170 | nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) || | 3188 | nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) || |
3171 | nla_put_u64(skb, IFLA_MACSEC_CIPHER_SUITE, DEFAULT_CIPHER_ID) || | 3189 | nla_put_u64(skb, IFLA_MACSEC_CIPHER_SUITE, |
3190 | MACSEC_DEFAULT_CIPHER_ID) || | ||
3172 | nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) || | 3191 | nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) || |
3173 | nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) || | 3192 | nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) || |
3174 | nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) || | 3193 | nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) || |
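The macsec hunks above lean on the kernel's ERR_PTR/IS_ERR convention: instead of returning NULL for both "decryption still in flight" and "decryption failed", macsec_decrypt() now encodes the errno in the returned pointer so macsec_handle_frame() can tell -EINPROGRESS (keep the rx_sa reference for the async callback) apart from a real failure. A minimal user-space sketch of that convention; the ERR_PTR/IS_ERR/PTR_ERR helpers are simplified stand-ins for the kernel's <linux/err.h>, and the decrypt() function is invented for illustration:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

/* Simplified stand-ins for the kernel's <linux/err.h> helpers. */
#define MAX_ERRNO 4095
#define ERR_PTR(err)  ((void *)(intptr_t)(err))
#define PTR_ERR(ptr)  ((long)(intptr_t)(ptr))
#define IS_ERR(ptr)   ((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

/* Pretend "decrypt": returns a valid buffer, or an errno encoded in the pointer. */
static void *decrypt(int simulate)
{
	static char plaintext[] = "frame";

	if (simulate == 1)
		return ERR_PTR(-EINPROGRESS);	/* async, a callback will finish it */
	if (simulate == 2)
		return ERR_PTR(-EBADMSG);	/* authentication failed */
	return plaintext;			/* decrypted synchronously */
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		void *skb = decrypt(i);

		if (IS_ERR(skb)) {
			/* The caller can now distinguish the two error cases,
			 * which is what the handle_frame hunk needs in order to
			 * keep the SA reference only for the async case. */
			if (PTR_ERR(skb) == -EINPROGRESS)
				printf("case %d: still in progress, keep reference\n", i);
			else
				printf("case %d: failed with %ld, drop frame\n", i, PTR_ERR(skb));
			continue;
		}
		printf("case %d: got \"%s\"\n", i, (char *)skb);
	}
	return 0;
}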
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index b3ffaee30858..f279a897a5c7 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c | |||
@@ -359,27 +359,25 @@ static void at803x_link_change_notify(struct phy_device *phydev) | |||
359 | * in the FIFO. In such cases, the FIFO enters an error mode it | 359 | * in the FIFO. In such cases, the FIFO enters an error mode it |
360 | * cannot recover from by software. | 360 | * cannot recover from by software. |
361 | */ | 361 | */ |
362 | if (phydev->drv->phy_id == ATH8030_PHY_ID) { | 362 | if (phydev->state == PHY_NOLINK) { |
363 | if (phydev->state == PHY_NOLINK) { | 363 | if (priv->gpiod_reset && !priv->phy_reset) { |
364 | if (priv->gpiod_reset && !priv->phy_reset) { | 364 | struct at803x_context context; |
365 | struct at803x_context context; | 365 | |
366 | 366 | at803x_context_save(phydev, &context); | |
367 | at803x_context_save(phydev, &context); | 367 | |
368 | 368 | gpiod_set_value(priv->gpiod_reset, 1); | |
369 | gpiod_set_value(priv->gpiod_reset, 1); | 369 | msleep(1); |
370 | msleep(1); | 370 | gpiod_set_value(priv->gpiod_reset, 0); |
371 | gpiod_set_value(priv->gpiod_reset, 0); | 371 | msleep(1); |
372 | msleep(1); | 372 | |
373 | 373 | at803x_context_restore(phydev, &context); | |
374 | at803x_context_restore(phydev, &context); | 374 | |
375 | 375 | phydev_dbg(phydev, "%s(): phy was reset\n", | |
376 | phydev_dbg(phydev, "%s(): phy was reset\n", | 376 | __func__); |
377 | __func__); | 377 | priv->phy_reset = true; |
378 | priv->phy_reset = true; | ||
379 | } | ||
380 | } else { | ||
381 | priv->phy_reset = false; | ||
382 | } | 378 | } |
379 | } else { | ||
380 | priv->phy_reset = false; | ||
383 | } | 381 | } |
384 | } | 382 | } |
385 | 383 | ||
@@ -391,7 +389,6 @@ static struct phy_driver at803x_driver[] = { | |||
391 | .phy_id_mask = 0xffffffef, | 389 | .phy_id_mask = 0xffffffef, |
392 | .probe = at803x_probe, | 390 | .probe = at803x_probe, |
393 | .config_init = at803x_config_init, | 391 | .config_init = at803x_config_init, |
394 | .link_change_notify = at803x_link_change_notify, | ||
395 | .set_wol = at803x_set_wol, | 392 | .set_wol = at803x_set_wol, |
396 | .get_wol = at803x_get_wol, | 393 | .get_wol = at803x_get_wol, |
397 | .suspend = at803x_suspend, | 394 | .suspend = at803x_suspend, |
@@ -427,7 +424,6 @@ static struct phy_driver at803x_driver[] = { | |||
427 | .phy_id_mask = 0xffffffef, | 424 | .phy_id_mask = 0xffffffef, |
428 | .probe = at803x_probe, | 425 | .probe = at803x_probe, |
429 | .config_init = at803x_config_init, | 426 | .config_init = at803x_config_init, |
430 | .link_change_notify = at803x_link_change_notify, | ||
431 | .set_wol = at803x_set_wol, | 427 | .set_wol = at803x_set_wol, |
432 | .get_wol = at803x_get_wol, | 428 | .get_wol = at803x_get_wol, |
433 | .suspend = at803x_suspend, | 429 | .suspend = at803x_suspend, |
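The at803x change removes the shared callback from the PHY entries that never needed the reset workaround; only the affected variant keeps it, so the handler no longer has to check phy_id at run time. A small sketch of that shape, assuming the AR8030 entry keeps the callback (the excerpt above only shows the removals); the table and handler here are hypothetical:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical, simplified driver table: the quirk handler is installed only
 * on the variant that needs it, instead of one shared handler checking IDs. */
struct phy_driver {
	const char *name;
	void (*link_change_notify)(const char *phy);
};

static void ath8030_reset_quirk(const char *phy)
{
	printf("%s: pulsing the reset GPIO to recover the FIFO\n", phy);
}

static const struct phy_driver drivers[] = {
	{ .name = "AR8030", .link_change_notify = ath8030_reset_quirk },
	{ .name = "AR8031", .link_change_notify = NULL },	/* no quirk needed */
	{ .name = "AR8035", .link_change_notify = NULL },
};

int main(void)
{
	for (size_t i = 0; i < sizeof(drivers) / sizeof(drivers[0]); i++)
		if (drivers[i].link_change_notify)
			drivers[i].link_change_notify(drivers[i].name);
	return 0;
}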
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index f20890ee03f3..f64778ad9753 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c | |||
@@ -269,6 +269,7 @@ struct skb_data { /* skb->cb is one of these */ | |||
269 | struct lan78xx_net *dev; | 269 | struct lan78xx_net *dev; |
270 | enum skb_state state; | 270 | enum skb_state state; |
271 | size_t length; | 271 | size_t length; |
272 | int num_of_packet; | ||
272 | }; | 273 | }; |
273 | 274 | ||
274 | struct usb_context { | 275 | struct usb_context { |
@@ -1803,7 +1804,34 @@ static void lan78xx_remove_mdio(struct lan78xx_net *dev) | |||
1803 | 1804 | ||
1804 | static void lan78xx_link_status_change(struct net_device *net) | 1805 | static void lan78xx_link_status_change(struct net_device *net) |
1805 | { | 1806 | { |
1806 | /* nothing to do */ | 1807 | struct phy_device *phydev = net->phydev; |
1808 | int ret, temp; | ||
1809 | |||
1810 | /* At forced 100 F/H mode, the chip may fail to set the mode correctly | ||
1811 | * when the cable is switched between a long (~50 m or more) and a short one. | ||
1812 | * As a workaround, force 10 Mbit/s first and then switch back to 100 | ||
1813 | * while in forced 100 F/H mode. | ||
1814 | */ | ||
1815 | if (!phydev->autoneg && (phydev->speed == 100)) { | ||
1816 | /* disable phy interrupt */ | ||
1817 | temp = phy_read(phydev, LAN88XX_INT_MASK); | ||
1818 | temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_; | ||
1819 | ret = phy_write(phydev, LAN88XX_INT_MASK, temp); | ||
1820 | |||
1821 | temp = phy_read(phydev, MII_BMCR); | ||
1822 | temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000); | ||
1823 | phy_write(phydev, MII_BMCR, temp); /* set to 10 first */ | ||
1824 | temp |= BMCR_SPEED100; | ||
1825 | phy_write(phydev, MII_BMCR, temp); /* set to 100 later */ | ||
1826 | |||
1827 | /* clear pending interrupt generated during the workaround */ | ||
1828 | temp = phy_read(phydev, LAN88XX_INT_STS); | ||
1829 | |||
1830 | /* re-enable the phy interrupt */ | ||
1831 | temp = phy_read(phydev, LAN88XX_INT_MASK); | ||
1832 | temp |= LAN88XX_INT_MASK_MDINTPIN_EN_; | ||
1833 | ret = phy_write(phydev, LAN88XX_INT_MASK, temp); | ||
1834 | } | ||
1807 | } | 1835 | } |
1808 | 1836 | ||
1809 | static int lan78xx_phy_init(struct lan78xx_net *dev) | 1837 | static int lan78xx_phy_init(struct lan78xx_net *dev) |
@@ -2464,7 +2492,7 @@ static void tx_complete(struct urb *urb) | |||
2464 | struct lan78xx_net *dev = entry->dev; | 2492 | struct lan78xx_net *dev = entry->dev; |
2465 | 2493 | ||
2466 | if (urb->status == 0) { | 2494 | if (urb->status == 0) { |
2467 | dev->net->stats.tx_packets++; | 2495 | dev->net->stats.tx_packets += entry->num_of_packet; |
2468 | dev->net->stats.tx_bytes += entry->length; | 2496 | dev->net->stats.tx_bytes += entry->length; |
2469 | } else { | 2497 | } else { |
2470 | dev->net->stats.tx_errors++; | 2498 | dev->net->stats.tx_errors++; |
@@ -2681,10 +2709,11 @@ void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb) | |||
2681 | return; | 2709 | return; |
2682 | } | 2710 | } |
2683 | 2711 | ||
2684 | skb->protocol = eth_type_trans(skb, dev->net); | ||
2685 | dev->net->stats.rx_packets++; | 2712 | dev->net->stats.rx_packets++; |
2686 | dev->net->stats.rx_bytes += skb->len; | 2713 | dev->net->stats.rx_bytes += skb->len; |
2687 | 2714 | ||
2715 | skb->protocol = eth_type_trans(skb, dev->net); | ||
2716 | |||
2688 | netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n", | 2717 | netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n", |
2689 | skb->len + sizeof(struct ethhdr), skb->protocol); | 2718 | skb->len + sizeof(struct ethhdr), skb->protocol); |
2690 | memset(skb->cb, 0, sizeof(struct skb_data)); | 2719 | memset(skb->cb, 0, sizeof(struct skb_data)); |
@@ -2934,13 +2963,16 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev) | |||
2934 | 2963 | ||
2935 | skb_totallen = 0; | 2964 | skb_totallen = 0; |
2936 | pkt_cnt = 0; | 2965 | pkt_cnt = 0; |
2966 | count = 0; | ||
2967 | length = 0; | ||
2937 | for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) { | 2968 | for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) { |
2938 | if (skb_is_gso(skb)) { | 2969 | if (skb_is_gso(skb)) { |
2939 | if (pkt_cnt) { | 2970 | if (pkt_cnt) { |
2940 | /* handle previous packets first */ | 2971 | /* handle previous packets first */ |
2941 | break; | 2972 | break; |
2942 | } | 2973 | } |
2943 | length = skb->len; | 2974 | count = 1; |
2975 | length = skb->len - TX_OVERHEAD; | ||
2944 | skb2 = skb_dequeue(tqp); | 2976 | skb2 = skb_dequeue(tqp); |
2945 | goto gso_skb; | 2977 | goto gso_skb; |
2946 | } | 2978 | } |
@@ -2961,14 +2993,13 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev) | |||
2961 | for (count = pos = 0; count < pkt_cnt; count++) { | 2993 | for (count = pos = 0; count < pkt_cnt; count++) { |
2962 | skb2 = skb_dequeue(tqp); | 2994 | skb2 = skb_dequeue(tqp); |
2963 | if (skb2) { | 2995 | if (skb2) { |
2996 | length += (skb2->len - TX_OVERHEAD); | ||
2964 | memcpy(skb->data + pos, skb2->data, skb2->len); | 2997 | memcpy(skb->data + pos, skb2->data, skb2->len); |
2965 | pos += roundup(skb2->len, sizeof(u32)); | 2998 | pos += roundup(skb2->len, sizeof(u32)); |
2966 | dev_kfree_skb(skb2); | 2999 | dev_kfree_skb(skb2); |
2967 | } | 3000 | } |
2968 | } | 3001 | } |
2969 | 3002 | ||
2970 | length = skb_totallen; | ||
2971 | |||
2972 | gso_skb: | 3003 | gso_skb: |
2973 | urb = usb_alloc_urb(0, GFP_ATOMIC); | 3004 | urb = usb_alloc_urb(0, GFP_ATOMIC); |
2974 | if (!urb) { | 3005 | if (!urb) { |
@@ -2980,6 +3011,7 @@ gso_skb: | |||
2980 | entry->urb = urb; | 3011 | entry->urb = urb; |
2981 | entry->dev = dev; | 3012 | entry->dev = dev; |
2982 | entry->length = length; | 3013 | entry->length = length; |
3014 | entry->num_of_packet = count; | ||
2983 | 3015 | ||
2984 | spin_lock_irqsave(&dev->txq.lock, flags); | 3016 | spin_lock_irqsave(&dev->txq.lock, flags); |
2985 | ret = usb_autopm_get_interface_async(dev->intf); | 3017 | ret = usb_autopm_get_interface_async(dev->intf); |
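The lan78xx hunks fix TX statistics for coalesced transfers: when several packets are bundled into one URB, the driver now records how many packets and how many payload bytes (excluding the per-packet command overhead) went into it, so the completion handler can bump tx_packets/tx_bytes correctly. An illustrative user-space sketch of that accounting; TX_OVERHEAD and the frame sizes are made-up values:

#include <stdio.h>

#define TX_OVERHEAD 8	/* per-packet command words, illustrative value */

struct batch {
	int num_of_packet;	/* packets coalesced into this transfer */
	size_t length;		/* payload bytes, overhead excluded */
};

static void complete(const struct batch *b)
{
	/* what tx_complete() would add to the netdev statistics */
	printf("tx_packets += %d, tx_bytes += %zu\n", b->num_of_packet, b->length);
}

int main(void)
{
	size_t framed_len[] = { 64 + TX_OVERHEAD, 128 + TX_OVERHEAD, 256 + TX_OVERHEAD };
	struct batch b = { 0, 0 };

	for (size_t i = 0; i < sizeof(framed_len) / sizeof(framed_len[0]); i++) {
		b.num_of_packet++;
		b.length += framed_len[i] - TX_OVERHEAD;	/* count payload only */
	}
	complete(&b);	/* prints: tx_packets += 3, tx_bytes += 448 */
	return 0;
}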
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index f84080215915..82129eef7774 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c | |||
@@ -411,7 +411,7 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb) | |||
411 | int ret; | 411 | int ret; |
412 | 412 | ||
413 | read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart); | 413 | read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart); |
414 | data[0] = 0xc9; | 414 | data[0] = 0xc8; /* TX & RX enable, append status, no CRC */ |
415 | data[1] = 0; | 415 | data[1] = 0; |
416 | if (linkpart & (ADVERTISE_100FULL | ADVERTISE_10FULL)) | 416 | if (linkpart & (ADVERTISE_100FULL | ADVERTISE_10FULL)) |
417 | data[1] |= 0x20; /* set full duplex */ | 417 | data[1] |= 0x20; /* set full duplex */ |
@@ -497,7 +497,7 @@ static void read_bulk_callback(struct urb *urb) | |||
497 | pkt_len = buf[count - 3] << 8; | 497 | pkt_len = buf[count - 3] << 8; |
498 | pkt_len += buf[count - 4]; | 498 | pkt_len += buf[count - 4]; |
499 | pkt_len &= 0xfff; | 499 | pkt_len &= 0xfff; |
500 | pkt_len -= 8; | 500 | pkt_len -= 4; |
501 | } | 501 | } |
502 | 502 | ||
503 | /* | 503 | /* |
@@ -528,7 +528,7 @@ static void read_bulk_callback(struct urb *urb) | |||
528 | goon: | 528 | goon: |
529 | usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb, | 529 | usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb, |
530 | usb_rcvbulkpipe(pegasus->usb, 1), | 530 | usb_rcvbulkpipe(pegasus->usb, 1), |
531 | pegasus->rx_skb->data, PEGASUS_MTU + 8, | 531 | pegasus->rx_skb->data, PEGASUS_MTU, |
532 | read_bulk_callback, pegasus); | 532 | read_bulk_callback, pegasus); |
533 | rx_status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC); | 533 | rx_status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC); |
534 | if (rx_status == -ENODEV) | 534 | if (rx_status == -ENODEV) |
@@ -569,7 +569,7 @@ static void rx_fixup(unsigned long data) | |||
569 | } | 569 | } |
570 | usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb, | 570 | usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb, |
571 | usb_rcvbulkpipe(pegasus->usb, 1), | 571 | usb_rcvbulkpipe(pegasus->usb, 1), |
572 | pegasus->rx_skb->data, PEGASUS_MTU + 8, | 572 | pegasus->rx_skb->data, PEGASUS_MTU, |
573 | read_bulk_callback, pegasus); | 573 | read_bulk_callback, pegasus); |
574 | try_again: | 574 | try_again: |
575 | status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC); | 575 | status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC); |
@@ -823,7 +823,7 @@ static int pegasus_open(struct net_device *net) | |||
823 | 823 | ||
824 | usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb, | 824 | usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb, |
825 | usb_rcvbulkpipe(pegasus->usb, 1), | 825 | usb_rcvbulkpipe(pegasus->usb, 1), |
826 | pegasus->rx_skb->data, PEGASUS_MTU + 8, | 826 | pegasus->rx_skb->data, PEGASUS_MTU, |
827 | read_bulk_callback, pegasus); | 827 | read_bulk_callback, pegasus); |
828 | if ((res = usb_submit_urb(pegasus->rx_urb, GFP_KERNEL))) { | 828 | if ((res = usb_submit_urb(pegasus->rx_urb, GFP_KERNEL))) { |
829 | if (res == -ENODEV) | 829 | if (res == -ENODEV) |
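The pegasus hunks switch the RX control value to 0xc8 (status appended, no CRC), so the length taken from the trailing status bytes only needs the 4-byte trailer stripped rather than 8, and the URB buffers no longer need the extra slack. A toy sketch of that trailer parsing; the buffer contents and sizes here are invented for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t buf[128] = { 0 };
	size_t count = 70;		/* bytes received in this transfer */

	/* pretend status trailer: 12-bit frame length in the two low bytes */
	buf[count - 4] = 0x46;		/* 0x046 == 70 */
	buf[count - 3] = 0x00;

	unsigned int pkt_len = (buf[count - 3] << 8) | buf[count - 4];
	pkt_len &= 0xfff;		/* length field is 12 bits wide */
	pkt_len -= 4;			/* strip the status trailer; no CRC appended */

	printf("payload length: %u bytes\n", pkt_len);
	return 0;
}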
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 30033dbe6662..c369db99c005 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/crc32.h> | 29 | #include <linux/crc32.h> |
30 | #include <linux/usb/usbnet.h> | 30 | #include <linux/usb/usbnet.h> |
31 | #include <linux/slab.h> | 31 | #include <linux/slab.h> |
32 | #include <linux/of_net.h> | ||
32 | #include "smsc75xx.h" | 33 | #include "smsc75xx.h" |
33 | 34 | ||
34 | #define SMSC_CHIPNAME "smsc75xx" | 35 | #define SMSC_CHIPNAME "smsc75xx" |
@@ -761,6 +762,15 @@ static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) | |||
761 | 762 | ||
762 | static void smsc75xx_init_mac_address(struct usbnet *dev) | 763 | static void smsc75xx_init_mac_address(struct usbnet *dev) |
763 | { | 764 | { |
765 | const u8 *mac_addr; | ||
766 | |||
767 | /* maybe the boot loader passed the MAC address in devicetree */ | ||
768 | mac_addr = of_get_mac_address(dev->udev->dev.of_node); | ||
769 | if (mac_addr) { | ||
770 | memcpy(dev->net->dev_addr, mac_addr, ETH_ALEN); | ||
771 | return; | ||
772 | } | ||
773 | |||
764 | /* try reading mac address from EEPROM */ | 774 | /* try reading mac address from EEPROM */ |
765 | if (smsc75xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, | 775 | if (smsc75xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, |
766 | dev->net->dev_addr) == 0) { | 776 | dev->net->dev_addr) == 0) { |
@@ -772,7 +782,7 @@ static void smsc75xx_init_mac_address(struct usbnet *dev) | |||
772 | } | 782 | } |
773 | } | 783 | } |
774 | 784 | ||
775 | /* no eeprom, or eeprom values are invalid. generate random MAC */ | 785 | /* no useful static MAC address found. generate a random one */ |
776 | eth_hw_addr_random(dev->net); | 786 | eth_hw_addr_random(dev->net); |
777 | netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n"); | 787 | netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n"); |
778 | } | 788 | } |
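Both SMSC USB drivers now try the same fallback order for the MAC address: device tree first, then EEPROM, then a random locally-administered address. A user-space sketch of that priority chain; the two "read" helpers are stubs standing in for of_get_mac_address() and the EEPROM read, and their failure here is hard-coded for illustration:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>
#include <time.h>

#define ETH_ALEN 6

static bool mac_from_devicetree(uint8_t *mac) { (void)mac; return false; }	/* no DT property */
static bool mac_from_eeprom(uint8_t *mac) { (void)mac; return false; }		/* EEPROM blank */

static void random_mac(uint8_t *mac)
{
	for (int i = 0; i < ETH_ALEN; i++)
		mac[i] = rand() & 0xff;
	mac[0] &= 0xfe;		/* clear the multicast bit */
	mac[0] |= 0x02;		/* set the locally administered bit */
}

int main(void)
{
	uint8_t mac[ETH_ALEN];

	srand((unsigned)time(NULL));
	if (!mac_from_devicetree(mac) && !mac_from_eeprom(mac))
		random_mac(mac);	/* last resort, as in the driver */

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}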
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 66b3ab9f614e..2edc2bc6d1b9 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/crc32.h> | 29 | #include <linux/crc32.h> |
30 | #include <linux/usb/usbnet.h> | 30 | #include <linux/usb/usbnet.h> |
31 | #include <linux/slab.h> | 31 | #include <linux/slab.h> |
32 | #include <linux/of_net.h> | ||
32 | #include "smsc95xx.h" | 33 | #include "smsc95xx.h" |
33 | 34 | ||
34 | #define SMSC_CHIPNAME "smsc95xx" | 35 | #define SMSC_CHIPNAME "smsc95xx" |
@@ -765,6 +766,15 @@ static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) | |||
765 | 766 | ||
766 | static void smsc95xx_init_mac_address(struct usbnet *dev) | 767 | static void smsc95xx_init_mac_address(struct usbnet *dev) |
767 | { | 768 | { |
769 | const u8 *mac_addr; | ||
770 | |||
771 | /* maybe the boot loader passed the MAC address in devicetree */ | ||
772 | mac_addr = of_get_mac_address(dev->udev->dev.of_node); | ||
773 | if (mac_addr) { | ||
774 | memcpy(dev->net->dev_addr, mac_addr, ETH_ALEN); | ||
775 | return; | ||
776 | } | ||
777 | |||
768 | /* try reading mac address from EEPROM */ | 778 | /* try reading mac address from EEPROM */ |
769 | if (smsc95xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, | 779 | if (smsc95xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, |
770 | dev->net->dev_addr) == 0) { | 780 | dev->net->dev_addr) == 0) { |
@@ -775,7 +785,7 @@ static void smsc95xx_init_mac_address(struct usbnet *dev) | |||
775 | } | 785 | } |
776 | } | 786 | } |
777 | 787 | ||
778 | /* no eeprom, or eeprom values are invalid. generate random MAC */ | 788 | /* no useful static MAC address found. generate a random one */ |
779 | eth_hw_addr_random(dev->net); | 789 | eth_hw_addr_random(dev->net); |
780 | netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n"); | 790 | netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n"); |
781 | } | 791 | } |
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c index 8f8793004b9f..1b271b99c49e 100644 --- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c | |||
@@ -274,6 +274,9 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah, | |||
274 | }; | 274 | }; |
275 | static const int inc[4] = { 0, 100, 0, 0 }; | 275 | static const int inc[4] = { 0, 100, 0, 0 }; |
276 | 276 | ||
277 | memset(&mask_m, 0, sizeof(int8_t) * 123); | ||
278 | memset(&mask_p, 0, sizeof(int8_t) * 123); | ||
279 | |||
277 | cur_bin = -6000; | 280 | cur_bin = -6000; |
278 | upper = bin + 100; | 281 | upper = bin + 100; |
279 | lower = bin - 100; | 282 | lower = bin - 100; |
@@ -424,14 +427,9 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah, | |||
424 | int tmp, new; | 427 | int tmp, new; |
425 | int i; | 428 | int i; |
426 | 429 | ||
427 | int8_t mask_m[123]; | ||
428 | int8_t mask_p[123]; | ||
429 | int cur_bb_spur; | 430 | int cur_bb_spur; |
430 | bool is2GHz = IS_CHAN_2GHZ(chan); | 431 | bool is2GHz = IS_CHAN_2GHZ(chan); |
431 | 432 | ||
432 | memset(&mask_m, 0, sizeof(int8_t) * 123); | ||
433 | memset(&mask_p, 0, sizeof(int8_t) * 123); | ||
434 | |||
435 | for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { | 433 | for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { |
436 | cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz); | 434 | cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz); |
437 | if (AR_NO_SPUR == cur_bb_spur) | 435 | if (AR_NO_SPUR == cur_bb_spur) |
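The two ath9k hunks move the zeroing of the spur-mask scratch arrays into the common helper that actually fills them, so the per-chip callers no longer each declare and memset their own copies. A sketch of that shape; where exactly the arrays are declared is not visible in this excerpt, so the helper below is a stand-in:

#include <stdio.h>
#include <string.h>

#define MASK_LEN 123	/* matches the array size in the diff */

static void common_spur_mitigate(void)
{
	signed char mask_m[MASK_LEN];
	signed char mask_p[MASK_LEN];

	/* zero the scratch space where it is consumed, not at every call site */
	memset(mask_m, 0, sizeof(mask_m));
	memset(mask_p, 0, sizeof(mask_p));

	/* ... fill the masks and program the hardware ... */
	printf("masks initialised: %zu + %zu bytes\n", sizeof(mask_m), sizeof(mask_p));
}

int main(void)
{
	common_spur_mitigate();	/* reached from both the AR5008 and AR9002 paths */
	return 0;
}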
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c index db6624527d99..53d7445a5d12 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c | |||
@@ -178,14 +178,9 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah, | |||
178 | int i; | 178 | int i; |
179 | struct chan_centers centers; | 179 | struct chan_centers centers; |
180 | 180 | ||
181 | int8_t mask_m[123]; | ||
182 | int8_t mask_p[123]; | ||
183 | int cur_bb_spur; | 181 | int cur_bb_spur; |
184 | bool is2GHz = IS_CHAN_2GHZ(chan); | 182 | bool is2GHz = IS_CHAN_2GHZ(chan); |
185 | 183 | ||
186 | memset(&mask_m, 0, sizeof(int8_t) * 123); | ||
187 | memset(&mask_p, 0, sizeof(int8_t) * 123); | ||
188 | |||
189 | ath9k_hw_get_channel_centers(ah, chan, ¢ers); | 184 | ath9k_hw_get_channel_centers(ah, chan, ¢ers); |
190 | freq = centers.synth_center; | 185 | freq = centers.synth_center; |
191 | 186 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c index 97be104d1203..b5c57eebf995 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c | |||
@@ -93,7 +93,7 @@ | |||
93 | #define IWL8260_SMEM_OFFSET 0x400000 | 93 | #define IWL8260_SMEM_OFFSET 0x400000 |
94 | #define IWL8260_SMEM_LEN 0x68000 | 94 | #define IWL8260_SMEM_LEN 0x68000 |
95 | 95 | ||
96 | #define IWL8000_FW_PRE "iwlwifi-8000" | 96 | #define IWL8000_FW_PRE "iwlwifi-8000C-" |
97 | #define IWL8000_MODULE_FIRMWARE(api) \ | 97 | #define IWL8000_MODULE_FIRMWARE(api) \ |
98 | IWL8000_FW_PRE "-" __stringify(api) ".ucode" | 98 | IWL8000_FW_PRE "-" __stringify(api) ".ucode" |
99 | 99 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index f899666acb41..9e45bf9c6071 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c | |||
@@ -238,19 +238,6 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first) | |||
238 | snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode", | 238 | snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode", |
239 | name_pre, tag); | 239 | name_pre, tag); |
240 | 240 | ||
241 | /* | ||
242 | * Starting 8000B - FW name format has changed. This overwrites the | ||
243 | * previous name and uses the new format. | ||
244 | */ | ||
245 | if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { | ||
246 | char rev_step = 'A' + CSR_HW_REV_STEP(drv->trans->hw_rev); | ||
247 | |||
248 | if (rev_step != 'A') | ||
249 | snprintf(drv->firmware_name, | ||
250 | sizeof(drv->firmware_name), "%s%c-%s.ucode", | ||
251 | name_pre, rev_step, tag); | ||
252 | } | ||
253 | |||
254 | IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n", | 241 | IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n", |
255 | (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) | 242 | (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) |
256 | ? "EXPERIMENTAL " : "", | 243 | ? "EXPERIMENTAL " : "", |
@@ -1060,11 +1047,18 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, | |||
1060 | return -EINVAL; | 1047 | return -EINVAL; |
1061 | } | 1048 | } |
1062 | 1049 | ||
1063 | if (WARN(fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) && | 1050 | /* |
1064 | !gscan_capa, | 1051 | * If ucode advertises that it supports GSCAN but GSCAN |
1065 | "GSCAN is supported but capabilities TLV is unavailable\n")) | 1052 | * capabilities TLV is not present, or if it has an old format, |
1053 | * warn and continue without GSCAN. | ||
1054 | */ | ||
1055 | if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) && | ||
1056 | !gscan_capa) { | ||
1057 | IWL_DEBUG_INFO(drv, | ||
1058 | "GSCAN is supported but capabilities TLV is unavailable\n"); | ||
1066 | __clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT, | 1059 | __clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT, |
1067 | capa->_capa); | 1060 | capa->_capa); |
1061 | } | ||
1068 | 1062 | ||
1069 | return 0; | 1063 | return 0; |
1070 | 1064 | ||
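The iwl-drv hunk replaces a WARN-and-fail with graceful degradation: if the firmware advertises GSCAN but the matching capabilities TLV is missing, the driver logs it and clears the capability bit rather than rejecting the image. A minimal sketch of that pattern; the bit name and value are illustrative, not the real TLV encoding:

#include <stdio.h>
#include <stdbool.h>

#define CAPA_GSCAN_SUPPORT (1u << 3)	/* illustrative bit position */

int main(void)
{
	unsigned int capa = CAPA_GSCAN_SUPPORT | 0x1;	/* advertised capabilities */
	bool gscan_tlv_present = false;			/* the data TLV was not found */

	if ((capa & CAPA_GSCAN_SUPPORT) && !gscan_tlv_present) {
		fprintf(stderr, "GSCAN is supported but capabilities TLV is unavailable\n");
		capa &= ~CAPA_GSCAN_SUPPORT;	/* degrade gracefully, keep loading */
	}

	printf("effective capabilities: 0x%x\n", capa);
	return 0;
}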
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c index 4856eac120f6..6938cd37be57 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c | |||
@@ -526,7 +526,8 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) | |||
526 | file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len; | 526 | file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len; |
527 | 527 | ||
528 | /* Make room for fw's virtual image pages, if it exists */ | 528 | /* Make room for fw's virtual image pages, if it exists */ |
529 | if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) | 529 | if (mvm->fw->img[mvm->cur_ucode].paging_mem_size && |
530 | mvm->fw_paging_db[0].fw_paging_block) | ||
530 | file_len += mvm->num_of_paging_blk * | 531 | file_len += mvm->num_of_paging_blk * |
531 | (sizeof(*dump_data) + | 532 | (sizeof(*dump_data) + |
532 | sizeof(struct iwl_fw_error_dump_paging) + | 533 | sizeof(struct iwl_fw_error_dump_paging) + |
@@ -643,7 +644,8 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) | |||
643 | } | 644 | } |
644 | 645 | ||
645 | /* Dump fw's virtual image */ | 646 | /* Dump fw's virtual image */ |
646 | if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) { | 647 | if (mvm->fw->img[mvm->cur_ucode].paging_mem_size && |
648 | mvm->fw_paging_db[0].fw_paging_block) { | ||
647 | for (i = 1; i < mvm->num_of_paging_blk + 1; i++) { | 649 | for (i = 1; i < mvm->num_of_paging_blk + 1; i++) { |
648 | struct iwl_fw_error_dump_paging *paging; | 650 | struct iwl_fw_error_dump_paging *paging; |
649 | struct page *pages = | 651 | struct page *pages = |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 594cd0dc7df9..09d895fafaf2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c | |||
@@ -144,9 +144,11 @@ void iwl_free_fw_paging(struct iwl_mvm *mvm) | |||
144 | 144 | ||
145 | __free_pages(mvm->fw_paging_db[i].fw_paging_block, | 145 | __free_pages(mvm->fw_paging_db[i].fw_paging_block, |
146 | get_order(mvm->fw_paging_db[i].fw_paging_size)); | 146 | get_order(mvm->fw_paging_db[i].fw_paging_size)); |
147 | mvm->fw_paging_db[i].fw_paging_block = NULL; | ||
147 | } | 148 | } |
148 | kfree(mvm->trans->paging_download_buf); | 149 | kfree(mvm->trans->paging_download_buf); |
149 | mvm->trans->paging_download_buf = NULL; | 150 | mvm->trans->paging_download_buf = NULL; |
151 | mvm->trans->paging_db = NULL; | ||
150 | 152 | ||
151 | memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db)); | 153 | memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db)); |
152 | } | 154 | } |
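Together with the fw-dbg hunks above, this change follows the usual free-then-NULL discipline: once the paging blocks are released, the tracking pointers are cleared so the error-dump path can test them instead of touching freed memory. A user-space sketch of the pattern; the structure and field names are simplified stand-ins:

#include <stdio.h>
#include <stdlib.h>

struct paging_block {
	void *mem;
	size_t size;
};

static void free_paging(struct paging_block *blk)
{
	free(blk->mem);
	blk->mem = NULL;	/* mark the block as gone */
	blk->size = 0;
}

static void dump_paging(const struct paging_block *blk)
{
	/* mirrors the added "&& fw_paging_db[0].fw_paging_block" guard */
	if (!blk->mem) {
		printf("paging memory already released, skipping dump\n");
		return;
	}
	printf("dumping %zu bytes of paging memory\n", blk->size);
}

int main(void)
{
	struct paging_block blk = { malloc(4096), 4096 };

	dump_paging(&blk);	/* dumps */
	free_paging(&blk);
	dump_paging(&blk);	/* safely skipped */
	return 0;
}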
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 05b968506836..79d7cd7d461e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c | |||
@@ -479,8 +479,18 @@ static const struct pci_device_id iwl_hw_card_ids[] = { | |||
479 | {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)}, | 479 | {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)}, |
480 | {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)}, | 480 | {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)}, |
481 | {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)}, | 481 | {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)}, |
482 | {IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)}, | ||
483 | {IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)}, | ||
484 | {IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8265_2ac_cfg)}, | ||
485 | {IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8265_2ac_cfg)}, | ||
486 | {IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8265_2ac_cfg)}, | ||
487 | {IWL_PCI_DEVICE(0x24FD, 0x9010, iwl8265_2ac_cfg)}, | ||
488 | {IWL_PCI_DEVICE(0x24FD, 0x8110, iwl8265_2ac_cfg)}, | ||
489 | {IWL_PCI_DEVICE(0x24FD, 0x8050, iwl8265_2ac_cfg)}, | ||
482 | {IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)}, | 490 | {IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)}, |
483 | {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)}, | 491 | {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)}, |
492 | {IWL_PCI_DEVICE(0x24FD, 0x9110, iwl8265_2ac_cfg)}, | ||
493 | {IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8265_2ac_cfg)}, | ||
484 | 494 | ||
485 | /* 9000 Series */ | 495 | /* 9000 Series */ |
486 | {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)}, | 496 | {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)}, |
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index f798899338ed..5101f3ab4f29 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c | |||
@@ -397,10 +397,17 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) | |||
397 | */ | 397 | */ |
398 | start += start_pad; | 398 | start += start_pad; |
399 | npfns = (pmem->size - start_pad - end_trunc - SZ_8K) / SZ_4K; | 399 | npfns = (pmem->size - start_pad - end_trunc - SZ_8K) / SZ_4K; |
400 | if (nd_pfn->mode == PFN_MODE_PMEM) | 400 | if (nd_pfn->mode == PFN_MODE_PMEM) { |
401 | offset = ALIGN(start + SZ_8K + 64 * npfns, nd_pfn->align) | 401 | unsigned long memmap_size; |
402 | |||
403 | /* | ||
404 | * vmemmap_populate_hugepages() allocates the memmap array in | ||
405 | * HPAGE_SIZE chunks. | ||
406 | */ | ||
407 | memmap_size = ALIGN(64 * npfns, HPAGE_SIZE); | ||
408 | offset = ALIGN(start + SZ_8K + memmap_size, nd_pfn->align) | ||
402 | - start; | 409 | - start; |
403 | else if (nd_pfn->mode == PFN_MODE_RAM) | 410 | } else if (nd_pfn->mode == PFN_MODE_RAM) |
404 | offset = ALIGN(start + SZ_8K, nd_pfn->align) - start; | 411 | offset = ALIGN(start + SZ_8K, nd_pfn->align) - start; |
405 | else | 412 | else |
406 | goto err; | 413 | goto err; |
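The pmem hunk reserves memmap space in whole huge pages because vmemmap_populate_hugepages() allocates the memmap array in HPAGE_SIZE chunks; the data offset is then aligned on top of that. A worked example of the arithmetic, assuming a 2 MiB huge page, 4 KiB pages, a 1 GiB namespace and 2 MiB alignment; these constants are illustrative, not taken from real hardware:

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long long)(a) - 1))
#define SZ_4K		0x1000ULL
#define SZ_8K		0x2000ULL
#define HPAGE_SIZE	0x200000ULL	/* 2 MiB */

int main(void)
{
	unsigned long long start = 0;
	unsigned long long size  = 1ULL << 30;			/* 1 GiB namespace */
	unsigned long long align = HPAGE_SIZE;
	unsigned long long npfns = (size - SZ_8K) / SZ_4K;	/* pages to map */

	/* 64 bytes of struct page per pfn, rounded up to whole huge pages */
	unsigned long long memmap_size = ALIGN(64 * npfns, HPAGE_SIZE);
	unsigned long long offset = ALIGN(start + SZ_8K + memmap_size, align) - start;

	printf("npfns=%llu memmap_size=%llu offset=%llu\n", npfns, memmap_size, offset);
	return 0;
}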
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index df1f1a76a862..01e12d221a8b 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c | |||
@@ -135,7 +135,7 @@ MODULE_LICENSE("GPL"); | |||
135 | /* Field definitions */ | 135 | /* Field definitions */ |
136 | #define HCI_ACCEL_MASK 0x7fff | 136 | #define HCI_ACCEL_MASK 0x7fff |
137 | #define HCI_HOTKEY_DISABLE 0x0b | 137 | #define HCI_HOTKEY_DISABLE 0x0b |
138 | #define HCI_HOTKEY_ENABLE 0x01 | 138 | #define HCI_HOTKEY_ENABLE 0x09 |
139 | #define HCI_HOTKEY_SPECIAL_FUNCTIONS 0x10 | 139 | #define HCI_HOTKEY_SPECIAL_FUNCTIONS 0x10 |
140 | #define HCI_LCD_BRIGHTNESS_BITS 3 | 140 | #define HCI_LCD_BRIGHTNESS_BITS 3 |
141 | #define HCI_LCD_BRIGHTNESS_SHIFT (16-HCI_LCD_BRIGHTNESS_BITS) | 141 | #define HCI_LCD_BRIGHTNESS_SHIFT (16-HCI_LCD_BRIGHTNESS_BITS) |
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index 5d4d91846357..e165b7ce29d7 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c | |||
@@ -126,7 +126,7 @@ struct rio_mport_mapping { | |||
126 | struct list_head node; | 126 | struct list_head node; |
127 | struct mport_dev *md; | 127 | struct mport_dev *md; |
128 | enum rio_mport_map_dir dir; | 128 | enum rio_mport_map_dir dir; |
129 | u32 rioid; | 129 | u16 rioid; |
130 | u64 rio_addr; | 130 | u64 rio_addr; |
131 | dma_addr_t phys_addr; /* for mmap */ | 131 | dma_addr_t phys_addr; /* for mmap */ |
132 | void *virt_addr; /* kernel address, for dma_free_coherent */ | 132 | void *virt_addr; /* kernel address, for dma_free_coherent */ |
@@ -137,7 +137,7 @@ struct rio_mport_mapping { | |||
137 | 137 | ||
138 | struct rio_mport_dma_map { | 138 | struct rio_mport_dma_map { |
139 | int valid; | 139 | int valid; |
140 | uint64_t length; | 140 | u64 length; |
141 | void *vaddr; | 141 | void *vaddr; |
142 | dma_addr_t paddr; | 142 | dma_addr_t paddr; |
143 | }; | 143 | }; |
@@ -208,7 +208,7 @@ struct mport_cdev_priv { | |||
208 | struct kfifo event_fifo; | 208 | struct kfifo event_fifo; |
209 | wait_queue_head_t event_rx_wait; | 209 | wait_queue_head_t event_rx_wait; |
210 | spinlock_t fifo_lock; | 210 | spinlock_t fifo_lock; |
211 | unsigned int event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */ | 211 | u32 event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */ |
212 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE | 212 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE |
213 | struct dma_chan *dmach; | 213 | struct dma_chan *dmach; |
214 | struct list_head async_list; | 214 | struct list_head async_list; |
@@ -276,7 +276,8 @@ static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg, | |||
276 | return -EFAULT; | 276 | return -EFAULT; |
277 | 277 | ||
278 | if ((maint_io.offset % 4) || | 278 | if ((maint_io.offset % 4) || |
279 | (maint_io.length == 0) || (maint_io.length % 4)) | 279 | (maint_io.length == 0) || (maint_io.length % 4) || |
280 | (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ) | ||
280 | return -EINVAL; | 281 | return -EINVAL; |
281 | 282 | ||
282 | buffer = vmalloc(maint_io.length); | 283 | buffer = vmalloc(maint_io.length); |
@@ -298,7 +299,8 @@ static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg, | |||
298 | offset += 4; | 299 | offset += 4; |
299 | } | 300 | } |
300 | 301 | ||
301 | if (unlikely(copy_to_user(maint_io.buffer, buffer, maint_io.length))) | 302 | if (unlikely(copy_to_user((void __user *)(uintptr_t)maint_io.buffer, |
303 | buffer, maint_io.length))) | ||
302 | ret = -EFAULT; | 304 | ret = -EFAULT; |
303 | out: | 305 | out: |
304 | vfree(buffer); | 306 | vfree(buffer); |
@@ -319,7 +321,8 @@ static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg, | |||
319 | return -EFAULT; | 321 | return -EFAULT; |
320 | 322 | ||
321 | if ((maint_io.offset % 4) || | 323 | if ((maint_io.offset % 4) || |
322 | (maint_io.length == 0) || (maint_io.length % 4)) | 324 | (maint_io.length == 0) || (maint_io.length % 4) || |
325 | (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ) | ||
323 | return -EINVAL; | 326 | return -EINVAL; |
324 | 327 | ||
325 | buffer = vmalloc(maint_io.length); | 328 | buffer = vmalloc(maint_io.length); |
@@ -327,7 +330,8 @@ static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg, | |||
327 | return -ENOMEM; | 330 | return -ENOMEM; |
328 | length = maint_io.length; | 331 | length = maint_io.length; |
329 | 332 | ||
330 | if (unlikely(copy_from_user(buffer, maint_io.buffer, length))) { | 333 | if (unlikely(copy_from_user(buffer, |
334 | (void __user *)(uintptr_t)maint_io.buffer, length))) { | ||
331 | ret = -EFAULT; | 335 | ret = -EFAULT; |
332 | goto out; | 336 | goto out; |
333 | } | 337 | } |
@@ -360,7 +364,7 @@ out: | |||
360 | */ | 364 | */ |
361 | static int | 365 | static int |
362 | rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp, | 366 | rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp, |
363 | u32 rioid, u64 raddr, u32 size, | 367 | u16 rioid, u64 raddr, u32 size, |
364 | dma_addr_t *paddr) | 368 | dma_addr_t *paddr) |
365 | { | 369 | { |
366 | struct rio_mport *mport = md->mport; | 370 | struct rio_mport *mport = md->mport; |
@@ -369,7 +373,7 @@ rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp, | |||
369 | 373 | ||
370 | rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size); | 374 | rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size); |
371 | 375 | ||
372 | map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL); | 376 | map = kzalloc(sizeof(*map), GFP_KERNEL); |
373 | if (map == NULL) | 377 | if (map == NULL) |
374 | return -ENOMEM; | 378 | return -ENOMEM; |
375 | 379 | ||
@@ -394,7 +398,7 @@ err_map_outb: | |||
394 | 398 | ||
395 | static int | 399 | static int |
396 | rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp, | 400 | rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp, |
397 | u32 rioid, u64 raddr, u32 size, | 401 | u16 rioid, u64 raddr, u32 size, |
398 | dma_addr_t *paddr) | 402 | dma_addr_t *paddr) |
399 | { | 403 | { |
400 | struct rio_mport_mapping *map; | 404 | struct rio_mport_mapping *map; |
@@ -433,7 +437,7 @@ static int rio_mport_obw_map(struct file *filp, void __user *arg) | |||
433 | dma_addr_t paddr; | 437 | dma_addr_t paddr; |
434 | int ret; | 438 | int ret; |
435 | 439 | ||
436 | if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap)))) | 440 | if (unlikely(copy_from_user(&map, arg, sizeof(map)))) |
437 | return -EFAULT; | 441 | return -EFAULT; |
438 | 442 | ||
439 | rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx", | 443 | rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx", |
@@ -448,7 +452,7 @@ static int rio_mport_obw_map(struct file *filp, void __user *arg) | |||
448 | 452 | ||
449 | map.handle = paddr; | 453 | map.handle = paddr; |
450 | 454 | ||
451 | if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_mmap)))) | 455 | if (unlikely(copy_to_user(arg, &map, sizeof(map)))) |
452 | return -EFAULT; | 456 | return -EFAULT; |
453 | return 0; | 457 | return 0; |
454 | } | 458 | } |
@@ -469,7 +473,7 @@ static int rio_mport_obw_free(struct file *filp, void __user *arg) | |||
469 | if (!md->mport->ops->unmap_outb) | 473 | if (!md->mport->ops->unmap_outb) |
470 | return -EPROTONOSUPPORT; | 474 | return -EPROTONOSUPPORT; |
471 | 475 | ||
472 | if (copy_from_user(&handle, arg, sizeof(u64))) | 476 | if (copy_from_user(&handle, arg, sizeof(handle))) |
473 | return -EFAULT; | 477 | return -EFAULT; |
474 | 478 | ||
475 | rmcd_debug(OBW, "h=0x%llx", handle); | 479 | rmcd_debug(OBW, "h=0x%llx", handle); |
@@ -498,9 +502,9 @@ static int rio_mport_obw_free(struct file *filp, void __user *arg) | |||
498 | static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg) | 502 | static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg) |
499 | { | 503 | { |
500 | struct mport_dev *md = priv->md; | 504 | struct mport_dev *md = priv->md; |
501 | uint16_t hdid; | 505 | u16 hdid; |
502 | 506 | ||
503 | if (copy_from_user(&hdid, arg, sizeof(uint16_t))) | 507 | if (copy_from_user(&hdid, arg, sizeof(hdid))) |
504 | return -EFAULT; | 508 | return -EFAULT; |
505 | 509 | ||
506 | md->mport->host_deviceid = hdid; | 510 | md->mport->host_deviceid = hdid; |
@@ -520,9 +524,9 @@ static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg) | |||
520 | static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg) | 524 | static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg) |
521 | { | 525 | { |
522 | struct mport_dev *md = priv->md; | 526 | struct mport_dev *md = priv->md; |
523 | uint32_t comptag; | 527 | u32 comptag; |
524 | 528 | ||
525 | if (copy_from_user(&comptag, arg, sizeof(uint32_t))) | 529 | if (copy_from_user(&comptag, arg, sizeof(comptag))) |
526 | return -EFAULT; | 530 | return -EFAULT; |
527 | 531 | ||
528 | rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag); | 532 | rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag); |
@@ -837,7 +841,7 @@ err_out: | |||
837 | * @xfer: data transfer descriptor structure | 841 | * @xfer: data transfer descriptor structure |
838 | */ | 842 | */ |
839 | static int | 843 | static int |
840 | rio_dma_transfer(struct file *filp, uint32_t transfer_mode, | 844 | rio_dma_transfer(struct file *filp, u32 transfer_mode, |
841 | enum rio_transfer_sync sync, enum dma_data_direction dir, | 845 | enum rio_transfer_sync sync, enum dma_data_direction dir, |
842 | struct rio_transfer_io *xfer) | 846 | struct rio_transfer_io *xfer) |
843 | { | 847 | { |
@@ -875,7 +879,7 @@ rio_dma_transfer(struct file *filp, uint32_t transfer_mode, | |||
875 | unsigned long offset; | 879 | unsigned long offset; |
876 | long pinned; | 880 | long pinned; |
877 | 881 | ||
878 | offset = (unsigned long)xfer->loc_addr & ~PAGE_MASK; | 882 | offset = (unsigned long)(uintptr_t)xfer->loc_addr & ~PAGE_MASK; |
879 | nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT; | 883 | nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT; |
880 | 884 | ||
881 | page_list = kmalloc_array(nr_pages, | 885 | page_list = kmalloc_array(nr_pages, |
@@ -1015,19 +1019,20 @@ static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg) | |||
1015 | if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction)))) | 1019 | if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction)))) |
1016 | return -EFAULT; | 1020 | return -EFAULT; |
1017 | 1021 | ||
1018 | if (transaction.count != 1) | 1022 | if (transaction.count != 1) /* only single transfer for now */ |
1019 | return -EINVAL; | 1023 | return -EINVAL; |
1020 | 1024 | ||
1021 | if ((transaction.transfer_mode & | 1025 | if ((transaction.transfer_mode & |
1022 | priv->md->properties.transfer_mode) == 0) | 1026 | priv->md->properties.transfer_mode) == 0) |
1023 | return -ENODEV; | 1027 | return -ENODEV; |
1024 | 1028 | ||
1025 | transfer = vmalloc(transaction.count * sizeof(struct rio_transfer_io)); | 1029 | transfer = vmalloc(transaction.count * sizeof(*transfer)); |
1026 | if (!transfer) | 1030 | if (!transfer) |
1027 | return -ENOMEM; | 1031 | return -ENOMEM; |
1028 | 1032 | ||
1029 | if (unlikely(copy_from_user(transfer, transaction.block, | 1033 | if (unlikely(copy_from_user(transfer, |
1030 | transaction.count * sizeof(struct rio_transfer_io)))) { | 1034 | (void __user *)(uintptr_t)transaction.block, |
1035 | transaction.count * sizeof(*transfer)))) { | ||
1031 | ret = -EFAULT; | 1036 | ret = -EFAULT; |
1032 | goto out_free; | 1037 | goto out_free; |
1033 | } | 1038 | } |
@@ -1038,8 +1043,9 @@ static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg) | |||
1038 | ret = rio_dma_transfer(filp, transaction.transfer_mode, | 1043 | ret = rio_dma_transfer(filp, transaction.transfer_mode, |
1039 | transaction.sync, dir, &transfer[i]); | 1044 | transaction.sync, dir, &transfer[i]); |
1040 | 1045 | ||
1041 | if (unlikely(copy_to_user(transaction.block, transfer, | 1046 | if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block, |
1042 | transaction.count * sizeof(struct rio_transfer_io)))) | 1047 | transfer, |
1048 | transaction.count * sizeof(*transfer)))) | ||
1043 | ret = -EFAULT; | 1049 | ret = -EFAULT; |
1044 | 1050 | ||
1045 | out_free: | 1051 | out_free: |
@@ -1129,11 +1135,11 @@ err_tmo: | |||
1129 | } | 1135 | } |
1130 | 1136 | ||
1131 | static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp, | 1137 | static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp, |
1132 | uint64_t size, struct rio_mport_mapping **mapping) | 1138 | u64 size, struct rio_mport_mapping **mapping) |
1133 | { | 1139 | { |
1134 | struct rio_mport_mapping *map; | 1140 | struct rio_mport_mapping *map; |
1135 | 1141 | ||
1136 | map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL); | 1142 | map = kzalloc(sizeof(*map), GFP_KERNEL); |
1137 | if (map == NULL) | 1143 | if (map == NULL) |
1138 | return -ENOMEM; | 1144 | return -ENOMEM; |
1139 | 1145 | ||
@@ -1165,7 +1171,7 @@ static int rio_mport_alloc_dma(struct file *filp, void __user *arg) | |||
1165 | struct rio_mport_mapping *mapping = NULL; | 1171 | struct rio_mport_mapping *mapping = NULL; |
1166 | int ret; | 1172 | int ret; |
1167 | 1173 | ||
1168 | if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_dma_mem)))) | 1174 | if (unlikely(copy_from_user(&map, arg, sizeof(map)))) |
1169 | return -EFAULT; | 1175 | return -EFAULT; |
1170 | 1176 | ||
1171 | ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping); | 1177 | ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping); |
@@ -1174,7 +1180,7 @@ static int rio_mport_alloc_dma(struct file *filp, void __user *arg) | |||
1174 | 1180 | ||
1175 | map.dma_handle = mapping->phys_addr; | 1181 | map.dma_handle = mapping->phys_addr; |
1176 | 1182 | ||
1177 | if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_dma_mem)))) { | 1183 | if (unlikely(copy_to_user(arg, &map, sizeof(map)))) { |
1178 | mutex_lock(&md->buf_mutex); | 1184 | mutex_lock(&md->buf_mutex); |
1179 | kref_put(&mapping->ref, mport_release_mapping); | 1185 | kref_put(&mapping->ref, mport_release_mapping); |
1180 | mutex_unlock(&md->buf_mutex); | 1186 | mutex_unlock(&md->buf_mutex); |
@@ -1192,7 +1198,7 @@ static int rio_mport_free_dma(struct file *filp, void __user *arg) | |||
1192 | int ret = -EFAULT; | 1198 | int ret = -EFAULT; |
1193 | struct rio_mport_mapping *map, *_map; | 1199 | struct rio_mport_mapping *map, *_map; |
1194 | 1200 | ||
1195 | if (copy_from_user(&handle, arg, sizeof(u64))) | 1201 | if (copy_from_user(&handle, arg, sizeof(handle))) |
1196 | return -EFAULT; | 1202 | return -EFAULT; |
1197 | rmcd_debug(EXIT, "filp=%p", filp); | 1203 | rmcd_debug(EXIT, "filp=%p", filp); |
1198 | 1204 | ||
@@ -1242,14 +1248,18 @@ static int rio_mport_free_dma(struct file *filp, void __user *arg) | |||
1242 | 1248 | ||
1243 | static int | 1249 | static int |
1244 | rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp, | 1250 | rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp, |
1245 | u64 raddr, u32 size, | 1251 | u64 raddr, u64 size, |
1246 | struct rio_mport_mapping **mapping) | 1252 | struct rio_mport_mapping **mapping) |
1247 | { | 1253 | { |
1248 | struct rio_mport *mport = md->mport; | 1254 | struct rio_mport *mport = md->mport; |
1249 | struct rio_mport_mapping *map; | 1255 | struct rio_mport_mapping *map; |
1250 | int ret; | 1256 | int ret; |
1251 | 1257 | ||
1252 | map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL); | 1258 | /* rio_map_inb_region() accepts u32 size */ |
1259 | if (size > 0xffffffff) | ||
1260 | return -EINVAL; | ||
1261 | |||
1262 | map = kzalloc(sizeof(*map), GFP_KERNEL); | ||
1253 | if (map == NULL) | 1263 | if (map == NULL) |
1254 | return -ENOMEM; | 1264 | return -ENOMEM; |
1255 | 1265 | ||
@@ -1262,7 +1272,7 @@ rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp, | |||
1262 | 1272 | ||
1263 | if (raddr == RIO_MAP_ANY_ADDR) | 1273 | if (raddr == RIO_MAP_ANY_ADDR) |
1264 | raddr = map->phys_addr; | 1274 | raddr = map->phys_addr; |
1265 | ret = rio_map_inb_region(mport, map->phys_addr, raddr, size, 0); | 1275 | ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0); |
1266 | if (ret < 0) | 1276 | if (ret < 0) |
1267 | goto err_map_inb; | 1277 | goto err_map_inb; |
1268 | 1278 | ||
@@ -1288,7 +1298,7 @@ err_dma_alloc: | |||
1288 | 1298 | ||
1289 | static int | 1299 | static int |
1290 | rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp, | 1300 | rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp, |
1291 | u64 raddr, u32 size, | 1301 | u64 raddr, u64 size, |
1292 | struct rio_mport_mapping **mapping) | 1302 | struct rio_mport_mapping **mapping) |
1293 | { | 1303 | { |
1294 | struct rio_mport_mapping *map; | 1304 | struct rio_mport_mapping *map; |
@@ -1331,7 +1341,7 @@ static int rio_mport_map_inbound(struct file *filp, void __user *arg) | |||
1331 | 1341 | ||
1332 | if (!md->mport->ops->map_inb) | 1342 | if (!md->mport->ops->map_inb) |
1333 | return -EPROTONOSUPPORT; | 1343 | return -EPROTONOSUPPORT; |
1334 | if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap)))) | 1344 | if (unlikely(copy_from_user(&map, arg, sizeof(map)))) |
1335 | return -EFAULT; | 1345 | return -EFAULT; |
1336 | 1346 | ||
1337 | rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp); | 1347 | rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp); |
@@ -1344,7 +1354,7 @@ static int rio_mport_map_inbound(struct file *filp, void __user *arg) | |||
1344 | map.handle = mapping->phys_addr; | 1354 | map.handle = mapping->phys_addr; |
1345 | map.rio_addr = mapping->rio_addr; | 1355 | map.rio_addr = mapping->rio_addr; |
1346 | 1356 | ||
1347 | if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_mmap)))) { | 1357 | if (unlikely(copy_to_user(arg, &map, sizeof(map)))) { |
1348 | /* Delete mapping if it was created by this request */ | 1358 | /* Delete mapping if it was created by this request */ |
1349 | if (ret == 0 && mapping->filp == filp) { | 1359 | if (ret == 0 && mapping->filp == filp) { |
1350 | mutex_lock(&md->buf_mutex); | 1360 | mutex_lock(&md->buf_mutex); |
@@ -1375,7 +1385,7 @@ static int rio_mport_inbound_free(struct file *filp, void __user *arg) | |||
1375 | if (!md->mport->ops->unmap_inb) | 1385 | if (!md->mport->ops->unmap_inb) |
1376 | return -EPROTONOSUPPORT; | 1386 | return -EPROTONOSUPPORT; |
1377 | 1387 | ||
1378 | if (copy_from_user(&handle, arg, sizeof(u64))) | 1388 | if (copy_from_user(&handle, arg, sizeof(handle))) |
1379 | return -EFAULT; | 1389 | return -EFAULT; |
1380 | 1390 | ||
1381 | mutex_lock(&md->buf_mutex); | 1391 | mutex_lock(&md->buf_mutex); |
@@ -1401,7 +1411,7 @@ static int rio_mport_inbound_free(struct file *filp, void __user *arg) | |||
1401 | static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg) | 1411 | static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg) |
1402 | { | 1412 | { |
1403 | struct mport_dev *md = priv->md; | 1413 | struct mport_dev *md = priv->md; |
1404 | uint32_t port_idx = md->mport->index; | 1414 | u32 port_idx = md->mport->index; |
1405 | 1415 | ||
1406 | rmcd_debug(MPORT, "port_index=%d", port_idx); | 1416 | rmcd_debug(MPORT, "port_index=%d", port_idx); |
1407 | 1417 | ||
@@ -1451,7 +1461,7 @@ static void rio_mport_doorbell_handler(struct rio_mport *mport, void *dev_id, | |||
1451 | handled = 0; | 1461 | handled = 0; |
1452 | spin_lock(&data->db_lock); | 1462 | spin_lock(&data->db_lock); |
1453 | list_for_each_entry(db_filter, &data->doorbells, data_node) { | 1463 | list_for_each_entry(db_filter, &data->doorbells, data_node) { |
1454 | if (((db_filter->filter.rioid == 0xffffffff || | 1464 | if (((db_filter->filter.rioid == RIO_INVALID_DESTID || |
1455 | db_filter->filter.rioid == src)) && | 1465 | db_filter->filter.rioid == src)) && |
1456 | info >= db_filter->filter.low && | 1466 | info >= db_filter->filter.low && |
1457 | info <= db_filter->filter.high) { | 1467 | info <= db_filter->filter.high) { |
@@ -1525,6 +1535,9 @@ static int rio_mport_remove_db_filter(struct mport_cdev_priv *priv, | |||
1525 | if (copy_from_user(&filter, arg, sizeof(filter))) | 1535 | if (copy_from_user(&filter, arg, sizeof(filter))) |
1526 | return -EFAULT; | 1536 | return -EFAULT; |
1527 | 1537 | ||
1538 | if (filter.low > filter.high) | ||
1539 | return -EINVAL; | ||
1540 | |||
1528 | spin_lock_irqsave(&priv->md->db_lock, flags); | 1541 | spin_lock_irqsave(&priv->md->db_lock, flags); |
1529 | list_for_each_entry(db_filter, &priv->db_filters, priv_node) { | 1542 | list_for_each_entry(db_filter, &priv->db_filters, priv_node) { |
1530 | if (db_filter->filter.rioid == filter.rioid && | 1543 | if (db_filter->filter.rioid == filter.rioid && |
@@ -1737,10 +1750,10 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv, | |||
1737 | return -EEXIST; | 1750 | return -EEXIST; |
1738 | } | 1751 | } |
1739 | 1752 | ||
1740 | size = sizeof(struct rio_dev); | 1753 | size = sizeof(*rdev); |
1741 | mport = md->mport; | 1754 | mport = md->mport; |
1742 | destid = (u16)dev_info.destid; | 1755 | destid = dev_info.destid; |
1743 | hopcount = (u8)dev_info.hopcount; | 1756 | hopcount = dev_info.hopcount; |
1744 | 1757 | ||
1745 | if (rio_mport_read_config_32(mport, destid, hopcount, | 1758 | if (rio_mport_read_config_32(mport, destid, hopcount, |
1746 | RIO_PEF_CAR, &rval)) | 1759 | RIO_PEF_CAR, &rval)) |
@@ -1872,8 +1885,8 @@ static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg) | |||
1872 | do { | 1885 | do { |
1873 | rdev = rio_get_comptag(dev_info.comptag, rdev); | 1886 | rdev = rio_get_comptag(dev_info.comptag, rdev); |
1874 | if (rdev && rdev->dev.parent == &mport->net->dev && | 1887 | if (rdev && rdev->dev.parent == &mport->net->dev && |
1875 | rdev->destid == (u16)dev_info.destid && | 1888 | rdev->destid == dev_info.destid && |
1876 | rdev->hopcount == (u8)dev_info.hopcount) | 1889 | rdev->hopcount == dev_info.hopcount) |
1877 | break; | 1890 | break; |
1878 | } while (rdev); | 1891 | } while (rdev); |
1879 | } | 1892 | } |
@@ -2146,8 +2159,8 @@ static long mport_cdev_ioctl(struct file *filp, | |||
2146 | return maint_port_idx_get(data, (void __user *)arg); | 2159 | return maint_port_idx_get(data, (void __user *)arg); |
2147 | case RIO_MPORT_GET_PROPERTIES: | 2160 | case RIO_MPORT_GET_PROPERTIES: |
2148 | md->properties.hdid = md->mport->host_deviceid; | 2161 | md->properties.hdid = md->mport->host_deviceid; |
2149 | if (copy_to_user((void __user *)arg, &(data->md->properties), | 2162 | if (copy_to_user((void __user *)arg, &(md->properties), |
2150 | sizeof(data->md->properties))) | 2163 | sizeof(md->properties))) |
2151 | return -EFAULT; | 2164 | return -EFAULT; |
2152 | return 0; | 2165 | return 0; |
2153 | case RIO_ENABLE_DOORBELL_RANGE: | 2166 | case RIO_ENABLE_DOORBELL_RANGE: |
@@ -2159,11 +2172,11 @@ static long mport_cdev_ioctl(struct file *filp, | |||
2159 | case RIO_DISABLE_PORTWRITE_RANGE: | 2172 | case RIO_DISABLE_PORTWRITE_RANGE: |
2160 | return rio_mport_remove_pw_filter(data, (void __user *)arg); | 2173 | return rio_mport_remove_pw_filter(data, (void __user *)arg); |
2161 | case RIO_SET_EVENT_MASK: | 2174 | case RIO_SET_EVENT_MASK: |
2162 | data->event_mask = arg; | 2175 | data->event_mask = (u32)arg; |
2163 | return 0; | 2176 | return 0; |
2164 | case RIO_GET_EVENT_MASK: | 2177 | case RIO_GET_EVENT_MASK: |
2165 | if (copy_to_user((void __user *)arg, &data->event_mask, | 2178 | if (copy_to_user((void __user *)arg, &data->event_mask, |
2166 | sizeof(data->event_mask))) | 2179 | sizeof(u32))) |
2167 | return -EFAULT; | 2180 | return -EFAULT; |
2168 | return 0; | 2181 | return 0; |
2169 | case RIO_MAP_OUTBOUND: | 2182 | case RIO_MAP_OUTBOUND: |
@@ -2374,7 +2387,7 @@ static ssize_t mport_write(struct file *filp, const char __user *buf, | |||
2374 | return -EINVAL; | 2387 | return -EINVAL; |
2375 | 2388 | ||
2376 | ret = rio_mport_send_doorbell(mport, | 2389 | ret = rio_mport_send_doorbell(mport, |
2377 | (u16)event.u.doorbell.rioid, | 2390 | event.u.doorbell.rioid, |
2378 | event.u.doorbell.payload); | 2391 | event.u.doorbell.payload); |
2379 | if (ret < 0) | 2392 | if (ret < 0) |
2380 | return ret; | 2393 | return ret; |
@@ -2421,7 +2434,7 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport) | |||
2421 | struct mport_dev *md; | 2434 | struct mport_dev *md; |
2422 | struct rio_mport_attr attr; | 2435 | struct rio_mport_attr attr; |
2423 | 2436 | ||
2424 | md = kzalloc(sizeof(struct mport_dev), GFP_KERNEL); | 2437 | md = kzalloc(sizeof(*md), GFP_KERNEL); |
2425 | if (!md) { | 2438 | if (!md) { |
2426 | rmcd_error("Unable allocate a device object"); | 2439 | rmcd_error("Unable allocate a device object"); |
2427 | return NULL; | 2440 | return NULL; |
@@ -2470,7 +2483,7 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport) | |||
2470 | /* The transfer_mode property will be returned through mport query | 2483 | /* The transfer_mode property will be returned through mport query |
2471 | * interface | 2484 | * interface |
2472 | */ | 2485 | */ |
2473 | #ifdef CONFIG_PPC /* for now: only on Freescale's SoCs */ | 2486 | #ifdef CONFIG_FSL_RIO /* for now: only on Freescale's SoCs */ |
2474 | md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED; | 2487 | md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED; |
2475 | #else | 2488 | #else |
2476 | md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER; | 2489 | md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER; |
@@ -2669,9 +2682,9 @@ static int __init mport_init(void) | |||
2669 | 2682 | ||
2670 | /* Create device class needed by udev */ | 2683 | /* Create device class needed by udev */ |
2671 | dev_class = class_create(THIS_MODULE, DRV_NAME); | 2684 | dev_class = class_create(THIS_MODULE, DRV_NAME); |
2672 | if (!dev_class) { | 2685 | if (IS_ERR(dev_class)) { |
2673 | rmcd_error("Unable to create " DRV_NAME " class"); | 2686 | rmcd_error("Unable to create " DRV_NAME " class"); |
2674 | return -EINVAL; | 2687 | return PTR_ERR(dev_class); |
2675 | } | 2688 | } |
2676 | 2689 | ||
2677 | ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME); | 2690 | ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME); |
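The class_create() fix in the last hunk works because class_create() reports failure with an ERR_PTR-encoded pointer, never NULL, so the old !dev_class test could not fire; checking IS_ERR() and returning PTR_ERR() both detects the failure and preserves the real errno instead of collapsing it to -EINVAL. A minimal standalone sketch of that convention (the macros are re-implemented here for illustration only; they are not the kernel's headers):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* stand-in for class_create(): fails by returning an encoded errno, not NULL */
static void *fake_class_create(int fail)
{
	return fail ? ERR_PTR(-EINVAL) : (void *)0x1000;
}

int main(void)
{
	void *cls = fake_class_create(1);

	if (IS_ERR(cls))	/* a NULL check would never fire here */
		printf("create failed: %ld\n", PTR_ERR(cls));
	return 0;
}

The same IS_ERR()/PTR_ERR() pairing is what the devpts and pty hunks further down rely on when devpts_pty_new() starts returning a dentry or an error pointer.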
diff --git a/drivers/s390/char/sclp_ctl.c b/drivers/s390/char/sclp_ctl.c index 648cb86afd42..ea607a4a1bdd 100644 --- a/drivers/s390/char/sclp_ctl.c +++ b/drivers/s390/char/sclp_ctl.c | |||
@@ -56,6 +56,7 @@ static int sclp_ctl_ioctl_sccb(void __user *user_area) | |||
56 | { | 56 | { |
57 | struct sclp_ctl_sccb ctl_sccb; | 57 | struct sclp_ctl_sccb ctl_sccb; |
58 | struct sccb_header *sccb; | 58 | struct sccb_header *sccb; |
59 | unsigned long copied; | ||
59 | int rc; | 60 | int rc; |
60 | 61 | ||
61 | if (copy_from_user(&ctl_sccb, user_area, sizeof(ctl_sccb))) | 62 | if (copy_from_user(&ctl_sccb, user_area, sizeof(ctl_sccb))) |
@@ -65,14 +66,15 @@ static int sclp_ctl_ioctl_sccb(void __user *user_area) | |||
65 | sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); | 66 | sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); |
66 | if (!sccb) | 67 | if (!sccb) |
67 | return -ENOMEM; | 68 | return -ENOMEM; |
68 | if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sizeof(*sccb))) { | 69 | copied = PAGE_SIZE - |
70 | copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), PAGE_SIZE); | ||
71 | if (offsetof(struct sccb_header, length) + | ||
72 | sizeof(sccb->length) > copied || sccb->length > copied) { | ||
69 | rc = -EFAULT; | 73 | rc = -EFAULT; |
70 | goto out_free; | 74 | goto out_free; |
71 | } | 75 | } |
72 | if (sccb->length > PAGE_SIZE || sccb->length < 8) | 76 | if (sccb->length < 8) { |
73 | return -EINVAL; | 77 | rc = -EINVAL; |
74 | if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sccb->length)) { | ||
75 | rc = -EFAULT; | ||
76 | goto out_free; | 78 | goto out_free; |
77 | } | 79 | } |
78 | rc = sclp_sync_request(ctl_sccb.cmdw, sccb); | 80 | rc = sclp_sync_request(ctl_sccb.cmdw, sccb); |
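The sclp_ctl rewrite stops trusting the user-supplied sccb->length before confirming the length field itself arrived: it pulls a whole page from user space, derives how many bytes were actually copied from copy_from_user()'s return value, and only then reads sccb->length, rejecting anything that claims more data than was copied. It also routes the too-short case through out_free, closing the page leak the old bare return -EINVAL had. A hedged standalone sketch of the bounds check (struct sccb_header is trimmed to two fields here and PAGE_SIZE_SKETCH is an assumed 4 KiB, both for the demo only):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_SKETCH 4096	/* assumed 4 KiB page for the demo */

/* trimmed stand-in for the s390 SCCB header; the real one has more fields */
struct sccb_header {
	uint16_t length;
	uint8_t  function_code;
};

/* can we trust sccb->length, given how many bytes really arrived? */
static int sccb_length_ok(const struct sccb_header *sccb, size_t copied)
{
	if (offsetof(struct sccb_header, length) + sizeof(sccb->length) > copied)
		return 0;	/* the length field itself was not copied */
	if (sccb->length > copied)
		return 0;	/* claims more data than we actually hold */
	if (sccb->length < 8)
		return 0;	/* shorter than a minimal SCCB */
	return 1;
}

int main(void)
{
	struct sccb_header sccb = { .length = PAGE_SIZE_SKETCH };	/* user-controlled */
	size_t copied = 16;	/* pretend only 16 of PAGE_SIZE_SKETCH bytes arrived */

	printf("usable: %d\n", sccb_length_ok(&sccb, copied));
	return 0;
}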
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index f3bb7af4e984..ead83a24bcd1 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c | |||
@@ -688,6 +688,7 @@ static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr, | |||
688 | { | 688 | { |
689 | struct flowi6 fl; | 689 | struct flowi6 fl; |
690 | 690 | ||
691 | memset(&fl, 0, sizeof(fl)); | ||
691 | if (saddr) | 692 | if (saddr) |
692 | memcpy(&fl.saddr, saddr, sizeof(struct in6_addr)); | 693 | memcpy(&fl.saddr, saddr, sizeof(struct in6_addr)); |
693 | if (daddr) | 694 | if (daddr) |
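The single memset() added to find_route_ipv6() matters because the function fills in only the source and destination addresses and leaves every other field of the on-stack flowi6 (flow label, ports, oif, ...) as stack garbage that the route lookup then reads. A small sketch of the pattern, using a hypothetical flow_key stand-in rather than the real struct flowi6:

#include <string.h>

/* hypothetical stand-in for struct flowi6: only two of many fields get set */
struct flow_key {
	unsigned char  saddr[16];
	unsigned char  daddr[16];
	unsigned int   flowlabel;	/* consulted by the lookup, never set below */
	unsigned short sport;
	unsigned short dport;
};

static void build_key(struct flow_key *fl,
		      const unsigned char *saddr,
		      const unsigned char *daddr)
{
	memset(fl, 0, sizeof(*fl));	/* without this, unset fields are stack garbage */
	if (saddr)
		memcpy(fl->saddr, saddr, sizeof(fl->saddr));
	if (daddr)
		memcpy(fl->daddr, daddr, sizeof(fl->daddr));
}

int main(void)
{
	unsigned char src[16] = { 0 };
	struct flow_key key;

	build_key(&key, src, NULL);	/* every field is now well defined */
	return 0;
}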
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c index 57e781c71e67..837effe19907 100644 --- a/drivers/soc/mediatek/mtk-scpsys.c +++ b/drivers/soc/mediatek/mtk-scpsys.c | |||
@@ -491,13 +491,14 @@ static int scpsys_probe(struct platform_device *pdev) | |||
491 | genpd->dev_ops.active_wakeup = scpsys_active_wakeup; | 491 | genpd->dev_ops.active_wakeup = scpsys_active_wakeup; |
492 | 492 | ||
493 | /* | 493 | /* |
494 | * With CONFIG_PM disabled turn on all domains to make the | 494 | * Initially turn on all domains to make the domains usable |
495 | * hardware usable. | 495 | * with !CONFIG_PM and to get the hardware in sync with the |
496 | * software. The unused domains will be switched off during | ||
497 | * late_init time. | ||
496 | */ | 498 | */ |
497 | if (!IS_ENABLED(CONFIG_PM)) | 499 | genpd->power_on(genpd); |
498 | genpd->power_on(genpd); | ||
499 | 500 | ||
500 | pm_genpd_init(genpd, NULL, true); | 501 | pm_genpd_init(genpd, NULL, false); |
501 | } | 502 | } |
502 | 503 | ||
503 | /* | 504 | /* |
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c index b793c04028a3..be72a8e5f221 100644 --- a/drivers/staging/media/davinci_vpfe/vpfe_video.c +++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c | |||
@@ -172,9 +172,11 @@ static int vpfe_prepare_pipeline(struct vpfe_video_device *video) | |||
172 | static int vpfe_update_pipe_state(struct vpfe_video_device *video) | 172 | static int vpfe_update_pipe_state(struct vpfe_video_device *video) |
173 | { | 173 | { |
174 | struct vpfe_pipeline *pipe = &video->pipe; | 174 | struct vpfe_pipeline *pipe = &video->pipe; |
175 | int ret; | ||
175 | 176 | ||
176 | if (vpfe_prepare_pipeline(video)) | 177 | ret = vpfe_prepare_pipeline(video); |
177 | return vpfe_prepare_pipeline(video); | 178 | if (ret) |
179 | return ret; | ||
178 | 180 | ||
179 | /* | 181 | /* |
180 | * Find out if there is any input video | 182 | * Find out if there is any input video |
@@ -182,9 +184,10 @@ static int vpfe_update_pipe_state(struct vpfe_video_device *video) | |||
182 | */ | 184 | */ |
183 | if (pipe->input_num == 0) { | 185 | if (pipe->input_num == 0) { |
184 | pipe->state = VPFE_PIPELINE_STREAM_CONTINUOUS; | 186 | pipe->state = VPFE_PIPELINE_STREAM_CONTINUOUS; |
185 | if (vpfe_update_current_ext_subdev(video)) { | 187 | ret = vpfe_update_current_ext_subdev(video); |
188 | if (ret) { | ||
186 | pr_err("Invalid external subdev\n"); | 189 | pr_err("Invalid external subdev\n"); |
187 | return vpfe_update_current_ext_subdev(video); | 190 | return ret; |
188 | } | 191 | } |
189 | } else { | 192 | } else { |
190 | pipe->state = VPFE_PIPELINE_STREAM_SINGLESHOT; | 193 | pipe->state = VPFE_PIPELINE_STREAM_SINGLESHOT; |
@@ -667,6 +670,7 @@ static int vpfe_enum_fmt(struct file *file, void *priv, | |||
667 | struct v4l2_subdev *subdev; | 670 | struct v4l2_subdev *subdev; |
668 | struct v4l2_format format; | 671 | struct v4l2_format format; |
669 | struct media_pad *remote; | 672 | struct media_pad *remote; |
673 | int ret; | ||
670 | 674 | ||
671 | v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt\n"); | 675 | v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt\n"); |
672 | 676 | ||
@@ -695,10 +699,11 @@ static int vpfe_enum_fmt(struct file *file, void *priv, | |||
695 | sd_fmt.pad = remote->index; | 699 | sd_fmt.pad = remote->index; |
696 | sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; | 700 | sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; |
697 | /* get output format of remote subdev */ | 701 | /* get output format of remote subdev */ |
698 | if (v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt)) { | 702 | ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt); |
703 | if (ret) { | ||
699 | v4l2_err(&vpfe_dev->v4l2_dev, | 704 | v4l2_err(&vpfe_dev->v4l2_dev, |
700 | "invalid remote subdev for video node\n"); | 705 | "invalid remote subdev for video node\n"); |
701 | return v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt); | 706 | return ret; |
702 | } | 707 | } |
703 | /* convert to pix format */ | 708 | /* convert to pix format */ |
704 | mbus.code = sd_fmt.format.code; | 709 | mbus.code = sd_fmt.format.code; |
@@ -725,6 +730,7 @@ static int vpfe_s_fmt(struct file *file, void *priv, | |||
725 | struct vpfe_video_device *video = video_drvdata(file); | 730 | struct vpfe_video_device *video = video_drvdata(file); |
726 | struct vpfe_device *vpfe_dev = video->vpfe_dev; | 731 | struct vpfe_device *vpfe_dev = video->vpfe_dev; |
727 | struct v4l2_format format; | 732 | struct v4l2_format format; |
733 | int ret; | ||
728 | 734 | ||
729 | v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt\n"); | 735 | v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt\n"); |
730 | /* If streaming is started, return error */ | 736 | /* If streaming is started, return error */ |
@@ -733,8 +739,9 @@ static int vpfe_s_fmt(struct file *file, void *priv, | |||
733 | return -EBUSY; | 739 | return -EBUSY; |
734 | } | 740 | } |
735 | /* get adjacent subdev's output pad format */ | 741 | /* get adjacent subdev's output pad format */ |
736 | if (__vpfe_video_get_format(video, &format)) | 742 | ret = __vpfe_video_get_format(video, &format); |
737 | return __vpfe_video_get_format(video, &format); | 743 | if (ret) |
744 | return ret; | ||
738 | *fmt = format; | 745 | *fmt = format; |
739 | video->fmt = *fmt; | 746 | video->fmt = *fmt; |
740 | return 0; | 747 | return 0; |
@@ -757,11 +764,13 @@ static int vpfe_try_fmt(struct file *file, void *priv, | |||
757 | struct vpfe_video_device *video = video_drvdata(file); | 764 | struct vpfe_video_device *video = video_drvdata(file); |
758 | struct vpfe_device *vpfe_dev = video->vpfe_dev; | 765 | struct vpfe_device *vpfe_dev = video->vpfe_dev; |
759 | struct v4l2_format format; | 766 | struct v4l2_format format; |
767 | int ret; | ||
760 | 768 | ||
761 | v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt\n"); | 769 | v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt\n"); |
762 | /* get adjacent subdev's output pad format */ | 770 | /* get adjacent subdev's output pad format */ |
763 | if (__vpfe_video_get_format(video, &format)) | 771 | ret = __vpfe_video_get_format(video, &format); |
764 | return __vpfe_video_get_format(video, &format); | 772 | if (ret) |
773 | return ret; | ||
765 | 774 | ||
766 | *fmt = format; | 775 | *fmt = format; |
767 | return 0; | 776 | return 0; |
@@ -838,8 +847,9 @@ static int vpfe_s_input(struct file *file, void *priv, unsigned int index) | |||
838 | 847 | ||
839 | v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n"); | 848 | v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n"); |
840 | 849 | ||
841 | if (mutex_lock_interruptible(&video->lock)) | 850 | ret = mutex_lock_interruptible(&video->lock); |
842 | return mutex_lock_interruptible(&video->lock); | 851 | if (ret) |
852 | return ret; | ||
843 | /* | 853 | /* |
844 | * If streaming is started return device busy | 854 | * If streaming is started return device busy |
845 | * error | 855 | * error |
@@ -940,8 +950,9 @@ static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id) | |||
940 | v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n"); | 950 | v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n"); |
941 | 951 | ||
942 | /* Call decoder driver function to set the standard */ | 952 | /* Call decoder driver function to set the standard */ |
943 | if (mutex_lock_interruptible(&video->lock)) | 953 | ret = mutex_lock_interruptible(&video->lock); |
944 | return mutex_lock_interruptible(&video->lock); | 954 | if (ret) |
955 | return ret; | ||
945 | sdinfo = video->current_ext_subdev; | 956 | sdinfo = video->current_ext_subdev; |
946 | /* If streaming is started, return device busy error */ | 957 | /* If streaming is started, return device busy error */ |
947 | if (video->started) { | 958 | if (video->started) { |
@@ -1327,8 +1338,9 @@ static int vpfe_reqbufs(struct file *file, void *priv, | |||
1327 | return -EINVAL; | 1338 | return -EINVAL; |
1328 | } | 1339 | } |
1329 | 1340 | ||
1330 | if (mutex_lock_interruptible(&video->lock)) | 1341 | ret = mutex_lock_interruptible(&video->lock); |
1331 | return mutex_lock_interruptible(&video->lock); | 1342 | if (ret) |
1343 | return ret; | ||
1332 | 1344 | ||
1333 | if (video->io_usrs != 0) { | 1345 | if (video->io_usrs != 0) { |
1334 | v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n"); | 1346 | v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n"); |
@@ -1354,10 +1366,11 @@ static int vpfe_reqbufs(struct file *file, void *priv, | |||
1354 | q->buf_struct_size = sizeof(struct vpfe_cap_buffer); | 1366 | q->buf_struct_size = sizeof(struct vpfe_cap_buffer); |
1355 | q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; | 1367 | q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; |
1356 | 1368 | ||
1357 | if (vb2_queue_init(q)) { | 1369 | ret = vb2_queue_init(q); |
1370 | if (ret) { | ||
1358 | v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n"); | 1371 | v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n"); |
1359 | vb2_dma_contig_cleanup_ctx(vpfe_dev->pdev); | 1372 | vb2_dma_contig_cleanup_ctx(vpfe_dev->pdev); |
1360 | return vb2_queue_init(q); | 1373 | return ret; |
1361 | } | 1374 | } |
1362 | 1375 | ||
1363 | fh->io_allowed = 1; | 1376 | fh->io_allowed = 1; |
@@ -1533,8 +1546,9 @@ static int vpfe_streamoff(struct file *file, void *priv, | |||
1533 | return -EINVAL; | 1546 | return -EINVAL; |
1534 | } | 1547 | } |
1535 | 1548 | ||
1536 | if (mutex_lock_interruptible(&video->lock)) | 1549 | ret = mutex_lock_interruptible(&video->lock); |
1537 | return mutex_lock_interruptible(&video->lock); | 1550 | if (ret) |
1551 | return ret; | ||
1538 | 1552 | ||
1539 | vpfe_stop_capture(video); | 1553 | vpfe_stop_capture(video); |
1540 | ret = vb2_streamoff(&video->buffer_queue, buf_type); | 1554 | ret = vb2_streamoff(&video->buffer_queue, buf_type); |
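Every vpfe_video.c hunk above is the same mechanical fix: the old code called a function inside the if () and then called it a second time to produce the return value, so the second call could repeat side effects or even succeed and turn a detected failure into a bogus 0. Storing the result once and returning exactly what was observed removes both problems. A standalone sketch of the failure mode, with a hypothetical prepare() standing in for the driver calls:

#include <stdio.h>

static int calls;

/* a function with side effects whose result must only be taken once */
static int prepare(void)
{
	return calls++ ? 0 : -1;	/* fails on the first call, succeeds after */
}

static int update_old(void)
{
	if (prepare())
		return prepare();	/* second call: new side effect, new result */
	return 0;
}

static int update_new(void)
{
	int ret = prepare();

	if (ret)
		return ret;		/* the observed error is the one reported */
	return 0;
}

int main(void)
{
	calls = 0;
	printf("old: %d\n", update_old());	/* reports 0 despite the failure */
	calls = 0;
	printf("new: %d\n", update_new());	/* reports -1 */
	return 0;
}

The mutex_lock_interruptible() hunks are the sharpest instance: a second, now-successful call would hand 0 back to the caller while leaving the mutex held.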
diff --git a/drivers/staging/rdma/hfi1/TODO b/drivers/staging/rdma/hfi1/TODO index 05de0dad8762..4c6f1d7d2eaf 100644 --- a/drivers/staging/rdma/hfi1/TODO +++ b/drivers/staging/rdma/hfi1/TODO | |||
@@ -3,4 +3,4 @@ July, 2015 | |||
3 | - Remove unneeded file entries in sysfs | 3 | - Remove unneeded file entries in sysfs |
4 | - Remove software processing of IB protocol and place in library for use | 4 | - Remove software processing of IB protocol and place in library for use |
5 | by qib, ipath (if still present), hfi1, and eventually soft-roce | 5 | by qib, ipath (if still present), hfi1, and eventually soft-roce |
6 | 6 | - Replace incorrect uAPI | |
diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c index 8396dc5fb6c1..c1c5bf82addb 100644 --- a/drivers/staging/rdma/hfi1/file_ops.c +++ b/drivers/staging/rdma/hfi1/file_ops.c | |||
@@ -49,6 +49,8 @@ | |||
49 | #include <linux/vmalloc.h> | 49 | #include <linux/vmalloc.h> |
50 | #include <linux/io.h> | 50 | #include <linux/io.h> |
51 | 51 | ||
52 | #include <rdma/ib.h> | ||
53 | |||
52 | #include "hfi.h" | 54 | #include "hfi.h" |
53 | #include "pio.h" | 55 | #include "pio.h" |
54 | #include "device.h" | 56 | #include "device.h" |
@@ -190,6 +192,10 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data, | |||
190 | int uctxt_required = 1; | 192 | int uctxt_required = 1; |
191 | int must_be_root = 0; | 193 | int must_be_root = 0; |
192 | 194 | ||
195 | /* FIXME: This interface cannot continue out of staging */ | ||
196 | if (WARN_ON_ONCE(!ib_safe_file_access(fp))) | ||
197 | return -EACCES; | ||
198 | |||
193 | if (count < sizeof(cmd)) { | 199 | if (count < sizeof(cmd)) { |
194 | ret = -EINVAL; | 200 | ret = -EINVAL; |
195 | goto bail; | 201 | goto bail; |
@@ -791,15 +797,16 @@ static int hfi1_file_close(struct inode *inode, struct file *fp) | |||
791 | spin_unlock_irqrestore(&dd->uctxt_lock, flags); | 797 | spin_unlock_irqrestore(&dd->uctxt_lock, flags); |
792 | 798 | ||
793 | dd->rcd[uctxt->ctxt] = NULL; | 799 | dd->rcd[uctxt->ctxt] = NULL; |
800 | |||
801 | hfi1_user_exp_rcv_free(fdata); | ||
802 | hfi1_clear_ctxt_pkey(dd, uctxt->ctxt); | ||
803 | |||
794 | uctxt->rcvwait_to = 0; | 804 | uctxt->rcvwait_to = 0; |
795 | uctxt->piowait_to = 0; | 805 | uctxt->piowait_to = 0; |
796 | uctxt->rcvnowait = 0; | 806 | uctxt->rcvnowait = 0; |
797 | uctxt->pionowait = 0; | 807 | uctxt->pionowait = 0; |
798 | uctxt->event_flags = 0; | 808 | uctxt->event_flags = 0; |
799 | 809 | ||
800 | hfi1_user_exp_rcv_free(fdata); | ||
801 | hfi1_clear_ctxt_pkey(dd, uctxt->ctxt); | ||
802 | |||
803 | hfi1_stats.sps_ctxts--; | 810 | hfi1_stats.sps_ctxts--; |
804 | if (++dd->freectxts == dd->num_user_contexts) | 811 | if (++dd->freectxts == dd->num_user_contexts) |
805 | aspm_enable_all(dd); | 812 | aspm_enable_all(dd); |
@@ -1127,27 +1134,13 @@ bail: | |||
1127 | 1134 | ||
1128 | static int user_init(struct file *fp) | 1135 | static int user_init(struct file *fp) |
1129 | { | 1136 | { |
1130 | int ret; | ||
1131 | unsigned int rcvctrl_ops = 0; | 1137 | unsigned int rcvctrl_ops = 0; |
1132 | struct hfi1_filedata *fd = fp->private_data; | 1138 | struct hfi1_filedata *fd = fp->private_data; |
1133 | struct hfi1_ctxtdata *uctxt = fd->uctxt; | 1139 | struct hfi1_ctxtdata *uctxt = fd->uctxt; |
1134 | 1140 | ||
1135 | /* make sure that the context has already been setup */ | 1141 | /* make sure that the context has already been setup */ |
1136 | if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags)) { | 1142 | if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags)) |
1137 | ret = -EFAULT; | 1143 | return -EFAULT; |
1138 | goto done; | ||
1139 | } | ||
1140 | |||
1141 | /* | ||
1142 | * Subctxts don't need to initialize anything since master | ||
1143 | * has done it. | ||
1144 | */ | ||
1145 | if (fd->subctxt) { | ||
1146 | ret = wait_event_interruptible(uctxt->wait, !test_bit( | ||
1147 | HFI1_CTXT_MASTER_UNINIT, | ||
1148 | &uctxt->event_flags)); | ||
1149 | goto expected; | ||
1150 | } | ||
1151 | 1144 | ||
1152 | /* initialize poll variables... */ | 1145 | /* initialize poll variables... */ |
1153 | uctxt->urgent = 0; | 1146 | uctxt->urgent = 0; |
@@ -1202,19 +1195,7 @@ static int user_init(struct file *fp) | |||
1202 | wake_up(&uctxt->wait); | 1195 | wake_up(&uctxt->wait); |
1203 | } | 1196 | } |
1204 | 1197 | ||
1205 | expected: | 1198 | return 0; |
1206 | /* | ||
1207 | * Expected receive has to be setup for all processes (including | ||
1208 | * shared contexts). However, it has to be done after the master | ||
1209 | * context has been fully configured as it depends on the | ||
1210 | * eager/expected split of the RcvArray entries. | ||
1211 | * Setting it up here ensures that the subcontexts will be waiting | ||
1212 | * (due to the above wait_event_interruptible() until the master | ||
1213 | * is setup. | ||
1214 | */ | ||
1215 | ret = hfi1_user_exp_rcv_init(fp); | ||
1216 | done: | ||
1217 | return ret; | ||
1218 | } | 1199 | } |
1219 | 1200 | ||
1220 | static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len) | 1201 | static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len) |
@@ -1261,7 +1242,7 @@ static int setup_ctxt(struct file *fp) | |||
1261 | int ret = 0; | 1242 | int ret = 0; |
1262 | 1243 | ||
1263 | /* | 1244 | /* |
1264 | * Context should be set up only once (including allocation and | 1245 | * Context should be set up only once, including allocation and |
1265 | * programming of eager buffers. This is done if context sharing | 1246 | * programming of eager buffers. This is done if context sharing |
1266 | * is not requested or by the master process. | 1247 | * is not requested or by the master process. |
1267 | */ | 1248 | */ |
@@ -1282,10 +1263,29 @@ static int setup_ctxt(struct file *fp) | |||
1282 | if (ret) | 1263 | if (ret) |
1283 | goto done; | 1264 | goto done; |
1284 | } | 1265 | } |
1266 | } else { | ||
1267 | ret = wait_event_interruptible(uctxt->wait, !test_bit( | ||
1268 | HFI1_CTXT_MASTER_UNINIT, | ||
1269 | &uctxt->event_flags)); | ||
1270 | if (ret) | ||
1271 | goto done; | ||
1285 | } | 1272 | } |
1273 | |||
1286 | ret = hfi1_user_sdma_alloc_queues(uctxt, fp); | 1274 | ret = hfi1_user_sdma_alloc_queues(uctxt, fp); |
1287 | if (ret) | 1275 | if (ret) |
1288 | goto done; | 1276 | goto done; |
1277 | /* | ||
1278 | * Expected receive has to be setup for all processes (including | ||
1279 | * shared contexts). However, it has to be done after the master | ||
1280 | * context has been fully configured as it depends on the | ||
1281 | * eager/expected split of the RcvArray entries. | ||
1282 | * Setting it up here ensures that the subcontexts will be waiting | ||
1283 | * (due to the above wait_event_interruptible() until the master | ||
1284 | * is setup. | ||
1285 | */ | ||
1286 | ret = hfi1_user_exp_rcv_init(fp); | ||
1287 | if (ret) | ||
1288 | goto done; | ||
1289 | 1289 | ||
1290 | set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags); | 1290 | set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags); |
1291 | done: | 1291 | done: |
@@ -1565,29 +1565,8 @@ static loff_t ui_lseek(struct file *filp, loff_t offset, int whence) | |||
1565 | { | 1565 | { |
1566 | struct hfi1_devdata *dd = filp->private_data; | 1566 | struct hfi1_devdata *dd = filp->private_data; |
1567 | 1567 | ||
1568 | switch (whence) { | 1568 | return fixed_size_llseek(filp, offset, whence, |
1569 | case SEEK_SET: | 1569 | (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE); |
1570 | break; | ||
1571 | case SEEK_CUR: | ||
1572 | offset += filp->f_pos; | ||
1573 | break; | ||
1574 | case SEEK_END: | ||
1575 | offset = ((dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE) - | ||
1576 | offset; | ||
1577 | break; | ||
1578 | default: | ||
1579 | return -EINVAL; | ||
1580 | } | ||
1581 | |||
1582 | if (offset < 0) | ||
1583 | return -EINVAL; | ||
1584 | |||
1585 | if (offset >= (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE) | ||
1586 | return -EINVAL; | ||
1587 | |||
1588 | filp->f_pos = offset; | ||
1589 | |||
1590 | return filp->f_pos; | ||
1591 | } | 1570 | } |
1592 | 1571 | ||
1593 | /* NOTE: assumes unsigned long is 8 bytes */ | 1572 | /* NOTE: assumes unsigned long is 8 bytes */ |
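The ui_lseek() hunk removes an open-coded whence switch plus range checks that amount to "seek within a file of one fixed size", which is what fixed_size_llseek() centralizes; the size here is the mapped register span plus DC8051_DATA_MEM_SIZE. The sketch below restates the removed open-coded logic in isolation (it keeps the old code's SEEK_END convention and bounds; the generic helper applies the standard lseek conventions against the same fixed upper limit):

#include <stdio.h>

enum { SK_SET, SK_CUR, SK_END };	/* stand-ins for SEEK_SET/SEEK_CUR/SEEK_END */

/* the bound checking the removed ui_lseek() open-coded for a fixed-size file */
static long long seek_fixed(long long pos, long long offset, int whence,
			    long long size)
{
	switch (whence) {
	case SK_SET:
		break;
	case SK_CUR:
		offset += pos;
		break;
	case SK_END:
		offset = size - offset;	/* the old driver's convention */
		break;
	default:
		return -1;		/* -EINVAL in the kernel */
	}
	if (offset < 0 || offset >= size)
		return -1;
	return offset;			/* becomes the new file position */
}

int main(void)
{
	long long size = 4096;

	printf("%lld\n", seek_fixed(0, 16, SK_SET, size));	/* 16 */
	printf("%lld\n", seek_fixed(16, 8, SK_CUR, size));	/* 24 */
	printf("%lld\n", seek_fixed(0, -1, SK_SET, size));	/* rejected */
	return 0;
}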
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.c b/drivers/staging/rdma/hfi1/mmu_rb.c index c7ad0164ea9a..b3f0682a36c9 100644 --- a/drivers/staging/rdma/hfi1/mmu_rb.c +++ b/drivers/staging/rdma/hfi1/mmu_rb.c | |||
@@ -71,6 +71,7 @@ static inline void mmu_notifier_range_start(struct mmu_notifier *, | |||
71 | struct mm_struct *, | 71 | struct mm_struct *, |
72 | unsigned long, unsigned long); | 72 | unsigned long, unsigned long); |
73 | static void mmu_notifier_mem_invalidate(struct mmu_notifier *, | 73 | static void mmu_notifier_mem_invalidate(struct mmu_notifier *, |
74 | struct mm_struct *, | ||
74 | unsigned long, unsigned long); | 75 | unsigned long, unsigned long); |
75 | static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *, | 76 | static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *, |
76 | unsigned long, unsigned long); | 77 | unsigned long, unsigned long); |
@@ -137,7 +138,7 @@ void hfi1_mmu_rb_unregister(struct rb_root *root) | |||
137 | rbnode = rb_entry(node, struct mmu_rb_node, node); | 138 | rbnode = rb_entry(node, struct mmu_rb_node, node); |
138 | rb_erase(node, root); | 139 | rb_erase(node, root); |
139 | if (handler->ops->remove) | 140 | if (handler->ops->remove) |
140 | handler->ops->remove(root, rbnode, false); | 141 | handler->ops->remove(root, rbnode, NULL); |
141 | } | 142 | } |
142 | } | 143 | } |
143 | 144 | ||
@@ -176,7 +177,7 @@ unlock: | |||
176 | return ret; | 177 | return ret; |
177 | } | 178 | } |
178 | 179 | ||
179 | /* Caller must host handler lock */ | 180 | /* Caller must hold handler lock */ |
180 | static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler, | 181 | static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler, |
181 | unsigned long addr, | 182 | unsigned long addr, |
182 | unsigned long len) | 183 | unsigned long len) |
@@ -200,15 +201,21 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler, | |||
200 | return node; | 201 | return node; |
201 | } | 202 | } |
202 | 203 | ||
204 | /* Caller must *not* hold handler lock. */ | ||
203 | static void __mmu_rb_remove(struct mmu_rb_handler *handler, | 205 | static void __mmu_rb_remove(struct mmu_rb_handler *handler, |
204 | struct mmu_rb_node *node, bool arg) | 206 | struct mmu_rb_node *node, struct mm_struct *mm) |
205 | { | 207 | { |
208 | unsigned long flags; | ||
209 | |||
206 | /* Validity of handler and node pointers has been checked by caller. */ | 210 | /* Validity of handler and node pointers has been checked by caller. */ |
207 | hfi1_cdbg(MMU, "Removing node addr 0x%llx, len %u", node->addr, | 211 | hfi1_cdbg(MMU, "Removing node addr 0x%llx, len %u", node->addr, |
208 | node->len); | 212 | node->len); |
213 | spin_lock_irqsave(&handler->lock, flags); | ||
209 | __mmu_int_rb_remove(node, handler->root); | 214 | __mmu_int_rb_remove(node, handler->root); |
215 | spin_unlock_irqrestore(&handler->lock, flags); | ||
216 | |||
210 | if (handler->ops->remove) | 217 | if (handler->ops->remove) |
211 | handler->ops->remove(handler->root, node, arg); | 218 | handler->ops->remove(handler->root, node, mm); |
212 | } | 219 | } |
213 | 220 | ||
214 | struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr, | 221 | struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr, |
@@ -231,14 +238,11 @@ struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr, | |||
231 | void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node) | 238 | void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node) |
232 | { | 239 | { |
233 | struct mmu_rb_handler *handler = find_mmu_handler(root); | 240 | struct mmu_rb_handler *handler = find_mmu_handler(root); |
234 | unsigned long flags; | ||
235 | 241 | ||
236 | if (!handler || !node) | 242 | if (!handler || !node) |
237 | return; | 243 | return; |
238 | 244 | ||
239 | spin_lock_irqsave(&handler->lock, flags); | 245 | __mmu_rb_remove(handler, node, NULL); |
240 | __mmu_rb_remove(handler, node, false); | ||
241 | spin_unlock_irqrestore(&handler->lock, flags); | ||
242 | } | 246 | } |
243 | 247 | ||
244 | static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root) | 248 | static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root) |
@@ -260,7 +264,7 @@ unlock: | |||
260 | static inline void mmu_notifier_page(struct mmu_notifier *mn, | 264 | static inline void mmu_notifier_page(struct mmu_notifier *mn, |
261 | struct mm_struct *mm, unsigned long addr) | 265 | struct mm_struct *mm, unsigned long addr) |
262 | { | 266 | { |
263 | mmu_notifier_mem_invalidate(mn, addr, addr + PAGE_SIZE); | 267 | mmu_notifier_mem_invalidate(mn, mm, addr, addr + PAGE_SIZE); |
264 | } | 268 | } |
265 | 269 | ||
266 | static inline void mmu_notifier_range_start(struct mmu_notifier *mn, | 270 | static inline void mmu_notifier_range_start(struct mmu_notifier *mn, |
@@ -268,25 +272,31 @@ static inline void mmu_notifier_range_start(struct mmu_notifier *mn, | |||
268 | unsigned long start, | 272 | unsigned long start, |
269 | unsigned long end) | 273 | unsigned long end) |
270 | { | 274 | { |
271 | mmu_notifier_mem_invalidate(mn, start, end); | 275 | mmu_notifier_mem_invalidate(mn, mm, start, end); |
272 | } | 276 | } |
273 | 277 | ||
274 | static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn, | 278 | static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn, |
279 | struct mm_struct *mm, | ||
275 | unsigned long start, unsigned long end) | 280 | unsigned long start, unsigned long end) |
276 | { | 281 | { |
277 | struct mmu_rb_handler *handler = | 282 | struct mmu_rb_handler *handler = |
278 | container_of(mn, struct mmu_rb_handler, mn); | 283 | container_of(mn, struct mmu_rb_handler, mn); |
279 | struct rb_root *root = handler->root; | 284 | struct rb_root *root = handler->root; |
280 | struct mmu_rb_node *node; | 285 | struct mmu_rb_node *node, *ptr = NULL; |
281 | unsigned long flags; | 286 | unsigned long flags; |
282 | 287 | ||
283 | spin_lock_irqsave(&handler->lock, flags); | 288 | spin_lock_irqsave(&handler->lock, flags); |
284 | for (node = __mmu_int_rb_iter_first(root, start, end - 1); node; | 289 | for (node = __mmu_int_rb_iter_first(root, start, end - 1); |
285 | node = __mmu_int_rb_iter_next(node, start, end - 1)) { | 290 | node; node = ptr) { |
291 | /* Guard against node removal. */ | ||
292 | ptr = __mmu_int_rb_iter_next(node, start, end - 1); | ||
286 | hfi1_cdbg(MMU, "Invalidating node addr 0x%llx, len %u", | 293 | hfi1_cdbg(MMU, "Invalidating node addr 0x%llx, len %u", |
287 | node->addr, node->len); | 294 | node->addr, node->len); |
288 | if (handler->ops->invalidate(root, node)) | 295 | if (handler->ops->invalidate(root, node)) { |
289 | __mmu_rb_remove(handler, node, true); | 296 | spin_unlock_irqrestore(&handler->lock, flags); |
297 | __mmu_rb_remove(handler, node, mm); | ||
298 | spin_lock_irqsave(&handler->lock, flags); | ||
299 | } | ||
290 | } | 300 | } |
291 | spin_unlock_irqrestore(&handler->lock, flags); | 301 | spin_unlock_irqrestore(&handler->lock, flags); |
292 | } | 302 | } |
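Two things change in the mmu_rb notifier path: __mmu_rb_remove() now takes the handler lock itself so the ops->remove() callback runs unlocked (it may sleep, e.g. to unpin pages), and the invalidate loop therefore saves the next interval-tree node before it drops the lock and removes the current one, so the walk survives the removal. An analogous standalone sketch of the "fetch next before removing current" idiom, on a plain singly linked list rather than the interval tree:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

/* remove every matching node; grabbing ->next first keeps the walk valid
 * even though the current node is freed (analogous to saving the next
 * tree node before __mmu_rb_remove() takes the current one out). */
static void remove_matching(struct node **head, int val)
{
	struct node **pp = head, *cur, *next;

	for (cur = *head; cur; cur = next) {
		next = cur->next;		/* guard against node removal */
		if (cur->val == val) {
			*pp = next;
			free(cur);
		} else {
			pp = &cur->next;
		}
	}
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 5; i++) {
		struct node *n = malloc(sizeof(*n));
		n->val = i % 2;
		n->next = head;
		head = n;
	}
	remove_matching(&head, 1);
	for (struct node *n = head; n; n = n->next)
		printf("%d ", n->val);
	printf("\n");
	return 0;
}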
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.h b/drivers/staging/rdma/hfi1/mmu_rb.h index f8523fdb8a18..19a306e83c7d 100644 --- a/drivers/staging/rdma/hfi1/mmu_rb.h +++ b/drivers/staging/rdma/hfi1/mmu_rb.h | |||
@@ -59,7 +59,8 @@ struct mmu_rb_node { | |||
59 | struct mmu_rb_ops { | 59 | struct mmu_rb_ops { |
60 | bool (*filter)(struct mmu_rb_node *, unsigned long, unsigned long); | 60 | bool (*filter)(struct mmu_rb_node *, unsigned long, unsigned long); |
61 | int (*insert)(struct rb_root *, struct mmu_rb_node *); | 61 | int (*insert)(struct rb_root *, struct mmu_rb_node *); |
62 | void (*remove)(struct rb_root *, struct mmu_rb_node *, bool); | 62 | void (*remove)(struct rb_root *, struct mmu_rb_node *, |
63 | struct mm_struct *); | ||
63 | int (*invalidate)(struct rb_root *, struct mmu_rb_node *); | 64 | int (*invalidate)(struct rb_root *, struct mmu_rb_node *); |
64 | }; | 65 | }; |
65 | 66 | ||
diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c index 29a5ad28019b..dc9119e1b458 100644 --- a/drivers/staging/rdma/hfi1/qp.c +++ b/drivers/staging/rdma/hfi1/qp.c | |||
@@ -519,10 +519,12 @@ static void iowait_sdma_drained(struct iowait *wait) | |||
519 | * do the flush work until that QP's | 519 | * do the flush work until that QP's |
520 | * sdma work has finished. | 520 | * sdma work has finished. |
521 | */ | 521 | */ |
522 | spin_lock(&qp->s_lock); | ||
522 | if (qp->s_flags & RVT_S_WAIT_DMA) { | 523 | if (qp->s_flags & RVT_S_WAIT_DMA) { |
523 | qp->s_flags &= ~RVT_S_WAIT_DMA; | 524 | qp->s_flags &= ~RVT_S_WAIT_DMA; |
524 | hfi1_schedule_send(qp); | 525 | hfi1_schedule_send(qp); |
525 | } | 526 | } |
527 | spin_unlock(&qp->s_lock); | ||
526 | } | 528 | } |
527 | 529 | ||
528 | /** | 530 | /** |
diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/staging/rdma/hfi1/user_exp_rcv.c index 0861e095df8d..8bd56d5c783d 100644 --- a/drivers/staging/rdma/hfi1/user_exp_rcv.c +++ b/drivers/staging/rdma/hfi1/user_exp_rcv.c | |||
@@ -87,7 +87,8 @@ static u32 find_phys_blocks(struct page **, unsigned, struct tid_pageset *); | |||
87 | static int set_rcvarray_entry(struct file *, unsigned long, u32, | 87 | static int set_rcvarray_entry(struct file *, unsigned long, u32, |
88 | struct tid_group *, struct page **, unsigned); | 88 | struct tid_group *, struct page **, unsigned); |
89 | static int mmu_rb_insert(struct rb_root *, struct mmu_rb_node *); | 89 | static int mmu_rb_insert(struct rb_root *, struct mmu_rb_node *); |
90 | static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *, bool); | 90 | static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *, |
91 | struct mm_struct *); | ||
91 | static int mmu_rb_invalidate(struct rb_root *, struct mmu_rb_node *); | 92 | static int mmu_rb_invalidate(struct rb_root *, struct mmu_rb_node *); |
92 | static int program_rcvarray(struct file *, unsigned long, struct tid_group *, | 93 | static int program_rcvarray(struct file *, unsigned long, struct tid_group *, |
93 | struct tid_pageset *, unsigned, u16, struct page **, | 94 | struct tid_pageset *, unsigned, u16, struct page **, |
@@ -254,6 +255,8 @@ int hfi1_user_exp_rcv_free(struct hfi1_filedata *fd) | |||
254 | struct hfi1_ctxtdata *uctxt = fd->uctxt; | 255 | struct hfi1_ctxtdata *uctxt = fd->uctxt; |
255 | struct tid_group *grp, *gptr; | 256 | struct tid_group *grp, *gptr; |
256 | 257 | ||
258 | if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags)) | ||
259 | return 0; | ||
257 | /* | 260 | /* |
258 | * The notifier would have been removed when the process'es mm | 261 | * The notifier would have been removed when the process'es mm |
259 | * was freed. | 262 | * was freed. |
@@ -899,7 +902,7 @@ static int unprogram_rcvarray(struct file *fp, u32 tidinfo, | |||
899 | if (!node || node->rcventry != (uctxt->expected_base + rcventry)) | 902 | if (!node || node->rcventry != (uctxt->expected_base + rcventry)) |
900 | return -EBADF; | 903 | return -EBADF; |
901 | if (HFI1_CAP_IS_USET(TID_UNMAP)) | 904 | if (HFI1_CAP_IS_USET(TID_UNMAP)) |
902 | mmu_rb_remove(&fd->tid_rb_root, &node->mmu, false); | 905 | mmu_rb_remove(&fd->tid_rb_root, &node->mmu, NULL); |
903 | else | 906 | else |
904 | hfi1_mmu_rb_remove(&fd->tid_rb_root, &node->mmu); | 907 | hfi1_mmu_rb_remove(&fd->tid_rb_root, &node->mmu); |
905 | 908 | ||
@@ -965,7 +968,7 @@ static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt, | |||
965 | continue; | 968 | continue; |
966 | if (HFI1_CAP_IS_USET(TID_UNMAP)) | 969 | if (HFI1_CAP_IS_USET(TID_UNMAP)) |
967 | mmu_rb_remove(&fd->tid_rb_root, | 970 | mmu_rb_remove(&fd->tid_rb_root, |
968 | &node->mmu, false); | 971 | &node->mmu, NULL); |
969 | else | 972 | else |
970 | hfi1_mmu_rb_remove(&fd->tid_rb_root, | 973 | hfi1_mmu_rb_remove(&fd->tid_rb_root, |
971 | &node->mmu); | 974 | &node->mmu); |
@@ -1032,7 +1035,7 @@ static int mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *node) | |||
1032 | } | 1035 | } |
1033 | 1036 | ||
1034 | static void mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node, | 1037 | static void mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node, |
1035 | bool notifier) | 1038 | struct mm_struct *mm) |
1036 | { | 1039 | { |
1037 | struct hfi1_filedata *fdata = | 1040 | struct hfi1_filedata *fdata = |
1038 | container_of(root, struct hfi1_filedata, tid_rb_root); | 1041 | container_of(root, struct hfi1_filedata, tid_rb_root); |
diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c index ab6b6a42000f..d53a659548e0 100644 --- a/drivers/staging/rdma/hfi1/user_sdma.c +++ b/drivers/staging/rdma/hfi1/user_sdma.c | |||
@@ -278,7 +278,8 @@ static inline void pq_update(struct hfi1_user_sdma_pkt_q *); | |||
278 | static void user_sdma_free_request(struct user_sdma_request *, bool); | 278 | static void user_sdma_free_request(struct user_sdma_request *, bool); |
279 | static int pin_vector_pages(struct user_sdma_request *, | 279 | static int pin_vector_pages(struct user_sdma_request *, |
280 | struct user_sdma_iovec *); | 280 | struct user_sdma_iovec *); |
281 | static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned); | 281 | static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned, |
282 | unsigned); | ||
282 | static int check_header_template(struct user_sdma_request *, | 283 | static int check_header_template(struct user_sdma_request *, |
283 | struct hfi1_pkt_header *, u32, u32); | 284 | struct hfi1_pkt_header *, u32, u32); |
284 | static int set_txreq_header(struct user_sdma_request *, | 285 | static int set_txreq_header(struct user_sdma_request *, |
@@ -299,7 +300,8 @@ static int defer_packet_queue( | |||
299 | static void activate_packet_queue(struct iowait *, int); | 300 | static void activate_packet_queue(struct iowait *, int); |
300 | static bool sdma_rb_filter(struct mmu_rb_node *, unsigned long, unsigned long); | 301 | static bool sdma_rb_filter(struct mmu_rb_node *, unsigned long, unsigned long); |
301 | static int sdma_rb_insert(struct rb_root *, struct mmu_rb_node *); | 302 | static int sdma_rb_insert(struct rb_root *, struct mmu_rb_node *); |
302 | static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *, bool); | 303 | static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *, |
304 | struct mm_struct *); | ||
303 | static int sdma_rb_invalidate(struct rb_root *, struct mmu_rb_node *); | 305 | static int sdma_rb_invalidate(struct rb_root *, struct mmu_rb_node *); |
304 | 306 | ||
305 | static struct mmu_rb_ops sdma_rb_ops = { | 307 | static struct mmu_rb_ops sdma_rb_ops = { |
@@ -1063,8 +1065,10 @@ static int pin_vector_pages(struct user_sdma_request *req, | |||
1063 | rb_node = hfi1_mmu_rb_search(&pq->sdma_rb_root, | 1065 | rb_node = hfi1_mmu_rb_search(&pq->sdma_rb_root, |
1064 | (unsigned long)iovec->iov.iov_base, | 1066 | (unsigned long)iovec->iov.iov_base, |
1065 | iovec->iov.iov_len); | 1067 | iovec->iov.iov_len); |
1066 | if (rb_node) | 1068 | if (rb_node && !IS_ERR(rb_node)) |
1067 | node = container_of(rb_node, struct sdma_mmu_node, rb); | 1069 | node = container_of(rb_node, struct sdma_mmu_node, rb); |
1070 | else | ||
1071 | rb_node = NULL; | ||
1068 | 1072 | ||
1069 | if (!node) { | 1073 | if (!node) { |
1070 | node = kzalloc(sizeof(*node), GFP_KERNEL); | 1074 | node = kzalloc(sizeof(*node), GFP_KERNEL); |
@@ -1107,7 +1111,8 @@ retry: | |||
1107 | goto bail; | 1111 | goto bail; |
1108 | } | 1112 | } |
1109 | if (pinned != npages) { | 1113 | if (pinned != npages) { |
1110 | unpin_vector_pages(current->mm, pages, pinned); | 1114 | unpin_vector_pages(current->mm, pages, node->npages, |
1115 | pinned); | ||
1111 | ret = -EFAULT; | 1116 | ret = -EFAULT; |
1112 | goto bail; | 1117 | goto bail; |
1113 | } | 1118 | } |
@@ -1147,9 +1152,9 @@ bail: | |||
1147 | } | 1152 | } |
1148 | 1153 | ||
1149 | static void unpin_vector_pages(struct mm_struct *mm, struct page **pages, | 1154 | static void unpin_vector_pages(struct mm_struct *mm, struct page **pages, |
1150 | unsigned npages) | 1155 | unsigned start, unsigned npages) |
1151 | { | 1156 | { |
1152 | hfi1_release_user_pages(mm, pages, npages, 0); | 1157 | hfi1_release_user_pages(mm, pages + start, npages, 0); |
1153 | kfree(pages); | 1158 | kfree(pages); |
1154 | } | 1159 | } |
1155 | 1160 | ||
@@ -1502,7 +1507,7 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin) | |||
1502 | &req->pq->sdma_rb_root, | 1507 | &req->pq->sdma_rb_root, |
1503 | (unsigned long)req->iovs[i].iov.iov_base, | 1508 | (unsigned long)req->iovs[i].iov.iov_base, |
1504 | req->iovs[i].iov.iov_len); | 1509 | req->iovs[i].iov.iov_len); |
1505 | if (!mnode) | 1510 | if (!mnode || IS_ERR(mnode)) |
1506 | continue; | 1511 | continue; |
1507 | 1512 | ||
1508 | node = container_of(mnode, struct sdma_mmu_node, rb); | 1513 | node = container_of(mnode, struct sdma_mmu_node, rb); |
@@ -1547,7 +1552,7 @@ static int sdma_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode) | |||
1547 | } | 1552 | } |
1548 | 1553 | ||
1549 | static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode, | 1554 | static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode, |
1550 | bool notifier) | 1555 | struct mm_struct *mm) |
1551 | { | 1556 | { |
1552 | struct sdma_mmu_node *node = | 1557 | struct sdma_mmu_node *node = |
1553 | container_of(mnode, struct sdma_mmu_node, rb); | 1558 | container_of(mnode, struct sdma_mmu_node, rb); |
@@ -1557,14 +1562,20 @@ static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode, | |||
1557 | node->pq->n_locked -= node->npages; | 1562 | node->pq->n_locked -= node->npages; |
1558 | spin_unlock(&node->pq->evict_lock); | 1563 | spin_unlock(&node->pq->evict_lock); |
1559 | 1564 | ||
1560 | unpin_vector_pages(notifier ? NULL : current->mm, node->pages, | 1565 | /* |
1566 | * If mm is set, we are being called by the MMU notifier and we | ||
1567 | * should not pass a mm_struct to unpin_vector_page(). This is to | ||
1568 | * prevent a deadlock when hfi1_release_user_pages() attempts to | ||
1569 | * take the mmap_sem, which the MMU notifier has already taken. | ||
1570 | */ | ||
1571 | unpin_vector_pages(mm ? NULL : current->mm, node->pages, 0, | ||
1561 | node->npages); | 1572 | node->npages); |
1562 | /* | 1573 | /* |
1563 | * If called by the MMU notifier, we have to adjust the pinned | 1574 | * If called by the MMU notifier, we have to adjust the pinned |
1564 | * page count ourselves. | 1575 | * page count ourselves. |
1565 | */ | 1576 | */ |
1566 | if (notifier) | 1577 | if (mm) |
1567 | current->mm->pinned_vm -= node->npages; | 1578 | mm->pinned_vm -= node->npages; |
1568 | kfree(node); | 1579 | kfree(node); |
1569 | } | 1580 | } |
1570 | 1581 | ||
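The extra start parameter to unpin_vector_pages() matters on the partial-pin failure path: the retry pins new pages into pages[node->npages..], so only that freshly pinned slice must be released, while teardown and the MMU notifier release the whole array from 0 (and, per the added comment, pass mm so the release path never tries to take the mmap_sem the notifier already holds). A trivial sketch of the slice arithmetic, with release_pages() as a stand-in for hfi1_release_user_pages():

#include <stdio.h>

/* stand-in for hfi1_release_user_pages(): just report which slots go away */
static void release_pages(int *pages, unsigned int start, unsigned int npages)
{
	for (unsigned int i = 0; i < npages; i++)
		printf("releasing slot %u (page %d)\n", start + i, pages[start + i]);
}

int main(void)
{
	int pages[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	unsigned int already_pinned = 3;	/* node->npages before the retry */
	unsigned int newly_pinned = 2;		/* what the failed retry managed to pin */

	/* failure path: undo only the slice the retry added, keep the rest pinned */
	release_pages(pages, already_pinned, newly_pinned);
	return 0;
}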
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c index 36d07295f8e3..5e820b541506 100644 --- a/drivers/thermal/hisi_thermal.c +++ b/drivers/thermal/hisi_thermal.c | |||
@@ -68,12 +68,12 @@ static inline int _step_to_temp(int step) | |||
68 | * Every step equals (1 * 200) / 255 celsius, and finally | 68 | * Every step equals (1 * 200) / 255 celsius, and finally |
69 | * need convert to millicelsius. | 69 | * need convert to millicelsius. |
70 | */ | 70 | */ |
71 | return (HISI_TEMP_BASE + (step * 200 / 255)) * 1000; | 71 | return (HISI_TEMP_BASE * 1000 + (step * 200000 / 255)); |
72 | } | 72 | } |
73 | 73 | ||
74 | static inline long _temp_to_step(long temp) | 74 | static inline long _temp_to_step(long temp) |
75 | { | 75 | { |
76 | return ((temp / 1000 - HISI_TEMP_BASE) * 255 / 200); | 76 | return ((temp - HISI_TEMP_BASE * 1000) * 255) / 200000; |
77 | } | 77 | } |
78 | 78 | ||
79 | static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data, | 79 | static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data, |
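The hisi_thermal helpers are rewritten to keep the arithmetic in millicelsius, so the step value is scaled by 200000/255 before anything is truncated instead of losing up to roughly 0.8 degrees C per step to the old /255 integer division. A standalone comparison of both forms; HISI_TEMP_BASE is taken as -60 degrees C here, which is an assumption about the sensor base rather than something the hunk shows:

#include <stdio.h>

#define HISI_TEMP_BASE (-60)	/* assumed sensor base in degrees C, demo only */

static long step_to_temp_old(long step)
{
	return (HISI_TEMP_BASE + (step * 200 / 255)) * 1000;
}

static long step_to_temp_new(long step)
{
	return HISI_TEMP_BASE * 1000 + (step * 200000 / 255);
}

int main(void)
{
	for (long step = 0; step <= 255; step += 50)
		printf("step %3ld: old %7ld mC, new %7ld mC\n",
		       step, step_to_temp_old(step), step_to_temp_new(step));
	return 0;
}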
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index f1db49625555..5133cd1e10b7 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c | |||
@@ -959,7 +959,7 @@ static DEVICE_ATTR(sustainable_power, S_IWUSR | S_IRUGO, sustainable_power_show, | |||
959 | struct thermal_zone_device *tz = to_thermal_zone(dev); \ | 959 | struct thermal_zone_device *tz = to_thermal_zone(dev); \ |
960 | \ | 960 | \ |
961 | if (tz->tzp) \ | 961 | if (tz->tzp) \ |
962 | return sprintf(buf, "%u\n", tz->tzp->name); \ | 962 | return sprintf(buf, "%d\n", tz->tzp->name); \ |
963 | else \ | 963 | else \ |
964 | return -EIO; \ | 964 | return -EIO; \ |
965 | } \ | 965 | } \ |
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index 0058d9fbf931..cf0dc51a2690 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c | |||
@@ -626,7 +626,7 @@ static int pty_unix98_ioctl(struct tty_struct *tty, | |||
626 | */ | 626 | */ |
627 | 627 | ||
628 | static struct tty_struct *ptm_unix98_lookup(struct tty_driver *driver, | 628 | static struct tty_struct *ptm_unix98_lookup(struct tty_driver *driver, |
629 | struct inode *ptm_inode, int idx) | 629 | struct file *file, int idx) |
630 | { | 630 | { |
631 | /* Master must be open via /dev/ptmx */ | 631 | /* Master must be open via /dev/ptmx */ |
632 | return ERR_PTR(-EIO); | 632 | return ERR_PTR(-EIO); |
@@ -642,12 +642,12 @@ static struct tty_struct *ptm_unix98_lookup(struct tty_driver *driver, | |||
642 | */ | 642 | */ |
643 | 643 | ||
644 | static struct tty_struct *pts_unix98_lookup(struct tty_driver *driver, | 644 | static struct tty_struct *pts_unix98_lookup(struct tty_driver *driver, |
645 | struct inode *pts_inode, int idx) | 645 | struct file *file, int idx) |
646 | { | 646 | { |
647 | struct tty_struct *tty; | 647 | struct tty_struct *tty; |
648 | 648 | ||
649 | mutex_lock(&devpts_mutex); | 649 | mutex_lock(&devpts_mutex); |
650 | tty = devpts_get_priv(pts_inode); | 650 | tty = devpts_get_priv(file->f_path.dentry); |
651 | mutex_unlock(&devpts_mutex); | 651 | mutex_unlock(&devpts_mutex); |
652 | /* Master must be open before slave */ | 652 | /* Master must be open before slave */ |
653 | if (!tty) | 653 | if (!tty) |
@@ -722,7 +722,7 @@ static int ptmx_open(struct inode *inode, struct file *filp) | |||
722 | { | 722 | { |
723 | struct pts_fs_info *fsi; | 723 | struct pts_fs_info *fsi; |
724 | struct tty_struct *tty; | 724 | struct tty_struct *tty; |
725 | struct inode *slave_inode; | 725 | struct dentry *dentry; |
726 | int retval; | 726 | int retval; |
727 | int index; | 727 | int index; |
728 | 728 | ||
@@ -769,14 +769,12 @@ static int ptmx_open(struct inode *inode, struct file *filp) | |||
769 | 769 | ||
770 | tty_add_file(tty, filp); | 770 | tty_add_file(tty, filp); |
771 | 771 | ||
772 | slave_inode = devpts_pty_new(fsi, | 772 | dentry = devpts_pty_new(fsi, index, tty->link); |
773 | MKDEV(UNIX98_PTY_SLAVE_MAJOR, index), index, | 773 | if (IS_ERR(dentry)) { |
774 | tty->link); | 774 | retval = PTR_ERR(dentry); |
775 | if (IS_ERR(slave_inode)) { | ||
776 | retval = PTR_ERR(slave_inode); | ||
777 | goto err_release; | 775 | goto err_release; |
778 | } | 776 | } |
779 | tty->link->driver_data = slave_inode; | 777 | tty->link->driver_data = dentry; |
780 | 778 | ||
781 | retval = ptm_driver->ops->open(tty, filp); | 779 | retval = ptm_driver->ops->open(tty, filp); |
782 | if (retval) | 780 | if (retval) |
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 9b04d72e752e..24d5491ef0da 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c | |||
@@ -1367,12 +1367,12 @@ static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p) | |||
1367 | * Locking: tty_mutex must be held. If the tty is found, bump the tty kref. | 1367 | * Locking: tty_mutex must be held. If the tty is found, bump the tty kref. |
1368 | */ | 1368 | */ |
1369 | static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver, | 1369 | static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver, |
1370 | struct inode *inode, int idx) | 1370 | struct file *file, int idx) |
1371 | { | 1371 | { |
1372 | struct tty_struct *tty; | 1372 | struct tty_struct *tty; |
1373 | 1373 | ||
1374 | if (driver->ops->lookup) | 1374 | if (driver->ops->lookup) |
1375 | tty = driver->ops->lookup(driver, inode, idx); | 1375 | tty = driver->ops->lookup(driver, file, idx); |
1376 | else | 1376 | else |
1377 | tty = driver->ttys[idx]; | 1377 | tty = driver->ttys[idx]; |
1378 | 1378 | ||
@@ -2040,7 +2040,7 @@ static struct tty_struct *tty_open_by_driver(dev_t device, struct inode *inode, | |||
2040 | } | 2040 | } |
2041 | 2041 | ||
2042 | /* check whether we're reopening an existing tty */ | 2042 | /* check whether we're reopening an existing tty */ |
2043 | tty = tty_driver_lookup_tty(driver, inode, index); | 2043 | tty = tty_driver_lookup_tty(driver, filp, index); |
2044 | if (IS_ERR(tty)) { | 2044 | if (IS_ERR(tty)) { |
2045 | mutex_unlock(&tty_mutex); | 2045 | mutex_unlock(&tty_mutex); |
2046 | goto out; | 2046 | goto out; |
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 5c802d47892c..ca6bfddaacad 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c | |||
@@ -1006,7 +1006,7 @@ struct virtqueue *vring_create_virtqueue( | |||
1006 | const char *name) | 1006 | const char *name) |
1007 | { | 1007 | { |
1008 | struct virtqueue *vq; | 1008 | struct virtqueue *vq; |
1009 | void *queue; | 1009 | void *queue = NULL; |
1010 | dma_addr_t dma_addr; | 1010 | dma_addr_t dma_addr; |
1011 | size_t queue_size_in_bytes; | 1011 | size_t queue_size_in_bytes; |
1012 | struct vring vring; | 1012 | struct vring vring; |
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 9781e0dd59d6..d46839f51e73 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c | |||
@@ -151,6 +151,8 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq); | |||
151 | static void balloon_process(struct work_struct *work); | 151 | static void balloon_process(struct work_struct *work); |
152 | static DECLARE_DELAYED_WORK(balloon_worker, balloon_process); | 152 | static DECLARE_DELAYED_WORK(balloon_worker, balloon_process); |
153 | 153 | ||
154 | static void release_memory_resource(struct resource *resource); | ||
155 | |||
154 | /* When ballooning out (allocating memory to return to Xen) we don't really | 156 | /* When ballooning out (allocating memory to return to Xen) we don't really |
155 | want the kernel to try too hard since that can trigger the oom killer. */ | 157 | want the kernel to try too hard since that can trigger the oom killer. */ |
156 | #define GFP_BALLOON \ | 158 | #define GFP_BALLOON \ |
@@ -267,6 +269,20 @@ static struct resource *additional_memory_resource(phys_addr_t size) | |||
267 | return NULL; | 269 | return NULL; |
268 | } | 270 | } |
269 | 271 | ||
272 | #ifdef CONFIG_SPARSEMEM | ||
273 | { | ||
274 | unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT); | ||
275 | unsigned long pfn = res->start >> PAGE_SHIFT; | ||
276 | |||
277 | if (pfn > limit) { | ||
278 | pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n", | ||
279 | pfn, limit); | ||
280 | release_memory_resource(res); | ||
281 | return NULL; | ||
282 | } | ||
283 | } | ||
284 | #endif | ||
285 | |||
270 | return res; | 286 | return res; |
271 | } | 287 | } |
272 | 288 | ||
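The xen balloon check compares the hotplugged resource's start pfn against the highest pfn a SPARSEMEM kernel can represent, 1 << (MAX_PHYSMEM_BITS - PAGE_SHIFT), and backs the resource out instead of registering memory the kernel could never address. A standalone sketch of the arithmetic with assumed values (MAX_PHYSMEM_BITS = 46 and PAGE_SHIFT = 12 are typical x86-64 numbers, not taken from this hunk):

#include <stdio.h>

#define MAX_PHYSMEM_BITS 46	/* assumed: typical x86-64 SPARSEMEM value */
#define PAGE_SHIFT 12		/* assumed: 4 KiB pages */

int main(void)
{
	unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
	unsigned long long res_start = 1ULL << 47;	/* hypothetical resource start */
	unsigned long pfn = res_start >> PAGE_SHIFT;

	printf("addressable pfns: %lu\n", limit);
	if (pfn > limit)
		printf("resource at pfn %lu is beyond addressable RAM\n", pfn);
	return 0;
}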
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c index 38272ad24551..f4edd6df3df2 100644 --- a/drivers/xen/evtchn.c +++ b/drivers/xen/evtchn.c | |||
@@ -316,7 +316,6 @@ static int evtchn_resize_ring(struct per_user_data *u) | |||
316 | { | 316 | { |
317 | unsigned int new_size; | 317 | unsigned int new_size; |
318 | evtchn_port_t *new_ring, *old_ring; | 318 | evtchn_port_t *new_ring, *old_ring; |
319 | unsigned int p, c; | ||
320 | 319 | ||
321 | /* | 320 | /* |
322 | * Ensure the ring is large enough to capture all possible | 321 | * Ensure the ring is large enough to capture all possible |
@@ -346,20 +345,17 @@ static int evtchn_resize_ring(struct per_user_data *u) | |||
346 | /* | 345 | /* |
347 | * Copy the old ring contents to the new ring. | 346 | * Copy the old ring contents to the new ring. |
348 | * | 347 | * |
349 | * If the ring contents crosses the end of the current ring, | 348 | * To take care of wrapping, a full ring, and the new index |
350 | * it needs to be copied in two chunks. | 349 | * pointing into the second half, simply copy the old contents |
350 | * twice. | ||
351 | * | 351 | * |
352 | * +---------+ +------------------+ | 352 | * +---------+ +------------------+ |
353 | * |34567 12| -> | 1234567 | | 353 | * |34567 12| -> |34567 1234567 12| |
354 | * +-----p-c-+ +------------------+ | 354 | * +-----p-c-+ +-------c------p---+ |
355 | */ | 355 | */ |
356 | p = evtchn_ring_offset(u, u->ring_prod); | 356 | memcpy(new_ring, old_ring, u->ring_size * sizeof(*u->ring)); |
357 | c = evtchn_ring_offset(u, u->ring_cons); | 357 | memcpy(new_ring + u->ring_size, old_ring, |
358 | if (p < c) { | 358 | u->ring_size * sizeof(*u->ring)); |
359 | memcpy(new_ring + c, u->ring + c, (u->ring_size - c) * sizeof(*u->ring)); | ||
360 | memcpy(new_ring + u->ring_size, u->ring, p * sizeof(*u->ring)); | ||
361 | } else | ||
362 | memcpy(new_ring + c, u->ring + c, (p - c) * sizeof(*u->ring)); | ||
363 | 359 | ||
364 | u->ring = new_ring; | 360 | u->ring = new_ring; |
365 | u->ring_size = new_size; | 361 | u->ring_size = new_size; |
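The evtchn resize simplification leans on the new ring being at least twice the old one: copying the old contents twice back-to-back guarantees that, for any free-running counter idx, new_ring[idx & (new_size - 1)] holds the same entry as old_ring[idx & (old_size - 1)], so the wrapped [cons, prod) window stays readable without recomputing offsets. A standalone check of that invariant with a small ring:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int old_size = 8, new_size = 16;	/* sizes are powers of two */
	int old_ring[8] = { 0 }, new_ring[16];
	unsigned int cons = 14, prod = 19;	/* free-running counters, ring wrapped */
	unsigned int i;

	/* fill the old ring as the producer would have: the entry for counter
	 * idx lives at idx & (old_size - 1) */
	for (i = cons; i != prod; i++)
		old_ring[i & (old_size - 1)] = (int)i;

	/* the simplified resize: copy the old contents twice */
	memcpy(new_ring, old_ring, old_size * sizeof(*old_ring));
	memcpy(new_ring + old_size, old_ring, old_size * sizeof(*old_ring));

	/* the consumer keeps its counters, now masked by the new size */
	for (i = cons; i != prod; i++)
		printf("counter %u -> %d\n", i, new_ring[i & (new_size - 1)]);
	return 0;
}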
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 541ead4d8965..85b8517f17a0 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c | |||
@@ -386,9 +386,7 @@ void ceph_put_mds_session(struct ceph_mds_session *s) | |||
386 | atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1); | 386 | atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1); |
387 | if (atomic_dec_and_test(&s->s_ref)) { | 387 | if (atomic_dec_and_test(&s->s_ref)) { |
388 | if (s->s_auth.authorizer) | 388 | if (s->s_auth.authorizer) |
389 | ceph_auth_destroy_authorizer( | 389 | ceph_auth_destroy_authorizer(s->s_auth.authorizer); |
390 | s->s_mdsc->fsc->client->monc.auth, | ||
391 | s->s_auth.authorizer); | ||
392 | kfree(s); | 390 | kfree(s); |
393 | } | 391 | } |
394 | } | 392 | } |
@@ -3900,7 +3898,7 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con, | |||
3900 | struct ceph_auth_handshake *auth = &s->s_auth; | 3898 | struct ceph_auth_handshake *auth = &s->s_auth; |
3901 | 3899 | ||
3902 | if (force_new && auth->authorizer) { | 3900 | if (force_new && auth->authorizer) { |
3903 | ceph_auth_destroy_authorizer(ac, auth->authorizer); | 3901 | ceph_auth_destroy_authorizer(auth->authorizer); |
3904 | auth->authorizer = NULL; | 3902 | auth->authorizer = NULL; |
3905 | } | 3903 | } |
3906 | if (!auth->authorizer) { | 3904 | if (!auth->authorizer) { |
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 0af8e7d70d27..0b2954d7172d 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c | |||
@@ -604,8 +604,7 @@ void devpts_put_ref(struct pts_fs_info *fsi) | |||
604 | * | 604 | * |
605 | * The created inode is returned. Remove it from /dev/pts/ by devpts_pty_kill. | 605 | * The created inode is returned. Remove it from /dev/pts/ by devpts_pty_kill. |
606 | */ | 606 | */ |
607 | struct inode *devpts_pty_new(struct pts_fs_info *fsi, dev_t device, int index, | 607 | struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv) |
608 | void *priv) | ||
609 | { | 608 | { |
610 | struct dentry *dentry; | 609 | struct dentry *dentry; |
611 | struct super_block *sb; | 610 | struct super_block *sb; |
@@ -629,25 +628,21 @@ struct inode *devpts_pty_new(struct pts_fs_info *fsi, dev_t device, int index, | |||
629 | inode->i_uid = opts->setuid ? opts->uid : current_fsuid(); | 628 | inode->i_uid = opts->setuid ? opts->uid : current_fsuid(); |
630 | inode->i_gid = opts->setgid ? opts->gid : current_fsgid(); | 629 | inode->i_gid = opts->setgid ? opts->gid : current_fsgid(); |
631 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; | 630 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; |
632 | init_special_inode(inode, S_IFCHR|opts->mode, device); | 631 | init_special_inode(inode, S_IFCHR|opts->mode, MKDEV(UNIX98_PTY_SLAVE_MAJOR, index)); |
633 | inode->i_private = priv; | ||
634 | 632 | ||
635 | sprintf(s, "%d", index); | 633 | sprintf(s, "%d", index); |
636 | 634 | ||
637 | inode_lock(d_inode(root)); | ||
638 | |||
639 | dentry = d_alloc_name(root, s); | 635 | dentry = d_alloc_name(root, s); |
640 | if (dentry) { | 636 | if (dentry) { |
637 | dentry->d_fsdata = priv; | ||
641 | d_add(dentry, inode); | 638 | d_add(dentry, inode); |
642 | fsnotify_create(d_inode(root), dentry); | 639 | fsnotify_create(d_inode(root), dentry); |
643 | } else { | 640 | } else { |
644 | iput(inode); | 641 | iput(inode); |
645 | inode = ERR_PTR(-ENOMEM); | 642 | dentry = ERR_PTR(-ENOMEM); |
646 | } | 643 | } |
647 | 644 | ||
648 | inode_unlock(d_inode(root)); | 645 | return dentry; |
649 | |||
650 | return inode; | ||
651 | } | 646 | } |
652 | 647 | ||
653 | /** | 648 | /** |
@@ -656,24 +651,10 @@ struct inode *devpts_pty_new(struct pts_fs_info *fsi, dev_t device, int index, | |||
656 | * | 651 | * |
657 | * Returns whatever was passed as priv in devpts_pty_new for a given inode. | 652 | * Returns whatever was passed as priv in devpts_pty_new for a given inode. |
658 | */ | 653 | */ |
659 | void *devpts_get_priv(struct inode *pts_inode) | 654 | void *devpts_get_priv(struct dentry *dentry) |
660 | { | 655 | { |
661 | struct dentry *dentry; | 656 | WARN_ON_ONCE(dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC); |
662 | void *priv = NULL; | 657 | return dentry->d_fsdata; |
663 | |||
664 | BUG_ON(pts_inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR)); | ||
665 | |||
666 | /* Ensure dentry has not been deleted by devpts_pty_kill() */ | ||
667 | dentry = d_find_alias(pts_inode); | ||
668 | if (!dentry) | ||
669 | return NULL; | ||
670 | |||
671 | if (pts_inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC) | ||
672 | priv = pts_inode->i_private; | ||
673 | |||
674 | dput(dentry); | ||
675 | |||
676 | return priv; | ||
677 | } | 658 | } |
678 | 659 | ||
679 | /** | 660 | /** |
@@ -682,24 +663,14 @@ void *devpts_get_priv(struct inode *pts_inode) | |||
682 | * | 663 | * |
683 | * This is an inverse operation of devpts_pty_new. | 664 | * This is an inverse operation of devpts_pty_new. |
684 | */ | 665 | */ |
685 | void devpts_pty_kill(struct inode *inode) | 666 | void devpts_pty_kill(struct dentry *dentry) |
686 | { | 667 | { |
687 | struct super_block *sb = pts_sb_from_inode(inode); | 668 | WARN_ON_ONCE(dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC); |
688 | struct dentry *root = sb->s_root; | ||
689 | struct dentry *dentry; | ||
690 | 669 | ||
691 | BUG_ON(inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR)); | 670 | dentry->d_fsdata = NULL; |
692 | 671 | drop_nlink(dentry->d_inode); | |
693 | inode_lock(d_inode(root)); | ||
694 | |||
695 | dentry = d_find_alias(inode); | ||
696 | |||
697 | drop_nlink(inode); | ||
698 | d_delete(dentry); | 672 | d_delete(dentry); |
699 | dput(dentry); /* d_alloc_name() in devpts_pty_new() */ | 673 | dput(dentry); /* d_alloc_name() in devpts_pty_new() */ |
700 | dput(dentry); /* d_find_alias above */ | ||
701 | |||
702 | inode_unlock(d_inode(root)); | ||
703 | } | 674 | } |
704 | 675 | ||
705 | static int __init init_devpts_fs(void) | 676 | static int __init init_devpts_fs(void) |
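
After this rework the devpts interface is keyed on the dentry returned by devpts_pty_new() instead of on the slave inode, and the private pointer lives in dentry->d_fsdata. A hedged sketch of how a caller would be expected to drive the new API (helper names and the "driver_state" pointer are placeholders, not kernel code):

	#include <linux/devpts_fs.h>
	#include <linux/err.h>
	#include <linux/kernel.h>

	/* Allocate a pts node and later tear it down, both keyed on the dentry. */
	static struct dentry *make_slave_node(struct pts_fs_info *fsi, int index,
					      void *driver_state)
	{
		struct dentry *dentry = devpts_pty_new(fsi, index, driver_state);

		if (IS_ERR(dentry))
			return dentry;

		/* Lookups now recover the caller's state from the dentry. */
		WARN_ON(devpts_get_priv(dentry) != driver_state);
		return dentry;
	}

	static void destroy_slave_node(struct dentry *dentry)
	{
		devpts_pty_kill(dentry);	/* drops the node created above */
	}
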
diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 719924d6c706..dcad5e210525 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c | |||
@@ -1295,7 +1295,7 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii, | |||
1295 | 1295 | ||
1296 | *nbytesp = nbytes; | 1296 | *nbytesp = nbytes; |
1297 | 1297 | ||
1298 | return ret; | 1298 | return ret < 0 ? ret : 0; |
1299 | } | 1299 | } |
1300 | 1300 | ||
1301 | static inline int fuse_iter_npages(const struct iov_iter *ii_p) | 1301 | static inline int fuse_iter_npages(const struct iov_iter *ii_p) |
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 9aed6e202201..13719d3f35f8 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c | |||
@@ -2455,6 +2455,8 @@ int dlm_deref_lockres_done_handler(struct o2net_msg *msg, u32 len, void *data, | |||
2455 | 2455 | ||
2456 | spin_unlock(&dlm->spinlock); | 2456 | spin_unlock(&dlm->spinlock); |
2457 | 2457 | ||
2458 | ret = 0; | ||
2459 | |||
2458 | done: | 2460 | done: |
2459 | dlm_put(dlm); | 2461 | dlm_put(dlm); |
2460 | return ret; | 2462 | return ret; |
diff --git a/fs/pnode.c b/fs/pnode.c index c524fdddc7fb..99899705b105 100644 --- a/fs/pnode.c +++ b/fs/pnode.c | |||
@@ -198,7 +198,7 @@ static struct mount *next_group(struct mount *m, struct mount *origin) | |||
198 | 198 | ||
199 | /* all accesses are serialized by namespace_sem */ | 199 | /* all accesses are serialized by namespace_sem */ |
200 | static struct user_namespace *user_ns; | 200 | static struct user_namespace *user_ns; |
201 | static struct mount *last_dest, *last_source, *dest_master; | 201 | static struct mount *last_dest, *first_source, *last_source, *dest_master; |
202 | static struct mountpoint *mp; | 202 | static struct mountpoint *mp; |
203 | static struct hlist_head *list; | 203 | static struct hlist_head *list; |
204 | 204 | ||
@@ -221,20 +221,22 @@ static int propagate_one(struct mount *m) | |||
221 | type = CL_MAKE_SHARED; | 221 | type = CL_MAKE_SHARED; |
222 | } else { | 222 | } else { |
223 | struct mount *n, *p; | 223 | struct mount *n, *p; |
224 | bool done; | ||
224 | for (n = m; ; n = p) { | 225 | for (n = m; ; n = p) { |
225 | p = n->mnt_master; | 226 | p = n->mnt_master; |
226 | if (p == dest_master || IS_MNT_MARKED(p)) { | 227 | if (p == dest_master || IS_MNT_MARKED(p)) |
227 | while (last_dest->mnt_master != p) { | ||
228 | last_source = last_source->mnt_master; | ||
229 | last_dest = last_source->mnt_parent; | ||
230 | } | ||
231 | if (!peers(n, last_dest)) { | ||
232 | last_source = last_source->mnt_master; | ||
233 | last_dest = last_source->mnt_parent; | ||
234 | } | ||
235 | break; | 228 | break; |
236 | } | ||
237 | } | 229 | } |
230 | do { | ||
231 | struct mount *parent = last_source->mnt_parent; | ||
232 | if (last_source == first_source) | ||
233 | break; | ||
234 | done = parent->mnt_master == p; | ||
235 | if (done && peers(n, parent)) | ||
236 | break; | ||
237 | last_source = last_source->mnt_master; | ||
238 | } while (!done); | ||
239 | |||
238 | type = CL_SLAVE; | 240 | type = CL_SLAVE; |
239 | /* beginning of peer group among the slaves? */ | 241 | /* beginning of peer group among the slaves? */ |
240 | if (IS_MNT_SHARED(m)) | 242 | if (IS_MNT_SHARED(m)) |
@@ -286,6 +288,7 @@ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp, | |||
286 | */ | 288 | */ |
287 | user_ns = current->nsproxy->mnt_ns->user_ns; | 289 | user_ns = current->nsproxy->mnt_ns->user_ns; |
288 | last_dest = dest_mnt; | 290 | last_dest = dest_mnt; |
291 | first_source = source_mnt; | ||
289 | last_source = source_mnt; | 292 | last_source = source_mnt; |
290 | mp = dest_mp; | 293 | mp = dest_mp; |
291 | list = tree_list; | 294 | list = tree_list; |
diff --git a/fs/proc/base.c b/fs/proc/base.c index b1755b23893e..92e37e224cd2 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -955,7 +955,8 @@ static ssize_t environ_read(struct file *file, char __user *buf, | |||
955 | struct mm_struct *mm = file->private_data; | 955 | struct mm_struct *mm = file->private_data; |
956 | unsigned long env_start, env_end; | 956 | unsigned long env_start, env_end; |
957 | 957 | ||
958 | if (!mm) | 958 | /* Ensure the process spawned far enough to have an environment. */ |
959 | if (!mm || !mm->env_end) | ||
959 | return 0; | 960 | return 0; |
960 | 961 | ||
961 | page = (char *)__get_free_page(GFP_TEMPORARY); | 962 | page = (char *)__get_free_page(GFP_TEMPORARY); |
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 229cb546bee0..541583510cfb 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -1518,6 +1518,32 @@ static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma, | |||
1518 | return page; | 1518 | return page; |
1519 | } | 1519 | } |
1520 | 1520 | ||
1521 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
1522 | static struct page *can_gather_numa_stats_pmd(pmd_t pmd, | ||
1523 | struct vm_area_struct *vma, | ||
1524 | unsigned long addr) | ||
1525 | { | ||
1526 | struct page *page; | ||
1527 | int nid; | ||
1528 | |||
1529 | if (!pmd_present(pmd)) | ||
1530 | return NULL; | ||
1531 | |||
1532 | page = vm_normal_page_pmd(vma, addr, pmd); | ||
1533 | if (!page) | ||
1534 | return NULL; | ||
1535 | |||
1536 | if (PageReserved(page)) | ||
1537 | return NULL; | ||
1538 | |||
1539 | nid = page_to_nid(page); | ||
1540 | if (!node_isset(nid, node_states[N_MEMORY])) | ||
1541 | return NULL; | ||
1542 | |||
1543 | return page; | ||
1544 | } | ||
1545 | #endif | ||
1546 | |||
1521 | static int gather_pte_stats(pmd_t *pmd, unsigned long addr, | 1547 | static int gather_pte_stats(pmd_t *pmd, unsigned long addr, |
1522 | unsigned long end, struct mm_walk *walk) | 1548 | unsigned long end, struct mm_walk *walk) |
1523 | { | 1549 | { |
@@ -1527,14 +1553,14 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr, | |||
1527 | pte_t *orig_pte; | 1553 | pte_t *orig_pte; |
1528 | pte_t *pte; | 1554 | pte_t *pte; |
1529 | 1555 | ||
1556 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
1530 | ptl = pmd_trans_huge_lock(pmd, vma); | 1557 | ptl = pmd_trans_huge_lock(pmd, vma); |
1531 | if (ptl) { | 1558 | if (ptl) { |
1532 | pte_t huge_pte = *(pte_t *)pmd; | ||
1533 | struct page *page; | 1559 | struct page *page; |
1534 | 1560 | ||
1535 | page = can_gather_numa_stats(huge_pte, vma, addr); | 1561 | page = can_gather_numa_stats_pmd(*pmd, vma, addr); |
1536 | if (page) | 1562 | if (page) |
1537 | gather_stats(page, md, pte_dirty(huge_pte), | 1563 | gather_stats(page, md, pmd_dirty(*pmd), |
1538 | HPAGE_PMD_SIZE/PAGE_SIZE); | 1564 | HPAGE_PMD_SIZE/PAGE_SIZE); |
1539 | spin_unlock(ptl); | 1565 | spin_unlock(ptl); |
1540 | return 0; | 1566 | return 0; |
@@ -1542,6 +1568,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr, | |||
1542 | 1568 | ||
1543 | if (pmd_trans_unstable(pmd)) | 1569 | if (pmd_trans_unstable(pmd)) |
1544 | return 0; | 1570 | return 0; |
1571 | #endif | ||
1545 | orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); | 1572 | orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); |
1546 | do { | 1573 | do { |
1547 | struct page *page = can_gather_numa_stats(*pte, vma, addr); | 1574 | struct page *page = can_gather_numa_stats(*pte, vma, addr); |
diff --git a/fs/udf/super.c b/fs/udf/super.c index fa92fe839fda..36661acaf33b 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c | |||
@@ -919,14 +919,14 @@ static int udf_load_pvoldesc(struct super_block *sb, sector_t block) | |||
919 | #endif | 919 | #endif |
920 | } | 920 | } |
921 | 921 | ||
922 | ret = udf_CS0toUTF8(outstr, 31, pvoldesc->volIdent, 32); | 922 | ret = udf_dstrCS0toUTF8(outstr, 31, pvoldesc->volIdent, 32); |
923 | if (ret < 0) | 923 | if (ret < 0) |
924 | goto out_bh; | 924 | goto out_bh; |
925 | 925 | ||
926 | strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret); | 926 | strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret); |
927 | udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident); | 927 | udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident); |
928 | 928 | ||
929 | ret = udf_CS0toUTF8(outstr, 127, pvoldesc->volSetIdent, 128); | 929 | ret = udf_dstrCS0toUTF8(outstr, 127, pvoldesc->volSetIdent, 128); |
930 | if (ret < 0) | 930 | if (ret < 0) |
931 | goto out_bh; | 931 | goto out_bh; |
932 | 932 | ||
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h index 972b70625614..263829ef1873 100644 --- a/fs/udf/udfdecl.h +++ b/fs/udf/udfdecl.h | |||
@@ -212,7 +212,7 @@ extern int udf_get_filename(struct super_block *, const uint8_t *, int, | |||
212 | uint8_t *, int); | 212 | uint8_t *, int); |
213 | extern int udf_put_filename(struct super_block *, const uint8_t *, int, | 213 | extern int udf_put_filename(struct super_block *, const uint8_t *, int, |
214 | uint8_t *, int); | 214 | uint8_t *, int); |
215 | extern int udf_CS0toUTF8(uint8_t *, int, const uint8_t *, int); | 215 | extern int udf_dstrCS0toUTF8(uint8_t *, int, const uint8_t *, int); |
216 | 216 | ||
217 | /* ialloc.c */ | 217 | /* ialloc.c */ |
218 | extern void udf_free_inode(struct inode *); | 218 | extern void udf_free_inode(struct inode *); |
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c index 3ff42f4437f3..695389a4fc23 100644 --- a/fs/udf/unicode.c +++ b/fs/udf/unicode.c | |||
@@ -335,9 +335,21 @@ try_again: | |||
335 | return u_len; | 335 | return u_len; |
336 | } | 336 | } |
337 | 337 | ||
338 | int udf_CS0toUTF8(uint8_t *utf_o, int o_len, const uint8_t *ocu_i, int i_len) | 338 | int udf_dstrCS0toUTF8(uint8_t *utf_o, int o_len, |
339 | const uint8_t *ocu_i, int i_len) | ||
339 | { | 340 | { |
340 | return udf_name_from_CS0(utf_o, o_len, ocu_i, i_len, | 341 | int s_len = 0; |
342 | |||
343 | if (i_len > 0) { | ||
344 | s_len = ocu_i[i_len - 1]; | ||
345 | if (s_len >= i_len) { | ||
346 | pr_err("incorrect dstring lengths (%d/%d)\n", | ||
347 | s_len, i_len); | ||
348 | return -EINVAL; | ||
349 | } | ||
350 | } | ||
351 | |||
352 | return udf_name_from_CS0(utf_o, o_len, ocu_i, s_len, | ||
341 | udf_uni2char_utf8, 0); | 353 | udf_uni2char_utf8, 0); |
342 | } | 354 | } |
343 | 355 | ||
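
The renamed udf_dstrCS0toUTF8() treats its input as a UDF dstring: the on-disk field has a fixed size and the number of bytes actually used is recorded in the field's final byte, which is why the code reads ocu_i[i_len - 1] and rejects values that cannot fit in the field. A small, self-contained sketch of that length-byte convention (illustrative only, not the kernel routine):

	#include <stdio.h>

	/* Return the number of payload bytes in a fixed-size dstring-style
	 * field, or -1 if the recorded length cannot fit.  The last byte of
	 * the field stores the used length; the payload occupies the front. */
	static int dstring_payload_len(const unsigned char *field, int field_len)
	{
		int used;

		if (field_len <= 0)
			return 0;

		used = field[field_len - 1];
		if (used >= field_len)
			return -1;	/* corrupt: claims more bytes than the field holds */

		return used;
	}

	int main(void)
	{
		unsigned char vol_ident[32] = "\x08" "LinuxUDF";	/* CS0: compression ID + chars */

		vol_ident[31] = 9;	/* 1 compression byte + 8 characters used */
		printf("payload bytes: %d\n", dstring_payload_len(vol_ident, 32));
		return 0;
	}
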
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 14362a84c78e..3a932501d690 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h | |||
@@ -394,13 +394,13 @@ struct acpi_data_node { | |||
394 | 394 | ||
395 | static inline bool is_acpi_node(struct fwnode_handle *fwnode) | 395 | static inline bool is_acpi_node(struct fwnode_handle *fwnode) |
396 | { | 396 | { |
397 | return fwnode && (fwnode->type == FWNODE_ACPI | 397 | return !IS_ERR_OR_NULL(fwnode) && (fwnode->type == FWNODE_ACPI |
398 | || fwnode->type == FWNODE_ACPI_DATA); | 398 | || fwnode->type == FWNODE_ACPI_DATA); |
399 | } | 399 | } |
400 | 400 | ||
401 | static inline bool is_acpi_device_node(struct fwnode_handle *fwnode) | 401 | static inline bool is_acpi_device_node(struct fwnode_handle *fwnode) |
402 | { | 402 | { |
403 | return fwnode && fwnode->type == FWNODE_ACPI; | 403 | return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_ACPI; |
404 | } | 404 | } |
405 | 405 | ||
406 | static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode) | 406 | static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode) |
diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 21ee41b92e8a..f1d5c5acc8dd 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h | |||
@@ -171,12 +171,13 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl); | |||
171 | void bpf_register_map_type(struct bpf_map_type_list *tl); | 171 | void bpf_register_map_type(struct bpf_map_type_list *tl); |
172 | 172 | ||
173 | struct bpf_prog *bpf_prog_get(u32 ufd); | 173 | struct bpf_prog *bpf_prog_get(u32 ufd); |
174 | struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog); | ||
174 | void bpf_prog_put(struct bpf_prog *prog); | 175 | void bpf_prog_put(struct bpf_prog *prog); |
175 | void bpf_prog_put_rcu(struct bpf_prog *prog); | 176 | void bpf_prog_put_rcu(struct bpf_prog *prog); |
176 | 177 | ||
177 | struct bpf_map *bpf_map_get_with_uref(u32 ufd); | 178 | struct bpf_map *bpf_map_get_with_uref(u32 ufd); |
178 | struct bpf_map *__bpf_map_get(struct fd f); | 179 | struct bpf_map *__bpf_map_get(struct fd f); |
179 | void bpf_map_inc(struct bpf_map *map, bool uref); | 180 | struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref); |
180 | void bpf_map_put_with_uref(struct bpf_map *map); | 181 | void bpf_map_put_with_uref(struct bpf_map *map); |
181 | void bpf_map_put(struct bpf_map *map); | 182 | void bpf_map_put(struct bpf_map *map); |
182 | int bpf_map_precharge_memlock(u32 pages); | 183 | int bpf_map_precharge_memlock(u32 pages); |
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h index 260d78b587c4..1563265d2097 100644 --- a/include/linux/ceph/auth.h +++ b/include/linux/ceph/auth.h | |||
@@ -12,9 +12,12 @@ | |||
12 | */ | 12 | */ |
13 | 13 | ||
14 | struct ceph_auth_client; | 14 | struct ceph_auth_client; |
15 | struct ceph_authorizer; | ||
16 | struct ceph_msg; | 15 | struct ceph_msg; |
17 | 16 | ||
17 | struct ceph_authorizer { | ||
18 | void (*destroy)(struct ceph_authorizer *); | ||
19 | }; | ||
20 | |||
18 | struct ceph_auth_handshake { | 21 | struct ceph_auth_handshake { |
19 | struct ceph_authorizer *authorizer; | 22 | struct ceph_authorizer *authorizer; |
20 | void *authorizer_buf; | 23 | void *authorizer_buf; |
@@ -62,8 +65,6 @@ struct ceph_auth_client_ops { | |||
62 | struct ceph_auth_handshake *auth); | 65 | struct ceph_auth_handshake *auth); |
63 | int (*verify_authorizer_reply)(struct ceph_auth_client *ac, | 66 | int (*verify_authorizer_reply)(struct ceph_auth_client *ac, |
64 | struct ceph_authorizer *a, size_t len); | 67 | struct ceph_authorizer *a, size_t len); |
65 | void (*destroy_authorizer)(struct ceph_auth_client *ac, | ||
66 | struct ceph_authorizer *a); | ||
67 | void (*invalidate_authorizer)(struct ceph_auth_client *ac, | 68 | void (*invalidate_authorizer)(struct ceph_auth_client *ac, |
68 | int peer_type); | 69 | int peer_type); |
69 | 70 | ||
@@ -112,8 +113,7 @@ extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac); | |||
112 | extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac, | 113 | extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac, |
113 | int peer_type, | 114 | int peer_type, |
114 | struct ceph_auth_handshake *auth); | 115 | struct ceph_auth_handshake *auth); |
115 | extern void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac, | 116 | void ceph_auth_destroy_authorizer(struct ceph_authorizer *a); |
116 | struct ceph_authorizer *a); | ||
117 | extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac, | 117 | extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac, |
118 | int peer_type, | 118 | int peer_type, |
119 | struct ceph_auth_handshake *a); | 119 | struct ceph_auth_handshake *a); |
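
This header change moves authorizer destruction from a client-ops callback that needed the ceph_auth_client into a destroy hook embedded in struct ceph_authorizer itself, which is why the call sites earlier in the commit drop the ac argument. The generic helper can then reduce to invoking the embedded hook; a stand-alone sketch of that destructor-in-the-object pattern in plain C (names are illustrative):

	#include <stdio.h>
	#include <stdlib.h>

	/* The object carries its own destructor, so callers need no
	 * back-pointer to the subsystem that created it. */
	struct authorizer {
		void (*destroy)(struct authorizer *);
	};

	struct impl_authorizer {
		struct authorizer base;	/* first member, so the cast below is valid */
		char *ticket;
	};

	static void impl_destroy(struct authorizer *a)
	{
		struct impl_authorizer *ia = (struct impl_authorizer *)a;

		free(ia->ticket);
		free(ia);
	}

	/* Generic entry point: no auth-client argument needed any more. */
	static void destroy_authorizer(struct authorizer *a)
	{
		if (a)
			a->destroy(a);
	}

	int main(void)
	{
		struct impl_authorizer *ia = calloc(1, sizeof(*ia));

		ia->base.destroy = impl_destroy;
		destroy_authorizer(&ia->base);
		printf("authorizer destroyed via embedded hook\n");
		return 0;
	}
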
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 4343df806710..cbf460927c42 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h | |||
@@ -16,7 +16,6 @@ struct ceph_msg; | |||
16 | struct ceph_snap_context; | 16 | struct ceph_snap_context; |
17 | struct ceph_osd_request; | 17 | struct ceph_osd_request; |
18 | struct ceph_osd_client; | 18 | struct ceph_osd_client; |
19 | struct ceph_authorizer; | ||
20 | 19 | ||
21 | /* | 20 | /* |
22 | * completion callback for async writepages | 21 | * completion callback for async writepages |
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 3e39ae5bc799..5b17de62c962 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h | |||
@@ -444,6 +444,7 @@ struct cgroup_subsys { | |||
444 | int (*can_attach)(struct cgroup_taskset *tset); | 444 | int (*can_attach)(struct cgroup_taskset *tset); |
445 | void (*cancel_attach)(struct cgroup_taskset *tset); | 445 | void (*cancel_attach)(struct cgroup_taskset *tset); |
446 | void (*attach)(struct cgroup_taskset *tset); | 446 | void (*attach)(struct cgroup_taskset *tset); |
447 | void (*post_attach)(void); | ||
447 | int (*can_fork)(struct task_struct *task); | 448 | int (*can_fork)(struct task_struct *task); |
448 | void (*cancel_fork)(struct task_struct *task); | 449 | void (*cancel_fork)(struct task_struct *task); |
449 | void (*fork)(struct task_struct *task); | 450 | void (*fork)(struct task_struct *task); |
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index fea160ee5803..85a868ccb493 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h | |||
@@ -137,8 +137,6 @@ static inline void set_mems_allowed(nodemask_t nodemask) | |||
137 | task_unlock(current); | 137 | task_unlock(current); |
138 | } | 138 | } |
139 | 139 | ||
140 | extern void cpuset_post_attach_flush(void); | ||
141 | |||
142 | #else /* !CONFIG_CPUSETS */ | 140 | #else /* !CONFIG_CPUSETS */ |
143 | 141 | ||
144 | static inline bool cpusets_enabled(void) { return false; } | 142 | static inline bool cpusets_enabled(void) { return false; } |
@@ -245,10 +243,6 @@ static inline bool read_mems_allowed_retry(unsigned int seq) | |||
245 | return false; | 243 | return false; |
246 | } | 244 | } |
247 | 245 | ||
248 | static inline void cpuset_post_attach_flush(void) | ||
249 | { | ||
250 | } | ||
251 | |||
252 | #endif /* !CONFIG_CPUSETS */ | 246 | #endif /* !CONFIG_CPUSETS */ |
253 | 247 | ||
254 | #endif /* _LINUX_CPUSET_H */ | 248 | #endif /* _LINUX_CPUSET_H */ |
diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h index 358a4db72a27..5871f292b596 100644 --- a/include/linux/devpts_fs.h +++ b/include/linux/devpts_fs.h | |||
@@ -27,11 +27,11 @@ int devpts_new_index(struct pts_fs_info *); | |||
27 | void devpts_kill_index(struct pts_fs_info *, int); | 27 | void devpts_kill_index(struct pts_fs_info *, int); |
28 | 28 | ||
29 | /* mknod in devpts */ | 29 | /* mknod in devpts */ |
30 | struct inode *devpts_pty_new(struct pts_fs_info *, dev_t, int, void *); | 30 | struct dentry *devpts_pty_new(struct pts_fs_info *, int, void *); |
31 | /* get private structure */ | 31 | /* get private structure */ |
32 | void *devpts_get_priv(struct inode *pts_inode); | 32 | void *devpts_get_priv(struct dentry *); |
33 | /* unlink */ | 33 | /* unlink */ |
34 | void devpts_pty_kill(struct inode *inode); | 34 | void devpts_pty_kill(struct dentry *); |
35 | 35 | ||
36 | #endif | 36 | #endif |
37 | 37 | ||
diff --git a/include/linux/hash.h b/include/linux/hash.h index 1afde47e1528..79c52fa81cac 100644 --- a/include/linux/hash.h +++ b/include/linux/hash.h | |||
@@ -32,12 +32,28 @@ | |||
32 | #error Wordsize not 32 or 64 | 32 | #error Wordsize not 32 or 64 |
33 | #endif | 33 | #endif |
34 | 34 | ||
35 | /* | ||
36 | * The above primes are actively bad for hashing, since they are | ||
37 | * too sparse. The 32-bit one is mostly ok, the 64-bit one causes | ||
38 | * real problems. Besides, the "prime" part is pointless for the | ||
39 | * multiplicative hash. | ||
40 | * | ||
41 | * Although a random odd number will do, it turns out that the golden | ||
42 | * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice | ||
43 | * properties. | ||
44 | * | ||
45 | * These are the negative, (1 - phi) = (phi^2) = (3 - sqrt(5))/2. | ||
46 | * (See Knuth vol 3, section 6.4, exercise 9.) | ||
47 | */ | ||
48 | #define GOLDEN_RATIO_32 0x61C88647 | ||
49 | #define GOLDEN_RATIO_64 0x61C8864680B583EBull | ||
50 | |||
35 | static __always_inline u64 hash_64(u64 val, unsigned int bits) | 51 | static __always_inline u64 hash_64(u64 val, unsigned int bits) |
36 | { | 52 | { |
37 | u64 hash = val; | 53 | u64 hash = val; |
38 | 54 | ||
39 | #if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64 | 55 | #if BITS_PER_LONG == 64 |
40 | hash = hash * GOLDEN_RATIO_PRIME_64; | 56 | hash = hash * GOLDEN_RATIO_64; |
41 | #else | 57 | #else |
42 | /* Sigh, gcc can't optimise this alone like it does for 32 bits. */ | 58 | /* Sigh, gcc can't optimise this alone like it does for 32 bits. */ |
43 | u64 n = hash; | 59 | u64 n = hash; |
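
The comment added above is the whole argument: multiplicative (Fibonacci) hashing only needs a large odd multiplier with well-mixed bits, and 2^64/phi has that property, while the old sparse "prime" constants mixed the high bits poorly. A minimal stand-alone illustration of the scheme using the same 64-bit constant as the patch (the helper name is ours, not the kernel's):

	#include <stdint.h>
	#include <stdio.h>

	#define GOLDEN_RATIO_64 0x61C8864680B583EBull

	/* Multiplicative hash: multiply by ~2^64/phi and keep the top 'bits'
	 * bits, which is where the best-mixed part of the product ends up. */
	static inline uint64_t hash64(uint64_t val, unsigned int bits)
	{
		return (val * GOLDEN_RATIO_64) >> (64 - bits);
	}

	int main(void)
	{
		/* Nearby keys land in well-scattered 10-bit buckets. */
		for (uint64_t key = 0; key < 4; key++)
			printf("hash64(%llu, 10) = %llu\n",
			       (unsigned long long)key,
			       (unsigned long long)hash64(key, 10));
		return 0;
	}
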
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 7008623e24b1..d7b9e5346fba 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h | |||
@@ -152,6 +152,7 @@ static inline bool is_huge_zero_pmd(pmd_t pmd) | |||
152 | } | 152 | } |
153 | 153 | ||
154 | struct page *get_huge_zero_page(void); | 154 | struct page *get_huge_zero_page(void); |
155 | void put_huge_zero_page(void); | ||
155 | 156 | ||
156 | #else /* CONFIG_TRANSPARENT_HUGEPAGE */ | 157 | #else /* CONFIG_TRANSPARENT_HUGEPAGE */ |
157 | #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) | 158 | #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) |
@@ -208,6 +209,10 @@ static inline bool is_huge_zero_page(struct page *page) | |||
208 | return false; | 209 | return false; |
209 | } | 210 | } |
210 | 211 | ||
212 | static inline void put_huge_zero_page(void) | ||
213 | { | ||
214 | BUILD_BUG(); | ||
215 | } | ||
211 | 216 | ||
212 | static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma, | 217 | static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma, |
213 | unsigned long addr, pmd_t *pmd, int flags) | 218 | unsigned long addr, pmd_t *pmd, int flags) |
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index d5569734f672..548fd535fd02 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h | |||
@@ -28,6 +28,11 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb) | |||
28 | return (struct ethhdr *)skb_mac_header(skb); | 28 | return (struct ethhdr *)skb_mac_header(skb); |
29 | } | 29 | } |
30 | 30 | ||
31 | static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb) | ||
32 | { | ||
33 | return (struct ethhdr *)skb_inner_mac_header(skb); | ||
34 | } | ||
35 | |||
31 | int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); | 36 | int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); |
32 | 37 | ||
33 | extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len); | 38 | extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len); |
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index d026b190c530..d10ef06971b5 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h | |||
@@ -196,9 +196,11 @@ struct lock_list { | |||
196 | * We record lock dependency chains, so that we can cache them: | 196 | * We record lock dependency chains, so that we can cache them: |
197 | */ | 197 | */ |
198 | struct lock_chain { | 198 | struct lock_chain { |
199 | u8 irq_context; | 199 | /* see BUILD_BUG_ON()s in lookup_chain_cache() */ |
200 | u8 depth; | 200 | unsigned int irq_context : 2, |
201 | u16 base; | 201 | depth : 6, |
202 | base : 24; | ||
203 | /* 4 byte hole */ | ||
202 | struct hlist_node entry; | 204 | struct hlist_node entry; |
203 | u64 chain_key; | 205 | u64 chain_key; |
204 | }; | 206 | }; |
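
The struct lock_chain change packs the three small fields into one 32-bit bitfield so that base grows from 16 to 24 bits without enlarging the structure. A hedged stand-alone sketch of that packing with stand-in types (not the kernel struct, which also carries the hlist node and chain key):

	#include <stdint.h>
	#include <stdio.h>

	struct chain_old {
		uint8_t  irq_context;
		uint8_t  depth;
		uint16_t base;			/* at most 65535 */
	};

	struct chain_new {
		uint32_t irq_context : 2,
			 depth       : 6,
			 base        : 24;	/* now up to ~16M */
	};

	int main(void)
	{
		/* Same footprint, far more addressable chain entries. */
		printf("old: %zu bytes, new: %zu bytes\n",
		       sizeof(struct chain_old), sizeof(struct chain_new));
		return 0;
	}
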
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 8156e3c9239c..b3575f392492 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h | |||
@@ -392,6 +392,17 @@ enum { | |||
392 | MLX5_CAP_OFF_CMDIF_CSUM = 46, | 392 | MLX5_CAP_OFF_CMDIF_CSUM = 46, |
393 | }; | 393 | }; |
394 | 394 | ||
395 | enum { | ||
396 | /* | ||
397 | * Max wqe size for rdma read is 512 bytes, so this | ||
398 | * limits our max_sge_rd as the wqe needs to fit: | ||
399 | * - ctrl segment (16 bytes) | ||
400 | * - rdma segment (16 bytes) | ||
401 | * - scatter elements (16 bytes each) | ||
402 | */ | ||
403 | MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16 | ||
404 | }; | ||
405 | |||
395 | struct mlx5_inbox_hdr { | 406 | struct mlx5_inbox_hdr { |
396 | __be16 opcode; | 407 | __be16 opcode; |
397 | u8 rsvd[4]; | 408 | u8 rsvd[4]; |
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index dcd5ac8d3b14..369c837d40f5 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
@@ -519,8 +519,9 @@ enum mlx5_device_state { | |||
519 | }; | 519 | }; |
520 | 520 | ||
521 | enum mlx5_interface_state { | 521 | enum mlx5_interface_state { |
522 | MLX5_INTERFACE_STATE_DOWN, | 522 | MLX5_INTERFACE_STATE_DOWN = BIT(0), |
523 | MLX5_INTERFACE_STATE_UP, | 523 | MLX5_INTERFACE_STATE_UP = BIT(1), |
524 | MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2), | ||
524 | }; | 525 | }; |
525 | 526 | ||
526 | enum mlx5_pci_status { | 527 | enum mlx5_pci_status { |
@@ -544,7 +545,7 @@ struct mlx5_core_dev { | |||
544 | enum mlx5_device_state state; | 545 | enum mlx5_device_state state; |
545 | /* sync interface state */ | 546 | /* sync interface state */ |
546 | struct mutex intf_state_mutex; | 547 | struct mutex intf_state_mutex; |
547 | enum mlx5_interface_state interface_state; | 548 | unsigned long intf_state; |
548 | void (*event) (struct mlx5_core_dev *dev, | 549 | void (*event) (struct mlx5_core_dev *dev, |
549 | enum mlx5_dev_event event, | 550 | enum mlx5_dev_event event, |
550 | unsigned long param); | 551 | unsigned long param); |
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index a1d145abd4eb..b30250ab7604 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h | |||
@@ -54,9 +54,9 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, | |||
54 | int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, | 54 | int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, |
55 | enum mlx5_port_status *status); | 55 | enum mlx5_port_status *status); |
56 | 56 | ||
57 | int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port); | 57 | int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port); |
58 | void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port); | 58 | void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port); |
59 | void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu, | 59 | void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu, |
60 | u8 port); | 60 | u8 port); |
61 | 61 | ||
62 | int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev, | 62 | int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev, |
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index bd93e6323603..301da4a5e6bf 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h | |||
@@ -45,6 +45,8 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, | |||
45 | u16 vport, u8 *addr); | 45 | u16 vport, u8 *addr); |
46 | int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, | 46 | int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, |
47 | u16 vport, u8 *addr); | 47 | u16 vport, u8 *addr); |
48 | int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu); | ||
49 | int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu); | ||
48 | int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, | 50 | int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, |
49 | u64 *system_image_guid); | 51 | u64 *system_image_guid); |
50 | int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid); | 52 | int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid); |
diff --git a/include/linux/mm.h b/include/linux/mm.h index a55e5be0894f..864d7221de84 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -1031,6 +1031,8 @@ static inline bool page_mapped(struct page *page) | |||
1031 | page = compound_head(page); | 1031 | page = compound_head(page); |
1032 | if (atomic_read(compound_mapcount_ptr(page)) >= 0) | 1032 | if (atomic_read(compound_mapcount_ptr(page)) >= 0) |
1033 | return true; | 1033 | return true; |
1034 | if (PageHuge(page)) | ||
1035 | return false; | ||
1034 | for (i = 0; i < hpage_nr_pages(page); i++) { | 1036 | for (i = 0; i < hpage_nr_pages(page); i++) { |
1035 | if (atomic_read(&page[i]._mapcount) >= 0) | 1037 | if (atomic_read(&page[i]._mapcount) >= 0) |
1036 | return true; | 1038 | return true; |
@@ -1138,6 +1140,8 @@ struct zap_details { | |||
1138 | 1140 | ||
1139 | struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, | 1141 | struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, |
1140 | pte_t pte); | 1142 | pte_t pte); |
1143 | struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, | ||
1144 | pmd_t pmd); | ||
1141 | 1145 | ||
1142 | int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, | 1146 | int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, |
1143 | unsigned long size); | 1147 | unsigned long size); |
diff --git a/include/linux/net.h b/include/linux/net.h index 49175e4ced11..f840d77c6c31 100644 --- a/include/linux/net.h +++ b/include/linux/net.h | |||
@@ -246,7 +246,15 @@ do { \ | |||
246 | net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__) | 246 | net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__) |
247 | #define net_info_ratelimited(fmt, ...) \ | 247 | #define net_info_ratelimited(fmt, ...) \ |
248 | net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__) | 248 | net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__) |
249 | #if defined(DEBUG) | 249 | #if defined(CONFIG_DYNAMIC_DEBUG) |
250 | #define net_dbg_ratelimited(fmt, ...) \ | ||
251 | do { \ | ||
252 | DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ | ||
253 | if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \ | ||
254 | net_ratelimit()) \ | ||
255 | __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \ | ||
256 | } while (0) | ||
257 | #elif defined(DEBUG) | ||
250 | #define net_dbg_ratelimited(fmt, ...) \ | 258 | #define net_dbg_ratelimited(fmt, ...) \ |
251 | net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__) | 259 | net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__) |
252 | #else | 260 | #else |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 8395308a2445..b3c46b019ac1 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -4004,7 +4004,7 @@ netdev_features_t netif_skb_features(struct sk_buff *skb); | |||
4004 | 4004 | ||
4005 | static inline bool net_gso_ok(netdev_features_t features, int gso_type) | 4005 | static inline bool net_gso_ok(netdev_features_t features, int gso_type) |
4006 | { | 4006 | { |
4007 | netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT; | 4007 | netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT; |
4008 | 4008 | ||
4009 | /* check flags correspondence */ | 4009 | /* check flags correspondence */ |
4010 | BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT)); | 4010 | BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT)); |
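
The cast matters because gso_type is a plain int: once a GSO flag sits high enough, shifting it left by NETIF_F_GSO_SHIFT in 32-bit arithmetic loses the bit before the value is widened to netdev_features_t. A tiny demonstration of the difference (flag values and names are made up for the example):

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t features_t;
	#define GSO_SHIFT 16

	int main(void)
	{
		unsigned int gso_type = 1u << 17;	/* a high GSO type bit */

		/* Shifting in 32-bit arithmetic wraps the bit away ... */
		features_t wrong = gso_type << GSO_SHIFT;
		/* ... so widen first, then shift, as the patch does. */
		features_t right = (features_t)gso_type << GSO_SHIFT;

		printf("wrong = %#llx, right = %#llx\n",
		       (unsigned long long)wrong, (unsigned long long)right);
		return 0;
	}
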
diff --git a/include/linux/of.h b/include/linux/of.h index 7fcb681baadf..31758036787c 100644 --- a/include/linux/of.h +++ b/include/linux/of.h | |||
@@ -133,7 +133,7 @@ void of_core_init(void); | |||
133 | 133 | ||
134 | static inline bool is_of_node(struct fwnode_handle *fwnode) | 134 | static inline bool is_of_node(struct fwnode_handle *fwnode) |
135 | { | 135 | { |
136 | return fwnode && fwnode->type == FWNODE_OF; | 136 | return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_OF; |
137 | } | 137 | } |
138 | 138 | ||
139 | static inline struct device_node *to_of_node(struct fwnode_handle *fwnode) | 139 | static inline struct device_node *to_of_node(struct fwnode_handle *fwnode) |
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index f4ed4f1b0c77..6b052aa7b5b7 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h | |||
@@ -517,6 +517,27 @@ static inline int PageTransCompound(struct page *page) | |||
517 | } | 517 | } |
518 | 518 | ||
519 | /* | 519 | /* |
520 | * PageTransCompoundMap is the same as PageTransCompound, but it also | ||
521 | * guarantees the primary MMU has the entire compound page mapped | ||
522 | * through pmd_trans_huge, which in turn guarantees the secondary MMUs | ||
523 | * can also map the entire compound page. This allows the secondary | ||
524 | * MMUs to call get_user_pages() only once for each compound page and | ||
525 | * to immediately map the entire compound page with a single secondary | ||
526 | * MMU fault. If there will be a pmd split later, the secondary MMUs | ||
527 | * will get an update through the MMU notifier invalidation through | ||
528 | * split_huge_pmd(). | ||
529 | * | ||
530 | * Unlike PageTransCompound, this is safe to be called only while | ||
531 | * split_huge_pmd() cannot run from under us, like if protected by the | ||
532 | * MMU notifier, otherwise it may result in page->_mapcount < 0 false | ||
533 | * positives. | ||
534 | */ | ||
535 | static inline int PageTransCompoundMap(struct page *page) | ||
536 | { | ||
537 | return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0; | ||
538 | } | ||
539 | |||
540 | /* | ||
520 | * PageTransTail returns true for both transparent huge pages | 541 | * PageTransTail returns true for both transparent huge pages |
521 | * and hugetlbfs pages, so it should only be called when it's known | 542 | * and hugetlbfs pages, so it should only be called when it's known |
522 | * that hugetlbfs pages aren't involved. | 543 | * that hugetlbfs pages aren't involved. |
@@ -559,6 +580,7 @@ static inline int TestClearPageDoubleMap(struct page *page) | |||
559 | #else | 580 | #else |
560 | TESTPAGEFLAG_FALSE(TransHuge) | 581 | TESTPAGEFLAG_FALSE(TransHuge) |
561 | TESTPAGEFLAG_FALSE(TransCompound) | 582 | TESTPAGEFLAG_FALSE(TransCompound) |
583 | TESTPAGEFLAG_FALSE(TransCompoundMap) | ||
562 | TESTPAGEFLAG_FALSE(TransTail) | 584 | TESTPAGEFLAG_FALSE(TransTail) |
563 | TESTPAGEFLAG_FALSE(DoubleMap) | 585 | TESTPAGEFLAG_FALSE(DoubleMap) |
564 | TESTSETFLAG_FALSE(DoubleMap) | 586 | TESTSETFLAG_FALSE(DoubleMap) |
diff --git a/include/linux/swap.h b/include/linux/swap.h index 2b83359c19ca..0a4cd4703f40 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
@@ -533,6 +533,10 @@ static inline swp_entry_t get_swap_page(void) | |||
533 | #ifdef CONFIG_MEMCG | 533 | #ifdef CONFIG_MEMCG |
534 | static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg) | 534 | static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg) |
535 | { | 535 | { |
536 | /* Cgroup2 doesn't have per-cgroup swappiness */ | ||
537 | if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) | ||
538 | return vm_swappiness; | ||
539 | |||
536 | /* root ? */ | 540 | /* root ? */ |
537 | if (mem_cgroup_disabled() || !memcg->css.parent) | 541 | if (mem_cgroup_disabled() || !memcg->css.parent) |
538 | return vm_swappiness; | 542 | return vm_swappiness; |
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index 161052477f77..b742b5e47cc2 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h | |||
@@ -7,7 +7,7 @@ | |||
7 | * defined; unless noted otherwise, they are optional, and can be | 7 | * defined; unless noted otherwise, they are optional, and can be |
8 | * filled in with a null pointer. | 8 | * filled in with a null pointer. |
9 | * | 9 | * |
10 | * struct tty_struct * (*lookup)(struct tty_driver *self, int idx) | 10 | * struct tty_struct * (*lookup)(struct tty_driver *self, struct file *, int idx) |
11 | * | 11 | * |
12 | * Return the tty device corresponding to idx, NULL if there is not | 12 | * Return the tty device corresponding to idx, NULL if there is not |
13 | * one currently in use and an ERR_PTR value on error. Called under | 13 | * one currently in use and an ERR_PTR value on error. Called under |
@@ -250,7 +250,7 @@ struct serial_icounter_struct; | |||
250 | 250 | ||
251 | struct tty_operations { | 251 | struct tty_operations { |
252 | struct tty_struct * (*lookup)(struct tty_driver *driver, | 252 | struct tty_struct * (*lookup)(struct tty_driver *driver, |
253 | struct inode *inode, int idx); | 253 | struct file *filp, int idx); |
254 | int (*install)(struct tty_driver *driver, struct tty_struct *tty); | 254 | int (*install)(struct tty_driver *driver, struct tty_struct *tty); |
255 | void (*remove)(struct tty_driver *driver, struct tty_struct *tty); | 255 | void (*remove)(struct tty_driver *driver, struct tty_struct *tty); |
256 | int (*open)(struct tty_struct * tty, struct file * filp); | 256 | int (*open)(struct tty_struct * tty, struct file * filp); |
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index 8a0f55b6c2ba..88e3ab496e8f 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h | |||
@@ -375,6 +375,9 @@ struct vb2_ops { | |||
375 | /** | 375 | /** |
376 | * struct vb2_ops - driver-specific callbacks | 376 | * struct vb2_ops - driver-specific callbacks |
377 | * | 377 | * |
378 | * @verify_planes_array: Verify that a given user space structure contains | ||
379 | * enough planes for the buffer. This is called | ||
380 | * for each dequeued buffer. | ||
378 | * @fill_user_buffer: given a vb2_buffer fill in the userspace structure. | 381 | * @fill_user_buffer: given a vb2_buffer fill in the userspace structure. |
379 | * For V4L2 this is a struct v4l2_buffer. | 382 | * For V4L2 this is a struct v4l2_buffer. |
380 | * @fill_vb2_buffer: given a userspace structure, fill in the vb2_buffer. | 383 | * @fill_vb2_buffer: given a userspace structure, fill in the vb2_buffer. |
@@ -384,6 +387,7 @@ struct vb2_ops { | |||
384 | * the vb2_buffer struct. | 387 | * the vb2_buffer struct. |
385 | */ | 388 | */ |
386 | struct vb2_buf_ops { | 389 | struct vb2_buf_ops { |
390 | int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb); | ||
387 | void (*fill_user_buffer)(struct vb2_buffer *vb, void *pb); | 391 | void (*fill_user_buffer)(struct vb2_buffer *vb, void *pb); |
388 | int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb, | 392 | int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb, |
389 | struct vb2_plane *planes); | 393 | struct vb2_plane *planes); |
@@ -400,6 +404,9 @@ struct vb2_buf_ops { | |||
400 | * @fileio_read_once: report EOF after reading the first buffer | 404 | * @fileio_read_once: report EOF after reading the first buffer |
401 | * @fileio_write_immediately: queue buffer after each write() call | 405 | * @fileio_write_immediately: queue buffer after each write() call |
402 | * @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver | 406 | * @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver |
407 | * @quirk_poll_must_check_waiting_for_buffers: Return POLLERR at poll when QBUF | ||
408 | * has not been called. This is a vb1 idiom that has been adopted | ||
409 | * also by vb2. | ||
403 | * @lock: pointer to a mutex that protects the vb2_queue struct. The | 410 | * @lock: pointer to a mutex that protects the vb2_queue struct. The |
404 | * driver can set this to a mutex to let the v4l2 core serialize | 411 | * driver can set this to a mutex to let the v4l2 core serialize |
405 | * the queuing ioctls. If the driver wants to handle locking | 412 | * the queuing ioctls. If the driver wants to handle locking |
@@ -463,6 +470,7 @@ struct vb2_queue { | |||
463 | unsigned fileio_read_once:1; | 470 | unsigned fileio_read_once:1; |
464 | unsigned fileio_write_immediately:1; | 471 | unsigned fileio_write_immediately:1; |
465 | unsigned allow_zero_bytesused:1; | 472 | unsigned allow_zero_bytesused:1; |
473 | unsigned quirk_poll_must_check_waiting_for_buffers:1; | ||
466 | 474 | ||
467 | struct mutex *lock; | 475 | struct mutex *lock; |
468 | void *owner; | 476 | void *owner; |
diff --git a/include/net/switchdev.h b/include/net/switchdev.h index d451122e8404..51d77b2ce2b2 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h | |||
@@ -54,6 +54,8 @@ struct switchdev_attr { | |||
54 | struct net_device *orig_dev; | 54 | struct net_device *orig_dev; |
55 | enum switchdev_attr_id id; | 55 | enum switchdev_attr_id id; |
56 | u32 flags; | 56 | u32 flags; |
57 | void *complete_priv; | ||
58 | void (*complete)(struct net_device *dev, int err, void *priv); | ||
57 | union { | 59 | union { |
58 | struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */ | 60 | struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */ |
59 | u8 stp_state; /* PORT_STP_STATE */ | 61 | u8 stp_state; /* PORT_STP_STATE */ |
@@ -75,6 +77,8 @@ struct switchdev_obj { | |||
75 | struct net_device *orig_dev; | 77 | struct net_device *orig_dev; |
76 | enum switchdev_obj_id id; | 78 | enum switchdev_obj_id id; |
77 | u32 flags; | 79 | u32 flags; |
80 | void *complete_priv; | ||
81 | void (*complete)(struct net_device *dev, int err, void *priv); | ||
78 | }; | 82 | }; |
79 | 83 | ||
80 | /* SWITCHDEV_OBJ_ID_PORT_VLAN */ | 84 | /* SWITCHDEV_OBJ_ID_PORT_VLAN */ |
diff --git a/include/net/vxlan.h b/include/net/vxlan.h index 73ed2e951c02..35437c779da8 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h | |||
@@ -252,7 +252,9 @@ static inline netdev_features_t vxlan_features_check(struct sk_buff *skb, | |||
252 | (skb->inner_protocol_type != ENCAP_TYPE_ETHER || | 252 | (skb->inner_protocol_type != ENCAP_TYPE_ETHER || |
253 | skb->inner_protocol != htons(ETH_P_TEB) || | 253 | skb->inner_protocol != htons(ETH_P_TEB) || |
254 | (skb_inner_mac_header(skb) - skb_transport_header(skb) != | 254 | (skb_inner_mac_header(skb) - skb_transport_header(skb) != |
255 | sizeof(struct udphdr) + sizeof(struct vxlanhdr)))) | 255 | sizeof(struct udphdr) + sizeof(struct vxlanhdr)) || |
256 | (skb->ip_summed != CHECKSUM_NONE && | ||
257 | !can_checksum_protocol(features, inner_eth_hdr(skb)->h_proto)))) | ||
256 | return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); | 258 | return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); |
257 | 259 | ||
258 | return features; | 260 | return features; |
diff --git a/include/rdma/ib.h b/include/rdma/ib.h index cf8f9e700e48..a6b93706b0fc 100644 --- a/include/rdma/ib.h +++ b/include/rdma/ib.h | |||
@@ -34,6 +34,7 @@ | |||
34 | #define _RDMA_IB_H | 34 | #define _RDMA_IB_H |
35 | 35 | ||
36 | #include <linux/types.h> | 36 | #include <linux/types.h> |
37 | #include <linux/sched.h> | ||
37 | 38 | ||
38 | struct ib_addr { | 39 | struct ib_addr { |
39 | union { | 40 | union { |
@@ -86,4 +87,19 @@ struct sockaddr_ib { | |||
86 | __u64 sib_scope_id; | 87 | __u64 sib_scope_id; |
87 | }; | 88 | }; |
88 | 89 | ||
90 | /* | ||
91 | * The IB interfaces that use write() as bi-directional ioctl() are | ||
92 | * fundamentally unsafe, since there are lots of ways to trigger "write()" | ||
93 | * calls from various contexts with elevated privileges. That includes the | ||
94 | * traditional suid executable error message writes, but also various kernel | ||
95 | * interfaces that can write to file descriptors. | ||
96 | * | ||
97 | * This function provides protection for the legacy API by restricting the | ||
98 | * calling context. | ||
99 | */ | ||
100 | static inline bool ib_safe_file_access(struct file *filp) | ||
101 | { | ||
102 | return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS); | ||
103 | } | ||
104 | |||
89 | #endif /* _RDMA_IB_H */ | 105 | #endif /* _RDMA_IB_H */ |
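
ib_safe_file_access() exists so the legacy write()-as-ioctl handlers can refuse calls arriving with borrowed credentials or a kernel address limit; drivers that keep the legacy write() path are expected to gate their handlers on it. A hedged sketch of that call pattern (the handler name and message are illustrative, not taken from this hunk):

	#include <linux/fs.h>
	#include <linux/kernel.h>
	#include <linux/sched.h>
	#include <rdma/ib.h>

	static ssize_t example_dev_write(struct file *filp, const char __user *buf,
					 size_t len, loff_t *pos)
	{
		/* Reject writes from contexts with elevated or borrowed
		 * privileges (suid error paths, splice, in-kernel writes). */
		if (!ib_safe_file_access(filp)) {
			pr_err_once("process %d (%s) used an unsafe legacy write path\n",
				    task_tgid_vnr(current), current->comm);
			return -EACCES;
		}

		/* ... parse the command structure from buf as before ... */
		return len;
	}
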
diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h index fa341fcb5829..f5842bcd9c94 100644 --- a/include/sound/hda_i915.h +++ b/include/sound/hda_i915.h | |||
@@ -9,7 +9,7 @@ | |||
9 | #ifdef CONFIG_SND_HDA_I915 | 9 | #ifdef CONFIG_SND_HDA_I915 |
10 | int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable); | 10 | int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable); |
11 | int snd_hdac_display_power(struct hdac_bus *bus, bool enable); | 11 | int snd_hdac_display_power(struct hdac_bus *bus, bool enable); |
12 | int snd_hdac_get_display_clk(struct hdac_bus *bus); | 12 | void snd_hdac_i915_set_bclk(struct hdac_bus *bus); |
13 | int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, int rate); | 13 | int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, int rate); |
14 | int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid, | 14 | int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid, |
15 | bool *audio_enabled, char *buffer, int max_bytes); | 15 | bool *audio_enabled, char *buffer, int max_bytes); |
@@ -25,9 +25,8 @@ static inline int snd_hdac_display_power(struct hdac_bus *bus, bool enable) | |||
25 | { | 25 | { |
26 | return 0; | 26 | return 0; |
27 | } | 27 | } |
28 | static inline int snd_hdac_get_display_clk(struct hdac_bus *bus) | 28 | static inline void snd_hdac_i915_set_bclk(struct hdac_bus *bus) |
29 | { | 29 | { |
30 | return 0; | ||
31 | } | 30 | } |
32 | static inline int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, | 31 | static inline int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, |
33 | int rate) | 32 | int rate) |
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index 6e0f5f01734c..c51afb71bfab 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h | |||
@@ -718,9 +718,9 @@ __SYSCALL(__NR_mlock2, sys_mlock2) | |||
718 | #define __NR_copy_file_range 285 | 718 | #define __NR_copy_file_range 285 |
719 | __SYSCALL(__NR_copy_file_range, sys_copy_file_range) | 719 | __SYSCALL(__NR_copy_file_range, sys_copy_file_range) |
720 | #define __NR_preadv2 286 | 720 | #define __NR_preadv2 286 |
721 | __SYSCALL(__NR_preadv2, sys_preadv2) | 721 | __SC_COMP(__NR_preadv2, sys_preadv2, compat_sys_preadv2) |
722 | #define __NR_pwritev2 287 | 722 | #define __NR_pwritev2 287 |
723 | __SYSCALL(__NR_pwritev2, sys_pwritev2) | 723 | __SC_COMP(__NR_pwritev2, sys_pwritev2, compat_sys_pwritev2) |
724 | 724 | ||
725 | #undef __NR_syscalls | 725 | #undef __NR_syscalls |
726 | #define __NR_syscalls 288 | 726 | #define __NR_syscalls 288 |
diff --git a/include/uapi/linux/if_macsec.h b/include/uapi/linux/if_macsec.h index 26b0d1e3e3e7..4c58d9917aa4 100644 --- a/include/uapi/linux/if_macsec.h +++ b/include/uapi/linux/if_macsec.h | |||
@@ -19,8 +19,8 @@ | |||
19 | 19 | ||
20 | #define MACSEC_MAX_KEY_LEN 128 | 20 | #define MACSEC_MAX_KEY_LEN 128 |
21 | 21 | ||
22 | #define DEFAULT_CIPHER_ID 0x0080020001000001ULL | 22 | #define MACSEC_DEFAULT_CIPHER_ID 0x0080020001000001ULL |
23 | #define DEFAULT_CIPHER_ALT 0x0080C20001000001ULL | 23 | #define MACSEC_DEFAULT_CIPHER_ALT 0x0080C20001000001ULL |
24 | 24 | ||
25 | #define MACSEC_MIN_ICV_LEN 8 | 25 | #define MACSEC_MIN_ICV_LEN 8 |
26 | #define MACSEC_MAX_ICV_LEN 32 | 26 | #define MACSEC_MAX_ICV_LEN 32 |
diff --git a/include/linux/rio_mport_cdev.h b/include/uapi/linux/rio_mport_cdev.h index b65d19df76d2..5796bf1d06ad 100644 --- a/include/linux/rio_mport_cdev.h +++ b/include/uapi/linux/rio_mport_cdev.h | |||
@@ -39,16 +39,16 @@ | |||
39 | #ifndef _RIO_MPORT_CDEV_H_ | 39 | #ifndef _RIO_MPORT_CDEV_H_ |
40 | #define _RIO_MPORT_CDEV_H_ | 40 | #define _RIO_MPORT_CDEV_H_ |
41 | 41 | ||
42 | #ifndef __user | 42 | #include <linux/ioctl.h> |
43 | #define __user | 43 | #include <linux/types.h> |
44 | #endif | ||
45 | 44 | ||
46 | struct rio_mport_maint_io { | 45 | struct rio_mport_maint_io { |
47 | uint32_t rioid; /* destID of remote device */ | 46 | __u16 rioid; /* destID of remote device */ |
48 | uint32_t hopcount; /* hopcount to remote device */ | 47 | __u8 hopcount; /* hopcount to remote device */ |
49 | uint32_t offset; /* offset in register space */ | 48 | __u8 pad0[5]; |
50 | size_t length; /* length in bytes */ | 49 | __u32 offset; /* offset in register space */ |
51 | void __user *buffer; /* data buffer */ | 50 | __u32 length; /* length in bytes */ |
51 | __u64 buffer; /* pointer to data buffer */ | ||
52 | }; | 52 | }; |
53 | 53 | ||
54 | /* | 54 | /* |
@@ -66,22 +66,23 @@ struct rio_mport_maint_io { | |||
66 | #define RIO_CAP_MAP_INB (1 << 7) | 66 | #define RIO_CAP_MAP_INB (1 << 7) |
67 | 67 | ||
68 | struct rio_mport_properties { | 68 | struct rio_mport_properties { |
69 | uint16_t hdid; | 69 | __u16 hdid; |
70 | uint8_t id; /* Physical port ID */ | 70 | __u8 id; /* Physical port ID */ |
71 | uint8_t index; | 71 | __u8 index; |
72 | uint32_t flags; | 72 | __u32 flags; |
73 | uint32_t sys_size; /* Default addressing size */ | 73 | __u32 sys_size; /* Default addressing size */ |
74 | uint8_t port_ok; | 74 | __u8 port_ok; |
75 | uint8_t link_speed; | 75 | __u8 link_speed; |
76 | uint8_t link_width; | 76 | __u8 link_width; |
77 | uint32_t dma_max_sge; | 77 | __u8 pad0; |
78 | uint32_t dma_max_size; | 78 | __u32 dma_max_sge; |
79 | uint32_t dma_align; | 79 | __u32 dma_max_size; |
80 | uint32_t transfer_mode; /* Default transfer mode */ | 80 | __u32 dma_align; |
81 | uint32_t cap_sys_size; /* Capable system sizes */ | 81 | __u32 transfer_mode; /* Default transfer mode */ |
82 | uint32_t cap_addr_size; /* Capable addressing sizes */ | 82 | __u32 cap_sys_size; /* Capable system sizes */ |
83 | uint32_t cap_transfer_mode; /* Capable transfer modes */ | 83 | __u32 cap_addr_size; /* Capable addressing sizes */ |
84 | uint32_t cap_mport; /* Mport capabilities */ | 84 | __u32 cap_transfer_mode; /* Capable transfer modes */ |
85 | __u32 cap_mport; /* Mport capabilities */ | ||
85 | }; | 86 | }; |
86 | 87 | ||
87 | /* | 88 | /* |
@@ -93,54 +94,57 @@ struct rio_mport_properties { | |||
93 | #define RIO_PORTWRITE (1 << 1) | 94 | #define RIO_PORTWRITE (1 << 1) |
94 | 95 | ||
95 | struct rio_doorbell { | 96 | struct rio_doorbell { |
96 | uint32_t rioid; | 97 | __u16 rioid; |
97 | uint16_t payload; | 98 | __u16 payload; |
98 | }; | 99 | }; |
99 | 100 | ||
100 | struct rio_doorbell_filter { | 101 | struct rio_doorbell_filter { |
101 | uint32_t rioid; /* 0xffffffff to match all ids */ | 102 | __u16 rioid; /* Use RIO_INVALID_DESTID to match all ids */ |
102 | uint16_t low; | 103 | __u16 low; |
103 | uint16_t high; | 104 | __u16 high; |
105 | __u16 pad0; | ||
104 | }; | 106 | }; |
105 | 107 | ||
106 | 108 | ||
107 | struct rio_portwrite { | 109 | struct rio_portwrite { |
108 | uint32_t payload[16]; | 110 | __u32 payload[16]; |
109 | }; | 111 | }; |
110 | 112 | ||
111 | struct rio_pw_filter { | 113 | struct rio_pw_filter { |
112 | uint32_t mask; | 114 | __u32 mask; |
113 | uint32_t low; | 115 | __u32 low; |
114 | uint32_t high; | 116 | __u32 high; |
117 | __u32 pad0; | ||
115 | }; | 118 | }; |
116 | 119 | ||
117 | /* RapidIO base address for inbound requests set to value defined below | 120 | /* RapidIO base address for inbound requests set to value defined below |
118 | * indicates that no specific RIO-to-local address translation is requested | 121 | * indicates that no specific RIO-to-local address translation is requested |
119 | * and driver should use direct (one-to-one) address mapping. | 122 | * and driver should use direct (one-to-one) address mapping. |
120 | */ | 123 | */ |
121 | #define RIO_MAP_ANY_ADDR (uint64_t)(~((uint64_t) 0)) | 124 | #define RIO_MAP_ANY_ADDR (__u64)(~((__u64) 0)) |
122 | 125 | ||
123 | struct rio_mmap { | 126 | struct rio_mmap { |
124 | uint32_t rioid; | 127 | __u16 rioid; |
125 | uint64_t rio_addr; | 128 | __u16 pad0[3]; |
126 | uint64_t length; | 129 | __u64 rio_addr; |
127 | uint64_t handle; | 130 | __u64 length; |
128 | void *address; | 131 | __u64 handle; |
132 | __u64 address; | ||
129 | }; | 133 | }; |
130 | 134 | ||
131 | struct rio_dma_mem { | 135 | struct rio_dma_mem { |
132 | uint64_t length; /* length of DMA memory */ | 136 | __u64 length; /* length of DMA memory */ |
133 | uint64_t dma_handle; /* handle associated with this memory */ | 137 | __u64 dma_handle; /* handle associated with this memory */ |
134 | void *buffer; /* pointer to this memory */ | 138 | __u64 address; |
135 | }; | 139 | }; |
136 | 140 | ||
137 | |||
138 | struct rio_event { | 141 | struct rio_event { |
139 | unsigned int header; /* event type RIO_DOORBELL or RIO_PORTWRITE */ | 142 | __u32 header; /* event type RIO_DOORBELL or RIO_PORTWRITE */ |
140 | union { | 143 | union { |
141 | struct rio_doorbell doorbell; /* header for RIO_DOORBELL */ | 144 | struct rio_doorbell doorbell; /* header for RIO_DOORBELL */ |
142 | struct rio_portwrite portwrite; /* header for RIO_PORTWRITE */ | 145 | struct rio_portwrite portwrite; /* header for RIO_PORTWRITE */ |
143 | } u; | 146 | } u; |
147 | __u32 pad0; | ||
144 | }; | 148 | }; |
145 | 149 | ||
146 | enum rio_transfer_sync { | 150 | enum rio_transfer_sync { |
@@ -184,35 +188,37 @@ enum rio_exchange { | |||
184 | }; | 188 | }; |
185 | 189 | ||
186 | struct rio_transfer_io { | 190 | struct rio_transfer_io { |
187 | uint32_t rioid; /* Target destID */ | 191 | __u64 rio_addr; /* Address in target's RIO mem space */ |
188 | uint64_t rio_addr; /* Address in target's RIO mem space */ | 192 | __u64 loc_addr; |
189 | enum rio_exchange method; /* Data exchange method */ | 193 | __u64 handle; |
190 | void __user *loc_addr; | 194 | __u64 offset; /* Offset in buffer */ |
191 | uint64_t handle; | 195 | __u64 length; /* Length in bytes */ |
192 | uint64_t offset; /* Offset in buffer */ | 196 | __u16 rioid; /* Target destID */ |
193 | uint64_t length; /* Length in bytes */ | 197 | __u16 method; /* Data exchange method, one of rio_exchange enum */ |
194 | uint32_t completion_code; /* Completion code for this transfer */ | 198 | __u32 completion_code; /* Completion code for this transfer */ |
195 | }; | 199 | }; |
196 | 200 | ||
197 | struct rio_transaction { | 201 | struct rio_transaction { |
198 | uint32_t transfer_mode; /* Data transfer mode */ | 202 | __u64 block; /* Pointer to array of <count> transfers */ |
199 | enum rio_transfer_sync sync; /* Synchronization method */ | 203 | __u32 count; /* Number of transfers */ |
200 | enum rio_transfer_dir dir; /* Transfer direction */ | 204 | __u32 transfer_mode; /* Data transfer mode */ |
201 | size_t count; /* Number of transfers */ | 205 | __u16 sync; /* Synch method, one of rio_transfer_sync enum */ |
202 | struct rio_transfer_io __user *block; /* Array of <count> transfers */ | 206 | __u16 dir; /* Transfer direction, one of rio_transfer_dir enum */ |
207 | __u32 pad0; | ||
203 | }; | 208 | }; |
204 | 209 | ||
205 | struct rio_async_tx_wait { | 210 | struct rio_async_tx_wait { |
206 | uint32_t token; /* DMA transaction ID token */ | 211 | __u32 token; /* DMA transaction ID token */ |
207 | uint32_t timeout; /* Wait timeout in msec, if 0 use default TO */ | 212 | __u32 timeout; /* Wait timeout in msec, if 0 use default TO */ |
208 | }; | 213 | }; |
209 | 214 | ||
210 | #define RIO_MAX_DEVNAME_SZ 20 | 215 | #define RIO_MAX_DEVNAME_SZ 20 |
211 | 216 | ||
212 | struct rio_rdev_info { | 217 | struct rio_rdev_info { |
213 | uint32_t destid; | 218 | __u16 destid; |
214 | uint8_t hopcount; | 219 | __u8 hopcount; |
215 | uint32_t comptag; | 220 | __u8 pad0; |
221 | __u32 comptag; | ||
216 | char name[RIO_MAX_DEVNAME_SZ + 1]; | 222 | char name[RIO_MAX_DEVNAME_SZ + 1]; |
217 | }; | 223 | }; |
218 | 224 | ||
@@ -220,11 +226,11 @@ struct rio_rdev_info { | |||
220 | #define RIO_MPORT_DRV_MAGIC 'm' | 226 | #define RIO_MPORT_DRV_MAGIC 'm' |
221 | 227 | ||
222 | #define RIO_MPORT_MAINT_HDID_SET \ | 228 | #define RIO_MPORT_MAINT_HDID_SET \ |
223 | _IOW(RIO_MPORT_DRV_MAGIC, 1, uint16_t) | 229 | _IOW(RIO_MPORT_DRV_MAGIC, 1, __u16) |
224 | #define RIO_MPORT_MAINT_COMPTAG_SET \ | 230 | #define RIO_MPORT_MAINT_COMPTAG_SET \ |
225 | _IOW(RIO_MPORT_DRV_MAGIC, 2, uint32_t) | 231 | _IOW(RIO_MPORT_DRV_MAGIC, 2, __u32) |
226 | #define RIO_MPORT_MAINT_PORT_IDX_GET \ | 232 | #define RIO_MPORT_MAINT_PORT_IDX_GET \ |
227 | _IOR(RIO_MPORT_DRV_MAGIC, 3, uint32_t) | 233 | _IOR(RIO_MPORT_DRV_MAGIC, 3, __u32) |
228 | #define RIO_MPORT_GET_PROPERTIES \ | 234 | #define RIO_MPORT_GET_PROPERTIES \ |
229 | _IOR(RIO_MPORT_DRV_MAGIC, 4, struct rio_mport_properties) | 235 | _IOR(RIO_MPORT_DRV_MAGIC, 4, struct rio_mport_properties) |
230 | #define RIO_MPORT_MAINT_READ_LOCAL \ | 236 | #define RIO_MPORT_MAINT_READ_LOCAL \ |
@@ -244,9 +250,9 @@ struct rio_rdev_info { | |||
244 | #define RIO_DISABLE_PORTWRITE_RANGE \ | 250 | #define RIO_DISABLE_PORTWRITE_RANGE \ |
245 | _IOW(RIO_MPORT_DRV_MAGIC, 12, struct rio_pw_filter) | 251 | _IOW(RIO_MPORT_DRV_MAGIC, 12, struct rio_pw_filter) |
246 | #define RIO_SET_EVENT_MASK \ | 252 | #define RIO_SET_EVENT_MASK \ |
247 | _IOW(RIO_MPORT_DRV_MAGIC, 13, unsigned int) | 253 | _IOW(RIO_MPORT_DRV_MAGIC, 13, __u32) |
248 | #define RIO_GET_EVENT_MASK \ | 254 | #define RIO_GET_EVENT_MASK \ |
249 | _IOR(RIO_MPORT_DRV_MAGIC, 14, unsigned int) | 255 | _IOR(RIO_MPORT_DRV_MAGIC, 14, __u32) |
250 | #define RIO_MAP_OUTBOUND \ | 256 | #define RIO_MAP_OUTBOUND \ |
251 | _IOWR(RIO_MPORT_DRV_MAGIC, 15, struct rio_mmap) | 257 | _IOWR(RIO_MPORT_DRV_MAGIC, 15, struct rio_mmap) |
252 | #define RIO_UNMAP_OUTBOUND \ | 258 | #define RIO_UNMAP_OUTBOUND \ |
@@ -254,11 +260,11 @@ struct rio_rdev_info { | |||
254 | #define RIO_MAP_INBOUND \ | 260 | #define RIO_MAP_INBOUND \ |
255 | _IOWR(RIO_MPORT_DRV_MAGIC, 17, struct rio_mmap) | 261 | _IOWR(RIO_MPORT_DRV_MAGIC, 17, struct rio_mmap) |
256 | #define RIO_UNMAP_INBOUND \ | 262 | #define RIO_UNMAP_INBOUND \ |
257 | _IOW(RIO_MPORT_DRV_MAGIC, 18, uint64_t) | 263 | _IOW(RIO_MPORT_DRV_MAGIC, 18, __u64) |
258 | #define RIO_ALLOC_DMA \ | 264 | #define RIO_ALLOC_DMA \ |
259 | _IOWR(RIO_MPORT_DRV_MAGIC, 19, struct rio_dma_mem) | 265 | _IOWR(RIO_MPORT_DRV_MAGIC, 19, struct rio_dma_mem) |
260 | #define RIO_FREE_DMA \ | 266 | #define RIO_FREE_DMA \ |
261 | _IOW(RIO_MPORT_DRV_MAGIC, 20, uint64_t) | 267 | _IOW(RIO_MPORT_DRV_MAGIC, 20, __u64) |
262 | #define RIO_TRANSFER \ | 268 | #define RIO_TRANSFER \ |
263 | _IOWR(RIO_MPORT_DRV_MAGIC, 21, struct rio_transaction) | 269 | _IOWR(RIO_MPORT_DRV_MAGIC, 21, struct rio_transaction) |
264 | #define RIO_WAIT_FOR_ASYNC \ | 270 | #define RIO_WAIT_FOR_ASYNC \ |
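The rio_mport_cdev hunks above switch the ioctl structures to fixed-width __u* fields with explicit padding, which keeps the layout identical for 32-bit and 64-bit userspace. A minimal sketch of how the new inbound-mapping ABI could be exercised from userspace follows; the /dev/rio_mport0 node name, the mapping length and the error handling are illustrative assumptions, not part of this patch.

    #include <stdio.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/rio_mport_cdev.h>    /* the UAPI header changed above */

    int main(void)
    {
            struct rio_mmap map;
            int fd = open("/dev/rio_mport0", O_RDWR);    /* assumed device node */

            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            memset(&map, 0, sizeof(map));        /* zeroes the new pad0[] fields too */
            map.rio_addr = RIO_MAP_ANY_ADDR;     /* let the driver pick the RIO address */
            map.length   = 0x10000;              /* request a 64 KiB inbound window */

            if (ioctl(fd, RIO_MAP_INBOUND, &map) == 0) {
                    printf("handle %llu, RIO addr 0x%llx\n",
                           (unsigned long long)map.handle,
                           (unsigned long long)map.rio_addr);
                    ioctl(fd, RIO_UNMAP_INBOUND, &map.handle);   /* takes a __u64 now */
            }

            close(fd);
            return 0;
    }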
diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h index 3f10e5317b46..8f3a8f606fd9 100644 --- a/include/uapi/linux/swab.h +++ b/include/uapi/linux/swab.h | |||
@@ -45,9 +45,7 @@ | |||
45 | 45 | ||
46 | static inline __attribute_const__ __u16 __fswab16(__u16 val) | 46 | static inline __attribute_const__ __u16 __fswab16(__u16 val) |
47 | { | 47 | { |
48 | #ifdef __HAVE_BUILTIN_BSWAP16__ | 48 | #if defined (__arch_swab16) |
49 | return __builtin_bswap16(val); | ||
50 | #elif defined (__arch_swab16) | ||
51 | return __arch_swab16(val); | 49 | return __arch_swab16(val); |
52 | #else | 50 | #else |
53 | return ___constant_swab16(val); | 51 | return ___constant_swab16(val); |
@@ -56,9 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val) | |||
56 | 54 | ||
57 | static inline __attribute_const__ __u32 __fswab32(__u32 val) | 55 | static inline __attribute_const__ __u32 __fswab32(__u32 val) |
58 | { | 56 | { |
59 | #ifdef __HAVE_BUILTIN_BSWAP32__ | 57 | #if defined(__arch_swab32) |
60 | return __builtin_bswap32(val); | ||
61 | #elif defined(__arch_swab32) | ||
62 | return __arch_swab32(val); | 58 | return __arch_swab32(val); |
63 | #else | 59 | #else |
64 | return ___constant_swab32(val); | 60 | return ___constant_swab32(val); |
@@ -67,9 +63,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val) | |||
67 | 63 | ||
68 | static inline __attribute_const__ __u64 __fswab64(__u64 val) | 64 | static inline __attribute_const__ __u64 __fswab64(__u64 val) |
69 | { | 65 | { |
70 | #ifdef __HAVE_BUILTIN_BSWAP64__ | 66 | #if defined (__arch_swab64) |
71 | return __builtin_bswap64(val); | ||
72 | #elif defined (__arch_swab64) | ||
73 | return __arch_swab64(val); | 67 | return __arch_swab64(val); |
74 | #elif defined(__SWAB_64_THRU_32__) | 68 | #elif defined(__SWAB_64_THRU_32__) |
75 | __u32 h = val >> 32; | 69 | __u32 h = val >> 32; |
@@ -102,28 +96,40 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val) | |||
102 | * __swab16 - return a byteswapped 16-bit value | 96 | * __swab16 - return a byteswapped 16-bit value |
103 | * @x: value to byteswap | 97 | * @x: value to byteswap |
104 | */ | 98 | */ |
99 | #ifdef __HAVE_BUILTIN_BSWAP16__ | ||
100 | #define __swab16(x) (__u16)__builtin_bswap16((__u16)(x)) | ||
101 | #else | ||
105 | #define __swab16(x) \ | 102 | #define __swab16(x) \ |
106 | (__builtin_constant_p((__u16)(x)) ? \ | 103 | (__builtin_constant_p((__u16)(x)) ? \ |
107 | ___constant_swab16(x) : \ | 104 | ___constant_swab16(x) : \ |
108 | __fswab16(x)) | 105 | __fswab16(x)) |
106 | #endif | ||
109 | 107 | ||
110 | /** | 108 | /** |
111 | * __swab32 - return a byteswapped 32-bit value | 109 | * __swab32 - return a byteswapped 32-bit value |
112 | * @x: value to byteswap | 110 | * @x: value to byteswap |
113 | */ | 111 | */ |
112 | #ifdef __HAVE_BUILTIN_BSWAP32__ | ||
113 | #define __swab32(x) (__u32)__builtin_bswap32((__u32)(x)) | ||
114 | #else | ||
114 | #define __swab32(x) \ | 115 | #define __swab32(x) \ |
115 | (__builtin_constant_p((__u32)(x)) ? \ | 116 | (__builtin_constant_p((__u32)(x)) ? \ |
116 | ___constant_swab32(x) : \ | 117 | ___constant_swab32(x) : \ |
117 | __fswab32(x)) | 118 | __fswab32(x)) |
119 | #endif | ||
118 | 120 | ||
119 | /** | 121 | /** |
120 | * __swab64 - return a byteswapped 64-bit value | 122 | * __swab64 - return a byteswapped 64-bit value |
121 | * @x: value to byteswap | 123 | * @x: value to byteswap |
122 | */ | 124 | */ |
125 | #ifdef __HAVE_BUILTIN_BSWAP64__ | ||
126 | #define __swab64(x) (__u64)__builtin_bswap64((__u64)(x)) | ||
127 | #else | ||
123 | #define __swab64(x) \ | 128 | #define __swab64(x) \ |
124 | (__builtin_constant_p((__u64)(x)) ? \ | 129 | (__builtin_constant_p((__u64)(x)) ? \ |
125 | ___constant_swab64(x) : \ | 130 | ___constant_swab64(x) : \ |
126 | __fswab64(x)) | 131 | __fswab64(x)) |
132 | #endif | ||
127 | 133 | ||
128 | /** | 134 | /** |
129 | * __swahw32 - return a word-swapped 32-bit value | 135 | * __swahw32 - return a word-swapped 32-bit value |
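The swab.h change moves the __builtin_bswap* dispatch out of the __fswab*() helpers and into the __swab*() macros themselves, so that when __HAVE_BUILTIN_BSWAP*__ is defined both constant and variable arguments go straight to the compiler builtin and constants can be folded at compile time. A rough userspace illustration of the semantics the macro provides; this is a sketch, not the kernel macro, and __builtin_bswap32 assumes GCC or Clang:

    #include <stdint.h>
    #include <stdio.h>

    /* Open-coded reference, equivalent in effect to ___constant_swab32(). */
    static inline uint32_t swab32_ref(uint32_t x)
    {
            return ((x & 0x000000ffu) << 24) |
                   ((x & 0x0000ff00u) <<  8) |
                   ((x & 0x00ff0000u) >>  8) |
                   ((x & 0xff000000u) >> 24);
    }

    int main(void)
    {
            uint32_t v = 0x12345678u;

            /* Both lines print 0x78563412; with the patched header,
             * __swab32(0x12345678) would reduce to that constant. */
            printf("reference: %#x\n", swab32_ref(v));
            printf("builtin:   %#x\n", (unsigned int)__builtin_bswap32(v));
            return 0;
    }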
diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h index c039f1d68a09..086168e18ca8 100644 --- a/include/uapi/linux/v4l2-dv-timings.h +++ b/include/uapi/linux/v4l2-dv-timings.h | |||
@@ -183,7 +183,8 @@ | |||
183 | 183 | ||
184 | #define V4L2_DV_BT_CEA_3840X2160P24 { \ | 184 | #define V4L2_DV_BT_CEA_3840X2160P24 { \ |
185 | .type = V4L2_DV_BT_656_1120, \ | 185 | .type = V4L2_DV_BT_656_1120, \ |
186 | V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ | 186 | V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \ |
187 | V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \ | ||
187 | 297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \ | 188 | 297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \ |
188 | V4L2_DV_BT_STD_CEA861, \ | 189 | V4L2_DV_BT_STD_CEA861, \ |
189 | V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ | 190 | V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ |
@@ -191,14 +192,16 @@ | |||
191 | 192 | ||
192 | #define V4L2_DV_BT_CEA_3840X2160P25 { \ | 193 | #define V4L2_DV_BT_CEA_3840X2160P25 { \ |
193 | .type = V4L2_DV_BT_656_1120, \ | 194 | .type = V4L2_DV_BT_656_1120, \ |
194 | V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ | 195 | V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \ |
196 | V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \ | ||
195 | 297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \ | 197 | 297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \ |
196 | V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \ | 198 | V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \ |
197 | } | 199 | } |
198 | 200 | ||
199 | #define V4L2_DV_BT_CEA_3840X2160P30 { \ | 201 | #define V4L2_DV_BT_CEA_3840X2160P30 { \ |
200 | .type = V4L2_DV_BT_656_1120, \ | 202 | .type = V4L2_DV_BT_656_1120, \ |
201 | V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ | 203 | V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \ |
204 | V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \ | ||
202 | 297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \ | 205 | 297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \ |
203 | V4L2_DV_BT_STD_CEA861, \ | 206 | V4L2_DV_BT_STD_CEA861, \ |
204 | V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ | 207 | V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ |
@@ -206,14 +209,16 @@ | |||
206 | 209 | ||
207 | #define V4L2_DV_BT_CEA_3840X2160P50 { \ | 210 | #define V4L2_DV_BT_CEA_3840X2160P50 { \ |
208 | .type = V4L2_DV_BT_656_1120, \ | 211 | .type = V4L2_DV_BT_656_1120, \ |
209 | V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ | 212 | V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \ |
213 | V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \ | ||
210 | 594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \ | 214 | 594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \ |
211 | V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \ | 215 | V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \ |
212 | } | 216 | } |
213 | 217 | ||
214 | #define V4L2_DV_BT_CEA_3840X2160P60 { \ | 218 | #define V4L2_DV_BT_CEA_3840X2160P60 { \ |
215 | .type = V4L2_DV_BT_656_1120, \ | 219 | .type = V4L2_DV_BT_656_1120, \ |
216 | V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ | 220 | V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \ |
221 | V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \ | ||
217 | 594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \ | 222 | 594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \ |
218 | V4L2_DV_BT_STD_CEA861, \ | 223 | V4L2_DV_BT_STD_CEA861, \ |
219 | V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ | 224 | V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ |
@@ -221,7 +226,8 @@ | |||
221 | 226 | ||
222 | #define V4L2_DV_BT_CEA_4096X2160P24 { \ | 227 | #define V4L2_DV_BT_CEA_4096X2160P24 { \ |
223 | .type = V4L2_DV_BT_656_1120, \ | 228 | .type = V4L2_DV_BT_656_1120, \ |
224 | V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ | 229 | V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \ |
230 | V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \ | ||
225 | 297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \ | 231 | 297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \ |
226 | V4L2_DV_BT_STD_CEA861, \ | 232 | V4L2_DV_BT_STD_CEA861, \ |
227 | V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ | 233 | V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ |
@@ -229,14 +235,16 @@ | |||
229 | 235 | ||
230 | #define V4L2_DV_BT_CEA_4096X2160P25 { \ | 236 | #define V4L2_DV_BT_CEA_4096X2160P25 { \ |
231 | .type = V4L2_DV_BT_656_1120, \ | 237 | .type = V4L2_DV_BT_656_1120, \ |
232 | V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ | 238 | V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \ |
239 | V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \ | ||
233 | 297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \ | 240 | 297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \ |
234 | V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \ | 241 | V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \ |
235 | } | 242 | } |
236 | 243 | ||
237 | #define V4L2_DV_BT_CEA_4096X2160P30 { \ | 244 | #define V4L2_DV_BT_CEA_4096X2160P30 { \ |
238 | .type = V4L2_DV_BT_656_1120, \ | 245 | .type = V4L2_DV_BT_656_1120, \ |
239 | V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ | 246 | V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \ |
247 | V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \ | ||
240 | 297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \ | 248 | 297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \ |
241 | V4L2_DV_BT_STD_CEA861, \ | 249 | V4L2_DV_BT_STD_CEA861, \ |
242 | V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ | 250 | V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ |
@@ -244,14 +252,16 @@ | |||
244 | 252 | ||
245 | #define V4L2_DV_BT_CEA_4096X2160P50 { \ | 253 | #define V4L2_DV_BT_CEA_4096X2160P50 { \ |
246 | .type = V4L2_DV_BT_656_1120, \ | 254 | .type = V4L2_DV_BT_656_1120, \ |
247 | V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ | 255 | V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \ |
256 | V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \ | ||
248 | 594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \ | 257 | 594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \ |
249 | V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \ | 258 | V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \ |
250 | } | 259 | } |
251 | 260 | ||
252 | #define V4L2_DV_BT_CEA_4096X2160P60 { \ | 261 | #define V4L2_DV_BT_CEA_4096X2160P60 { \ |
253 | .type = V4L2_DV_BT_656_1120, \ | 262 | .type = V4L2_DV_BT_656_1120, \ |
254 | V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \ | 263 | V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \ |
264 | V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \ | ||
255 | 594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \ | 265 | 594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \ |
256 | V4L2_DV_BT_STD_CEA861, \ | 266 | V4L2_DV_BT_STD_CEA861, \ |
257 | V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ | 267 | V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \ |
diff --git a/include/xen/page.h b/include/xen/page.h index 96294ac93755..9dc46cb8a0fd 100644 --- a/include/xen/page.h +++ b/include/xen/page.h | |||
@@ -15,9 +15,9 @@ | |||
15 | */ | 15 | */ |
16 | 16 | ||
17 | #define xen_pfn_to_page(xen_pfn) \ | 17 | #define xen_pfn_to_page(xen_pfn) \ |
18 | ((pfn_to_page(((unsigned long)(xen_pfn) << XEN_PAGE_SHIFT) >> PAGE_SHIFT))) | 18 | (pfn_to_page((unsigned long)(xen_pfn) >> (PAGE_SHIFT - XEN_PAGE_SHIFT))) |
19 | #define page_to_xen_pfn(page) \ | 19 | #define page_to_xen_pfn(page) \ |
20 | (((page_to_pfn(page)) << PAGE_SHIFT) >> XEN_PAGE_SHIFT) | 20 | ((page_to_pfn(page)) << (PAGE_SHIFT - XEN_PAGE_SHIFT)) |
21 | 21 | ||
22 | #define XEN_PFN_PER_PAGE (PAGE_SIZE / XEN_PAGE_SIZE) | 22 | #define XEN_PFN_PER_PAGE (PAGE_SIZE / XEN_PAGE_SIZE) |
23 | 23 | ||
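The rewritten xen/page.h macros assume PAGE_SHIFT >= XEN_PAGE_SHIFT and shift by the difference in one step, so the conversion no longer goes through an intermediate value that can overflow an unsigned long on 32-bit builds. A small worked example; PAGE_SHIFT = 16 and XEN_PAGE_SHIFT = 12 are assumed purely for illustration:

    #include <stdio.h>

    #define PAGE_SHIFT      16   /* assumed: 64 KiB kernel pages */
    #define XEN_PAGE_SHIFT  12   /* Xen granules are 4 KiB */

    int main(void)
    {
            unsigned long xen_pfn = 0xfff00UL;

            /* Old form: (xen_pfn << XEN_PAGE_SHIFT) >> PAGE_SHIFT
             * New form:  xen_pfn >> (PAGE_SHIFT - XEN_PAGE_SHIFT)
             * Both give the same kernel pfn while the left shift fits: */
            unsigned long old_pfn = (xen_pfn << XEN_PAGE_SHIFT) >> PAGE_SHIFT;
            unsigned long new_pfn = xen_pfn >> (PAGE_SHIFT - XEN_PAGE_SHIFT);

            printf("kernel pfn: old=%#lx new=%#lx\n", old_pfn, new_pfn);

            /* For a xen_pfn with bits at or above (BITS_PER_LONG - XEN_PAGE_SHIFT),
             * the old left shift would drop those bits on a 32-bit unsigned long;
             * the single right shift in the new form cannot overflow. */
            return 0;
    }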
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index f2ece3c174a5..8f94ca1860cf 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c | |||
@@ -31,10 +31,10 @@ static void *bpf_any_get(void *raw, enum bpf_type type) | |||
31 | { | 31 | { |
32 | switch (type) { | 32 | switch (type) { |
33 | case BPF_TYPE_PROG: | 33 | case BPF_TYPE_PROG: |
34 | atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt); | 34 | raw = bpf_prog_inc(raw); |
35 | break; | 35 | break; |
36 | case BPF_TYPE_MAP: | 36 | case BPF_TYPE_MAP: |
37 | bpf_map_inc(raw, true); | 37 | raw = bpf_map_inc(raw, true); |
38 | break; | 38 | break; |
39 | default: | 39 | default: |
40 | WARN_ON_ONCE(1); | 40 | WARN_ON_ONCE(1); |
@@ -297,7 +297,8 @@ static void *bpf_obj_do_get(const struct filename *pathname, | |||
297 | goto out; | 297 | goto out; |
298 | 298 | ||
299 | raw = bpf_any_get(inode->i_private, *type); | 299 | raw = bpf_any_get(inode->i_private, *type); |
300 | touch_atime(&path); | 300 | if (!IS_ERR(raw)) |
301 | touch_atime(&path); | ||
301 | 302 | ||
302 | path_put(&path); | 303 | path_put(&path); |
303 | return raw; | 304 | return raw; |
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index adc5e4bd74f8..cf5e9f7ad13a 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c | |||
@@ -218,11 +218,18 @@ struct bpf_map *__bpf_map_get(struct fd f) | |||
218 | return f.file->private_data; | 218 | return f.file->private_data; |
219 | } | 219 | } |
220 | 220 | ||
221 | void bpf_map_inc(struct bpf_map *map, bool uref) | 221 | /* prog's and map's refcnt limit */ |
222 | #define BPF_MAX_REFCNT 32768 | ||
223 | |||
224 | struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref) | ||
222 | { | 225 | { |
223 | atomic_inc(&map->refcnt); | 226 | if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) { |
227 | atomic_dec(&map->refcnt); | ||
228 | return ERR_PTR(-EBUSY); | ||
229 | } | ||
224 | if (uref) | 230 | if (uref) |
225 | atomic_inc(&map->usercnt); | 231 | atomic_inc(&map->usercnt); |
232 | return map; | ||
226 | } | 233 | } |
227 | 234 | ||
228 | struct bpf_map *bpf_map_get_with_uref(u32 ufd) | 235 | struct bpf_map *bpf_map_get_with_uref(u32 ufd) |
@@ -234,7 +241,7 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd) | |||
234 | if (IS_ERR(map)) | 241 | if (IS_ERR(map)) |
235 | return map; | 242 | return map; |
236 | 243 | ||
237 | bpf_map_inc(map, true); | 244 | map = bpf_map_inc(map, true); |
238 | fdput(f); | 245 | fdput(f); |
239 | 246 | ||
240 | return map; | 247 | return map; |
@@ -658,6 +665,15 @@ static struct bpf_prog *__bpf_prog_get(struct fd f) | |||
658 | return f.file->private_data; | 665 | return f.file->private_data; |
659 | } | 666 | } |
660 | 667 | ||
668 | struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog) | ||
669 | { | ||
670 | if (atomic_inc_return(&prog->aux->refcnt) > BPF_MAX_REFCNT) { | ||
671 | atomic_dec(&prog->aux->refcnt); | ||
672 | return ERR_PTR(-EBUSY); | ||
673 | } | ||
674 | return prog; | ||
675 | } | ||
676 | |||
661 | /* called by sockets/tracing/seccomp before attaching program to an event | 677 | /* called by sockets/tracing/seccomp before attaching program to an event |
662 | * pairs with bpf_prog_put() | 678 | * pairs with bpf_prog_put() |
663 | */ | 679 | */ |
@@ -670,7 +686,7 @@ struct bpf_prog *bpf_prog_get(u32 ufd) | |||
670 | if (IS_ERR(prog)) | 686 | if (IS_ERR(prog)) |
671 | return prog; | 687 | return prog; |
672 | 688 | ||
673 | atomic_inc(&prog->aux->refcnt); | 689 | prog = bpf_prog_inc(prog); |
674 | fdput(f); | 690 | fdput(f); |
675 | 691 | ||
676 | return prog; | 692 | return prog; |
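bpf_map_inc() and the new bpf_prog_inc() now share one pattern: take the reference, and if the count has gone past BPF_MAX_REFCNT, undo the increment and return -EBUSY, so userspace cannot overflow the 32-bit refcount by repeatedly obtaining or pinning the object. A standalone sketch of that take-or-fail pattern, using C11 atomics in place of the kernel's atomic_t purely for illustration:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <errno.h>

    #define BPF_MAX_REFCNT 32768   /* same cap the patch introduces */

    struct object {
            atomic_int refcnt;
    };

    /* Returns 0 on success, -EBUSY if another reference would exceed the
     * cap; the failed increment is rolled back so the count stays exact. */
    static int object_inc(struct object *obj)
    {
            if (atomic_fetch_add(&obj->refcnt, 1) + 1 > BPF_MAX_REFCNT) {
                    atomic_fetch_sub(&obj->refcnt, 1);
                    return -EBUSY;
            }
            return 0;
    }

    int main(void)
    {
            struct object obj;

            atomic_init(&obj.refcnt, BPF_MAX_REFCNT);
            printf("inc at the cap -> %d (expected %d)\n", object_inc(&obj), -EBUSY);
            return 0;
    }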
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 618ef77c302a..c5c17a62f509 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c | |||
@@ -239,16 +239,6 @@ static const char * const reg_type_str[] = { | |||
239 | [CONST_IMM] = "imm", | 239 | [CONST_IMM] = "imm", |
240 | }; | 240 | }; |
241 | 241 | ||
242 | static const struct { | ||
243 | int map_type; | ||
244 | int func_id; | ||
245 | } func_limit[] = { | ||
246 | {BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call}, | ||
247 | {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read}, | ||
248 | {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_output}, | ||
249 | {BPF_MAP_TYPE_STACK_TRACE, BPF_FUNC_get_stackid}, | ||
250 | }; | ||
251 | |||
252 | static void print_verifier_state(struct verifier_env *env) | 242 | static void print_verifier_state(struct verifier_env *env) |
253 | { | 243 | { |
254 | enum bpf_reg_type t; | 244 | enum bpf_reg_type t; |
@@ -921,27 +911,52 @@ static int check_func_arg(struct verifier_env *env, u32 regno, | |||
921 | 911 | ||
922 | static int check_map_func_compatibility(struct bpf_map *map, int func_id) | 912 | static int check_map_func_compatibility(struct bpf_map *map, int func_id) |
923 | { | 913 | { |
924 | bool bool_map, bool_func; | ||
925 | int i; | ||
926 | |||
927 | if (!map) | 914 | if (!map) |
928 | return 0; | 915 | return 0; |
929 | 916 | ||
930 | for (i = 0; i < ARRAY_SIZE(func_limit); i++) { | 917 | /* We need a two way check, first is from map perspective ... */ |
931 | bool_map = (map->map_type == func_limit[i].map_type); | 918 | switch (map->map_type) { |
932 | bool_func = (func_id == func_limit[i].func_id); | 919 | case BPF_MAP_TYPE_PROG_ARRAY: |
933 | /* only when map & func pair match it can continue. | 920 | if (func_id != BPF_FUNC_tail_call) |
934 | * don't allow any other map type to be passed into | 921 | goto error; |
935 | * the special func; | 922 | break; |
936 | */ | 923 | case BPF_MAP_TYPE_PERF_EVENT_ARRAY: |
937 | if (bool_func && bool_map != bool_func) { | 924 | if (func_id != BPF_FUNC_perf_event_read && |
938 | verbose("cannot pass map_type %d into func %d\n", | 925 | func_id != BPF_FUNC_perf_event_output) |
939 | map->map_type, func_id); | 926 | goto error; |
940 | return -EINVAL; | 927 | break; |
941 | } | 928 | case BPF_MAP_TYPE_STACK_TRACE: |
929 | if (func_id != BPF_FUNC_get_stackid) | ||
930 | goto error; | ||
931 | break; | ||
932 | default: | ||
933 | break; | ||
934 | } | ||
935 | |||
936 | /* ... and second from the function itself. */ | ||
937 | switch (func_id) { | ||
938 | case BPF_FUNC_tail_call: | ||
939 | if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) | ||
940 | goto error; | ||
941 | break; | ||
942 | case BPF_FUNC_perf_event_read: | ||
943 | case BPF_FUNC_perf_event_output: | ||
944 | if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) | ||
945 | goto error; | ||
946 | break; | ||
947 | case BPF_FUNC_get_stackid: | ||
948 | if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) | ||
949 | goto error; | ||
950 | break; | ||
951 | default: | ||
952 | break; | ||
942 | } | 953 | } |
943 | 954 | ||
944 | return 0; | 955 | return 0; |
956 | error: | ||
957 | verbose("cannot pass map_type %d into func %d\n", | ||
958 | map->map_type, func_id); | ||
959 | return -EINVAL; | ||
945 | } | 960 | } |
946 | 961 | ||
947 | static int check_call(struct verifier_env *env, int func_id) | 962 | static int check_call(struct verifier_env *env, int func_id) |
@@ -2030,7 +2045,6 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env) | |||
2030 | if (IS_ERR(map)) { | 2045 | if (IS_ERR(map)) { |
2031 | verbose("fd %d is not pointing to valid bpf_map\n", | 2046 | verbose("fd %d is not pointing to valid bpf_map\n", |
2032 | insn->imm); | 2047 | insn->imm); |
2033 | fdput(f); | ||
2034 | return PTR_ERR(map); | 2048 | return PTR_ERR(map); |
2035 | } | 2049 | } |
2036 | 2050 | ||
@@ -2050,15 +2064,18 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env) | |||
2050 | return -E2BIG; | 2064 | return -E2BIG; |
2051 | } | 2065 | } |
2052 | 2066 | ||
2053 | /* remember this map */ | ||
2054 | env->used_maps[env->used_map_cnt++] = map; | ||
2055 | |||
2056 | /* hold the map. If the program is rejected by verifier, | 2067 | /* hold the map. If the program is rejected by verifier, |
2057 | * the map will be released by release_maps() or it | 2068 | * the map will be released by release_maps() or it |
2058 | * will be used by the valid program until it's unloaded | 2069 | * will be used by the valid program until it's unloaded |
2059 | * and all maps are released in free_bpf_prog_info() | 2070 | * and all maps are released in free_bpf_prog_info() |
2060 | */ | 2071 | */ |
2061 | bpf_map_inc(map, false); | 2072 | map = bpf_map_inc(map, false); |
2073 | if (IS_ERR(map)) { | ||
2074 | fdput(f); | ||
2075 | return PTR_ERR(map); | ||
2076 | } | ||
2077 | env->used_maps[env->used_map_cnt++] = map; | ||
2078 | |||
2062 | fdput(f); | 2079 | fdput(f); |
2063 | next_insn: | 2080 | next_insn: |
2064 | insn++; | 2081 | insn++; |
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 671dc05c0b0f..909a7d31ffd3 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
@@ -2825,9 +2825,10 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf, | |||
2825 | size_t nbytes, loff_t off, bool threadgroup) | 2825 | size_t nbytes, loff_t off, bool threadgroup) |
2826 | { | 2826 | { |
2827 | struct task_struct *tsk; | 2827 | struct task_struct *tsk; |
2828 | struct cgroup_subsys *ss; | ||
2828 | struct cgroup *cgrp; | 2829 | struct cgroup *cgrp; |
2829 | pid_t pid; | 2830 | pid_t pid; |
2830 | int ret; | 2831 | int ssid, ret; |
2831 | 2832 | ||
2832 | if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0) | 2833 | if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0) |
2833 | return -EINVAL; | 2834 | return -EINVAL; |
@@ -2875,8 +2876,10 @@ out_unlock_rcu: | |||
2875 | rcu_read_unlock(); | 2876 | rcu_read_unlock(); |
2876 | out_unlock_threadgroup: | 2877 | out_unlock_threadgroup: |
2877 | percpu_up_write(&cgroup_threadgroup_rwsem); | 2878 | percpu_up_write(&cgroup_threadgroup_rwsem); |
2879 | for_each_subsys(ss, ssid) | ||
2880 | if (ss->post_attach) | ||
2881 | ss->post_attach(); | ||
2878 | cgroup_kn_unlock(of->kn); | 2882 | cgroup_kn_unlock(of->kn); |
2879 | cpuset_post_attach_flush(); | ||
2880 | return ret ?: nbytes; | 2883 | return ret ?: nbytes; |
2881 | } | 2884 | } |
2882 | 2885 | ||
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 00ab5c2b7c5b..1902956baba1 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
@@ -58,7 +58,6 @@ | |||
58 | #include <asm/uaccess.h> | 58 | #include <asm/uaccess.h> |
59 | #include <linux/atomic.h> | 59 | #include <linux/atomic.h> |
60 | #include <linux/mutex.h> | 60 | #include <linux/mutex.h> |
61 | #include <linux/workqueue.h> | ||
62 | #include <linux/cgroup.h> | 61 | #include <linux/cgroup.h> |
63 | #include <linux/wait.h> | 62 | #include <linux/wait.h> |
64 | 63 | ||
@@ -1016,7 +1015,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, | |||
1016 | } | 1015 | } |
1017 | } | 1016 | } |
1018 | 1017 | ||
1019 | void cpuset_post_attach_flush(void) | 1018 | static void cpuset_post_attach(void) |
1020 | { | 1019 | { |
1021 | flush_workqueue(cpuset_migrate_mm_wq); | 1020 | flush_workqueue(cpuset_migrate_mm_wq); |
1022 | } | 1021 | } |
@@ -2087,6 +2086,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = { | |||
2087 | .can_attach = cpuset_can_attach, | 2086 | .can_attach = cpuset_can_attach, |
2088 | .cancel_attach = cpuset_cancel_attach, | 2087 | .cancel_attach = cpuset_cancel_attach, |
2089 | .attach = cpuset_attach, | 2088 | .attach = cpuset_attach, |
2089 | .post_attach = cpuset_post_attach, | ||
2090 | .bind = cpuset_bind, | 2090 | .bind = cpuset_bind, |
2091 | .legacy_cftypes = files, | 2091 | .legacy_cftypes = files, |
2092 | .early_init = true, | 2092 | .early_init = true, |
diff --git a/kernel/events/core.c b/kernel/events/core.c index 52bedc5a5aaa..4e2ebf6f2f1f 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -412,7 +412,8 @@ int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, | |||
412 | if (ret || !write) | 412 | if (ret || !write) |
413 | return ret; | 413 | return ret; |
414 | 414 | ||
415 | if (sysctl_perf_cpu_time_max_percent == 100) { | 415 | if (sysctl_perf_cpu_time_max_percent == 100 || |
416 | sysctl_perf_cpu_time_max_percent == 0) { | ||
416 | printk(KERN_WARNING | 417 | printk(KERN_WARNING |
417 | "perf: Dynamic interrupt throttling disabled, can hang your system!\n"); | 418 | "perf: Dynamic interrupt throttling disabled, can hang your system!\n"); |
418 | WRITE_ONCE(perf_sample_allowed_ns, 0); | 419 | WRITE_ONCE(perf_sample_allowed_ns, 0); |
@@ -1105,6 +1106,7 @@ static void put_ctx(struct perf_event_context *ctx) | |||
1105 | * function. | 1106 | * function. |
1106 | * | 1107 | * |
1107 | * Lock order: | 1108 | * Lock order: |
1109 | * cred_guard_mutex | ||
1108 | * task_struct::perf_event_mutex | 1110 | * task_struct::perf_event_mutex |
1109 | * perf_event_context::mutex | 1111 | * perf_event_context::mutex |
1110 | * perf_event::child_mutex; | 1112 | * perf_event::child_mutex; |
@@ -3420,7 +3422,6 @@ static struct task_struct * | |||
3420 | find_lively_task_by_vpid(pid_t vpid) | 3422 | find_lively_task_by_vpid(pid_t vpid) |
3421 | { | 3423 | { |
3422 | struct task_struct *task; | 3424 | struct task_struct *task; |
3423 | int err; | ||
3424 | 3425 | ||
3425 | rcu_read_lock(); | 3426 | rcu_read_lock(); |
3426 | if (!vpid) | 3427 | if (!vpid) |
@@ -3434,16 +3435,7 @@ find_lively_task_by_vpid(pid_t vpid) | |||
3434 | if (!task) | 3435 | if (!task) |
3435 | return ERR_PTR(-ESRCH); | 3436 | return ERR_PTR(-ESRCH); |
3436 | 3437 | ||
3437 | /* Reuse ptrace permission checks for now. */ | ||
3438 | err = -EACCES; | ||
3439 | if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) | ||
3440 | goto errout; | ||
3441 | |||
3442 | return task; | 3438 | return task; |
3443 | errout: | ||
3444 | put_task_struct(task); | ||
3445 | return ERR_PTR(err); | ||
3446 | |||
3447 | } | 3439 | } |
3448 | 3440 | ||
3449 | /* | 3441 | /* |
@@ -8413,6 +8405,24 @@ SYSCALL_DEFINE5(perf_event_open, | |||
8413 | 8405 | ||
8414 | get_online_cpus(); | 8406 | get_online_cpus(); |
8415 | 8407 | ||
8408 | if (task) { | ||
8409 | err = mutex_lock_interruptible(&task->signal->cred_guard_mutex); | ||
8410 | if (err) | ||
8411 | goto err_cpus; | ||
8412 | |||
8413 | /* | ||
8414 | * Reuse ptrace permission checks for now. | ||
8415 | * | ||
8416 | * We must hold cred_guard_mutex across this and any potential | ||
8417 | * perf_install_in_context() call for this new event to | ||
8418 | * serialize against exec() altering our credentials (and the | ||
8419 | * perf_event_exit_task() that it could imply). | ||
8420 | */ | ||
8421 | err = -EACCES; | ||
8422 | if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) | ||
8423 | goto err_cred; | ||
8424 | } | ||
8425 | |||
8416 | if (flags & PERF_FLAG_PID_CGROUP) | 8426 | if (flags & PERF_FLAG_PID_CGROUP) |
8417 | cgroup_fd = pid; | 8427 | cgroup_fd = pid; |
8418 | 8428 | ||
@@ -8420,7 +8430,7 @@ SYSCALL_DEFINE5(perf_event_open, | |||
8420 | NULL, NULL, cgroup_fd); | 8430 | NULL, NULL, cgroup_fd); |
8421 | if (IS_ERR(event)) { | 8431 | if (IS_ERR(event)) { |
8422 | err = PTR_ERR(event); | 8432 | err = PTR_ERR(event); |
8423 | goto err_cpus; | 8433 | goto err_cred; |
8424 | } | 8434 | } |
8425 | 8435 | ||
8426 | if (is_sampling_event(event)) { | 8436 | if (is_sampling_event(event)) { |
@@ -8479,11 +8489,6 @@ SYSCALL_DEFINE5(perf_event_open, | |||
8479 | goto err_context; | 8489 | goto err_context; |
8480 | } | 8490 | } |
8481 | 8491 | ||
8482 | if (task) { | ||
8483 | put_task_struct(task); | ||
8484 | task = NULL; | ||
8485 | } | ||
8486 | |||
8487 | /* | 8492 | /* |
8488 | * Look up the group leader (we will attach this event to it): | 8493 | * Look up the group leader (we will attach this event to it): |
8489 | */ | 8494 | */ |
@@ -8581,6 +8586,11 @@ SYSCALL_DEFINE5(perf_event_open, | |||
8581 | 8586 | ||
8582 | WARN_ON_ONCE(ctx->parent_ctx); | 8587 | WARN_ON_ONCE(ctx->parent_ctx); |
8583 | 8588 | ||
8589 | /* | ||
8590 | * This is the point of no return; we cannot fail hereafter. This is | ||
8591 | * where we start modifying current state. | ||
8592 | */ | ||
8593 | |||
8584 | if (move_group) { | 8594 | if (move_group) { |
8585 | /* | 8595 | /* |
8586 | * See perf_event_ctx_lock() for comments on the details | 8596 | * See perf_event_ctx_lock() for comments on the details |
@@ -8652,6 +8662,11 @@ SYSCALL_DEFINE5(perf_event_open, | |||
8652 | mutex_unlock(&gctx->mutex); | 8662 | mutex_unlock(&gctx->mutex); |
8653 | mutex_unlock(&ctx->mutex); | 8663 | mutex_unlock(&ctx->mutex); |
8654 | 8664 | ||
8665 | if (task) { | ||
8666 | mutex_unlock(&task->signal->cred_guard_mutex); | ||
8667 | put_task_struct(task); | ||
8668 | } | ||
8669 | |||
8655 | put_online_cpus(); | 8670 | put_online_cpus(); |
8656 | 8671 | ||
8657 | mutex_lock(¤t->perf_event_mutex); | 8672 | mutex_lock(¤t->perf_event_mutex); |
@@ -8684,6 +8699,9 @@ err_alloc: | |||
8684 | */ | 8699 | */ |
8685 | if (!event_file) | 8700 | if (!event_file) |
8686 | free_event(event); | 8701 | free_event(event); |
8702 | err_cred: | ||
8703 | if (task) | ||
8704 | mutex_unlock(&task->signal->cred_guard_mutex); | ||
8687 | err_cpus: | 8705 | err_cpus: |
8688 | put_online_cpus(); | 8706 | put_online_cpus(); |
8689 | err_task: | 8707 | err_task: |
@@ -8968,6 +8986,9 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn) | |||
8968 | 8986 | ||
8969 | /* | 8987 | /* |
8970 | * When a child task exits, feed back event values to parent events. | 8988 | * When a child task exits, feed back event values to parent events. |
8989 | * | ||
8990 | * Can be called with cred_guard_mutex held when called from | ||
8991 | * install_exec_creds(). | ||
8971 | */ | 8992 | */ |
8972 | void perf_event_exit_task(struct task_struct *child) | 8993 | void perf_event_exit_task(struct task_struct *child) |
8973 | { | 8994 | { |
diff --git a/kernel/kcov.c b/kernel/kcov.c index 3efbee0834a8..a02f2dddd1d7 100644 --- a/kernel/kcov.c +++ b/kernel/kcov.c | |||
@@ -1,5 +1,6 @@ | |||
1 | #define pr_fmt(fmt) "kcov: " fmt | 1 | #define pr_fmt(fmt) "kcov: " fmt |
2 | 2 | ||
3 | #define DISABLE_BRANCH_PROFILING | ||
3 | #include <linux/compiler.h> | 4 | #include <linux/compiler.h> |
4 | #include <linux/types.h> | 5 | #include <linux/types.h> |
5 | #include <linux/file.h> | 6 | #include <linux/file.h> |
@@ -43,7 +44,7 @@ struct kcov { | |||
43 | * Entry point from instrumented code. | 44 | * Entry point from instrumented code. |
44 | * This is called once per basic-block/edge. | 45 | * This is called once per basic-block/edge. |
45 | */ | 46 | */ |
46 | void __sanitizer_cov_trace_pc(void) | 47 | void notrace __sanitizer_cov_trace_pc(void) |
47 | { | 48 | { |
48 | struct task_struct *t; | 49 | struct task_struct *t; |
49 | enum kcov_mode mode; | 50 | enum kcov_mode mode; |
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c index 8d34308ea449..1391d3ee3b86 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c | |||
@@ -1415,6 +1415,9 @@ static int __init crash_save_vmcoreinfo_init(void) | |||
1415 | VMCOREINFO_OFFSET(page, lru); | 1415 | VMCOREINFO_OFFSET(page, lru); |
1416 | VMCOREINFO_OFFSET(page, _mapcount); | 1416 | VMCOREINFO_OFFSET(page, _mapcount); |
1417 | VMCOREINFO_OFFSET(page, private); | 1417 | VMCOREINFO_OFFSET(page, private); |
1418 | VMCOREINFO_OFFSET(page, compound_dtor); | ||
1419 | VMCOREINFO_OFFSET(page, compound_order); | ||
1420 | VMCOREINFO_OFFSET(page, compound_head); | ||
1418 | VMCOREINFO_OFFSET(pglist_data, node_zones); | 1421 | VMCOREINFO_OFFSET(pglist_data, node_zones); |
1419 | VMCOREINFO_OFFSET(pglist_data, nr_zones); | 1422 | VMCOREINFO_OFFSET(pglist_data, nr_zones); |
1420 | #ifdef CONFIG_FLAT_NODE_MEM_MAP | 1423 | #ifdef CONFIG_FLAT_NODE_MEM_MAP |
@@ -1447,8 +1450,8 @@ static int __init crash_save_vmcoreinfo_init(void) | |||
1447 | #ifdef CONFIG_X86 | 1450 | #ifdef CONFIG_X86 |
1448 | VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE); | 1451 | VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE); |
1449 | #endif | 1452 | #endif |
1450 | #ifdef CONFIG_HUGETLBFS | 1453 | #ifdef CONFIG_HUGETLB_PAGE |
1451 | VMCOREINFO_SYMBOL(free_huge_page); | 1454 | VMCOREINFO_NUMBER(HUGETLB_PAGE_DTOR); |
1452 | #endif | 1455 | #endif |
1453 | 1456 | ||
1454 | arch_crash_save_vmcoreinfo(); | 1457 | arch_crash_save_vmcoreinfo(); |
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index ed9410936a22..78c1c0ee6dc1 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c | |||
@@ -2176,15 +2176,37 @@ cache_hit: | |||
2176 | chain->irq_context = hlock->irq_context; | 2176 | chain->irq_context = hlock->irq_context; |
2177 | i = get_first_held_lock(curr, hlock); | 2177 | i = get_first_held_lock(curr, hlock); |
2178 | chain->depth = curr->lockdep_depth + 1 - i; | 2178 | chain->depth = curr->lockdep_depth + 1 - i; |
2179 | |||
2180 | BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks)); | ||
2181 | BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks)); | ||
2182 | BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes)); | ||
2183 | |||
2179 | if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { | 2184 | if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { |
2180 | chain->base = nr_chain_hlocks; | 2185 | chain->base = nr_chain_hlocks; |
2181 | nr_chain_hlocks += chain->depth; | ||
2182 | for (j = 0; j < chain->depth - 1; j++, i++) { | 2186 | for (j = 0; j < chain->depth - 1; j++, i++) { |
2183 | int lock_id = curr->held_locks[i].class_idx - 1; | 2187 | int lock_id = curr->held_locks[i].class_idx - 1; |
2184 | chain_hlocks[chain->base + j] = lock_id; | 2188 | chain_hlocks[chain->base + j] = lock_id; |
2185 | } | 2189 | } |
2186 | chain_hlocks[chain->base + j] = class - lock_classes; | 2190 | chain_hlocks[chain->base + j] = class - lock_classes; |
2187 | } | 2191 | } |
2192 | |||
2193 | if (nr_chain_hlocks < MAX_LOCKDEP_CHAIN_HLOCKS) | ||
2194 | nr_chain_hlocks += chain->depth; | ||
2195 | |||
2196 | #ifdef CONFIG_DEBUG_LOCKDEP | ||
2197 | /* | ||
2198 | * Important for check_no_collision(). | ||
2199 | */ | ||
2200 | if (unlikely(nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)) { | ||
2201 | if (debug_locks_off_graph_unlock()) | ||
2202 | return 0; | ||
2203 | |||
2204 | print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!"); | ||
2205 | dump_stack(); | ||
2206 | return 0; | ||
2207 | } | ||
2208 | #endif | ||
2209 | |||
2188 | hlist_add_head_rcu(&chain->entry, hash_head); | 2210 | hlist_add_head_rcu(&chain->entry, hash_head); |
2189 | debug_atomic_inc(chain_lookup_misses); | 2211 | debug_atomic_inc(chain_lookup_misses); |
2190 | inc_chains(); | 2212 | inc_chains(); |
@@ -2932,6 +2954,11 @@ static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock) | |||
2932 | return 1; | 2954 | return 1; |
2933 | } | 2955 | } |
2934 | 2956 | ||
2957 | static inline unsigned int task_irq_context(struct task_struct *task) | ||
2958 | { | ||
2959 | return 2 * !!task->hardirq_context + !!task->softirq_context; | ||
2960 | } | ||
2961 | |||
2935 | static int separate_irq_context(struct task_struct *curr, | 2962 | static int separate_irq_context(struct task_struct *curr, |
2936 | struct held_lock *hlock) | 2963 | struct held_lock *hlock) |
2937 | { | 2964 | { |
@@ -2940,8 +2967,6 @@ static int separate_irq_context(struct task_struct *curr, | |||
2940 | /* | 2967 | /* |
2941 | * Keep track of points where we cross into an interrupt context: | 2968 | * Keep track of points where we cross into an interrupt context: |
2942 | */ | 2969 | */ |
2943 | hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) + | ||
2944 | curr->softirq_context; | ||
2945 | if (depth) { | 2970 | if (depth) { |
2946 | struct held_lock *prev_hlock; | 2971 | struct held_lock *prev_hlock; |
2947 | 2972 | ||
@@ -2973,6 +2998,11 @@ static inline int mark_irqflags(struct task_struct *curr, | |||
2973 | return 1; | 2998 | return 1; |
2974 | } | 2999 | } |
2975 | 3000 | ||
3001 | static inline unsigned int task_irq_context(struct task_struct *task) | ||
3002 | { | ||
3003 | return 0; | ||
3004 | } | ||
3005 | |||
2976 | static inline int separate_irq_context(struct task_struct *curr, | 3006 | static inline int separate_irq_context(struct task_struct *curr, |
2977 | struct held_lock *hlock) | 3007 | struct held_lock *hlock) |
2978 | { | 3008 | { |
@@ -3241,6 +3271,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
3241 | hlock->acquire_ip = ip; | 3271 | hlock->acquire_ip = ip; |
3242 | hlock->instance = lock; | 3272 | hlock->instance = lock; |
3243 | hlock->nest_lock = nest_lock; | 3273 | hlock->nest_lock = nest_lock; |
3274 | hlock->irq_context = task_irq_context(curr); | ||
3244 | hlock->trylock = trylock; | 3275 | hlock->trylock = trylock; |
3245 | hlock->read = read; | 3276 | hlock->read = read; |
3246 | hlock->check = check; | 3277 | hlock->check = check; |
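The new task_irq_context() helper condenses the hard- and soft-interrupt state into a two-bit value that is now stored in hlock->irq_context at acquire time, with a stub returning 0 in the other configuration. A standalone illustration of the encoding, not kernel code:

    #include <stdio.h>

    /* Mirrors: 2 * !!task->hardirq_context + !!task->softirq_context */
    static unsigned int irq_context(int hardirq_context, int softirq_context)
    {
            return 2u * !!hardirq_context + !!softirq_context;
    }

    int main(void)
    {
            printf("task context:    %u\n", irq_context(0, 0));  /* 0 */
            printf("softirq context: %u\n", irq_context(0, 3));  /* 1: any nonzero maps to 1 */
            printf("hardirq context: %u\n", irq_context(1, 0));  /* 2 */
            printf("both:            %u\n", irq_context(2, 1));  /* 3 */
            return 0;
    }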
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c index dbb61a302548..a0f61effad25 100644 --- a/kernel/locking/lockdep_proc.c +++ b/kernel/locking/lockdep_proc.c | |||
@@ -141,6 +141,8 @@ static int lc_show(struct seq_file *m, void *v) | |||
141 | int i; | 141 | int i; |
142 | 142 | ||
143 | if (v == SEQ_START_TOKEN) { | 143 | if (v == SEQ_START_TOKEN) { |
144 | if (nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS) | ||
145 | seq_printf(m, "(buggered) "); | ||
144 | seq_printf(m, "all lock chains:\n"); | 146 | seq_printf(m, "all lock chains:\n"); |
145 | return 0; | 147 | return 0; |
146 | } | 148 | } |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 8b489fcac37b..d1f7149f8704 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -596,17 +596,8 @@ bool sched_can_stop_tick(struct rq *rq) | |||
596 | return false; | 596 | return false; |
597 | 597 | ||
598 | /* | 598 | /* |
599 | * FIFO realtime policy runs the highest priority task (after DEADLINE). | 599 | * If there is more than one RR task, we need the tick to effect the |
600 | * Other runnable tasks are of a lower priority. The scheduler tick | 600 | * actual RR behaviour. |
601 | * isn't needed. | ||
602 | */ | ||
603 | fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running; | ||
604 | if (fifo_nr_running) | ||
605 | return true; | ||
606 | |||
607 | /* | ||
608 | * Round-robin realtime tasks time slice with other tasks at the same | ||
609 | * realtime priority. | ||
610 | */ | 601 | */ |
611 | if (rq->rt.rr_nr_running) { | 602 | if (rq->rt.rr_nr_running) { |
612 | if (rq->rt.rr_nr_running == 1) | 603 | if (rq->rt.rr_nr_running == 1) |
@@ -615,8 +606,20 @@ bool sched_can_stop_tick(struct rq *rq) | |||
615 | return false; | 606 | return false; |
616 | } | 607 | } |
617 | 608 | ||
618 | /* Normal multitasking need periodic preemption checks */ | 609 | /* |
619 | if (rq->cfs.nr_running > 1) | 610 | * If there are no RR tasks, but FIFO tasks, we can skip the tick, no |
611 | * forced preemption between FIFO tasks. | ||
612 | */ | ||
613 | fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running; | ||
614 | if (fifo_nr_running) | ||
615 | return true; | ||
616 | |||
617 | /* | ||
618 | * If there are no DL, RR or FIFO tasks, there must only be CFS tasks left; | ||
619 | * if there's more than one we need the tick for involuntary | ||
620 | * preemption. | ||
621 | */ | ||
622 | if (rq->nr_running > 1) | ||
620 | return false; | 623 | return false; |
621 | 624 | ||
622 | return true; | 625 | return true; |
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 05ddc0820771..6f965864cc02 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -2095,8 +2095,13 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file) | |||
2095 | trace_create_file("filter", 0644, file->dir, file, | 2095 | trace_create_file("filter", 0644, file->dir, file, |
2096 | &ftrace_event_filter_fops); | 2096 | &ftrace_event_filter_fops); |
2097 | 2097 | ||
2098 | trace_create_file("trigger", 0644, file->dir, file, | 2098 | /* |
2099 | &event_trigger_fops); | 2099 | * Only event directories that can be enabled should have |
2100 | * triggers. | ||
2101 | */ | ||
2102 | if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) | ||
2103 | trace_create_file("trigger", 0644, file->dir, file, | ||
2104 | &event_trigger_fops); | ||
2100 | 2105 | ||
2101 | trace_create_file("format", 0444, file->dir, call, | 2106 | trace_create_file("format", 0444, file->dir, call, |
2102 | &ftrace_event_format_fops); | 2107 | &ftrace_event_format_fops); |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 2232ae3e3ad6..3bfdff06eea7 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -666,6 +666,35 @@ static void set_work_pool_and_clear_pending(struct work_struct *work, | |||
666 | */ | 666 | */ |
667 | smp_wmb(); | 667 | smp_wmb(); |
668 | set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0); | 668 | set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0); |
669 | /* | ||
670 | * The following mb guarantees that the previous clear of a PENDING bit | ||
671 | * will not be reordered with any speculative LOADS or STORES from | ||
672 | * work->current_func, which is executed afterwards. This possible | ||
673 | * reordering can lead to a missed execution on an attempt to queue | ||
674 | * the same @work. E.g. consider this case: | ||
675 | * | ||
676 | * CPU#0 CPU#1 | ||
677 | * ---------------------------- -------------------------------- | ||
678 | * | ||
679 | * 1 STORE event_indicated | ||
680 | * 2 queue_work_on() { | ||
681 | * 3 test_and_set_bit(PENDING) | ||
682 | * 4 } set_..._and_clear_pending() { | ||
683 | * 5 set_work_data() # clear bit | ||
684 | * 6 smp_mb() | ||
685 | * 7 work->current_func() { | ||
686 | * 8 LOAD event_indicated | ||
687 | * } | ||
688 | * | ||
689 | * Without an explicit full barrier, speculative LOAD on line 8 can | ||
690 | * be executed before CPU#0 does STORE on line 1. If that happens, | ||
691 | * CPU#0 observes the PENDING bit is still set and new execution of | ||
692 | * a @work is not queued, in the hope that CPU#1 will eventually | ||
693 | * finish the queued @work. Meanwhile CPU#1 does not see | ||
694 | * event_indicated is set, because speculative LOAD was executed | ||
695 | * before actual STORE. | ||
696 | */ | ||
697 | smp_mb(); | ||
669 | } | 698 | } |
670 | 699 | ||
671 | static void clear_work_data(struct work_struct *work) | 700 | static void clear_work_data(struct work_struct *work) |
diff --git a/lib/stackdepot.c b/lib/stackdepot.c index 654c9d87e83a..53ad6c0831ae 100644 --- a/lib/stackdepot.c +++ b/lib/stackdepot.c | |||
@@ -42,12 +42,14 @@ | |||
42 | 42 | ||
43 | #define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8) | 43 | #define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8) |
44 | 44 | ||
45 | #define STACK_ALLOC_NULL_PROTECTION_BITS 1 | ||
45 | #define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */ | 46 | #define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */ |
46 | #define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER)) | 47 | #define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER)) |
47 | #define STACK_ALLOC_ALIGN 4 | 48 | #define STACK_ALLOC_ALIGN 4 |
48 | #define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \ | 49 | #define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \ |
49 | STACK_ALLOC_ALIGN) | 50 | STACK_ALLOC_ALIGN) |
50 | #define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - STACK_ALLOC_OFFSET_BITS) | 51 | #define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \ |
52 | STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS) | ||
51 | #define STACK_ALLOC_SLABS_CAP 1024 | 53 | #define STACK_ALLOC_SLABS_CAP 1024 |
52 | #define STACK_ALLOC_MAX_SLABS \ | 54 | #define STACK_ALLOC_MAX_SLABS \ |
53 | (((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \ | 55 | (((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \ |
@@ -59,6 +61,7 @@ union handle_parts { | |||
59 | struct { | 61 | struct { |
60 | u32 slabindex : STACK_ALLOC_INDEX_BITS; | 62 | u32 slabindex : STACK_ALLOC_INDEX_BITS; |
61 | u32 offset : STACK_ALLOC_OFFSET_BITS; | 63 | u32 offset : STACK_ALLOC_OFFSET_BITS; |
64 | u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS; | ||
62 | }; | 65 | }; |
63 | }; | 66 | }; |
64 | 67 | ||
@@ -136,6 +139,7 @@ static struct stack_record *depot_alloc_stack(unsigned long *entries, int size, | |||
136 | stack->size = size; | 139 | stack->size = size; |
137 | stack->handle.slabindex = depot_index; | 140 | stack->handle.slabindex = depot_index; |
138 | stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN; | 141 | stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN; |
142 | stack->handle.valid = 1; | ||
139 | memcpy(stack->entries, entries, size * sizeof(unsigned long)); | 143 | memcpy(stack->entries, entries, size * sizeof(unsigned long)); |
140 | depot_offset += required_size; | 144 | depot_offset += required_size; |
141 | 145 | ||
@@ -210,10 +214,6 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace, | |||
210 | goto fast_exit; | 214 | goto fast_exit; |
211 | 215 | ||
212 | hash = hash_stack(trace->entries, trace->nr_entries); | 216 | hash = hash_stack(trace->entries, trace->nr_entries); |
213 | /* Bad luck, we won't store this stack. */ | ||
214 | if (hash == 0) | ||
215 | goto exit; | ||
216 | |||
217 | bucket = &stack_table[hash & STACK_HASH_MASK]; | 217 | bucket = &stack_table[hash & STACK_HASH_MASK]; |
218 | 218 | ||
219 | /* | 219 | /* |
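Reserving a one-bit 'valid' field in the stack depot handle means every stored stack produces a nonzero handle, so a zero depot_stack_handle_t can unambiguously mean "no stack recorded"; the separate hash == 0 bail-out above is dropped in the same patch. A small sketch of the resulting 32-bit handle split, assuming PAGE_SHIFT = 12 for the arithmetic (the exact bit placement is up to the compiler, as with any bitfield):

    #include <stdint.h>
    #include <stdio.h>

    /* With PAGE_SHIFT = 12 (assumed), STACK_ALLOC_ORDER = 2 and
     * STACK_ALLOC_ALIGN = 4: offset gets 2 + 12 - 4 = 10 bits and
     * slabindex gets 32 - 1 - 10 = 21 bits. */
    union handle_parts {
            uint32_t handle;
            struct {
                    uint32_t slabindex : 21;
                    uint32_t offset    : 10;
                    uint32_t valid     : 1;   /* the new null-protection bit */
            };
    };

    int main(void)
    {
            union handle_parts h = { 0 };

            h.slabindex = 3;
            h.offset    = 7;
            h.valid     = 1;   /* set for every stored stack, so handle != 0 */

            printf("handle = %#x (never zero for a real stack)\n", (unsigned int)h.handle);
            return 0;
    }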
diff --git a/mm/compaction.c b/mm/compaction.c index ccf97b02b85f..8fa254043801 100644 --- a/mm/compaction.c +++ b/mm/compaction.c | |||
@@ -852,16 +852,8 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn, | |||
852 | pfn = isolate_migratepages_block(cc, pfn, block_end_pfn, | 852 | pfn = isolate_migratepages_block(cc, pfn, block_end_pfn, |
853 | ISOLATE_UNEVICTABLE); | 853 | ISOLATE_UNEVICTABLE); |
854 | 854 | ||
855 | /* | 855 | if (!pfn) |
856 | * In case of fatal failure, release everything that might | ||
857 | * have been isolated in the previous iteration, and signal | ||
858 | * the failure back to caller. | ||
859 | */ | ||
860 | if (!pfn) { | ||
861 | putback_movable_pages(&cc->migratepages); | ||
862 | cc->nr_migratepages = 0; | ||
863 | break; | 856 | break; |
864 | } | ||
865 | 857 | ||
866 | if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) | 858 | if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) |
867 | break; | 859 | break; |
@@ -1741,7 +1733,7 @@ void compaction_unregister_node(struct node *node) | |||
1741 | 1733 | ||
1742 | static inline bool kcompactd_work_requested(pg_data_t *pgdat) | 1734 | static inline bool kcompactd_work_requested(pg_data_t *pgdat) |
1743 | { | 1735 | { |
1744 | return pgdat->kcompactd_max_order > 0; | 1736 | return pgdat->kcompactd_max_order > 0 || kthread_should_stop(); |
1745 | } | 1737 | } |
1746 | 1738 | ||
1747 | static bool kcompactd_node_suitable(pg_data_t *pgdat) | 1739 | static bool kcompactd_node_suitable(pg_data_t *pgdat) |
@@ -1805,6 +1797,8 @@ static void kcompactd_do_work(pg_data_t *pgdat) | |||
1805 | INIT_LIST_HEAD(&cc.freepages); | 1797 | INIT_LIST_HEAD(&cc.freepages); |
1806 | INIT_LIST_HEAD(&cc.migratepages); | 1798 | INIT_LIST_HEAD(&cc.migratepages); |
1807 | 1799 | ||
1800 | if (kthread_should_stop()) | ||
1801 | return; | ||
1808 | status = compact_zone(zone, &cc); | 1802 | status = compact_zone(zone, &cc); |
1809 | 1803 | ||
1810 | if (zone_watermark_ok(zone, cc.order, low_wmark_pages(zone), | 1804 | if (zone_watermark_ok(zone, cc.order, low_wmark_pages(zone), |
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 86f9f8b82f8e..f7daa7de8f48 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -232,7 +232,7 @@ retry: | |||
232 | return READ_ONCE(huge_zero_page); | 232 | return READ_ONCE(huge_zero_page); |
233 | } | 233 | } |
234 | 234 | ||
235 | static void put_huge_zero_page(void) | 235 | void put_huge_zero_page(void) |
236 | { | 236 | { |
237 | /* | 237 | /* |
238 | * Counter should never go to zero here. Only shrinker can put | 238 | * Counter should never go to zero here. Only shrinker can put |
@@ -1684,12 +1684,12 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, | |||
1684 | if (vma_is_dax(vma)) { | 1684 | if (vma_is_dax(vma)) { |
1685 | spin_unlock(ptl); | 1685 | spin_unlock(ptl); |
1686 | if (is_huge_zero_pmd(orig_pmd)) | 1686 | if (is_huge_zero_pmd(orig_pmd)) |
1687 | put_huge_zero_page(); | 1687 | tlb_remove_page(tlb, pmd_page(orig_pmd)); |
1688 | } else if (is_huge_zero_pmd(orig_pmd)) { | 1688 | } else if (is_huge_zero_pmd(orig_pmd)) { |
1689 | pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd)); | 1689 | pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd)); |
1690 | atomic_long_dec(&tlb->mm->nr_ptes); | 1690 | atomic_long_dec(&tlb->mm->nr_ptes); |
1691 | spin_unlock(ptl); | 1691 | spin_unlock(ptl); |
1692 | put_huge_zero_page(); | 1692 | tlb_remove_page(tlb, pmd_page(orig_pmd)); |
1693 | } else { | 1693 | } else { |
1694 | struct page *page = pmd_page(orig_pmd); | 1694 | struct page *page = pmd_page(orig_pmd); |
1695 | page_remove_rmap(page, true); | 1695 | page_remove_rmap(page, true); |
@@ -1960,10 +1960,9 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma, | |||
1960 | * page fault if needed. | 1960 | * page fault if needed. |
1961 | */ | 1961 | */ |
1962 | return 0; | 1962 | return 0; |
1963 | if (vma->vm_ops) | 1963 | if (vma->vm_ops || (vm_flags & VM_NO_THP)) |
1964 | /* khugepaged not yet working on file or special mappings */ | 1964 | /* khugepaged not yet working on file or special mappings */ |
1965 | return 0; | 1965 | return 0; |
1966 | VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma); | ||
1967 | hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; | 1966 | hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; |
1968 | hend = vma->vm_end & HPAGE_PMD_MASK; | 1967 | hend = vma->vm_end & HPAGE_PMD_MASK; |
1969 | if (hstart < hend) | 1968 | if (hstart < hend) |
@@ -2352,8 +2351,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma) | |||
2352 | return false; | 2351 | return false; |
2353 | if (is_vma_temporary_stack(vma)) | 2352 | if (is_vma_temporary_stack(vma)) |
2354 | return false; | 2353 | return false; |
2355 | VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma); | 2354 | return !(vma->vm_flags & VM_NO_THP); |
2356 | return true; | ||
2357 | } | 2355 | } |
2358 | 2356 | ||
2359 | static void collapse_huge_page(struct mm_struct *mm, | 2357 | static void collapse_huge_page(struct mm_struct *mm, |
@@ -3454,7 +3452,7 @@ next: | |||
3454 | } | 3452 | } |
3455 | } | 3453 | } |
3456 | 3454 | ||
3457 | pr_info("%lu of %lu THP split", split, total); | 3455 | pr_info("%lu of %lu THP split\n", split, total); |
3458 | 3456 | ||
3459 | return 0; | 3457 | return 0; |
3460 | } | 3458 | } |
@@ -3465,7 +3463,7 @@ static int __init split_huge_pages_debugfs(void) | |||
3465 | { | 3463 | { |
3466 | void *ret; | 3464 | void *ret; |
3467 | 3465 | ||
3468 | ret = debugfs_create_file("split_huge_pages", 0644, NULL, NULL, | 3466 | ret = debugfs_create_file("split_huge_pages", 0200, NULL, NULL, |
3469 | &split_huge_pages_fops); | 3467 | &split_huge_pages_fops); |
3470 | if (!ret) | 3468 | if (!ret) |
3471 | pr_warn("Failed to create split_huge_pages in debugfs"); | 3469 | pr_warn("Failed to create split_huge_pages in debugfs"); |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 36db05fa8acb..fe787f5c41bd 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -207,6 +207,7 @@ static void mem_cgroup_oom_notify(struct mem_cgroup *memcg); | |||
207 | /* "mc" and its members are protected by cgroup_mutex */ | 207 | /* "mc" and its members are protected by cgroup_mutex */ |
208 | static struct move_charge_struct { | 208 | static struct move_charge_struct { |
209 | spinlock_t lock; /* for from, to */ | 209 | spinlock_t lock; /* for from, to */ |
210 | struct mm_struct *mm; | ||
210 | struct mem_cgroup *from; | 211 | struct mem_cgroup *from; |
211 | struct mem_cgroup *to; | 212 | struct mem_cgroup *to; |
212 | unsigned long flags; | 213 | unsigned long flags; |
@@ -4667,6 +4668,8 @@ static void __mem_cgroup_clear_mc(void) | |||
4667 | 4668 | ||
4668 | static void mem_cgroup_clear_mc(void) | 4669 | static void mem_cgroup_clear_mc(void) |
4669 | { | 4670 | { |
4671 | struct mm_struct *mm = mc.mm; | ||
4672 | |||
4670 | /* | 4673 | /* |
4671 | * we must clear moving_task before waking up waiters at the end of | 4674 | * we must clear moving_task before waking up waiters at the end of |
4672 | * task migration. | 4675 | * task migration. |
@@ -4676,7 +4679,10 @@ static void mem_cgroup_clear_mc(void) | |||
4676 | spin_lock(&mc.lock); | 4679 | spin_lock(&mc.lock); |
4677 | mc.from = NULL; | 4680 | mc.from = NULL; |
4678 | mc.to = NULL; | 4681 | mc.to = NULL; |
4682 | mc.mm = NULL; | ||
4679 | spin_unlock(&mc.lock); | 4683 | spin_unlock(&mc.lock); |
4684 | |||
4685 | mmput(mm); | ||
4680 | } | 4686 | } |
4681 | 4687 | ||
4682 | static int mem_cgroup_can_attach(struct cgroup_taskset *tset) | 4688 | static int mem_cgroup_can_attach(struct cgroup_taskset *tset) |
@@ -4733,6 +4739,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset) | |||
4733 | VM_BUG_ON(mc.moved_swap); | 4739 | VM_BUG_ON(mc.moved_swap); |
4734 | 4740 | ||
4735 | spin_lock(&mc.lock); | 4741 | spin_lock(&mc.lock); |
4742 | mc.mm = mm; | ||
4736 | mc.from = from; | 4743 | mc.from = from; |
4737 | mc.to = memcg; | 4744 | mc.to = memcg; |
4738 | mc.flags = move_flags; | 4745 | mc.flags = move_flags; |
@@ -4742,8 +4749,9 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset) | |||
4742 | ret = mem_cgroup_precharge_mc(mm); | 4749 | ret = mem_cgroup_precharge_mc(mm); |
4743 | if (ret) | 4750 | if (ret) |
4744 | mem_cgroup_clear_mc(); | 4751 | mem_cgroup_clear_mc(); |
4752 | } else { | ||
4753 | mmput(mm); | ||
4745 | } | 4754 | } |
4746 | mmput(mm); | ||
4747 | return ret; | 4755 | return ret; |
4748 | } | 4756 | } |
4749 | 4757 | ||
@@ -4852,11 +4860,11 @@ put: /* get_mctgt_type() gets the page */ | |||
4852 | return ret; | 4860 | return ret; |
4853 | } | 4861 | } |
4854 | 4862 | ||
4855 | static void mem_cgroup_move_charge(struct mm_struct *mm) | 4863 | static void mem_cgroup_move_charge(void) |
4856 | { | 4864 | { |
4857 | struct mm_walk mem_cgroup_move_charge_walk = { | 4865 | struct mm_walk mem_cgroup_move_charge_walk = { |
4858 | .pmd_entry = mem_cgroup_move_charge_pte_range, | 4866 | .pmd_entry = mem_cgroup_move_charge_pte_range, |
4859 | .mm = mm, | 4867 | .mm = mc.mm, |
4860 | }; | 4868 | }; |
4861 | 4869 | ||
4862 | lru_add_drain_all(); | 4870 | lru_add_drain_all(); |
@@ -4868,7 +4876,7 @@ static void mem_cgroup_move_charge(struct mm_struct *mm) | |||
4868 | atomic_inc(&mc.from->moving_account); | 4876 | atomic_inc(&mc.from->moving_account); |
4869 | synchronize_rcu(); | 4877 | synchronize_rcu(); |
4870 | retry: | 4878 | retry: |
4871 | if (unlikely(!down_read_trylock(&mm->mmap_sem))) { | 4879 | if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) { |
4872 | /* | 4880 | /* |
4873 | * Someone who is holding the mmap_sem might be waiting in | 4881 | * Someone who is holding the mmap_sem might be waiting in |
4874 | * waitq. So we cancel all extra charges, wake up all waiters, | 4882 | * waitq. So we cancel all extra charges, wake up all waiters, |
@@ -4885,23 +4893,16 @@ retry: | |||
4885 | * additional charge, the page walk just aborts. | 4893 | * additional charge, the page walk just aborts. |
4886 | */ | 4894 | */ |
4887 | walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk); | 4895 | walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk); |
4888 | up_read(&mm->mmap_sem); | 4896 | up_read(&mc.mm->mmap_sem); |
4889 | atomic_dec(&mc.from->moving_account); | 4897 | atomic_dec(&mc.from->moving_account); |
4890 | } | 4898 | } |
4891 | 4899 | ||
4892 | static void mem_cgroup_move_task(struct cgroup_taskset *tset) | 4900 | static void mem_cgroup_move_task(void) |
4893 | { | 4901 | { |
4894 | struct cgroup_subsys_state *css; | 4902 | if (mc.to) { |
4895 | struct task_struct *p = cgroup_taskset_first(tset, &css); | 4903 | mem_cgroup_move_charge(); |
4896 | struct mm_struct *mm = get_task_mm(p); | ||
4897 | |||
4898 | if (mm) { | ||
4899 | if (mc.to) | ||
4900 | mem_cgroup_move_charge(mm); | ||
4901 | mmput(mm); | ||
4902 | } | ||
4903 | if (mc.to) | ||
4904 | mem_cgroup_clear_mc(); | 4904 | mem_cgroup_clear_mc(); |
4905 | } | ||
4905 | } | 4906 | } |
4906 | #else /* !CONFIG_MMU */ | 4907 | #else /* !CONFIG_MMU */ |
4907 | static int mem_cgroup_can_attach(struct cgroup_taskset *tset) | 4908 | static int mem_cgroup_can_attach(struct cgroup_taskset *tset) |
@@ -4911,7 +4912,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset) | |||
4911 | static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) | 4912 | static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) |
4912 | { | 4913 | { |
4913 | } | 4914 | } |
4914 | static void mem_cgroup_move_task(struct cgroup_taskset *tset) | 4915 | static void mem_cgroup_move_task(void) |
4915 | { | 4916 | { |
4916 | } | 4917 | } |
4917 | #endif | 4918 | #endif |
@@ -5195,7 +5196,7 @@ struct cgroup_subsys memory_cgrp_subsys = { | |||
5195 | .css_reset = mem_cgroup_css_reset, | 5196 | .css_reset = mem_cgroup_css_reset, |
5196 | .can_attach = mem_cgroup_can_attach, | 5197 | .can_attach = mem_cgroup_can_attach, |
5197 | .cancel_attach = mem_cgroup_cancel_attach, | 5198 | .cancel_attach = mem_cgroup_cancel_attach, |
5198 | .attach = mem_cgroup_move_task, | 5199 | .post_attach = mem_cgroup_move_task, |
5199 | .bind = mem_cgroup_bind, | 5200 | .bind = mem_cgroup_bind, |
5200 | .dfl_cftypes = memory_files, | 5201 | .dfl_cftypes = memory_files, |
5201 | .legacy_cftypes = mem_cgroup_legacy_files, | 5202 | .legacy_cftypes = mem_cgroup_legacy_files, |
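The memcontrol.c hunks park the mm pointer taken during can_attach in the shared move_charge_struct and release it from mem_cgroup_clear_mc(); with the mm cached there, the charge walk can run later from the post_attach callback (note the .attach -> .post_attach change) without needing the cgroup_taskset again. A compilable userspace analogue of that prepare/commit/teardown hand-off, with all names invented:

#include <stdio.h>
#include <stdlib.h>

/* A resource acquired in the prepare step is parked in a shared context and
 * released in teardown, so the commit step needs no caller-supplied handle. */
struct move_ctx {
    int *resource;              /* stand-in for mc.mm */
};

static struct move_ctx mc;

static int prepare(void)
{
    mc.resource = malloc(sizeof(*mc.resource));  /* like get_task_mm() */
    if (!mc.resource)
        return -1;
    *mc.resource = 42;
    return 0;
}

static void commit(void)
{
    if (mc.resource)
        printf("moving charges for %d\n", *mc.resource);
}

static void clear(void)
{
    free(mc.resource);          /* like mmput(mm) in mem_cgroup_clear_mc() */
    mc.resource = NULL;
}

int main(void)
{
    if (!prepare()) {
        commit();
        clear();
    }
    return 0;
}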
diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 78f5f2641b91..ca5acee53b7a 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c | |||
@@ -888,7 +888,15 @@ int get_hwpoison_page(struct page *page) | |||
888 | } | 888 | } |
889 | } | 889 | } |
890 | 890 | ||
891 | return get_page_unless_zero(head); | 891 | if (get_page_unless_zero(head)) { |
892 | if (head == compound_head(page)) | ||
893 | return 1; | ||
894 | |||
895 | pr_info("MCE: %#lx cannot catch tail\n", page_to_pfn(page)); | ||
896 | put_page(head); | ||
897 | } | ||
898 | |||
899 | return 0; | ||
892 | } | 900 | } |
893 | EXPORT_SYMBOL_GPL(get_hwpoison_page); | 901 | EXPORT_SYMBOL_GPL(get_hwpoison_page); |
894 | 902 | ||
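The get_hwpoison_page() change pins the compound head it observed and then re-checks that the faulting page still belongs to that head; if a concurrent THP split won the race, the extra reference is dropped and the lookup fails. A self-contained sketch of the check-after-pin pattern, using stub types rather than the real struct page:

#include <stdbool.h>
#include <stdio.h>

struct page_stub { struct page_stub *head; int refs; };

static bool get_ref_unless_zero(struct page_stub *p)
{
    if (p->refs == 0)
        return false;
    p->refs++;
    return true;
}

/* Pin the head seen before the grab, then verify the tail still points at
 * it; on a lost race, drop the pin again and report failure. */
static int get_poisoned_page(struct page_stub *page)
{
    struct page_stub *head = page->head;

    if (get_ref_unless_zero(head)) {
        if (head == page->head)
            return 1;
        fprintf(stderr, "cannot catch tail\n");
        head->refs--;
    }
    return 0;
}

int main(void)
{
    struct page_stub head = { &head, 1 };
    struct page_stub tail = { &head, 0 };

    printf("%d\n", get_poisoned_page(&tail));
    return 0;
}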
diff --git a/mm/memory.c b/mm/memory.c index 93897f23cc11..52c218e2b724 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -789,6 +789,46 @@ out: | |||
789 | return pfn_to_page(pfn); | 789 | return pfn_to_page(pfn); |
790 | } | 790 | } |
791 | 791 | ||
792 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
793 | struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, | ||
794 | pmd_t pmd) | ||
795 | { | ||
796 | unsigned long pfn = pmd_pfn(pmd); | ||
797 | |||
798 | /* | ||
799 | * There is no pmd_special() but there may be special pmds, e.g. | ||
800 | * in a direct-access (dax) mapping, so let's just replicate the | ||
801 | * !HAVE_PTE_SPECIAL case from vm_normal_page() here. | ||
802 | */ | ||
803 | if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { | ||
804 | if (vma->vm_flags & VM_MIXEDMAP) { | ||
805 | if (!pfn_valid(pfn)) | ||
806 | return NULL; | ||
807 | goto out; | ||
808 | } else { | ||
809 | unsigned long off; | ||
810 | off = (addr - vma->vm_start) >> PAGE_SHIFT; | ||
811 | if (pfn == vma->vm_pgoff + off) | ||
812 | return NULL; | ||
813 | if (!is_cow_mapping(vma->vm_flags)) | ||
814 | return NULL; | ||
815 | } | ||
816 | } | ||
817 | |||
818 | if (is_zero_pfn(pfn)) | ||
819 | return NULL; | ||
820 | if (unlikely(pfn > highest_memmap_pfn)) | ||
821 | return NULL; | ||
822 | |||
823 | /* | ||
824 | * NOTE! We still have PageReserved() pages in the page tables. | ||
825 | * eg. VDSO mappings can cause them to exist. | ||
826 | */ | ||
827 | out: | ||
828 | return pfn_to_page(pfn); | ||
829 | } | ||
830 | #endif | ||
831 | |||
792 | /* | 832 | /* |
793 | * copy one vm_area from one task to the other. Assumes the page tables | 833 | * copy one vm_area from one task to the other. Assumes the page tables |
794 | * already present in the new task to be cleared in the whole range | 834 | * already present in the new task to be cleared in the whole range |
@@ -1182,15 +1222,8 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, | |||
1182 | next = pmd_addr_end(addr, end); | 1222 | next = pmd_addr_end(addr, end); |
1183 | if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { | 1223 | if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { |
1184 | if (next - addr != HPAGE_PMD_SIZE) { | 1224 | if (next - addr != HPAGE_PMD_SIZE) { |
1185 | #ifdef CONFIG_DEBUG_VM | 1225 | VM_BUG_ON_VMA(vma_is_anonymous(vma) && |
1186 | if (!rwsem_is_locked(&tlb->mm->mmap_sem)) { | 1226 | !rwsem_is_locked(&tlb->mm->mmap_sem), vma); |
1187 | pr_err("%s: mmap_sem is unlocked! addr=0x%lx end=0x%lx vma->vm_start=0x%lx vma->vm_end=0x%lx\n", | ||
1188 | __func__, addr, end, | ||
1189 | vma->vm_start, | ||
1190 | vma->vm_end); | ||
1191 | BUG(); | ||
1192 | } | ||
1193 | #endif | ||
1194 | split_huge_pmd(vma, pmd, addr); | 1227 | split_huge_pmd(vma, pmd, addr); |
1195 | } else if (zap_huge_pmd(tlb, vma, pmd, addr)) | 1228 | } else if (zap_huge_pmd(tlb, vma, pmd, addr)) |
1196 | goto next; | 1229 | goto next; |
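Two things happen in the mm/memory.c hunks: a new vm_normal_page_pmd() helper replicates the !HAVE_PTE_SPECIAL checks for huge pmds, and the open-coded mmap_sem debug block in zap_pmd_range() collapses into one VM_BUG_ON_VMA() that only fires for anonymous VMAs. The heart of the former is the linear VM_PFNMAP test; a standalone sketch with invented numbers and a hard-coded 4 KiB page size:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT_ 12

/* A raw pfn mapping has no struct page behind it, so a pfn that matches the
 * VMA's linear layout must not be treated as a normal page. */
static bool pfn_is_linear_map(uint64_t pfn, uint64_t addr,
                              uint64_t vm_start, uint64_t vm_pgoff)
{
    uint64_t off = (addr - vm_start) >> PAGE_SHIFT_;

    return pfn == vm_pgoff + off;
}

int main(void)
{
    /* pfn 0x105, five pages into a mapping whose first pfn is 0x100 */
    printf("%d\n", pfn_is_linear_map(0x105, 0x405000, 0x400000, 0x100));
    return 0;
}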
diff --git a/mm/migrate.c b/mm/migrate.c index 6c822a7b27e0..f9dfb18a4eba 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -975,7 +975,13 @@ out: | |||
975 | dec_zone_page_state(page, NR_ISOLATED_ANON + | 975 | dec_zone_page_state(page, NR_ISOLATED_ANON + |
976 | page_is_file_cache(page)); | 976 | page_is_file_cache(page)); |
977 | /* Soft-offlined page shouldn't go through lru cache list */ | 977 | /* Soft-offlined page shouldn't go through lru cache list */ |
978 | if (reason == MR_MEMORY_FAILURE) { | 978 | if (reason == MR_MEMORY_FAILURE && rc == MIGRATEPAGE_SUCCESS) { |
979 | /* | ||
980 | * With this release, we free the successfully migrated | ||
981 | * page and intentionally set PG_HWPoison on the page we | ||
982 | * just freed. Although it's rather weird, it's how the | ||
983 | * HWPoison flag works at the moment. | ||
984 | */ | ||
979 | put_page(page); | 985 | put_page(page); |
980 | if (!test_set_page_hwpoison(page)) | 986 | if (!test_set_page_hwpoison(page)) |
981 | num_poisoned_pages_inc(); | 987 | num_poisoned_pages_inc(); |
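The migrate.c hunk makes the memory-failure special case conditional on the migration actually having succeeded, so a page whose migration failed is no longer freed and poisoned behind the caller's back. Condensed from the hunk above, with comments added (kernel-style, not a standalone program):

if (reason == MR_MEMORY_FAILURE && rc == MIGRATEPAGE_SUCCESS) {
    put_page(page);                      /* free the migrated source page */
    if (!test_set_page_hwpoison(page))   /* then poison the just-freed page */
        num_poisoned_pages_inc();        /* and account it exactly once */
}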
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 999792d35ccc..bc5149d5ec38 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
@@ -1910,7 +1910,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb) | |||
1910 | if (gdtc->dirty > gdtc->bg_thresh) | 1910 | if (gdtc->dirty > gdtc->bg_thresh) |
1911 | return true; | 1911 | return true; |
1912 | 1912 | ||
1913 | if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(gdtc)) | 1913 | if (wb_stat(wb, WB_RECLAIMABLE) > |
1914 | wb_calc_thresh(gdtc->wb, gdtc->bg_thresh)) | ||
1914 | return true; | 1915 | return true; |
1915 | 1916 | ||
1916 | if (mdtc) { | 1917 | if (mdtc) { |
@@ -1924,7 +1925,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb) | |||
1924 | if (mdtc->dirty > mdtc->bg_thresh) | 1925 | if (mdtc->dirty > mdtc->bg_thresh) |
1925 | return true; | 1926 | return true; |
1926 | 1927 | ||
1927 | if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(mdtc)) | 1928 | if (wb_stat(wb, WB_RECLAIMABLE) > |
1929 | wb_calc_thresh(mdtc->wb, mdtc->bg_thresh)) | ||
1928 | return true; | 1930 | return true; |
1929 | } | 1931 | } |
1930 | 1932 | ||
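The wb_over_bg_thresh() hunks compare the writeback's reclaimable pages against its share of the background threshold, wb_calc_thresh(wb, bg_thresh), instead of the value from __wb_calc_thresh(), which in this context scaled the larger foreground dirty threshold and so made the background check too lenient. Toy arithmetic only, with invented numbers:

#include <stdio.h>

/* A per-writeback threshold is the global threshold scaled by this
 * writeback's share; the fix scales bg_thresh, not the foreground thresh. */
static unsigned long per_wb_thresh(unsigned long global_thresh,
                                   unsigned long wb_share, unsigned long total)
{
    return global_thresh * wb_share / total;
}

int main(void)
{
    unsigned long fg_thresh = 2000, bg_thresh = 1000;   /* pages, made up */

    printf("old check ~%lu pages, new check ~%lu pages\n",
           per_wb_thresh(fg_thresh, 1, 4), per_wb_thresh(bg_thresh, 1, 4));
    return 0;
}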
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 59de90d5d3a3..c1069efcc4d7 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -6485,7 +6485,7 @@ int __meminit init_per_zone_wmark_min(void) | |||
6485 | setup_per_zone_inactive_ratio(); | 6485 | setup_per_zone_inactive_ratio(); |
6486 | return 0; | 6486 | return 0; |
6487 | } | 6487 | } |
6488 | module_init(init_per_zone_wmark_min) | 6488 | core_initcall(init_per_zone_wmark_min) |
6489 | 6489 | ||
6490 | /* | 6490 | /* |
6491 | * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so | 6491 | * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so |
diff --git a/mm/page_io.c b/mm/page_io.c index cd92e3d67a32..985f23cfa79b 100644 --- a/mm/page_io.c +++ b/mm/page_io.c | |||
@@ -353,7 +353,11 @@ int swap_readpage(struct page *page) | |||
353 | 353 | ||
354 | ret = bdev_read_page(sis->bdev, swap_page_sector(page), page); | 354 | ret = bdev_read_page(sis->bdev, swap_page_sector(page), page); |
355 | if (!ret) { | 355 | if (!ret) { |
356 | swap_slot_free_notify(page); | 356 | if (trylock_page(page)) { |
357 | swap_slot_free_notify(page); | ||
358 | unlock_page(page); | ||
359 | } | ||
360 | |||
357 | count_vm_event(PSWPIN); | 361 | count_vm_event(PSWPIN); |
358 | return 0; | 362 | return 0; |
359 | } | 363 | } |
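The swap_readpage() fix calls swap_slot_free_notify() only when it can take the page lock, and uses trylock so the read completion path never blocks on it. A generic userspace sketch of a trylock-guarded callback, with a pthread mutex standing in for the page lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;

/* Invoke the callback only if the lock is free; otherwise silently skip. */
static void notify_under_lock(void (*notify)(void))
{
    if (pthread_mutex_trylock(&page_lock) == 0) {
        notify();                       /* the callback requires the lock */
        pthread_mutex_unlock(&page_lock);
    }
}

static void say_notified(void) { puts("notified"); }

int main(void)
{
    notify_under_lock(say_notified);
    return 0;
}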
diff --git a/mm/swap.c b/mm/swap.c --- a/mm/swap.c +++ b/mm/swap.c | |||
@@ -728,6 +728,11 @@ void release_pages(struct page **pages, int nr, bool cold) | |||
728 | zone = NULL; | 728 | zone = NULL; |
729 | } | 729 | } |
730 | 730 | ||
731 | if (is_huge_zero_page(page)) { | ||
732 | put_huge_zero_page(); | ||
733 | continue; | ||
734 | } | ||
735 | |||
731 | page = compound_head(page); | 736 | page = compound_head(page); |
732 | if (!put_page_testzero(page)) | 737 | if (!put_page_testzero(page)) |
733 | continue; | 738 | continue; |
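This release_pages() hunk (the bulk put path in mm/swap.c) learns to recognize the huge zero page, which is pinned by its own dedicated counter, and drops that counter instead of falling through to the normal compound_head()/put_page_testzero() logic. A small self-contained analogue:

#include <stdio.h>

static int huge_zero_refs = 1;          /* stand-in for the dedicated counter */

struct fake_page { int is_huge_zero; int refs; };

static void release_pages_stub(struct fake_page **pages, int nr)
{
    for (int i = 0; i < nr; i++) {
        if (pages[i]->is_huge_zero) {
            huge_zero_refs--;           /* put_huge_zero_page() */
            continue;                   /* never reaches the normal put */
        }
        pages[i]->refs--;               /* ordinary put_page() path */
    }
}

int main(void)
{
    struct fake_page hz = { 1, 0 }, p = { 0, 2 };
    struct fake_page *vec[] = { &hz, &p };

    release_pages_stub(vec, 2);
    printf("huge_zero_refs=%d p.refs=%d\n", huge_zero_refs, p.refs);
    return 0;
}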
diff --git a/mm/vmscan.c b/mm/vmscan.c index b934223eaa45..142cb61f4822 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -2553,7 +2553,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc) | |||
2553 | sc->gfp_mask |= __GFP_HIGHMEM; | 2553 | sc->gfp_mask |= __GFP_HIGHMEM; |
2554 | 2554 | ||
2555 | for_each_zone_zonelist_nodemask(zone, z, zonelist, | 2555 | for_each_zone_zonelist_nodemask(zone, z, zonelist, |
2556 | requested_highidx, sc->nodemask) { | 2556 | gfp_zone(sc->gfp_mask), sc->nodemask) { |
2557 | enum zone_type classzone_idx; | 2557 | enum zone_type classzone_idx; |
2558 | 2558 | ||
2559 | if (!populated_zone(zone)) | 2559 | if (!populated_zone(zone)) |
@@ -3318,6 +3318,20 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, | |||
3318 | /* Try to sleep for a short interval */ | 3318 | /* Try to sleep for a short interval */ |
3319 | if (prepare_kswapd_sleep(pgdat, order, remaining, | 3319 | if (prepare_kswapd_sleep(pgdat, order, remaining, |
3320 | balanced_classzone_idx)) { | 3320 | balanced_classzone_idx)) { |
3321 | /* | ||
3322 | * Compaction records what page blocks it recently failed to | ||
3323 | * isolate pages from and skips them in the future scanning. | ||
3324 | * When kswapd is going to sleep, it is reasonable to assume | ||
3325 | * that pages and compaction may succeed so reset the cache. | ||
3326 | */ | ||
3327 | reset_isolation_suitable(pgdat); | ||
3328 | |||
3329 | /* | ||
3330 | * We have freed the memory, now we should compact it to make | ||
3331 | * allocation of the requested order possible. | ||
3332 | */ | ||
3333 | wakeup_kcompactd(pgdat, order, classzone_idx); | ||
3334 | |||
3321 | remaining = schedule_timeout(HZ/10); | 3335 | remaining = schedule_timeout(HZ/10); |
3322 | finish_wait(&pgdat->kswapd_wait, &wait); | 3336 | finish_wait(&pgdat->kswapd_wait, &wait); |
3323 | prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); | 3337 | prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); |
@@ -3341,20 +3355,6 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, | |||
3341 | */ | 3355 | */ |
3342 | set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); | 3356 | set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold); |
3343 | 3357 | ||
3344 | /* | ||
3345 | * Compaction records what page blocks it recently failed to | ||
3346 | * isolate pages from and skips them in the future scanning. | ||
3347 | * When kswapd is going to sleep, it is reasonable to assume | ||
3348 | * that pages and compaction may succeed so reset the cache. | ||
3349 | */ | ||
3350 | reset_isolation_suitable(pgdat); | ||
3351 | |||
3352 | /* | ||
3353 | * We have freed the memory, now we should compact it to make | ||
3354 | * allocation of the requested order possible. | ||
3355 | */ | ||
3356 | wakeup_kcompactd(pgdat, order, classzone_idx); | ||
3357 | |||
3358 | if (!kthread_should_stop()) | 3358 | if (!kthread_should_stop()) |
3359 | schedule(); | 3359 | schedule(); |
3360 | 3360 | ||
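The vmscan.c hunks move the compaction bookkeeping from the long-sleep path into the short trial sleep, so kcompactd is woken and the pageblock skip hints are cleared before kswapd's 100 ms nap, not only once it commits to a full sleep. The resulting ordering, condensed from the hunk (kernel-style, not standalone):

if (prepare_kswapd_sleep(pgdat, order, remaining, balanced_classzone_idx)) {
    reset_isolation_suitable(pgdat);               /* forget failed pageblocks */
    wakeup_kcompactd(pgdat, order, classzone_idx); /* let kcompactd run now */
    remaining = schedule_timeout(HZ/10);           /* short trial sleep */
}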
diff --git a/mm/zswap.c b/mm/zswap.c index 91dad80d068b..de0f119b1780 100644 --- a/mm/zswap.c +++ b/mm/zswap.c | |||
@@ -170,6 +170,8 @@ static struct zswap_tree *zswap_trees[MAX_SWAPFILES]; | |||
170 | static LIST_HEAD(zswap_pools); | 170 | static LIST_HEAD(zswap_pools); |
171 | /* protects zswap_pools list modification */ | 171 | /* protects zswap_pools list modification */ |
172 | static DEFINE_SPINLOCK(zswap_pools_lock); | 172 | static DEFINE_SPINLOCK(zswap_pools_lock); |
173 | /* pool counter to provide unique names to zpool */ | ||
174 | static atomic_t zswap_pools_count = ATOMIC_INIT(0); | ||
173 | 175 | ||
174 | /* used by param callback function */ | 176 | /* used by param callback function */ |
175 | static bool zswap_init_started; | 177 | static bool zswap_init_started; |
@@ -565,6 +567,7 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor) | |||
565 | static struct zswap_pool *zswap_pool_create(char *type, char *compressor) | 567 | static struct zswap_pool *zswap_pool_create(char *type, char *compressor) |
566 | { | 568 | { |
567 | struct zswap_pool *pool; | 569 | struct zswap_pool *pool; |
570 | char name[38]; /* 'zswap' + 32 char (max) num + \0 */ | ||
568 | gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM; | 571 | gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM; |
569 | 572 | ||
570 | pool = kzalloc(sizeof(*pool), GFP_KERNEL); | 573 | pool = kzalloc(sizeof(*pool), GFP_KERNEL); |
@@ -573,7 +576,10 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor) | |||
573 | return NULL; | 576 | return NULL; |
574 | } | 577 | } |
575 | 578 | ||
576 | pool->zpool = zpool_create_pool(type, "zswap", gfp, &zswap_zpool_ops); | 579 | /* unique name for each pool specifically required by zsmalloc */ |
580 | snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count)); | ||
581 | |||
582 | pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops); | ||
577 | if (!pool->zpool) { | 583 | if (!pool->zpool) { |
578 | pr_err("%s zpool not available\n", type); | 584 | pr_err("%s zpool not available\n", type); |
579 | goto error; | 585 | goto error; |
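The zswap change derives a distinct zpool name from an atomic counter, since (per the comment added above) zsmalloc requires each pool to be created with a unique name. A compilable userspace analogue of the naming scheme:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint pool_count;

/* Build "zswap<hex id>"; the buffer size mirrors the kernel's 38-byte array. */
static void make_pool_name(char *buf, size_t len)
{
    snprintf(buf, len, "zswap%x", atomic_fetch_add(&pool_count, 1) + 1);
}

int main(void)
{
    char a[38], b[38];

    make_pool_name(a, sizeof(a));
    make_pool_name(b, sizeof(b));
    printf("%s %s\n", a, b);            /* prints: zswap1 zswap2 */
    return 0;
}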
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c index 3315b9a598af..4026f198a734 100644 --- a/net/batman-adv/bat_v.c +++ b/net/batman-adv/bat_v.c | |||
@@ -32,10 +32,21 @@ | |||
32 | 32 | ||
33 | #include "bat_v_elp.h" | 33 | #include "bat_v_elp.h" |
34 | #include "bat_v_ogm.h" | 34 | #include "bat_v_ogm.h" |
35 | #include "hard-interface.h" | ||
35 | #include "hash.h" | 36 | #include "hash.h" |
36 | #include "originator.h" | 37 | #include "originator.h" |
37 | #include "packet.h" | 38 | #include "packet.h" |
38 | 39 | ||
40 | static void batadv_v_iface_activate(struct batadv_hard_iface *hard_iface) | ||
41 | { | ||
42 | /* B.A.T.M.A.N. V does not use any queuing mechanism, therefore it can | ||
43 | * set the interface as ACTIVE right away, without any risk of race | ||
44 | * condition | ||
45 | */ | ||
46 | if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED) | ||
47 | hard_iface->if_status = BATADV_IF_ACTIVE; | ||
48 | } | ||
49 | |||
39 | static int batadv_v_iface_enable(struct batadv_hard_iface *hard_iface) | 50 | static int batadv_v_iface_enable(struct batadv_hard_iface *hard_iface) |
40 | { | 51 | { |
41 | int ret; | 52 | int ret; |
@@ -274,6 +285,7 @@ static bool batadv_v_neigh_is_sob(struct batadv_neigh_node *neigh1, | |||
274 | 285 | ||
275 | static struct batadv_algo_ops batadv_batman_v __read_mostly = { | 286 | static struct batadv_algo_ops batadv_batman_v __read_mostly = { |
276 | .name = "BATMAN_V", | 287 | .name = "BATMAN_V", |
288 | .bat_iface_activate = batadv_v_iface_activate, | ||
277 | .bat_iface_enable = batadv_v_iface_enable, | 289 | .bat_iface_enable = batadv_v_iface_enable, |
278 | .bat_iface_disable = batadv_v_iface_disable, | 290 | .bat_iface_disable = batadv_v_iface_disable, |
279 | .bat_iface_update_mac = batadv_v_iface_update_mac, | 291 | .bat_iface_update_mac = batadv_v_iface_update_mac, |
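bat_v.c gains an optional bat_iface_activate hook so B.A.T.M.A.N. V can flip a hard interface straight to ACTIVE; having no packet queue to drain, it has nothing to race against. A generic, compilable sketch of the optional ops-table callback the core now invokes (names invented):

#include <stdio.h>

enum if_status { IF_TO_BE_ACTIVATED, IF_ACTIVE };

struct hard_iface { enum if_status status; };
struct algo_ops   { void (*iface_activate)(struct hard_iface *); };

static void v_iface_activate(struct hard_iface *hi)
{
    if (hi->status == IF_TO_BE_ACTIVATED)
        hi->status = IF_ACTIVE;         /* no queueing, flip right away */
}

static const struct algo_ops batman_v = { .iface_activate = v_iface_activate };

int main(void)
{
    struct hard_iface hi = { IF_TO_BE_ACTIVATED };

    if (batman_v.iface_activate)        /* the core checks the hook exists */
        batman_v.iface_activate(&hi);
    printf("status=%d\n", hi.status);
    return 0;
}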
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index e96d7c745b4a..3e6b2624f980 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c | |||
@@ -568,6 +568,7 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv, | |||
568 | * be sent to | 568 | * be sent to |
569 | * @bat_priv: the bat priv with all the soft interface information | 569 | * @bat_priv: the bat priv with all the soft interface information |
570 | * @ip_dst: ipv4 to look up in the DHT | 570 | * @ip_dst: ipv4 to look up in the DHT |
571 | * @vid: VLAN identifier | ||
571 | * | 572 | * |
572 | * An originator O is selected if and only if its DHT_ID value is one of three | 573 | * An originator O is selected if and only if its DHT_ID value is one of three |
573 | * closest values (from the LEFT, with wrap around if needed) then the hash | 574 | * closest values (from the LEFT, with wrap around if needed) then the hash |
@@ -576,7 +577,8 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv, | |||
576 | * Return: the candidate array of size BATADV_DAT_CANDIDATE_NUM. | 577 | * Return: the candidate array of size BATADV_DAT_CANDIDATE_NUM. |
577 | */ | 578 | */ |
578 | static struct batadv_dat_candidate * | 579 | static struct batadv_dat_candidate * |
579 | batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst) | 580 | batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst, |
581 | unsigned short vid) | ||
580 | { | 582 | { |
581 | int select; | 583 | int select; |
582 | batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key; | 584 | batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key; |
@@ -592,7 +594,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst) | |||
592 | return NULL; | 594 | return NULL; |
593 | 595 | ||
594 | dat.ip = ip_dst; | 596 | dat.ip = ip_dst; |
595 | dat.vid = 0; | 597 | dat.vid = vid; |
596 | ip_key = (batadv_dat_addr_t)batadv_hash_dat(&dat, | 598 | ip_key = (batadv_dat_addr_t)batadv_hash_dat(&dat, |
597 | BATADV_DAT_ADDR_MAX); | 599 | BATADV_DAT_ADDR_MAX); |
598 | 600 | ||
@@ -612,6 +614,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst) | |||
612 | * @bat_priv: the bat priv with all the soft interface information | 614 | * @bat_priv: the bat priv with all the soft interface information |
613 | * @skb: payload to send | 615 | * @skb: payload to send |
614 | * @ip: the DHT key | 616 | * @ip: the DHT key |
617 | * @vid: VLAN identifier | ||
615 | * @packet_subtype: unicast4addr packet subtype to use | 618 | * @packet_subtype: unicast4addr packet subtype to use |
616 | * | 619 | * |
617 | * This function copies the skb with pskb_copy() and is sent as unicast packet | 620 | * This function copies the skb with pskb_copy() and is sent as unicast packet |
@@ -622,7 +625,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst) | |||
622 | */ | 625 | */ |
623 | static bool batadv_dat_send_data(struct batadv_priv *bat_priv, | 626 | static bool batadv_dat_send_data(struct batadv_priv *bat_priv, |
624 | struct sk_buff *skb, __be32 ip, | 627 | struct sk_buff *skb, __be32 ip, |
625 | int packet_subtype) | 628 | unsigned short vid, int packet_subtype) |
626 | { | 629 | { |
627 | int i; | 630 | int i; |
628 | bool ret = false; | 631 | bool ret = false; |
@@ -631,7 +634,7 @@ static bool batadv_dat_send_data(struct batadv_priv *bat_priv, | |||
631 | struct sk_buff *tmp_skb; | 634 | struct sk_buff *tmp_skb; |
632 | struct batadv_dat_candidate *cand; | 635 | struct batadv_dat_candidate *cand; |
633 | 636 | ||
634 | cand = batadv_dat_select_candidates(bat_priv, ip); | 637 | cand = batadv_dat_select_candidates(bat_priv, ip, vid); |
635 | if (!cand) | 638 | if (!cand) |
636 | goto out; | 639 | goto out; |
637 | 640 | ||
@@ -1022,7 +1025,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv, | |||
1022 | ret = true; | 1025 | ret = true; |
1023 | } else { | 1026 | } else { |
1024 | /* Send the request to the DHT */ | 1027 | /* Send the request to the DHT */ |
1025 | ret = batadv_dat_send_data(bat_priv, skb, ip_dst, | 1028 | ret = batadv_dat_send_data(bat_priv, skb, ip_dst, vid, |
1026 | BATADV_P_DAT_DHT_GET); | 1029 | BATADV_P_DAT_DHT_GET); |
1027 | } | 1030 | } |
1028 | out: | 1031 | out: |
@@ -1150,8 +1153,8 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv, | |||
1150 | /* Send the ARP reply to the candidates for both the IP addresses that | 1153 | /* Send the ARP reply to the candidates for both the IP addresses that |
1151 | * the node obtained from the ARP reply | 1154 | * the node obtained from the ARP reply |
1152 | */ | 1155 | */ |
1153 | batadv_dat_send_data(bat_priv, skb, ip_src, BATADV_P_DAT_DHT_PUT); | 1156 | batadv_dat_send_data(bat_priv, skb, ip_src, vid, BATADV_P_DAT_DHT_PUT); |
1154 | batadv_dat_send_data(bat_priv, skb, ip_dst, BATADV_P_DAT_DHT_PUT); | 1157 | batadv_dat_send_data(bat_priv, skb, ip_dst, vid, BATADV_P_DAT_DHT_PUT); |
1155 | } | 1158 | } |
1156 | 1159 | ||
1157 | /** | 1160 | /** |
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index b22b2775a0a5..0a7deaf2670a 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c | |||
@@ -407,6 +407,9 @@ batadv_hardif_activate_interface(struct batadv_hard_iface *hard_iface) | |||
407 | 407 | ||
408 | batadv_update_min_mtu(hard_iface->soft_iface); | 408 | batadv_update_min_mtu(hard_iface->soft_iface); |
409 | 409 | ||
410 | if (bat_priv->bat_algo_ops->bat_iface_activate) | ||
411 | bat_priv->bat_algo_ops->bat_iface_activate(hard_iface); | ||
412 | |||
410 | out: | 413 | out: |
411 | if (primary_if) | 414 | if (primary_if) |
412 | batadv_hardif_put(primary_if); | 415 | batadv_hardif_put(primary_if); |
@@ -572,8 +575,7 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface, | |||
572 | struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); | 575 | struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); |
573 | struct batadv_hard_iface *primary_if = NULL; | 576 | struct batadv_hard_iface *primary_if = NULL; |
574 | 577 | ||
575 | if (hard_iface->if_status == BATADV_IF_ACTIVE) | 578 | batadv_hardif_deactivate_interface(hard_iface); |
576 | batadv_hardif_deactivate_interface(hard_iface); | ||
577 | 579 | ||
578 | if (hard_iface->if_status != BATADV_IF_INACTIVE) | 580 | if (hard_iface->if_status != BATADV_IF_INACTIVE) |
579 | goto out; | 581 | goto out; |
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index e4cbb0753e37..c355a824713c 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c | |||
@@ -250,7 +250,6 @@ static void batadv_neigh_node_release(struct kref *ref) | |||
250 | { | 250 | { |
251 | struct hlist_node *node_tmp; | 251 | struct hlist_node *node_tmp; |
252 | struct batadv_neigh_node *neigh_node; | 252 | struct batadv_neigh_node *neigh_node; |
253 | struct batadv_hardif_neigh_node *hardif_neigh; | ||
254 | struct batadv_neigh_ifinfo *neigh_ifinfo; | 253 | struct batadv_neigh_ifinfo *neigh_ifinfo; |
255 | struct batadv_algo_ops *bao; | 254 | struct batadv_algo_ops *bao; |
256 | 255 | ||
@@ -262,13 +261,7 @@ static void batadv_neigh_node_release(struct kref *ref) | |||
262 | batadv_neigh_ifinfo_put(neigh_ifinfo); | 261 | batadv_neigh_ifinfo_put(neigh_ifinfo); |
263 | } | 262 | } |
264 | 263 | ||
265 | hardif_neigh = batadv_hardif_neigh_get(neigh_node->if_incoming, | 264 | batadv_hardif_neigh_put(neigh_node->hardif_neigh); |
266 | neigh_node->addr); | ||
267 | if (hardif_neigh) { | ||
268 | /* batadv_hardif_neigh_get() increases refcount too */ | ||
269 | batadv_hardif_neigh_put(hardif_neigh); | ||
270 | batadv_hardif_neigh_put(hardif_neigh); | ||
271 | } | ||
272 | 265 | ||
273 | if (bao->bat_neigh_free) | 266 | if (bao->bat_neigh_free) |
274 | bao->bat_neigh_free(neigh_node); | 267 | bao->bat_neigh_free(neigh_node); |
@@ -663,6 +656,11 @@ batadv_neigh_node_new(struct batadv_orig_node *orig_node, | |||
663 | ether_addr_copy(neigh_node->addr, neigh_addr); | 656 | ether_addr_copy(neigh_node->addr, neigh_addr); |
664 | neigh_node->if_incoming = hard_iface; | 657 | neigh_node->if_incoming = hard_iface; |
665 | neigh_node->orig_node = orig_node; | 658 | neigh_node->orig_node = orig_node; |
659 | neigh_node->last_seen = jiffies; | ||
660 | |||
661 | /* increment unique neighbor refcount */ | ||
662 | kref_get(&hardif_neigh->refcount); | ||
663 | neigh_node->hardif_neigh = hardif_neigh; | ||
666 | 664 | ||
667 | /* extra reference for return */ | 665 | /* extra reference for return */ |
668 | kref_init(&neigh_node->refcount); | 666 | kref_init(&neigh_node->refcount); |
@@ -672,9 +670,6 @@ batadv_neigh_node_new(struct batadv_orig_node *orig_node, | |||
672 | hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list); | 670 | hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list); |
673 | spin_unlock_bh(&orig_node->neigh_list_lock); | 671 | spin_unlock_bh(&orig_node->neigh_list_lock); |
674 | 672 | ||
675 | /* increment unique neighbor refcount */ | ||
676 | kref_get(&hardif_neigh->refcount); | ||
677 | |||
678 | batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv, | 673 | batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv, |
679 | "Creating new neighbor %pM for orig_node %pM on interface %s\n", | 674 | "Creating new neighbor %pM for orig_node %pM on interface %s\n", |
680 | neigh_addr, orig_node->orig, hard_iface->net_dev->name); | 675 | neigh_addr, orig_node->orig, hard_iface->net_dev->name); |
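originator.c now records the hardif_neigh pointer in the neighbor when it is created, together with one counted reference, and the release path drops exactly that reference; the removed code instead looked the object up again at release time and then had to put it twice. A minimal analogue of holding a counted back-pointer for an object's lifetime:

#include <stdio.h>

struct parent { int refs; };
struct child  { struct parent *parent; };

static void child_init(struct child *c, struct parent *p)
{
    p->refs++;              /* kref_get(&hardif_neigh->refcount) */
    c->parent = p;          /* neigh_node->hardif_neigh = hardif_neigh */
}

static void child_release(struct child *c)
{
    c->parent->refs--;      /* batadv_hardif_neigh_put(neigh_node->hardif_neigh) */
}

int main(void)
{
    struct parent p = { 1 };
    struct child c;

    child_init(&c, &p);
    child_release(&c);
    printf("parent refs back to %d\n", p.refs);
    return 0;
}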
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 4dd646a52f1a..b781bf753250 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c | |||
@@ -105,6 +105,15 @@ static void _batadv_update_route(struct batadv_priv *bat_priv, | |||
105 | neigh_node = NULL; | 105 | neigh_node = NULL; |
106 | 106 | ||
107 | spin_lock_bh(&orig_node->neigh_list_lock); | 107 | spin_lock_bh(&orig_node->neigh_list_lock); |
108 | /* curr_router used earlier may not be the current orig_ifinfo->router | ||
109 | * anymore because it was dereferenced outside of the neigh_list_lock | ||
110 | * protected region. After the new best neighbor has replaced the current | ||
111 | * best neighbor the reference counter needs to decrease. Consequently, | ||
112 | * the code needs to ensure the curr_router variable contains a pointer | ||
113 | * to the replaced best neighbor. | ||
114 | */ | ||
115 | curr_router = rcu_dereference_protected(orig_ifinfo->router, true); | ||
116 | |||
108 | rcu_assign_pointer(orig_ifinfo->router, neigh_node); | 117 | rcu_assign_pointer(orig_ifinfo->router, neigh_node); |
109 | spin_unlock_bh(&orig_node->neigh_list_lock); | 118 | spin_unlock_bh(&orig_node->neigh_list_lock); |
110 | batadv_orig_ifinfo_put(orig_ifinfo); | 119 | batadv_orig_ifinfo_put(orig_ifinfo); |
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index 3ce06e0a91b1..76417850d3fc 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c | |||
@@ -675,6 +675,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv, | |||
675 | 675 | ||
676 | if (pending) { | 676 | if (pending) { |
677 | hlist_del(&forw_packet->list); | 677 | hlist_del(&forw_packet->list); |
678 | if (!forw_packet->own) | ||
679 | atomic_inc(&bat_priv->bcast_queue_left); | ||
680 | |||
678 | batadv_forw_packet_free(forw_packet); | 681 | batadv_forw_packet_free(forw_packet); |
679 | } | 682 | } |
680 | } | 683 | } |
@@ -702,6 +705,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv, | |||
702 | 705 | ||
703 | if (pending) { | 706 | if (pending) { |
704 | hlist_del(&forw_packet->list); | 707 | hlist_del(&forw_packet->list); |
708 | if (!forw_packet->own) | ||
709 | atomic_inc(&bat_priv->batman_queue_left); | ||
710 | |||
705 | batadv_forw_packet_free(forw_packet); | 711 | batadv_forw_packet_free(forw_packet); |
706 | } | 712 | } |
707 | } | 713 | } |
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 0710379491bf..8a136b6a1ff0 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c | |||
@@ -408,11 +408,17 @@ void batadv_interface_rx(struct net_device *soft_iface, | |||
408 | */ | 408 | */ |
409 | nf_reset(skb); | 409 | nf_reset(skb); |
410 | 410 | ||
411 | if (unlikely(!pskb_may_pull(skb, ETH_HLEN))) | ||
412 | goto dropped; | ||
413 | |||
411 | vid = batadv_get_vid(skb, 0); | 414 | vid = batadv_get_vid(skb, 0); |
412 | ethhdr = eth_hdr(skb); | 415 | ethhdr = eth_hdr(skb); |
413 | 416 | ||
414 | switch (ntohs(ethhdr->h_proto)) { | 417 | switch (ntohs(ethhdr->h_proto)) { |
415 | case ETH_P_8021Q: | 418 | case ETH_P_8021Q: |
419 | if (!pskb_may_pull(skb, VLAN_ETH_HLEN)) | ||
420 | goto dropped; | ||
421 | |||
416 | vhdr = (struct vlan_ethhdr *)skb->data; | 422 | vhdr = (struct vlan_ethhdr *)skb->data; |
417 | 423 | ||
418 | if (vhdr->h_vlan_encapsulated_proto != ethertype) | 424 | if (vhdr->h_vlan_encapsulated_proto != ethertype) |
@@ -424,8 +430,6 @@ void batadv_interface_rx(struct net_device *soft_iface, | |||
424 | } | 430 | } |
425 | 431 | ||
426 | /* skb->dev & skb->pkt_type are set here */ | 432 | /* skb->dev & skb->pkt_type are set here */ |
427 | if (unlikely(!pskb_may_pull(skb, ETH_HLEN))) | ||
428 | goto dropped; | ||
429 | skb->protocol = eth_type_trans(skb, soft_iface); | 433 | skb->protocol = eth_type_trans(skb, soft_iface); |
430 | 434 | ||
431 | /* should not be necessary anymore as we use skb_pull_rcsum() | 435 | /* should not be necessary anymore as we use skb_pull_rcsum() |
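The soft-interface.c fix pulls a full Ethernet header into the linear area before eth_hdr() and batadv_get_vid() read it, and pulls the larger 802.1Q header before the VLAN branch dereferences it. The same bounds-before-parse idea in plain C, with header sizes hard-coded for illustration:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define ETH_HLEN_      14
#define VLAN_ETH_HLEN_ 18

/* Ensure the buffer holds the full header before anything casts into it. */
static bool frame_parsable(const uint8_t *data, size_t len, bool has_vlan_tag)
{
    if (len < ETH_HLEN_)
        return false;                   /* drop before touching eth header */
    if (has_vlan_tag && len < VLAN_ETH_HLEN_)
        return false;                   /* drop before touching vlan header */
    (void)data;
    return true;
}

int main(void)
{
    uint8_t buf[14] = { 0 };

    return frame_parsable(buf, sizeof(buf), false) ? 0 : 1;
}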
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 0b43e86328a5..9b4551a86535 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c | |||
@@ -215,6 +215,8 @@ static void batadv_tt_local_entry_release(struct kref *ref) | |||
215 | tt_local_entry = container_of(ref, struct batadv_tt_local_entry, | 215 | tt_local_entry = container_of(ref, struct batadv_tt_local_entry, |
216 | common.refcount); | 216 | common.refcount); |
217 | 217 | ||
218 | batadv_softif_vlan_put(tt_local_entry->vlan); | ||
219 | |||
218 | kfree_rcu(tt_local_entry, common.rcu); | 220 | kfree_rcu(tt_local_entry, common.rcu); |
219 | } | 221 | } |
220 | 222 | ||
@@ -673,6 +675,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr, | |||
673 | kref_get(&tt_local->common.refcount); | 675 | kref_get(&tt_local->common.refcount); |
674 | tt_local->last_seen = jiffies; | 676 | tt_local->last_seen = jiffies; |
675 | tt_local->common.added_at = tt_local->last_seen; | 677 | tt_local->common.added_at = tt_local->last_seen; |
678 | tt_local->vlan = vlan; | ||
676 | 679 | ||
677 | /* the batman interface mac and multicast addresses should never be | 680 | /* the batman interface mac and multicast addresses should never be |
678 | * purged | 681 | * purged |
@@ -991,7 +994,6 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset) | |||
991 | struct batadv_tt_common_entry *tt_common_entry; | 994 | struct batadv_tt_common_entry *tt_common_entry; |
992 | struct batadv_tt_local_entry *tt_local; | 995 | struct batadv_tt_local_entry *tt_local; |
993 | struct batadv_hard_iface *primary_if; | 996 | struct batadv_hard_iface *primary_if; |
994 | struct batadv_softif_vlan *vlan; | ||
995 | struct hlist_head *head; | 997 | struct hlist_head *head; |
996 | unsigned short vid; | 998 | unsigned short vid; |
997 | u32 i; | 999 | u32 i; |
@@ -1027,14 +1029,6 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset) | |||
1027 | last_seen_msecs = last_seen_msecs % 1000; | 1029 | last_seen_msecs = last_seen_msecs % 1000; |
1028 | 1030 | ||
1029 | no_purge = tt_common_entry->flags & np_flag; | 1031 | no_purge = tt_common_entry->flags & np_flag; |
1030 | |||
1031 | vlan = batadv_softif_vlan_get(bat_priv, vid); | ||
1032 | if (!vlan) { | ||
1033 | seq_printf(seq, "Cannot retrieve VLAN %d\n", | ||
1034 | BATADV_PRINT_VID(vid)); | ||
1035 | continue; | ||
1036 | } | ||
1037 | |||
1038 | seq_printf(seq, | 1032 | seq_printf(seq, |
1039 | " * %pM %4i [%c%c%c%c%c%c] %3u.%03u (%#.8x)\n", | 1033 | " * %pM %4i [%c%c%c%c%c%c] %3u.%03u (%#.8x)\n", |
1040 | tt_common_entry->addr, | 1034 | tt_common_entry->addr, |
@@ -1052,9 +1046,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset) | |||
1052 | BATADV_TT_CLIENT_ISOLA) ? 'I' : '.'), | 1046 | BATADV_TT_CLIENT_ISOLA) ? 'I' : '.'), |
1053 | no_purge ? 0 : last_seen_secs, | 1047 | no_purge ? 0 : last_seen_secs, |
1054 | no_purge ? 0 : last_seen_msecs, | 1048 | no_purge ? 0 : last_seen_msecs, |
1055 | vlan->tt.crc); | 1049 | tt_local->vlan->tt.crc); |
1056 | |||
1057 | batadv_softif_vlan_put(vlan); | ||
1058 | } | 1050 | } |
1059 | rcu_read_unlock(); | 1051 | rcu_read_unlock(); |
1060 | } | 1052 | } |
@@ -1099,7 +1091,6 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr, | |||
1099 | { | 1091 | { |
1100 | struct batadv_tt_local_entry *tt_local_entry; | 1092 | struct batadv_tt_local_entry *tt_local_entry; |
1101 | u16 flags, curr_flags = BATADV_NO_FLAGS; | 1093 | u16 flags, curr_flags = BATADV_NO_FLAGS; |
1102 | struct batadv_softif_vlan *vlan; | ||
1103 | void *tt_entry_exists; | 1094 | void *tt_entry_exists; |
1104 | 1095 | ||
1105 | tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid); | 1096 | tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid); |
@@ -1139,14 +1130,6 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr, | |||
1139 | /* extra call to free the local tt entry */ | 1130 | /* extra call to free the local tt entry */ |
1140 | batadv_tt_local_entry_put(tt_local_entry); | 1131 | batadv_tt_local_entry_put(tt_local_entry); |
1141 | 1132 | ||
1142 | /* decrease the reference held for this vlan */ | ||
1143 | vlan = batadv_softif_vlan_get(bat_priv, vid); | ||
1144 | if (!vlan) | ||
1145 | goto out; | ||
1146 | |||
1147 | batadv_softif_vlan_put(vlan); | ||
1148 | batadv_softif_vlan_put(vlan); | ||
1149 | |||
1150 | out: | 1133 | out: |
1151 | if (tt_local_entry) | 1134 | if (tt_local_entry) |
1152 | batadv_tt_local_entry_put(tt_local_entry); | 1135 | batadv_tt_local_entry_put(tt_local_entry); |
@@ -1219,7 +1202,6 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv) | |||
1219 | spinlock_t *list_lock; /* protects write access to the hash lists */ | 1202 | spinlock_t *list_lock; /* protects write access to the hash lists */ |
1220 | struct batadv_tt_common_entry *tt_common_entry; | 1203 | struct batadv_tt_common_entry *tt_common_entry; |
1221 | struct batadv_tt_local_entry *tt_local; | 1204 | struct batadv_tt_local_entry *tt_local; |
1222 | struct batadv_softif_vlan *vlan; | ||
1223 | struct hlist_node *node_tmp; | 1205 | struct hlist_node *node_tmp; |
1224 | struct hlist_head *head; | 1206 | struct hlist_head *head; |
1225 | u32 i; | 1207 | u32 i; |
@@ -1241,14 +1223,6 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv) | |||
1241 | struct batadv_tt_local_entry, | 1223 | struct batadv_tt_local_entry, |
1242 | common); | 1224 | common); |
1243 | 1225 | ||
1244 | /* decrease the reference held for this vlan */ | ||
1245 | vlan = batadv_softif_vlan_get(bat_priv, | ||
1246 | tt_common_entry->vid); | ||
1247 | if (vlan) { | ||
1248 | batadv_softif_vlan_put(vlan); | ||
1249 | batadv_softif_vlan_put(vlan); | ||
1250 | } | ||
1251 | |||
1252 | batadv_tt_local_entry_put(tt_local); | 1226 | batadv_tt_local_entry_put(tt_local); |
1253 | } | 1227 | } |
1254 | spin_unlock_bh(list_lock); | 1228 | spin_unlock_bh(list_lock); |
@@ -3309,7 +3283,6 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv) | |||
3309 | struct batadv_hashtable *hash = bat_priv->tt.local_hash; | 3283 | struct batadv_hashtable *hash = bat_priv->tt.local_hash; |
3310 | struct batadv_tt_common_entry *tt_common; | 3284 | struct batadv_tt_common_entry *tt_common; |
3311 | struct batadv_tt_local_entry *tt_local; | 3285 | struct batadv_tt_local_entry *tt_local; |
3312 | struct batadv_softif_vlan *vlan; | ||
3313 | struct hlist_node *node_tmp; | 3286 | struct hlist_node *node_tmp; |
3314 | struct hlist_head *head; | 3287 | struct hlist_head *head; |
3315 | spinlock_t *list_lock; /* protects write access to the hash lists */ | 3288 | spinlock_t *list_lock; /* protects write access to the hash lists */ |
@@ -3339,13 +3312,6 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv) | |||
3339 | struct batadv_tt_local_entry, | 3312 | struct batadv_tt_local_entry, |
3340 | common); | 3313 | common); |
3341 | 3314 | ||
3342 | /* decrease the reference held for this vlan */ | ||
3343 | vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid); | ||
3344 | if (vlan) { | ||
3345 | batadv_softif_vlan_put(vlan); | ||
3346 | batadv_softif_vlan_put(vlan); | ||
3347 | } | ||
3348 | |||
3349 | batadv_tt_local_entry_put(tt_local); | 3315 | batadv_tt_local_entry_put(tt_local); |
3350 | } | 3316 | } |
3351 | spin_unlock_bh(list_lock); | 3317 | spin_unlock_bh(list_lock); |
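The translation-table hunks stash the softif_vlan pointer in each local TT entry and drop that reference from the entry's release function, which is why every per-site batadv_softif_vlan_get()/double-put block above can be deleted. The new release path, condensed from the hunk with comments added (kernel-style, not standalone):

static void batadv_tt_local_entry_release(struct kref *ref)
{
    struct batadv_tt_local_entry *tt_local_entry;

    tt_local_entry = container_of(ref, struct batadv_tt_local_entry,
                                  common.refcount);

    /* drop the vlan reference taken when the entry was created */
    batadv_softif_vlan_put(tt_local_entry->vlan);

    kfree_rcu(tt_local_entry, common.rcu);
}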
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 9abfb3e73c34..1e47fbe8bb7b 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h | |||
@@ -433,6 +433,7 @@ struct batadv_hardif_neigh_node { | |||
433 | * @ifinfo_lock: lock protecting private ifinfo members and list | 433 | * @ifinfo_lock: lock protecting private ifinfo members and list |
434 | * @if_incoming: pointer to incoming hard-interface | 434 | * @if_incoming: pointer to incoming hard-interface |
435 | * @last_seen: when last packet via this neighbor was received | 435 | * @last_seen: when last packet via this neighbor was received |
436 | * @hardif_neigh: hardif_neigh of this neighbor | ||
436 | * @refcount: number of contexts the object is used | 437 | * @refcount: number of contexts the object is used |
437 | * @rcu: struct used for freeing in an RCU-safe manner | 438 | * @rcu: struct used for freeing in an RCU-safe manner |
438 | */ | 439 | */ |
@@ -444,6 +445,7 @@ struct batadv_neigh_node { | |||
444 | spinlock_t ifinfo_lock; /* protects ifinfo_list and its members */ | 445 | spinlock_t ifinfo_lock; /* protects ifinfo_list and its members */ |
445 | struct batadv_hard_iface *if_incoming; | 446 | struct batadv_hard_iface *if_incoming; |
446 | unsigned long last_seen; | 447 | unsigned long last_seen; |
448 | struct batadv_hardif_neigh_node *hardif_neigh; | ||
447 | struct kref refcount; | 449 | struct kref refcount; |
448 | struct rcu_head rcu; | 450 | struct rcu_head rcu; |
449 | }; | 451 | }; |
@@ -1073,10 +1075,12 @@ struct batadv_tt_common_entry { | |||
1073 | * struct batadv_tt_local_entry - translation table local entry data | 1075 | * struct batadv_tt_local_entry - translation table local entry data |
1074 | * @common: general translation table data | 1076 | * @common: general translation table data |
1075 | * @last_seen: timestamp used for purging stale tt local entries | 1077 | * @last_seen: timestamp used for purging stale tt local entries |
1078 | * @vlan: soft-interface vlan of the entry | ||
1076 | */ | 1079 | */ |
1077 | struct batadv_tt_local_entry { | 1080 | struct batadv_tt_local_entry { |
1078 | struct batadv_tt_common_entry common; | 1081 | struct batadv_tt_common_entry common; |
1079 | unsigned long last_seen; | 1082 | unsigned long last_seen; |
1083 | struct batadv_softif_vlan *vlan; | ||
1080 | }; | 1084 | }; |
1081 | 1085 | ||
1082 | /** | 1086 | /** |
@@ -1250,6 +1254,8 @@ struct batadv_forw_packet { | |||
1250 | * struct batadv_algo_ops - mesh algorithm callbacks | 1254 | * struct batadv_algo_ops - mesh algorithm callbacks |
1251 | * @list: list node for the batadv_algo_list | 1255 | * @list: list node for the batadv_algo_list |
1252 | * @name: name of the algorithm | 1256 | * @name: name of the algorithm |
1257 | * @bat_iface_activate: start routing mechanisms when hard-interface is brought | ||
1258 | * up | ||
1253 | * @bat_iface_enable: init routing info when hard-interface is enabled | 1259 | * @bat_iface_enable: init routing info when hard-interface is enabled |
1254 | * @bat_iface_disable: de-init routing info when hard-interface is disabled | 1260 | * @bat_iface_disable: de-init routing info when hard-interface is disabled |
1255 | * @bat_iface_update_mac: (re-)init mac addresses of the protocol information | 1261 | * @bat_iface_update_mac: (re-)init mac addresses of the protocol information |
@@ -1277,6 +1283,7 @@ struct batadv_forw_packet { | |||
1277 | struct batadv_algo_ops { | 1283 | struct batadv_algo_ops { |
1278 | struct hlist_node list; | 1284 | struct hlist_node list; |
1279 | char *name; | 1285 | char *name; |
1286 | void (*bat_iface_activate)(struct batadv_hard_iface *hard_iface); | ||
1280 | int (*bat_iface_enable)(struct batadv_hard_iface *hard_iface); | 1287 | int (*bat_iface_enable)(struct batadv_hard_iface *hard_iface); |
1281 | void (*bat_iface_disable)(struct batadv_hard_iface *hard_iface); | 1288 | void (*bat_iface_disable)(struct batadv_hard_iface *hard_iface); |
1282 | void (*bat_iface_update_mac)(struct batadv_hard_iface *hard_iface); | 1289 | void (*bat_iface_update_mac)(struct batadv_hard_iface *hard_iface); |
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index 253bc77eda3b..7dbc80d01eb0 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c | |||
@@ -61,6 +61,19 @@ static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags) | |||
61 | e->flags |= MDB_FLAGS_OFFLOAD; | 61 | e->flags |= MDB_FLAGS_OFFLOAD; |
62 | } | 62 | } |
63 | 63 | ||
64 | static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip) | ||
65 | { | ||
66 | memset(ip, 0, sizeof(struct br_ip)); | ||
67 | ip->vid = entry->vid; | ||
68 | ip->proto = entry->addr.proto; | ||
69 | if (ip->proto == htons(ETH_P_IP)) | ||
70 | ip->u.ip4 = entry->addr.u.ip4; | ||
71 | #if IS_ENABLED(CONFIG_IPV6) | ||
72 | else | ||
73 | ip->u.ip6 = entry->addr.u.ip6; | ||
74 | #endif | ||
75 | } | ||
76 | |||
64 | static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb, | 77 | static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb, |
65 | struct net_device *dev) | 78 | struct net_device *dev) |
66 | { | 79 | { |
@@ -243,9 +256,45 @@ static inline size_t rtnl_mdb_nlmsg_size(void) | |||
243 | + nla_total_size(sizeof(struct br_mdb_entry)); | 256 | + nla_total_size(sizeof(struct br_mdb_entry)); |
244 | } | 257 | } |
245 | 258 | ||
246 | static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry, | 259 | struct br_mdb_complete_info { |
247 | int type, struct net_bridge_port_group *pg) | 260 | struct net_bridge_port *port; |
261 | struct br_ip ip; | ||
262 | }; | ||
263 | |||
264 | static void br_mdb_complete(struct net_device *dev, int err, void *priv) | ||
248 | { | 265 | { |
266 | struct br_mdb_complete_info *data = priv; | ||
267 | struct net_bridge_port_group __rcu **pp; | ||
268 | struct net_bridge_port_group *p; | ||
269 | struct net_bridge_mdb_htable *mdb; | ||
270 | struct net_bridge_mdb_entry *mp; | ||
271 | struct net_bridge_port *port = data->port; | ||
272 | struct net_bridge *br = port->br; | ||
273 | |||
274 | if (err) | ||
275 | goto err; | ||
276 | |||
277 | spin_lock_bh(&br->multicast_lock); | ||
278 | mdb = mlock_dereference(br->mdb, br); | ||
279 | mp = br_mdb_ip_get(mdb, &data->ip); | ||
280 | if (!mp) | ||
281 | goto out; | ||
282 | for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL; | ||
283 | pp = &p->next) { | ||
284 | if (p->port != port) | ||
285 | continue; | ||
286 | p->flags |= MDB_PG_FLAGS_OFFLOAD; | ||
287 | } | ||
288 | out: | ||
289 | spin_unlock_bh(&br->multicast_lock); | ||
290 | err: | ||
291 | kfree(priv); | ||
292 | } | ||
293 | |||
294 | static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p, | ||
295 | struct br_mdb_entry *entry, int type) | ||
296 | { | ||
297 | struct br_mdb_complete_info *complete_info; | ||
249 | struct switchdev_obj_port_mdb mdb = { | 298 | struct switchdev_obj_port_mdb mdb = { |
250 | .obj = { | 299 | .obj = { |
251 | .id = SWITCHDEV_OBJ_ID_PORT_MDB, | 300 | .id = SWITCHDEV_OBJ_ID_PORT_MDB, |
@@ -268,9 +317,14 @@ static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry, | |||
268 | 317 | ||
269 | mdb.obj.orig_dev = port_dev; | 318 | mdb.obj.orig_dev = port_dev; |
270 | if (port_dev && type == RTM_NEWMDB) { | 319 | if (port_dev && type == RTM_NEWMDB) { |
271 | err = switchdev_port_obj_add(port_dev, &mdb.obj); | 320 | complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC); |
272 | if (!err && pg) | 321 | if (complete_info) { |
273 | pg->flags |= MDB_PG_FLAGS_OFFLOAD; | 322 | complete_info->port = p; |
323 | __mdb_entry_to_br_ip(entry, &complete_info->ip); | ||
324 | mdb.obj.complete_priv = complete_info; | ||
325 | mdb.obj.complete = br_mdb_complete; | ||
326 | switchdev_port_obj_add(port_dev, &mdb.obj); | ||
327 | } | ||
274 | } else if (port_dev && type == RTM_DELMDB) { | 328 | } else if (port_dev && type == RTM_DELMDB) { |
275 | switchdev_port_obj_del(port_dev, &mdb.obj); | 329 | switchdev_port_obj_del(port_dev, &mdb.obj); |
276 | } | 330 | } |
@@ -291,21 +345,21 @@ errout: | |||
291 | rtnl_set_sk_err(net, RTNLGRP_MDB, err); | 345 | rtnl_set_sk_err(net, RTNLGRP_MDB, err); |
292 | } | 346 | } |
293 | 347 | ||
294 | void br_mdb_notify(struct net_device *dev, struct net_bridge_port_group *pg, | 348 | void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port, |
295 | int type) | 349 | struct br_ip *group, int type, u8 flags) |
296 | { | 350 | { |
297 | struct br_mdb_entry entry; | 351 | struct br_mdb_entry entry; |
298 | 352 | ||
299 | memset(&entry, 0, sizeof(entry)); | 353 | memset(&entry, 0, sizeof(entry)); |
300 | entry.ifindex = pg->port->dev->ifindex; | 354 | entry.ifindex = port->dev->ifindex; |
301 | entry.addr.proto = pg->addr.proto; | 355 | entry.addr.proto = group->proto; |
302 | entry.addr.u.ip4 = pg->addr.u.ip4; | 356 | entry.addr.u.ip4 = group->u.ip4; |
303 | #if IS_ENABLED(CONFIG_IPV6) | 357 | #if IS_ENABLED(CONFIG_IPV6) |
304 | entry.addr.u.ip6 = pg->addr.u.ip6; | 358 | entry.addr.u.ip6 = group->u.ip6; |
305 | #endif | 359 | #endif |
306 | entry.vid = pg->addr.vid; | 360 | entry.vid = group->vid; |
307 | __mdb_entry_fill_flags(&entry, pg->flags); | 361 | __mdb_entry_fill_flags(&entry, flags); |
308 | __br_mdb_notify(dev, &entry, type, pg); | 362 | __br_mdb_notify(dev, port, &entry, type); |
309 | } | 363 | } |
310 | 364 | ||
311 | static int nlmsg_populate_rtr_fill(struct sk_buff *skb, | 365 | static int nlmsg_populate_rtr_fill(struct sk_buff *skb, |
@@ -450,8 +504,7 @@ static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
450 | } | 504 | } |
451 | 505 | ||
452 | static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port, | 506 | static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port, |
453 | struct br_ip *group, unsigned char state, | 507 | struct br_ip *group, unsigned char state) |
454 | struct net_bridge_port_group **pg) | ||
455 | { | 508 | { |
456 | struct net_bridge_mdb_entry *mp; | 509 | struct net_bridge_mdb_entry *mp; |
457 | struct net_bridge_port_group *p; | 510 | struct net_bridge_port_group *p; |
@@ -482,7 +535,6 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port, | |||
482 | if (unlikely(!p)) | 535 | if (unlikely(!p)) |
483 | return -ENOMEM; | 536 | return -ENOMEM; |
484 | rcu_assign_pointer(*pp, p); | 537 | rcu_assign_pointer(*pp, p); |
485 | *pg = p; | ||
486 | if (state == MDB_TEMPORARY) | 538 | if (state == MDB_TEMPORARY) |
487 | mod_timer(&p->timer, now + br->multicast_membership_interval); | 539 | mod_timer(&p->timer, now + br->multicast_membership_interval); |
488 | 540 | ||
@@ -490,8 +542,7 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port, | |||
490 | } | 542 | } |
491 | 543 | ||
492 | static int __br_mdb_add(struct net *net, struct net_bridge *br, | 544 | static int __br_mdb_add(struct net *net, struct net_bridge *br, |
493 | struct br_mdb_entry *entry, | 545 | struct br_mdb_entry *entry) |
494 | struct net_bridge_port_group **pg) | ||
495 | { | 546 | { |
496 | struct br_ip ip; | 547 | struct br_ip ip; |
497 | struct net_device *dev; | 548 | struct net_device *dev; |
@@ -509,18 +560,10 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br, | |||
509 | if (!p || p->br != br || p->state == BR_STATE_DISABLED) | 560 | if (!p || p->br != br || p->state == BR_STATE_DISABLED) |
510 | return -EINVAL; | 561 | return -EINVAL; |
511 | 562 | ||
512 | memset(&ip, 0, sizeof(ip)); | 563 | __mdb_entry_to_br_ip(entry, &ip); |
513 | ip.vid = entry->vid; | ||
514 | ip.proto = entry->addr.proto; | ||
515 | if (ip.proto == htons(ETH_P_IP)) | ||
516 | ip.u.ip4 = entry->addr.u.ip4; | ||
517 | #if IS_ENABLED(CONFIG_IPV6) | ||
518 | else | ||
519 | ip.u.ip6 = entry->addr.u.ip6; | ||
520 | #endif | ||
521 | 564 | ||
522 | spin_lock_bh(&br->multicast_lock); | 565 | spin_lock_bh(&br->multicast_lock); |
523 | ret = br_mdb_add_group(br, p, &ip, entry->state, pg); | 566 | ret = br_mdb_add_group(br, p, &ip, entry->state); |
524 | spin_unlock_bh(&br->multicast_lock); | 567 | spin_unlock_bh(&br->multicast_lock); |
525 | return ret; | 568 | return ret; |
526 | } | 569 | } |
@@ -528,7 +571,6 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br, | |||
528 | static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh) | 571 | static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh) |
529 | { | 572 | { |
530 | struct net *net = sock_net(skb->sk); | 573 | struct net *net = sock_net(skb->sk); |
531 | struct net_bridge_port_group *pg; | ||
532 | struct net_bridge_vlan_group *vg; | 574 | struct net_bridge_vlan_group *vg; |
533 | struct net_device *dev, *pdev; | 575 | struct net_device *dev, *pdev; |
534 | struct br_mdb_entry *entry; | 576 | struct br_mdb_entry *entry; |
@@ -558,15 +600,15 @@ static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
558 | if (br_vlan_enabled(br) && vg && entry->vid == 0) { | 600 | if (br_vlan_enabled(br) && vg && entry->vid == 0) { |
559 | list_for_each_entry(v, &vg->vlan_list, vlist) { | 601 | list_for_each_entry(v, &vg->vlan_list, vlist) { |
560 | entry->vid = v->vid; | 602 | entry->vid = v->vid; |
561 | err = __br_mdb_add(net, br, entry, &pg); | 603 | err = __br_mdb_add(net, br, entry); |
562 | if (err) | 604 | if (err) |
563 | break; | 605 | break; |
564 | __br_mdb_notify(dev, entry, RTM_NEWMDB, pg); | 606 | __br_mdb_notify(dev, p, entry, RTM_NEWMDB); |
565 | } | 607 | } |
566 | } else { | 608 | } else { |
567 | err = __br_mdb_add(net, br, entry, &pg); | 609 | err = __br_mdb_add(net, br, entry); |
568 | if (!err) | 610 | if (!err) |
569 | __br_mdb_notify(dev, entry, RTM_NEWMDB, pg); | 611 | __br_mdb_notify(dev, p, entry, RTM_NEWMDB); |
570 | } | 612 | } |
571 | 613 | ||
572 | return err; | 614 | return err; |
@@ -584,15 +626,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry) | |||
584 | if (!netif_running(br->dev) || br->multicast_disabled) | 626 | if (!netif_running(br->dev) || br->multicast_disabled) |
585 | return -EINVAL; | 627 | return -EINVAL; |
586 | 628 | ||
587 | memset(&ip, 0, sizeof(ip)); | 629 | __mdb_entry_to_br_ip(entry, &ip); |
588 | ip.vid = entry->vid; | ||
589 | ip.proto = entry->addr.proto; | ||
590 | if (ip.proto == htons(ETH_P_IP)) | ||
591 | ip.u.ip4 = entry->addr.u.ip4; | ||
592 | #if IS_ENABLED(CONFIG_IPV6) | ||
593 | else | ||
594 | ip.u.ip6 = entry->addr.u.ip6; | ||
595 | #endif | ||
596 | 630 | ||
597 | spin_lock_bh(&br->multicast_lock); | 631 | spin_lock_bh(&br->multicast_lock); |
598 | mdb = mlock_dereference(br->mdb, br); | 632 | mdb = mlock_dereference(br->mdb, br); |
@@ -662,12 +696,12 @@ static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
662 | entry->vid = v->vid; | 696 | entry->vid = v->vid; |
663 | err = __br_mdb_del(br, entry); | 697 | err = __br_mdb_del(br, entry); |
664 | if (!err) | 698 | if (!err) |
665 | __br_mdb_notify(dev, entry, RTM_DELMDB, NULL); | 699 | __br_mdb_notify(dev, p, entry, RTM_DELMDB); |
666 | } | 700 | } |
667 | } else { | 701 | } else { |
668 | err = __br_mdb_del(br, entry); | 702 | err = __br_mdb_del(br, entry); |
669 | if (!err) | 703 | if (!err) |
670 | __br_mdb_notify(dev, entry, RTM_DELMDB, NULL); | 704 | __br_mdb_notify(dev, p, entry, RTM_DELMDB); |
671 | } | 705 | } |
672 | 706 | ||
673 | return err; | 707 | return err; |
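br_mdb.c switches the offload notification to an asynchronous completion: the switchdev request carries a kmalloc'ed br_mdb_complete_info, and br_mdb_complete() sets MDB_PG_FLAGS_OFFLOAD under multicast_lock once the driver reports success, rather than flagging the port group synchronously at add time. A userspace analogue of handing private data to a completion callback (all names invented):

#include <stdio.h>
#include <stdlib.h>

struct complete_info { int port_id; unsigned int group; };

static int offloaded;                   /* stands in for MDB_PG_FLAGS_OFFLOAD */

/* The completion owns the context: mark success and always free it. */
static void mdb_complete(int err, void *priv)
{
    struct complete_info *data = priv;

    if (!err)
        offloaded = 1;
    free(data);
}

static void request_offload(int port_id, unsigned int group,
                            void (*complete)(int, void *))
{
    struct complete_info *ci = malloc(sizeof(*ci));

    if (!ci)
        return;                         /* no context: skip the offload */
    ci->port_id = port_id;
    ci->group = group;
    complete(0, ci);                    /* a driver would call this later */
}

int main(void)
{
    request_offload(1, 0xe0000001, mdb_complete);
    printf("offloaded=%d\n", offloaded);
    return 0;
}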
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index a4c15df2b792..191ea66e4d92 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
@@ -283,7 +283,8 @@ static void br_multicast_del_pg(struct net_bridge *br, | |||
283 | rcu_assign_pointer(*pp, p->next); | 283 | rcu_assign_pointer(*pp, p->next); |
284 | hlist_del_init(&p->mglist); | 284 | hlist_del_init(&p->mglist); |
285 | del_timer(&p->timer); | 285 | del_timer(&p->timer); |
286 | br_mdb_notify(br->dev, p, RTM_DELMDB); | 286 | br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB, |
287 | p->flags); | ||
287 | call_rcu_bh(&p->rcu, br_multicast_free_pg); | 288 | call_rcu_bh(&p->rcu, br_multicast_free_pg); |
288 | 289 | ||
289 | if (!mp->ports && !mp->mglist && | 290 | if (!mp->ports && !mp->mglist && |
@@ -705,7 +706,7 @@ static int br_multicast_add_group(struct net_bridge *br, | |||
705 | if (unlikely(!p)) | 706 | if (unlikely(!p)) |
706 | goto err; | 707 | goto err; |
707 | rcu_assign_pointer(*pp, p); | 708 | rcu_assign_pointer(*pp, p); |
708 | br_mdb_notify(br->dev, p, RTM_NEWMDB); | 709 | br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0); |
709 | 710 | ||
710 | found: | 711 | found: |
711 | mod_timer(&p->timer, now + br->multicast_membership_interval); | 712 | mod_timer(&p->timer, now + br->multicast_membership_interval); |
@@ -1461,7 +1462,8 @@ br_multicast_leave_group(struct net_bridge *br, | |||
1461 | hlist_del_init(&p->mglist); | 1462 | hlist_del_init(&p->mglist); |
1462 | del_timer(&p->timer); | 1463 | del_timer(&p->timer); |
1463 | call_rcu_bh(&p->rcu, br_multicast_free_pg); | 1464 | call_rcu_bh(&p->rcu, br_multicast_free_pg); |
1464 | br_mdb_notify(br->dev, p, RTM_DELMDB); | 1465 | br_mdb_notify(br->dev, port, group, RTM_DELMDB, |
1466 | p->flags); | ||
1465 | 1467 | ||
1466 | if (!mp->ports && !mp->mglist && | 1468 | if (!mp->ports && !mp->mglist && |
1467 | netif_running(br->dev)) | 1469 | netif_running(br->dev)) |
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 1b5d145dfcbf..d9da857182ef 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
@@ -560,8 +560,8 @@ br_multicast_new_port_group(struct net_bridge_port *port, struct br_ip *group, | |||
560 | unsigned char flags); | 560 | unsigned char flags); |
561 | void br_mdb_init(void); | 561 | void br_mdb_init(void); |
562 | void br_mdb_uninit(void); | 562 | void br_mdb_uninit(void); |
563 | void br_mdb_notify(struct net_device *dev, struct net_bridge_port_group *pg, | 563 | void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port, |
564 | int type); | 564 | struct br_ip *group, int type, u8 flags); |
565 | void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port, | 565 | void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port, |
566 | int type); | 566 | int type); |
567 | 567 | ||
diff --git a/net/ceph/auth.c b/net/ceph/auth.c index 6b923bcaa2a4..2bc5965fdd1e 100644 --- a/net/ceph/auth.c +++ b/net/ceph/auth.c | |||
@@ -293,13 +293,9 @@ int ceph_auth_create_authorizer(struct ceph_auth_client *ac, | |||
293 | } | 293 | } |
294 | EXPORT_SYMBOL(ceph_auth_create_authorizer); | 294 | EXPORT_SYMBOL(ceph_auth_create_authorizer); |
295 | 295 | ||
296 | void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac, | 296 | void ceph_auth_destroy_authorizer(struct ceph_authorizer *a) |
297 | struct ceph_authorizer *a) | ||
298 | { | 297 | { |
299 | mutex_lock(&ac->mutex); | 298 | a->destroy(a); |
300 | if (ac->ops && ac->ops->destroy_authorizer) | ||
301 | ac->ops->destroy_authorizer(ac, a); | ||
302 | mutex_unlock(&ac->mutex); | ||
303 | } | 299 | } |
304 | EXPORT_SYMBOL(ceph_auth_destroy_authorizer); | 300 | EXPORT_SYMBOL(ceph_auth_destroy_authorizer); |
305 | 301 | ||
diff --git a/net/ceph/auth_none.c b/net/ceph/auth_none.c index 8c93fa8d81bc..5f836f02ae36 100644 --- a/net/ceph/auth_none.c +++ b/net/ceph/auth_none.c | |||
@@ -16,7 +16,6 @@ static void reset(struct ceph_auth_client *ac) | |||
16 | struct ceph_auth_none_info *xi = ac->private; | 16 | struct ceph_auth_none_info *xi = ac->private; |
17 | 17 | ||
18 | xi->starting = true; | 18 | xi->starting = true; |
19 | xi->built_authorizer = false; | ||
20 | } | 19 | } |
21 | 20 | ||
22 | static void destroy(struct ceph_auth_client *ac) | 21 | static void destroy(struct ceph_auth_client *ac) |
@@ -39,6 +38,27 @@ static int should_authenticate(struct ceph_auth_client *ac) | |||
39 | return xi->starting; | 38 | return xi->starting; |
40 | } | 39 | } |
41 | 40 | ||
41 | static int ceph_auth_none_build_authorizer(struct ceph_auth_client *ac, | ||
42 | struct ceph_none_authorizer *au) | ||
43 | { | ||
44 | void *p = au->buf; | ||
45 | void *const end = p + sizeof(au->buf); | ||
46 | int ret; | ||
47 | |||
48 | ceph_encode_8_safe(&p, end, 1, e_range); | ||
49 | ret = ceph_entity_name_encode(ac->name, &p, end); | ||
50 | if (ret < 0) | ||
51 | return ret; | ||
52 | |||
53 | ceph_encode_64_safe(&p, end, ac->global_id, e_range); | ||
54 | au->buf_len = p - (void *)au->buf; | ||
55 | dout("%s built authorizer len %d\n", __func__, au->buf_len); | ||
56 | return 0; | ||
57 | |||
58 | e_range: | ||
59 | return -ERANGE; | ||
60 | } | ||
61 | |||
42 | static int build_request(struct ceph_auth_client *ac, void *buf, void *end) | 62 | static int build_request(struct ceph_auth_client *ac, void *buf, void *end) |
43 | { | 63 | { |
44 | return 0; | 64 | return 0; |
@@ -57,32 +77,32 @@ static int handle_reply(struct ceph_auth_client *ac, int result, | |||
57 | return result; | 77 | return result; |
58 | } | 78 | } |
59 | 79 | ||
80 | static void ceph_auth_none_destroy_authorizer(struct ceph_authorizer *a) | ||
81 | { | ||
82 | kfree(a); | ||
83 | } | ||
84 | |||
60 | /* | 85 | /* |
61 | * build an 'authorizer' with our entity_name and global_id. we can | 86 | * build an 'authorizer' with our entity_name and global_id. it is |
62 | * reuse a single static copy since it is identical for all services | 87 | * identical for all services we connect to. |
63 | * we connect to. | ||
64 | */ | 88 | */ |
65 | static int ceph_auth_none_create_authorizer( | 89 | static int ceph_auth_none_create_authorizer( |
66 | struct ceph_auth_client *ac, int peer_type, | 90 | struct ceph_auth_client *ac, int peer_type, |
67 | struct ceph_auth_handshake *auth) | 91 | struct ceph_auth_handshake *auth) |
68 | { | 92 | { |
69 | struct ceph_auth_none_info *ai = ac->private; | 93 | struct ceph_none_authorizer *au; |
70 | struct ceph_none_authorizer *au = &ai->au; | ||
71 | void *p, *end; | ||
72 | int ret; | 94 | int ret; |
73 | 95 | ||
74 | if (!ai->built_authorizer) { | 96 | au = kmalloc(sizeof(*au), GFP_NOFS); |
75 | p = au->buf; | 97 | if (!au) |
76 | end = p + sizeof(au->buf); | 98 | return -ENOMEM; |
77 | ceph_encode_8(&p, 1); | 99 | |
78 | ret = ceph_entity_name_encode(ac->name, &p, end - 8); | 100 | au->base.destroy = ceph_auth_none_destroy_authorizer; |
79 | if (ret < 0) | 101 | |
80 | goto bad; | 102 | ret = ceph_auth_none_build_authorizer(ac, au); |
81 | ceph_decode_need(&p, end, sizeof(u64), bad2); | 103 | if (ret) { |
82 | ceph_encode_64(&p, ac->global_id); | 104 | kfree(au); |
83 | au->buf_len = p - (void *)au->buf; | 105 | return ret; |
84 | ai->built_authorizer = true; | ||
85 | dout("built authorizer len %d\n", au->buf_len); | ||
86 | } | 106 | } |
87 | 107 | ||
88 | auth->authorizer = (struct ceph_authorizer *) au; | 108 | auth->authorizer = (struct ceph_authorizer *) au; |
@@ -92,17 +112,6 @@ static int ceph_auth_none_create_authorizer( | |||
92 | auth->authorizer_reply_buf_len = sizeof (au->reply_buf); | 112 | auth->authorizer_reply_buf_len = sizeof (au->reply_buf); |
93 | 113 | ||
94 | return 0; | 114 | return 0; |
95 | |||
96 | bad2: | ||
97 | ret = -ERANGE; | ||
98 | bad: | ||
99 | return ret; | ||
100 | } | ||
101 | |||
102 | static void ceph_auth_none_destroy_authorizer(struct ceph_auth_client *ac, | ||
103 | struct ceph_authorizer *a) | ||
104 | { | ||
105 | /* nothing to do */ | ||
106 | } | 115 | } |
107 | 116 | ||
108 | static const struct ceph_auth_client_ops ceph_auth_none_ops = { | 117 | static const struct ceph_auth_client_ops ceph_auth_none_ops = { |
@@ -114,7 +123,6 @@ static const struct ceph_auth_client_ops ceph_auth_none_ops = { | |||
114 | .build_request = build_request, | 123 | .build_request = build_request, |
115 | .handle_reply = handle_reply, | 124 | .handle_reply = handle_reply, |
116 | .create_authorizer = ceph_auth_none_create_authorizer, | 125 | .create_authorizer = ceph_auth_none_create_authorizer, |
117 | .destroy_authorizer = ceph_auth_none_destroy_authorizer, | ||
118 | }; | 126 | }; |
119 | 127 | ||
120 | int ceph_auth_none_init(struct ceph_auth_client *ac) | 128 | int ceph_auth_none_init(struct ceph_auth_client *ac) |
@@ -127,7 +135,6 @@ int ceph_auth_none_init(struct ceph_auth_client *ac) | |||
127 | return -ENOMEM; | 135 | return -ENOMEM; |
128 | 136 | ||
129 | xi->starting = true; | 137 | xi->starting = true; |
130 | xi->built_authorizer = false; | ||
131 | 138 | ||
132 | ac->protocol = CEPH_AUTH_NONE; | 139 | ac->protocol = CEPH_AUTH_NONE; |
133 | ac->private = xi; | 140 | ac->private = xi; |
diff --git a/net/ceph/auth_none.h b/net/ceph/auth_none.h index 059a3ce4b53f..62021535ae4a 100644 --- a/net/ceph/auth_none.h +++ b/net/ceph/auth_none.h | |||
@@ -12,6 +12,7 @@ | |||
12 | */ | 12 | */ |
13 | 13 | ||
14 | struct ceph_none_authorizer { | 14 | struct ceph_none_authorizer { |
15 | struct ceph_authorizer base; | ||
15 | char buf[128]; | 16 | char buf[128]; |
16 | int buf_len; | 17 | int buf_len; |
17 | char reply_buf[0]; | 18 | char reply_buf[0]; |
@@ -19,8 +20,6 @@ struct ceph_none_authorizer { | |||
19 | 20 | ||
20 | struct ceph_auth_none_info { | 21 | struct ceph_auth_none_info { |
21 | bool starting; | 22 | bool starting; |
22 | bool built_authorizer; | ||
23 | struct ceph_none_authorizer au; /* we only need one; it's static */ | ||
24 | }; | 23 | }; |
25 | 24 | ||
26 | int ceph_auth_none_init(struct ceph_auth_client *ac); | 25 | int ceph_auth_none_init(struct ceph_auth_client *ac); |
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c index 9e43a315e662..a0905f04bd13 100644 --- a/net/ceph/auth_x.c +++ b/net/ceph/auth_x.c | |||
@@ -565,6 +565,14 @@ static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result, | |||
565 | return -EAGAIN; | 565 | return -EAGAIN; |
566 | } | 566 | } |
567 | 567 | ||
568 | static void ceph_x_destroy_authorizer(struct ceph_authorizer *a) | ||
569 | { | ||
570 | struct ceph_x_authorizer *au = (void *)a; | ||
571 | |||
572 | ceph_x_authorizer_cleanup(au); | ||
573 | kfree(au); | ||
574 | } | ||
575 | |||
568 | static int ceph_x_create_authorizer( | 576 | static int ceph_x_create_authorizer( |
569 | struct ceph_auth_client *ac, int peer_type, | 577 | struct ceph_auth_client *ac, int peer_type, |
570 | struct ceph_auth_handshake *auth) | 578 | struct ceph_auth_handshake *auth) |
@@ -581,6 +589,8 @@ static int ceph_x_create_authorizer( | |||
581 | if (!au) | 589 | if (!au) |
582 | return -ENOMEM; | 590 | return -ENOMEM; |
583 | 591 | ||
592 | au->base.destroy = ceph_x_destroy_authorizer; | ||
593 | |||
584 | ret = ceph_x_build_authorizer(ac, th, au); | 594 | ret = ceph_x_build_authorizer(ac, th, au); |
585 | if (ret) { | 595 | if (ret) { |
586 | kfree(au); | 596 | kfree(au); |
@@ -643,16 +653,6 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac, | |||
643 | return ret; | 653 | return ret; |
644 | } | 654 | } |
645 | 655 | ||
646 | static void ceph_x_destroy_authorizer(struct ceph_auth_client *ac, | ||
647 | struct ceph_authorizer *a) | ||
648 | { | ||
649 | struct ceph_x_authorizer *au = (void *)a; | ||
650 | |||
651 | ceph_x_authorizer_cleanup(au); | ||
652 | kfree(au); | ||
653 | } | ||
654 | |||
655 | |||
656 | static void ceph_x_reset(struct ceph_auth_client *ac) | 656 | static void ceph_x_reset(struct ceph_auth_client *ac) |
657 | { | 657 | { |
658 | struct ceph_x_info *xi = ac->private; | 658 | struct ceph_x_info *xi = ac->private; |
@@ -770,7 +770,6 @@ static const struct ceph_auth_client_ops ceph_x_ops = { | |||
770 | .create_authorizer = ceph_x_create_authorizer, | 770 | .create_authorizer = ceph_x_create_authorizer, |
771 | .update_authorizer = ceph_x_update_authorizer, | 771 | .update_authorizer = ceph_x_update_authorizer, |
772 | .verify_authorizer_reply = ceph_x_verify_authorizer_reply, | 772 | .verify_authorizer_reply = ceph_x_verify_authorizer_reply, |
773 | .destroy_authorizer = ceph_x_destroy_authorizer, | ||
774 | .invalidate_authorizer = ceph_x_invalidate_authorizer, | 773 | .invalidate_authorizer = ceph_x_invalidate_authorizer, |
775 | .reset = ceph_x_reset, | 774 | .reset = ceph_x_reset, |
776 | .destroy = ceph_x_destroy, | 775 | .destroy = ceph_x_destroy, |
diff --git a/net/ceph/auth_x.h b/net/ceph/auth_x.h index 40b1a3cf7397..21a5af904bae 100644 --- a/net/ceph/auth_x.h +++ b/net/ceph/auth_x.h | |||
@@ -26,6 +26,7 @@ struct ceph_x_ticket_handler { | |||
26 | 26 | ||
27 | 27 | ||
28 | struct ceph_x_authorizer { | 28 | struct ceph_x_authorizer { |
29 | struct ceph_authorizer base; | ||
29 | struct ceph_crypto_key session_key; | 30 | struct ceph_crypto_key session_key; |
30 | struct ceph_buffer *buf; | 31 | struct ceph_buffer *buf; |
31 | unsigned int service; | 32 | unsigned int service; |
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 32355d9d0103..40a53a70efdf 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c | |||
@@ -1087,10 +1087,8 @@ static void put_osd(struct ceph_osd *osd) | |||
1087 | dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref), | 1087 | dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref), |
1088 | atomic_read(&osd->o_ref) - 1); | 1088 | atomic_read(&osd->o_ref) - 1); |
1089 | if (atomic_dec_and_test(&osd->o_ref)) { | 1089 | if (atomic_dec_and_test(&osd->o_ref)) { |
1090 | struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth; | ||
1091 | |||
1092 | if (osd->o_auth.authorizer) | 1090 | if (osd->o_auth.authorizer) |
1093 | ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer); | 1091 | ceph_auth_destroy_authorizer(osd->o_auth.authorizer); |
1094 | kfree(osd); | 1092 | kfree(osd); |
1095 | } | 1093 | } |
1096 | } | 1094 | } |
@@ -2984,7 +2982,7 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con, | |||
2984 | struct ceph_auth_handshake *auth = &o->o_auth; | 2982 | struct ceph_auth_handshake *auth = &o->o_auth; |
2985 | 2983 | ||
2986 | if (force_new && auth->authorizer) { | 2984 | if (force_new && auth->authorizer) { |
2987 | ceph_auth_destroy_authorizer(ac, auth->authorizer); | 2985 | ceph_auth_destroy_authorizer(auth->authorizer); |
2988 | auth->authorizer = NULL; | 2986 | auth->authorizer = NULL; |
2989 | } | 2987 | } |
2990 | if (!auth->authorizer) { | 2988 | if (!auth->authorizer) { |
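
Note: the ceph hunks above move authorizer teardown out of the per-client ops table (destroy_authorizer) and into a destroy callback embedded in the authorizer itself, so callers such as put_osd() no longer need the auth client at all. A minimal userspace sketch of that pattern, with every name hypothetical:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct ceph_authorizer: the base object
 * carries its own destructor, so whoever holds the pointer can free it
 * without knowing which auth flavour created it. */
struct authorizer {
	void (*destroy)(struct authorizer *a);
};

/* One concrete flavour embeds the base as its first member. */
struct none_authorizer {
	struct authorizer base;
	char buf[128];
	int buf_len;
};

static void none_destroy(struct authorizer *a)
{
	/* base is the first member, so the cast recovers the container */
	free((struct none_authorizer *)a);
}

static struct authorizer *none_create(void)
{
	struct none_authorizer *au = calloc(1, sizeof(*au));

	if (!au)
		return NULL;
	au->base.destroy = none_destroy;
	return &au->base;
}

/* Generic teardown, analogous to the new ceph_auth_destroy_authorizer(). */
static void destroy_authorizer(struct authorizer *a)
{
	a->destroy(a);
}

int main(void)
{
	struct authorizer *a = none_create();

	if (a)
		destroy_authorizer(a);	/* no auth-client argument needed */
	printf("authorizer destroyed via embedded callback\n");
	return 0;
}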
diff --git a/net/core/dev.c b/net/core/dev.c index 77a71cd68535..5c925ac50b95 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -2802,7 +2802,7 @@ static netdev_features_t harmonize_features(struct sk_buff *skb, | |||
2802 | 2802 | ||
2803 | if (skb->ip_summed != CHECKSUM_NONE && | 2803 | if (skb->ip_summed != CHECKSUM_NONE && |
2804 | !can_checksum_protocol(features, type)) { | 2804 | !can_checksum_protocol(features, type)) { |
2805 | features &= ~NETIF_F_CSUM_MASK; | 2805 | features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); |
2806 | } else if (illegal_highdma(skb->dev, skb)) { | 2806 | } else if (illegal_highdma(skb->dev, skb)) { |
2807 | features &= ~NETIF_F_SG; | 2807 | features &= ~NETIF_F_SG; |
2808 | } | 2808 | } |
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 8a9246deccfe..63566ec54794 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
@@ -904,7 +904,11 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim) | |||
904 | if (ifa->ifa_flags & IFA_F_SECONDARY) { | 904 | if (ifa->ifa_flags & IFA_F_SECONDARY) { |
905 | prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask); | 905 | prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask); |
906 | if (!prim) { | 906 | if (!prim) { |
907 | pr_warn("%s: bug: prim == NULL\n", __func__); | 907 | /* if the device has been deleted, we don't perform |
908 | * address promotion | ||
909 | */ | ||
910 | if (!in_dev->dead) | ||
911 | pr_warn("%s: bug: prim == NULL\n", __func__); | ||
908 | return; | 912 | return; |
909 | } | 913 | } |
910 | if (iprim && iprim != prim) { | 914 | if (iprim && iprim != prim) { |
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index bc68eced0105..0d9e9d7bb029 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c | |||
@@ -470,6 +470,7 @@ static int inet_reuseport_add_sock(struct sock *sk, | |||
470 | const struct sock *sk2, | 470 | const struct sock *sk2, |
471 | bool match_wildcard)) | 471 | bool match_wildcard)) |
472 | { | 472 | { |
473 | struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash; | ||
473 | struct sock *sk2; | 474 | struct sock *sk2; |
474 | struct hlist_nulls_node *node; | 475 | struct hlist_nulls_node *node; |
475 | kuid_t uid = sock_i_uid(sk); | 476 | kuid_t uid = sock_i_uid(sk); |
@@ -479,6 +480,7 @@ static int inet_reuseport_add_sock(struct sock *sk, | |||
479 | sk2->sk_family == sk->sk_family && | 480 | sk2->sk_family == sk->sk_family && |
480 | ipv6_only_sock(sk2) == ipv6_only_sock(sk) && | 481 | ipv6_only_sock(sk2) == ipv6_only_sock(sk) && |
481 | sk2->sk_bound_dev_if == sk->sk_bound_dev_if && | 482 | sk2->sk_bound_dev_if == sk->sk_bound_dev_if && |
483 | inet_csk(sk2)->icsk_bind_hash == tb && | ||
482 | sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) && | 484 | sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) && |
483 | saddr_same(sk, sk2, false)) | 485 | saddr_same(sk, sk2, false)) |
484 | return reuseport_add_sock(sk, sk2); | 486 | return reuseport_add_sock(sk, sk2); |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index af5d1f38217f..205a2b8a5a84 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -179,6 +179,7 @@ static __be16 tnl_flags_to_gre_flags(__be16 tflags) | |||
179 | return flags; | 179 | return flags; |
180 | } | 180 | } |
181 | 181 | ||
182 | /* Fills in tpi and returns header length to be pulled. */ | ||
182 | static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, | 183 | static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, |
183 | bool *csum_err) | 184 | bool *csum_err) |
184 | { | 185 | { |
@@ -238,7 +239,7 @@ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, | |||
238 | return -EINVAL; | 239 | return -EINVAL; |
239 | } | 240 | } |
240 | } | 241 | } |
241 | return iptunnel_pull_header(skb, hdr_len, tpi->proto, false); | 242 | return hdr_len; |
242 | } | 243 | } |
243 | 244 | ||
244 | static void ipgre_err(struct sk_buff *skb, u32 info, | 245 | static void ipgre_err(struct sk_buff *skb, u32 info, |
@@ -341,7 +342,7 @@ static void gre_err(struct sk_buff *skb, u32 info) | |||
341 | struct tnl_ptk_info tpi; | 342 | struct tnl_ptk_info tpi; |
342 | bool csum_err = false; | 343 | bool csum_err = false; |
343 | 344 | ||
344 | if (parse_gre_header(skb, &tpi, &csum_err)) { | 345 | if (parse_gre_header(skb, &tpi, &csum_err) < 0) { |
345 | if (!csum_err) /* ignore csum errors. */ | 346 | if (!csum_err) /* ignore csum errors. */ |
346 | return; | 347 | return; |
347 | } | 348 | } |
@@ -419,6 +420,7 @@ static int gre_rcv(struct sk_buff *skb) | |||
419 | { | 420 | { |
420 | struct tnl_ptk_info tpi; | 421 | struct tnl_ptk_info tpi; |
421 | bool csum_err = false; | 422 | bool csum_err = false; |
423 | int hdr_len; | ||
422 | 424 | ||
423 | #ifdef CONFIG_NET_IPGRE_BROADCAST | 425 | #ifdef CONFIG_NET_IPGRE_BROADCAST |
424 | if (ipv4_is_multicast(ip_hdr(skb)->daddr)) { | 426 | if (ipv4_is_multicast(ip_hdr(skb)->daddr)) { |
@@ -428,7 +430,10 @@ static int gre_rcv(struct sk_buff *skb) | |||
428 | } | 430 | } |
429 | #endif | 431 | #endif |
430 | 432 | ||
431 | if (parse_gre_header(skb, &tpi, &csum_err) < 0) | 433 | hdr_len = parse_gre_header(skb, &tpi, &csum_err); |
434 | if (hdr_len < 0) | ||
435 | goto drop; | ||
436 | if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false) < 0) | ||
432 | goto drop; | 437 | goto drop; |
433 | 438 | ||
434 | if (ipgre_rcv(skb, &tpi) == PACKET_RCVD) | 439 | if (ipgre_rcv(skb, &tpi) == PACKET_RCVD) |
@@ -523,7 +528,8 @@ static struct rtable *gre_get_rt(struct sk_buff *skb, | |||
523 | return ip_route_output_key(net, fl); | 528 | return ip_route_output_key(net, fl); |
524 | } | 529 | } |
525 | 530 | ||
526 | static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev) | 531 | static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev, |
532 | __be16 proto) | ||
527 | { | 533 | { |
528 | struct ip_tunnel_info *tun_info; | 534 | struct ip_tunnel_info *tun_info; |
529 | const struct ip_tunnel_key *key; | 535 | const struct ip_tunnel_key *key; |
@@ -575,7 +581,7 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev) | |||
575 | } | 581 | } |
576 | 582 | ||
577 | flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY); | 583 | flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY); |
578 | build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB), | 584 | build_header(skb, tunnel_hlen, flags, proto, |
579 | tunnel_id_to_key(tun_info->key.tun_id), 0); | 585 | tunnel_id_to_key(tun_info->key.tun_id), 0); |
580 | 586 | ||
581 | df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; | 587 | df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; |
@@ -616,7 +622,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb, | |||
616 | const struct iphdr *tnl_params; | 622 | const struct iphdr *tnl_params; |
617 | 623 | ||
618 | if (tunnel->collect_md) { | 624 | if (tunnel->collect_md) { |
619 | gre_fb_xmit(skb, dev); | 625 | gre_fb_xmit(skb, dev, skb->protocol); |
620 | return NETDEV_TX_OK; | 626 | return NETDEV_TX_OK; |
621 | } | 627 | } |
622 | 628 | ||
@@ -660,7 +666,7 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb, | |||
660 | struct ip_tunnel *tunnel = netdev_priv(dev); | 666 | struct ip_tunnel *tunnel = netdev_priv(dev); |
661 | 667 | ||
662 | if (tunnel->collect_md) { | 668 | if (tunnel->collect_md) { |
663 | gre_fb_xmit(skb, dev); | 669 | gre_fb_xmit(skb, dev, htons(ETH_P_TEB)); |
664 | return NETDEV_TX_OK; | 670 | return NETDEV_TX_OK; |
665 | } | 671 | } |
666 | 672 | ||
@@ -893,7 +899,7 @@ static int ipgre_tunnel_init(struct net_device *dev) | |||
893 | netif_keep_dst(dev); | 899 | netif_keep_dst(dev); |
894 | dev->addr_len = 4; | 900 | dev->addr_len = 4; |
895 | 901 | ||
896 | if (iph->daddr) { | 902 | if (iph->daddr && !tunnel->collect_md) { |
897 | #ifdef CONFIG_NET_IPGRE_BROADCAST | 903 | #ifdef CONFIG_NET_IPGRE_BROADCAST |
898 | if (ipv4_is_multicast(iph->daddr)) { | 904 | if (ipv4_is_multicast(iph->daddr)) { |
899 | if (!iph->saddr) | 905 | if (!iph->saddr) |
@@ -902,8 +908,9 @@ static int ipgre_tunnel_init(struct net_device *dev) | |||
902 | dev->header_ops = &ipgre_header_ops; | 908 | dev->header_ops = &ipgre_header_ops; |
903 | } | 909 | } |
904 | #endif | 910 | #endif |
905 | } else | 911 | } else if (!tunnel->collect_md) { |
906 | dev->header_ops = &ipgre_header_ops; | 912 | dev->header_ops = &ipgre_header_ops; |
913 | } | ||
907 | 914 | ||
908 | return ip_tunnel_init(dev); | 915 | return ip_tunnel_init(dev); |
909 | } | 916 | } |
@@ -946,6 +953,11 @@ static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[]) | |||
946 | if (flags & (GRE_VERSION|GRE_ROUTING)) | 953 | if (flags & (GRE_VERSION|GRE_ROUTING)) |
947 | return -EINVAL; | 954 | return -EINVAL; |
948 | 955 | ||
956 | if (data[IFLA_GRE_COLLECT_METADATA] && | ||
957 | data[IFLA_GRE_ENCAP_TYPE] && | ||
958 | nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE) | ||
959 | return -EINVAL; | ||
960 | |||
949 | return 0; | 961 | return 0; |
950 | } | 962 | } |
951 | 963 | ||
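
The ip_gre.c hunks above change parse_gre_header() so it only reports the header length; gre_rcv() then calls iptunnel_pull_header() itself, while the error path (gre_err) can parse without consuming the packet. A rough userspace illustration of that split, with hypothetical names and a made-up header format:

#include <stdio.h>

/* Hypothetical sketch: the parser validates a 4-byte header and reports
 * its length without consuming it; only the receive path advances the
 * buffer, mirroring how gre_rcv() now pulls the header after
 * parse_gre_header() succeeds. */
struct pkt {
	const unsigned char *data;
	size_t len;
};

static int parse_header(const struct pkt *p, size_t *hdr_len)
{
	if (p->len < 4 || p->data[0] != 0x45)	/* made-up validity check */
		return -1;
	*hdr_len = 4;
	return 0;
}

/* Error path: inspect the header, never modify the packet. */
static void handle_err(const struct pkt *p)
{
	size_t hdr_len;

	if (parse_header(p, &hdr_len) < 0)
		return;
	printf("error handler saw %zu-byte header, packet untouched\n", hdr_len);
}

/* Receive path: parse, then pull the header before handing the payload on. */
static int handle_rcv(struct pkt *p)
{
	size_t hdr_len;

	if (parse_header(p, &hdr_len) < 0)
		return -1;
	p->data += hdr_len;
	p->len -= hdr_len;
	printf("receive path pulled header, %zu payload bytes left\n", p->len);
	return 0;
}

int main(void)
{
	unsigned char buf[] = { 0x45, 0, 0, 0, 'h', 'i' };
	struct pkt p = { buf, sizeof(buf) };

	handle_err(&p);
	return handle_rcv(&p);
}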
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 6aad0192443d..a69ed94bda1b 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c | |||
@@ -326,12 +326,12 @@ static int ip_tunnel_bind_dev(struct net_device *dev) | |||
326 | 326 | ||
327 | if (!IS_ERR(rt)) { | 327 | if (!IS_ERR(rt)) { |
328 | tdev = rt->dst.dev; | 328 | tdev = rt->dst.dev; |
329 | dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst, | ||
330 | fl4.saddr); | ||
331 | ip_rt_put(rt); | 329 | ip_rt_put(rt); |
332 | } | 330 | } |
333 | if (dev->type != ARPHRD_ETHER) | 331 | if (dev->type != ARPHRD_ETHER) |
334 | dev->flags |= IFF_POINTOPOINT; | 332 | dev->flags |= IFF_POINTOPOINT; |
333 | |||
334 | dst_cache_reset(&tunnel->dst_cache); | ||
335 | } | 335 | } |
336 | 336 | ||
337 | if (!tdev && tunnel->parms.link) | 337 | if (!tdev && tunnel->parms.link) |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 23cec53b568a..8ec4b3089e20 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -3176,35 +3176,9 @@ static void addrconf_gre_config(struct net_device *dev) | |||
3176 | } | 3176 | } |
3177 | #endif | 3177 | #endif |
3178 | 3178 | ||
3179 | #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) | ||
3180 | /* If the host route is cached on the addr struct make sure it is associated | ||
3181 | * with the proper table. e.g., enslavement can change and if so the cached | ||
3182 | * host route needs to move to the new table. | ||
3183 | */ | ||
3184 | static void l3mdev_check_host_rt(struct inet6_dev *idev, | ||
3185 | struct inet6_ifaddr *ifp) | ||
3186 | { | ||
3187 | if (ifp->rt) { | ||
3188 | u32 tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL; | ||
3189 | |||
3190 | if (tb_id != ifp->rt->rt6i_table->tb6_id) { | ||
3191 | ip6_del_rt(ifp->rt); | ||
3192 | ifp->rt = NULL; | ||
3193 | } | ||
3194 | } | ||
3195 | } | ||
3196 | #else | ||
3197 | static void l3mdev_check_host_rt(struct inet6_dev *idev, | ||
3198 | struct inet6_ifaddr *ifp) | ||
3199 | { | ||
3200 | } | ||
3201 | #endif | ||
3202 | |||
3203 | static int fixup_permanent_addr(struct inet6_dev *idev, | 3179 | static int fixup_permanent_addr(struct inet6_dev *idev, |
3204 | struct inet6_ifaddr *ifp) | 3180 | struct inet6_ifaddr *ifp) |
3205 | { | 3181 | { |
3206 | l3mdev_check_host_rt(idev, ifp); | ||
3207 | |||
3208 | if (!ifp->rt) { | 3182 | if (!ifp->rt) { |
3209 | struct rt6_info *rt; | 3183 | struct rt6_info *rt; |
3210 | 3184 | ||
@@ -3304,6 +3278,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
3304 | break; | 3278 | break; |
3305 | 3279 | ||
3306 | if (event == NETDEV_UP) { | 3280 | if (event == NETDEV_UP) { |
3281 | /* restore routes for permanent addresses */ | ||
3282 | addrconf_permanent_addr(dev); | ||
3283 | |||
3307 | if (!addrconf_qdisc_ok(dev)) { | 3284 | if (!addrconf_qdisc_ok(dev)) { |
3308 | /* device is not ready yet. */ | 3285 | /* device is not ready yet. */ |
3309 | pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n", | 3286 | pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n", |
@@ -3337,9 +3314,6 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
3337 | run_pending = 1; | 3314 | run_pending = 1; |
3338 | } | 3315 | } |
3339 | 3316 | ||
3340 | /* restore routes for permanent addresses */ | ||
3341 | addrconf_permanent_addr(dev); | ||
3342 | |||
3343 | switch (dev->type) { | 3317 | switch (dev->type) { |
3344 | #if IS_ENABLED(CONFIG_IPV6_SIT) | 3318 | #if IS_ENABLED(CONFIG_IPV6_SIT) |
3345 | case ARPHRD_SIT: | 3319 | case ARPHRD_SIT: |
@@ -3556,6 +3530,8 @@ restart: | |||
3556 | 3530 | ||
3557 | INIT_LIST_HEAD(&del_list); | 3531 | INIT_LIST_HEAD(&del_list); |
3558 | list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) { | 3532 | list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) { |
3533 | struct rt6_info *rt = NULL; | ||
3534 | |||
3559 | addrconf_del_dad_work(ifa); | 3535 | addrconf_del_dad_work(ifa); |
3560 | 3536 | ||
3561 | write_unlock_bh(&idev->lock); | 3537 | write_unlock_bh(&idev->lock); |
@@ -3568,6 +3544,9 @@ restart: | |||
3568 | ifa->state = 0; | 3544 | ifa->state = 0; |
3569 | if (!(ifa->flags & IFA_F_NODAD)) | 3545 | if (!(ifa->flags & IFA_F_NODAD)) |
3570 | ifa->flags |= IFA_F_TENTATIVE; | 3546 | ifa->flags |= IFA_F_TENTATIVE; |
3547 | |||
3548 | rt = ifa->rt; | ||
3549 | ifa->rt = NULL; | ||
3571 | } else { | 3550 | } else { |
3572 | state = ifa->state; | 3551 | state = ifa->state; |
3573 | ifa->state = INET6_IFADDR_STATE_DEAD; | 3552 | ifa->state = INET6_IFADDR_STATE_DEAD; |
@@ -3578,6 +3557,9 @@ restart: | |||
3578 | 3557 | ||
3579 | spin_unlock_bh(&ifa->lock); | 3558 | spin_unlock_bh(&ifa->lock); |
3580 | 3559 | ||
3560 | if (rt) | ||
3561 | ip6_del_rt(rt); | ||
3562 | |||
3581 | if (state != INET6_IFADDR_STATE_DEAD) { | 3563 | if (state != INET6_IFADDR_STATE_DEAD) { |
3582 | __ipv6_ifa_notify(RTM_DELADDR, ifa); | 3564 | __ipv6_ifa_notify(RTM_DELADDR, ifa); |
3583 | inet6addr_notifier_call_chain(NETDEV_DOWN, ifa); | 3565 | inet6addr_notifier_call_chain(NETDEV_DOWN, ifa); |
@@ -5343,10 +5325,10 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) | |||
5343 | if (rt) | 5325 | if (rt) |
5344 | ip6_del_rt(rt); | 5326 | ip6_del_rt(rt); |
5345 | } | 5327 | } |
5346 | dst_hold(&ifp->rt->dst); | 5328 | if (ifp->rt) { |
5347 | 5329 | dst_hold(&ifp->rt->dst); | |
5348 | ip6_del_rt(ifp->rt); | 5330 | ip6_del_rt(ifp->rt); |
5349 | 5331 | } | |
5350 | rt_genid_bump_ipv6(net); | 5332 | rt_genid_bump_ipv6(net); |
5351 | break; | 5333 | break; |
5352 | } | 5334 | } |
diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c index 2ae3c4fd8aab..41f18de5dcc2 100644 --- a/net/ipv6/ila/ila_lwt.c +++ b/net/ipv6/ila/ila_lwt.c | |||
@@ -120,8 +120,7 @@ nla_put_failure: | |||
120 | 120 | ||
121 | static int ila_encap_nlsize(struct lwtunnel_state *lwtstate) | 121 | static int ila_encap_nlsize(struct lwtunnel_state *lwtstate) |
122 | { | 122 | { |
123 | /* No encapsulation overhead */ | 123 | return nla_total_size(sizeof(u64)); /* ILA_ATTR_LOCATOR */ |
124 | return 0; | ||
125 | } | 124 | } |
126 | 125 | ||
127 | static int ila_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b) | 126 | static int ila_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b) |
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index afca2eb4dfa7..6edfa9980314 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
@@ -1376,9 +1376,9 @@ static int l2tp_tunnel_sock_create(struct net *net, | |||
1376 | memcpy(&udp_conf.peer_ip6, cfg->peer_ip6, | 1376 | memcpy(&udp_conf.peer_ip6, cfg->peer_ip6, |
1377 | sizeof(udp_conf.peer_ip6)); | 1377 | sizeof(udp_conf.peer_ip6)); |
1378 | udp_conf.use_udp6_tx_checksums = | 1378 | udp_conf.use_udp6_tx_checksums = |
1379 | cfg->udp6_zero_tx_checksums; | 1379 | ! cfg->udp6_zero_tx_checksums; |
1380 | udp_conf.use_udp6_rx_checksums = | 1380 | udp_conf.use_udp6_rx_checksums = |
1381 | cfg->udp6_zero_rx_checksums; | 1381 | ! cfg->udp6_zero_rx_checksums; |
1382 | } else | 1382 | } else |
1383 | #endif | 1383 | #endif |
1384 | { | 1384 | { |
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 453b4e741780..e1cb22c16530 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -1761,7 +1761,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, | |||
1761 | 1761 | ||
1762 | ret = dev_alloc_name(ndev, ndev->name); | 1762 | ret = dev_alloc_name(ndev, ndev->name); |
1763 | if (ret < 0) { | 1763 | if (ret < 0) { |
1764 | free_netdev(ndev); | 1764 | ieee80211_if_free(ndev); |
1765 | return ret; | 1765 | return ret; |
1766 | } | 1766 | } |
1767 | 1767 | ||
@@ -1847,7 +1847,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, | |||
1847 | 1847 | ||
1848 | ret = register_netdevice(ndev); | 1848 | ret = register_netdevice(ndev); |
1849 | if (ret) { | 1849 | if (ret) { |
1850 | free_netdev(ndev); | 1850 | ieee80211_if_free(ndev); |
1851 | return ret; | 1851 | return ret; |
1852 | } | 1852 | } |
1853 | } | 1853 | } |
diff --git a/net/rds/tcp.c b/net/rds/tcp.c index 61ed2a8764ba..86187dad1440 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c | |||
@@ -127,7 +127,7 @@ void rds_tcp_restore_callbacks(struct socket *sock, | |||
127 | 127 | ||
128 | /* | 128 | /* |
129 | * This is the only path that sets tc->t_sock. Send and receive trust that | 129 | * This is the only path that sets tc->t_sock. Send and receive trust that |
130 | * it is set. The RDS_CONN_CONNECTED bit protects those paths from being | 130 | * it is set. The RDS_CONN_UP bit protects those paths from being |
131 | * called while it isn't set. | 131 | * called while it isn't set. |
132 | */ | 132 | */ |
133 | void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn) | 133 | void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn) |
@@ -216,6 +216,7 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp) | |||
216 | if (!tc) | 216 | if (!tc) |
217 | return -ENOMEM; | 217 | return -ENOMEM; |
218 | 218 | ||
219 | mutex_init(&tc->t_conn_lock); | ||
219 | tc->t_sock = NULL; | 220 | tc->t_sock = NULL; |
220 | tc->t_tinc = NULL; | 221 | tc->t_tinc = NULL; |
221 | tc->t_tinc_hdr_rem = sizeof(struct rds_header); | 222 | tc->t_tinc_hdr_rem = sizeof(struct rds_header); |
diff --git a/net/rds/tcp.h b/net/rds/tcp.h index 64f873c0c6b6..41c228300525 100644 --- a/net/rds/tcp.h +++ b/net/rds/tcp.h | |||
@@ -12,6 +12,10 @@ struct rds_tcp_connection { | |||
12 | 12 | ||
13 | struct list_head t_tcp_node; | 13 | struct list_head t_tcp_node; |
14 | struct rds_connection *conn; | 14 | struct rds_connection *conn; |
15 | /* t_conn_lock synchronizes the connection establishment between | ||
16 | * rds_tcp_accept_one and rds_tcp_conn_connect | ||
17 | */ | ||
18 | struct mutex t_conn_lock; | ||
15 | struct socket *t_sock; | 19 | struct socket *t_sock; |
16 | void *t_orig_write_space; | 20 | void *t_orig_write_space; |
17 | void *t_orig_data_ready; | 21 | void *t_orig_data_ready; |
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c index 5cb16875c460..49a3fcfed360 100644 --- a/net/rds/tcp_connect.c +++ b/net/rds/tcp_connect.c | |||
@@ -78,7 +78,14 @@ int rds_tcp_conn_connect(struct rds_connection *conn) | |||
78 | struct socket *sock = NULL; | 78 | struct socket *sock = NULL; |
79 | struct sockaddr_in src, dest; | 79 | struct sockaddr_in src, dest; |
80 | int ret; | 80 | int ret; |
81 | struct rds_tcp_connection *tc = conn->c_transport_data; | ||
82 | |||
83 | mutex_lock(&tc->t_conn_lock); | ||
81 | 84 | ||
85 | if (rds_conn_up(conn)) { | ||
86 | mutex_unlock(&tc->t_conn_lock); | ||
87 | return 0; | ||
88 | } | ||
82 | ret = sock_create_kern(rds_conn_net(conn), PF_INET, | 89 | ret = sock_create_kern(rds_conn_net(conn), PF_INET, |
83 | SOCK_STREAM, IPPROTO_TCP, &sock); | 90 | SOCK_STREAM, IPPROTO_TCP, &sock); |
84 | if (ret < 0) | 91 | if (ret < 0) |
@@ -120,6 +127,7 @@ int rds_tcp_conn_connect(struct rds_connection *conn) | |||
120 | } | 127 | } |
121 | 128 | ||
122 | out: | 129 | out: |
130 | mutex_unlock(&tc->t_conn_lock); | ||
123 | if (sock) | 131 | if (sock) |
124 | sock_release(sock); | 132 | sock_release(sock); |
125 | return ret; | 133 | return ret; |
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c index 0936a4a32b47..be263cdf268b 100644 --- a/net/rds/tcp_listen.c +++ b/net/rds/tcp_listen.c | |||
@@ -76,7 +76,9 @@ int rds_tcp_accept_one(struct socket *sock) | |||
76 | struct rds_connection *conn; | 76 | struct rds_connection *conn; |
77 | int ret; | 77 | int ret; |
78 | struct inet_sock *inet; | 78 | struct inet_sock *inet; |
79 | struct rds_tcp_connection *rs_tcp; | 79 | struct rds_tcp_connection *rs_tcp = NULL; |
80 | int conn_state; | ||
81 | struct sock *nsk; | ||
80 | 82 | ||
81 | ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family, | 83 | ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family, |
82 | sock->sk->sk_type, sock->sk->sk_protocol, | 84 | sock->sk->sk_type, sock->sk->sk_protocol, |
@@ -115,28 +117,44 @@ int rds_tcp_accept_one(struct socket *sock) | |||
115 | * rds_tcp_state_change() will do that cleanup | 117 | * rds_tcp_state_change() will do that cleanup |
116 | */ | 118 | */ |
117 | rs_tcp = (struct rds_tcp_connection *)conn->c_transport_data; | 119 | rs_tcp = (struct rds_tcp_connection *)conn->c_transport_data; |
118 | if (rs_tcp->t_sock && | ||
119 | ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr)) { | ||
120 | struct sock *nsk = new_sock->sk; | ||
121 | |||
122 | nsk->sk_user_data = NULL; | ||
123 | nsk->sk_prot->disconnect(nsk, 0); | ||
124 | tcp_done(nsk); | ||
125 | new_sock = NULL; | ||
126 | ret = 0; | ||
127 | goto out; | ||
128 | } else if (rs_tcp->t_sock) { | ||
129 | rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp); | ||
130 | conn->c_outgoing = 0; | ||
131 | } | ||
132 | |||
133 | rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING); | 120 | rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING); |
121 | mutex_lock(&rs_tcp->t_conn_lock); | ||
122 | conn_state = rds_conn_state(conn); | ||
123 | if (conn_state != RDS_CONN_CONNECTING && conn_state != RDS_CONN_UP) | ||
124 | goto rst_nsk; | ||
125 | if (rs_tcp->t_sock) { | ||
126 | /* Need to resolve a duelling SYN between peers. | ||
127 | * We have an outstanding SYN to this peer, which may | ||
128 | * potentially have transitioned to the RDS_CONN_UP state, | ||
129 | * so we must quiesce any send threads before resetting | ||
130 | * c_transport_data. | ||
131 | */ | ||
132 | wait_event(conn->c_waitq, | ||
133 | !test_bit(RDS_IN_XMIT, &conn->c_flags)); | ||
134 | if (ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr)) { | ||
135 | goto rst_nsk; | ||
136 | } else if (rs_tcp->t_sock) { | ||
137 | rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp); | ||
138 | conn->c_outgoing = 0; | ||
139 | } | ||
140 | } | ||
134 | rds_tcp_set_callbacks(new_sock, conn); | 141 | rds_tcp_set_callbacks(new_sock, conn); |
135 | rds_connect_complete(conn); | 142 | rds_connect_complete(conn); /* marks RDS_CONN_UP */ |
143 | new_sock = NULL; | ||
144 | ret = 0; | ||
145 | goto out; | ||
146 | rst_nsk: | ||
147 | /* reset the newly returned accept sock and bail */ | ||
148 | nsk = new_sock->sk; | ||
149 | rds_tcp_stats_inc(s_tcp_listen_closed_stale); | ||
150 | nsk->sk_user_data = NULL; | ||
151 | nsk->sk_prot->disconnect(nsk, 0); | ||
152 | tcp_done(nsk); | ||
136 | new_sock = NULL; | 153 | new_sock = NULL; |
137 | ret = 0; | 154 | ret = 0; |
138 | |||
139 | out: | 155 | out: |
156 | if (rs_tcp) | ||
157 | mutex_unlock(&rs_tcp->t_conn_lock); | ||
140 | if (new_sock) | 158 | if (new_sock) |
141 | sock_release(new_sock); | 159 | sock_release(new_sock); |
142 | return ret; | 160 | return ret; |
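
The rds/tcp hunks above add a per-connection t_conn_lock so the active connect path (rds_tcp_conn_connect) and the passive accept path (rds_tcp_accept_one) cannot both install a socket for the same connection: each takes the mutex and re-checks the connection state before proceeding. A rough pthread sketch of that shape, all names hypothetical (compile with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of an RDS/TCP connection: both the outgoing-connect
 * and the incoming-accept path take conn_lock and re-check the state, so
 * a duelling SYN can never install two sockets. */
struct conn {
	pthread_mutex_t conn_lock;
	bool up;
	int installed_by;	/* 1 = connect path, 2 = accept path */
};

static void try_establish(struct conn *c, int who)
{
	pthread_mutex_lock(&c->conn_lock);
	if (c->up) {		/* the other path already won: back off */
		pthread_mutex_unlock(&c->conn_lock);
		printf("path %d: connection already up, bailing\n", who);
		return;
	}
	c->up = true;		/* analogous to rds_connect_complete() */
	c->installed_by = who;
	pthread_mutex_unlock(&c->conn_lock);
	printf("path %d: installed socket\n", who);
}

static void *connect_path(void *arg) { try_establish(arg, 1); return NULL; }
static void *accept_path(void *arg)  { try_establish(arg, 2); return NULL; }

int main(void)
{
	struct conn c = { PTHREAD_MUTEX_INITIALIZER, false, 0 };
	pthread_t t1, t2;

	pthread_create(&t1, NULL, connect_path, &c);
	pthread_create(&t2, NULL, accept_path, &c);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	printf("socket installed exactly once, by path %d\n", c.installed_by);
	return 0;
}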
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 9640bb39a5d2..4befe97a9034 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c | |||
@@ -395,6 +395,25 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) | |||
395 | sch->q.qlen++; | 395 | sch->q.qlen++; |
396 | } | 396 | } |
397 | 397 | ||
398 | /* netem can't properly corrupt a megapacket (like we get from GSO), so instead | ||
399 | * when we statistically choose to corrupt one, we instead segment it, returning | ||
400 | * the first packet to be corrupted, and re-enqueue the remaining frames | ||
401 | */ | ||
402 | static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch) | ||
403 | { | ||
404 | struct sk_buff *segs; | ||
405 | netdev_features_t features = netif_skb_features(skb); | ||
406 | |||
407 | segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); | ||
408 | |||
409 | if (IS_ERR_OR_NULL(segs)) { | ||
410 | qdisc_reshape_fail(skb, sch); | ||
411 | return NULL; | ||
412 | } | ||
413 | consume_skb(skb); | ||
414 | return segs; | ||
415 | } | ||
416 | |||
398 | /* | 417 | /* |
399 | * Insert one skb into qdisc. | 418 | * Insert one skb into qdisc. |
400 | * Note: parent depends on return value to account for queue length. | 419 | * Note: parent depends on return value to account for queue length. |
@@ -407,7 +426,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
407 | /* We don't fill cb now as skb_unshare() may invalidate it */ | 426 | /* We don't fill cb now as skb_unshare() may invalidate it */ |
408 | struct netem_skb_cb *cb; | 427 | struct netem_skb_cb *cb; |
409 | struct sk_buff *skb2; | 428 | struct sk_buff *skb2; |
429 | struct sk_buff *segs = NULL; | ||
430 | unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb); | ||
431 | int nb = 0; | ||
410 | int count = 1; | 432 | int count = 1; |
433 | int rc = NET_XMIT_SUCCESS; | ||
411 | 434 | ||
412 | /* Random duplication */ | 435 | /* Random duplication */ |
413 | if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)) | 436 | if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)) |
@@ -453,10 +476,23 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
453 | * do it now in software before we mangle it. | 476 | * do it now in software before we mangle it. |
454 | */ | 477 | */ |
455 | if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) { | 478 | if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) { |
479 | if (skb_is_gso(skb)) { | ||
480 | segs = netem_segment(skb, sch); | ||
481 | if (!segs) | ||
482 | return NET_XMIT_DROP; | ||
483 | } else { | ||
484 | segs = skb; | ||
485 | } | ||
486 | |||
487 | skb = segs; | ||
488 | segs = segs->next; | ||
489 | |||
456 | if (!(skb = skb_unshare(skb, GFP_ATOMIC)) || | 490 | if (!(skb = skb_unshare(skb, GFP_ATOMIC)) || |
457 | (skb->ip_summed == CHECKSUM_PARTIAL && | 491 | (skb->ip_summed == CHECKSUM_PARTIAL && |
458 | skb_checksum_help(skb))) | 492 | skb_checksum_help(skb))) { |
459 | return qdisc_drop(skb, sch); | 493 | rc = qdisc_drop(skb, sch); |
494 | goto finish_segs; | ||
495 | } | ||
460 | 496 | ||
461 | skb->data[prandom_u32() % skb_headlen(skb)] ^= | 497 | skb->data[prandom_u32() % skb_headlen(skb)] ^= |
462 | 1<<(prandom_u32() % 8); | 498 | 1<<(prandom_u32() % 8); |
@@ -516,6 +552,27 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
516 | sch->qstats.requeues++; | 552 | sch->qstats.requeues++; |
517 | } | 553 | } |
518 | 554 | ||
555 | finish_segs: | ||
556 | if (segs) { | ||
557 | while (segs) { | ||
558 | skb2 = segs->next; | ||
559 | segs->next = NULL; | ||
560 | qdisc_skb_cb(segs)->pkt_len = segs->len; | ||
561 | last_len = segs->len; | ||
562 | rc = qdisc_enqueue(segs, sch); | ||
563 | if (rc != NET_XMIT_SUCCESS) { | ||
564 | if (net_xmit_drop_count(rc)) | ||
565 | qdisc_qstats_drop(sch); | ||
566 | } else { | ||
567 | nb++; | ||
568 | len += last_len; | ||
569 | } | ||
570 | segs = skb2; | ||
571 | } | ||
572 | sch->q.qlen += nb; | ||
573 | if (nb > 1) | ||
574 | qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len); | ||
575 | } | ||
519 | return NET_XMIT_SUCCESS; | 576 | return NET_XMIT_SUCCESS; |
520 | } | 577 | } |
521 | 578 | ||
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index 2b9b98f1c2ff..b7e01d88bdc5 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c | |||
@@ -305,6 +305,8 @@ static void switchdev_port_attr_set_deferred(struct net_device *dev, | |||
305 | if (err && err != -EOPNOTSUPP) | 305 | if (err && err != -EOPNOTSUPP) |
306 | netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n", | 306 | netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n", |
307 | err, attr->id); | 307 | err, attr->id); |
308 | if (attr->complete) | ||
309 | attr->complete(dev, err, attr->complete_priv); | ||
308 | } | 310 | } |
309 | 311 | ||
310 | static int switchdev_port_attr_set_defer(struct net_device *dev, | 312 | static int switchdev_port_attr_set_defer(struct net_device *dev, |
@@ -434,6 +436,8 @@ static void switchdev_port_obj_add_deferred(struct net_device *dev, | |||
434 | if (err && err != -EOPNOTSUPP) | 436 | if (err && err != -EOPNOTSUPP) |
435 | netdev_err(dev, "failed (err=%d) to add object (id=%d)\n", | 437 | netdev_err(dev, "failed (err=%d) to add object (id=%d)\n", |
436 | err, obj->id); | 438 | err, obj->id); |
439 | if (obj->complete) | ||
440 | obj->complete(dev, err, obj->complete_priv); | ||
437 | } | 441 | } |
438 | 442 | ||
439 | static int switchdev_port_obj_add_defer(struct net_device *dev, | 443 | static int switchdev_port_obj_add_defer(struct net_device *dev, |
@@ -502,6 +506,8 @@ static void switchdev_port_obj_del_deferred(struct net_device *dev, | |||
502 | if (err && err != -EOPNOTSUPP) | 506 | if (err && err != -EOPNOTSUPP) |
503 | netdev_err(dev, "failed (err=%d) to del object (id=%d)\n", | 507 | netdev_err(dev, "failed (err=%d) to del object (id=%d)\n", |
504 | err, obj->id); | 508 | err, obj->id); |
509 | if (obj->complete) | ||
510 | obj->complete(dev, err, obj->complete_priv); | ||
505 | } | 511 | } |
506 | 512 | ||
507 | static int switchdev_port_obj_del_defer(struct net_device *dev, | 513 | static int switchdev_port_obj_del_defer(struct net_device *dev, |
diff --git a/net/tipc/node.c b/net/tipc/node.c index ace178fd3850..9aaa1bc566ae 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
@@ -1444,6 +1444,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b) | |||
1444 | int bearer_id = b->identity; | 1444 | int bearer_id = b->identity; |
1445 | struct tipc_link_entry *le; | 1445 | struct tipc_link_entry *le; |
1446 | u16 bc_ack = msg_bcast_ack(hdr); | 1446 | u16 bc_ack = msg_bcast_ack(hdr); |
1447 | u32 self = tipc_own_addr(net); | ||
1447 | int rc = 0; | 1448 | int rc = 0; |
1448 | 1449 | ||
1449 | __skb_queue_head_init(&xmitq); | 1450 | __skb_queue_head_init(&xmitq); |
@@ -1460,6 +1461,10 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b) | |||
1460 | return tipc_node_bc_rcv(net, skb, bearer_id); | 1461 | return tipc_node_bc_rcv(net, skb, bearer_id); |
1461 | } | 1462 | } |
1462 | 1463 | ||
1464 | /* Discard unicast link messages destined for another node */ | ||
1465 | if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self))) | ||
1466 | goto discard; | ||
1467 | |||
1463 | /* Locate neighboring node that sent packet */ | 1468 | /* Locate neighboring node that sent packet */ |
1464 | n = tipc_node_find(net, msg_prevnode(hdr)); | 1469 | n = tipc_node_find(net, msg_prevnode(hdr)); |
1465 | if (unlikely(!n)) | 1470 | if (unlikely(!n)) |
diff --git a/samples/bpf/trace_output_kern.c b/samples/bpf/trace_output_kern.c index 8d8d1ec429eb..9b96f4fb8cea 100644 --- a/samples/bpf/trace_output_kern.c +++ b/samples/bpf/trace_output_kern.c | |||
@@ -18,7 +18,6 @@ int bpf_prog1(struct pt_regs *ctx) | |||
18 | u64 cookie; | 18 | u64 cookie; |
19 | } data; | 19 | } data; |
20 | 20 | ||
21 | memset(&data, 0, sizeof(data)); | ||
22 | data.pid = bpf_get_current_pid_tgid(); | 21 | data.pid = bpf_get_current_pid_tgid(); |
23 | data.cookie = 0x12345678; | 22 | data.cookie = 0x12345678; |
24 | 23 | ||
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index 161dd0d67da8..a9155077feef 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c | |||
@@ -371,6 +371,49 @@ static void do_usb_table(void *symval, unsigned long size, | |||
371 | do_usb_entry_multi(symval + i, mod); | 371 | do_usb_entry_multi(symval + i, mod); |
372 | } | 372 | } |
373 | 373 | ||
374 | static void do_of_entry_multi(void *symval, struct module *mod) | ||
375 | { | ||
376 | char alias[500]; | ||
377 | int len; | ||
378 | char *tmp; | ||
379 | |||
380 | DEF_FIELD_ADDR(symval, of_device_id, name); | ||
381 | DEF_FIELD_ADDR(symval, of_device_id, type); | ||
382 | DEF_FIELD_ADDR(symval, of_device_id, compatible); | ||
383 | |||
384 | len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*", | ||
385 | (*type)[0] ? *type : "*"); | ||
386 | |||
387 | if (compatible[0]) | ||
388 | sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "", | ||
389 | *compatible); | ||
390 | |||
391 | /* Replace all whitespace with underscores */ | ||
392 | for (tmp = alias; tmp && *tmp; tmp++) | ||
393 | if (isspace(*tmp)) | ||
394 | *tmp = '_'; | ||
395 | |||
396 | buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias); | ||
397 | strcat(alias, "C"); | ||
398 | add_wildcard(alias); | ||
399 | buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias); | ||
400 | } | ||
401 | |||
402 | static void do_of_table(void *symval, unsigned long size, | ||
403 | struct module *mod) | ||
404 | { | ||
405 | unsigned int i; | ||
406 | const unsigned long id_size = SIZE_of_device_id; | ||
407 | |||
408 | device_id_check(mod->name, "of", size, id_size, symval); | ||
409 | |||
410 | /* Leave last one: it's the terminator. */ | ||
411 | size -= id_size; | ||
412 | |||
413 | for (i = 0; i < size; i += id_size) | ||
414 | do_of_entry_multi(symval + i, mod); | ||
415 | } | ||
416 | |||
374 | /* Looks like: hid:bNvNpN */ | 417 | /* Looks like: hid:bNvNpN */ |
375 | static int do_hid_entry(const char *filename, | 418 | static int do_hid_entry(const char *filename, |
376 | void *symval, char *alias) | 419 | void *symval, char *alias) |
@@ -684,30 +727,6 @@ static int do_pcmcia_entry(const char *filename, | |||
684 | } | 727 | } |
685 | ADD_TO_DEVTABLE("pcmcia", pcmcia_device_id, do_pcmcia_entry); | 728 | ADD_TO_DEVTABLE("pcmcia", pcmcia_device_id, do_pcmcia_entry); |
686 | 729 | ||
687 | static int do_of_entry (const char *filename, void *symval, char *alias) | ||
688 | { | ||
689 | int len; | ||
690 | char *tmp; | ||
691 | DEF_FIELD_ADDR(symval, of_device_id, name); | ||
692 | DEF_FIELD_ADDR(symval, of_device_id, type); | ||
693 | DEF_FIELD_ADDR(symval, of_device_id, compatible); | ||
694 | |||
695 | len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*", | ||
696 | (*type)[0] ? *type : "*"); | ||
697 | |||
698 | if (compatible[0]) | ||
699 | sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "", | ||
700 | *compatible); | ||
701 | |||
702 | /* Replace all whitespace with underscores */ | ||
703 | for (tmp = alias; tmp && *tmp; tmp++) | ||
704 | if (isspace (*tmp)) | ||
705 | *tmp = '_'; | ||
706 | |||
707 | return 1; | ||
708 | } | ||
709 | ADD_TO_DEVTABLE("of", of_device_id, do_of_entry); | ||
710 | |||
711 | static int do_vio_entry(const char *filename, void *symval, | 730 | static int do_vio_entry(const char *filename, void *symval, |
712 | char *alias) | 731 | char *alias) |
713 | { | 732 | { |
@@ -1348,6 +1367,8 @@ void handle_moddevtable(struct module *mod, struct elf_info *info, | |||
1348 | /* First handle the "special" cases */ | 1367 | /* First handle the "special" cases */ |
1349 | if (sym_is(name, namelen, "usb")) | 1368 | if (sym_is(name, namelen, "usb")) |
1350 | do_usb_table(symval, sym->st_size, mod); | 1369 | do_usb_table(symval, sym->st_size, mod); |
1370 | if (sym_is(name, namelen, "of")) | ||
1371 | do_of_table(symval, sym->st_size, mod); | ||
1351 | else if (sym_is(name, namelen, "pnp")) | 1372 | else if (sym_is(name, namelen, "pnp")) |
1352 | do_pnp_device_entry(symval, sym->st_size, mod); | 1373 | do_pnp_device_entry(symval, sym->st_size, mod); |
1353 | else if (sym_is(name, namelen, "pnp_card")) | 1374 | else if (sym_is(name, namelen, "pnp_card")) |
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c index be09e2cacf82..3cd0a58672dd 100644 --- a/security/integrity/ima/ima_policy.c +++ b/security/integrity/ima/ima_policy.c | |||
@@ -884,10 +884,10 @@ static char *func_tokens[] = { | |||
884 | "BPRM_CHECK", | 884 | "BPRM_CHECK", |
885 | "MODULE_CHECK", | 885 | "MODULE_CHECK", |
886 | "FIRMWARE_CHECK", | 886 | "FIRMWARE_CHECK", |
887 | "POST_SETATTR", | ||
887 | "KEXEC_KERNEL_CHECK", | 888 | "KEXEC_KERNEL_CHECK", |
888 | "KEXEC_INITRAMFS_CHECK", | 889 | "KEXEC_INITRAMFS_CHECK", |
889 | "POLICY_CHECK", | 890 | "POLICY_CHECK" |
890 | "POST_SETATTR" | ||
891 | }; | 891 | }; |
892 | 892 | ||
893 | void *ima_policy_start(struct seq_file *m, loff_t *pos) | 893 | void *ima_policy_start(struct seq_file *m, loff_t *pos) |
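
The ima_policy.c hunk above fixes func_tokens[], whose entries had drifted out of order with respect to the corresponding enum, so policy rules were printed with the wrong hook names. One common way to keep such a table in sync is to index it with designated initializers; a hedged sketch using made-up hook names:

#include <stdio.h>

/* Hypothetical enum/string pair: designated initializers tie each string
 * to its enum value, so reordering or inserting enum entries cannot
 * silently shift the printed names the way a positional array can. */
enum hook {
	FILE_CHECK,
	MODULE_CHECK,
	FIRMWARE_CHECK,
	POST_SETATTR,
	POLICY_CHECK,
	MAX_CHECK
};

static const char *const hook_names[MAX_CHECK] = {
	[FILE_CHECK]	 = "FILE_CHECK",
	[MODULE_CHECK]	 = "MODULE_CHECK",
	[FIRMWARE_CHECK] = "FIRMWARE_CHECK",
	[POST_SETATTR]	 = "POST_SETATTR",
	[POLICY_CHECK]	 = "POLICY_CHECK",
};

int main(void)
{
	enum hook h;

	for (h = FILE_CHECK; h < MAX_CHECK; h++)
		printf("%d -> %s\n", h, hook_names[h]);
	return 0;
}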
diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c index 023cc4cad5c1..626f3bb24c55 100644 --- a/sound/hda/ext/hdac_ext_stream.c +++ b/sound/hda/ext/hdac_ext_stream.c | |||
@@ -104,12 +104,11 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_init_all); | |||
104 | */ | 104 | */ |
105 | void snd_hdac_stream_free_all(struct hdac_ext_bus *ebus) | 105 | void snd_hdac_stream_free_all(struct hdac_ext_bus *ebus) |
106 | { | 106 | { |
107 | struct hdac_stream *s; | 107 | struct hdac_stream *s, *_s; |
108 | struct hdac_ext_stream *stream; | 108 | struct hdac_ext_stream *stream; |
109 | struct hdac_bus *bus = ebus_to_hbus(ebus); | 109 | struct hdac_bus *bus = ebus_to_hbus(ebus); |
110 | 110 | ||
111 | while (!list_empty(&bus->stream_list)) { | 111 | list_for_each_entry_safe(s, _s, &bus->stream_list, list) { |
112 | s = list_first_entry(&bus->stream_list, struct hdac_stream, list); | ||
113 | stream = stream_to_hdac_ext_stream(s); | 112 | stream = stream_to_hdac_ext_stream(s); |
114 | snd_hdac_ext_stream_decouple(ebus, stream, false); | 113 | snd_hdac_ext_stream_decouple(ebus, stream, false); |
115 | list_del(&s->list); | 114 | list_del(&s->list); |
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c index 54babe1c0b16..607bbeaebddf 100644 --- a/sound/hda/hdac_i915.c +++ b/sound/hda/hdac_i915.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <sound/core.h> | 20 | #include <sound/core.h> |
21 | #include <sound/hdaudio.h> | 21 | #include <sound/hdaudio.h> |
22 | #include <sound/hda_i915.h> | 22 | #include <sound/hda_i915.h> |
23 | #include <sound/hda_register.h> | ||
23 | 24 | ||
24 | static struct i915_audio_component *hdac_acomp; | 25 | static struct i915_audio_component *hdac_acomp; |
25 | 26 | ||
@@ -97,26 +98,65 @@ int snd_hdac_display_power(struct hdac_bus *bus, bool enable) | |||
97 | } | 98 | } |
98 | EXPORT_SYMBOL_GPL(snd_hdac_display_power); | 99 | EXPORT_SYMBOL_GPL(snd_hdac_display_power); |
99 | 100 | ||
101 | #define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \ | ||
102 | ((pci)->device == 0x0c0c) || \ | ||
103 | ((pci)->device == 0x0d0c) || \ | ||
104 | ((pci)->device == 0x160c)) | ||
105 | |||
100 | /** | 106 | /** |
101 | * snd_hdac_get_display_clk - Get CDCLK in kHz | 107 | * snd_hdac_i915_set_bclk - Reprogram BCLK for HSW/BDW |
102 | * @bus: HDA core bus | 108 | * @bus: HDA core bus |
103 | * | 109 | * |
104 | * This function is supposed to be used only by a HD-audio controller | 110 | * Intel HSW/BDW display HDA controller is in GPU. Both its power and link BCLK |
105 | * driver that needs the interaction with i915 graphics. | 111 | * depends on GPU. Two Extended Mode registers EM4 (M value) and EM5 (N Value) |
112 | * are used to convert CDClk (Core Display Clock) to 24MHz BCLK: | ||
113 | * BCLK = CDCLK * M / N | ||
114 | * The values will be lost when the display power well is disabled and need to | ||
115 | * be restored to avoid abnormal playback speed. | ||
106 | * | 116 | * |
107 | * This function queries CDCLK value in kHz from the graphics driver and | 117 | * Call this function at initializing and changing power well, as well as |
108 | * returns the value. A negative code is returned in error. | 118 | * at ELD notifier for the hotplug. |
109 | */ | 119 | */ |
110 | int snd_hdac_get_display_clk(struct hdac_bus *bus) | 120 | void snd_hdac_i915_set_bclk(struct hdac_bus *bus) |
111 | { | 121 | { |
112 | struct i915_audio_component *acomp = bus->audio_component; | 122 | struct i915_audio_component *acomp = bus->audio_component; |
123 | struct pci_dev *pci = to_pci_dev(bus->dev); | ||
124 | int cdclk_freq; | ||
125 | unsigned int bclk_m, bclk_n; | ||
126 | |||
127 | if (!acomp || !acomp->ops || !acomp->ops->get_cdclk_freq) | ||
128 | return; /* only for i915 binding */ | ||
129 | if (!CONTROLLER_IN_GPU(pci)) | ||
130 | return; /* only HSW/BDW */ | ||
131 | |||
132 | cdclk_freq = acomp->ops->get_cdclk_freq(acomp->dev); | ||
133 | switch (cdclk_freq) { | ||
134 | case 337500: | ||
135 | bclk_m = 16; | ||
136 | bclk_n = 225; | ||
137 | break; | ||
138 | |||
139 | case 450000: | ||
140 | default: /* default CDCLK 450MHz */ | ||
141 | bclk_m = 4; | ||
142 | bclk_n = 75; | ||
143 | break; | ||
144 | |||
145 | case 540000: | ||
146 | bclk_m = 4; | ||
147 | bclk_n = 90; | ||
148 | break; | ||
149 | |||
150 | case 675000: | ||
151 | bclk_m = 8; | ||
152 | bclk_n = 225; | ||
153 | break; | ||
154 | } | ||
113 | 155 | ||
114 | if (!acomp || !acomp->ops) | 156 | snd_hdac_chip_writew(bus, HSW_EM4, bclk_m); |
115 | return -ENODEV; | 157 | snd_hdac_chip_writew(bus, HSW_EM5, bclk_n); |
116 | |||
117 | return acomp->ops->get_cdclk_freq(acomp->dev); | ||
118 | } | 158 | } |
119 | EXPORT_SYMBOL_GPL(snd_hdac_get_display_clk); | 159 | EXPORT_SYMBOL_GPL(snd_hdac_i915_set_bclk); |
120 | 160 | ||
121 | /* There is a fixed mapping between audio pin node and display port | 161 | /* There is a fixed mapping between audio pin node and display port |
122 | * on current Intel platforms: | 162 | * on current Intel platforms: |
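
The kernel-doc above states BCLK = CDCLK * M / N, and snd_hdac_i915_set_bclk() hard-codes an (M, N) pair per CDCLK frequency; each pair reduces the core display clock to the 24 MHz HDA bit clock. A quick userspace check of that table (frequencies in kHz, as in the driver):

#include <stdio.h>

/* Verify that every (M, N) pair in the switch above maps its CDCLK
 * frequency back to the 24000 kHz HDA BCLK. */
struct bclk_entry {
	unsigned int cdclk_khz;
	unsigned int m, n;
};

int main(void)
{
	static const struct bclk_entry tab[] = {
		{ 337500, 16, 225 },
		{ 450000,  4,  75 },	/* also the default case */
		{ 540000,  4,  90 },
		{ 675000,  8, 225 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(tab) / sizeof(tab[0]); i++)
		printf("CDCLK %6u kHz * %2u / %3u = %u kHz\n",
		       tab[i].cdclk_khz, tab[i].m, tab[i].n,
		       tab[i].cdclk_khz * tab[i].m / tab[i].n);
	return 0;
}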
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 637b8a0e2a91..9a0d1445ca5c 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -857,50 +857,6 @@ static int param_set_xint(const char *val, const struct kernel_param *kp) | |||
857 | #define azx_del_card_list(chip) /* NOP */ | 857 | #define azx_del_card_list(chip) /* NOP */ |
858 | #endif /* CONFIG_PM */ | 858 | #endif /* CONFIG_PM */ |
859 | 859 | ||
860 | /* Intel HSW/BDW display HDA controller is in GPU. Both its power and link BCLK | ||
861 | * depends on GPU. Two Extended Mode registers EM4 (M value) and EM5 (N Value) | ||
862 | * are used to convert CDClk (Core Display Clock) to 24MHz BCLK: | ||
863 | * BCLK = CDCLK * M / N | ||
864 | * The values will be lost when the display power well is disabled and need to | ||
865 | * be restored to avoid abnormal playback speed. | ||
866 | */ | ||
867 | static void haswell_set_bclk(struct hda_intel *hda) | ||
868 | { | ||
869 | struct azx *chip = &hda->chip; | ||
870 | int cdclk_freq; | ||
871 | unsigned int bclk_m, bclk_n; | ||
872 | |||
873 | if (!hda->need_i915_power) | ||
874 | return; | ||
875 | |||
876 | cdclk_freq = snd_hdac_get_display_clk(azx_bus(chip)); | ||
877 | switch (cdclk_freq) { | ||
878 | case 337500: | ||
879 | bclk_m = 16; | ||
880 | bclk_n = 225; | ||
881 | break; | ||
882 | |||
883 | case 450000: | ||
884 | default: /* default CDCLK 450MHz */ | ||
885 | bclk_m = 4; | ||
886 | bclk_n = 75; | ||
887 | break; | ||
888 | |||
889 | case 540000: | ||
890 | bclk_m = 4; | ||
891 | bclk_n = 90; | ||
892 | break; | ||
893 | |||
894 | case 675000: | ||
895 | bclk_m = 8; | ||
896 | bclk_n = 225; | ||
897 | break; | ||
898 | } | ||
899 | |||
900 | azx_writew(chip, HSW_EM4, bclk_m); | ||
901 | azx_writew(chip, HSW_EM5, bclk_n); | ||
902 | } | ||
903 | |||
904 | #if defined(CONFIG_PM_SLEEP) || defined(SUPPORT_VGA_SWITCHEROO) | 860 | #if defined(CONFIG_PM_SLEEP) || defined(SUPPORT_VGA_SWITCHEROO) |
905 | /* | 861 | /* |
906 | * power management | 862 | * power management |
@@ -958,7 +914,7 @@ static int azx_resume(struct device *dev) | |||
958 | if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL | 914 | if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL |
959 | && hda->need_i915_power) { | 915 | && hda->need_i915_power) { |
960 | snd_hdac_display_power(azx_bus(chip), true); | 916 | snd_hdac_display_power(azx_bus(chip), true); |
961 | haswell_set_bclk(hda); | 917 | snd_hdac_i915_set_bclk(azx_bus(chip)); |
962 | } | 918 | } |
963 | if (chip->msi) | 919 | if (chip->msi) |
964 | if (pci_enable_msi(pci) < 0) | 920 | if (pci_enable_msi(pci) < 0) |
@@ -1058,7 +1014,7 @@ static int azx_runtime_resume(struct device *dev) | |||
1058 | bus = azx_bus(chip); | 1014 | bus = azx_bus(chip); |
1059 | if (hda->need_i915_power) { | 1015 | if (hda->need_i915_power) { |
1060 | snd_hdac_display_power(bus, true); | 1016 | snd_hdac_display_power(bus, true); |
1061 | haswell_set_bclk(hda); | 1017 | snd_hdac_i915_set_bclk(bus); |
1062 | } else { | 1018 | } else { |
1063 | /* toggle codec wakeup bit for STATESTS read */ | 1019 | /* toggle codec wakeup bit for STATESTS read */ |
1064 | snd_hdac_set_codec_wakeup(bus, true); | 1020 | snd_hdac_set_codec_wakeup(bus, true); |
@@ -1796,12 +1752,8 @@ static int azx_first_init(struct azx *chip) | |||
1796 | /* initialize chip */ | 1752 | /* initialize chip */ |
1797 | azx_init_pci(chip); | 1753 | azx_init_pci(chip); |
1798 | 1754 | ||
1799 | if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) { | 1755 | if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) |
1800 | struct hda_intel *hda; | 1756 | snd_hdac_i915_set_bclk(bus); |
1801 | |||
1802 | hda = container_of(chip, struct hda_intel, chip); | ||
1803 | haswell_set_bclk(hda); | ||
1804 | } | ||
1805 | 1757 | ||
1806 | hda_intel_init_chip(chip, (probe_only[dev] & 2) == 0); | 1758 | hda_intel_init_chip(chip, (probe_only[dev] & 2) == 0); |
1807 | 1759 | ||
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 40933aa33afe..1483f85999ec 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c | |||
@@ -2232,6 +2232,7 @@ static void intel_pin_eld_notify(void *audio_ptr, int port) | |||
2232 | if (atomic_read(&(codec)->core.in_pm)) | 2232 | if (atomic_read(&(codec)->core.in_pm)) |
2233 | return; | 2233 | return; |
2234 | 2234 | ||
2235 | snd_hdac_i915_set_bclk(&codec->bus->core); | ||
2235 | check_presence_and_report(codec, pin_nid); | 2236 | check_presence_and_report(codec, pin_nid); |
2236 | } | 2237 | } |
2237 | 2238 | ||
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 810bceee4fd2..ac4490a96863 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -5584,6 +5584,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
5584 | SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK), | 5584 | SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK), |
5585 | SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK), | 5585 | SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK), |
5586 | SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK), | 5586 | SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK), |
5587 | SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK), | ||
5587 | SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE), | 5588 | SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE), |
5588 | SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), | 5589 | SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), |
5589 | SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), | 5590 | SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), |
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index 649e92a252ae..7ef3a0c16478 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig | |||
@@ -629,6 +629,7 @@ config SND_SOC_RT5514 | |||
629 | 629 | ||
630 | config SND_SOC_RT5616 | 630 | config SND_SOC_RT5616 |
631 | tristate "Realtek RT5616 CODEC" | 631 | tristate "Realtek RT5616 CODEC" |
632 | depends on I2C | ||
632 | 633 | ||
633 | config SND_SOC_RT5631 | 634 | config SND_SOC_RT5631 |
634 | tristate "Realtek ALC5631/RT5631 CODEC" | 635 | tristate "Realtek ALC5631/RT5631 CODEC" |
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c index 92d22a018d68..83959312f7a0 100644 --- a/sound/soc/codecs/arizona.c +++ b/sound/soc/codecs/arizona.c | |||
@@ -249,6 +249,18 @@ int arizona_init_spk(struct snd_soc_codec *codec) | |||
249 | } | 249 | } |
250 | EXPORT_SYMBOL_GPL(arizona_init_spk); | 250 | EXPORT_SYMBOL_GPL(arizona_init_spk); |
251 | 251 | ||
252 | int arizona_free_spk(struct snd_soc_codec *codec) | ||
253 | { | ||
254 | struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec); | ||
255 | struct arizona *arizona = priv->arizona; | ||
256 | |||
257 | arizona_free_irq(arizona, ARIZONA_IRQ_SPK_OVERHEAT_WARN, arizona); | ||
258 | arizona_free_irq(arizona, ARIZONA_IRQ_SPK_OVERHEAT, arizona); | ||
259 | |||
260 | return 0; | ||
261 | } | ||
262 | EXPORT_SYMBOL_GPL(arizona_free_spk); | ||
263 | |||
252 | static const struct snd_soc_dapm_route arizona_mono_routes[] = { | 264 | static const struct snd_soc_dapm_route arizona_mono_routes[] = { |
253 | { "OUT1R", NULL, "OUT1L" }, | 265 | { "OUT1R", NULL, "OUT1L" }, |
254 | { "OUT2R", NULL, "OUT2L" }, | 266 | { "OUT2R", NULL, "OUT2L" }, |
diff --git a/sound/soc/codecs/arizona.h b/sound/soc/codecs/arizona.h index 1ea8e4ecf8d4..ce0531b8c632 100644 --- a/sound/soc/codecs/arizona.h +++ b/sound/soc/codecs/arizona.h | |||
@@ -307,6 +307,8 @@ extern int arizona_init_spk(struct snd_soc_codec *codec); | |||
307 | extern int arizona_init_gpio(struct snd_soc_codec *codec); | 307 | extern int arizona_init_gpio(struct snd_soc_codec *codec); |
308 | extern int arizona_init_mono(struct snd_soc_codec *codec); | 308 | extern int arizona_init_mono(struct snd_soc_codec *codec); |
309 | 309 | ||
310 | extern int arizona_free_spk(struct snd_soc_codec *codec); | ||
311 | |||
310 | extern int arizona_init_dai(struct arizona_priv *priv, int dai); | 312 | extern int arizona_init_dai(struct arizona_priv *priv, int dai); |
311 | 313 | ||
312 | int arizona_set_output_mode(struct snd_soc_codec *codec, int output, | 314 | int arizona_set_output_mode(struct snd_soc_codec *codec, int output, |
diff --git a/sound/soc/codecs/cs35l32.c b/sound/soc/codecs/cs35l32.c index 44c30fe3e315..287d13740be4 100644 --- a/sound/soc/codecs/cs35l32.c +++ b/sound/soc/codecs/cs35l32.c | |||
@@ -274,7 +274,9 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client, | |||
274 | if (of_property_read_u32(np, "cirrus,sdout-share", &val) >= 0) | 274 | if (of_property_read_u32(np, "cirrus,sdout-share", &val) >= 0) |
275 | pdata->sdout_share = val; | 275 | pdata->sdout_share = val; |
276 | 276 | ||
277 | of_property_read_u32(np, "cirrus,boost-manager", &val); | 277 | if (of_property_read_u32(np, "cirrus,boost-manager", &val)) |
278 | val = -1u; | ||
279 | |||
278 | switch (val) { | 280 | switch (val) { |
279 | case CS35L32_BOOST_MGR_AUTO: | 281 | case CS35L32_BOOST_MGR_AUTO: |
280 | case CS35L32_BOOST_MGR_AUTO_AUDIO: | 282 | case CS35L32_BOOST_MGR_AUTO_AUDIO: |
@@ -282,13 +284,15 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client, | |||
282 | case CS35L32_BOOST_MGR_FIXED: | 284 | case CS35L32_BOOST_MGR_FIXED: |
283 | pdata->boost_mng = val; | 285 | pdata->boost_mng = val; |
284 | break; | 286 | break; |
287 | case -1u: | ||
285 | default: | 288 | default: |
286 | dev_err(&i2c_client->dev, | 289 | dev_err(&i2c_client->dev, |
287 | "Wrong cirrus,boost-manager DT value %d\n", val); | 290 | "Wrong cirrus,boost-manager DT value %d\n", val); |
288 | pdata->boost_mng = CS35L32_BOOST_MGR_BYPASS; | 291 | pdata->boost_mng = CS35L32_BOOST_MGR_BYPASS; |
289 | } | 292 | } |
290 | 293 | ||
291 | of_property_read_u32(np, "cirrus,sdout-datacfg", &val); | 294 | if (of_property_read_u32(np, "cirrus,sdout-datacfg", &val)) |
295 | val = -1u; | ||
292 | switch (val) { | 296 | switch (val) { |
293 | case CS35L32_DATA_CFG_LR_VP: | 297 | case CS35L32_DATA_CFG_LR_VP: |
294 | case CS35L32_DATA_CFG_LR_STAT: | 298 | case CS35L32_DATA_CFG_LR_STAT: |
@@ -296,13 +300,15 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client, | |||
296 | case CS35L32_DATA_CFG_LR_VPSTAT: | 300 | case CS35L32_DATA_CFG_LR_VPSTAT: |
297 | pdata->sdout_datacfg = val; | 301 | pdata->sdout_datacfg = val; |
298 | break; | 302 | break; |
303 | case -1u: | ||
299 | default: | 304 | default: |
300 | dev_err(&i2c_client->dev, | 305 | dev_err(&i2c_client->dev, |
301 | "Wrong cirrus,sdout-datacfg DT value %d\n", val); | 306 | "Wrong cirrus,sdout-datacfg DT value %d\n", val); |
302 | pdata->sdout_datacfg = CS35L32_DATA_CFG_LR; | 307 | pdata->sdout_datacfg = CS35L32_DATA_CFG_LR; |
303 | } | 308 | } |
304 | 309 | ||
305 | of_property_read_u32(np, "cirrus,battery-threshold", &val); | 310 | if (of_property_read_u32(np, "cirrus,battery-threshold", &val)) |
311 | val = -1u; | ||
306 | switch (val) { | 312 | switch (val) { |
307 | case CS35L32_BATT_THRESH_3_1V: | 313 | case CS35L32_BATT_THRESH_3_1V: |
308 | case CS35L32_BATT_THRESH_3_2V: | 314 | case CS35L32_BATT_THRESH_3_2V: |
@@ -310,13 +316,15 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client, | |||
310 | case CS35L32_BATT_THRESH_3_4V: | 316 | case CS35L32_BATT_THRESH_3_4V: |
311 | pdata->batt_thresh = val; | 317 | pdata->batt_thresh = val; |
312 | break; | 318 | break; |
319 | case -1u: | ||
313 | default: | 320 | default: |
314 | dev_err(&i2c_client->dev, | 321 | dev_err(&i2c_client->dev, |
315 | "Wrong cirrus,battery-threshold DT value %d\n", val); | 322 | "Wrong cirrus,battery-threshold DT value %d\n", val); |
316 | pdata->batt_thresh = CS35L32_BATT_THRESH_3_3V; | 323 | pdata->batt_thresh = CS35L32_BATT_THRESH_3_3V; |
317 | } | 324 | } |
318 | 325 | ||
319 | of_property_read_u32(np, "cirrus,battery-recovery", &val); | 326 | if (of_property_read_u32(np, "cirrus,battery-recovery", &val)) |
327 | val = -1u; | ||
320 | switch (val) { | 328 | switch (val) { |
321 | case CS35L32_BATT_RECOV_3_1V: | 329 | case CS35L32_BATT_RECOV_3_1V: |
322 | case CS35L32_BATT_RECOV_3_2V: | 330 | case CS35L32_BATT_RECOV_3_2V: |
@@ -326,6 +334,7 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client, | |||
326 | case CS35L32_BATT_RECOV_3_6V: | 334 | case CS35L32_BATT_RECOV_3_6V: |
327 | pdata->batt_recov = val; | 335 | pdata->batt_recov = val; |
328 | break; | 336 | break; |
337 | case -1u: | ||
329 | default: | 338 | default: |
330 | dev_err(&i2c_client->dev, | 339 | dev_err(&i2c_client->dev, |
331 | "Wrong cirrus,battery-recovery DT value %d\n", val); | 340 | "Wrong cirrus,battery-recovery DT value %d\n", val); |
diff --git a/sound/soc/codecs/cs47l24.c b/sound/soc/codecs/cs47l24.c index 576087bda330..00e9b6fc1b5c 100644 --- a/sound/soc/codecs/cs47l24.c +++ b/sound/soc/codecs/cs47l24.c | |||
@@ -1108,6 +1108,9 @@ static int cs47l24_codec_remove(struct snd_soc_codec *codec) | |||
1108 | priv->core.arizona->dapm = NULL; | 1108 | priv->core.arizona->dapm = NULL; |
1109 | 1109 | ||
1110 | arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv); | 1110 | arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv); |
1111 | |||
1112 | arizona_free_spk(codec); | ||
1113 | |||
1111 | return 0; | 1114 | return 0; |
1112 | } | 1115 | } |
1113 | 1116 | ||
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c index 26f9459cb3bc..aaa038ffc8a5 100644 --- a/sound/soc/codecs/hdac_hdmi.c +++ b/sound/soc/codecs/hdac_hdmi.c | |||
@@ -1420,32 +1420,39 @@ static int hdmi_codec_remove(struct snd_soc_codec *codec) | |||
1420 | } | 1420 | } |
1421 | 1421 | ||
1422 | #ifdef CONFIG_PM | 1422 | #ifdef CONFIG_PM |
1423 | static int hdmi_codec_resume(struct snd_soc_codec *codec) | 1423 | static int hdmi_codec_prepare(struct device *dev) |
1424 | { | 1424 | { |
1425 | struct hdac_ext_device *edev = snd_soc_codec_get_drvdata(codec); | 1425 | struct hdac_ext_device *edev = to_hda_ext_device(dev); |
1426 | struct hdac_device *hdac = &edev->hdac; | ||
1427 | |||
1428 | pm_runtime_get_sync(&edev->hdac.dev); | ||
1429 | |||
1430 | /* | ||
1431 | * Power down afg. | ||
1432 | * codec_read is preferred over codec_write to set the power state. | ||
1433 | * This way verb is send to set the power state and response | ||
1434 | * is received. So setting power state is ensured without using loop | ||
1435 | * to read the state. | ||
1436 | */ | ||
1437 | snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE, | ||
1438 | AC_PWRST_D3); | ||
1439 | |||
1440 | return 0; | ||
1441 | } | ||
1442 | |||
1443 | static void hdmi_codec_complete(struct device *dev) | ||
1444 | { | ||
1445 | struct hdac_ext_device *edev = to_hda_ext_device(dev); | ||
1426 | struct hdac_hdmi_priv *hdmi = edev->private_data; | 1446 | struct hdac_hdmi_priv *hdmi = edev->private_data; |
1427 | struct hdac_hdmi_pin *pin; | 1447 | struct hdac_hdmi_pin *pin; |
1428 | struct hdac_device *hdac = &edev->hdac; | 1448 | struct hdac_device *hdac = &edev->hdac; |
1429 | struct hdac_bus *bus = hdac->bus; | ||
1430 | int err; | ||
1431 | unsigned long timeout; | ||
1432 | |||
1433 | hdac_hdmi_skl_enable_all_pins(&edev->hdac); | ||
1434 | hdac_hdmi_skl_enable_dp12(&edev->hdac); | ||
1435 | 1449 | ||
1436 | /* Power up afg */ | 1450 | /* Power up afg */ |
1437 | if (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D0)) { | 1451 | snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE, |
1438 | 1452 | AC_PWRST_D0); | |
1439 | snd_hdac_codec_write(hdac, hdac->afg, 0, | ||
1440 | AC_VERB_SET_POWER_STATE, AC_PWRST_D0); | ||
1441 | 1453 | ||
1442 | /* Wait till power state is set to D0 */ | 1454 | hdac_hdmi_skl_enable_all_pins(&edev->hdac); |
1443 | timeout = jiffies + msecs_to_jiffies(1000); | 1455 | hdac_hdmi_skl_enable_dp12(&edev->hdac); |
1444 | while (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D0) | ||
1445 | && time_before(jiffies, timeout)) { | ||
1446 | msleep(50); | ||
1447 | } | ||
1448 | } | ||
1449 | 1456 | ||
1450 | /* | 1457 | /* |
1451 | * As the ELD notify callback request is not entertained while the | 1458 | * As the ELD notify callback request is not entertained while the |
@@ -1455,28 +1462,16 @@ static int hdmi_codec_resume(struct snd_soc_codec *codec) | |||
1455 | list_for_each_entry(pin, &hdmi->pin_list, head) | 1462 | list_for_each_entry(pin, &hdmi->pin_list, head) |
1456 | hdac_hdmi_present_sense(pin, 1); | 1463 | hdac_hdmi_present_sense(pin, 1); |
1457 | 1464 | ||
1458 | /* | 1465 | pm_runtime_put_sync(&edev->hdac.dev); |
1459 | * Codec power is turned ON during controller resume. | ||
1460 | * Turn it OFF here | ||
1461 | */ | ||
1462 | err = snd_hdac_display_power(bus, false); | ||
1463 | if (err < 0) { | ||
1464 | dev_err(bus->dev, | ||
1465 | "Cannot turn OFF display power on i915, err: %d\n", | ||
1466 | err); | ||
1467 | return err; | ||
1468 | } | ||
1469 | |||
1470 | return 0; | ||
1471 | } | 1466 | } |
1472 | #else | 1467 | #else |
1473 | #define hdmi_codec_resume NULL | 1468 | #define hdmi_codec_prepare NULL |
1469 | #define hdmi_codec_complete NULL | ||
1474 | #endif | 1470 | #endif |
1475 | 1471 | ||
1476 | static struct snd_soc_codec_driver hdmi_hda_codec = { | 1472 | static struct snd_soc_codec_driver hdmi_hda_codec = { |
1477 | .probe = hdmi_codec_probe, | 1473 | .probe = hdmi_codec_probe, |
1478 | .remove = hdmi_codec_remove, | 1474 | .remove = hdmi_codec_remove, |
1479 | .resume = hdmi_codec_resume, | ||
1480 | .idle_bias_off = true, | 1475 | .idle_bias_off = true, |
1481 | }; | 1476 | }; |
1482 | 1477 | ||
@@ -1561,7 +1556,6 @@ static int hdac_hdmi_runtime_suspend(struct device *dev) | |||
1561 | struct hdac_ext_device *edev = to_hda_ext_device(dev); | 1556 | struct hdac_ext_device *edev = to_hda_ext_device(dev); |
1562 | struct hdac_device *hdac = &edev->hdac; | 1557 | struct hdac_device *hdac = &edev->hdac; |
1563 | struct hdac_bus *bus = hdac->bus; | 1558 | struct hdac_bus *bus = hdac->bus; |
1564 | unsigned long timeout; | ||
1565 | int err; | 1559 | int err; |
1566 | 1560 | ||
1567 | dev_dbg(dev, "Enter: %s\n", __func__); | 1561 | dev_dbg(dev, "Enter: %s\n", __func__); |
@@ -1570,20 +1564,15 @@ static int hdac_hdmi_runtime_suspend(struct device *dev) | |||
1570 | if (!bus) | 1564 | if (!bus) |
1571 | return 0; | 1565 | return 0; |
1572 | 1566 | ||
1573 | /* Power down afg */ | 1567 | /* |
1574 | if (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D3)) { | 1568 | * Power down afg. |
1575 | snd_hdac_codec_write(hdac, hdac->afg, 0, | 1569 | * codec_read is preferred over codec_write to set the power state. |
1576 | AC_VERB_SET_POWER_STATE, AC_PWRST_D3); | 1570 | * This way the verb is sent to set the power state and a response |
1577 | 1571 | * is received, so setting the power state is ensured without using a |
1578 | /* Wait till power state is set to D3 */ | 1572 | * loop to read the state. |
1579 | timeout = jiffies + msecs_to_jiffies(1000); | 1573 | */ |
1580 | while (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D3) | 1574 | snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE, |
1581 | && time_before(jiffies, timeout)) { | 1575 | AC_PWRST_D3); |
1582 | |||
1583 | msleep(50); | ||
1584 | } | ||
1585 | } | ||
1586 | |||
1587 | err = snd_hdac_display_power(bus, false); | 1576 | err = snd_hdac_display_power(bus, false); |
1588 | if (err < 0) { | 1577 | if (err < 0) { |
1589 | dev_err(bus->dev, "Cannot turn on display power on i915\n"); | 1578 | dev_err(bus->dev, "Cannot turn on display power on i915\n"); |
@@ -1616,9 +1605,8 @@ static int hdac_hdmi_runtime_resume(struct device *dev) | |||
1616 | hdac_hdmi_skl_enable_dp12(&edev->hdac); | 1605 | hdac_hdmi_skl_enable_dp12(&edev->hdac); |
1617 | 1606 | ||
1618 | /* Power up afg */ | 1607 | /* Power up afg */ |
1619 | if (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D0)) | 1608 | snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE, |
1620 | snd_hdac_codec_write(hdac, hdac->afg, 0, | 1609 | AC_PWRST_D0); |
1621 | AC_VERB_SET_POWER_STATE, AC_PWRST_D0); | ||
1622 | 1610 | ||
1623 | return 0; | 1611 | return 0; |
1624 | } | 1612 | } |
@@ -1629,6 +1617,8 @@ static int hdac_hdmi_runtime_resume(struct device *dev) | |||
1629 | 1617 | ||
1630 | static const struct dev_pm_ops hdac_hdmi_pm = { | 1618 | static const struct dev_pm_ops hdac_hdmi_pm = { |
1631 | SET_RUNTIME_PM_OPS(hdac_hdmi_runtime_suspend, hdac_hdmi_runtime_resume, NULL) | 1619 | SET_RUNTIME_PM_OPS(hdac_hdmi_runtime_suspend, hdac_hdmi_runtime_resume, NULL) |
1620 | .prepare = hdmi_codec_prepare, | ||
1621 | .complete = hdmi_codec_complete, | ||
1632 | }; | 1622 | }; |
1633 | 1623 | ||
1634 | static const struct hda_device_id hdmi_list[] = { | 1624 | static const struct hda_device_id hdmi_list[] = { |
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c index 1c8729984c2b..683769f0f246 100644 --- a/sound/soc/codecs/nau8825.c +++ b/sound/soc/codecs/nau8825.c | |||
@@ -343,9 +343,12 @@ static const struct snd_soc_dapm_widget nau8825_dapm_widgets[] = { | |||
343 | SND_SOC_DAPM_SUPPLY("ADC Power", NAU8825_REG_ANALOG_ADC_2, 6, 0, NULL, | 343 | SND_SOC_DAPM_SUPPLY("ADC Power", NAU8825_REG_ANALOG_ADC_2, 6, 0, NULL, |
344 | 0), | 344 | 0), |
345 | 345 | ||
346 | /* ADC for button press detection */ | 346 | /* ADC for button press detection. A dapm supply widget is used to |
347 | SND_SOC_DAPM_ADC("SAR", NULL, NAU8825_REG_SAR_CTRL, | 347 | * prevent dapm_power_widgets keeping the codec at SND_SOC_BIAS_ON |
348 | NAU8825_SAR_ADC_EN_SFT, 0), | 348 | * during suspend. |
349 | */ | ||
350 | SND_SOC_DAPM_SUPPLY("SAR", NAU8825_REG_SAR_CTRL, | ||
351 | NAU8825_SAR_ADC_EN_SFT, 0, NULL, 0), | ||
349 | 352 | ||
350 | SND_SOC_DAPM_PGA_S("ADACL", 2, NAU8825_REG_RDAC, 12, 0, NULL, 0), | 353 | SND_SOC_DAPM_PGA_S("ADACL", 2, NAU8825_REG_RDAC, 12, 0, NULL, 0), |
351 | SND_SOC_DAPM_PGA_S("ADACR", 2, NAU8825_REG_RDAC, 13, 0, NULL, 0), | 354 | SND_SOC_DAPM_PGA_S("ADACR", 2, NAU8825_REG_RDAC, 13, 0, NULL, 0), |
@@ -607,6 +610,16 @@ static bool nau8825_is_jack_inserted(struct regmap *regmap) | |||
607 | 610 | ||
608 | static void nau8825_restart_jack_detection(struct regmap *regmap) | 611 | static void nau8825_restart_jack_detection(struct regmap *regmap) |
609 | { | 612 | { |
613 | /* Chip needs one FSCLK cycle in order to generate interrupts, | ||
614 | * as we cannot guarantee one will be provided by the system. Turning | ||
615 | * master mode on then off enables us to generate that FSCLK cycle | ||
616 | * with a minimum of contention on the clock bus. | ||
617 | */ | ||
618 | regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2, | ||
619 | NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_MASTER); | ||
620 | regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2, | ||
621 | NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_SLAVE); | ||
622 | |||
610 | /* this will restart the entire jack detection process including MIC/GND | 623 | /* this will restart the entire jack detection process including MIC/GND |
611 | * switching and create interrupts. We have to go from 0 to 1 and back | 624 | * switching and create interrupts. We have to go from 0 to 1 and back |
612 | * to 0 to restart. | 625 | * to 0 to restart. |
@@ -728,7 +741,10 @@ static irqreturn_t nau8825_interrupt(int irq, void *data) | |||
728 | struct regmap *regmap = nau8825->regmap; | 741 | struct regmap *regmap = nau8825->regmap; |
729 | int active_irq, clear_irq = 0, event = 0, event_mask = 0; | 742 | int active_irq, clear_irq = 0, event = 0, event_mask = 0; |
730 | 743 | ||
731 | regmap_read(regmap, NAU8825_REG_IRQ_STATUS, &active_irq); | 744 | if (regmap_read(regmap, NAU8825_REG_IRQ_STATUS, &active_irq)) { |
745 | dev_err(nau8825->dev, "failed to read irq status\n"); | ||
746 | return IRQ_NONE; | ||
747 | } | ||
732 | 748 | ||
733 | if ((active_irq & NAU8825_JACK_EJECTION_IRQ_MASK) == | 749 | if ((active_irq & NAU8825_JACK_EJECTION_IRQ_MASK) == |
734 | NAU8825_JACK_EJECTION_DETECTED) { | 750 | NAU8825_JACK_EJECTION_DETECTED) { |
@@ -1141,33 +1157,74 @@ static int nau8825_set_bias_level(struct snd_soc_codec *codec, | |||
1141 | return ret; | 1157 | return ret; |
1142 | } | 1158 | } |
1143 | } | 1159 | } |
1144 | |||
1145 | ret = regcache_sync(nau8825->regmap); | ||
1146 | if (ret) { | ||
1147 | dev_err(codec->dev, | ||
1148 | "Failed to sync cache: %d\n", ret); | ||
1149 | return ret; | ||
1150 | } | ||
1151 | } | 1160 | } |
1152 | |||
1153 | break; | 1161 | break; |
1154 | 1162 | ||
1155 | case SND_SOC_BIAS_OFF: | 1163 | case SND_SOC_BIAS_OFF: |
1156 | if (nau8825->mclk_freq) | 1164 | if (nau8825->mclk_freq) |
1157 | clk_disable_unprepare(nau8825->mclk); | 1165 | clk_disable_unprepare(nau8825->mclk); |
1158 | |||
1159 | regcache_mark_dirty(nau8825->regmap); | ||
1160 | break; | 1166 | break; |
1161 | } | 1167 | } |
1162 | return 0; | 1168 | return 0; |
1163 | } | 1169 | } |
1164 | 1170 | ||
1171 | #ifdef CONFIG_PM | ||
1172 | static int nau8825_suspend(struct snd_soc_codec *codec) | ||
1173 | { | ||
1174 | struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec); | ||
1175 | |||
1176 | disable_irq(nau8825->irq); | ||
1177 | regcache_cache_only(nau8825->regmap, true); | ||
1178 | regcache_mark_dirty(nau8825->regmap); | ||
1179 | |||
1180 | return 0; | ||
1181 | } | ||
1182 | |||
1183 | static int nau8825_resume(struct snd_soc_codec *codec) | ||
1184 | { | ||
1185 | struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec); | ||
1186 | |||
1187 | /* The chip may lose power and reset in S3. regcache_sync restores | ||
1188 | * register values including configurations for sysclk, irq, and | ||
1189 | * jack/button detection. | ||
1190 | */ | ||
1191 | regcache_cache_only(nau8825->regmap, false); | ||
1192 | regcache_sync(nau8825->regmap); | ||
1193 | |||
1194 | /* Check the jack plug status directly. If the headset is unplugged | ||
1195 | * during S3 when the chip has no power, there will be no jack | ||
1196 | * detection irq even after the nau8825_restart_jack_detection below, | ||
1197 | * because the chip just thinks no headset has ever been plugged in. | ||
1198 | */ | ||
1199 | if (!nau8825_is_jack_inserted(nau8825->regmap)) { | ||
1200 | nau8825_eject_jack(nau8825); | ||
1201 | snd_soc_jack_report(nau8825->jack, 0, SND_JACK_HEADSET); | ||
1202 | } | ||
1203 | |||
1204 | enable_irq(nau8825->irq); | ||
1205 | |||
1206 | /* Run jack detection to check the type (OMTP or CTIA) of the headset | ||
1207 | * if there is one. This handles the case where a different type of | ||
1208 | * headset is plugged in during S3. This triggers an IRQ iff a headset | ||
1209 | * is already plugged in. | ||
1210 | */ | ||
1211 | nau8825_restart_jack_detection(nau8825->regmap); | ||
1212 | |||
1213 | return 0; | ||
1214 | } | ||
1215 | #else | ||
1216 | #define nau8825_suspend NULL | ||
1217 | #define nau8825_resume NULL | ||
1218 | #endif | ||
1219 | |||
1165 | static struct snd_soc_codec_driver nau8825_codec_driver = { | 1220 | static struct snd_soc_codec_driver nau8825_codec_driver = { |
1166 | .probe = nau8825_codec_probe, | 1221 | .probe = nau8825_codec_probe, |
1167 | .set_sysclk = nau8825_set_sysclk, | 1222 | .set_sysclk = nau8825_set_sysclk, |
1168 | .set_pll = nau8825_set_pll, | 1223 | .set_pll = nau8825_set_pll, |
1169 | .set_bias_level = nau8825_set_bias_level, | 1224 | .set_bias_level = nau8825_set_bias_level, |
1170 | .suspend_bias_off = true, | 1225 | .suspend_bias_off = true, |
1226 | .suspend = nau8825_suspend, | ||
1227 | .resume = nau8825_resume, | ||
1171 | 1228 | ||
1172 | .controls = nau8825_controls, | 1229 | .controls = nau8825_controls, |
1173 | .num_controls = ARRAY_SIZE(nau8825_controls), | 1230 | .num_controls = ARRAY_SIZE(nau8825_controls), |
@@ -1277,16 +1334,6 @@ static int nau8825_setup_irq(struct nau8825 *nau8825) | |||
1277 | regmap_update_bits(regmap, NAU8825_REG_ENA_CTRL, | 1334 | regmap_update_bits(regmap, NAU8825_REG_ENA_CTRL, |
1278 | NAU8825_ENABLE_DACR, NAU8825_ENABLE_DACR); | 1335 | NAU8825_ENABLE_DACR, NAU8825_ENABLE_DACR); |
1279 | 1336 | ||
1280 | /* Chip needs one FSCLK cycle in order to generate interrupts, | ||
1281 | * as we cannot guarantee one will be provided by the system. Turning | ||
1282 | * master mode on then off enables us to generate that FSCLK cycle | ||
1283 | * with a minimum of contention on the clock bus. | ||
1284 | */ | ||
1285 | regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2, | ||
1286 | NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_MASTER); | ||
1287 | regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2, | ||
1288 | NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_SLAVE); | ||
1289 | |||
1290 | ret = devm_request_threaded_irq(nau8825->dev, nau8825->irq, NULL, | 1337 | ret = devm_request_threaded_irq(nau8825->dev, nau8825->irq, NULL, |
1291 | nau8825_interrupt, IRQF_TRIGGER_LOW | IRQF_ONESHOT, | 1338 | nau8825_interrupt, IRQF_TRIGGER_LOW | IRQF_ONESHOT, |
1292 | "nau8825", nau8825); | 1339 | "nau8825", nau8825); |
@@ -1354,36 +1401,6 @@ static int nau8825_i2c_remove(struct i2c_client *client) | |||
1354 | return 0; | 1401 | return 0; |
1355 | } | 1402 | } |
1356 | 1403 | ||
1357 | #ifdef CONFIG_PM_SLEEP | ||
1358 | static int nau8825_suspend(struct device *dev) | ||
1359 | { | ||
1360 | struct i2c_client *client = to_i2c_client(dev); | ||
1361 | struct nau8825 *nau8825 = dev_get_drvdata(dev); | ||
1362 | |||
1363 | disable_irq(client->irq); | ||
1364 | regcache_cache_only(nau8825->regmap, true); | ||
1365 | regcache_mark_dirty(nau8825->regmap); | ||
1366 | |||
1367 | return 0; | ||
1368 | } | ||
1369 | |||
1370 | static int nau8825_resume(struct device *dev) | ||
1371 | { | ||
1372 | struct i2c_client *client = to_i2c_client(dev); | ||
1373 | struct nau8825 *nau8825 = dev_get_drvdata(dev); | ||
1374 | |||
1375 | regcache_cache_only(nau8825->regmap, false); | ||
1376 | regcache_sync(nau8825->regmap); | ||
1377 | enable_irq(client->irq); | ||
1378 | |||
1379 | return 0; | ||
1380 | } | ||
1381 | #endif | ||
1382 | |||
1383 | static const struct dev_pm_ops nau8825_pm = { | ||
1384 | SET_SYSTEM_SLEEP_PM_OPS(nau8825_suspend, nau8825_resume) | ||
1385 | }; | ||
1386 | |||
1387 | static const struct i2c_device_id nau8825_i2c_ids[] = { | 1404 | static const struct i2c_device_id nau8825_i2c_ids[] = { |
1388 | { "nau8825", 0 }, | 1405 | { "nau8825", 0 }, |
1389 | { } | 1406 | { } |
@@ -1410,7 +1427,6 @@ static struct i2c_driver nau8825_driver = { | |||
1410 | .name = "nau8825", | 1427 | .name = "nau8825", |
1411 | .of_match_table = of_match_ptr(nau8825_of_ids), | 1428 | .of_match_table = of_match_ptr(nau8825_of_ids), |
1412 | .acpi_match_table = ACPI_PTR(nau8825_acpi_match), | 1429 | .acpi_match_table = ACPI_PTR(nau8825_acpi_match), |
1413 | .pm = &nau8825_pm, | ||
1414 | }, | 1430 | }, |
1415 | .probe = nau8825_i2c_probe, | 1431 | .probe = nau8825_i2c_probe, |
1416 | .remove = nau8825_i2c_remove, | 1432 | .remove = nau8825_i2c_remove, |
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c index e8b5ba04417a..09e8988bbb2d 100644 --- a/sound/soc/codecs/rt5640.c +++ b/sound/soc/codecs/rt5640.c | |||
@@ -359,7 +359,7 @@ static const DECLARE_TLV_DB_RANGE(bst_tlv, | |||
359 | 359 | ||
360 | /* Interface data select */ | 360 | /* Interface data select */ |
361 | static const char * const rt5640_data_select[] = { | 361 | static const char * const rt5640_data_select[] = { |
362 | "Normal", "left copy to right", "right copy to left", "Swap"}; | 362 | "Normal", "Swap", "left copy to right", "right copy to left"}; |
363 | 363 | ||
364 | static SOC_ENUM_SINGLE_DECL(rt5640_if1_dac_enum, RT5640_DIG_INF_DATA, | 364 | static SOC_ENUM_SINGLE_DECL(rt5640_if1_dac_enum, RT5640_DIG_INF_DATA, |
365 | RT5640_IF1_DAC_SEL_SFT, rt5640_data_select); | 365 | RT5640_IF1_DAC_SEL_SFT, rt5640_data_select); |
diff --git a/sound/soc/codecs/rt5640.h b/sound/soc/codecs/rt5640.h index 1761c3a98b76..58b664b06c16 100644 --- a/sound/soc/codecs/rt5640.h +++ b/sound/soc/codecs/rt5640.h | |||
@@ -443,39 +443,39 @@ | |||
443 | #define RT5640_IF1_DAC_SEL_MASK (0x3 << 14) | 443 | #define RT5640_IF1_DAC_SEL_MASK (0x3 << 14) |
444 | #define RT5640_IF1_DAC_SEL_SFT 14 | 444 | #define RT5640_IF1_DAC_SEL_SFT 14 |
445 | #define RT5640_IF1_DAC_SEL_NOR (0x0 << 14) | 445 | #define RT5640_IF1_DAC_SEL_NOR (0x0 << 14) |
446 | #define RT5640_IF1_DAC_SEL_L2R (0x1 << 14) | 446 | #define RT5640_IF1_DAC_SEL_SWAP (0x1 << 14) |
447 | #define RT5640_IF1_DAC_SEL_R2L (0x2 << 14) | 447 | #define RT5640_IF1_DAC_SEL_L2R (0x2 << 14) |
448 | #define RT5640_IF1_DAC_SEL_SWAP (0x3 << 14) | 448 | #define RT5640_IF1_DAC_SEL_R2L (0x3 << 14) |
449 | #define RT5640_IF1_ADC_SEL_MASK (0x3 << 12) | 449 | #define RT5640_IF1_ADC_SEL_MASK (0x3 << 12) |
450 | #define RT5640_IF1_ADC_SEL_SFT 12 | 450 | #define RT5640_IF1_ADC_SEL_SFT 12 |
451 | #define RT5640_IF1_ADC_SEL_NOR (0x0 << 12) | 451 | #define RT5640_IF1_ADC_SEL_NOR (0x0 << 12) |
452 | #define RT5640_IF1_ADC_SEL_L2R (0x1 << 12) | 452 | #define RT5640_IF1_ADC_SEL_SWAP (0x1 << 12) |
453 | #define RT5640_IF1_ADC_SEL_R2L (0x2 << 12) | 453 | #define RT5640_IF1_ADC_SEL_L2R (0x2 << 12) |
454 | #define RT5640_IF1_ADC_SEL_SWAP (0x3 << 12) | 454 | #define RT5640_IF1_ADC_SEL_R2L (0x3 << 12) |
455 | #define RT5640_IF2_DAC_SEL_MASK (0x3 << 10) | 455 | #define RT5640_IF2_DAC_SEL_MASK (0x3 << 10) |
456 | #define RT5640_IF2_DAC_SEL_SFT 10 | 456 | #define RT5640_IF2_DAC_SEL_SFT 10 |
457 | #define RT5640_IF2_DAC_SEL_NOR (0x0 << 10) | 457 | #define RT5640_IF2_DAC_SEL_NOR (0x0 << 10) |
458 | #define RT5640_IF2_DAC_SEL_L2R (0x1 << 10) | 458 | #define RT5640_IF2_DAC_SEL_SWAP (0x1 << 10) |
459 | #define RT5640_IF2_DAC_SEL_R2L (0x2 << 10) | 459 | #define RT5640_IF2_DAC_SEL_L2R (0x2 << 10) |
460 | #define RT5640_IF2_DAC_SEL_SWAP (0x3 << 10) | 460 | #define RT5640_IF2_DAC_SEL_R2L (0x3 << 10) |
461 | #define RT5640_IF2_ADC_SEL_MASK (0x3 << 8) | 461 | #define RT5640_IF2_ADC_SEL_MASK (0x3 << 8) |
462 | #define RT5640_IF2_ADC_SEL_SFT 8 | 462 | #define RT5640_IF2_ADC_SEL_SFT 8 |
463 | #define RT5640_IF2_ADC_SEL_NOR (0x0 << 8) | 463 | #define RT5640_IF2_ADC_SEL_NOR (0x0 << 8) |
464 | #define RT5640_IF2_ADC_SEL_L2R (0x1 << 8) | 464 | #define RT5640_IF2_ADC_SEL_SWAP (0x1 << 8) |
465 | #define RT5640_IF2_ADC_SEL_R2L (0x2 << 8) | 465 | #define RT5640_IF2_ADC_SEL_L2R (0x2 << 8) |
466 | #define RT5640_IF2_ADC_SEL_SWAP (0x3 << 8) | 466 | #define RT5640_IF2_ADC_SEL_R2L (0x3 << 8) |
467 | #define RT5640_IF3_DAC_SEL_MASK (0x3 << 6) | 467 | #define RT5640_IF3_DAC_SEL_MASK (0x3 << 6) |
468 | #define RT5640_IF3_DAC_SEL_SFT 6 | 468 | #define RT5640_IF3_DAC_SEL_SFT 6 |
469 | #define RT5640_IF3_DAC_SEL_NOR (0x0 << 6) | 469 | #define RT5640_IF3_DAC_SEL_NOR (0x0 << 6) |
470 | #define RT5640_IF3_DAC_SEL_L2R (0x1 << 6) | 470 | #define RT5640_IF3_DAC_SEL_SWAP (0x1 << 6) |
471 | #define RT5640_IF3_DAC_SEL_R2L (0x2 << 6) | 471 | #define RT5640_IF3_DAC_SEL_L2R (0x2 << 6) |
472 | #define RT5640_IF3_DAC_SEL_SWAP (0x3 << 6) | 472 | #define RT5640_IF3_DAC_SEL_R2L (0x3 << 6) |
473 | #define RT5640_IF3_ADC_SEL_MASK (0x3 << 4) | 473 | #define RT5640_IF3_ADC_SEL_MASK (0x3 << 4) |
474 | #define RT5640_IF3_ADC_SEL_SFT 4 | 474 | #define RT5640_IF3_ADC_SEL_SFT 4 |
475 | #define RT5640_IF3_ADC_SEL_NOR (0x0 << 4) | 475 | #define RT5640_IF3_ADC_SEL_NOR (0x0 << 4) |
476 | #define RT5640_IF3_ADC_SEL_L2R (0x1 << 4) | 476 | #define RT5640_IF3_ADC_SEL_SWAP (0x1 << 4) |
477 | #define RT5640_IF3_ADC_SEL_R2L (0x2 << 4) | 477 | #define RT5640_IF3_ADC_SEL_L2R (0x2 << 4) |
478 | #define RT5640_IF3_ADC_SEL_SWAP (0x3 << 4) | 478 | #define RT5640_IF3_ADC_SEL_R2L (0x3 << 4) |
479 | 479 | ||
480 | /* REC Left Mixer Control 1 (0x3b) */ | 480 | /* REC Left Mixer Control 1 (0x3b) */ |
481 | #define RT5640_G_HP_L_RM_L_MASK (0x7 << 13) | 481 | #define RT5640_G_HP_L_RM_L_MASK (0x7 << 13) |
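The rt5640 changes above keep the ALSA control consistent with the hardware: for a plain SOC_ENUM_SINGLE the index of the chosen string in the texts array is what gets written into the register field, so the array order must follow the encoding spelled out by the renumbered defines (0 = normal, 1 = swap, 2 = left copy to right, 3 = right copy to left). With the old order, selecting "Swap" (index 3) would have programmed the right-copy-to-left setting. A small illustrative sketch of that index-to-field mapping (values assumed from the defines above):

#include <stdio.h>

int main(void)
{
        static const char * const texts[] = {
                "Normal", "Swap", "left copy to right", "right copy to left",
        };
        unsigned int item = 1;                  /* user picks "Swap" */
        unsigned int bits = item << 14;         /* RT5640_IF1_DAC_SEL_SFT */

        printf("%s -> field value %u, register bits 0x%04x\n",
               texts[item], item, bits);
        return 0;
}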
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c index a8b3e3f701f9..1bae17ee8817 100644 --- a/sound/soc/codecs/wm5102.c +++ b/sound/soc/codecs/wm5102.c | |||
@@ -1955,11 +1955,16 @@ err_adsp2_codec_probe: | |||
1955 | static int wm5102_codec_remove(struct snd_soc_codec *codec) | 1955 | static int wm5102_codec_remove(struct snd_soc_codec *codec) |
1956 | { | 1956 | { |
1957 | struct wm5102_priv *priv = snd_soc_codec_get_drvdata(codec); | 1957 | struct wm5102_priv *priv = snd_soc_codec_get_drvdata(codec); |
1958 | struct arizona *arizona = priv->core.arizona; | ||
1958 | 1959 | ||
1959 | wm_adsp2_codec_remove(&priv->core.adsp[0], codec); | 1960 | wm_adsp2_codec_remove(&priv->core.adsp[0], codec); |
1960 | 1961 | ||
1961 | priv->core.arizona->dapm = NULL; | 1962 | priv->core.arizona->dapm = NULL; |
1962 | 1963 | ||
1964 | arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv); | ||
1965 | |||
1966 | arizona_free_spk(codec); | ||
1967 | |||
1963 | return 0; | 1968 | return 0; |
1964 | } | 1969 | } |
1965 | 1970 | ||
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c index 83ba70fe16e6..2728ac545ffe 100644 --- a/sound/soc/codecs/wm5110.c +++ b/sound/soc/codecs/wm5110.c | |||
@@ -2298,6 +2298,8 @@ static int wm5110_codec_remove(struct snd_soc_codec *codec) | |||
2298 | 2298 | ||
2299 | arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv); | 2299 | arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv); |
2300 | 2300 | ||
2301 | arizona_free_spk(codec); | ||
2302 | |||
2301 | return 0; | 2303 | return 0; |
2302 | } | 2304 | } |
2303 | 2305 | ||
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c index 88223608a33f..720a14e0687d 100644 --- a/sound/soc/codecs/wm8962.c +++ b/sound/soc/codecs/wm8962.c | |||
@@ -2471,7 +2471,7 @@ static void wm8962_configure_bclk(struct snd_soc_codec *codec) | |||
2471 | break; | 2471 | break; |
2472 | default: | 2472 | default: |
2473 | dev_warn(codec->dev, "Unknown DSPCLK divisor read back\n"); | 2473 | dev_warn(codec->dev, "Unknown DSPCLK divisor read back\n"); |
2474 | dspclk = wm8962->sysclk; | 2474 | dspclk = wm8962->sysclk_rate; |
2475 | } | 2475 | } |
2476 | 2476 | ||
2477 | dev_dbg(codec->dev, "DSPCLK is %dHz, BCLK %d\n", dspclk, wm8962->bclk); | 2477 | dev_dbg(codec->dev, "DSPCLK is %dHz, BCLK %d\n", dspclk, wm8962->bclk); |
diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c index 52d766efe14f..6b0785b5a5c5 100644 --- a/sound/soc/codecs/wm8997.c +++ b/sound/soc/codecs/wm8997.c | |||
@@ -1072,6 +1072,8 @@ static int wm8997_codec_remove(struct snd_soc_codec *codec) | |||
1072 | 1072 | ||
1073 | priv->core.arizona->dapm = NULL; | 1073 | priv->core.arizona->dapm = NULL; |
1074 | 1074 | ||
1075 | arizona_free_spk(codec); | ||
1076 | |||
1075 | return 0; | 1077 | return 0; |
1076 | } | 1078 | } |
1077 | 1079 | ||
diff --git a/sound/soc/codecs/wm8998.c b/sound/soc/codecs/wm8998.c index 012396074a8a..449f66636205 100644 --- a/sound/soc/codecs/wm8998.c +++ b/sound/soc/codecs/wm8998.c | |||
@@ -1324,6 +1324,8 @@ static int wm8998_codec_remove(struct snd_soc_codec *codec) | |||
1324 | 1324 | ||
1325 | priv->core.arizona->dapm = NULL; | 1325 | priv->core.arizona->dapm = NULL; |
1326 | 1326 | ||
1327 | arizona_free_spk(codec); | ||
1328 | |||
1327 | return 0; | 1329 | return 0; |
1328 | } | 1330 | } |
1329 | 1331 | ||
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig index b3e6c2300457..1120f4f4d011 100644 --- a/sound/soc/intel/Kconfig +++ b/sound/soc/intel/Kconfig | |||
@@ -163,7 +163,6 @@ config SND_SOC_INTEL_SKYLAKE | |||
163 | tristate | 163 | tristate |
164 | select SND_HDA_EXT_CORE | 164 | select SND_HDA_EXT_CORE |
165 | select SND_SOC_TOPOLOGY | 165 | select SND_SOC_TOPOLOGY |
166 | select SND_HDA_I915 | ||
167 | select SND_SOC_INTEL_SST | 166 | select SND_SOC_INTEL_SST |
168 | 167 | ||
169 | config SND_SOC_INTEL_SKL_RT286_MACH | 168 | config SND_SOC_INTEL_SKL_RT286_MACH |
diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c index ac60f1301e21..91565229d074 100644 --- a/sound/soc/intel/haswell/sst-haswell-ipc.c +++ b/sound/soc/intel/haswell/sst-haswell-ipc.c | |||
@@ -1345,7 +1345,7 @@ int sst_hsw_stream_reset(struct sst_hsw *hsw, struct sst_hsw_stream *stream) | |||
1345 | return 0; | 1345 | return 0; |
1346 | 1346 | ||
1347 | /* wait for pause to complete before we reset the stream */ | 1347 | /* wait for pause to complete before we reset the stream */ |
1348 | while (stream->running && tries--) | 1348 | while (stream->running && --tries) |
1349 | msleep(1); | 1349 | msleep(1); |
1350 | if (!tries) { | 1350 | if (!tries) { |
1351 | dev_err(hsw->dev, "error: reset stream %d still running\n", | 1351 | dev_err(hsw->dev, "error: reset stream %d still running\n", |
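The one-character change in sst_hsw_stream_reset() matters because of the timeout test that follows the loop: with the post-decrement form, tries ends up at -1 when the wait runs out of attempts, so "if (!tries)" never reports the timeout. With pre-decrement the loop leaves tries at exactly 0 in that case. A stand-alone comparison of the two forms (hypothetical values, the msleep(1) body elided):

#include <stdio.h>

static int wait_old(int running, int tries)
{
        while (running && tries--)
                ;               /* msleep(1) in the driver */
        return tries;           /* -1 after a timeout */
}

static int wait_new(int running, int tries)
{
        while (running && --tries)
                ;               /* msleep(1) in the driver */
        return tries;           /* 0 after a timeout */
}

int main(void)
{
        /* stream never stops: the old form misses the timeout, the new form catches it */
        printf("old: tries=%d -> %s\n", wait_old(1, 10),
               wait_old(1, 10) == 0 ? "timeout reported" : "timeout missed");
        printf("new: tries=%d -> %s\n", wait_new(1, 10),
               wait_new(1, 10) == 0 ? "timeout reported" : "timeout missed");
        return 0;
}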
diff --git a/sound/soc/intel/skylake/skl-sst-dsp.c b/sound/soc/intel/skylake/skl-sst-dsp.c index a5267e8a96e0..2962ef22fc84 100644 --- a/sound/soc/intel/skylake/skl-sst-dsp.c +++ b/sound/soc/intel/skylake/skl-sst-dsp.c | |||
@@ -336,6 +336,11 @@ void skl_dsp_free(struct sst_dsp *dsp) | |||
336 | skl_ipc_int_disable(dsp); | 336 | skl_ipc_int_disable(dsp); |
337 | 337 | ||
338 | free_irq(dsp->irq, dsp); | 338 | free_irq(dsp->irq, dsp); |
339 | dsp->cl_dev.ops.cl_cleanup_controller(dsp); | ||
340 | skl_cldma_int_disable(dsp); | ||
341 | skl_ipc_op_int_disable(dsp); | ||
342 | skl_ipc_int_disable(dsp); | ||
343 | |||
339 | skl_dsp_disable_core(dsp); | 344 | skl_dsp_disable_core(dsp); |
340 | } | 345 | } |
341 | EXPORT_SYMBOL_GPL(skl_dsp_free); | 346 | EXPORT_SYMBOL_GPL(skl_dsp_free); |
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c index 545b4e77b8aa..cdb78b7e5a14 100644 --- a/sound/soc/intel/skylake/skl-topology.c +++ b/sound/soc/intel/skylake/skl-topology.c | |||
@@ -239,6 +239,7 @@ static void skl_tplg_update_buffer_size(struct skl_sst *ctx, | |||
239 | { | 239 | { |
240 | int multiplier = 1; | 240 | int multiplier = 1; |
241 | struct skl_module_fmt *in_fmt, *out_fmt; | 241 | struct skl_module_fmt *in_fmt, *out_fmt; |
242 | int in_rate, out_rate; | ||
242 | 243 | ||
243 | 244 | ||
244 | /* Since fixups is applied to pin 0 only, ibs, obs needs | 245 | /* Since fixups is applied to pin 0 only, ibs, obs needs |
@@ -249,15 +250,24 @@ static void skl_tplg_update_buffer_size(struct skl_sst *ctx, | |||
249 | 250 | ||
250 | if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT) | 251 | if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT) |
251 | multiplier = 5; | 252 | multiplier = 5; |
252 | mcfg->ibs = (in_fmt->s_freq / 1000) * | 253 | |
253 | (mcfg->in_fmt->channels) * | 254 | if (in_fmt->s_freq % 1000) |
254 | (mcfg->in_fmt->bit_depth >> 3) * | 255 | in_rate = (in_fmt->s_freq / 1000) + 1; |
255 | multiplier; | 256 | else |
256 | 257 | in_rate = (in_fmt->s_freq / 1000); | |
257 | mcfg->obs = (mcfg->out_fmt->s_freq / 1000) * | 258 | |
258 | (mcfg->out_fmt->channels) * | 259 | mcfg->ibs = in_rate * (mcfg->in_fmt->channels) * |
259 | (mcfg->out_fmt->bit_depth >> 3) * | 260 | (mcfg->in_fmt->bit_depth >> 3) * |
260 | multiplier; | 261 | multiplier; |
262 | |||
263 | if (mcfg->out_fmt->s_freq % 1000) | ||
264 | out_rate = (mcfg->out_fmt->s_freq / 1000) + 1; | ||
265 | else | ||
266 | out_rate = (mcfg->out_fmt->s_freq / 1000); | ||
267 | |||
268 | mcfg->obs = out_rate * (mcfg->out_fmt->channels) * | ||
269 | (mcfg->out_fmt->bit_depth >> 3) * | ||
270 | multiplier; | ||
261 | } | 271 | } |
262 | 272 | ||
263 | static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w, | 273 | static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w, |
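The ibs/obs rework in skl_tplg_update_buffer_size() above is a ceiling division: rates that are not an integral number of kHz must round the per-millisecond frame count up rather than truncate, otherwise a 44.1 kHz stream reserves 44 frames where 45 are needed. A one-function worked check (illustrative only):

#include <stdio.h>

static unsigned int frames_per_ms(unsigned int s_freq)
{
        return s_freq % 1000 ? s_freq / 1000 + 1 : s_freq / 1000;
}

int main(void)
{
        printf("48000 Hz -> %u frames/ms\n", frames_per_ms(48000)); /* 48 */
        printf("44100 Hz -> %u frames/ms\n", frames_per_ms(44100)); /* 45 */
        return 0;
}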
@@ -485,11 +495,15 @@ skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe) | |||
485 | if (!skl_is_pipe_mcps_avail(skl, mconfig)) | 495 | if (!skl_is_pipe_mcps_avail(skl, mconfig)) |
486 | return -ENOMEM; | 496 | return -ENOMEM; |
487 | 497 | ||
498 | skl_tplg_alloc_pipe_mcps(skl, mconfig); | ||
499 | |||
488 | if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) { | 500 | if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) { |
489 | ret = ctx->dsp->fw_ops.load_mod(ctx->dsp, | 501 | ret = ctx->dsp->fw_ops.load_mod(ctx->dsp, |
490 | mconfig->id.module_id, mconfig->guid); | 502 | mconfig->id.module_id, mconfig->guid); |
491 | if (ret < 0) | 503 | if (ret < 0) |
492 | return ret; | 504 | return ret; |
505 | |||
506 | mconfig->m_state = SKL_MODULE_LOADED; | ||
493 | } | 507 | } |
494 | 508 | ||
495 | /* update blob if blob is null for be with default value */ | 509 | /* update blob if blob is null for be with default value */ |
@@ -509,7 +523,6 @@ skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe) | |||
509 | ret = skl_tplg_set_module_params(w, ctx); | 523 | ret = skl_tplg_set_module_params(w, ctx); |
510 | if (ret < 0) | 524 | if (ret < 0) |
511 | return ret; | 525 | return ret; |
512 | skl_tplg_alloc_pipe_mcps(skl, mconfig); | ||
513 | } | 526 | } |
514 | 527 | ||
515 | return 0; | 528 | return 0; |
@@ -524,7 +537,8 @@ static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx, | |||
524 | list_for_each_entry(w_module, &pipe->w_list, node) { | 537 | list_for_each_entry(w_module, &pipe->w_list, node) { |
525 | mconfig = w_module->w->priv; | 538 | mconfig = w_module->w->priv; |
526 | 539 | ||
527 | if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod) | 540 | if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod && |
541 | mconfig->m_state > SKL_MODULE_UNINIT) | ||
528 | return ctx->dsp->fw_ops.unload_mod(ctx->dsp, | 542 | return ctx->dsp->fw_ops.unload_mod(ctx->dsp, |
529 | mconfig->id.module_id); | 543 | mconfig->id.module_id); |
530 | } | 544 | } |
@@ -558,6 +572,9 @@ static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w, | |||
558 | if (!skl_is_pipe_mem_avail(skl, mconfig)) | 572 | if (!skl_is_pipe_mem_avail(skl, mconfig)) |
559 | return -ENOMEM; | 573 | return -ENOMEM; |
560 | 574 | ||
575 | skl_tplg_alloc_pipe_mem(skl, mconfig); | ||
576 | skl_tplg_alloc_pipe_mcps(skl, mconfig); | ||
577 | |||
561 | /* | 578 | /* |
562 | * Create a list of modules for pipe. | 579 | * Create a list of modules for pipe. |
563 | * This list contains modules from source to sink | 580 | * This list contains modules from source to sink |
@@ -601,9 +618,6 @@ static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w, | |||
601 | src_module = dst_module; | 618 | src_module = dst_module; |
602 | } | 619 | } |
603 | 620 | ||
604 | skl_tplg_alloc_pipe_mem(skl, mconfig); | ||
605 | skl_tplg_alloc_pipe_mcps(skl, mconfig); | ||
606 | |||
607 | return 0; | 621 | return 0; |
608 | } | 622 | } |
609 | 623 | ||
diff --git a/sound/soc/intel/skylake/skl-topology.h b/sound/soc/intel/skylake/skl-topology.h index de3c401284d9..d2d923002d5c 100644 --- a/sound/soc/intel/skylake/skl-topology.h +++ b/sound/soc/intel/skylake/skl-topology.h | |||
@@ -274,10 +274,10 @@ struct skl_pipe { | |||
274 | 274 | ||
275 | enum skl_module_state { | 275 | enum skl_module_state { |
276 | SKL_MODULE_UNINIT = 0, | 276 | SKL_MODULE_UNINIT = 0, |
277 | SKL_MODULE_INIT_DONE = 1, | 277 | SKL_MODULE_LOADED = 1, |
278 | SKL_MODULE_LOADED = 2, | 278 | SKL_MODULE_INIT_DONE = 2, |
279 | SKL_MODULE_UNLOADED = 3, | 279 | SKL_MODULE_BIND_DONE = 3, |
280 | SKL_MODULE_BIND_DONE = 4 | 280 | SKL_MODULE_UNLOADED = 4, |
281 | }; | 281 | }; |
282 | 282 | ||
283 | struct skl_module_cfg { | 283 | struct skl_module_cfg { |
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c index ab5e25aaeee3..3982f5536f2d 100644 --- a/sound/soc/intel/skylake/skl.c +++ b/sound/soc/intel/skylake/skl.c | |||
@@ -222,6 +222,7 @@ static int skl_suspend(struct device *dev) | |||
222 | struct hdac_ext_bus *ebus = pci_get_drvdata(pci); | 222 | struct hdac_ext_bus *ebus = pci_get_drvdata(pci); |
223 | struct skl *skl = ebus_to_skl(ebus); | 223 | struct skl *skl = ebus_to_skl(ebus); |
224 | struct hdac_bus *bus = ebus_to_hbus(ebus); | 224 | struct hdac_bus *bus = ebus_to_hbus(ebus); |
225 | int ret = 0; | ||
225 | 226 | ||
226 | /* | 227 | /* |
227 | * Do not suspend if streams which are marked ignore suspend are | 228 | * Do not suspend if streams which are marked ignore suspend are |
@@ -232,10 +233,20 @@ static int skl_suspend(struct device *dev) | |||
232 | enable_irq_wake(bus->irq); | 233 | enable_irq_wake(bus->irq); |
233 | pci_save_state(pci); | 234 | pci_save_state(pci); |
234 | pci_disable_device(pci); | 235 | pci_disable_device(pci); |
235 | return 0; | ||
236 | } else { | 236 | } else { |
237 | return _skl_suspend(ebus); | 237 | ret = _skl_suspend(ebus); |
238 | if (ret < 0) | ||
239 | return ret; | ||
240 | } | ||
241 | |||
242 | if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) { | ||
243 | ret = snd_hdac_display_power(bus, false); | ||
244 | if (ret < 0) | ||
245 | dev_err(bus->dev, | ||
246 | "Cannot turn OFF display power on i915\n"); | ||
238 | } | 247 | } |
248 | |||
249 | return ret; | ||
239 | } | 250 | } |
240 | 251 | ||
241 | static int skl_resume(struct device *dev) | 252 | static int skl_resume(struct device *dev) |
@@ -316,17 +327,20 @@ static int skl_free(struct hdac_ext_bus *ebus) | |||
316 | 327 | ||
317 | if (bus->irq >= 0) | 328 | if (bus->irq >= 0) |
318 | free_irq(bus->irq, (void *)bus); | 329 | free_irq(bus->irq, (void *)bus); |
319 | if (bus->remap_addr) | ||
320 | iounmap(bus->remap_addr); | ||
321 | |||
322 | snd_hdac_bus_free_stream_pages(bus); | 330 | snd_hdac_bus_free_stream_pages(bus); |
323 | snd_hdac_stream_free_all(ebus); | 331 | snd_hdac_stream_free_all(ebus); |
324 | snd_hdac_link_free_all(ebus); | 332 | snd_hdac_link_free_all(ebus); |
333 | |||
334 | if (bus->remap_addr) | ||
335 | iounmap(bus->remap_addr); | ||
336 | |||
325 | pci_release_regions(skl->pci); | 337 | pci_release_regions(skl->pci); |
326 | pci_disable_device(skl->pci); | 338 | pci_disable_device(skl->pci); |
327 | 339 | ||
328 | snd_hdac_ext_bus_exit(ebus); | 340 | snd_hdac_ext_bus_exit(ebus); |
329 | 341 | ||
342 | if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) | ||
343 | snd_hdac_i915_exit(&ebus->bus); | ||
330 | return 0; | 344 | return 0; |
331 | } | 345 | } |
332 | 346 | ||
@@ -719,12 +733,12 @@ static void skl_remove(struct pci_dev *pci) | |||
719 | if (skl->tplg) | 733 | if (skl->tplg) |
720 | release_firmware(skl->tplg); | 734 | release_firmware(skl->tplg); |
721 | 735 | ||
722 | if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) | ||
723 | snd_hdac_i915_exit(&ebus->bus); | ||
724 | |||
725 | if (pci_dev_run_wake(pci)) | 736 | if (pci_dev_run_wake(pci)) |
726 | pm_runtime_get_noresume(&pci->dev); | 737 | pm_runtime_get_noresume(&pci->dev); |
727 | pci_dev_put(pci); | 738 | |
739 | /* codec removal, invoke bus_device_remove */ | ||
740 | snd_hdac_ext_bus_device_remove(ebus); | ||
741 | |||
728 | skl_platform_unregister(&pci->dev); | 742 | skl_platform_unregister(&pci->dev); |
729 | skl_free_dsp(skl); | 743 | skl_free_dsp(skl); |
730 | skl_machine_device_unregister(skl); | 744 | skl_machine_device_unregister(skl); |
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 801ae1a81dfd..c4464858bf01 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c | |||
@@ -2188,6 +2188,13 @@ static ssize_t dapm_widget_show_component(struct snd_soc_component *cmpnt, | |||
2188 | int count = 0; | 2188 | int count = 0; |
2189 | char *state = "not set"; | 2189 | char *state = "not set"; |
2190 | 2190 | ||
2191 | /* card won't be set for the dummy component, as a spot fix | ||
2192 | * we're checking for that case specifically here but in future | ||
2193 | * we will ensure that the dummy component looks like others. | ||
2194 | */ | ||
2195 | if (!cmpnt->card) | ||
2196 | return 0; | ||
2197 | |||
2191 | list_for_each_entry(w, &cmpnt->card->widgets, list) { | 2198 | list_for_each_entry(w, &cmpnt->card->widgets, list) { |
2192 | if (w->dapm != dapm) | 2199 | if (w->dapm != dapm) |
2193 | continue; | 2200 | continue; |