author	Marcin Wojtas <mw@semihalf.com>	2016-04-01 09:21:18 -0400
committer	David S. Miller <davem@davemloft.net>	2016-04-01 15:16:37 -0400
commit	db5dd0db2d8352bb7fd5e9d16e17b79d66c7e4e3 (patch)
tree	2ee755c9338719b60c84ba0fb60be01d035fa643
parent	ce2a04c15f4b943015aab0c7476cb1460654e914 (diff)
net: mvneta: fix changing MTU when using per-cpu processing
After enabling per-cpu processing, it turned out that under heavy load, changing the MTU can result in all of the port's interrupts being blocked, so that no data can be transmitted after the change. This commit fixes the issue by disabling the per-cpu interrupts for the time during which the TXQs and RXQs are reconfigured.

Signed-off-by: Marcin Wojtas <mw@semihalf.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/ethernet/marvell/mvneta.c	30
1 file changed, 16 insertions(+), 14 deletions(-)
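The core of the fix is a quiesce pattern: mask the per-CPU interrupt on every CPU before the queues are torn down, and unmask it only after they have been reallocated. The stand-alone sketch below is not code from the patch; it reuses the real kernel APIs (on_each_cpu(), enable_percpu_irq(), disable_percpu_irq()), but the sketch_* helpers and the reconfigure function are hypothetical placeholders for the mvneta queue handling.

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/smp.h>

/* Callback executed on each CPU; on_each_cpu() fans it out to all cores. */
static void sketch_percpu_disable(void *arg)
{
	unsigned int irq = *(unsigned int *)arg;

	disable_percpu_irq(irq);
}

static void sketch_percpu_enable(void *arg)
{
	unsigned int irq = *(unsigned int *)arg;

	enable_percpu_irq(irq, IRQ_TYPE_NONE);
}

/* Hypothetical reconfigure path, mirroring the ordering the patch uses. */
static int sketch_reconfigure(unsigned int irq)
{
	/* Mask the per-CPU IRQ on every CPU so no core can service
	 * stale queue state while it is being torn down.
	 */
	on_each_cpu(sketch_percpu_disable, &irq, true);

	/* ... free and reallocate the TX/RX queues for the new MTU ... */

	/* Unmask on every CPU only once the new queues are in place. */
	on_each_cpu(sketch_percpu_enable, &irq, true);

	return 0;
}

Passing wait = true as the last argument makes on_each_cpu() synchronous, so queue teardown cannot race with a CPU that has not yet masked its interrupt.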
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index b1db000f1927..7fc490225da5 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -3042,6 +3042,20 @@ static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
 	return mtu;
 }
 
+static void mvneta_percpu_enable(void *arg)
+{
+	struct mvneta_port *pp = arg;
+
+	enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
+}
+
+static void mvneta_percpu_disable(void *arg)
+{
+	struct mvneta_port *pp = arg;
+
+	disable_percpu_irq(pp->dev->irq);
+}
+
 /* Change the device mtu */
 static int mvneta_change_mtu(struct net_device *dev, int mtu)
 {
@@ -3066,6 +3080,7 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
 	 * reallocation of the queues
 	 */
 	mvneta_stop_dev(pp);
+	on_each_cpu(mvneta_percpu_disable, pp, true);
 
 	mvneta_cleanup_txqs(pp);
 	mvneta_cleanup_rxqs(pp);
@@ -3089,6 +3104,7 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
 		return ret;
 	}
 
+	on_each_cpu(mvneta_percpu_enable, pp, true);
 	mvneta_start_dev(pp);
 	mvneta_port_up(pp);
 
@@ -3242,20 +3258,6 @@ static void mvneta_mdio_remove(struct mvneta_port *pp)
 	pp->phy_dev = NULL;
 }
 
-static void mvneta_percpu_enable(void *arg)
-{
-	struct mvneta_port *pp = arg;
-
-	enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
-}
-
-static void mvneta_percpu_disable(void *arg)
-{
-	struct mvneta_port *pp = arg;
-
-	disable_percpu_irq(pp->dev->irq);
-}
-
 /* Electing a CPU must be done in an atomic way: it should be done
  * after or before the removal/insertion of a CPU and this function is
  * not reentrant.