author     David S. Miller <davem@davemloft.net>  2017-10-27 11:10:24 -0400
committer  David S. Miller <davem@davemloft.net>  2017-10-27 11:10:24 -0400
commit     c9d0dc4b119ee9cab57977924f8d9aeb2bd48eb9 (patch)
tree       11fd975c8ad77663a2732d83e231fc62d4356720 /drivers/net
parent     cc49c8ff68144d64de18fc5d0039356e0060c0e4 (diff)
parent     ca32fb034c19e00cfb5e0fd7217eb92f81302048 (diff)
Merge branch 'qualcomm-rmnet-Add-64-bit-stats-and-GRO'
Subash Abhinov Kasiviswanathan says:

====================
net: qualcomm: rmnet: Add 64 bit stats and GRO

This series adds support for 64 bit per cpu stats and GRO.

Patches 1-2 are cleanups of return code and a redundant condition.
Patch 3 adds support for 64 bit per cpu stats.
Patch 4 adds support for GRO using GRO cells.

v1->v2:
Since gro_cells_init() could potentially fail, move it from device
setup to ndo_init() as mentioned by Eric.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
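For readers unfamiliar with the pattern the series adopts: a 64-bit counter cannot be loaded atomically on 32-bit architectures, so each CPU's counter block is paired with a u64_stats_sync seqcount. Writers bracket updates with u64_stats_update_begin/end; readers retry if they observe a concurrent update; on 64-bit builds the begin/end calls compile away. The sketch below is illustrative only. The demo_* names are invented for this note and are not part of the driver; the real rmnet code is in the rmnet_vnd.c hunks further down.

/* Minimal sketch of the per-CPU 64-bit stats pattern (illustrative). */
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct demo_pcpu_stats {		/* hypothetical example struct */
	u64 packets;
	struct u64_stats_sync syncp;
};

/* Writer: the datapath increments its own CPU's slot, no locks. */
static void demo_count_packet(struct demo_pcpu_stats __percpu *stats)
{
	struct demo_pcpu_stats *s = this_cpu_ptr(stats);

	u64_stats_update_begin(&s->syncp);
	s->packets++;
	u64_stats_update_end(&s->syncp);
}

/* Reader: sums all CPUs, retrying any slot a writer raced with. */
static u64 demo_total_packets(struct demo_pcpu_stats __percpu *stats)
{
	unsigned int cpu, start;
	u64 total = 0;

	for_each_possible_cpu(cpu) {
		struct demo_pcpu_stats *s = per_cpu_ptr(stats, cpu);
		u64 pkts;

		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			pkts = s->packets;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));

		total += pkts;
	}

	return total;
}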
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/Kconfig               1
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h       16
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c     38
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h           3
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c   4
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c          84
6 files changed, 114 insertions, 32 deletions
diff --git a/drivers/net/ethernet/qualcomm/rmnet/Kconfig b/drivers/net/ethernet/qualcomm/rmnet/Kconfig
index 6e2587af47a4..9bb06d284644 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/Kconfig
+++ b/drivers/net/ethernet/qualcomm/rmnet/Kconfig
@@ -5,6 +5,7 @@
 menuconfig RMNET
 	tristate "RmNet MAP driver"
 	default n
+	select GRO_CELLS
 	---help---
 	  If you select this, you will enable the RMNET module which is used
 	  for handling data in the multiplexing and aggregation protocol (MAP)
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
index 60115e69e415..c19259eea99e 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
@@ -14,6 +14,7 @@
  */
 
 #include <linux/skbuff.h>
+#include <net/gro_cells.h>
 
 #ifndef _RMNET_CONFIG_H_
 #define _RMNET_CONFIG_H_
@@ -41,9 +42,24 @@ struct rmnet_port {
 
 extern struct rtnl_link_ops rmnet_link_ops;
 
+struct rmnet_vnd_stats {
+	u64 rx_pkts;
+	u64 rx_bytes;
+	u64 tx_pkts;
+	u64 tx_bytes;
+	u32 tx_drops;
+};
+
+struct rmnet_pcpu_stats {
+	struct rmnet_vnd_stats stats;
+	struct u64_stats_sync syncp;
+};
+
 struct rmnet_priv {
 	u8 mux_id;
 	struct net_device *real_dev;
+	struct rmnet_pcpu_stats __percpu *pcpu_stats;
+	struct gro_cells gro_cells;
 };
 
 struct rmnet_port *rmnet_get_port(struct net_device *real_dev);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index df3d2d16ce55..29842ccc91a9 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -43,22 +43,23 @@ static void rmnet_set_skb_proto(struct sk_buff *skb)
 
 /* Generic handler */
 
-static rx_handler_result_t
+static void
 rmnet_deliver_skb(struct sk_buff *skb)
 {
+	struct rmnet_priv *priv = netdev_priv(skb->dev);
+
 	skb_reset_transport_header(skb);
 	skb_reset_network_header(skb);
 	rmnet_vnd_rx_fixup(skb, skb->dev);
 
 	skb->pkt_type = PACKET_HOST;
 	skb_set_mac_header(skb, 0);
-	netif_receive_skb(skb);
-	return RX_HANDLER_CONSUMED;
+	gro_cells_receive(&priv->gro_cells, skb);
 }
 
 /* MAP handler */
 
-static rx_handler_result_t
+static void
 __rmnet_map_ingress_handler(struct sk_buff *skb,
 			    struct rmnet_port *port)
 {
@@ -84,38 +85,33 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
 	if (!ep)
 		goto free_skb;
 
-	if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEMUXING)
-		skb->dev = ep->egress_dev;
+	skb->dev = ep->egress_dev;
 
 	/* Subtract MAP header */
 	skb_pull(skb, sizeof(struct rmnet_map_header));
 	skb_trim(skb, len);
 	rmnet_set_skb_proto(skb);
-	return rmnet_deliver_skb(skb);
+	rmnet_deliver_skb(skb);
+	return;
 
 free_skb:
 	kfree_skb(skb);
-	return RX_HANDLER_CONSUMED;
 }
 
-static rx_handler_result_t
+static void
 rmnet_map_ingress_handler(struct sk_buff *skb,
 			  struct rmnet_port *port)
 {
 	struct sk_buff *skbn;
-	int rc;
 
 	if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
 		while ((skbn = rmnet_map_deaggregate(skb)) != NULL)
 			__rmnet_map_ingress_handler(skbn, port);
 
 		consume_skb(skb);
-		rc = RX_HANDLER_CONSUMED;
 	} else {
-		rc = __rmnet_map_ingress_handler(skb, port);
+		__rmnet_map_ingress_handler(skb, port);
 	}
-
-	return rc;
 }
 
 static int rmnet_map_egress_handler(struct sk_buff *skb,
@@ -149,15 +145,13 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
 	return RMNET_MAP_SUCCESS;
 }
 
-static rx_handler_result_t
+static void
 rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
 {
 	if (bridge_dev) {
 		skb->dev = bridge_dev;
 		dev_queue_xmit(skb);
 	}
-
-	return RX_HANDLER_CONSUMED;
 }
 
 /* Ingress / Egress Entry Points */
@@ -168,13 +162,12 @@ rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
  */
 rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
 {
-	int rc = RX_HANDLER_CONSUMED;
 	struct sk_buff *skb = *pskb;
 	struct rmnet_port *port;
 	struct net_device *dev;
 
 	if (!skb)
-		return RX_HANDLER_CONSUMED;
+		goto done;
 
 	dev = skb->dev;
 	port = rmnet_get_port(dev);
@@ -182,14 +175,15 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
 	switch (port->rmnet_mode) {
 	case RMNET_EPMODE_VND:
 		if (port->ingress_data_format & RMNET_INGRESS_FORMAT_MAP)
-			rc = rmnet_map_ingress_handler(skb, port);
+			rmnet_map_ingress_handler(skb, port);
 		break;
 	case RMNET_EPMODE_BRIDGE:
-		rc = rmnet_bridge_handler(skb, port->bridge_ep);
+		rmnet_bridge_handler(skb, port->bridge_ep);
 		break;
 	}
 
-	return rc;
+done:
+	return RX_HANDLER_CONSUMED;
 }
 
 /* Modifies packet as per logical endpoint configuration and egress data format
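The switch above from netif_receive_skb() to gro_cells_receive() is what actually enables GRO: a struct gro_cells holds one small NAPI context per CPU, and packets queued to it pass through the GRO engine before reaching the stack. Below is a minimal lifecycle sketch for a hypothetical virtual device; the demo_* names are illustrative only, and rmnet's real wiring appears in the rmnet_vnd.c diff further down.

/* Illustrative sketch (not driver code): the gro_cells lifecycle. */
#include <linux/netdevice.h>
#include <net/gro_cells.h>

struct demo_priv {			/* hypothetical private area */
	struct gro_cells gro_cells;
};

static int demo_ndo_init(struct net_device *dev)
{
	struct demo_priv *priv = netdev_priv(dev);

	/* Allocates per-CPU NAPI contexts, so it can fail; doing it in
	 * .ndo_init lets register_netdevice() propagate the error (the
	 * v1->v2 change noted in the cover letter). */
	return gro_cells_init(&priv->gro_cells, dev);
}

static void demo_ndo_uninit(struct net_device *dev)
{
	struct demo_priv *priv = netdev_priv(dev);

	gro_cells_destroy(&priv->gro_cells);
}

/* RX path: hand the skb to this CPU's cell instead of calling
 * netif_receive_skb(); NAPI then runs GRO over what was queued. */
static void demo_rx(struct net_device *dev, struct sk_buff *skb)
{
	struct demo_priv *priv = netdev_priv(dev);

	gro_cells_receive(&priv->gro_cells, skb);
}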
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
index ce2302c25b12..3af3fe7b5457 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -80,7 +80,6 @@ u8 rmnet_map_demultiplex(struct sk_buff *skb);
 struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb);
 struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
 						  int hdrlen, int pad);
-rx_handler_result_t rmnet_map_command(struct sk_buff *skb,
-				      struct rmnet_port *port);
+void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
 
 #endif /* _RMNET_MAP_H_ */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
index 74d362f71cce..51e604923ac1 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
@@ -76,8 +76,7 @@ static void rmnet_map_send_ack(struct sk_buff *skb,
 /* Process MAP command frame and send N/ACK message as appropriate. Message cmd
  * name is decoded here and appropriate handler is called.
  */
-rx_handler_result_t rmnet_map_command(struct sk_buff *skb,
-				      struct rmnet_port *port)
+void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port)
 {
 	struct rmnet_map_control_command *cmd;
 	unsigned char command_name;
@@ -102,5 +101,4 @@ rx_handler_result_t rmnet_map_command(struct sk_buff *skb,
 	}
 	if (rc == RMNET_MAP_COMMAND_ACK)
 		rmnet_map_send_ack(skb, rc);
-	return RX_HANDLER_CONSUMED;
 }
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 12bd0bbd5235..9caa5e387450 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -27,14 +27,28 @@
 
 void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
 {
-	dev->stats.rx_packets++;
-	dev->stats.rx_bytes += skb->len;
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct rmnet_pcpu_stats *pcpu_ptr;
+
+	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
+
+	u64_stats_update_begin(&pcpu_ptr->syncp);
+	pcpu_ptr->stats.rx_pkts++;
+	pcpu_ptr->stats.rx_bytes += skb->len;
+	u64_stats_update_end(&pcpu_ptr->syncp);
 }
 
 void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
 {
-	dev->stats.tx_packets++;
-	dev->stats.tx_bytes += skb->len;
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct rmnet_pcpu_stats *pcpu_ptr;
+
+	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
+
+	u64_stats_update_begin(&pcpu_ptr->syncp);
+	pcpu_ptr->stats.tx_pkts++;
+	pcpu_ptr->stats.tx_bytes += skb->len;
+	u64_stats_update_end(&pcpu_ptr->syncp);
 }
 
 /* Network Device Operations */
@@ -48,7 +62,7 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
 	if (priv->real_dev) {
 		rmnet_egress_handler(skb);
 	} else {
-		dev->stats.tx_dropped++;
+		this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
 		kfree_skb(skb);
 	}
 	return NETDEV_TX_OK;
@@ -70,12 +84,72 @@ static int rmnet_vnd_get_iflink(const struct net_device *dev)
 	return priv->real_dev->ifindex;
 }
 
+static int rmnet_vnd_init(struct net_device *dev)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	int err;
+
+	priv->pcpu_stats = alloc_percpu(struct rmnet_pcpu_stats);
+	if (!priv->pcpu_stats)
+		return -ENOMEM;
+
+	err = gro_cells_init(&priv->gro_cells, dev);
+	if (err) {
+		free_percpu(priv->pcpu_stats);
+		return err;
+	}
+
+	return 0;
+}
+
+static void rmnet_vnd_uninit(struct net_device *dev)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+
+	gro_cells_destroy(&priv->gro_cells);
+	free_percpu(priv->pcpu_stats);
+}
+
+static void rmnet_get_stats64(struct net_device *dev,
+			      struct rtnl_link_stats64 *s)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct rmnet_vnd_stats total_stats;
+	struct rmnet_pcpu_stats *pcpu_ptr;
+	unsigned int cpu, start;
+
+	memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));
+
+	for_each_possible_cpu(cpu) {
+		pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
+
+		do {
+			start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
+			total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
+			total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
+			total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
+			total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
+		} while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));
+
+		total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
+	}
+
+	s->rx_packets = total_stats.rx_pkts;
+	s->rx_bytes = total_stats.rx_bytes;
+	s->tx_packets = total_stats.tx_pkts;
+	s->tx_bytes = total_stats.tx_bytes;
+	s->tx_dropped = total_stats.tx_drops;
+}
+
 static const struct net_device_ops rmnet_vnd_ops = {
 	.ndo_start_xmit = rmnet_vnd_start_xmit,
 	.ndo_change_mtu = rmnet_vnd_change_mtu,
 	.ndo_get_iflink = rmnet_vnd_get_iflink,
 	.ndo_add_slave = rmnet_add_bridge,
 	.ndo_del_slave = rmnet_del_bridge,
+	.ndo_init = rmnet_vnd_init,
+	.ndo_uninit = rmnet_vnd_uninit,
+	.ndo_get_stats64 = rmnet_get_stats64,
 };
 
 /* Called by kernel whenever a new rmnet<n> device is created. Sets MTU,
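One design note on the stats hunks above: tx_drops is a u32 updated with this_cpu_inc() and summed in rmnet_get_stats64() outside the fetch_begin/retry loop. That is safe because a 32-bit load is atomic on every architecture the kernel supports, so only the 64-bit packet and byte counters need the u64_stats_sync protection; the drop path in rmnet_vnd_start_xmit() therefore gets away with a single irq-safe per-CPU increment.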