about summary refs log tree commit diff stats
path: root/drivers/net/ethernet/sun
diff options
context:
space:
mode:
authorDavid L Stevens <david.stevens@oracle.com>2014-12-02 15:31:38 -0500
committerDavid S. Miller <davem@davemloft.net>2014-12-08 21:19:04 -0500
commit368e36ed4c76a408608091045914f9a3d11d7209 (patch)
treefa68a5241fb0198dcbe9011b863ec22429cab627 /drivers/net/ethernet/sun
parent9a72dd4d6b0302c8853d3fece4ed7452181df6ee (diff)
sunvnet: add TSO support
This patch adds TSO support for the sunvnet driver.

Signed-off-by: David L Stevens <david.stevens@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/sun')
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c95
-rw-r--r--drivers/net/ethernet/sun/sunvnet.h9
2 files changed, 95 insertions, 9 deletions
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index d19b3582cdf0..aac7d933319b 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -120,8 +120,15 @@ static int vnet_send_attr(struct vio_driver_state *vio)
120 pkt.mtu = framelen + VLAN_HLEN; 120 pkt.mtu = framelen + VLAN_HLEN;
121 } 121 }
122 122
123 pkt.plnk_updt = PHYSLINK_UPDATE_NONE;
124 pkt.cflags = 0; 123 pkt.cflags = 0;
124 if (vio_version_after_eq(vio, 1, 7) && port->tso) {
125 pkt.cflags |= VNET_LSO_IPV4_CAPAB;
126 if (!port->tsolen)
127 port->tsolen = VNET_MAXTSO;
128 pkt.ipv4_lso_maxlen = port->tsolen;
129 }
130
131 pkt.plnk_updt = PHYSLINK_UPDATE_NONE;
125 132
126 viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] " 133 viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
127 "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] " 134 "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
@@ -175,6 +182,26 @@ static int handle_attr_info(struct vio_driver_state *vio,
175 } 182 }
176 port->rmtu = localmtu; 183 port->rmtu = localmtu;
177 184
185 /* LSO negotiation */
186 if (vio_version_after_eq(vio, 1, 7))
187 port->tso &= !!(pkt->cflags & VNET_LSO_IPV4_CAPAB);
188 else
189 port->tso = false;
190 if (port->tso) {
191 if (!port->tsolen)
192 port->tsolen = VNET_MAXTSO;
193 port->tsolen = min(port->tsolen, pkt->ipv4_lso_maxlen);
194 if (port->tsolen < VNET_MINTSO) {
195 port->tso = false;
196 port->tsolen = 0;
197 pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
198 }
199 pkt->ipv4_lso_maxlen = port->tsolen;
200 } else {
201 pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
202 pkt->ipv4_lso_maxlen = 0;
203 }
204
178 /* for version >= 1.6, ACK packet mode we support */ 205 /* for version >= 1.6, ACK packet mode we support */
179 if (vio_version_after_eq(vio, 1, 6)) { 206 if (vio_version_after_eq(vio, 1, 6)) {
180 pkt->xfer_mode = VIO_NEW_DRING_MODE; 207 pkt->xfer_mode = VIO_NEW_DRING_MODE;
@@ -721,6 +748,8 @@ ldc_ctrl:
721 748
722 if (event == LDC_EVENT_RESET) { 749 if (event == LDC_EVENT_RESET) {
723 port->rmtu = 0; 750 port->rmtu = 0;
751 port->tso = true;
752 port->tsolen = 0;
724 vio_port_up(vio); 753 vio_port_up(vio);
725 } 754 }
726 port->rx_event = 0; 755 port->rx_event = 0;
@@ -1131,10 +1160,36 @@ static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb)
1131 struct net_device *dev = port->vp->dev; 1160 struct net_device *dev = port->vp->dev;
1132 struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 1161 struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
1133 struct sk_buff *segs; 1162 struct sk_buff *segs;
1134 int maclen; 1163 int maclen, datalen;
1135 int status; 1164 int status;
1165 int gso_size, gso_type, gso_segs;
1166 int hlen = skb_transport_header(skb) - skb_mac_header(skb);
1167 int proto = IPPROTO_IP;
1168
1169 if (skb->protocol == htons(ETH_P_IP))
1170 proto = ip_hdr(skb)->protocol;
1171 else if (skb->protocol == htons(ETH_P_IPV6))
1172 proto = ipv6_hdr(skb)->nexthdr;
1173
1174 if (proto == IPPROTO_TCP)
1175 hlen += tcp_hdr(skb)->doff * 4;
1176 else if (proto == IPPROTO_UDP)
1177 hlen += sizeof(struct udphdr);
1178 else {
1179 pr_err("vnet_handle_offloads GSO with unknown transport "
1180 "protocol %d tproto %d\n", skb->protocol, proto);
1181 hlen = 128; /* XXX */
1182 }
1183 datalen = port->tsolen - hlen;
1184
1185 gso_size = skb_shinfo(skb)->gso_size;
1186 gso_type = skb_shinfo(skb)->gso_type;
1187 gso_segs = skb_shinfo(skb)->gso_segs;
1188
1189 if (port->tso && gso_size < datalen)
1190 gso_segs = DIV_ROUND_UP(skb->len - hlen, datalen);
1136 1191
1137 if (unlikely(vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs)) { 1192 if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) {
1138 struct netdev_queue *txq; 1193 struct netdev_queue *txq;
1139 1194
1140 txq = netdev_get_tx_queue(dev, port->q_index); 1195 txq = netdev_get_tx_queue(dev, port->q_index);
@@ -1147,7 +1202,19 @@ static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb)
1147 maclen = skb_network_header(skb) - skb_mac_header(skb); 1202 maclen = skb_network_header(skb) - skb_mac_header(skb);
1148 skb_pull(skb, maclen); 1203 skb_pull(skb, maclen);
1149 1204
1150 segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO); 1205 if (port->tso && gso_size < datalen) {
1206 /* segment to TSO size */
1207 skb_shinfo(skb)->gso_size = datalen;
1208 skb_shinfo(skb)->gso_segs = gso_segs;
1209
1210 segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
1211
1212 /* restore gso_size & gso_segs */
1213 skb_shinfo(skb)->gso_size = gso_size;
1214 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len - hlen,
1215 gso_size);
1216 } else
1217 segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
1151 if (IS_ERR(segs)) { 1218 if (IS_ERR(segs)) {
1152 dev->stats.tx_dropped++; 1219 dev->stats.tx_dropped++;
1153 return NETDEV_TX_OK; 1220 return NETDEV_TX_OK;
@@ -1162,6 +1229,13 @@ static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb)
1162 1229
1163 segs = segs->next; 1230 segs = segs->next;
1164 curr->next = NULL; 1231 curr->next = NULL;
1232 if (port->tso && curr->len > dev->mtu) {
1233 skb_shinfo(curr)->gso_size = gso_size;
1234 skb_shinfo(curr)->gso_type = gso_type;
1235 skb_shinfo(curr)->gso_segs =
1236 DIV_ROUND_UP(curr->len - hlen, gso_size);
1237 } else
1238 skb_shinfo(curr)->gso_size = 0;
1165 1239
1166 skb_push(curr, maclen); 1240 skb_push(curr, maclen);
1167 skb_reset_mac_header(curr); 1241 skb_reset_mac_header(curr);
@@ -1203,13 +1277,13 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
1203 goto out_dropped; 1277 goto out_dropped;
1204 } 1278 }
1205 1279
1206 if (skb_is_gso(skb)) { 1280 if (skb_is_gso(skb) && skb->len > port->tsolen) {
1207 err = vnet_handle_offloads(port, skb); 1281 err = vnet_handle_offloads(port, skb);
1208 rcu_read_unlock(); 1282 rcu_read_unlock();
1209 return err; 1283 return err;
1210 } 1284 }
1211 1285
1212 if (skb->len > port->rmtu) { 1286 if (!skb_is_gso(skb) && skb->len > port->rmtu) {
1213 unsigned long localmtu = port->rmtu - ETH_HLEN; 1287 unsigned long localmtu = port->rmtu - ETH_HLEN;
1214 1288
1215 if (vio_version_after_eq(&port->vio, 1, 3)) 1289 if (vio_version_after_eq(&port->vio, 1, 3))
@@ -1306,6 +1380,11 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
1306 struct vio_net_dext *dext = vio_net_ext(d); 1380 struct vio_net_dext *dext = vio_net_ext(d);
1307 1381
1308 memset(dext, 0, sizeof(*dext)); 1382 memset(dext, 0, sizeof(*dext));
1383 if (skb_is_gso(port->tx_bufs[txi].skb)) {
1384 dext->ipv4_lso_mss = skb_shinfo(port->tx_bufs[txi].skb)
1385 ->gso_size;
1386 dext->flags |= VNET_PKT_IPV4_LSO;
1387 }
1309 if (vio_version_after_eq(&port->vio, 1, 8) && 1388 if (vio_version_after_eq(&port->vio, 1, 8) &&
1310 !port->switch_port) { 1389 !port->switch_port) {
1311 dext->flags |= VNET_PKT_HCK_IPV4_HDRCKSUM_OK; 1390 dext->flags |= VNET_PKT_HCK_IPV4_HDRCKSUM_OK;
@@ -1712,7 +1791,7 @@ static struct vnet *vnet_new(const u64 *local_mac)
1712 dev->ethtool_ops = &vnet_ethtool_ops; 1791 dev->ethtool_ops = &vnet_ethtool_ops;
1713 dev->watchdog_timeo = VNET_TX_TIMEOUT; 1792 dev->watchdog_timeo = VNET_TX_TIMEOUT;
1714 1793
1715 dev->hw_features = NETIF_F_GSO | NETIF_F_GSO_SOFTWARE | 1794 dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE |
1716 NETIF_F_HW_CSUM | NETIF_F_SG; 1795 NETIF_F_HW_CSUM | NETIF_F_SG;
1717 dev->features = dev->hw_features; 1796 dev->features = dev->hw_features;
1718 1797
@@ -1892,6 +1971,8 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1892 if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL) 1971 if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL)
1893 switch_port = 1; 1972 switch_port = 1;
1894 port->switch_port = switch_port; 1973 port->switch_port = switch_port;
1974 port->tso = true;
1975 port->tsolen = 0;
1895 1976
1896 spin_lock_irqsave(&vp->lock, flags); 1977 spin_lock_irqsave(&vp->lock, flags);
1897 if (switch_port) 1978 if (switch_port)
diff --git a/drivers/net/ethernet/sun/sunvnet.h b/drivers/net/ethernet/sun/sunvnet.h
index cd5d343ea232..01ca78191683 100644
--- a/drivers/net/ethernet/sun/sunvnet.h
+++ b/drivers/net/ethernet/sun/sunvnet.h
@@ -20,6 +20,9 @@
20#define VNET_TX_RING_SIZE 512 20#define VNET_TX_RING_SIZE 512
21#define VNET_TX_WAKEUP_THRESH(dr) ((dr)->pending / 4) 21#define VNET_TX_WAKEUP_THRESH(dr) ((dr)->pending / 4)
22 22
23#define VNET_MINTSO 2048 /* VIO protocol's minimum TSO len */
24#define VNET_MAXTSO 65535 /* VIO protocol's maximum TSO len */
25
23/* VNET packets are sent in buffers with the first 6 bytes skipped 26/* VNET packets are sent in buffers with the first 6 bytes skipped
24 * so that after the ethernet header the IPv4/IPv6 headers are aligned 27 * so that after the ethernet header the IPv4/IPv6 headers are aligned
25 * properly. 28 * properly.
@@ -40,8 +43,9 @@ struct vnet_port {
40 43
41 struct hlist_node hash; 44 struct hlist_node hash;
42 u8 raddr[ETH_ALEN]; 45 u8 raddr[ETH_ALEN];
43 u8 switch_port; 46 unsigned switch_port:1;
44 u8 __pad; 47 unsigned tso:1;
48 unsigned __pad:14;
45 49
46 struct vnet *vp; 50 struct vnet *vp;
47 51
@@ -56,6 +60,7 @@ struct vnet_port {
56 struct timer_list clean_timer; 60 struct timer_list clean_timer;
57 61
58 u64 rmtu; 62 u64 rmtu;
63 u16 tsolen;
59 64
60 struct napi_struct napi; 65 struct napi_struct napi;
61 u32 napi_stop_idx; 66 u32 napi_stop_idx;