aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ethernet/sun/sunvnet.c
diff options
context:
space:
mode:
authorDavid L Stevens <david.stevens@oracle.com>2014-12-02 15:31:04 -0500
committerDavid S. Miller <davem@davemloft.net>2014-12-08 21:18:29 -0500
commit6d0ba919915f0117a651a1fb732f48529b2c4091 (patch)
tree5eb14d5137e6419774554770042685e6acf60b5b /drivers/net/ethernet/sun/sunvnet.c
parentd6732489f079ff65f8ea551392ccae469cba4653 (diff)
sunvnet: add VIO v1.7 and v1.8 support
This patch adds support for VIO v1.7 (extended descriptor format) and v1.8 (receive-side checksumming) to the sunvnet driver. Signed-off-by: David L Stevens <david.stevens@oracle.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/sun/sunvnet.c')
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c102
1 file changed, 92 insertions(+), 10 deletions(-)
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 62823fa488e8..7a8da56dcdf3 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -21,6 +21,7 @@
21#include <linux/icmpv6.h> 21#include <linux/icmpv6.h>
22#endif 22#endif
23 23
24#include <net/ip.h>
24#include <net/icmp.h> 25#include <net/icmp.h>
25#include <net/route.h> 26#include <net/route.h>
26 27
@@ -51,6 +52,8 @@ static int __vnet_tx_trigger(struct vnet_port *port, u32 start);
51 52
52/* Ordered from largest major to lowest */ 53/* Ordered from largest major to lowest */
53static struct vio_version vnet_versions[] = { 54static struct vio_version vnet_versions[] = {
55 { .major = 1, .minor = 8 },
56 { .major = 1, .minor = 7 },
54 { .major = 1, .minor = 6 }, 57 { .major = 1, .minor = 6 },
55 { .major = 1, .minor = 0 }, 58 { .major = 1, .minor = 0 },
56}; 59};
@@ -282,10 +285,42 @@ static struct sk_buff *alloc_and_align_skb(struct net_device *dev,
282 return skb; 285 return skb;
283} 286}
284 287
285static int vnet_rx_one(struct vnet_port *port, unsigned int len, 288static inline void vnet_fullcsum(struct sk_buff *skb)
286 struct ldc_trans_cookie *cookies, int ncookies) 289{
290 struct iphdr *iph = ip_hdr(skb);
291 int offset = skb_transport_offset(skb);
292
293 if (skb->protocol != htons(ETH_P_IP))
294 return;
295 if (iph->protocol != IPPROTO_TCP &&
296 iph->protocol != IPPROTO_UDP)
297 return;
298 skb->ip_summed = CHECKSUM_NONE;
299 skb->csum_level = 1;
300 skb->csum = 0;
301 if (iph->protocol == IPPROTO_TCP) {
302 struct tcphdr *ptcp = tcp_hdr(skb);
303
304 ptcp->check = 0;
305 skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
306 ptcp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
307 skb->len - offset, IPPROTO_TCP,
308 skb->csum);
309 } else if (iph->protocol == IPPROTO_UDP) {
310 struct udphdr *pudp = udp_hdr(skb);
311
312 pudp->check = 0;
313 skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
314 pudp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
315 skb->len - offset, IPPROTO_UDP,
316 skb->csum);
317 }
318}
319
320static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
287{ 321{
288 struct net_device *dev = port->vp->dev; 322 struct net_device *dev = port->vp->dev;
323 unsigned int len = desc->size;
289 unsigned int copy_len; 324 unsigned int copy_len;
290 struct sk_buff *skb; 325 struct sk_buff *skb;
291 int err; 326 int err;
@@ -307,7 +342,7 @@ static int vnet_rx_one(struct vnet_port *port, unsigned int len,
307 skb_put(skb, copy_len); 342 skb_put(skb, copy_len);
308 err = ldc_copy(port->vio.lp, LDC_COPY_IN, 343 err = ldc_copy(port->vio.lp, LDC_COPY_IN,
309 skb->data, copy_len, 0, 344 skb->data, copy_len, 0,
310 cookies, ncookies); 345 desc->cookies, desc->ncookies);
311 if (unlikely(err < 0)) { 346 if (unlikely(err < 0)) {
312 dev->stats.rx_frame_errors++; 347 dev->stats.rx_frame_errors++;
313 goto out_free_skb; 348 goto out_free_skb;
@@ -317,6 +352,28 @@ static int vnet_rx_one(struct vnet_port *port, unsigned int len,
317 skb_trim(skb, len); 352 skb_trim(skb, len);
318 skb->protocol = eth_type_trans(skb, dev); 353 skb->protocol = eth_type_trans(skb, dev);
319 354
355 if (vio_version_after_eq(&port->vio, 1, 8)) {
356 struct vio_net_dext *dext = vio_net_ext(desc);
357
358 if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) {
359 if (skb->protocol == ETH_P_IP) {
360 struct iphdr *iph = (struct iphdr *)skb->data;
361
362 iph->check = 0;
363 ip_send_check(iph);
364 }
365 }
366 if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) &&
367 skb->ip_summed == CHECKSUM_NONE)
368 vnet_fullcsum(skb);
369 if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) {
370 skb->ip_summed = CHECKSUM_PARTIAL;
371 skb->csum_level = 0;
372 if (dext->flags & VNET_PKT_HCK_FULLCKSUM_OK)
373 skb->csum_level = 1;
374 }
375 }
376
320 dev->stats.rx_packets++; 377 dev->stats.rx_packets++;
321 dev->stats.rx_bytes += len; 378 dev->stats.rx_bytes += len;
322 napi_gro_receive(&port->napi, skb); 379 napi_gro_receive(&port->napi, skb);
@@ -451,7 +508,7 @@ static int vnet_walk_rx_one(struct vnet_port *port,
451 desc->cookies[0].cookie_addr, 508 desc->cookies[0].cookie_addr,
452 desc->cookies[0].cookie_size); 509 desc->cookies[0].cookie_size);
453 510
454 err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies); 511 err = vnet_rx_one(port, desc);
455 if (err == -ECONNRESET) 512 if (err == -ECONNRESET)
456 return err; 513 return err;
457 desc->hdr.state = VIO_DESC_DONE; 514 desc->hdr.state = VIO_DESC_DONE;
@@ -940,8 +997,22 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, void **pstart,
940 if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP || 997 if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP ||
941 skb_tailroom(skb) < pad || 998 skb_tailroom(skb) < pad ||
942 skb_headroom(skb) < VNET_PACKET_SKIP) { 999 skb_headroom(skb) < VNET_PACKET_SKIP) {
1000 int offset;
1001
943 nskb = alloc_and_align_skb(skb->dev, skb->len); 1002 nskb = alloc_and_align_skb(skb->dev, skb->len);
944 skb_reserve(nskb, VNET_PACKET_SKIP); 1003 skb_reserve(nskb, VNET_PACKET_SKIP);
1004
1005 nskb->protocol = skb->protocol;
1006 offset = skb_mac_header(skb) - skb->data;
1007 skb_set_mac_header(nskb, offset);
1008 offset = skb_network_header(skb) - skb->data;
1009 skb_set_network_header(nskb, offset);
1010 offset = skb_transport_header(skb) - skb->data;
1011 skb_set_transport_header(nskb, offset);
1012
1013 nskb->csum_offset = skb->csum_offset;
1014 nskb->ip_summed = skb->ip_summed;
1015
945 if (skb_copy_bits(skb, 0, nskb->data, skb->len)) { 1016 if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
946 dev_kfree_skb(nskb); 1017 dev_kfree_skb(nskb);
947 dev_kfree_skb(skb); 1018 dev_kfree_skb(skb);
@@ -1078,6 +1149,16 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
1078 d->ncookies = port->tx_bufs[txi].ncookies; 1149 d->ncookies = port->tx_bufs[txi].ncookies;
1079 for (i = 0; i < d->ncookies; i++) 1150 for (i = 0; i < d->ncookies; i++)
1080 d->cookies[i] = port->tx_bufs[txi].cookies[i]; 1151 d->cookies[i] = port->tx_bufs[txi].cookies[i];
1152 if (vio_version_after_eq(&port->vio, 1, 7)) {
1153 struct vio_net_dext *dext = vio_net_ext(d);
1154
1155 memset(dext, 0, sizeof(*dext));
1156 if (vio_version_after_eq(&port->vio, 1, 8) &&
1157 !port->switch_port) {
1158 dext->flags |= VNET_PKT_HCK_IPV4_HDRCKSUM_OK;
1159 dext->flags |= VNET_PKT_HCK_FULLCKSUM_OK;
1160 }
1161 }
1081 1162
1082 /* This has to be a non-SMP write barrier because we are writing 1163 /* This has to be a non-SMP write barrier because we are writing
1083 * to memory which is shared with the peer LDOM. 1164 * to memory which is shared with the peer LDOM.
@@ -1370,15 +1451,17 @@ static void vnet_port_free_tx_bufs(struct vnet_port *port)
1370static int vnet_port_alloc_tx_ring(struct vnet_port *port) 1451static int vnet_port_alloc_tx_ring(struct vnet_port *port)
1371{ 1452{
1372 struct vio_dring_state *dr; 1453 struct vio_dring_state *dr;
1373 unsigned long len; 1454 unsigned long len, elen;
1374 int i, err, ncookies; 1455 int i, err, ncookies;
1375 void *dring; 1456 void *dring;
1376 1457
1377 dr = &port->vio.drings[VIO_DRIVER_TX_RING]; 1458 dr = &port->vio.drings[VIO_DRIVER_TX_RING];
1378 1459
1379 len = (VNET_TX_RING_SIZE * 1460 elen = sizeof(struct vio_net_desc) +
1380 (sizeof(struct vio_net_desc) + 1461 sizeof(struct ldc_trans_cookie) * 2;
1381 (sizeof(struct ldc_trans_cookie) * 2))); 1462 if (vio_version_after_eq(&port->vio, 1, 7))
1463 elen += sizeof(struct vio_net_dext);
1464 len = VNET_TX_RING_SIZE * elen;
1382 1465
1383 ncookies = VIO_MAX_RING_COOKIES; 1466 ncookies = VIO_MAX_RING_COOKIES;
1384 dring = ldc_alloc_exp_dring(port->vio.lp, len, 1467 dring = ldc_alloc_exp_dring(port->vio.lp, len,
@@ -1392,8 +1475,7 @@ static int vnet_port_alloc_tx_ring(struct vnet_port *port)
1392 } 1475 }
1393 1476
1394 dr->base = dring; 1477 dr->base = dring;
1395 dr->entry_size = (sizeof(struct vio_net_desc) + 1478 dr->entry_size = elen;
1396 (sizeof(struct ldc_trans_cookie) * 2));
1397 dr->num_entries = VNET_TX_RING_SIZE; 1479 dr->num_entries = VNET_TX_RING_SIZE;
1398 dr->prod = dr->cons = 0; 1480 dr->prod = dr->cons = 0;
1399 port->start_cons = true; /* need an initial trigger */ 1481 port->start_cons = true; /* need an initial trigger */