Diffstat (limited to 'drivers/s390/net/qeth_l3_main.c')
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 123
1 files changed, 77 insertions, 46 deletions
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index fea50bdc8f41..38071a0e0c31 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -19,15 +19,15 @@
 #include <linux/etherdevice.h>
 #include <linux/mii.h>
 #include <linux/ip.h>
-#include <linux/reboot.h>
+#include <linux/ipv6.h>
 #include <linux/inetdevice.h>
 #include <linux/igmp.h>
 
 #include <net/ip.h>
 #include <net/arp.h>
+#include <net/ip6_checksum.h>
 
 #include "qeth_l3.h"
-#include "qeth_core_offl.h"
 
 static int qeth_l3_set_offline(struct ccwgroup_device *);
 static int qeth_l3_recover(void *);
@@ -2577,12 +2577,63 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
 	}
 }
 
+static void qeth_tso_fill_header(struct qeth_card *card,
+		struct qeth_hdr *qhdr, struct sk_buff *skb)
+{
+	struct qeth_hdr_tso *hdr = (struct qeth_hdr_tso *)qhdr;
+	struct tcphdr *tcph = tcp_hdr(skb);
+	struct iphdr *iph = ip_hdr(skb);
+	struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+	/*fix header to TSO values ...*/
+	hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
+	/*set values which are fix for the first approach ...*/
+	hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
+	hdr->ext.imb_hdr_no = 1;
+	hdr->ext.hdr_type = 1;
+	hdr->ext.hdr_version = 1;
+	hdr->ext.hdr_len = 28;
+	/*insert non-fix values */
+	hdr->ext.mss = skb_shinfo(skb)->gso_size;
+	hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
+	hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
+				       sizeof(struct qeth_hdr_tso));
+	tcph->check = 0;
+	if (skb->protocol == ETH_P_IPV6) {
+		ip6h->payload_len = 0;
+		tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+					       0, IPPROTO_TCP, 0);
+	} else {
+		/*OSA want us to set these values ...*/
+		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+						 0, IPPROTO_TCP, 0);
+		iph->tot_len = 0;
+		iph->check = 0;
+	}
+}
+
+static void qeth_tx_csum(struct sk_buff *skb)
+{
+	__wsum csum;
+	int offset;
+
+	skb_set_transport_header(skb, skb->csum_start - skb_headroom(skb));
+	offset = skb->csum_start - skb_headroom(skb);
+	BUG_ON(offset >= skb_headlen(skb));
+	csum = skb_checksum(skb, offset, skb->len - offset, 0);
+
+	offset += skb->csum_offset;
+	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
+	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
+}
+
 static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	int rc;
 	u16 *tag;
 	struct qeth_hdr *hdr = NULL;
 	int elements_needed = 0;
+	int elems;
 	struct qeth_card *card = dev->ml_priv;
 	struct sk_buff *new_skb = NULL;
 	int ipv = qeth_get_ip_version(skb);
@@ -2591,8 +2642,8 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		[qeth_get_priority_queue(card, skb, ipv, cast_type)];
 	int tx_bytes = skb->len;
 	enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
-	struct qeth_eddp_context *ctx = NULL;
 	int data_offset = -1;
+	int nr_frags;
 
 	if ((card->info.type == QETH_CARD_TYPE_IQD) &&
 	    (skb->protocol != htons(ETH_P_IPV6)) &&
@@ -2615,6 +2666,12 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	if (skb_is_gso(skb))
 		large_send = card->options.large_send;
+	else
+		if (skb->ip_summed == CHECKSUM_PARTIAL) {
+			qeth_tx_csum(skb);
+			if (card->options.performance_stats)
+				card->perf_stats.tx_csum++;
+		}
 
 	if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) &&
 	    (skb_shinfo(skb)->nr_frags == 0)) {
@@ -2661,12 +2718,13 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		netif_stop_queue(dev);
 
 	/* fix hardware limitation: as long as we do not have sbal
-	 * chaining we can not send long frag lists so we temporary
-	 * switch to EDDP
+	 * chaining we can not send long frag lists
 	 */
 	if ((large_send == QETH_LARGE_SEND_TSO) &&
-	    ((skb_shinfo(new_skb)->nr_frags + 2) > 16))
-		large_send = QETH_LARGE_SEND_EDDP;
+	    ((skb_shinfo(new_skb)->nr_frags + 2) > 16)) {
+		if (skb_linearize(new_skb))
+			goto tx_drop;
+	}
 
 	if ((large_send == QETH_LARGE_SEND_TSO) &&
 	    (cast_type == RTN_UNSPEC)) {
@@ -2689,40 +2747,22 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 
-	if (large_send == QETH_LARGE_SEND_EDDP) {
-		/* new_skb is not owned by a socket so we use skb to get
-		 * the protocol
-		 */
-		ctx = qeth_eddp_create_context(card, new_skb, hdr,
-					       skb->sk->sk_protocol);
-		if (ctx == NULL) {
-			QETH_DBF_MESSAGE(2, "could not create eddp context\n");
-			goto tx_drop;
-		}
-	} else {
-		int elems = qeth_get_elements_no(card, (void *)hdr, new_skb,
+	elems = qeth_get_elements_no(card, (void *)hdr, new_skb,
 						 elements_needed);
 	if (!elems) {
 		if (data_offset >= 0)
 			kmem_cache_free(qeth_core_header_cache, hdr);
 		goto tx_drop;
-		}
-		elements_needed += elems;
-	}
-
-	if ((large_send == QETH_LARGE_SEND_NO) &&
-	    (new_skb->ip_summed == CHECKSUM_PARTIAL)) {
-		qeth_tx_csum(new_skb);
-		if (card->options.performance_stats)
-			card->perf_stats.tx_csum++;
 	}
+	elements_needed += elems;
+	nr_frags = skb_shinfo(new_skb)->nr_frags;
 
 	if (card->info.type != QETH_CARD_TYPE_IQD)
 		rc = qeth_do_send_packet(card, queue, new_skb, hdr,
-					 elements_needed, ctx);
+					 elements_needed);
 	else
 		rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
-					      elements_needed, ctx, data_offset, 0);
+					      elements_needed, data_offset, 0);
 
 	if (!rc) {
 		card->stats.tx_packets++;
@@ -2734,22 +2774,13 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 				card->perf_stats.large_send_bytes += tx_bytes;
 				card->perf_stats.large_send_cnt++;
 			}
-			if (skb_shinfo(new_skb)->nr_frags > 0) {
+			if (nr_frags) {
 				card->perf_stats.sg_skbs_sent++;
 				/* nr_frags + skb->data */
-				card->perf_stats.sg_frags_sent +=
-					skb_shinfo(new_skb)->nr_frags + 1;
+				card->perf_stats.sg_frags_sent += nr_frags + 1;
 			}
 		}
-
-		if (ctx != NULL) {
-			qeth_eddp_put_context(ctx);
-			dev_kfree_skb_any(new_skb);
-		}
 	} else {
-		if (ctx != NULL)
-			qeth_eddp_put_context(ctx);
-
 		if (data_offset >= 0)
 			kmem_cache_free(qeth_core_header_cache, hdr);
 
@@ -2844,7 +2875,7 @@ static int qeth_l3_ethtool_set_tso(struct net_device *dev, u32 data)
 	if (data) {
 		if (card->options.large_send == QETH_LARGE_SEND_NO) {
 			if (card->info.type == QETH_CARD_TYPE_IQD)
-				card->options.large_send = QETH_LARGE_SEND_EDDP;
+				return -EPERM;
 			else
 				card->options.large_send = QETH_LARGE_SEND_TSO;
 			dev->features |= NETIF_F_TSO;