author     Frank Pavlic <pavlic@de.ibm.com>	2005-05-12 14:39:09 -0400
committer  Jeff Garzik <jgarzik@pobox.com>	2005-05-15 18:06:17 -0400
commit     05e08a2a297371564020f76d1bf8b3a931d5e1ed (patch)
tree       0aac8cafdec747a792c46dff1b934db642b0a054 /drivers/s390
parent     9a4558193662e933588ee53e0202c103a68c9366 (diff)
[PATCH] s390: qeth bug fixes
[patch 10/10] s390: qeth bug fixes.

From: Frank Pavlic <pavlic@de.ibm.com>

qeth network driver related changes:
 - due to OSA hardware changes in TCP Segmentation Offload support we
   are now able to pack TSO packets too; this fits naturally into the
   qeth design for buffer handling and for sending data.
 - remove skb_realloc_headroom from the sending path, since the
   hard_header_len value now provides enough headroom.
 - improve device recovery behaviour.
 - fix a bug in the Enhanced Device Driver Packing (EDDP) functionality.

Signed-off-by: Frank Pavlic <pavlic@de.ibm.com>
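(Editorial aside, not part of the patch: the headroom item above relies on the common Linux driver pattern of sizing dev->hard_header_len once at setup, so the transmit path can simply skb_push() the hardware header instead of calling skb_realloc_headroom() for every packet. The sketch below only illustrates that pattern; my_hw_hdr, my_setup() and my_xmit() are made-up names, not qeth code.)

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/string.h>

struct my_hw_hdr {		/* stand-in for a qeth_hdr_tso-sized header */
	__u8 bytes[48];
};

static void my_setup(struct net_device *dev)
{
	/* reserve room for the largest hardware header up front */
	dev->hard_header_len = sizeof(struct my_hw_hdr) + ETH_HLEN;
}

static int my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_hw_hdr *hdr;

	/* headroom was already reserved via hard_header_len above */
	if (skb_headroom(skb) < sizeof(struct my_hw_hdr))
		return -ENOMEM;
	hdr = (struct my_hw_hdr *) skb_push(skb, sizeof(struct my_hw_hdr));
	memset(hdr, 0, sizeof(*hdr));
	/* ... fill hdr and hand the skb to the device queue ... */
	return 0;
}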
Diffstat (limited to 'drivers/s390')
 -rw-r--r--  drivers/s390/net/Makefile    |    2
 -rw-r--r--  drivers/s390/net/qeth.h      |   41
 -rw-r--r--  drivers/s390/net/qeth_eddp.c |   40
 -rw-r--r--  drivers/s390/net/qeth_main.c |  145
 -rw-r--r--  drivers/s390/net/qeth_tso.c  |  256
 -rw-r--r--  drivers/s390/net/qeth_tso.h  |  193
 6 files changed, 273 insertions, 404 deletions
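(Editorial aside: for readers skimming the diffstat, the heart of the TSO change is the new qeth_hdr_ext_tso extension header added to qeth.h. The sketch below recaps how its per-packet fields are derived from an outgoing skb, following qeth_tso_fill_header() as moved into qeth_tso.h by this patch; the wrapper name example_fill_tso_ext() is invented, and the 2.6-era skb fields skb->nh.iph, skb->h.th and tso_size are used exactly as the driver does.)

static void example_fill_tso_ext(struct qeth_hdr_tso *hdr, struct sk_buff *skb)
{
	struct iphdr *iph = skb->nh.iph;
	struct tcphdr *tcph = skb->h.th;

	/* fixed part of the extension header, as set by the patch */
	hdr->ext.hdr_tot_len = sizeof(struct qeth_hdr_ext_tso);
	hdr->ext.imb_hdr_no = 1;
	hdr->ext.hdr_type = 1;
	hdr->ext.hdr_version = 1;
	hdr->ext.hdr_len = 28;
	/* per-packet values: MSS, L3+L4 header length, remaining payload */
	hdr->ext.mss = skb_shinfo(skb)->tso_size;
	hdr->ext.dg_hdr_len = iph->ihl * 4 + tcph->doff * 4;
	hdr->ext.payload_len = skb->len - hdr->ext.dg_hdr_len -
			       sizeof(struct qeth_hdr_tso);
}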
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
index 85b590c5701d..90d4d0ef3dd4 100644
--- a/drivers/s390/net/Makefile
+++ b/drivers/s390/net/Makefile
@@ -10,6 +10,6 @@ obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
 obj-$(CONFIG_CTC) += ctc.o fsm.o cu3088.o
 obj-$(CONFIG_LCS) += lcs.o cu3088.o
 obj-$(CONFIG_CLAW) += claw.o cu3088.o
-qeth-y := qeth_main.o qeth_mpc.o qeth_sys.o qeth_eddp.o qeth_tso.o
+qeth-y := qeth_main.o qeth_mpc.o qeth_sys.o qeth_eddp.o
 qeth-$(CONFIG_PROC_FS) += qeth_proc.o
 obj-$(CONFIG_QETH) += qeth.o
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h
index d13c105f74e6..a755b57db46b 100644
--- a/drivers/s390/net/qeth.h
+++ b/drivers/s390/net/qeth.h
@@ -24,7 +24,7 @@
 
 #include "qeth_mpc.h"
 
-#define VERSION_QETH_H "$Revision: 1.137 $"
+#define VERSION_QETH_H "$Revision: 1.139 $"
 
 #ifdef CONFIG_QETH_IPV6
 #define QETH_VERSION_IPV6 ":IPv6"
@@ -370,6 +370,25 @@ struct qeth_hdr {
 	} hdr;
 } __attribute__ ((packed));
 
+/*TCP Segmentation Offload header*/
+struct qeth_hdr_ext_tso {
+	__u16 hdr_tot_len;
+	__u8 imb_hdr_no;
+	__u8 reserved;
+	__u8 hdr_type;
+	__u8 hdr_version;
+	__u16 hdr_len;
+	__u32 payload_len;
+	__u16 mss;
+	__u16 dg_hdr_len;
+	__u8 padding[16];
+} __attribute__ ((packed));
+
+struct qeth_hdr_tso {
+	struct qeth_hdr hdr; /*hdr->hdr.l3.xxx*/
+	struct qeth_hdr_ext_tso ext;
+} __attribute__ ((packed));
+
 
 /* flags for qeth_hdr.flags */
 #define QETH_HDR_PASSTHRU 0x10
@@ -867,16 +886,6 @@ qeth_push_skb(struct qeth_card *card, struct sk_buff **skb, int size)
 	return hdr;
 }
 
-static inline int
-qeth_get_skb_data_len(struct sk_buff *skb)
-{
-	int len = skb->len;
-	int i;
-
-	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i)
-		len -= skb_shinfo(skb)->frags[i].size;
-	return len;
-}
 
 inline static int
 qeth_get_hlen(__u8 link_type)
@@ -885,19 +894,19 @@ qeth_get_hlen(__u8 link_type)
 	switch (link_type) {
 	case QETH_LINK_TYPE_HSTR:
 	case QETH_LINK_TYPE_LANE_TR:
-		return sizeof(struct qeth_hdr) + TR_HLEN;
+		return sizeof(struct qeth_hdr_tso) + TR_HLEN;
 	default:
 #ifdef CONFIG_QETH_VLAN
-		return sizeof(struct qeth_hdr) + VLAN_ETH_HLEN;
+		return sizeof(struct qeth_hdr_tso) + VLAN_ETH_HLEN;
 #else
-		return sizeof(struct qeth_hdr) + ETH_HLEN;
+		return sizeof(struct qeth_hdr_tso) + ETH_HLEN;
 #endif
 	}
 #else /* CONFIG_QETH_IPV6 */
 #ifdef CONFIG_QETH_VLAN
-	return sizeof(struct qeth_hdr) + VLAN_HLEN;
+	return sizeof(struct qeth_hdr_tso) + VLAN_HLEN;
 #else
-	return sizeof(struct qeth_hdr);
+	return sizeof(struct qeth_hdr_tso);
 #endif
 #endif /* CONFIG_QETH_IPV6 */
 }
diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c
index 45aa4a962daf..f94f1f25eec6 100644
--- a/drivers/s390/net/qeth_eddp.c
+++ b/drivers/s390/net/qeth_eddp.c
@@ -1,6 +1,6 @@
 /*
  *
- * linux/drivers/s390/net/qeth_eddp.c ($Revision: 1.12 $)
+ * linux/drivers/s390/net/qeth_eddp.c ($Revision: 1.13 $)
  *
  * Enhanced Device Driver Packing (EDDP) support for the qeth driver.
  *
@@ -8,7 +8,7 @@
  *
  * Author(s): Thomas Spatzier <tspat@de.ibm.com>
  *
- * $Revision: 1.12 $	 $Date: 2005/04/01 21:40:40 $
+ * $Revision: 1.13 $	 $Date: 2005/05/04 20:19:18 $
  *
  */
 #include <linux/config.h>
@@ -85,7 +85,7 @@ void
 qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
 {
 	struct qeth_eddp_context_reference *ref;
-	
+
 	QETH_DBF_TEXT(trace, 6, "eddprctx");
 	while (!list_empty(&buf->ctx_list)){
 		ref = list_entry(buf->ctx_list.next,
@@ -139,7 +139,7 @@ qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
139 "buffer!\n"); 139 "buffer!\n");
140 goto out; 140 goto out;
141 } 141 }
142 } 142 }
143 /* check if the whole next skb fits into current buffer */ 143 /* check if the whole next skb fits into current buffer */
144 if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) - 144 if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
145 buf->next_element_to_fill) 145 buf->next_element_to_fill)
@@ -152,7 +152,7 @@ qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
 		 * and increment ctx's refcnt */
 		must_refcnt = 1;
 		continue;
 	}
 	if (must_refcnt){
 		must_refcnt = 0;
 		if (qeth_eddp_buf_ref_context(buf, ctx)){
@@ -204,27 +204,27 @@ out:
 
 static inline void
 qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
-			      struct qeth_eddp_data *eddp)
+			      struct qeth_eddp_data *eddp, int data_len)
 {
 	u8 *page;
 	int page_remainder;
 	int page_offset;
-	int hdr_len;
+	int pkt_len;
 	struct qeth_eddp_element *element;
 
 	QETH_DBF_TEXT(trace, 5, "eddpcrsh");
 	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
 	page_offset = ctx->offset % PAGE_SIZE;
 	element = &ctx->elements[ctx->num_elements];
-	hdr_len = eddp->nhl + eddp->thl;
+	pkt_len = eddp->nhl + eddp->thl + data_len;
 	/* FIXME: layer2 and VLAN !!! */
 	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
-		hdr_len += ETH_HLEN;
+		pkt_len += ETH_HLEN;
 	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
-		hdr_len += VLAN_HLEN;
-	/* does complete header fit in current page ? */
+		pkt_len += VLAN_HLEN;
+	/* does complete packet fit in current page ? */
 	page_remainder = PAGE_SIZE - page_offset;
-	if (page_remainder < (sizeof(struct qeth_hdr) + hdr_len)){
+	if (page_remainder < (sizeof(struct qeth_hdr) + pkt_len)){
 		/* no -> go to start of next page */
 		ctx->offset += page_remainder;
 		page = ctx->pages[ctx->offset >> PAGE_SHIFT];
@@ -270,7 +270,7 @@ qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
 	int left_in_frag;
 	int copy_len;
 	u8 *src;
-	
+
 	QETH_DBF_TEXT(trace, 5, "eddpcdtc");
 	if (skb_shinfo(eddp->skb)->nr_frags == 0) {
 		memcpy(dst, eddp->skb->data + eddp->skb_offset, len);
@@ -281,7 +281,7 @@ qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
 	while (len > 0) {
 		if (eddp->frag < 0) {
 			/* we're in skb->data */
-			left_in_frag = qeth_get_skb_data_len(eddp->skb)
+			left_in_frag = (eddp->skb->len - eddp->skb->data_len)
 				- eddp->skb_offset;
 			src = eddp->skb->data + eddp->skb_offset;
 		} else {
@@ -413,7 +413,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
 	struct tcphdr *tcph;
 	int data_len;
 	u32 hcsum;
-	
+
 	QETH_DBF_TEXT(trace, 5, "eddpftcp");
 	eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
 	tcph = eddp->skb->h.th;
@@ -453,7 +453,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
 		else
 			hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
 		/* fill the next segment into the context */
-		qeth_eddp_create_segment_hdrs(ctx, eddp);
+		qeth_eddp_create_segment_hdrs(ctx, eddp, data_len);
 		qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
 		if (eddp->skb_offset >= eddp->skb->len)
 			break;
@@ -463,13 +463,13 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
 		eddp->th.tcp.h.seq += data_len;
 	}
 }
-	
+
 static inline int
 qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
 			   struct sk_buff *skb, struct qeth_hdr *qhdr)
 {
 	struct qeth_eddp_data *eddp = NULL;
-	
+
 	QETH_DBF_TEXT(trace, 5, "eddpficx");
 	/* create our segmentation headers and copy original headers */
 	if (skb->protocol == ETH_P_IP)
@@ -509,7 +509,7 @@ qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
 			 int hdr_len)
 {
 	int skbs_per_page;
-	
+
 	QETH_DBF_TEXT(trace, 5, "eddpcanp");
 	/* can we put multiple skbs in one page? */
 	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len);
@@ -589,7 +589,7 @@ qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
 			     struct qeth_hdr *qhdr)
 {
 	struct qeth_eddp_context *ctx = NULL;
-	
+
 	QETH_DBF_TEXT(trace, 5, "creddpct");
 	if (skb->protocol == ETH_P_IP)
 		ctx = qeth_eddp_create_context_generic(card, skb,
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index e18bcf9fb588..208127a5033a 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -1,6 +1,6 @@
 /*
  *
- * linux/drivers/s390/net/qeth_main.c ($Revision: 1.210 $)
+ * linux/drivers/s390/net/qeth_main.c ($Revision: 1.214 $)
  *
  * Linux on zSeries OSA Express and HiperSockets support
  *
@@ -12,7 +12,7 @@
  *		  Frank Pavlic (pavlic@de.ibm.com) and
  *		  Thomas Spatzier <tspat@de.ibm.com>
  *
- * $Revision: 1.210 $	 $Date: 2005/04/18 17:27:39 $
+ * $Revision: 1.214 $	 $Date: 2005/05/04 20:19:18 $
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -80,7 +80,7 @@ qeth_eyecatcher(void)
 #include "qeth_eddp.h"
 #include "qeth_tso.h"
 
-#define VERSION_QETH_C "$Revision: 1.210 $"
+#define VERSION_QETH_C "$Revision: 1.214 $"
 static const char *version = "qeth S/390 OSA-Express driver";
 
 /**
@@ -158,6 +158,9 @@ qeth_irq_tasklet(unsigned long);
 static int
 qeth_set_online(struct ccwgroup_device *);
 
+static int
+__qeth_set_online(struct ccwgroup_device *gdev, int recovery_mode);
+
 static struct qeth_ipaddr *
 qeth_get_addr_buffer(enum qeth_prot_versions);
 
@@ -510,10 +513,10 @@ qeth_irq_tasklet(unsigned long data)
 	wake_up(&card->wait_q);
 }
 
-static int qeth_stop_card(struct qeth_card *);
+static int qeth_stop_card(struct qeth_card *, int);
 
 static int
-qeth_set_offline(struct ccwgroup_device *cgdev)
+__qeth_set_offline(struct ccwgroup_device *cgdev, int recovery_mode)
 {
 	struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
 	int rc = 0;
@@ -523,7 +526,7 @@ qeth_set_offline(struct ccwgroup_device *cgdev)
 	QETH_DBF_HEX(setup, 3, &card, sizeof(void *));
 
 	recover_flag = card->state;
-	if (qeth_stop_card(card) == -ERESTARTSYS){
+	if (qeth_stop_card(card, recovery_mode) == -ERESTARTSYS){
 		PRINT_WARN("Stopping card %s interrupted by user!\n",
 			   CARD_BUS_ID(card));
 		return -ERESTARTSYS;
@@ -540,6 +543,12 @@ qeth_set_offline(struct ccwgroup_device *cgdev)
 }
 
 static int
+qeth_set_offline(struct ccwgroup_device *cgdev)
+{
+	return __qeth_set_offline(cgdev, 0);
+}
+
+static int
 qeth_wait_for_threads(struct qeth_card *card, unsigned long threads);
 
 
@@ -953,8 +962,8 @@ qeth_recover(void *ptr)
 	PRINT_WARN("Recovery of device %s started ...\n",
 		   CARD_BUS_ID(card));
 	card->use_hard_stop = 1;
-	qeth_set_offline(card->gdev);
-	rc = qeth_set_online(card->gdev);
+	__qeth_set_offline(card->gdev,1);
+	rc = __qeth_set_online(card->gdev,1);
 	if (!rc)
 		PRINT_INFO("Device %s successfully recovered!\n",
 			   CARD_BUS_ID(card));
@@ -3786,16 +3795,12 @@ static inline int
 qeth_prepare_skb(struct qeth_card *card, struct sk_buff **skb,
 		 struct qeth_hdr **hdr, int ipv)
 {
-	int rc = 0;
 #ifdef CONFIG_QETH_VLAN
 	u16 *tag;
 #endif
 
 	QETH_DBF_TEXT(trace, 6, "prepskb");
 
-	rc = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr));
-	if (rc)
-		return rc;
 #ifdef CONFIG_QETH_VLAN
 	if (card->vlangrp && vlan_tx_tag_present(*skb) &&
 	    ((ipv == 6) || card->options.layer2) ) {
@@ -3977,25 +3982,28 @@ qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
 
 static inline void
 __qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer,
-		   int *next_element_to_fill)
+		   int is_tso, int *next_element_to_fill)
 {
 	int length = skb->len;
 	int length_here;
 	int element;
 	char *data;
-	int first_lap = 1;
+	int first_lap ;
 
 	element = *next_element_to_fill;
 	data = skb->data;
+	first_lap = (is_tso == 0 ? 1 : 0);
+
 	while (length > 0) {
 		/* length_here is the remaining amount of data in this page */
 		length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
 		if (length < length_here)
 			length_here = length;
+
 		buffer->element[element].addr = data;
 		buffer->element[element].length = length_here;
 		length -= length_here;
-		if (!length){
+		if (!length) {
 			if (first_lap)
 				buffer->element[element].flags = 0;
 			else
@@ -4022,17 +4030,35 @@ qeth_fill_buffer(struct qeth_qdio_out_q *queue,
 		 struct sk_buff *skb)
 {
 	struct qdio_buffer *buffer;
-	int flush_cnt = 0;
+	struct qeth_hdr_tso *hdr;
+	int flush_cnt = 0, hdr_len, large_send = 0;
 
 	QETH_DBF_TEXT(trace, 6, "qdfillbf");
+
 	buffer = buf->buffer;
 	atomic_inc(&skb->users);
 	skb_queue_tail(&buf->skb_list, skb);
+
+	hdr = (struct qeth_hdr_tso *) skb->data;
+	/*check first on TSO ....*/
+	if (hdr->hdr.hdr.l3.id == QETH_HEADER_TYPE_TSO) {
+		int element = buf->next_element_to_fill;
+
+		hdr_len = sizeof(struct qeth_hdr_tso) + hdr->ext.dg_hdr_len;
+		/*fill first buffer entry only with header information */
+		buffer->element[element].addr = skb->data;
+		buffer->element[element].length = hdr_len;
+		buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
+		buf->next_element_to_fill++;
+		skb->data += hdr_len;
+		skb->len -= hdr_len;
+		large_send = 1;
+	}
 	if (skb_shinfo(skb)->nr_frags == 0)
-		__qeth_fill_buffer(skb, buffer,
+		__qeth_fill_buffer(skb, buffer, large_send,
 				   (int *)&buf->next_element_to_fill);
 	else
-		__qeth_fill_buffer_frag(skb, buffer, 0,
+		__qeth_fill_buffer_frag(skb, buffer, large_send,
 					(int *)&buf->next_element_to_fill);
 
 	if (!queue->do_pack) {
@@ -4225,6 +4251,25 @@ out:
 }
 
 static inline int
+qeth_get_elements_no(struct qeth_card *card, void *hdr, struct sk_buff *skb)
+{
+	int elements_needed = 0;
+
+	if (skb_shinfo(skb)->nr_frags > 0) {
+		elements_needed = (skb_shinfo(skb)->nr_frags + 1);
+	}
+	if (elements_needed == 0 )
+		elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE)
+			+ skb->len) >> PAGE_SHIFT);
+	if (elements_needed > QETH_MAX_BUFFER_ELEMENTS(card)){
+		PRINT_ERR("qeth_do_send_packet: invalid size of "
+			  "IP packet. Discarded.");
+		return 0;
+	}
+	return elements_needed;
+}
+
+static inline int
 qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
 {
 	int ipv = 0;
@@ -4266,19 +4311,25 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
 	if (skb_shinfo(skb)->tso_size)
 		large_send = card->options.large_send;
 
-	if ((rc = qeth_prepare_skb(card, &skb, &hdr, ipv))){
-		QETH_DBF_TEXT_(trace, 4, "pskbe%d", rc);
-		return rc;
-	}
 	/*are we able to do TSO ? If so ,prepare and send it from here */
 	if ((large_send == QETH_LARGE_SEND_TSO) &&
 	    (cast_type == RTN_UNSPEC)) {
-		rc = qeth_tso_send_packet(card, skb, queue,
-					  ipv, cast_type);
-		goto do_statistics;
+		rc = qeth_tso_prepare_packet(card, skb, ipv, cast_type);
+		if (rc) {
+			card->stats.tx_dropped++;
+			card->stats.tx_errors++;
+			dev_kfree_skb_any(skb);
+			return NETDEV_TX_OK;
+		}
+		elements_needed++;
+	} else {
+		if ((rc = qeth_prepare_skb(card, &skb, &hdr, ipv))) {
+			QETH_DBF_TEXT_(trace, 4, "pskbe%d", rc);
+			return rc;
+		}
+		qeth_fill_header(card, hdr, skb, ipv, cast_type);
 	}
 
-	qeth_fill_header(card, hdr, skb, ipv, cast_type);
 	if (large_send == QETH_LARGE_SEND_EDDP) {
 		ctx = qeth_eddp_create_context(card, skb, hdr);
 		if (ctx == NULL) {
@@ -4286,7 +4337,7 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
 			return -EINVAL;
 		}
 	} else {
-		elements_needed = qeth_get_elements_no(card,(void*) hdr, skb);
+		elements_needed += qeth_get_elements_no(card,(void*) hdr, skb);
 		if (!elements_needed)
 			return -EINVAL;
 	}
@@ -4297,12 +4348,12 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
 	else
 		rc = qeth_do_send_packet_fast(card, queue, skb, hdr,
 					      elements_needed, ctx);
-do_statistics:
 	if (!rc){
 		card->stats.tx_packets++;
 		card->stats.tx_bytes += skb->len;
 #ifdef CONFIG_QETH_PERF_STATS
-		if (skb_shinfo(skb)->tso_size) {
+		if (skb_shinfo(skb)->tso_size &&
+		    !(large_send == QETH_LARGE_SEND_NO)) {
 			card->perf_stats.large_send_bytes += skb->len;
 			card->perf_stats.large_send_cnt++;
 		}
@@ -7199,7 +7250,7 @@ qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
 }
 
 static int
-qeth_stop_card(struct qeth_card *card)
+qeth_stop_card(struct qeth_card *card, int recovery_mode)
 {
 	int rc = 0;
 
@@ -7212,9 +7263,13 @@ qeth_stop_card(struct qeth_card *card)
 	if (card->read.state == CH_STATE_UP &&
 	    card->write.state == CH_STATE_UP &&
 	    (card->state == CARD_STATE_UP)) {
-		rtnl_lock();
-		dev_close(card->dev);
-		rtnl_unlock();
+		if(recovery_mode) {
+			qeth_stop(card->dev);
+		} else {
+			rtnl_lock();
+			dev_close(card->dev);
+			rtnl_unlock();
+		}
 		if (!card->use_hard_stop) {
 			__u8 *mac = &card->dev->dev_addr[0];
 			rc = qeth_layer2_send_delmac(card, mac);
@@ -7386,13 +7441,17 @@ qeth_register_netdev(struct qeth_card *card)
 }
 
 static void
-qeth_start_again(struct qeth_card *card)
+qeth_start_again(struct qeth_card *card, int recovery_mode)
 {
 	QETH_DBF_TEXT(setup ,2, "startag");
 
-	rtnl_lock();
-	dev_open(card->dev);
-	rtnl_unlock();
+	if(recovery_mode) {
+		qeth_open(card->dev);
+	} else {
+		rtnl_lock();
+		dev_open(card->dev);
+		rtnl_unlock();
+	}
 	/* this also sets saved unicast addresses */
 	qeth_set_multicast_list(card->dev);
 }
@@ -7449,7 +7508,7 @@ static void qeth_make_parameters_consistent(struct qeth_card *card)
 
 
 static int
-qeth_set_online(struct ccwgroup_device *gdev)
+__qeth_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 {
 	struct qeth_card *card = gdev->dev.driver_data;
 	int rc = 0;
@@ -7509,12 +7568,12 @@ qeth_set_online(struct ccwgroup_device *gdev)
 	 * we can also use this state for recovery purposes*/
 	qeth_set_allowed_threads(card, 0xffffffff, 0);
 	if (recover_flag == CARD_STATE_RECOVER)
-		qeth_start_again(card);
+		qeth_start_again(card, recovery_mode);
 	qeth_notify_processes();
 	return 0;
 out_remove:
 	card->use_hard_stop = 1;
-	qeth_stop_card(card);
+	qeth_stop_card(card, 0);
 	ccw_device_set_offline(CARD_DDEV(card));
 	ccw_device_set_offline(CARD_WDEV(card));
 	ccw_device_set_offline(CARD_RDEV(card));
@@ -7525,6 +7584,12 @@ out_remove:
 	return -ENODEV;
 }
 
+static int
+qeth_set_online(struct ccwgroup_device *gdev)
+{
+	return __qeth_set_online(gdev, 0);
+}
+
 static struct ccw_device_id qeth_ids[] = {
 	{CCW_DEVICE(0x1731, 0x01), driver_info:QETH_CARD_TYPE_OSAE},
 	{CCW_DEVICE(0x1731, 0x05), driver_info:QETH_CARD_TYPE_IQD},
diff --git a/drivers/s390/net/qeth_tso.c b/drivers/s390/net/qeth_tso.c
deleted file mode 100644
index 4e58f19cb71c..000000000000
--- a/drivers/s390/net/qeth_tso.c
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- * linux/drivers/s390/net/qeth_tso.c ($Revision: 1.7 $)
- *
- * Header file for qeth TCP Segmentation Offload support.
- *
- * Copyright 2004 IBM Corporation
- *
- * Author(s): Frank Pavlic <pavlic@de.ibm.com>
- *
- * $Revision: 1.7 $	 $Date: 2005/04/01 21:40:41 $
- *
- */
-
-#include <linux/skbuff.h>
-#include <linux/tcp.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <net/ip6_checksum.h>
-#include "qeth.h"
-#include "qeth_mpc.h"
-#include "qeth_tso.h"
-
-/**
- * skb already partially prepared
- * classic qdio header in skb->data
- * */
-static inline struct qeth_hdr_tso *
-qeth_tso_prepare_skb(struct qeth_card *card, struct sk_buff **skb)
-{
-	int rc = 0;
-
-	QETH_DBF_TEXT(trace, 5, "tsoprsk");
-	rc = qeth_realloc_headroom(card, skb,sizeof(struct qeth_hdr_ext_tso));
-	if (rc)
-		return NULL;
-
-	return qeth_push_skb(card, skb, sizeof(struct qeth_hdr_ext_tso));
-}
-
-/**
- * fill header for a TSO packet
- */
-static inline void
-qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb)
-{
-	struct qeth_hdr_tso *hdr;
-	struct tcphdr *tcph;
-	struct iphdr *iph;
-
-	QETH_DBF_TEXT(trace, 5, "tsofhdr");
-
-	hdr = (struct qeth_hdr_tso *) skb->data;
-	iph = skb->nh.iph;
-	tcph = skb->h.th;
-	/*fix header to TSO values ...*/
-	hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
-	/*set values which are fix for the first approach ...*/
-	hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
-	hdr->ext.imb_hdr_no = 1;
-	hdr->ext.hdr_type = 1;
-	hdr->ext.hdr_version = 1;
-	hdr->ext.hdr_len = 28;
-	/*insert non-fix values */
-	hdr->ext.mss = skb_shinfo(skb)->tso_size;
-	hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
-	hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
-				       sizeof(struct qeth_hdr_tso));
-}
-
-/**
- * change some header values as requested by hardware
- */
-static inline void
-qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb)
-{
-	struct iphdr *iph;
-	struct ipv6hdr *ip6h;
-	struct tcphdr *tcph;
-
-	iph = skb->nh.iph;
-	ip6h = skb->nh.ipv6h;
-	tcph = skb->h.th;
-
-	tcph->check = 0;
-	if (skb->protocol == ETH_P_IPV6) {
-		ip6h->payload_len = 0;
-		tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
-					       0, IPPROTO_TCP, 0);
-		return;
-	}
-	/*OSA want us to set these values ...*/
-	tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-					 0, IPPROTO_TCP, 0);
-	iph->tot_len = 0;
-	iph->check = 0;
-}
-
-static inline struct qeth_hdr_tso *
-qeth_tso_prepare_packet(struct qeth_card *card, struct sk_buff *skb,
-			int ipv, int cast_type)
-{
-	struct qeth_hdr_tso *hdr;
-	int rc = 0;
-
-	QETH_DBF_TEXT(trace, 5, "tsoprep");
-
-	/*get headroom for tso qdio header */
-	hdr = (struct qeth_hdr_tso *) qeth_tso_prepare_skb(card, &skb);
-	if (hdr == NULL) {
-		QETH_DBF_TEXT_(trace, 4, "2err%d", rc);
-		return NULL;
-	}
-	memset(hdr, 0, sizeof(struct qeth_hdr_tso));
-	/*fill first 32 bytes of qdio header as used
-	 *FIXME: TSO has two struct members
-	 * with different names but same size
-	 * */
-	qeth_fill_header(card, &hdr->hdr, skb, ipv, cast_type);
-	qeth_tso_fill_header(card, skb);
-	qeth_tso_set_tcpip_header(card, skb);
-	return hdr;
-}
-
-static inline int
-qeth_tso_get_queue_buffer(struct qeth_qdio_out_q *queue)
-{
-	struct qeth_qdio_out_buffer *buffer;
-	int flush_cnt = 0;
-
-	QETH_DBF_TEXT(trace, 5, "tsobuf");
-
-	/* force to non-packing*/
-	if (queue->do_pack)
-		queue->do_pack = 0;
-	buffer = &queue->bufs[queue->next_buf_to_fill];
-	/* get a new buffer if current is already in use*/
-	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
-	    (buffer->next_element_to_fill > 0)) {
-		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
-		queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
-			QDIO_MAX_BUFFERS_PER_Q;
-		flush_cnt++;
-	}
-	return flush_cnt;
-}
-
-
-static inline int
-qeth_tso_fill_buffer(struct qeth_qdio_out_buffer *buf,
-		     struct sk_buff *skb)
-{
-	int length, length_here, element;
-	int hdr_len;
-	struct qdio_buffer *buffer;
-	struct qeth_hdr_tso *hdr;
-	char *data;
-
-	QETH_DBF_TEXT(trace, 3, "tsfilbuf");
-
-	/*increment user count and queue skb ...*/
-	atomic_inc(&skb->users);
-	skb_queue_tail(&buf->skb_list, skb);
-
-	/*initialize all variables...*/
-	buffer = buf->buffer;
-	hdr = (struct qeth_hdr_tso *)skb->data;
-	hdr_len = sizeof(struct qeth_hdr_tso) + hdr->ext.dg_hdr_len;
-	data = skb->data + hdr_len;
-	length = skb->len - hdr_len;
-	element = buf->next_element_to_fill;
-	/*fill first buffer entry only with header information */
-	buffer->element[element].addr = skb->data;
-	buffer->element[element].length = hdr_len;
-	buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG;
-	buf->next_element_to_fill++;
-	/*check if we have frags ...*/
-	if (skb_shinfo(skb)->nr_frags > 0) {
-		skb->len = length;
-		skb->data = data;
-		__qeth_fill_buffer_frag(skb, buffer,1,
-			(int *)&buf->next_element_to_fill);
-		goto out;
-	}
-
-	/*... if not, use this */
-	element++;
-	while (length > 0) {
-		/* length_here is the remaining amount of data in this page */
-		length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
-		if (length < length_here)
-			length_here = length;
-		buffer->element[element].addr = data;
-		buffer->element[element].length = length_here;
-		length -= length_here;
-		if (!length)
-			buffer->element[element].flags =
-				SBAL_FLAGS_LAST_FRAG;
-		else
-			buffer->element[element].flags =
-				SBAL_FLAGS_MIDDLE_FRAG;
-		data += length_here;
-		element++;
-	}
-	buf->next_element_to_fill = element;
-out:
-	/*prime buffer now ...*/
-	atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
-	return 1;
-}
-
-int
-qeth_tso_send_packet(struct qeth_card *card, struct sk_buff *skb,
-		     struct qeth_qdio_out_q *queue, int ipv, int cast_type)
-{
-	int flush_cnt = 0;
-	struct qeth_hdr_tso *hdr;
-	struct qeth_qdio_out_buffer *buffer;
-	int start_index;
-
-	QETH_DBF_TEXT(trace, 3, "tsosend");
-
-	if (!(hdr = qeth_tso_prepare_packet(card, skb, ipv, cast_type)))
-		return -ENOMEM;
-	/*check if skb fits in one SBAL ...*/
-	if (!(qeth_get_elements_no(card, (void*)hdr, skb)))
-		return -EINVAL;
-	/*lock queue, force switching to non-packing and send it ...*/
-	while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
-				       QETH_OUT_Q_LOCKED,
-				       &queue->state));
-	start_index = queue->next_buf_to_fill;
-	buffer = &queue->bufs[queue->next_buf_to_fill];
-	/*check if card is too busy ...*/
-	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){
-		card->stats.tx_dropped++;
-		goto out;
-	}
-	/*let's force to non-packing and get a new SBAL*/
-	flush_cnt += qeth_tso_get_queue_buffer(queue);
-	buffer = &queue->bufs[queue->next_buf_to_fill];
-	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
-		card->stats.tx_dropped++;
-		goto out;
-	}
-	flush_cnt += qeth_tso_fill_buffer(buffer, skb);
-	queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
-		QDIO_MAX_BUFFERS_PER_Q;
-out:
-	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
-	if (flush_cnt)
-		qeth_flush_buffers(queue, 0, start_index, flush_cnt);
-	/*do some statistics */
-	card->stats.tx_packets++;
-	card->stats.tx_bytes += skb->len;
-	return 0;
-}
diff --git a/drivers/s390/net/qeth_tso.h b/drivers/s390/net/qeth_tso.h
index ff585ae49b6c..ad33e6f466f1 100644
--- a/drivers/s390/net/qeth_tso.h
+++ b/drivers/s390/net/qeth_tso.h
@@ -1,5 +1,5 @@
 /*
- * linux/drivers/s390/net/qeth_tso.h ($Revision: 1.5 $)
+ * linux/drivers/s390/net/qeth_tso.h ($Revision: 1.7 $)
  *
  * Header file for qeth TCP Segmentation Offload support.
  *
@@ -7,97 +7,148 @@
  *
  * Author(s): Frank Pavlic <pavlic@de.ibm.com>
  *
- * $Revision: 1.5 $	 $Date: 2005/04/01 21:40:41 $
+ * $Revision: 1.7 $	 $Date: 2005/05/04 20:19:18 $
  *
  */
 #ifndef __QETH_TSO_H__
 #define __QETH_TSO_H__
 
+#include <linux/skbuff.h>
+#include <linux/tcp.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
+#include "qeth.h"
+#include "qeth_mpc.h"
 
-extern int
-qeth_tso_send_packet(struct qeth_card *, struct sk_buff *,
-		     struct qeth_qdio_out_q *, int , int);
 
-struct qeth_hdr_ext_tso {
-	__u16 hdr_tot_len;
-	__u8 imb_hdr_no;
-	__u8 reserved;
-	__u8 hdr_type;
-	__u8 hdr_version;
-	__u16 hdr_len;
-	__u32 payload_len;
-	__u16 mss;
-	__u16 dg_hdr_len;
-	__u8 padding[16];
-} __attribute__ ((packed));
+static inline struct qeth_hdr_tso *
+qeth_tso_prepare_skb(struct qeth_card *card, struct sk_buff **skb)
+{
+	QETH_DBF_TEXT(trace, 5, "tsoprsk");
+	return qeth_push_skb(card, skb, sizeof(struct qeth_hdr_tso));
+}
+
+/**
+ * fill header for a TSO packet
+ */
+static inline void
+qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb)
+{
+	struct qeth_hdr_tso *hdr;
+	struct tcphdr *tcph;
+	struct iphdr *iph;
 
-struct qeth_hdr_tso {
-	struct qeth_hdr hdr; /*hdr->hdr.l3.xxx*/
-	struct qeth_hdr_ext_tso ext;
-} __attribute__ ((packed));
+	QETH_DBF_TEXT(trace, 5, "tsofhdr");
+
+	hdr = (struct qeth_hdr_tso *) skb->data;
+	iph = skb->nh.iph;
+	tcph = skb->h.th;
+	/*fix header to TSO values ...*/
+	hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
+	/*set values which are fix for the first approach ...*/
+	hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
+	hdr->ext.imb_hdr_no = 1;
+	hdr->ext.hdr_type = 1;
+	hdr->ext.hdr_version = 1;
+	hdr->ext.hdr_len = 28;
+	/*insert non-fix values */
+	hdr->ext.mss = skb_shinfo(skb)->tso_size;
+	hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
+	hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
+				       sizeof(struct qeth_hdr_tso));
+}
+
+/**
+ * change some header values as requested by hardware
+ */
+static inline void
+qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb)
+{
+	struct iphdr *iph;
+	struct ipv6hdr *ip6h;
+	struct tcphdr *tcph;
+
+	iph = skb->nh.iph;
+	ip6h = skb->nh.ipv6h;
+	tcph = skb->h.th;
+
+	tcph->check = 0;
+	if (skb->protocol == ETH_P_IPV6) {
+		ip6h->payload_len = 0;
+		tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+					       0, IPPROTO_TCP, 0);
+		return;
+	}
+	/*OSA want us to set these values ...*/
+	tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+					 0, IPPROTO_TCP, 0);
+	iph->tot_len = 0;
+	iph->check = 0;
+}
 
-/*some helper functions*/
 static inline int
-qeth_get_elements_no(struct qeth_card *card, void *hdr, struct sk_buff *skb)
+qeth_tso_prepare_packet(struct qeth_card *card, struct sk_buff *skb,
+			int ipv, int cast_type)
 {
-	int elements_needed = 0;
+	struct qeth_hdr_tso *hdr;
 
-	if (skb_shinfo(skb)->nr_frags > 0)
-		elements_needed = (skb_shinfo(skb)->nr_frags + 1);
-	if (elements_needed == 0 )
-		elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE)
-					+ skb->len) >> PAGE_SHIFT);
-	if (elements_needed > QETH_MAX_BUFFER_ELEMENTS(card)){
-		PRINT_ERR("qeth_do_send_packet: invalid size of "
-			  "IP packet. Discarded.");
-		return 0;
-	}
-	return elements_needed;
+	QETH_DBF_TEXT(trace, 5, "tsoprep");
+
+	hdr = (struct qeth_hdr_tso *) qeth_tso_prepare_skb(card, &skb);
+	if (hdr == NULL) {
+		QETH_DBF_TEXT(trace, 4, "tsoperr");
+		return -ENOMEM;
+	}
+	memset(hdr, 0, sizeof(struct qeth_hdr_tso));
+	/*fill first 32 bytes of qdio header as used
+	 *FIXME: TSO has two struct members
+	 * with different names but same size
+	 * */
+	qeth_fill_header(card, &hdr->hdr, skb, ipv, cast_type);
+	qeth_tso_fill_header(card, skb);
+	qeth_tso_set_tcpip_header(card, skb);
+	return 0;
 }
 
 static inline void
 __qeth_fill_buffer_frag(struct sk_buff *skb, struct qdio_buffer *buffer,
 			int is_tso, int *next_element_to_fill)
 {
-	int length = skb->len;
 	struct skb_frag_struct *frag;
 	int fragno;
 	unsigned long addr;
-	int element;
-	int first_lap = 1;
-
-	fragno = skb_shinfo(skb)->nr_frags; /* start with last frag */
-	element = *next_element_to_fill + fragno;
-	while (length > 0) {
-		if (fragno > 0) {
-			frag = &skb_shinfo(skb)->frags[fragno - 1];
-			addr = (page_to_pfn(frag->page) << PAGE_SHIFT) +
-				frag->page_offset;
-			buffer->element[element].addr = (char *)addr;
-			buffer->element[element].length = frag->size;
-			length -= frag->size;
-			if (first_lap)
-				buffer->element[element].flags =
-					SBAL_FLAGS_LAST_FRAG;
-			else
-				buffer->element[element].flags =
-					SBAL_FLAGS_MIDDLE_FRAG;
-		} else {
-			buffer->element[element].addr = skb->data;
-			buffer->element[element].length = length;
-			length = 0;
-			if (is_tso)
-				buffer->element[element].flags =
-					SBAL_FLAGS_MIDDLE_FRAG;
-			else
-				buffer->element[element].flags =
-					SBAL_FLAGS_FIRST_FRAG;
-		}
-		element--;
-		fragno--;
-		first_lap = 0;
-	}
-	*next_element_to_fill += skb_shinfo(skb)->nr_frags + 1;
+	int element, cnt, dlen;
+
+	fragno = skb_shinfo(skb)->nr_frags;
+	element = *next_element_to_fill;
+	dlen = 0;
+
+	if (is_tso)
+		buffer->element[element].flags =
+			SBAL_FLAGS_MIDDLE_FRAG;
+	else
+		buffer->element[element].flags =
+			SBAL_FLAGS_FIRST_FRAG;
+	if ( (dlen = (skb->len - skb->data_len)) ) {
+		buffer->element[element].addr = skb->data;
+		buffer->element[element].length = dlen;
+		element++;
+	}
+	for (cnt = 0; cnt < fragno; cnt++) {
+		frag = &skb_shinfo(skb)->frags[cnt];
+		addr = (page_to_pfn(frag->page) << PAGE_SHIFT) +
+			frag->page_offset;
+		buffer->element[element].addr = (char *)addr;
+		buffer->element[element].length = frag->size;
+		if (cnt < (fragno - 1))
+			buffer->element[element].flags =
+				SBAL_FLAGS_MIDDLE_FRAG;
+		else
+			buffer->element[element].flags =
+				SBAL_FLAGS_LAST_FRAG;
+		element++;
+	}
+	*next_element_to_fill = element;
 }
-
 #endif /* __QETH_TSO_H__ */