author    Herbert Xu <herbert@gondor.apana.org.au>    2006-06-22 05:40:14 -0400
committer David S. Miller <davem@sunset.davemloft.net>    2006-06-23 05:07:29 -0400
commit    7967168cefdbc63bf332d6b1548eca7cd65ebbcc (patch)
tree      c45759149ae0acdc89d746e556a0ae278d11776d /drivers/s390/net/qeth_eddp.c
parent    d4828d85d188dc70ed172802e798d3978bb6e29e (diff)
[NET]: Merge TSO/UFO fields in sk_buff
Having separate fields in sk_buff for TSO/UFO (tso_size/ufo_size) is not going to scale if we add any more segmentation methods (e.g., DCCP), so let's merge them.

They were used to tell the protocol of a packet. This function has been subsumed by the new gso_type field, which is essentially a set of netdev feature bits (shifted by 16 bits) that are required to process a specific skb. As such it's easy to tell whether a given device can process a GSO skb: you just AND the gso_type field with the netdev's features field.

I've made gso_type a conjunction. The idea is that you have a base type (e.g., SKB_GSO_TCPV4) that can be modified further to support new features. For example, if we add a hardware TSO type that supports ECN, the device would declare NETIF_F_TSO | NETIF_F_TSO_ECN. All TSO packets with CWR set would have a gso_type of SKB_GSO_TCPV4 | SKB_GSO_TCPV4_ECN, while all other TSO packets would be SKB_GSO_TCPV4. This means that only the CWR packets need to be emulated in software.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
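A minimal sketch of the capability check the message describes (illustrative only: the helper name dev_can_gso and the explicit 16-bit shift are assumptions made for this sketch, not code introduced by the patch):

/*
 * gso_type holds the netdev feature bits shifted down by 16, so a device
 * can segment this skb in hardware iff every required feature bit is also
 * set in dev->features.
 */
static inline int dev_can_gso(struct net_device *dev, struct sk_buff *skb)
{
	unsigned long required =
		(unsigned long)skb_shinfo(skb)->gso_type << 16;

	return (dev->features & required) == required;
}

Under this scheme, a NIC advertising NETIF_F_TSO | NETIF_F_TSO_ECN would pass the check for both plain SKB_GSO_TCPV4 skbs and SKB_GSO_TCPV4 | SKB_GSO_TCPV4_ECN skbs, while a NIC advertising only NETIF_F_TSO would pass it for plain SKB_GSO_TCPV4 skbs and the CWR-marked ones would fall back to software emulation, as the message explains.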
Diffstat (limited to 'drivers/s390/net/qeth_eddp.c')
-rw-r--r--  drivers/s390/net/qeth_eddp.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c
index 0bab60a20309..38aad8321456 100644
--- a/drivers/s390/net/qeth_eddp.c
+++ b/drivers/s390/net/qeth_eddp.c
@@ -420,7 +420,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
 	}
 	tcph = eddp->skb->h.th;
 	while (eddp->skb_offset < eddp->skb->len) {
-		data_len = min((int)skb_shinfo(eddp->skb)->tso_size,
+		data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
 			(int)(eddp->skb->len - eddp->skb_offset));
 		/* prepare qdio hdr */
 		if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){
@@ -515,20 +515,20 @@ qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
 
 	QETH_DBF_TEXT(trace, 5, "eddpcanp");
 	/* can we put multiple skbs in one page? */
-	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len);
+	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len);
 	if (skbs_per_page > 1){
-		ctx->num_pages = (skb_shinfo(skb)->tso_segs + 1) /
+		ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) /
 			skbs_per_page + 1;
 		ctx->elements_per_skb = 1;
 	} else {
 		/* no -> how many elements per skb? */
-		ctx->elements_per_skb = (skb_shinfo(skb)->tso_size + hdr_len +
+		ctx->elements_per_skb = (skb_shinfo(skb)->gso_size + hdr_len +
 			PAGE_SIZE) >> PAGE_SHIFT;
 		ctx->num_pages = ctx->elements_per_skb *
-			(skb_shinfo(skb)->tso_segs + 1);
+			(skb_shinfo(skb)->gso_segs + 1);
 	}
 	ctx->num_elements = ctx->elements_per_skb *
-		(skb_shinfo(skb)->tso_segs + 1);
 }
 
 static inline struct qeth_eddp_context *
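For reference, the arithmetic in qeth_eddp_calc_num_pages works out as in this worked example (assuming PAGE_SIZE = 4096, gso_size = 1400, hdr_len = 66 and gso_segs = 44; these numbers are illustrative, not taken from the patch):

	skbs_per_page    = 4096 / (1400 + 66)  = 2    /* more than one segment fits per page */
	num_pages        = (44 + 1) / 2 + 1    = 23
	elements_per_skb = 1
	num_elements     = 1 * (44 + 1)        = 45

When gso_size + hdr_len exceeds half a page, the else branch is taken instead and elements_per_skb counts how many page-sized elements a single segment plus header needs. The only change in this hunk is that both quantities are now read from the merged gso_size/gso_segs fields rather than the removed tso_size/tso_segs.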