author     Frank Blaschka <frank.blaschka@de.ibm.com>  2009-03-24 16:57:16 -0400
committer  David S. Miller <davem@davemloft.net>       2009-03-25 03:06:21 -0400
commit     64ef8957986f6a04f61e7c95fa6ffeb3a86a6661 (patch)
tree       e24cd2b2fba794c83e297774b5500adfbf65f734 /drivers
parent     f61a0d0538ca62547a127fd270d9f3c6e713027f (diff)
qeth: remove EDDP
Performance measurements showed that EDDP does not lower CPU costs but
increases them, so we remove the EDDP code from the qeth driver.
Signed-off-by: Frank Blaschka <frank.blaschka@de.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/s390/net/Makefile          |   2
-rw-r--r--  drivers/s390/net/qeth_core.h       |   7
-rw-r--r--  drivers/s390/net/qeth_core_main.c  |  99
-rw-r--r--  drivers/s390/net/qeth_core_offl.c  | 699
-rw-r--r--  drivers/s390/net/qeth_core_offl.h  |  76
-rw-r--r--  drivers/s390/net/qeth_core_sys.c   |   4
-rw-r--r--  drivers/s390/net/qeth_l2_main.c    |  81
-rw-r--r--  drivers/s390/net/qeth_l3_main.c    | 123
8 files changed, 111 insertions, 980 deletions
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
index 6382c04d2bdf..96eddb3b1d08 100644
--- a/drivers/s390/net/Makefile
+++ b/drivers/s390/net/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
 obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
 obj-$(CONFIG_LCS) += lcs.o cu3088.o
 obj-$(CONFIG_CLAW) += claw.o cu3088.o
-qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o qeth_core_offl.o
+qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o
 obj-$(CONFIG_QETH) += qeth.o
 qeth_l2-y += qeth_l2_main.o
 obj-$(CONFIG_QETH_L2) += qeth_l2.o
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index fd34f63dc232..447e1d19581a 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -404,7 +404,6 @@ struct qeth_qdio_q {
 /* possible types of qeth large_send support */
 enum qeth_large_send_types {
 	QETH_LARGE_SEND_NO,
-	QETH_LARGE_SEND_EDDP,
 	QETH_LARGE_SEND_TSO,
 };
 
@@ -839,11 +838,9 @@ int qeth_get_cast_type(struct qeth_card *, struct sk_buff *);
 int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
 int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int);
 int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
-		struct sk_buff *, struct qeth_hdr *, int,
-		struct qeth_eddp_context *, int, int);
+		struct sk_buff *, struct qeth_hdr *, int, int, int);
 int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *,
-		struct sk_buff *, struct qeth_hdr *,
-		int, struct qeth_eddp_context *);
+		struct sk_buff *, struct qeth_hdr *, int);
 int qeth_core_get_stats_count(struct net_device *);
 void qeth_core_get_ethtool_stats(struct net_device *,
 		struct ethtool_stats *, u64 *);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 1a361b3bf62a..1b7b08e791a1 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -17,7 +17,6 @@
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/ip.h>
-#include <linux/ipv6.h>
 #include <linux/tcp.h>
 #include <linux/mii.h>
 #include <linux/kthread.h>
@@ -26,7 +25,6 @@
 #include <asm/io.h>
 
 #include "qeth_core.h"
-#include "qeth_core_offl.h"
 
 struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
 	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
@@ -285,17 +283,6 @@ int qeth_set_large_send(struct qeth_card *card,
 	netif_tx_disable(card->dev);
 	card->options.large_send = type;
 	switch (card->options.large_send) {
-	case QETH_LARGE_SEND_EDDP:
-		if (card->info.type != QETH_CARD_TYPE_IQD) {
-			card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
-						NETIF_F_HW_CSUM;
-		} else {
-			card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
-						NETIF_F_HW_CSUM);
-			card->options.large_send = QETH_LARGE_SEND_NO;
-			rc = -EOPNOTSUPP;
-		}
-		break;
 	case QETH_LARGE_SEND_TSO:
 		if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
 			card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
@@ -956,7 +943,6 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
 		dev_kfree_skb_any(skb);
 		skb = skb_dequeue(&buf->skb_list);
 	}
-	qeth_eddp_buf_release_contexts(buf);
 	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
 		if (buf->buffer->element[i].addr && buf->is_header[i])
 			kmem_cache_free(qeth_core_header_cache,
@@ -3187,11 +3173,9 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
 int qeth_do_send_packet_fast(struct qeth_card *card,
 		struct qeth_qdio_out_q *queue, struct sk_buff *skb,
 		struct qeth_hdr *hdr, int elements_needed,
-		struct qeth_eddp_context *ctx, int offset, int hd_len)
+		int offset, int hd_len)
 {
 	struct qeth_qdio_out_buffer *buffer;
-	int buffers_needed = 0;
-	int flush_cnt = 0;
 	int index;
 
 	/* spin until we get the queue ... */
@@ -3206,27 +3190,11 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
 	 */
 	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
 		goto out;
-	if (ctx == NULL)
-		queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
+	queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
 					  QDIO_MAX_BUFFERS_PER_Q;
-	else {
-		buffers_needed = qeth_eddp_check_buffers_for_context(queue,
-								     ctx);
-		if (buffers_needed < 0)
-			goto out;
-		queue->next_buf_to_fill =
-			(queue->next_buf_to_fill + buffers_needed) %
-				QDIO_MAX_BUFFERS_PER_Q;
-	}
 	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
-	if (ctx == NULL) {
-		qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
-		qeth_flush_buffers(queue, index, 1);
-	} else {
-		flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index);
-		WARN_ON(buffers_needed != flush_cnt);
-		qeth_flush_buffers(queue, index, flush_cnt);
-	}
+	qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
+	qeth_flush_buffers(queue, index, 1);
 	return 0;
 out:
 	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
@@ -3236,7 +3204,7 @@ EXPORT_SYMBOL_GPL(qeth_do_send_packet_fast);
 
 int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
 		struct sk_buff *skb, struct qeth_hdr *hdr,
-		int elements_needed, struct qeth_eddp_context *ctx)
+		int elements_needed)
 {
 	struct qeth_qdio_out_buffer *buffer;
 	int start_index;
@@ -3262,53 +3230,32 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
 	qeth_switch_to_packing_if_needed(queue);
 	if (queue->do_pack) {
 		do_pack = 1;
-		if (ctx == NULL) {
-			/* does packet fit in current buffer? */
-			if ((QETH_MAX_BUFFER_ELEMENTS(card) -
-			    buffer->next_element_to_fill) < elements_needed) {
-				/* ... no -> set state PRIMED */
-				atomic_set(&buffer->state,
-					QETH_QDIO_BUF_PRIMED);
-				flush_count++;
-				queue->next_buf_to_fill =
-					(queue->next_buf_to_fill + 1) %
-					QDIO_MAX_BUFFERS_PER_Q;
-				buffer = &queue->bufs[queue->next_buf_to_fill];
-				/* we did a step forward, so check buffer state
-				 * again */
-				if (atomic_read(&buffer->state) !=
-						QETH_QDIO_BUF_EMPTY){
-					qeth_flush_buffers(queue, start_index,
+		/* does packet fit in current buffer? */
+		if ((QETH_MAX_BUFFER_ELEMENTS(card) -
+		    buffer->next_element_to_fill) < elements_needed) {
+			/* ... no -> set state PRIMED */
+			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
+			flush_count++;
+			queue->next_buf_to_fill =
+				(queue->next_buf_to_fill + 1) %
+				QDIO_MAX_BUFFERS_PER_Q;
+			buffer = &queue->bufs[queue->next_buf_to_fill];
+			/* we did a step forward, so check buffer state
+			 * again */
+			if (atomic_read(&buffer->state) !=
+			    QETH_QDIO_BUF_EMPTY) {
+				qeth_flush_buffers(queue, start_index,
 						   flush_count);
 				atomic_set(&queue->state,
 						QETH_OUT_Q_UNLOCKED);
 				return -EBUSY;
-				}
-			}
-		} else {
-			/* check if we have enough elements (including following
-			 * free buffers) to handle eddp context */
-			if (qeth_eddp_check_buffers_for_context(queue, ctx)
-				< 0) {
-				rc = -EBUSY;
-				goto out;
-			}
 			}
 		}
 	}
-	if (ctx == NULL)
-		tmp = qeth_fill_buffer(queue, buffer, skb, hdr, -1, 0);
-	else {
-		tmp = qeth_eddp_fill_buffer(queue, ctx,
-					queue->next_buf_to_fill);
-		if (tmp < 0) {
-			rc = -EBUSY;
-			goto out;
-		}
-	}
+	tmp = qeth_fill_buffer(queue, buffer, skb, hdr, -1, 0);
 	queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
 				  QDIO_MAX_BUFFERS_PER_Q;
 	flush_count += tmp;
-out:
 	if (flush_count)
 		qeth_flush_buffers(queue, start_index, flush_count);
 	else if (!atomic_read(&queue->set_pci_flags_count))
diff --git a/drivers/s390/net/qeth_core_offl.c b/drivers/s390/net/qeth_core_offl.c
index 4080126ca48c..e69de29bb2d1 100644
--- a/drivers/s390/net/qeth_core_offl.c
+++ b/drivers/s390/net/qeth_core_offl.c
@@ -1,699 +0,0 @@
-/*
- *  drivers/s390/net/qeth_core_offl.c
- *
- *    Copyright IBM Corp. 2007
- *    Author(s): Thomas Spatzier <tspat@de.ibm.com>,
- *		 Frank Blaschka <frank.blaschka@de.ibm.com>
- */
-
-#include <linux/errno.h>
-#include <linux/ip.h>
-#include <linux/inetdevice.h>
-#include <linux/netdevice.h>
-#include <linux/kernel.h>
-#include <linux/tcp.h>
-#include <net/tcp.h>
-#include <linux/skbuff.h>
-
-#include <net/ip.h>
-#include <net/ip6_checksum.h>
-
-#include "qeth_core.h"
-#include "qeth_core_mpc.h"
-#include "qeth_core_offl.h"
-
-int qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
-	struct qeth_eddp_context *ctx)
-{
-	int index = queue->next_buf_to_fill;
-	int elements_needed = ctx->num_elements;
-	int elements_in_buffer;
-	int skbs_in_buffer;
-	int buffers_needed = 0;
-
-	QETH_DBF_TEXT(TRACE, 5, "eddpcbfc");
-	while (elements_needed > 0) {
-		buffers_needed++;
-		if (atomic_read(&queue->bufs[index].state) !=
-				QETH_QDIO_BUF_EMPTY)
-			return -EBUSY;
-
-		elements_in_buffer = QETH_MAX_BUFFER_ELEMENTS(queue->card) -
-				     queue->bufs[index].next_element_to_fill;
-		skbs_in_buffer = elements_in_buffer / ctx->elements_per_skb;
-		elements_needed -= skbs_in_buffer * ctx->elements_per_skb;
-		index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
-	}
-	return buffers_needed;
-}
-
-static void qeth_eddp_free_context(struct qeth_eddp_context *ctx)
-{
-	int i;
-
-	QETH_DBF_TEXT(TRACE, 5, "eddpfctx");
-	for (i = 0; i < ctx->num_pages; ++i)
-		free_page((unsigned long)ctx->pages[i]);
-	kfree(ctx->pages);
-	kfree(ctx->elements);
-	kfree(ctx);
-}
-
-
-static void qeth_eddp_get_context(struct qeth_eddp_context *ctx)
-{
-	atomic_inc(&ctx->refcnt);
-}
-
-void qeth_eddp_put_context(struct qeth_eddp_context *ctx)
-{
-	if (atomic_dec_return(&ctx->refcnt) == 0)
-		qeth_eddp_free_context(ctx);
-}
-EXPORT_SYMBOL_GPL(qeth_eddp_put_context);
-
-void qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
-{
-	struct qeth_eddp_context_reference *ref;
-
-	QETH_DBF_TEXT(TRACE, 6, "eddprctx");
-	while (!list_empty(&buf->ctx_list)) {
-		ref = list_entry(buf->ctx_list.next,
-				 struct qeth_eddp_context_reference, list);
-		qeth_eddp_put_context(ref->ctx);
-		list_del(&ref->list);
-		kfree(ref);
-	}
-}
-
-static int qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
-	struct qeth_eddp_context *ctx)
-{
-	struct qeth_eddp_context_reference *ref;
-
-	QETH_DBF_TEXT(TRACE, 6, "eddprfcx");
-	ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC);
-	if (ref == NULL)
-		return -ENOMEM;
-	qeth_eddp_get_context(ctx);
-	ref->ctx = ctx;
-	list_add_tail(&ref->list, &buf->ctx_list);
-	return 0;
-}
-
-int qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
-	struct qeth_eddp_context *ctx, int index)
-{
-	struct qeth_qdio_out_buffer *buf = NULL;
-	struct qdio_buffer *buffer;
-	int elements = ctx->num_elements;
-	int element = 0;
-	int flush_cnt = 0;
-	int must_refcnt = 1;
-	int i;
-
-	QETH_DBF_TEXT(TRACE, 5, "eddpfibu");
-	while (elements > 0) {
-		buf = &queue->bufs[index];
-		if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY) {
-			/* normally this should not happen since we checked for
-			 * available elements in qeth_check_elements_for_context
-			 */
-			if (element == 0)
-				return -EBUSY;
-			else {
-				QETH_DBF_MESSAGE(2, "could only partially fill"
-					"eddp buffer!\n");
-				goto out;
-			}
-		}
-		/* check if the whole next skb fits into current buffer */
-		if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
-					buf->next_element_to_fill)
-				< ctx->elements_per_skb){
-			/* no -> go to next buffer */
-			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
-			index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
-			flush_cnt++;
-			/* new buffer, so we have to add ctx to buffer'ctx_list
-			 * and increment ctx's refcnt */
-			must_refcnt = 1;
-			continue;
-		}
-		if (must_refcnt) {
-			must_refcnt = 0;
-			if (qeth_eddp_buf_ref_context(buf, ctx)) {
-				goto out_check;
-			}
-		}
-		buffer = buf->buffer;
-		/* fill one skb into buffer */
-		for (i = 0; i < ctx->elements_per_skb; ++i) {
-			if (ctx->elements[element].length != 0) {
-				buffer->element[buf->next_element_to_fill].
-				addr = ctx->elements[element].addr;
-				buffer->element[buf->next_element_to_fill].
-				length = ctx->elements[element].length;
-				buffer->element[buf->next_element_to_fill].
-				flags = ctx->elements[element].flags;
-				buf->next_element_to_fill++;
-			}
-			element++;
-			elements--;
-		}
-	}
-out_check:
-	if (!queue->do_pack) {
-		QETH_DBF_TEXT(TRACE, 6, "fillbfnp");
-		/* set state to PRIMED -> will be flushed */
-		if (buf->next_element_to_fill > 0) {
-			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
-			flush_cnt++;
-		}
-	} else {
-		if (queue->card->options.performance_stats)
-			queue->card->perf_stats.skbs_sent_pack++;
-		QETH_DBF_TEXT(TRACE, 6, "fillbfpa");
-		if (buf->next_element_to_fill >=
-				QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
-			/*
-			 * packed buffer if full -> set state PRIMED
-			 * -> will be flushed
-			 */
-			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
-			flush_cnt++;
-		}
-	}
-out:
-	return flush_cnt;
-}
-
-static void qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
-	struct qeth_eddp_data *eddp, int data_len)
-{
-	u8 *page;
-	int page_remainder;
-	int page_offset;
-	int pkt_len;
-	struct qeth_eddp_element *element;
-
-	QETH_DBF_TEXT(TRACE, 5, "eddpcrsh");
-	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
-	page_offset = ctx->offset % PAGE_SIZE;
-	element = &ctx->elements[ctx->num_elements];
-	pkt_len = eddp->nhl + eddp->thl + data_len;
-	/* FIXME: layer2 and VLAN !!! */
-	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
-		pkt_len += ETH_HLEN;
-	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
-		pkt_len += VLAN_HLEN;
-	/* does complete packet fit in current page ? */
-	page_remainder = PAGE_SIZE - page_offset;
-	if (page_remainder < (sizeof(struct qeth_hdr) + pkt_len)) {
-		/* no -> go to start of next page */
-		ctx->offset += page_remainder;
-		page = ctx->pages[ctx->offset >> PAGE_SHIFT];
-		page_offset = 0;
-	}
-	memcpy(page + page_offset, &eddp->qh, sizeof(struct qeth_hdr));
-	element->addr = page + page_offset;
-	element->length = sizeof(struct qeth_hdr);
-	ctx->offset += sizeof(struct qeth_hdr);
-	page_offset += sizeof(struct qeth_hdr);
-	/* add mac header (?) */
-	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
-		memcpy(page + page_offset, &eddp->mac, ETH_HLEN);
-		element->length += ETH_HLEN;
-		ctx->offset += ETH_HLEN;
-		page_offset += ETH_HLEN;
-	}
-	/* add VLAN tag */
-	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
-		memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN);
-		element->length += VLAN_HLEN;
-		ctx->offset += VLAN_HLEN;
-		page_offset += VLAN_HLEN;
-	}
-	/* add network header */
-	memcpy(page + page_offset, (u8 *)&eddp->nh, eddp->nhl);
-	element->length += eddp->nhl;
-	eddp->nh_in_ctx = page + page_offset;
-	ctx->offset += eddp->nhl;
-	page_offset += eddp->nhl;
-	/* add transport header */
-	memcpy(page + page_offset, (u8 *)&eddp->th, eddp->thl);
-	element->length += eddp->thl;
-	eddp->th_in_ctx = page + page_offset;
-	ctx->offset += eddp->thl;
-}
-
-static void qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp,
-	int len, __wsum *hcsum)
-{
-	struct skb_frag_struct *frag;
-	int left_in_frag;
-	int copy_len;
-	u8 *src;
-
-	QETH_DBF_TEXT(TRACE, 5, "eddpcdtc");
-	if (skb_shinfo(eddp->skb)->nr_frags == 0) {
-		skb_copy_from_linear_data_offset(eddp->skb, eddp->skb_offset,
-						 dst, len);
-		*hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len,
-				      *hcsum);
-		eddp->skb_offset += len;
-	} else {
-		while (len > 0) {
-			if (eddp->frag < 0) {
-				/* we're in skb->data */
-				left_in_frag = (eddp->skb->len -
-						eddp->skb->data_len)
-						- eddp->skb_offset;
-				src = eddp->skb->data + eddp->skb_offset;
-			} else {
-				frag = &skb_shinfo(eddp->skb)->frags[
-					eddp->frag];
-				left_in_frag = frag->size - eddp->frag_offset;
-				src = (u8 *)((page_to_pfn(frag->page) <<
-					PAGE_SHIFT) + frag->page_offset +
-					eddp->frag_offset);
-			}
-			if (left_in_frag <= 0) {
-				eddp->frag++;
-				eddp->frag_offset = 0;
-				continue;
-			}
-			copy_len = min(left_in_frag, len);
-			memcpy(dst, src, copy_len);
-			*hcsum = csum_partial(src, copy_len, *hcsum);
-			dst += copy_len;
-			eddp->frag_offset += copy_len;
-			eddp->skb_offset += copy_len;
-			len -= copy_len;
-		}
-	}
-}
-
-static void qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
-	struct qeth_eddp_data *eddp, int data_len, __wsum hcsum)
-{
-	u8 *page;
-	int page_remainder;
-	int page_offset;
-	struct qeth_eddp_element *element;
-	int first_lap = 1;
-
-	QETH_DBF_TEXT(TRACE, 5, "eddpcsdt");
-	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
-	page_offset = ctx->offset % PAGE_SIZE;
-	element = &ctx->elements[ctx->num_elements];
-	while (data_len) {
-		page_remainder = PAGE_SIZE - page_offset;
-		if (page_remainder < data_len) {
-			qeth_eddp_copy_data_tcp(page + page_offset, eddp,
-						page_remainder, &hcsum);
-			element->length += page_remainder;
-			if (first_lap)
-				element->flags = SBAL_FLAGS_FIRST_FRAG;
-			else
-				element->flags = SBAL_FLAGS_MIDDLE_FRAG;
-			ctx->num_elements++;
-			element++;
-			data_len -= page_remainder;
-			ctx->offset += page_remainder;
-			page = ctx->pages[ctx->offset >> PAGE_SHIFT];
-			page_offset = 0;
-			element->addr = page + page_offset;
-		} else {
-			qeth_eddp_copy_data_tcp(page + page_offset, eddp,
-						data_len, &hcsum);
-			element->length += data_len;
-			if (!first_lap)
-				element->flags = SBAL_FLAGS_LAST_FRAG;
-			ctx->num_elements++;
-			ctx->offset += data_len;
-			data_len = 0;
-		}
-		first_lap = 0;
-	}
-	((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
-}
-
-static __wsum qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp,
-	int data_len)
-{
-	__wsum phcsum; /* pseudo header checksum */
-
-	QETH_DBF_TEXT(TRACE, 5, "eddpckt4");
-	eddp->th.tcp.h.check = 0;
-	/* compute pseudo header checksum */
-	phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr,
-				    eddp->thl + data_len, IPPROTO_TCP, 0);
-	/* compute checksum of tcp header */
-	return csum_partial(&eddp->th, eddp->thl, phcsum);
-}
-
-static __wsum qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp,
-	int data_len)
-{
-	__be32 proto;
-	__wsum phcsum; /* pseudo header checksum */
-
-	QETH_DBF_TEXT(TRACE, 5, "eddpckt6");
-	eddp->th.tcp.h.check = 0;
-	/* compute pseudo header checksum */
-	phcsum = csum_partial(&eddp->nh.ip6.h.saddr,
-			      sizeof(struct in6_addr), 0);
-	phcsum = csum_partial(&eddp->nh.ip6.h.daddr,
-			      sizeof(struct in6_addr), phcsum);
-	proto = htonl(IPPROTO_TCP);
-	phcsum = csum_partial(&proto, sizeof(u32), phcsum);
-	return phcsum;
-}
-
-static struct qeth_eddp_data *qeth_eddp_create_eddp_data(struct qeth_hdr *qh,
-	u8 *nh, u8 nhl, u8 *th, u8 thl)
-{
-	struct qeth_eddp_data *eddp;
-
-	QETH_DBF_TEXT(TRACE, 5, "eddpcrda");
-	eddp = kzalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC);
-	if (eddp) {
-		eddp->nhl = nhl;
-		eddp->thl = thl;
-		memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr));
-		memcpy(&eddp->nh, nh, nhl);
-		memcpy(&eddp->th, th, thl);
-		eddp->frag = -1; /* initially we're in skb->data */
-	}
-	return eddp;
-}
-
-static void __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
-	struct qeth_eddp_data *eddp)
-{
-	struct tcphdr *tcph;
-	int data_len;
-	__wsum hcsum;
-
-	QETH_DBF_TEXT(TRACE, 5, "eddpftcp");
-	eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
-	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
-		eddp->skb_offset += sizeof(struct ethhdr);
-		if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
-			eddp->skb_offset += VLAN_HLEN;
-	}
-	tcph = tcp_hdr(eddp->skb);
-	while (eddp->skb_offset < eddp->skb->len) {
-		data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
-			       (int)(eddp->skb->len - eddp->skb_offset));
-		/* prepare qdio hdr */
-		if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
-			eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN +
-						     eddp->nhl + eddp->thl;
-			if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
-				eddp->qh.hdr.l2.pkt_length += VLAN_HLEN;
-		} else
-			eddp->qh.hdr.l3.length = data_len + eddp->nhl +
-						 eddp->thl;
-		/* prepare ip hdr */
-		if (eddp->skb->protocol == htons(ETH_P_IP)) {
-			eddp->nh.ip4.h.tot_len = htons(data_len + eddp->nhl +
-						 eddp->thl);
-			eddp->nh.ip4.h.check = 0;
-			eddp->nh.ip4.h.check =
-				ip_fast_csum((u8 *)&eddp->nh.ip4.h,
-						eddp->nh.ip4.h.ihl);
-		} else
-			eddp->nh.ip6.h.payload_len = htons(data_len +
-						 eddp->thl);
-		/* prepare tcp hdr */
-		if (data_len == (eddp->skb->len - eddp->skb_offset)) {
-			/* last segment -> set FIN and PSH flags */
-			eddp->th.tcp.h.fin = tcph->fin;
-			eddp->th.tcp.h.psh = tcph->psh;
-		}
-		if (eddp->skb->protocol == htons(ETH_P_IP))
-			hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len);
-		else
-			hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
-		/* fill the next segment into the context */
-		qeth_eddp_create_segment_hdrs(ctx, eddp, data_len);
-		qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
-		if (eddp->skb_offset >= eddp->skb->len)
-			break;
-		/* prepare headers for next round */
-		if (eddp->skb->protocol == htons(ETH_P_IP))
-			eddp->nh.ip4.h.id = htons(ntohs(eddp->nh.ip4.h.id) + 1);
-		eddp->th.tcp.h.seq = htonl(ntohl(eddp->th.tcp.h.seq) +
-					   data_len);
-	}
-}
-
-static int qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
-	struct sk_buff *skb, struct qeth_hdr *qhdr)
-{
-	struct qeth_eddp_data *eddp = NULL;
-
-	QETH_DBF_TEXT(TRACE, 5, "eddpficx");
-	/* create our segmentation headers and copy original headers */
-	if (skb->protocol == htons(ETH_P_IP))
-		eddp = qeth_eddp_create_eddp_data(qhdr,
-						  skb_network_header(skb),
-						  ip_hdrlen(skb),
-						  skb_transport_header(skb),
-						  tcp_hdrlen(skb));
-	else
-		eddp = qeth_eddp_create_eddp_data(qhdr,
-						  skb_network_header(skb),
-						  sizeof(struct ipv6hdr),
-						  skb_transport_header(skb),
-						  tcp_hdrlen(skb));
-
-	if (eddp == NULL) {
-		QETH_DBF_TEXT(TRACE, 2, "eddpfcnm");
-		return -ENOMEM;
-	}
-	if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
-		skb_set_mac_header(skb, sizeof(struct qeth_hdr));
-		memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
-		if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
-			eddp->vlan[0] = skb->protocol;
-			eddp->vlan[1] = htons(vlan_tx_tag_get(skb));
-		}
-	}
-	/* the next flags will only be set on the last segment */
-	eddp->th.tcp.h.fin = 0;
-	eddp->th.tcp.h.psh = 0;
-	eddp->skb = skb;
-	/* begin segmentation and fill context */
-	__qeth_eddp_fill_context_tcp(ctx, eddp);
-	kfree(eddp);
-	return 0;
-}
-
-static void qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx,
-	struct sk_buff *skb, int hdr_len)
-{
-	int skbs_per_page;
-
-	QETH_DBF_TEXT(TRACE, 5, "eddpcanp");
-	/* can we put multiple skbs in one page? */
-	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len);
-	if (skbs_per_page > 1) {
-		ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) /
-				 skbs_per_page + 1;
-		ctx->elements_per_skb = 1;
-	} else {
-		/* no -> how many elements per skb? */
-		ctx->elements_per_skb = (skb_shinfo(skb)->gso_size + hdr_len +
-				     PAGE_SIZE) >> PAGE_SHIFT;
-		ctx->num_pages = ctx->elements_per_skb *
-				 (skb_shinfo(skb)->gso_segs + 1);
-	}
-	ctx->num_elements = ctx->elements_per_skb *
-			    (skb_shinfo(skb)->gso_segs + 1);
-}
-
-static struct qeth_eddp_context *qeth_eddp_create_context_generic(
-	struct qeth_card *card, struct sk_buff *skb, int hdr_len)
-{
-	struct qeth_eddp_context *ctx = NULL;
-	u8 *addr;
-	int i;
-
-	QETH_DBF_TEXT(TRACE, 5, "creddpcg");
-	/* create the context and allocate pages */
-	ctx = kzalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC);
-	if (ctx == NULL) {
-		QETH_DBF_TEXT(TRACE, 2, "ceddpcn1");
-		return NULL;
-	}
-	ctx->type = QETH_LARGE_SEND_EDDP;
-	qeth_eddp_calc_num_pages(ctx, skb, hdr_len);
-	if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)) {
-		QETH_DBF_TEXT(TRACE, 2, "ceddpcis");
-		kfree(ctx);
-		return NULL;
-	}
-	ctx->pages = kcalloc(ctx->num_pages, sizeof(u8 *), GFP_ATOMIC);
-	if (ctx->pages == NULL) {
-		QETH_DBF_TEXT(TRACE, 2, "ceddpcn2");
-		kfree(ctx);
-		return NULL;
-	}
-	for (i = 0; i < ctx->num_pages; ++i) {
-		addr = (u8 *)get_zeroed_page(GFP_ATOMIC);
-		if (addr == NULL) {
-			QETH_DBF_TEXT(TRACE, 2, "ceddpcn3");
-			ctx->num_pages = i;
-			qeth_eddp_free_context(ctx);
-			return NULL;
-		}
-		ctx->pages[i] = addr;
-	}
-	ctx->elements = kcalloc(ctx->num_elements,
-				sizeof(struct qeth_eddp_element), GFP_ATOMIC);
-	if (ctx->elements == NULL) {
-		QETH_DBF_TEXT(TRACE, 2, "ceddpcn4");
-		qeth_eddp_free_context(ctx);
-		return NULL;
-	}
-	/* reset num_elements; will be incremented again in fill_buffer to
-	 * reflect number of actually used elements */
-	ctx->num_elements = 0;
-	return ctx;
-}
-
-static struct qeth_eddp_context *qeth_eddp_create_context_tcp(
-	struct qeth_card *card, struct sk_buff *skb,
-	struct qeth_hdr *qhdr)
-{
-	struct qeth_eddp_context *ctx = NULL;
-
-	QETH_DBF_TEXT(TRACE, 5, "creddpct");
-	if (skb->protocol == htons(ETH_P_IP))
-		ctx = qeth_eddp_create_context_generic(card, skb,
-						(sizeof(struct qeth_hdr) +
-						 ip_hdrlen(skb) +
-						 tcp_hdrlen(skb)));
-	else if (skb->protocol == htons(ETH_P_IPV6))
-		ctx = qeth_eddp_create_context_generic(card, skb,
-			sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
-			tcp_hdrlen(skb));
-	else
-		QETH_DBF_TEXT(TRACE, 2, "cetcpinv");
-
-	if (ctx == NULL) {
-		QETH_DBF_TEXT(TRACE, 2, "creddpnl");
-		return NULL;
-	}
-	if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)) {
-		QETH_DBF_TEXT(TRACE, 2, "ceddptfe");
-		qeth_eddp_free_context(ctx);
-		return NULL;
-	}
-	atomic_set(&ctx->refcnt, 1);
-	return ctx;
-}
-
-struct qeth_eddp_context *qeth_eddp_create_context(struct qeth_card *card,
-	struct sk_buff *skb, struct qeth_hdr *qhdr,
-	unsigned char sk_protocol)
-{
-	QETH_DBF_TEXT(TRACE, 5, "creddpc");
-	switch (sk_protocol) {
-	case IPPROTO_TCP:
-		return qeth_eddp_create_context_tcp(card, skb, qhdr);
-	default:
-		QETH_DBF_TEXT(TRACE, 2, "eddpinvp");
-	}
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(qeth_eddp_create_context);
-
-void qeth_tso_fill_header(struct qeth_card *card, struct qeth_hdr *qhdr,
-	struct sk_buff *skb)
-{
-	struct qeth_hdr_tso *hdr = (struct qeth_hdr_tso *)qhdr;
-	struct tcphdr *tcph = tcp_hdr(skb);
-	struct iphdr *iph = ip_hdr(skb);
-	struct ipv6hdr *ip6h = ipv6_hdr(skb);
-
-	QETH_DBF_TEXT(TRACE, 5, "tsofhdr");
-
-	/*fix header to TSO values ...*/
-	hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
-	/*set values which are fix for the first approach ...*/
-	hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
-	hdr->ext.imb_hdr_no  = 1;
-	hdr->ext.hdr_type    = 1;
-	hdr->ext.hdr_version = 1;
-	hdr->ext.hdr_len     = 28;
-	/*insert non-fix values */
-	hdr->ext.mss = skb_shinfo(skb)->gso_size;
-	hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
-	hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
-				       sizeof(struct qeth_hdr_tso));
-	tcph->check = 0;
-	if (skb->protocol == ETH_P_IPV6) {
-		ip6h->payload_len = 0;
-		tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
-					       0, IPPROTO_TCP, 0);
-	} else {
-		/*OSA want us to set these values ...*/
-		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-						 0, IPPROTO_TCP, 0);
-		iph->tot_len = 0;
-		iph->check = 0;
-	}
-}
-EXPORT_SYMBOL_GPL(qeth_tso_fill_header);
-
-void qeth_tx_csum(struct sk_buff *skb)
-{
-	int tlen;
-	if (skb->protocol == htons(ETH_P_IP)) {
-		tlen = ntohs(ip_hdr(skb)->tot_len) - (ip_hdr(skb)->ihl << 2);
-		switch (ip_hdr(skb)->protocol) {
-		case IPPROTO_TCP:
-			tcp_hdr(skb)->check = 0;
-			tcp_hdr(skb)->check = csum_tcpudp_magic(
-				ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
-				tlen, ip_hdr(skb)->protocol,
-				skb_checksum(skb, skb_transport_offset(skb),
-					tlen, 0));
-			break;
-		case IPPROTO_UDP:
-			udp_hdr(skb)->check = 0;
-			udp_hdr(skb)->check = csum_tcpudp_magic(
-				ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
-				tlen, ip_hdr(skb)->protocol,
-				skb_checksum(skb, skb_transport_offset(skb),
-					tlen, 0));
-			break;
-		}
-	} else if (skb->protocol == htons(ETH_P_IPV6)) {
-		switch (ipv6_hdr(skb)->nexthdr) {
-		case IPPROTO_TCP:
-			tcp_hdr(skb)->check = 0;
-			tcp_hdr(skb)->check = csum_ipv6_magic(
-				&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
-				ipv6_hdr(skb)->payload_len,
-				ipv6_hdr(skb)->nexthdr,
-				skb_checksum(skb, skb_transport_offset(skb),
-					ipv6_hdr(skb)->payload_len, 0));
-			break;
-		case IPPROTO_UDP:
-			udp_hdr(skb)->check = 0;
-			udp_hdr(skb)->check = csum_ipv6_magic(
-				&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
-				ipv6_hdr(skb)->payload_len,
-				ipv6_hdr(skb)->nexthdr,
-				skb_checksum(skb, skb_transport_offset(skb),
-					ipv6_hdr(skb)->payload_len, 0));
-			break;
-		}
-	}
-}
-EXPORT_SYMBOL_GPL(qeth_tx_csum);
diff --git a/drivers/s390/net/qeth_core_offl.h b/drivers/s390/net/qeth_core_offl.h
index 86bf7df8cf16..e69de29bb2d1 100644
--- a/drivers/s390/net/qeth_core_offl.h
+++ b/drivers/s390/net/qeth_core_offl.h
@@ -1,76 +0,0 @@
-/*
- *  drivers/s390/net/qeth_core_offl.h
- *
- *    Copyright IBM Corp. 2007
- *    Author(s): Thomas Spatzier <tspat@de.ibm.com>,
- *		 Frank Blaschka <frank.blaschka@de.ibm.com>
- */
-
-#ifndef __QETH_CORE_OFFL_H__
-#define __QETH_CORE_OFFL_H__
-
-struct qeth_eddp_element {
-	u32 flags;
-	u32 length;
-	void *addr;
-};
-
-struct qeth_eddp_context {
-	atomic_t refcnt;
-	enum qeth_large_send_types type;
-	int num_pages;			    /* # of allocated pages */
-	u8 **pages;			    /* pointers to pages */
-	int offset;			    /* offset in ctx during creation */
-	int num_elements;		    /* # of required 'SBALEs' */
-	struct qeth_eddp_element *elements; /* array of 'SBALEs' */
-	int elements_per_skb;		    /* # of 'SBALEs' per skb **/
-};
-
-struct qeth_eddp_context_reference {
-	struct list_head list;
-	struct qeth_eddp_context *ctx;
-};
-
-struct qeth_eddp_data {
-	struct qeth_hdr qh;
-	struct ethhdr mac;
-	__be16 vlan[2];
-	union {
-		struct {
-			struct iphdr h;
-			u8 options[40];
-		} ip4;
-		struct {
-			struct ipv6hdr h;
-		} ip6;
-	} nh;
-	u8 nhl;
-	void *nh_in_ctx;	/* address of nh within the ctx */
-	union {
-		struct {
-			struct tcphdr h;
-			u8 options[40];
-		} tcp;
-	} th;
-	u8 thl;
-	void *th_in_ctx;	/* address of th within the ctx */
-	struct sk_buff *skb;
-	int skb_offset;
-	int frag;
-	int frag_offset;
-} __attribute__ ((packed));
-
-extern struct qeth_eddp_context *qeth_eddp_create_context(struct qeth_card *,
-	struct sk_buff *, struct qeth_hdr *, unsigned char);
-extern void qeth_eddp_put_context(struct qeth_eddp_context *);
-extern int qeth_eddp_fill_buffer(struct qeth_qdio_out_q *,
-	struct qeth_eddp_context *, int);
-extern void qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *);
-extern int qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *,
-	struct qeth_eddp_context *);
-
-void qeth_tso_fill_header(struct qeth_card *, struct qeth_hdr *,
-	struct sk_buff *);
-void qeth_tx_csum(struct sk_buff *skb);
-
-#endif /* __QETH_CORE_EDDP_H__ */
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index c26e842ad905..568465d7517f 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -427,8 +427,6 @@ static ssize_t qeth_dev_large_send_show(struct device *dev,
 	switch (card->options.large_send) {
 	case QETH_LARGE_SEND_NO:
 		return sprintf(buf, "%s\n", "no");
-	case QETH_LARGE_SEND_EDDP:
-		return sprintf(buf, "%s\n", "EDDP");
 	case QETH_LARGE_SEND_TSO:
 		return sprintf(buf, "%s\n", "TSO");
 	default:
@@ -449,8 +447,6 @@ static ssize_t qeth_dev_large_send_store(struct device *dev,
 	tmp = strsep((char **) &buf, "\n");
 	if (!strcmp(tmp, "no")) {
 		type = QETH_LARGE_SEND_NO;
-	} else if (!strcmp(tmp, "EDDP")) {
-		type = QETH_LARGE_SEND_EDDP;
 	} else if (!strcmp(tmp, "TSO")) {
 		type = QETH_LARGE_SEND_TSO;
 	} else {
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 7632d1208844..9e628b322bd3 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -21,7 +21,6 @@
 #include <linux/ip.h>
 
 #include "qeth_core.h"
-#include "qeth_core_offl.h"
 
 static int qeth_l2_set_offline(struct ccwgroup_device *);
 static int qeth_l2_stop(struct net_device *);
@@ -634,8 +633,6 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct qeth_qdio_out_q *queue = card->qdio.out_qs
 		[qeth_get_priority_queue(card, skb, ipv, cast_type)];
 	int tx_bytes = skb->len;
-	enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
-	struct qeth_eddp_context *ctx = NULL;
 	int data_offset = -1;
 	int elements_needed = 0;
 	int hd_len = 0;
@@ -655,14 +652,10 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 	netif_stop_queue(dev);
 
-	if (skb_is_gso(skb))
-		large_send = QETH_LARGE_SEND_EDDP;
-
 	if (card->info.type == QETH_CARD_TYPE_OSN)
 		hdr = (struct qeth_hdr *)skb->data;
 	else {
-		if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) &&
-		    (skb_shinfo(skb)->nr_frags == 0)) {
+		if (card->info.type == QETH_CARD_TYPE_IQD) {
 			new_skb = skb;
 			data_offset = ETH_HLEN;
 			hd_len = ETH_HLEN;
@@ -689,62 +682,26 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 
-	if (large_send == QETH_LARGE_SEND_EDDP) {
-		ctx = qeth_eddp_create_context(card, new_skb, hdr,
-						skb->sk->sk_protocol);
-		if (ctx == NULL) {
-			QETH_DBF_MESSAGE(2, "could not create eddp context\n");
-			goto tx_drop;
-		}
-	} else {
-		elements = qeth_get_elements_no(card, (void *)hdr, new_skb,
+	elements = qeth_get_elements_no(card, (void *)hdr, new_skb,
 					elements_needed);
 	if (!elements) {
 		if (data_offset >= 0)
 			kmem_cache_free(qeth_core_header_cache, hdr);
 		goto tx_drop;
-		}
-	}
-
-	if ((large_send == QETH_LARGE_SEND_NO) &&
-	    (skb->ip_summed == CHECKSUM_PARTIAL)) {
-		qeth_tx_csum(new_skb);
-		if (card->options.performance_stats)
-			card->perf_stats.tx_csum++;
 	}
 
 	if (card->info.type != QETH_CARD_TYPE_IQD)
 		rc = qeth_do_send_packet(card, queue, new_skb, hdr,
-					 elements, ctx);
+					 elements);
 	else
 		rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
-					      elements, ctx, data_offset, hd_len);
+					      elements, data_offset, hd_len);
 	if (!rc) {
 		card->stats.tx_packets++;
 		card->stats.tx_bytes += tx_bytes;
 		if (new_skb != skb)
 			dev_kfree_skb_any(skb);
-		if (card->options.performance_stats) {
-			if (large_send != QETH_LARGE_SEND_NO) {
-				card->perf_stats.large_send_bytes += tx_bytes;
-				card->perf_stats.large_send_cnt++;
-			}
-			if (skb_shinfo(new_skb)->nr_frags > 0) {
-				card->perf_stats.sg_skbs_sent++;
-				/* nr_frags + skb->data */
-				card->perf_stats.sg_frags_sent +=
-					skb_shinfo(new_skb)->nr_frags + 1;
-			}
-		}
-
-		if (ctx != NULL) {
-			qeth_eddp_put_context(ctx);
-			dev_kfree_skb_any(new_skb);
-		}
 	} else {
-		if (ctx != NULL)
-			qeth_eddp_put_context(ctx);
-
 		if (data_offset >= 0)
 			kmem_cache_free(qeth_core_header_cache, hdr);
 
@@ -881,30 +838,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
 	return;
 }
 
-static int qeth_l2_ethtool_set_tso(struct net_device *dev, u32 data)
-{
-	struct qeth_card *card = dev->ml_priv;
-
-	if (data) {
-		if (card->options.large_send == QETH_LARGE_SEND_NO) {
-			card->options.large_send = QETH_LARGE_SEND_EDDP;
-			dev->features |= NETIF_F_TSO;
-		}
-	} else {
-		dev->features &= ~NETIF_F_TSO;
-		card->options.large_send = QETH_LARGE_SEND_NO;
-	}
-	return 0;
-}
-
 static struct ethtool_ops qeth_l2_ethtool_ops = {
 	.get_link = ethtool_op_get_link,
-	.get_tx_csum = ethtool_op_get_tx_csum,
-	.set_tx_csum = ethtool_op_set_tx_hw_csum,
-	.get_sg = ethtool_op_get_sg,
-	.set_sg = ethtool_op_set_sg,
-	.get_tso = ethtool_op_get_tso,
-	.set_tso = qeth_l2_ethtool_set_tso,
 	.get_strings = qeth_core_get_strings,
 	.get_ethtool_stats = qeth_core_get_ethtool_stats,
 	.get_stats_count = qeth_core_get_stats_count,
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index fea50bdc8f41..38071a0e0c31 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -19,15 +19,15 @@
 #include <linux/etherdevice.h>
 #include <linux/mii.h>
 #include <linux/ip.h>
-#include <linux/reboot.h>
+#include <linux/ipv6.h>
 #include <linux/inetdevice.h>
 #include <linux/igmp.h>
 
 #include <net/ip.h>
 #include <net/arp.h>
+#include <net/ip6_checksum.h>
 
 #include "qeth_l3.h"
-#include "qeth_core_offl.h"
 
 static int qeth_l3_set_offline(struct ccwgroup_device *);
 static int qeth_l3_recover(void *);
@@ -2577,12 +2577,63 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
 	}
 }
 
+static void qeth_tso_fill_header(struct qeth_card *card,
+		struct qeth_hdr *qhdr, struct sk_buff *skb)
+{
+	struct qeth_hdr_tso *hdr = (struct qeth_hdr_tso *)qhdr;
+	struct tcphdr *tcph = tcp_hdr(skb);
+	struct iphdr *iph = ip_hdr(skb);
+	struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+	/*fix header to TSO values ...*/
+	hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
+	/*set values which are fix for the first approach ...*/
+	hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
+	hdr->ext.imb_hdr_no  = 1;
+	hdr->ext.hdr_type    = 1;
+	hdr->ext.hdr_version = 1;
+	hdr->ext.hdr_len     = 28;
+	/*insert non-fix values */
+	hdr->ext.mss = skb_shinfo(skb)->gso_size;
+	hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4);
+	hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
+				       sizeof(struct qeth_hdr_tso));
+	tcph->check = 0;
+	if (skb->protocol == ETH_P_IPV6) {
+		ip6h->payload_len = 0;
+		tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+					       0, IPPROTO_TCP, 0);
+	} else {
+		/*OSA want us to set these values ...*/
+		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+						 0, IPPROTO_TCP, 0);
+		iph->tot_len = 0;
+		iph->check = 0;
+	}
+}
+
+static void qeth_tx_csum(struct sk_buff *skb)
+{
+	__wsum csum;
+	int offset;
+
+	skb_set_transport_header(skb, skb->csum_start - skb_headroom(skb));
+	offset = skb->csum_start - skb_headroom(skb);
+	BUG_ON(offset >= skb_headlen(skb));
+	csum = skb_checksum(skb, offset, skb->len - offset, 0);
+
+	offset += skb->csum_offset;
+	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
+	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
+}
+
 static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	int rc;
 	u16 *tag;
 	struct qeth_hdr *hdr = NULL;
 	int elements_needed = 0;
+	int elems;
 	struct qeth_card *card = dev->ml_priv;
 	struct sk_buff *new_skb = NULL;
 	int ipv = qeth_get_ip_version(skb);
@@ -2591,8 +2642,8 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		[qeth_get_priority_queue(card, skb, ipv, cast_type)];
 	int tx_bytes = skb->len;
 	enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
-	struct qeth_eddp_context *ctx = NULL;
 	int data_offset = -1;
+	int nr_frags;
 
 	if ((card->info.type == QETH_CARD_TYPE_IQD) &&
 	    (skb->protocol != htons(ETH_P_IPV6)) &&
@@ -2615,6 +2666,12 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	if (skb_is_gso(skb))
 		large_send = card->options.large_send;
+	else
+		if (skb->ip_summed == CHECKSUM_PARTIAL) {
+			qeth_tx_csum(skb);
+			if (card->options.performance_stats)
+				card->perf_stats.tx_csum++;
+		}
 
 	if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) &&
 	    (skb_shinfo(skb)->nr_frags == 0)) {
@@ -2661,12 +2718,13 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	netif_stop_queue(dev);
 
 	/* fix hardware limitation: as long as we do not have sbal
-	 * chaining we can not send long frag lists so we temporary
-	 * switch to EDDP
+	 * chaining we can not send long frag lists
 	 */
 	if ((large_send == QETH_LARGE_SEND_TSO) &&
-	    ((skb_shinfo(new_skb)->nr_frags + 2) > 16))
-		large_send = QETH_LARGE_SEND_EDDP;
+	    ((skb_shinfo(new_skb)->nr_frags + 2) > 16)) {
+		if (skb_linearize(new_skb))
+			goto tx_drop;
+	}
 
 	if ((large_send == QETH_LARGE_SEND_TSO) &&
 	    (cast_type == RTN_UNSPEC)) {
@@ -2689,40 +2747,22 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 
-	if (large_send == QETH_LARGE_SEND_EDDP) {
-		/* new_skb is not owned by a socket so we use skb to get
-		 * the protocol
-		 */
-		ctx = qeth_eddp_create_context(card, new_skb, hdr,
-						skb->sk->sk_protocol);
-		if (ctx == NULL) {
-			QETH_DBF_MESSAGE(2, "could not create eddp context\n");
-			goto tx_drop;
-		}
-	} else {
-		int elems = qeth_get_elements_no(card, (void *)hdr, new_skb,
+	elems = qeth_get_elements_no(card, (void *)hdr, new_skb,
 			elements_needed);
 	if (!elems) {
 		if (data_offset >= 0)
 			kmem_cache_free(qeth_core_header_cache, hdr);
 		goto tx_drop;
-		}
-		elements_needed += elems;
-	}
-
-	if ((large_send == QETH_LARGE_SEND_NO) &&
-	    (new_skb->ip_summed == CHECKSUM_PARTIAL)) {
-		qeth_tx_csum(new_skb);
-		if (card->options.performance_stats)
-			card->perf_stats.tx_csum++;
 	}
+	elements_needed += elems;
+	nr_frags = skb_shinfo(new_skb)->nr_frags;
 
 	if (card->info.type != QETH_CARD_TYPE_IQD)
 		rc = qeth_do_send_packet(card, queue, new_skb, hdr,
-					 elements_needed, ctx);
+					 elements_needed);
 	else
 		rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
-					      elements_needed, ctx, data_offset, 0);
+					      elements_needed, data_offset, 0);
 
 	if (!rc) {
 		card->stats.tx_packets++;
@@ -2734,22 +2774,13 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 				card->perf_stats.large_send_bytes += tx_bytes;
 				card->perf_stats.large_send_cnt++;
 			}
-			if (skb_shinfo(new_skb)->nr_frags > 0) {
+			if (nr_frags) {
 				card->perf_stats.sg_skbs_sent++;
 				/* nr_frags + skb->data */
-				card->perf_stats.sg_frags_sent +=
-					skb_shinfo(new_skb)->nr_frags + 1;
+				card->perf_stats.sg_frags_sent += nr_frags + 1;
 			}
 		}
-
-		if (ctx != NULL) {
-			qeth_eddp_put_context(ctx);
-			dev_kfree_skb_any(new_skb);
-		}
 	} else {
-		if (ctx != NULL)
-			qeth_eddp_put_context(ctx);
-
 		if (data_offset >= 0)
 			kmem_cache_free(qeth_core_header_cache, hdr);
 
@@ -2844,7 +2875,7 @@ static int qeth_l3_ethtool_set_tso(struct net_device *dev, u32 data)
 	if (data) {
 		if (card->options.large_send == QETH_LARGE_SEND_NO) {
 			if (card->info.type == QETH_CARD_TYPE_IQD)
-				card->options.large_send = QETH_LARGE_SEND_EDDP;
+				return -EPERM;
 			else
 				card->options.large_send = QETH_LARGE_SEND_TSO;
 			dev->features |= NETIF_F_TSO;