Diffstat (limited to 'drivers/s390/net/qeth_main.c')
-rw-r--r--  drivers/s390/net/qeth_main.c | 158
1 file changed, 118 insertions(+), 40 deletions(-)
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 57f69434fb..f3e6fbeb21 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -561,7 +561,7 @@ qeth_set_offline(struct ccwgroup_device *cgdev)
 }
 
 static int
-qeth_wait_for_threads(struct qeth_card *card, unsigned long threads);
+qeth_threads_running(struct qeth_card *card, unsigned long threads);
 
 
 static void
@@ -576,8 +576,7 @@ qeth_remove_device(struct ccwgroup_device *cgdev)
 	if (!card)
 		return;
 
-	if (qeth_wait_for_threads(card, 0xffffffff))
-		return;
+	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
 
 	if (cgdev->state == CCWGROUP_ONLINE){
 		card->use_hard_stop = 1;
@@ -1542,16 +1541,21 @@ qeth_idx_write_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
 	card = CARD_FROM_CDEV(channel->ccwdev);
 
 	if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
-		PRINT_ERR("IDX_ACTIVATE on write channel device %s: negative "
-			  "reply\n", CARD_WDEV_ID(card));
+		if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19)
+			PRINT_ERR("IDX_ACTIVATE on write channel device %s: "
+				"adapter exclusively used by another host\n",
+				CARD_WDEV_ID(card));
+		else
+			PRINT_ERR("IDX_ACTIVATE on write channel device %s: "
+				"negative reply\n", CARD_WDEV_ID(card));
 		goto out;
 	}
 	memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
 	if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
 		PRINT_WARN("IDX_ACTIVATE on write channel device %s: "
 			   "function level mismatch "
 			   "(sent: 0x%x, received: 0x%x)\n",
 			   CARD_WDEV_ID(card), card->info.func_level, temp);
 		goto out;
 	}
 	channel->state = CH_STATE_UP;
@@ -1597,8 +1601,13 @@ qeth_idx_read_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
 		goto out;
 	}
 	if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
-		PRINT_ERR("IDX_ACTIVATE on read channel device %s: negative "
-			  "reply\n", CARD_RDEV_ID(card));
+		if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19)
+			PRINT_ERR("IDX_ACTIVATE on read channel device %s: "
+				"adapter exclusively used by another host\n",
+				CARD_RDEV_ID(card));
+		else
+			PRINT_ERR("IDX_ACTIVATE on read channel device %s: "
+				"negative reply\n", CARD_RDEV_ID(card));
 		goto out;
 	}
 
@@ -1613,8 +1622,8 @@ qeth_idx_read_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob)
 	memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
 	if (temp != qeth_peer_func_level(card->info.func_level)) {
-		PRINT_WARN("IDX_ACTIVATE on read channel device %s: function "
-			"level mismatch (sent: 0x%x, received: 0x%x)\n",
-			CARD_RDEV_ID(card), card->info.func_level, temp);
+		PRINT_WARN("IDX_ACTIVATE on read channel device %s: function "
+			   "level mismatch (sent: 0x%x, received: 0x%x)\n",
+			   CARD_RDEV_ID(card), card->info.func_level, temp);
 		goto out;
 	}
 	memcpy(&card->token.issuer_rm_r,
@@ -2496,7 +2505,7 @@ qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb,
 	struct iphdr *ip_hdr;
 
 	QETH_DBF_TEXT(trace,5,"skbfktr");
-	skb_set_mac_header(skb, -QETH_FAKE_LL_LEN_TR);
+	skb_set_mac_header(skb, (int)-QETH_FAKE_LL_LEN_TR);
 	/* this is a fake ethernet header */
 	fake_hdr = tr_hdr(skb);
 
@@ -2804,13 +2813,16 @@ qeth_queue_input_buffer(struct qeth_card *card, int index)
 		if (newcount < count) {
 			/* we are in memory shortage so we switch back to
 			   traditional skb allocation and drop packages */
-			if (atomic_cmpxchg(&card->force_alloc_skb, 0, 1))
-				printk(KERN_WARNING
-					"qeth: switch to alloc skb\n");
+			if (!atomic_read(&card->force_alloc_skb) &&
+			    net_ratelimit())
+				PRINT_WARN("Switch to alloc skb\n");
+			atomic_set(&card->force_alloc_skb, 3);
 			count = newcount;
 		} else {
-			if (atomic_cmpxchg(&card->force_alloc_skb, 1, 0))
-				printk(KERN_WARNING "qeth: switch to sg\n");
+			if ((atomic_read(&card->force_alloc_skb) == 1) &&
+			    net_ratelimit())
+				PRINT_WARN("Switch to sg\n");
+			atomic_add_unless(&card->force_alloc_skb, -1, 0);
 		}
 
 		/*
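The hunk above replaces the old two-state toggle with a small hysteresis counter: one allocation shortage pins force_alloc_skb at 3, and the driver only returns to the scatter/gather path after three clean rounds have counted it back down to 0. Below is a standalone userspace sketch of that scheme using C11 atomics instead of the kernel's atomic_t helpers; the names and messages are illustrative, not the driver's own.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int force_alloc;          /* 0 = scatter/gather, >0 = plain skbs */

static void on_shortage(void)
{
        /* Warn only on the 0 -> 3 transition, then (re)arm the counter. */
        if (atomic_load(&force_alloc) == 0)
                puts("switching to plain skb allocation");
        atomic_store(&force_alloc, 3);
}

static void on_good_round(void)
{
        int v = atomic_load(&force_alloc);

        /* Decrement toward zero but never below it (cf. atomic_add_unless). */
        while (v > 0 && !atomic_compare_exchange_weak(&force_alloc, &v, v - 1))
                ;
        if (v == 1)                     /* this round finished the 1 -> 0 step */
                puts("switching back to scatter/gather");
}

int main(void)
{
        on_shortage();                  /* shortage observed: counter = 3 */
        for (int i = 0; i < 4; i++)
                on_good_round();        /* three clean rounds later: sg again */
        return 0;
}

The compare-exchange loop plays the role of atomic_add_unless(..., -1, 0): it only decrements while the counter is still positive, so a warning is printed exactly once per transition in each direction.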
@@ -3354,10 +3366,12 @@ out_freeoutq:
 	while (i > 0)
 		kfree(card->qdio.out_qs[--i]);
 	kfree(card->qdio.out_qs);
+	card->qdio.out_qs = NULL;
 out_freepool:
 	qeth_free_buffer_pool(card);
 out_freeinq:
 	kfree(card->qdio.in_q);
+	card->qdio.in_q = NULL;
 out_nomem:
 	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
 	return -ENOMEM;
@@ -3373,16 +3387,20 @@ qeth_free_qdio_buffers(struct qeth_card *card)
 	    QETH_QDIO_UNINITIALIZED)
 		return;
 	kfree(card->qdio.in_q);
+	card->qdio.in_q = NULL;
 	/* inbound buffer pool */
 	qeth_free_buffer_pool(card);
 	/* free outbound qdio_qs */
-	for (i = 0; i < card->qdio.no_out_queues; ++i){
-		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
-			qeth_clear_output_buffer(card->qdio.out_qs[i],
-					&card->qdio.out_qs[i]->bufs[j]);
-		kfree(card->qdio.out_qs[i]);
+	if (card->qdio.out_qs) {
+		for (i = 0; i < card->qdio.no_out_queues; ++i) {
+			for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
+				qeth_clear_output_buffer(card->qdio.out_qs[i],
+						&card->qdio.out_qs[i]->bufs[j]);
+			kfree(card->qdio.out_qs[i]);
+		}
+		kfree(card->qdio.out_qs);
+		card->qdio.out_qs = NULL;
 	}
-	kfree(card->qdio.out_qs);
 }
 
 static void
@@ -3393,7 +3411,7 @@ qeth_clear_qdio_buffers(struct qeth_card *card)
 	QETH_DBF_TEXT(trace, 2, "clearqdbf");
 	/* clear outbound buffers to free skbs */
 	for (i = 0; i < card->qdio.no_out_queues; ++i)
-		if (card->qdio.out_qs[i]){
+		if (card->qdio.out_qs && card->qdio.out_qs[i]) {
 			for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
 				qeth_clear_output_buffer(card->qdio.out_qs[i],
 						&card->qdio.out_qs[i]->bufs[j]);
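The three qdio hunks above converge on one teardown pattern: free a queue array, immediately NULL the pointer, and make every later cleanup path tolerate an already-released array, so the error exits in buffer allocation and a subsequent qeth_free_qdio_buffers/qeth_clear_qdio_buffers cannot double-free or touch stale pointers. A minimal userspace sketch of that idempotent-teardown idea follows; the types and names are illustrative, not the driver's.

#include <stdio.h>
#include <stdlib.h>

struct queue { int dummy; };

struct qdio {
        struct queue **out_qs;
        int no_out_queues;
};

static void free_out_queues(struct qdio *q)
{
        if (!q->out_qs)                 /* already torn down: nothing to do */
                return;
        for (int i = 0; i < q->no_out_queues; i++)
                free(q->out_qs[i]);
        free(q->out_qs);
        q->out_qs = NULL;               /* make a later call harmless */
}

int main(void)
{
        struct qdio q = { .no_out_queues = 4 };

        q.out_qs = calloc(q.no_out_queues, sizeof(*q.out_qs));
        for (int i = 0; i < q.no_out_queues; i++)
                q.out_qs[i] = calloc(1, sizeof(struct queue));

        free_out_queues(&q);
        free_out_queues(&q);            /* error paths may clean up twice */
        puts("double teardown is safe");
        return 0;
}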
@@ -4553,6 +4571,53 @@ qeth_get_elements_no(struct qeth_card *card, void *hdr,
 	return elements_needed;
 }
 
+static void qeth_tx_csum(struct sk_buff *skb)
+{
+	int tlen;
+
+	if (skb->protocol == htons(ETH_P_IP)) {
+		tlen = ntohs(ip_hdr(skb)->tot_len) - (ip_hdr(skb)->ihl << 2);
+		switch (ip_hdr(skb)->protocol) {
+		case IPPROTO_TCP:
+			tcp_hdr(skb)->check = 0;
+			tcp_hdr(skb)->check = csum_tcpudp_magic(
+				ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
+				tlen, ip_hdr(skb)->protocol,
+				skb_checksum(skb, skb_transport_offset(skb),
+					tlen, 0));
+			break;
+		case IPPROTO_UDP:
+			udp_hdr(skb)->check = 0;
+			udp_hdr(skb)->check = csum_tcpudp_magic(
+				ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
+				tlen, ip_hdr(skb)->protocol,
+				skb_checksum(skb, skb_transport_offset(skb),
+					tlen, 0));
+			break;
+		}
+	} else if (skb->protocol == htons(ETH_P_IPV6)) {
+		switch (ipv6_hdr(skb)->nexthdr) {
+		case IPPROTO_TCP:
+			tcp_hdr(skb)->check = 0;
+			tcp_hdr(skb)->check = csum_ipv6_magic(
+				&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
+				ipv6_hdr(skb)->payload_len,
+				ipv6_hdr(skb)->nexthdr,
+				skb_checksum(skb, skb_transport_offset(skb),
+					ipv6_hdr(skb)->payload_len, 0));
+			break;
+		case IPPROTO_UDP:
+			udp_hdr(skb)->check = 0;
+			udp_hdr(skb)->check = csum_ipv6_magic(
+				&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
+				ipv6_hdr(skb)->payload_len,
+				ipv6_hdr(skb)->nexthdr,
+				skb_checksum(skb, skb_transport_offset(skb),
+					ipv6_hdr(skb)->payload_len, 0));
+			break;
+		}
+	}
+}
 
 static int
 qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
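The new qeth_tx_csum() above fills in TCP/UDP checksums in software via csum_tcpudp_magic()/csum_ipv6_magic() over the data gathered by skb_checksum(). For reference, the arithmetic behind those helpers is the RFC 1071 16-bit ones'-complement sum, folded over a pseudo-header (source, destination, protocol, L4 length) plus the TCP/UDP segment with its checksum field zeroed. Here is a self-contained userspace sketch of that calculation, assuming addresses are supplied in network byte order; the helper names below are illustrative, not kernel API.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* RFC 1071 running sum, treating the bytes as big-endian 16-bit words. */
static uint32_t csum_partial(const void *buf, size_t len, uint32_t sum)
{
        const uint8_t *p = buf;

        while (len > 1) {
                sum += (uint32_t)(p[0] << 8 | p[1]);
                p += 2;
                len -= 2;
        }
        if (len)                        /* odd trailing byte */
                sum += (uint32_t)p[0] << 8;
        return sum;
}

/* Fold the 32-bit accumulator to 16 bits and complement it. */
static uint16_t csum_fold(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

/* Pseudo-header (saddr, daddr, zero, proto, L4 length) + TCP/UDP segment. */
static uint16_t tcpudp_csum(uint32_t saddr, uint32_t daddr, uint8_t proto,
                            const void *l4, uint16_t l4len)
{
        uint8_t ph[12];

        memcpy(ph, &saddr, 4);          /* addresses in network byte order */
        memcpy(ph + 4, &daddr, 4);
        ph[8] = 0;
        ph[9] = proto;
        ph[10] = l4len >> 8;
        ph[11] = l4len & 0xff;

        return csum_fold(csum_partial(l4, l4len, csum_partial(ph, 12, 0)));
}

int main(void)
{
        /* Minimal UDP header + payload, checksum field (bytes 6-7) zeroed. */
        uint8_t udp[] = { 0x30, 0x39, 0x00, 0x35, 0x00, 0x0a, 0x00, 0x00,
                          'h', 'i' };
        uint32_t src = inet_addr("192.0.2.1");
        uint32_t dst = inet_addr("192.0.2.2");

        /* A real UDP sender would transmit 0xffff if this came out as 0. */
        printf("udp checksum: 0x%04x\n",
               tcpudp_csum(src, dst, 17 /* IPPROTO_UDP */, udp, sizeof(udp)));
        return 0;
}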
@@ -4638,12 +4703,22 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
 		elements_needed += elems;
 	}
 
+	if ((large_send == QETH_LARGE_SEND_NO) &&
+	    (skb->ip_summed == CHECKSUM_PARTIAL))
+		qeth_tx_csum(new_skb);
+
 	if (card->info.type != QETH_CARD_TYPE_IQD)
 		rc = qeth_do_send_packet(card, queue, new_skb, hdr,
 					 elements_needed, ctx);
-	else
+	else {
+		if ((skb->protocol == htons(ETH_P_ARP)) &&
+		    (card->dev->flags & IFF_NOARP)) {
+			__qeth_free_new_skb(skb, new_skb);
+			return -EPERM;
+		}
 		rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
 					      elements_needed, ctx);
+	}
 	if (!rc) {
 		card->stats.tx_packets++;
 		card->stats.tx_bytes += tx_bytes;
@@ -6385,20 +6460,18 @@ qeth_deregister_addr_entry(struct qeth_card *card, struct qeth_ipaddr *addr)
 static u32
 qeth_ethtool_get_tx_csum(struct net_device *dev)
 {
-	/* We may need to say that we support tx csum offload if
-	 * we do EDDP or TSO. There are discussions going on to
-	 * enforce rules in the stack and in ethtool that make
-	 * SG and TSO depend on HW_CSUM. At the moment there are
-	 * no such rules....
-	 * If we say yes here, we have to checksum outbound packets
-	 * any time. */
-	return 0;
+	return (dev->features & NETIF_F_HW_CSUM) != 0;
 }
 
 static int
 qeth_ethtool_set_tx_csum(struct net_device *dev, u32 data)
 {
-	return -EINVAL;
+	if (data)
+		dev->features |= NETIF_F_HW_CSUM;
+	else
+		dev->features &= ~NETIF_F_HW_CSUM;
+
+	return 0;
 }
 
 static u32
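With get_tx_csum/set_tx_csum now wired up, TX checksum offload can be queried and toggled per device from user space. A rough sketch of exercising these two callbacks through the legacy SIOCETHTOOL ioctl (ETHTOOL_GTXCSUM/ETHTOOL_STXCSUM), which is approximately what an older `ethtool -K <dev> tx on` does; the interface name "eth0" is an assumption and setting the flag requires CAP_NET_ADMIN.

#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        struct ethtool_value eval = { .cmd = ETHTOOL_GTXCSUM };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* assumed device name */
        ifr.ifr_data = (char *)&eval;

        /* Read the current state: ends up in the driver's get_tx_csum hook. */
        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                printf("tx checksumming: %s\n", eval.data ? "on" : "off");
        else
                perror("ETHTOOL_GTXCSUM");

        /* Enable it: the driver's set_tx_csum hook flips NETIF_F_HW_CSUM. */
        eval.cmd = ETHTOOL_STXCSUM;
        eval.data = 1;
        if (ioctl(fd, SIOCETHTOOL, &ifr) != 0)
                perror("ETHTOOL_STXCSUM");

        close(fd);
        return 0;
}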
@@ -7412,7 +7485,8 @@ qeth_start_ipa_tso(struct qeth_card *card)
 	}
 	if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)){
 		card->options.large_send = QETH_LARGE_SEND_NO;
-		card->dev->features &= ~ (NETIF_F_TSO | NETIF_F_SG);
+		card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
+					 NETIF_F_HW_CSUM);
 	}
 	return rc;
 }
@@ -7552,22 +7626,26 @@ qeth_set_large_send(struct qeth_card *card, enum qeth_large_send_types type)
 	card->options.large_send = type;
 	switch (card->options.large_send) {
 	case QETH_LARGE_SEND_EDDP:
-		card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
+		card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
+					NETIF_F_HW_CSUM;
 		break;
 	case QETH_LARGE_SEND_TSO:
 		if (qeth_is_supported(card, IPA_OUTBOUND_TSO)){
-			card->dev->features |= NETIF_F_TSO | NETIF_F_SG;
+			card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
+						NETIF_F_HW_CSUM;
 		} else {
 			PRINT_WARN("TSO not supported on %s. "
 				   "large_send set to 'no'.\n",
 				   card->dev->name);
-			card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
+			card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
+						 NETIF_F_HW_CSUM);
 			card->options.large_send = QETH_LARGE_SEND_NO;
 			rc = -EOPNOTSUPP;
 		}
 		break;
 	default: /* includes QETH_LARGE_SEND_NO */
-		card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG);
+		card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
+					 NETIF_F_HW_CSUM);
 		break;
 	}
 	if (card->state == CARD_STATE_UP)
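The last two hunks always set or clear NETIF_F_TSO, NETIF_F_SG and NETIF_F_HW_CSUM together, since large send in the stack presumes scatter/gather and hardware checksumming. A tiny sketch of the same pattern with the dependent flags folded into one mask so every call site stays consistent; the flag values are illustrative, not the ones from netdevice.h.

#include <stdint.h>
#include <stdio.h>

#define F_SG            (1u << 0)
#define F_HW_CSUM       (1u << 1)
#define F_TSO           (1u << 2)
#define F_LSO_DEPS      (F_TSO | F_SG | F_HW_CSUM)      /* always travel together */

int main(void)
{
        uint32_t features = 0;

        features |= F_LSO_DEPS;                 /* large_send enabled */
        printf("features on:  0x%02x\n", (unsigned)features);

        features &= ~F_LSO_DEPS;                /* large_send set back to 'no' */
        printf("features off: 0x%02x\n", (unsigned)features);
        return 0;
}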