author     David S. Miller <davem@davemloft.net>  2014-03-07 16:24:54 -0500
committer  David S. Miller <davem@davemloft.net>  2014-03-07 16:24:54 -0500
commit     a5d5ff572c33acc629779e383f02c048a98859e0 (patch)
tree       fe0712d62e1b8022612c12fa7777540d1df598cf
parent     91bd66e4c8e0042fdea945c5e455363f1d44959a (diff)
parent     6128d1bb30748d0ff56a63898d14f312126e404c (diff)
Merge branch 'r8152'
Hayes Wang says:
====================
r8152: tx/rx improvement
- Select the suitable spin lock variant for each function (sketched after
  the diffstat below).
- Add an additional check to avoid taking the spin lock when it is not
  needed.
- Raise the priority of tx to avoid it being interrupted by rx.
- Support rx checksum, large send (TSO), and IPv6 hw checksum.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--   drivers/net/usb/r8152.c   323
1 file changed, 263 insertions(+), 60 deletions(-)
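The spin lock changes follow a single rule: the URB completion callbacks and
the driver's tasklet all run in softirq context, and nothing takes these
locks from hard-interrupt context, so the plain spin_lock()/spin_unlock()
pair suffices there and the irqsave/irqrestore variants only add overhead.
Process-context paths that race with those users, such as
rtl_drop_queued_tx(), move to the _bh variants instead. A minimal sketch of
the pattern, as hypothetical module code rather than anything from this
patch:

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/interrupt.h>

static LIST_HEAD(done_list);
static DEFINE_SPINLOCK(done_lock);

/* Softirq context (tasklet): interrupts need not be disabled, and no
 * hard-IRQ path takes done_lock, so the plain lock is enough.
 */
static void demo_tasklet_fn(unsigned long data)
{
	LIST_HEAD(local);

	spin_lock(&done_lock);
	list_splice_init(&done_list, &local);
	spin_unlock(&done_lock);
	/* ... process the entries moved onto @local ... */
}
static DECLARE_TASKLET(demo_tl, demo_tasklet_fn, 0);

/* Process context: block softirqs so the tasklet above cannot run on
 * this CPU while the list is being emptied.
 */
static void demo_flush(void)
{
	LIST_HEAD(local);

	spin_lock_bh(&done_lock);
	list_splice_init(&done_list, &local);
	spin_unlock_bh(&done_lock);
	/* ... free the entries on @local ... */
}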
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index b8eee365e15d..c7ef30dee1b9 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -21,9 +21,10 @@
 #include <linux/list.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
 
 /* Version Information */
-#define DRIVER_VERSION "v1.05.0 (2014/02/18)"
+#define DRIVER_VERSION "v1.06.0 (2014/03/03)"
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
 #define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
 #define MODULENAME "r8152"
@@ -447,6 +448,7 @@ enum rtl8152_flags {
 	RTL8152_LINK_CHG,
 	SELECTIVE_SUSPEND,
 	PHY_RESET,
+	SCHEDULE_TASKLET,
 };
 
 /* Define these values to match your device */
@@ -466,8 +468,18 @@ enum rtl8152_flags {
 struct rx_desc {
 	__le32 opts1;
 #define RX_LEN_MASK 0x7fff
+
 	__le32 opts2;
+#define RD_UDP_CS (1 << 23)
+#define RD_TCP_CS (1 << 22)
+#define RD_IPV6_CS (1 << 20)
+#define RD_IPV4_CS (1 << 19)
+
 	__le32 opts3;
+#define IPF (1 << 23) /* IP checksum fail */
+#define UDPF (1 << 22) /* UDP checksum fail */
+#define TCPF (1 << 21) /* TCP checksum fail */
+
 	__le32 opts4;
 	__le32 opts5;
 	__le32 opts6;
@@ -477,13 +489,21 @@ struct tx_desc {
 	__le32 opts1;
 #define TX_FS (1 << 31) /* First segment of a packet */
 #define TX_LS (1 << 30) /* Final segment of a packet */
-#define TX_LEN_MASK 0x3ffff
+#define GTSENDV4 (1 << 28)
+#define GTSENDV6 (1 << 27)
+#define GTTCPHO_SHIFT 18
+#define GTTCPHO_MAX 0x7fU
+#define TX_LEN_MAX 0x3ffffU
 
 	__le32 opts2;
#define UDP_CS (1 << 31) /* Calculate UDP/IP checksum */
 #define TCP_CS (1 << 30) /* Calculate TCP/IP checksum */
 #define IPV4_CS (1 << 29) /* Calculate IPv4 checksum */
 #define IPV6_CS (1 << 28) /* Calculate IPv6 checksum */
+#define MSS_SHIFT 17
+#define MSS_MAX 0x7ffU
+#define TCPHO_SHIFT 17
+#define TCPHO_MAX 0x7ffU
 };
 
 struct r8152;
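The new opts1/opts2 bits above are what let the driver hand a whole TSO
frame to the chip: opts1 carries the frame length (up to TX_LEN_MAX), the
first/last-segment flags, the GTSENDV4/GTSENDV6 large-send selectors, and
the transport-header offset in the GTTCPHO field; opts2 carries the MSS the
hardware should segment to. A standalone sketch of the packing for an IPv4
TSO frame (the len, thoff, and mss values are made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define TX_FS		(1U << 31)
#define TX_LS		(1U << 30)
#define GTSENDV4	(1U << 28)
#define GTTCPHO_SHIFT	18
#define MSS_SHIFT	17

int main(void)
{
	uint32_t len = 14 + 20 + 20 + 2800;	/* eth + ip + tcp + payload */
	uint32_t thoff = 14 + 20;		/* transport header offset */
	uint32_t mss = 1400;

	/* len fits in bits 0..17, thoff in the 7-bit GTTCPHO field. */
	uint32_t opts1 = len | TX_FS | TX_LS | GTSENDV4 |
			 (thoff << GTTCPHO_SHIFT);
	uint32_t opts2 = mss << MSS_SHIFT;	/* MSS_MAX is 0x7ff */

	printf("opts1=%08x opts2=%08x\n", opts1, opts2);
	return 0;
}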
@@ -550,12 +570,21 @@ enum rtl_version {
 	RTL_VER_MAX
 };
 
+enum tx_csum_stat {
+	TX_CSUM_SUCCESS = 0,
+	TX_CSUM_TSO,
+	TX_CSUM_NONE
+};
+
 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
  * The RTL chips use a 64 element hash table based on the Ethernet CRC.
  */
 static const int multicast_filter_limit = 32;
 static unsigned int rx_buf_sz = 16384;
 
+#define RTL_LIMITED_TSO_SIZE (rx_buf_sz - sizeof(struct tx_desc) - \
+			      VLAN_ETH_HLEN - VLAN_HLEN)
+
 static
 int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
 {
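RTL_LIMITED_TSO_SIZE is what keeps a single GSO frame, descriptor included,
inside one tx aggregation buffer: with rx_buf_sz = 16384, an 8-byte struct
tx_desc (two __le32 words), VLAN_ETH_HLEN = 18 and VLAN_HLEN = 4, it works
out to 16384 - 8 - 18 - 4 = 16354 bytes, the value handed to
netif_set_gso_max_size() in rtl8152_probe() at the end of this diff.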
@@ -963,7 +992,6 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
 static void read_bulk_callback(struct urb *urb)
 {
 	struct net_device *netdev;
-	unsigned long flags;
 	int status = urb->status;
 	struct rx_agg *agg;
 	struct r8152 *tp;
@@ -997,9 +1025,9 @@ static void read_bulk_callback(struct urb *urb)
 		if (urb->actual_length < ETH_ZLEN)
 			break;
 
-		spin_lock_irqsave(&tp->rx_lock, flags);
+		spin_lock(&tp->rx_lock);
 		list_add_tail(&agg->list, &tp->rx_done);
-		spin_unlock_irqrestore(&tp->rx_lock, flags);
+		spin_unlock(&tp->rx_lock);
 		tasklet_schedule(&tp->tl);
 		return;
 	case -ESHUTDOWN:
@@ -1022,9 +1050,9 @@ static void read_bulk_callback(struct urb *urb)
 	if (result == -ENODEV) {
 		netif_device_detach(tp->netdev);
 	} else if (result) {
-		spin_lock_irqsave(&tp->rx_lock, flags);
+		spin_lock(&tp->rx_lock);
 		list_add_tail(&agg->list, &tp->rx_done);
-		spin_unlock_irqrestore(&tp->rx_lock, flags);
+		spin_unlock(&tp->rx_lock);
 		tasklet_schedule(&tp->tl);
 	}
 }
@@ -1033,7 +1061,6 @@ static void write_bulk_callback(struct urb *urb)
 {
 	struct net_device_stats *stats;
 	struct net_device *netdev;
-	unsigned long flags;
 	struct tx_agg *agg;
 	struct r8152 *tp;
 	int status = urb->status;
@@ -1057,9 +1084,9 @@ static void write_bulk_callback(struct urb *urb)
 		stats->tx_bytes += agg->skb_len;
 	}
 
-	spin_lock_irqsave(&tp->tx_lock, flags);
+	spin_lock(&tp->tx_lock);
 	list_add_tail(&agg->list, &tp->tx_free);
-	spin_unlock_irqrestore(&tp->tx_lock, flags);
+	spin_unlock(&tp->tx_lock);
 
 	usb_autopm_put_interface_async(tp->intf);
 
@@ -1073,7 +1100,7 @@ static void write_bulk_callback(struct urb *urb)
 		return;
 
 	if (!skb_queue_empty(&tp->tx_queue))
-		schedule_delayed_work(&tp->schedule, 0);
+		tasklet_schedule(&tp->tl);
 }
 
 static void intr_callback(struct urb *urb)
@@ -1268,6 +1295,9 @@ static struct tx_agg *r8152_get_tx_agg(struct r8152 *tp)
 	struct tx_agg *agg = NULL;
 	unsigned long flags;
 
+	if (list_empty(&tp->tx_free))
+		return NULL;
+
 	spin_lock_irqsave(&tp->tx_lock, flags);
 	if (!list_empty(&tp->tx_free)) {
 		struct list_head *cursor;
@@ -1281,24 +1311,130 @@ static struct tx_agg *r8152_get_tx_agg(struct r8152 *tp)
 	return agg;
 }
 
-static void
-r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb)
+static inline __be16 get_protocol(struct sk_buff *skb)
 {
-	memset(desc, 0, sizeof(*desc));
+	__be16 protocol;
 
-	desc->opts1 = cpu_to_le32((skb->len & TX_LEN_MASK) | TX_FS | TX_LS);
+	if (skb->protocol == htons(ETH_P_8021Q))
+		protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
+	else
+		protocol = skb->protocol;
+
+	return protocol;
+}
+
+/*
+ * r8152_csum_workaround()
+ * The hw limites the value the transport offset. When the offset is out of the
+ * range, calculate the checksum by sw.
+ */
+static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
+				  struct sk_buff_head *list)
+{
+	if (skb_shinfo(skb)->gso_size) {
+		netdev_features_t features = tp->netdev->features;
+		struct sk_buff_head seg_list;
+		struct sk_buff *segs, *nskb;
+
+		features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
+		segs = skb_gso_segment(skb, features);
+		if (IS_ERR(segs) || !segs)
+			goto drop;
+
+		__skb_queue_head_init(&seg_list);
+
+		do {
+			nskb = segs;
+			segs = segs->next;
+			nskb->next = NULL;
+			__skb_queue_tail(&seg_list, nskb);
+		} while (segs);
+
+		skb_queue_splice(&seg_list, list);
+		dev_kfree_skb(skb);
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		if (skb_checksum_help(skb) < 0)
+			goto drop;
+
+		__skb_queue_head(list, skb);
+	} else {
+		struct net_device_stats *stats;
+
+drop:
+		stats = &tp->netdev->stats;
+		stats->tx_dropped++;
+		dev_kfree_skb(skb);
+	}
+}
+
+/*
+ * msdn_giant_send_check()
+ * According to the document of microsoft, the TCP Pseudo Header excludes the
+ * packet length for IPv6 TCP large packets.
+ */
+static int msdn_giant_send_check(struct sk_buff *skb)
+{
+	const struct ipv6hdr *ipv6h;
+	struct tcphdr *th;
+
+	ipv6h = ipv6_hdr(skb);
+	th = tcp_hdr(skb);
+
+	th->check = 0;
+	th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
+
+	return 0;
+}
+
+static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc,
+			 struct sk_buff *skb, u32 len, u32 transport_offset)
+{
+	u32 mss = skb_shinfo(skb)->gso_size;
+	u32 opts1, opts2 = 0;
+	int ret = TX_CSUM_SUCCESS;
+
+	WARN_ON_ONCE(len > TX_LEN_MAX);
+
+	opts1 = len | TX_FS | TX_LS;
+
+	if (mss) {
+		if (transport_offset > GTTCPHO_MAX) {
+			netif_warn(tp, tx_err, tp->netdev,
+				   "Invalid transport offset 0x%x for TSO\n",
+				   transport_offset);
+			ret = TX_CSUM_TSO;
+			goto unavailable;
+		}
+
+		switch (get_protocol(skb)) {
+		case htons(ETH_P_IP):
+			opts1 |= GTSENDV4;
+			break;
+
+		case htons(ETH_P_IPV6):
+			opts1 |= GTSENDV6;
+			msdn_giant_send_check(skb);
+			break;
 
-	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		__be16 protocol;
+		default:
+			WARN_ON_ONCE(1);
+			break;
+		}
+
+		opts1 |= transport_offset << GTTCPHO_SHIFT;
+		opts2 |= min(mss, MSS_MAX) << MSS_SHIFT;
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		u8 ip_protocol;
-		u32 opts2 = 0;
 
-		if (skb->protocol == htons(ETH_P_8021Q))
-			protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
-		else
-			protocol = skb->protocol;
+		if (transport_offset > TCPHO_MAX) {
+			netif_warn(tp, tx_err, tp->netdev,
+				   "Invalid transport offset 0x%x\n",
+				   transport_offset);
+			ret = TX_CSUM_NONE;
+			goto unavailable;
+		}
 
-		switch (protocol) {
+		switch (get_protocol(skb)) {
 		case htons(ETH_P_IP):
 			opts2 |= IPV4_CS;
 			ip_protocol = ip_hdr(skb)->protocol;
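msdn_giant_send_check() above implements the large-send convention the
comment cites: for IPv6 TSO the TCP checksum field must be seeded with the
pseudo-header sum computed over the addresses and protocol only, with the
length left out, because the hardware folds in each segment's length
itself. Since tcp_v6_check() returns the folded, inverted ones'-complement
checksum, taking ~ of its result with len = 0 leaves exactly the uninverted
folded pseudo-header sum in th->check. A standalone userspace sketch of
that seed (tso6_seed and fold are names invented here):

#include <stdint.h>

static uint16_t fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* saddr/daddr: 16-byte IPv6 addresses in network byte order. */
static uint16_t tso6_seed(const uint8_t saddr[16], const uint8_t daddr[16])
{
	uint32_t sum = 0;
	int i;

	for (i = 0; i < 16; i += 2) {
		sum += (uint32_t)(saddr[i] << 8 | saddr[i + 1]);
		sum += (uint32_t)(daddr[i] << 8 | daddr[i + 1]);
	}
	sum += 6;	/* nexthdr = IPPROTO_TCP; length contributes 0 */

	return fold(sum);	/* folded but NOT complemented */
}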
@@ -1314,30 +1450,33 @@ r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb)
 			break;
 		}
 
-		if (ip_protocol == IPPROTO_TCP) {
+		if (ip_protocol == IPPROTO_TCP)
 			opts2 |= TCP_CS;
-			opts2 |= (skb_transport_offset(skb) & 0x7fff) << 17;
-		} else if (ip_protocol == IPPROTO_UDP) {
+		else if (ip_protocol == IPPROTO_UDP)
 			opts2 |= UDP_CS;
-		} else {
+		else
 			WARN_ON_ONCE(1);
-		}
 
-		desc->opts2 = cpu_to_le32(opts2);
+		opts2 |= transport_offset << TCPHO_SHIFT;
 	}
+
+	desc->opts2 = cpu_to_le32(opts2);
+	desc->opts1 = cpu_to_le32(opts1);
+
+unavailable:
+	return ret;
 }
 
 static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
 {
 	struct sk_buff_head skb_head, *tx_queue = &tp->tx_queue;
-	unsigned long flags;
 	int remain, ret;
 	u8 *tx_data;
 
 	__skb_queue_head_init(&skb_head);
-	spin_lock_irqsave(&tx_queue->lock, flags);
+	spin_lock(&tx_queue->lock);
 	skb_queue_splice_init(tx_queue, &skb_head);
-	spin_unlock_irqrestore(&tx_queue->lock, flags);
+	spin_unlock(&tx_queue->lock);
 
 	tx_data = agg->head;
 	agg->skb_num = agg->skb_len = 0;
@@ -1347,47 +1486,65 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
 		struct tx_desc *tx_desc;
 		struct sk_buff *skb;
 		unsigned int len;
+		u32 offset;
 
 		skb = __skb_dequeue(&skb_head);
 		if (!skb)
 			break;
 
-		remain -= sizeof(*tx_desc);
-		len = skb->len;
-		if (remain < len) {
+		len = skb->len + sizeof(*tx_desc);
+
+		if (len > remain) {
 			__skb_queue_head(&skb_head, skb);
 			break;
 		}
 
 		tx_data = tx_agg_align(tx_data);
 		tx_desc = (struct tx_desc *)tx_data;
+
+		offset = (u32)skb_transport_offset(skb);
+
+		if (r8152_tx_csum(tp, tx_desc, skb, skb->len, offset)) {
+			r8152_csum_workaround(tp, skb, &skb_head);
+			continue;
+		}
+
 		tx_data += sizeof(*tx_desc);
 
-		r8152_tx_csum(tp, tx_desc, skb);
-		memcpy(tx_data, skb->data, len);
-		agg->skb_num++;
+		len = skb->len;
+		if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
+			struct net_device_stats *stats = &tp->netdev->stats;
+
+			stats->tx_dropped++;
+			dev_kfree_skb_any(skb);
+			tx_data -= sizeof(*tx_desc);
+			continue;
+		}
+
+		tx_data += len;
 		agg->skb_len += len;
+		agg->skb_num++;
+
 		dev_kfree_skb_any(skb);
 
-		tx_data += len;
 		remain = rx_buf_sz - (int)(tx_agg_align(tx_data) - agg->head);
 	}
 
 	if (!skb_queue_empty(&skb_head)) {
-		spin_lock_irqsave(&tx_queue->lock, flags);
+		spin_lock(&tx_queue->lock);
 		skb_queue_splice(&skb_head, tx_queue);
-		spin_unlock_irqrestore(&tx_queue->lock, flags);
+		spin_unlock(&tx_queue->lock);
 	}
 
-	netif_tx_lock_bh(tp->netdev);
+	netif_tx_lock(tp->netdev);
 
 	if (netif_queue_stopped(tp->netdev) &&
 	    skb_queue_len(&tp->tx_queue) < tp->tx_qlen)
 		netif_wake_queue(tp->netdev);
 
-	netif_tx_unlock_bh(tp->netdev);
+	netif_tx_unlock(tp->netdev);
 
-	ret = usb_autopm_get_interface(tp->intf);
+	ret = usb_autopm_get_interface_async(tp->intf);
 	if (ret < 0)
 		goto out_tx_fill;
 
@@ -1395,14 +1552,45 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
 			  agg->head, (int)(tx_data - (u8 *)agg->head),
 			  (usb_complete_t)write_bulk_callback, agg);
 
-	ret = usb_submit_urb(agg->urb, GFP_KERNEL);
+	ret = usb_submit_urb(agg->urb, GFP_ATOMIC);
 	if (ret < 0)
-		usb_autopm_put_interface(tp->intf);
+		usb_autopm_put_interface_async(tp->intf);
 
 out_tx_fill:
 	return ret;
 }
 
+static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc)
+{
+	u8 checksum = CHECKSUM_NONE;
+	u32 opts2, opts3;
+
+	if (tp->version == RTL_VER_01)
+		goto return_result;
+
+	opts2 = le32_to_cpu(rx_desc->opts2);
+	opts3 = le32_to_cpu(rx_desc->opts3);
+
+	if (opts2 & RD_IPV4_CS) {
+		if (opts3 & IPF)
+			checksum = CHECKSUM_NONE;
+		else if ((opts2 & RD_UDP_CS) && (opts3 & UDPF))
+			checksum = CHECKSUM_NONE;
+		else if ((opts2 & RD_TCP_CS) && (opts3 & TCPF))
+			checksum = CHECKSUM_NONE;
+		else
+			checksum = CHECKSUM_UNNECESSARY;
+	} else if (RD_IPV6_CS) {
+		if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF))
+			checksum = CHECKSUM_UNNECESSARY;
+		else if ((opts2 & RD_TCP_CS) && !(opts3 & TCPF))
+			checksum = CHECKSUM_UNNECESSARY;
+	}
+
+return_result:
+	return checksum;
+}
+
 static void rx_bottom(struct r8152 *tp)
 {
 	unsigned long flags;
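r8152_rx_csum() above maps the hardware's verdict onto skb->ip_summed:
CHECKSUM_UNNECESSARY tells the stack the checksum was already verified in
hardware, CHECKSUM_NONE makes the stack verify it in software, and
RTL_VER_01 parts skip the feature entirely. One quirk worth noting: as
committed, the IPv6 branch tests the RD_IPV6_CS macro itself, a nonzero
constant, rather than opts2 & RD_IPV6_CS, so every non-IPv4 frame enters
that branch. A standalone sketch of the decision table, written with the
test as presumably intended:

#include <stdint.h>

#define RD_UDP_CS	(1U << 23)
#define RD_TCP_CS	(1U << 22)
#define RD_IPV6_CS	(1U << 20)
#define RD_IPV4_CS	(1U << 19)
#define IPF		(1U << 23)
#define UDPF		(1U << 22)
#define TCPF		(1U << 21)

enum { CSUM_NONE, CSUM_UNNECESSARY };

static int rx_csum_status(uint32_t opts2, uint32_t opts3)
{
	if (opts2 & RD_IPV4_CS) {
		/* Any reported failure falls back to software checking. */
		if (opts3 & IPF)
			return CSUM_NONE;
		if ((opts2 & RD_UDP_CS) && (opts3 & UDPF))
			return CSUM_NONE;
		if ((opts2 & RD_TCP_CS) && (opts3 & TCPF))
			return CSUM_NONE;
		return CSUM_UNNECESSARY;
	}
	if (opts2 & RD_IPV6_CS) {	/* the intended form of the test */
		if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF))
			return CSUM_UNNECESSARY;
		if ((opts2 & RD_TCP_CS) && !(opts3 & TCPF))
			return CSUM_UNNECESSARY;
	}
	return CSUM_NONE;
}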
@@ -1455,8 +1643,10 @@ static void rx_bottom(struct r8152 *tp)
 			skb = netdev_alloc_skb_ip_align(netdev, pkt_len);
 			if (!skb) {
 				stats->rx_dropped++;
-				break;
+				goto find_next_rx;
 			}
+
+			skb->ip_summed = r8152_rx_csum(tp, rx_desc);
 			memcpy(skb->data, rx_data, pkt_len);
 			skb_put(skb, pkt_len);
 			skb->protocol = eth_type_trans(skb, netdev);
@@ -1464,6 +1654,7 @@ static void rx_bottom(struct r8152 *tp)
 			stats->rx_packets++;
 			stats->rx_bytes += pkt_len;
 
+find_next_rx:
 			rx_data = rx_agg_align(rx_data + pkt_len + CRC_SIZE);
 			rx_desc = (struct rx_desc *)rx_data;
 			len_used = (int)(rx_data - (u8 *)agg->head);
@@ -1535,6 +1726,7 @@ static void bottom_half(unsigned long data)
 		return;
 
 	rx_bottom(tp);
+	tx_bottom(tp);
 }
 
 static
@@ -1551,16 +1743,15 @@ static void rtl_drop_queued_tx(struct r8152 *tp)
 {
 	struct net_device_stats *stats = &tp->netdev->stats;
 	struct sk_buff_head skb_head, *tx_queue = &tp->tx_queue;
-	unsigned long flags;
 	struct sk_buff *skb;
 
 	if (skb_queue_empty(tx_queue))
 		return;
 
 	__skb_queue_head_init(&skb_head);
-	spin_lock_irqsave(&tx_queue->lock, flags);
+	spin_lock_bh(&tx_queue->lock);
 	skb_queue_splice_init(tx_queue, &skb_head);
-	spin_unlock_irqrestore(&tx_queue->lock, flags);
+	spin_unlock_bh(&tx_queue->lock);
 
 	while ((skb = __skb_dequeue(&skb_head))) {
 		dev_kfree_skb(skb);
@@ -1631,7 +1822,7 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev)
 }
 
 static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
-				struct net_device *netdev)
+				      struct net_device *netdev)
 {
 	struct r8152 *tp = netdev_priv(netdev);
 
@@ -1639,13 +1830,17 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
 
 	skb_queue_tail(&tp->tx_queue, skb);
 
-	if (list_empty(&tp->tx_free) &&
-	    skb_queue_len(&tp->tx_queue) > tp->tx_qlen)
+	if (!list_empty(&tp->tx_free)) {
+		if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+			set_bit(SCHEDULE_TASKLET, &tp->flags);
+			schedule_delayed_work(&tp->schedule, 0);
+		} else {
+			usb_mark_last_busy(tp->udev);
+			tasklet_schedule(&tp->tl);
+		}
+	} else if (skb_queue_len(&tp->tx_queue) > tp->tx_qlen)
 		netif_stop_queue(netdev);
 
-	if (!list_empty(&tp->tx_free))
-		schedule_delayed_work(&tp->schedule, 0);
-
 	return NETDEV_TX_OK;
 }
 
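The rewritten xmit path above kicks the tasklet directly whenever a tx
aggregation buffer is free, which is what "raise the priority of tx" in the
cover letter refers to; only when the device may be selectively suspended
does it fall back to the delayed work, because a tasklet cannot sleep to
autoresume the device while the workqueue (next hunk) can. The shape of
that dispatch, as a hypothetical sketch with invented names:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

enum { DEMO_SUSPENDED, DEMO_SCHED_TASKLET };

struct demo_dev {
	unsigned long flags;
	struct tasklet_struct tl;
	struct delayed_work work;
};

/* Called from the atomic xmit path. */
static void demo_kick_tx(struct demo_dev *d)
{
	if (test_bit(DEMO_SUSPENDED, &d->flags)) {
		/* A tasklet must not sleep, so it cannot resume the
		 * device; hand off to the workqueue, which can. */
		set_bit(DEMO_SCHED_TASKLET, &d->flags);
		schedule_delayed_work(&d->work, 0);
	} else {
		tasklet_schedule(&d->tl);
	}
}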
@@ -2524,8 +2719,11 @@ static void rtl_work_func_t(struct work_struct *work)
 	if (test_bit(RTL8152_SET_RX_MODE, &tp->flags))
 		_rtl8152_set_rx_mode(tp->netdev);
 
-	if (tp->speed & LINK_STATUS)
-		tx_bottom(tp);
+	if (test_bit(SCHEDULE_TASKLET, &tp->flags) &&
+	    (tp->speed & LINK_STATUS)) {
+		clear_bit(SCHEDULE_TASKLET, &tp->flags);
+		tasklet_schedule(&tp->tl);
+	}
 
 	if (test_bit(PHY_RESET, &tp->flags))
 		rtl_phy_reset(tp);
@@ -3094,10 +3292,15 @@ static int rtl8152_probe(struct usb_interface *intf,
 	netdev->netdev_ops = &rtl8152_netdev_ops;
 	netdev->watchdog_timeo = RTL8152_TX_TIMEOUT;
 
-	netdev->features |= NETIF_F_IP_CSUM;
-	netdev->hw_features = NETIF_F_IP_CSUM;
+	netdev->features |= NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
+			    NETIF_F_TSO | NETIF_F_FRAGLIST |
+			    NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+	netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
+			      NETIF_F_TSO | NETIF_F_FRAGLIST |
+			      NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
 
 	SET_ETHTOOL_OPS(netdev, &ops);
+	netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
 
 	tp->mii.dev = netdev;
 	tp->mii.mdio_read = read_mii_word;