diff options
author | David S. Miller <davem@davemloft.net> | 2012-07-19 16:39:27 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2012-07-19 16:39:27 -0400 |
commit | 769162e38b91e1d300752e666260fa6c7b203fbc (patch) | |
tree | f79d3ad3231e638f13a8018358f8871c97dd41fc /drivers/net | |
parent | f31fd383821555cbd77ee83e17837f7060825395 (diff) | |
parent | c8ab13fb97472dfb112af98ccec88470f292d8e6 (diff) |
Merge branch 'net' of git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile
Diffstat (limited to 'drivers/net')
-rw-r--r-- | drivers/net/ethernet/tile/tilegx.c | 61 |
1 file changed, 34 insertions, 27 deletions
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 7f500288f6b3..4e2a1628484d 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c | |||
@@ -123,6 +123,7 @@ struct tile_net_comps { | |||
123 | 123 | ||
124 | /* The transmit wake timer for a given cpu and echannel. */ | 124 | /* The transmit wake timer for a given cpu and echannel. */ |
125 | struct tile_net_tx_wake { | 125 | struct tile_net_tx_wake { |
126 | int tx_queue_idx; | ||
126 | struct hrtimer timer; | 127 | struct hrtimer timer; |
127 | struct net_device *dev; | 128 | struct net_device *dev; |
128 | }; | 129 | }; |
@@ -573,12 +574,14 @@ static void add_comp(gxio_mpipe_equeue_t *equeue, | |||
573 | comps->comp_next++; | 574 | comps->comp_next++; |
574 | } | 575 | } |
575 | 576 | ||
576 | static void tile_net_schedule_tx_wake_timer(struct net_device *dev) | 577 | static void tile_net_schedule_tx_wake_timer(struct net_device *dev, |
578 | int tx_queue_idx) | ||
577 | { | 579 | { |
578 | struct tile_net_info *info = &__get_cpu_var(per_cpu_info); | 580 | struct tile_net_info *info = &per_cpu(per_cpu_info, tx_queue_idx); |
579 | struct tile_net_priv *priv = netdev_priv(dev); | 581 | struct tile_net_priv *priv = netdev_priv(dev); |
582 | struct tile_net_tx_wake *tx_wake = &info->tx_wake[priv->echannel]; | ||
580 | 583 | ||
581 | hrtimer_start(&info->tx_wake[priv->echannel].timer, | 584 | hrtimer_start(&tx_wake->timer, |
582 | ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL), | 585 | ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL), |
583 | HRTIMER_MODE_REL_PINNED); | 586 | HRTIMER_MODE_REL_PINNED); |
584 | } | 587 | } |
@@ -587,7 +590,7 @@ static enum hrtimer_restart tile_net_handle_tx_wake_timer(struct hrtimer *t) | |||
587 | { | 590 | { |
588 | struct tile_net_tx_wake *tx_wake = | 591 | struct tile_net_tx_wake *tx_wake = |
589 | container_of(t, struct tile_net_tx_wake, timer); | 592 | container_of(t, struct tile_net_tx_wake, timer); |
590 | netif_wake_subqueue(tx_wake->dev, smp_processor_id()); | 593 | netif_wake_subqueue(tx_wake->dev, tx_wake->tx_queue_idx); |
591 | return HRTIMER_NORESTART; | 594 | return HRTIMER_NORESTART; |
592 | } | 595 | } |
593 | 596 | ||
@@ -1218,6 +1221,7 @@ static int tile_net_open(struct net_device *dev) | |||
1218 | 1221 | ||
1219 | hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC, | 1222 | hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC, |
1220 | HRTIMER_MODE_REL); | 1223 | HRTIMER_MODE_REL); |
1224 | tx_wake->tx_queue_idx = cpu; | ||
1221 | tx_wake->timer.function = tile_net_handle_tx_wake_timer; | 1225 | tx_wake->timer.function = tile_net_handle_tx_wake_timer; |
1222 | tx_wake->dev = dev; | 1226 | tx_wake->dev = dev; |
1223 | } | 1227 | } |
@@ -1291,6 +1295,7 @@ static inline void *tile_net_frag_buf(skb_frag_t *f) | |||
1291 | * stop the queue and schedule the tx_wake timer. | 1295 | * stop the queue and schedule the tx_wake timer. |
1292 | */ | 1296 | */ |
1293 | static s64 tile_net_equeue_try_reserve(struct net_device *dev, | 1297 | static s64 tile_net_equeue_try_reserve(struct net_device *dev, |
1298 | int tx_queue_idx, | ||
1294 | struct tile_net_comps *comps, | 1299 | struct tile_net_comps *comps, |
1295 | gxio_mpipe_equeue_t *equeue, | 1300 | gxio_mpipe_equeue_t *equeue, |
1296 | int num_edescs) | 1301 | int num_edescs) |
@@ -1313,8 +1318,8 @@ static s64 tile_net_equeue_try_reserve(struct net_device *dev, | |||
1313 | } | 1318 | } |
1314 | 1319 | ||
1315 | /* Still nothing; give up and stop the queue for a short while. */ | 1320 | /* Still nothing; give up and stop the queue for a short while. */ |
1316 | netif_stop_subqueue(dev, smp_processor_id()); | 1321 | netif_stop_subqueue(dev, tx_queue_idx); |
1317 | tile_net_schedule_tx_wake_timer(dev); | 1322 | tile_net_schedule_tx_wake_timer(dev, tx_queue_idx); |
1318 | return -1; | 1323 | return -1; |
1319 | } | 1324 | } |
1320 | 1325 | ||
@@ -1328,11 +1333,12 @@ static s64 tile_net_equeue_try_reserve(struct net_device *dev, | |||
1328 | static int tso_count_edescs(struct sk_buff *skb) | 1333 | static int tso_count_edescs(struct sk_buff *skb) |
1329 | { | 1334 | { |
1330 | struct skb_shared_info *sh = skb_shinfo(skb); | 1335 | struct skb_shared_info *sh = skb_shinfo(skb); |
1331 | unsigned int data_len = skb->data_len; | 1336 | unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); |
1337 | unsigned int data_len = skb->data_len + skb->hdr_len - sh_len; | ||
1332 | unsigned int p_len = sh->gso_size; | 1338 | unsigned int p_len = sh->gso_size; |
1333 | long f_id = -1; /* id of the current fragment */ | 1339 | long f_id = -1; /* id of the current fragment */ |
1334 | long f_size = -1; /* size of the current fragment */ | 1340 | long f_size = skb->hdr_len; /* size of the current fragment */ |
1335 | long f_used = -1; /* bytes used from the current fragment */ | 1341 | long f_used = sh_len; /* bytes used from the current fragment */ |
1336 | long n; /* size of the current piece of payload */ | 1342 | long n; /* size of the current piece of payload */ |
1337 | int num_edescs = 0; | 1343 | int num_edescs = 0; |
1338 | int segment; | 1344 | int segment; |
@@ -1377,13 +1383,14 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers, | |||
1377 | struct skb_shared_info *sh = skb_shinfo(skb); | 1383 | struct skb_shared_info *sh = skb_shinfo(skb); |
1378 | struct iphdr *ih; | 1384 | struct iphdr *ih; |
1379 | struct tcphdr *th; | 1385 | struct tcphdr *th; |
1380 | unsigned int data_len = skb->data_len; | 1386 | unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); |
1387 | unsigned int data_len = skb->data_len + skb->hdr_len - sh_len; | ||
1381 | unsigned char *data = skb->data; | 1388 | unsigned char *data = skb->data; |
1382 | unsigned int ih_off, th_off, sh_len, p_len; | 1389 | unsigned int ih_off, th_off, p_len; |
1383 | unsigned int isum_seed, tsum_seed, id, seq; | 1390 | unsigned int isum_seed, tsum_seed, id, seq; |
1384 | long f_id = -1; /* id of the current fragment */ | 1391 | long f_id = -1; /* id of the current fragment */ |
1385 | long f_size = -1; /* size of the current fragment */ | 1392 | long f_size = skb->hdr_len; /* size of the current fragment */ |
1386 | long f_used = -1; /* bytes used from the current fragment */ | 1393 | long f_used = sh_len; /* bytes used from the current fragment */ |
1387 | long n; /* size of the current piece of payload */ | 1394 | long n; /* size of the current piece of payload */ |
1388 | int segment; | 1395 | int segment; |
1389 | 1396 | ||
@@ -1392,14 +1399,13 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers, | |||
1392 | th = tcp_hdr(skb); | 1399 | th = tcp_hdr(skb); |
1393 | ih_off = skb_network_offset(skb); | 1400 | ih_off = skb_network_offset(skb); |
1394 | th_off = skb_transport_offset(skb); | 1401 | th_off = skb_transport_offset(skb); |
1395 | sh_len = th_off + tcp_hdrlen(skb); | ||
1396 | p_len = sh->gso_size; | 1402 | p_len = sh->gso_size; |
1397 | 1403 | ||
1398 | /* Set up seed values for IP and TCP csum and initialize id and seq. */ | 1404 | /* Set up seed values for IP and TCP csum and initialize id and seq. */ |
1399 | isum_seed = ((0xFFFF - ih->check) + | 1405 | isum_seed = ((0xFFFF - ih->check) + |
1400 | (0xFFFF - ih->tot_len) + | 1406 | (0xFFFF - ih->tot_len) + |
1401 | (0xFFFF - ih->id)); | 1407 | (0xFFFF - ih->id)); |
1402 | tsum_seed = th->check + (0xFFFF ^ htons(skb->len)); | 1408 | tsum_seed = th->check + (0xFFFF ^ htons(sh_len + data_len)); |
1403 | id = ntohs(ih->id); | 1409 | id = ntohs(ih->id); |
1404 | seq = ntohl(th->seq); | 1410 | seq = ntohl(th->seq); |
1405 | 1411 | ||
@@ -1471,21 +1477,22 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue, | |||
1471 | { | 1477 | { |
1472 | struct tile_net_priv *priv = netdev_priv(dev); | 1478 | struct tile_net_priv *priv = netdev_priv(dev); |
1473 | struct skb_shared_info *sh = skb_shinfo(skb); | 1479 | struct skb_shared_info *sh = skb_shinfo(skb); |
1474 | unsigned int data_len = skb->data_len; | 1480 | unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); |
1481 | unsigned int data_len = skb->data_len + skb->hdr_len - sh_len; | ||
1475 | unsigned int p_len = sh->gso_size; | 1482 | unsigned int p_len = sh->gso_size; |
1476 | gxio_mpipe_edesc_t edesc_head = { { 0 } }; | 1483 | gxio_mpipe_edesc_t edesc_head = { { 0 } }; |
1477 | gxio_mpipe_edesc_t edesc_body = { { 0 } }; | 1484 | gxio_mpipe_edesc_t edesc_body = { { 0 } }; |
1478 | long f_id = -1; /* id of the current fragment */ | 1485 | long f_id = -1; /* id of the current fragment */ |
1479 | long f_size = -1; /* size of the current fragment */ | 1486 | long f_size = skb->hdr_len; /* size of the current fragment */ |
1480 | long f_used = -1; /* bytes used from the current fragment */ | 1487 | long f_used = sh_len; /* bytes used from the current fragment */ |
1488 | void *f_data = skb->data; | ||
1481 | long n; /* size of the current piece of payload */ | 1489 | long n; /* size of the current piece of payload */ |
1482 | unsigned long tx_packets = 0, tx_bytes = 0; | 1490 | unsigned long tx_packets = 0, tx_bytes = 0; |
1483 | unsigned int csum_start, sh_len; | 1491 | unsigned int csum_start; |
1484 | int segment; | 1492 | int segment; |
1485 | 1493 | ||
1486 | /* Prepare to egress the headers: set up header edesc. */ | 1494 | /* Prepare to egress the headers: set up header edesc. */ |
1487 | csum_start = skb_checksum_start_offset(skb); | 1495 | csum_start = skb_checksum_start_offset(skb); |
1488 | sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | ||
1489 | edesc_head.csum = 1; | 1496 | edesc_head.csum = 1; |
1490 | edesc_head.csum_start = csum_start; | 1497 | edesc_head.csum_start = csum_start; |
1491 | edesc_head.csum_dest = csum_start + skb->csum_offset; | 1498 | edesc_head.csum_dest = csum_start + skb->csum_offset; |
@@ -1497,7 +1504,6 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue, | |||
1497 | 1504 | ||
1498 | /* Egress all the edescs. */ | 1505 | /* Egress all the edescs. */ |
1499 | for (segment = 0; segment < sh->gso_segs; segment++) { | 1506 | for (segment = 0; segment < sh->gso_segs; segment++) { |
1500 | void *va; | ||
1501 | unsigned char *buf; | 1507 | unsigned char *buf; |
1502 | unsigned int p_used = 0; | 1508 | unsigned int p_used = 0; |
1503 | 1509 | ||
@@ -1516,10 +1522,9 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue, | |||
1516 | f_id++; | 1522 | f_id++; |
1517 | f_size = sh->frags[f_id].size; | 1523 | f_size = sh->frags[f_id].size; |
1518 | f_used = 0; | 1524 | f_used = 0; |
1525 | f_data = tile_net_frag_buf(&sh->frags[f_id]); | ||
1519 | } | 1526 | } |
1520 | 1527 | ||
1521 | va = tile_net_frag_buf(&sh->frags[f_id]) + f_used; | ||
1522 | |||
1523 | /* Use bytes from the current fragment. */ | 1528 | /* Use bytes from the current fragment. */ |
1524 | n = p_len - p_used; | 1529 | n = p_len - p_used; |
1525 | if (n > f_size - f_used) | 1530 | if (n > f_size - f_used) |
@@ -1528,7 +1533,7 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue, | |||
1528 | p_used += n; | 1533 | p_used += n; |
1529 | 1534 | ||
1530 | /* Egress a piece of the payload. */ | 1535 | /* Egress a piece of the payload. */ |
1531 | edesc_body.va = va_to_tile_io_addr(va); | 1536 | edesc_body.va = va_to_tile_io_addr(f_data) + f_used; |
1532 | edesc_body.xfer_size = n; | 1537 | edesc_body.xfer_size = n; |
1533 | edesc_body.bound = !(p_used < p_len); | 1538 | edesc_body.bound = !(p_used < p_len); |
1534 | gxio_mpipe_equeue_put_at(equeue, edesc_body, slot); | 1539 | gxio_mpipe_equeue_put_at(equeue, edesc_body, slot); |
@@ -1580,7 +1585,8 @@ static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev) | |||
1580 | local_irq_save(irqflags); | 1585 | local_irq_save(irqflags); |
1581 | 1586 | ||
1582 | /* Try to acquire a completion entry and an egress slot. */ | 1587 | /* Try to acquire a completion entry and an egress slot. */ |
1583 | slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs); | 1588 | slot = tile_net_equeue_try_reserve(dev, skb->queue_mapping, comps, |
1589 | equeue, num_edescs); | ||
1584 | if (slot < 0) { | 1590 | if (slot < 0) { |
1585 | local_irq_restore(irqflags); | 1591 | local_irq_restore(irqflags); |
1586 | return NETDEV_TX_BUSY; | 1592 | return NETDEV_TX_BUSY; |
@@ -1674,7 +1680,8 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) | |||
1674 | local_irq_save(irqflags); | 1680 | local_irq_save(irqflags); |
1675 | 1681 | ||
1676 | /* Try to acquire a completion entry and an egress slot. */ | 1682 | /* Try to acquire a completion entry and an egress slot. */ |
1677 | slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs); | 1683 | slot = tile_net_equeue_try_reserve(dev, skb->queue_mapping, comps, |
1684 | equeue, num_edescs); | ||
1678 | if (slot < 0) { | 1685 | if (slot < 0) { |
1679 | local_irq_restore(irqflags); | 1686 | local_irq_restore(irqflags); |
1680 | return NETDEV_TX_BUSY; | 1687 | return NETDEV_TX_BUSY; |
@@ -1844,7 +1851,7 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac) | |||
1844 | memcpy(dev->dev_addr, mac, 6); | 1851 | memcpy(dev->dev_addr, mac, 6); |
1845 | dev->addr_len = 6; | 1852 | dev->addr_len = 6; |
1846 | } else { | 1853 | } else { |
1847 | eth_random_addr(dev->dev_addr); | 1854 | eth_hw_addr_random(dev); |
1848 | } | 1855 | } |
1849 | 1856 | ||
1850 | /* Register the network device. */ | 1857 | /* Register the network device. */ |