author	Jay Cliburn <jacliburn@bellsouth.net>	2008-02-02 20:50:07 -0500
committer	Jeff Garzik <jeff@garzik.org>	2008-03-17 07:49:25 -0400
commit	401c0aabec4b97320f962a0161a846d230a6f7aa (patch)
tree	b33fc31e6ae60741f59ccb95848f5d3e9a7af6d3
parent	c67c9a2f11d97a545c0e8f56b2ca3e5e36566a94 (diff)
atl1: simplify tx packet descriptor
The transmit packet descriptor consists of four 32-bit words, with the upper bits of word 3 overloaded depending upon the state of its bits 3 and 4. The driver currently duplicates all word 2 and some word 3 register bit definitions unnecessarily, and it also uses a set of nested structures in its definition of the TPD without good cause.

This patch adds a lengthy comment describing the TPD, eliminates duplicate TPD bit definitions, and simplifies the TPD structure itself. It also expands the TSO check to correctly handle custom checksum versus TSO processing using the revised TPD definitions. Finally, it shortens some variable names in the transmit processing path to reduce line lengths, renames some variables to better describe their purpose (e.g., nseg versus m), and adds a comment or two to better describe what the code is doing.

Signed-off-by: Jay Cliburn <jacliburn@bellsouth.net>
Acked-by: Chris Snook <csnook@redhat.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
-rw-r--r--	drivers/net/atlx/atl1.c	| 265
-rw-r--r--	drivers/net/atlx/atl1.h	| 201
2 files changed, 246 insertions(+), 220 deletions(-)
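As a quick orientation before the diff: after this patch a single set of TPD_* masks and shifts describes word 3 of the descriptor, whose upper bits are interpreted one way when segmentation (TSO) is enabled and another way when custom checksum is enabled. The standalone C sketch below packs word 3 both ways using the mask and shift values taken from the patch; it is illustrative only, not driver code, and the header lengths, MSS, and offsets are invented example values.

/*
 * Standalone illustration (not driver code): how word 3 of the revised
 * TPD is packed for the two overloaded layouts described in the commit
 * message.  The mask/shift values are copied from the patch below; the
 * example header lengths, MSS, and offsets are made up.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* word 3 bits 0:13 (common to both forms), from atl1.h in this patch */
#define TPD_CUST_CSUM_EN_SHIFT	3
#define TPD_SEGMENT_EN_SHIFT	4
#define TPD_IP_CSUM_SHIFT	5
#define TPD_TCP_CSUM_SHIFT	6
#define TPD_IPHL_MASK		0x000F
#define TPD_IPHL_SHIFT		10

/* word 3 bits 14:31 when segmentation (TSO) is enabled */
#define TPD_TCPHDRLEN_MASK	0x000F
#define TPD_TCPHDRLEN_SHIFT	14
#define TPD_MSS_MASK		0x1FFF
#define TPD_MSS_SHIFT		19

/* word 3 bits 16:31 when custom checksum is enabled */
#define TPD_PLOADOFFSET_MASK	0x00FF
#define TPD_PLOADOFFSET_SHIFT	16
#define TPD_CCSUMOFFSET_MASK	0x00FF
#define TPD_CCSUMOFFSET_SHIFT	24

int main(void)
{
	uint32_t word3_tso = 0, word3_csum = 0;

	/* TSO form: bit 4 set; header lengths and MSS live in the upper bits */
	word3_tso |= (5 & TPD_IPHL_MASK) << TPD_IPHL_SHIFT;		/* IP hdr = 5 dwords */
	word3_tso |= (5 & TPD_TCPHDRLEN_MASK) << TPD_TCPHDRLEN_SHIFT;	/* TCP hdr = 5 dwords */
	word3_tso |= (1460 & TPD_MSS_MASK) << TPD_MSS_SHIFT;
	word3_tso |= 1 << TPD_IP_CSUM_SHIFT;
	word3_tso |= 1 << TPD_TCP_CSUM_SHIFT;
	word3_tso |= 1 << TPD_SEGMENT_EN_SHIFT;

	/* custom checksum form: bit 3 set; payload/checksum offsets instead */
	word3_csum |= (34 & TPD_PLOADOFFSET_MASK) << TPD_PLOADOFFSET_SHIFT;
	word3_csum |= (50 & TPD_CCSUMOFFSET_MASK) << TPD_CCSUMOFFSET_SHIFT;
	word3_csum |= 1 << TPD_CUST_CSUM_EN_SHIFT;

	printf("TSO word3:  0x%08" PRIx32 "\n", word3_tso);
	printf("csum word3: 0x%08" PRIx32 "\n", word3_csum);
	return 0;
}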
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 1f564f03d9d6..f4add3cafe24 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -1259,8 +1259,6 @@ static void atl1_intr_tx(struct atl1_adapter *adapter)
 			dev_kfree_skb_irq(buffer_info->skb);
 			buffer_info->skb = NULL;
 		}
-		tpd->buffer_addr = 0;
-		tpd->desc.data = 0;
 
 		if (++sw_tpd_next_to_clean == tpd_ring->count)
 			sw_tpd_next_to_clean = 0;
@@ -1282,48 +1280,69 @@ static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
 }
 
 static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
-	struct tso_param *tso)
+	struct tx_packet_desc *ptpd)
 {
-	/* We enter this function holding a spinlock. */
-	u8 ipofst;
+	/* spinlock held */
+	u8 hdr_len, ip_off;
+	u32 real_len;
 	int err;
 
 	if (skb_shinfo(skb)->gso_size) {
 		if (skb_header_cloned(skb)) {
 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
 			if (unlikely(err))
-				return err;
+				return -1;
 		}
 
 		if (skb->protocol == ntohs(ETH_P_IP)) {
 			struct iphdr *iph = ip_hdr(skb);
 
-			iph->tot_len = 0;
+			real_len = (((unsigned char *)iph - skb->data) +
+					ntohs(iph->tot_len));
+			if (real_len < skb->len)
+				pskb_trim(skb, real_len);
+			hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+			if (skb->len == hdr_len) {
+				iph->check = 0;
+				tcp_hdr(skb)->check =
+					~csum_tcpudp_magic(iph->saddr,
+					iph->daddr, tcp_hdrlen(skb),
+					IPPROTO_TCP, 0);
+				ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) <<
+					TPD_IPHL_SHIFT;
+				ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
+					TPD_TCPHDRLEN_MASK) <<
+					TPD_TCPHDRLEN_SHIFT;
+				ptpd->word3 |= 1 << TPD_IP_CSUM_SHIFT;
+				ptpd->word3 |= 1 << TPD_TCP_CSUM_SHIFT;
+				return 1;
+			}
+
 			iph->check = 0;
 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
 					iph->daddr, 0, IPPROTO_TCP, 0);
-			ipofst = skb_network_offset(skb);
-			if (ipofst != ETH_HLEN) /* 802.3 frame */
-				tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT;
-
-			tso->tsopl |= (iph->ihl &
-				TSO_PARAM_IPHL_MASK) << TSO_PARAM_IPHL_SHIFT;
-			tso->tsopl |= ((tcp_hdrlen(skb) >> 2) &
-				TSO_PARAM_TCPHDRLEN_MASK) <<
-				TSO_PARAM_TCPHDRLEN_SHIFT;
-			tso->tsopl |= (skb_shinfo(skb)->gso_size &
-				TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT;
-			tso->tsopl |= 1 << TSO_PARAM_IPCKSUM_SHIFT;
-			tso->tsopl |= 1 << TSO_PARAM_TCPCKSUM_SHIFT;
-			tso->tsopl |= 1 << TSO_PARAM_SEGMENT_SHIFT;
-			return true;
+			ip_off = (unsigned char *)iph -
+					(unsigned char *) skb_network_header(skb);
+			if (ip_off == 8) /* 802.3-SNAP frame */
+				ptpd->word3 |= 1 << TPD_ETHTYPE_SHIFT;
+			else if (ip_off != 0)
+				return -2;
+
+			ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) <<
+				TPD_IPHL_SHIFT;
+			ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
+				TPD_TCPHDRLEN_MASK) << TPD_TCPHDRLEN_SHIFT;
+			ptpd->word3 |= (skb_shinfo(skb)->gso_size &
+				TPD_MSS_MASK) << TPD_MSS_SHIFT;
+			ptpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT;
+			return 3;
 		}
 	}
 	return false;
 }
 
 static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
-	struct csum_param *csum)
+	struct tx_packet_desc *ptpd)
 {
 	u8 css, cso;
 
@@ -1335,115 +1354,116 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
1335 "payload offset not an even number\n"); 1354 "payload offset not an even number\n");
1336 return -1; 1355 return -1;
1337 } 1356 }
1338 csum->csumpl |= (cso & CSUM_PARAM_PLOADOFFSET_MASK) << 1357 ptpd->word3 |= (cso & TPD_PLOADOFFSET_MASK) <<
1339 CSUM_PARAM_PLOADOFFSET_SHIFT; 1358 TPD_PLOADOFFSET_SHIFT;
1340 csum->csumpl |= (css & CSUM_PARAM_XSUMOFFSET_MASK) << 1359 ptpd->word3 |= (css & TPD_CCSUMOFFSET_MASK) <<
1341 CSUM_PARAM_XSUMOFFSET_SHIFT; 1360 TPD_CCSUMOFFSET_SHIFT;
1342 csum->csumpl |= 1 << CSUM_PARAM_CUSTOMCKSUM_SHIFT; 1361 ptpd->word3 |= 1 << TPD_CUST_CSUM_EN_SHIFT;
1343 return true; 1362 return true;
1344 } 1363 }
1345 1364 return 0;
1346 return true;
1347} 1365}
1348 1366
1349static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, 1367static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
1350 bool tcp_seg) 1368 struct tx_packet_desc *ptpd)
1351{ 1369{
1352 /* We enter this function holding a spinlock. */ 1370 /* spinlock held */
1353 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; 1371 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
1354 struct atl1_buffer *buffer_info; 1372 struct atl1_buffer *buffer_info;
1373 u16 buf_len = skb->len;
1355 struct page *page; 1374 struct page *page;
1356 int first_buf_len = skb->len;
1357 unsigned long offset; 1375 unsigned long offset;
1358 unsigned int nr_frags; 1376 unsigned int nr_frags;
1359 unsigned int f; 1377 unsigned int f;
1360 u16 tpd_next_to_use; 1378 int retval;
1361 u16 proto_hdr_len; 1379 u16 next_to_use;
1362 u16 len12; 1380 u16 data_len;
1381 u8 hdr_len;
1363 1382
1364 first_buf_len -= skb->data_len; 1383 buf_len -= skb->data_len;
1365 nr_frags = skb_shinfo(skb)->nr_frags; 1384 nr_frags = skb_shinfo(skb)->nr_frags;
1366 tpd_next_to_use = atomic_read(&tpd_ring->next_to_use); 1385 next_to_use = atomic_read(&tpd_ring->next_to_use);
1367 buffer_info = &tpd_ring->buffer_info[tpd_next_to_use]; 1386 buffer_info = &tpd_ring->buffer_info[next_to_use];
1368 if (unlikely(buffer_info->skb)) 1387 if (unlikely(buffer_info->skb))
1369 BUG(); 1388 BUG();
1370 /* put skb in last TPD */ 1389 /* put skb in last TPD */
1371 buffer_info->skb = NULL; 1390 buffer_info->skb = NULL;
1372 1391
1373 if (tcp_seg) { 1392 retval = (ptpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
1374 /* TSO/GSO */ 1393 if (retval) {
1375 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 1394 /* TSO */
1376 buffer_info->length = proto_hdr_len; 1395 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1396 buffer_info->length = hdr_len;
1377 page = virt_to_page(skb->data); 1397 page = virt_to_page(skb->data);
1378 offset = (unsigned long)skb->data & ~PAGE_MASK; 1398 offset = (unsigned long)skb->data & ~PAGE_MASK;
1379 buffer_info->dma = pci_map_page(adapter->pdev, page, 1399 buffer_info->dma = pci_map_page(adapter->pdev, page,
1380 offset, proto_hdr_len, 1400 offset, hdr_len,
1381 PCI_DMA_TODEVICE); 1401 PCI_DMA_TODEVICE);
1382 1402
1383 if (++tpd_next_to_use == tpd_ring->count) 1403 if (++next_to_use == tpd_ring->count)
1384 tpd_next_to_use = 0; 1404 next_to_use = 0;
1385 1405
1386 if (first_buf_len > proto_hdr_len) { 1406 if (buf_len > hdr_len) {
1387 int i, m; 1407 int i, nseg;
1388 1408
1389 len12 = first_buf_len - proto_hdr_len; 1409 data_len = buf_len - hdr_len;
1390 m = (len12 + ATL1_MAX_TX_BUF_LEN - 1) / 1410 nseg = (data_len + ATL1_MAX_TX_BUF_LEN - 1) /
1391 ATL1_MAX_TX_BUF_LEN; 1411 ATL1_MAX_TX_BUF_LEN;
1392 for (i = 0; i < m; i++) { 1412 for (i = 0; i < nseg; i++) {
1393 buffer_info = 1413 buffer_info =
1394 &tpd_ring->buffer_info[tpd_next_to_use]; 1414 &tpd_ring->buffer_info[next_to_use];
1395 buffer_info->skb = NULL; 1415 buffer_info->skb = NULL;
1396 buffer_info->length = 1416 buffer_info->length =
1397 (ATL1_MAX_TX_BUF_LEN >= 1417 (ATL1_MAX_TX_BUF_LEN >=
1398 len12) ? ATL1_MAX_TX_BUF_LEN : len12; 1418 data_len) ? ATL1_MAX_TX_BUF_LEN : data_len;
1399 len12 -= buffer_info->length; 1419 data_len -= buffer_info->length;
1400 page = virt_to_page(skb->data + 1420 page = virt_to_page(skb->data +
1401 (proto_hdr_len + 1421 (hdr_len + i * ATL1_MAX_TX_BUF_LEN));
1402 i * ATL1_MAX_TX_BUF_LEN));
1403 offset = (unsigned long)(skb->data + 1422 offset = (unsigned long)(skb->data +
1404 (proto_hdr_len + 1423 (hdr_len + i * ATL1_MAX_TX_BUF_LEN)) &
1405 i * ATL1_MAX_TX_BUF_LEN)) & ~PAGE_MASK; 1424 ~PAGE_MASK;
1406 buffer_info->dma = pci_map_page(adapter->pdev, 1425 buffer_info->dma = pci_map_page(adapter->pdev,
1407 page, offset, buffer_info->length, 1426 page, offset, buffer_info->length,
1408 PCI_DMA_TODEVICE); 1427 PCI_DMA_TODEVICE);
1409 if (++tpd_next_to_use == tpd_ring->count) 1428 if (++next_to_use == tpd_ring->count)
1410 tpd_next_to_use = 0; 1429 next_to_use = 0;
1411 } 1430 }
1412 } 1431 }
1413 } else { 1432 } else {
1414 /* not TSO/GSO */ 1433 /* not TSO */
1415 buffer_info->length = first_buf_len; 1434 buffer_info->length = buf_len;
1416 page = virt_to_page(skb->data); 1435 page = virt_to_page(skb->data);
1417 offset = (unsigned long)skb->data & ~PAGE_MASK; 1436 offset = (unsigned long)skb->data & ~PAGE_MASK;
1418 buffer_info->dma = pci_map_page(adapter->pdev, page, 1437 buffer_info->dma = pci_map_page(adapter->pdev, page,
1419 offset, first_buf_len, PCI_DMA_TODEVICE); 1438 offset, buf_len, PCI_DMA_TODEVICE);
1420 if (++tpd_next_to_use == tpd_ring->count) 1439 if (++next_to_use == tpd_ring->count)
1421 tpd_next_to_use = 0; 1440 next_to_use = 0;
1422 } 1441 }
1423 1442
1424 for (f = 0; f < nr_frags; f++) { 1443 for (f = 0; f < nr_frags; f++) {
1425 struct skb_frag_struct *frag; 1444 struct skb_frag_struct *frag;
1426 u16 lenf, i, m; 1445 u16 i, nseg;
1427 1446
1428 frag = &skb_shinfo(skb)->frags[f]; 1447 frag = &skb_shinfo(skb)->frags[f];
1429 lenf = frag->size; 1448 buf_len = frag->size;
1430 1449
1431 m = (lenf + ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN; 1450 nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) /
1432 for (i = 0; i < m; i++) { 1451 ATL1_MAX_TX_BUF_LEN;
1433 buffer_info = &tpd_ring->buffer_info[tpd_next_to_use]; 1452 for (i = 0; i < nseg; i++) {
1453 buffer_info = &tpd_ring->buffer_info[next_to_use];
1434 if (unlikely(buffer_info->skb)) 1454 if (unlikely(buffer_info->skb))
1435 BUG(); 1455 BUG();
1436 buffer_info->skb = NULL; 1456 buffer_info->skb = NULL;
1437 buffer_info->length = (lenf > ATL1_MAX_TX_BUF_LEN) ? 1457 buffer_info->length = (buf_len > ATL1_MAX_TX_BUF_LEN) ?
1438 ATL1_MAX_TX_BUF_LEN : lenf; 1458 ATL1_MAX_TX_BUF_LEN : buf_len;
1439 lenf -= buffer_info->length; 1459 buf_len -= buffer_info->length;
1440 buffer_info->dma = pci_map_page(adapter->pdev, 1460 buffer_info->dma = pci_map_page(adapter->pdev,
1441 frag->page, 1461 frag->page,
1442 frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN), 1462 frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN),
1443 buffer_info->length, PCI_DMA_TODEVICE); 1463 buffer_info->length, PCI_DMA_TODEVICE);
1444 1464
1445 if (++tpd_next_to_use == tpd_ring->count) 1465 if (++next_to_use == tpd_ring->count)
1446 tpd_next_to_use = 0; 1466 next_to_use = 0;
1447 } 1467 }
1448 } 1468 }
1449 1469
@@ -1451,39 +1471,44 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
 	buffer_info->skb = skb;
 }
 
-static void atl1_tx_queue(struct atl1_adapter *adapter, int count,
-	union tpd_descr *descr)
+static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count,
+	struct tx_packet_desc *ptpd)
 {
-	/* We enter this function holding a spinlock. */
+	/* spinlock held */
 	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
-	int j;
-	u32 val;
 	struct atl1_buffer *buffer_info;
 	struct tx_packet_desc *tpd;
-	u16 tpd_next_to_use = atomic_read(&tpd_ring->next_to_use);
+	u16 j;
+	u32 val;
+	u16 next_to_use = (u16) atomic_read(&tpd_ring->next_to_use);
 
 	for (j = 0; j < count; j++) {
-		buffer_info = &tpd_ring->buffer_info[tpd_next_to_use];
-		tpd = ATL1_TPD_DESC(&adapter->tpd_ring, tpd_next_to_use);
-		tpd->desc.csum.csumpu = descr->csum.csumpu;
-		tpd->desc.csum.csumpl = descr->csum.csumpl;
-		tpd->desc.tso.tsopu = descr->tso.tsopu;
-		tpd->desc.tso.tsopl = descr->tso.tsopl;
+		buffer_info = &tpd_ring->buffer_info[next_to_use];
+		tpd = ATL1_TPD_DESC(&adapter->tpd_ring, next_to_use);
+		if (tpd != ptpd)
+			memcpy(tpd, ptpd, sizeof(struct tx_packet_desc));
 		tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
-		tpd->desc.data = descr->data;
-		tpd->desc.tso.tsopu |= (cpu_to_le16(buffer_info->length) &
-			TSO_PARAM_BUFLEN_MASK) << TSO_PARAM_BUFLEN_SHIFT;
+		tpd->word2 = (cpu_to_le16(buffer_info->length) &
+			TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT;
 
-		val = (descr->tso.tsopl >> TSO_PARAM_SEGMENT_SHIFT) &
-			TSO_PARAM_SEGMENT_MASK;
-		if (val && !j)
-			tpd->desc.tso.tsopl |= 1 << TSO_PARAM_HDRFLAG_SHIFT;
+		/*
+		 * if this is the first packet in a TSO chain, set
+		 * TPD_HDRFLAG, otherwise, clear it.
+		 */
+		val = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) &
+			TPD_SEGMENT_EN_MASK;
+		if (val) {
+			if (!j)
+				tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT;
+			else
+				tpd->word3 &= ~(1 << TPD_HDRFLAG_SHIFT);
+		}
 
 		if (j == (count - 1))
-			tpd->desc.tso.tsopl |= 1 << TSO_PARAM_EOP_SHIFT;
+			tpd->word3 |= 1 << TPD_EOP_SHIFT;
 
-		if (++tpd_next_to_use == tpd_ring->count)
-			tpd_next_to_use = 0;
+		if (++next_to_use == tpd_ring->count)
+			next_to_use = 0;
 	}
 	/*
 	 * Force memory writes to complete before letting h/w
@@ -1493,18 +1518,18 @@ static void atl1_tx_queue(struct atl1_adapter *adapter, int count,
 	 */
 	wmb();
 
-	atomic_set(&tpd_ring->next_to_use, (int)tpd_next_to_use);
+	atomic_set(&tpd_ring->next_to_use, next_to_use);
 }
 
 static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct atl1_adapter *adapter = netdev_priv(netdev);
+	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
 	int len = skb->len;
 	int tso;
 	int count = 1;
 	int ret_val;
-	u32 val;
-	union tpd_descr param;
+	struct tx_packet_desc *ptpd;
 	u16 frag_size;
 	u16 vlan_tag;
 	unsigned long flags;
@@ -1515,18 +1540,11 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 	len -= skb->data_len;
 
-	if (unlikely(skb->len == 0)) {
+	if (unlikely(skb->len <= 0)) {
 		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
 
-	param.data = 0;
-	param.tso.tsopu = 0;
-	param.tso.tsopl = 0;
-	param.csum.csumpu = 0;
-	param.csum.csumpl = 0;
-
-	/* nr_frags will be nonzero if we're doing scatter/gather (SG) */
 	nr_frags = skb_shinfo(skb)->nr_frags;
 	for (f = 0; f < nr_frags; f++) {
 		frag_size = skb_shinfo(skb)->frags[f].size;
@@ -1535,10 +1553,9 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 				ATL1_MAX_TX_BUF_LEN;
 	}
 
-	/* mss will be nonzero if we're doing segment offload (TSO/GSO) */
 	mss = skb_shinfo(skb)->gso_size;
 	if (mss) {
-		if (skb->protocol == htons(ETH_P_IP)) {
+		if (skb->protocol == ntohs(ETH_P_IP)) {
 			proto_hdr_len = (skb_transport_offset(skb) +
 					 tcp_hdrlen(skb));
 			if (unlikely(proto_hdr_len > len)) {
@@ -1567,18 +1584,20 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		return NETDEV_TX_BUSY;
 	}
 
-	param.data = 0;
+	ptpd = ATL1_TPD_DESC(tpd_ring,
+		(u16) atomic_read(&tpd_ring->next_to_use));
+	memset(ptpd, 0, sizeof(struct tx_packet_desc));
 
 	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
 		vlan_tag = vlan_tx_tag_get(skb);
 		vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
 			((vlan_tag >> 9) & 0x8);
-		param.tso.tsopl |= 1 << TSO_PARAM_INSVLAG_SHIFT;
-		param.tso.tsopu |= (vlan_tag & TSO_PARAM_VLANTAG_MASK) <<
-			TSO_PARAM_VLAN_SHIFT;
+		ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
+		ptpd->word3 |= (vlan_tag & TPD_VL_TAGGED_MASK) <<
+			TPD_VL_TAGGED_SHIFT;
 	}
 
-	tso = atl1_tso(adapter, skb, &param.tso);
+	tso = atl1_tso(adapter, skb, ptpd);
 	if (tso < 0) {
 		spin_unlock_irqrestore(&adapter->lock, flags);
 		dev_kfree_skb_any(skb);
@@ -1586,7 +1605,7 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	}
 
 	if (!tso) {
-		ret_val = atl1_tx_csum(adapter, skb, &param.csum);
+		ret_val = atl1_tx_csum(adapter, skb, ptpd);
 		if (ret_val < 0) {
 			spin_unlock_irqrestore(&adapter->lock, flags);
 			dev_kfree_skb_any(skb);
@@ -1594,13 +1613,11 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		}
 	}
 
-	val = (param.tso.tsopl >> TSO_PARAM_SEGMENT_SHIFT) &
-		TSO_PARAM_SEGMENT_MASK;
-	atl1_tx_map(adapter, skb, 1 == val);
-	atl1_tx_queue(adapter, count, &param);
-	netdev->trans_start = jiffies;
-	spin_unlock_irqrestore(&adapter->lock, flags);
+	atl1_tx_map(adapter, skb, ptpd);
+	atl1_tx_queue(adapter, count, ptpd);
 	atl1_update_mailbox(adapter);
+	spin_unlock_irqrestore(&adapter->lock, flags);
+	netdev->trans_start = jiffies;
 	return NETDEV_TX_OK;
 }
 
@@ -2759,7 +2776,7 @@ const struct ethtool_ops atl1_ethtool_ops = {
 	.get_ringparam		= atl1_get_ringparam,
 	.set_ringparam		= atl1_set_ringparam,
 	.get_pauseparam		= atl1_get_pauseparam,
 	.set_pauseparam		= atl1_set_pauseparam,
 	.get_rx_csum		= atl1_get_rx_csum,
 	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
 	.get_link		= ethtool_op_get_link,
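Before moving on to the header changes, one detail of atl1_tx_queue() in the hunks above is worth spelling out: the function copies the prototype descriptor into each ring slot and then adjusts the per-slot flags, setting TPD_HDRFLAG only on the first descriptor of a TSO chain and TPD_EOP only on the last. The standalone sketch below restates that flag logic outside the driver; the shift values are copied from the patch, while the descriptor array and count are invented purely for illustration.

/*
 * Standalone restatement of the per-descriptor flag handling in
 * atl1_tx_queue(): TPD_HDRFLAG marks the header descriptor of a TSO
 * chain, TPD_EOP marks the final descriptor of the packet.  Shift and
 * mask values are taken from the patch; everything else is invented.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define TPD_EOP_SHIFT		0
#define TPD_SEGMENT_EN_MASK	0x0001
#define TPD_SEGMENT_EN_SHIFT	4
#define TPD_HDRFLAG_SHIFT	18

int main(void)
{
	uint32_t proto_word3 = 1 << TPD_SEGMENT_EN_SHIFT;	/* a TSO packet */
	uint32_t word3[3];					/* three descriptors in the chain */
	unsigned int count = 3, j;

	for (j = 0; j < count; j++) {
		word3[j] = proto_word3;		/* copy of the prototype TPD */

		/* only the first TPD of a TSO chain carries the header flag */
		if ((word3[j] >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK) {
			if (j == 0)
				word3[j] |= 1 << TPD_HDRFLAG_SHIFT;
			else
				word3[j] &= ~(1 << TPD_HDRFLAG_SHIFT);
		}
		/* last descriptor of the packet gets end-of-packet */
		if (j == count - 1)
			word3[j] |= 1 << TPD_EOP_SHIFT;

		printf("tpd %u word3 = 0x%08" PRIx32 "\n", j, word3[j]);
	}
	return 0;
}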
diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h
index 30c5a8d72f3a..4d3d65b0cf57 100644
--- a/drivers/net/atlx/atl1.h
+++ b/drivers/net/atlx/atl1.h
@@ -452,106 +452,115 @@ struct rx_free_desc {
 /* __attribute__ ((packed)) is required */
 } __attribute__ ((packed));
 
-/* tsopu defines */
-#define TSO_PARAM_BUFLEN_MASK		0x3FFF
-#define TSO_PARAM_BUFLEN_SHIFT		0
-#define TSO_PARAM_DMAINT_MASK		0x0001
-#define TSO_PARAM_DMAINT_SHIFT		14
-#define TSO_PARAM_PKTNT_MASK		0x0001
-#define TSO_PARAM_PKTINT_SHIFT		15
-#define TSO_PARAM_VLANTAG_MASK		0xFFFF
-#define TSO_PARAM_VLAN_SHIFT		16
-
-/* tsopl defines */
-#define TSO_PARAM_EOP_MASK		0x0001
-#define TSO_PARAM_EOP_SHIFT		0
-#define TSO_PARAM_COALESCE_MASK		0x0001
-#define TSO_PARAM_COALESCE_SHIFT	1
-#define TSO_PARAM_INSVLAG_MASK		0x0001
-#define TSO_PARAM_INSVLAG_SHIFT		2
-#define TSO_PARAM_CUSTOMCKSUM_MASK	0x0001
-#define TSO_PARAM_CUSTOMCKSUM_SHIFT	3
-#define TSO_PARAM_SEGMENT_MASK		0x0001
-#define TSO_PARAM_SEGMENT_SHIFT		4
-#define TSO_PARAM_IPCKSUM_MASK		0x0001
-#define TSO_PARAM_IPCKSUM_SHIFT		5
-#define TSO_PARAM_TCPCKSUM_MASK		0x0001
-#define TSO_PARAM_TCPCKSUM_SHIFT	6
-#define TSO_PARAM_UDPCKSUM_MASK		0x0001
-#define TSO_PARAM_UDPCKSUM_SHIFT	7
-#define TSO_PARAM_VLANTAGGED_MASK	0x0001
-#define TSO_PARAM_VLANTAGGED_SHIFT	8
-#define TSO_PARAM_ETHTYPE_MASK		0x0001
-#define TSO_PARAM_ETHTYPE_SHIFT		9
-#define TSO_PARAM_IPHL_MASK		0x000F
-#define TSO_PARAM_IPHL_SHIFT		10
-#define TSO_PARAM_TCPHDRLEN_MASK	0x000F
-#define TSO_PARAM_TCPHDRLEN_SHIFT	14
-#define TSO_PARAM_HDRFLAG_MASK		0x0001
-#define TSO_PARAM_HDRFLAG_SHIFT		18
-#define TSO_PARAM_MSS_MASK		0x1FFF
-#define TSO_PARAM_MSS_SHIFT		19
-
-/* csumpu defines */
-#define CSUM_PARAM_BUFLEN_MASK		0x3FFF
-#define CSUM_PARAM_BUFLEN_SHIFT		0
-#define CSUM_PARAM_DMAINT_MASK		0x0001
-#define CSUM_PARAM_DMAINT_SHIFT		14
-#define CSUM_PARAM_PKTINT_MASK		0x0001
-#define CSUM_PARAM_PKTINT_SHIFT		15
-#define CSUM_PARAM_VALANTAG_MASK	0xFFFF
-#define CSUM_PARAM_VALAN_SHIFT		16
-
-/* csumpl defines*/
-#define CSUM_PARAM_EOP_MASK		0x0001
-#define CSUM_PARAM_EOP_SHIFT		0
-#define CSUM_PARAM_COALESCE_MASK	0x0001
-#define CSUM_PARAM_COALESCE_SHIFT	1
-#define CSUM_PARAM_INSVLAG_MASK		0x0001
-#define CSUM_PARAM_INSVLAG_SHIFT	2
-#define CSUM_PARAM_CUSTOMCKSUM_MASK	0x0001
-#define CSUM_PARAM_CUSTOMCKSUM_SHIFT	3
-#define CSUM_PARAM_SEGMENT_MASK		0x0001
-#define CSUM_PARAM_SEGMENT_SHIFT	4
-#define CSUM_PARAM_IPCKSUM_MASK		0x0001
-#define CSUM_PARAM_IPCKSUM_SHIFT	5
-#define CSUM_PARAM_TCPCKSUM_MASK	0x0001
-#define CSUM_PARAM_TCPCKSUM_SHIFT	6
-#define CSUM_PARAM_UDPCKSUM_MASK	0x0001
-#define CSUM_PARAM_UDPCKSUM_SHIFT	7
-#define CSUM_PARAM_VLANTAGGED_MASK	0x0001
-#define CSUM_PARAM_VLANTAGGED_SHIFT	8
-#define CSUM_PARAM_ETHTYPE_MASK		0x0001
-#define CSUM_PARAM_ETHTYPE_SHIFT	9
-#define CSUM_PARAM_IPHL_MASK		0x000F
-#define CSUM_PARAM_IPHL_SHIFT		10
-#define CSUM_PARAM_PLOADOFFSET_MASK	0x00FF
-#define CSUM_PARAM_PLOADOFFSET_SHIFT	16
-#define CSUM_PARAM_XSUMOFFSET_MASK	0x00FF
-#define CSUM_PARAM_XSUMOFFSET_SHIFT	24
-
-/* TPD descriptor */
-struct tso_param {
-	/* The order of these declarations is important -- don't change it */
-	u32 tsopu;	/* tso_param upper word */
-	u32 tsopl;	/* tso_param lower word */
-};
-
-struct csum_param {
-	/* The order of these declarations is important -- don't change it */
-	u32 csumpu;	/* csum_param upper word */
-	u32 csumpl;	/* csum_param lower word */
-};
+/*
+ * The L1 transmit packet descriptor is comprised of four 32-bit words.
+ *
+ *	31					0
+ *	+---------------------------------------+
+ *	|	Word 0: Buffer addr lo		|
+ *	+---------------------------------------+
+ *	|	Word 1: Buffer addr hi		|
+ *	+---------------------------------------+
+ *	|		Word 2			|
+ *	+---------------------------------------+
+ *	|		Word 3			|
+ *	+---------------------------------------+
+ *
+ * Words 0 and 1 combine to form a 64-bit buffer address.
+ *
+ * Word 2 is self explanatory in the #define block below.
+ *
+ * Word 3 has two forms, depending upon the state of bits 3 and 4.
+ * If bits 3 and 4 are both zero, then bits 14:31 are unused by the
+ * hardware.  Otherwise, if either bit 3 or 4 is set, the definition
+ * of bits 14:31 vary according to the following depiction.
+ *
+ *	0	End of packet			0	End of packet
+ *	1	Coalesce			1	Coalesce
+ *	2	Insert VLAN tag			2	Insert VLAN tag
+ *	3	Custom csum enable = 0		3	Custom csum enable = 1
+ *	4	Segment enable = 1		4	Segment enable = 0
+ *	5	Generate IP checksum		5	Generate IP checksum
+ *	6	Generate TCP checksum		6	Generate TCP checksum
+ *	7	Generate UDP checksum		7	Generate UDP checksum
+ *	8	VLAN tagged			8	VLAN tagged
+ *	9	Ethernet frame type		9	Ethernet frame type
+ *	10-+					10-+
+ *	11 |	IP hdr length (10:13)		11 |	IP hdr length (10:13)
+ *	12 |	(num 32-bit words)		12 |	(num 32-bit words)
+ *	13-+					13-+
+ *	14-+					14	Unused
+ *	15 |	TCP hdr length (14:17)		15	Unused
+ *	16 |	(num 32-bit words)		16-+
+ *	17-+					17 |
+ *	18	Header TPD flag			18 |
+ *	19-+					19 |	Payload offset
+ *	20 |					20 |	    (16:23)
+ *	21 |					21 |
+ *	22 |					22 |
+ *	23 |					23-+
+ *	24 |					24-+
+ *	25 |	MSS (19:31)			25 |
+ *	26 |					26 |
+ *	27 |					27 |	Custom csum offset
+ *	28 |					28 |	    (24:31)
+ *	29 |					29 |
+ *	30 |					30 |
+ *	31-+					31-+
+ */
 
-union tpd_descr {
-	u64 data;
-	struct csum_param csum;
-	struct tso_param tso;
-};
+/* tpd word 2 */
+#define TPD_BUFLEN_MASK			0x3FFF
+#define TPD_BUFLEN_SHIFT		0
+#define TPD_DMAINT_MASK			0x0001
+#define TPD_DMAINT_SHIFT		14
+#define TPD_PKTNT_MASK			0x0001
+#define TPD_PKTINT_SHIFT		15
+#define TPD_VLANTAG_MASK		0xFFFF
+#define TPD_VLAN_SHIFT			16
+
+/* tpd word 3 bits 0:13 */
+#define TPD_EOP_MASK			0x0001
+#define TPD_EOP_SHIFT			0
+#define TPD_COALESCE_MASK		0x0001
+#define TPD_COALESCE_SHIFT		1
+#define TPD_INS_VL_TAG_MASK		0x0001
+#define TPD_INS_VL_TAG_SHIFT		2
+#define TPD_CUST_CSUM_EN_MASK		0x0001
+#define TPD_CUST_CSUM_EN_SHIFT		3
+#define TPD_SEGMENT_EN_MASK		0x0001
+#define TPD_SEGMENT_EN_SHIFT		4
+#define TPD_IP_CSUM_MASK		0x0001
+#define TPD_IP_CSUM_SHIFT		5
+#define TPD_TCP_CSUM_MASK		0x0001
+#define TPD_TCP_CSUM_SHIFT		6
+#define TPD_UDP_CSUM_MASK		0x0001
+#define TPD_UDP_CSUM_SHIFT		7
+#define TPD_VL_TAGGED_MASK		0x0001
+#define TPD_VL_TAGGED_SHIFT		8
+#define TPD_ETHTYPE_MASK		0x0001
+#define TPD_ETHTYPE_SHIFT		9
+#define TPD_IPHL_MASK			0x000F
+#define TPD_IPHL_SHIFT			10
+
+/* tpd word 3 bits 14:31 if segment enabled */
+#define TPD_TCPHDRLEN_MASK		0x000F
+#define TPD_TCPHDRLEN_SHIFT		14
+#define TPD_HDRFLAG_MASK		0x0001
+#define TPD_HDRFLAG_SHIFT		18
+#define TPD_MSS_MASK			0x1FFF
+#define TPD_MSS_SHIFT			19
+
+/* tpd word 3 bits 16:31 if custom csum enabled */
+#define TPD_PLOADOFFSET_MASK		0x00FF
+#define TPD_PLOADOFFSET_SHIFT		16
+#define TPD_CCSUMOFFSET_MASK		0x00FF
+#define TPD_CCSUMOFFSET_SHIFT		24
 
 struct tx_packet_desc {
 	__le64 buffer_addr;
-	union tpd_descr desc;
+	__le32 word2;
+	__le32 word3;
 };
 
 /* DMA Order Settings */
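For completeness: the simplified descriptor is still the same four-word, 16-byte structure the layout comment above describes; only the nested union plumbing is gone. Below is a minimal userspace sketch of an equivalent layout, with plain fixed-width integers standing in for the kernel's __le64/__le32 little-endian types and a compile-time size check. It is an illustration under those assumptions, not the driver's actual definition.

/*
 * Userspace sketch of the simplified TPD layout: four 32-bit words,
 * i.e. 16 bytes per descriptor.  uint64_t/uint32_t stand in for the
 * kernel's __le64/__le32 types; this is not the driver's struct.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct tx_packet_desc_example {
	uint64_t buffer_addr;	/* words 0 and 1: 64-bit buffer address */
	uint32_t word2;		/* buffer length, DMA/packet interrupt, VLAN tag */
	uint32_t word3;		/* flags plus the overloaded TSO/custom-csum fields */
};

int main(void)
{
	/* four 32-bit words -> 16 bytes, matching the comment above */
	static_assert(sizeof(struct tx_packet_desc_example) == 16,
		      "TPD must be four 32-bit words");
	printf("sizeof(tx_packet_desc_example) = %zu\n",
	       sizeof(struct tx_packet_desc_example));
	return 0;
}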