author		Alexander Duyck <alexander.h.duyck@intel.com>	2009-10-27 19:50:57 -0400
committer	David S. Miller <davem@davemloft.net>	2009-10-28 06:25:58 -0400
commit		cdfd01fcc674cc1c0c7b54084d74c2b684bf67c2
tree		66db64d94aee0ebdfabea0d72520c9dec6c4d227 /drivers/net/igb
parent		2e5655e758736488abbe9c024c8cda0e367214e5
igb: cleanup igb xmit frame path
This patch cleans up the xmit frame path for igb to better handle xmit frame
errors and avoid null pointer exceptions.  It also cleans up some whitespace
issues found in the xmit frame path.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
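The null pointer the message alludes to is the descriptor pointer in
igb_tx_queue_adv(): with the old "while (count--)" loop, a zero count meant
the body that assigns tx_desc never ran, yet the tail of the function still
dereferenced it.  The patched path rejects count <= 0 in the caller and fills
the ring with a do/while, so at least one descriptor is always written.
Below is a minimal, self-contained sketch of that pattern; the struct and
function names are simplified stand-ins, not the driver's real types:

#include <stdio.h>

/* Illustrative stand-ins only -- not the real igb structures. */
struct desc { unsigned int cmd_type_len; };

struct ring {
	struct desc *descs;
	unsigned int count;		/* ring size */
	unsigned int next_to_use;
};

/*
 * Fill 'count' descriptors starting at next_to_use.  The do/while mirrors
 * the patched igb_tx_queue_adv(): because the caller has already rejected
 * count <= 0, the body runs at least once and 'd' is never NULL when the
 * final command flag is OR'ed in after the loop.
 */
static void queue_descriptors(struct ring *r, int count)
{
	struct desc *d;
	unsigned int i = r->next_to_use;

	do {
		d = &r->descs[i];
		d->cmd_type_len = 1;	/* pretend buffer setup */
		count--;
		i++;
		if (i == r->count)
			i = 0;
	} while (count > 0);

	d->cmd_type_len |= 0x80;	/* end-of-packet style flag */
	r->next_to_use = i;
}

int main(void)
{
	struct desc descs[8] = { {0} };
	struct ring r = { descs, 8, 0 };
	int count = 3;			/* descriptors the mapping step produced */

	/* Mirrors the new "if (count <= 0)" error check in the xmit path. */
	if (count <= 0)
		return 1;

	queue_descriptors(&r, count);
	printf("next_to_use = %u\n", r.next_to_use);
	return 0;
}

The related "return ++count" and "count <= 0" hunks below keep the mapping
routine and this check agreeing on what a failed mapping looks like.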
Diffstat (limited to 'drivers/net/igb')
-rw-r--r--	drivers/net/igb/igb_main.c	30
1 files changed, 15 insertions, 15 deletions
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index cb1acca9ac91..8f8b7ccc7db5 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -3245,9 +3245,9 @@ set_itr_now:
 #define IGB_TX_FLAGS_VLAN		0x00000002
 #define IGB_TX_FLAGS_TSO		0x00000004
 #define IGB_TX_FLAGS_IPV4		0x00000008
 #define IGB_TX_FLAGS_TSTAMP		0x00000010
 #define IGB_TX_FLAGS_VLAN_MASK		0xffff0000
 #define IGB_TX_FLAGS_VLAN_SHIFT	16
 
 static inline int igb_tso_adv(struct igb_ring *tx_ring,
 			      struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
@@ -3346,6 +3346,7 @@ static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
 
 	if (tx_flags & IGB_TX_FLAGS_VLAN)
 		info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
+
 	info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
 	if (skb->ip_summed == CHECKSUM_PARTIAL)
 		info |= skb_network_header_len(skb);
@@ -3462,17 +3463,17 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
 	tx_ring->buffer_info[i].skb = skb;
 	tx_ring->buffer_info[first].next_to_watch = i;
 
-	return count + 1;
+	return ++count;
 }
 
 static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
 				    int tx_flags, int count, u32 paylen,
 				    u8 hdr_len)
 {
-	union e1000_adv_tx_desc *tx_desc = NULL;
+	union e1000_adv_tx_desc *tx_desc;
 	struct igb_buffer *buffer_info;
 	u32 olinfo_status = 0, cmd_type_len;
-	unsigned int i;
+	unsigned int i = tx_ring->next_to_use;
 
 	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
 			E1000_ADVTXD_DCMD_DEXT);
@@ -3505,18 +3506,18 @@ static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
 
 	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
 
-	i = tx_ring->next_to_use;
-	while (count--) {
+	do {
 		buffer_info = &tx_ring->buffer_info[i];
 		tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
 		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
 		tx_desc->read.cmd_type_len =
 			cpu_to_le32(cmd_type_len | buffer_info->length);
 		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+		count--;
 		i++;
 		if (i == tx_ring->count)
 			i = 0;
-	}
+	} while (count > 0);
 
 	tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
 	/* Force memory writes to complete before letting h/w
@@ -3568,8 +3569,7 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
 	unsigned int first;
 	unsigned int tx_flags = 0;
 	u8 hdr_len = 0;
-	int count = 0;
-	int tso = 0;
+	int tso = 0, count;
 	union skb_shared_tx *shtx = skb_tx(skb);
 
 	/* need: 1 descriptor per page,
@@ -3587,7 +3587,7 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
 		tx_flags |= IGB_TX_FLAGS_TSTAMP;
 	}
 
-	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
+	if (vlan_tx_tag_present(skb) && adapter->vlgrp) {
 		tx_flags |= IGB_TX_FLAGS_VLAN;
 		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
 	}
@@ -3598,6 +3598,7 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
 	first = tx_ring->next_to_use;
 	if (skb_is_gso(skb)) {
 		tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
+
 		if (tso < 0) {
 			dev_kfree_skb_any(skb);
 			return NETDEV_TX_OK;
@@ -3611,12 +3612,11 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
 	tx_flags |= IGB_TX_FLAGS_CSUM;
 
 	/*
-	 * count reflects descriptors mapped, if 0 then mapping error
+	 * count reflects descriptors mapped, if 0 or less then mapping error
 	 * has occured and we need to rewind the descriptor queue
 	 */
 	count = igb_tx_map_adv(tx_ring, skb, first);
-
-	if (!count) {
+	if (count <= 0) {
 		dev_kfree_skb_any(skb);
 		tx_ring->buffer_info[first].time_stamp = 0;
 		tx_ring->next_to_use = first;