author:    Alexander Duyck <alexander.h.duyck@intel.com>   2013-02-08 23:27:48 -0500
committer: Jeff Kirsher <jeffrey.t.kirsher@intel.com>      2013-02-15 04:31:19 -0500
commit:    21ba6fe19370f8008d1edd9aedd6dadd7e3fa8f8 (patch)
tree:      c6b171a04255964314d035a41c05830754a1071f /drivers/net/ethernet/intel
parent:    2c7d7724bc5b12a8bc038880d2dfe8ea496c618d (diff)
igb: Update igb to use a path similar to ixgbe to determine when to stop Tx
After reviewing the igb and ixgbe code I realized there are a few issues in
how the code is structured.  Specifically, we are not checking the size of
the buffers used in transmits, and we are not using the same value to decide
when to stop or start a Tx queue.  As such, the code is prone to bugs.

This patch makes it so that we have one value, DESC_NEEDED, that we use for
both starting and stopping the queue.  In addition, we now check the size of
the buffers used when setting up a transmit, so as to avoid a possible buffer
overrun if we were to receive a frame with a block of data larger than 32K in
skb->data.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
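The descriptor arithmetic behind the fix can be checked in isolation. Below is
a minimal, standalone userspace sketch (not driver code) using the macro
definitions the patch introduces; DIV_ROUND_UP is open-coded here the same way
<linux/kernel.h> defines it:

#include <stdio.h>

/* Open-coded copy of the kernel's DIV_ROUND_UP() */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Definitions as introduced by this patch */
#define IGB_MAX_TXD_PWR		15
#define IGB_MAX_DATA_PER_TXD	(1 << IGB_MAX_TXD_PWR)	/* 32K per descriptor */
#define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)

int main(void)
{
	/* The old code charged every buffer exactly one descriptor; a
	 * linear area above 32K actually needs more than one. */
	printf(" 2000 bytes -> %d descriptor(s)\n", TXD_USE_COUNT(2000));  /* 1 */
	printf("40000 bytes -> %d descriptor(s)\n", TXD_USE_COUNT(40000)); /* 2 */
	printf("65535 bytes -> %d descriptor(s)\n", TXD_USE_COUNT(65535)); /* 2 */
	return 0;
}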
Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h       | 13
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c  | 30
2 files changed, 28 insertions(+), 15 deletions(-)
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index afdb8bbcf6ce..d27edbc63923 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -139,8 +139,6 @@ struct vf_data_storage {
 #define IGB_RX_HDR_LEN		IGB_RXBUFFER_256
 #define IGB_RX_BUFSZ		IGB_RXBUFFER_2048
 
-/* How many Tx Descriptors do we need to call netif_wake_queue ? */
-#define IGB_TX_QUEUE_WAKE	16
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define IGB_RX_BUFFER_WRITE	16 /* Must be power of 2 */
 
@@ -169,6 +167,17 @@ enum igb_tx_flags {
 #define IGB_TX_FLAGS_VLAN_MASK		0xffff0000
 #define IGB_TX_FLAGS_VLAN_SHIFT	16
 
+/*
+ * The largest size we can write to the descriptor is 65535.  In order to
+ * maintain a power of two alignment we have to limit ourselves to 32K.
+ */
+#define IGB_MAX_TXD_PWR	15
+#define IGB_MAX_DATA_PER_TXD	(1 << IGB_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
+#define DESC_NEEDED	(MAX_SKB_FRAGS + 4)
+
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
 struct igb_tx_buffer {
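DESC_NEEDED breaks down the same way as the updated comment in
igb_xmit_frame_ring() further below: one descriptor per possible fragment,
plus the head, a context descriptor, and a two-descriptor gap. An annotated
restatement (illustration only, not part of the patch):

#include <linux/skbuff.h>	/* MAX_SKB_FRAGS */

/* Worst case for a single skb:
 *   MAX_SKB_FRAGS  - one descriptor per paged fragment
 *   + 1            - skb->data (the linear head)
 *   + 1            - context descriptor (offload metadata)
 *   + 2            - gap so the tail pointer never touches the head
 */
#define DESC_NEEDED	(MAX_SKB_FRAGS + 4)

With the usual 4K pages MAX_SKB_FRAGS is 17, so the queue is stopped whenever
fewer than 21 descriptors remain free.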
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index bf559721248c..ed79a1c53b59 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -4434,13 +4434,6 @@ static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
 	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
 }
 
-/*
- * The largest size we can write to the descriptor is 65535.  In order to
- * maintain a power of two alignment we have to limit ourselves to 32K.
- */
-#define IGB_MAX_TXD_PWR	15
-#define IGB_MAX_DATA_PER_TXD	(1<<IGB_MAX_TXD_PWR)
-
 static void igb_tx_map(struct igb_ring *tx_ring,
 		       struct igb_tx_buffer *first,
 		       const u8 hdr_len)
@@ -4609,15 +4602,25 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
 	struct igb_tx_buffer *first;
 	int tso;
 	u32 tx_flags = 0;
+	u16 count = TXD_USE_COUNT(skb_headlen(skb));
 	__be16 protocol = vlan_get_protocol(skb);
 	u8 hdr_len = 0;
 
-	/* need: 1 descriptor per page,
+	/* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
+	 *       + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
 	 *       + 2 desc gap to keep tail from touching head,
-	 *       + 1 desc for skb->data,
 	 *       + 1 desc for context descriptor,
-	 * otherwise try next time */
-	if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
+	 * otherwise try next time
+	 */
+	if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
+		unsigned short f;
+		for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+			count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+	} else {
+		count += skb_shinfo(skb)->nr_frags;
+	}
+
+	if (igb_maybe_stop_tx(tx_ring, count + 3)) {
 		/* this is a hard error */
 		return NETDEV_TX_BUSY;
 	}
@@ -4659,7 +4662,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
 	igb_tx_map(tx_ring, first, hdr_len);
 
 	/* Make sure there is space in the ring for the next send. */
-	igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
+	igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
 	return NETDEV_TX_OK;
 
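igb_maybe_stop_tx() itself is unchanged by this patch; only the descriptor
counts passed to it change. For context, a simplified sketch of the
stop-then-recheck pattern such helpers follow (the function name here is
hypothetical; igb_desc_unused() and the netif_* helpers are borrowed from the
driver and core, and the in-tree version differs in bookkeeping details):

/* Sketch only: the general stop-then-recheck pattern. */
static inline int igb_maybe_stop_tx_sketch(struct igb_ring *tx_ring,
					   const u16 size)
{
	if (igb_desc_unused(tx_ring) >= size)
		return 0;

	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

	/* Make the stop visible before re-reading the ring state; pairs
	 * with the barrier on the wake side in igb_clean_tx_irq(). */
	smp_mb();

	/* Another CPU may have cleaned descriptors in the meantime. */
	if (igb_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve: room appeared, so keep the queue running. */
	netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
	return 0;
}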
@@ -6063,9 +6066,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 		}
 	}
 
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
 	if (unlikely(total_packets &&
 		     netif_carrier_ok(tx_ring->netdev) &&
-		     igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
+		     igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
 		/* Make sure that anybody stopping the queue after this
 		 * sees the new next_to_clean.
 		 */
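Setting TX_WAKE_THRESHOLD to twice DESC_NEEDED gives the restart path some
hysteresis: the queue only wakes once there is room for two worst-case frames,
so a handful of cleaned descriptors cannot cause stop/wake ping-pong. A
simplified sketch of the wake side, assuming the standard subqueue helpers
(the in-tree code additionally checks that the adapter is not going down):

	if (netif_carrier_ok(tx_ring->netdev) &&
	    igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
		/* Pairs with the smp_mb() issued after stopping the queue. */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index))
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
	}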