author	Alexander Duyck <alexander.h.duyck@intel.com>	2009-10-27 11:53:06 -0400
committer	David S. Miller <davem@davemloft.net>	2009-10-28 04:20:32 -0400
commit	e694e964fc1241b4981873bdccce70438d5f0394 (patch)
tree	5073282de0459052a9647ba247763c143d23ffc3 /drivers/net
parent	85ad76b2f9c4956ec90c86298b22bb35c326e772 (diff)
igb: place a pointer to the netdev struct in the ring itself
This change adds a pointer to the netdev to the ring itself. The idea is
that at some point in the future it will be possible to support multiple
netdevs from a single adapter struct.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
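For illustration only: the patch follows a common back-pointer pattern, where
each ring caches the net_device it serves, so per-ring helpers no longer need
a netdev threaded through their argument lists. Below is a minimal,
hypothetical C sketch of the idea (the stand-in structs and the report_ring()
helper are invented here; they are not the actual driver code):

	#include <stdio.h>

	/* Hypothetical, simplified stand-ins for the structs the patch
	 * touches; the real driver uses struct net_device and struct
	 * igb_ring from igb.h. */
	struct net_device {
		char name[16];
	};

	struct ring {
		struct net_device *netdev; /* back pointer, set at alloc time */
		unsigned int count;        /* descriptors in the ring */
	};

	/* With the back pointer in place, a per-ring helper needs only the
	 * ring itself; compare __igb_maybe_stop_tx() before and after. */
	static void report_ring(const struct ring *r)
	{
		printf("%s: %u descriptors\n", r->netdev->name, r->count);
	}

	int main(void)
	{
		struct net_device dev = { .name = "eth0" };
		struct ring tx_ring = { .netdev = &dev, .count = 256 };

		report_ring(&tx_ring); /* no separate netdev argument needed */
		return 0;
	}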
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/igb/igb.h		 3
-rw-r--r--	drivers/net/igb/igb_main.c	29
2 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 0c30c5e375c7..2416c12af3fe 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -175,9 +175,10 @@ struct igb_q_vector {
 
 struct igb_ring {
 	struct igb_q_vector *q_vector; /* backlink to q_vector */
-	void *desc;                    /* descriptor ring memory */
+	struct net_device *netdev;     /* back pointer to net_device */
 	struct pci_dev *pdev;          /* pci device for dma mapping */
 	dma_addr_t dma;                /* phys address of the ring */
+	void *desc;                    /* descriptor ring memory */
 	unsigned int size;             /* length of desc. ring in bytes */
 	unsigned int count;            /* number of desc. in the ring */
 	u16 next_to_use;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 00f3f2db2948..3dc8e88c5188 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -101,7 +101,6 @@ static void igb_update_phy_info(unsigned long);
 static void igb_watchdog(unsigned long);
 static void igb_watchdog_task(struct work_struct *);
 static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *,
-					   struct net_device *,
 					   struct igb_ring *);
 static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
 				      struct net_device *);
@@ -437,6 +436,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 		ring->count = adapter->tx_ring_count;
 		ring->queue_index = i;
 		ring->pdev = adapter->pdev;
+		ring->netdev = adapter->netdev;
 		/* For 82575, context index must be unique per ring. */
 		if (adapter->hw.mac.type == e1000_82575)
 			ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
@@ -447,6 +447,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 		ring->count = adapter->rx_ring_count;
 		ring->queue_index = i;
 		ring->pdev = adapter->pdev;
+		ring->netdev = adapter->netdev;
 		ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
 		ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
 		/* set flag indicating ring supports SCTP checksum offload */
@@ -3550,9 +3551,10 @@ static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
 	mmiowb();
 }
 
-static int __igb_maybe_stop_tx(struct net_device *netdev,
-			       struct igb_ring *tx_ring, int size)
+static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 {
+	struct net_device *netdev = tx_ring->netdev;
+
 	netif_stop_subqueue(netdev, tx_ring->queue_index);
 
 	/* Herbert's original patch had:
@@ -3571,19 +3573,17 @@ static int __igb_maybe_stop_tx(struct net_device *netdev,
 	return 0;
 }
 
-static int igb_maybe_stop_tx(struct net_device *netdev,
-			     struct igb_ring *tx_ring, int size)
+static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 {
 	if (igb_desc_unused(tx_ring) >= size)
 		return 0;
-	return __igb_maybe_stop_tx(netdev, tx_ring, size);
+	return __igb_maybe_stop_tx(tx_ring, size);
 }
 
 static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
-					   struct net_device *netdev,
 					   struct igb_ring *tx_ring)
 {
-	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
 	unsigned int first;
 	unsigned int tx_flags = 0;
 	u8 hdr_len = 0;
@@ -3606,7 +3606,7 @@ static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
 	 *       + 1 desc for skb->data,
 	 *       + 1 desc for context descriptor,
 	 * otherwise try next time */
-	if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
+	if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
 		/* this is a hard error */
 		return NETDEV_TX_BUSY;
 	}
@@ -3665,7 +3665,7 @@ static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
 	igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
 
 	/* Make sure there is space in the ring for the next send. */
-	igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
+	igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
 
 	return NETDEV_TX_OK;
 }
@@ -3684,7 +3684,7 @@ static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
 	 * to a flow. Right now, performance is impacted slightly negatively
 	 * if using multiple tx queues. If the stack breaks away from a
 	 * single qdisc implementation, we can look at this again. */
-	return igb_xmit_frame_ring_adv(skb, netdev, tx_ring);
+	return igb_xmit_frame_ring_adv(skb, tx_ring);
 }
 
 /**
@@ -4667,7 +4667,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 {
 	struct igb_adapter *adapter = q_vector->adapter;
 	struct igb_ring *tx_ring = q_vector->tx_ring;
-	struct net_device *netdev = adapter->netdev;
+	struct net_device *netdev = tx_ring->netdev;
 	struct e1000_hw *hw = &adapter->hw;
 	struct igb_buffer *buffer_info;
 	struct sk_buff *skb;
@@ -4841,8 +4841,8 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
 				 int *work_done, int budget)
 {
 	struct igb_adapter *adapter = q_vector->adapter;
-	struct net_device *netdev = adapter->netdev;
 	struct igb_ring *rx_ring = q_vector->rx_ring;
+	struct net_device *netdev = rx_ring->netdev;
 	struct e1000_hw *hw = &adapter->hw;
 	struct pci_dev *pdev = rx_ring->pdev;
 	union e1000_adv_rx_desc *rx_desc , *next_rxd;
@@ -5018,8 +5018,7 @@ next_desc:
 static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
 				     int cleaned_count)
 {
-	struct igb_adapter *adapter = rx_ring->q_vector->adapter;
-	struct net_device *netdev = adapter->netdev;
+	struct net_device *netdev = rx_ring->netdev;
 	union e1000_adv_rx_desc *rx_desc;
 	struct igb_buffer *buffer_info;
 	struct sk_buff *skb;
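
An aside on the call-site changes above: with the back pointer in place, code
that previously took a netdev parameter can recover the adapter via
netdev_priv(tx_ring->netdev), which is what makes dropping the extra argument
safe everywhere. Below is a hedged sketch of that container-lookup idiom; the
priv_of() helper is a hypothetical stand-in for netdev_priv(), and the layout
is deliberately simplified (the kernel places netdev private data after
struct net_device, not as an enclosing struct):

	#include <stdio.h>

	struct net_device {
		char name[16];
	};

	struct adapter {
		struct net_device dev; /* embedded first, so a cast recovers it */
		int num_tx_queues;
	};

	struct ring {
		struct net_device *netdev; /* back pointer, as in this patch */
	};

	/* Hypothetical stand-in for netdev_priv(): map a netdev back to
	 * the adapter that contains it. */
	static struct adapter *priv_of(struct net_device *dev)
	{
		return (struct adapter *)dev; /* valid: dev is the first member */
	}

	int main(void)
	{
		struct adapter a = { .dev = { .name = "eth0" }, .num_tx_queues = 4 };
		struct ring tx_ring = { .netdev = &a.dev };

		/* the ring alone is enough to reach the adapter */
		printf("%s: %d tx queues\n",
		       priv_of(tx_ring.netdev)->dev.name,
		       priv_of(tx_ring.netdev)->num_tx_queues);
		return 0;
	}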