about | summary | refs | log | tree | commit | diff | stats
path: root/drivers/net/tg3.c
diff options
context:
space:
mode:
authorMatt Carlson <mcarlson@broadcom.com>2009-09-01 09:09:39 -0400
committerDavid S. Miller <davem@davemloft.net>2009-09-02 03:43:44 -0400
commitfe5f5787f0866e9f883bdd90018a354f2f3defd1 (patch)
tree85412719b0f4742d1a3ebf3b9592a3b1a5fafd94 /drivers/net/tg3.c
parent89aeb3bceaa1a02651206a76a7b9dcb8f3884702 (diff)
tg3: Add TSS support
This patch exposes the additional transmit rings to the kernel and makes the necessary modifications to the transmit, open, and close paths.

Signed-off-by: Matt Carlson <mcarlson@broadcom.com>
Reviewed-by: Benjamin Li <benli@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/tg3.c')
-rw-r--r--  drivers/net/tg3.c | 54
1 file changed, 36 insertions(+), 18 deletions(-)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 53896541f7d2..2e0f4a50633f 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -704,11 +704,13 @@ static inline void tg3_netif_stop(struct tg3 *tp)
 static inline void tg3_netif_start(struct tg3 *tp)
 {
 	struct tg3_napi *tnapi = &tp->napi[0];
-	netif_wake_queue(tp->dev);
-	/* NOTE: unconditional netif_wake_queue is only appropriate
-	 * so long as all callers are assured to have free tx slots
-	 * (such as after tg3_init_hw)
+
+	/* NOTE: unconditional netif_tx_wake_all_queues is only
+	 * appropriate so long as all callers are assured to
+	 * have free tx slots (such as after tg3_init_hw)
 	 */
+	netif_tx_wake_all_queues(tp->dev);
+
 	napi_enable(&tnapi->napi);
 	tnapi->hw_status->status |= SD_STATUS_UPDATED;
 	tg3_enable_ints(tp);
@@ -4294,6 +4296,13 @@ static void tg3_tx(struct tg3_napi *tnapi)
 	struct tg3 *tp = tnapi->tp;
 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
 	u32 sw_idx = tnapi->tx_cons;
+	struct netdev_queue *txq;
+	int index = tnapi - tp->napi;
+
+	if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
+		index--;
+
+	txq = netdev_get_tx_queue(tp->dev, index);
 
 	while (sw_idx != hw_idx) {
 		struct tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
@@ -4335,13 +4344,13 @@ static void tg3_tx(struct tg3_napi *tnapi)
 	 */
 	smp_mb();
 
-	if (unlikely(netif_queue_stopped(tp->dev) &&
+	if (unlikely(netif_tx_queue_stopped(txq) &&
 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
-		netif_tx_lock(tp->dev);
-		if (netif_queue_stopped(tp->dev) &&
+		__netif_tx_lock(txq, smp_processor_id());
+		if (netif_tx_queue_stopped(txq) &&
 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
-			netif_wake_queue(tp->dev);
-		netif_tx_unlock(tp->dev);
+			netif_tx_wake_queue(txq);
+		__netif_tx_unlock(txq);
 	}
 }
 
@@ -5156,9 +5165,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
 	u32 len, entry, base_flags, mss;
 	struct skb_shared_info *sp;
 	dma_addr_t mapping;
-	struct tg3_napi *tnapi = &tp->napi[0];
+	struct tg3_napi *tnapi;
+	struct netdev_queue *txq;
 
-	len = skb_headlen(skb);
+	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
+	if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
+		tnapi++;
 
 	/* We are running in BH disabled context with netif_tx_lock
 	 * and TX reclaim runs via tp->napi.poll inside of a software
@@ -5166,8 +5179,8 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
 	 * no IRQ context deadlocks to worry about either. Rejoice!
 	 */
 	if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
-		if (!netif_queue_stopped(dev)) {
-			netif_stop_queue(dev);
+		if (!netif_tx_queue_stopped(txq)) {
+			netif_tx_stop_queue(txq);
 
 			/* This is a hard error, log it. */
 			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
@@ -5226,6 +5239,8 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
 
 	tnapi->tx_buffers[entry].skb = skb;
 
+	len = skb_headlen(skb);
+
 	tg3_set_txd(tnapi, entry, mapping, len, base_flags,
 		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
 
@@ -5255,9 +5270,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
 
 	tnapi->tx_prod = entry;
 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
-		netif_stop_queue(dev);
+		netif_tx_stop_queue(txq);
 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
-			netif_wake_queue(tp->dev);
+			netif_tx_wake_queue(txq);
 	}
 
 out_unlock:
@@ -8047,6 +8062,8 @@ static bool tg3_enable_msix(struct tg3 *tp)
 	for (i = 0; i < tp->irq_max; i++)
 		tp->napi[i].irq_vec = msix_ent[i].vector;
 
+	tp->dev->real_num_tx_queues = tp->irq_cnt - 1;
+
 	return true;
 }
 
@@ -8076,6 +8093,7 @@ defcfg:
 	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
 		tp->irq_cnt = 1;
 		tp->napi[0].irq_vec = tp->pdev->irq;
+		tp->dev->real_num_tx_queues = 1;
 	}
 }
 
@@ -8211,7 +8229,7 @@ static int tg3_open(struct net_device *dev)
 
 	tg3_full_unlock(tp);
 
-	netif_start_queue(dev);
+	netif_tx_start_all_queues(dev);
 
 	return 0;
 
@@ -8471,7 +8489,7 @@ static int tg3_close(struct net_device *dev)
 	napi_disable(&tp->napi[0].napi);
 	cancel_work_sync(&tp->reset_task);
 
-	netif_stop_queue(dev);
+	netif_tx_stop_all_queues(dev);
 
 	del_timer_sync(&tp->timer);
 
@@ -13560,7 +13578,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 		goto err_out_free_res;
 	}
 
-	dev = alloc_etherdev(sizeof(*tp));
+	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
 	if (!dev) {
 		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
 		err = -ENOMEM;