author    David S. Miller <davem@davemloft.net>  2008-07-15 06:48:19 -0400
committer David S. Miller <davem@davemloft.net>  2008-07-17 22:21:17 -0400
commit    b4c21639ab0f6df07ab7624a8c2f974936708ae5 (patch)
tree      0e7a87510e50ba42dbf766b451f4d06e6545a675 /drivers/net/niu.c
parent    92831bc395ac8390bf759775c50cb6f90c6eb03d (diff)

niu: Add TX multiqueue support.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/niu.c')
 -rw-r--r--  drivers/net/niu.c | 45 +++++++++++++++++++++++++--------------------
 1 file changed, 25 insertions(+), 20 deletions(-)
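
This converts niu from the single device-wide TX queue API to the per-queue
netdev_queue API that went into 2.6.27: the netdev is allocated with one TX
queue per hardware channel, ndo_start_xmit honors the queue the core already
chose for the skb, and all stop/wake flow control moves from net_device
helpers to their per-queue counterparts. A minimal sketch of the xmit-side
shape (hypothetical my_priv/my_ring_full names, not niu code):

	static int my_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		/* The core picked a TX queue before calling us; just look it up. */
		int i = skb_get_queue_mapping(skb);
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		struct my_priv *mp = netdev_priv(dev);

		if (my_ring_full(&mp->tx_rings[i])) {	/* hypothetical occupancy test */
			netif_tx_stop_queue(txq);	/* stops only this queue */
			return NETDEV_TX_BUSY;
		}
		/* ... post skb to ring i and kick the hardware ... */
		return NETDEV_TX_OK;
	}
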
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index de2a8a30199d..8ee7d7bb951b 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -3236,10 +3236,14 @@ static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
 
 static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
 {
+	struct netdev_queue *txq;
 	u16 pkt_cnt, tmp;
-	int cons;
+	int cons, index;
 	u64 cs;
 
+	index = (rp - np->tx_rings);
+	txq = netdev_get_tx_queue(np->dev, index);
+
 	cs = rp->tx_cs;
 	if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
 		goto out;
@@ -3262,13 +3266,13 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
 	smp_mb();
 
 out:
-	if (unlikely(netif_queue_stopped(np->dev) &&
+	if (unlikely(netif_tx_queue_stopped(txq) &&
 		     (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
-		netif_tx_lock(np->dev);
-		if (netif_queue_stopped(np->dev) &&
+		__netif_tx_lock(txq, smp_processor_id());
+		if (netif_tx_queue_stopped(txq) &&
 		    (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
-			netif_wake_queue(np->dev);
-		netif_tx_unlock(np->dev);
+			netif_tx_wake_queue(txq);
+		__netif_tx_unlock(txq);
 	}
 }
 
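
The rewritten wake path above is the standard lost-wakeup dance, now done
per queue: the unlocked check is a cheap fast path, and the re-check under
__netif_tx_lock() serializes against this queue's ndo_start_xmit, so the
producer cannot stop the queue between our test and the wake. A hedged
sketch of the same pattern in isolation (my_tx_done and ring_has_room are
hypothetical stand-ins for niu_tx_work's ring accounting):

	static void my_tx_done(struct net_device *dev, int ring)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, ring);

		/* Fast path: nothing to do if the queue was never stopped. */
		if (netif_tx_queue_stopped(txq) && ring_has_room(ring)) {
			__netif_tx_lock(txq, smp_processor_id());
			/* Re-check under the queue lock: xmit may have raced with us. */
			if (netif_tx_queue_stopped(txq) && ring_has_room(ring))
				netif_tx_wake_queue(txq);
			__netif_tx_unlock(txq);
		}
	}
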
@@ -4061,6 +4065,8 @@ static int niu_alloc_channels(struct niu *np)
 	np->num_rx_rings = parent->rxchan_per_port[port];
 	np->num_tx_rings = parent->txchan_per_port[port];
 
+	np->dev->real_num_tx_queues = np->num_tx_rings;
+
 	np->rx_rings = kzalloc(np->num_rx_rings * sizeof(struct rx_ring_info),
 			       GFP_KERNEL);
 	err = -ENOMEM;
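
Setting real_num_tx_queues before register_netdev() tells the core how many
queues to actually spread traffic across; it pairs with the
alloc_etherdev_mq() change at the bottom of this patch, which has to size
the device for the chip-wide maximum before the per-port channel split is
known. In outline (a sketch of the pairing, not a verbatim excerpt):

	dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);	/* worst case */
	/* ... probe discovers this port's share of the TX channels ... */
	dev->real_num_tx_queues = np->num_tx_rings;	/* trim to what we really use */
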
@@ -5686,7 +5692,7 @@ static int niu_open(struct net_device *dev)
 		goto out_free_irq;
 	}
 
-	netif_start_queue(dev);
+	netif_tx_start_all_queues(dev);
 
 	if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
 		netif_carrier_on(dev);
@@ -5710,7 +5716,7 @@ static void niu_full_shutdown(struct niu *np, struct net_device *dev)
 	cancel_work_sync(&np->reset_task);
 
 	niu_disable_napi(np);
-	netif_stop_queue(dev);
+	netif_tx_stop_all_queues(dev);
 
 	del_timer_sync(&np->timer);
 
@@ -5971,7 +5977,7 @@ static void niu_netif_start(struct niu *np)
 	 * so long as all callers are assured to have free tx slots
 	 * (such as after niu_init_hw).
 	 */
-	netif_wake_queue(np->dev);
+	netif_tx_wake_all_queues(np->dev);
 
 	niu_enable_napi(np);
 
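
The open/close/reset paths convert mechanically: each device-wide helper is
replaced by its all-queues counterpart, which applies the same operation to
every struct netdev_queue on the device. For reference, the substitutions
used throughout this patch:

	netif_start_queue(dev)	->	netif_tx_start_all_queues(dev)
	netif_stop_queue(dev)	->	netif_tx_stop_all_queues(dev)
	netif_wake_queue(dev)	->	netif_tx_wake_all_queues(dev)
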
@@ -6097,15 +6103,11 @@ static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
 	return ret;
 }
 
-static struct tx_ring_info *tx_ring_select(struct niu *np, struct sk_buff *skb)
-{
-	return &np->tx_rings[0];
-}
-
 static int niu_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct niu *np = netdev_priv(dev);
 	unsigned long align, headroom;
+	struct netdev_queue *txq;
 	struct tx_ring_info *rp;
 	struct tx_pkt_hdr *tp;
 	unsigned int len, nfg;
@@ -6113,10 +6115,12 @@ static int niu_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	int prod, i, tlen;
 	u64 mapping, mrk;
 
-	rp = tx_ring_select(np, skb);
+	i = skb_get_queue_mapping(skb);
+	rp = &np->tx_rings[i];
+	txq = netdev_get_tx_queue(dev, i);
 
 	if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
-		netif_stop_queue(dev);
+		netif_tx_stop_queue(txq);
 		dev_err(np->device, PFX "%s: BUG! Tx ring full when "
 			"queue awake!\n", dev->name);
 		rp->tx_errors++;
@@ -6215,9 +6219,9 @@ static int niu_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	nw64(TX_RING_KICK(rp->tx_channel), rp->wrap_bit | (prod << 3));
 
 	if (unlikely(niu_tx_avail(rp) <= (MAX_SKB_FRAGS + 1))) {
-		netif_stop_queue(dev);
+		netif_tx_stop_queue(txq);
 		if (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp))
-			netif_wake_queue(dev);
+			netif_tx_wake_queue(txq);
 	}
 
 	dev->trans_start = jiffies;
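
This keeps niu's existing end-of-xmit heuristic, now per queue: stop the
queue whenever a worst-case packet (MAX_SKB_FRAGS + 1 descriptors) might not
fit, then immediately re-test availability, since the completion path may
have freed slots after the stop. Stopping first makes the race harmless;
the worst outcome is a spurious stop/wake pair, never a queue left stopped
with room available. The matching wakeup is the niu_tx_work() change
earlier in this patch.
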
@@ -6275,7 +6279,7 @@ static int niu_change_mtu(struct net_device *dev, int new_mtu)
 	spin_unlock_irq(&np->lock);
 
 	if (!err) {
-		netif_start_queue(dev);
+		netif_tx_start_all_queues(dev);
 		if (np->link_config.loopback_mode != LOOPBACK_DISABLED)
 			netif_carrier_on(dev);
 
@@ -8532,9 +8536,10 @@ static struct net_device * __devinit niu_alloc_and_init(
 					     struct of_device *op, const struct niu_ops *ops,
 					     u8 port)
 {
-	struct net_device *dev = alloc_etherdev(sizeof(struct niu));
+	struct net_device *dev;
 	struct niu *np;
 
+	dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
 	if (!dev) {
 		dev_err(gen_dev, PFX "Etherdev alloc failed, aborting.\n");
 		return NULL;