-rw-r--r--  drivers/net/ethernet/atheros/alx/alx.h    36
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c  554
2 files changed, 420 insertions, 170 deletions
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
index 6cac919272ea..d4a409139ea2 100644
--- a/drivers/net/ethernet/atheros/alx/alx.h
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -50,6 +50,10 @@ struct alx_buffer {
 };
 
 struct alx_rx_queue {
+        struct net_device *netdev;
+        struct device *dev;
+        struct alx_napi *np;
+
         struct alx_rrd *rrd;
         dma_addr_t rrd_dma;
 
@@ -58,16 +62,26 @@ struct alx_rx_queue {
 
         struct alx_buffer *bufs;
 
+        u16 count;
         u16 write_idx, read_idx;
         u16 rrd_read_idx;
+        u16 queue_idx;
 };
 #define ALX_RX_ALLOC_THRESH 32
 
 struct alx_tx_queue {
+        struct net_device *netdev;
+        struct device *dev;
+
         struct alx_txd *tpd;
         dma_addr_t tpd_dma;
+
         struct alx_buffer *bufs;
+
+        u16 count;
         u16 write_idx, read_idx;
+        u16 queue_idx;
+        u16 p_reg, c_reg;
 };
 
 #define ALX_DEFAULT_TX_WORK 128
@@ -76,6 +90,18 @@ enum alx_device_quirks {
         ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG = BIT(0),
 };
 
+struct alx_napi {
+        struct napi_struct napi;
+        struct alx_priv *alx;
+        struct alx_rx_queue *rxq;
+        struct alx_tx_queue *txq;
+        int vec_idx;
+        u32 vec_mask;
+        char irq_lbl[IFNAMSIZ + 8];
+};
+
+#define ALX_MAX_NAPIS 8
+
 #define ALX_FLAG_USING_MSIX BIT(0)
 #define ALX_FLAG_USING_MSI BIT(1)
 
@@ -87,7 +113,6 @@ struct alx_priv {
         /* msi-x vectors */
         int num_vec;
         struct msix_entry *msix_entries;
-        char irq_lbl[IFNAMSIZ + 8];
 
         /* all descriptor memory */
         struct {
@@ -96,6 +121,11 @@ struct alx_priv {
                 unsigned int size;
         } descmem;
 
+        struct alx_napi *qnapi[ALX_MAX_NAPIS];
+        int num_txq;
+        int num_rxq;
+        int num_napi;
+
         /* protect int_mask updates */
         spinlock_t irq_lock;
         u32 int_mask;
@@ -104,10 +134,6 @@ struct alx_priv {
         unsigned int rx_ringsz;
         unsigned int rxbuf_size;
 
-        struct napi_struct napi;
-        struct alx_tx_queue txq;
-        struct alx_rx_queue rxq;
-
         struct work_struct link_check_wk;
         struct work_struct reset_wk;
 
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index eccbacd96201..c8f525574d68 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -51,16 +51,12 @@
 
 const char alx_drv_name[] = "alx";
 
-static bool msix = false;
-module_param(msix, bool, 0);
-MODULE_PARM_DESC(msix, "Enable msi-x interrupt support");
-
-static void alx_free_txbuf(struct alx_priv *alx, int entry)
+static void alx_free_txbuf(struct alx_tx_queue *txq, int entry)
 {
-        struct alx_buffer *txb = &alx->txq.bufs[entry];
+        struct alx_buffer *txb = &txq->bufs[entry];
 
         if (dma_unmap_len(txb, size)) {
-                dma_unmap_single(&alx->hw.pdev->dev,
+                dma_unmap_single(txq->dev,
                                  dma_unmap_addr(txb, dma),
                                  dma_unmap_len(txb, size),
                                  DMA_TO_DEVICE);
@@ -75,7 +71,7 @@ static void alx_free_txbuf(struct alx_priv *alx, int entry)
 
 static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
 {
-        struct alx_rx_queue *rxq = &alx->rxq;
+        struct alx_rx_queue *rxq = alx->qnapi[0]->rxq;
         struct sk_buff *skb;
         struct alx_buffer *cur_buf;
         dma_addr_t dma;
@@ -143,24 +139,42 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
         return count;
 }
 
-static inline int alx_tpd_avail(struct alx_priv *alx)
+static struct alx_tx_queue *alx_tx_queue_mapping(struct alx_priv *alx,
+                                                 struct sk_buff *skb)
 {
-        struct alx_tx_queue *txq = &alx->txq;
+        unsigned int r_idx = skb->queue_mapping;
+
+        if (r_idx >= alx->num_txq)
+                r_idx = r_idx % alx->num_txq;
 
+        return alx->qnapi[r_idx]->txq;
+}
+
+static struct netdev_queue *alx_get_tx_queue(const struct alx_tx_queue *txq)
+{
+        return netdev_get_tx_queue(txq->netdev, txq->queue_idx);
+}
+
+static inline int alx_tpd_avail(struct alx_tx_queue *txq)
+{
         if (txq->write_idx >= txq->read_idx)
-                return alx->tx_ringsz + txq->read_idx - txq->write_idx - 1;
+                return txq->count + txq->read_idx - txq->write_idx - 1;
         return txq->read_idx - txq->write_idx - 1;
 }
 
-static bool alx_clean_tx_irq(struct alx_priv *alx)
+static bool alx_clean_tx_irq(struct alx_tx_queue *txq)
 {
-        struct alx_tx_queue *txq = &alx->txq;
+        struct alx_priv *alx;
+        struct netdev_queue *tx_queue;
         u16 hw_read_idx, sw_read_idx;
         unsigned int total_bytes = 0, total_packets = 0;
         int budget = ALX_DEFAULT_TX_WORK;
 
+        alx = netdev_priv(txq->netdev);
+        tx_queue = alx_get_tx_queue(txq);
+
         sw_read_idx = txq->read_idx;
-        hw_read_idx = alx_read_mem16(&alx->hw, ALX_TPD_PRI0_CIDX);
+        hw_read_idx = alx_read_mem16(&alx->hw, txq->c_reg);
 
         if (sw_read_idx != hw_read_idx) {
                 while (sw_read_idx != hw_read_idx && budget > 0) {
@@ -173,19 +187,19 @@ static bool alx_clean_tx_irq(struct alx_priv *alx)
                                 budget--;
                         }
 
-                        alx_free_txbuf(alx, sw_read_idx);
+                        alx_free_txbuf(txq, sw_read_idx);
 
-                        if (++sw_read_idx == alx->tx_ringsz)
+                        if (++sw_read_idx == txq->count)
                                 sw_read_idx = 0;
                 }
                 txq->read_idx = sw_read_idx;
 
-                netdev_completed_queue(alx->dev, total_packets, total_bytes);
+                netdev_tx_completed_queue(tx_queue, total_packets, total_bytes);
         }
 
-        if (netif_queue_stopped(alx->dev) && netif_carrier_ok(alx->dev) &&
-            alx_tpd_avail(alx) > alx->tx_ringsz/4)
-                netif_wake_queue(alx->dev);
+        if (netif_tx_queue_stopped(tx_queue) && netif_carrier_ok(alx->dev) &&
+            alx_tpd_avail(txq) > txq->count / 4)
+                netif_tx_wake_queue(tx_queue);
 
         return sw_read_idx == hw_read_idx;
 }
@@ -200,15 +214,17 @@ static void alx_schedule_reset(struct alx_priv *alx)
         schedule_work(&alx->reset_wk);
 }
 
-static int alx_clean_rx_irq(struct alx_priv *alx, int budget)
+static int alx_clean_rx_irq(struct alx_rx_queue *rxq, int budget)
 {
-        struct alx_rx_queue *rxq = &alx->rxq;
+        struct alx_priv *alx;
         struct alx_rrd *rrd;
         struct alx_buffer *rxb;
         struct sk_buff *skb;
         u16 length, rfd_cleaned = 0;
         int work = 0;
 
+        alx = netdev_priv(rxq->netdev);
+
         while (work < budget) {
                 rrd = &rxq->rrd[rxq->rrd_read_idx];
                 if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
@@ -224,7 +240,7 @@ static int alx_clean_rx_irq(struct alx_priv *alx, int budget)
                 }
 
                 rxb = &rxq->bufs[rxq->read_idx];
-                dma_unmap_single(&alx->hw.pdev->dev,
+                dma_unmap_single(rxq->dev,
                                  dma_unmap_addr(rxb, dma),
                                  dma_unmap_len(rxb, size),
                                  DMA_FROM_DEVICE);
@@ -242,7 +258,7 @@ static int alx_clean_rx_irq(struct alx_priv *alx, int budget)
                 length = ALX_GET_FIELD(le32_to_cpu(rrd->word3),
                                        RRD_PKTLEN) - ETH_FCS_LEN;
                 skb_put(skb, length);
-                skb->protocol = eth_type_trans(skb, alx->dev);
+                skb->protocol = eth_type_trans(skb, rxq->netdev);
 
                 skb_checksum_none_assert(skb);
                 if (alx->dev->features & NETIF_F_RXCSUM &&
@@ -259,13 +275,13 @@ static int alx_clean_rx_irq(struct alx_priv *alx, int budget)
                         }
                 }
 
-                napi_gro_receive(&alx->napi, skb);
+                napi_gro_receive(&rxq->np->napi, skb);
                 work++;
 
 next_pkt:
-                if (++rxq->read_idx == alx->rx_ringsz)
+                if (++rxq->read_idx == rxq->count)
                         rxq->read_idx = 0;
-                if (++rxq->rrd_read_idx == alx->rx_ringsz)
+                if (++rxq->rrd_read_idx == rxq->count)
                         rxq->rrd_read_idx = 0;
 
                 if (++rfd_cleaned > ALX_RX_ALLOC_THRESH)
@@ -280,23 +296,26 @@ next_pkt:
 
 static int alx_poll(struct napi_struct *napi, int budget)
 {
-        struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
+        struct alx_napi *np = container_of(napi, struct alx_napi, napi);
+        struct alx_priv *alx = np->alx;
         struct alx_hw *hw = &alx->hw;
         unsigned long flags;
-        bool tx_complete;
-        int work;
+        bool tx_complete = true;
+        int work = 0;
 
-        tx_complete = alx_clean_tx_irq(alx);
-        work = alx_clean_rx_irq(alx, budget);
+        if (np->txq)
+                tx_complete = alx_clean_tx_irq(np->txq);
+        if (np->rxq)
+                work = alx_clean_rx_irq(np->rxq, budget);
 
         if (!tx_complete || work == budget)
                 return budget;
 
-        napi_complete(&alx->napi);
+        napi_complete(&np->napi);
 
         /* enable interrupt */
         if (alx->flags & ALX_FLAG_USING_MSIX) {
-                alx_mask_msix(hw, 1, false);
+                alx_mask_msix(hw, np->vec_idx, false);
         } else {
                 spin_lock_irqsave(&alx->irq_lock, flags);
                 alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
@@ -350,7 +369,7 @@ static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
                 goto out;
 
         if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) {
-                napi_schedule(&alx->napi);
+                napi_schedule(&alx->qnapi[0]->napi);
                 /* mask rx/tx interrupt, enable them when napi complete */
                 alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
                 alx_write_mem32(hw, ALX_IMR, alx->int_mask);
@@ -365,15 +384,15 @@ static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
 
 static irqreturn_t alx_intr_msix_ring(int irq, void *data)
 {
-        struct alx_priv *alx = data;
-        struct alx_hw *hw = &alx->hw;
+        struct alx_napi *np = data;
+        struct alx_hw *hw = &np->alx->hw;
 
         /* mask interrupt to ACK chip */
-        alx_mask_msix(hw, 1, true);
+        alx_mask_msix(hw, np->vec_idx, true);
         /* clear interrupt status */
-        alx_write_mem32(hw, ALX_ISR, (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0));
+        alx_write_mem32(hw, ALX_ISR, np->vec_mask);
 
-        napi_schedule(&alx->napi);
+        napi_schedule(&np->napi);
 
         return IRQ_HANDLED;
 }
@@ -424,63 +443,79 @@ static irqreturn_t alx_intr_legacy(int irq, void *data)
         return alx_intr_handle(alx, intr);
 }
 
+static const u16 txring_header_reg[] = {ALX_TPD_PRI0_ADDR_LO,
+                                        ALX_TPD_PRI1_ADDR_LO,
+                                        ALX_TPD_PRI2_ADDR_LO,
+                                        ALX_TPD_PRI3_ADDR_LO};
+
 static void alx_init_ring_ptrs(struct alx_priv *alx)
 {
         struct alx_hw *hw = &alx->hw;
         u32 addr_hi = ((u64)alx->descmem.dma) >> 32;
+        struct alx_napi *np;
+        int i;
+
+        for (i = 0; i < alx->num_napi; i++) {
+                np = alx->qnapi[i];
+                if (np->txq) {
+                        np->txq->read_idx = 0;
+                        np->txq->write_idx = 0;
+                        alx_write_mem32(hw,
+                                        txring_header_reg[np->txq->queue_idx],
+                                        np->txq->tpd_dma);
+                }
+
+                if (np->rxq) {
+                        np->rxq->read_idx = 0;
+                        np->rxq->write_idx = 0;
+                        np->rxq->rrd_read_idx = 0;
+                        alx_write_mem32(hw, ALX_RRD_ADDR_LO, np->rxq->rrd_dma);
+                        alx_write_mem32(hw, ALX_RFD_ADDR_LO, np->rxq->rfd_dma);
+                }
+        }
+
+        alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi);
+        alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz);
 
-        alx->rxq.read_idx = 0;
-        alx->rxq.write_idx = 0;
-        alx->rxq.rrd_read_idx = 0;
         alx_write_mem32(hw, ALX_RX_BASE_ADDR_HI, addr_hi);
-        alx_write_mem32(hw, ALX_RRD_ADDR_LO, alx->rxq.rrd_dma);
         alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz);
-        alx_write_mem32(hw, ALX_RFD_ADDR_LO, alx->rxq.rfd_dma);
         alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz);
         alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size);
 
-        alx->txq.read_idx = 0;
-        alx->txq.write_idx = 0;
-        alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi);
-        alx_write_mem32(hw, ALX_TPD_PRI0_ADDR_LO, alx->txq.tpd_dma);
-        alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz);
-
         /* load these pointers into the chip */
         alx_write_mem32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR);
 }
 
-static void alx_free_txring_buf(struct alx_priv *alx)
+static void alx_free_txring_buf(struct alx_tx_queue *txq)
 {
-        struct alx_tx_queue *txq = &alx->txq;
         int i;
 
         if (!txq->bufs)
                 return;
 
-        for (i = 0; i < alx->tx_ringsz; i++)
-                alx_free_txbuf(alx, i);
+        for (i = 0; i < txq->count; i++)
+                alx_free_txbuf(txq, i);
 
-        memset(txq->bufs, 0, alx->tx_ringsz * sizeof(struct alx_buffer));
-        memset(txq->tpd, 0, alx->tx_ringsz * sizeof(struct alx_txd));
+        memset(txq->bufs, 0, txq->count * sizeof(struct alx_buffer));
+        memset(txq->tpd, 0, txq->count * sizeof(struct alx_txd));
         txq->write_idx = 0;
         txq->read_idx = 0;
 
-        netdev_reset_queue(alx->dev);
+        netdev_tx_reset_queue(alx_get_tx_queue(txq));
 }
 
-static void alx_free_rxring_buf(struct alx_priv *alx)
+static void alx_free_rxring_buf(struct alx_rx_queue *rxq)
 {
-        struct alx_rx_queue *rxq = &alx->rxq;
         struct alx_buffer *cur_buf;
         u16 i;
 
-        if (rxq == NULL)
+        if (!rxq->bufs)
                 return;
 
-        for (i = 0; i < alx->rx_ringsz; i++) {
+        for (i = 0; i < rxq->count; i++) {
                 cur_buf = rxq->bufs + i;
                 if (cur_buf->skb) {
-                        dma_unmap_single(&alx->hw.pdev->dev,
+                        dma_unmap_single(rxq->dev,
                                          dma_unmap_addr(cur_buf, dma),
                                          dma_unmap_len(cur_buf, size),
                                          DMA_FROM_DEVICE);
@@ -498,8 +533,14 @@ static void alx_free_rxring_buf(struct alx_priv *alx)
 
 static void alx_free_buffers(struct alx_priv *alx)
 {
-        alx_free_txring_buf(alx);
-        alx_free_rxring_buf(alx);
+        int i;
+
+        for (i = 0; i < alx->num_txq; i++)
+                if (alx->qnapi[i] && alx->qnapi[i]->txq)
+                        alx_free_txring_buf(alx->qnapi[i]->txq);
+
+        if (alx->qnapi[0] && alx->qnapi[0]->rxq)
+                alx_free_rxring_buf(alx->qnapi[0]->rxq);
 }
 
 static int alx_reinit_rings(struct alx_priv *alx)
@@ -573,19 +614,41 @@ static int alx_set_mac_address(struct net_device *netdev, void *data)
         return 0;
 }
 
-static int alx_alloc_descriptors(struct alx_priv *alx)
+static int alx_alloc_tx_ring(struct alx_priv *alx, struct alx_tx_queue *txq,
+                             int offset)
 {
-        alx->txq.bufs = kcalloc(alx->tx_ringsz,
-                                sizeof(struct alx_buffer),
-                                GFP_KERNEL);
-        if (!alx->txq.bufs)
+        txq->bufs = kcalloc(txq->count, sizeof(struct alx_buffer), GFP_KERNEL);
+        if (!txq->bufs)
                 return -ENOMEM;
 
-        alx->rxq.bufs = kcalloc(alx->rx_ringsz,
-                                sizeof(struct alx_buffer),
-                                GFP_KERNEL);
-        if (!alx->rxq.bufs)
-                goto out_free;
+        txq->tpd = alx->descmem.virt + offset;
+        txq->tpd_dma = alx->descmem.dma + offset;
+        offset += sizeof(struct alx_txd) * txq->count;
+
+        return offset;
+}
+
+static int alx_alloc_rx_ring(struct alx_priv *alx, struct alx_rx_queue *rxq,
+                             int offset)
+{
+        rxq->bufs = kcalloc(rxq->count, sizeof(struct alx_buffer), GFP_KERNEL);
+        if (!rxq->bufs)
+                return -ENOMEM;
+
+        rxq->rrd = alx->descmem.virt + offset;
+        rxq->rrd_dma = alx->descmem.dma + offset;
+        offset += sizeof(struct alx_rrd) * rxq->count;
+
+        rxq->rfd = alx->descmem.virt + offset;
+        rxq->rfd_dma = alx->descmem.dma + offset;
+        offset += sizeof(struct alx_rfd) * rxq->count;
+
+        return offset;
+}
+
+static int alx_alloc_rings(struct alx_priv *alx)
+{
+        int i, offset = 0;
 
         /* physical tx/rx ring descriptors
          *
@@ -593,7 +656,8 @@ static int alx_alloc_descriptors(struct alx_priv *alx)
          * 4G boundary (hardware has a single register for high 32 bits
          * of addresses only)
          */
-        alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz +
+        alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz *
+                            alx->num_txq +
                             sizeof(struct alx_rrd) * alx->rx_ringsz +
                             sizeof(struct alx_rfd) * alx->rx_ringsz;
         alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev,
@@ -601,87 +665,178 @@ static int alx_alloc_descriptors(struct alx_priv *alx)
                                                 &alx->descmem.dma,
                                                 GFP_KERNEL);
         if (!alx->descmem.virt)
-                goto out_free;
-
-        alx->txq.tpd = alx->descmem.virt;
-        alx->txq.tpd_dma = alx->descmem.dma;
+                return -ENOMEM;
 
-        /* alignment requirement for next block */
+        /* alignment requirements */
         BUILD_BUG_ON(sizeof(struct alx_txd) % 8);
+        BUILD_BUG_ON(sizeof(struct alx_rrd) % 8);
 
-        alx->rxq.rrd =
-                (void *)((u8 *)alx->descmem.virt +
-                         sizeof(struct alx_txd) * alx->tx_ringsz);
-        alx->rxq.rrd_dma = alx->descmem.dma +
-                           sizeof(struct alx_txd) * alx->tx_ringsz;
+        for (i = 0; i < alx->num_txq; i++) {
+                offset = alx_alloc_tx_ring(alx, alx->qnapi[i]->txq, offset);
+                if (offset < 0) {
+                        netdev_err(alx->dev, "Allocation of tx buffer failed!\n");
+                        return -ENOMEM;
+                }
+        }
 
-        /* alignment requirement for next block */
-        BUILD_BUG_ON(sizeof(struct alx_rrd) % 8);
+        offset = alx_alloc_rx_ring(alx, alx->qnapi[0]->rxq, offset);
+        if (offset < 0) {
+                netdev_err(alx->dev, "Allocation of rx buffer failed!\n");
+                return -ENOMEM;
+        }
 
-        alx->rxq.rfd =
-                (void *)((u8 *)alx->descmem.virt +
-                         sizeof(struct alx_txd) * alx->tx_ringsz +
-                         sizeof(struct alx_rrd) * alx->rx_ringsz);
-        alx->rxq.rfd_dma = alx->descmem.dma +
-                           sizeof(struct alx_txd) * alx->tx_ringsz +
-                           sizeof(struct alx_rrd) * alx->rx_ringsz;
+        alx_reinit_rings(alx);
 
         return 0;
-out_free:
-        kfree(alx->txq.bufs);
-        kfree(alx->rxq.bufs);
-        return -ENOMEM;
 }
 
-static int alx_alloc_rings(struct alx_priv *alx)
+static void alx_free_rings(struct alx_priv *alx)
 {
-        int err;
+        int i;
 
-        err = alx_alloc_descriptors(alx);
-        if (err)
-                return err;
+        alx_free_buffers(alx);
 
-        alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
-        alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
+        for (i = 0; i < alx->num_txq; i++)
+                if (alx->qnapi[i] && alx->qnapi[i]->txq)
+                        kfree(alx->qnapi[i]->txq->bufs);
 
-        netif_napi_add(alx->dev, &alx->napi, alx_poll, 64);
+        if (alx->qnapi[0] && alx->qnapi[0]->rxq)
+                kfree(alx->qnapi[0]->rxq->bufs);
 
-        alx_reinit_rings(alx);
-        return 0;
+        if (!alx->descmem.virt)
+                dma_free_coherent(&alx->hw.pdev->dev,
+                                  alx->descmem.size,
+                                  alx->descmem.virt,
+                                  alx->descmem.dma);
 }
 
-static void alx_free_rings(struct alx_priv *alx)
+static void alx_free_napis(struct alx_priv *alx)
 {
-        netif_napi_del(&alx->napi);
-        alx_free_buffers(alx);
+        struct alx_napi *np;
+        int i;
 
-        kfree(alx->txq.bufs);
-        kfree(alx->rxq.bufs);
+        for (i = 0; i < alx->num_napi; i++) {
+                np = alx->qnapi[i];
+                if (!np)
+                        continue;
 
-        dma_free_coherent(&alx->hw.pdev->dev,
-                          alx->descmem.size,
-                          alx->descmem.virt,
-                          alx->descmem.dma);
+                netif_napi_del(&np->napi);
+                kfree(np->txq);
+                kfree(np->rxq);
+                kfree(np);
+                alx->qnapi[i] = NULL;
+        }
+}
+
+static const u16 tx_pidx_reg[] = {ALX_TPD_PRI0_PIDX, ALX_TPD_PRI1_PIDX,
+                                  ALX_TPD_PRI2_PIDX, ALX_TPD_PRI3_PIDX};
+static const u16 tx_cidx_reg[] = {ALX_TPD_PRI0_CIDX, ALX_TPD_PRI1_CIDX,
+                                  ALX_TPD_PRI2_CIDX, ALX_TPD_PRI3_CIDX};
+static const u32 tx_vect_mask[] = {ALX_ISR_TX_Q0, ALX_ISR_TX_Q1,
+                                   ALX_ISR_TX_Q2, ALX_ISR_TX_Q3};
+static const u32 rx_vect_mask[] = {ALX_ISR_RX_Q0, ALX_ISR_RX_Q1,
+                                   ALX_ISR_RX_Q2, ALX_ISR_RX_Q3,
+                                   ALX_ISR_RX_Q4, ALX_ISR_RX_Q5,
+                                   ALX_ISR_RX_Q6, ALX_ISR_RX_Q7};
+
+static int alx_alloc_napis(struct alx_priv *alx)
+{
+        struct alx_napi *np;
+        struct alx_rx_queue *rxq;
+        struct alx_tx_queue *txq;
+        int i;
+
+        alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
+
+        /* allocate alx_napi structures */
+        for (i = 0; i < alx->num_napi; i++) {
+                np = kzalloc(sizeof(struct alx_napi), GFP_KERNEL);
+                if (!np)
+                        goto err_out;
+
+                np->alx = alx;
+                netif_napi_add(alx->dev, &np->napi, alx_poll, 64);
+                alx->qnapi[i] = np;
+        }
+
+        /* allocate tx queues */
+        for (i = 0; i < alx->num_txq; i++) {
+                np = alx->qnapi[i];
+                txq = kzalloc(sizeof(*txq), GFP_KERNEL);
+                if (!txq)
+                        goto err_out;
+
+                np->txq = txq;
+                txq->p_reg = tx_pidx_reg[i];
+                txq->c_reg = tx_cidx_reg[i];
+                txq->queue_idx = i;
+                txq->count = alx->tx_ringsz;
+                txq->netdev = alx->dev;
+                txq->dev = &alx->hw.pdev->dev;
+                np->vec_mask |= tx_vect_mask[i];
+                alx->int_mask |= tx_vect_mask[i];
+        }
+
+        /* allocate rx queues */
+        np = alx->qnapi[0];
+        rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
+        if (!rxq)
+                goto err_out;
+
+        np->rxq = rxq;
+        rxq->np = alx->qnapi[0];
+        rxq->queue_idx = 0;
+        rxq->count = alx->rx_ringsz;
+        rxq->netdev = alx->dev;
+        rxq->dev = &alx->hw.pdev->dev;
+        np->vec_mask |= rx_vect_mask[0];
+        alx->int_mask |= rx_vect_mask[0];
+
+        return 0;
+
+err_out:
+        netdev_err(alx->dev, "error allocating internal structures\n");
+        alx_free_napis(alx);
+        return -ENOMEM;
 }
 
+static const int txq_vec_mapping_shift[] = {
+        0, ALX_MSI_MAP_TBL1_TXQ0_SHIFT,
+        0, ALX_MSI_MAP_TBL1_TXQ1_SHIFT,
+        1, ALX_MSI_MAP_TBL2_TXQ2_SHIFT,
+        1, ALX_MSI_MAP_TBL2_TXQ3_SHIFT,
+};
+
 static void alx_config_vector_mapping(struct alx_priv *alx)
 {
         struct alx_hw *hw = &alx->hw;
-        u32 tbl = 0;
+        u32 tbl[2] = {0, 0};
+        int i, vector, idx, shift;
 
         if (alx->flags & ALX_FLAG_USING_MSIX) {
-                tbl |= 1 << ALX_MSI_MAP_TBL1_TXQ0_SHIFT;
-                tbl |= 1 << ALX_MSI_MAP_TBL1_RXQ0_SHIFT;
+                /* tx mappings */
+                for (i = 0, vector = 1; i < alx->num_txq; i++, vector++) {
+                        idx = txq_vec_mapping_shift[i * 2];
+                        shift = txq_vec_mapping_shift[i * 2 + 1];
+                        tbl[idx] |= vector << shift;
+                }
+
+                /* rx mapping */
+                tbl[0] |= 1 << ALX_MSI_MAP_TBL1_RXQ0_SHIFT;
         }
 
-        alx_write_mem32(hw, ALX_MSI_MAP_TBL1, tbl);
-        alx_write_mem32(hw, ALX_MSI_MAP_TBL2, 0);
+        alx_write_mem32(hw, ALX_MSI_MAP_TBL1, tbl[0]);
+        alx_write_mem32(hw, ALX_MSI_MAP_TBL2, tbl[1]);
         alx_write_mem32(hw, ALX_MSI_ID_MAP, 0);
 }
 
 static bool alx_enable_msix(struct alx_priv *alx)
 {
-        int i, err, num_vec = 2;
+        int i, err, num_vec, num_txq, num_rxq;
+
+        num_txq = min_t(int, num_online_cpus(), ALX_MAX_TX_QUEUES);
+        num_rxq = 1;
+        num_vec = max_t(int, num_txq, num_rxq) + 1;
 
         alx->msix_entries = kcalloc(num_vec, sizeof(struct msix_entry),
                                     GFP_KERNEL);
@@ -701,6 +856,10 @@ static bool alx_enable_msix(struct alx_priv *alx)
         }
 
         alx->num_vec = num_vec;
+        alx->num_napi = num_vec - 1;
+        alx->num_txq = num_txq;
+        alx->num_rxq = num_rxq;
+
         return true;
 }
 
@@ -714,14 +873,29 @@ static int alx_request_msix(struct alx_priv *alx)
         if (err)
                 goto out_err;
 
-        vector++;
-        sprintf(alx->irq_lbl, "%s-TxRx-0", netdev->name);
+        for (i = 0; i < alx->num_napi; i++) {
+                struct alx_napi *np = alx->qnapi[i];
 
-        err = request_irq(alx->msix_entries[vector].vector,
-                          alx_intr_msix_ring, 0, alx->irq_lbl, alx);
+                vector++;
+
+                if (np->txq && np->rxq)
+                        sprintf(np->irq_lbl, "%s-TxRx-%u", netdev->name,
+                                np->txq->queue_idx);
+                else if (np->txq)
+                        sprintf(np->irq_lbl, "%s-tx-%u", netdev->name,
+                                np->txq->queue_idx);
+                else if (np->rxq)
+                        sprintf(np->irq_lbl, "%s-rx-%u", netdev->name,
+                                np->rxq->queue_idx);
+                else
+                        sprintf(np->irq_lbl, "%s-unused", netdev->name);
+
+                np->vec_idx = vector;
+                err = request_irq(alx->msix_entries[vector].vector,
+                                  alx_intr_msix_ring, 0, np->irq_lbl, np);
                 if (err)
                         goto out_free;
-
+        }
         return 0;
 
 out_free:
@@ -729,7 +903,8 @@ out_free:
 
         vector--;
         for (i = 0; i < vector; i++)
-                free_irq(alx->msix_entries[free_vector++].vector, alx);
+                free_irq(alx->msix_entries[free_vector++].vector,
+                         alx->qnapi[i]);
 
 out_err:
         return err;
@@ -744,6 +919,9 @@ static void alx_init_intr(struct alx_priv *alx, bool msix)
 
         if (!(alx->flags & ALX_FLAG_USING_MSIX)) {
                 alx->num_vec = 1;
+                alx->num_napi = 1;
+                alx->num_txq = 1;
+                alx->num_rxq = 1;
 
                 if (!pci_enable_msi(alx->hw.pdev))
                         alx->flags |= ALX_FLAG_USING_MSI;
@@ -799,6 +977,25 @@ static void alx_irq_disable(struct alx_priv *alx)
         }
 }
 
+static int alx_realloc_resources(struct alx_priv *alx)
+{
+        int err;
+
+        alx_free_rings(alx);
+        alx_free_napis(alx);
+        alx_disable_advanced_intr(alx);
+
+        err = alx_alloc_napis(alx);
+        if (err)
+                return err;
+
+        err = alx_alloc_rings(alx);
+        if (err)
+                return err;
+
+        return 0;
+}
+
 static int alx_request_irq(struct alx_priv *alx)
 {
         struct pci_dev *pdev = alx->hw.pdev;
@@ -815,8 +1012,9 @@ static int alx_request_irq(struct alx_priv *alx)
                         goto out;
 
                 /* msix request failed, realloc resources */
-                alx_disable_advanced_intr(alx);
-                alx_init_intr(alx, false);
+                err = alx_realloc_resources(alx);
+                if (err)
+                        goto out;
         }
 
         if (alx->flags & ALX_FLAG_USING_MSI) {
@@ -845,12 +1043,13 @@ out:
 static void alx_free_irq(struct alx_priv *alx)
 {
         struct pci_dev *pdev = alx->hw.pdev;
-        int i;
+        int i, vector = 0;
 
         if (alx->flags & ALX_FLAG_USING_MSIX) {
-                /* we have only 2 vectors without multi queue support */
-                for (i = 0; i < 2; i++)
-                        free_irq(alx->msix_entries[i].vector, alx);
+                free_irq(alx->msix_entries[vector++].vector, alx);
+                for (i = 0; i < alx->num_napi; i++)
+                        free_irq(alx->msix_entries[vector++].vector,
+                                 alx->qnapi[i]);
         } else {
                 free_irq(pdev->irq, alx);
         }
@@ -935,11 +1134,14 @@ static netdev_features_t alx_fix_features(struct net_device *netdev,
 
 static void alx_netif_stop(struct alx_priv *alx)
 {
+        int i;
+
         netif_trans_update(alx->dev);
         if (netif_carrier_ok(alx->dev)) {
                 netif_carrier_off(alx->dev);
                 netif_tx_disable(alx->dev);
-                napi_disable(&alx->napi);
+                for (i = 0; i < alx->num_napi; i++)
+                        napi_disable(&alx->qnapi[i]->napi);
         }
 }
 
@@ -1008,8 +1210,11 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
 
 static void alx_netif_start(struct alx_priv *alx)
 {
+        int i;
+
         netif_tx_wake_all_queues(alx->dev);
-        napi_enable(&alx->napi);
+        for (i = 0; i < alx->num_napi; i++)
+                napi_enable(&alx->qnapi[i]->napi);
         netif_carrier_on(alx->dev);
 }
 
@@ -1017,21 +1222,28 @@ static int __alx_open(struct alx_priv *alx, bool resume)
 {
         int err;
 
-        alx_init_intr(alx, msix);
+        alx_init_intr(alx, true);
 
         if (!resume)
                 netif_carrier_off(alx->dev);
 
-        err = alx_alloc_rings(alx);
+        err = alx_alloc_napis(alx);
         if (err)
                 goto out_disable_adv_intr;
 
+        err = alx_alloc_rings(alx);
+        if (err)
+                goto out_free_rings;
+
         alx_configure(alx);
 
         err = alx_request_irq(alx);
         if (err)
                 goto out_free_rings;
 
+        netif_set_real_num_tx_queues(alx->dev, alx->num_txq);
+        netif_set_real_num_rx_queues(alx->dev, alx->num_rxq);
+
         /* clear old interrupts */
         alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);
 
@@ -1045,6 +1257,7 @@ static int __alx_open(struct alx_priv *alx, bool resume)
 
 out_free_rings:
         alx_free_rings(alx);
+        alx_free_napis(alx);
 out_disable_adv_intr:
         alx_disable_advanced_intr(alx);
         return err;
@@ -1055,6 +1268,7 @@ static void __alx_stop(struct alx_priv *alx)
         alx_halt(alx);
         alx_free_irq(alx);
         alx_free_rings(alx);
+        alx_free_napis(alx);
 }
 
 static const char *alx_speed_desc(struct alx_hw *hw)
@@ -1237,9 +1451,8 @@ static int alx_tso(struct sk_buff *skb, struct alx_txd *first)
         return 1;
 }
 
-static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
+static int alx_map_tx_skb(struct alx_tx_queue *txq, struct sk_buff *skb)
 {
-        struct alx_tx_queue *txq = &alx->txq;
         struct alx_txd *tpd, *first_tpd;
         dma_addr_t dma;
         int maplen, f, first_idx = txq->write_idx;
@@ -1248,7 +1461,7 @@ static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
         tpd = first_tpd;
 
         if (tpd->word1 & (1 << TPD_LSO_V2_SHIFT)) {
-                if (++txq->write_idx == alx->tx_ringsz)
+                if (++txq->write_idx == txq->count)
                         txq->write_idx = 0;
 
                 tpd = &txq->tpd[txq->write_idx];
@@ -1258,9 +1471,9 @@ static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
         }
 
         maplen = skb_headlen(skb);
-        dma = dma_map_single(&alx->hw.pdev->dev, skb->data, maplen,
+        dma = dma_map_single(txq->dev, skb->data, maplen,
                              DMA_TO_DEVICE);
-        if (dma_mapping_error(&alx->hw.pdev->dev, dma))
+        if (dma_mapping_error(txq->dev, dma))
                 goto err_dma;
 
         dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
@@ -1274,16 +1487,16 @@ static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
 
                 frag = &skb_shinfo(skb)->frags[f];
 
-                if (++txq->write_idx == alx->tx_ringsz)
+                if (++txq->write_idx == txq->count)
                         txq->write_idx = 0;
                 tpd = &txq->tpd[txq->write_idx];
 
                 tpd->word1 = first_tpd->word1;
 
                 maplen = skb_frag_size(frag);
-                dma = skb_frag_dma_map(&alx->hw.pdev->dev, frag, 0,
+                dma = skb_frag_dma_map(txq->dev, frag, 0,
                                        maplen, DMA_TO_DEVICE);
-                if (dma_mapping_error(&alx->hw.pdev->dev, dma))
+                if (dma_mapping_error(txq->dev, dma))
                         goto err_dma;
                 dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
                 dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);
@@ -1296,7 +1509,7 @@ static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
         tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT);
         txq->bufs[txq->write_idx].skb = skb;
 
-        if (++txq->write_idx == alx->tx_ringsz)
+        if (++txq->write_idx == txq->count)
                 txq->write_idx = 0;
 
         return 0;
@@ -1304,23 +1517,24 @@ static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
 err_dma:
         f = first_idx;
         while (f != txq->write_idx) {
-                alx_free_txbuf(alx, f);
-                if (++f == alx->tx_ringsz)
+                alx_free_txbuf(txq, f);
+                if (++f == txq->count)
                         f = 0;
         }
         return -ENOMEM;
 }
 
-static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
-                                  struct net_device *netdev)
+static netdev_tx_t alx_start_xmit_ring(struct sk_buff *skb,
+                                       struct alx_tx_queue *txq)
 {
-        struct alx_priv *alx = netdev_priv(netdev);
-        struct alx_tx_queue *txq = &alx->txq;
+        struct alx_priv *alx;
         struct alx_txd *first;
         int tso;
 
-        if (alx_tpd_avail(alx) < alx_tpd_req(skb)) {
-                netif_stop_queue(alx->dev);
+        alx = netdev_priv(txq->netdev);
+
+        if (alx_tpd_avail(txq) < alx_tpd_req(skb)) {
+                netif_tx_stop_queue(alx_get_tx_queue(txq));
                 goto drop;
         }
 
@@ -1333,17 +1547,17 @@ static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
         else if (!tso && alx_tx_csum(skb, first))
                 goto drop;
 
-        if (alx_map_tx_skb(alx, skb) < 0)
+        if (alx_map_tx_skb(txq, skb) < 0)
                 goto drop;
 
-        netdev_sent_queue(alx->dev, skb->len);
+        netdev_tx_sent_queue(alx_get_tx_queue(txq), skb->len);
 
         /* flush updates before updating hardware */
         wmb();
-        alx_write_mem16(&alx->hw, ALX_TPD_PRI0_PIDX, txq->write_idx);
+        alx_write_mem16(&alx->hw, txq->p_reg, txq->write_idx);
 
-        if (alx_tpd_avail(alx) < alx->tx_ringsz/8)
-                netif_stop_queue(alx->dev);
+        if (alx_tpd_avail(txq) < txq->count / 8)
+                netif_tx_stop_queue(alx_get_tx_queue(txq));
 
         return NETDEV_TX_OK;
 
@@ -1352,6 +1566,13 @@ drop:
         return NETDEV_TX_OK;
 }
 
+static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
+                                  struct net_device *netdev)
+{
+        struct alx_priv *alx = netdev_priv(netdev);
+        return alx_start_xmit_ring(skb, alx_tx_queue_mapping(alx, skb));
+}
+
 static void alx_tx_timeout(struct net_device *dev)
 {
         struct alx_priv *alx = netdev_priv(dev);
@@ -1409,10 +1630,12 @@ static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 static void alx_poll_controller(struct net_device *netdev)
 {
         struct alx_priv *alx = netdev_priv(netdev);
+        int i;
 
         if (alx->flags & ALX_FLAG_USING_MSIX) {
                 alx_intr_msix_misc(0, alx);
-                alx_intr_msix_ring(0, alx);
+                for (i = 0; i < alx->num_txq; i++)
+                        alx_intr_msix_ring(0, alx->qnapi[i]);
         } else if (alx->flags & ALX_FLAG_USING_MSI)
                 alx_intr_msi(0, alx);
         else
@@ -1529,7 +1752,8 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                 goto out_pci_release;
         }
 
-        netdev = alloc_etherdev(sizeof(*alx));
+        netdev = alloc_etherdev_mqs(sizeof(*alx),
+                                    ALX_MAX_TX_QUEUES, 1);
         if (!netdev) {
                 err = -ENOMEM;
                 goto out_pci_release;