author    Divy Le Ray <divy@chelsio.com>    2008-05-21 21:56:16 -0400
committer Jeff Garzik <jgarzik@redhat.com>  2008-05-22 06:34:10 -0400
commit    b1fb1f280d0969f47d4ef19334120f5c34e36080 (patch)
tree      96b7a022e7b060b65babd3023d7a8d298dca2104
parent    03194379a77b02df3404ec4848a50c6784e9a8a5 (diff)
cxgb3 - Fix dma mapping error path
Take potential dma mapping errors into account.

Signed-off-by: Divy Le Ray <divy@chelsio.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
-rw-r--r--  drivers/net/cxgb3/sge.c  53
1 file changed, 41 insertions(+), 12 deletions(-)
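The core of the fix is a map-then-check pattern: pci_map_single() never returns an error code, so a failed mapping has to be detected by testing the returned bus address. The standalone sketch below illustrates that pattern under the 2.6.26-era PCI DMA API this patch targets; map_rx_buf() is a hypothetical helper, not a function from the patch (pci_dma_mapping_error() later grew a device argument, and current kernels use dma_map_single()/dma_mapping_error() instead).

    /* Minimal sketch of the map-then-check pattern, assuming the
     * 2.6.26-era PCI DMA API used by this driver. map_rx_buf() is a
     * hypothetical helper, not a function from the patch. */
    #include <linux/compiler.h>
    #include <linux/errno.h>
    #include <linux/pci.h>

    static int map_rx_buf(struct pci_dev *pdev, void *va, unsigned int len,
                          dma_addr_t *mapping)
    {
            *mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);

            /* pci_map_single() does not return an error code; a failed
             * mapping (e.g. IOMMU space exhausted) must be detected with
             * pci_dma_mapping_error() on the returned bus address. */
            if (unlikely(pci_dma_mapping_error(*mapping)))
                    return -ENOMEM;

            return 0;
    }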
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 796eb305cdc3..0741deb86ca6 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -376,13 +376,16 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
  * Add a buffer of the given length to the supplied HW and SW Rx
  * descriptors.
  */
-static inline void add_one_rx_buf(void *va, unsigned int len,
-                                  struct rx_desc *d, struct rx_sw_desc *sd,
-                                  unsigned int gen, struct pci_dev *pdev)
+static inline int add_one_rx_buf(void *va, unsigned int len,
+                                 struct rx_desc *d, struct rx_sw_desc *sd,
+                                 unsigned int gen, struct pci_dev *pdev)
 {
         dma_addr_t mapping;
 
         mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
+        if (unlikely(pci_dma_mapping_error(mapping)))
+                return -ENOMEM;
+
         pci_unmap_addr_set(sd, dma_addr, mapping);
 
         d->addr_lo = cpu_to_be32(mapping);
@@ -390,6 +393,7 @@ static inline void add_one_rx_buf(void *va, unsigned int len,
         wmb();
         d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
         d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
+        return 0;
 }
 
 static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp)
@@ -424,13 +428,16 @@ static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp)
  * allocated with the supplied gfp flags.  The caller must assure that
  * @n does not exceed the queue's capacity.
  */
-static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
+static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
 {
         void *buf_start;
         struct rx_sw_desc *sd = &q->sdesc[q->pidx];
         struct rx_desc *d = &q->desc[q->pidx];
+        unsigned int count = 0;
 
         while (n--) {
+                int err;
+
                 if (q->use_pages) {
                         if (unlikely(alloc_pg_chunk(q, sd, gfp))) {
 nomem:                          q->alloc_failed++;
@@ -447,8 +454,16 @@ nomem:                          q->alloc_failed++;
                         buf_start = skb->data;
                 }
 
-                add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
-                               adap->pdev);
+                err = add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
+                                     adap->pdev);
+                if (unlikely(err)) {
+                        if (!q->use_pages) {
+                                kfree_skb(sd->skb);
+                                sd->skb = NULL;
+                        }
+                        break;
+                }
+
                 d++;
                 sd++;
                 if (++q->pidx == q->size) {
@@ -458,9 +473,13 @@ nomem:                          q->alloc_failed++;
                         d = q->desc;
                 }
                 q->credits++;
+                count++;
         }
         wmb();
-        t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
+        if (likely(count))
+                t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
+
+        return count;
 }
 
 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
@@ -2618,7 +2637,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
                       int irq_vec_idx, const struct qset_params *p,
                       int ntxq, struct net_device *dev)
 {
-        int i, ret = -ENOMEM;
+        int i, avail, ret = -ENOMEM;
         struct sge_qset *q = &adapter->sge.qs[id];
 
         init_qset_cntxt(q, id);
@@ -2741,9 +2760,19 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
         q->adap = adapter;
         q->netdev = dev;
         t3_update_qset_coalesce(q, p);
+        avail = refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
+        if (!avail) {
+                CH_ALERT(adapter, "free list queue 0 initialization failed\n");
+                goto err;
+        }
+        if (avail < q->fl[0].size)
+                CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
+                        avail);
 
-        refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
-        refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
+        avail = refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
+        if (avail < q->fl[1].size)
+                CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
+                        avail);
         refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
 
         t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
@@ -2752,9 +2781,9 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
         mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
         return 0;
 
- err_unlock:
+err_unlock:
         spin_unlock_irq(&adapter->sge.reg_lock);
- err:
+err:
         t3_free_qset(adapter, q);
         return ret;
 }
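With this patch, refill_fl() reports how many buffers it actually posted, so callers can tell a partial refill (a DMA mapping or allocation failure part-way through) from a full one, as t3_sge_alloc_qset() does above. A minimal sketch of how a hypothetical replenish path might use the new return value, assuming the sge_fl fields shown in the diff (size, credits) and the driver's CH_WARN() macro; replenish_fl() is illustrative and not part of this patch (the in-tree refill caller is __refill_fl(), visible in the hunk context):

    /* Hypothetical caller, illustrating use of refill_fl()'s new
     * return value; not part of the patch. */
    static void replenish_fl(struct adapter *adap, struct sge_fl *fl)
    {
            unsigned int wanted = fl->size - fl->credits;
            int posted = refill_fl(adap, fl, wanted, GFP_ATOMIC);

            /* A short refill is not fatal: refill_fl() stops at the
             * first mapping failure, rings the doorbell only when at
             * least one buffer was posted, and leaves the producer
             * index and credits consistent, so a later call can simply
             * try again for the remainder. */
            if (posted < (int)wanted)
                    CH_WARN(adap, "free list short refill: %d of %u buffers\n",
                            posted, wanted);
    }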