path: root/drivers/net
author    Don Fry <brazilnut@us.ibm.com>  2006-06-29 16:54:38 -0400
committer Jeff Garzik <jeff@garzik.org>  2006-07-05 14:07:15 -0400
commit    06c878500893c315795fcf944ecbd85c3d023040 (patch)
tree      3d1d35bc8ab6238ace77c0e17ed1098fc58c50a2 /drivers/net
parent    12fa30f35b52e85b4c37a2ef3c3320c158d510fa (diff)
[PATCH] pcnet32: Handle memory allocation failures cleanly when resizing tx/rx rings
Fix pcnet32_set_ringparam to handle memory allocation errors without
leaving the adapter in an inoperative state and null pointers waiting
to be dereferenced. Tested on ia32 and ppc64.

Signed-off-by: Don Fry <brazilnut@us.ibm.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
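
Editorial aside, not part of the commit: the path this patch hardens is
reached through ethtool's ETHTOOL_SRINGPARAM request, the same one
`ethtool -G` sends. A minimal userspace sketch of driving it follows;
the interface name "eth0" and the ring counts are illustrative
assumptions.

/* Editorial sketch, not part of the commit: pcnet32_set_ringparam() is
 * the handler that ethtool's ETHTOOL_SRINGPARAM request reaches. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/types.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_ringparam ering;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);  /* assumed name */
        ifr.ifr_data = (void *)&ering;

        memset(&ering, 0, sizeof(ering));
        ering.cmd = ETHTOOL_GRINGPARAM;          /* read current sizes */
        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
                perror("ETHTOOL_GRINGPARAM");
                return 1;
        }

        ering.cmd = ETHTOOL_SRINGPARAM;          /* request a resize */
        ering.rx_pending = 128;                  /* driver rounds to a power of two */
        ering.tx_pending = 64;                   /* and clamps to the ring maximums */
        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
                perror("ETHTOOL_SRINGPARAM");
                return 1;
        }
        printf("rings resized\n");
        close(fd);
        return 0;
}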
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/pcnet32.c  275
1 file changed, 251 insertions(+), 24 deletions(-)
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index f89b7a1e24d6..e79c3b6bee13 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -185,6 +185,23 @@ static int homepna[MAX_UNITS];
 
 #define PCNET32_TOTAL_SIZE 0x20
 
+#define CSR0                0
+#define CSR0_INIT           0x1
+#define CSR0_START          0x2
+#define CSR0_STOP           0x4
+#define CSR0_TXPOLL         0x8
+#define CSR0_INTEN          0x40
+#define CSR0_IDON           0x0100
+#define CSR0_NORMAL         (CSR0_START | CSR0_INTEN)
+#define PCNET32_INIT_LOW    1
+#define PCNET32_INIT_HIGH   2
+#define CSR3                3
+#define CSR4                4
+#define CSR5                5
+#define CSR5_SUSPEND        0x0001
+#define CSR15               15
+#define PCNET32_MC_FILTER   8
+
 /* The PCNET32 Rx and Tx ring descriptors. */
 struct pcnet32_rx_head {
         u32 base;
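
Editorial aside, not part of the commit: the CSR0 bits just defined are
what the resize path uses to park and revive the chip. A minimal sketch
of that window, reusing the driver's existing lp->a.write_csr accessor
and pcnet32_restart(); the function name here is hypothetical.

/* Hypothetical illustration (example_resize_window is not in the
 * driver): the stop/resize/restart bracket pcnet32_set_ringparam()
 * builds from the constants above.  Caller holds lp->lock. */
static void example_resize_window(struct net_device *dev,
                                  struct pcnet32_private *lp)
{
        ulong ioaddr = dev->base_addr;

        lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);   /* halt DMA and interrupts */
        /* ...tx/rx rings can be reallocated safely here... */
        pcnet32_restart(dev, CSR0_NORMAL);          /* CSR0_START | CSR0_INTEN */
}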
@@ -415,6 +432,219 @@ static struct pcnet32_access pcnet32_dwio = {
         .reset = pcnet32_dwio_reset
 };
 
+static void pcnet32_netif_stop(struct net_device *dev)
+{
+        dev->trans_start = jiffies;
+        netif_poll_disable(dev);
+        netif_tx_disable(dev);
+}
+
+static void pcnet32_netif_start(struct net_device *dev)
+{
+        netif_wake_queue(dev);
+        netif_poll_enable(dev);
+}
+
+/*
+ * Allocate space for the new sized tx ring.
+ * Free old resources
+ * Save new resources.
+ * Any failure keeps old resources.
+ * Must be called with lp->lock held.
+ */
+static void pcnet32_realloc_tx_ring(struct net_device *dev,
+                                    struct pcnet32_private *lp,
+                                    unsigned int size)
+{
+        dma_addr_t new_ring_dma_addr;
+        dma_addr_t *new_dma_addr_list;
+        struct pcnet32_tx_head *new_tx_ring;
+        struct sk_buff **new_skb_list;
+
+        pcnet32_purge_tx_ring(dev);
+
+        new_tx_ring = pci_alloc_consistent(lp->pci_dev,
+                                           sizeof(struct pcnet32_tx_head) *
+                                           (1 << size),
+                                           &new_ring_dma_addr);
+        if (new_tx_ring == NULL) {
+                if (netif_msg_drv(lp))
+                        printk("\n" KERN_ERR
+                               "%s: Consistent memory allocation failed.\n",
+                               dev->name);
+                return;
+        }
+        memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size));
+
+        new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
+                                    GFP_ATOMIC);
+        if (!new_dma_addr_list) {
+                if (netif_msg_drv(lp))
+                        printk("\n" KERN_ERR
+                               "%s: Memory allocation failed.\n", dev->name);
+                goto free_new_tx_ring;
+        }
+
+        new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
+                               GFP_ATOMIC);
+        if (!new_skb_list) {
+                if (netif_msg_drv(lp))
+                        printk("\n" KERN_ERR
+                               "%s: Memory allocation failed.\n", dev->name);
+                goto free_new_lists;
+        }
+
+        kfree(lp->tx_skbuff);
+        kfree(lp->tx_dma_addr);
+        pci_free_consistent(lp->pci_dev,
+                            sizeof(struct pcnet32_tx_head) *
+                            lp->tx_ring_size, lp->tx_ring,
+                            lp->tx_ring_dma_addr);
+
+        lp->tx_ring_size = (1 << size);
+        lp->tx_mod_mask = lp->tx_ring_size - 1;
+        lp->tx_len_bits = (size << 12);
+        lp->tx_ring = new_tx_ring;
+        lp->tx_ring_dma_addr = new_ring_dma_addr;
+        lp->tx_dma_addr = new_dma_addr_list;
+        lp->tx_skbuff = new_skb_list;
+        return;
+
+free_new_lists:
+        kfree(new_dma_addr_list);
+free_new_tx_ring:
+        pci_free_consistent(lp->pci_dev,
+                            sizeof(struct pcnet32_tx_head) *
+                            (1 << size),
+                            new_tx_ring,
+                            new_ring_dma_addr);
+        return;
+}
+
+/*
+ * Allocate space for the new sized rx ring.
+ * Re-use old receive buffers.
+ *   alloc extra buffers
+ *   free unneeded buffers
+ *   free unneeded buffers
+ * Save new resources.
+ * Any failure keeps old resources.
+ * Must be called with lp->lock held.
+ */
+static void pcnet32_realloc_rx_ring(struct net_device *dev,
+                                    struct pcnet32_private *lp,
+                                    unsigned int size)
+{
+        dma_addr_t new_ring_dma_addr;
+        dma_addr_t *new_dma_addr_list;
+        struct pcnet32_rx_head *new_rx_ring;
+        struct sk_buff **new_skb_list;
+        int new, overlap;
+
+        new_rx_ring = pci_alloc_consistent(lp->pci_dev,
+                                           sizeof(struct pcnet32_rx_head) *
+                                           (1 << size),
+                                           &new_ring_dma_addr);
+        if (new_rx_ring == NULL) {
+                if (netif_msg_drv(lp))
+                        printk("\n" KERN_ERR
+                               "%s: Consistent memory allocation failed.\n",
+                               dev->name);
+                return;
+        }
+        memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size));
+
+        new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
+                                    GFP_ATOMIC);
+        if (!new_dma_addr_list) {
+                if (netif_msg_drv(lp))
+                        printk("\n" KERN_ERR
+                               "%s: Memory allocation failed.\n", dev->name);
+                goto free_new_rx_ring;
+        }
+
+        new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
+                               GFP_ATOMIC);
+        if (!new_skb_list) {
+                if (netif_msg_drv(lp))
+                        printk("\n" KERN_ERR
+                               "%s: Memory allocation failed.\n", dev->name);
+                goto free_new_lists;
+        }
+
+        /* first copy the current receive buffers */
+        overlap = min(1U << size, lp->rx_ring_size);
+        for (new = 0; new < overlap; new++) {
+                new_rx_ring[new] = lp->rx_ring[new];
+                new_dma_addr_list[new] = lp->rx_dma_addr[new];
+                new_skb_list[new] = lp->rx_skbuff[new];
+        }
+        /* now allocate any new buffers needed */
+        for (; new < (1 << size); new++) {
+                struct sk_buff *rx_skbuff;
+                new_skb_list[new] = dev_alloc_skb(PKT_BUF_SZ);
+                if (!(rx_skbuff = new_skb_list[new])) {
+                        /* keep the original lists and buffers */
+                        if (netif_msg_drv(lp))
+                                printk(KERN_ERR
+                                       "%s: pcnet32_realloc_rx_ring dev_alloc_skb failed.\n",
+                                       dev->name);
+                        goto free_all_new;
+                }
+                skb_reserve(rx_skbuff, 2);
+
+                new_dma_addr_list[new] =
+                    pci_map_single(lp->pci_dev, rx_skbuff->data,
+                                   PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
+                new_rx_ring[new].base = (u32) le32_to_cpu(new_dma_addr_list[new]);
+                new_rx_ring[new].buf_length = le16_to_cpu(2 - PKT_BUF_SZ);
+                new_rx_ring[new].status = le16_to_cpu(0x8000);
+        }
+        /* and free any unneeded buffers */
+        for (; new < lp->rx_ring_size; new++) {
+                if (lp->rx_skbuff[new]) {
+                        pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new],
+                                         PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
+                        dev_kfree_skb(lp->rx_skbuff[new]);
+                }
+        }
+
+        kfree(lp->rx_skbuff);
+        kfree(lp->rx_dma_addr);
+        pci_free_consistent(lp->pci_dev,
+                            sizeof(struct pcnet32_rx_head) *
+                            lp->rx_ring_size, lp->rx_ring,
+                            lp->rx_ring_dma_addr);
+
+        lp->rx_ring_size = (1 << size);
+        lp->rx_mod_mask = lp->rx_ring_size - 1;
+        lp->rx_len_bits = (size << 4);
+        lp->rx_ring = new_rx_ring;
+        lp->rx_ring_dma_addr = new_ring_dma_addr;
+        lp->rx_dma_addr = new_dma_addr_list;
+        lp->rx_skbuff = new_skb_list;
+        return;
+
+free_all_new:
+        while (--new >= overlap) {
+                if (new_skb_list[new]) {
+                        pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
+                                         PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
+                        dev_kfree_skb(new_skb_list[new]);
+                }
+        }
+        kfree(new_skb_list);
+free_new_lists:
+        kfree(new_dma_addr_list);
+free_new_rx_ring:
+        pci_free_consistent(lp->pci_dev,
+                            sizeof(struct pcnet32_rx_head) *
+                            (1 << size),
+                            new_rx_ring,
+                            new_ring_dma_addr);
+        return;
+}
+
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void pcnet32_poll_controller(struct net_device *dev)
 {
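
Editorial aside, not part of the commit: both helpers above share the
commit-or-keep shape that is the point of this patch. Distilled into
self-contained C, with calloc/free standing in for the DMA and skb
allocations:

#include <errno.h>
#include <stdlib.h>

struct rings { void *tx; void *rx; unsigned int entries; };

/* Acquire every new resource first; only when all allocations succeed
 * free the old state and commit the new pointers.  Any failure unwinds
 * the partial work and leaves the previous, still-valid state alone. */
static int resize_commit_or_keep(struct rings *r, unsigned int entries)
{
        void *tx = calloc(entries, 16);    /* stand-in for ring + list allocs */
        void *rx = tx ? calloc(entries, 16) : NULL;

        if (!tx || !rx) {
                free(tx);                  /* unwind partial allocations */
                free(rx);
                return -ENOMEM;            /* caller keeps the old rings */
        }
        free(r->tx);                       /* commit point: drop old state */
        free(r->rx);
        r->tx = tx;
        r->rx = rx;
        r->entries = entries;
        return 0;
}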
@@ -526,56 +756,53 @@ static int pcnet32_set_ringparam(struct net_device *dev,
 {
         struct pcnet32_private *lp = dev->priv;
         unsigned long flags;
+        unsigned int size;
+        ulong ioaddr = dev->base_addr;
         int i;
 
         if (ering->rx_mini_pending || ering->rx_jumbo_pending)
                 return -EINVAL;
 
         if (netif_running(dev))
-                pcnet32_close(dev);
+                pcnet32_netif_stop(dev);
 
         spin_lock_irqsave(&lp->lock, flags);
-        pcnet32_free_ring(dev);
-        lp->tx_ring_size =
-            min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
-        lp->rx_ring_size =
-            min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
+        lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);        /* stop the chip */
+
+        size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
 
         /* set the minimum ring size to 4, to allow the loopback test to work
          * unchanged.
          */
         for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
-                if (lp->tx_ring_size <= (1 << i))
+                if (size <= (1 << i))
                         break;
         }
-        lp->tx_ring_size = (1 << i);
-        lp->tx_mod_mask = lp->tx_ring_size - 1;
-        lp->tx_len_bits = (i << 12);
-
+        if ((1 << i) != lp->tx_ring_size)
+                pcnet32_realloc_tx_ring(dev, lp, i);
+
+        size = min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
         for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
-                if (lp->rx_ring_size <= (1 << i))
+                if (size <= (1 << i))
                         break;
         }
-        lp->rx_ring_size = (1 << i);
-        lp->rx_mod_mask = lp->rx_ring_size - 1;
-        lp->rx_len_bits = (i << 4);
+        if ((1 << i) != lp->rx_ring_size)
+                pcnet32_realloc_rx_ring(dev, lp, i);
+
+        dev->weight = lp->rx_ring_size / 2;
 
-        if (pcnet32_alloc_ring(dev, dev->name)) {
-                pcnet32_free_ring(dev);
-                spin_unlock_irqrestore(&lp->lock, flags);
-                return -ENOMEM;
+        if (netif_running(dev)) {
+                pcnet32_netif_start(dev);
+                pcnet32_restart(dev, CSR0_NORMAL);
         }
 
         spin_unlock_irqrestore(&lp->lock, flags);
 
-        if (pcnet32_debug & NETIF_MSG_DRV)
-                printk(KERN_INFO PFX
+        if (netif_msg_drv(lp))
+                printk(KERN_INFO
                        "%s: Ring Param Settings: RX: %d, TX: %d\n", dev->name,
                        lp->rx_ring_size, lp->tx_ring_size);
 
-        if (netif_running(dev))
-                pcnet32_open(dev);
-
         return 0;
 }
 