Diffstat (limited to 'drivers/net/ibmveth.c')
-rw-r--r--	drivers/net/ibmveth.c	186
1 file changed, 107 insertions(+), 79 deletions(-)
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index cbe9368a4d56..e5246f227c98 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -96,7 +96,7 @@ static void ibmveth_proc_unregister_driver(void);
 static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
-static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter*);
+static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
 
 #ifdef CONFIG_PROC_FS
 #define IBMVETH_PROC_DIR "net/ibmveth"
@@ -181,6 +181,7 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
 	atomic_set(&pool->available, 0);
 	pool->producer_index = 0;
 	pool->consumer_index = 0;
+	pool->active = 0;
 
 	return 0;
 }
@@ -236,7 +237,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
 
 	if(lpar_rc != H_Success) {
-		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
+		pool->free_map[free_index] = index;
 		pool->skbuff[index] = NULL;
 		pool->consumer_index--;
 		dma_unmap_single(&adapter->vdev->dev,
@@ -255,37 +256,19 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 	atomic_add(buffers_added, &(pool->available));
 }
 
-/* check if replenishing is needed. */
-static inline int ibmveth_is_replenishing_needed(struct ibmveth_adapter *adapter)
-{
-	return ((atomic_read(&adapter->rx_buff_pool[0].available) < adapter->rx_buff_pool[0].threshold) ||
-		(atomic_read(&adapter->rx_buff_pool[1].available) < adapter->rx_buff_pool[1].threshold) ||
-		(atomic_read(&adapter->rx_buff_pool[2].available) < adapter->rx_buff_pool[2].threshold));
-}
-
-/* kick the replenish tasklet if we need replenishing and it isn't already running */
-static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter *adapter)
-{
-	if(ibmveth_is_replenishing_needed(adapter) &&
-	   (atomic_dec_if_positive(&adapter->not_replenishing) == 0)) {
-		schedule_work(&adapter->replenish_task);
-	}
-}
-
-/* replenish tasklet routine */
+/* replenish routine */
 static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
 {
+	int i;
+
 	adapter->replenish_task_cycles++;
 
-	ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[0]);
-	ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[1]);
-	ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[2]);
+	for(i = 0; i < IbmVethNumBufferPools; i++)
+		if(adapter->rx_buff_pool[i].active)
+			ibmveth_replenish_buffer_pool(adapter,
+						      &adapter->rx_buff_pool[i]);
 
 	adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
-
-	atomic_inc(&adapter->not_replenishing);
-
-	ibmveth_schedule_replenishing(adapter);
 }
 
 /* empty and free ana buffer pool - also used to do cleanup in error paths */
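[Editor's note] The hunk above drops the deferred workqueue scheduling (ibmveth_schedule_replenishing and its not_replenishing atomic gate) and has ibmveth_replenish_task() walk a variable-size pool array, skipping pools that ibmveth_change_mtu() has deactivated. A minimal userspace sketch of the resulting control flow; the pool struct, pool count, and free_slots field are illustrative stand-ins, not the driver's real ibmveth_buff_pool:

#include <stdio.h>

#define NUM_POOLS 5

struct pool { int active; int free_slots; };

static void replenish_pool(struct pool *p)
{
	p->free_slots = 0;	/* pretend we posted every free buffer */
}

static void replenish_task(struct pool pools[NUM_POOLS])
{
	/* walk every pool, but only touch the active ones */
	for (int i = 0; i < NUM_POOLS; i++)
		if (pools[i].active)
			replenish_pool(&pools[i]);
}

int main(void)
{
	struct pool pools[NUM_POOLS] = {
		{ 1, 3 }, { 1, 0 }, { 0, 7 }, { 1, 2 }, { 0, 1 },
	};

	replenish_task(pools);
	for (int i = 0; i < NUM_POOLS; i++)
		printf("pool %d: active=%d free=%d\n",
		       i, pools[i].active, pools[i].free_slots);
	return 0;
}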
@@ -293,10 +276,8 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibm
 {
 	int i;
 
-	if(pool->free_map) {
-		kfree(pool->free_map);
-		pool->free_map = NULL;
-	}
+	kfree(pool->free_map);
+	pool->free_map = NULL;
 
 	if(pool->skbuff && pool->dma_addr) {
 		for(i = 0; i < pool->size; ++i) {
@@ -321,6 +302,7 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibm
 		kfree(pool->skbuff);
 		pool->skbuff = NULL;
 	}
+	pool->active = 0;
 }
 
 /* remove a buffer from a pool */
@@ -379,6 +361,12 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
 	ibmveth_assert(pool < IbmVethNumBufferPools);
 	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
 
+	if(!adapter->rx_buff_pool[pool].active) {
+		ibmveth_rxq_harvest_buffer(adapter);
+		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
+		return;
+	}
+
 	desc.desc = 0;
 	desc.fields.valid = 1;
 	desc.fields.length = adapter->rx_buff_pool[pool].buff_size;
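[Editor's note] This recycle-path hunk handles the case where a receive buffer completes out of a pool that ibmveth_change_mtu() has since deactivated: instead of re-posting the buffer to the hypervisor, the driver harvests it and frees the now-unused pool. A toy standalone model of that decision; the struct fields and helpers are illustrative only:

#include <stdio.h>

struct pool { int active; int buffers_out; };

static void harvest_buffer(struct pool *p) { p->buffers_out--; }
static void free_pool(struct pool *p)      { p->buffers_out = 0; }

/* Returns 1 if the buffer was re-posted, 0 if the pool was torn down. */
static int recycle(struct pool *p)
{
	if (!p->active) {
		/* pool was deactivated while this buffer was in flight */
		harvest_buffer(p);
		free_pool(p);
		return 0;
	}
	/* normal path: the driver re-posts via h_add_logical_lan_buffer() */
	return 1;
}

int main(void)
{
	struct pool live = { 1, 4 }, dead = { 0, 1 };

	printf("live pool: reposted=%d\n", recycle(&live));
	printf("dead pool: reposted=%d (buffers_out=%d)\n",
	       recycle(&dead), dead.buffers_out);
	return 0;
}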
@@ -409,6 +397,8 @@ static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
 
 static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 {
+	int i;
+
 	if(adapter->buffer_list_addr != NULL) {
 		if(!dma_mapping_error(adapter->buffer_list_dma)) {
 			dma_unmap_single(&adapter->vdev->dev,
@@ -443,26 +433,24 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 		adapter->rx_queue.queue_addr = NULL;
 	}
 
-	ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[0]);
-	ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[1]);
-	ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[2]);
+	for(i = 0; i<IbmVethNumBufferPools; i++)
+		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[i]);
 }
 
 static int ibmveth_open(struct net_device *netdev)
 {
 	struct ibmveth_adapter *adapter = netdev->priv;
 	u64 mac_address = 0;
-	int rxq_entries;
+	int rxq_entries = 1;
 	unsigned long lpar_rc;
 	int rc;
 	union ibmveth_buf_desc rxq_desc;
+	int i;
 
 	ibmveth_debug_printk("open starting\n");
 
-	rxq_entries =
-		adapter->rx_buff_pool[0].size +
-		adapter->rx_buff_pool[1].size +
-		adapter->rx_buff_pool[2].size + 1;
+	for(i = 0; i<IbmVethNumBufferPools; i++)
+		rxq_entries += adapter->rx_buff_pool[i].size;
 
 	adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
 	adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
@@ -502,14 +490,8 @@ static int ibmveth_open(struct net_device *netdev)
 	adapter->rx_queue.num_slots = rxq_entries;
 	adapter->rx_queue.toggle = 1;
 
-	if(ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[0]) ||
-	   ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[1]) ||
-	   ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[2]))
-	{
-		ibmveth_error_printk("unable to allocate buffer pools\n");
-		ibmveth_cleanup(adapter);
-		return -ENOMEM;
-	}
+	/* call change_mtu to init the buffer pools based in initial mtu */
+	ibmveth_change_mtu(netdev, netdev->mtu);
 
 	memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
 	mac_address = mac_address >> 16;
@@ -552,10 +534,10 @@ static int ibmveth_open(struct net_device *netdev)
 		return rc;
 	}
 
-	netif_start_queue(netdev);
+	ibmveth_debug_printk("initial replenish cycle\n");
+	ibmveth_replenish_task(adapter);
 
-	ibmveth_debug_printk("scheduling initial replenish cycle\n");
-	ibmveth_schedule_replenishing(adapter);
+	netif_start_queue(netdev);
 
 	ibmveth_debug_printk("open complete\n");
 
@@ -573,9 +555,6 @@ static int ibmveth_close(struct net_device *netdev)
 
 	free_irq(netdev->irq, netdev);
 
-	cancel_delayed_work(&adapter->replenish_task);
-	flush_scheduled_work();
-
 	do {
 		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
 	} while (H_isLongBusy(lpar_rc) || (lpar_rc == H_Busy));
@@ -640,12 +619,18 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	unsigned long lpar_rc;
 	int nfrags = 0, curfrag;
 	unsigned long correlator;
+	unsigned long flags;
 	unsigned int retry_count;
+	unsigned int tx_dropped = 0;
+	unsigned int tx_bytes = 0;
+	unsigned int tx_packets = 0;
+	unsigned int tx_send_failed = 0;
+	unsigned int tx_map_failed = 0;
+
 
 	if ((skb_shinfo(skb)->nr_frags + 1) > IbmVethMaxSendFrags) {
-		adapter->stats.tx_dropped++;
-		dev_kfree_skb(skb);
-		return 0;
+		tx_dropped++;
+		goto out;
 	}
 
 	memset(&desc, 0, sizeof(desc));
@@ -664,10 +649,9 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	if(dma_mapping_error(desc[0].fields.address)) {
 		ibmveth_error_printk("tx: unable to map initial fragment\n");
-		adapter->tx_map_failed++;
-		adapter->stats.tx_dropped++;
-		dev_kfree_skb(skb);
-		return 0;
+		tx_map_failed++;
+		tx_dropped++;
+		goto out;
 	}
 
 	curfrag = nfrags;
@@ -684,8 +668,8 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 		if(dma_mapping_error(desc[curfrag+1].fields.address)) {
 			ibmveth_error_printk("tx: unable to map fragment %d\n", curfrag);
-			adapter->tx_map_failed++;
-			adapter->stats.tx_dropped++;
+			tx_map_failed++;
+			tx_dropped++;
 			/* Free all the mappings we just created */
 			while(curfrag < nfrags) {
 				dma_unmap_single(&adapter->vdev->dev,
@@ -694,8 +678,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 					DMA_TO_DEVICE);
 				curfrag++;
 			}
-			dev_kfree_skb(skb);
-			return 0;
+			goto out;
 		}
 	}
 
@@ -720,11 +703,12 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 			ibmveth_error_printk("tx: desc[%i] valid=%d, len=%d, address=0x%d\n", i,
 					     desc[i].fields.valid, desc[i].fields.length, desc[i].fields.address);
 		}
-		adapter->tx_send_failed++;
-		adapter->stats.tx_dropped++;
+		tx_send_failed++;
+		tx_dropped++;
 	} else {
-		adapter->stats.tx_packets++;
-		adapter->stats.tx_bytes += skb->len;
+		tx_packets++;
+		tx_bytes += skb->len;
+		netdev->trans_start = jiffies;
 	}
 
 	do {
@@ -733,6 +717,14 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 				desc[nfrags].fields.length, DMA_TO_DEVICE);
 	} while(--nfrags >= 0);
 
+out:	spin_lock_irqsave(&adapter->stats_lock, flags);
+	adapter->stats.tx_dropped += tx_dropped;
+	adapter->stats.tx_bytes += tx_bytes;
+	adapter->stats.tx_packets += tx_packets;
+	adapter->tx_send_failed += tx_send_failed;
+	adapter->tx_map_failed += tx_map_failed;
+	spin_unlock_irqrestore(&adapter->stats_lock, flags);
+
 	dev_kfree_skb(skb);
 	return 0;
 }
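[Editor's note] Together with the NETIF_F_LLTX flag added in the probe hunk further down, these xmit hunks move per-packet accounting into local variables and fold them into the adapter counters once, under the new adapter->stats_lock, at the single "out:" exit. A compilable userspace sketch of that accumulate-then-publish pattern, using a pthread mutex as a stand-in for the kernel spinlock (build with -lpthread):

#include <pthread.h>
#include <stdio.h>

struct stats { unsigned long tx_packets, tx_bytes, tx_dropped; };

static struct stats shared;
static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;

static int xmit_one(unsigned int len, int map_ok)
{
	/* hot path counts into locals, no shared-state contention */
	unsigned int tx_packets = 0, tx_bytes = 0, tx_dropped = 0;

	if (!map_ok) {		/* e.g. a DMA mapping failure */
		tx_dropped++;
		goto out;
	}
	tx_packets++;
	tx_bytes += len;

out:	/* one locked update per packet instead of one per counter */
	pthread_mutex_lock(&stats_lock);
	shared.tx_packets += tx_packets;
	shared.tx_bytes   += tx_bytes;
	shared.tx_dropped += tx_dropped;
	pthread_mutex_unlock(&stats_lock);
	return 0;
}

int main(void)
{
	xmit_one(1500, 1);
	xmit_one(60, 0);
	printf("packets=%lu bytes=%lu dropped=%lu\n",
	       shared.tx_packets, shared.tx_bytes, shared.tx_dropped);
	return 0;
}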
@@ -776,13 +768,14 @@ static int ibmveth_poll(struct net_device *netdev, int *budget)
 			adapter->stats.rx_packets++;
 			adapter->stats.rx_bytes += length;
 			frames_processed++;
+			netdev->last_rx = jiffies;
 			}
 		} else {
 			more_work = 0;
 		}
 	} while(more_work && (frames_processed < max_frames_to_process));
 
-	ibmveth_schedule_replenishing(adapter);
+	ibmveth_replenish_task(adapter);
 
 	if(more_work) {
 		/* more work to do - return that we are not done yet */
@@ -883,17 +876,54 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
 
 static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
 {
-	if ((new_mtu < 68) || (new_mtu > (1<<20)))
+	struct ibmveth_adapter *adapter = dev->priv;
+	int i;
+	int prev_smaller = 1;
+
+	if ((new_mtu < 68) ||
+	    (new_mtu > (pool_size[IbmVethNumBufferPools-1]) - IBMVETH_BUFF_OH))
 		return -EINVAL;
+
+	for(i = 0; i<IbmVethNumBufferPools; i++) {
+		int activate = 0;
+		if (new_mtu > (pool_size[i] - IBMVETH_BUFF_OH)) {
+			activate = 1;
+			prev_smaller= 1;
+		} else {
+			if (prev_smaller)
+				activate = 1;
+			prev_smaller= 0;
+		}
+
+		if (activate && !adapter->rx_buff_pool[i].active) {
+			struct ibmveth_buff_pool *pool =
+				&adapter->rx_buff_pool[i];
+			if(ibmveth_alloc_buffer_pool(pool)) {
+				ibmveth_error_printk("unable to alloc pool\n");
+				return -ENOMEM;
+			}
+			adapter->rx_buff_pool[i].active = 1;
+		} else if (!activate && adapter->rx_buff_pool[i].active) {
+			adapter->rx_buff_pool[i].active = 0;
+			h_free_logical_lan_buffer(adapter->vdev->unit_address,
+						  (u64)pool_size[i]);
+		}
+
+	}
+
+	/* kick the interrupt handler so that the new buffer pools get
+	   replenished or deallocated */
+	ibmveth_interrupt(dev->irq, dev, NULL);
+
 	dev->mtu = new_mtu;
 	return 0;
 }
 
 static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 {
-	int rc;
+	int rc, i;
 	struct net_device *netdev;
-	struct ibmveth_adapter *adapter;
+	struct ibmveth_adapter *adapter = NULL;
 
 	unsigned char *mac_addr_p;
 	unsigned int *mcastFilterSize_p;
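[Editor's note] The rewritten ibmveth_change_mtu() above decides pool activation as follows: every pool too small to hold a full-MTU frame stays active (it still serves smaller packets), the first pool large enough for the MTU is activated, and all larger pools are shut down, with their buffers returned via h_free_logical_lan_buffer(). A standalone model of that selection loop; the pool sizes and the 22-byte overhead below are assumed stand-ins for the driver's pool_size[] and IBMVETH_BUFF_OH, not values taken from this diff:

#include <stdio.h>

#define NUM_POOLS 5
#define BUFF_OH   22	/* assumed per-buffer overhead, not the real constant */

static const int pool_size[NUM_POOLS] = { 512, 2048, 16384, 32768, 65536 };

static void pick_active_pools(int new_mtu, int active[NUM_POOLS])
{
	int prev_smaller = 1;

	for (int i = 0; i < NUM_POOLS; i++) {
		if (new_mtu > pool_size[i] - BUFF_OH) {
			/* too small for a full-MTU frame; keep for small packets */
			active[i] = 1;
			prev_smaller = 1;
		} else {
			/* only the first pool that fits the MTU stays active */
			active[i] = prev_smaller;
			prev_smaller = 0;
		}
	}
}

int main(void)
{
	int active[NUM_POOLS];

	pick_active_pools(9000, active);
	for (int i = 0; i < NUM_POOLS; i++)
		printf("pool %d (%5d bytes): %s\n", i, pool_size[i],
		       active[i] ? "active" : "inactive");
	return 0;
}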
@@ -960,23 +990,21 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
 	netdev->ethtool_ops = &netdev_ethtool_ops;
 	netdev->change_mtu = ibmveth_change_mtu;
 	SET_NETDEV_DEV(netdev, &dev->dev);
+	netdev->features |= NETIF_F_LLTX;
+	spin_lock_init(&adapter->stats_lock);
 
 	memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
 
-	ibmveth_init_buffer_pool(&adapter->rx_buff_pool[0], 0, IbmVethPool0DftCnt, IbmVethPool0DftSize);
-	ibmveth_init_buffer_pool(&adapter->rx_buff_pool[1], 1, IbmVethPool1DftCnt, IbmVethPool1DftSize);
-	ibmveth_init_buffer_pool(&adapter->rx_buff_pool[2], 2, IbmVethPool2DftCnt, IbmVethPool2DftSize);
+	for(i = 0; i<IbmVethNumBufferPools; i++)
+		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
+					 pool_count[i], pool_size[i]);
 
 	ibmveth_debug_printk("adapter @ 0x%p\n", adapter);
 
-	INIT_WORK(&adapter->replenish_task, (void*)ibmveth_replenish_task, (void*)adapter);
-
 	adapter->buffer_list_dma = DMA_ERROR_CODE;
 	adapter->filter_list_dma = DMA_ERROR_CODE;
 	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
 
-	atomic_set(&adapter->not_replenishing, 1);
-
 	ibmveth_debug_printk("registering netdev...\n");
 
 	rc = register_netdev(netdev);