Diffstat (limited to 'drivers/net/ibmveth.c')
-rw-r--r--  drivers/net/ibmveth.c | 48
1 file changed, 8 insertions(+), 40 deletions(-)
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index d985b804a762..aea1598b2253 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -96,7 +96,6 @@ static void ibmveth_proc_unregister_driver(void);
 static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
-static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter*);
 static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
 
 #ifdef CONFIG_PROC_FS
@@ -257,29 +256,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 	atomic_add(buffers_added, &(pool->available));
 }
 
-/* check if replenishing is needed. */
-static inline int ibmveth_is_replenishing_needed(struct ibmveth_adapter *adapter)
-{
-	int i;
-
-	for(i = 0; i < IbmVethNumBufferPools; i++)
-		if(adapter->rx_buff_pool[i].active &&
-		   (atomic_read(&adapter->rx_buff_pool[i].available) <
-		    adapter->rx_buff_pool[i].threshold))
-			return 1;
-	return 0;
-}
-
-/* kick the replenish tasklet if we need replenishing and it isn't already running */
-static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter *adapter)
-{
-	if(ibmveth_is_replenishing_needed(adapter) &&
-	   (atomic_dec_if_positive(&adapter->not_replenishing) == 0)) {
-		schedule_work(&adapter->replenish_task);
-	}
-}
-
-/* replenish tasklet routine */
+/* replenish routine */
 static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
 {
 	int i;
@@ -292,10 +269,6 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
 					     &adapter->rx_buff_pool[i]);
 
 	adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
-
-	atomic_inc(&adapter->not_replenishing);
-
-	ibmveth_schedule_replenishing(adapter);
 }
 
 /* empty and free ana buffer pool - also used to do cleanup in error paths */
@@ -563,10 +536,10 @@ static int ibmveth_open(struct net_device *netdev)
 		return rc;
 	}
 
-	netif_start_queue(netdev);
+	ibmveth_debug_printk("initial replenish cycle\n");
+	ibmveth_replenish_task(adapter);
 
-	ibmveth_debug_printk("scheduling initial replenish cycle\n");
-	ibmveth_schedule_replenishing(adapter);
+	netif_start_queue(netdev);
 
 	ibmveth_debug_printk("open complete\n");
 
@@ -584,9 +557,6 @@ static int ibmveth_close(struct net_device *netdev)
 
 	free_irq(netdev->irq, netdev);
 
-	cancel_delayed_work(&adapter->replenish_task);
-	flush_scheduled_work();
-
 	do {
 		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
 	} while (H_isLongBusy(lpar_rc) || (lpar_rc == H_Busy));
@@ -795,7 +765,7 @@ static int ibmveth_poll(struct net_device *netdev, int *budget)
 		}
 	} while(more_work && (frames_processed < max_frames_to_process));
 
-	ibmveth_schedule_replenishing(adapter);
+	ibmveth_replenish_task(adapter);
 
 	if(more_work) {
 		/* more work to do - return that we are not done yet */
@@ -931,8 +901,10 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
 
 	}
 
+	/* kick the interrupt handler so that the new buffer pools get
+	   replenished or deallocated */
+	ibmveth_interrupt(dev->irq, dev, NULL);
 
-	ibmveth_schedule_replenishing(adapter);
 	dev->mtu = new_mtu;
 	return 0;
 }
@@ -1017,14 +989,10 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
 
 	ibmveth_debug_printk("adapter @ 0x%p\n", adapter);
 
-	INIT_WORK(&adapter->replenish_task, (void*)ibmveth_replenish_task, (void*)adapter);
-
 	adapter->buffer_list_dma = DMA_ERROR_CODE;
 	adapter->filter_list_dma = DMA_ERROR_CODE;
 	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
 
-	atomic_set(&adapter->not_replenishing, 1);
-
 	ibmveth_debug_printk("registering netdev...\n");
 
 	rc = register_netdev(netdev);
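
In short, the patch drops the deferred replenish path (ibmveth_schedule_replenishing(), the not_replenishing guard, and the replenish_task work item) and instead calls ibmveth_replenish_task() directly from ibmveth_open() and ibmveth_poll(), while ibmveth_change_mtu() kicks the interrupt handler. The standalone C sketch below only illustrates the control-flow difference between the two patterns; the structures, pool count, and helper names are simplified stand-ins for illustration, not the driver's actual code.

#include <stdatomic.h>
#include <stdio.h>

#define NUM_POOLS 3

/* Simplified stand-in for the driver's per-pool bookkeeping. */
struct pool {
	int active;
	int available;
	int threshold;
};

struct adapter {
	struct pool rx_pool[NUM_POOLS];
	atomic_int not_replenishing;	/* guard used only by the old, deferred path */
};

/* Refill every active pool that has fallen below its threshold
 * (stands in for ibmveth_replenish_task()). */
static void replenish_task(struct adapter *a)
{
	for (int i = 0; i < NUM_POOLS; i++)
		if (a->rx_pool[i].active && a->rx_pool[i].available < a->rx_pool[i].threshold)
			a->rx_pool[i].available = a->rx_pool[i].threshold;
}

/* Old pattern: check whether any pool is low and, if no replenish is
 * already pending, hand the work off to run later.  The compare-and-swap
 * mimics atomic_dec_if_positive(); the direct call stands in for
 * schedule_work(). */
static void schedule_replenishing(struct adapter *a)
{
	for (int i = 0; i < NUM_POOLS; i++) {
		if (a->rx_pool[i].active && a->rx_pool[i].available < a->rx_pool[i].threshold) {
			int idle = 1;
			if (atomic_compare_exchange_strong(&a->not_replenishing, &idle, 0)) {
				replenish_task(a);	/* deferred in the real driver */
				atomic_store(&a->not_replenishing, 1);
			}
			return;
		}
	}
}

int main(void)
{
	struct adapter a = { .rx_pool = { {1, 1, 4}, {1, 2, 4}, {1, 4, 4} } };
	atomic_init(&a.not_replenishing, 1);

	schedule_replenishing(&a);	/* old, indirect path */
	replenish_task(&a);		/* new path: call the replenish routine directly */

	printf("pool 0 available after replenish: %d\n", a.rx_pool[0].available);
	return 0;
}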