author     Dale Farnsworth <dale@farnsworth.org>  2006-03-03 12:05:26 -0500
committer  Jeff Garzik <jeff@garzik.org>          2006-03-03 12:12:37 -0500
commit     f78fb4743dc06719084239c29dc178ad38ad2e2f (patch)
tree       62ebf8cc6a8de144c21a0b5cd2a3318a0e2445c2 /drivers/net/mv643xx_eth.c
parent     468d09f8946d40228c56de26fe4874b2f98067ed (diff)
[PATCH] mv643xx_eth: Remove non-working feature: task level rx queue refill
The task level rx queue refill feature hasn't ever worked (at least in 2.6) and is of dubious value. Remove it.

Signed-off-by: Dale Farnsworth <dale@farnsworth.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/mv643xx_eth.c')
-rw-r--r--  drivers/net/mv643xx_eth.c  59
1 file changed, 12 insertions, 47 deletions
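
For orientation before reading the hunks: after this patch, RX refill is an ordinary function called synchronously from the receive path (and re-armed via a timer only when the ring has run completely empty), instead of a work-queue task guarded by rx_task_busy/rx_timer_flag. A condensed sketch of the new mv643xx_eth_rx_refill_descs(), pieced together from the hunks below (the DMA mapping and descriptor hand-off in the middle of the loop fall outside the diff context and are only indicated by a comment):

static void mv643xx_eth_rx_refill_descs(struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	struct sk_buff *skb;

	while (mp->rx_desc_count < mp->rx_ring_size) {
		skb = dev_alloc_skb(ETH_RX_SKB_SIZE + ETH_DMA_ALIGN);
		if (!skb)
			break;	/* out of memory; fall back to the timer below */
		/* ... DMA-align the buffer and return the descriptor to the
		 * hardware (not shown in the diff context below) ... */
		skb_reserve(skb, ETH_HW_IP_ALIGN);
	}

	/* Ring completely empty of skbs: retry the refill ~100 ms later. */
	if (mp->rx_desc_count == 0) {
		mp->timeout.expires = jiffies + (HZ / 10);
		add_timer(&mp->timeout);
	}
}

The timer handler shrinks to a one-line wrapper that calls this function, the interrupt handler and mv643xx_poll() no longer touch a refill task at all, and mv643xx_eth_receive_queue() simply calls the refill directly after processing its budget.
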
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 30b0d5b1b155..9f2661355a4a 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -132,25 +132,21 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
 }
 
 /*
- * mv643xx_eth_rx_task
+ * mv643xx_eth_rx_refill_descs
  *
  * Fills / refills RX queue on a certain gigabit ethernet port
  *
  * Input : pointer to ethernet interface network device structure
  * Output : N/A
  */
-static void mv643xx_eth_rx_task(void *data)
+static void mv643xx_eth_rx_refill_descs(struct net_device *dev)
 {
-	struct net_device *dev = (struct net_device *)data;
 	struct mv643xx_private *mp = netdev_priv(dev);
 	struct pkt_info pkt_info;
 	struct sk_buff *skb;
 	int unaligned;
 
-	if (test_and_set_bit(0, &mp->rx_task_busy))
-		panic("%s: Error in test_set_bit / clear_bit", dev->name);
-
-	while (mp->rx_desc_count < (mp->rx_ring_size - 5)) {
+	while (mp->rx_desc_count < mp->rx_ring_size) {
 		skb = dev_alloc_skb(ETH_RX_SKB_SIZE + ETH_DMA_ALIGN);
 		if (!skb)
 			break;
@@ -170,29 +166,19 @@ static void mv643xx_eth_rx_task(void *data)
 		}
 		skb_reserve(skb, ETH_HW_IP_ALIGN);
 	}
-	clear_bit(0, &mp->rx_task_busy);
 	/*
 	 * If RX ring is empty of SKB, set a timer to try allocating
-	 * again in a later time .
+	 * again at a later time.
 	 */
-	if ((mp->rx_desc_count == 0) && (mp->rx_timer_flag == 0)) {
+	if (mp->rx_desc_count == 0) {
 		printk(KERN_INFO "%s: Rx ring is empty\n", dev->name);
-		/* After 100mSec */
-		mp->timeout.expires = jiffies + (HZ / 10);
+		mp->timeout.expires = jiffies + (HZ / 10);	/* 100 mSec */
 		add_timer(&mp->timeout);
-		mp->rx_timer_flag = 1;
-	}
-#ifdef MV643XX_RX_QUEUE_FILL_ON_TASK
-	else {
-		/* Return interrupts */
-		mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(mp->port_num),
-							INT_UNMASK_ALL);
 	}
-#endif
 }
 
 /*
- * mv643xx_eth_rx_task_timer_wrapper
+ * mv643xx_eth_rx_refill_descs_timer_wrapper
  *
  * Timer routine to wake up RX queue filling task. This function is
  * used only in case the RX queue is empty, and all alloc_skb has
@@ -201,13 +187,9 @@ static void mv643xx_eth_rx_task(void *data)
  * Input : pointer to ethernet interface network device structure
  * Output : N/A
  */
-static void mv643xx_eth_rx_task_timer_wrapper(unsigned long data)
+static inline void mv643xx_eth_rx_refill_descs_timer_wrapper(unsigned long data)
 {
-	struct net_device *dev = (struct net_device *)data;
-	struct mv643xx_private *mp = netdev_priv(dev);
-
-	mp->rx_timer_flag = 0;
-	mv643xx_eth_rx_task((void *)data);
+	mv643xx_eth_rx_refill_descs((struct net_device *)data);
 }
 
 /*
@@ -451,6 +433,7 @@ static int mv643xx_eth_receive_queue(struct net_device *dev, int budget)
 		}
 		dev->last_rx = jiffies;
 	}
+	mv643xx_eth_rx_refill_descs(dev);	/* Fill RX ring with skb's */
 
 	return received_packets;
 }
@@ -531,18 +514,6 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
 		eth_int_cause_ext = mv_read(
 			MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
 				ETH_INT_UNMASK_ALL_EXT;
-#ifdef MV643XX_RX_QUEUE_FILL_ON_TASK
-		/* Mask all interrupts on ethernet port */
-		mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
-							INT_MASK_ALL);
-		/* wait for previous write to take effect */
-		mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
-
-		queue_task(&mp->rx_task, &tq_immediate);
-		mark_bh(IMMEDIATE_BH);
-#else
-		mp->rx_task.func(dev);
-#endif
 		mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num),
 							~eth_int_cause_ext);
 	}
@@ -810,15 +781,10 @@ static int mv643xx_eth_open(struct net_device *dev)
 
 	eth_port_init(mp);
 
-	INIT_WORK(&mp->rx_task, (void (*)(void *))mv643xx_eth_rx_task, dev);
-
 	memset(&mp->timeout, 0, sizeof(struct timer_list));
-	mp->timeout.function = mv643xx_eth_rx_task_timer_wrapper;
+	mp->timeout.function = mv643xx_eth_rx_refill_descs_timer_wrapper;
 	mp->timeout.data = (unsigned long)dev;
 
-	mp->rx_task_busy = 0;
-	mp->rx_timer_flag = 0;
-
 	/* Allocate RX and TX skb rings */
 	mp->rx_skb = kmalloc(sizeof(*mp->rx_skb) * mp->rx_ring_size,
 								GFP_KERNEL);
@@ -891,7 +857,7 @@ static int mv643xx_eth_open(struct net_device *dev)
 
 	ether_init_rx_desc_ring(mp);
 
-	mv643xx_eth_rx_task(dev);	/* Fill RX ring with skb's */
+	mv643xx_eth_rx_refill_descs(dev);	/* Fill RX ring with skb's */
 
 	/* Clear any pending ethernet port interrupts */
 	mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
@@ -1043,7 +1009,6 @@ static int mv643xx_poll(struct net_device *dev, int *budget)
 		if (orig_budget > dev->quota)
 			orig_budget = dev->quota;
 		work_done = mv643xx_eth_receive_queue(dev, orig_budget);
-		mp->rx_task.func(dev);
 		*budget -= work_done;
 		dev->quota -= work_done;
 		if (work_done >= orig_budget)