Diffstat (limited to 'drivers/net/mv643xx_eth.c')
-rw-r--r--	drivers/net/mv643xx_eth.c	28
1 files changed, 15 insertions, 13 deletions
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index a56d9d2df73f..a400d7115f78 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -393,12 +393,12 @@ struct mv643xx_eth_private {
 	struct work_struct tx_timeout_task;
 
 	struct napi_struct napi;
+	u8 oom;
 	u8 work_link;
 	u8 work_tx;
 	u8 work_tx_end;
 	u8 work_rx;
 	u8 work_rx_refill;
-	u8 work_rx_oom;
 
 	int skb_size;
 	struct sk_buff_head rx_recycle;
@@ -661,7 +661,7 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
 					    dma_get_cache_alignment() - 1);
 
 		if (skb == NULL) {
-			mp->work_rx_oom |= 1 << rxq->index;
+			mp->oom = 1;
 			goto oom;
 		}
 
@@ -1255,7 +1255,6 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
 
 	spin_lock_bh(&mp->mib_counters_lock);
 	p->good_octets_received += mib_read(mp, 0x00);
-	p->good_octets_received += (u64)mib_read(mp, 0x04) << 32;
 	p->bad_octets_received += mib_read(mp, 0x08);
 	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
 	p->good_frames_received += mib_read(mp, 0x10);
@@ -1269,7 +1268,6 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
 	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
 	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
 	p->good_octets_sent += mib_read(mp, 0x38);
-	p->good_octets_sent += (u64)mib_read(mp, 0x3c) << 32;
 	p->good_frames_sent += mib_read(mp, 0x40);
 	p->excessive_collision += mib_read(mp, 0x44);
 	p->multicast_frames_sent += mib_read(mp, 0x48);
@@ -2167,8 +2165,10 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 
 	mp = container_of(napi, struct mv643xx_eth_private, napi);
 
-	mp->work_rx_refill |= mp->work_rx_oom;
-	mp->work_rx_oom = 0;
+	if (unlikely(mp->oom)) {
+		mp->oom = 0;
+		del_timer(&mp->rx_oom);
+	}
 
 	work_done = 0;
 	while (work_done < budget) {
@@ -2182,8 +2182,10 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 			continue;
 		}
 
-		queue_mask = mp->work_tx | mp->work_tx_end |
-				mp->work_rx | mp->work_rx_refill;
+		queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx;
+		if (likely(!mp->oom))
+			queue_mask |= mp->work_rx_refill;
+
 		if (!queue_mask) {
 			if (mv643xx_eth_collect_events(mp))
 				continue;
@@ -2204,7 +2206,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 			txq_maybe_wake(mp->txq + queue);
 		} else if (mp->work_rx & queue_mask) {
 			work_done += rxq_process(mp->rxq + queue, work_tbd);
-		} else if (mp->work_rx_refill & queue_mask) {
+		} else if (!mp->oom && (mp->work_rx_refill & queue_mask)) {
 			work_done += rxq_refill(mp->rxq + queue, work_tbd);
 		} else {
 			BUG();
@@ -2212,7 +2214,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 	}
 
 	if (work_done < budget) {
-		if (mp->work_rx_oom)
+		if (mp->oom)
 			mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
 		napi_complete(napi);
 		wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);
@@ -2274,8 +2276,6 @@ static void port_start(struct mv643xx_eth_private *mp)
 	pscr |= FORCE_LINK_PASS;
 	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
 
-	wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);
-
 	/*
 	 * Configure TX path and queues.
 	 */
@@ -2374,7 +2374,7 @@ static int mv643xx_eth_open(struct net_device *dev)
 		rxq_refill(mp->rxq + i, INT_MAX);
 	}
 
-	if (mp->work_rx_oom) {
+	if (mp->oom) {
 		mp->rx_oom.expires = jiffies + (HZ / 10);
 		add_timer(&mp->rx_oom);
 	}
@@ -2957,6 +2957,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 
 	netif_carrier_off(dev);
 
+	wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);
+
 	set_rx_coal(mp, 250);
 	set_tx_coal(mp, 0);
 