author      Anton Vorontsov <avorontsov@ru.mvista.com>    2009-07-07 04:38:42 -0400
committer   David S. Miller <davem@davemloft.net>         2009-07-07 22:22:09 -0400
commit      50f238fdf38e37f0350be17c36e3ec0fd298cc40
tree        6fcc80fc88ca09a58b7b4d645b0b1c21641aadec /drivers/net/ucc_geth.c
parent      ef0657c49e0f93dcebc9b4719e4fe0b478411f60
ucc_geth: Add support for skb recycling
We can reclaim transmitted skbs and reuse them in the receive path;
this is known as skb recycling.

Also reorder the steps in ucc_geth_poll() so that the tx rings are
cleaned first, giving the rx refill path a chance to pick up freshly
reclaimed skbs.
Signed-off-by: Anton Vorontsov <avorontsov@ru.mvista.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
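
The pattern at the heart of this patch is a small per-device queue
(ugeth->rx_recycle) that the tx-completion path feeds and the rx-refill
path drains. Below is a minimal sketch of the two halves, using only the
calls the patch itself relies on; recycle_or_free() and refill_rx_skb()
are illustrative names, not functions in the driver, which open-codes
both halves (see the diff below):

/* Sketch of the skb-recycling pattern introduced by this patch.
 * recycle_or_free() and refill_rx_skb() are hypothetical helpers. */

/* tx completion: keep a reusable skb instead of freeing it. */
static void recycle_or_free(struct ucc_geth_private *ugeth,
                            struct sk_buff *skb, unsigned int rx_size)
{
        /* Cap the pool at the rx ring size.  skb_recycle_check()
         * rejects shared, cloned, or undersized skbs and resets the
         * ones it accepts for reuse as receive buffers. */
        if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN &&
            skb_recycle_check(skb, rx_size))
                __skb_queue_head(&ugeth->rx_recycle, skb);
        else
                dev_kfree_skb(skb);
}

/* rx refill: prefer a recycled skb over a fresh allocation. */
static struct sk_buff *refill_rx_skb(struct ucc_geth_private *ugeth,
                                     unsigned int rx_size)
{
        struct sk_buff *skb = __skb_dequeue(&ugeth->rx_recycle);

        return skb ? skb : dev_alloc_skb(rx_size);
}

Both halves run from the driver's NAPI poll handler, which is presumably
why the unlocked __skb_queue_head()/__skb_dequeue() variants suffice;
the skb_queue_purge() added to ucc_geth_memclean() only runs once the
device is quiesced.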
Diffstat (limited to 'drivers/net/ucc_geth.c')
 drivers/net/ucc_geth.c | 40 ++++++++++++++++++++++++++++------------
 1 file changed, 28 insertions(+), 12 deletions(-)
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 5ba95867a2be..ca476a58087c 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -209,9 +209,10 @@ static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
 {
 	struct sk_buff *skb = NULL;
 
-	skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length +
-			    UCC_GETH_RX_DATA_BUF_ALIGNMENT);
-
+	skb = __skb_dequeue(&ugeth->rx_recycle);
+	if (!skb)
+		skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length +
+				    UCC_GETH_RX_DATA_BUF_ALIGNMENT);
 	if (skb == NULL)
 		return NULL;
 
@@ -1986,6 +1987,8 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
 		iounmap(ugeth->ug_regs);
 		ugeth->ug_regs = NULL;
 	}
+
+	skb_queue_purge(&ugeth->rx_recycle);
 }
 
 static void ucc_geth_set_multi(struct net_device *dev)
@@ -2202,6 +2205,8 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
 		return -ENOMEM;
 	}
 
+	skb_queue_head_init(&ugeth->rx_recycle);
+
 	return 0;
 }
 
@@ -3208,8 +3213,10 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
 			if (netif_msg_rx_err(ugeth))
 				ugeth_err("%s, %d: ERROR!!! skb - 0x%08x",
 					  __func__, __LINE__, (u32) skb);
-			if (skb)
-				dev_kfree_skb_any(skb);
+			if (skb) {
+				skb->data = skb->head + NET_SKB_PAD;
+				__skb_queue_head(&ugeth->rx_recycle, skb);
+			}
 
 			ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
 			dev->stats.rx_dropped++;
@@ -3267,6 +3274,8 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
 
 	/* Normal processing. */
 	while ((bd_status & T_R) == 0) {
+		struct sk_buff *skb;
+
 		/* BD contains already transmitted buffer. */
 		/* Handle the transmitted buffer and release */
 		/* the BD to be used with the current frame */
@@ -3276,9 +3285,16 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
 
 		dev->stats.tx_packets++;
 
-		/* Free the sk buffer associated with this TxBD */
-		dev_kfree_skb(ugeth->
-			      tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]);
+		skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
+
+		if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN &&
+		    skb_recycle_check(skb,
+				    ugeth->ug_info->uf_info.max_rx_buf_length +
+				    UCC_GETH_RX_DATA_BUF_ALIGNMENT))
+			__skb_queue_head(&ugeth->rx_recycle, skb);
+		else
+			dev_kfree_skb(skb);
+
 		ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
 		ugeth->skb_dirtytx[txQ] =
 		    (ugeth->skb_dirtytx[txQ] +
@@ -3307,16 +3323,16 @@ static int ucc_geth_poll(struct napi_struct *napi, int budget)
 
 	ug_info = ugeth->ug_info;
 
-	howmany = 0;
-	for (i = 0; i < ug_info->numQueuesRx; i++)
-		howmany += ucc_geth_rx(ugeth, i, budget - howmany);
-
 	/* Tx event processing */
 	spin_lock(&ugeth->lock);
 	for (i = 0; i < ug_info->numQueuesTx; i++)
 		ucc_geth_tx(ugeth->ndev, i);
 	spin_unlock(&ugeth->lock);
 
+	howmany = 0;
+	for (i = 0; i < ug_info->numQueuesRx; i++)
+		howmany += ucc_geth_rx(ugeth, i, budget - howmany);
+
 	if (howmany < budget) {
 		napi_complete(napi);
 		setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS);
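
The final hunk is what makes the pool productive within a single poll
cycle: tx completion now runs before rx processing, so skbs reclaimed
from the tx rings are already sitting on rx_recycle when get_new_skb()
refills the rx BDs. A condensed view of the new ordering follows; this
is a sketch only (the NAPI-completion and interrupt re-enable tail is
omitted, and poll_order_sketch() is not a function in the driver):

/* Condensed sketch of ucc_geth_poll() after this patch. */
static int poll_order_sketch(struct ucc_geth_private *ugeth, int budget)
{
        struct ucc_geth_info *ug_info = ugeth->ug_info;
        int howmany = 0;
        int i;

        /* 1) Clean the tx rings first: every completed skb that
         *    passes skb_recycle_check() lands on ugeth->rx_recycle. */
        spin_lock(&ugeth->lock);
        for (i = 0; i < ug_info->numQueuesTx; i++)
                ucc_geth_tx(ugeth->ndev, i);
        spin_unlock(&ugeth->lock);

        /* 2) Then process rx: get_new_skb() dequeues from rx_recycle
         *    before falling back to dev_alloc_skb(). */
        for (i = 0; i < ug_info->numQueuesRx; i++)
                howmany += ucc_geth_rx(ugeth, i, budget - howmany);

        return howmany;
}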