author		Anton Vorontsov <avorontsov@ru.mvista.com>	2009-10-12 02:00:39 -0400
committer	David S. Miller <davem@davemloft.net>	2009-10-13 02:54:05 -0400
commit		8728327e7a7a7f21f3a7109e65503f4cc3305e78 (patch)
tree		b06e9fa0da9176043652be8096c6d672b53a971b /drivers
parent		8a102fe001cc016dabcc392247a2b008e37ffe6a (diff)
gianfar: Factor out gfar_init_bds() from gfar_alloc_skb_resources()
After hibernation we want to just reinitialize the BDs; there is no
need to allocate anything. So, factor out the BD initialization code
from gfar_alloc_skb_resources() into a new gfar_init_bds() helper.

Also, teach gfar_init_bds() to reuse already-allocated RX SKBs, i.e.
just call gfar_init_rxbdp() if an SKB was already allocated and mapped.

Signed-off-by: Anton Vorontsov <avorontsov@ru.mvista.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
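For context: the intended consumer of the new helper is the hibernation
restore path, which this patch only prepares for. Below is a minimal,
illustrative sketch of how such a resume handler might call
gfar_init_bds(); the name gfar_restore(), the priv->ndev field access,
and the surrounding steps are assumptions for illustration, not part of
this commit.

/*
 * Sketch only, not part of this patch: a hibernation restore handler
 * built around gfar_init_bds(). Names marked below are assumed.
 */
static int gfar_restore(struct device *dev)		/* assumed name */
{
	struct gfar_private *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->ndev;		/* assumed field */

	if (!netif_running(ndev)) {
		netif_device_attach(ndev);
		return 0;
	}

	/*
	 * The descriptor rings and the RX SKBs survived hibernation in
	 * memory, so just re-initialize the BDs in place; the helper
	 * allocates an SKB only where one is missing.
	 */
	if (gfar_init_bds(ndev))
		return -ENOMEM;

	/* ... re-program MAC registers and restart the controller ... */

	netif_device_attach(ndev);
	netif_start_queue(ndev);

	return 0;
}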
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/gianfar.c	96
1 file changed, 56 insertions(+), 40 deletions(-)
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index a84363261673..c2a508fe1cce 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -164,19 +164,68 @@ static void gfar_init_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
 	bdp->lstatus = lstatus;
 }
 
-static int gfar_alloc_skb_resources(struct net_device *ndev)
+static int gfar_init_bds(struct net_device *ndev)
 {
+	struct gfar_private *priv = netdev_priv(ndev);
 	struct txbd8 *txbdp;
 	struct rxbd8 *rxbdp;
+	int i;
+
+	/* Initialize some variables in our dev structure */
+	priv->num_txbdfree = priv->tx_ring_size;
+	priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
+	priv->cur_rx = priv->rx_bd_base;
+	priv->skb_curtx = priv->skb_dirtytx = 0;
+	priv->skb_currx = 0;
+
+	/* Initialize Transmit Descriptor Ring */
+	txbdp = priv->tx_bd_base;
+	for (i = 0; i < priv->tx_ring_size; i++) {
+		txbdp->lstatus = 0;
+		txbdp->bufPtr = 0;
+		txbdp++;
+	}
+
+	/* Set the last descriptor in the ring to indicate wrap */
+	txbdp--;
+	txbdp->status |= TXBD_WRAP;
+
+	rxbdp = priv->rx_bd_base;
+	for (i = 0; i < priv->rx_ring_size; i++) {
+		struct sk_buff *skb = priv->rx_skbuff[i];
+
+		if (skb) {
+			gfar_init_rxbdp(ndev, rxbdp, rxbdp->bufPtr);
+		} else {
+			skb = gfar_new_skb(ndev);
+			if (!skb) {
+				pr_err("%s: Can't allocate RX buffers\n",
+				       ndev->name);
+				return -ENOMEM;
+			}
+			priv->rx_skbuff[i] = skb;
+
+			gfar_new_rxbdp(ndev, rxbdp, skb);
+		}
+
+		rxbdp++;
+	}
+
+	return 0;
+}
+
+static int gfar_alloc_skb_resources(struct net_device *ndev)
+{
 	void *vaddr;
 	int i;
 	struct gfar_private *priv = netdev_priv(ndev);
 	struct device *dev = &priv->ofdev->dev;
 
 	/* Allocate memory for the buffer descriptors */
-	vaddr = dma_alloc_coherent(dev, sizeof(*txbdp) * priv->tx_ring_size +
-				   sizeof(*rxbdp) * priv->rx_ring_size,
-				   &priv->tx_bd_dma_base, GFP_KERNEL);
+	vaddr = dma_alloc_coherent(dev,
+			sizeof(*priv->tx_bd_base) * priv->tx_ring_size +
+			sizeof(*priv->rx_bd_base) * priv->rx_ring_size,
+			&priv->tx_bd_dma_base, GFP_KERNEL);
 	if (!vaddr) {
 		if (netif_msg_ifup(priv))
 			pr_err("%s: Could not allocate buffer descriptors!\n",
@@ -187,7 +236,7 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
 	priv->tx_bd_base = vaddr;
 
 	/* Start the rx descriptor ring where the tx ring leaves off */
-	vaddr = vaddr + sizeof(*txbdp) * priv->tx_ring_size;
+	vaddr = vaddr + sizeof(*priv->tx_bd_base) * priv->tx_ring_size;
 	priv->rx_bd_base = vaddr;
 
 	/* Setup the skbuff rings */
@@ -215,41 +264,8 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
 	for (i = 0; i < priv->rx_ring_size; i++)
 		priv->rx_skbuff[i] = NULL;
 
-	/* Initialize some variables in our dev structure */
-	priv->num_txbdfree = priv->tx_ring_size;
-	priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
-	priv->cur_rx = priv->rx_bd_base;
-	priv->skb_curtx = priv->skb_dirtytx = 0;
-	priv->skb_currx = 0;
-
-	/* Initialize Transmit Descriptor Ring */
-	txbdp = priv->tx_bd_base;
-	for (i = 0; i < priv->tx_ring_size; i++) {
-		txbdp->lstatus = 0;
-		txbdp->bufPtr = 0;
-		txbdp++;
-	}
-
-	/* Set the last descriptor in the ring to indicate wrap */
-	txbdp--;
-	txbdp->status |= TXBD_WRAP;
-
-	rxbdp = priv->rx_bd_base;
-	for (i = 0; i < priv->rx_ring_size; i++) {
-		struct sk_buff *skb;
-
-		skb = gfar_new_skb(ndev);
-		if (!skb) {
-			pr_err("%s: Can't allocate RX buffers\n", ndev->name);
-			goto cleanup;
-		}
-
-		priv->rx_skbuff[i] = skb;
-
-		gfar_new_rxbdp(ndev, rxbdp, skb);
-
-		rxbdp++;
-	}
+	if (gfar_init_bds(ndev))
+		goto cleanup;
 
 	return 0;
 
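A note on the sizeof() changes above: with txbdp and rxbdp now local to
gfar_init_bds(), gfar_alloc_skb_resources() can no longer size the
allocation via those variables, so the expressions switch to
sizeof(*priv->tx_bd_base) and sizeof(*priv->rx_bd_base). Both rings
share one DMA-coherent block, with the RX ring starting right where the
TX ring ends. A simplified, self-contained sketch of that layout
computation (the stand-in bd8 type and the alloc_rings() helper are
illustrative, not the driver's real declarations):

/*
 * Sketch only: one coherent allocation holds the TX descriptor ring
 * followed immediately by the RX descriptor ring. struct bd8 is a
 * simplified stand-in for the driver's real txbd8/rxbd8 descriptors.
 */
struct bd8 {
	u32 lstatus;	/* length + status flags */
	u32 bufPtr;	/* DMA address of the data buffer */
};

static void *alloc_rings(struct device *dev, unsigned int tx_ring_size,
			 unsigned int rx_ring_size, dma_addr_t *dma_base,
			 struct bd8 **tx_bd_base, struct bd8 **rx_bd_base)
{
	void *vaddr = dma_alloc_coherent(dev,
			sizeof(struct bd8) * tx_ring_size +
			sizeof(struct bd8) * rx_ring_size,
			dma_base, GFP_KERNEL);
	if (!vaddr)
		return NULL;

	*tx_bd_base = vaddr;	/* TX ring occupies the start of the block */
	*rx_bd_base = vaddr + sizeof(struct bd8) * tx_ring_size; /* then RX */
	return vaddr;
}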