about summary refs log tree commit diff stats
path: root/drivers/net/gianfar.c
diff options
context:
space:
mode:
authorAnton Vorontsov <avorontsov@ru.mvista.com>2009-10-12 02:00:34 -0400
committerDavid S. Miller <davem@davemloft.net>2009-10-13 02:54:03 -0400
commit826aa4a05669a46e435f65db901186e42bb43d8d (patch)
tree05dd59c09653610b06ee20d957c7387fbe9bf413 /drivers/net/gianfar.c
parent14231176b0dc358f8693f25b62017d222dd995e6 (diff)
gianfar: Split allocation and initialization steps out of startup_gfar()
Two new functions implemented: gfar_alloc_skb_resources() and gfar_init_mac(). We'll use gfar_init_mac() for restoring after hibernation. The patch just moves the code around, there should be no functional changes. Signed-off-by: Anton Vorontsov <avorontsov@ru.mvista.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/gianfar.c')
-rw-r--r--drivers/net/gianfar.c334
1 file changed, 176 insertions, 158 deletions
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index f84974195507..c8735540b1ec 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -147,6 +147,176 @@ MODULE_AUTHOR("Freescale Semiconductor, Inc");
147MODULE_DESCRIPTION("Gianfar Ethernet Driver"); 147MODULE_DESCRIPTION("Gianfar Ethernet Driver");
148MODULE_LICENSE("GPL"); 148MODULE_LICENSE("GPL");
149 149
150static int gfar_alloc_skb_resources(struct net_device *ndev)
151{
152 struct txbd8 *txbdp;
153 struct rxbd8 *rxbdp;
154 dma_addr_t addr = 0;
155 void *vaddr;
156 int i;
157 struct gfar_private *priv = netdev_priv(ndev);
158 struct device *dev = &priv->ofdev->dev;
159 struct gfar __iomem *regs = priv->regs;
160
161 /* Allocate memory for the buffer descriptors */
162 vaddr = dma_alloc_coherent(dev, sizeof(*txbdp) * priv->tx_ring_size +
163 sizeof(*rxbdp) * priv->rx_ring_size,
164 &addr, GFP_KERNEL);
165 if (!vaddr) {
166 if (netif_msg_ifup(priv))
167 pr_err("%s: Could not allocate buffer descriptors!\n",
168 ndev->name);
169 return -ENOMEM;
170 }
171
172 priv->tx_bd_base = vaddr;
173
174 /* enet DMA only understands physical addresses */
175 gfar_write(&regs->tbase0, addr);
176
177 /* Start the rx descriptor ring where the tx ring leaves off */
178 addr = addr + sizeof(*txbdp) * priv->tx_ring_size;
179 vaddr = vaddr + sizeof(*txbdp) * priv->tx_ring_size;
180 priv->rx_bd_base = vaddr;
181 gfar_write(&regs->rbase0, addr);
182
183 /* Setup the skbuff rings */
184 priv->tx_skbuff = kmalloc(sizeof(*priv->tx_skbuff) *
185 priv->tx_ring_size, GFP_KERNEL);
186 if (!priv->tx_skbuff) {
187 if (netif_msg_ifup(priv))
188 pr_err("%s: Could not allocate tx_skbuff\n",
189 ndev->name);
190 goto cleanup;
191 }
192
193 for (i = 0; i < priv->tx_ring_size; i++)
194 priv->tx_skbuff[i] = NULL;
195
196 priv->rx_skbuff = kmalloc(sizeof(*priv->rx_skbuff) *
197 priv->rx_ring_size, GFP_KERNEL);
198 if (!priv->rx_skbuff) {
199 if (netif_msg_ifup(priv))
200 pr_err("%s: Could not allocate rx_skbuff\n",
201 ndev->name);
202 goto cleanup;
203 }
204
205 for (i = 0; i < priv->rx_ring_size; i++)
206 priv->rx_skbuff[i] = NULL;
207
208 /* Initialize some variables in our dev structure */
209 priv->num_txbdfree = priv->tx_ring_size;
210 priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
211 priv->cur_rx = priv->rx_bd_base;
212 priv->skb_curtx = priv->skb_dirtytx = 0;
213 priv->skb_currx = 0;
214
215 /* Initialize Transmit Descriptor Ring */
216 txbdp = priv->tx_bd_base;
217 for (i = 0; i < priv->tx_ring_size; i++) {
218 txbdp->lstatus = 0;
219 txbdp->bufPtr = 0;
220 txbdp++;
221 }
222
223 /* Set the last descriptor in the ring to indicate wrap */
224 txbdp--;
225 txbdp->status |= TXBD_WRAP;
226
227 rxbdp = priv->rx_bd_base;
228 for (i = 0; i < priv->rx_ring_size; i++) {
229 struct sk_buff *skb;
230
231 skb = gfar_new_skb(ndev);
232 if (!skb) {
233 pr_err("%s: Can't allocate RX buffers\n", ndev->name);
234 goto cleanup;
235 }
236
237 priv->rx_skbuff[i] = skb;
238
239 gfar_new_rxbdp(ndev, rxbdp, skb);
240
241 rxbdp++;
242 }
243
244 return 0;
245
246cleanup:
247 free_skb_resources(priv);
248 return -ENOMEM;
249}
250
251static void gfar_init_mac(struct net_device *ndev)
252{
253 struct gfar_private *priv = netdev_priv(ndev);
254 struct gfar __iomem *regs = priv->regs;
255 u32 rctrl = 0;
256 u32 tctrl = 0;
257 u32 attrs = 0;
258
259 /* Configure the coalescing support */
260 gfar_write(&regs->txic, 0);
261 if (priv->txcoalescing)
262 gfar_write(&regs->txic, priv->txic);
263
264 gfar_write(&regs->rxic, 0);
265 if (priv->rxcoalescing)
266 gfar_write(&regs->rxic, priv->rxic);
267
268 if (priv->rx_csum_enable)
269 rctrl |= RCTRL_CHECKSUMMING;
270
271 if (priv->extended_hash) {
272 rctrl |= RCTRL_EXTHASH;
273
274 gfar_clear_exact_match(ndev);
275 rctrl |= RCTRL_EMEN;
276 }
277
278 if (priv->padding) {
279 rctrl &= ~RCTRL_PAL_MASK;
280 rctrl |= RCTRL_PADDING(priv->padding);
281 }
282
283 /* keep vlan related bits if it's enabled */
284 if (priv->vlgrp) {
285 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
286 tctrl |= TCTRL_VLINS;
287 }
288
289 /* Init rctrl based on our settings */
290 gfar_write(&regs->rctrl, rctrl);
291
292 if (ndev->features & NETIF_F_IP_CSUM)
293 tctrl |= TCTRL_INIT_CSUM;
294
295 gfar_write(&regs->tctrl, tctrl);
296
297 /* Set the extraction length and index */
298 attrs = ATTRELI_EL(priv->rx_stash_size) |
299 ATTRELI_EI(priv->rx_stash_index);
300
301 gfar_write(&regs->attreli, attrs);
302
303 /* Start with defaults, and add stashing or locking
304 * depending on the approprate variables */
305 attrs = ATTR_INIT_SETTINGS;
306
307 if (priv->bd_stash_en)
308 attrs |= ATTR_BDSTASH;
309
310 if (priv->rx_stash_size != 0)
311 attrs |= ATTR_BUFSTASH;
312
313 gfar_write(&regs->attr, attrs);
314
315 gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
316 gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
317 gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
318}
319
150static const struct net_device_ops gfar_netdev_ops = { 320static const struct net_device_ops gfar_netdev_ops = {
151 .ndo_open = gfar_enet_open, 321 .ndo_open = gfar_enet_open,
152 .ndo_start_xmit = gfar_start_xmit, 322 .ndo_start_xmit = gfar_start_xmit,
@@ -927,106 +1097,17 @@ void gfar_start(struct net_device *dev)
927/* Bring the controller up and running */ 1097/* Bring the controller up and running */
928int startup_gfar(struct net_device *ndev) 1098int startup_gfar(struct net_device *ndev)
929{ 1099{
930 struct txbd8 *txbdp;
931 struct rxbd8 *rxbdp;
932 dma_addr_t addr = 0;
933 void *vaddr;
934 int i;
935 struct gfar_private *priv = netdev_priv(ndev); 1100 struct gfar_private *priv = netdev_priv(ndev);
936 struct device *dev = &priv->ofdev->dev;
937 struct gfar __iomem *regs = priv->regs; 1101 struct gfar __iomem *regs = priv->regs;
938 int err; 1102 int err;
939 u32 rctrl = 0;
940 u32 tctrl = 0;
941 u32 attrs = 0;
942 1103
943 gfar_write(&regs->imask, IMASK_INIT_CLEAR); 1104 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
944 1105
945 /* Allocate memory for the buffer descriptors */ 1106 err = gfar_alloc_skb_resources(ndev);
946 vaddr = dma_alloc_coherent(dev, sizeof(*txbdp) * priv->tx_ring_size + 1107 if (err)
947 sizeof(*rxbdp) * priv->rx_ring_size, 1108 return err;
948 &addr, GFP_KERNEL);
949 if (!vaddr) {
950 if (netif_msg_ifup(priv))
951 pr_err("%s: Could not allocate buffer descriptors!\n",
952 ndev->name);
953 return -ENOMEM;
954 }
955
956 priv->tx_bd_base = vaddr;
957
958 /* enet DMA only understands physical addresses */
959 gfar_write(&regs->tbase0, addr);
960
961 /* Start the rx descriptor ring where the tx ring leaves off */
962 addr = addr + sizeof(*txbdp) * priv->tx_ring_size;
963 vaddr = vaddr + sizeof(*txbdp) * priv->tx_ring_size;
964 priv->rx_bd_base = vaddr;
965 gfar_write(&regs->rbase0, addr);
966
967 /* Setup the skbuff rings */
968 priv->tx_skbuff = kmalloc(sizeof(*priv->tx_skbuff) *
969 priv->tx_ring_size, GFP_KERNEL);
970 if (!priv->tx_skbuff) {
971 if (netif_msg_ifup(priv))
972 pr_err("%s: Could not allocate tx_skbuff\n",
973 ndev->name);
974 err = -ENOMEM;
975 goto tx_skb_fail;
976 }
977
978 for (i = 0; i < priv->tx_ring_size; i++)
979 priv->tx_skbuff[i] = NULL;
980
981 priv->rx_skbuff = kmalloc(sizeof(*priv->rx_skbuff) *
982 priv->rx_ring_size, GFP_KERNEL);
983 if (!priv->rx_skbuff) {
984 if (netif_msg_ifup(priv))
985 pr_err("%s: Could not allocate rx_skbuff\n",
986 ndev->name);
987 err = -ENOMEM;
988 goto rx_skb_fail;
989 }
990
991 for (i = 0; i < priv->rx_ring_size; i++)
992 priv->rx_skbuff[i] = NULL;
993
994 /* Initialize some variables in our dev structure */
995 priv->num_txbdfree = priv->tx_ring_size;
996 priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
997 priv->cur_rx = priv->rx_bd_base;
998 priv->skb_curtx = priv->skb_dirtytx = 0;
999 priv->skb_currx = 0;
1000
1001 /* Initialize Transmit Descriptor Ring */
1002 txbdp = priv->tx_bd_base;
1003 for (i = 0; i < priv->tx_ring_size; i++) {
1004 txbdp->lstatus = 0;
1005 txbdp->bufPtr = 0;
1006 txbdp++;
1007 }
1008
1009 /* Set the last descriptor in the ring to indicate wrap */
1010 txbdp--;
1011 txbdp->status |= TXBD_WRAP;
1012
1013 rxbdp = priv->rx_bd_base;
1014 for (i = 0; i < priv->rx_ring_size; i++) {
1015 struct sk_buff *skb;
1016
1017 skb = gfar_new_skb(ndev);
1018 if (!skb) {
1019 pr_err("%s: Can't allocate RX buffers\n", ndev->name);
1020 err = -ENOMEM;
1021 goto err_rxalloc_fail;
1022 }
1023
1024 priv->rx_skbuff[i] = skb;
1025
1026 gfar_new_rxbdp(ndev, rxbdp, skb);
1027 1109
1028 rxbdp++; 1110 gfar_init_mac(ndev);
1029 }
1030 1111
1031 /* If the device has multiple interrupts, register for 1112 /* If the device has multiple interrupts, register for
1032 * them. Otherwise, only register for the one */ 1113 * them. Otherwise, only register for the one */
@@ -1070,71 +1151,11 @@ int startup_gfar(struct net_device *ndev)
1070 } 1151 }
1071 } 1152 }
1072 1153
1073 phy_start(priv->phydev);
1074
1075 /* Configure the coalescing support */
1076 gfar_write(&regs->txic, 0);
1077 if (priv->txcoalescing)
1078 gfar_write(&regs->txic, priv->txic);
1079
1080 gfar_write(&regs->rxic, 0);
1081 if (priv->rxcoalescing)
1082 gfar_write(&regs->rxic, priv->rxic);
1083
1084 if (priv->rx_csum_enable)
1085 rctrl |= RCTRL_CHECKSUMMING;
1086
1087 if (priv->extended_hash) {
1088 rctrl |= RCTRL_EXTHASH;
1089
1090 gfar_clear_exact_match(ndev);
1091 rctrl |= RCTRL_EMEN;
1092 }
1093
1094 if (priv->padding) {
1095 rctrl &= ~RCTRL_PAL_MASK;
1096 rctrl |= RCTRL_PADDING(priv->padding);
1097 }
1098
1099 /* keep vlan related bits if it's enabled */
1100 if (priv->vlgrp) {
1101 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
1102 tctrl |= TCTRL_VLINS;
1103 }
1104
1105 /* Init rctrl based on our settings */
1106 gfar_write(&regs->rctrl, rctrl);
1107
1108 if (ndev->features & NETIF_F_IP_CSUM)
1109 tctrl |= TCTRL_INIT_CSUM;
1110
1111 gfar_write(&regs->tctrl, tctrl);
1112
1113 /* Set the extraction length and index */
1114 attrs = ATTRELI_EL(priv->rx_stash_size) |
1115 ATTRELI_EI(priv->rx_stash_index);
1116
1117 gfar_write(&regs->attreli, attrs);
1118
1119 /* Start with defaults, and add stashing or locking
1120 * depending on the approprate variables */
1121 attrs = ATTR_INIT_SETTINGS;
1122
1123 if (priv->bd_stash_en)
1124 attrs |= ATTR_BDSTASH;
1125
1126 if (priv->rx_stash_size != 0)
1127 attrs |= ATTR_BUFSTASH;
1128
1129 gfar_write(&regs->attr, attrs);
1130
1131 gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
1132 gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
1133 gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
1134
1135 /* Start the controller */ 1154 /* Start the controller */
1136 gfar_start(ndev); 1155 gfar_start(ndev);
1137 1156
1157 phy_start(priv->phydev);
1158
1138 return 0; 1159 return 0;
1139 1160
1140rx_irq_fail: 1161rx_irq_fail:
@@ -1142,9 +1163,6 @@ rx_irq_fail:
1142tx_irq_fail: 1163tx_irq_fail:
1143 free_irq(priv->interruptError, ndev); 1164 free_irq(priv->interruptError, ndev);
1144err_irq_fail: 1165err_irq_fail:
1145err_rxalloc_fail:
1146rx_skb_fail:
1147tx_skb_fail:
1148 free_skb_resources(priv); 1166 free_skb_resources(priv);
1149 return err; 1167 return err;
1150} 1168}