author	Paul Gortmaker <paul.gortmaker@windriver.com>	2012-02-26 21:36:29 -0500
committer	David S. Miller <davem@davemloft.net>	2012-03-04 20:54:01 -0500
commit	e19a82c18f0e6360ee9fd431721794eb0036c0cd (patch)
tree	563c8e2c3490b151bdbab03be25c92e3201915a6
parent	4b32da2bcf1de2b7a196a0e48389d231b4472c36 (diff)
ucc_geth: separate out rx/tx ring alloc and free operations
Factor out the existing allocation and free operations so that they can be used individually. This improves code readability and also prepares for possible future changes, such as better error recovery and more dynamic configuration (e.g. on-the-fly resizing of the rings).

This change is a straight relocation of the existing code into separate routines, without changing any of the contained code itself. Local variables are relocated as necessary.

Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/ethernet/freescale/ucc_geth.c	453
1 files changed, 257 insertions, 196 deletions
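
For orientation, a minimal sketch of the call structure this patch produces, drawn from the diff below (function bodies elided; not the full driver code): ucc_geth_memclean() now delegates ring teardown to the new free helpers, and ucc_geth_startup() delegates ring setup to the new alloc helpers.

	static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
	{
		/* ... free PRAM and shadow structures ... */
		ucc_geth_free_tx(ugeth);	/* unmap/free Tx skbs and Tx BD rings */
		ucc_geth_free_rx(ugeth);	/* unmap/free Rx skbs and Rx BD rings */
		/* ... drain group/individual hash queues ... */
	}

	static int ucc_geth_startup(struct ucc_geth_private *ugeth)
	{
		int ret_val;

		/* ... UCC fast init, statistics mode setup ... */
		ret_val = ucc_geth_alloc_tx(ugeth);	/* allocate and init Tx BD rings */
		if (ret_val != 0)
			return ret_val;
		ret_val = ucc_geth_alloc_rx(ugeth);	/* allocate and init Rx BD rings */
		if (ret_val != 0)
			return ret_val;
		/* ... global PRAM setup continues ... */
		return 0;
	}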
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index ec0905461312..4e3cd2f8debb 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1856,11 +1856,93 @@ static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *uge
 	return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */
 }
 
-static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
+static void ucc_geth_free_rx(struct ucc_geth_private *ugeth)
+{
+	struct ucc_geth_info *ug_info;
+	struct ucc_fast_info *uf_info;
+	u16 i, j;
+	u8 __iomem *bd;
+
+
+	ug_info = ugeth->ug_info;
+	uf_info = &ug_info->uf_info;
+
+	for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+		if (ugeth->p_rx_bd_ring[i]) {
+			/* Return existing data buffers in ring */
+			bd = ugeth->p_rx_bd_ring[i];
+			for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
+				if (ugeth->rx_skbuff[i][j]) {
+					dma_unmap_single(ugeth->dev,
+						in_be32(&((struct qe_bd __iomem *)bd)->buf),
+						ugeth->ug_info->
+						uf_info.max_rx_buf_length +
+						UCC_GETH_RX_DATA_BUF_ALIGNMENT,
+						DMA_FROM_DEVICE);
+					dev_kfree_skb_any(
+						ugeth->rx_skbuff[i][j]);
+					ugeth->rx_skbuff[i][j] = NULL;
+				}
+				bd += sizeof(struct qe_bd);
+			}
+
+			kfree(ugeth->rx_skbuff[i]);
+
+			if (ugeth->ug_info->uf_info.bd_mem_part ==
+			    MEM_PART_SYSTEM)
+				kfree((void *)ugeth->rx_bd_ring_offset[i]);
+			else if (ugeth->ug_info->uf_info.bd_mem_part ==
+				 MEM_PART_MURAM)
+				qe_muram_free(ugeth->rx_bd_ring_offset[i]);
+			ugeth->p_rx_bd_ring[i] = NULL;
+		}
+	}
+
+}
+
+static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
 {
+	struct ucc_geth_info *ug_info;
+	struct ucc_fast_info *uf_info;
 	u16 i, j;
 	u8 __iomem *bd;
 
+	ug_info = ugeth->ug_info;
+	uf_info = &ug_info->uf_info;
+
+	for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
+		bd = ugeth->p_tx_bd_ring[i];
+		if (!bd)
+			continue;
+		for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
+			if (ugeth->tx_skbuff[i][j]) {
+				dma_unmap_single(ugeth->dev,
+						 in_be32(&((struct qe_bd __iomem *)bd)->buf),
+						 (in_be32((u32 __iomem *)bd) &
+						  BD_LENGTH_MASK),
+						 DMA_TO_DEVICE);
+				dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
+				ugeth->tx_skbuff[i][j] = NULL;
+			}
+		}
+
+		kfree(ugeth->tx_skbuff[i]);
+
+		if (ugeth->p_tx_bd_ring[i]) {
+			if (ugeth->ug_info->uf_info.bd_mem_part ==
+			    MEM_PART_SYSTEM)
+				kfree((void *)ugeth->tx_bd_ring_offset[i]);
+			else if (ugeth->ug_info->uf_info.bd_mem_part ==
+				 MEM_PART_MURAM)
+				qe_muram_free(ugeth->tx_bd_ring_offset[i]);
+			ugeth->p_tx_bd_ring[i] = NULL;
+		}
+	}
+
+}
+
+static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
+{
 	if (!ugeth)
 		return;
 
@@ -1927,64 +2009,8 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
 		kfree(ugeth->p_init_enet_param_shadow);
 		ugeth->p_init_enet_param_shadow = NULL;
 	}
-	for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
-		bd = ugeth->p_tx_bd_ring[i];
-		if (!bd)
-			continue;
-		for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
-			if (ugeth->tx_skbuff[i][j]) {
-				dma_unmap_single(ugeth->dev,
-						 in_be32(&((struct qe_bd __iomem *)bd)->buf),
-						 (in_be32((u32 __iomem *)bd) &
-						  BD_LENGTH_MASK),
-						 DMA_TO_DEVICE);
-				dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
-				ugeth->tx_skbuff[i][j] = NULL;
-			}
-		}
-
-		kfree(ugeth->tx_skbuff[i]);
-
-		if (ugeth->p_tx_bd_ring[i]) {
-			if (ugeth->ug_info->uf_info.bd_mem_part ==
-			    MEM_PART_SYSTEM)
-				kfree((void *)ugeth->tx_bd_ring_offset[i]);
-			else if (ugeth->ug_info->uf_info.bd_mem_part ==
-				 MEM_PART_MURAM)
-				qe_muram_free(ugeth->tx_bd_ring_offset[i]);
-			ugeth->p_tx_bd_ring[i] = NULL;
-		}
-	}
-	for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
-		if (ugeth->p_rx_bd_ring[i]) {
-			/* Return existing data buffers in ring */
-			bd = ugeth->p_rx_bd_ring[i];
-			for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
-				if (ugeth->rx_skbuff[i][j]) {
-					dma_unmap_single(ugeth->dev,
-						in_be32(&((struct qe_bd __iomem *)bd)->buf),
-						ugeth->ug_info->
-						uf_info.max_rx_buf_length +
-						UCC_GETH_RX_DATA_BUF_ALIGNMENT,
-						DMA_FROM_DEVICE);
-					dev_kfree_skb_any(
-						ugeth->rx_skbuff[i][j]);
-					ugeth->rx_skbuff[i][j] = NULL;
-				}
-				bd += sizeof(struct qe_bd);
-			}
-
-			kfree(ugeth->rx_skbuff[i]);
-
-			if (ugeth->ug_info->uf_info.bd_mem_part ==
-			    MEM_PART_SYSTEM)
-				kfree((void *)ugeth->rx_bd_ring_offset[i]);
-			else if (ugeth->ug_info->uf_info.bd_mem_part ==
-				 MEM_PART_MURAM)
-				qe_muram_free(ugeth->rx_bd_ring_offset[i]);
-			ugeth->p_rx_bd_ring[i] = NULL;
-		}
-	}
+	ucc_geth_free_tx(ugeth);
+	ucc_geth_free_rx(ugeth);
 	while (!list_empty(&ugeth->group_hash_q))
 		put_enet_addr_container(ENET_ADDR_CONT_ENTRY
 					(dequeue(&ugeth->group_hash_q)));
@@ -2210,6 +2236,171 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
 	return 0;
 }
 
+static int ucc_geth_alloc_tx(struct ucc_geth_private *ugeth)
+{
+	struct ucc_geth_info *ug_info;
+	struct ucc_fast_info *uf_info;
+	int length;
+	u16 i, j;
+	u8 __iomem *bd;
+
+	ug_info = ugeth->ug_info;
+	uf_info = &ug_info->uf_info;
+
+	/* Allocate Tx bds */
+	for (j = 0; j < ug_info->numQueuesTx; j++) {
+		/* Allocate in multiple of
+		   UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT,
+		   according to spec */
+		length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd))
+			  / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
+		    * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
+		if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) %
+		    UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
+			length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
+		if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
+			u32 align = 4;
+			if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
+				align = UCC_GETH_TX_BD_RING_ALIGNMENT;
+			ugeth->tx_bd_ring_offset[j] =
+			    (u32) kmalloc((u32) (length + align), GFP_KERNEL);
+
+			if (ugeth->tx_bd_ring_offset[j] != 0)
+				ugeth->p_tx_bd_ring[j] =
+				    (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] +
+				    align) & ~(align - 1));
+		} else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
+			ugeth->tx_bd_ring_offset[j] =
+			    qe_muram_alloc(length,
+					   UCC_GETH_TX_BD_RING_ALIGNMENT);
+			if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j]))
+				ugeth->p_tx_bd_ring[j] =
+				    (u8 __iomem *) qe_muram_addr(ugeth->
+							 tx_bd_ring_offset[j]);
+		}
+		if (!ugeth->p_tx_bd_ring[j]) {
+			if (netif_msg_ifup(ugeth))
+				ugeth_err
+				    ("%s: Can not allocate memory for Tx bd rings.",
+				     __func__);
+			return -ENOMEM;
+		}
+		/* Zero unused end of bd ring, according to spec */
+		memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] +
+			  ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0,
+			  length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd));
+	}
+
+	/* Init Tx bds */
+	for (j = 0; j < ug_info->numQueuesTx; j++) {
+		/* Setup the skbuff rings */
+		ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
+					      ugeth->ug_info->bdRingLenTx[j],
+					      GFP_KERNEL);
+
+		if (ugeth->tx_skbuff[j] == NULL) {
+			if (netif_msg_ifup(ugeth))
+				ugeth_err("%s: Could not allocate tx_skbuff",
+					  __func__);
+			return -ENOMEM;
+		}
+
+		for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++)
+			ugeth->tx_skbuff[j][i] = NULL;
+
+		ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
+		bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
+		for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
+			/* clear bd buffer */
+			out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
+			/* set bd status and length */
+			out_be32((u32 __iomem *)bd, 0);
+			bd += sizeof(struct qe_bd);
+		}
+		bd -= sizeof(struct qe_bd);
+		/* set bd status and length */
+		out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */
+	}
+
+	return 0;
+}
+
+static int ucc_geth_alloc_rx(struct ucc_geth_private *ugeth)
+{
+	struct ucc_geth_info *ug_info;
+	struct ucc_fast_info *uf_info;
+	int length;
+	u16 i, j;
+	u8 __iomem *bd;
+
+	ug_info = ugeth->ug_info;
+	uf_info = &ug_info->uf_info;
+
+	/* Allocate Rx bds */
+	for (j = 0; j < ug_info->numQueuesRx; j++) {
+		length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd);
+		if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
+			u32 align = 4;
+			if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
+				align = UCC_GETH_RX_BD_RING_ALIGNMENT;
+			ugeth->rx_bd_ring_offset[j] =
+			    (u32) kmalloc((u32) (length + align), GFP_KERNEL);
+			if (ugeth->rx_bd_ring_offset[j] != 0)
+				ugeth->p_rx_bd_ring[j] =
+				    (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] +
+				    align) & ~(align - 1));
+		} else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
+			ugeth->rx_bd_ring_offset[j] =
+			    qe_muram_alloc(length,
+					   UCC_GETH_RX_BD_RING_ALIGNMENT);
+			if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j]))
+				ugeth->p_rx_bd_ring[j] =
+				    (u8 __iomem *) qe_muram_addr(ugeth->
+							 rx_bd_ring_offset[j]);
+		}
+		if (!ugeth->p_rx_bd_ring[j]) {
+			if (netif_msg_ifup(ugeth))
+				ugeth_err
+				    ("%s: Can not allocate memory for Rx bd rings.",
+				     __func__);
+			return -ENOMEM;
+		}
+	}
+
+	/* Init Rx bds */
+	for (j = 0; j < ug_info->numQueuesRx; j++) {
+		/* Setup the skbuff rings */
+		ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
+					      ugeth->ug_info->bdRingLenRx[j],
+					      GFP_KERNEL);
+
+		if (ugeth->rx_skbuff[j] == NULL) {
+			if (netif_msg_ifup(ugeth))
+				ugeth_err("%s: Could not allocate rx_skbuff",
+					  __func__);
+			return -ENOMEM;
+		}
+
+		for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++)
+			ugeth->rx_skbuff[j][i] = NULL;
+
+		ugeth->skb_currx[j] = 0;
+		bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
+		for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
+			/* set bd status and length */
+			out_be32((u32 __iomem *)bd, R_I);
+			/* clear bd buffer */
+			out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
+			bd += sizeof(struct qe_bd);
+		}
+		bd -= sizeof(struct qe_bd);
+		/* set bd status and length */
+		out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */
+	}
+
+	return 0;
+}
+
 static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 {
 	struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
@@ -2222,11 +2413,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 	int ret_val = -EINVAL;
 	u32 remoder = UCC_GETH_REMODER_INIT;
 	u32 init_enet_pram_offset, cecr_subblock, command;
-	u32 ifstat, i, j, size, l2qt, l3qt, length;
+	u32 ifstat, i, j, size, l2qt, l3qt;
 	u16 temoder = UCC_GETH_TEMODER_INIT;
 	u16 test;
 	u8 function_code = 0;
-	u8 __iomem *bd;
 	u8 __iomem *endOfRing;
 	u8 numThreadsRxNumerical, numThreadsTxNumerical;
 
@@ -2366,142 +2556,13 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
 		UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE),
 		0, &uf_regs->upsmr, &ug_regs->uescr);
 
-	/* Allocate Tx bds */
-	for (j = 0; j < ug_info->numQueuesTx; j++) {
-		/* Allocate in multiple of
-		   UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT,
-		   according to spec */
-		length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd))
-			  / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
-		    * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
-		if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) %
-		    UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
-			length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
-		if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
-			u32 align = 4;
-			if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
-				align = UCC_GETH_TX_BD_RING_ALIGNMENT;
-			ugeth->tx_bd_ring_offset[j] =
-			    (u32) kmalloc((u32) (length + align), GFP_KERNEL);
-
-			if (ugeth->tx_bd_ring_offset[j] != 0)
-				ugeth->p_tx_bd_ring[j] =
-				    (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] +
-				    align) & ~(align - 1));
-		} else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
-			ugeth->tx_bd_ring_offset[j] =
-			    qe_muram_alloc(length,
-					   UCC_GETH_TX_BD_RING_ALIGNMENT);
-			if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j]))
-				ugeth->p_tx_bd_ring[j] =
-				    (u8 __iomem *) qe_muram_addr(ugeth->
-							 tx_bd_ring_offset[j]);
-		}
-		if (!ugeth->p_tx_bd_ring[j]) {
-			if (netif_msg_ifup(ugeth))
-				ugeth_err
-				    ("%s: Can not allocate memory for Tx bd rings.",
-				     __func__);
-			return -ENOMEM;
-		}
-		/* Zero unused end of bd ring, according to spec */
-		memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] +
-			  ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0,
-			  length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd));
-	}
-
-	/* Allocate Rx bds */
-	for (j = 0; j < ug_info->numQueuesRx; j++) {
-		length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd);
-		if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
-			u32 align = 4;
-			if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
-				align = UCC_GETH_RX_BD_RING_ALIGNMENT;
-			ugeth->rx_bd_ring_offset[j] =
-			    (u32) kmalloc((u32) (length + align), GFP_KERNEL);
-			if (ugeth->rx_bd_ring_offset[j] != 0)
-				ugeth->p_rx_bd_ring[j] =
-				    (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] +
-				    align) & ~(align - 1));
-		} else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
-			ugeth->rx_bd_ring_offset[j] =
-			    qe_muram_alloc(length,
-					   UCC_GETH_RX_BD_RING_ALIGNMENT);
-			if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j]))
-				ugeth->p_rx_bd_ring[j] =
-				    (u8 __iomem *) qe_muram_addr(ugeth->
-							 rx_bd_ring_offset[j]);
-		}
-		if (!ugeth->p_rx_bd_ring[j]) {
-			if (netif_msg_ifup(ugeth))
-				ugeth_err
-				    ("%s: Can not allocate memory for Rx bd rings.",
-				     __func__);
-			return -ENOMEM;
-		}
-	}
-
-	/* Init Tx bds */
-	for (j = 0; j < ug_info->numQueuesTx; j++) {
-		/* Setup the skbuff rings */
-		ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
-					      ugeth->ug_info->bdRingLenTx[j],
-					      GFP_KERNEL);
-
-		if (ugeth->tx_skbuff[j] == NULL) {
-			if (netif_msg_ifup(ugeth))
-				ugeth_err("%s: Could not allocate tx_skbuff",
-					  __func__);
-			return -ENOMEM;
-		}
-
-		for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++)
-			ugeth->tx_skbuff[j][i] = NULL;
-
-		ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
-		bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
-		for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
-			/* clear bd buffer */
-			out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
-			/* set bd status and length */
-			out_be32((u32 __iomem *)bd, 0);
-			bd += sizeof(struct qe_bd);
-		}
-		bd -= sizeof(struct qe_bd);
-		/* set bd status and length */
-		out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */
-	}
-
-	/* Init Rx bds */
-	for (j = 0; j < ug_info->numQueuesRx; j++) {
-		/* Setup the skbuff rings */
-		ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
-					      ugeth->ug_info->bdRingLenRx[j],
-					      GFP_KERNEL);
-
-		if (ugeth->rx_skbuff[j] == NULL) {
-			if (netif_msg_ifup(ugeth))
-				ugeth_err("%s: Could not allocate rx_skbuff",
-					  __func__);
-			return -ENOMEM;
-		}
-
-		for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++)
-			ugeth->rx_skbuff[j][i] = NULL;
+	ret_val = ucc_geth_alloc_tx(ugeth);
+	if (ret_val != 0)
+		return ret_val;
 
-		ugeth->skb_currx[j] = 0;
-		bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
-		for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
-			/* set bd status and length */
-			out_be32((u32 __iomem *)bd, R_I);
-			/* clear bd buffer */
-			out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
-			bd += sizeof(struct qe_bd);
-		}
-		bd -= sizeof(struct qe_bd);
-		/* set bd status and length */
-		out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */
-	}
+	ret_val = ucc_geth_alloc_rx(ugeth);
+	if (ret_val != 0)
+		return ret_val;
 
 	/*
 	 * Global PRAM