author     Cyril Chemparathy <cyril@ti.com>            2010-09-15 10:11:30 -0400
committer  Kevin Hilman <khilman@deeprootsystems.com>  2010-09-24 10:40:31 -0400
commit     d4ef0d42313dffa1feb94142b4c4286169fd1b0e (patch)
tree       5c656e049560a9bd28da73938c14f9a430283ab0 /drivers/net/davinci_emac.c
parent     3ef0fdb2342cf58f617ce2bdcd133978629c2403 (diff)
net: davinci_emac: cleanup unused cpdma code
Having switched over to the newly introduced cpdma layer, this patch removes a
large body of code that is now unused. The removal has been kept as a separate
patch strictly for readability.
Signed-off-by: Cyril Chemparathy <cyril@ti.com>
Acked-by: David S. Miller <davem@davemloft.net>
Tested-by: Michael Williamson <michael.williamson@criticallink.com>
Tested-by: Caglar Akyuz <caglarakyuz@gmail.com>
Signed-off-by: Kevin Hilman <khilman@deeprootsystems.com>
Diffstat (limited to 'drivers/net/davinci_emac.c')
 -rw-r--r--   drivers/net/davinci_emac.c | 930
 1 file changed, 0 insertions(+), 930 deletions(-)
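For orientation before the diff itself: all of the hand-rolled CPPI descriptor handling removed below is superseded by calls into the cpdma layer added in the parent commit. The sketch below shows roughly what the TX submit path looks like after the conversion; it is illustrative only, and the cpdma_chan_submit() signature, the GFP flag, and the helper name are assumptions rather than code from this patch.

```c
/* Hedged sketch of the post-conversion TX path (not part of this patch). */
static int emac_dev_xmit_sketch(struct sk_buff *skb, struct net_device *ndev)
{
	struct emac_priv *priv = netdev_priv(ndev);
	int ret;

	/* pad short frames; replaces the manual length fix-up in emac_send() */
	if (skb_padto(skb, EMAC_DEF_MIN_ETHPKTSIZE))
		return NETDEV_TX_OK;

	/* queue the buffer; descriptor allocation, queue chaining and the
	 * EOQ/misqueue handling now live inside the cpdma layer */
	ret = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len,
				GFP_KERNEL);
	if (ret) {
		/* out of descriptors, as out_of_tx_bd used to count */
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	/* completion comes back through emac_tx_handler(token, len, status),
	 * which updates stats and frees the skb */
	return NETDEV_TX_OK;
}
```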
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 67dbcfb5e894..2a628d17d178 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -127,7 +127,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
127 | /* EMAC register related defines */ | 127 | /* EMAC register related defines */ |
128 | #define EMAC_ALL_MULTI_REG_VALUE (0xFFFFFFFF) | 128 | #define EMAC_ALL_MULTI_REG_VALUE (0xFFFFFFFF) |
129 | #define EMAC_NUM_MULTICAST_BITS (64) | 129 | #define EMAC_NUM_MULTICAST_BITS (64) |
130 | #define EMAC_TEARDOWN_VALUE (0xFFFFFFFC) | ||
131 | #define EMAC_TX_CONTROL_TX_ENABLE_VAL (0x1) | 130 | #define EMAC_TX_CONTROL_TX_ENABLE_VAL (0x1) |
132 | #define EMAC_RX_CONTROL_RX_ENABLE_VAL (0x1) | 131 | #define EMAC_RX_CONTROL_RX_ENABLE_VAL (0x1) |
133 | #define EMAC_MAC_HOST_ERR_INTMASK_VAL (0x2) | 132 | #define EMAC_MAC_HOST_ERR_INTMASK_VAL (0x2) |
@@ -214,24 +213,10 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
214 | #define EMAC_DEF_MAX_MULTICAST_ADDRESSES (64) /* Max mcast addr's */ | 213 | #define EMAC_DEF_MAX_MULTICAST_ADDRESSES (64) /* Max mcast addr's */ |
215 | 214 | ||
216 | /* EMAC Peripheral Device Register Memory Layout structure */ | 215 | /* EMAC Peripheral Device Register Memory Layout structure */ |
217 | #define EMAC_TXIDVER 0x0 | ||
218 | #define EMAC_TXCONTROL 0x4 | ||
219 | #define EMAC_TXTEARDOWN 0x8 | ||
220 | #define EMAC_RXIDVER 0x10 | ||
221 | #define EMAC_RXCONTROL 0x14 | ||
222 | #define EMAC_RXTEARDOWN 0x18 | ||
223 | #define EMAC_TXINTSTATRAW 0x80 | ||
224 | #define EMAC_TXINTSTATMASKED 0x84 | ||
225 | #define EMAC_TXINTMASKSET 0x88 | ||
226 | #define EMAC_TXINTMASKCLEAR 0x8C | ||
227 | #define EMAC_MACINVECTOR 0x90 | 216 | #define EMAC_MACINVECTOR 0x90 |
228 | 217 | ||
229 | #define EMAC_DM646X_MACEOIVECTOR 0x94 | 218 | #define EMAC_DM646X_MACEOIVECTOR 0x94 |
230 | 219 | ||
231 | #define EMAC_RXINTSTATRAW 0xA0 | ||
232 | #define EMAC_RXINTSTATMASKED 0xA4 | ||
233 | #define EMAC_RXINTMASKSET 0xA8 | ||
234 | #define EMAC_RXINTMASKCLEAR 0xAC | ||
235 | #define EMAC_MACINTSTATRAW 0xB0 | 220 | #define EMAC_MACINTSTATRAW 0xB0 |
236 | #define EMAC_MACINTSTATMASKED 0xB4 | 221 | #define EMAC_MACINTSTATMASKED 0xB4 |
237 | #define EMAC_MACINTMASKSET 0xB8 | 222 | #define EMAC_MACINTMASKSET 0xB8 |
@@ -258,12 +243,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
258 | #define EMAC_MACADDRHI 0x504 | 243 | #define EMAC_MACADDRHI 0x504 |
259 | #define EMAC_MACINDEX 0x508 | 244 | #define EMAC_MACINDEX 0x508 |
260 | 245 | ||
261 | /* EMAC HDP and Completion registors */ | ||
262 | #define EMAC_TXHDP(ch) (0x600 + (ch * 4)) | ||
263 | #define EMAC_RXHDP(ch) (0x620 + (ch * 4)) | ||
264 | #define EMAC_TXCP(ch) (0x640 + (ch * 4)) | ||
265 | #define EMAC_RXCP(ch) (0x660 + (ch * 4)) | ||
266 | |||
267 | /* EMAC statistics registers */ | 246 | /* EMAC statistics registers */ |
268 | #define EMAC_RXGOODFRAMES 0x200 | 247 | #define EMAC_RXGOODFRAMES 0x200 |
269 | #define EMAC_RXBCASTFRAMES 0x204 | 248 | #define EMAC_RXBCASTFRAMES 0x204 |
@@ -328,120 +307,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
328 | /* EMAC Stats Clear Mask */ | 307 | /* EMAC Stats Clear Mask */ |
329 | #define EMAC_STATS_CLR_MASK (0xFFFFFFFF) | 308 | #define EMAC_STATS_CLR_MASK (0xFFFFFFFF) |
330 | 309 | ||
331 | /** net_buf_obj: EMAC network bufferdata structure | ||
332 | * | ||
333 | * EMAC network buffer data structure | ||
334 | */ | ||
335 | struct emac_netbufobj { | ||
336 | void *buf_token; | ||
337 | char *data_ptr; | ||
338 | int length; | ||
339 | }; | ||
340 | |||
341 | /** net_pkt_obj: EMAC network packet data structure | ||
342 | * | ||
343 | * EMAC network packet data structure - supports buffer list (for future) | ||
344 | */ | ||
345 | struct emac_netpktobj { | ||
346 | void *pkt_token; /* data token may hold tx/rx chan id */ | ||
347 | struct emac_netbufobj *buf_list; /* array of network buffer objects */ | ||
348 | int num_bufs; | ||
349 | int pkt_length; | ||
350 | }; | ||
351 | |||
352 | /** emac_tx_bd: EMAC TX Buffer descriptor data structure | ||
353 | * | ||
354 | * EMAC TX Buffer descriptor data structure | ||
355 | */ | ||
356 | struct emac_tx_bd { | ||
357 | int h_next; | ||
358 | int buff_ptr; | ||
359 | int off_b_len; | ||
360 | int mode; /* SOP, EOP, ownership, EOQ, teardown,Qstarv, length */ | ||
361 | struct emac_tx_bd __iomem *next; | ||
362 | void *buf_token; | ||
363 | }; | ||
364 | |||
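The 'mode' word in these descriptors packs the CPPI control flags listed in the comment above plus the packet length. For reference, a sketch of the common CPPI 3.0 bit layout; the exact bit positions are an assumption for illustration, not quoted from this driver.

```c
/* Assumed CPPI 3.0 layout of the descriptor 'mode' word (illustrative). */
#define CPPI_DESC_SOP		BIT(31)	/* start of packet */
#define CPPI_DESC_EOP		BIT(30)	/* end of packet */
#define CPPI_DESC_OWNER		BIT(29)	/* owned by the EMAC port, not the CPU */
#define CPPI_DESC_EOQ		BIT(28)	/* port ran off the end of the queue */
#define CPPI_DESC_TD_COMPLETE	BIT(27)	/* teardown completed on this BD */
#define CPPI_DESC_PKT_LEN_MASK	0xFFFF	/* SOP descriptors: packet length */
```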
365 | /** emac_txch: EMAC TX Channel data structure | ||
366 | * | ||
367 | * EMAC TX Channel data structure | ||
368 | */ | ||
369 | struct emac_txch { | ||
370 | /* Config related */ | ||
371 | u32 num_bd; | ||
372 | u32 service_max; | ||
373 | |||
374 | /* CPPI specific */ | ||
375 | u32 alloc_size; | ||
376 | void __iomem *bd_mem; | ||
377 | struct emac_tx_bd __iomem *bd_pool_head; | ||
378 | struct emac_tx_bd __iomem *active_queue_head; | ||
379 | struct emac_tx_bd __iomem *active_queue_tail; | ||
380 | struct emac_tx_bd __iomem *last_hw_bdprocessed; | ||
381 | u32 queue_active; | ||
382 | u32 teardown_pending; | ||
383 | u32 *tx_complete; | ||
384 | |||
385 | /** statistics */ | ||
386 | u32 proc_count; /* TX: # of times emac_tx_bdproc is called */ | ||
387 | u32 mis_queued_packets; | ||
388 | u32 queue_reinit; | ||
389 | u32 end_of_queue_add; | ||
390 | u32 out_of_tx_bd; | ||
391 | u32 no_active_pkts; /* IRQ when there were no packets to process */ | ||
392 | u32 active_queue_count; | ||
393 | }; | ||
394 | |||
395 | /** emac_rx_bd: EMAC RX Buffer descriptor data structure | ||
396 | * | ||
397 | * EMAC RX Buffer descriptor data structure | ||
398 | */ | ||
399 | struct emac_rx_bd { | ||
400 | int h_next; | ||
401 | int buff_ptr; | ||
402 | int off_b_len; | ||
403 | int mode; | ||
404 | struct emac_rx_bd __iomem *next; | ||
405 | void *data_ptr; | ||
406 | void *buf_token; | ||
407 | }; | ||
408 | |||
409 | /** emac_rxch: EMAC RX Channel data structure | ||
410 | * | ||
411 | * EMAC RX Channel data structure | ||
412 | */ | ||
413 | struct emac_rxch { | ||
414 | /* configuration info */ | ||
415 | u32 num_bd; | ||
416 | u32 service_max; | ||
417 | u32 buf_size; | ||
418 | char mac_addr[6]; | ||
419 | |||
420 | /** CPPI specific */ | ||
421 | u32 alloc_size; | ||
422 | void __iomem *bd_mem; | ||
423 | struct emac_rx_bd __iomem *bd_pool_head; | ||
424 | struct emac_rx_bd __iomem *active_queue_head; | ||
425 | struct emac_rx_bd __iomem *active_queue_tail; | ||
426 | u32 queue_active; | ||
427 | u32 teardown_pending; | ||
428 | |||
429 | /* packet and buffer objects */ | ||
430 | struct emac_netpktobj pkt_queue; | ||
431 | struct emac_netbufobj buf_queue; | ||
432 | |||
433 | /** statistics */ | ||
434 | u32 proc_count; /* number of times emac_rx_bdproc is called */ | ||
435 | u32 processed_bd; | ||
436 | u32 recycled_bd; | ||
437 | u32 out_of_rx_bd; | ||
438 | u32 out_of_rx_buffers; | ||
439 | u32 queue_reinit; | ||
440 | u32 end_of_queue_add; | ||
441 | u32 end_of_queue; | ||
442 | u32 mis_queued_packets; | ||
443 | }; | ||
444 | |||
445 | /* emac_priv: EMAC private data structure | 310 | /* emac_priv: EMAC private data structure |
446 | * | 311 | * |
447 | * EMAC adapter private data structure | 312 | * EMAC adapter private data structure |
@@ -452,17 +317,10 @@ struct emac_priv {
452 | struct platform_device *pdev; | 317 | struct platform_device *pdev; |
453 | struct napi_struct napi; | 318 | struct napi_struct napi; |
454 | char mac_addr[6]; | 319 | char mac_addr[6]; |
455 | spinlock_t tx_lock; | ||
456 | spinlock_t rx_lock; | ||
457 | void __iomem *remap_addr; | 320 | void __iomem *remap_addr; |
458 | u32 emac_base_phys; | 321 | u32 emac_base_phys; |
459 | void __iomem *emac_base; | 322 | void __iomem *emac_base; |
460 | void __iomem *ctrl_base; | 323 | void __iomem *ctrl_base; |
461 | void __iomem *emac_ctrl_ram; | ||
462 | u32 ctrl_ram_size; | ||
463 | u32 hw_ram_addr; | ||
464 | struct emac_txch *txch[EMAC_DEF_MAX_TX_CH]; | ||
465 | struct emac_rxch *rxch[EMAC_DEF_MAX_RX_CH]; | ||
466 | struct cpdma_ctlr *dma; | 324 | struct cpdma_ctlr *dma; |
467 | struct cpdma_chan *txchan; | 325 | struct cpdma_chan *txchan; |
468 | struct cpdma_chan *rxchan; | 326 | struct cpdma_chan *rxchan; |
@@ -491,18 +349,6 @@ struct emac_priv {
491 | static struct clk *emac_clk; | 349 | static struct clk *emac_clk; |
492 | static unsigned long emac_bus_frequency; | 350 | static unsigned long emac_bus_frequency; |
493 | 351 | ||
494 | #define emac_virt_to_phys(addr, priv) \ | ||
495 | (((u32 __force)(addr) - (u32 __force)(priv->emac_ctrl_ram)) \ | ||
496 | + priv->hw_ram_addr) | ||
497 | |||
498 | /* Cache macros - Packet buffers would be from skb pool which is cached */ | ||
499 | #define EMAC_VIRT_NOCACHE(addr) (addr) | ||
500 | |||
501 | /* DM644x does not have BD's in cached memory - so no cache functions */ | ||
502 | #define BD_CACHE_INVALIDATE(addr, size) | ||
503 | #define BD_CACHE_WRITEBACK(addr, size) | ||
504 | #define BD_CACHE_WRITEBACK_INVALIDATE(addr, size) | ||
505 | |||
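The removed emac_virt_to_phys() macro rebases a descriptor's CPU-visible address inside the remapped CPPI RAM onto the address the EMAC DMA engine uses (hw_ram_addr), which is what gets written into the head-descriptor and completion pointer registers. The same arithmetic written out as a helper, for reference:

```c
/* Equivalent of the removed emac_virt_to_phys() macro, spelled out. */
static u32 emac_bd_virt_to_hw(struct emac_priv *priv, void __iomem *bd)
{
	/* offset of the descriptor within the remapped CPPI RAM ... */
	u32 offset = (u32 __force)bd - (u32 __force)priv->emac_ctrl_ram;

	/* ... rebased onto the RAM's address as seen by the EMAC DMA */
	return priv->hw_ram_addr + offset;
}
```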
506 | /* EMAC TX Host Error description strings */ | 352 | /* EMAC TX Host Error description strings */ |
507 | static char *emac_txhost_errcodes[16] = { | 353 | static char *emac_txhost_errcodes[16] = { |
508 | "No error", "SOP error", "Ownership bit not set in SOP buffer", | 354 | "No error", "SOP error", "Ownership bit not set in SOP buffer", |
@@ -545,20 +391,6 @@ static void emac_dump_regs(struct emac_priv *priv)
545 | emac_ctrl_read(EMAC_CTRL_EWCTL), | 391 | emac_ctrl_read(EMAC_CTRL_EWCTL), |
546 | emac_ctrl_read(EMAC_CTRL_EWINTTCNT)); | 392 | emac_ctrl_read(EMAC_CTRL_EWINTTCNT)); |
547 | } | 393 | } |
548 | dev_info(emac_dev, "EMAC: TXID: %08X %s, RXID: %08X %s\n", | ||
549 | emac_read(EMAC_TXIDVER), | ||
550 | ((emac_read(EMAC_TXCONTROL)) ? "enabled" : "disabled"), | ||
551 | emac_read(EMAC_RXIDVER), | ||
552 | ((emac_read(EMAC_RXCONTROL)) ? "enabled" : "disabled")); | ||
553 | dev_info(emac_dev, "EMAC: TXIntRaw:%08X, TxIntMasked: %08X, "\ | ||
554 | "TxIntMasSet: %08X\n", emac_read(EMAC_TXINTSTATRAW), | ||
555 | emac_read(EMAC_TXINTSTATMASKED), emac_read(EMAC_TXINTMASKSET)); | ||
556 | dev_info(emac_dev, "EMAC: RXIntRaw:%08X, RxIntMasked: %08X, "\ | ||
557 | "RxIntMasSet: %08X\n", emac_read(EMAC_RXINTSTATRAW), | ||
558 | emac_read(EMAC_RXINTSTATMASKED), emac_read(EMAC_RXINTMASKSET)); | ||
559 | dev_info(emac_dev, "EMAC: MacIntRaw:%08X, MacIntMasked: %08X, "\ | ||
560 | "MacInVector=%08X\n", emac_read(EMAC_MACINTSTATRAW), | ||
561 | emac_read(EMAC_MACINTSTATMASKED), emac_read(EMAC_MACINVECTOR)); | ||
562 | dev_info(emac_dev, "EMAC: EmuControl:%08X, FifoControl: %08X\n", | 394 | dev_info(emac_dev, "EMAC: EmuControl:%08X, FifoControl: %08X\n", |
563 | emac_read(EMAC_EMCONTROL), emac_read(EMAC_FIFOCONTROL)); | 395 | emac_read(EMAC_EMCONTROL), emac_read(EMAC_FIFOCONTROL)); |
564 | dev_info(emac_dev, "EMAC: MBPEnable:%08X, RXUnicastSet: %08X, "\ | 396 | dev_info(emac_dev, "EMAC: MBPEnable:%08X, RXUnicastSet: %08X, "\ |
@@ -567,8 +399,6 @@ static void emac_dump_regs(struct emac_priv *priv)
567 | dev_info(emac_dev, "EMAC: MacControl:%08X, MacStatus: %08X, "\ | 399 | dev_info(emac_dev, "EMAC: MacControl:%08X, MacStatus: %08X, "\ |
568 | "MacConfig=%08X\n", emac_read(EMAC_MACCONTROL), | 400 | "MacConfig=%08X\n", emac_read(EMAC_MACCONTROL), |
569 | emac_read(EMAC_MACSTATUS), emac_read(EMAC_MACCONFIG)); | 401 | emac_read(EMAC_MACSTATUS), emac_read(EMAC_MACCONFIG)); |
570 | dev_info(emac_dev, "EMAC: TXHDP[0]:%08X, RXHDP[0]: %08X\n", | ||
571 | emac_read(EMAC_TXHDP(0)), emac_read(EMAC_RXHDP(0))); | ||
572 | dev_info(emac_dev, "EMAC Statistics\n"); | 402 | dev_info(emac_dev, "EMAC Statistics\n"); |
573 | dev_info(emac_dev, "EMAC: rx_good_frames:%d\n", | 403 | dev_info(emac_dev, "EMAC: rx_good_frames:%d\n", |
574 | emac_read(EMAC_RXGOODFRAMES)); | 404 | emac_read(EMAC_RXGOODFRAMES)); |
@@ -1223,373 +1053,6 @@ static void emac_tx_handler(void *token, int len, int status)
1223 | dev_kfree_skb_any(skb); | 1053 | dev_kfree_skb_any(skb); |
1224 | } | 1054 | } |
1225 | 1055 | ||
1226 | /** EMAC on-chip buffer descriptor memory | ||
1227 | * | ||
1228 | * WARNING: Please note that the on chip memory is used for both TX and RX | ||
1229 | * buffer descriptor queues and is equally divided between TX and RX desc's | ||
1230 | * If the number of TX or RX descriptors change this memory pointers need | ||
1231 | * to be adjusted. If external memory is allocated then these pointers can | ||
1232 | * pointer to the memory | ||
1233 | * | ||
1234 | */ | ||
1235 | #define EMAC_TX_BD_MEM(priv) ((priv)->emac_ctrl_ram) | ||
1236 | #define EMAC_RX_BD_MEM(priv) ((priv)->emac_ctrl_ram + \ | ||
1237 | (((priv)->ctrl_ram_size) >> 1)) | ||
1238 | |||
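To make the 50/50 split concrete, a worked example of the descriptor budget it yields; the 8 KiB CPPI RAM size is assumed purely for illustration (the real figure comes from platform data as ctrl_ram_size).

```c
/* Worked example of the TX/RX split of the on-chip CPPI RAM (illustrative;
 * 8 KiB is an assumed size, the driver really uses pdata->ctrl_ram_size). */
static u32 example_tx_bd_count(void)
{
	u32 ctrl_ram_size = 8 * 1024;		/* assumed total CPPI RAM */
	u32 bd_size = (sizeof(struct emac_tx_bd) + 0xF) & ~0xF; /* 16-byte aligned */
	u32 tx_half = ctrl_ram_size >> 1;	/* TX pool: first half, RX: second */

	return tx_half / bd_size;		/* e.g. 4096 / 32 = 128 descriptors */
}
```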
1239 | /** | ||
1240 | * emac_init_txch: TX channel initialization | ||
1241 | * @priv: The DaVinci EMAC private adapter structure | ||
1242 | * @ch: RX channel number | ||
1243 | * | ||
1244 | * Called during device init to setup a TX channel (allocate buffer desc | ||
1245 | * create free pool and keep ready for transmission | ||
1246 | * | ||
1247 | * Returns success(0) or mem alloc failures error code | ||
1248 | */ | ||
1249 | static int emac_init_txch(struct emac_priv *priv, u32 ch) | ||
1250 | { | ||
1251 | struct device *emac_dev = &priv->ndev->dev; | ||
1252 | u32 cnt, bd_size; | ||
1253 | void __iomem *mem; | ||
1254 | struct emac_tx_bd __iomem *curr_bd; | ||
1255 | struct emac_txch *txch = NULL; | ||
1256 | |||
1257 | txch = kzalloc(sizeof(struct emac_txch), GFP_KERNEL); | ||
1258 | if (NULL == txch) { | ||
1259 | dev_err(emac_dev, "DaVinci EMAC: TX Ch mem alloc failed"); | ||
1260 | return -ENOMEM; | ||
1261 | } | ||
1262 | priv->txch[ch] = txch; | ||
1263 | txch->service_max = EMAC_DEF_TX_MAX_SERVICE; | ||
1264 | txch->active_queue_head = NULL; | ||
1265 | txch->active_queue_tail = NULL; | ||
1266 | txch->queue_active = 0; | ||
1267 | txch->teardown_pending = 0; | ||
1268 | |||
1269 | /* allocate memory for TX CPPI channel on a 4 byte boundry */ | ||
1270 | txch->tx_complete = kzalloc(txch->service_max * sizeof(u32), | ||
1271 | GFP_KERNEL); | ||
1272 | if (NULL == txch->tx_complete) { | ||
1273 | dev_err(emac_dev, "DaVinci EMAC: Tx service mem alloc failed"); | ||
1274 | kfree(txch); | ||
1275 | return -ENOMEM; | ||
1276 | } | ||
1277 | |||
1278 | /* allocate buffer descriptor pool align every BD on four word | ||
1279 | * boundry for future requirements */ | ||
1280 | bd_size = (sizeof(struct emac_tx_bd) + 0xF) & ~0xF; | ||
1281 | txch->num_bd = (priv->ctrl_ram_size >> 1) / bd_size; | ||
1282 | txch->alloc_size = (((bd_size * txch->num_bd) + 0xF) & ~0xF); | ||
1283 | |||
1284 | /* alloc TX BD memory */ | ||
1285 | txch->bd_mem = EMAC_TX_BD_MEM(priv); | ||
1286 | __memzero((void __force *)txch->bd_mem, txch->alloc_size); | ||
1287 | |||
1288 | /* initialize the BD linked list */ | ||
1289 | mem = (void __force __iomem *) | ||
1290 | (((u32 __force) txch->bd_mem + 0xF) & ~0xF); | ||
1291 | txch->bd_pool_head = NULL; | ||
1292 | for (cnt = 0; cnt < txch->num_bd; cnt++) { | ||
1293 | curr_bd = mem + (cnt * bd_size); | ||
1294 | curr_bd->next = txch->bd_pool_head; | ||
1295 | txch->bd_pool_head = curr_bd; | ||
1296 | } | ||
1297 | |||
1298 | /* reset statistics counters */ | ||
1299 | txch->out_of_tx_bd = 0; | ||
1300 | txch->no_active_pkts = 0; | ||
1301 | txch->active_queue_count = 0; | ||
1302 | |||
1303 | return 0; | ||
1304 | } | ||
1305 | |||
1306 | /** | ||
1307 | * emac_cleanup_txch: Book-keep function to clean TX channel resources | ||
1308 | * @priv: The DaVinci EMAC private adapter structure | ||
1309 | * @ch: TX channel number | ||
1310 | * | ||
1311 | * Called to clean up TX channel resources | ||
1312 | * | ||
1313 | */ | ||
1314 | static void emac_cleanup_txch(struct emac_priv *priv, u32 ch) | ||
1315 | { | ||
1316 | struct emac_txch *txch = priv->txch[ch]; | ||
1317 | |||
1318 | if (txch) { | ||
1319 | if (txch->bd_mem) | ||
1320 | txch->bd_mem = NULL; | ||
1321 | kfree(txch->tx_complete); | ||
1322 | kfree(txch); | ||
1323 | priv->txch[ch] = NULL; | ||
1324 | } | ||
1325 | } | ||
1326 | |||
1327 | /** | ||
1328 | * emac_net_tx_complete: TX packet completion function | ||
1329 | * @priv: The DaVinci EMAC private adapter structure | ||
1330 | * @net_data_tokens: packet token - skb pointer | ||
1331 | * @num_tokens: number of skb's to free | ||
1332 | * @ch: TX channel number | ||
1333 | * | ||
1334 | * Frees the skb once packet is transmitted | ||
1335 | * | ||
1336 | */ | ||
1337 | static int emac_net_tx_complete(struct emac_priv *priv, | ||
1338 | void **net_data_tokens, | ||
1339 | int num_tokens, u32 ch) | ||
1340 | { | ||
1341 | struct net_device *ndev = priv->ndev; | ||
1342 | u32 cnt; | ||
1343 | |||
1344 | if (unlikely(num_tokens && netif_queue_stopped(ndev))) | ||
1345 | netif_start_queue(ndev); | ||
1346 | for (cnt = 0; cnt < num_tokens; cnt++) { | ||
1347 | struct sk_buff *skb = (struct sk_buff *)net_data_tokens[cnt]; | ||
1348 | if (skb == NULL) | ||
1349 | continue; | ||
1350 | ndev->stats.tx_packets++; | ||
1351 | ndev->stats.tx_bytes += skb->len; | ||
1352 | dev_kfree_skb_any(skb); | ||
1353 | } | ||
1354 | return 0; | ||
1355 | } | ||
1356 | |||
1357 | /** | ||
1358 | * emac_txch_teardown: TX channel teardown | ||
1359 | * @priv: The DaVinci EMAC private adapter structure | ||
1360 | * @ch: TX channel number | ||
1361 | * | ||
1362 | * Called to teardown TX channel | ||
1363 | * | ||
1364 | */ | ||
1365 | static void emac_txch_teardown(struct emac_priv *priv, u32 ch) | ||
1366 | { | ||
1367 | struct device *emac_dev = &priv->ndev->dev; | ||
1368 | u32 teardown_cnt = 0xFFFFFFF0; /* Some high value */ | ||
1369 | struct emac_txch *txch = priv->txch[ch]; | ||
1370 | struct emac_tx_bd __iomem *curr_bd; | ||
1371 | |||
1372 | while ((emac_read(EMAC_TXCP(ch)) & EMAC_TEARDOWN_VALUE) != | ||
1373 | EMAC_TEARDOWN_VALUE) { | ||
1374 | /* wait till tx teardown complete */ | ||
1375 | cpu_relax(); /* TODO: check if this helps ... */ | ||
1376 | --teardown_cnt; | ||
1377 | if (0 == teardown_cnt) { | ||
1378 | dev_err(emac_dev, "EMAC: TX teardown aborted\n"); | ||
1379 | break; | ||
1380 | } | ||
1381 | } | ||
1382 | emac_write(EMAC_TXCP(ch), EMAC_TEARDOWN_VALUE); | ||
1383 | |||
1384 | /* process sent packets and return skb's to upper layer */ | ||
1385 | if (1 == txch->queue_active) { | ||
1386 | curr_bd = txch->active_queue_head; | ||
1387 | while (curr_bd != NULL) { | ||
1388 | dma_unmap_single(emac_dev, curr_bd->buff_ptr, | ||
1389 | curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE, | ||
1390 | DMA_TO_DEVICE); | ||
1391 | |||
1392 | emac_net_tx_complete(priv, (void __force *) | ||
1393 | &curr_bd->buf_token, 1, ch); | ||
1394 | if (curr_bd != txch->active_queue_tail) | ||
1395 | curr_bd = curr_bd->next; | ||
1396 | else | ||
1397 | break; | ||
1398 | } | ||
1399 | txch->bd_pool_head = txch->active_queue_head; | ||
1400 | txch->active_queue_head = | ||
1401 | txch->active_queue_tail = NULL; | ||
1402 | } | ||
1403 | } | ||
1404 | |||
1405 | /** | ||
1406 | * emac_stop_txch: Stop TX channel operation | ||
1407 | * @priv: The DaVinci EMAC private adapter structure | ||
1408 | * @ch: TX channel number | ||
1409 | * | ||
1410 | * Called to stop TX channel operation | ||
1411 | * | ||
1412 | */ | ||
1413 | static void emac_stop_txch(struct emac_priv *priv, u32 ch) | ||
1414 | { | ||
1415 | struct emac_txch *txch = priv->txch[ch]; | ||
1416 | |||
1417 | if (txch) { | ||
1418 | txch->teardown_pending = 1; | ||
1419 | emac_write(EMAC_TXTEARDOWN, 0); | ||
1420 | emac_txch_teardown(priv, ch); | ||
1421 | txch->teardown_pending = 0; | ||
1422 | emac_write(EMAC_TXINTMASKCLEAR, BIT(ch)); | ||
1423 | } | ||
1424 | } | ||
1425 | |||
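After the conversion none of this teardown bookkeeping remains in the driver; the handshake (write the teardown register, poll the completion register for EMAC_TEARDOWN_VALUE, then acknowledge it) is handled inside the cpdma layer. A hedged sketch of the replacement shutdown call, with the cpdma entry point assumed from the parent commit:

```c
/* Hedged sketch: DMA shutdown after the cpdma conversion (name assumed). */
static void emac_shutdown_dma_sketch(struct emac_priv *priv)
{
	/* performs the per-channel teardown handshake internally */
	cpdma_ctlr_stop(priv->dma);
}
```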
1426 | /** | ||
1427 | * emac_tx_bdproc: TX buffer descriptor (packet) processing | ||
1428 | * @priv: The DaVinci EMAC private adapter structure | ||
1429 | * @ch: TX channel number to process buffer descriptors for | ||
1430 | * @budget: number of packets allowed to process | ||
1431 | * @pending: indication to caller that packets are pending to process | ||
1432 | * | ||
1433 | * Processes TX buffer descriptors after packets are transmitted - checks | ||
1434 | * ownership bit on the TX * descriptor and requeues it to free pool & frees | ||
1435 | * the SKB buffer. Only "budget" number of packets are processed and | ||
1436 | * indication of pending packets provided to the caller | ||
1437 | * | ||
1438 | * Returns number of packets processed | ||
1439 | */ | ||
1440 | static int emac_tx_bdproc(struct emac_priv *priv, u32 ch, u32 budget) | ||
1441 | { | ||
1442 | struct device *emac_dev = &priv->ndev->dev; | ||
1443 | unsigned long flags; | ||
1444 | u32 frame_status; | ||
1445 | u32 pkts_processed = 0; | ||
1446 | u32 tx_complete_cnt = 0; | ||
1447 | struct emac_tx_bd __iomem *curr_bd; | ||
1448 | struct emac_txch *txch = priv->txch[ch]; | ||
1449 | u32 *tx_complete_ptr = txch->tx_complete; | ||
1450 | |||
1451 | if (unlikely(1 == txch->teardown_pending)) { | ||
1452 | if (netif_msg_tx_err(priv) && net_ratelimit()) { | ||
1453 | dev_err(emac_dev, "DaVinci EMAC:emac_tx_bdproc: "\ | ||
1454 | "teardown pending\n"); | ||
1455 | } | ||
1456 | return 0; /* dont handle any pkt completions */ | ||
1457 | } | ||
1458 | |||
1459 | ++txch->proc_count; | ||
1460 | spin_lock_irqsave(&priv->tx_lock, flags); | ||
1461 | curr_bd = txch->active_queue_head; | ||
1462 | if (NULL == curr_bd) { | ||
1463 | emac_write(EMAC_TXCP(ch), | ||
1464 | emac_virt_to_phys(txch->last_hw_bdprocessed, priv)); | ||
1465 | txch->no_active_pkts++; | ||
1466 | spin_unlock_irqrestore(&priv->tx_lock, flags); | ||
1467 | return 0; | ||
1468 | } | ||
1469 | BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE); | ||
1470 | frame_status = curr_bd->mode; | ||
1471 | while ((curr_bd) && | ||
1472 | ((frame_status & EMAC_CPPI_OWNERSHIP_BIT) == 0) && | ||
1473 | (pkts_processed < budget)) { | ||
1474 | emac_write(EMAC_TXCP(ch), emac_virt_to_phys(curr_bd, priv)); | ||
1475 | txch->active_queue_head = curr_bd->next; | ||
1476 | if (frame_status & EMAC_CPPI_EOQ_BIT) { | ||
1477 | if (curr_bd->next) { /* misqueued packet */ | ||
1478 | emac_write(EMAC_TXHDP(ch), curr_bd->h_next); | ||
1479 | ++txch->mis_queued_packets; | ||
1480 | } else { | ||
1481 | txch->queue_active = 0; /* end of queue */ | ||
1482 | } | ||
1483 | } | ||
1484 | |||
1485 | dma_unmap_single(emac_dev, curr_bd->buff_ptr, | ||
1486 | curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE, | ||
1487 | DMA_TO_DEVICE); | ||
1488 | |||
1489 | *tx_complete_ptr = (u32) curr_bd->buf_token; | ||
1490 | ++tx_complete_ptr; | ||
1491 | ++tx_complete_cnt; | ||
1492 | curr_bd->next = txch->bd_pool_head; | ||
1493 | txch->bd_pool_head = curr_bd; | ||
1494 | --txch->active_queue_count; | ||
1495 | pkts_processed++; | ||
1496 | txch->last_hw_bdprocessed = curr_bd; | ||
1497 | curr_bd = txch->active_queue_head; | ||
1498 | if (curr_bd) { | ||
1499 | BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE); | ||
1500 | frame_status = curr_bd->mode; | ||
1501 | } | ||
1502 | } /* end of pkt processing loop */ | ||
1503 | |||
1504 | emac_net_tx_complete(priv, | ||
1505 | (void *)&txch->tx_complete[0], | ||
1506 | tx_complete_cnt, ch); | ||
1507 | spin_unlock_irqrestore(&priv->tx_lock, flags); | ||
1508 | return pkts_processed; | ||
1509 | } | ||
1510 | |||
1511 | #define EMAC_ERR_TX_OUT_OF_BD -1 | ||
1512 | |||
1513 | /** | ||
1514 | * emac_send: EMAC Transmit function (internal) | ||
1515 | * @priv: The DaVinci EMAC private adapter structure | ||
1516 | * @pkt: packet pointer (contains skb ptr) | ||
1517 | * @ch: TX channel number | ||
1518 | * | ||
1519 | * Called by the transmit function to queue the packet in EMAC hardware queue | ||
1520 | * | ||
1521 | * Returns success(0) or error code (typically out of desc's) | ||
1522 | */ | ||
1523 | static int emac_send(struct emac_priv *priv, struct emac_netpktobj *pkt, u32 ch) | ||
1524 | { | ||
1525 | unsigned long flags; | ||
1526 | struct emac_tx_bd __iomem *curr_bd; | ||
1527 | struct emac_txch *txch; | ||
1528 | struct emac_netbufobj *buf_list; | ||
1529 | |||
1530 | txch = priv->txch[ch]; | ||
1531 | buf_list = pkt->buf_list; /* get handle to the buffer array */ | ||
1532 | |||
1533 | /* check packet size and pad if short */ | ||
1534 | if (pkt->pkt_length < EMAC_DEF_MIN_ETHPKTSIZE) { | ||
1535 | buf_list->length += (EMAC_DEF_MIN_ETHPKTSIZE - pkt->pkt_length); | ||
1536 | pkt->pkt_length = EMAC_DEF_MIN_ETHPKTSIZE; | ||
1537 | } | ||
1538 | |||
1539 | spin_lock_irqsave(&priv->tx_lock, flags); | ||
1540 | curr_bd = txch->bd_pool_head; | ||
1541 | if (curr_bd == NULL) { | ||
1542 | txch->out_of_tx_bd++; | ||
1543 | spin_unlock_irqrestore(&priv->tx_lock, flags); | ||
1544 | return EMAC_ERR_TX_OUT_OF_BD; | ||
1545 | } | ||
1546 | |||
1547 | txch->bd_pool_head = curr_bd->next; | ||
1548 | curr_bd->buf_token = buf_list->buf_token; | ||
1549 | curr_bd->buff_ptr = dma_map_single(&priv->ndev->dev, buf_list->data_ptr, | ||
1550 | buf_list->length, DMA_TO_DEVICE); | ||
1551 | curr_bd->off_b_len = buf_list->length; | ||
1552 | curr_bd->h_next = 0; | ||
1553 | curr_bd->next = NULL; | ||
1554 | curr_bd->mode = (EMAC_CPPI_SOP_BIT | EMAC_CPPI_OWNERSHIP_BIT | | ||
1555 | EMAC_CPPI_EOP_BIT | pkt->pkt_length); | ||
1556 | |||
1557 | /* flush the packet from cache if write back cache is present */ | ||
1558 | BD_CACHE_WRITEBACK_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE); | ||
1559 | |||
1560 | /* send the packet */ | ||
1561 | if (txch->active_queue_head == NULL) { | ||
1562 | txch->active_queue_head = curr_bd; | ||
1563 | txch->active_queue_tail = curr_bd; | ||
1564 | if (1 != txch->queue_active) { | ||
1565 | emac_write(EMAC_TXHDP(ch), | ||
1566 | emac_virt_to_phys(curr_bd, priv)); | ||
1567 | txch->queue_active = 1; | ||
1568 | } | ||
1569 | ++txch->queue_reinit; | ||
1570 | } else { | ||
1571 | register struct emac_tx_bd __iomem *tail_bd; | ||
1572 | register u32 frame_status; | ||
1573 | |||
1574 | tail_bd = txch->active_queue_tail; | ||
1575 | tail_bd->next = curr_bd; | ||
1576 | txch->active_queue_tail = curr_bd; | ||
1577 | tail_bd = EMAC_VIRT_NOCACHE(tail_bd); | ||
1578 | tail_bd->h_next = (int)emac_virt_to_phys(curr_bd, priv); | ||
1579 | frame_status = tail_bd->mode; | ||
1580 | if (frame_status & EMAC_CPPI_EOQ_BIT) { | ||
1581 | emac_write(EMAC_TXHDP(ch), | ||
1582 | emac_virt_to_phys(curr_bd, priv)); | ||
1583 | frame_status &= ~(EMAC_CPPI_EOQ_BIT); | ||
1584 | tail_bd->mode = frame_status; | ||
1585 | ++txch->end_of_queue_add; | ||
1586 | } | ||
1587 | } | ||
1588 | txch->active_queue_count++; | ||
1589 | spin_unlock_irqrestore(&priv->tx_lock, flags); | ||
1590 | return 0; | ||
1591 | } | ||
1592 | |||
1593 | /** | 1056 | /** |
1594 | * emac_dev_xmit: EMAC Transmit function | 1057 | * emac_dev_xmit: EMAC Transmit function |
1595 | * @skb: SKB pointer | 1058 | * @skb: SKB pointer |
@@ -1664,207 +1127,6 @@ static void emac_dev_tx_timeout(struct net_device *ndev)
1664 | } | 1127 | } |
1665 | 1128 | ||
1666 | /** | 1129 | /** |
1667 | * emac_net_alloc_rx_buf: Allocate a skb for RX | ||
1668 | * @priv: The DaVinci EMAC private adapter structure | ||
1669 | * @buf_size: size of SKB data buffer to allocate | ||
1670 | * @data_token: data token returned (skb handle for storing in buffer desc) | ||
1671 | * @ch: RX channel number | ||
1672 | * | ||
1673 | * Called during RX channel setup - allocates skb buffer of required size | ||
1674 | * and provides the skb handle and allocated buffer data pointer to caller | ||
1675 | * | ||
1676 | * Returns skb data pointer or 0 on failure to alloc skb | ||
1677 | */ | ||
1678 | static void *emac_net_alloc_rx_buf(struct emac_priv *priv, int buf_size, | ||
1679 | void **data_token, u32 ch) | ||
1680 | { | ||
1681 | struct net_device *ndev = priv->ndev; | ||
1682 | struct device *emac_dev = &ndev->dev; | ||
1683 | struct sk_buff *p_skb; | ||
1684 | |||
1685 | p_skb = dev_alloc_skb(buf_size); | ||
1686 | if (unlikely(NULL == p_skb)) { | ||
1687 | if (netif_msg_rx_err(priv) && net_ratelimit()) | ||
1688 | dev_err(emac_dev, "DaVinci EMAC: failed to alloc skb"); | ||
1689 | return NULL; | ||
1690 | } | ||
1691 | |||
1692 | /* set device pointer in skb and reserve space for extra bytes */ | ||
1693 | p_skb->dev = ndev; | ||
1694 | skb_reserve(p_skb, NET_IP_ALIGN); | ||
1695 | *data_token = (void *) p_skb; | ||
1696 | return p_skb->data; | ||
1697 | } | ||
1698 | |||
1699 | /** | ||
1700 | * emac_init_rxch: RX channel initialization | ||
1701 | * @priv: The DaVinci EMAC private adapter structure | ||
1702 | * @ch: RX channel number | ||
1703 | * @param: mac address for RX channel | ||
1704 | * | ||
1705 | * Called during device init to setup a RX channel (allocate buffers and | ||
1706 | * buffer descriptors, create queue and keep ready for reception | ||
1707 | * | ||
1708 | * Returns success(0) or mem alloc failures error code | ||
1709 | */ | ||
1710 | static int emac_init_rxch(struct emac_priv *priv, u32 ch, char *param) | ||
1711 | { | ||
1712 | struct device *emac_dev = &priv->ndev->dev; | ||
1713 | u32 cnt, bd_size; | ||
1714 | void __iomem *mem; | ||
1715 | struct emac_rx_bd __iomem *curr_bd; | ||
1716 | struct emac_rxch *rxch = NULL; | ||
1717 | |||
1718 | rxch = kzalloc(sizeof(struct emac_rxch), GFP_KERNEL); | ||
1719 | if (NULL == rxch) { | ||
1720 | dev_err(emac_dev, "DaVinci EMAC: RX Ch mem alloc failed"); | ||
1721 | return -ENOMEM; | ||
1722 | } | ||
1723 | priv->rxch[ch] = rxch; | ||
1724 | rxch->buf_size = priv->rx_buf_size; | ||
1725 | rxch->service_max = EMAC_DEF_RX_MAX_SERVICE; | ||
1726 | rxch->queue_active = 0; | ||
1727 | rxch->teardown_pending = 0; | ||
1728 | |||
1729 | /* save mac address */ | ||
1730 | for (cnt = 0; cnt < 6; cnt++) | ||
1731 | rxch->mac_addr[cnt] = param[cnt]; | ||
1732 | |||
1733 | /* allocate buffer descriptor pool align every BD on four word | ||
1734 | * boundry for future requirements */ | ||
1735 | bd_size = (sizeof(struct emac_rx_bd) + 0xF) & ~0xF; | ||
1736 | rxch->num_bd = (priv->ctrl_ram_size >> 1) / bd_size; | ||
1737 | rxch->alloc_size = (((bd_size * rxch->num_bd) + 0xF) & ~0xF); | ||
1738 | rxch->bd_mem = EMAC_RX_BD_MEM(priv); | ||
1739 | __memzero((void __force *)rxch->bd_mem, rxch->alloc_size); | ||
1740 | rxch->pkt_queue.buf_list = &rxch->buf_queue; | ||
1741 | |||
1742 | /* allocate RX buffer and initialize the BD linked list */ | ||
1743 | mem = (void __force __iomem *) | ||
1744 | (((u32 __force) rxch->bd_mem + 0xF) & ~0xF); | ||
1745 | rxch->active_queue_head = NULL; | ||
1746 | rxch->active_queue_tail = mem; | ||
1747 | for (cnt = 0; cnt < rxch->num_bd; cnt++) { | ||
1748 | curr_bd = mem + (cnt * bd_size); | ||
1749 | /* for future use the last parameter contains the BD ptr */ | ||
1750 | curr_bd->data_ptr = emac_net_alloc_rx_buf(priv, | ||
1751 | rxch->buf_size, | ||
1752 | (void __force **)&curr_bd->buf_token, | ||
1753 | EMAC_DEF_RX_CH); | ||
1754 | if (curr_bd->data_ptr == NULL) { | ||
1755 | dev_err(emac_dev, "DaVinci EMAC: RX buf mem alloc " \ | ||
1756 | "failed for ch %d\n", ch); | ||
1757 | kfree(rxch); | ||
1758 | return -ENOMEM; | ||
1759 | } | ||
1760 | |||
1761 | /* populate the hardware descriptor */ | ||
1762 | curr_bd->h_next = emac_virt_to_phys(rxch->active_queue_head, | ||
1763 | priv); | ||
1764 | curr_bd->buff_ptr = dma_map_single(emac_dev, curr_bd->data_ptr, | ||
1765 | rxch->buf_size, DMA_FROM_DEVICE); | ||
1766 | curr_bd->off_b_len = rxch->buf_size; | ||
1767 | curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT; | ||
1768 | |||
1769 | /* write back to hardware memory */ | ||
1770 | BD_CACHE_WRITEBACK_INVALIDATE((u32) curr_bd, | ||
1771 | EMAC_BD_LENGTH_FOR_CACHE); | ||
1772 | curr_bd->next = rxch->active_queue_head; | ||
1773 | rxch->active_queue_head = curr_bd; | ||
1774 | } | ||
1775 | |||
1776 | /* At this point rxCppi->activeQueueHead points to the first | ||
1777 | RX BD ready to be given to RX HDP and rxch->active_queue_tail | ||
1778 | points to the last RX BD | ||
1779 | */ | ||
1780 | return 0; | ||
1781 | } | ||
1782 | |||
1783 | /** | ||
1784 | * emac_rxch_teardown: RX channel teardown | ||
1785 | * @priv: The DaVinci EMAC private adapter structure | ||
1786 | * @ch: RX channel number | ||
1787 | * | ||
1788 | * Called during device stop to teardown RX channel | ||
1789 | * | ||
1790 | */ | ||
1791 | static void emac_rxch_teardown(struct emac_priv *priv, u32 ch) | ||
1792 | { | ||
1793 | struct device *emac_dev = &priv->ndev->dev; | ||
1794 | u32 teardown_cnt = 0xFFFFFFF0; /* Some high value */ | ||
1795 | |||
1796 | while ((emac_read(EMAC_RXCP(ch)) & EMAC_TEARDOWN_VALUE) != | ||
1797 | EMAC_TEARDOWN_VALUE) { | ||
1798 | /* wait till tx teardown complete */ | ||
1799 | cpu_relax(); /* TODO: check if this helps ... */ | ||
1800 | --teardown_cnt; | ||
1801 | if (0 == teardown_cnt) { | ||
1802 | dev_err(emac_dev, "EMAC: RX teardown aborted\n"); | ||
1803 | break; | ||
1804 | } | ||
1805 | } | ||
1806 | emac_write(EMAC_RXCP(ch), EMAC_TEARDOWN_VALUE); | ||
1807 | } | ||
1808 | |||
1809 | /** | ||
1810 | * emac_stop_rxch: Stop RX channel operation | ||
1811 | * @priv: The DaVinci EMAC private adapter structure | ||
1812 | * @ch: RX channel number | ||
1813 | * | ||
1814 | * Called during device stop to stop RX channel operation | ||
1815 | * | ||
1816 | */ | ||
1817 | static void emac_stop_rxch(struct emac_priv *priv, u32 ch) | ||
1818 | { | ||
1819 | struct emac_rxch *rxch = priv->rxch[ch]; | ||
1820 | |||
1821 | if (rxch) { | ||
1822 | rxch->teardown_pending = 1; | ||
1823 | emac_write(EMAC_RXTEARDOWN, ch); | ||
1824 | /* wait for teardown complete */ | ||
1825 | emac_rxch_teardown(priv, ch); | ||
1826 | rxch->teardown_pending = 0; | ||
1827 | emac_write(EMAC_RXINTMASKCLEAR, BIT(ch)); | ||
1828 | } | ||
1829 | } | ||
1830 | |||
1831 | /** | ||
1832 | * emac_cleanup_rxch: Book-keep function to clean RX channel resources | ||
1833 | * @priv: The DaVinci EMAC private adapter structure | ||
1834 | * @ch: RX channel number | ||
1835 | * | ||
1836 | * Called during device stop to clean up RX channel resources | ||
1837 | * | ||
1838 | */ | ||
1839 | static void emac_cleanup_rxch(struct emac_priv *priv, u32 ch) | ||
1840 | { | ||
1841 | struct emac_rxch *rxch = priv->rxch[ch]; | ||
1842 | struct emac_rx_bd __iomem *curr_bd; | ||
1843 | |||
1844 | if (rxch) { | ||
1845 | /* free the receive buffers previously allocated */ | ||
1846 | curr_bd = rxch->active_queue_head; | ||
1847 | while (curr_bd) { | ||
1848 | if (curr_bd->buf_token) { | ||
1849 | dma_unmap_single(&priv->ndev->dev, | ||
1850 | curr_bd->buff_ptr, | ||
1851 | curr_bd->off_b_len | ||
1852 | & EMAC_RX_BD_BUF_SIZE, | ||
1853 | DMA_FROM_DEVICE); | ||
1854 | |||
1855 | dev_kfree_skb_any((struct sk_buff *)\ | ||
1856 | curr_bd->buf_token); | ||
1857 | } | ||
1858 | curr_bd = curr_bd->next; | ||
1859 | } | ||
1860 | if (rxch->bd_mem) | ||
1861 | rxch->bd_mem = NULL; | ||
1862 | kfree(rxch); | ||
1863 | priv->rxch[ch] = NULL; | ||
1864 | } | ||
1865 | } | ||
1866 | |||
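Similarly, the channel cleanup above collapses into the cpdma layer's own destructors after the conversion. A hedged sketch, with the cpdma function names assumed from the parent commit rather than taken from this patch:

```c
/* Hedged sketch: resource cleanup after the cpdma conversion (names assumed). */
static void emac_cleanup_dma_sketch(struct emac_priv *priv)
{
	/* per-channel buffers and descriptors are released by the layer */
	cpdma_chan_destroy(priv->txchan);
	cpdma_chan_destroy(priv->rxchan);
	cpdma_ctlr_destroy(priv->dma);
}
```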
1867 | /** | ||
1868 | * emac_set_type0addr: Set EMAC Type0 mac address | 1130 | * emac_set_type0addr: Set EMAC Type0 mac address |
1869 | * @priv: The DaVinci EMAC private adapter structure | 1131 | * @priv: The DaVinci EMAC private adapter structure |
1870 | * @ch: RX channel number | 1132 | * @ch: RX channel number |
@@ -2004,194 +1266,6 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
2004 | } | 1266 | } |
2005 | 1267 | ||
2006 | /** | 1268 | /** |
2007 | * emac_addbd_to_rx_queue: Recycle RX buffer descriptor | ||
2008 | * @priv: The DaVinci EMAC private adapter structure | ||
2009 | * @ch: RX channel number to process buffer descriptors for | ||
2010 | * @curr_bd: current buffer descriptor | ||
2011 | * @buffer: buffer pointer for descriptor | ||
2012 | * @buf_token: buffer token (stores skb information) | ||
2013 | * | ||
2014 | * Prepares the recycled buffer descriptor and addes it to hardware | ||
2015 | * receive queue - if queue empty this descriptor becomes the head | ||
2016 | * else addes the descriptor to end of queue | ||
2017 | * | ||
2018 | */ | ||
2019 | static void emac_addbd_to_rx_queue(struct emac_priv *priv, u32 ch, | ||
2020 | struct emac_rx_bd __iomem *curr_bd, | ||
2021 | char *buffer, void *buf_token) | ||
2022 | { | ||
2023 | struct emac_rxch *rxch = priv->rxch[ch]; | ||
2024 | |||
2025 | /* populate the hardware descriptor */ | ||
2026 | curr_bd->h_next = 0; | ||
2027 | curr_bd->buff_ptr = dma_map_single(&priv->ndev->dev, buffer, | ||
2028 | rxch->buf_size, DMA_FROM_DEVICE); | ||
2029 | curr_bd->off_b_len = rxch->buf_size; | ||
2030 | curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT; | ||
2031 | curr_bd->next = NULL; | ||
2032 | curr_bd->data_ptr = buffer; | ||
2033 | curr_bd->buf_token = buf_token; | ||
2034 | |||
2035 | /* write back */ | ||
2036 | BD_CACHE_WRITEBACK_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE); | ||
2037 | if (rxch->active_queue_head == NULL) { | ||
2038 | rxch->active_queue_head = curr_bd; | ||
2039 | rxch->active_queue_tail = curr_bd; | ||
2040 | if (0 != rxch->queue_active) { | ||
2041 | emac_write(EMAC_RXHDP(ch), | ||
2042 | emac_virt_to_phys(rxch->active_queue_head, priv)); | ||
2043 | rxch->queue_active = 1; | ||
2044 | } | ||
2045 | } else { | ||
2046 | struct emac_rx_bd __iomem *tail_bd; | ||
2047 | u32 frame_status; | ||
2048 | |||
2049 | tail_bd = rxch->active_queue_tail; | ||
2050 | rxch->active_queue_tail = curr_bd; | ||
2051 | tail_bd->next = curr_bd; | ||
2052 | tail_bd = EMAC_VIRT_NOCACHE(tail_bd); | ||
2053 | tail_bd->h_next = emac_virt_to_phys(curr_bd, priv); | ||
2054 | frame_status = tail_bd->mode; | ||
2055 | if (frame_status & EMAC_CPPI_EOQ_BIT) { | ||
2056 | emac_write(EMAC_RXHDP(ch), | ||
2057 | emac_virt_to_phys(curr_bd, priv)); | ||
2058 | frame_status &= ~(EMAC_CPPI_EOQ_BIT); | ||
2059 | tail_bd->mode = frame_status; | ||
2060 | ++rxch->end_of_queue_add; | ||
2061 | } | ||
2062 | } | ||
2063 | ++rxch->recycled_bd; | ||
2064 | } | ||
2065 | |||
2066 | /** | ||
2067 | * emac_net_rx_cb: Prepares packet and sends to upper layer | ||
2068 | * @priv: The DaVinci EMAC private adapter structure | ||
2069 | * @net_pkt_list: Network packet list (received packets) | ||
2070 | * | ||
2071 | * Invalidates packet buffer memory and sends the received packet to upper | ||
2072 | * layer | ||
2073 | * | ||
2074 | * Returns success or appropriate error code (none as of now) | ||
2075 | */ | ||
2076 | static int emac_net_rx_cb(struct emac_priv *priv, | ||
2077 | struct emac_netpktobj *net_pkt_list) | ||
2078 | { | ||
2079 | struct net_device *ndev = priv->ndev; | ||
2080 | struct sk_buff *p_skb = net_pkt_list->pkt_token; | ||
2081 | /* set length of packet */ | ||
2082 | skb_put(p_skb, net_pkt_list->pkt_length); | ||
2083 | p_skb->protocol = eth_type_trans(p_skb, priv->ndev); | ||
2084 | netif_receive_skb(p_skb); | ||
2085 | ndev->stats.rx_bytes += net_pkt_list->pkt_length; | ||
2086 | ndev->stats.rx_packets++; | ||
2087 | return 0; | ||
2088 | } | ||
2089 | |||
2090 | /** | ||
2091 | * emac_rx_bdproc: RX buffer descriptor (packet) processing | ||
2092 | * @priv: The DaVinci EMAC private adapter structure | ||
2093 | * @ch: RX channel number to process buffer descriptors for | ||
2094 | * @budget: number of packets allowed to process | ||
2095 | * @pending: indication to caller that packets are pending to process | ||
2096 | * | ||
2097 | * Processes RX buffer descriptors - checks ownership bit on the RX buffer | ||
2098 | * descriptor, sends the receive packet to upper layer, allocates a new SKB | ||
2099 | * and recycles the buffer descriptor (requeues it in hardware RX queue). | ||
2100 | * Only "budget" number of packets are processed and indication of pending | ||
2101 | * packets provided to the caller. | ||
2102 | * | ||
2103 | * Returns number of packets processed (and indication of pending packets) | ||
2104 | */ | ||
2105 | static int emac_rx_bdproc(struct emac_priv *priv, u32 ch, u32 budget) | ||
2106 | { | ||
2107 | unsigned long flags; | ||
2108 | u32 frame_status; | ||
2109 | u32 pkts_processed = 0; | ||
2110 | char *new_buffer; | ||
2111 | struct emac_rx_bd __iomem *curr_bd; | ||
2112 | struct emac_rx_bd __iomem *last_bd; | ||
2113 | struct emac_netpktobj *curr_pkt, pkt_obj; | ||
2114 | struct emac_netbufobj buf_obj; | ||
2115 | struct emac_netbufobj *rx_buf_obj; | ||
2116 | void *new_buf_token; | ||
2117 | struct emac_rxch *rxch = priv->rxch[ch]; | ||
2118 | |||
2119 | if (unlikely(1 == rxch->teardown_pending)) | ||
2120 | return 0; | ||
2121 | ++rxch->proc_count; | ||
2122 | spin_lock_irqsave(&priv->rx_lock, flags); | ||
2123 | pkt_obj.buf_list = &buf_obj; | ||
2124 | curr_pkt = &pkt_obj; | ||
2125 | curr_bd = rxch->active_queue_head; | ||
2126 | BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE); | ||
2127 | frame_status = curr_bd->mode; | ||
2128 | |||
2129 | while ((curr_bd) && | ||
2130 | ((frame_status & EMAC_CPPI_OWNERSHIP_BIT) == 0) && | ||
2131 | (pkts_processed < budget)) { | ||
2132 | |||
2133 | new_buffer = emac_net_alloc_rx_buf(priv, rxch->buf_size, | ||
2134 | &new_buf_token, EMAC_DEF_RX_CH); | ||
2135 | if (unlikely(NULL == new_buffer)) { | ||
2136 | ++rxch->out_of_rx_buffers; | ||
2137 | goto end_emac_rx_bdproc; | ||
2138 | } | ||
2139 | |||
2140 | /* populate received packet data structure */ | ||
2141 | rx_buf_obj = &curr_pkt->buf_list[0]; | ||
2142 | rx_buf_obj->data_ptr = (char *)curr_bd->data_ptr; | ||
2143 | rx_buf_obj->length = curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE; | ||
2144 | rx_buf_obj->buf_token = curr_bd->buf_token; | ||
2145 | |||
2146 | dma_unmap_single(&priv->ndev->dev, curr_bd->buff_ptr, | ||
2147 | curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE, | ||
2148 | DMA_FROM_DEVICE); | ||
2149 | |||
2150 | curr_pkt->pkt_token = curr_pkt->buf_list->buf_token; | ||
2151 | curr_pkt->num_bufs = 1; | ||
2152 | curr_pkt->pkt_length = | ||
2153 | (frame_status & EMAC_RX_BD_PKT_LENGTH_MASK); | ||
2154 | emac_write(EMAC_RXCP(ch), emac_virt_to_phys(curr_bd, priv)); | ||
2155 | ++rxch->processed_bd; | ||
2156 | last_bd = curr_bd; | ||
2157 | curr_bd = last_bd->next; | ||
2158 | rxch->active_queue_head = curr_bd; | ||
2159 | |||
2160 | /* check if end of RX queue ? */ | ||
2161 | if (frame_status & EMAC_CPPI_EOQ_BIT) { | ||
2162 | if (curr_bd) { | ||
2163 | ++rxch->mis_queued_packets; | ||
2164 | emac_write(EMAC_RXHDP(ch), | ||
2165 | emac_virt_to_phys(curr_bd, priv)); | ||
2166 | } else { | ||
2167 | ++rxch->end_of_queue; | ||
2168 | rxch->queue_active = 0; | ||
2169 | } | ||
2170 | } | ||
2171 | |||
2172 | /* recycle BD */ | ||
2173 | emac_addbd_to_rx_queue(priv, ch, last_bd, new_buffer, | ||
2174 | new_buf_token); | ||
2175 | |||
2176 | /* return the packet to the user - BD ptr passed in | ||
2177 | * last parameter for potential *future* use */ | ||
2178 | spin_unlock_irqrestore(&priv->rx_lock, flags); | ||
2179 | emac_net_rx_cb(priv, curr_pkt); | ||
2180 | spin_lock_irqsave(&priv->rx_lock, flags); | ||
2181 | curr_bd = rxch->active_queue_head; | ||
2182 | if (curr_bd) { | ||
2183 | BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE); | ||
2184 | frame_status = curr_bd->mode; | ||
2185 | } | ||
2186 | ++pkts_processed; | ||
2187 | } | ||
2188 | |||
2189 | end_emac_rx_bdproc: | ||
2190 | spin_unlock_irqrestore(&priv->rx_lock, flags); | ||
2191 | return pkts_processed; | ||
2192 | } | ||
2193 | |||
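For contrast with the removed RX descriptor processing, a hedged sketch of the per-packet handler used after the conversion: the cpdma layer invokes a callback per completed buffer, and the driver only pushes the frame to the stack and recycles a fresh skb. The function names, callback signature, and GFP flag are assumptions based on the parent commit's cpdma API, not code from this patch.

```c
/* Hedged sketch of the post-conversion RX completion handler. */
static void emac_rx_handler_sketch(void *token, int len, int status)
{
	struct sk_buff *skb = token;
	struct net_device *ndev = skb->dev;
	struct emac_priv *priv = netdev_priv(ndev);

	/* error/teardown handling omitted for brevity */

	/* hand the frame to the stack, as emac_net_rx_cb() used to */
	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, ndev);
	netif_receive_skb(skb);
	ndev->stats.rx_bytes += len;
	ndev->stats.rx_packets++;

	/* recycle a fresh buffer; replaces emac_addbd_to_rx_queue() and the
	 * manual EOQ restart logic (atomic context, hence GFP_ATOMIC here) */
	skb = netdev_alloc_skb_ip_align(ndev, priv->rx_buf_size);
	if (skb)
		cpdma_chan_submit(priv->rxchan, skb, skb->data,
				  skb_tailroom(skb), GFP_ATOMIC);
}
```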
2194 | /** | ||
2195 | * emac_hw_enable: Enable EMAC hardware for packet transmission/reception | 1269 | * emac_hw_enable: Enable EMAC hardware for packet transmission/reception |
2196 | * @priv: The DaVinci EMAC private adapter structure | 1270 | * @priv: The DaVinci EMAC private adapter structure |
2197 | * | 1271 | * |
@@ -2717,8 +1791,6 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
2717 | priv->ndev = ndev; | 1791 | priv->ndev = ndev; |
2718 | priv->msg_enable = netif_msg_init(debug_level, DAVINCI_EMAC_DEBUG); | 1792 | priv->msg_enable = netif_msg_init(debug_level, DAVINCI_EMAC_DEBUG); |
2719 | 1793 | ||
2720 | spin_lock_init(&priv->tx_lock); | ||
2721 | spin_lock_init(&priv->rx_lock); | ||
2722 | spin_lock_init(&priv->lock); | 1794 | spin_lock_init(&priv->lock); |
2723 | 1795 | ||
2724 | pdata = pdev->dev.platform_data; | 1796 | pdata = pdev->dev.platform_data; |
@@ -2766,8 +1838,6 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
2766 | ndev->base_addr = (unsigned long)priv->remap_addr; | 1838 | ndev->base_addr = (unsigned long)priv->remap_addr; |
2767 | 1839 | ||
2768 | priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset; | 1840 | priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset; |
2769 | priv->ctrl_ram_size = pdata->ctrl_ram_size; | ||
2770 | priv->emac_ctrl_ram = priv->remap_addr + pdata->ctrl_ram_offset; | ||
2771 | 1841 | ||
2772 | hw_ram_addr = pdata->hw_ram_addr; | 1842 | hw_ram_addr = pdata->hw_ram_addr; |
2773 | if (!hw_ram_addr) | 1843 | if (!hw_ram_addr) |